From 056c924594ba41000a0d3d24b84d2dc27e141954 Mon Sep 17 00:00:00 2001
From: jayyoung0802
Date: Thu, 6 Jun 2024 09:57:06 +0800
Subject: [PATCH] add uniad/vad codebase

---
 .gitignore | 160 ++
 README.md | 52 +
 adzoo/__init__.py | 0
 adzoo/bevformer/analysis_tools/__init__.py | 0
 .../bevformer/analysis_tools/analyze_logs.py | 201 ++
 adzoo/bevformer/analysis_tools/benchmark.py | 98 +
 adzoo/bevformer/analysis_tools/get_params.py | 10 +
 adzoo/bevformer/analysis_tools/visual.py | 477 ++++
 adzoo/bevformer/apis/__init__.py | 2 +
 adzoo/bevformer/apis/mmdet_train.py | 193 ++
 adzoo/bevformer/apis/test.py | 163 ++
 adzoo/bevformer/apis/train.py | 65 +
 .../configs/_base_/datasets/coco_instance.py | 48 +
 .../_base_/datasets/kitti-3d-3class.py | 140 +
 .../configs/_base_/datasets/kitti-3d-car.py | 138 +
 .../configs/_base_/datasets/lyft-3d.py | 136 +
 .../configs/_base_/datasets/nuim_instance.py | 59 +
 .../configs/_base_/datasets/nus-3d.py | 142 +
 .../configs/_base_/datasets/nus-mono3d.py | 100 +
 .../_base_/datasets/range100_lyft-3d.py | 136 +
 .../_base_/datasets/s3dis-3d-5class.py | 114 +
 .../_base_/datasets/s3dis_seg-3d-13class.py | 139 +
 .../_base_/datasets/scannet-3d-18class.py | 128 +
 .../_base_/datasets/scannet_seg-3d-20class.py | 132 +
 .../_base_/datasets/sunrgbd-3d-10class.py | 107 +
 .../_base_/datasets/waymoD5-3d-3class.py | 145 +
 .../configs/_base_/datasets/waymoD5-3d-car.py | 143 +
 .../configs/_base_/default_runtime.py | 18 +
 .../bevformer/configs/_base_/models/3dssd.py | 77 +
 .../models/cascade_mask_rcnn_r50_fpn.py | 200 ++
 .../centerpoint_01voxel_second_secfpn_nus.py | 83 +
 .../centerpoint_02pillar_second_secfpn_nus.py | 83 +
 .../bevformer/configs/_base_/models/fcos3d.py | 74 +
 .../configs/_base_/models/groupfree3d.py | 71 +
 .../bevformer/configs/_base_/models/h3dnet.py | 341 +++
 .../_base_/models/hv_pointpillars_fpn_lyft.py | 22 +
 .../_base_/models/hv_pointpillars_fpn_nus.py | 96 +
 .../hv_pointpillars_fpn_range100_lyft.py | 22 +
 .../models/hv_pointpillars_secfpn_kitti.py | 93 +
 .../models/hv_pointpillars_secfpn_waymo.py | 108 +
 .../_base_/models/hv_second_secfpn_kitti.py | 89 +
 .../_base_/models/hv_second_secfpn_waymo.py | 100 +
 .../configs/_base_/models/imvotenet_image.py | 108 +
 .../_base_/models/mask_rcnn_r50_fpn.py | 124 +
 .../configs/_base_/models/paconv_cuda_ssg.py | 7 +
 .../configs/_base_/models/paconv_ssg.py | 49 +
 .../bevformer/configs/_base_/models/parta2.py | 201 ++
 .../configs/_base_/models/pointnet2_msg.py | 28 +
 .../configs/_base_/models/pointnet2_ssg.py | 35 +
 .../configs/_base_/models/votenet.py | 73 +
 .../configs/_base_/schedules/cosine.py | 20 +
 .../configs/_base_/schedules/cyclic_20e.py | 24 +
 .../configs/_base_/schedules/cyclic_40e.py | 31 +
 .../_base_/schedules/mmdet_schedule_1x.py | 11 +
 .../configs/_base_/schedules/schedule_2x.py | 14 +
 .../configs/_base_/schedules/schedule_3x.py | 9 +
 .../_base_/schedules/seg_cosine_150e.py | 9 +
 .../_base_/schedules/seg_cosine_200e.py | 9 +
 .../_base_/schedules/seg_cosine_50e.py | 9 +
 .../configs/bevformer/bevformer_base.py | 260 ++
 .../configs/bevformer/bevformer_base_b2d.py | 363 +++
 .../configs/bevformer/bevformer_tiny.py | 270 ++
 .../configs/bevformer/bevformer_tiny_b2d.py | 360 +++
 .../bevformer_fp16/bevformer_tiny_fp16.py | 272 ++
 .../bevformerv2/bevformerv2-r50-t1-24ep.py | 360 +++
 .../bevformerv2/bevformerv2-r50-t1-48ep.py | 360 +++
 .../bevformerv2-r50-t1-base-24ep.py | 349 +++
 .../bevformerv2-r50-t1-base-48ep.py | 349 +++
 .../bevformerv2/bevformerv2-r50-t2-24ep.py | 360 +++
 .../bevformerv2/bevformerv2-r50-t2-48ep.py | 360 +++
 .../bevformerv2/bevformerv2-r50-t8-24ep.py | 361 +++
 .../configs/datasets/custom_lyft-3d.py | 136 +
 .../configs/datasets/custom_nus-3d.py | 141 +
 .../configs/datasets/custom_waymo-3d.py | 112 +
 adzoo/bevformer/create_data.py | 305 +++
 adzoo/bevformer/data_converter/__init__.py | 1 +
 .../data_converter/create_gt_database.py | 338 +++
 .../data_converter/indoor_converter.py | 108 +
 .../data_converter/kitti_converter.py | 546 ++++
 .../data_converter/kitti_data_utils.py | 554 ++++
 .../data_converter/lyft_converter.py | 268 ++
 .../data_converter/lyft_data_fixer.py | 38 +
 .../data_converter/nuimage_converter.py | 225 ++
 .../data_converter/nuscenes_converter.py | 674 +++++
 .../data_converter/s3dis_data_utils.py | 241 ++
 .../data_converter/scannet_data_utils.py | 293 ++
 .../data_converter/sunrgbd_data_utils.py | 221 ++
 .../data_converter/waymo_converter.py | 519 ++++
 adzoo/bevformer/dist_test.sh | 10 +
 adzoo/bevformer/dist_train.sh | 9 +
 adzoo/bevformer/fp16/dist_train.sh | 9 +
 adzoo/bevformer/fp16/train.py | 271 ++
 adzoo/bevformer/misc/browse_dataset.py | 240 ++
 adzoo/bevformer/misc/print_config.py | 26 +
 adzoo/bevformer/misc/visualize_results.py | 49 +
 .../mmdet3d_plugin/bevformer/__init__.py | 1 +
 .../mmdet3d_plugin/bevformer/apis/__init__.py | 3 +
 .../bevformer/apis/mmdet_train.py | 203 ++
 .../mmdet3d_plugin/bevformer/apis/test.py | 164 ++
 .../mmdet3d_plugin/bevformer/apis/train.py | 65 +
 .../bevformer/hooks/__init__.py | 1 +
 .../bevformer/hooks/custom_hooks.py | 12 +
 .../bevformer/mmdet3d_plugin/dd3d/__init__.py | 1 +
 .../mmdet3d_plugin/dd3d/datasets/__init__.py | 0
 .../mmdet3d_plugin/dd3d/datasets/nuscenes.py | 360 +++
 .../dd3d/datasets/transform_utils.py | 136 +
 .../mmdet3d_plugin/dd3d/layers/iou_loss.py | 71 +
 .../dd3d/layers/normalization.py | 40 +
 .../dd3d/layers/smooth_l1_loss.py | 80 +
 .../mmdet3d_plugin/dd3d/modeling/__init__.py | 1 +
 .../mmdet3d_plugin/dd3d/modeling/core.py | 217 ++
 .../dd3d/modeling/disentangled_box3d_loss.py | 46 +
 .../mmdet3d_plugin/dd3d/modeling/fcos2d.py | 388 +++
 .../mmdet3d_plugin/dd3d/modeling/fcos3d.py | 427 +++
 .../dd3d/modeling/nuscenes_dd3d.py | 525 ++++
 .../dd3d/modeling/prepare_targets.py | 242 ++
 .../dd3d/structures/__init__.py | 2 +
 .../mmdet3d_plugin/dd3d/structures/boxes3d.py | 321 +++
 .../dd3d/structures/image_list.py | 157 ++
 .../mmdet3d_plugin/dd3d/structures/pose.py | 164 ++
 .../dd3d/structures/transform3d.py | 896 +++++++
 .../mmdet3d_plugin/dd3d/utils/comm.py | 105 +
 .../mmdet3d_plugin/dd3d/utils/geometry.py | 204 ++
 .../mmdet3d_plugin/dd3d/utils/tasks.py | 97 +
 .../mmdet3d_plugin/dd3d/utils/tensor2d.py | 47 +
 .../dd3d/utils/visualization.py | 147 +
 .../mmdet3d_plugin/models/hooks/__init__.py | 1 +
 .../mmdet3d_plugin/models/hooks/hooks.py | 13 +
 .../convert_votenet_checkpoints.py | 152 ++
 .../model_converters/publish_model.py | 35 +
 .../model_converters/regnet2mmdet.py | 89 +
 adzoo/bevformer/test.py | 259 ++
 adzoo/bevformer/train.py | 237 ++
 adzoo/uniad/analysis_tools/__init__.py | 0
 adzoo/uniad/analysis_tools/analyze_logs.py | 201 ++
 adzoo/uniad/analysis_tools/benchmark.py | 97 +
 .../visualize/render/base_render.py | 32 +
 .../visualize/render/bev_render.py | 264 ++
 .../visualize/render/cam_render.py | 202 ++
 adzoo/uniad/analysis_tools/visualize/run.py | 338 +++
 adzoo/uniad/analysis_tools/visualize/utils.py | 131 +
 adzoo/uniad/configs/_base_/datasets/nus-3d.py | 142 +
 adzoo/uniad/configs/_base_/default_runtime.py | 18 +
 .../stage1_track_map/base_track_map.py | 580 ++++
 .../stage1_track_map/base_track_map_b2d.py | 665 +++++
 .../stage1_track_map/tiny_track_map_b2d.py | 656 +++++
 adzoo/uniad/configs/stage2_e2e/base_e2e.py | 696 +++++
 .../uniad/configs/stage2_e2e/base_e2e_b2d.py | 819 ++++++
 .../uniad/configs/stage2_e2e/tiny_e2e_b2d.py | 813 ++++++
 adzoo/uniad/data_converter/create_data.py | 109 +
 .../uniad/data_converter/uniad_create_data.sh | 7 +
 .../uniad_nuscenes_converter.py | 723 +++++
 adzoo/uniad/test.py | 145 +
 adzoo/uniad/test_utils.py | 318 +++
 adzoo/uniad/train.py | 212 ++
 adzoo/uniad/uniad_dist_eval.sh | 31 +
 adzoo/uniad/uniad_dist_train.sh | 36 +
 adzoo/uniad/uniad_vis_result.sh | 7 +
 adzoo/vad/analysis_tools/__init__.py | 0
 adzoo/vad/analysis_tools/analyze_logs.py | 201 ++
 adzoo/vad/analysis_tools/benchmark.py | 98 +
 adzoo/vad/analysis_tools/get_flops.py | 747 ++++++
 adzoo/vad/analysis_tools/get_params.py | 8 +
 adzoo/vad/analysis_tools/visualization.py | 911 +++++++
 adzoo/vad/apis/__init__.py | 3 +
 adzoo/vad/apis/mmdet_train.py | 196 ++
 adzoo/vad/apis/test.py | 215 ++
 adzoo/vad/apis/train.py | 60 +
 adzoo/vad/configs/VAD/VAD_base_e2e.py | 438 +++
 adzoo/vad/configs/VAD/VAD_base_e2e_b2d.py | 568 ++++
 adzoo/vad/configs/VAD/VAD_tiny_e2e.py | 454 ++++
 .../configs/_base_/datasets/coco_instance.py | 48 +
 .../_base_/datasets/kitti-3d-3class.py | 140 +
 .../configs/_base_/datasets/kitti-3d-car.py | 138 +
 adzoo/vad/configs/_base_/datasets/lyft-3d.py | 136 +
 .../configs/_base_/datasets/nuim_instance.py | 59 +
 adzoo/vad/configs/_base_/datasets/nus-3d.py | 142 +
 .../vad/configs/_base_/datasets/nus-mono3d.py | 100 +
 .../_base_/datasets/range100_lyft-3d.py | 136 +
 .../_base_/datasets/s3dis-3d-5class.py | 114 +
 .../_base_/datasets/s3dis_seg-3d-13class.py | 139 +
 .../_base_/datasets/scannet-3d-18class.py | 128 +
 .../_base_/datasets/scannet_seg-3d-20class.py | 132 +
 .../_base_/datasets/sunrgbd-3d-10class.py | 107 +
 .../_base_/datasets/waymoD5-3d-3class.py | 145 +
 .../configs/_base_/datasets/waymoD5-3d-car.py | 143 +
 adzoo/vad/configs/_base_/default_runtime.py | 18 +
 adzoo/vad/configs/_base_/models/3dssd.py | 77 +
 .../models/cascade_mask_rcnn_r50_fpn.py | 200 ++
 .../centerpoint_01voxel_second_secfpn_nus.py | 83 +
 .../centerpoint_02pillar_second_secfpn_nus.py | 83 +
 adzoo/vad/configs/_base_/models/fcos3d.py | 74 +
 .../vad/configs/_base_/models/groupfree3d.py | 71 +
 adzoo/vad/configs/_base_/models/h3dnet.py | 341 +++
 .../_base_/models/hv_pointpillars_fpn_lyft.py | 22 +
 .../_base_/models/hv_pointpillars_fpn_nus.py | 96 +
 .../hv_pointpillars_fpn_range100_lyft.py | 22 +
 .../models/hv_pointpillars_secfpn_kitti.py | 93 +
 .../models/hv_pointpillars_secfpn_waymo.py | 108 +
 .../_base_/models/hv_second_secfpn_kitti.py | 89 +
 .../_base_/models/hv_second_secfpn_waymo.py | 100 +
 .../configs/_base_/models/imvotenet_image.py | 108 +
 .../_base_/models/mask_rcnn_r50_fpn.py | 124 +
 .../configs/_base_/models/paconv_cuda_ssg.py | 7 +
 adzoo/vad/configs/_base_/models/paconv_ssg.py | 49 +
 adzoo/vad/configs/_base_/models/parta2.py | 201 ++
 .../configs/_base_/models/pointnet2_msg.py | 28 +
 .../configs/_base_/models/pointnet2_ssg.py | 35 +
 adzoo/vad/configs/_base_/models/votenet.py | 73 +
 adzoo/vad/configs/_base_/schedules/cosine.py | 20 +
 .../configs/_base_/schedules/cyclic_20e.py | 24 +
 .../configs/_base_/schedules/cyclic_40e.py | 31 +
 .../_base_/schedules/mmdet_schedule_1x.py | 11 +
 .../configs/_base_/schedules/schedule_2x.py | 14 +
 .../configs/_base_/schedules/schedule_3x.py | 9 +
 .../_base_/schedules/seg_cosine_150e.py | 9 +
 .../_base_/schedules/seg_cosine_200e.py | 9 +
 .../_base_/schedules/seg_cosine_50e.py | 9 +
 adzoo/vad/configs/datasets/custom_lyft-3d.py | 136 +
 adzoo/vad/configs/datasets/custom_nus-3d.py | 141 +
 adzoo/vad/configs/datasets/custom_waymo-3d.py | 112 +
 adzoo/vad/create_data.py | 305 +++
 adzoo/vad/data_converter/__init__.py | 1 +
 .../vad/data_converter/create_gt_database.py | 338 +++
 .../data_converter/vad_nuscenes_converter.py | 1005 +++++++
 adzoo/vad/dist_test.sh | 10 +
 adzoo/vad/dist_train.sh | 9 +
 adzoo/vad/misc/browse_dataset.py | 240 ++
 adzoo/vad/misc/fuse_conv_bn.py | 67 +
 adzoo/vad/misc/print_config.py | 26 +
 adzoo/vad/misc/visualize_results.py | 49 +
 .../convert_votenet_checkpoints.py | 152 ++
 adzoo/vad/model_converters/publish_model.py | 35 +
 adzoo/vad/model_converters/regnet2mmdet.py | 89 +
 adzoo/vad/test.py | 277 ++
 adzoo/vad/train.py | 237 ++
 asserts/bench2drive.jpg | Bin 0 -> 30763 bytes
 asserts/bench2drivezoo.png | Bin 0 -> 4921531 bytes
 clear.py | 55 +
 data/others/b2d_motion_anchor_infos_mode6.pkl | Bin 0 -> 3829 bytes
 .../bench2drive_base_train_val_split.json | 1 +
 docs/CONVERT_GUIDE.md | 29 +
 docs/DATA_PREP.md | 81 +
 docs/EVAL_IN_CARLA.md | 26 +
 docs/INSTALL.md | 52 +
 docs/TRAIN_EVAL.md | 68 +
 mmcv/__init__.py | 15 +
 mmcv/core/__init__.py | 10 +
 mmcv/core/anchor/__init__.py | 18 +
 mmcv/core/anchor/anchor_3d_generator.py | 404 +++
 mmcv/core/anchor/anchor_generator.py | 838 ++++++
 mmcv/core/anchor/builder.py | 18 +
 mmcv/core/anchor/point_generator.py | 241 ++
 mmcv/core/anchor/utils.py | 71 +
 mmcv/core/bbox/__init__.py | 13 +
 mmcv/core/bbox/assigners/__init__.py | 10 +
 mmcv/core/bbox/assigners/assign_result.py | 204 ++
 mmcv/core/bbox/assigners/base_assigner.py | 9 +
 .../core/bbox/assigners/hungarian_assigner.py | 145 +
 .../bbox/assigners/hungarian_assigner_3d.py | 136 +
 .../assigners/hungarian_assigner_3d_track.py | 122 +
 .../assigners/map_hungarian_assigner_3d.py | 162 ++
 mmcv/core/bbox/box_np_ops.py | 896 +++++++
 mmcv/core/bbox/builder.py | 20 +
 mmcv/core/bbox/coder/__init__.py | 11 +
 mmcv/core/bbox/coder/base_bbox_coder.py | 17 +
 mmcv/core/bbox/coder/detr3d_track_coder.py | 156 ++
 mmcv/core/bbox/coder/fut_nms_free_coder.py | 127 +
 mmcv/core/bbox/coder/map_nms_free_coder.py | 126 +
 mmcv/core/bbox/coder/nms_free_coder.py | 124 +
 mmcv/core/bbox/iou_calculators/__init__.py | 11 +
 mmcv/core/bbox/iou_calculators/builder.py | 8 +
 .../bbox/iou_calculators/iou2d_calculator.py | 260 ++
 .../bbox/iou_calculators/iou3d_calculator.py | 321 +++
 mmcv/core/bbox/match_costs/__init__.py | 7 +
 mmcv/core/bbox/match_costs/builder.py | 8 +
 mmcv/core/bbox/match_costs/match_cost.py | 324 +++
 mmcv/core/bbox/samplers/__init__.py | 6 +
 mmcv/core/bbox/samplers/base_sampler.py | 101 +
 mmcv/core/bbox/samplers/pseudo_sampler.py | 41 +
 mmcv/core/bbox/samplers/sampling_result.py | 152 ++
 mmcv/core/bbox/structures/__init__.py | 5 +
 mmcv/core/bbox/structures/base_box3d.py | 462 ++++
 mmcv/core/bbox/structures/box_3d_mode.py | 166 ++
 mmcv/core/bbox/structures/cam_box3d.py | 324 +++
 mmcv/core/bbox/structures/coord_3d_mode.py | 281 ++
 mmcv/core/bbox/structures/depth_box3d.py | 343 +++
 mmcv/core/bbox/structures/lidar_box3d.py | 270 ++
 mmcv/core/bbox/structures/nuscenes_box.py | 458 ++++
 mmcv/core/bbox/structures/utils.py | 214 ++
 mmcv/core/bbox/transforms.py | 320 +++
 mmcv/core/bbox/util.py | 53 +
 mmcv/core/evaluation/__init__.py | 13 +
 mmcv/core/evaluation/bbox_overlaps.py | 48 +
 mmcv/core/evaluation/class_names.py | 219 ++
 mmcv/core/evaluation/eval_hooks.py | 133 +
 mmcv/core/evaluation/indoor_eval.py | 310 +++
 mmcv/core/evaluation/kitti_utils/__init__.py | 4 +
 mmcv/core/evaluation/kitti_utils/eval.py | 847 ++++++
 .../core/evaluation/kitti_utils/rotate_iou.py | 379 +++
 mmcv/core/evaluation/lyft_eval.py | 284 ++
 mmcv/core/evaluation/mean_ap.py | 467 ++++
 mmcv/core/evaluation/metric_motion.py | 70 +
 mmcv/core/evaluation/metrics.py | 325 +++
 mmcv/core/evaluation/recall.py | 189 ++
 mmcv/core/evaluation/seg_eval.py | 131 +
 .../waymo_utils/prediction_kitti_to_waymo.py | 262 ++
 mmcv/core/mask/__init__.py | 6 +
 mmcv/core/mask/mask_target.py | 126 +
 mmcv/core/mask/structures.py | 1037 ++++++++
 mmcv/core/mask/utils.py | 63 +
 mmcv/core/points/__init__.py | 30 +
 mmcv/core/points/base_points.py | 436 +++
 mmcv/core/points/cam_points.py | 70 +
 mmcv/core/points/depth_points.py | 70 +
 mmcv/core/points/lidar_points.py | 70 +
 mmcv/core/post_processing/__init__.py | 9 +
 mmcv/core/post_processing/bbox_nms.py | 170 ++
 mmcv/core/post_processing/box3d_nms.py | 220 ++
 mmcv/core/post_processing/merge_augs.py | 241 ++
 mmcv/core/utils/__init__.py | 9 +
 mmcv/core/utils/dist_utils.py | 69 +
 mmcv/core/utils/gaussian.py | 86 +
 mmcv/core/utils/misc.py | 102 +
 mmcv/core/visualization/__init__.py | 4 +
 mmcv/core/visualization/image.py | 372 +++
 mmcv/core/visualizer/__init__.py | 5 +
 mmcv/core/visualizer/image_vis.py | 198 ++
 mmcv/core/visualizer/open3d_vis.py | 443 ++++
 mmcv/core/visualizer/show_result.py | 272 ++
 mmcv/core/voxel/__init__.py | 5 +
 mmcv/core/voxel/builder.py | 14 +
 mmcv/core/voxel/voxel_generator.py | 280 ++
 mmcv/datasets/B2D_dataset.py | 504 ++++
 mmcv/datasets/B2D_e2e_dataset.py | 855 ++++++
 mmcv/datasets/B2D_vad_dataset.py | 1037 ++++++++
 mmcv/datasets/__init__.py | 15 +
 mmcv/datasets/api_wrappers/__init__.py | 3 +
 mmcv/datasets/api_wrappers/coco_api.py | 46 +
 mmcv/datasets/builder.py | 204 ++
 mmcv/datasets/coco.py | 558 ++++
 mmcv/datasets/custom.py | 362 +++
 mmcv/datasets/custom_3d.py | 370 +++
 mmcv/datasets/custom_nuscenes_dataset.py | 246 ++
 mmcv/datasets/custom_nuscenes_dataset_v2.py | 302 +++
 mmcv/datasets/data_utils/data_utils.py | 174 ++
 mmcv/datasets/data_utils/rasterize.py | 160 ++
 mmcv/datasets/data_utils/trajectory_api.py | 283 ++
 mmcv/datasets/data_utils/vector_map.py | 246 ++
 mmcv/datasets/dataset_wrappers.py | 353 +++
 mmcv/datasets/dd3d_nuscenes_dataset.py | 359 +++
 mmcv/datasets/eval_utils/eval_utils.py | 911 +++++++
 mmcv/datasets/eval_utils/map_api.py | 2355 +++++++++++++++++
 mmcv/datasets/eval_utils/metric_utils.py | 104 +
 mmcv/datasets/eval_utils/nuscenes_eval.py | 705 +++++
 .../eval_utils/nuscenes_eval_motion.py | 933 +++++++
 mmcv/datasets/lyft_dataset.py | 561 ++++
 mmcv/datasets/map_utils/mean_ap.py | 390 +++
 mmcv/datasets/map_utils/struct.py | 438 +++
 mmcv/datasets/map_utils/tpfp.py | 363 +++
 mmcv/datasets/map_utils/tpfp_chamfer.py | 335 +++
 mmcv/datasets/nuscenes_dataset.py | 658 +++++
 mmcv/datasets/nuscenes_e2e_dataset.py | 1247 +++++++++
 mmcv/datasets/nuscenes_eval.py | 752 ++++++
 mmcv/datasets/nuscenes_mono_dataset.py | 777 ++++++
 mmcv/datasets/nuscenes_styled_eval_utils.py | 755 ++++++
 mmcv/datasets/nuscenes_vad_dataset.py | 1933 ++++++++++++++
 mmcv/datasets/nuscnes_eval.py | 756 ++++++
 mmcv/datasets/pipelines/__init__.py | 50 +
 mmcv/datasets/pipelines/compose.py | 51 +
 mmcv/datasets/pipelines/data_augment_utils.py | 409 +++
 mmcv/datasets/pipelines/formating.py | 700 +++++
 mmcv/datasets/pipelines/loading.py | 1709 ++++++++++++
 mmcv/datasets/pipelines/occflow_label.py | 286 ++
 mmcv/datasets/pipelines/test_time_aug.py | 233 ++
 mmcv/datasets/pipelines/transforms.py | 1906 +++++++++++++
 mmcv/datasets/pipelines/transforms_3d.py | 2042 ++++++++++++++
 mmcv/datasets/prepare_B2D.py | 401 +++
 mmcv/datasets/samplers/__init__.py | 5 +
 mmcv/datasets/samplers/distributed_sampler.py | 41 +
 mmcv/datasets/samplers/group_sampler.py | 146 +
 mmcv/datasets/samplers/sampler.py | 7 +
 mmcv/datasets/utils.py | 298 +++
 mmcv/datasets/vad_custom_nuscenes_eval.py | 834 ++++++
 mmcv/datasets/vis_utils.py | 670 +++++
 mmcv/fileio/__init__.py | 5 +
 mmcv/fileio/file_client.py | 1146 ++++++++
 mmcv/fileio/handlers/__init__.py | 4 +
 mmcv/fileio/handlers/base.py | 30 +
 mmcv/fileio/handlers/json_handler.py | 36 +
 mmcv/fileio/handlers/pickle_handler.py | 28 +
 mmcv/fileio/io.py | 154 ++
 mmcv/fileio/parse.py | 97 +
 mmcv/image/__init__.py | 27 +
 mmcv/image/colorspace.py | 306 +++
 mmcv/image/geometric.py | 728 +++++
 mmcv/image/io.py | 262 ++
 mmcv/image/misc.py | 43 +
 mmcv/image/photometric.py | 428 +++
 mmcv/layers/__init__.py | 6 +
 mmcv/layers/aspp.py | 144 +
 mmcv/layers/batch_norm.py | 384 +++
 mmcv/layers/blocks.py | 111 +
 mmcv/layers/csrc/README.md | 7 +
 .../csrc/ROIAlignRotated/ROIAlignRotated.h | 115 +
 .../ROIAlignRotated/ROIAlignRotated_cpu.cpp | 522 ++++
 .../ROIAlignRotated/ROIAlignRotated_cuda.cu | 443 ++++
 .../csrc/box_iou_rotated/box_iou_rotated.h | 35 +
 .../box_iou_rotated/box_iou_rotated_cpu.cpp | 39 +
 .../box_iou_rotated/box_iou_rotated_cuda.cu | 130 +
 .../box_iou_rotated/box_iou_rotated_utils.h | 391 +++
 mmcv/layers/csrc/cocoeval/cocoeval.cpp | 507 ++++
 mmcv/layers/csrc/cocoeval/cocoeval.h | 88 +
 mmcv/layers/csrc/cuda_version.cu | 26 +
 mmcv/layers/csrc/deformable/deform_conv.h | 377 +++
 .../csrc/deformable/deform_conv_cuda.cu | 1223 +++++++++
 .../deformable/deform_conv_cuda_kernel.cu | 1288 +++++++++
 mmcv/layers/csrc/nms_rotated/nms_rotated.h | 39 +
 .../csrc/nms_rotated/nms_rotated_cpu.cpp | 75 +
 .../csrc/nms_rotated/nms_rotated_cuda.cu | 145 +
 mmcv/layers/csrc/vision.cpp | 117 +
 mmcv/layers/deform_conv.py | 514 ++++
 mmcv/layers/losses.py | 133 +
 mmcv/layers/mask_ops.py | 275 ++
 mmcv/layers/nms.py | 144 +
 mmcv/layers/roi_align.py | 74 +
 mmcv/layers/roi_align_rotated.py | 100 +
 mmcv/layers/rotated_boxes.py | 21 +
 mmcv/layers/shape_spec.py | 18 +
 mmcv/layers/wrappers.py | 162 ++
 mmcv/losses/__init__.py | 7 +
 mmcv/losses/dice_loss.py | 61 +
 mmcv/losses/focal_loss.py | 105 +
 mmcv/losses/fvcore_smooth_l1_loss.py | 76 +
 mmcv/losses/occflow_loss.py | 226 ++
 mmcv/losses/planning_loss.py | 77 +
 mmcv/losses/track_loss.py | 619 +++++
 mmcv/losses/traj_loss.py | 233 ++
 mmcv/metrics/classification.py | 178 ++
 mmcv/metrics/compositional.py | 40 +
 mmcv/metrics/distributed.py | 214 ++
 mmcv/metrics/metric.py | 199 ++
 mmcv/metrics/reduction.py | 26 +
 mmcv/metrics/utils.py | 292 ++
 mmcv/modeling/postprocessing.py | 100 +
 mmcv/models/__init__.py | 14 +
 mmcv/models/backbones/__init__.py | 3 +
 mmcv/models/backbones/base_module.py | 195 ++
 mmcv/models/backbones/resnet.py | 671 +++++
 mmcv/models/backbones/vgg.py | 175 ++
 mmcv/models/backbones/vovnet.py | 375 +++
 mmcv/models/bricks/__init__.py | 12 +
 mmcv/models/bricks/activation.py | 91 +
 mmcv/models/bricks/conv.py | 44 +
 mmcv/models/bricks/conv_module.py | 207 ++
 mmcv/models/bricks/drop.py | 65 +
 mmcv/models/bricks/norm.py | 145 +
 mmcv/models/bricks/padding.py | 36 +
 mmcv/models/bricks/plugin.py | 88 +
 mmcv/models/bricks/registry.py | 16 +
 mmcv/models/bricks/transformer.py | 611 +++++
 mmcv/models/bricks/wrappers.py | 175 ++
 mmcv/models/builder.py | 137 +
 mmcv/models/dense_heads/VAD_head.py | 1898 +++++++++++++
 mmcv/models/dense_heads/__init__.py | 10 +
 mmcv/models/dense_heads/anchor3d_head.py | 513 ++++
 mmcv/models/dense_heads/anchor_free_head.py | 340 +++
 mmcv/models/dense_heads/anchor_head.py | 746 ++++++
 mmcv/models/dense_heads/base_dense_head.py | 78 +
 mmcv/models/dense_heads/bev_head.py | 130 +
 mmcv/models/dense_heads/bevformer_head.py | 686 +++++
 mmcv/models/dense_heads/dense_test_mixins.py | 202 ++
 mmcv/models/dense_heads/detr_head.py | 843 ++++++
 mmcv/models/dense_heads/free_anchor3d_head.py | 284 ++
 mmcv/models/dense_heads/ga_rpn_head.py | 176 ++
 mmcv/models/dense_heads/guided_anchor_head.py | 862 ++++++
 mmcv/models/dense_heads/motion_head.py | 560 ++++
 .../motion_head_plugin/__init__.py | 4 +
 .../motion_head_plugin/base_motion_head.py | 140 +
 .../dense_heads/motion_head_plugin/modules.py | 280 ++
 .../motion_deformable_attn.py | 632 +++++
 .../motion_head_plugin/motion_optimization.py | 218 ++
 .../motion_head_plugin/motion_utils.py | 99 +
 mmcv/models/dense_heads/occ_head.py | 482 ++++
 .../dense_heads/occ_head_plugin/__init__.py | 3 +
 .../dense_heads/occ_head_plugin/metrics.py | 258 ++
 .../dense_heads/occ_head_plugin/modules.py | 342 +++
 .../dense_heads/occ_head_plugin/utils.py | 87 +
 mmcv/models/dense_heads/panseg_head.py | 1327 ++++++++++
 mmcv/models/dense_heads/planning_head.py | 251 ++
 .../planning_head_plugin/__init__.py | 4 +
 .../collision_optimization.py | 116 +
 .../planning_head_plugin/metric_stp3.py | 337 +++
 .../planning_head_plugin/planning_metrics.py | 147 +
 mmcv/models/dense_heads/rpn_head.py | 319 +++
 .../dense_heads/seg_head_plugin/__init__.py | 5 +
 .../seg_head_plugin/seg_assigner.py | 446 ++++
 .../seg_deformable_transformer.py | 385 +++
 .../seg_head_plugin/seg_detr_head.py | 689 +++++
 .../seg_head_plugin/seg_mask_head.py | 393 +++
 .../dense_heads/seg_head_plugin/seg_utils.py | 7 +
 mmcv/models/dense_heads/track_head.py | 533 ++++
 .../dense_heads/track_head_plugin/__init__.py | 3 +
 .../dense_heads/track_head_plugin/modules.py | 254 ++
 .../track_head_plugin/track_instance.py | 198 ++
 .../dense_heads/track_head_plugin/tracker.py | 42 +
 mmcv/models/dense_heads/train_mixins.py | 347 +++
 mmcv/models/detectors/VAD.py | 684 +++++
 mmcv/models/detectors/__init__.py | 5 +
 mmcv/models/detectors/base.py | 407 +++
 mmcv/models/detectors/bevformer.py | 295 +++
 mmcv/models/detectors/bevformerV2.py | 269 ++
 mmcv/models/detectors/bevformer_fp16.py | 89 +
 mmcv/models/detectors/mvx_two_stage.py | 506 ++++
 mmcv/models/detectors/single_stage.py | 234 ++
 mmcv/models/detectors/single_stage_mono3d.py | 224 ++
 mmcv/models/detectors/uniad_e2e.py | 385 +++
 mmcv/models/detectors/uniad_track.py | 869 ++++++
 mmcv/models/losses/__init__.py | 20 +
 mmcv/models/losses/focal_loss.py | 181 ++
 mmcv/models/losses/iou_loss.py | 440 +++
 mmcv/models/losses/smooth_l1_loss.py | 136 +
 mmcv/models/losses/utils.py | 115 +
 mmcv/models/modules/VAD_transformer.py | 489 ++++
 mmcv/models/modules/__init__.py | 8 +
 .../modules/custom_base_transformer_layer.py | 243 ++
 mmcv/models/modules/decoder.py | 344 +++
 mmcv/models/modules/encoder.py | 405 +++
 mmcv/models/modules/group_attention.py | 162 ++
 .../multi_scale_deformable_attn_function.py | 163 ++
 .../models/modules/spatial_cross_attention.py | 398 +++
 .../models/modules/temporal_self_attention.py | 269 ++
 mmcv/models/modules/transformer.py | 632 +++++
 mmcv/models/modules/transformerV2.py | 353 +++
 mmcv/models/modules/vote_module.py | 181 ++
 mmcv/models/necks/__init__.py | 24 +
 mmcv/models/necks/fpn.py | 203 ++
 mmcv/models/opt/__init__.py | 1 +
 mmcv/models/opt/adamw.py | 131 +
 mmcv/models/roi_heads/mask_heads/__init__.py | 1 +
 .../mask_heads/fused_semantic_head.py | 107 +
 mmcv/models/segmentors/__init__.py | 7 +
 mmcv/models/segmentors/base.py | 379 +++
 mmcv/models/utils/__init__.py | 25 +
 mmcv/models/utils/builder.py | 46 +
 mmcv/models/utils/functional.py | 141 +
 mmcv/models/utils/fuse_conv_bn.py | 59 +
 mmcv/models/utils/grid_mask.py | 124 +
 mmcv/models/utils/positional_encoding.py | 162 ++
 mmcv/models/utils/res_layer.py | 191 ++
 mmcv/models/utils/transformer.py | 800 ++++++
 mmcv/models/utils/weight_init.py | 683 +++++
 mmcv/models/vad_utils/CD_loss.py | 710 +++++
 mmcv/models/vad_utils/__init__.py | 7 +
 mmcv/models/vad_utils/map_utils.py | 41 +
 mmcv/models/vad_utils/plan_loss.py | 440 +++
 mmcv/models/vad_utils/traj_lr_warmup.py | 13 +
 mmcv/ops/__init__.py | 57 +
 .../ops/csrc/common/box_iou_rotated_utils.hpp | 343 +++
 .../cuda/assign_score_withk_cuda_kernel.cuh | 112 +
 .../common/cuda/ball_query_cuda_kernel.cuh | 53 +
 .../common/cuda/bbox_overlaps_cuda_kernel.cuh | 80 +
 .../common/cuda/border_align_cuda_kernel.cuh | 196 ++
 .../csrc/common/cuda/box_iou_rotated_cuda.cuh | 78 +
 .../csrc/common/cuda/carafe_cuda_kernel.cuh | 328 +++
 .../common/cuda/carafe_naive_cuda_kernel.cuh | 107 +
 .../csrc/common/cuda/common_cuda_helper.hpp | 112 +
 .../ops/csrc/common/cuda/correlation_cuda.cuh | 227 ++
 .../common/cuda/deform_conv_cuda_kernel.cuh | 363 +++
 .../cuda/deform_roi_pool_cuda_kernel.cuh | 182 ++
 .../furthest_point_sample_cuda_kernel.cuh | 148 ++
 .../common/cuda/gather_points_cuda_kernel.cuh | 52 +
 .../common/cuda/group_points_cuda_kernel.cuh | 59 +
 .../csrc/common/cuda/iou3d_cuda_kernel.cuh | 365 +++
 mmcv/ops/csrc/common/cuda/knn_cuda_kernel.cuh | 87 +
 .../common/cuda/masked_conv2d_cuda_kernel.cuh | 58 +
 .../modulated_deform_conv_cuda_kernel.cuh | 395 +++
 .../cuda/ms_deform_attn_cuda_kernel.cuh | 800 ++++++
 mmcv/ops/csrc/common/cuda/nms_cuda_kernel.cuh | 70 +
 .../ops/csrc/common/cuda/nms_rotated_cuda.cuh | 131 +
 .../cuda/points_in_boxes_cuda_kernel.cuh | 89 +
 .../csrc/common/cuda/psamask_cuda_kernel.cuh | 137 +
 .../common/cuda/roi_align_cuda_kernel.cuh | 208 ++
 .../cuda/roi_align_rotated_cuda_kernel.cuh | 198 ++
 .../csrc/common/cuda/roi_pool_cuda_kernel.cuh | 89 +
 .../cuda/roiaware_pool3d_cuda_kernel.cuh | 264 ++
 .../cuda/roipoint_pool3d_cuda_kernel.cuh | 140 +
 .../cuda/scatter_points_cuda_kernel.cuh | 183 ++
 .../cuda/sigmoid_focal_loss_cuda_kernel.cuh | 67 +
 .../cuda/softmax_focal_loss_cuda_kernel.cuh | 68 +
 .../csrc/common/cuda/sync_bn_cuda_kernel.cuh | 327 +++
 .../cuda/three_interpolate_cuda_kernel.cuh | 57 +
 .../csrc/common/cuda/three_nn_cuda_kernel.cuh | 62 +
 .../common/cuda/tin_shift_cuda_kernel.cuh | 57 +
 .../common/cuda/voxelization_cuda_kernel.cuh | 165 ++
 mmcv/ops/csrc/common/pytorch_cpp_helper.hpp | 24 +
 mmcv/ops/csrc/common/pytorch_cuda_helper.hpp | 19 +
 .../csrc/common/pytorch_device_registry.hpp | 141 +
 mmcv/ops/csrc/pytorch/assign_score_withk.cpp | 42 +
 mmcv/ops/csrc/pytorch/ball_query.cpp | 20 +
 mmcv/ops/csrc/pytorch/bbox_overlaps.cpp | 14 +
 mmcv/ops/csrc/pytorch/border_align.cpp | 30 +
 mmcv/ops/csrc/pytorch/box_iou_rotated.cpp | 19 +
 mmcv/ops/csrc/pytorch/carafe.cpp | 38 +
 mmcv/ops/csrc/pytorch/carafe_naive.cpp | 32 +
 mmcv/ops/csrc/pytorch/contour_expand.cpp | 112 +
 mmcv/ops/csrc/pytorch/corner_pool.cpp | 240 ++
 mmcv/ops/csrc/pytorch/correlation.cpp | 47 +
 mmcv/ops/csrc/pytorch/cpu/box_iou_rotated.cpp | 38 +
 mmcv/ops/csrc/pytorch/cpu/deform_conv.cpp | 408 +++
 .../pytorch/cpu/modulated_deform_conv.cpp | 436 +++
 mmcv/ops/csrc/pytorch/cpu/nms.cpp | 230 ++
 mmcv/ops/csrc/pytorch/cpu/nms_rotated.cpp | 66 +
 mmcv/ops/csrc/pytorch/cpu/pixel_group.cpp | 124 +
 mmcv/ops/csrc/pytorch/cpu/points_in_boxes.cpp | 53 +
 mmcv/ops/csrc/pytorch/cpu/psamask.cpp | 199 ++
 mmcv/ops/csrc/pytorch/cpu/roi_align.cpp | 466 ++++
 .../csrc/pytorch/cpu/roi_align_rotated.cpp | 458 ++++
 mmcv/ops/csrc/pytorch/cpu/voxelization.cpp | 170 ++
 .../pytorch/cuda/assign_score_withk_cuda.cu | 66 +
 mmcv/ops/csrc/pytorch/cuda/ball_query_cuda.cu | 38 +
 .../csrc/pytorch/cuda/bbox_overlaps_cuda.cu | 23 +
 .../csrc/pytorch/cuda/border_align_cuda.cu | 68 +
 .../csrc/pytorch/cuda/box_iou_rotated_cuda.cu | 25 +
 mmcv/ops/csrc/pytorch/cuda/carafe_cuda.cu | 180 ++
 .../csrc/pytorch/cuda/carafe_naive_cuda.cu | 52 +
 .../ops/csrc/pytorch/cuda/correlation_cuda.cu | 93 +
 mmcv/ops/csrc/pytorch/cuda/cudabind.cpp | 1364 ++++++++++
 .../ops/csrc/pytorch/cuda/deform_conv_cuda.cu | 105 +
 .../csrc/pytorch/cuda/deform_roi_pool_cuda.cu | 55 +
 mmcv/ops/csrc/pytorch/cuda/focal_loss_cuda.cu | 111 +
 .../cuda/furthest_point_sample_cuda.cu | 143 +
 .../pytorch/cuda/fused_bias_leakyrelu_cuda.cu | 109 +
 .../csrc/pytorch/cuda/gather_points_cuda.cu | 58 +
 .../csrc/pytorch/cuda/group_points_cuda.cu | 61 +
 mmcv/ops/csrc/pytorch/cuda/iou3d_cuda.cu | 86 +
 mmcv/ops/csrc/pytorch/cuda/knn_cuda.cu | 34 +
 .../csrc/pytorch/cuda/masked_conv2d_cuda.cu | 54 +
 .../cuda/modulated_deform_conv_cuda.cu | 96 +
 .../csrc/pytorch/cuda/ms_deform_attn_cuda.cu | 361 +++
 mmcv/ops/csrc/pytorch/cuda/nms_cuda.cu | 53 +
 .../ops/csrc/pytorch/cuda/nms_rotated_cuda.cu | 62 +
 .../csrc/pytorch/cuda/points_in_boxes_cuda.cu | 62 +
 mmcv/ops/csrc/pytorch/cuda/psamask_cuda.cu | 59 +
 mmcv/ops/csrc/pytorch/cuda/roi_align_cuda.cu | 58 +
 .../pytorch/cuda/roi_align_rotated_cuda.cu | 45 +
 mmcv/ops/csrc/pytorch/cuda/roi_pool_cuda.cu | 50 +
 .../csrc/pytorch/cuda/roiaware_pool3d_cuda.cu | 118 +
 .../csrc/pytorch/cuda/roipoint_pool3d_cuda.cu | 60 +
 .../csrc/pytorch/cuda/scatter_points_cuda.cu | 127 +
 mmcv/ops/csrc/pytorch/cuda/sync_bn_cuda.cu | 110 +
 .../pytorch/cuda/three_interpolate_cuda.cu | 66 +
 mmcv/ops/csrc/pytorch/cuda/three_nn_cuda.cu | 35 +
 mmcv/ops/csrc/pytorch/cuda/tin_shift_cuda.cu | 55 +
 .../ops/csrc/pytorch/cuda/upfirdn2d_kernel.cu | 370 +++
 .../csrc/pytorch/cuda/voxelization_cuda.cu | 188 ++
 mmcv/ops/csrc/pytorch/deform_conv.cpp | 517 ++++
 mmcv/ops/csrc/pytorch/deform_roi_pool.cpp | 42 +
 mmcv/ops/csrc/pytorch/focal_loss.cpp | 53 +
 .../csrc/pytorch/furthest_point_sample.cpp | 34 +
 .../ops/csrc/pytorch/fused_bias_leakyrelu.cpp | 119 +
 mmcv/ops/csrc/pytorch/gather_points.cpp | 30 +
 mmcv/ops/csrc/pytorch/group_points.cpp | 34 +
 mmcv/ops/csrc/pytorch/info.cpp | 56 +
 mmcv/ops/csrc/pytorch/iou3d.cpp | 151 ++
 mmcv/ops/csrc/pytorch/knn.cpp | 17 +
 mmcv/ops/csrc/pytorch/masked_conv2d.cpp | 33 +
 .../csrc/pytorch/modulated_deform_conv.cpp | 237 ++
 mmcv/ops/csrc/pytorch/ms_deform_attn.cpp | 60 +
 mmcv/ops/csrc/pytorch/nms.cpp | 33 +
 mmcv/ops/csrc/pytorch/nms_rotated.cpp | 32 +
 mmcv/ops/csrc/pytorch/pixel_group.cpp | 26 +
 mmcv/ops/csrc/pytorch/points_in_boxes.cpp | 44 +
 mmcv/ops/csrc/pytorch/psamask.cpp | 41 +
 mmcv/ops/csrc/pytorch/pybind.cpp | 689 +++++
 mmcv/ops/csrc/pytorch/roi_align.cpp | 41 +
 mmcv/ops/csrc/pytorch/roi_align_rotated.cpp | 41 +
 mmcv/ops/csrc/pytorch/roi_pool.cpp | 31 +
 mmcv/ops/csrc/pytorch/roiaware_pool3d.cpp | 72 +
 mmcv/ops/csrc/pytorch/roipoint_pool3d.cpp | 39 +
 mmcv/ops/csrc/pytorch/scatter_points.cpp | 53 +
 mmcv/ops/csrc/pytorch/sync_bn.cpp | 69 +
 mmcv/ops/csrc/pytorch/three_interpolate.cpp | 33 +
 mmcv/ops/csrc/pytorch/three_nn.cpp | 18 +
 mmcv/ops/csrc/pytorch/tin_shift.cpp | 20 +
 mmcv/ops/csrc/pytorch/upfirdn2d.cpp | 118 +
 mmcv/ops/csrc/pytorch/voxelization.cpp | 56 +
 mmcv/ops/deform_conv.py | 405 +++
 mmcv/ops/focal_loss.py | 212 ++
 mmcv/ops/iou3d.py | 89 +
 mmcv/ops/iou3d_det/__init__.py | 3 +
 mmcv/ops/iou3d_det/iou3d_utils.py | 71 +
 mmcv/ops/iou3d_det/src/iou3d.cpp | 210 ++
 mmcv/ops/iou3d_det/src/iou3d_kernel.cu | 439 +++
 mmcv/ops/masked_conv.py | 111 +
 mmcv/ops/modulated_deform_conv.py | 282 ++
 mmcv/ops/multi_scale_deform_attn.py | 358 +++
 mmcv/ops/nms.py | 388 +++
 mmcv/ops/roi_align.py | 223 ++
 mmcv/ops/roiaware_pool3d/__init__.py | 8 +
 mmcv/ops/roiaware_pool3d/points_in_boxes.py | 123 +
 mmcv/ops/roiaware_pool3d/roiaware_pool3d.py | 110 +
 .../src/points_in_boxes_cpu.cpp | 69 +
 .../src/points_in_boxes_cuda.cu | 203 ++
 .../roiaware_pool3d/src/roiaware_pool3d.cpp | 136 +
 .../src/roiaware_pool3d_kernel.cu | 366 +++
 mmcv/ops/voxelize.py | 145 +
 mmcv/optims/__init__.py | 1 +
 mmcv/optims/adamw.py | 131 +
 mmcv/optims/optimizer.py | 268 ++
 mmcv/parallel/__init__.py | 4 +
 mmcv/parallel/collate.py | 95 +
 mmcv/parallel/data_container.py | 88 +
 mmcv/parallel/registry.py | 8 +
 mmcv/parallel/utils.py | 20 +
 mmcv/runner/__init__.py | 3 +
 mmcv/runner/base_runner.py | 532 ++++
 mmcv/runner/builder.py | 10 +
 mmcv/runner/epoch_based_runner.py | 262 ++
 mmcv/runner/hooks/__init__.py | 9 +
 mmcv/runner/hooks/checkpoint.py | 167 ++
 mmcv/runner/hooks/evaluation.py | 507 ++++
 mmcv/runner/hooks/hook.py | 92 +
 mmcv/runner/hooks/iter_timer.py | 18 +
 mmcv/runner/hooks/logger/__init__.py | 3 +
 mmcv/runner/hooks/logger/base.py | 166 ++
 mmcv/runner/hooks/logger/tensorboard.py | 55 +
 mmcv/runner/hooks/logger/text.py | 256 ++
 mmcv/runner/hooks/lr_updater.py | 670 +++++
 mmcv/runner/hooks/optimizer.py | 506 ++++
 mmcv/runner/hooks/sampler_seed.py | 20 +
 mmcv/runner/hooks/vad_hooks.py | 17 +
 mmcv/structures/__init__.py | 8 +
 mmcv/structures/boxes.py | 425 +++
 mmcv/structures/image_list.py | 129 +
 mmcv/structures/instances.py | 194 ++
 mmcv/structures/keypoints.py | 235 ++
 mmcv/structures/masks.py | 534 ++++
 mmcv/structures/rotated_boxes.py | 505 ++++
 mmcv/utils/__init__.py | 29 +
 mmcv/utils/bricks.py | 20 +
 mmcv/utils/checkpoint.py | 153 ++
 mmcv/utils/collect_env.py | 13 +
 mmcv/utils/config.py | 687 +++++
 mmcv/utils/contextmanagers.py | 121 +
 mmcv/utils/ext_loader.py | 18 +
 mmcv/utils/fp16_utils.py | 407 +++
 mmcv/utils/grid_mask.py | 124 +
 mmcv/utils/hub.py | 128 +
 mmcv/utils/log_buffer.py | 41 +
 mmcv/utils/logger.py | 21 +
 mmcv/utils/logging.py | 110 +
 mmcv/utils/memory.py | 84 +
 mmcv/utils/misc.py | 377 +++
 mmcv/utils/path.py | 101 +
 mmcv/utils/position_embedding.py | 34 +
 mmcv/utils/priority.py | 60 +
 mmcv/utils/progressbar.py | 208 ++
 mmcv/utils/registry.py | 315 +++
 mmcv/utils/runner_utils.py | 254 ++
 mmcv/utils/timer.py | 118 +
 mmcv/utils/util_mixins.py | 104 +
 mmcv/utils/version_utils.py | 88 +
 mmcv/utils/visual.py | 24 +
 requirements.txt | 48 +
 setup.py | 224 ++
 team_code/pid_controller.py | 113 +
 team_code/planner.py | 128 +
 team_code/uniad_b2d_agent.py | 433 +++
 team_code/vad_b2d_agent.py | 460 ++++
 770 files changed, 162764 insertions(+)

 create mode 100644 .gitignore
 create mode 100644 README.md
 create mode 100644 adzoo/__init__.py
 create mode 100644 adzoo/bevformer/analysis_tools/__init__.py
 create mode 100755 adzoo/bevformer/analysis_tools/analyze_logs.py
 create mode 100755 adzoo/bevformer/analysis_tools/benchmark.py
 create mode 100644 adzoo/bevformer/analysis_tools/get_params.py
 create mode 100644 adzoo/bevformer/analysis_tools/visual.py
 create mode 100644 adzoo/bevformer/apis/__init__.py
 create mode 100644 adzoo/bevformer/apis/mmdet_train.py
 create mode 100644 adzoo/bevformer/apis/test.py
 create mode 100644 adzoo/bevformer/apis/train.py
 create mode 100644 adzoo/bevformer/configs/_base_/datasets/coco_instance.py
 create mode 100644 adzoo/bevformer/configs/_base_/datasets/kitti-3d-3class.py
 create mode 100644 adzoo/bevformer/configs/_base_/datasets/kitti-3d-car.py
 create mode 100644 adzoo/bevformer/configs/_base_/datasets/lyft-3d.py
 create mode 100644 adzoo/bevformer/configs/_base_/datasets/nuim_instance.py
 create mode 100644 adzoo/bevformer/configs/_base_/datasets/nus-3d.py
 create mode 100644 adzoo/bevformer/configs/_base_/datasets/nus-mono3d.py
 create mode 100644 adzoo/bevformer/configs/_base_/datasets/range100_lyft-3d.py
 create mode 100644 adzoo/bevformer/configs/_base_/datasets/s3dis-3d-5class.py
 create mode 100644 adzoo/bevformer/configs/_base_/datasets/s3dis_seg-3d-13class.py
 create mode 100644 adzoo/bevformer/configs/_base_/datasets/scannet-3d-18class.py
 create mode 100644 adzoo/bevformer/configs/_base_/datasets/scannet_seg-3d-20class.py
 create mode 100644 adzoo/bevformer/configs/_base_/datasets/sunrgbd-3d-10class.py
 create mode 100644 adzoo/bevformer/configs/_base_/datasets/waymoD5-3d-3class.py
 create mode 100644 adzoo/bevformer/configs/_base_/datasets/waymoD5-3d-car.py
 create mode 100644 adzoo/bevformer/configs/_base_/default_runtime.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/3dssd.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/centerpoint_01voxel_second_secfpn_nus.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/centerpoint_02pillar_second_secfpn_nus.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/fcos3d.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/groupfree3d.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/h3dnet.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/hv_pointpillars_fpn_lyft.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/hv_pointpillars_fpn_nus.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/hv_pointpillars_fpn_range100_lyft.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/hv_pointpillars_secfpn_kitti.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/hv_pointpillars_secfpn_waymo.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/hv_second_secfpn_kitti.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/hv_second_secfpn_waymo.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/imvotenet_image.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/mask_rcnn_r50_fpn.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/paconv_cuda_ssg.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/paconv_ssg.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/parta2.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/pointnet2_msg.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/pointnet2_ssg.py
 create mode 100644 adzoo/bevformer/configs/_base_/models/votenet.py
 create mode 100644 adzoo/bevformer/configs/_base_/schedules/cosine.py
 create mode 100644 adzoo/bevformer/configs/_base_/schedules/cyclic_20e.py
 create mode 100644 adzoo/bevformer/configs/_base_/schedules/cyclic_40e.py
 create mode 100644 adzoo/bevformer/configs/_base_/schedules/mmdet_schedule_1x.py
 create mode 100644 adzoo/bevformer/configs/_base_/schedules/schedule_2x.py
 create mode 100644 adzoo/bevformer/configs/_base_/schedules/schedule_3x.py
 create mode 100644 adzoo/bevformer/configs/_base_/schedules/seg_cosine_150e.py
 create mode 100644 adzoo/bevformer/configs/_base_/schedules/seg_cosine_200e.py
 create mode 100644 adzoo/bevformer/configs/_base_/schedules/seg_cosine_50e.py
 create mode 100644 adzoo/bevformer/configs/bevformer/bevformer_base.py
 create mode 100644 adzoo/bevformer/configs/bevformer/bevformer_base_b2d.py
 create mode 100644 adzoo/bevformer/configs/bevformer/bevformer_tiny.py
 create mode 100644 adzoo/bevformer/configs/bevformer/bevformer_tiny_b2d.py
 create mode 100644 adzoo/bevformer/configs/bevformer_fp16/bevformer_tiny_fp16.py
 create mode 100644 adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t1-24ep.py
 create mode 100644 adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t1-48ep.py
 create mode 100644 adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t1-base-24ep.py
 create mode 100644 adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t1-base-48ep.py
 create mode 100644 adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t2-24ep.py
 create mode 100644 adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t2-48ep.py
 create mode 100644 adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t8-24ep.py
 create mode 100644 adzoo/bevformer/configs/datasets/custom_lyft-3d.py
 create mode 100644 adzoo/bevformer/configs/datasets/custom_nus-3d.py
 create mode 100644 adzoo/bevformer/configs/datasets/custom_waymo-3d.py
 create mode 100755 adzoo/bevformer/create_data.py
 create mode 100755 adzoo/bevformer/data_converter/__init__.py
 create mode 100755 adzoo/bevformer/data_converter/create_gt_database.py
 create mode 100755 adzoo/bevformer/data_converter/indoor_converter.py
 create mode 100755 adzoo/bevformer/data_converter/kitti_converter.py
 create mode 100755 adzoo/bevformer/data_converter/kitti_data_utils.py
 create mode 100755 adzoo/bevformer/data_converter/lyft_converter.py
 create mode 100755 adzoo/bevformer/data_converter/lyft_data_fixer.py
 create mode 100755 adzoo/bevformer/data_converter/nuimage_converter.py
 create mode 100755 adzoo/bevformer/data_converter/nuscenes_converter.py
 create mode 100755 adzoo/bevformer/data_converter/s3dis_data_utils.py
 create mode 100755 adzoo/bevformer/data_converter/scannet_data_utils.py
 create mode 100755 adzoo/bevformer/data_converter/sunrgbd_data_utils.py
 create mode 100755 adzoo/bevformer/data_converter/waymo_converter.py
 create mode 100755 adzoo/bevformer/dist_test.sh
 create mode 100755 adzoo/bevformer/dist_train.sh
 create mode 100755 adzoo/bevformer/fp16/dist_train.sh
 create mode 100644 adzoo/bevformer/fp16/train.py
 create mode 100755 adzoo/bevformer/misc/browse_dataset.py
 create mode 100755 adzoo/bevformer/misc/print_config.py
 create mode 100755 adzoo/bevformer/misc/visualize_results.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/bevformer/__init__.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/bevformer/apis/__init__.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/bevformer/apis/mmdet_train.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/bevformer/apis/test.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/bevformer/apis/train.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/bevformer/hooks/__init__.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/bevformer/hooks/custom_hooks.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/__init__.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/datasets/__init__.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/datasets/nuscenes.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/datasets/transform_utils.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/layers/iou_loss.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/layers/normalization.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/layers/smooth_l1_loss.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/__init__.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/core.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/disentangled_box3d_loss.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/fcos2d.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/fcos3d.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/nuscenes_dd3d.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/prepare_targets.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/structures/__init__.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/structures/boxes3d.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/structures/image_list.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/structures/pose.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/structures/transform3d.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/utils/comm.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/utils/geometry.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/utils/tasks.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/utils/tensor2d.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/dd3d/utils/visualization.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/models/hooks/__init__.py
 create mode 100644 adzoo/bevformer/mmdet3d_plugin/models/hooks/hooks.py
 create mode 100755 adzoo/bevformer/model_converters/convert_votenet_checkpoints.py
 create mode 100755 adzoo/bevformer/model_converters/publish_model.py
 create mode 100755 adzoo/bevformer/model_converters/regnet2mmdet.py
 create mode 100755 adzoo/bevformer/test.py
 create mode 100755 adzoo/bevformer/train.py
 create mode 100644 adzoo/uniad/analysis_tools/__init__.py
 create mode 100755 adzoo/uniad/analysis_tools/analyze_logs.py
 create mode 100755 adzoo/uniad/analysis_tools/benchmark.py
 create mode 100644 adzoo/uniad/analysis_tools/visualize/render/base_render.py
 create mode 100644 adzoo/uniad/analysis_tools/visualize/render/bev_render.py
 create mode 100644 adzoo/uniad/analysis_tools/visualize/render/cam_render.py
 create mode 100644 adzoo/uniad/analysis_tools/visualize/run.py
 create mode 100644 adzoo/uniad/analysis_tools/visualize/utils.py
 create mode 100644 adzoo/uniad/configs/_base_/datasets/nus-3d.py
 create mode 100644 adzoo/uniad/configs/_base_/default_runtime.py
 create mode 100644 adzoo/uniad/configs/stage1_track_map/base_track_map.py
 create mode 100644 adzoo/uniad/configs/stage1_track_map/base_track_map_b2d.py
 create mode 100644 adzoo/uniad/configs/stage1_track_map/tiny_track_map_b2d.py
 create mode 100644 adzoo/uniad/configs/stage2_e2e/base_e2e.py
 create mode 100644 adzoo/uniad/configs/stage2_e2e/base_e2e_b2d.py
 create mode 100644 adzoo/uniad/configs/stage2_e2e/tiny_e2e_b2d.py
 create mode 100755 adzoo/uniad/data_converter/create_data.py
 create mode 100755 adzoo/uniad/data_converter/uniad_create_data.sh
 create mode 100644 adzoo/uniad/data_converter/uniad_nuscenes_converter.py
 create mode 100755 adzoo/uniad/test.py
 create mode 100644 adzoo/uniad/test_utils.py
 create mode 100755 adzoo/uniad/train.py
 create mode 100755 adzoo/uniad/uniad_dist_eval.sh
 create mode 100755 adzoo/uniad/uniad_dist_train.sh
 create mode 100755 adzoo/uniad/uniad_vis_result.sh
 create mode 100644 adzoo/vad/analysis_tools/__init__.py
 create mode 100644 adzoo/vad/analysis_tools/analyze_logs.py
 create mode 100644 adzoo/vad/analysis_tools/benchmark.py
 create mode 100644 adzoo/vad/analysis_tools/get_flops.py
 create mode 100644 adzoo/vad/analysis_tools/get_params.py
 create mode 100644 adzoo/vad/analysis_tools/visualization.py
 create mode 100644 adzoo/vad/apis/__init__.py
 create mode 100644 adzoo/vad/apis/mmdet_train.py
 create mode 100644 adzoo/vad/apis/test.py
 create mode 100644 adzoo/vad/apis/train.py
 create mode 100644 adzoo/vad/configs/VAD/VAD_base_e2e.py
 create mode 100644 adzoo/vad/configs/VAD/VAD_base_e2e_b2d.py
 create mode 100644 adzoo/vad/configs/VAD/VAD_tiny_e2e.py
 create mode 100644 adzoo/vad/configs/_base_/datasets/coco_instance.py
 create mode 100644 adzoo/vad/configs/_base_/datasets/kitti-3d-3class.py
 create mode 100644 adzoo/vad/configs/_base_/datasets/kitti-3d-car.py
 create mode 100644 adzoo/vad/configs/_base_/datasets/lyft-3d.py
 create mode 100644 adzoo/vad/configs/_base_/datasets/nuim_instance.py
 create mode 100644 adzoo/vad/configs/_base_/datasets/nus-3d.py
 create mode 100644 adzoo/vad/configs/_base_/datasets/nus-mono3d.py
 create mode 100644 adzoo/vad/configs/_base_/datasets/range100_lyft-3d.py
 create mode 100644 adzoo/vad/configs/_base_/datasets/s3dis-3d-5class.py
 create mode 100644 adzoo/vad/configs/_base_/datasets/s3dis_seg-3d-13class.py
 create mode 100644 adzoo/vad/configs/_base_/datasets/scannet-3d-18class.py
 create mode 100644 adzoo/vad/configs/_base_/datasets/scannet_seg-3d-20class.py
 create mode 100644 adzoo/vad/configs/_base_/datasets/sunrgbd-3d-10class.py
 create mode 100644 adzoo/vad/configs/_base_/datasets/waymoD5-3d-3class.py
 create mode 100644 adzoo/vad/configs/_base_/datasets/waymoD5-3d-car.py
 create mode 100644 adzoo/vad/configs/_base_/default_runtime.py
 create mode 100644 adzoo/vad/configs/_base_/models/3dssd.py
 create mode 100644 adzoo/vad/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py
 create mode 100644 adzoo/vad/configs/_base_/models/centerpoint_01voxel_second_secfpn_nus.py
 create mode 100644 adzoo/vad/configs/_base_/models/centerpoint_02pillar_second_secfpn_nus.py
 create mode 100644 adzoo/vad/configs/_base_/models/fcos3d.py
 create mode 100644 adzoo/vad/configs/_base_/models/groupfree3d.py
 create mode 100644 adzoo/vad/configs/_base_/models/h3dnet.py
 create mode 100644 adzoo/vad/configs/_base_/models/hv_pointpillars_fpn_lyft.py
 create mode 100644 adzoo/vad/configs/_base_/models/hv_pointpillars_fpn_nus.py
 create mode 100644 adzoo/vad/configs/_base_/models/hv_pointpillars_fpn_range100_lyft.py
 create mode 100644 adzoo/vad/configs/_base_/models/hv_pointpillars_secfpn_kitti.py
 create mode 100644 adzoo/vad/configs/_base_/models/hv_pointpillars_secfpn_waymo.py
 create mode 100644 adzoo/vad/configs/_base_/models/hv_second_secfpn_kitti.py
 create mode 100644 adzoo/vad/configs/_base_/models/hv_second_secfpn_waymo.py
 create mode 100644 adzoo/vad/configs/_base_/models/imvotenet_image.py
 create mode 100644 adzoo/vad/configs/_base_/models/mask_rcnn_r50_fpn.py
 create mode 100644 adzoo/vad/configs/_base_/models/paconv_cuda_ssg.py
 create mode 100644 adzoo/vad/configs/_base_/models/paconv_ssg.py
 create mode 100644 adzoo/vad/configs/_base_/models/parta2.py
 create mode 100644 adzoo/vad/configs/_base_/models/pointnet2_msg.py
 create mode 100644 adzoo/vad/configs/_base_/models/pointnet2_ssg.py
 create mode 100644 adzoo/vad/configs/_base_/models/votenet.py
 create mode 100644 adzoo/vad/configs/_base_/schedules/cosine.py
 create mode 100644 adzoo/vad/configs/_base_/schedules/cyclic_20e.py
 create mode 100644 adzoo/vad/configs/_base_/schedules/cyclic_40e.py
 create mode 100644 adzoo/vad/configs/_base_/schedules/mmdet_schedule_1x.py
 create mode 100644 adzoo/vad/configs/_base_/schedules/schedule_2x.py
 create mode 100644 adzoo/vad/configs/_base_/schedules/schedule_3x.py
 create mode 100644 adzoo/vad/configs/_base_/schedules/seg_cosine_150e.py
 create mode 100644 adzoo/vad/configs/_base_/schedules/seg_cosine_200e.py
 create mode 100644 adzoo/vad/configs/_base_/schedules/seg_cosine_50e.py
 create mode 100644 adzoo/vad/configs/datasets/custom_lyft-3d.py
 create mode 100644 adzoo/vad/configs/datasets/custom_nus-3d.py
 create mode 100644 adzoo/vad/configs/datasets/custom_waymo-3d.py
 create mode 100644 adzoo/vad/create_data.py
 create mode 100644 adzoo/vad/data_converter/__init__.py
 create mode 100644 adzoo/vad/data_converter/create_gt_database.py
 create mode 100644 adzoo/vad/data_converter/vad_nuscenes_converter.py
 create mode 100755 adzoo/vad/dist_test.sh
 create mode 100755 adzoo/vad/dist_train.sh
 create mode 100644 adzoo/vad/misc/browse_dataset.py
 create mode 100644 adzoo/vad/misc/fuse_conv_bn.py
 create mode 100644 adzoo/vad/misc/print_config.py
 create mode 100644 adzoo/vad/misc/visualize_results.py
 create mode 100644 adzoo/vad/model_converters/convert_votenet_checkpoints.py
 create mode 100644 adzoo/vad/model_converters/publish_model.py
 create mode 100644 adzoo/vad/model_converters/regnet2mmdet.py
 create mode 100644 adzoo/vad/test.py
 create mode 100644 adzoo/vad/train.py
 create mode 100644 asserts/bench2drive.jpg
 create mode 100644 asserts/bench2drivezoo.png
 create mode 100644 clear.py
 create mode 100644 data/others/b2d_motion_anchor_infos_mode6.pkl
 create mode 100644 data/splits/bench2drive_base_train_val_split.json
 create mode 100644 docs/CONVERT_GUIDE.md
 create mode 100644 docs/DATA_PREP.md
 create mode 100644 docs/EVAL_IN_CARLA.md
 create mode 100644 docs/INSTALL.md
 create mode 100644 docs/TRAIN_EVAL.md
 create mode 100644 mmcv/__init__.py
 create mode 100644 mmcv/core/__init__.py
 create mode 100644 mmcv/core/anchor/__init__.py
 create mode 100644 mmcv/core/anchor/anchor_3d_generator.py
 create mode 100644 mmcv/core/anchor/anchor_generator.py
 create mode 100644 mmcv/core/anchor/builder.py
 create mode 100644 mmcv/core/anchor/point_generator.py
 create mode 100644 mmcv/core/anchor/utils.py
 create mode 100644 mmcv/core/bbox/__init__.py
 create mode 100644 mmcv/core/bbox/assigners/__init__.py
 create mode 100644 mmcv/core/bbox/assigners/assign_result.py
 create mode 100644 mmcv/core/bbox/assigners/base_assigner.py
 create mode 100644 mmcv/core/bbox/assigners/hungarian_assigner.py
 create mode 100755 mmcv/core/bbox/assigners/hungarian_assigner_3d.py
 create mode 100644 mmcv/core/bbox/assigners/hungarian_assigner_3d_track.py
 create mode 100644 mmcv/core/bbox/assigners/map_hungarian_assigner_3d.py
 create mode 100644 mmcv/core/bbox/box_np_ops.py
 create mode 100644 mmcv/core/bbox/builder.py
 create mode 100644 mmcv/core/bbox/coder/__init__.py
 create mode 100644 mmcv/core/bbox/coder/base_bbox_coder.py
 create mode 100755 mmcv/core/bbox/coder/detr3d_track_coder.py
 create mode 100644 mmcv/core/bbox/coder/fut_nms_free_coder.py
 create mode 100644 mmcv/core/bbox/coder/map_nms_free_coder.py
 create mode 100755 mmcv/core/bbox/coder/nms_free_coder.py
 create mode 100644 mmcv/core/bbox/iou_calculators/__init__.py
 create mode 100644 mmcv/core/bbox/iou_calculators/builder.py
 create mode 100644 mmcv/core/bbox/iou_calculators/iou2d_calculator.py
 create mode 100644 mmcv/core/bbox/iou_calculators/iou3d_calculator.py
 create mode 100644 mmcv/core/bbox/match_costs/__init__.py
 create mode 100644 mmcv/core/bbox/match_costs/builder.py
 create mode 100644 mmcv/core/bbox/match_costs/match_cost.py
 create mode 100644 mmcv/core/bbox/samplers/__init__.py
 create mode 100644 mmcv/core/bbox/samplers/base_sampler.py
 create mode 100644 mmcv/core/bbox/samplers/pseudo_sampler.py
 create mode 100644 mmcv/core/bbox/samplers/sampling_result.py
 create mode 100644 mmcv/core/bbox/structures/__init__.py
 create mode 100644 mmcv/core/bbox/structures/base_box3d.py
 create mode 100644 mmcv/core/bbox/structures/box_3d_mode.py
 create mode 100644 mmcv/core/bbox/structures/cam_box3d.py
 create mode 100644 mmcv/core/bbox/structures/coord_3d_mode.py
 create mode 100644 mmcv/core/bbox/structures/depth_box3d.py
 create mode 100644 mmcv/core/bbox/structures/lidar_box3d.py
 create mode 100644 mmcv/core/bbox/structures/nuscenes_box.py
 create mode 100644 mmcv/core/bbox/structures/utils.py
 create mode 100644 mmcv/core/bbox/transforms.py
 create mode 100755 mmcv/core/bbox/util.py
 create mode 100644 mmcv/core/evaluation/__init__.py
 create mode 100644 mmcv/core/evaluation/bbox_overlaps.py
 create mode 100644 mmcv/core/evaluation/class_names.py
 create mode 100644 mmcv/core/evaluation/eval_hooks.py
 create mode 100644 mmcv/core/evaluation/indoor_eval.py
 create mode 100644 mmcv/core/evaluation/kitti_utils/__init__.py
 create mode 100644 mmcv/core/evaluation/kitti_utils/eval.py
 create mode 100644 mmcv/core/evaluation/kitti_utils/rotate_iou.py
 create mode 100644 mmcv/core/evaluation/lyft_eval.py
 create mode 100644 mmcv/core/evaluation/mean_ap.py
 create mode 100644 mmcv/core/evaluation/metric_motion.py
 create mode 100644 mmcv/core/evaluation/metrics.py
 create mode 100644 mmcv/core/evaluation/recall.py
 create mode 100644 mmcv/core/evaluation/seg_eval.py
 create mode 100644 mmcv/core/evaluation/waymo_utils/prediction_kitti_to_waymo.py
 create mode 100644 mmcv/core/mask/__init__.py
 create mode 100644 mmcv/core/mask/mask_target.py
 create mode 100644 mmcv/core/mask/structures.py
 create mode 100644 mmcv/core/mask/utils.py
 create mode 100644 mmcv/core/points/__init__.py
 create mode 100644 mmcv/core/points/base_points.py
 create mode 100644 mmcv/core/points/cam_points.py
 create mode 100644 mmcv/core/points/depth_points.py
 create mode 100644 mmcv/core/points/lidar_points.py
 create mode 100644 mmcv/core/post_processing/__init__.py
 create mode 100644 mmcv/core/post_processing/bbox_nms.py
 create mode 100644 mmcv/core/post_processing/box3d_nms.py
 create mode 100644 mmcv/core/post_processing/merge_augs.py
 create mode 100644 mmcv/core/utils/__init__.py
 create mode 100644 mmcv/core/utils/dist_utils.py
 create mode 100644 mmcv/core/utils/gaussian.py
 create mode 100644 mmcv/core/utils/misc.py
 create mode 100644 mmcv/core/visualization/__init__.py
 create mode 100644 mmcv/core/visualization/image.py
 create mode 100644 mmcv/core/visualizer/__init__.py
 create mode 100644 mmcv/core/visualizer/image_vis.py
 create mode 100644 mmcv/core/visualizer/open3d_vis.py
 create mode 100644 mmcv/core/visualizer/show_result.py
 create mode 100644 mmcv/core/voxel/__init__.py
 create mode 100644 mmcv/core/voxel/builder.py
 create mode 100644 mmcv/core/voxel/voxel_generator.py
 create mode 100644 mmcv/datasets/B2D_dataset.py
 create mode 100644 mmcv/datasets/B2D_e2e_dataset.py
 create mode 100644 mmcv/datasets/B2D_vad_dataset.py
 create mode 100644 mmcv/datasets/__init__.py
 create mode 100644 mmcv/datasets/api_wrappers/__init__.py
 create mode 100644 mmcv/datasets/api_wrappers/coco_api.py
 create mode 100644 mmcv/datasets/builder.py
 create mode 100644 mmcv/datasets/coco.py
 create mode 100644 mmcv/datasets/custom.py
 create mode 100644 mmcv/datasets/custom_3d.py
 create mode 100644 mmcv/datasets/custom_nuscenes_dataset.py
 create mode 100644 mmcv/datasets/custom_nuscenes_dataset_v2.py
 create mode 100644 mmcv/datasets/data_utils/data_utils.py
 create mode 100644 mmcv/datasets/data_utils/rasterize.py
 create mode 100644 mmcv/datasets/data_utils/trajectory_api.py
 create mode 100644 mmcv/datasets/data_utils/vector_map.py
 create mode 100644 mmcv/datasets/dataset_wrappers.py
 create mode 100644 mmcv/datasets/dd3d_nuscenes_dataset.py
 create mode 100644 mmcv/datasets/eval_utils/eval_utils.py
 create mode 100644 mmcv/datasets/eval_utils/map_api.py
 create mode 100644 mmcv/datasets/eval_utils/metric_utils.py
 create mode 100644 mmcv/datasets/eval_utils/nuscenes_eval.py
 create mode 100644 mmcv/datasets/eval_utils/nuscenes_eval_motion.py
 create mode 100644 mmcv/datasets/lyft_dataset.py
 create mode 100644 mmcv/datasets/map_utils/mean_ap.py
 create mode 100644 mmcv/datasets/map_utils/struct.py
 create mode 100644 mmcv/datasets/map_utils/tpfp.py
 create mode 100644 mmcv/datasets/map_utils/tpfp_chamfer.py
 create mode 100644 mmcv/datasets/nuscenes_dataset.py
 create mode 100644 mmcv/datasets/nuscenes_e2e_dataset.py
 create mode 100644 mmcv/datasets/nuscenes_eval.py
 create mode 100644 mmcv/datasets/nuscenes_mono_dataset.py
 create mode 100644 mmcv/datasets/nuscenes_styled_eval_utils.py
 create mode 100644 mmcv/datasets/nuscenes_vad_dataset.py
 create mode 100644 mmcv/datasets/nuscnes_eval.py
 create mode 100644 mmcv/datasets/pipelines/__init__.py
 create mode 100644 mmcv/datasets/pipelines/compose.py
 create mode 100644 mmcv/datasets/pipelines/data_augment_utils.py
 create mode 100644 mmcv/datasets/pipelines/formating.py
 create mode 100644 mmcv/datasets/pipelines/loading.py
 create mode 100644 mmcv/datasets/pipelines/occflow_label.py
 create mode 100644 mmcv/datasets/pipelines/test_time_aug.py
 create mode 100644 mmcv/datasets/pipelines/transforms.py
 create mode 100644 mmcv/datasets/pipelines/transforms_3d.py
 create mode 100644 mmcv/datasets/prepare_B2D.py
 create mode 100644 mmcv/datasets/samplers/__init__.py
 create mode 100644 mmcv/datasets/samplers/distributed_sampler.py
 create mode 100644 mmcv/datasets/samplers/group_sampler.py
 create mode 100644 mmcv/datasets/samplers/sampler.py
 create mode 100644 mmcv/datasets/utils.py
 create mode 100644 mmcv/datasets/vad_custom_nuscenes_eval.py
 create mode 100644 mmcv/datasets/vis_utils.py
 create mode 100644 mmcv/fileio/__init__.py
 create mode 100644 mmcv/fileio/file_client.py
 create mode 100644 mmcv/fileio/handlers/__init__.py
 create mode 100644 mmcv/fileio/handlers/base.py
 create mode 100644 mmcv/fileio/handlers/json_handler.py
 create mode 100644 mmcv/fileio/handlers/pickle_handler.py
 create mode 100644 mmcv/fileio/io.py
 create mode 100644 mmcv/fileio/parse.py
 create mode 100644 mmcv/image/__init__.py
 create mode 100644 mmcv/image/colorspace.py
 create mode 100644 mmcv/image/geometric.py
 create mode 100644 mmcv/image/io.py
 create mode 100644 mmcv/image/misc.py
 create mode 100644 mmcv/image/photometric.py
 create mode 100644 mmcv/layers/__init__.py
 create mode 100644 mmcv/layers/aspp.py
 create mode 100644 mmcv/layers/batch_norm.py
 create mode 100644 mmcv/layers/blocks.py
 create mode 100644 mmcv/layers/csrc/README.md
 create mode 100644 mmcv/layers/csrc/ROIAlignRotated/ROIAlignRotated.h
mmcv/layers/csrc/ROIAlignRotated/ROIAlignRotated.h create mode 100644 mmcv/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp create mode 100644 mmcv/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu create mode 100644 mmcv/layers/csrc/box_iou_rotated/box_iou_rotated.h create mode 100644 mmcv/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp create mode 100644 mmcv/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu create mode 100644 mmcv/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h create mode 100644 mmcv/layers/csrc/cocoeval/cocoeval.cpp create mode 100644 mmcv/layers/csrc/cocoeval/cocoeval.h create mode 100644 mmcv/layers/csrc/cuda_version.cu create mode 100644 mmcv/layers/csrc/deformable/deform_conv.h create mode 100644 mmcv/layers/csrc/deformable/deform_conv_cuda.cu create mode 100644 mmcv/layers/csrc/deformable/deform_conv_cuda_kernel.cu create mode 100644 mmcv/layers/csrc/nms_rotated/nms_rotated.h create mode 100644 mmcv/layers/csrc/nms_rotated/nms_rotated_cpu.cpp create mode 100644 mmcv/layers/csrc/nms_rotated/nms_rotated_cuda.cu create mode 100644 mmcv/layers/csrc/vision.cpp create mode 100644 mmcv/layers/deform_conv.py create mode 100644 mmcv/layers/losses.py create mode 100644 mmcv/layers/mask_ops.py create mode 100644 mmcv/layers/nms.py create mode 100644 mmcv/layers/roi_align.py create mode 100644 mmcv/layers/roi_align_rotated.py create mode 100644 mmcv/layers/rotated_boxes.py create mode 100644 mmcv/layers/shape_spec.py create mode 100644 mmcv/layers/wrappers.py create mode 100644 mmcv/losses/__init__.py create mode 100644 mmcv/losses/dice_loss.py create mode 100644 mmcv/losses/focal_loss.py create mode 100644 mmcv/losses/fvcore_smooth_l1_loss.py create mode 100644 mmcv/losses/occflow_loss.py create mode 100644 mmcv/losses/planning_loss.py create mode 100644 mmcv/losses/track_loss.py create mode 100644 mmcv/losses/traj_loss.py create mode 100644 mmcv/metrics/classification.py create mode 100644 mmcv/metrics/compositional.py create mode 100644 mmcv/metrics/distributed.py create mode 100644 mmcv/metrics/metric.py create mode 100644 mmcv/metrics/reduction.py create mode 100644 mmcv/metrics/utils.py create mode 100644 mmcv/modeling/postprocessing.py create mode 100644 mmcv/models/__init__.py create mode 100644 mmcv/models/backbones/__init__.py create mode 100644 mmcv/models/backbones/base_module.py create mode 100644 mmcv/models/backbones/resnet.py create mode 100644 mmcv/models/backbones/vgg.py create mode 100755 mmcv/models/backbones/vovnet.py create mode 100644 mmcv/models/bricks/__init__.py create mode 100644 mmcv/models/bricks/activation.py create mode 100644 mmcv/models/bricks/conv.py create mode 100644 mmcv/models/bricks/conv_module.py create mode 100644 mmcv/models/bricks/drop.py create mode 100644 mmcv/models/bricks/norm.py create mode 100644 mmcv/models/bricks/padding.py create mode 100644 mmcv/models/bricks/plugin.py create mode 100644 mmcv/models/bricks/registry.py create mode 100644 mmcv/models/bricks/transformer.py create mode 100644 mmcv/models/bricks/wrappers.py create mode 100644 mmcv/models/builder.py create mode 100644 mmcv/models/dense_heads/VAD_head.py create mode 100644 mmcv/models/dense_heads/__init__.py create mode 100644 mmcv/models/dense_heads/anchor3d_head.py create mode 100644 mmcv/models/dense_heads/anchor_free_head.py create mode 100644 mmcv/models/dense_heads/anchor_head.py create mode 100644 mmcv/models/dense_heads/base_dense_head.py create mode 100644 mmcv/models/dense_heads/bev_head.py create mode 100644 
mmcv/models/dense_heads/bevformer_head.py create mode 100644 mmcv/models/dense_heads/dense_test_mixins.py create mode 100644 mmcv/models/dense_heads/detr_head.py create mode 100644 mmcv/models/dense_heads/free_anchor3d_head.py create mode 100644 mmcv/models/dense_heads/ga_rpn_head.py create mode 100644 mmcv/models/dense_heads/guided_anchor_head.py create mode 100644 mmcv/models/dense_heads/motion_head.py create mode 100644 mmcv/models/dense_heads/motion_head_plugin/__init__.py create mode 100644 mmcv/models/dense_heads/motion_head_plugin/base_motion_head.py create mode 100644 mmcv/models/dense_heads/motion_head_plugin/modules.py create mode 100644 mmcv/models/dense_heads/motion_head_plugin/motion_deformable_attn.py create mode 100644 mmcv/models/dense_heads/motion_head_plugin/motion_optimization.py create mode 100644 mmcv/models/dense_heads/motion_head_plugin/motion_utils.py create mode 100644 mmcv/models/dense_heads/occ_head.py create mode 100644 mmcv/models/dense_heads/occ_head_plugin/__init__.py create mode 100644 mmcv/models/dense_heads/occ_head_plugin/metrics.py create mode 100644 mmcv/models/dense_heads/occ_head_plugin/modules.py create mode 100644 mmcv/models/dense_heads/occ_head_plugin/utils.py create mode 100644 mmcv/models/dense_heads/panseg_head.py create mode 100644 mmcv/models/dense_heads/planning_head.py create mode 100644 mmcv/models/dense_heads/planning_head_plugin/__init__.py create mode 100644 mmcv/models/dense_heads/planning_head_plugin/collision_optimization.py create mode 100644 mmcv/models/dense_heads/planning_head_plugin/metric_stp3.py create mode 100644 mmcv/models/dense_heads/planning_head_plugin/planning_metrics.py create mode 100644 mmcv/models/dense_heads/rpn_head.py create mode 100644 mmcv/models/dense_heads/seg_head_plugin/__init__.py create mode 100644 mmcv/models/dense_heads/seg_head_plugin/seg_assigner.py create mode 100644 mmcv/models/dense_heads/seg_head_plugin/seg_deformable_transformer.py create mode 100644 mmcv/models/dense_heads/seg_head_plugin/seg_detr_head.py create mode 100644 mmcv/models/dense_heads/seg_head_plugin/seg_mask_head.py create mode 100644 mmcv/models/dense_heads/seg_head_plugin/seg_utils.py create mode 100644 mmcv/models/dense_heads/track_head.py create mode 100644 mmcv/models/dense_heads/track_head_plugin/__init__.py create mode 100644 mmcv/models/dense_heads/track_head_plugin/modules.py create mode 100644 mmcv/models/dense_heads/track_head_plugin/track_instance.py create mode 100644 mmcv/models/dense_heads/track_head_plugin/tracker.py create mode 100644 mmcv/models/dense_heads/train_mixins.py create mode 100644 mmcv/models/detectors/VAD.py create mode 100644 mmcv/models/detectors/__init__.py create mode 100644 mmcv/models/detectors/base.py create mode 100644 mmcv/models/detectors/bevformer.py create mode 100644 mmcv/models/detectors/bevformerV2.py create mode 100644 mmcv/models/detectors/bevformer_fp16.py create mode 100644 mmcv/models/detectors/mvx_two_stage.py create mode 100644 mmcv/models/detectors/single_stage.py create mode 100644 mmcv/models/detectors/single_stage_mono3d.py create mode 100644 mmcv/models/detectors/uniad_e2e.py create mode 100644 mmcv/models/detectors/uniad_track.py create mode 100644 mmcv/models/losses/__init__.py create mode 100644 mmcv/models/losses/focal_loss.py create mode 100644 mmcv/models/losses/iou_loss.py create mode 100644 mmcv/models/losses/smooth_l1_loss.py create mode 100644 mmcv/models/losses/utils.py create mode 100644 mmcv/models/modules/VAD_transformer.py create mode 100644 
mmcv/models/modules/__init__.py create mode 100644 mmcv/models/modules/custom_base_transformer_layer.py create mode 100644 mmcv/models/modules/decoder.py create mode 100644 mmcv/models/modules/encoder.py create mode 100644 mmcv/models/modules/group_attention.py create mode 100644 mmcv/models/modules/multi_scale_deformable_attn_function.py create mode 100644 mmcv/models/modules/spatial_cross_attention.py create mode 100644 mmcv/models/modules/temporal_self_attention.py create mode 100644 mmcv/models/modules/transformer.py create mode 100644 mmcv/models/modules/transformerV2.py create mode 100644 mmcv/models/modules/vote_module.py create mode 100644 mmcv/models/necks/__init__.py create mode 100644 mmcv/models/necks/fpn.py create mode 100644 mmcv/models/opt/__init__.py create mode 100644 mmcv/models/opt/adamw.py create mode 100644 mmcv/models/roi_heads/mask_heads/__init__.py create mode 100644 mmcv/models/roi_heads/mask_heads/fused_semantic_head.py create mode 100644 mmcv/models/segmentors/__init__.py create mode 100644 mmcv/models/segmentors/base.py create mode 100644 mmcv/models/utils/__init__.py create mode 100644 mmcv/models/utils/builder.py create mode 100644 mmcv/models/utils/functional.py create mode 100644 mmcv/models/utils/fuse_conv_bn.py create mode 100755 mmcv/models/utils/grid_mask.py create mode 100644 mmcv/models/utils/positional_encoding.py create mode 100644 mmcv/models/utils/res_layer.py create mode 100644 mmcv/models/utils/transformer.py create mode 100644 mmcv/models/utils/weight_init.py create mode 100644 mmcv/models/vad_utils/CD_loss.py create mode 100644 mmcv/models/vad_utils/__init__.py create mode 100644 mmcv/models/vad_utils/map_utils.py create mode 100644 mmcv/models/vad_utils/plan_loss.py create mode 100644 mmcv/models/vad_utils/traj_lr_warmup.py create mode 100644 mmcv/ops/__init__.py create mode 100644 mmcv/ops/csrc/common/box_iou_rotated_utils.hpp create mode 100644 mmcv/ops/csrc/common/cuda/assign_score_withk_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/ball_query_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/bbox_overlaps_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/border_align_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/box_iou_rotated_cuda.cuh create mode 100644 mmcv/ops/csrc/common/cuda/carafe_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/carafe_naive_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/common_cuda_helper.hpp create mode 100644 mmcv/ops/csrc/common/cuda/correlation_cuda.cuh create mode 100644 mmcv/ops/csrc/common/cuda/deform_conv_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/deform_roi_pool_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/furthest_point_sample_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/gather_points_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/group_points_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/iou3d_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/knn_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/masked_conv2d_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/modulated_deform_conv_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/ms_deform_attn_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/nms_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/nms_rotated_cuda.cuh create mode 100644 mmcv/ops/csrc/common/cuda/points_in_boxes_cuda_kernel.cuh create mode 100644 
mmcv/ops/csrc/common/cuda/psamask_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/roi_align_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/roi_align_rotated_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/roi_pool_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/roiaware_pool3d_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/roipoint_pool3d_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/scatter_points_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/sigmoid_focal_loss_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/softmax_focal_loss_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/sync_bn_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/three_interpolate_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/three_nn_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/tin_shift_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/cuda/voxelization_cuda_kernel.cuh create mode 100644 mmcv/ops/csrc/common/pytorch_cpp_helper.hpp create mode 100644 mmcv/ops/csrc/common/pytorch_cuda_helper.hpp create mode 100644 mmcv/ops/csrc/common/pytorch_device_registry.hpp create mode 100644 mmcv/ops/csrc/pytorch/assign_score_withk.cpp create mode 100644 mmcv/ops/csrc/pytorch/ball_query.cpp create mode 100644 mmcv/ops/csrc/pytorch/bbox_overlaps.cpp create mode 100644 mmcv/ops/csrc/pytorch/border_align.cpp create mode 100644 mmcv/ops/csrc/pytorch/box_iou_rotated.cpp create mode 100644 mmcv/ops/csrc/pytorch/carafe.cpp create mode 100644 mmcv/ops/csrc/pytorch/carafe_naive.cpp create mode 100755 mmcv/ops/csrc/pytorch/contour_expand.cpp create mode 100644 mmcv/ops/csrc/pytorch/corner_pool.cpp create mode 100644 mmcv/ops/csrc/pytorch/correlation.cpp create mode 100644 mmcv/ops/csrc/pytorch/cpu/box_iou_rotated.cpp create mode 100644 mmcv/ops/csrc/pytorch/cpu/deform_conv.cpp create mode 100644 mmcv/ops/csrc/pytorch/cpu/modulated_deform_conv.cpp create mode 100644 mmcv/ops/csrc/pytorch/cpu/nms.cpp create mode 100644 mmcv/ops/csrc/pytorch/cpu/nms_rotated.cpp create mode 100755 mmcv/ops/csrc/pytorch/cpu/pixel_group.cpp create mode 100644 mmcv/ops/csrc/pytorch/cpu/points_in_boxes.cpp create mode 100644 mmcv/ops/csrc/pytorch/cpu/psamask.cpp create mode 100644 mmcv/ops/csrc/pytorch/cpu/roi_align.cpp create mode 100644 mmcv/ops/csrc/pytorch/cpu/roi_align_rotated.cpp create mode 100644 mmcv/ops/csrc/pytorch/cpu/voxelization.cpp create mode 100644 mmcv/ops/csrc/pytorch/cuda/assign_score_withk_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/ball_query_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/bbox_overlaps_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/border_align_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/box_iou_rotated_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/carafe_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/carafe_naive_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/correlation_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/cudabind.cpp create mode 100644 mmcv/ops/csrc/pytorch/cuda/deform_conv_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/deform_roi_pool_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/focal_loss_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/furthest_point_sample_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/fused_bias_leakyrelu_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/gather_points_cuda.cu create mode 100644 
mmcv/ops/csrc/pytorch/cuda/group_points_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/iou3d_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/knn_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/masked_conv2d_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/modulated_deform_conv_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/ms_deform_attn_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/nms_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/nms_rotated_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/points_in_boxes_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/psamask_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/roi_align_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/roi_align_rotated_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/roi_pool_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/roiaware_pool3d_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/roipoint_pool3d_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/scatter_points_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/sync_bn_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/three_interpolate_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/three_nn_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/tin_shift_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/upfirdn2d_kernel.cu create mode 100644 mmcv/ops/csrc/pytorch/cuda/voxelization_cuda.cu create mode 100644 mmcv/ops/csrc/pytorch/deform_conv.cpp create mode 100644 mmcv/ops/csrc/pytorch/deform_roi_pool.cpp create mode 100644 mmcv/ops/csrc/pytorch/focal_loss.cpp create mode 100644 mmcv/ops/csrc/pytorch/furthest_point_sample.cpp create mode 100644 mmcv/ops/csrc/pytorch/fused_bias_leakyrelu.cpp create mode 100644 mmcv/ops/csrc/pytorch/gather_points.cpp create mode 100644 mmcv/ops/csrc/pytorch/group_points.cpp create mode 100644 mmcv/ops/csrc/pytorch/info.cpp create mode 100644 mmcv/ops/csrc/pytorch/iou3d.cpp create mode 100644 mmcv/ops/csrc/pytorch/knn.cpp create mode 100644 mmcv/ops/csrc/pytorch/masked_conv2d.cpp create mode 100644 mmcv/ops/csrc/pytorch/modulated_deform_conv.cpp create mode 100644 mmcv/ops/csrc/pytorch/ms_deform_attn.cpp create mode 100644 mmcv/ops/csrc/pytorch/nms.cpp create mode 100644 mmcv/ops/csrc/pytorch/nms_rotated.cpp create mode 100755 mmcv/ops/csrc/pytorch/pixel_group.cpp create mode 100644 mmcv/ops/csrc/pytorch/points_in_boxes.cpp create mode 100644 mmcv/ops/csrc/pytorch/psamask.cpp create mode 100644 mmcv/ops/csrc/pytorch/pybind.cpp create mode 100644 mmcv/ops/csrc/pytorch/roi_align.cpp create mode 100644 mmcv/ops/csrc/pytorch/roi_align_rotated.cpp create mode 100644 mmcv/ops/csrc/pytorch/roi_pool.cpp create mode 100644 mmcv/ops/csrc/pytorch/roiaware_pool3d.cpp create mode 100644 mmcv/ops/csrc/pytorch/roipoint_pool3d.cpp create mode 100644 mmcv/ops/csrc/pytorch/scatter_points.cpp create mode 100644 mmcv/ops/csrc/pytorch/sync_bn.cpp create mode 100644 mmcv/ops/csrc/pytorch/three_interpolate.cpp create mode 100644 mmcv/ops/csrc/pytorch/three_nn.cpp create mode 100644 mmcv/ops/csrc/pytorch/tin_shift.cpp create mode 100644 mmcv/ops/csrc/pytorch/upfirdn2d.cpp create mode 100644 mmcv/ops/csrc/pytorch/voxelization.cpp create mode 100644 mmcv/ops/deform_conv.py create mode 100644 mmcv/ops/focal_loss.py create mode 100644 mmcv/ops/iou3d.py create mode 100644 mmcv/ops/iou3d_det/__init__.py create mode 100644 mmcv/ops/iou3d_det/iou3d_utils.py create mode 100644 mmcv/ops/iou3d_det/src/iou3d.cpp create mode 100644 mmcv/ops/iou3d_det/src/iou3d_kernel.cu 
create mode 100644 mmcv/ops/masked_conv.py create mode 100644 mmcv/ops/modulated_deform_conv.py create mode 100644 mmcv/ops/multi_scale_deform_attn.py create mode 100644 mmcv/ops/nms.py create mode 100644 mmcv/ops/roi_align.py create mode 100644 mmcv/ops/roiaware_pool3d/__init__.py create mode 100644 mmcv/ops/roiaware_pool3d/points_in_boxes.py create mode 100644 mmcv/ops/roiaware_pool3d/roiaware_pool3d.py create mode 100644 mmcv/ops/roiaware_pool3d/src/points_in_boxes_cpu.cpp create mode 100644 mmcv/ops/roiaware_pool3d/src/points_in_boxes_cuda.cu create mode 100644 mmcv/ops/roiaware_pool3d/src/roiaware_pool3d.cpp create mode 100644 mmcv/ops/roiaware_pool3d/src/roiaware_pool3d_kernel.cu create mode 100644 mmcv/ops/voxelize.py create mode 100644 mmcv/optims/__init__.py create mode 100644 mmcv/optims/adamw.py create mode 100644 mmcv/optims/optimizer.py create mode 100644 mmcv/parallel/__init__.py create mode 100644 mmcv/parallel/collate.py create mode 100644 mmcv/parallel/data_container.py create mode 100644 mmcv/parallel/registry.py create mode 100644 mmcv/parallel/utils.py create mode 100644 mmcv/runner/__init__.py create mode 100644 mmcv/runner/base_runner.py create mode 100644 mmcv/runner/builder.py create mode 100644 mmcv/runner/epoch_based_runner.py create mode 100644 mmcv/runner/hooks/__init__.py create mode 100644 mmcv/runner/hooks/checkpoint.py create mode 100644 mmcv/runner/hooks/evaluation.py create mode 100644 mmcv/runner/hooks/hook.py create mode 100644 mmcv/runner/hooks/iter_timer.py create mode 100644 mmcv/runner/hooks/logger/__init__.py create mode 100644 mmcv/runner/hooks/logger/base.py create mode 100644 mmcv/runner/hooks/logger/tensorboard.py create mode 100644 mmcv/runner/hooks/logger/text.py create mode 100644 mmcv/runner/hooks/lr_updater.py create mode 100644 mmcv/runner/hooks/optimizer.py create mode 100644 mmcv/runner/hooks/sampler_seed.py create mode 100644 mmcv/runner/hooks/vad_hooks.py create mode 100644 mmcv/structures/__init__.py create mode 100644 mmcv/structures/boxes.py create mode 100644 mmcv/structures/image_list.py create mode 100644 mmcv/structures/instances.py create mode 100644 mmcv/structures/keypoints.py create mode 100644 mmcv/structures/masks.py create mode 100644 mmcv/structures/rotated_boxes.py create mode 100644 mmcv/utils/__init__.py create mode 100644 mmcv/utils/bricks.py create mode 100644 mmcv/utils/checkpoint.py create mode 100644 mmcv/utils/collect_env.py create mode 100644 mmcv/utils/config.py create mode 100644 mmcv/utils/contextmanagers.py create mode 100644 mmcv/utils/ext_loader.py create mode 100644 mmcv/utils/fp16_utils.py create mode 100755 mmcv/utils/grid_mask.py create mode 100644 mmcv/utils/hub.py create mode 100644 mmcv/utils/log_buffer.py create mode 100644 mmcv/utils/logger.py create mode 100644 mmcv/utils/logging.py create mode 100644 mmcv/utils/memory.py create mode 100644 mmcv/utils/misc.py create mode 100644 mmcv/utils/path.py create mode 100644 mmcv/utils/position_embedding.py create mode 100644 mmcv/utils/priority.py create mode 100644 mmcv/utils/progressbar.py create mode 100644 mmcv/utils/registry.py create mode 100644 mmcv/utils/runner_utils.py create mode 100644 mmcv/utils/timer.py create mode 100644 mmcv/utils/util_mixins.py create mode 100644 mmcv/utils/version_utils.py create mode 100644 mmcv/utils/visual.py create mode 100644 requirements.txt create mode 100644 setup.py create mode 100644 team_code/pid_controller.py create mode 100644 team_code/planner.py create mode 100644 team_code/uniad_b2d_agent.py create mode 
100644 team_code/vad_b2d_agent.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..70e40d6 --- /dev/null +++ b/.gitignore @@ -0,0 +1,160 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.dist_test/ +*.avi +ckpts/ +data/bench2drive/ +data/infos +data_carla +viz/ +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# output & ckpts +output/ +test/ +ckpts/ +ckpts + +# work_dirs +**/work_dirs + +batchscript* +phoenix* + +debug/ +*projs/ + +INFO +pyrightconfig.json +.vscode/ +*.pth +*.log +tmp_ckpts/ +val/ +*.ipynb \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..59c5fd8 --- /dev/null +++ b/README.md @@ -0,0 +1,52 @@ + +

+<!-- centered project banner omitted -->
+
+# Bench2DriveZoo
+

+
+
+# Introduction
+
+- We implement training and open-loop evaluation for [BEVFormer](https://github.com/fundamentalvision/BEVFormer), [UniAD](https://github.com/OpenDriveLab/UniAD), [VAD](https://github.com/hustvl/VAD) on the [Bench2Drive](https://github.com/Thinklab-SJTU/Bench2Drive) dataset.
+- We implement the closed-loop evaluation process in Carla for UniAD and VAD on Bench2Drive.
+- We simplify the code framework by merging multiple dependencies (mmcv, mmseg, mmdet, and mmdet3d) into a single library and support the latest version of PyTorch (2.3.1), which greatly facilitates installation and development.
+
+
+
+# Getting Started
+
+- [Installation](docs/INSTALL.md)
+- [Prepare Dataset](docs/INSTALL.md)
+- [Train and Open-Loop Eval](docs/TRAIN_EVAL.md)
+- [Closed-Loop Eval in Carla](docs/EVAL_IN_CARLA.md)
+- [Convert Codes from Nuscenes to Bench2Drive](docs/CONVERT_GUIDE.md)
+
+# Results and Pre-trained Models
+
+## UniAD and VAD
+
+| Method | L2 (m) 2s | Driving Score | Success Rate (%) | Config | Download |
+| :---: | :---: | :---: | :---: | :---: | :---: |
+| UniAD-Tiny | 0.80 | 32.00 | 9.54 | [config](adzoo/uniad/configs/stage2_e2e/tiny_e2e_b2d.py) | [Hugging Face](https://huggingface.co/rethinklab/Bench2DriveZoo/blob/main/bevformer_tiny_b2d.pth)/[Baidu Cloud](https://pan.baidu.com/s/1psr7AKYHD7CitZ30Bz-9sA?pwd=1234) |
+| UniAD-Base | 0.73 | 37.72 | 9.54 | [config](adzoo/uniad/configs/stage2_e2e/base_e2e_b2d.py) | [Hugging Face](https://huggingface.co/rethinklab/Bench2DriveZoo/blob/main/uniad_base_b2d.pth)/[Baidu Cloud](https://pan.baidu.com/s/11p9IUGqTax1f4W_qsdLCRw?pwd=1234) |
+| VAD | 0.91 | 39.4 | 10.0 | [config](adzoo/vad/configs/VAD/VAD_base_e2e_b2d.py) | [Hugging Face](https://huggingface.co/rethinklab/Bench2DriveZoo/blob/main/vad_b2d_base.pth)/[Baidu Cloud](https://pan.baidu.com/s/11p9IUGqTax1f4W_qsdLCRw?pwd=1234) |
+
+## BEVFormer
+
+| Method | mAP | NDS | Config | Download |
+| :---: | :---: | :---: | :---: | :---: |
+| BEVFormer-Tiny | 0.37 | 0.43 | [config](adzoo/bevformer/configs/bevformer/bevformer_tiny_b2d.py) | [Hugging Face](https://huggingface.co/rethinklab/Bench2DriveZoo/blob/main/bevformer_tiny_b2d.pth)/[Baidu Cloud](https://pan.baidu.com/s/1TWMs9YgKYm2DF5YfXF8i3g?pwd=1234) |
+| BEVFormer-Base | 0.63 | 0.67 | [config](adzoo/bevformer/configs/bevformer/bevformer_base_b2d.py) | [Hugging Face](https://huggingface.co/rethinklab/Bench2DriveZoo/blob/main/bevformer_base_b2d.pth)/[Baidu Cloud](https://pan.baidu.com/s/1Y4VkE1gc8BU0zJ4z2fmIkQ?pwd=1234) |
+
+
+# Related Resources
+
+- [Bench2Drive](https://github.com/Thinklab-SJTU/Bench2Drive)
+- [BEVFormer](https://github.com/fundamentalvision/BEVFormer)
+- [UniAD](https://github.com/OpenDriveLab/UniAD)
+- [VAD](https://github.com/hustvl/VAD)
diff --git a/adzoo/__init__.py b/adzoo/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/adzoo/bevformer/analysis_tools/__init__.py b/adzoo/bevformer/analysis_tools/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/adzoo/bevformer/analysis_tools/analyze_logs.py b/adzoo/bevformer/analysis_tools/analyze_logs.py
new file mode 100755
index 0000000..806175f
--- /dev/null
+++ b/adzoo/bevformer/analysis_tools/analyze_logs.py
@@ -0,0 +1,201 @@
+# Copyright (c) OpenMMLab. All rights reserved.
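+# [Editor's note] A hedged usage sketch, not part of the original tool: the
+# subcommands defined below are meant to be run on JSON training logs, e.g.
+# (hypothetical log path)
+#   python analyze_logs.py cal_train_time work_dirs/run/20240606.log.json
+#   python analyze_logs.py plot_curve work_dirs/run/20240606.log.json --keys loss --out loss.png
+# A minimal standalone version of the averaging that cal_train_time performs,
+# assuming one JSON dict per line with 'epoch' and 'time' fields:
+def _example_mean_iter_time(json_log_path):
+    import json
+    times = []
+    with open(json_log_path) as f:
+        for line in f:
+            log = json.loads(line.strip())
+            if 'epoch' in log and 'time' in log:
+                times.append(log['time'])
+    # average seconds per training iteration across the whole log
+    return sum(times) / max(len(times), 1)
+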
+import argparse
+import json
+import numpy as np
+import seaborn as sns
+from collections import defaultdict
+from matplotlib import pyplot as plt
+
+
+def cal_train_time(log_dicts, args):
+    for i, log_dict in enumerate(log_dicts):
+        print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}')
+        all_times = []
+        for epoch in log_dict.keys():
+            if args.include_outliers:
+                all_times.append(log_dict[epoch]['time'])
+            else:
+                all_times.append(log_dict[epoch]['time'][1:])
+        all_times = np.array(all_times)
+        epoch_ave_time = all_times.mean(-1)
+        slowest_epoch = epoch_ave_time.argmax()
+        fastest_epoch = epoch_ave_time.argmin()
+        std_over_epoch = epoch_ave_time.std()
+        print(f'slowest epoch {slowest_epoch + 1}, '
+              f'average time is {epoch_ave_time[slowest_epoch]:.4f}')
+        print(f'fastest epoch {fastest_epoch + 1}, '
+              f'average time is {epoch_ave_time[fastest_epoch]:.4f}')
+        print(f'time std over epochs is {std_over_epoch:.4f}')
+        print(f'average iter time: {np.mean(all_times):.4f} s/iter')
+        print()
+
+
+def plot_curve(log_dicts, args):
+    if args.backend is not None:
+        plt.switch_backend(args.backend)
+    sns.set_style(args.style)
+    # if legend is None, use {filename}_{key} as legend
+    legend = args.legend
+    if legend is None:
+        legend = []
+        for json_log in args.json_logs:
+            for metric in args.keys:
+                legend.append(f'{json_log}_{metric}')
+    assert len(legend) == (len(args.json_logs) * len(args.keys))
+    metrics = args.keys
+
+    num_metrics = len(metrics)
+    for i, log_dict in enumerate(log_dicts):
+        epochs = list(log_dict.keys())
+        for j, metric in enumerate(metrics):
+            print(f'plot curve of {args.json_logs[i]}, metric is {metric}')
+            if metric not in log_dict[epochs[args.interval - 1]]:
+                raise KeyError(
+                    f'{args.json_logs[i]} does not contain metric {metric}')
+
+            if args.mode == 'eval':
+                if min(epochs) == args.interval:
+                    x0 = args.interval
+                else:
+                    # if current training is resumed from a previous checkpoint
+                    # we lost information in early epochs
+                    # `xs` should start according to `min(epochs)`
+                    if min(epochs) % args.interval == 0:
+                        x0 = min(epochs)
+                    else:
+                        # find the first epoch that does eval
+                        x0 = min(epochs) + args.interval - \
+                            min(epochs) % args.interval
+                xs = np.arange(x0, max(epochs) + 1, args.interval)
+                ys = []
+                for epoch in epochs[args.interval - 1::args.interval]:
+                    ys += log_dict[epoch][metric]
+
+                # if training is aborted before eval of the last epoch
+                # `xs` and `ys` will have different lengths and cause an error
+                # check if `ys[-1]` is empty here
+                if not log_dict[epoch][metric]:
+                    xs = xs[:-1]
+
+                ax = plt.gca()
+                ax.set_xticks(xs)
+                plt.xlabel('epoch')
+                plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o')
+            else:
+                xs = []
+                ys = []
+                num_iters_per_epoch = \
+                    log_dict[epochs[args.interval - 1]]['iter'][-1]
+                for epoch in epochs[args.interval - 1::args.interval]:
+                    iters = log_dict[epoch]['iter']
+                    if log_dict[epoch]['mode'][-1] == 'val':
+                        iters = iters[:-1]
+                    xs.append(
+                        np.array(iters) + (epoch - 1) * num_iters_per_epoch)
+                    ys.append(np.array(log_dict[epoch][metric][:len(iters)]))
+                xs = np.concatenate(xs)
+                ys = np.concatenate(ys)
+                plt.xlabel('iter')
+                plt.plot(
+                    xs, ys, label=legend[i * num_metrics + j], linewidth=0.5)
+            plt.legend()
+        if args.title is not None:
+            plt.title(args.title)
+    if args.out is None:
+        plt.show()
+    else:
+        print(f'save curve to: {args.out}')
+        plt.savefig(args.out)
+        plt.cla()
+
+
+def add_plot_parser(subparsers):
+    parser_plt = subparsers.add_parser(
+        'plot_curve', help='parser for plotting curves')
+    parser_plt.add_argument(
+        'json_logs',
+        type=str,
+        nargs='+',
+        help='path of train log in json format')
+    parser_plt.add_argument(
+        '--keys',
+        type=str,
+        nargs='+',
+        default=['mAP_0.25'],
+        help='the metric that you want to plot')
+    parser_plt.add_argument('--title', type=str, help='title of figure')
+    parser_plt.add_argument(
+        '--legend',
+        type=str,
+        nargs='+',
+        default=None,
+        help='legend of each plot')
+    parser_plt.add_argument(
+        '--backend', type=str, default=None, help='backend of plt')
+    parser_plt.add_argument(
+        '--style', type=str, default='dark', help='style of plt')
+    parser_plt.add_argument('--out', type=str, default=None)
+    parser_plt.add_argument('--mode', type=str, default='train')
+    parser_plt.add_argument('--interval', type=int, default=1)
+
+
+def add_time_parser(subparsers):
+    parser_time = subparsers.add_parser(
+        'cal_train_time',
+        help='parser for computing the average time per training iteration')
+    parser_time.add_argument(
+        'json_logs',
+        type=str,
+        nargs='+',
+        help='path of train log in json format')
+    parser_time.add_argument(
+        '--include-outliers',
+        action='store_true',
+        help='include the first value of every epoch when computing '
+        'the average time')
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Analyze Json Log')
+    # currently only supports plotting curves and computing average train time
+    subparsers = parser.add_subparsers(dest='task', help='task parser')
+    add_plot_parser(subparsers)
+    add_time_parser(subparsers)
+    args = parser.parse_args()
+    return args
+
+
+def load_json_logs(json_logs):
+    # load and convert json_logs to log_dict, where the key is the epoch and
+    # the value is a sub dict; keys of the sub dict are different metrics,
+    # e.g. memory, bbox_mAP, and values are lists of the corresponding
+    # values from all iterations
+    log_dicts = [dict() for _ in json_logs]
+    for json_log, log_dict in zip(json_logs, log_dicts):
+        with open(json_log, 'r') as log_file:
+            for line in log_file:
+                log = json.loads(line.strip())
+                # skip lines without `epoch` field
+                if 'epoch' not in log:
+                    continue
+                epoch = log.pop('epoch')
+                if epoch not in log_dict:
+                    log_dict[epoch] = defaultdict(list)
+                for k, v in log.items():
+                    log_dict[epoch][k].append(v)
+    return log_dicts
+
+
+def main():
+    args = parse_args()
+
+    json_logs = args.json_logs
+    for json_log in json_logs:
+        assert json_log.endswith('.json')
+
+    log_dicts = load_json_logs(json_logs)
+
+    eval(args.task)(log_dicts, args)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/adzoo/bevformer/analysis_tools/benchmark.py b/adzoo/bevformer/analysis_tools/benchmark.py
new file mode 100755
index 0000000..487a348
--- /dev/null
+++ b/adzoo/bevformer/analysis_tools/benchmark.py
@@ -0,0 +1,98 @@
+# Copyright (c) OpenMMLab. All rights reserved.
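+# [Editor's note] A minimal sketch of the warmup-then-average timing pattern
+# this script uses, assuming a hypothetical `workload` callable; the real
+# benchmark below additionally synchronizes CUDA around each iteration:
+def _example_fps(workload, num_iters=100, num_warmup=5):
+    import time
+    pure_inf_time = 0.0
+    for i in range(num_iters):
+        start = time.perf_counter()
+        workload()
+        elapsed = time.perf_counter() - start
+        if i >= num_warmup:  # the first iterations are skipped as warmup
+            pure_inf_time += elapsed
+    return (num_iters - num_warmup) / pure_inf_time
+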
+import argparse
+import time
+import torch
+from mmcv import Config
+from mmcv.parallel import MMDataParallel
+from mmcv.runner import load_checkpoint, wrap_fp16_model
+import sys
+sys.path.append('.')
+from projects.mmdet3d_plugin.datasets.builder import build_dataloader
+from projects.mmdet3d_plugin.datasets import custom_build_dataset
+# from mmdet3d.datasets import build_dataloader, build_dataset
+from mmdet3d.models import build_detector
+#from tools.misc.fuse_conv_bn import fuse_module
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='MMDet benchmark a model')
+    parser.add_argument('config', help='test config file path')
+    parser.add_argument('--checkpoint', default=None, help='checkpoint file')
+    parser.add_argument(
+        '--samples', type=int, default=2000, help='samples to benchmark')
+    parser.add_argument(
+        '--log-interval', type=int, default=50, help='interval of logging')
+    parser.add_argument(
+        '--fuse-conv-bn',
+        action='store_true',
+        help='Whether to fuse conv and bn, this will slightly increase '
+        'the inference speed')
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = parse_args()
+
+    cfg = Config.fromfile(args.config)
+    # set cudnn_benchmark
+    if cfg.get('cudnn_benchmark', False):
+        torch.backends.cudnn.benchmark = True
+    cfg.model.pretrained = None
+    cfg.data.test.test_mode = True
+
+    # build the dataloader
+    # TODO: support multiple images per gpu (only minor changes are needed)
+    print(cfg.data.test)
+    dataset = custom_build_dataset(cfg.data.test)
+    data_loader = build_dataloader(
+        dataset,
+        samples_per_gpu=1,
+        workers_per_gpu=cfg.data.workers_per_gpu,
+        dist=False,
+        shuffle=False)
+
+    # build the model and load checkpoint
+    cfg.model.train_cfg = None
+    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
+    fp16_cfg = cfg.get('fp16', None)
+    if fp16_cfg is not None:
+        wrap_fp16_model(model)
+    if args.checkpoint is not None:
+        load_checkpoint(model, args.checkpoint, map_location='cpu')
+    #if args.fuse_conv_bn:
+    #    model = fuse_module(model)
+
+    model = MMDataParallel(model, device_ids=[0])
+
+    model.eval()
+
+    # the first several iterations may be very slow, so skip them
+    num_warmup = 5
+    pure_inf_time = 0
+
+    # benchmark with several samples and take the average
+    for i, data in enumerate(data_loader):
+        torch.cuda.synchronize()
+        start_time = time.perf_counter()
+        with torch.no_grad():
+            model(return_loss=False, rescale=True, **data)
+
+        torch.cuda.synchronize()
+        elapsed = time.perf_counter() - start_time
+
+        if i >= num_warmup:
+            pure_inf_time += elapsed
+            if (i + 1) % args.log_interval == 0:
+                fps = (i + 1 - num_warmup) / pure_inf_time
+                print(f'Done image [{i + 1:<3}/ {args.samples}], '
+                      f'fps: {fps:.1f} img / s')
+
+        if (i + 1) == args.samples:
+            pure_inf_time += elapsed
+            fps = (i + 1 - num_warmup) / pure_inf_time
+            print(f'Overall fps: {fps:.1f} img / s')
+            break
+
+
+if __name__ == '__main__':
+    main()
diff --git a/adzoo/bevformer/analysis_tools/get_params.py b/adzoo/bevformer/analysis_tools/get_params.py
new file mode 100644
index 0000000..fb697ad
--- /dev/null
+++ b/adzoo/bevformer/analysis_tools/get_params.py
@@ -0,0 +1,10 @@
+import torch
+
+file_path = './ckpts/bevformer_v4.pth'
+checkpoint = torch.load(file_path, map_location='cpu')
+num_params = 0
+for key in list(checkpoint['state_dict'].keys()):
+    num_params += checkpoint['state_dict'][key].nelement()
+print(num_params)
+
+# smaller 63374123
+# v4 69140395
diff --git a/adzoo/bevformer/analysis_tools/visual.py b/adzoo/bevformer/analysis_tools/visual.py
new file mode 100644
index 0000000..f711b75
--- /dev/null
+++ b/adzoo/bevformer/analysis_tools/visual.py
@@ -0,0 +1,477 @@
+# Based on https://github.com/nutonomy/nuscenes-devkit
+# ---------------------------------------------
+# Modified by Zhiqi Li
+# ---------------------------------------------
+
+import mmcv
+import numpy as np
+import matplotlib.pyplot as plt
+from PIL import Image
+from matplotlib import rcParams
+from matplotlib.axes import Axes
+from pyquaternion import Quaternion
+from tqdm import tqdm
+from typing import Tuple, List, Iterable
+from nuscenes.nuscenes import NuScenes
+from nuscenes.utils.data_classes import LidarPointCloud, RadarPointCloud, Box
+from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility, transform_matrix
+from nuscenes.eval.common.data_classes import EvalBoxes, EvalBox
+from nuscenes.eval.detection.data_classes import DetectionBox
+from nuscenes.eval.detection.utils import category_to_detection_name
+from nuscenes.eval.detection.render import visualize_sample
+
+
+cams = ['CAM_FRONT',
+        'CAM_FRONT_RIGHT',
+        'CAM_BACK_RIGHT',
+        'CAM_BACK',
+        'CAM_BACK_LEFT',
+        'CAM_FRONT_LEFT']
+
+
+def render_annotation(
+        anntoken: str,
+        margin: float = 10,
+        view: np.ndarray = np.eye(4),
+        box_vis_level: BoxVisibility = BoxVisibility.ANY,
+        out_path: str = 'render.png',
+        extra_info: bool = False) -> None:
+    """
+    Render selected annotation.
+    :param anntoken: Sample_annotation token.
+    :param margin: How many meters in each direction to include in LIDAR view.
+    :param view: LIDAR view point.
+    :param box_vis_level: If sample_data is an image, this sets required visibility for boxes.
+    :param out_path: Optional path to save the rendered figure to disk.
+    :param extra_info: Whether to render extra information below camera view.
+    """
+    ann_record = nusc.get('sample_annotation', anntoken)
+    sample_record = nusc.get('sample', ann_record['sample_token'])
+    assert 'LIDAR_TOP' in sample_record['data'].keys(), 'Error: No LIDAR_TOP in data, unable to render.'
+
+    # Figure out which camera the object is fully visible in (this may return nothing).
+    boxes, cam = [], []
+    cams = [key for key in sample_record['data'].keys() if 'CAM' in key]
+    all_bboxes = []
+    select_cams = []
+    for cam in cams:
+        _, boxes, _ = nusc.get_sample_data(sample_record['data'][cam], box_vis_level=box_vis_level,
+                                           selected_anntokens=[anntoken])
+        if len(boxes) > 0:
+            all_bboxes.append(boxes)
+            select_cams.append(cam)
+            # We found an image that matches. Let's abort.
+    # assert len(boxes) > 0, 'Error: Could not find image where annotation is visible. ' \
+    #                        'Try using e.g. BoxVisibility.ANY.'
+    # assert len(boxes) < 2, 'Error: Found multiple annotations. Something is wrong!'
+
+    num_cam = len(all_bboxes)
+
+    fig, axes = plt.subplots(1, num_cam + 1, figsize=(18, 9))
+    select_cams = [sample_record['data'][cam] for cam in select_cams]
+    print('bbox in cams:', select_cams)
+    # Plot LIDAR view.
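+    # [Editor's note] nusc.get_sample_data returns boxes already transformed
+    # into the LIDAR_TOP sensor frame, so they can be drawn directly with the
+    # `view` matrix passed to this function.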
+    lidar = sample_record['data']['LIDAR_TOP']
+    data_path, boxes, camera_intrinsic = nusc.get_sample_data(lidar, selected_anntokens=[anntoken])
+    LidarPointCloud.from_file(data_path).render_height(axes[0], view=view)
+    for box in boxes:
+        c = np.array(get_color(box.name)) / 255.0
+        box.render(axes[0], view=view, colors=(c, c, c))
+    corners = view_points(boxes[0].corners(), view, False)[:2, :]
+    axes[0].set_xlim([np.min(corners[0, :]) - margin, np.max(corners[0, :]) + margin])
+    axes[0].set_ylim([np.min(corners[1, :]) - margin, np.max(corners[1, :]) + margin])
+    axes[0].axis('off')
+    axes[0].set_aspect('equal')
+
+    # Plot CAMERA view.
+    for i in range(1, num_cam + 1):
+        cam = select_cams[i - 1]
+        data_path, boxes, camera_intrinsic = nusc.get_sample_data(cam, selected_anntokens=[anntoken])
+        im = Image.open(data_path)
+        axes[i].imshow(im)
+        axes[i].set_title(nusc.get('sample_data', cam)['channel'])
+        axes[i].axis('off')
+        axes[i].set_aspect('equal')
+        for box in boxes:
+            c = np.array(get_color(box.name)) / 255.0
+            box.render(axes[i], view=camera_intrinsic, normalize=True, colors=(c, c, c))
+
+        # Print extra information about the annotation below the camera view.
+        axes[i].set_xlim(0, im.size[0])
+        axes[i].set_ylim(im.size[1], 0)
+
+        if extra_info:
+            rcParams['font.family'] = 'monospace'
+
+            w, l, h = ann_record['size']
+            category = ann_record['category_name']
+            lidar_points = ann_record['num_lidar_pts']
+            radar_points = ann_record['num_radar_pts']
+
+            sample_data_record = nusc.get('sample_data', sample_record['data']['LIDAR_TOP'])
+            pose_record = nusc.get('ego_pose', sample_data_record['ego_pose_token'])
+            dist = np.linalg.norm(np.array(pose_record['translation']) - np.array(ann_record['translation']))
+
+            information = ' \n'.join(['category: {}'.format(category),
+                                      '',
+                                      '# lidar points: {0:>4}'.format(lidar_points),
+                                      '# radar points: {0:>4}'.format(radar_points),
+                                      '',
+                                      'distance: {:>7.3f}m'.format(dist),
+                                      '',
+                                      'width: {:>7.3f}m'.format(w),
+                                      'length: {:>7.3f}m'.format(l),
+                                      'height: {:>7.3f}m'.format(h)])
+
+            plt.annotate(information, (0, 0), (0, -20), xycoords='axes fraction', textcoords='offset points', va='top')
+
+    if out_path is not None:
+        plt.savefig(out_path)
+
+
+def get_sample_data(sample_data_token: str,
+                    box_vis_level: BoxVisibility = BoxVisibility.ANY,
+                    selected_anntokens=None,
+                    use_flat_vehicle_coordinates: bool = False):
+    """
+    Returns the data path as well as all annotations related to that sample_data.
+    Note that the boxes are transformed into the current sensor's coordinate frame.
+    :param sample_data_token: Sample_data token.
+    :param box_vis_level: If sample_data is an image, this sets required visibility for boxes.
+    :param selected_anntokens: If provided only return the selected annotation.
+    :param use_flat_vehicle_coordinates: Instead of the current sensor's coordinate frame, use ego frame which is
+        aligned to z-plane in the world.
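+        (Editor's note: concretely, the flat option below applies a translation
+        by -t_ego followed by a rotation by R_z(yaw)^-1 only, so boxes stay
+        level with the ground plane instead of inheriting sensor roll/pitch.)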
+    :return: (data_path, boxes, camera_intrinsic)
+    """
+
+    # Retrieve sensor & pose records
+    sd_record = nusc.get('sample_data', sample_data_token)
+    cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
+    sensor_record = nusc.get('sensor', cs_record['sensor_token'])
+    pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])
+
+    data_path = nusc.get_sample_data_path(sample_data_token)
+
+    if sensor_record['modality'] == 'camera':
+        cam_intrinsic = np.array(cs_record['camera_intrinsic'])
+        imsize = (sd_record['width'], sd_record['height'])
+    else:
+        cam_intrinsic = None
+        imsize = None
+
+    # Retrieve all sample annotations and map to sensor coordinate system.
+    if selected_anntokens is not None:
+        boxes = list(map(nusc.get_box, selected_anntokens))
+    else:
+        boxes = nusc.get_boxes(sample_data_token)
+
+    # Make list of Box objects including coord system transforms.
+    box_list = []
+    for box in boxes:
+        if use_flat_vehicle_coordinates:
+            # Move box to ego vehicle coord system parallel to world z plane.
+            yaw = Quaternion(pose_record['rotation']).yaw_pitch_roll[0]
+            box.translate(-np.array(pose_record['translation']))
+            box.rotate(Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).inverse)
+        else:
+            # Move box to ego vehicle coord system.
+            box.translate(-np.array(pose_record['translation']))
+            box.rotate(Quaternion(pose_record['rotation']).inverse)
+
+            # Move box to sensor coord system.
+            box.translate(-np.array(cs_record['translation']))
+            box.rotate(Quaternion(cs_record['rotation']).inverse)
+
+        if sensor_record['modality'] == 'camera' and not \
+                box_in_image(box, cam_intrinsic, imsize, vis_level=box_vis_level):
+            continue
+
+        box_list.append(box)
+
+    return data_path, box_list, cam_intrinsic
+
+
+def get_predicted_data(sample_data_token: str,
+                       box_vis_level: BoxVisibility = BoxVisibility.ANY,
+                       selected_anntokens=None,
+                       use_flat_vehicle_coordinates: bool = False,
+                       pred_anns=None):
+    """
+    Returns the data path as well as all annotations related to that sample_data.
+    Note that the boxes are transformed into the current sensor's coordinate frame.
+    :param sample_data_token: Sample_data token.
+    :param box_vis_level: If sample_data is an image, this sets required visibility for boxes.
+    :param selected_anntokens: If provided only return the selected annotation.
+    :param use_flat_vehicle_coordinates: Instead of the current sensor's coordinate frame, use ego frame which is
+        aligned to z-plane in the world.
+    :return: (data_path, boxes, camera_intrinsic)
+    """
+
+    # Retrieve sensor & pose records
+    sd_record = nusc.get('sample_data', sample_data_token)
+    cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
+    sensor_record = nusc.get('sensor', cs_record['sensor_token'])
+    pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])
+
+    data_path = nusc.get_sample_data_path(sample_data_token)
+
+    if sensor_record['modality'] == 'camera':
+        cam_intrinsic = np.array(cs_record['camera_intrinsic'])
+        imsize = (sd_record['width'], sd_record['height'])
+    else:
+        cam_intrinsic = None
+        imsize = None
+
+    # Retrieve all sample annotations and map to sensor coordinate system.
+    # if selected_anntokens is not None:
+    #     boxes = list(map(nusc.get_box, selected_anntokens))
+    # else:
+    #     boxes = nusc.get_boxes(sample_data_token)
+    boxes = pred_anns
+    # Make list of Box objects including coord system transforms.
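+    # [Editor's note] With use_flat_vehicle_coordinates=False, each predicted
+    # box below goes global -> ego -> sensor by undoing the ego pose and then
+    # the sensor extrinsics: x_sensor = R_s^-1 * (R_e^-1 * (x_global - t_e) - t_s).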
+    box_list = []
+    for box in boxes:
+        if use_flat_vehicle_coordinates:
+            # Move box to ego vehicle coord system parallel to world z plane.
+            yaw = Quaternion(pose_record['rotation']).yaw_pitch_roll[0]
+            box.translate(-np.array(pose_record['translation']))
+            box.rotate(Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).inverse)
+        else:
+            # Move box to ego vehicle coord system.
+            box.translate(-np.array(pose_record['translation']))
+            box.rotate(Quaternion(pose_record['rotation']).inverse)
+
+            # Move box to sensor coord system.
+            box.translate(-np.array(cs_record['translation']))
+            box.rotate(Quaternion(cs_record['rotation']).inverse)
+
+        if sensor_record['modality'] == 'camera' and not \
+                box_in_image(box, cam_intrinsic, imsize, vis_level=box_vis_level):
+            continue
+        box_list.append(box)
+
+    return data_path, box_list, cam_intrinsic
+
+
+def lidar_render(sample_token, data, out_path=None):
+    bbox_gt_list = []
+    bbox_pred_list = []
+    anns = nusc.get('sample', sample_token)['anns']
+    for ann in anns:
+        content = nusc.get('sample_annotation', ann)
+        try:
+            bbox_gt_list.append(DetectionBox(
+                sample_token=content['sample_token'],
+                translation=tuple(content['translation']),
+                size=tuple(content['size']),
+                rotation=tuple(content['rotation']),
+                velocity=nusc.box_velocity(content['token'])[:2],
+                ego_translation=(0.0, 0.0, 0.0) if 'ego_translation' not in content
+                else tuple(content['ego_translation']),
+                num_pts=-1 if 'num_pts' not in content else int(content['num_pts']),
+                detection_name=category_to_detection_name(content['category_name']),
+                detection_score=-1.0 if 'detection_score' not in content else float(content['detection_score']),
+                attribute_name=''))
+        except Exception:
+            pass
+
+    bbox_anns = data['results'][sample_token]
+    for content in bbox_anns:
+        bbox_pred_list.append(DetectionBox(
+            sample_token=content['sample_token'],
+            translation=tuple(content['translation']),
+            size=tuple(content['size']),
+            rotation=tuple(content['rotation']),
+            velocity=tuple(content['velocity']),
+            ego_translation=(0.0, 0.0, 0.0) if 'ego_translation' not in content
+            else tuple(content['ego_translation']),
+            num_pts=-1 if 'num_pts' not in content else int(content['num_pts']),
+            detection_name=content['detection_name'],
+            detection_score=-1.0 if 'detection_score' not in content else float(content['detection_score']),
+            attribute_name=content['attribute_name']))
+    gt_annotations = EvalBoxes()
+    pred_annotations = EvalBoxes()
+    gt_annotations.add_boxes(sample_token, bbox_gt_list)
+    pred_annotations.add_boxes(sample_token, bbox_pred_list)
+    print('green is ground truth')
+    print('blue is the predicted result')
+    visualize_sample(nusc, sample_token, gt_annotations, pred_annotations, savepath=out_path + '_bev')
+
+
+def get_color(category_name: str):
+    """
+    Provides the default colors based on the category names.
+    This method works for the general nuScenes categories, as well as the nuScenes detection categories.
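+    (Editor's note: a few detection names are mapped to their nuScenes
+    categories explicitly; everything else is matched by substring against
+    nusc.colormap keys, and unknown names fall back to black, [0, 0, 0].)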
+ """ + a = ['noise', 'animal', 'human.pedestrian.adult', 'human.pedestrian.child', 'human.pedestrian.construction_worker', + 'human.pedestrian.personal_mobility', 'human.pedestrian.police_officer', 'human.pedestrian.stroller', + 'human.pedestrian.wheelchair', 'movable_object.barrier', 'movable_object.debris', + 'movable_object.pushable_pullable', 'movable_object.trafficcone', 'static_object.bicycle_rack', 'vehicle.bicycle', + 'vehicle.bus.bendy', 'vehicle.bus.rigid', 'vehicle.car', 'vehicle.construction', 'vehicle.emergency.ambulance', + 'vehicle.emergency.police', 'vehicle.motorcycle', 'vehicle.trailer', 'vehicle.truck', 'flat.driveable_surface', + 'flat.other', 'flat.sidewalk', 'flat.terrain', 'static.manmade', 'static.other', 'static.vegetation', + 'vehicle.ego'] + class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' + ] + #print(category_name) + if category_name == 'bicycle': + return nusc.colormap['vehicle.bicycle'] + elif category_name == 'construction_vehicle': + return nusc.colormap['vehicle.construction'] + elif category_name == 'traffic_cone': + return nusc.colormap['movable_object.trafficcone'] + + for key in nusc.colormap.keys(): + if category_name in key: + return nusc.colormap[key] + return [0, 0, 0] + + +def render_sample_data( + sample_toekn: str, + with_anns: bool = True, + box_vis_level: BoxVisibility = BoxVisibility.ANY, + axes_limit: float = 40, + ax=None, + nsweeps: int = 1, + out_path: str = None, + underlay_map: bool = True, + use_flat_vehicle_coordinates: bool = True, + show_lidarseg: bool = False, + show_lidarseg_legend: bool = False, + filter_lidarseg_labels=None, + lidarseg_preds_bin_path: str = None, + verbose: bool = True, + show_panoptic: bool = False, + pred_data=None, + ) -> None: + """ + Render sample data onto axis. + :param sample_data_token: Sample_data token. + :param with_anns: Whether to draw box annotations. + :param box_vis_level: If sample_data is an image, this sets required visibility for boxes. + :param axes_limit: Axes limit for lidar and radar (measured in meters). + :param ax: Axes onto which to render. + :param nsweeps: Number of sweeps for lidar and radar. + :param out_path: Optional path to save the rendered figure to disk. + :param underlay_map: When set to true, lidar data is plotted onto the map. This can be slow. + :param use_flat_vehicle_coordinates: Instead of the current sensor's coordinate frame, use ego frame which is + aligned to z-plane in the world. Note: Previously this method did not use flat vehicle coordinates, which + can lead to small errors when the vertical axis of the global frame and lidar are not aligned. The new + setting is more correct and rotates the plot by ~90 degrees. + :param show_lidarseg: When set to True, the lidar data is colored with the segmentation labels. When set + to False, the colors of the lidar data represent the distance from the center of the ego vehicle. + :param show_lidarseg_legend: Whether to display the legend for the lidarseg labels in the frame. + :param filter_lidarseg_labels: Only show lidar points which belong to the given list of classes. If None + or the list is empty, all classes will be displayed. + :param lidarseg_preds_bin_path: A path to the .bin file which contains the user's lidar segmentation + predictions for the sample. + :param verbose: Whether to display the image after it is rendered. + :param show_panoptic: When set to True, the lidar data is colored with the panoptic labels. 
+        When set to False, the colors of the lidar data represent the distance from the center of the ego vehicle.
+        If show_lidarseg is True, show_panoptic will be set to False.
+    """
+    lidar_render(sample_token, pred_data, out_path=out_path)
+    sample = nusc.get('sample', sample_token)
+    # sample = data['results'][sample_token_list[0]][0]
+    cams = [
+        'CAM_FRONT_LEFT',
+        'CAM_FRONT',
+        'CAM_FRONT_RIGHT',
+        'CAM_BACK_LEFT',
+        'CAM_BACK',
+        'CAM_BACK_RIGHT',
+    ]
+    if ax is None:
+        _, ax = plt.subplots(4, 3, figsize=(24, 18))
+    j = 0
+    for ind, cam in enumerate(cams):
+        sample_data_token = sample['data'][cam]
+
+        sd_record = nusc.get('sample_data', sample_data_token)
+        sensor_modality = sd_record['sensor_modality']
+
+        if sensor_modality in ['lidar', 'radar']:
+            assert False
+        elif sensor_modality == 'camera':
+            # Load boxes and image.
+            boxes = [Box(record['translation'], record['size'], Quaternion(record['rotation']),
+                         name=record['detection_name'], token='predicted') for record in
+                     pred_data['results'][sample_token] if record['detection_score'] > 0.2]
+
+            data_path, boxes_pred, camera_intrinsic = get_predicted_data(sample_data_token,
+                                                                         box_vis_level=box_vis_level, pred_anns=boxes)
+            _, boxes_gt, _ = nusc.get_sample_data(sample_data_token, box_vis_level=box_vis_level)
+            if ind == 3:
+                j += 1
+            ind = ind % 3
+            data = Image.open(data_path)
+            # mmcv.imwrite(np.array(data)[:,:,::-1], f'{cam}.png')
+            # Init axes.
+
+            # Show image.
+            ax[j, ind].imshow(data)
+            ax[j + 2, ind].imshow(data)
+
+            # Show boxes.
+            if with_anns:
+                for box in boxes_pred:
+                    c = np.array(get_color(box.name)) / 255.0
+                    box.render(ax[j, ind], view=camera_intrinsic, normalize=True, colors=(c, c, c))
+                for box in boxes_gt:
+                    c = np.array(get_color(box.name)) / 255.0
+                    box.render(ax[j + 2, ind], view=camera_intrinsic, normalize=True, colors=(c, c, c))
+
+            # Limit visible range.
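+            # [Editor's note] set_ylim is given (height, 0) so the y-axis is
+            # flipped to match image pixel coordinates, whose origin is the
+            # top-left corner.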
+            ax[j, ind].set_xlim(0, data.size[0])
+            ax[j, ind].set_ylim(data.size[1], 0)
+            ax[j + 2, ind].set_xlim(0, data.size[0])
+            ax[j + 2, ind].set_ylim(data.size[1], 0)
+
+        else:
+            raise ValueError("Error: Unknown sensor modality!")
+
+        ax[j, ind].axis('off')
+        ax[j, ind].set_title('PRED: {} {labels_type}'.format(
+            sd_record['channel'], labels_type='(predictions)' if lidarseg_preds_bin_path else ''))
+        ax[j, ind].set_aspect('equal')
+
+        ax[j + 2, ind].axis('off')
+        ax[j + 2, ind].set_title('GT: {} {labels_type}'.format(
+            sd_record['channel'], labels_type='(predictions)' if lidarseg_preds_bin_path else ''))
+        ax[j + 2, ind].set_aspect('equal')
+
+    if out_path is not None:
+        plt.savefig(out_path + '_camera', bbox_inches='tight', pad_inches=0, dpi=200)
+    if verbose:
+        plt.show()
+    plt.close()
+
+
+if __name__ == '__main__':
+    nusc = NuScenes(version='v1.0-trainval', dataroot='./data/nuscenes', verbose=True)
+    # render_annotation('7603b030b42a4b1caa8c443ccc1a7d52')
+    bevformer_results = mmcv.load('test/bevformer_base/Thu_Jun__9_16_22_37_2022/pts_bbox/results_nusc.json')
+    sample_token_list = list(bevformer_results['results'].keys())
+    for idx in range(0, 10):
+        render_sample_data(sample_token_list[idx], pred_data=bevformer_results, out_path=sample_token_list[idx])
diff --git a/adzoo/bevformer/apis/__init__.py b/adzoo/bevformer/apis/__init__.py
new file mode 100644
index 0000000..15520b2
--- /dev/null
+++ b/adzoo/bevformer/apis/__init__.py
@@ -0,0 +1,2 @@
+from .train import custom_train_model
+from .mmdet_train import custom_train_detector
\ No newline at end of file
diff --git a/adzoo/bevformer/apis/mmdet_train.py b/adzoo/bevformer/apis/mmdet_train.py
new file mode 100644
index 0000000..3372f16
--- /dev/null
+++ b/adzoo/bevformer/apis/mmdet_train.py
@@ -0,0 +1,193 @@
+# ---------------------------------------------
+# Copyright (c) OpenMMLab. All rights reserved.
+# ---------------------------------------------
+# Modified by Zhiqi Li
+# ---------------------------------------------
+import random
+import warnings
+
+import numpy as np
+import torch
+import torch.distributed as dist
+from torch.nn import DataParallel
+from torch.nn.parallel.distributed import DistributedDataParallel
+from mmcv.runner import HOOKS, DistSamplerSeedHook, EpochBasedRunner, Fp16OptimizerHook, OptimizerHook, build_runner
+from mmcv.utils import build_from_cfg, get_root_logger
+
+from mmcv.core import EvalHook
+from mmcv.optims import build_optimizer
+from mmcv.datasets import build_dataset, replace_ImageToTensor
+import time
+import os.path as osp
+from mmcv.datasets.builder import build_dataloader
+from mmcv.core.evaluation.eval_hooks import CustomDistEvalHook
+
+
+def custom_train_detector(model,
+                          dataset,
+                          cfg,
+                          distributed=False,
+                          validate=False,
+                          timestamp=None,
+                          eval_model=None,
+                          meta=None):
+    logger = get_root_logger(cfg.log_level)
+
+    # prepare data loaders
+
+    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
+    # assert len(dataset) == 1
+    if 'imgs_per_gpu' in cfg.data:
+        logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. '
' + 'Please use "samples_per_gpu" instead') + if 'samples_per_gpu' in cfg.data: + logger.warning( + f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and ' + f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"' + f'={cfg.data.imgs_per_gpu} is used in this experiments') + else: + logger.warning( + 'Automatically set "samples_per_gpu"="imgs_per_gpu"=' + f'{cfg.data.imgs_per_gpu} in this experiments') + cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu + + data_loaders = [ + build_dataloader( + ds, + cfg.data.samples_per_gpu, + cfg.data.workers_per_gpu, + # cfg.gpus will be ignored if distributed + len(cfg.gpu_ids), + dist=distributed, + seed=cfg.seed, + shuffler_sampler=cfg.data.shuffler_sampler, # dict(type='DistributedGroupSampler'), + nonshuffler_sampler=cfg.data.nonshuffler_sampler, # dict(type='DistributedSampler'), + ) for ds in dataset + ] + # put model on gpus + if distributed: + find_unused_parameters = cfg.get('find_unused_parameters', False) + # Sets the `find_unused_parameters` parameter in + # torch.nn.parallel.DistributedDataParallel + model = DistributedDataParallel( + model.cuda(), + device_ids=[torch.cuda.current_device()], + broadcast_buffers=False, + find_unused_parameters=find_unused_parameters) + if eval_model is not None: + eval_model = DistributedDataParallel( + eval_model.cuda(), + device_ids=[torch.cuda.current_device()], + broadcast_buffers=False, + find_unused_parameters=find_unused_parameters) + else: + model = DataParallel( + model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids) + if eval_model is not None: + eval_model = DataParallel( + eval_model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids) + # build runner + optimizer = build_optimizer(model, cfg.optimizer) + + if 'runner' not in cfg: + cfg.runner = { + 'type': 'EpochBasedRunner', + 'max_epochs': cfg.total_epochs + } + warnings.warn( + 'config is now expected to have a `runner` section, ' + 'please set `runner` in your config.', UserWarning) + else: + if 'total_epochs' in cfg: + assert cfg.total_epochs == cfg.runner.max_epochs + if eval_model is not None: + runner = build_runner( + cfg.runner, + default_args=dict( + model=model, + eval_model=eval_model, + optimizer=optimizer, + work_dir=cfg.work_dir, + logger=logger, + meta=meta)) + else: + runner = build_runner( + cfg.runner, + default_args=dict( + model=model, + optimizer=optimizer, + work_dir=cfg.work_dir, + logger=logger, + meta=meta)) + + # an ugly workaround to make .log and .log.json filenames the same + runner.timestamp = timestamp + + # fp16 setting + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + optimizer_config = Fp16OptimizerHook( + **cfg.optimizer_config, **fp16_cfg, distributed=distributed) + elif distributed and 'type' not in cfg.optimizer_config: + optimizer_config = OptimizerHook(**cfg.optimizer_config) + else: + optimizer_config = cfg.optimizer_config + + # register hooks + runner.register_training_hooks(cfg.lr_config, optimizer_config, + cfg.checkpoint_config, cfg.log_config, + cfg.get('momentum_config', None)) + + # register profiler hook + #trace_config = dict(type='tb_trace', dir_name='work_dir') + #profiler_config = dict(on_trace_ready=trace_config) + #runner.register_profiler_hook(profiler_config) + + if distributed: + if isinstance(runner, EpochBasedRunner): + runner.register_hook(DistSamplerSeedHook()) + + # register eval hooks + if validate: + # Support batch_size > 1 in validation + val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1) + if val_samples_per_gpu > 1: + assert False + # Replace 
'ImageToTensor' to 'DefaultFormatBundle' + cfg.data.val.pipeline = replace_ImageToTensor( + cfg.data.val.pipeline) + val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) + + val_dataloader = build_dataloader( + val_dataset, + samples_per_gpu=val_samples_per_gpu, + workers_per_gpu=cfg.data.workers_per_gpu, + dist=distributed, + shuffle=False, + shuffler_sampler=cfg.data.shuffler_sampler, # dict(type='DistributedGroupSampler'), + nonshuffler_sampler=cfg.data.nonshuffler_sampler, # dict(type='DistributedSampler'), + ) + eval_cfg = cfg.get('evaluation', {}) + eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner' + eval_cfg['jsonfile_prefix'] = osp.join('val', cfg.work_dir, time.ctime().replace(' ','_').replace(':','_')) + eval_hook = CustomDistEvalHook if distributed else EvalHook + runner.register_hook(eval_hook(val_dataloader, **eval_cfg)) + + # user-defined hooks + if cfg.get('custom_hooks', None): + custom_hooks = cfg.custom_hooks + assert isinstance(custom_hooks, list), \ + f'custom_hooks expect list type, but got {type(custom_hooks)}' + for hook_cfg in cfg.custom_hooks: + assert isinstance(hook_cfg, dict), \ + 'Each item in custom_hooks expects dict type, but got ' \ + f'{type(hook_cfg)}' + hook_cfg = hook_cfg.copy() + priority = hook_cfg.pop('priority', 'NORMAL') + hook = build_from_cfg(hook_cfg, HOOKS) + runner.register_hook(hook, priority=priority) + if cfg.resume_from: + runner.resume(cfg.resume_from) + elif cfg.load_from: + runner.load_checkpoint(cfg.load_from) + runner.run(data_loaders, cfg.workflow) + diff --git a/adzoo/bevformer/apis/test.py b/adzoo/bevformer/apis/test.py new file mode 100644 index 0000000..7667395 --- /dev/null +++ b/adzoo/bevformer/apis/test.py @@ -0,0 +1,163 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. +# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- +import os.path as osp +import pickle +import shutil +import tempfile +import time + +import torch +import torch.distributed as dist +from mmcv.image import tensor2imgs +from mmcv.utils import get_dist_info + +from mmcv.core import encode_mask_results +from mmcv.fileio.io import dump, load +from mmcv.utils import mkdir_or_exist, ProgressBar + +import numpy as np +import pycocotools.mask as mask_util + +def custom_encode_mask_results(mask_results): + """Encode bitmap mask to RLE code. Semantic Masks only + Args: + mask_results (list | tuple[list]): bitmap mask results. + In mask scoring rcnn, mask_results is a tuple of (segm_results, + segm_cls_score). + Returns: + list | tuple: RLE encoded mask. + """ + cls_segms = mask_results + num_classes = len(cls_segms) + encoded_mask_results = [] + for i in range(len(cls_segms)): + encoded_mask_results.append( + mask_util.encode( + np.array( + cls_segms[i][:, :, np.newaxis], order='F', + dtype='uint8'))[0]) # encoded with RLE + return [encoded_mask_results] + +def custom_multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): + """Test model with multiple gpus. + This method tests model with multiple gpus and collects the results + under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' + it encodes results to gpu tensors and use gpu communication for results + collection. On cpu mode it saves the results on different gpus to 'tmpdir' + and collects them by the rank 0 worker. + Args: + model (nn.Module): Model to be tested. + data_loader (nn.Dataloader): Pytorch data loader. 
+        tmpdir (str): Path of directory to save the temporary results from
+            different gpus under cpu mode.
+        gpu_collect (bool): Option to use either gpu or cpu to collect results.
+    Returns:
+        list: The prediction results.
+    """
+    model.eval()
+    bbox_results = []
+    mask_results = []
+    dataset = data_loader.dataset
+    rank, world_size = get_dist_info()
+    if rank == 0:
+        prog_bar = ProgressBar(len(dataset))
+    time.sleep(2)  # This line can prevent deadlock problem in some cases.
+    have_mask = False
+    for i, data in enumerate(data_loader):
+        with torch.no_grad():
+            result = model(data, return_loss=False, rescale=True)
+            # encode mask results
+            if isinstance(result, dict):
+                if 'bbox_results' in result.keys():
+                    bbox_result = result['bbox_results']
+                    batch_size = len(result['bbox_results'])
+                    bbox_results.extend(bbox_result)
+                if 'mask_results' in result.keys() and result['mask_results'] is not None:
+                    mask_result = custom_encode_mask_results(result['mask_results'])
+                    mask_results.extend(mask_result)
+                    have_mask = True
+            else:
+                batch_size = len(result)
+                bbox_results.extend(result)
+
+        # if isinstance(result[0], tuple):
+        #     assert False, 'this code is for instance segmentation, which our code will not utilize.'
+        #     result = [(bbox_results, encode_mask_results(mask_results))
+        #               for bbox_results, mask_results in result]
+        if rank == 0:
+            for _ in range(batch_size * world_size):
+                prog_bar.update()
+
+    # collect results from all ranks
+    if gpu_collect:
+        bbox_results = collect_results_gpu(bbox_results, len(dataset))
+        if have_mask:
+            mask_results = collect_results_gpu(mask_results, len(dataset))
+        else:
+            mask_results = None
+    else:
+        bbox_results = collect_results_cpu(bbox_results, len(dataset), tmpdir)
+        tmpdir = tmpdir + '_mask' if tmpdir is not None else None
+        if have_mask:
+            mask_results = collect_results_cpu(mask_results, len(dataset), tmpdir)
+        else:
+            mask_results = None
+
+    if mask_results is None:
+        return bbox_results
+    return {'bbox_results': bbox_results, 'mask_results': mask_results}
+
+
+def collect_results_cpu(result_part, size, tmpdir=None):
+    rank, world_size = get_dist_info()
+    # create a tmp dir if it is not specified
+    if tmpdir is None:
+        MAX_LEN = 512
+        # 32 is whitespace; encode the tmpdir string into a fixed-size uint8
+        # tensor so it can be broadcast from rank 0 to all other workers
+        dir_tensor = torch.full((MAX_LEN, ),
+                                32,
+                                dtype=torch.uint8,
+                                device='cuda')
+        if rank == 0:
+            mkdir_or_exist('.dist_test')
+            tmpdir = tempfile.mkdtemp(dir='.dist_test')
+            tmpdir = torch.tensor(
+                bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
+            dir_tensor[:len(tmpdir)] = tmpdir
+        dist.broadcast(dir_tensor, 0)
+        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
+    else:
+        mkdir_or_exist(tmpdir)
+    # dump the part result to the dir
+    dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
+    dist.barrier()
+    # collect all parts
+    if rank != 0:
+        return None
+    else:
+        # load results of all parts from tmp dir
+        part_list = []
+        for i in range(world_size):
+            part_file = osp.join(tmpdir, f'part_{i}.pkl')
+            part_list.append(load(part_file))
+        # Because the evaluation sampler assigns each gpu a contiguous chunk of
+        # samples, the parts are concatenated in rank order instead of being
+        # interleaved with zip(*part_list).
+        ordered_results = []
+        # for res in zip(*part_list):
+        for res in part_list:
+            ordered_results.extend(list(res))
+        # the dataloader may pad some samples
+        ordered_results = ordered_results[:size]
+        # remove tmp dir
+        shutil.rmtree(tmpdir)
+        return ordered_results
+
+
+def collect_results_gpu(result_part, size):
+    # GPU collection is not implemented here; fall back to (and return) the CPU path.
+    return collect_results_cpu(result_part, size)
\ No newline at end of file
diff --git
a/adzoo/bevformer/apis/train.py b/adzoo/bevformer/apis/train.py new file mode 100644 index 0000000..dcae402 --- /dev/null +++ b/adzoo/bevformer/apis/train.py @@ -0,0 +1,65 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. +# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +from .mmdet_train import custom_train_detector + +def custom_train_model(model, + dataset, + cfg, + distributed=False, + validate=False, + timestamp=None, + eval_model=None, + meta=None): + """A function wrapper for launching model training according to cfg. + + Because we need different eval_hook in runner. Should be deprecated in the + future. + """ + if cfg.model.type in ['EncoderDecoder3D']: + assert False + else: + custom_train_detector( + model, + dataset, + cfg, + distributed=distributed, + validate=validate, + timestamp=timestamp, + eval_model=eval_model, + meta=meta) + + +def train_model(model, + dataset, + cfg, + distributed=False, + validate=False, + timestamp=None, + meta=None): + """A function wrapper for launching model training according to cfg. + + Because we need different eval_hook in runner. Should be deprecated in the + future. + """ + if cfg.model.type in ['EncoderDecoder3D']: + train_segmentor( + model, + dataset, + cfg, + distributed=distributed, + validate=validate, + timestamp=timestamp, + meta=meta) + else: + train_detector( + model, + dataset, + cfg, + distributed=distributed, + validate=validate, + timestamp=timestamp, + meta=meta) diff --git a/adzoo/bevformer/configs/_base_/datasets/coco_instance.py b/adzoo/bevformer/configs/_base_/datasets/coco_instance.py new file mode 100644 index 0000000..f6ea4f4 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/datasets/coco_instance.py @@ -0,0 +1,48 @@ +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) +evaluation = dict(metric=['bbox', 'segm']) diff --git a/adzoo/bevformer/configs/_base_/datasets/kitti-3d-3class.py b/adzoo/bevformer/configs/_base_/datasets/kitti-3d-3class.py new file mode 100644 index 0000000..1822af4 
--- /dev/null +++ b/adzoo/bevformer/configs/_base_/datasets/kitti-3d-3class.py @@ -0,0 +1,140 @@ +# dataset settings +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' +class_names = ['Pedestrian', 'Cyclist', 'Car'] +point_cloud_range = [0, -40, -3, 70.4, 40, 1] +input_modality = dict(use_lidar=True, use_camera=False) +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict(Car=5, Pedestrian=10, Cyclist=10)), + classes=class_names, + sample_groups=dict(Car=12, Pedestrian=6, Cyclist=6)) + +file_client_args = dict(backend='disk') +# Uncomment the following if use ceph or other file clients. +# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient +# for more details. +# file_client_args = dict( +# backend='petrel', path_mapping=dict(data='s3://kitti_data/')) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + file_client_args=file_client_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + file_client_args=file_client_args), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='ObjectNoise', + num_try=100, + translation_std=[1.0, 1.0, 0.5], + global_rot_range=[0.0, 0.0], + rot_range=[-0.78539816, 0.78539816]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + file_client_args=file_client_args), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=6, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'kitti_infos_train.pkl', + split='training', + pts_prefix='velodyne_reduced', + pipeline=train_pipeline, + modality=input_modality, + classes=class_names, + test_mode=False, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
+ box_type_3d='LiDAR')), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'kitti_infos_val.pkl', + split='training', + pts_prefix='velodyne_reduced', + pipeline=test_pipeline, + modality=input_modality, + classes=class_names, + test_mode=True, + box_type_3d='LiDAR'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'kitti_infos_val.pkl', + split='training', + pts_prefix='velodyne_reduced', + pipeline=test_pipeline, + modality=input_modality, + classes=class_names, + test_mode=True, + box_type_3d='LiDAR')) + +evaluation = dict(interval=1, pipeline=eval_pipeline) diff --git a/adzoo/bevformer/configs/_base_/datasets/kitti-3d-car.py b/adzoo/bevformer/configs/_base_/datasets/kitti-3d-car.py new file mode 100644 index 0000000..1e81226 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/datasets/kitti-3d-car.py @@ -0,0 +1,138 @@ +# dataset settings +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' +class_names = ['Car'] +point_cloud_range = [0, -40, -3, 70.4, 40, 1] +input_modality = dict(use_lidar=True, use_camera=False) +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)), + classes=class_names, + sample_groups=dict(Car=15)) + +file_client_args = dict(backend='disk') +# Uncomment the following if use ceph or other file clients. +# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient +# for more details. +# file_client_args = dict( +# backend='petrel', path_mapping=dict(data='s3://kitti_data/')) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + file_client_args=file_client_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + file_client_args=file_client_args), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='ObjectNoise', + num_try=100, + translation_std=[1.0, 1.0, 0.5], + global_rot_range=[0.0, 0.0], + rot_range=[-0.78539816, 0.78539816]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. 
client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + file_client_args=file_client_args), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=6, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'kitti_infos_train.pkl', + split='training', + pts_prefix='velodyne_reduced', + pipeline=train_pipeline, + modality=input_modality, + classes=class_names, + test_mode=False, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR')), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'kitti_infos_val.pkl', + split='training', + pts_prefix='velodyne_reduced', + pipeline=test_pipeline, + modality=input_modality, + classes=class_names, + test_mode=True, + box_type_3d='LiDAR'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'kitti_infos_val.pkl', + split='training', + pts_prefix='velodyne_reduced', + pipeline=test_pipeline, + modality=input_modality, + classes=class_names, + test_mode=True, + box_type_3d='LiDAR')) + +evaluation = dict(interval=1, pipeline=eval_pipeline) diff --git a/adzoo/bevformer/configs/_base_/datasets/lyft-3d.py b/adzoo/bevformer/configs/_base_/datasets/lyft-3d.py new file mode 100644 index 0000000..71baff0 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/datasets/lyft-3d.py @@ -0,0 +1,136 @@ +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-80, -80, -5, 80, 80, 3] +# For Lyft we usually do 9-class detection +class_names = [ + 'car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', 'motorcycle', + 'bicycle', 'pedestrian', 'animal' +] +dataset_type = 'LyftDataset' +data_root = 'data/lyft/' +# Input modality for Lyft dataset, this is consistent with the submission +# format which requires the information in input_modality. +input_modality = dict( + use_lidar=True, + use_camera=False, + use_radar=False, + use_map=False, + use_external=False) +file_client_args = dict(backend='disk') +# Uncomment the following if use ceph or other file clients. +# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient +# for more details. 
+# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/lyft/': 's3://lyft/lyft/', +# 'data/lyft/': 's3://lyft/lyft/' +# })) +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'lyft_infos_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'lyft_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'lyft_infos_test.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True)) +# For Lyft dataset, we usually evaluate the model at the end of training. +# Since the models are trained by 24 epochs by default, we set evaluation +# interval to be 24. Please change the interval accordingly if you do not +# use a default schedule. 
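+# For example, under a hypothetical 48-epoch schedule the matching setting
+# would be (illustrative only):
+# evaluation = dict(interval=48, pipeline=eval_pipeline)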
+evaluation = dict(interval=24, pipeline=eval_pipeline) diff --git a/adzoo/bevformer/configs/_base_/datasets/nuim_instance.py b/adzoo/bevformer/configs/_base_/datasets/nuim_instance.py new file mode 100644 index 0000000..82fce56 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/datasets/nuim_instance.py @@ -0,0 +1,59 @@ +dataset_type = 'CocoDataset' +data_root = 'data/nuimages/' +class_names = [ + 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', + 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier' +] +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1280, 720), (1920, 1080)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1600, 900), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/nuimages_v1.0-train.json', + img_prefix=data_root, + classes=class_names, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/nuimages_v1.0-val.json', + img_prefix=data_root, + classes=class_names, + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/nuimages_v1.0-val.json', + img_prefix=data_root, + classes=class_names, + pipeline=test_pipeline)) +evaluation = dict(metric=['bbox', 'segm']) diff --git a/adzoo/bevformer/configs/_base_/datasets/nus-3d.py b/adzoo/bevformer/configs/_base_/datasets/nus-3d.py new file mode 100644 index 0000000..1548171 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/datasets/nus-3d.py @@ -0,0 +1,142 @@ +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-50, -50, -5, 50, 50, 3] +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', + 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier' +] +dataset_type = 'NuScenesDataset' +data_root = 'data/nuscenes/' +# Input modality for nuScenes dataset, this is consistent with the submission +# format which requires the information in input_modality. +input_modality = dict( + use_lidar=True, + use_camera=False, + use_radar=False, + use_map=False, + use_external=False) +file_client_args = dict(backend='disk') +# Uncomment the following if use ceph or other file clients. +# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient +# for more details. 
+# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/nuscenes/': 's3://nuscenes/nuscenes/', +# 'data/nuscenes/': 's3://nuscenes/nuscenes/' +# })) +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR'), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True, + box_type_3d='LiDAR'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True, + box_type_3d='LiDAR')) +# For nuScenes dataset, we usually evaluate the model at the end of training. +# Since the models are trained by 24 epochs by default, we set evaluation +# interval to be 24. Please change the interval accordingly if you do not +# use a default schedule. 
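+# Configs that inherit this base file can override such settings instead of
+# editing it in place, e.g. (illustrative only):
+# _base_ = ['../_base_/datasets/nus-3d.py']
+# data = dict(samples_per_gpu=1, workers_per_gpu=2)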
+evaluation = dict(interval=24, pipeline=eval_pipeline) diff --git a/adzoo/bevformer/configs/_base_/datasets/nus-mono3d.py b/adzoo/bevformer/configs/_base_/datasets/nus-mono3d.py new file mode 100644 index 0000000..1363a94 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/datasets/nus-mono3d.py @@ -0,0 +1,100 @@ +dataset_type = 'CustomNuScenesMonoDataset' +data_root = 'data/nuscenes/' +class_names = [ + 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', + 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier' +] +# Input modality for nuScenes dataset, this is consistent with the submission +# format which requires the information in input_modality. +input_modality = dict( + use_lidar=False, + use_camera=True, + use_radar=False, + use_map=False, + use_external=False) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFileMono3D'), + dict( + type='LoadAnnotations3D', + with_bbox=True, + with_label=True, + with_attr_label=True, + with_bbox_3d=True, + with_label_3d=True, + with_bbox_depth=True), + dict(type='Resize', img_scale=(1600, 900), keep_ratio=True), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict( + type='Collect3D', + keys=[ + 'img', 'gt_bboxes', 'gt_labels', 'attr_labels', 'gt_bboxes_3d', + 'gt_labels_3d', 'centers2d', 'depths' + ]), +] +test_pipeline = [ + dict(type='LoadImageFromFileMono3D'), + dict( + type='MultiScaleFlipAug', + scale_factor=1.0, + flip=False, + transforms=[ + dict(type='RandomFlip3D'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['img']), + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. 
client) +eval_pipeline = [ + dict(type='LoadImageFromFileMono3D'), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['img']) +] + +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_train_mono3d.coco.json', + img_prefix=data_root, + classes=class_names, + pipeline=train_pipeline, + modality=input_modality, + test_mode=False, + box_type_3d='Camera'), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_val_mono3d.coco.json', + img_prefix=data_root, + classes=class_names, + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + box_type_3d='Camera'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_val_mono3d.coco.json', + img_prefix=data_root, + classes=class_names, + pipeline=test_pipeline, + modality=input_modality, + test_mode=True, + box_type_3d='Camera')) +evaluation = dict(interval=2) diff --git a/adzoo/bevformer/configs/_base_/datasets/range100_lyft-3d.py b/adzoo/bevformer/configs/_base_/datasets/range100_lyft-3d.py new file mode 100644 index 0000000..efa63ea --- /dev/null +++ b/adzoo/bevformer/configs/_base_/datasets/range100_lyft-3d.py @@ -0,0 +1,136 @@ +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-100, -100, -5, 100, 100, 3] +# For Lyft we usually do 9-class detection +class_names = [ + 'car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', 'motorcycle', + 'bicycle', 'pedestrian', 'animal' +] +dataset_type = 'LyftDataset' +data_root = 'data/lyft/' +# Input modality for Lyft dataset, this is consistent with the submission +# format which requires the information in input_modality. +input_modality = dict( + use_lidar=True, + use_camera=False, + use_radar=False, + use_map=False, + use_external=False) +file_client_args = dict(backend='disk') +# Uncomment the following if use ceph or other file clients. +# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient +# for more details. 
+# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/lyft/': 's3://lyft/lyft/', +# 'data/lyft/': 's3://lyft/lyft/' +# })) +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'lyft_infos_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'lyft_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'lyft_infos_test.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True)) +# For Lyft dataset, we usually evaluate the model at the end of training. +# Since the models are trained by 24 epochs by default, we set evaluation +# interval to be 24. Please change the interval accordingly if you do not +# use a default schedule. 
+evaluation = dict(interval=24, pipeline=eval_pipeline) diff --git a/adzoo/bevformer/configs/_base_/datasets/s3dis-3d-5class.py b/adzoo/bevformer/configs/_base_/datasets/s3dis-3d-5class.py new file mode 100644 index 0000000..2422766 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/datasets/s3dis-3d-5class.py @@ -0,0 +1,114 @@ +# dataset settings +dataset_type = 'S3DISDataset' +data_root = './data/s3dis/' +class_names = ('table', 'chair', 'sofa', 'bookcase', 'board') +train_area = [1, 2, 3, 4, 6] +test_area = 5 + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict(type='PointSample', num_points=40000), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + # following ScanNet dataset the rotation range is 5 degrees + rot_range=[-0.087266, 0.087266], + scale_ratio_range=[1.0, 1.0], + shift_height=True), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict(type='PointSample', num_points=40000), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. 
client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=8, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type='ConcatDataset', + datasets=[ + dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + f's3dis_infos_Area_{i}.pkl', + pipeline=train_pipeline, + filter_empty_gt=False, + classes=class_names, + box_type_3d='Depth') for i in train_area + ], + separate_eval=False)), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + f's3dis_infos_Area_{test_area}.pkl', + pipeline=test_pipeline, + classes=class_names, + test_mode=True, + box_type_3d='Depth'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + f's3dis_infos_Area_{test_area}.pkl', + pipeline=test_pipeline, + classes=class_names, + test_mode=True, + box_type_3d='Depth')) + +evaluation = dict(pipeline=eval_pipeline) diff --git a/adzoo/bevformer/configs/_base_/datasets/s3dis_seg-3d-13class.py b/adzoo/bevformer/configs/_base_/datasets/s3dis_seg-3d-13class.py new file mode 100644 index 0000000..39bf556 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/datasets/s3dis_seg-3d-13class.py @@ -0,0 +1,139 @@ +# dataset settings +dataset_type = 'S3DISSegDataset' +data_root = './data/s3dis/' +class_names = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door', + 'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter') +num_points = 4096 +train_area = [1, 2, 3, 4, 6] +test_area = 5 +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True), + dict( + type='PointSegClassMapping', + valid_cat_ids=tuple(range(len(class_names))), + max_cat_id=13), + dict( + type='IndoorPatchPointSample', + num_points=num_points, + block_size=1.0, + ignore_index=len(class_names), + use_normalized_coord=True, + enlarge_size=0.2, + min_unique_num=None), + dict(type='NormalizePointsColor', color_mean=None), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'pts_semantic_mask']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict(type='NormalizePointsColor', color_mean=None), + dict( + # a wrapper in order to successfully call test function + # actually we don't perform test-time-aug + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.0, + flip_ratio_bev_vertical=0.0), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +# we need to load gt seg_mask! 
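+# (the segmentation evaluator computes mIoU from 'pts_semantic_mask', so the
+# eval pipeline must collect it alongside the points)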
+eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True), + dict( + type='PointSegClassMapping', + valid_cat_ids=tuple(range(len(class_names))), + max_cat_id=13), + dict( + type='DefaultFormatBundle3D', + with_label=False, + class_names=class_names), + dict(type='Collect3D', keys=['points', 'pts_semantic_mask']) +] + +data = dict( + samples_per_gpu=8, + workers_per_gpu=4, + # train on area 1, 2, 3, 4, 6 + # test on area 5 + train=dict( + type=dataset_type, + data_root=data_root, + ann_files=[ + data_root + f's3dis_infos_Area_{i}.pkl' for i in train_area + ], + pipeline=train_pipeline, + classes=class_names, + test_mode=False, + ignore_index=len(class_names), + scene_idxs=[ + data_root + f'seg_info/Area_{i}_resampled_scene_idxs.npy' + for i in train_area + ]), + val=dict( + type=dataset_type, + data_root=data_root, + ann_files=data_root + f's3dis_infos_Area_{test_area}.pkl', + pipeline=test_pipeline, + classes=class_names, + test_mode=True, + ignore_index=len(class_names), + scene_idxs=data_root + + f'seg_info/Area_{test_area}_resampled_scene_idxs.npy'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_files=data_root + f's3dis_infos_Area_{test_area}.pkl', + pipeline=test_pipeline, + classes=class_names, + test_mode=True, + ignore_index=len(class_names))) + +evaluation = dict(pipeline=eval_pipeline) diff --git a/adzoo/bevformer/configs/_base_/datasets/scannet-3d-18class.py b/adzoo/bevformer/configs/_base_/datasets/scannet-3d-18class.py new file mode 100644 index 0000000..93da1e5 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/datasets/scannet-3d-18class.py @@ -0,0 +1,128 @@ +# dataset settings +dataset_type = 'ScanNetDataset' +data_root = './data/scannet/' +class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', + 'bookshelf', 'picture', 'counter', 'desk', 'curtain', + 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', + 'garbagebin') +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_mask_3d=True, + with_seg_3d=True), + dict(type='GlobalAlignment', rotation_axis=2), + dict( + type='PointSegClassMapping', + valid_cat_ids=(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, + 36, 39), + max_cat_id=40), + dict(type='PointSample', num_points=40000), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.087266, 0.087266], + scale_ratio_range=[1.0, 1.0], + shift_height=True), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict( + type='Collect3D', + keys=[ + 'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask', + 'pts_instance_mask' + ]) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2]), + dict(type='GlobalAlignment', rotation_axis=2), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + 
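+                # with sync_2d=False, the BEV flips below are sampled
+                # independently rather than being tied to a 2D image flip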
flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict(type='PointSample', num_points=40000), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + load_dim=6, + use_dim=[0, 1, 2]), + dict(type='GlobalAlignment', rotation_axis=2), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=8, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'scannet_infos_train.pkl', + pipeline=train_pipeline, + filter_empty_gt=False, + classes=class_names, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='Depth')), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'scannet_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + test_mode=True, + box_type_3d='Depth'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'scannet_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + test_mode=True, + box_type_3d='Depth')) + +evaluation = dict(pipeline=eval_pipeline) diff --git a/adzoo/bevformer/configs/_base_/datasets/scannet_seg-3d-20class.py b/adzoo/bevformer/configs/_base_/datasets/scannet_seg-3d-20class.py new file mode 100644 index 0000000..cf73b09 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/datasets/scannet_seg-3d-20class.py @@ -0,0 +1,132 @@ +# dataset settings +dataset_type = 'ScanNetSegDataset' +data_root = './data/scannet/' +class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', + 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', + 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', + 'bathtub', 'otherfurniture') +num_points = 8192 +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True), + dict( + type='PointSegClassMapping', + valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, + 33, 34, 36, 39), + max_cat_id=40), + dict( + type='IndoorPatchPointSample', + num_points=num_points, + block_size=1.5, + ignore_index=len(class_names), + use_normalized_coord=False, + enlarge_size=0.2, + min_unique_num=None), + dict(type='NormalizePointsColor', color_mean=None), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'pts_semantic_mask']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict(type='NormalizePointsColor', color_mean=None), + dict( + # a wrapper in order to successfully call test function + # actually we don't perform test-time-aug + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + 
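+                # rot_range=[0, 0], unit scaling and zero translation make this
+                # transform a no-op; it only satisfies the TTA wrapper interface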
translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.0, + flip_ratio_bev_vertical=0.0), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +# we need to load gt seg_mask! +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True), + dict( + type='PointSegClassMapping', + valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, + 33, 34, 36, 39), + max_cat_id=40), + dict( + type='DefaultFormatBundle3D', + with_label=False, + class_names=class_names), + dict(type='Collect3D', keys=['points', 'pts_semantic_mask']) +] + +data = dict( + samples_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'scannet_infos_train.pkl', + pipeline=train_pipeline, + classes=class_names, + test_mode=False, + ignore_index=len(class_names), + scene_idxs=data_root + 'seg_info/train_resampled_scene_idxs.npy'), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'scannet_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + test_mode=True, + ignore_index=len(class_names)), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'scannet_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + test_mode=True, + ignore_index=len(class_names))) + +evaluation = dict(pipeline=eval_pipeline) diff --git a/adzoo/bevformer/configs/_base_/datasets/sunrgbd-3d-10class.py b/adzoo/bevformer/configs/_base_/datasets/sunrgbd-3d-10class.py new file mode 100644 index 0000000..7121b75 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/datasets/sunrgbd-3d-10class.py @@ -0,0 +1,107 @@ +dataset_type = 'SUNRGBDDataset' +data_root = 'data/sunrgbd/' +class_names = ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser', + 'night_stand', 'bookshelf', 'bathtub') +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2]), + dict(type='LoadAnnotations3D'), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + ), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.523599, 0.523599], + scale_ratio_range=[0.85, 1.15], + shift_height=True), + dict(type='PointSample', num_points=20000), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2]), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + ), + dict(type='PointSample', num_points=20000), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt 
loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + load_dim=6, + use_dim=[0, 1, 2]), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=16, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'sunrgbd_infos_train.pkl', + pipeline=train_pipeline, + classes=class_names, + filter_empty_gt=False, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='Depth')), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'sunrgbd_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + test_mode=True, + box_type_3d='Depth'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'sunrgbd_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + test_mode=True, + box_type_3d='Depth')) + +evaluation = dict(pipeline=eval_pipeline) diff --git a/adzoo/bevformer/configs/_base_/datasets/waymoD5-3d-3class.py b/adzoo/bevformer/configs/_base_/datasets/waymoD5-3d-3class.py new file mode 100644 index 0000000..920ac15 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/datasets/waymoD5-3d-3class.py @@ -0,0 +1,145 @@ +# dataset settings +# D5 in the config name means the whole dataset is divided into 5 folds +# We only use one fold for efficient experiments +dataset_type = 'LidarWaymoDataset' +data_root = 'data/waymo-full/kitti_format/' +file_client_args = dict(backend='disk') +# Uncomment the following if use ceph or other file clients. +# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient +# for more details. 
+# file_client_args = dict( +# backend='petrel', path_mapping=dict(data='s3://waymo_data/')) + +class_names = ['Car', 'Pedestrian', 'Cyclist'] +point_cloud_range = [-74.88, -74.88, -2, 74.88, 74.88, 4] +input_modality = dict(use_lidar=True, use_camera=False) +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'waymo_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict(Car=5, Pedestrian=10, Cyclist=10)), + classes=class_names, + sample_groups=dict(Car=15, Pedestrian=10, Cyclist=10), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=[0, 1, 2, 3, 4], + file_client_args=file_client_args)) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + file_client_args=file_client_args), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + file_client_args=file_client_args), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=2, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'waymo_infos_train.pkl', + split='training', + pipeline=train_pipeline, + modality=input_modality, + classes=class_names, + test_mode=False, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
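+ # waymo here is converted to kitti format, so LiDAR boxes apply as well.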
+ box_type_3d='LiDAR', + # load one frame every five frames + load_interval=5)), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'waymo_infos_val.pkl', + split='training', + pipeline=test_pipeline, + modality=input_modality, + classes=class_names, + test_mode=True, + box_type_3d='LiDAR'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'waymo_infos_val.pkl', + split='training', + pipeline=test_pipeline, + modality=input_modality, + classes=class_names, + test_mode=True, + box_type_3d='LiDAR')) + +evaluation = dict(interval=24, pipeline=eval_pipeline) diff --git a/adzoo/bevformer/configs/_base_/datasets/waymoD5-3d-car.py b/adzoo/bevformer/configs/_base_/datasets/waymoD5-3d-car.py new file mode 100644 index 0000000..02e2627 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/datasets/waymoD5-3d-car.py @@ -0,0 +1,143 @@ +# dataset settings +# D5 in the config name means the whole dataset is divided into 5 folds +# We only use one fold for efficient experiments +dataset_type = 'WaymoDataset' +data_root = 'data/waymo/kitti_format/' +file_client_args = dict(backend='disk') +# Uncomment the following if using Ceph or other file clients. +# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient +# for more details. +# file_client_args = dict( +# backend='petrel', path_mapping=dict(data='s3://waymo_data/')) + +class_names = ['Car'] +point_cloud_range = [-74.88, -74.88, -2, 74.88, 74.88, 4] +input_modality = dict(use_lidar=True, use_camera=False) +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'waymo_dbinfos_train.pkl', + rate=1.0, + prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)), + classes=class_names, + sample_groups=dict(Car=15), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=[0, 1, 2, 3, 4], + file_client_args=file_client_args)) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + file_client_args=file_client_args), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g.
client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + file_client_args=file_client_args), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=2, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'waymo_infos_train.pkl', + split='training', + pipeline=train_pipeline, + modality=input_modality, + classes=class_names, + test_mode=False, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR', + # load one frame every five frames + load_interval=5)), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'waymo_infos_val.pkl', + split='training', + pipeline=test_pipeline, + modality=input_modality, + classes=class_names, + test_mode=True, + box_type_3d='LiDAR'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'waymo_infos_val.pkl', + split='training', + pipeline=test_pipeline, + modality=input_modality, + classes=class_names, + test_mode=True, + box_type_3d='LiDAR')) + +evaluation = dict(interval=24, pipeline=eval_pipeline) diff --git a/adzoo/bevformer/configs/_base_/default_runtime.py b/adzoo/bevformer/configs/_base_/default_runtime.py new file mode 100644 index 0000000..4e85b69 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/default_runtime.py @@ -0,0 +1,18 @@ +checkpoint_config = dict(interval=1) +# yapf:disable +# By default we use textlogger hook and tensorboard +# For more loggers see +# https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.LoggerHook +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +dist_params = dict(backend='nccl') +log_level = 'INFO' +work_dir = None +load_from = None +resume_from = None +workflow = [('train', 1)] diff --git a/adzoo/bevformer/configs/_base_/models/3dssd.py b/adzoo/bevformer/configs/_base_/models/3dssd.py new file mode 100644 index 0000000..55344c7 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/3dssd.py @@ -0,0 +1,77 @@ +model = dict( + type='SSD3DNet', + backbone=dict( + type='PointNet2SAMSG', + in_channels=4, + num_points=(4096, 512, (256, 256)), + radii=((0.2, 0.4, 0.8), (0.4, 0.8, 1.6), (1.6, 3.2, 4.8)), + num_samples=((32, 32, 64), (32, 32, 64), (32, 32, 32)), + sa_channels=(((16, 16, 32), (16, 16, 32), (32, 32, 64)), + ((64, 64, 128), (64, 64, 128), (64, 96, 128)), + ((128, 128, 256), (128, 192, 256), (128, 256, 256))), + aggregation_channels=(64, 128, 256), + fps_mods=(('D-FPS'), ('FS'), ('F-FPS', 'D-FPS')), + fps_sample_range_lists=((-1), (-1), (512, -1)), + norm_cfg=dict(type='BN2d', eps=1e-3, momentum=0.1), + sa_cfg=dict( + type='PointSAModuleMSG', + pool_mod='max', + use_xyz=True, + normalize_xyz=False)), + bbox_head=dict( + type='SSD3DHead', + in_channels=256, + vote_module_cfg=dict( + in_channels=256, + num_points=256, + gt_per_seed=1, + conv_channels=(128, ), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.1), + with_res_feat=False, + vote_xyz_range=(3.0, 3.0, 2.0)), + vote_aggregation_cfg=dict( + type='PointSAModuleMSG', + num_point=256, + radii=(4.8, 6.4), + sample_nums=(16, 32), + mlp_channels=((256, 256, 256, 512), (256, 256, 512, 1024)), + norm_cfg=dict(type='BN2d', eps=1e-3,
momentum=0.1), + use_xyz=True, + normalize_xyz=False, + bias=True), + pred_layer_cfg=dict( + in_channels=1536, + shared_conv_channels=(512, 128), + cls_conv_channels=(128, ), + reg_conv_channels=(128, ), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.1), + bias=True), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.1), + objectness_loss=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0), + center_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=1.0), + dir_class_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), + dir_res_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=1.0), + size_res_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=1.0), + corner_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=1.0), + vote_loss=dict(type='SmoothL1Loss', reduction='sum', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + sample_mod='spec', pos_distance_thr=10.0, expand_dims_length=0.05), + test_cfg=dict( + nms_cfg=dict(type='nms', iou_thr=0.1), + sample_mod='spec', + score_thr=0.0, + per_class_proposal=True, + max_output_num=100)) diff --git a/adzoo/bevformer/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py b/adzoo/bevformer/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py new file mode 100644 index 0000000..fb9e0a8 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py @@ -0,0 +1,200 @@ +# model settings +model = dict( + type='CascadeRCNN', + pretrained='torchvision://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + type='CascadeRoIHead', + num_stages=3, + stage_loss_weights=[1, 0.5, 0.25], + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + 
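# third cascade stage: target_stds tighten from 0.1 to 0.05 to 0.033 across the three heads +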
type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ]), + test_cfg=dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) diff --git a/adzoo/bevformer/configs/_base_/models/centerpoint_01voxel_second_secfpn_nus.py b/adzoo/bevformer/configs/_base_/models/centerpoint_01voxel_second_secfpn_nus.py new file mode 100644 index 0000000..efdce59 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/centerpoint_01voxel_second_secfpn_nus.py @@ -0,0 +1,83 @@ +voxel_size = [0.1, 0.1, 0.2] +model = dict( + type='CenterPoint', + pts_voxel_layer=dict( + max_num_points=10, voxel_size=voxel_size, max_voxels=(90000, 120000)), + pts_voxel_encoder=dict(type='HardSimpleVFE', num_features=5), + pts_middle_encoder=dict( + type='SparseEncoder', + in_channels=5, + sparse_shape=[41, 1024, 1024], + output_channels=128, + order=('conv', 'norm', 'act'), + encoder_channels=((16, 16, 32), (32, 32, 64), (64, 64, 128), (128, + 128)), + encoder_paddings=((0, 0, 1), (0, 0, 1), (0, 0, [0, 1, 1]), (0, 0)), + block_type='basicblock'), + pts_backbone=dict( + type='SECOND', + in_channels=256, + out_channels=[128, 256], + layer_nums=[5, 5], + 
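# stage one keeps stride 1; the SECONDFPN neck below upsamples stage two so both BEV maps concatenate at one resolution +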
layer_strides=[1, 2], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + conv_cfg=dict(type='Conv2d', bias=False)), + pts_neck=dict( + type='SECONDFPN', + in_channels=[128, 256], + out_channels=[256, 256], + upsample_strides=[1, 2], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + upsample_cfg=dict(type='deconv', bias=False), + use_conv_for_no_stride=True), + pts_bbox_head=dict( + type='CenterHead', + in_channels=sum([256, 256]), + tasks=[ + dict(num_class=1, class_names=['car']), + dict(num_class=2, class_names=['truck', 'construction_vehicle']), + dict(num_class=2, class_names=['bus', 'trailer']), + dict(num_class=1, class_names=['barrier']), + dict(num_class=2, class_names=['motorcycle', 'bicycle']), + dict(num_class=2, class_names=['pedestrian', 'traffic_cone']), + ], + common_heads=dict( + reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)), + share_conv_channel=64, + bbox_coder=dict( + type='CenterPointBBoxCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + max_num=500, + score_threshold=0.1, + out_size_factor=8, + voxel_size=voxel_size[:2], + code_size=9), + separate_head=dict( + type='SeparateHead', init_bias=-2.19, final_kernel=3), + loss_cls=dict(type='GaussianFocalLoss', reduction='mean'), + loss_bbox=dict(type='L1Loss', reduction='mean', loss_weight=0.25), + norm_bbox=True), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[1024, 1024, 40], + voxel_size=voxel_size, + out_size_factor=8, + dense_reg=1, + gaussian_overlap=0.1, + max_objs=500, + min_radius=2, + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2])), + test_cfg=dict( + pts=dict( + post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + max_per_img=500, + max_pool_nms=False, + min_radius=[4, 12, 10, 1, 0.85, 0.175], + score_threshold=0.1, + out_size_factor=8, + voxel_size=voxel_size[:2], + nms_type='rotate', + pre_max_size=1000, + post_max_size=83, + nms_thr=0.2))) diff --git a/adzoo/bevformer/configs/_base_/models/centerpoint_02pillar_second_secfpn_nus.py b/adzoo/bevformer/configs/_base_/models/centerpoint_02pillar_second_secfpn_nus.py new file mode 100644 index 0000000..311d763 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/centerpoint_02pillar_second_secfpn_nus.py @@ -0,0 +1,83 @@ +voxel_size = [0.2, 0.2, 8] +model = dict( + type='CenterPoint', + pts_voxel_layer=dict( + max_num_points=20, voxel_size=voxel_size, max_voxels=(30000, 40000)), + pts_voxel_encoder=dict( + type='PillarFeatureNet', + in_channels=5, + feat_channels=[64], + with_distance=False, + voxel_size=(0.2, 0.2, 8), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01), + legacy=False), + pts_middle_encoder=dict( + type='PointPillarsScatter', in_channels=64, output_shape=(512, 512)), + pts_backbone=dict( + type='SECOND', + in_channels=64, + out_channels=[64, 128, 256], + layer_nums=[3, 5, 5], + layer_strides=[2, 2, 2], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + conv_cfg=dict(type='Conv2d', bias=False)), + pts_neck=dict( + type='SECONDFPN', + in_channels=[64, 128, 256], + out_channels=[128, 128, 128], + upsample_strides=[0.5, 1, 2], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + upsample_cfg=dict(type='deconv', bias=False), + use_conv_for_no_stride=True), + pts_bbox_head=dict( + type='CenterHead', + in_channels=sum([128, 128, 128]), + tasks=[ + dict(num_class=1, class_names=['car']), + dict(num_class=2, class_names=['truck', 'construction_vehicle']), + dict(num_class=2, class_names=['bus', 'trailer']), + dict(num_class=1, 
class_names=['barrier']), + dict(num_class=2, class_names=['motorcycle', 'bicycle']), + dict(num_class=2, class_names=['pedestrian', 'traffic_cone']), + ], + common_heads=dict( + reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)), + share_conv_channel=64, + bbox_coder=dict( + type='CenterPointBBoxCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + max_num=500, + score_threshold=0.1, + out_size_factor=4, + voxel_size=voxel_size[:2], + code_size=9), + separate_head=dict( + type='SeparateHead', init_bias=-2.19, final_kernel=3), + loss_cls=dict(type='GaussianFocalLoss', reduction='mean'), + loss_bbox=dict(type='L1Loss', reduction='mean', loss_weight=0.25), + norm_bbox=True), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + out_size_factor=4, + dense_reg=1, + gaussian_overlap=0.1, + max_objs=500, + min_radius=2, + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2])), + test_cfg=dict( + pts=dict( + post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + max_per_img=500, + max_pool_nms=False, + min_radius=[4, 12, 10, 1, 0.85, 0.175], + score_threshold=0.1, + pc_range=[-51.2, -51.2], + out_size_factor=4, + voxel_size=voxel_size[:2], + nms_type='rotate', + pre_max_size=1000, + post_max_size=83, + nms_thr=0.2))) diff --git a/adzoo/bevformer/configs/_base_/models/fcos3d.py b/adzoo/bevformer/configs/_base_/models/fcos3d.py new file mode 100644 index 0000000..92ea907 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/fcos3d.py @@ -0,0 +1,74 @@ +model = dict( + type='FCOSMono3D', + pretrained='open-mmlab://detectron2/resnet101_caffe', + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5, + relu_before_extra_convs=True), + bbox_head=dict( + type='FCOSMono3DHead', + num_classes=10, + in_channels=256, + stacked_convs=2, + feat_channels=256, + use_direction_classifier=True, + diff_rad_by_sin=True, + pred_attrs=True, + pred_velo=True, + dir_offset=0.7854, # pi/4 + strides=[8, 16, 32, 64, 128], + group_reg_dims=(2, 1, 3, 1, 2), # offset, depth, size, rot, velo + cls_branch=(256, ), + reg_branch=( + (256, ), # offset + (256, ), # depth + (256, ), # size + (256, ), # rot + () # velo + ), + dir_branch=(256, ), + attr_branch=(256, ), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_attr=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + norm_on_bbox=True, + centerness_on_reg=True, + center_sampling=True, + conv_bias=True, + dcn_on_last_conv=True), + train_cfg=dict( + allowed_border=0, + code_weight=[1.0, 1.0, 0.2, 1.0, 1.0, 1.0, 1.0, 0.05, 0.05], + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_pre=1000, + nms_thr=0.8, + score_thr=0.05, + min_bbox_size=0, + max_per_img=200)) diff --git a/adzoo/bevformer/configs/_base_/models/groupfree3d.py b/adzoo/bevformer/configs/_base_/models/groupfree3d.py new file mode 100644 index 
0000000..077d049 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/groupfree3d.py @@ -0,0 +1,71 @@ +model = dict( + type='GroupFree3DNet', + backbone=dict( + type='PointNet2SASSG', + in_channels=3, + num_points=(2048, 1024, 512, 256), + radius=(0.2, 0.4, 0.8, 1.2), + num_samples=(64, 32, 16, 16), + sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256), + (128, 128, 256)), + fp_channels=((256, 256), (256, 288)), + norm_cfg=dict(type='BN2d'), + sa_cfg=dict( + type='PointSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=True)), + bbox_head=dict( + type='GroupFree3DHead', + in_channels=288, + num_decoder_layers=6, + num_proposal=256, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='GroupFree3DMHA', + embed_dims=288, + num_heads=8, + attn_drop=0.1, + dropout_layer=dict(type='Dropout', drop_prob=0.1)), + ffn_cfgs=dict( + embed_dims=288, + feedforward_channels=2048, + ffn_drop=0.1, + act_cfg=dict(type='ReLU', inplace=True)), + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', + 'norm')), + pred_layer_cfg=dict( + in_channels=288, shared_conv_channels=(288, 288), bias=True), + sampling_objectness_loss=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=8.0), + objectness_loss=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + center_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=10.0), + dir_class_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), + dir_res_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=10.0), + size_class_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), + size_res_loss=dict( + type='SmoothL1Loss', beta=1.0, reduction='sum', loss_weight=10.0), + semantic_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(sample_mod='kps'), + test_cfg=dict( + sample_mod='kps', + nms_thr=0.25, + score_thr=0.0, + per_class_proposal=True, + prediction_stages='last')) diff --git a/adzoo/bevformer/configs/_base_/models/h3dnet.py b/adzoo/bevformer/configs/_base_/models/h3dnet.py new file mode 100644 index 0000000..7605667 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/h3dnet.py @@ -0,0 +1,341 @@ +primitive_z_cfg = dict( + type='PrimitiveHead', + num_dims=2, + num_classes=18, + primitive_mode='z', + upper_thresh=100.0, + surface_thresh=0.5, + vote_module_cfg=dict( + in_channels=256, + vote_per_seed=1, + gt_per_seed=1, + conv_channels=(256, 256), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + norm_feats=True, + vote_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='none', + loss_dst_weight=10.0)), + vote_aggregation_cfg=dict( + type='PointSAModule', + num_point=1024, + radius=0.3, + num_sample=16, + mlp_channels=[256, 128, 128, 128], + use_xyz=True, + normalize_xyz=True), + feat_channels=(128, 128), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + objectness_loss=dict( + type='CrossEntropyLoss', + class_weight=[0.4, 0.6], + reduction='mean', + loss_weight=30.0), + center_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='sum', + loss_src_weight=0.5, + loss_dst_weight=0.5), + semantic_reg_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='sum', + loss_src_weight=0.5, + loss_dst_weight=0.5), + semantic_cls_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), + train_cfg=dict( + dist_thresh=0.2, + 
var_thresh=1e-2, + lower_thresh=1e-6, + num_point=100, + num_point_line=10, + line_thresh=0.2)) + +primitive_xy_cfg = dict( + type='PrimitiveHead', + num_dims=1, + num_classes=18, + primitive_mode='xy', + upper_thresh=100.0, + surface_thresh=0.5, + vote_module_cfg=dict( + in_channels=256, + vote_per_seed=1, + gt_per_seed=1, + conv_channels=(256, 256), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + norm_feats=True, + vote_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='none', + loss_dst_weight=10.0)), + vote_aggregation_cfg=dict( + type='PointSAModule', + num_point=1024, + radius=0.3, + num_sample=16, + mlp_channels=[256, 128, 128, 128], + use_xyz=True, + normalize_xyz=True), + feat_channels=(128, 128), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + objectness_loss=dict( + type='CrossEntropyLoss', + class_weight=[0.4, 0.6], + reduction='mean', + loss_weight=30.0), + center_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='sum', + loss_src_weight=0.5, + loss_dst_weight=0.5), + semantic_reg_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='sum', + loss_src_weight=0.5, + loss_dst_weight=0.5), + semantic_cls_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), + train_cfg=dict( + dist_thresh=0.2, + var_thresh=1e-2, + lower_thresh=1e-6, + num_point=100, + num_point_line=10, + line_thresh=0.2)) + +primitive_line_cfg = dict( + type='PrimitiveHead', + num_dims=0, + num_classes=18, + primitive_mode='line', + upper_thresh=100.0, + surface_thresh=0.5, + vote_module_cfg=dict( + in_channels=256, + vote_per_seed=1, + gt_per_seed=1, + conv_channels=(256, 256), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + norm_feats=True, + vote_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='none', + loss_dst_weight=10.0)), + vote_aggregation_cfg=dict( + type='PointSAModule', + num_point=1024, + radius=0.3, + num_sample=16, + mlp_channels=[256, 128, 128, 128], + use_xyz=True, + normalize_xyz=True), + feat_channels=(128, 128), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + objectness_loss=dict( + type='CrossEntropyLoss', + class_weight=[0.4, 0.6], + reduction='mean', + loss_weight=30.0), + center_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='sum', + loss_src_weight=1.0, + loss_dst_weight=1.0), + semantic_reg_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='sum', + loss_src_weight=1.0, + loss_dst_weight=1.0), + semantic_cls_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=2.0), + train_cfg=dict( + dist_thresh=0.2, + var_thresh=1e-2, + lower_thresh=1e-6, + num_point=100, + num_point_line=10, + line_thresh=0.2)) + +model = dict( + type='H3DNet', + backbone=dict( + type='MultiBackbone', + num_streams=4, + suffixes=['net0', 'net1', 'net2', 'net3'], + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d', eps=1e-5, momentum=0.01), + act_cfg=dict(type='ReLU'), + backbones=dict( + type='PointNet2SASSG', + in_channels=4, + num_points=(2048, 1024, 512, 256), + radius=(0.2, 0.4, 0.8, 1.2), + num_samples=(64, 32, 16, 16), + sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256), + (128, 128, 256)), + fp_channels=((256, 256), (256, 256)), + norm_cfg=dict(type='BN2d'), + sa_cfg=dict( + type='PointSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=True))), + rpn_head=dict( + type='VoteHead', + vote_module_cfg=dict( + in_channels=256, + vote_per_seed=1, + gt_per_seed=3, + conv_channels=(256, 256), + 
conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + norm_feats=True, + vote_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='none', + loss_dst_weight=10.0)), + vote_aggregation_cfg=dict( + type='PointSAModule', + num_point=256, + radius=0.3, + num_sample=16, + mlp_channels=[256, 128, 128, 128], + use_xyz=True, + normalize_xyz=True), + pred_layer_cfg=dict( + in_channels=128, shared_conv_channels=(128, 128), bias=True), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + objectness_loss=dict( + type='CrossEntropyLoss', + class_weight=[0.2, 0.8], + reduction='sum', + loss_weight=5.0), + center_loss=dict( + type='ChamferDistance', + mode='l2', + reduction='sum', + loss_src_weight=10.0, + loss_dst_weight=10.0), + dir_class_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), + dir_res_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=10.0), + size_class_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), + size_res_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=10.0), + semantic_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0)), + roi_head=dict( + type='H3DRoIHead', + primitive_list=[primitive_z_cfg, primitive_xy_cfg, primitive_line_cfg], + bbox_head=dict( + type='H3DBboxHead', + gt_per_seed=3, + num_proposal=256, + suface_matching_cfg=dict( + type='PointSAModule', + num_point=256 * 6, + radius=0.5, + num_sample=32, + mlp_channels=[128 + 6, 128, 64, 32], + use_xyz=True, + normalize_xyz=True), + line_matching_cfg=dict( + type='PointSAModule', + num_point=256 * 12, + radius=0.5, + num_sample=32, + mlp_channels=[128 + 12, 128, 64, 32], + use_xyz=True, + normalize_xyz=True), + feat_channels=(128, 128), + primitive_refine_channels=[128, 128, 128], + upper_thresh=100.0, + surface_thresh=0.5, + line_thresh=0.5, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + objectness_loss=dict( + type='CrossEntropyLoss', + class_weight=[0.2, 0.8], + reduction='sum', + loss_weight=5.0), + center_loss=dict( + type='ChamferDistance', + mode='l2', + reduction='sum', + loss_src_weight=10.0, + loss_dst_weight=10.0), + dir_class_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=0.1), + dir_res_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=10.0), + size_class_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=0.1), + size_res_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=10.0), + semantic_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=0.1), + cues_objectness_loss=dict( + type='CrossEntropyLoss', + class_weight=[0.3, 0.7], + reduction='mean', + loss_weight=5.0), + cues_semantic_loss=dict( + type='CrossEntropyLoss', + class_weight=[0.3, 0.7], + reduction='mean', + loss_weight=5.0), + proposal_objectness_loss=dict( + type='CrossEntropyLoss', + class_weight=[0.2, 0.8], + reduction='none', + loss_weight=5.0), + primitive_center_loss=dict( + type='MSELoss', reduction='none', loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + pos_distance_thr=0.3, neg_distance_thr=0.6, sample_mod='vote'), + rpn_proposal=dict(use_nms=False), + rcnn=dict( + pos_distance_thr=0.3, + neg_distance_thr=0.6, + sample_mod='vote', + far_threshold=0.6, + near_threshold=0.3, + mask_surface_threshold=0.3, + label_surface_threshold=0.3, + mask_line_threshold=0.3, + label_line_threshold=0.3)), + test_cfg=dict( + rpn=dict( + sample_mod='seed', + nms_thr=0.25, + score_thr=0.05, + 
per_class_proposal=True, + use_nms=False), + rcnn=dict( + sample_mod='seed', + nms_thr=0.25, + score_thr=0.05, + per_class_proposal=True))) diff --git a/adzoo/bevformer/configs/_base_/models/hv_pointpillars_fpn_lyft.py b/adzoo/bevformer/configs/_base_/models/hv_pointpillars_fpn_lyft.py new file mode 100644 index 0000000..87c7fe0 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/hv_pointpillars_fpn_lyft.py @@ -0,0 +1,22 @@ +_base_ = './hv_pointpillars_fpn_nus.py' + +# model settings (based on nuScenes model settings) +# Voxel size for voxel encoder +# Usually voxel size is changed consistently with the point cloud range +# If point cloud range is modified, do remember to change all related +# keys in the config. +model = dict( + pts_voxel_layer=dict( + max_num_points=20, + point_cloud_range=[-80, -80, -5, 80, 80, 3], + max_voxels=(60000, 60000)), + pts_voxel_encoder=dict( + feat_channels=[64], point_cloud_range=[-80, -80, -5, 80, 80, 3]), + pts_middle_encoder=dict(output_shape=[640, 640]), + pts_bbox_head=dict( + num_classes=9, + anchor_generator=dict( + ranges=[[-80, -80, -1.8, 80, 80, -1.8]], custom_values=[]), + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7)), + # model training settings (based on nuScenes model settings) + train_cfg=dict(pts=dict(code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]))) diff --git a/adzoo/bevformer/configs/_base_/models/hv_pointpillars_fpn_nus.py b/adzoo/bevformer/configs/_base_/models/hv_pointpillars_fpn_nus.py new file mode 100644 index 0000000..e153f6c --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/hv_pointpillars_fpn_nus.py @@ -0,0 +1,96 @@ +# model settings +# Voxel size for voxel encoder +# Usually voxel size is changed consistently with the point cloud range +# If point cloud range is modified, do remember to change all related +# keys in the config. 
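+# For example, with the [-50, 50] m range used below, 0.25 m pillars give a +# (50 - (-50)) / 0.25 = 400 cell grid, which is why output_shape=[400, 400].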
+voxel_size = [0.25, 0.25, 8] +model = dict( + type='MVXFasterRCNN', + pts_voxel_layer=dict( + max_num_points=64, + point_cloud_range=[-50, -50, -5, 50, 50, 3], + voxel_size=voxel_size, + max_voxels=(30000, 40000)), + pts_voxel_encoder=dict( + type='HardVFE', + in_channels=4, + feat_channels=[64, 64], + with_distance=False, + voxel_size=voxel_size, + with_cluster_center=True, + with_voxel_center=True, + point_cloud_range=[-50, -50, -5, 50, 50, 3], + norm_cfg=dict(type='naiveSyncBN1d', eps=1e-3, momentum=0.01)), + pts_middle_encoder=dict( + type='PointPillarsScatter', in_channels=64, output_shape=[400, 400]), + pts_backbone=dict( + type='SECOND', + in_channels=64, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + layer_nums=[3, 5, 5], + layer_strides=[2, 2, 2], + out_channels=[64, 128, 256]), + pts_neck=dict( + type='FPN', + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + act_cfg=dict(type='ReLU'), + in_channels=[64, 128, 256], + out_channels=256, + start_level=0, + num_outs=3), + pts_bbox_head=dict( + type='Anchor3DHead', + num_classes=10, + in_channels=256, + feat_channels=256, + use_direction_classifier=True, + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[-50, -50, -1.8, 50, 50, -1.8]], + scales=[1, 2, 4], + sizes=[ + [0.8660, 2.5981, 1.], # 1.5/sqrt(3) + [0.5774, 1.7321, 1.], # 1/sqrt(3) + [1., 1., 1.], + [0.4, 0.4, 1], + ], + custom_values=[0, 0], + rotations=[0, 1.57], + reshape_out=True), + assigner_per_size=False, + diff_rad_by_sin=True, + dir_offset=0.7854, # pi/4 + dir_limit_offset=0, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=9), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + pts=dict( + assigner=dict( + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + allowed_border=0, + code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2], + pos_weight=-1, + debug=False)), + test_cfg=dict( + pts=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_pre=1000, + nms_thr=0.2, + score_thr=0.05, + min_bbox_size=0, + max_num=500))) diff --git a/adzoo/bevformer/configs/_base_/models/hv_pointpillars_fpn_range100_lyft.py b/adzoo/bevformer/configs/_base_/models/hv_pointpillars_fpn_range100_lyft.py new file mode 100644 index 0000000..9cd200f --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/hv_pointpillars_fpn_range100_lyft.py @@ -0,0 +1,22 @@ +_base_ = './hv_pointpillars_fpn_nus.py' + +# model settings (based on nuScenes model settings) +# Voxel size for voxel encoder +# Usually voxel size is changed consistently with the point cloud range +# If point cloud range is modified, do remember to change all related +# keys in the config. 
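+# Here the range doubles to [-100, 100] m while the 0.25 m pillar size is +# inherited from the nus base config, so the BEV grid becomes 200 / 0.25 = 800 +# cells per side, hence output_shape=[800, 800] below.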
+model = dict( + pts_voxel_layer=dict( + max_num_points=20, + point_cloud_range=[-100, -100, -5, 100, 100, 3], + max_voxels=(60000, 60000)), + pts_voxel_encoder=dict( + feat_channels=[64], point_cloud_range=[-100, -100, -5, 100, 100, 3]), + pts_middle_encoder=dict(output_shape=[800, 800]), + pts_bbox_head=dict( + num_classes=9, + anchor_generator=dict( + ranges=[[-100, -100, -1.8, 100, 100, -1.8]], custom_values=[]), + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7)), + # model training settings (based on nuScenes model settings) + train_cfg=dict(pts=dict(code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]))) diff --git a/adzoo/bevformer/configs/_base_/models/hv_pointpillars_secfpn_kitti.py b/adzoo/bevformer/configs/_base_/models/hv_pointpillars_secfpn_kitti.py new file mode 100644 index 0000000..85076d0 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/hv_pointpillars_secfpn_kitti.py @@ -0,0 +1,93 @@ +voxel_size = [0.16, 0.16, 4] + +model = dict( + type='VoxelNet', + voxel_layer=dict( + max_num_points=32, # max_points_per_voxel + point_cloud_range=[0, -39.68, -3, 69.12, 39.68, 1], + voxel_size=voxel_size, + max_voxels=(16000, 40000) # (training, testing) max_voxels + ), + voxel_encoder=dict( + type='PillarFeatureNet', + in_channels=4, + feat_channels=[64], + with_distance=False, + voxel_size=voxel_size, + point_cloud_range=[0, -39.68, -3, 69.12, 39.68, 1]), + middle_encoder=dict( + type='PointPillarsScatter', in_channels=64, output_shape=[496, 432]), + backbone=dict( + type='SECOND', + in_channels=64, + layer_nums=[3, 5, 5], + layer_strides=[2, 2, 2], + out_channels=[64, 128, 256]), + neck=dict( + type='SECONDFPN', + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=384, + feat_channels=384, + use_direction_classifier=True, + anchor_generator=dict( + type='Anchor3DRangeGenerator', + ranges=[ + [0, -39.68, -0.6, 70.4, 39.68, -0.6], + [0, -39.68, -0.6, 70.4, 39.68, -0.6], + [0, -39.68, -1.78, 70.4, 39.68, -1.78], + ], + sizes=[[0.6, 0.8, 1.73], [0.6, 1.76, 1.73], [1.6, 3.9, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + assigner=[ + dict( # for Pedestrian + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Cyclist + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Car + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + ], + allowed_border=0, + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_thr=0.01, + score_thr=0.1, + min_bbox_size=0, + nms_pre=100, + max_num=50)) diff --git a/adzoo/bevformer/configs/_base_/models/hv_pointpillars_secfpn_waymo.py b/adzoo/bevformer/configs/_base_/models/hv_pointpillars_secfpn_waymo.py new file mode 100644 index 0000000..14873ea --- /dev/null 
+++ b/adzoo/bevformer/configs/_base_/models/hv_pointpillars_secfpn_waymo.py @@ -0,0 +1,108 @@ +# model settings +# Voxel size for voxel encoder +# Usually voxel size is changed consistently with the point cloud range +# If point cloud range is modified, do remember to change all related +# keys in the config. +voxel_size = [0.32, 0.32, 6] +model = dict( + type='MVXFasterRCNN', + pts_voxel_layer=dict( + max_num_points=20, + point_cloud_range=[-74.88, -74.88, -2, 74.88, 74.88, 4], + voxel_size=voxel_size, + max_voxels=(32000, 32000)), + pts_voxel_encoder=dict( + type='HardVFE', + in_channels=5, + feat_channels=[64], + with_distance=False, + voxel_size=voxel_size, + with_cluster_center=True, + with_voxel_center=True, + point_cloud_range=[-74.88, -74.88, -2, 74.88, 74.88, 4], + norm_cfg=dict(type='naiveSyncBN1d', eps=1e-3, momentum=0.01)), + pts_middle_encoder=dict( + type='PointPillarsScatter', in_channels=64, output_shape=[468, 468]), + pts_backbone=dict( + type='SECOND', + in_channels=64, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + layer_nums=[3, 5, 5], + layer_strides=[1, 2, 2], + out_channels=[64, 128, 256]), + pts_neck=dict( + type='SECONDFPN', + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + pts_bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=384, + feat_channels=384, + use_direction_classifier=True, + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[-74.88, -74.88, -0.0345, 74.88, 74.88, -0.0345], + [-74.88, -74.88, -0.1188, 74.88, 74.88, -0.1188], + [-74.88, -74.88, 0, 74.88, 74.88, 0]], + sizes=[ + [2.08, 4.73, 1.77], # car + [0.84, 1.81, 1.77], # cyclist + [0.84, 0.91, 1.74] # pedestrian + ], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + dir_offset=0.7854, # pi/4 + dir_limit_offset=0, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + pts=dict( + assigner=[ + dict( # car + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.55, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + dict( # cyclist + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + dict( # pedestrian + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + ], + allowed_border=0, + code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + pos_weight=-1, + debug=False)), + test_cfg=dict( + pts=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_pre=4096, + nms_thr=0.25, + score_thr=0.1, + min_bbox_size=0, + max_num=500))) diff --git a/adzoo/bevformer/configs/_base_/models/hv_second_secfpn_kitti.py b/adzoo/bevformer/configs/_base_/models/hv_second_secfpn_kitti.py new file mode 100644 index 0000000..6bf18ab --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/hv_second_secfpn_kitti.py @@ -0,0 +1,89 @@ +voxel_size = [0.05, 0.05, 0.1] + +model = dict( + type='VoxelNet', + voxel_layer=dict( + max_num_points=5, + point_cloud_range=[0, -40, 
-3, 70.4, 40, 1], + voxel_size=voxel_size, + max_voxels=(16000, 40000)), + voxel_encoder=dict(type='HardSimpleVFE'), + middle_encoder=dict( + type='SparseEncoder', + in_channels=4, + sparse_shape=[41, 1600, 1408], + order=('conv', 'norm', 'act')), + backbone=dict( + type='SECOND', + in_channels=256, + layer_nums=[5, 5], + layer_strides=[1, 2], + out_channels=[128, 256]), + neck=dict( + type='SECONDFPN', + in_channels=[128, 256], + upsample_strides=[1, 2], + out_channels=[256, 256]), + bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=512, + feat_channels=512, + use_direction_classifier=True, + anchor_generator=dict( + type='Anchor3DRangeGenerator', + ranges=[ + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -1.78, 70.4, 40.0, -1.78], + ], + sizes=[[0.6, 0.8, 1.73], [0.6, 1.76, 1.73], [1.6, 3.9, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + assigner=[ + dict( # for Pedestrian + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.35, + neg_iou_thr=0.2, + min_pos_iou=0.2, + ignore_iof_thr=-1), + dict( # for Cyclist + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.35, + neg_iou_thr=0.2, + min_pos_iou=0.2, + ignore_iof_thr=-1), + dict( # for Car + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + ], + allowed_border=0, + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_thr=0.01, + score_thr=0.1, + min_bbox_size=0, + nms_pre=100, + max_num=50)) diff --git a/adzoo/bevformer/configs/_base_/models/hv_second_secfpn_waymo.py b/adzoo/bevformer/configs/_base_/models/hv_second_secfpn_waymo.py new file mode 100644 index 0000000..eb9bd3a --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/hv_second_secfpn_waymo.py @@ -0,0 +1,100 @@ +# model settings +# Voxel size for voxel encoder +# Usually voxel size is changed consistently with the point cloud range +# If point cloud range is modified, do remember to change all related +# keys in the config. 
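+# For example: x spans (76.8 - (-76.8)) / 0.08 = 1920 voxels, y spans +# (51.2 - (-51.2)) / 0.08 = 1280, and z spans (4 - (-2)) / 0.1 = 60 (+1), +# which matches sparse_shape=[61, 1280, 1920] in the middle encoder below.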
+voxel_size = [0.08, 0.08, 0.1] +model = dict( + type='VoxelNet', + voxel_layer=dict( + max_num_points=10, + point_cloud_range=[-76.8, -51.2, -2, 76.8, 51.2, 4], + voxel_size=voxel_size, + max_voxels=(80000, 90000)), + voxel_encoder=dict(type='HardSimpleVFE', num_features=5), + middle_encoder=dict( + type='SparseEncoder', + in_channels=5, + sparse_shape=[61, 1280, 1920], + order=('conv', 'norm', 'act')), + backbone=dict( + type='SECOND', + in_channels=384, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + layer_nums=[5, 5], + layer_strides=[1, 2], + out_channels=[128, 256]), + neck=dict( + type='SECONDFPN', + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + in_channels=[128, 256], + upsample_strides=[1, 2], + out_channels=[256, 256]), + bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=512, + feat_channels=512, + use_direction_classifier=True, + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[-76.8, -51.2, -0.0345, 76.8, 51.2, -0.0345], + [-76.8, -51.2, 0, 76.8, 51.2, 0], + [-76.8, -51.2, -0.1188, 76.8, 51.2, -0.1188]], + sizes=[ + [2.08, 4.73, 1.77], # car + [0.84, 0.91, 1.74], # pedestrian + [0.84, 1.81, 1.77] # cyclist + ], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + dir_offset=0.7854, # pi/4 + dir_limit_offset=0, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + assigner=[ + dict( # car + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.55, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + dict( # pedestrian + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + dict( # cyclist + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1) + ], + allowed_border=0, + code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_pre=4096, + nms_thr=0.25, + score_thr=0.1, + min_bbox_size=0, + max_num=500)) diff --git a/adzoo/bevformer/configs/_base_/models/imvotenet_image.py b/adzoo/bevformer/configs/_base_/models/imvotenet_image.py new file mode 100644 index 0000000..981f8bc --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/imvotenet_image.py @@ -0,0 +1,108 @@ +model = dict( + type='ImVoteNet', + img_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe'), + img_neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + img_rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', 
loss_weight=1.0)), + img_roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=10, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + + # model training and testing settings + train_cfg=dict( + img_rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + img_rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + img_rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + img_rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + img_rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))) diff --git a/adzoo/bevformer/configs/_base_/models/mask_rcnn_r50_fpn.py b/adzoo/bevformer/configs/_base_/models/mask_rcnn_r50_fpn.py new file mode 100644 index 0000000..c5d5e32 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/mask_rcnn_r50_fpn.py @@ -0,0 +1,124 @@ +# model settings +model = dict( + type='MaskRCNN', + pretrained='torchvision://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + 
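# mask branch: 14x14 RoIAlign features feed the four-conv FCN mask head +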
mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) diff --git a/adzoo/bevformer/configs/_base_/models/paconv_cuda_ssg.py b/adzoo/bevformer/configs/_base_/models/paconv_cuda_ssg.py new file mode 100644 index 0000000..f513bd4 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/paconv_cuda_ssg.py @@ -0,0 +1,7 @@ +_base_ = './paconv_ssg.py' + +model = dict( + backbone=dict( + sa_cfg=dict( + type='PAConvCUDASAModule', + scorenet_cfg=dict(mlp_channels=[8, 16, 16])))) diff --git a/adzoo/bevformer/configs/_base_/models/paconv_ssg.py b/adzoo/bevformer/configs/_base_/models/paconv_ssg.py new file mode 100644 index 0000000..1d4f1ed --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/paconv_ssg.py @@ -0,0 +1,49 @@ +# model settings +model = dict( + type='EncoderDecoder3D', + backbone=dict( + type='PointNet2SASSG', + in_channels=9, # [xyz, rgb, normalized_xyz] + num_points=(1024, 256, 64, 16), + radius=(None, None, None, None), # use kNN instead of ball query + num_samples=(32, 32, 32, 32), + sa_channels=((32, 32, 64), (64, 64, 128), (128, 128, 256), (256, 256, + 512)), + fp_channels=(), + norm_cfg=dict(type='BN2d', momentum=0.1), + sa_cfg=dict( + type='PAConvSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=False, + paconv_num_kernels=[16, 16, 16], + paconv_kernel_input='w_neighbor', + scorenet_input='w_neighbor_dist', + scorenet_cfg=dict( + mlp_channels=[16, 16, 16], + score_norm='softmax', + temp_factor=1.0, + last_bn=False))), + decode_head=dict( + type='PAConvHead', + # PAConv model's decoder takes skip connections from the backbone; + # unlike PointNet++, it also concatenates the input features in the + # last level of the decoder, leading to `128 + 6` as the channel number + fp_channels=((768, 256, 256), (384, 256, 256), (320, 256, 128), + (128 + 6, 128, 128, 128)), + channels=128, + dropout_ratio=0.5, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='ReLU'), + loss_decode=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + class_weight=None, # should be modified with dataset + loss_weight=1.0)), + # correlation loss to regularize PAConv's
kernel weights + loss_regularization=dict( + type='PAConvRegularizationLoss', reduction='sum', loss_weight=10.0), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='slide')) diff --git a/adzoo/bevformer/configs/_base_/models/parta2.py b/adzoo/bevformer/configs/_base_/models/parta2.py new file mode 100644 index 0000000..6c5ae9a --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/parta2.py @@ -0,0 +1,201 @@ +# model settings +voxel_size = [0.05, 0.05, 0.1] +point_cloud_range = [0, -40, -3, 70.4, 40, 1] + +model = dict( + type='PartA2', + voxel_layer=dict( + max_num_points=5, # max_points_per_voxel + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + max_voxels=(16000, 40000) # (training, testing) max_voxels + ), + voxel_encoder=dict(type='HardSimpleVFE'), + middle_encoder=dict( + type='SparseUNet', + in_channels=4, + sparse_shape=[41, 1600, 1408], + order=('conv', 'norm', 'act')), + backbone=dict( + type='SECOND', + in_channels=256, + layer_nums=[5, 5], + layer_strides=[1, 2], + out_channels=[128, 256]), + neck=dict( + type='SECONDFPN', + in_channels=[128, 256], + upsample_strides=[1, 2], + out_channels=[256, 256]), + rpn_head=dict( + type='PartA2RPNHead', + num_classes=3, + in_channels=512, + feat_channels=512, + use_direction_classifier=True, + anchor_generator=dict( + type='Anchor3DRangeGenerator', + ranges=[[0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -1.78, 70.4, 40.0, -1.78]], + sizes=[[0.6, 0.8, 1.73], [0.6, 1.76, 1.73], [1.6, 3.9, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + assigner_per_size=True, + assign_per_class=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), + roi_head=dict( + type='PartAggregationROIHead', + num_classes=3, + semantic_head=dict( + type='PointwiseSemanticHead', + in_channels=16, + extra_width=0.2, + seg_score_thr=0.3, + num_classes=3, + loss_seg=dict( + type='FocalLoss', + use_sigmoid=True, + reduction='sum', + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_part=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + seg_roi_extractor=dict( + type='Single3DRoIAwareExtractor', + roi_layer=dict( + type='RoIAwarePool3d', + out_size=14, + max_pts_per_voxel=128, + mode='max')), + part_roi_extractor=dict( + type='Single3DRoIAwareExtractor', + roi_layer=dict( + type='RoIAwarePool3d', + out_size=14, + max_pts_per_voxel=128, + mode='avg')), + bbox_head=dict( + type='PartA2BboxHead', + num_classes=3, + seg_in_channels=16, + part_in_channels=4, + seg_conv_channels=[64, 64], + part_conv_channels=[64, 64], + merge_conv_channels=[128, 128], + down_conv_channels=[128, 256], + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + shared_fc_channels=[256, 512, 512, 512], + cls_channels=[256, 256], + reg_channels=[256, 256], + dropout_ratio=0.1, + roi_feat_size=14, + with_corner_loss=True, + loss_bbox=dict( + type='SmoothL1Loss', + beta=1.0 / 9.0, + reduction='sum', + loss_weight=1.0), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=[ + dict( # for Pedestrian + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + 
pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Cyclist + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Car + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1) + ], + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=9000, + nms_post=512, + max_num=512, + nms_thr=0.8, + score_thr=0, + use_rotate_nms=False), + rcnn=dict( + assigner=[ + dict( # for Pedestrian + type='MaxIoUAssigner', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1), + dict( # for Cyclist + type='MaxIoUAssigner', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1), + dict( # for Car + type='MaxIoUAssigner', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1) + ], + sampler=dict( + type='IoUNegPiecewiseSampler', + num=128, + pos_fraction=0.55, + neg_piece_fractions=[0.8, 0.2], + neg_iou_piece_thrs=[0.55, 0.1], + neg_pos_ub=-1, + add_gt_as_proposals=False, + return_iou=True), + cls_pos_thr=0.75, + cls_neg_thr=0.25)), + test_cfg=dict( + rpn=dict( + nms_pre=1024, + nms_post=100, + max_num=100, + nms_thr=0.7, + score_thr=0, + use_rotate_nms=True), + rcnn=dict( + use_rotate_nms=True, + use_raw_score=True, + nms_thr=0.01, + score_thr=0.1))) diff --git a/adzoo/bevformer/configs/_base_/models/pointnet2_msg.py b/adzoo/bevformer/configs/_base_/models/pointnet2_msg.py new file mode 100644 index 0000000..222ab88 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/pointnet2_msg.py @@ -0,0 +1,28 @@ +_base_ = './pointnet2_ssg.py' + +# model settings +model = dict( + backbone=dict( + _delete_=True, + type='PointNet2SAMSG', + in_channels=6, # [xyz, rgb], should be modified with dataset + num_points=(1024, 256, 64, 16), + radii=((0.05, 0.1), (0.1, 0.2), (0.2, 0.4), (0.4, 0.8)), + num_samples=((16, 32), (16, 32), (16, 32), (16, 32)), + sa_channels=(((16, 16, 32), (32, 32, 64)), ((64, 64, 128), (64, 96, + 128)), + ((128, 196, 256), (128, 196, 256)), ((256, 256, 512), + (256, 384, 512))), + aggregation_channels=(None, None, None, None), + fps_mods=(('D-FPS'), ('D-FPS'), ('D-FPS'), ('D-FPS')), + fps_sample_range_lists=((-1), (-1), (-1), (-1)), + dilated_group=(False, False, False, False), + out_indices=(0, 1, 2, 3), + sa_cfg=dict( + type='PointSAModuleMSG', + pool_mod='max', + use_xyz=True, + normalize_xyz=False)), + decode_head=dict( + fp_channels=((1536, 256, 256), (512, 256, 256), (352, 256, 128), + (128, 128, 128, 128)))) diff --git a/adzoo/bevformer/configs/_base_/models/pointnet2_ssg.py b/adzoo/bevformer/configs/_base_/models/pointnet2_ssg.py new file mode 100644 index 0000000..58b4c24 --- /dev/null +++ b/adzoo/bevformer/configs/_base_/models/pointnet2_ssg.py @@ -0,0 +1,35 @@ +# model settings +model = dict( + type='EncoderDecoder3D', + backbone=dict( + type='PointNet2SASSG', + in_channels=6, # [xyz, rgb], should be modified with dataset + num_points=(1024, 256, 64, 16), + radius=(0.1, 0.2, 0.4, 0.8), + num_samples=(32, 32, 32, 32), + sa_channels=((32, 32, 64), (64, 64, 128), (128, 128, 256), (256, 256, + 512)), + fp_channels=(), + 
norm_cfg=dict(type='BN2d'),
+        sa_cfg=dict(
+            type='PointSAModule',
+            pool_mod='max',
+            use_xyz=True,
+            normalize_xyz=False)),
+    decode_head=dict(
+        type='PointNet2Head',
+        fp_channels=((768, 256, 256), (384, 256, 256), (320, 256, 128),
+                     (128, 128, 128, 128)),
+        channels=128,
+        dropout_ratio=0.5,
+        conv_cfg=dict(type='Conv1d'),
+        norm_cfg=dict(type='BN1d'),
+        act_cfg=dict(type='ReLU'),
+        loss_decode=dict(
+            type='CrossEntropyLoss',
+            use_sigmoid=False,
+            class_weight=None,  # should be modified with dataset
+            loss_weight=1.0)),
+    # model training and testing settings
+    train_cfg=dict(),
+    test_cfg=dict(mode='slide'))
diff --git a/adzoo/bevformer/configs/_base_/models/votenet.py b/adzoo/bevformer/configs/_base_/models/votenet.py
new file mode 100644
index 0000000..129339d
--- /dev/null
+++ b/adzoo/bevformer/configs/_base_/models/votenet.py
@@ -0,0 +1,73 @@
+model = dict(
+    type='VoteNet',
+    backbone=dict(
+        type='PointNet2SASSG',
+        in_channels=4,
+        num_points=(2048, 1024, 512, 256),
+        radius=(0.2, 0.4, 0.8, 1.2),
+        num_samples=(64, 32, 16, 16),
+        sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256),
+                     (128, 128, 256)),
+        fp_channels=((256, 256), (256, 256)),
+        norm_cfg=dict(type='BN2d'),
+        sa_cfg=dict(
+            type='PointSAModule',
+            pool_mod='max',
+            use_xyz=True,
+            normalize_xyz=True)),
+    bbox_head=dict(
+        type='VoteHead',
+        vote_module_cfg=dict(
+            in_channels=256,
+            vote_per_seed=1,
+            gt_per_seed=3,
+            conv_channels=(256, 256),
+            conv_cfg=dict(type='Conv1d'),
+            norm_cfg=dict(type='BN1d'),
+            norm_feats=True,
+            vote_loss=dict(
+                type='ChamferDistance',
+                mode='l1',
+                reduction='none',
+                loss_dst_weight=10.0)),
+        vote_aggregation_cfg=dict(
+            type='PointSAModule',
+            num_point=256,
+            radius=0.3,
+            num_sample=16,
+            mlp_channels=[256, 128, 128, 128],
+            use_xyz=True,
+            normalize_xyz=True),
+        pred_layer_cfg=dict(
+            in_channels=128, shared_conv_channels=(128, 128), bias=True),
+        conv_cfg=dict(type='Conv1d'),
+        norm_cfg=dict(type='BN1d'),
+        objectness_loss=dict(
+            type='CrossEntropyLoss',
+            class_weight=[0.2, 0.8],
+            reduction='sum',
+            loss_weight=5.0),
+        center_loss=dict(
+            type='ChamferDistance',
+            mode='l2',
+            reduction='sum',
+            loss_src_weight=10.0,
+            loss_dst_weight=10.0),
+        dir_class_loss=dict(
+            type='CrossEntropyLoss', reduction='sum', loss_weight=1.0),
+        dir_res_loss=dict(
+            type='SmoothL1Loss', reduction='sum', loss_weight=10.0),
+        size_class_loss=dict(
+            type='CrossEntropyLoss', reduction='sum', loss_weight=1.0),
+        size_res_loss=dict(
+            type='SmoothL1Loss', reduction='sum', loss_weight=10.0 / 3.0),
+        semantic_loss=dict(
+            type='CrossEntropyLoss', reduction='sum', loss_weight=1.0)),
+    # model training and testing settings
+    train_cfg=dict(
+        pos_distance_thr=0.3, neg_distance_thr=0.6, sample_mod='vote'),
+    test_cfg=dict(
+        sample_mod='seed',
+        nms_thr=0.25,
+        score_thr=0.05,
+        per_class_proposal=True))
diff --git a/adzoo/bevformer/configs/_base_/schedules/cosine.py b/adzoo/bevformer/configs/_base_/schedules/cosine.py
new file mode 100644
index 0000000..69cb7df
--- /dev/null
+++ b/adzoo/bevformer/configs/_base_/schedules/cosine.py
@@ -0,0 +1,20 @@
+# This schedule is mainly used by models with dynamic voxelization
+# optimizer
+lr = 0.003  # max learning rate
+optimizer = dict(
+    type='AdamW',
+    lr=lr,
+    betas=(0.95, 0.99),  # the momentum is changed during training
+    weight_decay=0.001)
+optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2))
+
+lr_config = dict(
+    policy='CosineAnnealing',
+    warmup='linear',
+    warmup_iters=1000,
+    warmup_ratio=1.0 / 10,
+    min_lr_ratio=1e-5)
+
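+# A rough sketch (an assumption based on mmcv's standard linear-warmup and
+# CosineAnnealing hooks, not extra config) of the LR this yields at
+# iteration i out of max_iters:
+#     min_lr = lr * min_lr_ratio  # 0.003 * 1e-5 = 3e-08
+#     cur_lr = min_lr + 0.5 * (lr - min_lr) * (1 + math.cos(math.pi * i / max_iters))
+#     if i < 1000:  # linear warmup scales the LR by a factor rising from 0.1 to 1
+#         cur_lr *= 1 - (1 - i / 1000) * (1 - 1.0 / 10)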
+momentum_config = None
+
+runner = dict(type='EpochBasedRunner', max_epochs=40)
diff --git a/adzoo/bevformer/configs/_base_/schedules/cyclic_20e.py b/adzoo/bevformer/configs/_base_/schedules/cyclic_20e.py
new file mode 100644
index 0000000..704740e
--- /dev/null
+++ b/adzoo/bevformer/configs/_base_/schedules/cyclic_20e.py
@@ -0,0 +1,24 @@
+# For the nuScenes dataset, we usually evaluate the model at the end of
+# training. Since the models are trained for 20 epochs by default, we set the
+# evaluation interval to 20. Please change the interval accordingly if you do
+# not use the default schedule.
+# optimizer
+# This schedule is mainly used by models on the nuScenes dataset
+optimizer = dict(type='AdamW', lr=1e-4, weight_decay=0.01)
+# max_norm=10 is better for SECOND
+optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
+lr_config = dict(
+    policy='cyclic',
+    target_ratio=(10, 1e-4),
+    cyclic_times=1,
+    step_ratio_up=0.4,
+)
+momentum_config = dict(
+    policy='cyclic',
+    target_ratio=(0.85 / 0.95, 1),
+    cyclic_times=1,
+    step_ratio_up=0.4,
+)
+
+# runtime settings
+runner = dict(type='EpochBasedRunner', max_epochs=20)
diff --git a/adzoo/bevformer/configs/_base_/schedules/cyclic_40e.py b/adzoo/bevformer/configs/_base_/schedules/cyclic_40e.py
new file mode 100644
index 0000000..4a711ac
--- /dev/null
+++ b/adzoo/bevformer/configs/_base_/schedules/cyclic_40e.py
@@ -0,0 +1,31 @@
+# This schedule is usually used by models trained on the KITTI dataset.
+
+# The learning rate set in the cyclic schedule is the initial learning rate
+# rather than the max learning rate. Since the target_ratio is (10, 1e-4),
+# the learning rate will rise from 0.0018 to 0.018, then decay to 0.0018*1e-4.
+lr = 0.0018
+# The optimizer follows the setting in SECOND.Pytorch, but here we use
+# the official AdamW optimizer implemented by PyTorch.
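+# A worked example of the cyclic schedule configured below (illustrative only):
+# with target_ratio=(10, 1e-4) and step_ratio_up=0.4, the LR ramps up from
+# 0.0018 to 0.0018 * 10 = 0.018 over the first 40% of all iterations, then
+# anneals down to 0.0018 * 1e-4 = 1.8e-07 over the remaining 60%; momentum is
+# cycled in the opposite direction over the same two phases.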
+optimizer = dict(type='AdamW', lr=lr, betas=(0.95, 0.99), weight_decay=0.01)
+optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2))
+# We use cyclic learning rate and momentum schedule following SECOND.Pytorch
+# https://github.com/traveller59/second.pytorch/blob/3aba19c9688274f75ebb5e576f65cfe54773c021/torchplus/train/learning_schedules_fastai.py#L69  # noqa
+# We implement them in mmcv, for more details, please refer to
+# https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/lr_updater.py#L327  # noqa
+# https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/momentum_updater.py#L130  # noqa
+lr_config = dict(
+    policy='cyclic',
+    target_ratio=(10, 1e-4),
+    cyclic_times=1,
+    step_ratio_up=0.4,
+)
+momentum_config = dict(
+    policy='cyclic',
+    target_ratio=(0.85 / 0.95, 1),
+    cyclic_times=1,
+    step_ratio_up=0.4,
+)
+# Although max_epochs is 40, this schedule is usually used with
+# RepeatDataset with repeat ratio N, so the actual max epoch
+# number could be Nx40.
+runner = dict(type='EpochBasedRunner', max_epochs=40)
diff --git a/adzoo/bevformer/configs/_base_/schedules/mmdet_schedule_1x.py b/adzoo/bevformer/configs/_base_/schedules/mmdet_schedule_1x.py
new file mode 100644
index 0000000..13b3783
--- /dev/null
+++ b/adzoo/bevformer/configs/_base_/schedules/mmdet_schedule_1x.py
@@ -0,0 +1,11 @@
+# optimizer
+optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(
+    policy='step',
+    warmup='linear',
+    warmup_iters=500,
+    warmup_ratio=0.001,
+    step=[8, 11])
+runner = dict(type='EpochBasedRunner', max_epochs=12)
diff --git a/adzoo/bevformer/configs/_base_/schedules/schedule_2x.py b/adzoo/bevformer/configs/_base_/schedules/schedule_2x.py
new file mode 100644
index 0000000..afde799
--- /dev/null
+++ b/adzoo/bevformer/configs/_base_/schedules/schedule_2x.py
@@ -0,0 +1,14 @@
+# optimizer
+# This schedule is mainly used by models on the nuScenes dataset
+optimizer = dict(type='AdamW', lr=0.001, weight_decay=0.01)
+# max_norm=10 is better for SECOND
+optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
+lr_config = dict(
+    policy='step',
+    warmup='linear',
+    warmup_iters=1000,
+    warmup_ratio=1.0 / 1000,
+    step=[20, 23])
+momentum_config = None
+# runtime settings
+runner = dict(type='EpochBasedRunner', max_epochs=24)
diff --git a/adzoo/bevformer/configs/_base_/schedules/schedule_3x.py b/adzoo/bevformer/configs/_base_/schedules/schedule_3x.py
new file mode 100644
index 0000000..115cd26
--- /dev/null
+++ b/adzoo/bevformer/configs/_base_/schedules/schedule_3x.py
@@ -0,0 +1,9 @@
+# optimizer
+# This schedule is mainly used by models on indoor datasets,
+# e.g., VoteNet on SUNRGBD and ScanNet
+lr = 0.008  # max learning rate
+optimizer = dict(type='AdamW', lr=lr, weight_decay=0.01)
+optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2))
+lr_config = dict(policy='step', warmup=None, step=[24, 32])
+# runtime settings
+runner = dict(type='EpochBasedRunner', max_epochs=36)
diff --git a/adzoo/bevformer/configs/_base_/schedules/seg_cosine_150e.py b/adzoo/bevformer/configs/_base_/schedules/seg_cosine_150e.py
new file mode 100644
index 0000000..04b44e5
--- /dev/null
+++ b/adzoo/bevformer/configs/_base_/schedules/seg_cosine_150e.py
@@ -0,0 +1,9 @@
+# optimizer
+# This schedule is mainly used on the S3DIS dataset for the segmentation task
+optimizer = dict(type='SGD', lr=0.2, weight_decay=0.0001, momentum=0.9)
+optimizer_config = dict(grad_clip=None)
+lr_config = dict(policy='CosineAnnealing', warmup=None, min_lr=0.002)
+momentum_config = None
+
+# runtime settings
+runner = dict(type='EpochBasedRunner', max_epochs=150)
diff --git a/adzoo/bevformer/configs/_base_/schedules/seg_cosine_200e.py b/adzoo/bevformer/configs/_base_/schedules/seg_cosine_200e.py
new file mode 100644
index 0000000..6a49484
--- /dev/null
+++ b/adzoo/bevformer/configs/_base_/schedules/seg_cosine_200e.py
@@ -0,0 +1,9 @@
+# optimizer
+# This schedule is mainly used on the ScanNet dataset for the segmentation task
+optimizer = dict(type='Adam', lr=0.001, weight_decay=0.01)
+optimizer_config = dict(grad_clip=None)
+lr_config = dict(policy='CosineAnnealing', warmup=None, min_lr=1e-5)
+momentum_config = None
+
+# runtime settings
+runner = dict(type='EpochBasedRunner', max_epochs=200)
diff --git a/adzoo/bevformer/configs/_base_/schedules/seg_cosine_50e.py b/adzoo/bevformer/configs/_base_/schedules/seg_cosine_50e.py
new file mode 100644
index 0000000..975a8f9
--- /dev/null
+++ b/adzoo/bevformer/configs/_base_/schedules/seg_cosine_50e.py
@@ -0,0 +1,9 @@
+# optimizer
+# This schedule is mainly used on the S3DIS dataset for the segmentation task
+optimizer = dict(type='Adam', lr=0.001, weight_decay=0.001)
+optimizer_config = dict(grad_clip=None)
+lr_config = dict(policy='CosineAnnealing', warmup=None, min_lr=1e-5)
+momentum_config = None
+
+# runtime settings
+runner = dict(type='EpochBasedRunner', max_epochs=50)
diff --git a/adzoo/bevformer/configs/bevformer/bevformer_base.py b/adzoo/bevformer/configs/bevformer/bevformer_base.py
new file mode 100644
index 0000000..c67c978
--- /dev/null
+++ b/adzoo/bevformer/configs/bevformer/bevformer_base.py
@@ -0,0 +1,260 @@
+_base_ = [
+    '../datasets/custom_nus-3d.py',
+    '../_base_/default_runtime.py'
+]
+#
+plugin = True
+plugin_dir = 'projects/mmdet3d_plugin/'
+
+# If point cloud range is changed, the models should also change their point
+# cloud range accordingly
+point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
+voxel_size = [0.2, 0.2, 8]
+
+
+img_norm_cfg = dict(
+    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
+# For nuScenes we usually do 10-class detection
+class_names = [
+    'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
+    'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
+]
+
+input_modality = dict(
+    use_lidar=False,
+    use_camera=True,
+    use_radar=False,
+    use_map=False,
+    use_external=True)
+
+_dim_ = 256
+_pos_dim_ = _dim_//2
+_ffn_dim_ = _dim_*2
+_num_levels_ = 4
+bev_h_ = 200
+bev_w_ = 200
+queue_length = 4  # each sequence contains `queue_length` frames.
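+# Sizes implied by the settings above (a sanity-check sketch, not extra config):
+#   BEV queries: bev_h_ * bev_w_ = 200 * 200 = 40000 embeddings of width _dim_ = 256
+#   positional encoding: learned row and col embeddings of _pos_dim_ = 128 each,
+#     concatenated back to 256; encoder/decoder FFN hidden width _ffn_dim_ = 512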
+num_cams = 6
+model = dict(
+    type='BEVFormer',
+    use_grid_mask=True,
+    video_test_mode=True,
+    img_backbone=dict(
+        type='ResNet',
+        depth=101,
+        num_stages=4,
+        out_indices=(1, 2, 3),
+        frozen_stages=1,
+        norm_cfg=dict(type='BN2d', requires_grad=False),
+        norm_eval=True,
+        style='caffe',
+        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),  # the original DCNv2 prints a log message when load_state_dict is called
+        stage_with_dcn=(False, False, True, True)),
+    img_neck=dict(
+        type='FPN',
+        in_channels=[512, 1024, 2048],
+        out_channels=_dim_,
+        start_level=0,
+        add_extra_convs='on_output',
+        num_outs=4,
+        relu_before_extra_convs=True),
+    pts_bbox_head=dict(
+        type='BEVFormerHead',
+        bev_h=bev_h_,
+        bev_w=bev_w_,
+        num_query=900,
+        num_classes=10,
+        in_channels=_dim_,
+        sync_cls_avg_factor=True,
+        with_box_refine=True,
+        as_two_stage=False,
+        transformer=dict(
+            type='BEVFormerPerceptionTransformer',
+            num_cams=num_cams,
+            rotate_prev_bev=True,
+            use_shift=True,
+            use_can_bus=True,
+            embed_dims=_dim_,
+            encoder=dict(
+                type='BEVFormerEncoder',
+                num_layers=6,
+                pc_range=point_cloud_range,
+                num_points_in_pillar=4,
+                return_intermediate=False,
+                transformerlayers=dict(
+                    type='BEVFormerLayer',
+                    attn_cfgs=[
+                        dict(
+                            type='TemporalSelfAttention',
+                            embed_dims=_dim_,
+                            num_levels=1),
+                        dict(
+                            type='SpatialCrossAttention',
+                            num_cams=num_cams,
+                            pc_range=point_cloud_range,
+                            deformable_attention=dict(
+                                type='MSDeformableAttention3D',
+                                embed_dims=_dim_,
+                                num_points=8,
+                                num_levels=_num_levels_),
+                            embed_dims=_dim_,
+                        )
+                    ],
+                    feedforward_channels=_ffn_dim_,
+                    ffn_dropout=0.1,
+                    operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
+                                     'ffn', 'norm'))),
+            decoder=dict(
+                type='DetectionTransformerDecoder',
+                num_layers=6,
+                return_intermediate=True,
+                transformerlayers=dict(
+                    type='DetrTransformerDecoderLayer',
+                    attn_cfgs=[
+                        dict(
+                            type='MultiheadAttention',
+                            embed_dims=_dim_,
+                            num_heads=8,
+                            dropout=0.1),
+                        dict(
+                            type='CustomMSDeformableAttention',
+                            embed_dims=_dim_,
+                            num_levels=1),
+                    ],
+                    feedforward_channels=_ffn_dim_,
+                    ffn_dropout=0.1,
+                    operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
+                                     'ffn', 'norm')))),
+        bbox_coder=dict(
+            type='NMSFreeCoder',
+            post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
+            pc_range=point_cloud_range,
+            max_num=300,
+            voxel_size=voxel_size,
+            num_classes=10),
+        positional_encoding=dict(
+            type='LearnedPositionalEncoding',
+            num_feats=_pos_dim_,
+            row_num_embed=bev_h_,
+            col_num_embed=bev_w_,
+        ),
+        loss_cls=dict(
+            type='FocalLoss',
+            use_sigmoid=True,
+            gamma=2.0,
+            alpha=0.25,
+            loss_weight=2.0),
+        loss_bbox=dict(type='L1Loss', loss_weight=0.25),
+        loss_iou=dict(type='GIoULoss', loss_weight=0.0)),
+    # model training and testing settings
+    train_cfg=dict(pts=dict(
+        grid_size=[512, 512, 1],
+        voxel_size=voxel_size,
+        point_cloud_range=point_cloud_range,
+        out_size_factor=4,
+        assigner=dict(
+            type='HungarianAssigner3D',
+            cls_cost=dict(type='FocalLossCost', weight=2.0),
+            reg_cost=dict(type='BBox3DL1Cost', weight=0.25),
+            iou_cost=dict(type='IoUCost', weight=0.0),  # Fake cost. This is just to make it compatible with the DETR head.
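+            # With the IoU term zeroed out, the assignment cost that is
+            # actually minimized is 2.0 * focal classification cost +
+            # 0.25 * L1 box cost per query-GT pair.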
+ pc_range=point_cloud_range)))) + +dataset_type = 'CustomNuScenesDataset' +data_root = 'data/nuscenes/' +anno_root = 'data/infos/' +file_client_args = dict(backend='disk') + + +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='PhotoMetricDistortionMultiViewImage'), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img']) +] + +test_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='CustomCollect3D', keys=['img']) + ]) +] + +data = dict( + samples_per_gpu=1, + workers_per_gpu=1, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=anno_root + 'nuscenes_infos_temporal_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + use_valid_flag=True, + bev_size=(bev_h_, bev_w_), + queue_length=queue_length, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR'), + val=dict(type=dataset_type, + data_root=data_root, + ann_file=anno_root + 'nuscenes_infos_temporal_val.pkl', + pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), + classes=class_names, modality=input_modality, samples_per_gpu=1), + test=dict(type=dataset_type, + data_root=data_root, + ann_file=anno_root + 'nuscenes_infos_temporal_val.pkl', + pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), + classes=class_names, modality=input_modality), + shuffler_sampler=dict(type='DistributedGroupSampler'), + nonshuffler_sampler=dict(type='DistributedSampler') +) + +optimizer = dict( + type='AdamW', + lr=2e-4, + paramwise_cfg=dict( + custom_keys={ + 'img_backbone': dict(lr_mult=0.1), + }), + weight_decay=0.01) + +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + min_lr_ratio=1e-3) +total_epochs = 24 +evaluation = dict(interval=1, pipeline=test_pipeline) + +runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) +load_from = 'ckpts/r101_dcn_fcos3d_pretrain.pth' +log_config = dict( + interval=1, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook') + ]) + +checkpoint_config = dict(interval=1) diff --git a/adzoo/bevformer/configs/bevformer/bevformer_base_b2d.py b/adzoo/bevformer/configs/bevformer/bevformer_base_b2d.py new file mode 100644 index 0000000..f987e1b --- /dev/null +++ b/adzoo/bevformer/configs/bevformer/bevformer_base_b2d.py @@ -0,0 +1,363 @@ +_base_ = [ + '../datasets/custom_nus-3d.py', + '../_base_/default_runtime.py' +] +# +plugin = True +plugin_dir = 'projects/mmdet3d_plugin/' + +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-51.2, -51.2, -5.0, 
51.2, 51.2, 3.0]
+voxel_size = [0.2, 0.2, 8]
+
+
+NameMapping = {
+    #=================vehicle=================
+    # bicycle
+    'vehicle.bh.crossbike': 'bicycle',
+    "vehicle.diamondback.century": 'bicycle',
+    "vehicle.gazelle.omafiets": 'bicycle',
+    # car
+    "vehicle.chevrolet.impala": 'car',
+    "vehicle.dodge.charger_2020": 'car',
+    "vehicle.dodge.charger_police": 'car',
+    "vehicle.dodge.charger_police_2020": 'car',
+    "vehicle.lincoln.mkz_2017": 'car',
+    "vehicle.lincoln.mkz_2020": 'car',
+    "vehicle.mini.cooper_s_2021": 'car',
+    "vehicle.mercedes.coupe_2020": 'car',
+    "vehicle.ford.mustang": 'car',
+    "vehicle.nissan.patrol_2021": 'car',
+    "vehicle.audi.tt": 'car',
+    "vehicle.audi.etron": 'car',
+    "vehicle.ford.crown": 'car',
+    "vehicle.tesla.model3": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/FordCrown/SM_FordCrown_parked.SM_FordCrown_parked": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Charger/SM_ChargerParked.SM_ChargerParked": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Lincoln/SM_LincolnParked.SM_LincolnParked": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/MercedesCCC/SM_MercedesCCC_Parked.SM_MercedesCCC_Parked": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Mini2021/SM_Mini2021_parked.SM_Mini2021_parked": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/NissanPatrol2021/SM_NissanPatrol2021_parked.SM_NissanPatrol2021_parked": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/TeslaM3/SM_TeslaM3_parked.SM_TeslaM3_parked": 'car',
+    # bus
+    # van
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/VolkswagenT2/SM_VolkswagenT2_2021_Parked.SM_VolkswagenT2_2021_Parked": "van",
+    "vehicle.ford.ambulance": "van",
+    # truck
+    "vehicle.carlamotors.firetruck": 'truck',
+    #=========================================
+
+    #=================traffic sign============
+    # traffic.speed_limit
+    "traffic.speed_limit.30": 'traffic_sign',
+    "traffic.speed_limit.40": 'traffic_sign',
+    "traffic.speed_limit.50": 'traffic_sign',
+    "traffic.speed_limit.60": 'traffic_sign',
+    "traffic.speed_limit.90": 'traffic_sign',
+    "traffic.speed_limit.120": 'traffic_sign',
+
+    "traffic.stop": 'traffic_sign',
+    "traffic.yield": 'traffic_sign',
+    "traffic.traffic_light": 'traffic_light',
+    #=========================================
+
+    #===================Construction===========
+    "static.prop.warningconstruction": 'traffic_cone',
+    "static.prop.warningaccident": 'traffic_cone',
+    "static.prop.trafficwarning": "traffic_cone",
+
+    #===================Construction===========
+    "static.prop.constructioncone": 'traffic_cone',
+
+    #=================pedestrian==============
+    "walker.pedestrian.0001": 'pedestrian',
+    "walker.pedestrian.0004": 'pedestrian',
+    "walker.pedestrian.0005": 'pedestrian',
+    "walker.pedestrian.0007": 'pedestrian',
+    "walker.pedestrian.0013": 'pedestrian',
+    "walker.pedestrian.0014": 'pedestrian',
+    "walker.pedestrian.0017": 'pedestrian',
+    "walker.pedestrian.0018": 'pedestrian',
+    "walker.pedestrian.0019": 'pedestrian',
+    "walker.pedestrian.0020": 'pedestrian',
+    "walker.pedestrian.0022": 'pedestrian',
+    "walker.pedestrian.0025": 'pedestrian',
+    "walker.pedestrian.0035": 'pedestrian',
+    "walker.pedestrian.0041": 'pedestrian',
+    "walker.pedestrian.0046": 'pedestrian',
+    "walker.pedestrian.0047": 'pedestrian',
+
+    # ==========================================
"static.prop.dirtdebris01": 'others', + "static.prop.dirtdebris02": 'others', +} + + + + +eval_cfg = { + "dist_ths": [0.5, 1.0, 2.0, 4.0], + "dist_th_tp": 2.0, + "min_recall": 0.1, + "min_precision": 0.1, + "mean_ap_weight": 5, + "class_names":['car','van','truck','bicycle','traffic_sign','traffic_cone','traffic_light','pedestrian'], + "tp_metrics":['trans_err', 'scale_err', 'orient_err', 'vel_err'], + "err_name_maping":{'trans_err': 'mATE','scale_err': 'mASE','orient_err': 'mAOE','vel_err': 'mAVE','attr_err': 'mAAE'}, + "class_range":{'car':(50,50),'van':(50,50),'truck':(50,50),'bicycle':(40,40),'traffic_sign':(30,30),'traffic_cone':(30,30),'traffic_light':(30,30),'pedestrian':(40,40)} + } + +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) + +class_names = [ +'car','van','truck','bicycle','traffic_sign','traffic_cone','traffic_light','pedestrian','others' +] + +input_modality = dict( + use_lidar=False, + use_camera=True, + use_radar=False, + use_map=False, + use_external=True) + +_dim_ = 256 +_pos_dim_ = _dim_//2 +_ffn_dim_ = _dim_*2 +_num_levels_ = 4 +bev_h_ = 200 +bev_w_ = 200 +queue_length = 4 # each sequence contains `queue_length` frames. + +model = dict( + type='BEVFormer', + use_grid_mask=True, + video_test_mode=True, + img_backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN2d', requires_grad=False), + norm_eval=True, + style='caffe', + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), # original DCNv2 will print log when perform load_state_dict + stage_with_dcn=(False, False, True, True)), + img_neck=dict( + type='FPN', + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=4, + relu_before_extra_convs=True), + pts_bbox_head=dict( + type='BEVFormerHead', + bev_h=bev_h_, + bev_w=bev_w_, + num_query=900, + num_classes=len(class_names), + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + transformer=dict( + type='BEVFormerPerceptionTransformer', + rotate_prev_bev=True, + use_shift=True, + use_can_bus=True, + embed_dims=_dim_, + encoder=dict( + type='BEVFormerEncoder', + num_layers=6, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type='BEVFormerLayer', + attn_cfgs=[ + dict( + type='TemporalSelfAttention', + embed_dims=_dim_, + num_levels=1), + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=8, + num_levels=_num_levels_), + embed_dims=_dim_, + ) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm'))), + decoder=dict( + type='DetectionTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + dict( + type='CustomMSDeformableAttention', + embed_dims=_dim_, + num_levels=1), + ], + + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')))), + bbox_coder=dict( + type='NMSFreeCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=len(class_names)), + 
positional_encoding=dict( + type='LearnedPositionalEncoding', + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + ), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='L1Loss', loss_weight=0.25), + loss_iou=dict(type='GIoULoss', loss_weight=0.0)), + # model training and testing settings + train_cfg=dict(pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBox3DL1Cost', weight=0.25), + iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. + pc_range=point_cloud_range)))) + +dataset_type = "B2D_Dataset" +data_root = "data/bench2drive" +info_root = "data/infos" +file_client_args = dict(backend="disk") +ann_file_train=info_root + f"/b2d_infos_train.pkl" +ann_file_val=info_root + f"/b2d_infos_val.pkl" +ann_file_test=info_root + f"/b2d_infos_val.pkl" + + +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='PhotoMetricDistortionMultiViewImage'), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img']) +] + +test_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='CustomCollect3D', keys=['img']) + ]) +] + +data = dict( + samples_per_gpu=1, + workers_per_gpu=6, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=ann_file_train, + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + bev_size=(bev_h_, bev_w_), + queue_length=queue_length, + sample_interval=5, + name_mapping=NameMapping, + eval_cfg=eval_cfg, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
+        box_type_3d='LiDAR'),
+    val=dict(type=dataset_type,
+             data_root=data_root,
+             ann_file=ann_file_val,
+             pipeline=test_pipeline, bev_size=(bev_h_, bev_w_),
+             classes=class_names, modality=input_modality, samples_per_gpu=1,
+             sample_interval=5, name_mapping=NameMapping, eval_cfg=eval_cfg),
+    test=dict(type=dataset_type,
+              data_root=data_root,
+              ann_file=ann_file_val,
+              pipeline=test_pipeline, bev_size=(bev_h_, bev_w_),
+              classes=class_names, modality=input_modality, sample_interval=5,
+              name_mapping=NameMapping, eval_cfg=eval_cfg),
+    shuffler_sampler=dict(type='DistributedGroupSampler'),
+    nonshuffler_sampler=dict(type='DistributedSampler')
+)
+
+optimizer = dict(
+    type='AdamW',
+    lr=2e-4,
+    paramwise_cfg=dict(
+        custom_keys={
+            'img_backbone': dict(lr_mult=0.1),
+        }),
+    weight_decay=0.01)
+
+optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
+# learning policy
+lr_config = dict(
+    policy='CosineAnnealing',
+    warmup='linear',
+    warmup_iters=500,
+    warmup_ratio=1.0 / 3,
+    min_lr_ratio=1e-3)
+total_epochs = 24
+evaluation = dict(interval=1, pipeline=test_pipeline)
+
+runner = dict(type='EpochBasedRunner', max_epochs=total_epochs)
+load_from = 'ckpts/r101_dcn_fcos3d_pretrain.pth'
+log_config = dict(
+    interval=1,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHook')
+    ])
+
+checkpoint_config = dict(interval=1)
diff --git a/adzoo/bevformer/configs/bevformer/bevformer_tiny.py b/adzoo/bevformer/configs/bevformer/bevformer_tiny.py
new file mode 100644
index 0000000..78858ee
--- /dev/null
+++ b/adzoo/bevformer/configs/bevformer/bevformer_tiny.py
@@ -0,0 +1,270 @@
+# BEVFormer-tiny consumes at least 6700M GPU memory
+# compared to bevformer_base, bevformer_tiny has
+# a smaller backbone: R101-DCN -> R50
+# a smaller BEV: 200*200 -> 50*50
+# fewer encoder layers: 6 -> 3
+# a smaller input size: 1600*900 -> 800*450
+# multi-scale features -> single-scale features (C5)
+
+
+_base_ = [
+    '../datasets/custom_nus-3d.py',
+    '../_base_/default_runtime.py'
+]
+#
+plugin = True
+plugin_dir = 'projects/mmdet3d_plugin/'
+
+# If point cloud range is changed, the models should also change their point
+# cloud range accordingly
+point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
+voxel_size = [0.2, 0.2, 8]
+
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+# For nuScenes we usually do 10-class detection
+class_names = [
+    'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
+    'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
+]
+
+input_modality = dict(
+    use_lidar=False,
+    use_camera=True,
+    use_radar=False,
+    use_map=False,
+    use_external=True)
+
+_dim_ = 256
+_pos_dim_ = _dim_//2
+_ffn_dim_ = _dim_*2
+_num_levels_ = 1
+bev_h_ = 50
+bev_w_ = 50
+queue_length = 3  # each sequence contains `queue_length` frames.
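+# Relative scale after the reductions listed in the header above (a rough
+# sketch): 50 * 50 = 2500 BEV queries vs 200 * 200 = 40000 in bevformer_base
+# (16x fewer), and a single C5 feature level (_num_levels_ = 1) replaces the
+# four FPN levels used by the base model.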
+ +model = dict( + type='BEVFormer', + use_grid_mask=True, + video_test_mode=True, + pretrained=dict(img='torchvision://resnet50'), + img_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3,), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch'), + img_neck=dict( + type='FPN', + in_channels=[2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=_num_levels_, + relu_before_extra_convs=True), + pts_bbox_head=dict( + type='BEVFormerHead', + bev_h=bev_h_, + bev_w=bev_w_, + num_query=900, + num_classes=10, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + transformer=dict( + type='PerceptionTransformer', + rotate_prev_bev=True, + use_shift=True, + use_can_bus=True, + embed_dims=_dim_, + encoder=dict( + type='BEVFormerEncoder', + num_layers=3, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type='BEVFormerLayer', + attn_cfgs=[ + dict( + type='TemporalSelfAttention', + embed_dims=_dim_, + num_levels=1), + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=8, + num_levels=_num_levels_), + embed_dims=_dim_, + ) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm'))), + decoder=dict( + type='DetectionTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + dict( + type='CustomMSDeformableAttention', + embed_dims=_dim_, + num_levels=1), + ], + + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')))), + bbox_coder=dict( + type='NMSFreeCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=10), + positional_encoding=dict( + type='LearnedPositionalEncoding', + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + ), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='L1Loss', loss_weight=0.25), + loss_iou=dict(type='GIoULoss', loss_weight=0.0)), + # model training and testing settings + train_cfg=dict(pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBox3DL1Cost', weight=0.25), + iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. 
+ pc_range=point_cloud_range)))) + +dataset_type = 'CustomNuScenesDataset' +data_root = 'data/nuscenes/' +file_client_args = dict(backend='disk') + + +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='PhotoMetricDistortionMultiViewImage'), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img']) +] + +test_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + + dict( + type='MultiScaleFlipAug3D', + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='CustomCollect3D', keys=['img']) + ]) +] + +data = dict( + samples_per_gpu=1, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_temporal_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + use_valid_flag=True, + bev_size=(bev_h_, bev_w_), + queue_length=queue_length, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
+        box_type_3d='LiDAR'),
+    val=dict(type=dataset_type,
+             data_root=data_root,
+             ann_file=data_root + 'nuscenes_infos_temporal_val.pkl',
+             pipeline=test_pipeline, bev_size=(bev_h_, bev_w_),
+             classes=class_names, modality=input_modality, samples_per_gpu=1),
+    test=dict(type=dataset_type,
+              data_root=data_root,
+              ann_file=data_root + 'nuscenes_infos_temporal_val.pkl',
+              pipeline=test_pipeline, bev_size=(bev_h_, bev_w_),
+              classes=class_names, modality=input_modality),
+    shuffler_sampler=dict(type='DistributedGroupSampler'),
+    nonshuffler_sampler=dict(type='DistributedSampler')
+)
+
+optimizer = dict(
+    type='AdamW',
+    lr=2e-4,
+    paramwise_cfg=dict(
+        custom_keys={
+            'img_backbone': dict(lr_mult=0.1),
+        }),
+    weight_decay=0.01)
+
+optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
+# learning policy
+lr_config = dict(
+    policy='CosineAnnealing',
+    warmup='linear',
+    warmup_iters=500,
+    warmup_ratio=1.0 / 3,
+    min_lr_ratio=1e-3)
+total_epochs = 24
+evaluation = dict(interval=1, pipeline=test_pipeline)
+
+runner = dict(type='EpochBasedRunner', max_epochs=total_epochs)
+
+log_config = dict(
+    interval=50,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHook')
+    ])
+
+checkpoint_config = dict(interval=1)
diff --git a/adzoo/bevformer/configs/bevformer/bevformer_tiny_b2d.py b/adzoo/bevformer/configs/bevformer/bevformer_tiny_b2d.py
new file mode 100644
index 0000000..d4f92f7
--- /dev/null
+++ b/adzoo/bevformer/configs/bevformer/bevformer_tiny_b2d.py
@@ -0,0 +1,360 @@
+_base_ = [
+    '../datasets/custom_nus-3d.py',
+    '../_base_/default_runtime.py'
+]
+#
+plugin = True
+plugin_dir = 'projects/mmdet3d_plugin/'
+
+# If point cloud range is changed, the models should also change their point
+# cloud range accordingly
+point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
+voxel_size = [0.2, 0.2, 8]
+
+
+NameMapping = {
+    #=================vehicle=================
+    # bicycle
+    'vehicle.bh.crossbike': 'bicycle',
+    "vehicle.diamondback.century": 'bicycle',
+    "vehicle.gazelle.omafiets": 'bicycle',
+    # car
+    "vehicle.chevrolet.impala": 'car',
+    "vehicle.dodge.charger_2020": 'car',
+    "vehicle.dodge.charger_police": 'car',
+    "vehicle.dodge.charger_police_2020": 'car',
+    "vehicle.lincoln.mkz_2017": 'car',
+    "vehicle.lincoln.mkz_2020": 'car',
+    "vehicle.mini.cooper_s_2021": 'car',
+    "vehicle.mercedes.coupe_2020": 'car',
+    "vehicle.ford.mustang": 'car',
+    "vehicle.nissan.patrol_2021": 'car',
+    "vehicle.audi.tt": 'car',
+    "vehicle.audi.etron": 'car',
+    "vehicle.ford.crown": 'car',
+    "vehicle.tesla.model3": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/FordCrown/SM_FordCrown_parked.SM_FordCrown_parked": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Charger/SM_ChargerParked.SM_ChargerParked": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Lincoln/SM_LincolnParked.SM_LincolnParked": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/MercedesCCC/SM_MercedesCCC_Parked.SM_MercedesCCC_Parked": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Mini2021/SM_Mini2021_parked.SM_Mini2021_parked": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/NissanPatrol2021/SM_NissanPatrol2021_parked.SM_NissanPatrol2021_parked": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/TeslaM3/SM_TeslaM3_parked.SM_TeslaM3_parked": 'car',
+    # bus
+    # van
+
"/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/VolkswagenT2/SM_VolkswagenT2_2021_Parked.SM_VolkswagenT2_2021_Parked": "van", + "vehicle.ford.ambulance": "van", + # truck + "vehicle.carlamotors.firetruck": 'truck', + #========================================= + + #=================traffic sign============ + # traffic.speed_limit + "traffic.speed_limit.30": 'traffic_sign', + "traffic.speed_limit.40": 'traffic_sign', + "traffic.speed_limit.50": 'traffic_sign', + "traffic.speed_limit.60": 'traffic_sign', + "traffic.speed_limit.90": 'traffic_sign', + "traffic.speed_limit.120": 'traffic_sign', + + "traffic.stop": 'traffic_sign', + "traffic.yield": 'traffic_sign', + "traffic.traffic_light": 'traffic_light', + #========================================= + + #===================Construction=========== + "static.prop.warningconstruction" : 'traffic_cone', + "static.prop.warningaccident": 'traffic_cone', + "static.prop.trafficwarning": "traffic_cone", + + #===================Construction=========== + "static.prop.constructioncone": 'traffic_cone', + + #=================pedestrian============== + "walker.pedestrian.0001": 'pedestrian', + "walker.pedestrian.0004": 'pedestrian', + "walker.pedestrian.0005": 'pedestrian', + "walker.pedestrian.0007": 'pedestrian', + "walker.pedestrian.0013": 'pedestrian', + "walker.pedestrian.0014": 'pedestrian', + "walker.pedestrian.0017": 'pedestrian', + "walker.pedestrian.0018": 'pedestrian', + "walker.pedestrian.0019": 'pedestrian', + "walker.pedestrian.0020": 'pedestrian', + "walker.pedestrian.0022": 'pedestrian', + "walker.pedestrian.0025": 'pedestrian', + "walker.pedestrian.0035": 'pedestrian', + "walker.pedestrian.0041": 'pedestrian', + "walker.pedestrian.0046": 'pedestrian', + "walker.pedestrian.0047": 'pedestrian', + + # ========================================== + "static.prop.dirtdebris01": 'others', + "static.prop.dirtdebris02": 'others', +} + + + + +eval_cfg = { + "dist_ths": [0.5, 1.0, 2.0, 4.0], + "dist_th_tp": 2.0, + "min_recall": 0.1, + "min_precision": 0.1, + "mean_ap_weight": 5, + "class_names":['car','van','truck','bicycle','traffic_sign','traffic_cone','traffic_light','pedestrian'], + "tp_metrics":['trans_err', 'scale_err', 'orient_err', 'vel_err'], + "err_name_maping":{'trans_err': 'mATE','scale_err': 'mASE','orient_err': 'mAOE','vel_err': 'mAVE','attr_err': 'mAAE'}, + "class_range":{'car':(50,50),'van':(50,50),'truck':(50,50),'bicycle':(40,40),'traffic_sign':(30,30),'traffic_cone':(30,30),'traffic_light':(30,30),'pedestrian':(40,40)} + } + +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) + +class_names = [ +'car','van','truck','bicycle','traffic_sign','traffic_cone','traffic_light','pedestrian','others' +] + +input_modality = dict( + use_lidar=False, + use_camera=True, + use_radar=False, + use_map=False, + use_external=True) + +_dim_ = 256 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 +_num_levels_ = 4 +bev_h_ = 100 +bev_w_ = 100 +queue_length = 3 # each sequence contains `queue_length` frames. 
+ +model = dict( + type='BEVFormer', + use_grid_mask=True, + video_test_mode=True, + img_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(1,2,3), + frozen_stages=4, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch'), + img_neck=dict( + type='FPN', + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=_num_levels_, + relu_before_extra_convs=True), + pts_bbox_head=dict( + type='BEVFormerHead', + bev_h=bev_h_, + bev_w=bev_w_, + num_query=900, + num_classes=len(class_names), + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + transformer=dict( + type='BEVFormerPerceptionTransformer', + rotate_prev_bev=True, + use_shift=True, + use_can_bus=True, + embed_dims=_dim_, + encoder=dict( + type='BEVFormerEncoder', + num_layers=3, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type='BEVFormerLayer', + attn_cfgs=[ + dict( + type='TemporalSelfAttention', + embed_dims=_dim_, + num_levels=1), + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=8, + num_levels=_num_levels_), + embed_dims=_dim_, + ) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.0, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm'))), + decoder=dict( + type='DetectionTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=_dim_//32, + dropout=0.0), + dict( + type='CustomMSDeformableAttention', + embed_dims=_dim_, + num_levels=1), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.0, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')))), + bbox_coder=dict( + type='NMSFreeCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=len(class_names)), + positional_encoding=dict( + type='LearnedPositionalEncoding', + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + ), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='L1Loss', loss_weight=0.25), + loss_iou=dict(type='GIoULoss', loss_weight=0.0)), + # model training and testing settings + train_cfg=dict(pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBox3DL1Cost', weight=0.25), + iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. 
+ pc_range=point_cloud_range)))) + +dataset_type = "B2D_Dataset" +data_root = "data/bench2drive" +info_root = "data/infos" +file_client_args = dict(backend="disk") +ann_file_train=info_root + f"/b2d_infos_train.pkl" +ann_file_val=info_root + f"/b2d_infos_val.pkl" +ann_file_test=info_root + f"/b2d_infos_val.pkl" + + +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='PhotoMetricDistortionMultiViewImage'), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img']) +] + +test_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='CustomCollect3D', keys=['img']) + ]) +] + +data = dict( + samples_per_gpu=1, + workers_per_gpu=6, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=ann_file_train, + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + bev_size=(bev_h_, bev_w_), + queue_length=queue_length, + sample_interval=5, + name_mapping=NameMapping, + eval_cfg=eval_cfg, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
+        box_type_3d='LiDAR'),
+    val=dict(type=dataset_type,
+             data_root=data_root,
+             ann_file=ann_file_val,
+             pipeline=test_pipeline, bev_size=(bev_h_, bev_w_),
+             classes=class_names, modality=input_modality, samples_per_gpu=1,
+             sample_interval=5, name_mapping=NameMapping, eval_cfg=eval_cfg),
+    test=dict(type=dataset_type,
+              data_root=data_root,
+              ann_file=ann_file_val,
+              pipeline=test_pipeline, bev_size=(bev_h_, bev_w_),
+              classes=class_names, modality=input_modality, sample_interval=5,
+              name_mapping=NameMapping, eval_cfg=eval_cfg),
+    shuffler_sampler=dict(type='DistributedGroupSampler'),
+    nonshuffler_sampler=dict(type='DistributedSampler')
+)
+
+optimizer = dict(
+    type='AdamW',
+    lr=2e-4,
+    paramwise_cfg=dict(
+        custom_keys={
+            'img_backbone': dict(lr_mult=0.1),
+        }),
+    weight_decay=0.01)
+
+optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
+# learning policy
+lr_config = dict(
+    by_epoch=False,
+    policy='CosineAnnealing',
+    warmup='linear',
+    warmup_iters=500,
+    warmup_ratio=1.0 / 3,
+    min_lr_ratio=1e-3)
+total_epochs = 1
+evaluation = dict(interval=1, pipeline=test_pipeline)
+
+runner = dict(type='EpochBasedRunner', max_epochs=total_epochs)
+log_config = dict(
+    interval=50,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHook')
+    ])
+
+checkpoint_config = dict(interval=3000, by_epoch=False)
diff --git a/adzoo/bevformer/configs/bevformer_fp16/bevformer_tiny_fp16.py b/adzoo/bevformer/configs/bevformer_fp16/bevformer_tiny_fp16.py
new file mode 100644
index 0000000..aa1e043
--- /dev/null
+++ b/adzoo/bevformer/configs/bevformer_fp16/bevformer_tiny_fp16.py
@@ -0,0 +1,272 @@
+# BEVFormer-tiny consumes at least 6700M GPU memory
+# compared to bevformer_base, bevformer_tiny has
+# a smaller backbone: R101-DCN -> R50
+# a smaller BEV: 200*200 -> 50*50
+# fewer encoder layers: 6 -> 3
+# a smaller input size: 1600*900 -> 800*450
+# multi-scale features -> single-scale features (C5)
+
+
+_base_ = [
+    '../datasets/custom_nus-3d.py',
+    '../_base_/default_runtime.py'
+]
+#
+plugin = True
+plugin_dir = 'projects/mmdet3d_plugin/'
+
+# If point cloud range is changed, the models should also change their point
+# cloud range accordingly
+point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
+voxel_size = [0.2, 0.2, 8]
+
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+# For nuScenes we usually do 10-class detection
+class_names = [
+    'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
+    'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
+]
+
+input_modality = dict(
+    use_lidar=False,
+    use_camera=True,
+    use_radar=False,
+    use_map=False,
+    use_external=True)
+
+_dim_ = 256
+_pos_dim_ = _dim_//2
+_ffn_dim_ = _dim_*2
+_num_levels_ = 1
+bev_h_ = 50
+bev_w_ = 50
+queue_length = 3  # each sequence contains `queue_length` frames.
+ +model = dict( + type='BEVFormer_fp16', + use_grid_mask=True, + video_test_mode=True, + pretrained=dict(img='torchvision://resnet50'), + img_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3,), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch'), + img_neck=dict( + type='FPN', + in_channels=[2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=_num_levels_, + relu_before_extra_convs=True), + pts_bbox_head=dict( + type='BEVFormerHead', + bev_h=bev_h_, + bev_w=bev_w_, + num_query=900, + num_classes=10, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + transformer=dict( + type='PerceptionTransformer', + rotate_prev_bev=True, + use_shift=True, + use_can_bus=True, + embed_dims=_dim_, + encoder=dict( + type='BEVFormerEncoder', + num_layers=3, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type='BEVFormerLayer', + attn_cfgs=[ + dict( + type='TemporalSelfAttention', + embed_dims=_dim_, + num_levels=1), + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=8, + num_levels=_num_levels_), + embed_dims=_dim_, + ) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm'))), + decoder=dict( + type='DetectionTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + dict( + type='CustomMSDeformableAttention', + embed_dims=_dim_, + num_levels=1), + ], + + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')))), + bbox_coder=dict( + type='NMSFreeCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=10), + positional_encoding=dict( + type='LearnedPositionalEncoding', + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + ), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='L1Loss', loss_weight=0.25), + loss_iou=dict(type='GIoULoss', loss_weight=0.0)), + # model training and testing settings + train_cfg=dict(pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBox3DL1Cost', weight=0.25), + iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. 
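+            # Note: HungarianAssigner3D sums these cls/reg/iou costs into one
+            # cost matrix and solves a one-to-one matching between the 900
+            # object queries and the ground-truth boxes; with weight=0.0 the
+            # IoU term is inert and, as the comment above says, only keeps the
+            # interface compatible with the DETR-style head.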
+            pc_range=point_cloud_range))))
+
+dataset_type = 'CustomNuScenesDataset'
+data_root = 'data/nuscenes/'
+file_client_args = dict(backend='disk')
+
+train_pipeline = [
+    dict(type='LoadMultiViewImageFromFiles', to_float32=True),
+    dict(type='PhotoMetricDistortionMultiViewImage'),
+    dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False),
+    dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
+    dict(type='ObjectNameFilter', classes=class_names),
+    dict(type='NormalizeMultiviewImage', **img_norm_cfg),
+    dict(type='RandomScaleImageMultiViewImage', scales=[0.5]),
+    dict(type='PadMultiViewImage', size_divisor=32),
+    dict(type='DefaultFormatBundle3D', class_names=class_names),
+    dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img'])
+]
+
+test_pipeline = [
+    dict(type='LoadMultiViewImageFromFiles', to_float32=True),
+    dict(type='NormalizeMultiviewImage', **img_norm_cfg),
+    dict(
+        type='MultiScaleFlipAug3D',
+        img_scale=(1600, 900),
+        pts_scale_ratio=1,
+        flip=False,
+        transforms=[
+            dict(type='RandomScaleImageMultiViewImage', scales=[0.5]),
+            dict(type='PadMultiViewImage', size_divisor=32),
+            dict(
+                type='DefaultFormatBundle3D',
+                class_names=class_names,
+                with_label=False),
+            dict(type='CustomCollect3D', keys=['img'])
+        ])
+]
+
+data = dict(
+    samples_per_gpu=2,
+    workers_per_gpu=8,
+    train=dict(
+        type=dataset_type,
+        data_root=data_root,
+        ann_file=data_root + 'nuscenes_infos_temporal_train.pkl',
+        pipeline=train_pipeline,
+        classes=class_names,
+        modality=input_modality,
+        test_mode=False,
+        use_valid_flag=True,
+        bev_size=(bev_h_, bev_w_),
+        queue_length=queue_length,
+        # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
+        # and box_type_3d='Depth' in sunrgbd and scannet dataset.
+        box_type_3d='LiDAR'),
+    val=dict(
+        type=dataset_type,
+        data_root=data_root,
+        ann_file=data_root + 'nuscenes_infos_temporal_val.pkl',
+        pipeline=test_pipeline,
+        bev_size=(bev_h_, bev_w_),
+        classes=class_names,
+        modality=input_modality,
+        samples_per_gpu=1),
+    test=dict(
+        type=dataset_type,
+        data_root=data_root,
+        ann_file=data_root + 'nuscenes_infos_temporal_val.pkl',
+        pipeline=test_pipeline,
+        bev_size=(bev_h_, bev_w_),
+        classes=class_names,
+        modality=input_modality),
+    shuffler_sampler=dict(type='DistributedGroupSampler'),
+    nonshuffler_sampler=dict(type='DistributedSampler')
+)
+
+optimizer = dict(
+    type='AdamW',
+    lr=2.8e-4,
+    paramwise_cfg=dict(
+        custom_keys={
+            'img_backbone': dict(lr_mult=0.1),
+        }),
+    weight_decay=0.01)
+
+optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
+# learning policy
+lr_config = dict(
+    policy='CosineAnnealing',
+    warmup='linear',
+    warmup_iters=500,
+    warmup_ratio=1.0 / 3,
+    min_lr_ratio=1e-3)
+total_epochs = 24
+evaluation = dict(interval=1, pipeline=test_pipeline)
+
+runner = dict(type='EpochBasedRunner_video', max_epochs=total_epochs)
+
+log_config = dict(
+    interval=50,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHook')
+    ])
+
+fp16 = dict(loss_scale=512.)
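+
+# Sketch of what the `fp16` dict above enables (simplified and assumption-
+# laden; the real logic lives in mmcv's Fp16OptimizerHook). A static loss
+# scale of 512 multiplies the loss before backward and divides the gradients
+# back before the optimizer step, so small fp16 gradients do not underflow:
+#
+#   scaled_loss = loss * fp16['loss_scale']
+#   scaled_loss.backward()
+#   for param in model.parameters():
+#       if param.grad is not None:
+#           param.grad.div_(fp16['loss_scale'])
+#   optimizer.step()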
+checkpoint_config = dict(interval=1)
+custom_hooks = [dict(type='TransferWeight', priority='LOWEST')]
\ No newline at end of file
diff --git a/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t1-24ep.py b/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t1-24ep.py
new file mode 100644
index 0000000..594f34b
--- /dev/null
+++ b/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t1-24ep.py
@@ -0,0 +1,360 @@
+# mAP: 0.3805
+# mATE: 0.7198
+# mASE: 0.2805
+# mAOE: 0.4131
+# mAVE: 0.7652
+# mAAE: 0.1951
+# NDS: 0.4529
+_base_ = [
+    '../_base_/default_runtime.py'
+]
+# Dataset
+# If the point cloud range is changed, the models should also change their
+# point cloud range accordingly
+point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
+# For nuScenes we usually do 10-class detection
+class_names = [
+    'barrier', 'bicycle', 'bus', 'car', 'construction_vehicle', 'motorcycle',
+    'pedestrian', 'traffic_cone', 'trailer', 'truck'
+]
+dataset_type = 'CustomNuScenesDatasetV2'
+data_root = 'data/nuscenes/'
+# Input modality for the nuScenes dataset; this is consistent with the
+# submission format, which requires the information in input_modality.
+input_modality = dict(
+    use_lidar=False,
+    use_camera=True,
+    use_radar=False,
+    use_map=False,
+    use_external=False)
+img_norm_cfg = dict(mean=[103.53, 116.28, 123.675], std=[1, 1, 1], to_rgb=False)
+bev_h_ = 200
+bev_w_ = 200
+frames = (0,)
+group_detr = 11
+voxel_size = [102.4 / bev_h_, 102.4 / bev_w_, 8]
+ida_aug_conf = {
+    "reisze": [512, 544, 576, 608, 640, 672, 704, 736, 768],  # (0.8, 1.2)
+    "crop": (0, 260, 1600, 900),
+    "H": 900,
+    "W": 1600,
+    "rand_flip": True,
+}
+ida_aug_conf_eval = {
+    "reisze": [640, ],
+    "crop": (0, 260, 1600, 900),
+    "H": 900,
+    "W": 1600,
+    "rand_flip": False,
+}
+# file_client_args = dict(backend='disk')
+# Uncomment the following if you use ceph or other file clients.
+# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
+# for more details.
+# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/nuscenes/': 's3://nuscenes/nuscenes/', +# 'data/nuscenes/': 's3://nuscenes/nuscenes/' +# })) +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='PhotoMetricDistortionMultiViewImage'), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), + dict(type='GlobalRotScaleTransImage', + rot_range=[-22.5, 22.5], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0], + reverse_angle=True, + training=True, + flip_dx_ratio=0.5, + flip_dy_ratio=0.5, + only_gt=True,), + dict( + type='ObjectRangeFilter', + point_cloud_range=point_cloud_range), + dict( + type='ObjectNameFilter', + classes=class_names), + dict(type='CropResizeFlipImage', data_aug_conf=ida_aug_conf, training=True, debug=False), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict( + type='CustomCollect3D', + keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', + 'ego2global_translation', 'ego2global_rotation', 'lidar2ego_translation', 'lidar2ego_rotation', + 'timestamp', 'mono_input_dict', 'mono_ann_idx', 'aug_param']), + dict(type='DD3DMapper', + is_train=True, + tasks=dict(box2d_on=True, box3d_on=True),) +] +eval_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True, ), + dict(type='CropResizeFlipImage', data_aug_conf=ida_aug_conf_eval, training=False, debug=False), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1600, 640), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='CustomCollect3D', + keys=['img', 'ego2global_translation', 'ego2global_rotation', 'lidar2ego_translation', + 'lidar2ego_rotation', 'timestamp']) + ]) +] + +data = dict( + samples_per_gpu=1, + workers_per_gpu=4, + persistent_workers=True, + train=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_temporal_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + use_valid_flag=True, + box_type_3d='LiDAR', + mono_cfg=dict( + name='nusc_trainval', + data_root='data/nuscenes/', + min_num_lidar_points=3, + min_box_visibility=0.2)), + val=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root='data/nuscenes/', + ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', + pipeline=eval_pipeline, + classes=class_names, + modality=input_modality, + samples_per_gpu=1), + test=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root='data/nuscenes/', + ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', + pipeline=eval_pipeline, + classes=class_names, + modality=input_modality), + shuffler_sampler=dict(type='DistributedGroupSampler'), + nonshuffler_sampler=dict(type='DistributedSampler')) +evaluation = dict(interval=4, pipeline=eval_pipeline) + +# model +load_from = './ckpts/fcos_r50_coco_2mmdet.pth' +plugin = True +plugin_dir = 'projects/mmdet3d_plugin/' +_dim_ = 256 +_pos_dim_ = 128 +_ffn_dim_ = 512 +_num_levels_ = 4 +_num_mono_levels_ = 5 + +model = dict( + type='BEVFormerV2', + use_grid_mask=True, + video_test_mode=False, + num_levels=_num_levels_, + num_mono_levels=_num_mono_levels_, + mono_loss_weight=1.0, + 
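# Note: BEVFormerV2 also trains an auxiliary perspective-view DD3D head
+    # (`fcos3d_bbox_head` below) as extra supervision for the image backbone;
+    # `mono_loss_weight` above scales that monocular loss against the BEV
+    # detection loss. +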
frames=frames, + img_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=-1, + norm_cfg=dict(type='SyncBN'), + norm_eval=False, + style='caffe'), + img_neck=dict( + type='FPN', + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=_num_mono_levels_, + relu_before_extra_convs=True), + pts_bbox_head=dict( + type='BEVFormerHead_GroupDETR', + group_detr=group_detr, + bev_h=bev_h_, + bev_w=bev_w_, + num_query=900, + num_classes=10, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + transformer=dict( + type='PerceptionTransformerV2', + embed_dims=_dim_, + frames=frames, + encoder=dict( + type='BEVFormerEncoder', + num_layers=6, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type='BEVFormerLayer', + attn_cfgs=[ + dict( + type='TemporalSelfAttention', + embed_dims=_dim_, + num_levels=1), + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=8, + num_levels=4), + embed_dims=_dim_) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm'))), + decoder=dict( + type='DetectionTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='GroupMultiheadAttention', + group=group_detr, + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + dict( + type='CustomMSDeformableAttention', + embed_dims=_dim_, + num_levels=1) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')))), + bbox_coder=dict( + type='NMSFreeCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=10), + positional_encoding=dict( + type='LearnedPositionalEncoding', + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='SmoothL1Loss', loss_weight=0.75, beta=1.0), + loss_iou=dict(type='GIoULoss', loss_weight=0.0)), + fcos3d_bbox_head=dict( + type='NuscenesDD3D', + num_classes=10, + in_channels=_dim_, + strides=[8, 16, 32, 64, 128], + box3d_on=True, + feature_locations_offset='none', + fcos2d_cfg=dict( + num_cls_convs=4, + num_box_convs=4, + norm='SyncBN', + use_deformable=False, + use_scale=True, + box2d_scale_init_factor=1.0), + fcos2d_loss_cfg=dict( + focal_loss_alpha=0.25, focal_loss_gamma=2.0, loc_loss_type='giou'), + fcos3d_cfg=dict( + num_convs=4, + norm='SyncBN', + use_scale=True, + depth_scale_init_factor=0.3, + proj_ctr_scale_init_factor=1.0, + use_per_level_predictors=False, + class_agnostic=False, + use_deformable=False, + mean_depth_per_level=[44.921, 20.252, 11.712, 7.166, 8.548], + std_depth_per_level=[24.331, 9.833, 6.223, 4.611, 8.275]), + fcos3d_loss_cfg=dict( + min_depth=0.1, + max_depth=80.0, + box3d_loss_weight=2.0, + conf3d_loss_weight=1.0, + conf_3d_temperature=1.0, + smooth_l1_loss_beta=0.05, + max_loss_per_group=20, + predict_allocentric_rot=True, + scale_depth_by_focal_lengths=True, + scale_depth_by_focal_lengths_factor=500.0, + class_agnostic=False, + predict_distance=False, + 
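# Per-class canonical box sizes in meters (nuScenes statistics, in the
+            # order of class_names above: barrier, bicycle, bus, ...); DD3D
+            # regresses 3D size as a deviation from these priors. +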
canon_box_sizes=[[2.3524184, 0.5062202, 1.0413622],
+                             [0.61416006, 1.7016163, 1.3054738],
+                             [2.9139307, 10.725025, 3.2832346],
+                             [1.9751819, 4.641267, 1.74352],
+                             [2.772134, 6.565072, 3.2474296],
+                             [0.7800532, 2.138673, 1.4437162],
+                             [0.6667362, 0.7181772, 1.7616143],
+                             [0.40246472, 0.4027083, 1.0084083],
+                             [3.0059454, 12.8197, 4.1213827],
+                             [2.4986045, 6.9310856, 2.8382742]]),
+        target_assign_cfg=dict(
+            center_sample=True,
+            pos_radius=1.5,
+            sizes_of_interest=((-1, 64), (64, 128), (128, 256), (256, 512),
+                               (512, 100000000.0))),
+        nusc_loss_weight=dict(attr_loss_weight=0.2, speed_loss_weight=0.2)),
+    train_cfg=dict(
+        pts=dict(
+            grid_size=[512, 512, 1],
+            voxel_size=voxel_size,
+            point_cloud_range=point_cloud_range,
+            out_size_factor=4,
+            assigner=dict(
+                type='HungarianAssigner3D',
+                cls_cost=dict(type='FocalLossCost', weight=2.0),
+                reg_cost=dict(type='SmoothL1Cost', weight=0.75),
+                iou_cost=dict(type='IoUCost', weight=0.0),
+                pc_range=point_cloud_range))))
+
+# optimizer
+optimizer = dict(
+    type='AdamW',
+    lr=4e-4,
+    paramwise_cfg=dict(
+        custom_keys=dict(
+            img_backbone=dict(lr_mult=0.5),
+        )),
+    weight_decay=0.01)
+optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
+# learning policy
+lr_config = dict(
+    policy='step',
+    warmup='linear',
+    warmup_iters=2000,
+    warmup_ratio=1.0 / 3,
+    step=[20, ])
+total_epochs = 24
+runner = dict(type='EpochBasedRunner', max_epochs=total_epochs)
diff --git a/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t1-48ep.py b/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t1-48ep.py
new file mode 100644
index 0000000..a720051
--- /dev/null
+++ b/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t1-48ep.py
@@ -0,0 +1,360 @@
+# mAP: 0.3953
+# mATE: 0.6941
+# mASE: 0.2765
+# mAOE: 0.4199
+# mAVE: 0.7537
+# mAAE: 0.1866
+# NDS: 0.4646
+_base_ = [
+    '../_base_/default_runtime.py'
+]
+# Dataset
+# If the point cloud range is changed, the models should also change their
+# point cloud range accordingly
+point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
+# For nuScenes we usually do 10-class detection
+class_names = [
+    'barrier', 'bicycle', 'bus', 'car', 'construction_vehicle', 'motorcycle',
+    'pedestrian', 'traffic_cone', 'trailer', 'truck'
+]
+dataset_type = 'CustomNuScenesDatasetV2'
+data_root = 'data/nuscenes/'
+# Input modality for the nuScenes dataset; this is consistent with the
+# submission format, which requires the information in input_modality.
+input_modality = dict(
+    use_lidar=False,
+    use_camera=True,
+    use_radar=False,
+    use_map=False,
+    use_external=False)
+img_norm_cfg = dict(mean=[103.53, 116.28, 123.675], std=[1, 1, 1], to_rgb=False)
+bev_h_ = 200
+bev_w_ = 200
+frames = (0,)
+group_detr = 11
+voxel_size = [102.4 / bev_h_, 102.4 / bev_w_, 8]
+ida_aug_conf = {
+    "reisze": [512, 544, 576, 608, 640, 672, 704, 736, 768],  # (0.8, 1.2)
+    "crop": (0, 260, 1600, 900),
+    "H": 900,
+    "W": 1600,
+    "rand_flip": True,
+}
+ida_aug_conf_eval = {
+    "reisze": [640, ],
+    "crop": (0, 260, 1600, 900),
+    "H": 900,
+    "W": 1600,
+    "rand_flip": False,
+}
+# file_client_args = dict(backend='disk')
+# Uncomment the following if you use ceph or other file clients.
+# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
+# for more details.
+# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/nuscenes/': 's3://nuscenes/nuscenes/', +# 'data/nuscenes/': 's3://nuscenes/nuscenes/' +# })) +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='PhotoMetricDistortionMultiViewImage'), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), + dict(type='GlobalRotScaleTransImage', + rot_range=[-22.5, 22.5], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0], + reverse_angle=True, + training=True, + flip_dx_ratio=0.5, + flip_dy_ratio=0.5, + only_gt=True,), + dict( + type='ObjectRangeFilter', + point_cloud_range=point_cloud_range), + dict( + type='ObjectNameFilter', + classes=class_names), + dict(type='CropResizeFlipImage', data_aug_conf=ida_aug_conf, training=True, debug=False), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict( + type='CustomCollect3D', + keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', + 'ego2global_translation', 'ego2global_rotation', 'lidar2ego_translation', 'lidar2ego_rotation', + 'timestamp', 'mono_input_dict', 'mono_ann_idx', 'aug_param']), + dict(type='DD3DMapper', + is_train=True, + tasks=dict(box2d_on=True, box3d_on=True),) +] +eval_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True, ), + dict(type='CropResizeFlipImage', data_aug_conf=ida_aug_conf_eval, training=False, debug=False), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1600, 640), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='CustomCollect3D', + keys=['img', 'ego2global_translation', 'ego2global_rotation', 'lidar2ego_translation', + 'lidar2ego_rotation', 'timestamp']) + ]) +] + +data = dict( + samples_per_gpu=1, + workers_per_gpu=4, + persistent_workers=True, + train=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_temporal_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + use_valid_flag=True, + box_type_3d='LiDAR', + mono_cfg=dict( + name='nusc_trainval', + data_root='data/nuscenes/', + min_num_lidar_points=3, + min_box_visibility=0.2)), + val=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root='data/nuscenes/', + ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', + pipeline=eval_pipeline, + classes=class_names, + modality=input_modality, + samples_per_gpu=1), + test=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root='data/nuscenes/', + ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', + pipeline=eval_pipeline, + classes=class_names, + modality=input_modality), + shuffler_sampler=dict(type='DistributedGroupSampler'), + nonshuffler_sampler=dict(type='DistributedSampler')) +evaluation = dict(interval=4, pipeline=eval_pipeline) + +# model +load_from = './ckpts/fcos_r50_coco_2mmdet.pth' +plugin = True +plugin_dir = 'projects/mmdet3d_plugin/' +_dim_ = 256 +_pos_dim_ = 128 +_ffn_dim_ = 512 +_num_levels_ = 4 +_num_mono_levels_ = 5 + +model = dict( + type='BEVFormerV2', + use_grid_mask=True, + video_test_mode=False, + num_levels=_num_levels_, + num_mono_levels=_num_mono_levels_, + mono_loss_weight=1.0, + 
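# frames == (0,): only the current sample feeds the temporal encoder (the
+    # single-frame "t1" setting in the file name); the t2/t8 variants extend
+    # this tuple with past-frame offsets. +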
frames=frames, + img_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=-1, + norm_cfg=dict(type='SyncBN'), + norm_eval=False, + style='caffe'), + img_neck=dict( + type='FPN', + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=_num_mono_levels_, + relu_before_extra_convs=True), + pts_bbox_head=dict( + type='BEVFormerHead_GroupDETR', + group_detr=group_detr, + bev_h=bev_h_, + bev_w=bev_w_, + num_query=900, + num_classes=10, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + transformer=dict( + type='PerceptionTransformerV2', + embed_dims=_dim_, + frames=frames, + encoder=dict( + type='BEVFormerEncoder', + num_layers=6, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type='BEVFormerLayer', + attn_cfgs=[ + dict( + type='TemporalSelfAttention', + embed_dims=_dim_, + num_levels=1), + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=8, + num_levels=4), + embed_dims=_dim_) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm'))), + decoder=dict( + type='DetectionTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='GroupMultiheadAttention', + group=group_detr, + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + dict( + type='CustomMSDeformableAttention', + embed_dims=_dim_, + num_levels=1) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')))), + bbox_coder=dict( + type='NMSFreeCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=10), + positional_encoding=dict( + type='LearnedPositionalEncoding', + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='SmoothL1Loss', loss_weight=0.75, beta=1.0), + loss_iou=dict(type='GIoULoss', loss_weight=0.0)), + fcos3d_bbox_head=dict( + type='NuscenesDD3D', + num_classes=10, + in_channels=_dim_, + strides=[8, 16, 32, 64, 128], + box3d_on=True, + feature_locations_offset='none', + fcos2d_cfg=dict( + num_cls_convs=4, + num_box_convs=4, + norm='SyncBN', + use_deformable=False, + use_scale=True, + box2d_scale_init_factor=1.0), + fcos2d_loss_cfg=dict( + focal_loss_alpha=0.25, focal_loss_gamma=2.0, loc_loss_type='giou'), + fcos3d_cfg=dict( + num_convs=4, + norm='SyncBN', + use_scale=True, + depth_scale_init_factor=0.3, + proj_ctr_scale_init_factor=1.0, + use_per_level_predictors=False, + class_agnostic=False, + use_deformable=False, + mean_depth_per_level=[44.921, 20.252, 11.712, 7.166, 8.548], + std_depth_per_level=[24.331, 9.833, 6.223, 4.611, 8.275]), + fcos3d_loss_cfg=dict( + min_depth=0.1, + max_depth=80.0, + box3d_loss_weight=2.0, + conf3d_loss_weight=1.0, + conf_3d_temperature=1.0, + smooth_l1_loss_beta=0.05, + max_loss_per_group=20, + predict_allocentric_rot=True, + scale_depth_by_focal_lengths=True, + scale_depth_by_focal_lengths_factor=500.0, + class_agnostic=False, + predict_distance=False, + 
canon_box_sizes=[[2.3524184, 0.5062202, 1.0413622],
+                             [0.61416006, 1.7016163, 1.3054738],
+                             [2.9139307, 10.725025, 3.2832346],
+                             [1.9751819, 4.641267, 1.74352],
+                             [2.772134, 6.565072, 3.2474296],
+                             [0.7800532, 2.138673, 1.4437162],
+                             [0.6667362, 0.7181772, 1.7616143],
+                             [0.40246472, 0.4027083, 1.0084083],
+                             [3.0059454, 12.8197, 4.1213827],
+                             [2.4986045, 6.9310856, 2.8382742]]),
+        target_assign_cfg=dict(
+            center_sample=True,
+            pos_radius=1.5,
+            sizes_of_interest=((-1, 64), (64, 128), (128, 256), (256, 512),
+                               (512, 100000000.0))),
+        nusc_loss_weight=dict(attr_loss_weight=0.2, speed_loss_weight=0.2)),
+    train_cfg=dict(
+        pts=dict(
+            grid_size=[512, 512, 1],
+            voxel_size=voxel_size,
+            point_cloud_range=point_cloud_range,
+            out_size_factor=4,
+            assigner=dict(
+                type='HungarianAssigner3D',
+                cls_cost=dict(type='FocalLossCost', weight=2.0),
+                reg_cost=dict(type='SmoothL1Cost', weight=0.75),
+                iou_cost=dict(type='IoUCost', weight=0.0),
+                pc_range=point_cloud_range))))
+
+# optimizer
+optimizer = dict(
+    type='AdamW',
+    lr=4e-4,
+    paramwise_cfg=dict(
+        custom_keys=dict(
+            img_backbone=dict(lr_mult=0.5),
+        )),
+    weight_decay=0.01)
+optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
+# learning policy
+lr_config = dict(
+    policy='step',
+    warmup='linear',
+    warmup_iters=2000,
+    warmup_ratio=1.0 / 3,
+    step=[44, ])
+total_epochs = 48
+runner = dict(type='EpochBasedRunner', max_epochs=total_epochs)
diff --git a/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t1-base-24ep.py b/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t1-base-24ep.py
new file mode 100644
index 0000000..10330cf
--- /dev/null
+++ b/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t1-base-24ep.py
@@ -0,0 +1,349 @@
+# mAP: 0.3512
+# mATE: 0.7534
+# mASE: 0.2863
+# mAOE: 0.4665
+# mAVE: 0.8070
+# mAAE: 0.1861
+# NDS: 0.4257
+
+_base_ = [
+    '../_base_/default_runtime.py'
+]
+# Dataset
+# If the point cloud range is changed, the models should also change their
+# point cloud range accordingly
+point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
+# For nuScenes we usually do 10-class detection
+class_names = [
+    'barrier', 'bicycle', 'bus', 'car', 'construction_vehicle', 'motorcycle',
+    'pedestrian', 'traffic_cone', 'trailer', 'truck'
+]
+dataset_type = 'CustomNuScenesDatasetV2'
+data_root = 'data/nuscenes/'
+# Input modality for the nuScenes dataset; this is consistent with the
+# submission format, which requires the information in input_modality.
+input_modality = dict(
+    use_lidar=False,
+    use_camera=True,
+    use_radar=False,
+    use_map=False,
+    use_external=False)
+img_norm_cfg = dict(mean=[103.53, 116.28, 123.675], std=[1, 1, 1], to_rgb=False)
+bev_h_ = 200
+bev_w_ = 200
+frames = (0,)
+voxel_size = [102.4 / bev_h_, 102.4 / bev_w_, 8]
+ida_aug_conf = {
+    "reisze": [640, ],
+    "crop": (0, 260, 1600, 900),
+    "H": 900,
+    "W": 1600,
+    "rand_flip": False,
+}
+ida_aug_conf_eval = {
+    "reisze": [640, ],
+    "crop": (0, 260, 1600, 900),
+    "H": 900,
+    "W": 1600,
+    "rand_flip": False,
+}
+# file_client_args = dict(backend='disk')
+# Uncomment the following if you use ceph or other file clients.
+# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
+# for more details.
+# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/nuscenes/': 's3://nuscenes/nuscenes/', +# 'data/nuscenes/': 's3://nuscenes/nuscenes/' +# })) +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='PhotoMetricDistortionMultiViewImage'), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), + dict( + type='ObjectRangeFilter', + point_cloud_range=point_cloud_range), + dict( + type='ObjectNameFilter', + classes=class_names), + dict(type='CropResizeFlipImage', data_aug_conf=ida_aug_conf, training=True, debug=False), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict( + type='CustomCollect3D', + keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', + 'ego2global_translation', 'ego2global_rotation', 'lidar2ego_translation', 'lidar2ego_rotation', + 'timestamp', 'mono_input_dict', 'mono_ann_idx', 'aug_param']), + dict(type='DD3DMapper', + is_train=True, + tasks=dict(box2d_on=True, box3d_on=True),) +] +eval_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True, ), + dict(type='CropResizeFlipImage', data_aug_conf=ida_aug_conf_eval, training=False, debug=False), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1600, 640), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='CustomCollect3D', + keys=['img', 'ego2global_translation', 'ego2global_rotation', 'lidar2ego_translation', + 'lidar2ego_rotation', 'timestamp']) + ]) +] + +data = dict( + samples_per_gpu=1, + workers_per_gpu=4, + persistent_workers=True, + train=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_temporal_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + use_valid_flag=True, + box_type_3d='LiDAR', + mono_cfg=dict( + name='nusc_trainval', + data_root='data/nuscenes/', + min_num_lidar_points=3, + min_box_visibility=0.2)), + val=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root='data/nuscenes/', + ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', + pipeline=eval_pipeline, + classes=class_names, + modality=input_modality, + samples_per_gpu=1), + test=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root='data/nuscenes/', + ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', + pipeline=eval_pipeline, + classes=class_names, + modality=input_modality), + shuffler_sampler=dict(type='DistributedGroupSampler'), + nonshuffler_sampler=dict(type='DistributedSampler')) +evaluation = dict(interval=4, pipeline=eval_pipeline) + +# model +load_from = './ckpts/fcos_r50_coco_2mmdet.pth' +plugin = True +plugin_dir = 'projects/mmdet3d_plugin/' +_dim_ = 256 +_pos_dim_ = 128 +_ffn_dim_ = 512 +_num_levels_ = 4 +_num_mono_levels_ = 5 + +model = dict( + type='BEVFormerV2', + use_grid_mask=True, + video_test_mode=False, + num_levels=_num_levels_, + num_mono_levels=_num_mono_levels_, + mono_loss_weight=1.0, + frames=frames, + img_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=-1, + norm_cfg=dict(type='SyncBN'), + norm_eval=False, + style='caffe'), + img_neck=dict( + type='FPN', + 
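# As configured above, the neck takes C3-C5 from the ResNet
+        # (out_indices=(1, 2, 3)) and emits _num_mono_levels_=5 pyramid levels;
+        # judging from num_levels/num_mono_levels above, the BEV branch consumes
+        # the first four while the monocular DD3D head (strides 8-128) uses all
+        # five. +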
in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=_num_mono_levels_, + relu_before_extra_convs=True), + pts_bbox_head=dict( + type='BEVFormerHead', + bev_h=bev_h_, + bev_w=bev_w_, + num_query=900, + num_classes=10, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + transformer=dict( + type='PerceptionTransformerV2', + embed_dims=_dim_, + frames=frames, + encoder=dict( + type='BEVFormerEncoder', + num_layers=6, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type='BEVFormerLayer', + attn_cfgs=[ + dict( + type='TemporalSelfAttention', + embed_dims=_dim_, + num_levels=1), + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=8, + num_levels=4), + embed_dims=_dim_) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm'))), + decoder=dict( + type='DetectionTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + dict( + type='CustomMSDeformableAttention', + embed_dims=_dim_, + num_levels=1) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')))), + bbox_coder=dict( + type='NMSFreeCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=10), + positional_encoding=dict( + type='LearnedPositionalEncoding', + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='SmoothL1Loss', loss_weight=0.75, beta=1.0), + loss_iou=dict(type='GIoULoss', loss_weight=0.0)), + fcos3d_bbox_head=dict( + type='NuscenesDD3D', + num_classes=10, + in_channels=_dim_, + strides=[8, 16, 32, 64, 128], + box3d_on=True, + feature_locations_offset='none', + fcos2d_cfg=dict( + num_cls_convs=4, + num_box_convs=4, + norm='SyncBN', + use_deformable=False, + use_scale=True, + box2d_scale_init_factor=1.0), + fcos2d_loss_cfg=dict( + focal_loss_alpha=0.25, focal_loss_gamma=2.0, loc_loss_type='giou'), + fcos3d_cfg=dict( + num_convs=4, + norm='SyncBN', + use_scale=True, + depth_scale_init_factor=0.3, + proj_ctr_scale_init_factor=1.0, + use_per_level_predictors=False, + class_agnostic=False, + use_deformable=False, + mean_depth_per_level=[44.921, 20.252, 11.712, 7.166, 8.548], + std_depth_per_level=[24.331, 9.833, 6.223, 4.611, 8.275]), + fcos3d_loss_cfg=dict( + min_depth=0.1, + max_depth=80.0, + box3d_loss_weight=2.0, + conf3d_loss_weight=1.0, + conf_3d_temperature=1.0, + smooth_l1_loss_beta=0.05, + max_loss_per_group=20, + predict_allocentric_rot=True, + scale_depth_by_focal_lengths=True, + scale_depth_by_focal_lengths_factor=500.0, + class_agnostic=False, + predict_distance=False, + canon_box_sizes=[[2.3524184, 0.5062202, 1.0413622], + [0.61416006, 1.7016163, 1.3054738], + [2.9139307, 10.725025, 3.2832346], + [1.9751819, 4.641267, 1.74352], + [2.772134, 6.565072, 3.2474296], + [0.7800532, 2.138673, 1.4437162], + [0.6667362, 0.7181772, 1.7616143], + [0.40246472, 0.4027083, 1.0084083], + 
[3.0059454, 12.8197, 4.1213827],
+                             [2.4986045, 6.9310856, 2.8382742]]),
+        target_assign_cfg=dict(
+            center_sample=True,
+            pos_radius=1.5,
+            sizes_of_interest=((-1, 64), (64, 128), (128, 256), (256, 512),
+                               (512, 100000000.0))),
+        nusc_loss_weight=dict(attr_loss_weight=0.2, speed_loss_weight=0.2)),
+    train_cfg=dict(
+        pts=dict(
+            grid_size=[512, 512, 1],
+            voxel_size=voxel_size,
+            point_cloud_range=point_cloud_range,
+            out_size_factor=4,
+            assigner=dict(
+                type='HungarianAssigner3D',
+                cls_cost=dict(type='FocalLossCost', weight=2.0),
+                reg_cost=dict(type='SmoothL1Cost', weight=0.75),
+                iou_cost=dict(type='IoUCost', weight=0.0),
+                pc_range=point_cloud_range))))
+
+# optimizer
+optimizer = dict(
+    type='AdamW',
+    lr=4e-4,
+    paramwise_cfg=dict(
+        custom_keys=dict(
+            img_backbone=dict(lr_mult=0.5),
+        )),
+    weight_decay=0.01)
+optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
+# learning policy
+lr_config = dict(
+    policy='step',
+    warmup='linear',
+    warmup_iters=2000,
+    warmup_ratio=1.0 / 3,
+    step=[20, ])
+total_epochs = 24
+runner = dict(type='EpochBasedRunner', max_epochs=total_epochs)
diff --git a/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t1-base-48ep.py b/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t1-base-48ep.py
new file mode 100644
index 0000000..9c6d3cc
--- /dev/null
+++ b/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t1-base-48ep.py
@@ -0,0 +1,349 @@
+# mAP: 0.3594
+# mATE: 0.7327
+# mASE: 0.2814
+# mAOE: 0.4074
+# mAVE: 0.7831
+# mAAE: 0.1983
+# NDS: 0.4394
+
+_base_ = [
+    '../_base_/default_runtime.py'
+]
+# Dataset
+# If the point cloud range is changed, the models should also change their
+# point cloud range accordingly
+point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
+# For nuScenes we usually do 10-class detection
+class_names = [
+    'barrier', 'bicycle', 'bus', 'car', 'construction_vehicle', 'motorcycle',
+    'pedestrian', 'traffic_cone', 'trailer', 'truck'
+]
+dataset_type = 'CustomNuScenesDatasetV2'
+data_root = 'data/nuscenes/'
+# Input modality for the nuScenes dataset; this is consistent with the
+# submission format, which requires the information in input_modality.
+input_modality = dict(
+    use_lidar=False,
+    use_camera=True,
+    use_radar=False,
+    use_map=False,
+    use_external=False)
+img_norm_cfg = dict(mean=[103.53, 116.28, 123.675], std=[1, 1, 1], to_rgb=False)
+bev_h_ = 200
+bev_w_ = 200
+frames = (0,)
+voxel_size = [102.4 / bev_h_, 102.4 / bev_w_, 8]
+ida_aug_conf = {
+    "reisze": [640, ],
+    "crop": (0, 260, 1600, 900),
+    "H": 900,
+    "W": 1600,
+    "rand_flip": False,
+}
+ida_aug_conf_eval = {
+    "reisze": [640, ],
+    "crop": (0, 260, 1600, 900),
+    "H": 900,
+    "W": 1600,
+    "rand_flip": False,
+}
+# file_client_args = dict(backend='disk')
+# Uncomment the following if you use ceph or other file clients.
+# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
+# for more details.
+# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/nuscenes/': 's3://nuscenes/nuscenes/', +# 'data/nuscenes/': 's3://nuscenes/nuscenes/' +# })) +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='PhotoMetricDistortionMultiViewImage'), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), + dict( + type='ObjectRangeFilter', + point_cloud_range=point_cloud_range), + dict( + type='ObjectNameFilter', + classes=class_names), + dict(type='CropResizeFlipImage', data_aug_conf=ida_aug_conf, training=True, debug=False), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict( + type='CustomCollect3D', + keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', + 'ego2global_translation', 'ego2global_rotation', 'lidar2ego_translation', 'lidar2ego_rotation', + 'timestamp', 'mono_input_dict', 'mono_ann_idx', 'aug_param']), + dict(type='DD3DMapper', + is_train=True, + tasks=dict(box2d_on=True, box3d_on=True),) +] +eval_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True, ), + dict(type='CropResizeFlipImage', data_aug_conf=ida_aug_conf_eval, training=False, debug=False), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1600, 640), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='CustomCollect3D', + keys=['img', 'ego2global_translation', 'ego2global_rotation', 'lidar2ego_translation', + 'lidar2ego_rotation', 'timestamp']) + ]) +] + +data = dict( + samples_per_gpu=1, + workers_per_gpu=4, + persistent_workers=True, + train=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_temporal_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + use_valid_flag=True, + box_type_3d='LiDAR', + mono_cfg=dict( + name='nusc_trainval', + data_root='data/nuscenes/', + min_num_lidar_points=3, + min_box_visibility=0.2)), + val=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root='data/nuscenes/', + ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', + pipeline=eval_pipeline, + classes=class_names, + modality=input_modality, + samples_per_gpu=1), + test=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root='data/nuscenes/', + ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', + pipeline=eval_pipeline, + classes=class_names, + modality=input_modality), + shuffler_sampler=dict(type='DistributedGroupSampler'), + nonshuffler_sampler=dict(type='DistributedSampler')) +evaluation = dict(interval=4, pipeline=eval_pipeline) + +# model +load_from = './ckpts/fcos_r50_coco_2mmdet.pth' +plugin = True +plugin_dir = 'projects/mmdet3d_plugin/' +_dim_ = 256 +_pos_dim_ = 128 +_ffn_dim_ = 512 +_num_levels_ = 4 +_num_mono_levels_ = 5 + +model = dict( + type='BEVFormerV2', + use_grid_mask=True, + video_test_mode=False, + num_levels=_num_levels_, + num_mono_levels=_num_mono_levels_, + mono_loss_weight=1.0, + frames=frames, + img_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=-1, + norm_cfg=dict(type='SyncBN'), + norm_eval=False, + style='caffe'), + img_neck=dict( + type='FPN', + 
in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=_num_mono_levels_, + relu_before_extra_convs=True), + pts_bbox_head=dict( + type='BEVFormerHead', + bev_h=bev_h_, + bev_w=bev_w_, + num_query=900, + num_classes=10, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + transformer=dict( + type='PerceptionTransformerV2', + embed_dims=_dim_, + frames=frames, + encoder=dict( + type='BEVFormerEncoder', + num_layers=6, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type='BEVFormerLayer', + attn_cfgs=[ + dict( + type='TemporalSelfAttention', + embed_dims=_dim_, + num_levels=1), + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=8, + num_levels=4), + embed_dims=_dim_) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm'))), + decoder=dict( + type='DetectionTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + dict( + type='CustomMSDeformableAttention', + embed_dims=_dim_, + num_levels=1) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')))), + bbox_coder=dict( + type='NMSFreeCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=10), + positional_encoding=dict( + type='LearnedPositionalEncoding', + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='SmoothL1Loss', loss_weight=0.75, beta=1.0), + loss_iou=dict(type='GIoULoss', loss_weight=0.0)), + fcos3d_bbox_head=dict( + type='NuscenesDD3D', + num_classes=10, + in_channels=_dim_, + strides=[8, 16, 32, 64, 128], + box3d_on=True, + feature_locations_offset='none', + fcos2d_cfg=dict( + num_cls_convs=4, + num_box_convs=4, + norm='SyncBN', + use_deformable=False, + use_scale=True, + box2d_scale_init_factor=1.0), + fcos2d_loss_cfg=dict( + focal_loss_alpha=0.25, focal_loss_gamma=2.0, loc_loss_type='giou'), + fcos3d_cfg=dict( + num_convs=4, + norm='SyncBN', + use_scale=True, + depth_scale_init_factor=0.3, + proj_ctr_scale_init_factor=1.0, + use_per_level_predictors=False, + class_agnostic=False, + use_deformable=False, + mean_depth_per_level=[44.921, 20.252, 11.712, 7.166, 8.548], + std_depth_per_level=[24.331, 9.833, 6.223, 4.611, 8.275]), + fcos3d_loss_cfg=dict( + min_depth=0.1, + max_depth=80.0, + box3d_loss_weight=2.0, + conf3d_loss_weight=1.0, + conf_3d_temperature=1.0, + smooth_l1_loss_beta=0.05, + max_loss_per_group=20, + predict_allocentric_rot=True, + scale_depth_by_focal_lengths=True, + scale_depth_by_focal_lengths_factor=500.0, + class_agnostic=False, + predict_distance=False, + canon_box_sizes=[[2.3524184, 0.5062202, 1.0413622], + [0.61416006, 1.7016163, 1.3054738], + [2.9139307, 10.725025, 3.2832346], + [1.9751819, 4.641267, 1.74352], + [2.772134, 6.565072, 3.2474296], + [0.7800532, 2.138673, 1.4437162], + [0.6667362, 0.7181772, 1.7616143], + [0.40246472, 0.4027083, 1.0084083], + 
[3.0059454, 12.8197, 4.1213827],
+                             [2.4986045, 6.9310856, 2.8382742]]),
+        target_assign_cfg=dict(
+            center_sample=True,
+            pos_radius=1.5,
+            sizes_of_interest=((-1, 64), (64, 128), (128, 256), (256, 512),
+                               (512, 100000000.0))),
+        nusc_loss_weight=dict(attr_loss_weight=0.2, speed_loss_weight=0.2)),
+    train_cfg=dict(
+        pts=dict(
+            grid_size=[512, 512, 1],
+            voxel_size=voxel_size,
+            point_cloud_range=point_cloud_range,
+            out_size_factor=4,
+            assigner=dict(
+                type='HungarianAssigner3D',
+                cls_cost=dict(type='FocalLossCost', weight=2.0),
+                reg_cost=dict(type='SmoothL1Cost', weight=0.75),
+                iou_cost=dict(type='IoUCost', weight=0.0),
+                pc_range=point_cloud_range))))
+
+# optimizer
+optimizer = dict(
+    type='AdamW',
+    lr=4e-4,
+    paramwise_cfg=dict(
+        custom_keys=dict(
+            img_backbone=dict(lr_mult=0.5),
+        )),
+    weight_decay=0.01)
+optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
+# learning policy
+lr_config = dict(
+    policy='step',
+    warmup='linear',
+    warmup_iters=2000,
+    warmup_ratio=1.0 / 3,
+    step=[44, ])
+total_epochs = 48
+runner = dict(type='EpochBasedRunner', max_epochs=total_epochs)
diff --git a/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t2-24ep.py b/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t2-24ep.py
new file mode 100644
index 0000000..05bf708
--- /dev/null
+++ b/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t2-24ep.py
@@ -0,0 +1,360 @@
+# mAP: 0.4199
+# mATE: 0.6689
+# mASE: 0.2814
+# mAOE: 0.3915
+# mAVE: 0.3834
+# mAAE: 0.1928
+# NDS: 0.5182
+_base_ = [
+    '../_base_/default_runtime.py'
+]
+# Dataset
+# If the point cloud range is changed, the models should also change their
+# point cloud range accordingly
+point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
+# For nuScenes we usually do 10-class detection
+class_names = [
+    'barrier', 'bicycle', 'bus', 'car', 'construction_vehicle', 'motorcycle',
+    'pedestrian', 'traffic_cone', 'trailer', 'truck'
+]
+dataset_type = 'CustomNuScenesDatasetV2'
+data_root = 'data/nuscenes/'
+# Input modality for the nuScenes dataset; this is consistent with the
+# submission format, which requires the information in input_modality.
+input_modality = dict(
+    use_lidar=False,
+    use_camera=True,
+    use_radar=False,
+    use_map=False,
+    use_external=False)
+img_norm_cfg = dict(mean=[103.53, 116.28, 123.675], std=[1, 1, 1], to_rgb=False)
+bev_h_ = 200
+bev_w_ = 200
+frames = (-1, 0,)
+group_detr = 11
+voxel_size = [102.4 / bev_h_, 102.4 / bev_w_, 8]
+ida_aug_conf = {
+    "reisze": [512, 544, 576, 608, 640, 672, 704, 736, 768],  # (0.8, 1.2)
+    "crop": (0, 260, 1600, 900),
+    "H": 900,
+    "W": 1600,
+    "rand_flip": True,
+}
+ida_aug_conf_eval = {
+    "reisze": [640, ],
+    "crop": (0, 260, 1600, 900),
+    "H": 900,
+    "W": 1600,
+    "rand_flip": False,
+}
+# file_client_args = dict(backend='disk')
+# Uncomment the following if you use ceph or other file clients.
+# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
+# for more details.
+# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/nuscenes/': 's3://nuscenes/nuscenes/', +# 'data/nuscenes/': 's3://nuscenes/nuscenes/' +# })) +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='PhotoMetricDistortionMultiViewImage'), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), + dict(type='GlobalRotScaleTransImage', + rot_range=[-22.5, 22.5], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0], + reverse_angle=True, + training=True, + flip_dx_ratio=0.5, + flip_dy_ratio=0.5, + only_gt=True,), + dict( + type='ObjectRangeFilter', + point_cloud_range=point_cloud_range), + dict( + type='ObjectNameFilter', + classes=class_names), + dict(type='CropResizeFlipImage', data_aug_conf=ida_aug_conf, training=True, debug=False), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict( + type='CustomCollect3D', + keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', + 'ego2global_translation', 'ego2global_rotation', 'lidar2ego_translation', 'lidar2ego_rotation', + 'timestamp', 'mono_input_dict', 'mono_ann_idx', 'aug_param']), + dict(type='DD3DMapper', + is_train=True, + tasks=dict(box2d_on=True, box3d_on=True),) +] +eval_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True, ), + dict(type='CropResizeFlipImage', data_aug_conf=ida_aug_conf_eval, training=False, debug=False), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1600, 640), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='CustomCollect3D', + keys=['img', 'ego2global_translation', 'ego2global_rotation', 'lidar2ego_translation', + 'lidar2ego_rotation', 'timestamp']) + ]) +] + +data = dict( + samples_per_gpu=1, + workers_per_gpu=4, + persistent_workers=True, + train=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_temporal_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + use_valid_flag=True, + box_type_3d='LiDAR', + mono_cfg=dict( + name='nusc_trainval', + data_root='data/nuscenes/', + min_num_lidar_points=3, + min_box_visibility=0.2)), + val=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root='data/nuscenes/', + ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', + pipeline=eval_pipeline, + classes=class_names, + modality=input_modality, + samples_per_gpu=1), + test=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root='data/nuscenes/', + ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', + pipeline=eval_pipeline, + classes=class_names, + modality=input_modality), + shuffler_sampler=dict(type='DistributedGroupSampler'), + nonshuffler_sampler=dict(type='DistributedSampler')) +evaluation = dict(interval=4, pipeline=eval_pipeline) + +# model +load_from = './ckpts/fcos_r50_coco_2mmdet.pth' +plugin = True +plugin_dir = 'projects/mmdet3d_plugin/' +_dim_ = 256 +_pos_dim_ = 128 +_ffn_dim_ = 512 +_num_levels_ = 4 +_num_mono_levels_ = 5 + +model = dict( + type='BEVFormerV2', + use_grid_mask=True, + video_test_mode=False, + num_levels=_num_levels_, + num_mono_levels=_num_mono_levels_, + mono_loss_weight=1.0, + 
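# frames == (-1, 0): the previous and the current samples both feed the
+    # temporal encoder, i.e. the "t2" setting this file is named after. +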
frames=frames, + img_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=-1, + norm_cfg=dict(type='SyncBN'), + norm_eval=False, + style='caffe'), + img_neck=dict( + type='FPN', + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=_num_mono_levels_, + relu_before_extra_convs=True), + pts_bbox_head=dict( + type='BEVFormerHead_GroupDETR', + group_detr=group_detr, + bev_h=bev_h_, + bev_w=bev_w_, + num_query=900, + num_classes=10, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + transformer=dict( + type='PerceptionTransformerV2', + embed_dims=_dim_, + frames=frames, + encoder=dict( + type='BEVFormerEncoder', + num_layers=6, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type='BEVFormerLayer', + attn_cfgs=[ + dict( + type='TemporalSelfAttention', + embed_dims=_dim_, + num_levels=1), + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=8, + num_levels=4), + embed_dims=_dim_) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm'))), + decoder=dict( + type='DetectionTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='GroupMultiheadAttention', + group=group_detr, + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + dict( + type='CustomMSDeformableAttention', + embed_dims=_dim_, + num_levels=1) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')))), + bbox_coder=dict( + type='NMSFreeCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=10), + positional_encoding=dict( + type='LearnedPositionalEncoding', + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='SmoothL1Loss', loss_weight=0.75, beta=1.0), + loss_iou=dict(type='GIoULoss', loss_weight=0.0)), + fcos3d_bbox_head=dict( + type='NuscenesDD3D', + num_classes=10, + in_channels=_dim_, + strides=[8, 16, 32, 64, 128], + box3d_on=True, + feature_locations_offset='none', + fcos2d_cfg=dict( + num_cls_convs=4, + num_box_convs=4, + norm='SyncBN', + use_deformable=False, + use_scale=True, + box2d_scale_init_factor=1.0), + fcos2d_loss_cfg=dict( + focal_loss_alpha=0.25, focal_loss_gamma=2.0, loc_loss_type='giou'), + fcos3d_cfg=dict( + num_convs=4, + norm='SyncBN', + use_scale=True, + depth_scale_init_factor=0.3, + proj_ctr_scale_init_factor=1.0, + use_per_level_predictors=False, + class_agnostic=False, + use_deformable=False, + mean_depth_per_level=[44.921, 20.252, 11.712, 7.166, 8.548], + std_depth_per_level=[24.331, 9.833, 6.223, 4.611, 8.275]), + fcos3d_loss_cfg=dict( + min_depth=0.1, + max_depth=80.0, + box3d_loss_weight=2.0, + conf3d_loss_weight=1.0, + conf_3d_temperature=1.0, + smooth_l1_loss_beta=0.05, + max_loss_per_group=20, + predict_allocentric_rot=True, + scale_depth_by_focal_lengths=True, + scale_depth_by_focal_lengths_factor=500.0, + class_agnostic=False, + predict_distance=False, + 
canon_box_sizes=[[2.3524184, 0.5062202, 1.0413622],
+                             [0.61416006, 1.7016163, 1.3054738],
+                             [2.9139307, 10.725025, 3.2832346],
+                             [1.9751819, 4.641267, 1.74352],
+                             [2.772134, 6.565072, 3.2474296],
+                             [0.7800532, 2.138673, 1.4437162],
+                             [0.6667362, 0.7181772, 1.7616143],
+                             [0.40246472, 0.4027083, 1.0084083],
+                             [3.0059454, 12.8197, 4.1213827],
+                             [2.4986045, 6.9310856, 2.8382742]]),
+        target_assign_cfg=dict(
+            center_sample=True,
+            pos_radius=1.5,
+            sizes_of_interest=((-1, 64), (64, 128), (128, 256), (256, 512),
+                               (512, 100000000.0))),
+        nusc_loss_weight=dict(attr_loss_weight=0.2, speed_loss_weight=0.2)),
+    train_cfg=dict(
+        pts=dict(
+            grid_size=[512, 512, 1],
+            voxel_size=voxel_size,
+            point_cloud_range=point_cloud_range,
+            out_size_factor=4,
+            assigner=dict(
+                type='HungarianAssigner3D',
+                cls_cost=dict(type='FocalLossCost', weight=2.0),
+                reg_cost=dict(type='SmoothL1Cost', weight=0.75),
+                iou_cost=dict(type='IoUCost', weight=0.0),
+                pc_range=point_cloud_range))))
+
+# optimizer
+optimizer = dict(
+    type='AdamW',
+    lr=4e-4,
+    paramwise_cfg=dict(
+        custom_keys=dict(
+            img_backbone=dict(lr_mult=0.5),
+        )),
+    weight_decay=0.01)
+optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
+# learning policy
+lr_config = dict(
+    policy='step',
+    warmup='linear',
+    warmup_iters=2000,
+    warmup_ratio=1.0 / 3,
+    step=[20, ])
+total_epochs = 24
+runner = dict(type='EpochBasedRunner', max_epochs=total_epochs)
diff --git a/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t2-48ep.py b/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t2-48ep.py
new file mode 100644
index 0000000..2c1dab2
--- /dev/null
+++ b/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t2-48ep.py
@@ -0,0 +1,360 @@
+# mAP: 0.4313
+# mATE: 0.6557
+# mASE: 0.2775
+# mAOE: 0.3851
+# mAVE: 0.3861
+# mAAE: 0.1882
+# NDS: 0.5264
+_base_ = [
+    '../_base_/default_runtime.py'
+]
+# Dataset
+# If the point cloud range is changed, the models should also change their
+# point cloud range accordingly
+point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
+# For nuScenes we usually do 10-class detection
+class_names = [
+    'barrier', 'bicycle', 'bus', 'car', 'construction_vehicle', 'motorcycle',
+    'pedestrian', 'traffic_cone', 'trailer', 'truck'
+]
+dataset_type = 'CustomNuScenesDatasetV2'
+data_root = 'data/nuscenes/'
+# Input modality for the nuScenes dataset; this is consistent with the
+# submission format, which requires the information in input_modality.
+input_modality = dict(
+    use_lidar=False,
+    use_camera=True,
+    use_radar=False,
+    use_map=False,
+    use_external=False)
+img_norm_cfg = dict(mean=[103.53, 116.28, 123.675], std=[1, 1, 1], to_rgb=False)
+bev_h_ = 200
+bev_w_ = 200
+frames = (-1, 0,)
+group_detr = 11
+voxel_size = [102.4 / bev_h_, 102.4 / bev_w_, 8]
+ida_aug_conf = {
+    "reisze": [512, 544, 576, 608, 640, 672, 704, 736, 768],  # (0.8, 1.2)
+    "crop": (0, 260, 1600, 900),
+    "H": 900,
+    "W": 1600,
+    "rand_flip": True,
+}
+ida_aug_conf_eval = {
+    "reisze": [640, ],
+    "crop": (0, 260, 1600, 900),
+    "H": 900,
+    "W": 1600,
+    "rand_flip": False,
+}
+# file_client_args = dict(backend='disk')
+# Uncomment the following if you use ceph or other file clients.
+# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
+# for more details.
+# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/nuscenes/': 's3://nuscenes/nuscenes/', +# 'data/nuscenes/': 's3://nuscenes/nuscenes/' +# })) +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='PhotoMetricDistortionMultiViewImage'), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), + dict(type='GlobalRotScaleTransImage', + rot_range=[-22.5, 22.5], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0], + reverse_angle=True, + training=True, + flip_dx_ratio=0.5, + flip_dy_ratio=0.5, + only_gt=True,), + dict( + type='ObjectRangeFilter', + point_cloud_range=point_cloud_range), + dict( + type='ObjectNameFilter', + classes=class_names), + dict(type='CropResizeFlipImage', data_aug_conf=ida_aug_conf, training=True, debug=False), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict( + type='CustomCollect3D', + keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', + 'ego2global_translation', 'ego2global_rotation', 'lidar2ego_translation', 'lidar2ego_rotation', + 'timestamp', 'mono_input_dict', 'mono_ann_idx', 'aug_param']), + dict(type='DD3DMapper', + is_train=True, + tasks=dict(box2d_on=True, box3d_on=True),) +] +eval_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True, ), + dict(type='CropResizeFlipImage', data_aug_conf=ida_aug_conf_eval, training=False, debug=False), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1600, 640), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='CustomCollect3D', + keys=['img', 'ego2global_translation', 'ego2global_rotation', 'lidar2ego_translation', + 'lidar2ego_rotation', 'timestamp']) + ]) +] + +data = dict( + samples_per_gpu=1, + workers_per_gpu=4, + persistent_workers=True, + train=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_temporal_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + use_valid_flag=True, + box_type_3d='LiDAR', + mono_cfg=dict( + name='nusc_trainval', + data_root='data/nuscenes/', + min_num_lidar_points=3, + min_box_visibility=0.2)), + val=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root='data/nuscenes/', + ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', + pipeline=eval_pipeline, + classes=class_names, + modality=input_modality, + samples_per_gpu=1), + test=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root='data/nuscenes/', + ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', + pipeline=eval_pipeline, + classes=class_names, + modality=input_modality), + shuffler_sampler=dict(type='DistributedGroupSampler'), + nonshuffler_sampler=dict(type='DistributedSampler')) +evaluation = dict(interval=4, pipeline=eval_pipeline) + +# model +load_from = './ckpts/fcos_r50_coco_2mmdet.pth' +plugin = True +plugin_dir = 'projects/mmdet3d_plugin/' +_dim_ = 256 +_pos_dim_ = 128 +_ffn_dim_ = 512 +_num_levels_ = 4 +_num_mono_levels_ = 5 + +model = dict( + type='BEVFormerV2', + use_grid_mask=True, + video_test_mode=False, + num_levels=_num_levels_, + num_mono_levels=_num_mono_levels_, + mono_loss_weight=1.0, + 
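+    # Sanity note on the BEV grid: bev_h_ x bev_w_ = 200 x 200 cells span the
+    # 102.4 m x/y extent of point_cloud_range, i.e. 102.4 / 200 = 0.512 m per
+    # cell, which is exactly what voxel_size encodes above.
+    # group_detr=11 follows Group DETR: during training the decoder keeps 11
+    # independent groups of queries with one-to-one matching inside each
+    # group; only a single group is used at inference, so decoding stays
+    # NMS-free.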
frames=frames, + img_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=-1, + norm_cfg=dict(type='SyncBN'), + norm_eval=False, + style='caffe'), + img_neck=dict( + type='FPN', + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=_num_mono_levels_, + relu_before_extra_convs=True), + pts_bbox_head=dict( + type='BEVFormerHead_GroupDETR', + group_detr=group_detr, + bev_h=bev_h_, + bev_w=bev_w_, + num_query=900, + num_classes=10, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + transformer=dict( + type='PerceptionTransformerV2', + embed_dims=_dim_, + frames=frames, + encoder=dict( + type='BEVFormerEncoder', + num_layers=6, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type='BEVFormerLayer', + attn_cfgs=[ + dict( + type='TemporalSelfAttention', + embed_dims=_dim_, + num_levels=1), + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=8, + num_levels=4), + embed_dims=_dim_) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm'))), + decoder=dict( + type='DetectionTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='GroupMultiheadAttention', + group=group_detr, + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + dict( + type='CustomMSDeformableAttention', + embed_dims=_dim_, + num_levels=1) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')))), + bbox_coder=dict( + type='NMSFreeCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=10), + positional_encoding=dict( + type='LearnedPositionalEncoding', + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='SmoothL1Loss', loss_weight=0.75, beta=1.0), + loss_iou=dict(type='GIoULoss', loss_weight=0.0)), + fcos3d_bbox_head=dict( + type='NuscenesDD3D', + num_classes=10, + in_channels=_dim_, + strides=[8, 16, 32, 64, 128], + box3d_on=True, + feature_locations_offset='none', + fcos2d_cfg=dict( + num_cls_convs=4, + num_box_convs=4, + norm='SyncBN', + use_deformable=False, + use_scale=True, + box2d_scale_init_factor=1.0), + fcos2d_loss_cfg=dict( + focal_loss_alpha=0.25, focal_loss_gamma=2.0, loc_loss_type='giou'), + fcos3d_cfg=dict( + num_convs=4, + norm='SyncBN', + use_scale=True, + depth_scale_init_factor=0.3, + proj_ctr_scale_init_factor=1.0, + use_per_level_predictors=False, + class_agnostic=False, + use_deformable=False, + mean_depth_per_level=[44.921, 20.252, 11.712, 7.166, 8.548], + std_depth_per_level=[24.331, 9.833, 6.223, 4.611, 8.275]), + fcos3d_loss_cfg=dict( + min_depth=0.1, + max_depth=80.0, + box3d_loss_weight=2.0, + conf3d_loss_weight=1.0, + conf_3d_temperature=1.0, + smooth_l1_loss_beta=0.05, + max_loss_per_group=20, + predict_allocentric_rot=True, + scale_depth_by_focal_lengths=True, + scale_depth_by_focal_lengths_factor=500.0, + class_agnostic=False, + predict_distance=False, + 
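+            # Note: the HungarianAssigner3D costs in train_cfg at the end of
+            # this model dict (FocalLossCost 2.0 / SmoothL1Cost 0.75 /
+            # IoUCost 0.0) mirror the loss_cls / loss_bbox / loss_iou weights
+            # of pts_bbox_head, so label assignment optimises the same
+            # weighted objective the losses train.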
canon_box_sizes=[[2.3524184, 0.5062202, 1.0413622], + [0.61416006, 1.7016163, 1.3054738], + [2.9139307, 10.725025, 3.2832346], + [1.9751819, 4.641267, 1.74352], + [2.772134, 6.565072, 3.2474296], + [0.7800532, 2.138673, 1.4437162], + [0.6667362, 0.7181772, 1.7616143], + [0.40246472, 0.4027083, 1.0084083], + [3.0059454, 12.8197, 4.1213827], + [2.4986045, 6.9310856, 2.8382742]]), + target_assign_cfg=dict( + center_sample=True, + pos_radius=1.5, + sizes_of_interest=((-1, 64), (64, 128), (128, 256), (256, 512), + (512, 100000000.0))), + nusc_loss_weight=dict(attr_loss_weight=0.2, speed_loss_weight=0.2)), + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='SmoothL1Cost', weight=0.75), + iou_cost=dict(type='IoUCost', weight=0.0), + pc_range=point_cloud_range)))) + +# optimizer +optimizer = dict( + type='AdamW', + lr=4e-4, + paramwise_cfg=dict( + custom_keys=dict( + img_backbone=dict(lr_mult=0.5), + )), + weight_decay=0.01) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=2000, + warmup_ratio=1.0 / 3, + step=[20, ]) +total_epochs = 24 +runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) diff --git a/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t8-24ep.py b/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t8-24ep.py new file mode 100644 index 0000000..76cca1e --- /dev/null +++ b/adzoo/bevformer/configs/bevformerv2/bevformerv2-r50-t8-24ep.py @@ -0,0 +1,361 @@ +# mAP: 0.4600 +# mATE: 0.6185 +# mASE: 0.2815 +# mAOE: 0.3660 +# mAVE: 0.3157 +# mAAE: 0.1902 +# NDS: 0.5528 +_base_ = [ + '../_base_/default_runtime.py' +] +# Dataset +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0] +# For nuScenes we usually do 10-class detection +class_names = [ + 'barrier', 'bicycle', 'bus', 'car', 'construction_vehicle', 'motorcycle', + 'pedestrian', 'traffic_cone', 'trailer', 'truck' +] +dataset_type = 'CustomNuScenesDatasetV2' +data_root = 'data/nuscenes/' +# Input modality for nuScenes dataset, this is consistent with the submission +# format which requires the information in input_modality. +input_modality = dict( + use_lidar=False, + use_camera=True, + use_radar=False, + use_map=False, + use_external=False) +img_norm_cfg = dict(mean=[103.53, 116.28, 123.675], std=[1, 1, 1], to_rgb=False) +bev_h_ = 200 +bev_w_ = 200 +frames = (-7,-6,-5,-4,-3,-2,-1,0) +group_detr = 11 +voxel_size = [102.4 / bev_h_, 102.4 / bev_w_, 8] +ida_aug_conf = { + "reisze": [512, 544, 576, 608, 640, 672, 704, 736, 768], # (0.8, 1.2) + "crop": (0, 260, 1600, 900), + "H": 900, + "W": 1600, + "rand_flip": True, +} +ida_aug_conf_eval = { + "reisze": [640, ], + "crop": (0, 260, 1600, 900), + "H": 900, + "W": 1600, + "rand_flip": False, +} +# file_client_args = dict(backend='disk') +# Uncomment the following if use ceph or other file clients. +# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient +# for more details. 
+# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/nuscenes/': 's3://nuscenes/nuscenes/', +# 'data/nuscenes/': 's3://nuscenes/nuscenes/' +# })) +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='PhotoMetricDistortionMultiViewImage'), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), + dict(type='GlobalRotScaleTransImage', + rot_range=[-22.5, 22.5], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0], + reverse_angle=True, + training=True, + flip_dx_ratio=0.5, + flip_dy_ratio=0.5, + only_gt=True,), + dict( + type='ObjectRangeFilter', + point_cloud_range=point_cloud_range), + dict( + type='ObjectNameFilter', + classes=class_names), + dict(type='CropResizeFlipImage', data_aug_conf=ida_aug_conf, training=True, debug=False), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict( + type='CustomCollect3D', + keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', + 'ego2global_translation', 'ego2global_rotation', 'lidar2ego_translation', 'lidar2ego_rotation', + 'timestamp', 'mono_input_dict', 'mono_ann_idx', 'aug_param']), + dict(type='DD3DMapper', + is_train=True, + tasks=dict(box2d_on=True, box3d_on=True),) +] +eval_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True, ), + dict(type='CropResizeFlipImage', data_aug_conf=ida_aug_conf_eval, training=False, debug=False), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1600, 640), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='CustomCollect3D', + keys=['img', 'ego2global_translation', 'ego2global_rotation', 'lidar2ego_translation', + 'lidar2ego_rotation', 'timestamp']) + ]) +] + +data = dict( + samples_per_gpu=1, + workers_per_gpu=4, + persistent_workers=True, + train=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_temporal_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + use_valid_flag=True, + box_type_3d='LiDAR', + mono_cfg=dict( + name='nusc_trainval', + data_root='data/nuscenes/', + min_num_lidar_points=3, + min_box_visibility=0.2)), + val=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root='data/nuscenes/', + ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', + pipeline=eval_pipeline, + classes=class_names, + modality=input_modality, + samples_per_gpu=1), + test=dict( + type='CustomNuScenesDatasetV2', + frames=frames, + data_root='data/nuscenes/', + ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', + pipeline=eval_pipeline, + classes=class_names, + modality=input_modality), + shuffler_sampler=dict(type='DistributedGroupSampler'), + nonshuffler_sampler=dict(type='DistributedSampler')) +evaluation = dict(interval=4, pipeline=eval_pipeline) + +# model +load_from = './ckpts/fcos_r50_coco_2mmdet.pth' +plugin = True +plugin_dir = 'projects/mmdet3d_plugin/' +_dim_ = 256 +_pos_dim_ = 128 +_ffn_dim_ = 512 +_num_levels_ = 4 +_num_mono_levels_ = 5 + +model = dict( + type='BEVFormerV2', + use_grid_mask=True, + video_test_mode=False, + num_levels=_num_levels_, + num_mono_levels=_num_mono_levels_, + mono_loss_weight=1.0, + 
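+    # frames = (-7, ..., 0): this "t8" variant fuses the current sample with
+    # the seven preceding ones (vs. two frames in the t2 configs); the
+    # transformer's inter_channels=_dim_*2 below appears to size the layers
+    # that merge this longer temporal window.
+    # The FPN emits _num_mono_levels_=5 scales: the DD3D mono head consumes
+    # all five (strides 8-128), while the BEV encoder samples only the first
+    # _num_levels_=4.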
frames=frames, + img_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=-1, + norm_cfg=dict(type='SyncBN'), + norm_eval=False, + style='caffe'), + img_neck=dict( + type='FPN', + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=_num_mono_levels_, + relu_before_extra_convs=True), + pts_bbox_head=dict( + type='BEVFormerHead_GroupDETR', + group_detr=group_detr, + bev_h=bev_h_, + bev_w=bev_w_, + num_query=900, + num_classes=10, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + transformer=dict( + type='PerceptionTransformerV2', + embed_dims=_dim_, + frames=frames, + inter_channels=_dim_*2, + encoder=dict( + type='BEVFormerEncoder', + num_layers=6, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type='BEVFormerLayer', + attn_cfgs=[ + dict( + type='TemporalSelfAttention', + embed_dims=_dim_, + num_levels=1), + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=8, + num_levels=4), + embed_dims=_dim_) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm'))), + decoder=dict( + type='DetectionTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='GroupMultiheadAttention', + group=group_detr, + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + dict( + type='CustomMSDeformableAttention', + embed_dims=_dim_, + num_levels=1) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')))), + bbox_coder=dict( + type='NMSFreeCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=10), + positional_encoding=dict( + type='LearnedPositionalEncoding', + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='SmoothL1Loss', loss_weight=0.75, beta=1.0), + loss_iou=dict(type='GIoULoss', loss_weight=0.0)), + fcos3d_bbox_head=dict( + type='NuscenesDD3D', + num_classes=10, + in_channels=_dim_, + strides=[8, 16, 32, 64, 128], + box3d_on=True, + feature_locations_offset='none', + fcos2d_cfg=dict( + num_cls_convs=4, + num_box_convs=4, + norm='SyncBN', + use_deformable=False, + use_scale=True, + box2d_scale_init_factor=1.0), + fcos2d_loss_cfg=dict( + focal_loss_alpha=0.25, focal_loss_gamma=2.0, loc_loss_type='giou'), + fcos3d_cfg=dict( + num_convs=4, + norm='SyncBN', + use_scale=True, + depth_scale_init_factor=0.3, + proj_ctr_scale_init_factor=1.0, + use_per_level_predictors=False, + class_agnostic=False, + use_deformable=False, + mean_depth_per_level=[44.921, 20.252, 11.712, 7.166, 8.548], + std_depth_per_level=[24.331, 9.833, 6.223, 4.611, 8.275]), + fcos3d_loss_cfg=dict( + min_depth=0.1, + max_depth=80.0, + box3d_loss_weight=2.0, + conf3d_loss_weight=1.0, + conf_3d_temperature=1.0, + smooth_l1_loss_beta=0.05, + max_loss_per_group=20, + predict_allocentric_rot=True, + scale_depth_by_focal_lengths=True, + scale_depth_by_focal_lengths_factor=500.0, + class_agnostic=False, + predict_distance=False, 
+ canon_box_sizes=[[2.3524184, 0.5062202, 1.0413622], + [0.61416006, 1.7016163, 1.3054738], + [2.9139307, 10.725025, 3.2832346], + [1.9751819, 4.641267, 1.74352], + [2.772134, 6.565072, 3.2474296], + [0.7800532, 2.138673, 1.4437162], + [0.6667362, 0.7181772, 1.7616143], + [0.40246472, 0.4027083, 1.0084083], + [3.0059454, 12.8197, 4.1213827], + [2.4986045, 6.9310856, 2.8382742]]), + target_assign_cfg=dict( + center_sample=True, + pos_radius=1.5, + sizes_of_interest=((-1, 64), (64, 128), (128, 256), (256, 512), + (512, 100000000.0))), + nusc_loss_weight=dict(attr_loss_weight=0.2, speed_loss_weight=0.2)), + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='SmoothL1Cost', weight=0.75), + iou_cost=dict(type='IoUCost', weight=0.0), + pc_range=point_cloud_range)))) + +# optimizer +optimizer = dict( + type='AdamW', + lr=4e-4, + paramwise_cfg=dict( + custom_keys=dict( + img_backbone=dict(lr_mult=0.5), + )), + weight_decay=0.01) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=2000, + warmup_ratio=1.0 / 3, + step=[20, ]) +total_epochs = 24 +runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) diff --git a/adzoo/bevformer/configs/datasets/custom_lyft-3d.py b/adzoo/bevformer/configs/datasets/custom_lyft-3d.py new file mode 100644 index 0000000..5a95d89 --- /dev/null +++ b/adzoo/bevformer/configs/datasets/custom_lyft-3d.py @@ -0,0 +1,136 @@ +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-80, -80, -5, 80, 80, 3] +# For Lyft we usually do 9-class detection +class_names = [ + 'car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', 'motorcycle', + 'bicycle', 'pedestrian', 'animal' +] +dataset_type = 'CustomLyftDataset' +data_root = 'data/lyft/' +# Input modality for Lyft dataset, this is consistent with the submission +# format which requires the information in input_modality. +input_modality = dict( + use_lidar=True, + use_camera=False, + use_radar=False, + use_map=False, + use_external=True) +file_client_args = dict(backend='disk') +# Uncomment the following if use ceph or other file clients. +# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient +# for more details. 
+# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/lyft/': 's3://lyft/lyft/', +# 'data/lyft/': 's3://lyft/lyft/' +# })) +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'lyft_infos_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'lyft_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'lyft_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True)) +# For Lyft dataset, we usually evaluate the model at the end of training. +# Since the models are trained by 24 epochs by default, we set evaluation +# interval to be 24. Please change the interval accordingly if you do not +# use a default schedule. 
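+# (e.g. with a 12-epoch schedule you would instead set
+# evaluation = dict(interval=12, pipeline=eval_pipeline))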
+evaluation = dict(interval=24, pipeline=eval_pipeline) \ No newline at end of file diff --git a/adzoo/bevformer/configs/datasets/custom_nus-3d.py b/adzoo/bevformer/configs/datasets/custom_nus-3d.py new file mode 100644 index 0000000..af81f9b --- /dev/null +++ b/adzoo/bevformer/configs/datasets/custom_nus-3d.py @@ -0,0 +1,141 @@ +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-50, -50, -5, 50, 50, 3] +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', + 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier' +] +dataset_type = 'NuScenesDataset_eval_modified' +data_root = 'data/nuscenes/' +# Input modality for nuScenes dataset, this is consistent with the submission +# format which requires the information in input_modality. +input_modality = dict( + use_lidar=True, + use_camera=False, + use_radar=False, + use_map=False, + use_external=False) +file_client_args = dict(backend='disk') +# Uncomment the following if use ceph or other file clients. +# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient +# for more details. +# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/nuscenes/': 's3://nuscenes/nuscenes/', +# 'data/nuscenes/': 's3://nuscenes/nuscenes/' +# })) +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. 
client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR'), + val=dict( + type=dataset_type, + ann_file=data_root + 'nuscenes_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True, + box_type_3d='LiDAR'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True, + box_type_3d='LiDAR')) +# For nuScenes dataset, we usually evaluate the model at the end of training. +# Since the models are trained by 24 epochs by default, we set evaluation +# interval to be 24. Please change the interval accordingly if you do not +# use a default schedule. +evaluation = dict(interval=24, pipeline=eval_pipeline) diff --git a/adzoo/bevformer/configs/datasets/custom_waymo-3d.py b/adzoo/bevformer/configs/datasets/custom_waymo-3d.py new file mode 100644 index 0000000..4100e13 --- /dev/null +++ b/adzoo/bevformer/configs/datasets/custom_waymo-3d.py @@ -0,0 +1,112 @@ +# dataset settings +# D5 in the config name means the whole dataset is divided into 5 folds +# We only use one fold for efficient experiments +dataset_type = 'CustomWaymoDataset' +data_root = 'data/waymo/kitti_format/' +file_client_args = dict(backend='disk') +# Uncomment the following if use ceph or other file clients. +# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient +# for more details. 
+# file_client_args = dict( +# backend='petrel', path_mapping=dict(data='s3://waymo_data/')) + +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +class_names = ['Car', 'Pedestrian', 'Cyclist'] +point_cloud_range = [-74.88, -74.88, -2, 74.88, 74.88, 4] +input_modality = dict(use_lidar=False, use_camera=True) +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'waymo_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict(Car=5, Pedestrian=10, Cyclist=10)), + classes=class_names, + sample_groups=dict(Car=15, Pedestrian=10, Cyclist=10), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=[0, 1, 2, 3, 4], + file_client_args=file_client_args)) + + + +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='PhotoMetricDistortionMultiViewImage'), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img']) +] + + +test_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1920, 1280), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='CustomCollect3D', keys=['img']) + ]) +] + + +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) + +data = dict( + samples_per_gpu=2, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'waymo_infos_train.pkl', + split='training', + pipeline=train_pipeline, + modality=input_modality, + classes=class_names, + test_mode=False, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR', + # load one frame every five frames + load_interval=5)), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'waymo_infos_val.pkl', + split='training', + pipeline=test_pipeline, + modality=input_modality, + classes=class_names, + test_mode=True, + box_type_3d='LiDAR'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'waymo_infos_val.pkl', + split='training', + pipeline=test_pipeline, + modality=input_modality, + classes=class_names, + test_mode=True, + box_type_3d='LiDAR')) + +evaluation = dict(interval=24, pipeline=test_pipeline) \ No newline at end of file diff --git a/adzoo/bevformer/create_data.py b/adzoo/bevformer/create_data.py new file mode 100755 index 0000000..f2b0cc1 --- /dev/null +++ b/adzoo/bevformer/create_data.py @@ -0,0 +1,305 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. 
+# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- +from data_converter.create_gt_database import create_groundtruth_database +from data_converter import nuscenes_converter as nuscenes_converter +from data_converter import lyft_converter as lyft_converter +from data_converter import kitti_converter as kitti +from data_converter import indoor_converter as indoor +import argparse +from os import path as osp +import sys +sys.path.append('.') + + +def kitti_data_prep(root_path, info_prefix, version, out_dir): + """Prepare data related to Kitti dataset. + + Related data consists of '.pkl' files recording basic infos, + 2D annotations and groundtruth database. + + Args: + root_path (str): Path of dataset root. + info_prefix (str): The prefix of info filenames. + version (str): Dataset version. + out_dir (str): Output directory of the groundtruth database info. + """ + kitti.create_kitti_info_file(root_path, info_prefix) + kitti.create_reduced_point_cloud(root_path, info_prefix) + + info_train_path = osp.join(root_path, f'{info_prefix}_infos_train.pkl') + info_val_path = osp.join(root_path, f'{info_prefix}_infos_val.pkl') + info_trainval_path = osp.join(root_path, + f'{info_prefix}_infos_trainval.pkl') + info_test_path = osp.join(root_path, f'{info_prefix}_infos_test.pkl') + kitti.export_2d_annotation(root_path, info_train_path) + kitti.export_2d_annotation(root_path, info_val_path) + kitti.export_2d_annotation(root_path, info_trainval_path) + kitti.export_2d_annotation(root_path, info_test_path) + + create_groundtruth_database( + 'KittiDataset', + root_path, + info_prefix, + f'{out_dir}/{info_prefix}_infos_train.pkl', + relative_path=False, + mask_anno_path='instances_train.json', + with_mask=(version == 'mask')) + + +def nuscenes_data_prep(root_path, + can_bus_root_path, + info_prefix, + version, + dataset_name, + out_dir, + max_sweeps=10): + """Prepare data related to nuScenes dataset. + + Related data consists of '.pkl' files recording basic infos, + 2D annotations and groundtruth database. + + Args: + root_path (str): Path of dataset root. + info_prefix (str): The prefix of info filenames. + version (str): Dataset version. + dataset_name (str): The dataset class name. + out_dir (str): Output directory of the groundtruth database info. + max_sweeps (int): Number of input consecutive frames. Default: 10 + """ + nuscenes_converter.create_nuscenes_infos( + root_path, out_dir, can_bus_root_path, info_prefix, version=version, max_sweeps=max_sweeps) + + if version == 'v1.0-test': + info_test_path = osp.join( + out_dir, f'{info_prefix}_infos_temporal_test.pkl') + nuscenes_converter.export_2d_annotation( + root_path, info_test_path, version=version) + else: + info_train_path = osp.join( + out_dir, f'{info_prefix}_infos_temporal_train.pkl') + info_val_path = osp.join( + out_dir, f'{info_prefix}_infos_temporal_val.pkl') + nuscenes_converter.export_2d_annotation( + root_path, info_train_path, version=version) + nuscenes_converter.export_2d_annotation( + root_path, info_val_path, version=version) + # create_groundtruth_database(dataset_name, root_path, info_prefix, + # f'{out_dir}/{info_prefix}_infos_train.pkl') + + +def lyft_data_prep(root_path, info_prefix, version, max_sweeps=10): + """Prepare data related to Lyft dataset. + + Related data consists of '.pkl' files recording basic infos. + Although the ground truth database and 2D annotations are not used in + Lyft, it can also be generated like nuScenes. 
+ + Args: + root_path (str): Path of dataset root. + info_prefix (str): The prefix of info filenames. + version (str): Dataset version. + max_sweeps (int, optional): Number of input consecutive frames. + Defaults to 10. + """ + lyft_converter.create_lyft_infos( + root_path, info_prefix, version=version, max_sweeps=max_sweeps) + + +def scannet_data_prep(root_path, info_prefix, out_dir, workers): + """Prepare the info file for scannet dataset. + + Args: + root_path (str): Path of dataset root. + info_prefix (str): The prefix of info filenames. + out_dir (str): Output directory of the generated info file. + workers (int): Number of threads to be used. + """ + indoor.create_indoor_info_file( + root_path, info_prefix, out_dir, workers=workers) + + +def s3dis_data_prep(root_path, info_prefix, out_dir, workers): + """Prepare the info file for s3dis dataset. + + Args: + root_path (str): Path of dataset root. + info_prefix (str): The prefix of info filenames. + out_dir (str): Output directory of the generated info file. + workers (int): Number of threads to be used. + """ + indoor.create_indoor_info_file( + root_path, info_prefix, out_dir, workers=workers) + + +def sunrgbd_data_prep(root_path, info_prefix, out_dir, workers): + """Prepare the info file for sunrgbd dataset. + + Args: + root_path (str): Path of dataset root. + info_prefix (str): The prefix of info filenames. + out_dir (str): Output directory of the generated info file. + workers (int): Number of threads to be used. + """ + indoor.create_indoor_info_file( + root_path, info_prefix, out_dir, workers=workers) + + +def waymo_data_prep(root_path, + info_prefix, + version, + out_dir, + workers, + max_sweeps=5): + """Prepare the info file for waymo dataset. + + Args: + root_path (str): Path of dataset root. + info_prefix (str): The prefix of info filenames. + out_dir (str): Output directory of the generated info file. + workers (int): Number of threads to be used. + max_sweeps (int): Number of input consecutive frames. Default: 5 \ + Here we store pose information of these frames for later use. 
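+
+    Example (illustrative invocation; adjust the paths to your layout):
+        python create_data.py waymo --root-path ./data/waymo \
+            --out-dir ./data/waymo --extra-tag waymo --workers 128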
+ """ + from tools.data_converter import waymo_converter as waymo + + splits = ['training', 'validation', 'testing'] + + for i, split in enumerate(splits): + load_dir = osp.join(root_path, 'waymo_format', split) + if split == 'validation': + save_dir = osp.join(out_dir, 'kitti_format', 'training') + else: + save_dir = osp.join(out_dir, 'kitti_format', split) + converter = waymo.Waymo2KITTI( + load_dir, + save_dir, + prefix=str(i), + workers=workers, + test_mode=(split == 'test')) + converter.convert() + # Generate waymo infos + out_dir = osp.join(out_dir, 'kitti_format') + kitti.create_waymo_info_file(out_dir, info_prefix, max_sweeps=max_sweeps) + + create_groundtruth_database( + 'WaymoDataset', + out_dir, + info_prefix, + f'{out_dir}/{info_prefix}_infos_train.pkl', + relative_path=False, + with_mask=False) + + +parser = argparse.ArgumentParser(description='Data converter arg parser') +parser.add_argument('dataset', metavar='kitti', help='name of the dataset') +parser.add_argument( + '--root-path', + type=str, + default='./data/kitti', + help='specify the root path of dataset') +parser.add_argument( + '--canbus', + type=str, + default='./data', + help='specify the root path of nuScenes canbus') +parser.add_argument( + '--version', + type=str, + default='v1.0', + required=False, + help='specify the dataset version, no need for kitti') +parser.add_argument( + '--max-sweeps', + type=int, + default=10, + required=False, + help='specify sweeps of lidar per example') +parser.add_argument( + '--out-dir', + type=str, + default='./data/kitti', + required='False', + help='name of info pkl') +parser.add_argument('--extra-tag', type=str, default='kitti') +parser.add_argument( + '--workers', type=int, default=4, help='number of threads to be used') +args = parser.parse_args() + +if __name__ == '__main__': + if args.dataset == 'kitti': + kitti_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + version=args.version, + out_dir=args.out_dir) + elif args.dataset == 'nuscenes' and args.version != 'v1.0-mini': + train_version = f'{args.version}-trainval' + nuscenes_data_prep( + root_path=args.root_path, + can_bus_root_path=args.canbus, + info_prefix=args.extra_tag, + version=train_version, + dataset_name='NuScenesDataset', + out_dir=args.out_dir, + max_sweeps=args.max_sweeps) + test_version = f'{args.version}-test' + nuscenes_data_prep( + root_path=args.root_path, + can_bus_root_path=args.canbus, + info_prefix=args.extra_tag, + version=test_version, + dataset_name='NuScenesDataset', + out_dir=args.out_dir, + max_sweeps=args.max_sweeps) + elif args.dataset == 'nuscenes' and args.version == 'v1.0-mini': + train_version = f'{args.version}' + nuscenes_data_prep( + root_path=args.root_path, + can_bus_root_path=args.canbus, + info_prefix=args.extra_tag, + version=train_version, + dataset_name='NuScenesDataset', + out_dir=args.out_dir, + max_sweeps=args.max_sweeps) + elif args.dataset == 'lyft': + train_version = f'{args.version}-train' + lyft_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + version=train_version, + max_sweeps=args.max_sweeps) + test_version = f'{args.version}-test' + lyft_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + version=test_version, + max_sweeps=args.max_sweeps) + elif args.dataset == 'waymo': + waymo_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + version=args.version, + out_dir=args.out_dir, + workers=args.workers, + max_sweeps=args.max_sweeps) + elif args.dataset == 'scannet': + scannet_data_prep( + 
root_path=args.root_path, + info_prefix=args.extra_tag, + out_dir=args.out_dir, + workers=args.workers) + elif args.dataset == 's3dis': + s3dis_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + out_dir=args.out_dir, + workers=args.workers) + elif args.dataset == 'sunrgbd': + sunrgbd_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + out_dir=args.out_dir, + workers=args.workers) diff --git a/adzoo/bevformer/data_converter/__init__.py b/adzoo/bevformer/data_converter/__init__.py new file mode 100755 index 0000000..ef101fe --- /dev/null +++ b/adzoo/bevformer/data_converter/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/adzoo/bevformer/data_converter/create_gt_database.py b/adzoo/bevformer/data_converter/create_gt_database.py new file mode 100755 index 0000000..6be53ec --- /dev/null +++ b/adzoo/bevformer/data_converter/create_gt_database.py @@ -0,0 +1,338 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import numpy as np +import pickle +from mmcv import track_iter_progress +from mmcv.ops import roi_align +from os import path as osp +from pycocotools import mask as maskUtils +from pycocotools.coco import COCO + +from mmcv.core.bbox import box_np_ops as box_np_ops +from mmcv.datasets import build_dataset +from mmcv.core.evaluation.bbox_overlaps import bbox_overlaps + + +def _poly2mask(mask_ann, img_h, img_w): + if isinstance(mask_ann, list): + # polygon -- a single object might consist of multiple parts + # we merge all parts into one mask rle code + rles = maskUtils.frPyObjects(mask_ann, img_h, img_w) + rle = maskUtils.merge(rles) + elif isinstance(mask_ann['counts'], list): + # uncompressed RLE + rle = maskUtils.frPyObjects(mask_ann, img_h, img_w) + else: + # rle + rle = mask_ann + mask = maskUtils.decode(rle) + return mask + + +def _parse_coco_ann_info(ann_info): + gt_bboxes = [] + gt_labels = [] + gt_bboxes_ignore = [] + gt_masks_ann = [] + + for i, ann in enumerate(ann_info): + if ann.get('ignore', False): + continue + x1, y1, w, h = ann['bbox'] + if ann['area'] <= 0: + continue + bbox = [x1, y1, x1 + w, y1 + h] + if ann.get('iscrowd', False): + gt_bboxes_ignore.append(bbox) + else: + gt_bboxes.append(bbox) + gt_masks_ann.append(ann['segmentation']) + + if gt_bboxes: + gt_bboxes = np.array(gt_bboxes, dtype=np.float32) + gt_labels = np.array(gt_labels, dtype=np.int64) + else: + gt_bboxes = np.zeros((0, 4), dtype=np.float32) + gt_labels = np.array([], dtype=np.int64) + + if gt_bboxes_ignore: + gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) + else: + gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) + + ann = dict( + bboxes=gt_bboxes, bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann) + + return ann + + +def crop_image_patch_v2(pos_proposals, pos_assigned_gt_inds, gt_masks): + import torch + from torch.nn.modules.utils import _pair + device = pos_proposals.device + num_pos = pos_proposals.size(0) + fake_inds = ( + torch.arange(num_pos, + device=device).to(dtype=pos_proposals.dtype)[:, None]) + rois = torch.cat([fake_inds, pos_proposals], dim=1) # Nx5 + mask_size = _pair(28) + rois = rois.to(device=device) + gt_masks_th = ( + torch.from_numpy(gt_masks).to(device).index_select( + 0, pos_assigned_gt_inds).to(dtype=rois.dtype)) + # Use RoIAlign could apparently accelerate the training (~0.1s/iter) + targets = ( + roi_align(gt_masks_th, rois, mask_size[::-1], 1.0, 0, True).squeeze(1)) + return targets + + +def crop_image_patch(pos_proposals, gt_masks, pos_assigned_gt_inds, 
org_img): + num_pos = pos_proposals.shape[0] + masks = [] + img_patches = [] + for i in range(num_pos): + gt_mask = gt_masks[pos_assigned_gt_inds[i]] + bbox = pos_proposals[i, :].astype(np.int32) + x1, y1, x2, y2 = bbox + w = np.maximum(x2 - x1 + 1, 1) + h = np.maximum(y2 - y1 + 1, 1) + + mask_patch = gt_mask[y1:y1 + h, x1:x1 + w] + masked_img = gt_mask[..., None] * org_img + img_patch = masked_img[y1:y1 + h, x1:x1 + w] + + img_patches.append(img_patch) + masks.append(mask_patch) + return img_patches, masks + + +def create_groundtruth_database(dataset_class_name, + data_path, + info_prefix, + info_path=None, + mask_anno_path=None, + used_classes=None, + database_save_path=None, + db_info_save_path=None, + relative_path=True, + add_rgb=False, + lidar_only=False, + bev_only=False, + coors_range=None, + with_mask=False): + """Given the raw data, generate the ground truth database. + + Args: + dataset_class_name (str): Name of the input dataset. + data_path (str): Path of the data. + info_prefix (str): Prefix of the info file. + info_path (str): Path of the info file. + Default: None. + mask_anno_path (str): Path of the mask_anno. + Default: None. + used_classes (list[str]): Classes have been used. + Default: None. + database_save_path (str): Path to save database. + Default: None. + db_info_save_path (str): Path to save db_info. + Default: None. + relative_path (bool): Whether to use relative path. + Default: True. + with_mask (bool): Whether to use mask. + Default: False. + """ + print(f'Create GT Database of {dataset_class_name}') + dataset_cfg = dict( + type=dataset_class_name, data_root=data_path, ann_file=info_path) + if dataset_class_name == 'KittiDataset': + file_client_args = dict(backend='disk') + dataset_cfg.update( + test_mode=False, + split='training', + modality=dict( + use_lidar=True, + use_depth=False, + use_lidar_intensity=True, + use_camera=with_mask, + ), + pipeline=[ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + file_client_args=file_client_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + file_client_args=file_client_args) + ]) + + elif dataset_class_name == 'NuScenesDataset': + dataset_cfg.update( + use_valid_flag=True, + pipeline=[ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + use_dim=[0, 1, 2, 3, 4], + pad_empty_sweeps=True, + remove_close=True), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True) + ]) + + elif dataset_class_name == 'WaymoDataset': + file_client_args = dict(backend='disk') + dataset_cfg.update( + test_mode=False, + split='training', + modality=dict( + use_lidar=True, + use_depth=False, + use_lidar_intensity=True, + use_camera=False, + ), + pipeline=[ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + file_client_args=file_client_args) + ]) + + dataset = build_dataset(dataset_cfg) + + if database_save_path is None: + database_save_path = osp.join(data_path, f'{info_prefix}_gt_database') + if db_info_save_path is None: + db_info_save_path = osp.join(data_path, + f'{info_prefix}_dbinfos_train.pkl') + mmcv.mkdir_or_exist(database_save_path) + all_db_infos = dict() + if with_mask: + coco = COCO(osp.join(data_path, mask_anno_path)) + imgIds = coco.getImgIds() + file2id = dict() + for i in imgIds: + 
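+            # build a file_name -> COCO image id map so each image's instance
+            # annotations can be looked up when cropping masked GT patches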
info = coco.loadImgs([i])[0] + file2id.update({info['file_name']: i}) + + group_counter = 0 + for j in track_iter_progress(list(range(len(dataset)))): + input_dict = dataset.get_data_info(j) + dataset.pre_pipeline(input_dict) + example = dataset.pipeline(input_dict) + annos = example['ann_info'] + image_idx = example['sample_idx'] + points = example['points'].tensor.numpy() + gt_boxes_3d = annos['gt_bboxes_3d'].tensor.numpy() + names = annos['gt_names'] + group_dict = dict() + if 'group_ids' in annos: + group_ids = annos['group_ids'] + else: + group_ids = np.arange(gt_boxes_3d.shape[0], dtype=np.int64) + difficulty = np.zeros(gt_boxes_3d.shape[0], dtype=np.int32) + if 'difficulty' in annos: + difficulty = annos['difficulty'] + + num_obj = gt_boxes_3d.shape[0] + point_indices = box_np_ops.points_in_rbbox(points, gt_boxes_3d) + + if with_mask: + # prepare masks + gt_boxes = annos['gt_bboxes'] + img_path = osp.split(example['img_info']['filename'])[-1] + if img_path not in file2id.keys(): + print(f'skip image {img_path} for empty mask') + continue + img_id = file2id[img_path] + kins_annIds = coco.getAnnIds(imgIds=img_id) + kins_raw_info = coco.loadAnns(kins_annIds) + kins_ann_info = _parse_coco_ann_info(kins_raw_info) + h, w = annos['img_shape'][:2] + gt_masks = [ + _poly2mask(mask, h, w) for mask in kins_ann_info['masks'] + ] + # get mask inds based on iou mapping + bbox_iou = bbox_overlaps(kins_ann_info['bboxes'], gt_boxes) + mask_inds = bbox_iou.argmax(axis=0) + valid_inds = (bbox_iou.max(axis=0) > 0.5) + + # mask the image + # use more precise crop when it is ready + # object_img_patches = np.ascontiguousarray( + # np.stack(object_img_patches, axis=0).transpose(0, 3, 1, 2)) + # crop image patches using roi_align + # object_img_patches = crop_image_patch_v2( + # torch.Tensor(gt_boxes), + # torch.Tensor(mask_inds).long(), object_img_patches) + object_img_patches, object_masks = crop_image_patch( + gt_boxes, gt_masks, mask_inds, annos['img']) + + for i in range(num_obj): + filename = f'{image_idx}_{names[i]}_{i}.bin' + abs_filepath = osp.join(database_save_path, filename) + rel_filepath = osp.join(f'{info_prefix}_gt_database', filename) + + # save point clouds and image patches for each object + gt_points = points[point_indices[:, i]] + gt_points[:, :3] -= gt_boxes_3d[i, :3] + + if with_mask: + if object_masks[i].sum() == 0 or not valid_inds[i]: + # Skip object for empty or invalid mask + continue + img_patch_path = abs_filepath + '.png' + mask_patch_path = abs_filepath + '.mask.png' + mmcv.imwrite(object_img_patches[i], img_patch_path) + mmcv.imwrite(object_masks[i], mask_patch_path) + + with open(abs_filepath, 'w') as f: + gt_points.tofile(f) + + if (used_classes is None) or names[i] in used_classes: + db_info = { + 'name': names[i], + 'path': rel_filepath, + 'image_idx': image_idx, + 'gt_idx': i, + 'box3d_lidar': gt_boxes_3d[i], + 'num_points_in_gt': gt_points.shape[0], + 'difficulty': difficulty[i], + } + local_group_id = group_ids[i] + # if local_group_id >= 0: + if local_group_id not in group_dict: + group_dict[local_group_id] = group_counter + group_counter += 1 + db_info['group_id'] = group_dict[local_group_id] + if 'score' in annos: + db_info['score'] = annos['score'][i] + if with_mask: + db_info.update({'box2d_camera': gt_boxes[i]}) + if names[i] in all_db_infos: + all_db_infos[names[i]].append(db_info) + else: + all_db_infos[names[i]] = [db_info] + + for k, v in all_db_infos.items(): + print(f'load {len(v)} {k} database infos') + + with open(db_info_save_path, 'wb') as f: + 
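+        # all_db_infos maps class name -> list of per-instance info dicts;
+        # GT-sampling augmentation (e.g. a db_sampler / ObjectSample step)
+        # loads this pkl at train time to paste extra objects into scenes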
pickle.dump(all_db_infos, f) diff --git a/adzoo/bevformer/data_converter/indoor_converter.py b/adzoo/bevformer/data_converter/indoor_converter.py new file mode 100755 index 0000000..0aa5820 --- /dev/null +++ b/adzoo/bevformer/data_converter/indoor_converter.py @@ -0,0 +1,108 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import numpy as np +import os + +from .s3dis_data_utils import S3DISData, S3DISSegData +from .scannet_data_utils import ScanNetData, ScanNetSegData +from .sunrgbd_data_utils import SUNRGBDData + + +def create_indoor_info_file(data_path, + pkl_prefix='sunrgbd', + save_path=None, + use_v1=False, + workers=4): + """Create indoor information file. + + Get information of the raw data and save it to the pkl file. + + Args: + data_path (str): Path of the data. + pkl_prefix (str): Prefix of the pkl to be saved. Default: 'sunrgbd'. + save_path (str): Path of the pkl to be saved. Default: None. + use_v1 (bool): Whether to use v1. Default: False. + workers (int): Number of threads to be used. Default: 4. + """ + assert os.path.exists(data_path) + assert pkl_prefix in ['sunrgbd', 'scannet', 's3dis'], \ + f'unsupported indoor dataset {pkl_prefix}' + save_path = data_path if save_path is None else save_path + assert os.path.exists(save_path) + + # generate infos for both detection and segmentation task + if pkl_prefix in ['sunrgbd', 'scannet']: + train_filename = os.path.join(save_path, + f'{pkl_prefix}_infos_train.pkl') + val_filename = os.path.join(save_path, f'{pkl_prefix}_infos_val.pkl') + if pkl_prefix == 'sunrgbd': + # SUN RGB-D has a train-val split + train_dataset = SUNRGBDData( + root_path=data_path, split='train', use_v1=use_v1) + val_dataset = SUNRGBDData( + root_path=data_path, split='val', use_v1=use_v1) + else: + # ScanNet has a train-val-test split + train_dataset = ScanNetData(root_path=data_path, split='train') + val_dataset = ScanNetData(root_path=data_path, split='val') + test_dataset = ScanNetData(root_path=data_path, split='test') + test_filename = os.path.join(save_path, + f'{pkl_prefix}_infos_test.pkl') + + infos_train = train_dataset.get_infos( + num_workers=workers, has_label=True) + mmcv.dump(infos_train, train_filename, 'pkl') + print(f'{pkl_prefix} info train file is saved to {train_filename}') + + infos_val = val_dataset.get_infos(num_workers=workers, has_label=True) + mmcv.dump(infos_val, val_filename, 'pkl') + print(f'{pkl_prefix} info val file is saved to {val_filename}') + + if pkl_prefix == 'scannet': + infos_test = test_dataset.get_infos( + num_workers=workers, has_label=False) + mmcv.dump(infos_test, test_filename, 'pkl') + print(f'{pkl_prefix} info test file is saved to {test_filename}') + + # generate infos for the semantic segmentation task + # e.g. re-sampled scene indexes and label weights + # scene indexes are used to re-sample rooms with different number of points + # label weights are used to balance classes with different number of points + if pkl_prefix == 'scannet': + # label weight computation function is adopted from + # https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24 + train_dataset = ScanNetSegData( + data_root=data_path, + ann_file=train_filename, + split='train', + num_points=8192, + label_weight_func=lambda x: 1.0 / np.log(1.2 + x)) + # TODO: do we need to generate on val set? 
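+        # the weight 1 / log(1.2 + x) decreases as x grows
+        # (x=1 -> ~1.27, x=100 -> ~0.22), so point-sparse classes receive
+        # larger weights in the segmentation loss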
+ val_dataset = ScanNetSegData( + data_root=data_path, + ann_file=val_filename, + split='val', + num_points=8192, + label_weight_func=lambda x: 1.0 / np.log(1.2 + x)) + # no need to generate for test set + train_dataset.get_seg_infos() + val_dataset.get_seg_infos() + elif pkl_prefix == 's3dis': + # S3DIS doesn't have a fixed train-val split + # it has 6 areas instead, so we generate info file for each of them + # in training, we will use dataset to wrap different areas + splits = [f'Area_{i}' for i in [1, 2, 3, 4, 5, 6]] + for split in splits: + dataset = S3DISData(root_path=data_path, split=split) + info = dataset.get_infos(num_workers=workers, has_label=True) + filename = os.path.join(save_path, + f'{pkl_prefix}_infos_{split}.pkl') + mmcv.dump(info, filename, 'pkl') + print(f'{pkl_prefix} info {split} file is saved to {filename}') + seg_dataset = S3DISSegData( + data_root=data_path, + ann_file=filename, + split=split, + num_points=4096, + label_weight_func=lambda x: 1.0 / np.log(1.2 + x)) + seg_dataset.get_seg_infos() diff --git a/adzoo/bevformer/data_converter/kitti_converter.py b/adzoo/bevformer/data_converter/kitti_converter.py new file mode 100755 index 0000000..6ac2cef --- /dev/null +++ b/adzoo/bevformer/data_converter/kitti_converter.py @@ -0,0 +1,546 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import numpy as np +from collections import OrderedDict +from nuscenes.utils.geometry_utils import view_points +from pathlib import Path + +from mmcv.core.bbox import box_np_ops +from .kitti_data_utils import get_kitti_image_info, get_waymo_image_info +from .nuscenes_converter import post_process_coords + +kitti_categories = ('Pedestrian', 'Cyclist', 'Car') + + +def convert_to_kitti_info_version2(info): + """convert kitti info v1 to v2 if possible. + + Args: + info (dict): Info of the input kitti data. 
+ - image (dict): image info + - calib (dict): calibration info + - point_cloud (dict): point cloud info + """ + if 'image' not in info or 'calib' not in info or 'point_cloud' not in info: + info['image'] = { + 'image_shape': info['img_shape'], + 'image_idx': info['image_idx'], + 'image_path': info['img_path'], + } + info['calib'] = { + 'R0_rect': info['calib/R0_rect'], + 'Tr_velo_to_cam': info['calib/Tr_velo_to_cam'], + 'P2': info['calib/P2'], + } + info['point_cloud'] = { + 'velodyne_path': info['velodyne_path'], + } + + +def _read_imageset_file(path): + with open(path, 'r') as f: + lines = f.readlines() + return [int(line) for line in lines] + + +def _calculate_num_points_in_gt(data_path, + infos, + relative_path, + remove_outside=True, + num_features=4): + for info in mmcv.track_iter_progress(infos): + pc_info = info['point_cloud'] + image_info = info['image'] + calib = info['calib'] + if relative_path: + v_path = str(Path(data_path) / pc_info['velodyne_path']) + else: + v_path = pc_info['velodyne_path'] + points_v = np.fromfile( + v_path, dtype=np.float32, count=-1).reshape([-1, num_features]) + rect = calib['R0_rect'] + Trv2c = calib['Tr_velo_to_cam'] + P2 = calib['P2'] + if remove_outside: + points_v = box_np_ops.remove_outside_points( + points_v, rect, Trv2c, P2, image_info['image_shape']) + + # points_v = points_v[points_v[:, 0] > 0] + annos = info['annos'] + num_obj = len([n for n in annos['name'] if n != 'DontCare']) + # annos = kitti.filter_kitti_anno(annos, ['DontCare']) + dims = annos['dimensions'][:num_obj] + loc = annos['location'][:num_obj] + rots = annos['rotation_y'][:num_obj] + gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]], + axis=1) + gt_boxes_lidar = box_np_ops.box_camera_to_lidar( + gt_boxes_camera, rect, Trv2c) + indices = box_np_ops.points_in_rbbox(points_v[:, :3], gt_boxes_lidar) + num_points_in_gt = indices.sum(0) + num_ignored = len(annos['dimensions']) - num_obj + num_points_in_gt = np.concatenate( + [num_points_in_gt, -np.ones([num_ignored])]) + annos['num_points_in_gt'] = num_points_in_gt.astype(np.int32) + + +def create_kitti_info_file(data_path, + pkl_prefix='kitti', + save_path=None, + relative_path=True): + """Create info file of KITTI dataset. + + Given the raw data, generate its related info file in pkl format. + + Args: + data_path (str): Path of the data root. + pkl_prefix (str): Prefix of the info file to be generated. + save_path (str): Path to save the info file. + relative_path (bool): Whether to use relative path. + """ + imageset_folder = Path(data_path) / 'ImageSets' + train_img_ids = _read_imageset_file(str(imageset_folder / 'train.txt')) + + val_img_ids = _read_imageset_file(str(imageset_folder / 'val.txt')) + test_img_ids = _read_imageset_file(str(imageset_folder / 'test.txt')) + + print('Generate info. 
this may take several minutes.') + if save_path is None: + save_path = Path(data_path) + else: + save_path = Path(save_path) + kitti_infos_train = get_kitti_image_info( + data_path, + training=True, + velodyne=True, + calib=True, + image_ids=train_img_ids, + relative_path=relative_path) + _calculate_num_points_in_gt(data_path, kitti_infos_train, relative_path) + filename = save_path / f'{pkl_prefix}_infos_train.pkl' + print(f'Kitti info train file is saved to {filename}') + mmcv.dump(kitti_infos_train, filename) + kitti_infos_val = get_kitti_image_info( + data_path, + training=True, + velodyne=True, + calib=True, + image_ids=val_img_ids, + relative_path=relative_path) + _calculate_num_points_in_gt(data_path, kitti_infos_val, relative_path) + filename = save_path / f'{pkl_prefix}_infos_val.pkl' + print(f'Kitti info val file is saved to {filename}') + mmcv.dump(kitti_infos_val, filename) + filename = save_path / f'{pkl_prefix}_infos_trainval.pkl' + print(f'Kitti info trainval file is saved to {filename}') + mmcv.dump(kitti_infos_train + kitti_infos_val, filename) + + kitti_infos_test = get_kitti_image_info( + data_path, + training=False, + label_info=False, + velodyne=True, + calib=True, + image_ids=test_img_ids, + relative_path=relative_path) + filename = save_path / f'{pkl_prefix}_infos_test.pkl' + print(f'Kitti info test file is saved to {filename}') + mmcv.dump(kitti_infos_test, filename) + + +def create_waymo_info_file(data_path, + pkl_prefix='waymo', + save_path=None, + relative_path=True, + max_sweeps=5): + """Create info file of waymo dataset. + + Given the raw data, generate its related info file in pkl format. + + Args: + data_path (str): Path of the data root. + pkl_prefix (str): Prefix of the info file to be generated. + save_path (str | None): Path to save the info file. + relative_path (bool): Whether to use relative path. + max_sweeps (int): Max sweeps before the detection frame to be used. + """ + imageset_folder = Path(data_path) / 'ImageSets' + train_img_ids = _read_imageset_file(str(imageset_folder / 'train.txt')) + # val_img_ids = _read_imageset_file(str(imageset_folder / 'val.txt')) + # test_img_ids = _read_imageset_file(str(imageset_folder / 'test.txt')) + train_img_ids = [each for each in train_img_ids if each % 5 == 0] + print('Generate info. 
this may take several minutes.') + if save_path is None: + save_path = Path(data_path) + else: + save_path = Path(save_path) + waymo_infos_train = get_waymo_image_info( + data_path, + training=True, + velodyne=True, + calib=True, + pose=True, + image_ids=train_img_ids, + relative_path=relative_path, + max_sweeps=max_sweeps) + _calculate_num_points_in_gt( + data_path, + waymo_infos_train, + relative_path, + num_features=6, + remove_outside=False) + filename = save_path / f'{pkl_prefix}_infos_train.pkl' + print(f'Waymo info train file is saved to {filename}') + mmcv.dump(waymo_infos_train, filename) + # + # waymo_infos_val = get_waymo_image_info( + # data_path, + # training=True, + # velodyne=True, + # calib=True, + # pose=True, + # image_ids=val_img_ids, + # relative_path=relative_path, + # max_sweeps=max_sweeps) + # _calculate_num_points_in_gt( + # data_path, + # waymo_infos_val, + # relative_path, + # num_features=6, + # remove_outside=False) + # filename = save_path / f'{pkl_prefix}_infos_val.pkl' + # print(f'Waymo info val file is saved to {filename}') + # mmcv.dump(waymo_infos_val, filename) + # filename = save_path / f'{pkl_prefix}_infos_trainval.pkl' + # print(f'Waymo info trainval file is saved to {filename}') + # mmcv.dump(waymo_infos_train + waymo_infos_val, filename) + # waymo_infos_test = get_waymo_image_info( + # data_path, + # training=False, + # label_info=False, + # velodyne=True, + # calib=True, + # pose=True, + # image_ids=test_img_ids, + # relative_path=relative_path, + # max_sweeps=max_sweeps) + # filename = save_path / f'{pkl_prefix}_infos_test.pkl' + # print(f'Waymo info test file is saved to {filename}') + # mmcv.dump(waymo_infos_test, filename) + + +def _create_reduced_point_cloud(data_path, + info_path, + save_path=None, + back=False, + num_features=4, + front_camera_id=2): + """Create reduced point clouds for given info. + + Args: + data_path (str): Path of original data. + info_path (str): Path of data info. + save_path (str | None): Path to save reduced point cloud data. + Default: None. + back (bool): Whether to flip the points to back. + num_features (int): Number of point features. Default: 4. + front_camera_id (int): The referenced/front camera ID. Default: 2. + """ + kitti_infos = mmcv.load(info_path) + + for info in mmcv.track_iter_progress(kitti_infos): + pc_info = info['point_cloud'] + image_info = info['image'] + calib = info['calib'] + + v_path = pc_info['velodyne_path'] + v_path = Path(data_path) / v_path + points_v = np.fromfile( + str(v_path), dtype=np.float32, + count=-1).reshape([-1, num_features]) + rect = calib['R0_rect'] + if front_camera_id == 2: + P2 = calib['P2'] + else: + P2 = calib[f'P{str(front_camera_id)}'] + Trv2c = calib['Tr_velo_to_cam'] + # first remove z < 0 points + # keep = points_v[:, -1] > 0 + # points_v = points_v[keep] + # then remove outside. 
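+        # 'reduced' here means keeping only LiDAR points that fall inside the
+        # chosen front camera's frustum; with back=True the cloud is mirrored
+        # along x first so the rear hemisphere can be reduced the same way.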
+        if back:
+            points_v[:, 0] = -points_v[:, 0]
+        points_v = box_np_ops.remove_outside_points(points_v, rect, Trv2c, P2,
+                                                    image_info['image_shape'])
+        if save_path is None:
+            save_dir = v_path.parent.parent / (v_path.parent.stem + '_reduced')
+            if not save_dir.exists():
+                save_dir.mkdir()
+            # build the name as str so the '_back' suffix can be appended
+            # below (Path objects do not support '+=')
+            save_filename = str(save_dir / v_path.name)
+            # save_filename = str(v_path) + '_reduced'
+            if back:
+                save_filename += '_back'
+        else:
+            save_filename = str(Path(save_path) / v_path.name)
+            if back:
+                save_filename += '_back'
+        with open(save_filename, 'w') as f:
+            points_v.tofile(f)
+
+
+def create_reduced_point_cloud(data_path,
+                               pkl_prefix,
+                               train_info_path=None,
+                               val_info_path=None,
+                               test_info_path=None,
+                               save_path=None,
+                               with_back=False):
+    """Create reduced point clouds for training/validation/testing.
+
+    Args:
+        data_path (str): Path of original data.
+        pkl_prefix (str): Prefix of info files.
+        train_info_path (str | None): Path of training set info.
+            Default: None.
+        val_info_path (str | None): Path of validation set info.
+            Default: None.
+        test_info_path (str | None): Path of test set info.
+            Default: None.
+        save_path (str | None): Path to save reduced point cloud data.
+        with_back (bool): Whether to flip the points to back.
+    """
+    if train_info_path is None:
+        train_info_path = Path(data_path) / f'{pkl_prefix}_infos_train.pkl'
+    if val_info_path is None:
+        val_info_path = Path(data_path) / f'{pkl_prefix}_infos_val.pkl'
+    if test_info_path is None:
+        test_info_path = Path(data_path) / f'{pkl_prefix}_infos_test.pkl'
+
+    print('create reduced point cloud for training set')
+    _create_reduced_point_cloud(data_path, train_info_path, save_path)
+    print('create reduced point cloud for validation set')
+    _create_reduced_point_cloud(data_path, val_info_path, save_path)
+    print('create reduced point cloud for testing set')
+    _create_reduced_point_cloud(data_path, test_info_path, save_path)
+    if with_back:
+        _create_reduced_point_cloud(
+            data_path, train_info_path, save_path, back=True)
+        _create_reduced_point_cloud(
+            data_path, val_info_path, save_path, back=True)
+        _create_reduced_point_cloud(
+            data_path, test_info_path, save_path, back=True)
+
+
+def export_2d_annotation(root_path, info_path, mono3d=True):
+    """Export 2d annotation from the info file and raw data.
+
+    Args:
+        root_path (str): Root path of the raw data.
+        info_path (str): Path of the info file.
+        mono3d (bool): Whether to export mono3d annotation. Default: True.
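+
+    Example:
+        >>> # hypothetical paths, assuming the info file was produced by
+        >>> # create_kitti_info_file above
+        >>> export_2d_annotation('./data/kitti',
+        ...                      './data/kitti/kitti_infos_val.pkl',
+        ...                      mono3d=True)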
+ """ + # get bbox annotations for camera + kitti_infos = mmcv.load(info_path) + cat2Ids = [ + dict(id=kitti_categories.index(cat_name), name=cat_name) + for cat_name in kitti_categories + ] + coco_ann_id = 0 + coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids) + from os import path as osp + for info in mmcv.track_iter_progress(kitti_infos): + coco_infos = get_2d_boxes(info, occluded=[0, 1, 2, 3], mono3d=mono3d) + (height, width, + _) = mmcv.imread(osp.join(root_path, + info['image']['image_path'])).shape + coco_2d_dict['images'].append( + dict( + file_name=info['image']['image_path'], + id=info['image']['image_idx'], + Tri2v=info['calib']['Tr_imu_to_velo'], + Trv2c=info['calib']['Tr_velo_to_cam'], + rect=info['calib']['R0_rect'], + cam_intrinsic=info['calib']['P2'], + width=width, + height=height)) + for coco_info in coco_infos: + if coco_info is None: + continue + # add an empty key for coco format + coco_info['segmentation'] = [] + coco_info['id'] = coco_ann_id + coco_2d_dict['annotations'].append(coco_info) + coco_ann_id += 1 + if mono3d: + json_prefix = f'{info_path[:-4]}_mono3d' + else: + json_prefix = f'{info_path[:-4]}' + mmcv.dump(coco_2d_dict, f'{json_prefix}.coco.json') + + +def get_2d_boxes(info, occluded, mono3d=True): + """Get the 2D annotation records for a given info. + + Args: + info: Information of the given sample data. + occluded: Integer (0, 1, 2, 3) indicating occlusion state: \ + 0 = fully visible, 1 = partly occluded, 2 = largely occluded, \ + 3 = unknown, -1 = DontCare + mono3d (bool): Whether to get boxes with mono3d annotation. + + Return: + list[dict]: List of 2D annotation record that belongs to the input + `sample_data_token`. + """ + # Get calibration information + P2 = info['calib']['P2'] + + repro_recs = [] + # if no annotations in info (test dataset), then return + if 'annos' not in info: + return repro_recs + + # Get all the annotation with the specified visibilties. + ann_dicts = info['annos'] + mask = [(ocld in occluded) for ocld in ann_dicts['occluded']] + for k in ann_dicts.keys(): + ann_dicts[k] = ann_dicts[k][mask] + + # convert dict of list to list of dict + ann_recs = [] + for i in range(len(ann_dicts['occluded'])): + ann_rec = {} + for k in ann_dicts.keys(): + ann_rec[k] = ann_dicts[k][i] + ann_recs.append(ann_rec) + + for ann_idx, ann_rec in enumerate(ann_recs): + # Augment sample_annotation with token information. + ann_rec['sample_annotation_token'] = \ + f"{info['image']['image_idx']}.{ann_idx}" + ann_rec['sample_data_token'] = info['image']['image_idx'] + sample_data_token = info['image']['image_idx'] + + loc = ann_rec['location'][np.newaxis, :] + dim = ann_rec['dimensions'][np.newaxis, :] + rot = ann_rec['rotation_y'][np.newaxis, np.newaxis] + # transform the center from [0.5, 1.0, 0.5] to [0.5, 0.5, 0.5] + dst = np.array([0.5, 0.5, 0.5]) + src = np.array([0.5, 1.0, 0.5]) + loc = loc + dim * (dst - src) + offset = (info['calib']['P2'][0, 3] - info['calib']['P0'][0, 3]) \ + / info['calib']['P2'][0, 0] + loc_3d = np.copy(loc) + loc_3d[0, 0] += offset + gt_bbox_3d = np.concatenate([loc, dim, rot], axis=1).astype(np.float32) + + # Filter out the corners that are not in front of the calibrated + # sensor. + corners_3d = box_np_ops.center_to_corner_box3d( + gt_bbox_3d[:, :3], + gt_bbox_3d[:, 3:6], + gt_bbox_3d[:, 6], [0.5, 0.5, 0.5], + axis=1) + corners_3d = corners_3d[0].T # (1, 8, 3) -> (3, 8) + in_front = np.argwhere(corners_3d[2, :] > 0).flatten() + corners_3d = corners_3d[:, in_front] + + # Project 3d box to 2d. 
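+        # view_points applies the camera intrinsics and, with normalize=True,
+        # divides by depth, yielding pixel coordinates of the 8 box corners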
+        camera_intrinsic = P2
+        corner_coords = view_points(corners_3d, camera_intrinsic,
+                                    True).T[:, :2].tolist()
+
+        # Keep only corners that fall within the image.
+        final_coords = post_process_coords(corner_coords)
+
+        # Skip if the convex hull of the re-projected corners
+        # does not intersect the image canvas.
+        if final_coords is None:
+            continue
+        else:
+            min_x, min_y, max_x, max_y = final_coords
+
+        # Generate dictionary record to be included in the .json file.
+        repro_rec = generate_record(ann_rec, min_x, min_y, max_x, max_y,
+                                    sample_data_token,
+                                    info['image']['image_path'])
+
+        # If mono3d=True, add 3D annotations in camera coordinates
+        if mono3d and (repro_rec is not None):
+            repro_rec['bbox_cam3d'] = np.concatenate(
+                [loc_3d, dim, rot],
+                axis=1).astype(np.float32).squeeze().tolist()
+            repro_rec['velo_cam3d'] = -1  # no velocity in KITTI
+
+            center3d = np.array(loc).reshape([1, 3])
+            center2d = box_np_ops.points_cam2img(
+                center3d, camera_intrinsic, with_depth=True)
+            repro_rec['center2d'] = center2d.squeeze().tolist()
+            # normalized center2D + depth
+            # samples with depth < 0 will be removed
+            if repro_rec['center2d'][2] <= 0:
+                continue
+
+            repro_rec['attribute_name'] = -1  # no attribute in KITTI
+            repro_rec['attribute_id'] = -1
+
+        repro_recs.append(repro_rec)
+
+    return repro_recs
+
+
+def generate_record(ann_rec, x1, y1, x2, y2, sample_data_token, filename):
+    """Generate one 2D annotation record given various information on top of
+    the 2D bounding box coordinates.
+
+    Args:
+        ann_rec (dict): Original 3d annotation record.
+        x1 (float): Minimum value of the x coordinate.
+        y1 (float): Minimum value of the y coordinate.
+        x2 (float): Maximum value of the x coordinate.
+        y2 (float): Maximum value of the y coordinate.
+        sample_data_token (str): Sample data token.
+        filename (str): The corresponding image file where the annotation
+            is present.
+
+    Returns:
+        dict: A sample 2D annotation record.
+            - file_name (str): file name
+            - image_id (str): sample data token
+            - area (float): 2d box area
+            - category_name (str): category name
+            - category_id (int): category id
+            - bbox (list[float]): left x, top y, dx, dy of 2d box
+            - iscrowd (int): whether the area is crowd
+    """
+    repro_rec = OrderedDict()
+    repro_rec['sample_data_token'] = sample_data_token
+    coco_rec = dict()
+
+    key_mapping = {
+        'name': 'category_name',
+        'num_points_in_gt': 'num_lidar_pts',
+        'sample_annotation_token': 'sample_annotation_token',
+        'sample_data_token': 'sample_data_token',
+    }
+
+    for key, value in ann_rec.items():
+        if key in key_mapping.keys():
+            repro_rec[key_mapping[key]] = value
+
+    repro_rec['bbox_corners'] = [x1, y1, x2, y2]
+    repro_rec['filename'] = filename
+
+    coco_rec['file_name'] = filename
+    coco_rec['image_id'] = sample_data_token
+    coco_rec['area'] = (y2 - y1) * (x2 - x1)
+
+    if repro_rec['category_name'] not in kitti_categories:
+        return None
+    cat_name = repro_rec['category_name']
+    coco_rec['category_name'] = cat_name
+    coco_rec['category_id'] = kitti_categories.index(cat_name)
+    coco_rec['bbox'] = [x1, y1, x2 - x1, y2 - y1]
+    coco_rec['iscrowd'] = 0
+
+    return coco_rec
diff --git a/adzoo/bevformer/data_converter/kitti_data_utils.py b/adzoo/bevformer/data_converter/kitti_data_utils.py
new file mode 100755
index 0000000..01538e0
--- /dev/null
+++ b/adzoo/bevformer/data_converter/kitti_data_utils.py
@@ -0,0 +1,554 @@
+# Copyright (c) OpenMMLab. All rights reserved.
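+# Helpers for parsing a KITTI-style directory layout
+# ({training,testing}/{image_2,label_2,velodyne,calib,pose}) into the info
+# dicts consumed by the converters in this package.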
+import numpy as np +from collections import OrderedDict +from concurrent import futures as futures +from os import path as osp +from pathlib import Path +from skimage import io + + +def get_image_index_str(img_idx, use_prefix_id=False): + if use_prefix_id: + return '{:07d}'.format(img_idx) + else: + return '{:06d}'.format(img_idx) + + +def get_kitti_info_path(idx, + prefix, + info_type='image_2', + file_tail='.png', + training=True, + relative_path=True, + exist_check=True, + use_prefix_id=False): + img_idx_str = get_image_index_str(idx, use_prefix_id) + img_idx_str += file_tail + prefix = Path(prefix) + if training: + file_path = Path('training') / info_type / img_idx_str + else: + file_path = Path('testing') / info_type / img_idx_str + if exist_check and not (prefix / file_path).exists(): + raise ValueError('file not exist: {}'.format(file_path)) + if relative_path: + return str(file_path) + else: + return str(prefix / file_path) + + +def get_image_path(idx, + prefix, + training=True, + relative_path=True, + exist_check=True, + info_type='image_2', + use_prefix_id=False): + return get_kitti_info_path(idx, prefix, info_type, '.png', training, + relative_path, exist_check, use_prefix_id) + + +def get_label_path(idx, + prefix, + training=True, + relative_path=True, + exist_check=True, + info_type='label_2', + use_prefix_id=False): + return get_kitti_info_path(idx, prefix, info_type, '.txt', training, + relative_path, exist_check, use_prefix_id) + + +def get_velodyne_path(idx, + prefix, + training=True, + relative_path=True, + exist_check=True, + use_prefix_id=False): + return get_kitti_info_path(idx, prefix, 'velodyne', '.bin', training, + relative_path, exist_check, use_prefix_id) + + +def get_calib_path(idx, + prefix, + training=True, + relative_path=True, + exist_check=True, + use_prefix_id=False): + return get_kitti_info_path(idx, prefix, 'calib', '.txt', training, + relative_path, exist_check, use_prefix_id) + + +def get_pose_path(idx, + prefix, + training=True, + relative_path=True, + exist_check=True, + use_prefix_id=False): + return get_kitti_info_path(idx, prefix, 'pose', '.txt', training, + relative_path, exist_check, use_prefix_id) + + +def get_label_anno(label_path): + annotations = {} + annotations.update({ + 'name': [], + 'truncated': [], + 'occluded': [], + 'alpha': [], + 'bbox': [], + 'dimensions': [], + 'location': [], + 'rotation_y': [] + }) + with open(label_path, 'r') as f: + lines = f.readlines() + # if len(lines) == 0 or len(lines[0]) < 15: + # content = [] + # else: + content = [line.strip().split(' ') for line in lines] + num_objects = len([x[0] for x in content if x[0] != 'DontCare']) + annotations['name'] = np.array([x[0] for x in content]) + num_gt = len(annotations['name']) + annotations['truncated'] = np.array([float(x[1]) for x in content]) + annotations['occluded'] = np.array([int(x[2]) for x in content]) + annotations['alpha'] = np.array([float(x[3]) for x in content]) + annotations['bbox'] = np.array([[float(info) for info in x[4:8]] + for x in content]).reshape(-1, 4) + # dimensions will convert hwl format to standard lhw(camera) format. 
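+    # KITTI label columns 8:11 store h, w, l; the [2, 0, 1] reindex below
+    # reorders them to l, h, w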
+ annotations['dimensions'] = np.array([[float(info) for info in x[8:11]] + for x in content + ]).reshape(-1, 3)[:, [2, 0, 1]] + annotations['location'] = np.array([[float(info) for info in x[11:14]] + for x in content]).reshape(-1, 3) + annotations['rotation_y'] = np.array([float(x[14]) + for x in content]).reshape(-1) + if len(content) != 0 and len(content[0]) == 16: # have score + annotations['score'] = np.array([float(x[15]) for x in content]) + else: + annotations['score'] = np.zeros((annotations['bbox'].shape[0], )) + index = list(range(num_objects)) + [-1] * (num_gt - num_objects) + annotations['index'] = np.array(index, dtype=np.int32) + annotations['group_ids'] = np.arange(num_gt, dtype=np.int32) + return annotations + + +def _extend_matrix(mat): + mat = np.concatenate([mat, np.array([[0., 0., 0., 1.]])], axis=0) + return mat + + +def get_kitti_image_info(path, + training=True, + label_info=True, + velodyne=False, + calib=False, + image_ids=7481, + extend_matrix=True, + num_worker=8, + relative_path=True, + with_imageshape=True): + """ + KITTI annotation format version 2: + { + [optional]points: [N, 3+] point cloud + [optional, for kitti]image: { + image_idx: ... + image_path: ... + image_shape: ... + } + point_cloud: { + num_features: 4 + velodyne_path: ... + } + [optional, for kitti]calib: { + R0_rect: ... + Tr_velo_to_cam: ... + P2: ... + } + annos: { + location: [num_gt, 3] array + dimensions: [num_gt, 3] array + rotation_y: [num_gt] angle array + name: [num_gt] ground truth name array + [optional]difficulty: kitti difficulty + [optional]group_ids: used for multi-part object + } + } + """ + root_path = Path(path) + if not isinstance(image_ids, list): + image_ids = list(range(image_ids)) + + def map_func(idx): + info = {} + pc_info = {'num_features': 4} + calib_info = {} + + image_info = {'image_idx': idx} + annotations = None + if velodyne: + pc_info['velodyne_path'] = get_velodyne_path( + idx, path, training, relative_path) + image_info['image_path'] = get_image_path(idx, path, training, + relative_path) + if with_imageshape: + img_path = image_info['image_path'] + if relative_path: + img_path = str(root_path / img_path) + image_info['image_shape'] = np.array( + io.imread(img_path).shape[:2], dtype=np.int32) + if label_info: + label_path = get_label_path(idx, path, training, relative_path) + if relative_path: + label_path = str(root_path / label_path) + annotations = get_label_anno(label_path) + info['image'] = image_info + info['point_cloud'] = pc_info + if calib: + calib_path = get_calib_path( + idx, path, training, relative_path=False) + with open(calib_path, 'r') as f: + lines = f.readlines() + P0 = np.array([float(info) for info in lines[0].split(' ')[1:13] + ]).reshape([3, 4]) + P1 = np.array([float(info) for info in lines[1].split(' ')[1:13] + ]).reshape([3, 4]) + P2 = np.array([float(info) for info in lines[2].split(' ')[1:13] + ]).reshape([3, 4]) + P3 = np.array([float(info) for info in lines[3].split(' ')[1:13] + ]).reshape([3, 4]) + if extend_matrix: + P0 = _extend_matrix(P0) + P1 = _extend_matrix(P1) + P2 = _extend_matrix(P2) + P3 = _extend_matrix(P3) + R0_rect = np.array([ + float(info) for info in lines[4].split(' ')[1:10] + ]).reshape([3, 3]) + if extend_matrix: + rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype) + rect_4x4[3, 3] = 1. 
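+                # embed the 3x3 rectification in a homogeneous 4x4 so it can
+                # be composed with the extended 4x4 calibration matrices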
+ rect_4x4[:3, :3] = R0_rect + else: + rect_4x4 = R0_rect + + Tr_velo_to_cam = np.array([ + float(info) for info in lines[5].split(' ')[1:13] + ]).reshape([3, 4]) + Tr_imu_to_velo = np.array([ + float(info) for info in lines[6].split(' ')[1:13] + ]).reshape([3, 4]) + if extend_matrix: + Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam) + Tr_imu_to_velo = _extend_matrix(Tr_imu_to_velo) + calib_info['P0'] = P0 + calib_info['P1'] = P1 + calib_info['P2'] = P2 + calib_info['P3'] = P3 + calib_info['R0_rect'] = rect_4x4 + calib_info['Tr_velo_to_cam'] = Tr_velo_to_cam + calib_info['Tr_imu_to_velo'] = Tr_imu_to_velo + info['calib'] = calib_info + + if annotations is not None: + info['annos'] = annotations + add_difficulty_to_annos(info) + return info + + with futures.ThreadPoolExecutor(num_worker) as executor: + image_infos = executor.map(map_func, image_ids) + + return list(image_infos) + + +def get_waymo_image_info(path, + training=True, + label_info=True, + velodyne=False, + calib=False, + pose=False, + image_ids=7481, + extend_matrix=True, + num_worker=8, + relative_path=True, + with_imageshape=True, + max_sweeps=5): + """ + Waymo annotation format version like KITTI: + { + [optional]points: [N, 3+] point cloud + [optional, for kitti]image: { + image_idx: ... + image_path: ... + image_shape: ... + } + point_cloud: { + num_features: 6 + velodyne_path: ... + } + [optional, for kitti]calib: { + R0_rect: ... + Tr_velo_to_cam0: ... + P0: ... + } + annos: { + location: [num_gt, 3] array + dimensions: [num_gt, 3] array + rotation_y: [num_gt] angle array + name: [num_gt] ground truth name array + [optional]difficulty: kitti difficulty + [optional]group_ids: used for multi-part object + } + } + """ + root_path = Path(path) + if not isinstance(image_ids, list): + image_ids = list(range(image_ids)) + + def map_func(idx): + info = {} + pc_info = {'num_features': 6} + calib_info = {} + + image_info = {'image_idx': idx} + annotations = None + if velodyne: + pc_info['velodyne_path'] = get_velodyne_path( + idx, path, training, relative_path, use_prefix_id=True) + points = np.fromfile( + Path(path) / pc_info['velodyne_path'], dtype=np.float32) + points = np.copy(points).reshape(-1, pc_info['num_features']) + info['timestamp'] = np.int64(points[0, -1]) + # values of the last dim are all the timestamp + image_info['image_path'] = get_image_path( + idx, + path, + training, + relative_path, + info_type='image_0', + use_prefix_id=True) + if with_imageshape: + img_path = image_info['image_path'] + if relative_path: + img_path = str(root_path / img_path) + image_info['image_shape'] = np.array( + io.imread(img_path).shape[:2], dtype=np.int32) + if label_info: + label_path = get_label_path( + idx, + path, + training, + relative_path, + info_type='label_all', + use_prefix_id=True) + if relative_path: + label_path = str(root_path / label_path) + annotations = get_label_anno(label_path) + info['image'] = image_info + info['point_cloud'] = pc_info + if calib: + calib_path = get_calib_path( + idx, path, training, relative_path=False, use_prefix_id=True) + with open(calib_path, 'r') as f: + lines = f.readlines() + P0 = np.array([float(info) for info in lines[0].split(' ')[1:13] + ]).reshape([3, 4]) + P1 = np.array([float(info) for info in lines[1].split(' ')[1:13] + ]).reshape([3, 4]) + P2 = np.array([float(info) for info in lines[2].split(' ')[1:13] + ]).reshape([3, 4]) + P3 = np.array([float(info) for info in lines[3].split(' ')[1:13] + ]).reshape([3, 4]) + P4 = np.array([float(info) for info in lines[4].split(' ')[1:13] + 
]).reshape([3, 4]) + if extend_matrix: + P0 = _extend_matrix(P0) + P1 = _extend_matrix(P1) + P2 = _extend_matrix(P2) + P3 = _extend_matrix(P3) + P4 = _extend_matrix(P4) + R0_rect = np.array([ + float(info) for info in lines[5].split(' ')[1:10] + ]).reshape([3, 3]) + if extend_matrix: + rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype) + rect_4x4[3, 3] = 1. + rect_4x4[:3, :3] = R0_rect + else: + rect_4x4 = R0_rect + + Tr_velo_to_cam = np.array([ + float(info) for info in lines[6].split(' ')[1:13] + ]).reshape([3, 4]) + if extend_matrix: + Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam) + calib_info['P0'] = P0 + calib_info['P1'] = P1 + calib_info['P2'] = P2 + calib_info['P3'] = P3 + calib_info['P4'] = P4 + calib_info['R0_rect'] = rect_4x4 + calib_info['Tr_velo_to_cam'] = Tr_velo_to_cam + info['calib'] = calib_info + if pose: + pose_path = get_pose_path( + idx, path, training, relative_path=False, use_prefix_id=True) + info['pose'] = np.loadtxt(pose_path) + + if annotations is not None: + info['annos'] = annotations + info['annos']['camera_id'] = info['annos'].pop('score') + add_difficulty_to_annos(info) + + sweeps = [] + prev_idx = idx + while len(sweeps) < max_sweeps: + prev_info = {} + prev_idx -= 1 + prev_info['velodyne_path'] = get_velodyne_path( + prev_idx, + path, + training, + relative_path, + exist_check=False, + use_prefix_id=True) + if_prev_exists = osp.exists( + Path(path) / prev_info['velodyne_path']) + if if_prev_exists: + prev_points = np.fromfile( + Path(path) / prev_info['velodyne_path'], dtype=np.float32) + prev_points = np.copy(prev_points).reshape( + -1, pc_info['num_features']) + prev_info['timestamp'] = np.int64(prev_points[0, -1]) + prev_pose_path = get_pose_path( + prev_idx, + path, + training, + relative_path=False, + use_prefix_id=True) + prev_info['pose'] = np.loadtxt(prev_pose_path) + sweeps.append(prev_info) + else: + break + info['sweeps'] = sweeps + + return info + + with futures.ThreadPoolExecutor(num_worker) as executor: + image_infos = executor.map(map_func, image_ids) + + return list(image_infos) + + +def kitti_anno_to_label_file(annos, folder): + folder = Path(folder) + for anno in annos: + image_idx = anno['metadata']['image_idx'] + label_lines = [] + for j in range(anno['bbox'].shape[0]): + label_dict = { + 'name': anno['name'][j], + 'alpha': anno['alpha'][j], + 'bbox': anno['bbox'][j], + 'location': anno['location'][j], + 'dimensions': anno['dimensions'][j], + 'rotation_y': anno['rotation_y'][j], + 'score': anno['score'][j], + } + label_line = kitti_result_line(label_dict) + label_lines.append(label_line) + label_file = folder / f'{get_image_index_str(image_idx)}.txt' + label_str = '\n'.join(label_lines) + with open(label_file, 'w') as f: + f.write(label_str) + + +def add_difficulty_to_annos(info): + min_height = [40, 25, + 25] # minimum height for evaluated groundtruth/detections + max_occlusion = [ + 0, 1, 2 + ] # maximum occlusion level of the groundtruth used for evaluation + max_trunc = [ + 0.15, 0.3, 0.5 + ] # maximum truncation level of the groundtruth used for evaluation + annos = info['annos'] + dims = annos['dimensions'] # lhw format + bbox = annos['bbox'] + height = bbox[:, 3] - bbox[:, 1] + occlusion = annos['occluded'] + truncation = annos['truncated'] + diff = [] + easy_mask = np.ones((len(dims), ), dtype=np.bool) + moderate_mask = np.ones((len(dims), ), dtype=np.bool) + hard_mask = np.ones((len(dims), ), dtype=np.bool) + i = 0 + for h, o, t in zip(height, occlusion, truncation): + if o > max_occlusion[0] or h <= min_height[0] or t > 
max_trunc[0]: + easy_mask[i] = False + if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]: + moderate_mask[i] = False + if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]: + hard_mask[i] = False + i += 1 + is_easy = easy_mask + is_moderate = np.logical_xor(easy_mask, moderate_mask) + is_hard = np.logical_xor(hard_mask, moderate_mask) + + for i in range(len(dims)): + if is_easy[i]: + diff.append(0) + elif is_moderate[i]: + diff.append(1) + elif is_hard[i]: + diff.append(2) + else: + diff.append(-1) + annos['difficulty'] = np.array(diff, np.int32) + return diff + + +def kitti_result_line(result_dict, precision=4): + prec_float = '{' + ':.{}f'.format(precision) + '}' + res_line = [] + all_field_default = OrderedDict([ + ('name', None), + ('truncated', -1), + ('occluded', -1), + ('alpha', -10), + ('bbox', None), + ('dimensions', [-1, -1, -1]), + ('location', [-1000, -1000, -1000]), + ('rotation_y', -10), + ('score', 0.0), + ]) + res_dict = [(key, None) for key, val in all_field_default.items()] + res_dict = OrderedDict(res_dict) + for key, val in result_dict.items(): + if all_field_default[key] is None and val is None: + raise ValueError('you must specify a value for {}'.format(key)) + res_dict[key] = val + + for key, val in res_dict.items(): + if key == 'name': + res_line.append(val) + elif key in ['truncated', 'alpha', 'rotation_y', 'score']: + if val is None: + res_line.append(str(all_field_default[key])) + else: + res_line.append(prec_float.format(val)) + elif key == 'occluded': + if val is None: + res_line.append(str(all_field_default[key])) + else: + res_line.append('{}'.format(val)) + elif key in ['bbox', 'dimensions', 'location']: + if val is None: + res_line += [str(v) for v in all_field_default[key]] + else: + res_line += [prec_float.format(v) for v in val] + else: + raise ValueError('unknown key. supported key:{}'.format( + res_dict.keys())) + return ' '.join(res_line) diff --git a/adzoo/bevformer/data_converter/lyft_converter.py b/adzoo/bevformer/data_converter/lyft_converter.py new file mode 100755 index 0000000..db4f0fb --- /dev/null +++ b/adzoo/bevformer/data_converter/lyft_converter.py @@ -0,0 +1,268 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import numpy as np +import os +from logging import warning +from lyft_dataset_sdk.lyftdataset import LyftDataset as Lyft +from os import path as osp +from pyquaternion import Quaternion + +from mmcv.datasets import LyftDataset +from .nuscenes_converter import (get_2d_boxes, get_available_scenes, + obtain_sensor2top) + +lyft_categories = ('car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', + 'motorcycle', 'bicycle', 'pedestrian', 'animal') + + +def create_lyft_infos(root_path, + info_prefix, + version='v1.01-train', + max_sweeps=10): + """Create info file of lyft dataset. + + Given the raw data, generate its related info file in pkl format. + + Args: + root_path (str): Path of the data root. + info_prefix (str): Prefix of the info file to be generated. + version (str): Version of the data. + Default: 'v1.01-train' + max_sweeps (int): Max number of sweeps. 
+ Default: 10 + """ + lyft = Lyft( + data_path=osp.join(root_path, version), + json_path=osp.join(root_path, version, version), + verbose=True) + available_vers = ['v1.01-train', 'v1.01-test'] + assert version in available_vers + if version == 'v1.01-train': + train_scenes = mmcv.list_from_file('data/lyft/train.txt') + val_scenes = mmcv.list_from_file('data/lyft/val.txt') + elif version == 'v1.01-test': + train_scenes = mmcv.list_from_file('data/lyft/test.txt') + val_scenes = [] + else: + raise ValueError('unknown') + + # filter existing scenes. + available_scenes = get_available_scenes(lyft) + available_scene_names = [s['name'] for s in available_scenes] + train_scenes = list( + filter(lambda x: x in available_scene_names, train_scenes)) + val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes)) + train_scenes = set([ + available_scenes[available_scene_names.index(s)]['token'] + for s in train_scenes + ]) + val_scenes = set([ + available_scenes[available_scene_names.index(s)]['token'] + for s in val_scenes + ]) + + test = 'test' in version + if test: + print(f'test scene: {len(train_scenes)}') + else: + print(f'train scene: {len(train_scenes)}, \ + val scene: {len(val_scenes)}') + train_lyft_infos, val_lyft_infos = _fill_trainval_infos( + lyft, train_scenes, val_scenes, test, max_sweeps=max_sweeps) + + metadata = dict(version=version) + if test: + print(f'test sample: {len(train_lyft_infos)}') + data = dict(infos=train_lyft_infos, metadata=metadata) + info_name = f'{info_prefix}_infos_test' + info_path = osp.join(root_path, f'{info_name}.pkl') + mmcv.dump(data, info_path) + else: + print(f'train sample: {len(train_lyft_infos)}, \ + val sample: {len(val_lyft_infos)}') + data = dict(infos=train_lyft_infos, metadata=metadata) + train_info_name = f'{info_prefix}_infos_train' + info_path = osp.join(root_path, f'{train_info_name}.pkl') + mmcv.dump(data, info_path) + data['infos'] = val_lyft_infos + val_info_name = f'{info_prefix}_infos_val' + info_val_path = osp.join(root_path, f'{val_info_name}.pkl') + mmcv.dump(data, info_val_path) + + +def _fill_trainval_infos(lyft, + train_scenes, + val_scenes, + test=False, + max_sweeps=10): + """Generate the train/val infos from the raw data. + + Args: + lyft (:obj:`LyftDataset`): Dataset class in the Lyft dataset. + train_scenes (list[str]): Basic information of training scenes. + val_scenes (list[str]): Basic information of validation scenes. + test (bool): Whether use the test mode. In the test mode, no + annotations can be accessed. Default: False. + max_sweeps (int): Max number of sweeps. Default: 10. + + Returns: + tuple[list[dict]]: Information of training set and + validation set that will be saved to the info file. 
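+
+    Note:
+        Boxes are stored as (x, y, z, w, l, h, yaw) in the LiDAR frame, with
+        yaw converted to the SECOND convention via the ``-rots - np.pi / 2``
+        term below.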
+ """ + train_lyft_infos = [] + val_lyft_infos = [] + + for sample in mmcv.track_iter_progress(lyft.sample): + lidar_token = sample['data']['LIDAR_TOP'] + sd_rec = lyft.get('sample_data', sample['data']['LIDAR_TOP']) + cs_record = lyft.get('calibrated_sensor', + sd_rec['calibrated_sensor_token']) + pose_record = lyft.get('ego_pose', sd_rec['ego_pose_token']) + abs_lidar_path, boxes, _ = lyft.get_sample_data(lidar_token) + # nuScenes devkit returns more convenient relative paths while + # lyft devkit returns absolute paths + abs_lidar_path = str(abs_lidar_path) # absolute path + lidar_path = abs_lidar_path.split(f'{os.getcwd()}/')[-1] + # relative path + + mmcv.check_file_exist(lidar_path) + + info = { + 'lidar_path': lidar_path, + 'token': sample['token'], + 'sweeps': [], + 'cams': dict(), + 'lidar2ego_translation': cs_record['translation'], + 'lidar2ego_rotation': cs_record['rotation'], + 'ego2global_translation': pose_record['translation'], + 'ego2global_rotation': pose_record['rotation'], + 'timestamp': sample['timestamp'], + } + + l2e_r = info['lidar2ego_rotation'] + l2e_t = info['lidar2ego_translation'] + e2g_r = info['ego2global_rotation'] + e2g_t = info['ego2global_translation'] + l2e_r_mat = Quaternion(l2e_r).rotation_matrix + e2g_r_mat = Quaternion(e2g_r).rotation_matrix + + # obtain 6 image's information per frame + camera_types = [ + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_FRONT_LEFT', + 'CAM_BACK', + 'CAM_BACK_LEFT', + 'CAM_BACK_RIGHT', + ] + for cam in camera_types: + cam_token = sample['data'][cam] + cam_path, _, cam_intrinsic = lyft.get_sample_data(cam_token) + cam_info = obtain_sensor2top(lyft, cam_token, l2e_t, l2e_r_mat, + e2g_t, e2g_r_mat, cam) + cam_info.update(cam_intrinsic=cam_intrinsic) + info['cams'].update({cam: cam_info}) + + # obtain sweeps for a single key-frame + sd_rec = lyft.get('sample_data', sample['data']['LIDAR_TOP']) + sweeps = [] + while len(sweeps) < max_sweeps: + if not sd_rec['prev'] == '': + sweep = obtain_sensor2top(lyft, sd_rec['prev'], l2e_t, + l2e_r_mat, e2g_t, e2g_r_mat, 'lidar') + sweeps.append(sweep) + sd_rec = lyft.get('sample_data', sd_rec['prev']) + else: + break + info['sweeps'] = sweeps + # obtain annotation + if not test: + annotations = [ + lyft.get('sample_annotation', token) + for token in sample['anns'] + ] + locs = np.array([b.center for b in boxes]).reshape(-1, 3) + dims = np.array([b.wlh for b in boxes]).reshape(-1, 3) + rots = np.array([b.orientation.yaw_pitch_roll[0] + for b in boxes]).reshape(-1, 1) + + names = [b.name for b in boxes] + for i in range(len(names)): + if names[i] in LyftDataset.NameMapping: + names[i] = LyftDataset.NameMapping[names[i]] + names = np.array(names) + + # we need to convert rot to SECOND format. + gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1) + assert len(gt_boxes) == len( + annotations), f'{len(gt_boxes)}, {len(annotations)}' + info['gt_boxes'] = gt_boxes + info['gt_names'] = names + info['num_lidar_pts'] = np.array( + [a['num_lidar_pts'] for a in annotations]) + info['num_radar_pts'] = np.array( + [a['num_radar_pts'] for a in annotations]) + + if sample['scene_token'] in train_scenes: + train_lyft_infos.append(info) + else: + val_lyft_infos.append(info) + + return train_lyft_infos, val_lyft_infos + + +def export_2d_annotation(root_path, info_path, version): + """Export 2d annotation from the info file and raw data. + + Args: + root_path (str): Root path of the raw data. + info_path (str): Path of the info file. + version (str): Dataset version. 
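+
+    Note:
+        2D annotations are not used on the Lyft dataset; this exporter only
+        exists for parity with the nuScenes converter and warns when called.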
+    """
+    # `warning` is the function imported from `logging`, so call it directly
+    warning('DeprecationWarning: 2D annotations are not used on the '
+            'Lyft dataset. The function export_2d_annotation will be '
+            'deprecated.')
+    # get bbox annotations for camera
+    camera_types = [
+        'CAM_FRONT',
+        'CAM_FRONT_RIGHT',
+        'CAM_FRONT_LEFT',
+        'CAM_BACK',
+        'CAM_BACK_LEFT',
+        'CAM_BACK_RIGHT',
+    ]
+    lyft_infos = mmcv.load(info_path)['infos']
+    lyft = Lyft(
+        data_path=osp.join(root_path, version),
+        json_path=osp.join(root_path, version, version),
+        verbose=True)
+    # info_2d_list = []
+    cat2Ids = [
+        dict(id=lyft_categories.index(cat_name), name=cat_name)
+        for cat_name in lyft_categories
+    ]
+    coco_ann_id = 0
+    coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids)
+    for info in mmcv.track_iter_progress(lyft_infos):
+        for cam in camera_types:
+            cam_info = info['cams'][cam]
+            coco_infos = get_2d_boxes(
+                lyft,
+                cam_info['sample_data_token'],
+                visibilities=['', '1', '2', '3', '4'])
+            (height, width, _) = mmcv.imread(cam_info['data_path']).shape
+            coco_2d_dict['images'].append(
+                dict(
+                    file_name=cam_info['data_path'],
+                    id=cam_info['sample_data_token'],
+                    width=width,
+                    height=height))
+            for coco_info in coco_infos:
+                if coco_info is None:
+                    continue
+                # add an empty key for coco format
+                coco_info['segmentation'] = []
+                coco_info['id'] = coco_ann_id
+                coco_2d_dict['annotations'].append(coco_info)
+                coco_ann_id += 1
+    mmcv.dump(coco_2d_dict, f'{info_path[:-4]}.coco.json')
diff --git a/adzoo/bevformer/data_converter/lyft_data_fixer.py b/adzoo/bevformer/data_converter/lyft_data_fixer.py
new file mode 100755
index 0000000..4207049
--- /dev/null
+++ b/adzoo/bevformer/data_converter/lyft_data_fixer.py
@@ -0,0 +1,38 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+import numpy as np
+import os
+
+
+def fix_lyft(root_folder='./data/lyft', version='v1.01'):
+    # refer to https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/discussion/110000  # noqa
+    lidar_path = 'lidar/host-a011_lidar1_1233090652702363606.bin'
+    root_folder = os.path.join(root_folder, f'{version}-train')
+    lidar_path = os.path.join(root_folder, lidar_path)
+    assert os.path.isfile(lidar_path), f'Please download the complete Lyft ' \
+        f'dataset and make sure {lidar_path} is present.'
+    points = np.fromfile(lidar_path, dtype=np.float32, count=-1)
+    try:
+        points.reshape([-1, 5])
+        print(f'This fix is not required for version {version}.')
+    except ValueError:
+        new_points = np.array(list(points) + [100.0, 1.0], dtype='float32')
+        new_points.tofile(lidar_path)
+        print(f'Appended 100.0 and 1.0 to the end of {lidar_path}.')
+
+
+parser = argparse.ArgumentParser(description='Lyft dataset fixer arg parser')
+parser.add_argument(
+    '--root-folder',
+    type=str,
+    default='./data/lyft',
+    help='specify the root path of Lyft dataset')
+parser.add_argument(
+    '--version',
+    type=str,
+    default='v1.01',
+    help='specify Lyft dataset version')
+
+if __name__ == '__main__':
+    # parse CLI args only when run as a script so the module stays importable
+    args = parser.parse_args()
+    fix_lyft(root_folder=args.root_folder, version=args.version)
diff --git a/adzoo/bevformer/data_converter/nuimage_converter.py b/adzoo/bevformer/data_converter/nuimage_converter.py
new file mode 100755
index 0000000..92be1de
--- /dev/null
+++ b/adzoo/bevformer/data_converter/nuimage_converter.py
@@ -0,0 +1,225 @@
+# Copyright (c) OpenMMLab. All rights reserved.
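+# Converts nuImages annotations (object boxes plus instance/surface masks)
+# into a COCO-style JSON file and per-image semantic-segmentation PNGs.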
+import argparse +import base64 +import mmcv +import numpy as np +from nuimages import NuImages +from nuimages.utils.utils import mask_decode, name_to_index_mapping +from os import path as osp + +nus_categories = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle', + 'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone', + 'barrier') + +NAME_MAPPING = { + 'movable_object.barrier': 'barrier', + 'vehicle.bicycle': 'bicycle', + 'vehicle.bus.bendy': 'bus', + 'vehicle.bus.rigid': 'bus', + 'vehicle.car': 'car', + 'vehicle.construction': 'construction_vehicle', + 'vehicle.motorcycle': 'motorcycle', + 'human.pedestrian.adult': 'pedestrian', + 'human.pedestrian.child': 'pedestrian', + 'human.pedestrian.construction_worker': 'pedestrian', + 'human.pedestrian.police_officer': 'pedestrian', + 'movable_object.trafficcone': 'traffic_cone', + 'vehicle.trailer': 'trailer', + 'vehicle.truck': 'truck', +} + + +def parse_args(): + parser = argparse.ArgumentParser(description='Data converter arg parser') + parser.add_argument( + '--data-root', + type=str, + default='./data/nuimages', + help='specify the root path of dataset') + parser.add_argument( + '--version', + type=str, + nargs='+', + default=['v1.0-mini'], + required=False, + help='specify the dataset version') + parser.add_argument( + '--out-dir', + type=str, + default='./data/nuimages/annotations/', + required=False, + help='path to save the exported json') + parser.add_argument( + '--nproc', + type=int, + default=4, + required=False, + help='workers to process semantic masks') + parser.add_argument('--extra-tag', type=str, default='nuimages') + args = parser.parse_args() + return args + + +def get_img_annos(nuim, img_info, cat2id, out_dir, data_root, seg_root): + """Get semantic segmentation map for an image. + + Args: + nuim (obj:`NuImages`): NuImages dataset object + img_info (dict): Meta information of img + + Returns: + np.ndarray: Semantic segmentation map of the image + """ + sd_token = img_info['token'] + image_id = img_info['id'] + name_to_index = name_to_index_mapping(nuim.category) + + # Get image data. + width, height = img_info['width'], img_info['height'] + semseg_mask = np.zeros((height, width)).astype('uint8') + + # Load stuff / surface regions. + surface_anns = [ + o for o in nuim.surface_ann if o['sample_data_token'] == sd_token + ] + + # Draw stuff / surface regions. + for ann in surface_anns: + # Get color and mask. + category_token = ann['category_token'] + category_name = nuim.get('category', category_token)['name'] + if ann['mask'] is None: + continue + mask = mask_decode(ann['mask']) + + # Draw mask for semantic segmentation. + semseg_mask[mask == 1] = name_to_index[category_name] + + # Load object instances. + object_anns = [ + o for o in nuim.object_ann if o['sample_data_token'] == sd_token + ] + + # Sort by token to ensure that objects always appear in the + # instance mask in the same order. + object_anns = sorted(object_anns, key=lambda k: k['token']) + + # Draw object instances. + # The 0 index is reserved for background; thus, the instances + # should start from index 1. + annotations = [] + for i, ann in enumerate(object_anns, start=1): + # Get color, box, mask and name. + category_token = ann['category_token'] + category_name = nuim.get('category', category_token)['name'] + if ann['mask'] is None: + continue + mask = mask_decode(ann['mask']) + + # Draw masks for semantic segmentation and instance segmentation. 
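+        # instances are painted in sorted-token order, so where masks overlap
+        # the later instance overwrites the earlier one in semseg_mask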
+ semseg_mask[mask == 1] = name_to_index[category_name] + + if category_name in NAME_MAPPING: + cat_name = NAME_MAPPING[category_name] + cat_id = cat2id[cat_name] + + x_min, y_min, x_max, y_max = ann['bbox'] + # encode calibrated instance mask + mask_anno = dict() + mask_anno['counts'] = base64.b64decode( + ann['mask']['counts']).decode() + mask_anno['size'] = ann['mask']['size'] + + data_anno = dict( + image_id=image_id, + category_id=cat_id, + bbox=[x_min, y_min, x_max - x_min, y_max - y_min], + area=(x_max - x_min) * (y_max - y_min), + segmentation=mask_anno, + iscrowd=0) + annotations.append(data_anno) + + # after process, save semantic masks + img_filename = img_info['file_name'] + seg_filename = img_filename.replace('jpg', 'png') + seg_filename = osp.join(seg_root, seg_filename) + mmcv.imwrite(semseg_mask, seg_filename) + return annotations, np.max(semseg_mask) + + +def export_nuim_to_coco(nuim, data_root, out_dir, extra_tag, version, nproc): + print('Process category information') + categories = [] + categories = [ + dict(id=nus_categories.index(cat_name), name=cat_name) + for cat_name in nus_categories + ] + cat2id = {k_v['name']: k_v['id'] for k_v in categories} + + images = [] + print('Process image meta information...') + for sample_info in mmcv.track_iter_progress(nuim.sample_data): + if sample_info['is_key_frame']: + img_idx = len(images) + images.append( + dict( + id=img_idx, + token=sample_info['token'], + file_name=sample_info['filename'], + width=sample_info['width'], + height=sample_info['height'])) + + seg_root = f'{out_dir}semantic_masks' + mmcv.mkdir_or_exist(seg_root) + mmcv.mkdir_or_exist(osp.join(data_root, 'calibrated')) + + global process_img_anno + + def process_img_anno(img_info): + single_img_annos, max_cls_id = get_img_annos(nuim, img_info, cat2id, + out_dir, data_root, + seg_root) + return single_img_annos, max_cls_id + + print('Process img annotations...') + if nproc > 1: + outputs = mmcv.track_parallel_progress( + process_img_anno, images, nproc=nproc) + else: + outputs = [] + for img_info in mmcv.track_iter_progress(images): + outputs.append(process_img_anno(img_info)) + + # Determine the index of object annotation + print('Process annotation information...') + annotations = [] + max_cls_ids = [] + for single_img_annos, max_cls_id in outputs: + max_cls_ids.append(max_cls_id) + for img_anno in single_img_annos: + img_anno.update(id=len(annotations)) + annotations.append(img_anno) + + max_cls_id = max(max_cls_ids) + print(f'Max ID of class in the semantic map: {max_cls_id}') + + coco_format_json = dict( + images=images, annotations=annotations, categories=categories) + + mmcv.mkdir_or_exist(out_dir) + out_file = osp.join(out_dir, f'{extra_tag}_{version}.json') + print(f'Annotation dumped to {out_file}') + mmcv.dump(coco_format_json, out_file) + + +def main(): + args = parse_args() + for version in args.version: + nuim = NuImages( + dataroot=args.data_root, version=version, verbose=True, lazy=True) + export_nuim_to_coco(nuim, args.data_root, args.out_dir, args.extra_tag, + version, args.nproc) + + +if __name__ == '__main__': + main() diff --git a/adzoo/bevformer/data_converter/nuscenes_converter.py b/adzoo/bevformer/data_converter/nuscenes_converter.py new file mode 100755 index 0000000..c3c071e --- /dev/null +++ b/adzoo/bevformer/data_converter/nuscenes_converter.py @@ -0,0 +1,674 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. 
+# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- +import numpy as np +import os +from collections import OrderedDict +from nuscenes.nuscenes import NuScenes +from nuscenes.utils.geometry_utils import view_points +from os import path as osp +from pyquaternion import Quaternion +from shapely.geometry import MultiPoint, box +from typing import List, Tuple, Union + +from mmcv.core.bbox.box_np_ops import points_cam2img +from mmcv.datasets import NuScenesDataset +from mmcv.fileio.io import dump, load +from mmcv.image.io import imread +from mmcv.utils import is_filepath, check_file_exist, track_iter_progress + +nus_categories = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle', + 'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone', + 'barrier') + +nus_attributes = ('cycle.with_rider', 'cycle.without_rider', + 'pedestrian.moving', 'pedestrian.standing', + 'pedestrian.sitting_lying_down', 'vehicle.moving', + 'vehicle.parked', 'vehicle.stopped', 'None') + + +def create_nuscenes_infos(root_path, + out_path, + can_bus_root_path, + info_prefix, + version='v1.0-trainval', + max_sweeps=10): + """Create info file of nuscene dataset. + + Given the raw data, generate its related info file in pkl format. + + Args: + root_path (str): Path of the data root. + info_prefix (str): Prefix of the info file to be generated. + version (str): Version of the data. + Default: 'v1.0-trainval' + max_sweeps (int): Max number of sweeps. + Default: 10 + """ + from nuscenes.nuscenes import NuScenes + from nuscenes.can_bus.can_bus_api import NuScenesCanBus + print(version, root_path) + nusc = NuScenes(version=version, dataroot=root_path, verbose=True) + nusc_can_bus = NuScenesCanBus(dataroot=can_bus_root_path) + from nuscenes.utils import splits + available_vers = ['v1.0-trainval', 'v1.0-test', 'v1.0-mini'] + assert version in available_vers + if version == 'v1.0-trainval': + train_scenes = splits.train + val_scenes = splits.val + elif version == 'v1.0-test': + train_scenes = splits.test + val_scenes = [] + elif version == 'v1.0-mini': + train_scenes = splits.mini_train + val_scenes = splits.mini_val + else: + raise ValueError('unknown') + + # filter existing scenes. 
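+    # keep only scenes whose LiDAR files exist on disk, then map the official
+    # split (scene names) to scene tokens for the membership tests below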
+ available_scenes = get_available_scenes(nusc) + available_scene_names = [s['name'] for s in available_scenes] + train_scenes = list( + filter(lambda x: x in available_scene_names, train_scenes)) + val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes)) + train_scenes = set([ + available_scenes[available_scene_names.index(s)]['token'] + for s in train_scenes + ]) + val_scenes = set([ + available_scenes[available_scene_names.index(s)]['token'] + for s in val_scenes + ]) + + test = 'test' in version + if test: + print('test scene: {}'.format(len(train_scenes))) + else: + print('train scene: {}, val scene: {}'.format( + len(train_scenes), len(val_scenes))) + + train_nusc_infos, val_nusc_infos = _fill_trainval_infos( + nusc, nusc_can_bus, train_scenes, val_scenes, test, max_sweeps=max_sweeps) + + metadata = dict(version=version) + if test: + print('test sample: {}'.format(len(train_nusc_infos))) + data = dict(infos=train_nusc_infos, metadata=metadata) + info_path = osp.join(out_path, + '{}_infos_temporal_test.pkl'.format(info_prefix)) + dump(data, info_path) + else: + print('train sample: {}, val sample: {}'.format( + len(train_nusc_infos), len(val_nusc_infos))) + data = dict(infos=train_nusc_infos, metadata=metadata) + info_path = osp.join(out_path, + '{}_infos_temporal_train.pkl'.format(info_prefix)) + dump(data, info_path) + data['infos'] = val_nusc_infos + info_val_path = osp.join(out_path, + '{}_infos_temporal_val.pkl'.format(info_prefix)) + dump(data, info_val_path) + + +def get_available_scenes(nusc): + """Get available scenes from the input nuscenes class. + + Given the raw data, get the information of available scenes for + further info generation. + + Args: + nusc (class): Dataset class in the nuScenes dataset. + + Returns: + available_scenes (list[dict]): List of basic information for the + available scenes. + """ + available_scenes = [] + print('total scene num: {}'.format(len(nusc.scene))) + for scene in nusc.scene: + scene_token = scene['token'] + scene_rec = nusc.get('scene', scene_token) + sample_rec = nusc.get('sample', scene_rec['first_sample_token']) + sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP']) + has_more_frames = True + scene_not_exist = False + while has_more_frames: + lidar_path, boxes, _ = nusc.get_sample_data(sd_rec['token']) + lidar_path = str(lidar_path) + if os.getcwd() in lidar_path: + # path from lyftdataset is absolute path + lidar_path = lidar_path.split(f'{os.getcwd()}/')[-1] + # relative path + if not is_filepath(lidar_path): + scene_not_exist = True + break + else: + break + if scene_not_exist: + continue + available_scenes.append(scene) + print('exist scene num: {}'.format(len(available_scenes))) + return available_scenes + + +def _get_can_bus_info(nusc, nusc_can_bus, sample): + scene_name = nusc.get('scene', sample['scene_token'])['name'] + sample_timestamp = sample['timestamp'] + try: + pose_list = nusc_can_bus.get_messages(scene_name, 'pose') + except: + return np.zeros(18) # server scenes do not have can bus information. 
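+    # 18-dim layout: translation (3) + orientation quaternion (4) + the
+    # remaining pose fields accel/rotation_rate/vel (9) + two trailing
+    # zeros kept as placeholders for the downstream data pipeline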
+ can_bus = [] + # during each scene, the first timestamp of can_bus may be large than the first sample's timestamp + last_pose = pose_list[0] + for i, pose in enumerate(pose_list): + if pose['utime'] > sample_timestamp: + break + last_pose = pose + _ = last_pose.pop('utime') # useless + pos = last_pose.pop('pos') + rotation = last_pose.pop('orientation') + can_bus.extend(pos) + can_bus.extend(rotation) + for key in last_pose.keys(): + can_bus.extend(pose[key]) # 16 elements + can_bus.extend([0., 0.]) + return np.array(can_bus) + + +def _fill_trainval_infos(nusc, + nusc_can_bus, + train_scenes, + val_scenes, + test=False, + max_sweeps=10): + """Generate the train/val infos from the raw data. + + Args: + nusc (:obj:`NuScenes`): Dataset class in the nuScenes dataset. + train_scenes (list[str]): Basic information of training scenes. + val_scenes (list[str]): Basic information of validation scenes. + test (bool): Whether use the test mode. In the test mode, no + annotations can be accessed. Default: False. + max_sweeps (int): Max number of sweeps. Default: 10. + + Returns: + tuple[list[dict]]: Information of training set and validation set + that will be saved to the info file. + """ + train_nusc_infos = [] + val_nusc_infos = [] + frame_idx = 0 + for sample in track_iter_progress(nusc.sample): + lidar_token = sample['data']['LIDAR_TOP'] + sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) + cs_record = nusc.get('calibrated_sensor', + sd_rec['calibrated_sensor_token']) + pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) + lidar_path, boxes, _ = nusc.get_sample_data(lidar_token) + + check_file_exist(lidar_path) + can_bus = _get_can_bus_info(nusc, nusc_can_bus, sample) + ## + info = { + 'lidar_path': lidar_path, + 'token': sample['token'], + 'prev': sample['prev'], + 'next': sample['next'], + 'can_bus': can_bus, + 'frame_idx': frame_idx, # temporal related info + 'sweeps': [], + 'cams': dict(), + 'scene_token': sample['scene_token'], # temporal related info + 'lidar2ego_translation': cs_record['translation'], + 'lidar2ego_rotation': cs_record['rotation'], + 'ego2global_translation': pose_record['translation'], + 'ego2global_rotation': pose_record['rotation'], + 'timestamp': sample['timestamp'], + } + + if sample['next'] == '': + frame_idx = 0 + else: + frame_idx += 1 + + l2e_r = info['lidar2ego_rotation'] + l2e_t = info['lidar2ego_translation'] + e2g_r = info['ego2global_rotation'] + e2g_t = info['ego2global_translation'] + l2e_r_mat = Quaternion(l2e_r).rotation_matrix + e2g_r_mat = Quaternion(e2g_r).rotation_matrix + + # obtain 6 image's information per frame + camera_types = [ + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_FRONT_LEFT', + 'CAM_BACK', + 'CAM_BACK_LEFT', + 'CAM_BACK_RIGHT', + ] + for cam in camera_types: + cam_token = sample['data'][cam] + cam_path, _, cam_intrinsic = nusc.get_sample_data(cam_token) + cam_info = obtain_sensor2top(nusc, cam_token, l2e_t, l2e_r_mat, + e2g_t, e2g_r_mat, cam) + cam_info.update(cam_intrinsic=cam_intrinsic) + info['cams'].update({cam: cam_info}) + + # obtain sweeps for a single key-frame + sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) + sweeps = [] + while len(sweeps) < max_sweeps: + if not sd_rec['prev'] == '': + sweep = obtain_sensor2top(nusc, sd_rec['prev'], l2e_t, + l2e_r_mat, e2g_t, e2g_r_mat, 'lidar') + sweeps.append(sweep) + sd_rec = nusc.get('sample_data', sd_rec['prev']) + else: + break + info['sweeps'] = sweeps + # obtain annotation + if not test: + annotations = [ + nusc.get('sample_annotation', token) + for 
token in sample['anns'] + ] + locs = np.array([b.center for b in boxes]).reshape(-1, 3) + dims = np.array([b.wlh for b in boxes]).reshape(-1, 3) + rots = np.array([b.orientation.yaw_pitch_roll[0] + for b in boxes]).reshape(-1, 1) + velocity = np.array( + [nusc.box_velocity(token)[:2] for token in sample['anns']]) + valid_flag = np.array( + [(anno['num_lidar_pts'] + anno['num_radar_pts']) > 0 + for anno in annotations], + dtype=bool).reshape(-1) + # convert velo from global to lidar + for i in range(len(boxes)): + velo = np.array([*velocity[i], 0.0]) + velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv( + l2e_r_mat).T + velocity[i] = velo[:2] + + names = [b.name for b in boxes] + for i in range(len(names)): + if names[i] in NuScenesDataset.NameMapping: + names[i] = NuScenesDataset.NameMapping[names[i]] + names = np.array(names) + # we need to convert rot to SECOND format. + gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1) + assert len(gt_boxes) == len( + annotations), f'{len(gt_boxes)}, {len(annotations)}' + info['gt_boxes'] = gt_boxes + info['gt_names'] = names + info['gt_velocity'] = velocity.reshape(-1, 2) + info['num_lidar_pts'] = np.array( + [a['num_lidar_pts'] for a in annotations]) + info['num_radar_pts'] = np.array( + [a['num_radar_pts'] for a in annotations]) + info['valid_flag'] = valid_flag + + if sample['scene_token'] in train_scenes: + train_nusc_infos.append(info) + else: + val_nusc_infos.append(info) + + return train_nusc_infos, val_nusc_infos + + +def obtain_sensor2top(nusc, + sensor_token, + l2e_t, + l2e_r_mat, + e2g_t, + e2g_r_mat, + sensor_type='lidar'): + """Obtain the info with RT matric from general sensor to Top LiDAR. + + Args: + nusc (class): Dataset class in the nuScenes dataset. + sensor_token (str): Sample data token corresponding to the + specific sensor type. + l2e_t (np.ndarray): Translation from lidar to ego in shape (1, 3). + l2e_r_mat (np.ndarray): Rotation matrix from lidar to ego + in shape (3, 3). + e2g_t (np.ndarray): Translation from ego to global in shape (1, 3). + e2g_r_mat (np.ndarray): Rotation matrix from ego to global + in shape (3, 3). + sensor_type (str): Sensor to calibrate. Default: 'lidar'. + + Returns: + sweep (dict): Sweep information after transformation. 
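+            The returned values satisfy
+            ``pts_top = pts_sensor @ sensor2lidar_rotation +
+            sensor2lidar_translation``, composing
+            sweep sensor -> ego -> global -> ego' -> top LiDAR.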
+ """ + sd_rec = nusc.get('sample_data', sensor_token) + cs_record = nusc.get('calibrated_sensor', + sd_rec['calibrated_sensor_token']) + pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) + data_path = str(nusc.get_sample_data_path(sd_rec['token'])) + if os.getcwd() in data_path: # path from lyftdataset is absolute path + data_path = data_path.split(f'{os.getcwd()}/')[-1] # relative path + sweep = { + 'data_path': data_path, + 'type': sensor_type, + 'sample_data_token': sd_rec['token'], + 'sensor2ego_translation': cs_record['translation'], + 'sensor2ego_rotation': cs_record['rotation'], + 'ego2global_translation': pose_record['translation'], + 'ego2global_rotation': pose_record['rotation'], + 'timestamp': sd_rec['timestamp'] + } + + l2e_r_s = sweep['sensor2ego_rotation'] + l2e_t_s = sweep['sensor2ego_translation'] + e2g_r_s = sweep['ego2global_rotation'] + e2g_t_s = sweep['ego2global_translation'] + + # obtain the RT from sensor to Top LiDAR + # sweep->ego->global->ego'->lidar + l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix + e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix + R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ ( + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ ( + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T + ) + l2e_t @ np.linalg.inv(l2e_r_mat).T + sweep['sensor2lidar_rotation'] = R.T # points @ R.T + T + sweep['sensor2lidar_translation'] = T + return sweep + + +def export_2d_annotation(root_path, info_path, version, mono3d=True): + """Export 2d annotation from the info file and raw data. + + Args: + root_path (str): Root path of the raw data. + info_path (str): Path of the info file. + version (str): Dataset version. + mono3d (bool): Whether to export mono3d annotation. Default: True. 
+ """ + # get bbox annotations for camera + camera_types = [ + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_FRONT_LEFT', + 'CAM_BACK', + 'CAM_BACK_LEFT', + 'CAM_BACK_RIGHT', + ] + nusc_infos = load(info_path)['infos'] + nusc = NuScenes(version=version, dataroot=root_path, verbose=True) + # info_2d_list = [] + cat2Ids = [ + dict(id=nus_categories.index(cat_name), name=cat_name) + for cat_name in nus_categories + ] + coco_ann_id = 0 + coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids) + for info in track_iter_progress(nusc_infos): + for cam in camera_types: + cam_info = info['cams'][cam] + coco_infos = get_2d_boxes( + nusc, + cam_info['sample_data_token'], + visibilities=['', '1', '2', '3', '4'], + mono3d=mono3d) + (height, width, _) = imread(cam_info['data_path']).shape + coco_2d_dict['images'].append( + dict( + file_name=cam_info['data_path'].split('data/nuscenes/') + [-1], + id=cam_info['sample_data_token'], + token=info['token'], + cam2ego_rotation=cam_info['sensor2ego_rotation'], + cam2ego_translation=cam_info['sensor2ego_translation'], + ego2global_rotation=info['ego2global_rotation'], + ego2global_translation=info['ego2global_translation'], + cam_intrinsic=cam_info['cam_intrinsic'], + width=width, + height=height)) + for coco_info in coco_infos: + if coco_info is None: + continue + # add an empty key for coco format + coco_info['segmentation'] = [] + coco_info['id'] = coco_ann_id + coco_2d_dict['annotations'].append(coco_info) + coco_ann_id += 1 + if mono3d: + json_prefix = f'{info_path[:-4]}_mono3d' + else: + json_prefix = f'{info_path[:-4]}' + dump(coco_2d_dict, f'{json_prefix}.coco.json') + + +def get_2d_boxes(nusc, + sample_data_token: str, + visibilities: List[str], + mono3d=True): + """Get the 2D annotation records for a given `sample_data_token`. + + Args: + sample_data_token (str): Sample data token belonging to a camera \ + keyframe. + visibilities (list[str]): Visibility filter. + mono3d (bool): Whether to get boxes with mono3d annotation. + + Return: + list[dict]: List of 2D annotation record that belongs to the input + `sample_data_token`. + """ + + # Get the sample data and the sample corresponding to that sample data. + sd_rec = nusc.get('sample_data', sample_data_token) + + assert sd_rec[ + 'sensor_modality'] == 'camera', 'Error: get_2d_boxes only works' \ + ' for camera sample_data!' + if not sd_rec['is_key_frame']: + raise ValueError( + 'The 2D re-projections are available only for keyframes.') + + s_rec = nusc.get('sample', sd_rec['sample_token']) + + # Get the calibrated sensor and ego pose + # record to get the transformation matrices. + cs_rec = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token']) + pose_rec = nusc.get('ego_pose', sd_rec['ego_pose_token']) + camera_intrinsic = np.array(cs_rec['camera_intrinsic']) + + # Get all the annotation with the specified visibilties. + ann_recs = [ + nusc.get('sample_annotation', token) for token in s_rec['anns'] + ] + ann_recs = [ + ann_rec for ann_rec in ann_recs + if (ann_rec['visibility_token'] in visibilities) + ] + + repro_recs = [] + + for ann_rec in ann_recs: + # Augment sample_annotation with token information. + ann_rec['sample_annotation_token'] = ann_rec['token'] + ann_rec['sample_data_token'] = sample_data_token + + # Get the box in global coordinates. + box = nusc.get_box(ann_rec['token']) + + # Move them to the ego-pose frame. + box.translate(-np.array(pose_rec['translation'])) + box.rotate(Quaternion(pose_rec['rotation']).inverse) + + # Move them to the calibrated sensor frame. 
+ box.translate(-np.array(cs_rec['translation'])) + box.rotate(Quaternion(cs_rec['rotation']).inverse) + + # Filter out the corners that are not in front of the calibrated + # sensor. + corners_3d = box.corners() + in_front = np.argwhere(corners_3d[2, :] > 0).flatten() + corners_3d = corners_3d[:, in_front] + + # Project 3d box to 2d. + corner_coords = view_points(corners_3d, camera_intrinsic, + True).T[:, :2].tolist() + + # Keep only corners that fall within the image. + final_coords = post_process_coords(corner_coords) + + # Skip if the convex hull of the re-projected corners + # does not intersect the image canvas. + if final_coords is None: + continue + else: + min_x, min_y, max_x, max_y = final_coords + + # Generate dictionary record to be included in the .json file. + repro_rec = generate_record(ann_rec, min_x, min_y, max_x, max_y, + sample_data_token, sd_rec['filename']) + + # If mono3d=True, add 3D annotations in camera coordinates + if mono3d and (repro_rec is not None): + loc = box.center.tolist() + + dim = box.wlh + dim[[0, 1, 2]] = dim[[1, 2, 0]] # convert wlh to our lhw + dim = dim.tolist() + + rot = box.orientation.yaw_pitch_roll[0] + rot = [-rot] # convert the rot to our cam coordinate + + global_velo2d = nusc.box_velocity(box.token)[:2] + global_velo3d = np.array([*global_velo2d, 0.0]) + e2g_r_mat = Quaternion(pose_rec['rotation']).rotation_matrix + c2e_r_mat = Quaternion(cs_rec['rotation']).rotation_matrix + cam_velo3d = global_velo3d @ np.linalg.inv( + e2g_r_mat).T @ np.linalg.inv(c2e_r_mat).T + velo = cam_velo3d[0::2].tolist() + + repro_rec['bbox_cam3d'] = loc + dim + rot + repro_rec['velo_cam3d'] = velo + + center3d = np.array(loc).reshape([1, 3]) + center2d = points_cam2img( + center3d, camera_intrinsic, with_depth=True) + repro_rec['center2d'] = center2d.squeeze().tolist() + # normalized center2D + depth + # if samples with depth < 0 will be removed + if repro_rec['center2d'][2] <= 0: + continue + + ann_token = nusc.get('sample_annotation', + box.token)['attribute_tokens'] + if len(ann_token) == 0: + attr_name = 'None' + else: + attr_name = nusc.get('attribute', ann_token[0])['name'] + attr_id = nus_attributes.index(attr_name) + repro_rec['attribute_name'] = attr_name + repro_rec['attribute_id'] = attr_id + + repro_recs.append(repro_rec) + + return repro_recs + + +def post_process_coords( + corner_coords: List, imsize: Tuple[int, int] = (1600, 900) +) -> Union[Tuple[float, float, float, float], None]: + """Get the intersection of the convex hull of the reprojected bbox corners + and the image canvas, return None if no intersection. + + Args: + corner_coords (list[int]): Corner coordinates of reprojected + bounding box. + imsize (tuple[int]): Size of the image canvas. + + Return: + tuple [float]: Intersection of the convex hull of the 2D box + corners and the image canvas. 
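+
+    Example (illustrative; a box hanging off the left image border is
+    clipped to the canvas)::
+
+        >>> post_process_coords([(-10., 50.), (200., 50.),
+        ...                      (200., 300.), (-10., 300.)])
+        (0.0, 50.0, 200.0, 300.0)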
+ """ + polygon_from_2d_box = MultiPoint(corner_coords).convex_hull + img_canvas = box(0, 0, imsize[0], imsize[1]) + + if polygon_from_2d_box.intersects(img_canvas): + img_intersection = polygon_from_2d_box.intersection(img_canvas) + intersection_coords = np.array( + [coord for coord in img_intersection.exterior.coords]) + + min_x = min(intersection_coords[:, 0]) + min_y = min(intersection_coords[:, 1]) + max_x = max(intersection_coords[:, 0]) + max_y = max(intersection_coords[:, 1]) + + return min_x, min_y, max_x, max_y + else: + return None + + +def generate_record(ann_rec: dict, x1: float, y1: float, x2: float, y2: float, + sample_data_token: str, filename: str) -> OrderedDict: + """Generate one 2D annotation record given various informations on top of + the 2D bounding box coordinates. + + Args: + ann_rec (dict): Original 3d annotation record. + x1 (float): Minimum value of the x coordinate. + y1 (float): Minimum value of the y coordinate. + x2 (float): Maximum value of the x coordinate. + y2 (float): Maximum value of the y coordinate. + sample_data_token (str): Sample data token. + filename (str):The corresponding image file where the annotation + is present. + + Returns: + dict: A sample 2D annotation record. + - file_name (str): flie name + - image_id (str): sample data token + - area (float): 2d box area + - category_name (str): category name + - category_id (int): category id + - bbox (list[float]): left x, top y, dx, dy of 2d box + - iscrowd (int): whether the area is crowd + """ + repro_rec = OrderedDict() + repro_rec['sample_data_token'] = sample_data_token + coco_rec = dict() + + relevant_keys = [ + 'attribute_tokens', + 'category_name', + 'instance_token', + 'next', + 'num_lidar_pts', + 'num_radar_pts', + 'prev', + 'sample_annotation_token', + 'sample_data_token', + 'visibility_token', + ] + + for key, value in ann_rec.items(): + if key in relevant_keys: + repro_rec[key] = value + + repro_rec['bbox_corners'] = [x1, y1, x2, y2] + repro_rec['filename'] = filename + + coco_rec['file_name'] = filename + coco_rec['image_id'] = sample_data_token + coco_rec['area'] = (y2 - y1) * (x2 - x1) + + if repro_rec['category_name'] not in NuScenesDataset.NameMapping: + return None + cat_name = NuScenesDataset.NameMapping[repro_rec['category_name']] + coco_rec['category_name'] = cat_name + coco_rec['category_id'] = nus_categories.index(cat_name) + coco_rec['bbox'] = [x1, y1, x2 - x1, y2 - y1] + coco_rec['iscrowd'] = 0 + + return coco_rec diff --git a/adzoo/bevformer/data_converter/s3dis_data_utils.py b/adzoo/bevformer/data_converter/s3dis_data_utils.py new file mode 100755 index 0000000..d2b6b77 --- /dev/null +++ b/adzoo/bevformer/data_converter/s3dis_data_utils.py @@ -0,0 +1,241 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import numpy as np +import os +from concurrent import futures as futures +from os import path as osp + + +class S3DISData(object): + """S3DIS data. + + Generate s3dis infos for s3dis_converter. + + Args: + root_path (str): Root path of the raw data. + split (str): Set split type of the data. Default: 'Area_1'. + """ + + def __init__(self, root_path, split='Area_1'): + self.root_dir = root_path + self.split = split + self.data_dir = osp.join(root_path, + 'Stanford3dDataset_v1.2_Aligned_Version') + + # Following `GSDN `_, use 5 furniture + # classes for detection: table, chair, sofa, bookcase, board. 
+        self.cat_ids = np.array([7, 8, 9, 10, 11])
+        self.cat_ids2class = {
+            cat_id: i
+            for i, cat_id in enumerate(list(self.cat_ids))
+        }
+
+        assert split in [
+            'Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_5', 'Area_6'
+        ]
+        # keep only the room sub-directories, e.g. conferenceRoom_1
+        # (filtering into a new list avoids mutating the list while
+        # iterating over it, which silently skips entries)
+        self.sample_id_list = [
+            sample_id
+            for sample_id in os.listdir(osp.join(self.data_dir, split))
+            if not os.path.isfile(osp.join(self.data_dir, split, sample_id))
+        ]
+
+    def __len__(self):
+        return len(self.sample_id_list)
+
+    def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):
+        """Get data infos.
+
+        This method gets information from the raw data.
+
+        Args:
+            num_workers (int): Number of threads to be used. Default: 4.
+            has_label (bool): Whether the data has label. Default: True.
+            sample_id_list (list[int]): Index list of the sample.
+                Default: None.
+
+        Returns:
+            infos (list[dict]): Information of the raw data.
+        """
+
+        def process_single_scene(sample_idx):
+            print(f'{self.split} sample_idx: {sample_idx}')
+            info = dict()
+            pc_info = {
+                'num_features': 6,
+                'lidar_idx': f'{self.split}_{sample_idx}'
+            }
+            info['point_cloud'] = pc_info
+            pts_filename = osp.join(self.root_dir, 's3dis_data',
+                                    f'{self.split}_{sample_idx}_point.npy')
+            pts_instance_mask_path = osp.join(
+                self.root_dir, 's3dis_data',
+                f'{self.split}_{sample_idx}_ins_label.npy')
+            pts_semantic_mask_path = osp.join(
+                self.root_dir, 's3dis_data',
+                f'{self.split}_{sample_idx}_sem_label.npy')
+
+            points = np.load(pts_filename).astype(np.float32)
+            # np.int is deprecated in recent NumPy; use the explicit dtype
+            pts_instance_mask = np.load(pts_instance_mask_path).astype(
+                np.int64)
+            pts_semantic_mask = np.load(pts_semantic_mask_path).astype(
+                np.int64)
+
+            mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points'))
+            mmcv.mkdir_or_exist(osp.join(self.root_dir, 'instance_mask'))
+            mmcv.mkdir_or_exist(osp.join(self.root_dir, 'semantic_mask'))
+
+            points.tofile(
+                osp.join(self.root_dir, 'points',
+                         f'{self.split}_{sample_idx}.bin'))
+            pts_instance_mask.tofile(
+                osp.join(self.root_dir, 'instance_mask',
+                         f'{self.split}_{sample_idx}.bin'))
+            pts_semantic_mask.tofile(
+                osp.join(self.root_dir, 'semantic_mask',
+                         f'{self.split}_{sample_idx}.bin'))
+
+            info['pts_path'] = osp.join('points',
+                                        f'{self.split}_{sample_idx}.bin')
+            info['pts_instance_mask_path'] = osp.join(
+                'instance_mask', f'{self.split}_{sample_idx}.bin')
+            info['pts_semantic_mask_path'] = osp.join(
+                'semantic_mask', f'{self.split}_{sample_idx}.bin')
+            info['annos'] = self.get_bboxes(points, pts_instance_mask,
+                                            pts_semantic_mask)
+
+            return info
+
+        sample_id_list = sample_id_list if sample_id_list is not None \
+            else self.sample_id_list
+        with futures.ThreadPoolExecutor(num_workers) as executor:
+            infos = executor.map(process_single_scene, sample_id_list)
+        return list(infos)
+
+    def get_bboxes(self, points, pts_instance_mask, pts_semantic_mask):
+        """Convert instance masks to axis-aligned bounding boxes.
+
+        Args:
+            points (np.array): Scene points of shape (n, 6).
+            pts_instance_mask (np.ndarray): Instance labels of shape (n,).
+            pts_semantic_mask (np.ndarray): Semantic labels of shape (n,).
+
+        Returns:
+            dict: A dict containing detection infos with following keys:
+
+                - gt_boxes_upright_depth (np.ndarray): Bounding boxes
+                    of shape (n, 6)
+                - class (np.ndarray): Box labels of shape (n,)
+                - gt_num (int): Number of boxes.
+        """
+        bboxes, labels = [], []
+        # instance ids are 1-based, so include pts_instance_mask.max() itself
+        # (the original `range(1, max())` dropped the last instance)
+        for i in range(1, pts_instance_mask.max() + 1):
+            ids = pts_instance_mask == i
+            # check that all points of an instance share one semantic label
+            assert pts_semantic_mask[ids].min() == pts_semantic_mask[ids].max()
+            label = pts_semantic_mask[ids][0]
+            # keep only furniture objects
+            if label in self.cat_ids2class:
+                labels.append(self.cat_ids2class[pts_semantic_mask[ids][0]])
+                pts = points[:, :3][ids]
+                min_pts = pts.min(axis=0)
+                max_pts = pts.max(axis=0)
+                locations = (min_pts + max_pts) / 2
+                dimensions = max_pts - min_pts
+                bboxes.append(np.concatenate((locations, dimensions)))
+        annotation = dict()
+        # follow ScanNet and SUN RGB-D keys
+        annotation['gt_boxes_upright_depth'] = np.array(bboxes)
+        annotation['class'] = np.array(labels)
+        annotation['gt_num'] = len(labels)
+        return annotation
+
+
+class S3DISSegData(object):
+    """S3DIS dataset used to generate infos for semantic segmentation task.
+
+    Args:
+        data_root (str): Root path of the raw data.
+        ann_file (str): The generated s3dis infos.
+        split (str): Set split type of the data. Default: 'Area_1'.
+        num_points (int): Number of points in each data input.
+            Default: 4096.
+        label_weight_func (function): Function to compute the label weight.
+            Default: None.
+    """
+
+    def __init__(self,
+                 data_root,
+                 ann_file,
+                 split='Area_1',
+                 num_points=4096,
+                 label_weight_func=None):
+        self.data_root = data_root
+        self.data_infos = mmcv.load(ann_file)
+        self.split = split
+        self.num_points = num_points
+
+        self.all_ids = np.arange(13)  # all possible ids
+        self.cat_ids = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+                                 12])  # used for seg task
+        self.ignore_index = len(self.cat_ids)
+
+        self.cat_id2class = np.ones(
+            (self.all_ids.shape[0], ), dtype=np.int64) * self.ignore_index
+        for i, cat_id in enumerate(self.cat_ids):
+            self.cat_id2class[cat_id] = i
+
+        # the label weighting function is taken from
+        # https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24
+        self.label_weight_func = (lambda x: 1.0 / np.log(1.2 + x)) if \
+            label_weight_func is None else label_weight_func
+
+    def get_seg_infos(self):
+        scene_idxs, label_weight = self.get_scene_idxs_and_label_weight()
+        save_folder = osp.join(self.data_root, 'seg_info')
+        mmcv.mkdir_or_exist(save_folder)
+        np.save(
+            osp.join(save_folder, f'{self.split}_resampled_scene_idxs.npy'),
+            scene_idxs)
+        np.save(
+            osp.join(save_folder, f'{self.split}_label_weight.npy'),
+            label_weight)
+        print(f'{self.split} resampled scene index and label weight saved')
+
+    def _convert_to_label(self, mask):
+        """Convert class_id in a loaded segmentation mask to a label."""
+        if isinstance(mask, str):
+            if mask.endswith('npy'):
+                mask = np.load(mask)
+            else:
+                mask = np.fromfile(mask, dtype=np.int64)
+        label = self.cat_id2class[mask]
+        return label
+
+    def get_scene_idxs_and_label_weight(self):
+        """Compute scene_idxs for data sampling and label weight for loss
+        calculation.
+
+        We sample more times for scenes with more points. Label_weight is
+        inversely proportional to number of class points.
+ """ + num_classes = len(self.cat_ids) + num_point_all = [] + label_weight = np.zeros((num_classes + 1, )) # ignore_index + for data_info in self.data_infos: + label = self._convert_to_label( + osp.join(self.data_root, data_info['pts_semantic_mask_path'])) + num_point_all.append(label.shape[0]) + class_count, _ = np.histogram(label, range(num_classes + 2)) + label_weight += class_count + + # repeat scene_idx for num_scene_point // num_sample_point times + sample_prob = np.array(num_point_all) / float(np.sum(num_point_all)) + num_iter = int(np.sum(num_point_all) / float(self.num_points)) + scene_idxs = [] + for idx in range(len(self.data_infos)): + scene_idxs.extend([idx] * int(round(sample_prob[idx] * num_iter))) + scene_idxs = np.array(scene_idxs).astype(np.int32) + + # calculate label weight, adopted from PointNet++ + label_weight = label_weight[:-1].astype(np.float32) + label_weight = label_weight / label_weight.sum() + label_weight = self.label_weight_func(label_weight).astype(np.float32) + + return scene_idxs, label_weight diff --git a/adzoo/bevformer/data_converter/scannet_data_utils.py b/adzoo/bevformer/data_converter/scannet_data_utils.py new file mode 100755 index 0000000..a437fe0 --- /dev/null +++ b/adzoo/bevformer/data_converter/scannet_data_utils.py @@ -0,0 +1,293 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import numpy as np +import os +from concurrent import futures as futures +from os import path as osp + + +class ScanNetData(object): + """ScanNet data. + + Generate scannet infos for scannet_converter. + + Args: + root_path (str): Root path of the raw data. + split (str): Set split type of the data. Default: 'train'. + """ + + def __init__(self, root_path, split='train'): + self.root_dir = root_path + self.split = split + self.split_dir = osp.join(root_path) + self.classes = [ + 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', + 'bookshelf', 'picture', 'counter', 'desk', 'curtain', + 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', + 'garbagebin' + ] + self.cat2label = {cat: self.classes.index(cat) for cat in self.classes} + self.label2cat = {self.cat2label[t]: t for t in self.cat2label} + self.cat_ids = np.array( + [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) + self.cat_ids2class = { + nyu40id: i + for i, nyu40id in enumerate(list(self.cat_ids)) + } + assert split in ['train', 'val', 'test'] + split_file = osp.join(self.root_dir, 'meta_data', + f'scannetv2_{split}.txt') + mmcv.check_file_exist(split_file) + self.sample_id_list = mmcv.list_from_file(split_file) + self.test_mode = (split == 'test') + + def __len__(self): + return len(self.sample_id_list) + + def get_aligned_box_label(self, idx): + box_file = osp.join(self.root_dir, 'scannet_instance_data', + f'{idx}_aligned_bbox.npy') + mmcv.check_file_exist(box_file) + return np.load(box_file) + + def get_unaligned_box_label(self, idx): + box_file = osp.join(self.root_dir, 'scannet_instance_data', + f'{idx}_unaligned_bbox.npy') + mmcv.check_file_exist(box_file) + return np.load(box_file) + + def get_axis_align_matrix(self, idx): + matrix_file = osp.join(self.root_dir, 'scannet_instance_data', + f'{idx}_axis_align_matrix.npy') + mmcv.check_file_exist(matrix_file) + return np.load(matrix_file) + + def get_images(self, idx): + paths = [] + path = osp.join(self.root_dir, 'posed_images', idx) + for file in sorted(os.listdir(path)): + if file.endswith('.jpg'): + paths.append(osp.join('posed_images', idx, file)) + return paths + + def get_extrinsics(self, idx): 
+ extrinsics = [] + path = osp.join(self.root_dir, 'posed_images', idx) + for file in sorted(os.listdir(path)): + if file.endswith('.txt') and not file == 'intrinsic.txt': + extrinsics.append(np.loadtxt(osp.join(path, file))) + return extrinsics + + def get_intrinsics(self, idx): + matrix_file = osp.join(self.root_dir, 'posed_images', idx, + 'intrinsic.txt') + mmcv.check_file_exist(matrix_file) + return np.loadtxt(matrix_file) + + def get_infos(self, num_workers=4, has_label=True, sample_id_list=None): + """Get data infos. + + This method gets information from the raw data. + + Args: + num_workers (int): Number of threads to be used. Default: 4. + has_label (bool): Whether the data has label. Default: True. + sample_id_list (list[int]): Index list of the sample. + Default: None. + + Returns: + infos (list[dict]): Information of the raw data. + """ + + def process_single_scene(sample_idx): + print(f'{self.split} sample_idx: {sample_idx}') + info = dict() + pc_info = {'num_features': 6, 'lidar_idx': sample_idx} + info['point_cloud'] = pc_info + pts_filename = osp.join(self.root_dir, 'scannet_instance_data', + f'{sample_idx}_vert.npy') + points = np.load(pts_filename) + mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points')) + points.tofile( + osp.join(self.root_dir, 'points', f'{sample_idx}.bin')) + info['pts_path'] = osp.join('points', f'{sample_idx}.bin') + + # update with RGB image paths if exist + if os.path.exists(osp.join(self.root_dir, 'posed_images')): + info['intrinsics'] = self.get_intrinsics(sample_idx) + all_extrinsics = self.get_extrinsics(sample_idx) + all_img_paths = self.get_images(sample_idx) + # some poses in ScanNet are invalid + extrinsics, img_paths = [], [] + for extrinsic, img_path in zip(all_extrinsics, all_img_paths): + if np.all(np.isfinite(extrinsic)): + img_paths.append(img_path) + extrinsics.append(extrinsic) + info['extrinsics'] = extrinsics + info['img_paths'] = img_paths + + if not self.test_mode: + pts_instance_mask_path = osp.join( + self.root_dir, 'scannet_instance_data', + f'{sample_idx}_ins_label.npy') + pts_semantic_mask_path = osp.join( + self.root_dir, 'scannet_instance_data', + f'{sample_idx}_sem_label.npy') + + pts_instance_mask = np.load(pts_instance_mask_path).astype( + np.long) + pts_semantic_mask = np.load(pts_semantic_mask_path).astype( + np.long) + + mmcv.mkdir_or_exist(osp.join(self.root_dir, 'instance_mask')) + mmcv.mkdir_or_exist(osp.join(self.root_dir, 'semantic_mask')) + + pts_instance_mask.tofile( + osp.join(self.root_dir, 'instance_mask', + f'{sample_idx}.bin')) + pts_semantic_mask.tofile( + osp.join(self.root_dir, 'semantic_mask', + f'{sample_idx}.bin')) + + info['pts_instance_mask_path'] = osp.join( + 'instance_mask', f'{sample_idx}.bin') + info['pts_semantic_mask_path'] = osp.join( + 'semantic_mask', f'{sample_idx}.bin') + + if has_label: + annotations = {} + # box is of shape [k, 6 + class] + aligned_box_label = self.get_aligned_box_label(sample_idx) + unaligned_box_label = self.get_unaligned_box_label(sample_idx) + annotations['gt_num'] = aligned_box_label.shape[0] + if annotations['gt_num'] != 0: + aligned_box = aligned_box_label[:, :-1] # k, 6 + unaligned_box = unaligned_box_label[:, :-1] + classes = aligned_box_label[:, -1] # k + annotations['name'] = np.array([ + self.label2cat[self.cat_ids2class[classes[i]]] + for i in range(annotations['gt_num']) + ]) + # default names are given to aligned bbox for compatibility + # we also save unaligned bbox info with marked names + annotations['location'] = aligned_box[:, :3] + 
annotations['dimensions'] = aligned_box[:, 3:6] + annotations['gt_boxes_upright_depth'] = aligned_box + annotations['unaligned_location'] = unaligned_box[:, :3] + annotations['unaligned_dimensions'] = unaligned_box[:, 3:6] + annotations[ + 'unaligned_gt_boxes_upright_depth'] = unaligned_box + annotations['index'] = np.arange( + annotations['gt_num'], dtype=np.int32) + annotations['class'] = np.array([ + self.cat_ids2class[classes[i]] + for i in range(annotations['gt_num']) + ]) + axis_align_matrix = self.get_axis_align_matrix(sample_idx) + annotations['axis_align_matrix'] = axis_align_matrix # 4x4 + info['annos'] = annotations + return info + + sample_id_list = sample_id_list if sample_id_list is not None \ + else self.sample_id_list + with futures.ThreadPoolExecutor(num_workers) as executor: + infos = executor.map(process_single_scene, sample_id_list) + return list(infos) + + +class ScanNetSegData(object): + """ScanNet dataset used to generate infos for semantic segmentation task. + + Args: + data_root (str): Root path of the raw data. + ann_file (str): The generated scannet infos. + split (str): Set split type of the data. Default: 'train'. + num_points (int): Number of points in each data input. Default: 8192. + label_weight_func (function): Function to compute the label weight. + Default: None. + """ + + def __init__(self, + data_root, + ann_file, + split='train', + num_points=8192, + label_weight_func=None): + self.data_root = data_root + self.data_infos = mmcv.load(ann_file) + self.split = split + assert split in ['train', 'val', 'test'] + self.num_points = num_points + + self.all_ids = np.arange(41) # all possible ids + self.cat_ids = np.array([ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, + 39 + ]) # used for seg task + self.ignore_index = len(self.cat_ids) + + self.cat_id2class = np.ones((self.all_ids.shape[0],), dtype=np.int) * \ + self.ignore_index + for i, cat_id in enumerate(self.cat_ids): + self.cat_id2class[cat_id] = i + + # label weighting function is taken from + # https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24 + self.label_weight_func = (lambda x: 1.0 / np.log(1.2 + x)) if \ + label_weight_func is None else label_weight_func + + def get_seg_infos(self): + if self.split == 'test': + return + scene_idxs, label_weight = self.get_scene_idxs_and_label_weight() + save_folder = osp.join(self.data_root, 'seg_info') + mmcv.mkdir_or_exist(save_folder) + np.save( + osp.join(save_folder, f'{self.split}_resampled_scene_idxs.npy'), + scene_idxs) + np.save( + osp.join(save_folder, f'{self.split}_label_weight.npy'), + label_weight) + print(f'{self.split} resampled scene index and label weight saved') + + def _convert_to_label(self, mask): + """Convert class_id in loaded segmentation mask to label.""" + if isinstance(mask, str): + if mask.endswith('npy'): + mask = np.load(mask) + else: + mask = np.fromfile(mask, dtype=np.long) + label = self.cat_id2class[mask] + return label + + def get_scene_idxs_and_label_weight(self): + """Compute scene_idxs for data sampling and label weight for loss \ + calculation. + + We sample more times for scenes with more points. Label_weight is + inversely proportional to number of class points. 
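+
+        Concretely, scene ``s`` is repeated roughly
+        ``round(num_point_all[s] / self.num_points)`` times in
+        ``scene_idxs``, and the default weight of class ``c`` is
+        ``1 / log(1.2 + frac_c)``, with ``frac_c`` the normalized point
+        count of that class.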
+ """ + num_classes = len(self.cat_ids) + num_point_all = [] + label_weight = np.zeros((num_classes + 1, )) # ignore_index + for data_info in self.data_infos: + label = self._convert_to_label( + osp.join(self.data_root, data_info['pts_semantic_mask_path'])) + num_point_all.append(label.shape[0]) + class_count, _ = np.histogram(label, range(num_classes + 2)) + label_weight += class_count + + # repeat scene_idx for num_scene_point // num_sample_point times + sample_prob = np.array(num_point_all) / float(np.sum(num_point_all)) + num_iter = int(np.sum(num_point_all) / float(self.num_points)) + scene_idxs = [] + for idx in range(len(self.data_infos)): + scene_idxs.extend([idx] * int(round(sample_prob[idx] * num_iter))) + scene_idxs = np.array(scene_idxs).astype(np.int32) + + # calculate label weight, adopted from PointNet++ + label_weight = label_weight[:-1].astype(np.float32) + label_weight = label_weight / label_weight.sum() + label_weight = self.label_weight_func(label_weight).astype(np.float32) + + return scene_idxs, label_weight diff --git a/adzoo/bevformer/data_converter/sunrgbd_data_utils.py b/adzoo/bevformer/data_converter/sunrgbd_data_utils.py new file mode 100755 index 0000000..9f8a502 --- /dev/null +++ b/adzoo/bevformer/data_converter/sunrgbd_data_utils.py @@ -0,0 +1,221 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import numpy as np +from concurrent import futures as futures +from os import path as osp +from scipy import io as sio + + +def random_sampling(points, num_points, replace=None, return_choices=False): + """Random sampling. + + Sampling point cloud to a certain number of points. + + Args: + points (ndarray): Point cloud. + num_points (int): The number of samples. + replace (bool): Whether the sample is with or without replacement. + return_choices (bool): Whether to return choices. + + Returns: + points (ndarray): Point cloud after sampling. + """ + + if replace is None: + replace = (points.shape[0] < num_points) + choices = np.random.choice(points.shape[0], num_points, replace=replace) + if return_choices: + return points[choices], choices + else: + return points[choices] + + +class SUNRGBDInstance(object): + + def __init__(self, line): + data = line.split(' ') + data[1:] = [float(x) for x in data[1:]] + self.classname = data[0] + self.xmin = data[1] + self.ymin = data[2] + self.xmax = data[1] + data[3] + self.ymax = data[2] + data[4] + self.box2d = np.array([self.xmin, self.ymin, self.xmax, self.ymax]) + self.centroid = np.array([data[5], data[6], data[7]]) + self.w = data[8] + self.l = data[9] # noqa: E741 + self.h = data[10] + self.orientation = np.zeros((3, )) + self.orientation[0] = data[11] + self.orientation[1] = data[12] + self.heading_angle = -1 * np.arctan2(self.orientation[1], + self.orientation[0]) + self.box3d = np.concatenate([ + self.centroid, + np.array([self.l * 2, self.w * 2, self.h * 2, self.heading_angle]) + ]) + + +class SUNRGBDData(object): + """SUNRGBD data. + + Generate scannet infos for sunrgbd_converter. + + Args: + root_path (str): Root path of the raw data. + split (str): Set split type of the data. Default: 'train'. + use_v1 (bool): Whether to use v1. Default: False. 
+ """ + + def __init__(self, root_path, split='train', use_v1=False): + self.root_dir = root_path + self.split = split + self.split_dir = osp.join(root_path, 'sunrgbd_trainval') + self.classes = [ + 'bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser', + 'night_stand', 'bookshelf', 'bathtub' + ] + self.cat2label = {cat: self.classes.index(cat) for cat in self.classes} + self.label2cat = { + label: self.classes[label] + for label in range(len(self.classes)) + } + assert split in ['train', 'val', 'test'] + split_file = osp.join(self.split_dir, f'{split}_data_idx.txt') + mmcv.check_file_exist(split_file) + self.sample_id_list = map(int, mmcv.list_from_file(split_file)) + self.image_dir = osp.join(self.split_dir, 'image') + self.calib_dir = osp.join(self.split_dir, 'calib') + self.depth_dir = osp.join(self.split_dir, 'depth') + if use_v1: + self.label_dir = osp.join(self.split_dir, 'label_v1') + else: + self.label_dir = osp.join(self.split_dir, 'label') + + def __len__(self): + return len(self.sample_id_list) + + def get_image(self, idx): + img_filename = osp.join(self.image_dir, f'{idx:06d}.jpg') + return mmcv.imread(img_filename) + + def get_image_shape(self, idx): + image = self.get_image(idx) + return np.array(image.shape[:2], dtype=np.int32) + + def get_depth(self, idx): + depth_filename = osp.join(self.depth_dir, f'{idx:06d}.mat') + depth = sio.loadmat(depth_filename)['instance'] + return depth + + def get_calibration(self, idx): + calib_filepath = osp.join(self.calib_dir, f'{idx:06d}.txt') + lines = [line.rstrip() for line in open(calib_filepath)] + Rt = np.array([float(x) for x in lines[0].split(' ')]) + Rt = np.reshape(Rt, (3, 3), order='F').astype(np.float32) + K = np.array([float(x) for x in lines[1].split(' ')]) + K = np.reshape(K, (3, 3), order='F').astype(np.float32) + return K, Rt + + def get_label_objects(self, idx): + label_filename = osp.join(self.label_dir, f'{idx:06d}.txt') + lines = [line.rstrip() for line in open(label_filename)] + objects = [SUNRGBDInstance(line) for line in lines] + return objects + + def get_infos(self, num_workers=4, has_label=True, sample_id_list=None): + """Get data infos. + + This method gets information from the raw data. + + Args: + num_workers (int): Number of threads to be used. Default: 4. + has_label (bool): Whether the data has label. Default: True. + sample_id_list (list[int]): Index list of the sample. + Default: None. + + Returns: + infos (list[dict]): Information of the raw data. + """ + + def process_single_scene(sample_idx): + print(f'{self.split} sample_idx: {sample_idx}') + # convert depth to points + SAMPLE_NUM = 50000 + # TODO: Check whether can move the point + # sampling process during training. 
+ pc_upright_depth = self.get_depth(sample_idx) + pc_upright_depth_subsampled = random_sampling( + pc_upright_depth, SAMPLE_NUM) + + info = dict() + pc_info = {'num_features': 6, 'lidar_idx': sample_idx} + info['point_cloud'] = pc_info + + mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points')) + pc_upright_depth_subsampled.tofile( + osp.join(self.root_dir, 'points', f'{sample_idx:06d}.bin')) + + info['pts_path'] = osp.join('points', f'{sample_idx:06d}.bin') + img_path = osp.join('image', f'{sample_idx:06d}.jpg') + image_info = { + 'image_idx': sample_idx, + 'image_shape': self.get_image_shape(sample_idx), + 'image_path': img_path + } + info['image'] = image_info + + K, Rt = self.get_calibration(sample_idx) + calib_info = {'K': K, 'Rt': Rt} + info['calib'] = calib_info + + if has_label: + obj_list = self.get_label_objects(sample_idx) + annotations = {} + annotations['gt_num'] = len([ + obj.classname for obj in obj_list + if obj.classname in self.cat2label.keys() + ]) + if annotations['gt_num'] != 0: + annotations['name'] = np.array([ + obj.classname for obj in obj_list + if obj.classname in self.cat2label.keys() + ]) + annotations['bbox'] = np.concatenate([ + obj.box2d.reshape(1, 4) for obj in obj_list + if obj.classname in self.cat2label.keys() + ], + axis=0) + annotations['location'] = np.concatenate([ + obj.centroid.reshape(1, 3) for obj in obj_list + if obj.classname in self.cat2label.keys() + ], + axis=0) + annotations['dimensions'] = 2 * np.array([ + [obj.l, obj.w, obj.h] for obj in obj_list + if obj.classname in self.cat2label.keys() + ]) # lwh (depth) format + annotations['rotation_y'] = np.array([ + obj.heading_angle for obj in obj_list + if obj.classname in self.cat2label.keys() + ]) + annotations['index'] = np.arange( + len(obj_list), dtype=np.int32) + annotations['class'] = np.array([ + self.cat2label[obj.classname] for obj in obj_list + if obj.classname in self.cat2label.keys() + ]) + annotations['gt_boxes_upright_depth'] = np.stack( + [ + obj.box3d for obj in obj_list + if obj.classname in self.cat2label.keys() + ], + axis=0) # (K,8) + info['annos'] = annotations + return info + + sample_id_list = sample_id_list if \ + sample_id_list is not None else self.sample_id_list + with futures.ThreadPoolExecutor(num_workers) as executor: + infos = executor.map(process_single_scene, sample_id_list) + return list(infos) diff --git a/adzoo/bevformer/data_converter/waymo_converter.py b/adzoo/bevformer/data_converter/waymo_converter.py new file mode 100755 index 0000000..94fcae1 --- /dev/null +++ b/adzoo/bevformer/data_converter/waymo_converter.py @@ -0,0 +1,519 @@ +# Copyright (c) OpenMMLab. All rights reserved. +r"""Adapted from `Waymo to KITTI converter + `_. +""" + +try: + from waymo_open_dataset import dataset_pb2 +except ImportError: + raise ImportError( + 'Please run "pip install waymo-open-dataset-tf-2-2-0==1.2.0" ' + 'to install the official devkit first.') + +import mmcv +import numpy as np +import tensorflow as tf +from glob import glob +from os.path import join +from waymo_open_dataset.utils import range_image_utils, transform_utils +from waymo_open_dataset.utils.frame_utils import \ + parse_range_image_and_camera_projection + + +class Waymo2KITTI(object): + """Waymo to KITTI converter. + + This class serves as the converter to change the waymo raw data to KITTI + format. + + Args: + load_dir (str): Directory to load waymo raw data. + save_dir (str): Directory to save data in KITTI format. + prefix (str): Prefix of filename. 
In general, 0 for training, 1 for + validation and 2 for testing. + workers (str): Number of workers for the parallel process. + test_mode (bool): Whether in the test_mode. Default: False. + """ + + def __init__(self, + load_dir, + save_dir, + prefix, + workers=64, + test_mode=False): + self.filter_empty_3dboxes = True + self.filter_no_label_zone_points = True + + self.selected_waymo_classes = ['VEHICLE', 'PEDESTRIAN', 'CYCLIST'] + + # Only data collected in specific locations will be converted + # If set None, this filter is disabled + # Available options: location_sf (main dataset) + self.selected_waymo_locations = None + self.save_track_id = False + + # turn on eager execution for older tensorflow versions + if int(tf.__version__.split('.')[0]) < 2: + tf.enable_eager_execution() + + self.lidar_list = [ + '_FRONT', '_FRONT_RIGHT', '_FRONT_LEFT', '_SIDE_RIGHT', + '_SIDE_LEFT' + ] + self.type_list = [ + 'UNKNOWN', 'VEHICLE', 'PEDESTRIAN', 'SIGN', 'CYCLIST' + ] + self.waymo_to_kitti_class_map = { + 'UNKNOWN': 'DontCare', + 'PEDESTRIAN': 'Pedestrian', + 'VEHICLE': 'Car', + 'CYCLIST': 'Cyclist', + 'SIGN': 'Sign' # not in kitti + } + + self.load_dir = load_dir + self.save_dir = save_dir + self.prefix = prefix + self.workers = int(workers) + self.test_mode = test_mode + + self.tfrecord_pathnames = sorted( + glob(join(self.load_dir, '*.tfrecord'))) + + self.label_save_dir = f'{self.save_dir}/label_' + self.label_all_save_dir = f'{self.save_dir}/label_all' + self.image_save_dir = f'{self.save_dir}/image_' + self.calib_save_dir = f'{self.save_dir}/calib' + self.point_cloud_save_dir = f'{self.save_dir}/velodyne' + self.pose_save_dir = f'{self.save_dir}/pose' + + self.create_folder() + + def convert(self): + """Convert action.""" + print('Start converting ...') + mmcv.track_parallel_progress(self.convert_one, range(len(self)), + self.workers) + print('\nFinished ...') + + def convert_one(self, file_idx): + """Convert action for single file. + + Args: + file_idx (int): Index of the file to be converted. + """ + pathname = self.tfrecord_pathnames[file_idx] + dataset = tf.data.TFRecordDataset(pathname, compression_type='') + + for frame_idx, data in enumerate(dataset): + + if frame_idx % 5 != 0: + continue + # print(frame_idx) + frame = dataset_pb2.Frame() + frame.ParseFromString(bytearray(data.numpy())) + if (self.selected_waymo_locations is not None + and frame.context.stats.location + not in self.selected_waymo_locations): + continue + + self.save_image(frame, file_idx, frame_idx) + self.save_calib(frame, file_idx, frame_idx) + self.save_lidar(frame, file_idx, frame_idx) + self.save_pose(frame, file_idx, frame_idx) + + if not self.test_mode: + self.save_label(frame, file_idx, frame_idx) + + def __len__(self): + """Length of the filename list.""" + return len(self.tfrecord_pathnames) + + def save_image(self, frame, file_idx, frame_idx): + """Parse and save the images in png format. + + Args: + frame (:obj:`Frame`): Open dataset frame proto. + file_idx (int): Current file index. + frame_idx (int): Current frame index. + """ + for img in frame.images: + img_path = f'{self.image_save_dir}{str(img.name - 1)}/' + \ + f'{self.prefix}{str(file_idx).zfill(3)}' + \ + f'{str(frame_idx).zfill(3)}.png' + img = mmcv.imfrombytes(img.image) + mmcv.imwrite(img, img_path) + + def save_calib(self, frame, file_idx, frame_idx): + """Parse and save the calibration data. + + Args: + frame (:obj:`Frame`): Open dataset frame proto. + file_idx (int): Current file index. + frame_idx (int): Current frame index. 
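+
+        Note:
+            The result is a KITTI-style calib file (``P0``-``P4``,
+            ``R0_rect``, ``Tr_velo_to_cam_0..4``) written to
+            ``<save_dir>/calib/<prefix><file_idx:03d><frame_idx:03d>.txt``.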
+ """ + # waymo front camera to kitti reference camera + T_front_cam_to_ref = np.array([[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], + [1.0, 0.0, 0.0]]) + camera_calibs = [] + R0_rect = [f'{i:e}' for i in np.eye(3).flatten()] + Tr_velo_to_cams = [] + calib_context = '' + + for camera in frame.context.camera_calibrations: + # extrinsic parameters + T_cam_to_vehicle = np.array(camera.extrinsic.transform).reshape( + 4, 4) + T_vehicle_to_cam = np.linalg.inv(T_cam_to_vehicle) + Tr_velo_to_cam = \ + self.cart_to_homo(T_front_cam_to_ref) @ T_vehicle_to_cam + if camera.name == 1: # FRONT = 1, see dataset.proto for details + self.T_velo_to_front_cam = Tr_velo_to_cam.copy() + Tr_velo_to_cam = Tr_velo_to_cam[:3, :].reshape((12, )) + Tr_velo_to_cams.append([f'{i:e}' for i in Tr_velo_to_cam]) + + # intrinsic parameters + camera_calib = np.zeros((3, 4)) + camera_calib[0, 0] = camera.intrinsic[0] + camera_calib[1, 1] = camera.intrinsic[1] + camera_calib[0, 2] = camera.intrinsic[2] + camera_calib[1, 2] = camera.intrinsic[3] + camera_calib[2, 2] = 1 + camera_calib = list(camera_calib.reshape(12)) + camera_calib = [f'{i:e}' for i in camera_calib] + camera_calibs.append(camera_calib) + + # all camera ids are saved as id-1 in the result because + # camera 0 is unknown in the proto + for i in range(5): + calib_context += 'P' + str(i) + ': ' + \ + ' '.join(camera_calibs[i]) + '\n' + calib_context += 'R0_rect' + ': ' + ' '.join(R0_rect) + '\n' + for i in range(5): + calib_context += 'Tr_velo_to_cam_' + str(i) + ': ' + \ + ' '.join(Tr_velo_to_cams[i]) + '\n' + + with open( + f'{self.calib_save_dir}/{self.prefix}' + + f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt', + 'w+') as fp_calib: + fp_calib.write(calib_context) + fp_calib.close() + + def save_lidar(self, frame, file_idx, frame_idx): + """Parse and save the lidar data in psd format. + + Args: + frame (:obj:`Frame`): Open dataset frame proto. + file_idx (int): Current file index. + frame_idx (int): Current frame index. + """ + range_images, camera_projections, range_image_top_pose = \ + parse_range_image_and_camera_projection(frame) + + # First return + points_0, cp_points_0, intensity_0, elongation_0 = \ + self.convert_range_image_to_point_cloud( + frame, + range_images, + camera_projections, + range_image_top_pose, + ri_index=0 + ) + points_0 = np.concatenate(points_0, axis=0) + intensity_0 = np.concatenate(intensity_0, axis=0) + elongation_0 = np.concatenate(elongation_0, axis=0) + + # Second return + points_1, cp_points_1, intensity_1, elongation_1 = \ + self.convert_range_image_to_point_cloud( + frame, + range_images, + camera_projections, + range_image_top_pose, + ri_index=1 + ) + points_1 = np.concatenate(points_1, axis=0) + intensity_1 = np.concatenate(intensity_1, axis=0) + elongation_1 = np.concatenate(elongation_1, axis=0) + + points = np.concatenate([points_0, points_1], axis=0) + intensity = np.concatenate([intensity_0, intensity_1], axis=0) + elongation = np.concatenate([elongation_0, elongation_1], axis=0) + timestamp = frame.timestamp_micros * np.ones_like(intensity) + + # concatenate x,y,z, intensity, elongation, timestamp (6-dim) + point_cloud = np.column_stack( + (points, intensity, elongation, timestamp)) + + pc_path = f'{self.point_cloud_save_dir}/{self.prefix}' + \ + f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.bin' + point_cloud.astype(np.float32).tofile(pc_path) + + def save_label(self, frame, file_idx, frame_idx): + """Parse and save the label data in txt format. 
+ The relation between waymo and kitti coordinates is noteworthy: + 1. x, y, z correspond to l, w, h (waymo) -> l, h, w (kitti) + 2. x-y-z: front-left-up (waymo) -> right-down-front(kitti) + 3. bbox origin at volumetric center (waymo) -> bottom center (kitti) + 4. rotation: +x around y-axis (kitti) -> +x around z-axis (waymo) + + Args: + frame (:obj:`Frame`): Open dataset frame proto. + file_idx (int): Current file index. + frame_idx (int): Current frame index. + """ + fp_label_all = open( + f'{self.label_all_save_dir}/{self.prefix}' + + f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt', 'w+') + id_to_bbox = dict() + id_to_name = dict() + for labels in frame.projected_lidar_labels: + name = labels.name + for label in labels.labels: + # TODO: need a workaround as bbox may not belong to front cam + bbox = [ + label.box.center_x - label.box.length / 2, + label.box.center_y - label.box.width / 2, + label.box.center_x + label.box.length / 2, + label.box.center_y + label.box.width / 2 + ] + id_to_bbox[label.id] = bbox + id_to_name[label.id] = name - 1 + + for obj in frame.laser_labels: + bounding_box = None + name = None + id = obj.id + for lidar in self.lidar_list: + if id + lidar in id_to_bbox: + bounding_box = id_to_bbox.get(id + lidar) + name = str(id_to_name.get(id + lidar)) + break + + if bounding_box is None or name is None: + name = '0' + bounding_box = (0, 0, 0, 0) + + my_type = self.type_list[obj.type] + + if my_type not in self.selected_waymo_classes: + continue + + if self.filter_empty_3dboxes and obj.num_lidar_points_in_box < 1: + continue + + my_type = self.waymo_to_kitti_class_map[my_type] + + height = obj.box.height + width = obj.box.width + length = obj.box.length + + x = obj.box.center_x + y = obj.box.center_y + z = obj.box.center_z - height / 2 + + # project bounding box to the virtual reference frame + pt_ref = self.T_velo_to_front_cam @ \ + np.array([x, y, z, 1]).reshape((4, 1)) + x, y, z, _ = pt_ref.flatten().tolist() + + rotation_y = -obj.box.heading - np.pi / 2 + track_id = obj.id + + # not available + truncated = 0 + occluded = 0 + alpha = -10 + + line = my_type + \ + ' {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n'.format( + round(truncated, 2), occluded, round(alpha, 2), + round(bounding_box[0], 2), round(bounding_box[1], 2), + round(bounding_box[2], 2), round(bounding_box[3], 2), + round(height, 2), round(width, 2), round(length, 2), + round(x, 2), round(y, 2), round(z, 2), + round(rotation_y, 2)) + + if self.save_track_id: + line_all = line[:-1] + ' ' + name + ' ' + track_id + '\n' + else: + line_all = line[:-1] + ' ' + name + '\n' + + fp_label = open( + f'{self.label_save_dir}{name}/{self.prefix}' + + f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt', 'a') + fp_label.write(line) + fp_label.close() + + fp_label_all.write(line_all) + + fp_label_all.close() + + def save_pose(self, frame, file_idx, frame_idx): + """Parse and save the pose data. + + Note that SDC's own pose is not included in the regular training + of KITTI dataset. KITTI raw dataset contains ego motion files + but are not often used. Pose is important for algorithms that + take advantage of the temporal information. + + Args: + frame (:obj:`Frame`): Open dataset frame proto. + file_idx (int): Current file index. + frame_idx (int): Current frame index. 
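+
+        Note:
+            The pose is the 4x4 vehicle-to-global transform of the frame,
+            written as a plain-text matrix under ``<save_dir>/pose/`` with
+            the same filename scheme as the other modalities.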
+ """ + pose = np.array(frame.pose.transform).reshape(4, 4) + np.savetxt( + join(f'{self.pose_save_dir}/{self.prefix}' + + f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt'), + pose) + + def create_folder(self): + """Create folder for data preprocessing.""" + if not self.test_mode: + dir_list1 = [ + self.label_all_save_dir, self.calib_save_dir, + self.point_cloud_save_dir, self.pose_save_dir + ] + dir_list2 = [self.label_save_dir, self.image_save_dir] + else: + dir_list1 = [ + self.calib_save_dir, self.point_cloud_save_dir, + self.pose_save_dir + ] + dir_list2 = [self.image_save_dir] + for d in dir_list1: + mmcv.mkdir_or_exist(d) + for d in dir_list2: + for i in range(5): + mmcv.mkdir_or_exist(f'{d}{str(i)}') + + def convert_range_image_to_point_cloud(self, + frame, + range_images, + camera_projections, + range_image_top_pose, + ri_index=0): + """Convert range images to point cloud. + + Args: + frame (:obj:`Frame`): Open dataset frame. + range_images (dict): Mapping from laser_name to list of two + range images corresponding with two returns. + camera_projections (dict): Mapping from laser_name to list of two + camera projections corresponding with two returns. + range_image_top_pose (:obj:`Transform`): Range image pixel pose for + top lidar. + ri_index (int): 0 for the first return, 1 for the second return. + Default: 0. + + Returns: + tuple[list[np.ndarray]]: (List of points with shape [N, 3], + camera projections of points with shape [N, 6], intensity + with shape [N, 1], elongation with shape [N, 1]). All the + lists have the length of lidar numbers (5). + """ + calibrations = sorted( + frame.context.laser_calibrations, key=lambda c: c.name) + points = [] + cp_points = [] + intensity = [] + elongation = [] + + frame_pose = tf.convert_to_tensor( + value=np.reshape(np.array(frame.pose.transform), [4, 4])) + # [H, W, 6] + range_image_top_pose_tensor = tf.reshape( + tf.convert_to_tensor(value=range_image_top_pose.data), + range_image_top_pose.shape.dims) + # [H, W, 3, 3] + range_image_top_pose_tensor_rotation = \ + transform_utils.get_rotation_matrix( + range_image_top_pose_tensor[..., 0], + range_image_top_pose_tensor[..., 1], + range_image_top_pose_tensor[..., 2]) + range_image_top_pose_tensor_translation = \ + range_image_top_pose_tensor[..., 3:] + range_image_top_pose_tensor = transform_utils.get_transform( + range_image_top_pose_tensor_rotation, + range_image_top_pose_tensor_translation) + for c in calibrations: + range_image = range_images[c.name][ri_index] + if len(c.beam_inclinations) == 0: + beam_inclinations = range_image_utils.compute_inclination( + tf.constant( + [c.beam_inclination_min, c.beam_inclination_max]), + height=range_image.shape.dims[0]) + else: + beam_inclinations = tf.constant(c.beam_inclinations) + + beam_inclinations = tf.reverse(beam_inclinations, axis=[-1]) + extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4]) + + range_image_tensor = tf.reshape( + tf.convert_to_tensor(value=range_image.data), + range_image.shape.dims) + pixel_pose_local = None + frame_pose_local = None + if c.name == dataset_pb2.LaserName.TOP: + pixel_pose_local = range_image_top_pose_tensor + pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0) + frame_pose_local = tf.expand_dims(frame_pose, axis=0) + range_image_mask = range_image_tensor[..., 0] > 0 + + if self.filter_no_label_zone_points: + nlz_mask = range_image_tensor[..., 3] != 1.0 # 1.0: in NLZ + range_image_mask = range_image_mask & nlz_mask + + range_image_cartesian = \ + 
range_image_utils.extract_point_cloud_from_range_image( + tf.expand_dims(range_image_tensor[..., 0], axis=0), + tf.expand_dims(extrinsic, axis=0), + tf.expand_dims(tf.convert_to_tensor( + value=beam_inclinations), axis=0), + pixel_pose=pixel_pose_local, + frame_pose=frame_pose_local) + + range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0) + points_tensor = tf.gather_nd(range_image_cartesian, + tf.compat.v1.where(range_image_mask)) + + cp = camera_projections[c.name][ri_index] + cp_tensor = tf.reshape( + tf.convert_to_tensor(value=cp.data), cp.shape.dims) + cp_points_tensor = tf.gather_nd( + cp_tensor, tf.compat.v1.where(range_image_mask)) + points.append(points_tensor.numpy()) + cp_points.append(cp_points_tensor.numpy()) + + intensity_tensor = tf.gather_nd(range_image_tensor[..., 1], + tf.where(range_image_mask)) + intensity.append(intensity_tensor.numpy()) + + elongation_tensor = tf.gather_nd(range_image_tensor[..., 2], + tf.where(range_image_mask)) + elongation.append(elongation_tensor.numpy()) + + return points, cp_points, intensity, elongation + + def cart_to_homo(self, mat): + """Convert transformation matrix in Cartesian coordinates to + homogeneous format. + + Args: + mat (np.ndarray): Transformation matrix in Cartesian. + The input matrix shape is 3x3 or 3x4. + + Returns: + np.ndarray: Transformation matrix in homogeneous format. + The matrix shape is 4x4. + """ + ret = np.eye(4) + if mat.shape == (3, 3): + ret[:3, :3] = mat + elif mat.shape == (3, 4): + ret[:3, :] = mat + else: + raise ValueError(mat.shape) + return ret diff --git a/adzoo/bevformer/dist_test.sh b/adzoo/bevformer/dist_test.sh new file mode 100755 index 0000000..8b19a04 --- /dev/null +++ b/adzoo/bevformer/dist_test.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +CONFIG=$1 +CHECKPOINT=$2 +GPUS=$3 +PORT=${PORT:-29203} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ + $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} --eval bbox diff --git a/adzoo/bevformer/dist_train.sh b/adzoo/bevformer/dist_train.sh new file mode 100755 index 0000000..84d7fd7 --- /dev/null +++ b/adzoo/bevformer/dist_train.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +CONFIG=$1 +GPUS=$2 +PORT=${PORT:-38912} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ + $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3} diff --git a/adzoo/bevformer/fp16/dist_train.sh b/adzoo/bevformer/fp16/dist_train.sh new file mode 100755 index 0000000..4ac9a15 --- /dev/null +++ b/adzoo/bevformer/fp16/dist_train.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +CONFIG=$1 +GPUS=$2 +PORT=${PORT:-28508} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ + $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3} --deterministic diff --git a/adzoo/bevformer/fp16/train.py b/adzoo/bevformer/fp16/train.py new file mode 100644 index 0000000..eddc349 --- /dev/null +++ b/adzoo/bevformer/fp16/train.py @@ -0,0 +1,271 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
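+# Training entry point for the FP16 BEVFormer configs: largely the same CLI
+# as adzoo/bevformer/train.py, plus an fp16-wrapped copy of the model (see
+# `wrap_fp16_model` below) that is used for evaluation.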
+from __future__ import division + +import argparse +import copy +import mmcv +import os +import time +import torch +import warnings +from mmcv import Config, DictAction +from mmcv.runner import get_dist_info, init_dist, wrap_fp16_model +from os import path as osp + +from mmdet import __version__ as mmdet_version +from mmdet3d import __version__ as mmdet3d_version +#from mmdet3d.apis import train_model + +from mmdet3d.datasets import build_dataset +from mmdet3d.models import build_model +from mmdet3d.utils import collect_env, get_root_logger +from mmdet.apis import set_random_seed +from mmseg import __version__ as mmseg_version + +from mmcv.utils import TORCH_VERSION, digit_version + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a detector') + parser.add_argument('config', help='train config file path') + parser.add_argument('--work-dir', help='the dir to save logs and models') + parser.add_argument( + '--resume-from', help='the checkpoint file to resume from') + parser.add_argument( + '--no-validate', + action='store_true', + help='whether not to evaluate the checkpoint during training') + group_gpus = parser.add_mutually_exclusive_group() + group_gpus.add_argument( + '--gpus', + type=int, + help='number of gpus to use ' + '(only applicable to non-distributed training)') + group_gpus.add_argument( + '--gpu-ids', + type=int, + nargs='+', + help='ids of gpus to use ' + '(only applicable to non-distributed training)') + parser.add_argument('--seed', type=int, default=0, help='random seed') + parser.add_argument( + '--deterministic', + action='store_true', + help='whether to set deterministic options for CUDNN backend.') + parser.add_argument( + '--options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file (deprecate), ' + 'change to --cfg-options instead.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + parser.add_argument( + '--autoscale-lr', + action='store_true', + help='automatically scale lr with the number of gpus') + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + if args.options and args.cfg_options: + raise ValueError( + '--options and --cfg-options cannot be both specified, ' + '--options is deprecated in favor of --cfg-options') + if args.options: + warnings.warn('--options is deprecated in favor of --cfg-options') + args.cfg_options = args.options + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + # import modules from string list. 
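+    # a config may request extra imports in the standard OpenMMLab form,
+    # e.g. custom_imports = dict(imports=['projects.mmdet3d_plugin'],
+    #                            allow_failed_imports=False)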
+    if cfg.get('custom_imports', None):
+        from mmcv.utils import import_modules_from_strings
+        import_modules_from_strings(**cfg['custom_imports'])
+
+    # import modules from plugin/xx; the registry will be updated
+    if hasattr(cfg, 'plugin'):
+        if cfg.plugin:
+            import importlib
+            if hasattr(cfg, 'plugin_dir'):
+                plugin_dir = cfg.plugin_dir
+                _module_dir = os.path.dirname(plugin_dir)
+                _module_dir = _module_dir.split('/')
+                _module_path = _module_dir[0]
+
+                for m in _module_dir[1:]:
+                    _module_path = _module_path + '.' + m
+                print(_module_path)
+                plg_lib = importlib.import_module(_module_path)
+            else:
+                # import dir is the dirpath of the config file
+                _module_dir = os.path.dirname(args.config)
+                _module_dir = _module_dir.split('/')
+                _module_path = _module_dir[0]
+                for m in _module_dir[1:]:
+                    _module_path = _module_path + '.' + m
+                print(_module_path)
+                plg_lib = importlib.import_module(_module_path)
+
+            from projects.mmdet3d_plugin.bevformer.apis import custom_train_model
+    # set cudnn_benchmark
+    if cfg.get('cudnn_benchmark', False):
+        torch.backends.cudnn.benchmark = True
+
+    # work_dir is determined in this priority: CLI > segment in file > filename
+    if args.work_dir is not None:
+        # update configs according to CLI args if args.work_dir is not None
+        cfg.work_dir = args.work_dir
+    elif cfg.get('work_dir', None) is None:
+        # use config filename as default work_dir if cfg.work_dir is None
+        cfg.work_dir = osp.join('./work_dirs',
+                                osp.splitext(osp.basename(args.config))[0])
+
+    if args.resume_from is not None and osp.isfile(args.resume_from):
+        cfg.resume_from = args.resume_from
+
+    if args.gpu_ids is not None:
+        cfg.gpu_ids = args.gpu_ids
+    else:
+        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
+    if digit_version(TORCH_VERSION) != digit_version('1.8.1'):
+        cfg.optimizer['type'] = 'AdamW'
+    if args.autoscale_lr:
+        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
+        cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8
+
+    # init distributed env first, since logger depends on the dist info.
+    if args.launcher == 'none':
+        assert False, 'Non-distributed training is not supported!'
+ distributed = False + else: + distributed = True + init_dist(args.launcher, **cfg.dist_params) + # re-set gpu_ids with distributed training mode + _, world_size = get_dist_info() + cfg.gpu_ids = range(world_size) + + # create work_dir + mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) + # dump config + cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) + # init the logger before other steps + timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) + log_file = osp.join(cfg.work_dir, f'{timestamp}.log') + # specify logger name, if we still use 'mmdet', the output info will be + # filtered and won't be saved in the log_file + # TODO: ugly workaround to judge whether we are training det or seg model + if cfg.model.type in ['EncoderDecoder3D']: + logger_name = 'mmseg' + else: + logger_name = 'mmdet' + logger = get_root_logger( + log_file=log_file, log_level=cfg.log_level, name=logger_name) + + # init the meta dict to record some important information such as + # environment info and seed, which will be logged + meta = dict() + # log env info + env_info_dict = collect_env() + env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()]) + dash_line = '-' * 60 + '\n' + logger.info('Environment info:\n' + dash_line + env_info + '\n' + + dash_line) + meta['env_info'] = env_info + meta['config'] = cfg.pretty_text + + # log some basic info + logger.info(f'Distributed training: {distributed}') + logger.info(f'Config:\n{cfg.pretty_text}') + + # set random seeds + if args.seed is not None: + logger.info(f'Set random seed to {args.seed}, ' + f'deterministic: {args.deterministic}') + set_random_seed(args.seed, deterministic=args.deterministic) + cfg.seed = args.seed + meta['seed'] = args.seed + meta['exp_name'] = osp.basename(args.config) + + model = build_model( + cfg.model, + train_cfg=cfg.get('train_cfg'), + test_cfg=cfg.get('test_cfg')) + model.init_weights() + + eval_model_config = copy.deepcopy(cfg.model) + eval_model = build_model( + eval_model_config, + train_cfg=cfg.get('train_cfg'), + test_cfg=cfg.get('test_cfg')) + + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + wrap_fp16_model(eval_model) + + #eval_model.init_weights() + eval_model.load_state_dict(model.state_dict()) + + logger.info(f'Model:\n{model}') + from projects.mmdet3d_plugin.datasets import custom_build_dataset + datasets = [custom_build_dataset(cfg.data.train)] + if len(cfg.workflow) == 2: + val_dataset = copy.deepcopy(cfg.data.val) + # in case we use a dataset wrapper + if 'dataset' in cfg.data.train: + val_dataset.pipeline = cfg.data.train.dataset.pipeline + else: + val_dataset.pipeline = cfg.data.train.pipeline + # set test_mode=False here in deep copied config + # which do not affect AP/AR calculation later + # refer to https://mmdetection3d.readthedocs.io/en/latest/tutorials/customize_runtime.html#customize-workflow # noqa + val_dataset.test_mode = False + datasets.append(custom_build_dataset(val_dataset)) + if cfg.checkpoint_config is not None: + # save mmdet version, config file content and class names in + # checkpoints as meta data + cfg.checkpoint_config.meta = dict( + mmdet_version=mmdet_version, + mmseg_version=mmseg_version, + mmdet3d_version=mmdet3d_version, + config=cfg.pretty_text, + CLASSES=datasets[0].CLASSES, + PALETTE=datasets[0].PALETTE # for segmentors + if hasattr(datasets[0], 'PALETTE') else None) + # add an attribute for visualization convenience + model.CLASSES = datasets[0].CLASSES + custom_train_model( + model, + datasets, + cfg, + eval_model=eval_model, + 
distributed=distributed, + validate=(not args.no_validate), + timestamp=timestamp, + meta=meta) + + +if __name__ == '__main__': + main() diff --git a/adzoo/bevformer/misc/browse_dataset.py b/adzoo/bevformer/misc/browse_dataset.py new file mode 100755 index 0000000..e3419f6 --- /dev/null +++ b/adzoo/bevformer/misc/browse_dataset.py @@ -0,0 +1,240 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import numpy as np +import warnings +from mmcv import Config, DictAction, mkdir_or_exist, track_iter_progress +from os import path as osp + +from mmdet3d.core.bbox import (Box3DMode, CameraInstance3DBoxes, Coord3DMode, + DepthInstance3DBoxes, LiDARInstance3DBoxes) +from mmdet3d.core.visualizer import (show_multi_modality_result, show_result, + show_seg_result) +from mmdet3d.datasets import build_dataset + + +def parse_args(): + parser = argparse.ArgumentParser(description='Browse a dataset') + parser.add_argument('config', help='train config file path') + parser.add_argument( + '--skip-type', + type=str, + nargs='+', + default=['Normalize'], + help='skip some useless pipeline') + parser.add_argument( + '--output-dir', + default=None, + type=str, + help='If there is no display interface, you can save it') + parser.add_argument( + '--task', + type=str, + choices=['det', 'seg', 'multi_modality-det', 'mono-det'], + help='Determine the visualization method depending on the task.') + parser.add_argument( + '--online', + action='store_true', + help='Whether to perform online visualization. Note that you often ' + 'need a monitor to do so.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def build_data_cfg(config_path, skip_type, cfg_options): + """Build data config for loading visualization data.""" + cfg = Config.fromfile(config_path) + if cfg_options is not None: + cfg.merge_from_dict(cfg_options) + # import modules from string list. 
+ if cfg.get('custom_imports', None): + from mmcv.utils import import_modules_from_strings + import_modules_from_strings(**cfg['custom_imports']) + # extract inner dataset of `RepeatDataset` as `cfg.data.train` + # so we don't need to worry about it later + if cfg.data.train['type'] == 'RepeatDataset': + cfg.data.train = cfg.data.train.dataset + # use only first dataset for `ConcatDataset` + if cfg.data.train['type'] == 'ConcatDataset': + cfg.data.train = cfg.data.train.datasets[0] + train_data_cfg = cfg.data.train + # eval_pipeline purely consists of loading functions + # use eval_pipeline for data loading + train_data_cfg['pipeline'] = [ + x for x in cfg.eval_pipeline if x['type'] not in skip_type + ] + + return cfg + + +def to_depth_mode(points, bboxes): + """Convert points and bboxes to Depth Coord and Depth Box mode.""" + if points is not None: + points = Coord3DMode.convert_point(points.copy(), Coord3DMode.LIDAR, + Coord3DMode.DEPTH) + if bboxes is not None: + bboxes = Box3DMode.convert(bboxes.clone(), Box3DMode.LIDAR, + Box3DMode.DEPTH) + return points, bboxes + + +def show_det_data(idx, dataset, out_dir, filename, show=False): + """Visualize 3D point cloud and 3D bboxes.""" + example = dataset.prepare_train_data(idx) + points = example['points']._data.numpy() + gt_bboxes = dataset.get_ann_info(idx)['gt_bboxes_3d'].tensor + if dataset.box_mode_3d != Box3DMode.DEPTH: + points, gt_bboxes = to_depth_mode(points, gt_bboxes) + show_result( + points, + gt_bboxes.clone(), + None, + out_dir, + filename, + show=show, + snapshot=True) + + +def show_seg_data(idx, dataset, out_dir, filename, show=False): + """Visualize 3D point cloud and segmentation mask.""" + example = dataset.prepare_train_data(idx) + points = example['points']._data.numpy() + gt_seg = example['pts_semantic_mask']._data.numpy() + show_seg_result( + points, + gt_seg.copy(), + None, + out_dir, + filename, + np.array(dataset.PALETTE), + dataset.ignore_index, + show=show, + snapshot=True) + + +def show_proj_bbox_img(idx, + dataset, + out_dir, + filename, + show=False, + is_nus_mono=False): + """Visualize 3D bboxes on 2D image by projection.""" + try: + example = dataset.prepare_train_data(idx) + except AttributeError: # for Mono-3D datasets + example = dataset.prepare_train_img(idx) + gt_bboxes = dataset.get_ann_info(idx)['gt_bboxes_3d'] + img_metas = example['img_metas']._data + img = example['img']._data.numpy() + # need to transpose channel to first dim + img = img.transpose(1, 2, 0) + # no 3D gt bboxes, just show img + if gt_bboxes.tensor.shape[0] == 0: + gt_bboxes = None + if isinstance(gt_bboxes, DepthInstance3DBoxes): + show_multi_modality_result( + img, + gt_bboxes, + None, + None, + out_dir, + filename, + box_mode='depth', + img_metas=img_metas, + show=show) + elif isinstance(gt_bboxes, LiDARInstance3DBoxes): + show_multi_modality_result( + img, + gt_bboxes, + None, + img_metas['lidar2img'], + out_dir, + filename, + box_mode='lidar', + img_metas=img_metas, + show=show) + elif isinstance(gt_bboxes, CameraInstance3DBoxes): + show_multi_modality_result( + img, + gt_bboxes, + None, + img_metas['cam2img'], + out_dir, + filename, + box_mode='camera', + img_metas=img_metas, + show=show) + else: + # can't project, just show img + warnings.warn( + f'unrecognized gt box type {type(gt_bboxes)}, only show image') + show_multi_modality_result( + img, None, None, None, out_dir, filename, show=show) + + +def main(): + args = parse_args() + + if args.output_dir is not None: + mkdir_or_exist(args.output_dir) + + cfg = 
build_data_cfg(args.config, args.skip_type, args.cfg_options) + try: + dataset = build_dataset( + cfg.data.train, default_args=dict(filter_empty_gt=False)) + except TypeError: # seg dataset doesn't have `filter_empty_gt` key + dataset = build_dataset(cfg.data.train) + data_infos = dataset.data_infos + dataset_type = cfg.dataset_type + + # configure visualization mode + vis_task = args.task # 'det', 'seg', 'multi_modality-det', 'mono-det' + + for idx, data_info in enumerate(track_iter_progress(data_infos)): + if dataset_type in ['KittiDataset', 'WaymoDataset']: + data_path = data_info['point_cloud']['velodyne_path'] + elif dataset_type in [ + 'ScanNetDataset', 'SUNRGBDDataset', 'ScanNetSegDataset', + 'S3DISSegDataset', 'S3DISDataset' + ]: + data_path = data_info['pts_path'] + elif dataset_type in ['NuScenesDataset', 'LyftDataset']: + data_path = data_info['lidar_path'] + elif dataset_type in ['NuScenesMonoDataset']: + data_path = data_info['file_name'] + else: + raise NotImplementedError( + f'unsupported dataset type {dataset_type}') + + file_name = osp.splitext(osp.basename(data_path))[0] + + if vis_task in ['det', 'multi_modality-det']: + # show 3D bboxes on 3D point clouds + show_det_data( + idx, dataset, args.output_dir, file_name, show=args.online) + if vis_task in ['multi_modality-det', 'mono-det']: + # project 3D bboxes to 2D image + show_proj_bbox_img( + idx, + dataset, + args.output_dir, + file_name, + show=args.online, + is_nus_mono=(dataset_type == 'NuScenesMonoDataset')) + elif vis_task in ['seg']: + # show 3D segmentation mask on 3D point clouds + show_seg_data( + idx, dataset, args.output_dir, file_name, show=args.online) + + +if __name__ == '__main__': + main() diff --git a/adzoo/bevformer/misc/print_config.py b/adzoo/bevformer/misc/print_config.py new file mode 100755 index 0000000..3100fc3 --- /dev/null +++ b/adzoo/bevformer/misc/print_config.py @@ -0,0 +1,26 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from mmcv import Config, DictAction + + +def parse_args(): + parser = argparse.ArgumentParser(description='Print the whole config') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--options', nargs='+', action=DictAction, help='arguments in dict') + args = parser.parse_args() + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.options is not None: + cfg.merge_from_dict(args.options) + print(f'Config:\n{cfg.pretty_text}') + + +if __name__ == '__main__': + main() diff --git a/adzoo/bevformer/misc/visualize_results.py b/adzoo/bevformer/misc/visualize_results.py new file mode 100755 index 0000000..302adc5 --- /dev/null +++ b/adzoo/bevformer/misc/visualize_results.py @@ -0,0 +1,49 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
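+# Illustrative usage (the results file is whatever the test script dumped):
+#   python adzoo/bevformer/misc/visualize_results.py \
+#       adzoo/bevformer/configs/bevformer/bevformer_base.py \
+#       --result work_dirs/results.pkl --show-dir work_dirs/vis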
+import argparse +import mmcv +from mmcv import Config + +from mmdet3d.datasets import build_dataset + + +def parse_args(): + parser = argparse.ArgumentParser( + description='MMDet3D visualize the results') + parser.add_argument('config', help='test config file path') + parser.add_argument('--result', help='results file in pickle format') + parser.add_argument( + '--show-dir', help='directory where visualize results will be saved') + args = parser.parse_args() + + return args + + +def main(): + args = parse_args() + + if args.result is not None and \ + not args.result.endswith(('.pkl', '.pickle')): + raise ValueError('The results file must be a pkl file.') + + cfg = Config.fromfile(args.config) + cfg.data.test.test_mode = True + + # build the dataset + dataset = build_dataset(cfg.data.test) + results = mmcv.load(args.result) + + if getattr(dataset, 'show', None) is not None: + # data loading pipeline for showing + eval_pipeline = cfg.get('eval_pipeline', {}) + if eval_pipeline: + dataset.show(results, args.show_dir, pipeline=eval_pipeline) + else: + dataset.show(results, args.show_dir) # use default pipeline + else: + raise NotImplementedError( + 'Show is not implemented for dataset {}!'.format( + type(dataset).__name__)) + + +if __name__ == '__main__': + main() diff --git a/adzoo/bevformer/mmdet3d_plugin/bevformer/__init__.py b/adzoo/bevformer/mmdet3d_plugin/bevformer/__init__.py new file mode 100644 index 0000000..0ead209 --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/bevformer/__init__.py @@ -0,0 +1 @@ +from .hooks import * diff --git a/adzoo/bevformer/mmdet3d_plugin/bevformer/apis/__init__.py b/adzoo/bevformer/mmdet3d_plugin/bevformer/apis/__init__.py new file mode 100644 index 0000000..15dff22 --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/bevformer/apis/__init__.py @@ -0,0 +1,3 @@ +from .train import custom_train_model +from .mmdet_train import custom_train_detector +# from .test import custom_multi_gpu_test \ No newline at end of file diff --git a/adzoo/bevformer/mmdet3d_plugin/bevformer/apis/mmdet_train.py b/adzoo/bevformer/mmdet3d_plugin/bevformer/apis/mmdet_train.py new file mode 100644 index 0000000..1a218f0 --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/bevformer/apis/mmdet_train.py @@ -0,0 +1,203 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. 
+# ---------------------------------------------
+# Modified by Zhiqi Li
+# ---------------------------------------------
+import random
+import warnings
+
+import numpy as np
+import torch
+import torch.distributed as dist
+from torch.nn import DataParallel
+from torch.nn.parallel.distributed import DistributedDataParallel
+from mmcv.runner import (HOOKS, DistSamplerSeedHook, EpochBasedRunner,
+                         Fp16OptimizerHook, OptimizerHook,
+                         build_runner, )
+from mmcv.optims import build_optimizer
+from mmcv.utils import build_from_cfg
+
+from mmcv.core import EvalHook
+
+from mmcv.datasets import (build_dataset, replace_ImageToTensor)
+from mmcv.utils import get_root_logger, get_dist_info
+import time
+import os.path as osp
+from mmcv.datasets import build_dataloader
+from mmcv.core.evaluation.eval_hooks import CustomDistEvalHook
+from adzoo.bevformer.apis.test import custom_multi_gpu_test
+
+def custom_train_detector(model,
+                          dataset,
+                          cfg,
+                          distributed=False,
+                          validate=False,
+                          timestamp=None,
+                          eval_model=None,
+                          meta=None):
+    logger = get_root_logger(cfg.log_level)
+
+    # prepare data loaders
+    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
+    # assert len(dataset) == 1
+    if 'imgs_per_gpu' in cfg.data:
+        logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. '
+                       'Please use "samples_per_gpu" instead')
+        if 'samples_per_gpu' in cfg.data:
+            logger.warning(
+                f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and '
+                f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"'
+                f'={cfg.data.imgs_per_gpu} is used in this experiment')
+        else:
+            logger.warning(
+                'Automatically set "samples_per_gpu"="imgs_per_gpu"='
+                f'{cfg.data.imgs_per_gpu} in this experiment')
+        cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
+
+    data_loaders = [
+        build_dataloader(
+            ds,
+            cfg.data.samples_per_gpu,
+            cfg.data.workers_per_gpu,
+            # cfg.gpus will be ignored if distributed
+            len(cfg.gpu_ids),
+            dist=distributed,
+            seed=cfg.seed,
+            shuffler_sampler=cfg.data.shuffler_sampler,  # dict(type='DistributedGroupSampler'),
+            nonshuffler_sampler=cfg.data.nonshuffler_sampler,  # dict(type='DistributedSampler'),
+        ) for ds in dataset
+    ]
+
+    # put model on gpus
+    if distributed:
+        find_unused_parameters = cfg.get('find_unused_parameters', False)
+        # Sets the `find_unused_parameters` parameter in
+        # torch.nn.parallel.DistributedDataParallel
+        model = DistributedDataParallel(
+            model.cuda(),
+            device_ids=[torch.cuda.current_device()],
+            broadcast_buffers=False,
+            find_unused_parameters=find_unused_parameters)
+        if eval_model is not None:
+            eval_model = DistributedDataParallel(
+                eval_model.cuda(),
+                device_ids=[torch.cuda.current_device()],
+                broadcast_buffers=False,
+                find_unused_parameters=find_unused_parameters)
+    else:
+        model = DataParallel(
+            model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
+        if eval_model is not None:
+            eval_model = DataParallel(
+                eval_model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
+
+    # build runner
+    optimizer = build_optimizer(model, cfg.optimizer)
+
+    if 'runner' not in cfg:
+        cfg.runner = {
+            'type': 'EpochBasedRunner',
+            'max_epochs': cfg.total_epochs
+        }
+        warnings.warn(
+            'config is now expected to have a `runner` section, '
+            'please set `runner` in your config.', UserWarning)
+    else:
+        if 'total_epochs' in cfg:
+            assert cfg.total_epochs == cfg.runner.max_epochs
+    if eval_model is not None:
+        runner = build_runner(
+            cfg.runner,
+            default_args=dict(
+                model=model,
+                eval_model=eval_model,
+                optimizer=optimizer,
work_dir=cfg.work_dir, + logger=logger, + meta=meta)) + else: + runner = build_runner( + cfg.runner, + default_args=dict( + model=model, + optimizer=optimizer, + work_dir=cfg.work_dir, + logger=logger, + meta=meta)) + + # an ugly workaround to make .log and .log.json filenames the same + runner.timestamp = timestamp + + # fp16 setting + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + optimizer_config = Fp16OptimizerHook( + **cfg.optimizer_config, **fp16_cfg, distributed=distributed) + elif distributed and 'type' not in cfg.optimizer_config: + optimizer_config = OptimizerHook(**cfg.optimizer_config) + else: + optimizer_config = cfg.optimizer_config + + # register hooks + runner.register_training_hooks(cfg.lr_config, optimizer_config, + cfg.checkpoint_config, cfg.log_config, + cfg.get('momentum_config', None)) + + # register profiler hook + #trace_config = dict(type='tb_trace', dir_name='work_dir') + #profiler_config = dict(on_trace_ready=trace_config) + #runner.register_profiler_hook(profiler_config) + + if distributed: + if isinstance(runner, EpochBasedRunner): + runner.register_hook(DistSamplerSeedHook()) + + # register eval hooks + if validate: + # Support batch_size > 1 in validation + val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1) + if val_samples_per_gpu > 1: + assert False + # Replace 'ImageToTensor' to 'DefaultFormatBundle' + cfg.data.val.pipeline = replace_ImageToTensor( + cfg.data.val.pipeline) + val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) + + val_dataloader = build_dataloader( + val_dataset, + samples_per_gpu=val_samples_per_gpu, + workers_per_gpu=cfg.data.workers_per_gpu, + dist=distributed, + shuffle=False, + shuffler_sampler=cfg.data.shuffler_sampler, # dict(type='DistributedGroupSampler'), + nonshuffler_sampler=cfg.data.nonshuffler_sampler, # dict(type='DistributedSampler'), + ) + eval_cfg = cfg.get('evaluation', {}) + eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner' + eval_cfg['jsonfile_prefix'] = osp.join('val', cfg.work_dir, time.ctime().replace(' ','_').replace(':','_')) + eval_hook = CustomDistEvalHook if distributed else EvalHook + runner.register_hook(eval_hook(val_dataloader, test_fn=custom_multi_gpu_test, **eval_cfg)) + + # user-defined hooks + if cfg.get('custom_hooks', None): + custom_hooks = cfg.custom_hooks + assert isinstance(custom_hooks, list), \ + f'custom_hooks expect list type, but got {type(custom_hooks)}' + for hook_cfg in cfg.custom_hooks: + assert isinstance(hook_cfg, dict), \ + 'Each item in custom_hooks expects dict type, but got ' \ + f'{type(hook_cfg)}' + hook_cfg = hook_cfg.copy() + priority = hook_cfg.pop('priority', 'NORMAL') + hook = build_from_cfg(hook_cfg, HOOKS) + runner.register_hook(hook, priority=priority) + + if cfg.resume_from: + runner.resume(cfg.resume_from) + elif cfg.load_from: + runner.load_checkpoint(cfg.load_from) + runner.run(data_loaders, cfg.workflow) + diff --git a/adzoo/bevformer/mmdet3d_plugin/bevformer/apis/test.py b/adzoo/bevformer/mmdet3d_plugin/bevformer/apis/test.py new file mode 100644 index 0000000..cd507e4 --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/bevformer/apis/test.py @@ -0,0 +1,164 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. 
+# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- +import os.path as osp +import pickle +import shutil +import tempfile +import time + +import mmcv +import torch +import torch.distributed as dist +from mmcv.image import tensor2imgs +from mmcv.runner import get_dist_info + +from mmdet.core import encode_mask_results + + +import mmcv +import numpy as np +import pycocotools.mask as mask_util + +def custom_encode_mask_results(mask_results): + """Encode bitmap mask to RLE code. Semantic Masks only + Args: + mask_results (list | tuple[list]): bitmap mask results. + In mask scoring rcnn, mask_results is a tuple of (segm_results, + segm_cls_score). + Returns: + list | tuple: RLE encoded mask. + """ + cls_segms = mask_results + num_classes = len(cls_segms) + encoded_mask_results = [] + for i in range(len(cls_segms)): + encoded_mask_results.append( + mask_util.encode( + np.array( + cls_segms[i][:, :, np.newaxis], order='F', + dtype='uint8'))[0]) # encoded with RLE + return [encoded_mask_results] + +def custom_multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): + """Test model with multiple gpus. + This method tests model with multiple gpus and collects the results + under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' + it encodes results to gpu tensors and use gpu communication for results + collection. On cpu mode it saves the results on different gpus to 'tmpdir' + and collects them by the rank 0 worker. + Args: + model (nn.Module): Model to be tested. + data_loader (nn.Dataloader): Pytorch data loader. + tmpdir (str): Path of directory to save the temporary results from + different gpus under cpu mode. + gpu_collect (bool): Option to use either gpu or cpu to collect results. + Returns: + list: The prediction results. + """ + model.eval() + bbox_results = [] + mask_results = [] + dataset = data_loader.dataset + rank, world_size = get_dist_info() + if rank == 0: + prog_bar = mmcv.ProgressBar(len(dataset)) + time.sleep(2) # This line can prevent deadlock problem in some cases. + have_mask = False + for i, data in enumerate(data_loader): + with torch.no_grad(): + result = model(return_loss=False, rescale=True, **data) + # encode mask results + if isinstance(result, dict): + if 'bbox_results' in result.keys(): + bbox_result = result['bbox_results'] + batch_size = len(result['bbox_results']) + bbox_results.extend(bbox_result) + if 'mask_results' in result.keys() and result['mask_results'] is not None: + mask_result = custom_encode_mask_results(result['mask_results']) + mask_results.extend(mask_result) + have_mask = True + else: + batch_size = len(result) + bbox_results.extend(result) + + #if isinstance(result[0], tuple): + # assert False, 'this code is for instance segmentation, which our code will not utilize.' 
+            # result = [(bbox_results, encode_mask_results(mask_results))
+            #           for bbox_results, mask_results in result]
+        if rank == 0:
+            for _ in range(batch_size * world_size):
+                prog_bar.update()
+
+    # collect results from all ranks
+    if gpu_collect:
+        bbox_results = collect_results_gpu(bbox_results, len(dataset))
+        if have_mask:
+            mask_results = collect_results_gpu(mask_results, len(dataset))
+        else:
+            mask_results = None
+    else:
+        bbox_results = collect_results_cpu(bbox_results, len(dataset), tmpdir)
+        tmpdir = tmpdir + '_mask' if tmpdir is not None else None
+        if have_mask:
+            mask_results = collect_results_cpu(mask_results, len(dataset), tmpdir)
+        else:
+            mask_results = None
+
+    if mask_results is None:
+        return bbox_results
+    return {'bbox_results': bbox_results, 'mask_results': mask_results}
+
+
+def collect_results_cpu(result_part, size, tmpdir=None):
+    rank, world_size = get_dist_info()
+    # create a tmp dir if it is not specified
+    if tmpdir is None:
+        MAX_LEN = 512
+        # 32 is whitespace
+        dir_tensor = torch.full((MAX_LEN, ),
+                                32,
+                                dtype=torch.uint8,
+                                device='cuda')
+        if rank == 0:
+            mmcv.mkdir_or_exist('.dist_test')
+            tmpdir = tempfile.mkdtemp(dir='.dist_test')
+            tmpdir = torch.tensor(
+                bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
+            dir_tensor[:len(tmpdir)] = tmpdir
+        dist.broadcast(dir_tensor, 0)
+        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
+    else:
+        mmcv.mkdir_or_exist(tmpdir)
+    # dump the part result to the dir
+    mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
+    dist.barrier()
+    # collect all parts
+    if rank != 0:
+        return None
+    else:
+        # load results of all parts from tmp dir
+        part_list = []
+        for i in range(world_size):
+            part_file = osp.join(tmpdir, f'part_{i}.pkl')
+            part_list.append(mmcv.load(part_file))
+        # NOTE: the sampler of the evaluation stage is changed so that each
+        # GPU handles a contiguous chunk of samples; the parts are therefore
+        # concatenated in rank order instead of interleaved with zip.
+        ordered_results = []
+        # for res in zip(*part_list):
+        for res in part_list:
+            ordered_results.extend(list(res))
+        # the dataloader may pad some samples
+        ordered_results = ordered_results[:size]
+        # remove tmp dir
+        shutil.rmtree(tmpdir)
+        return ordered_results
+
+
+def collect_results_gpu(result_part, size):
+    # GPU-side collection is not implemented; fall back to CPU collection.
+    return collect_results_cpu(result_part, size)
\ No newline at end of file
diff --git a/adzoo/bevformer/mmdet3d_plugin/bevformer/apis/train.py b/adzoo/bevformer/mmdet3d_plugin/bevformer/apis/train.py
new file mode 100644
index 0000000..dcae402
--- /dev/null
+++ b/adzoo/bevformer/mmdet3d_plugin/bevformer/apis/train.py
@@ -0,0 +1,65 @@
+# ---------------------------------------------
+# Copyright (c) OpenMMLab. All rights reserved.
+# ---------------------------------------------
+# Modified by Zhiqi Li
+# ---------------------------------------------
+
+from .mmdet_train import custom_train_detector
+
+def custom_train_model(model,
+                       dataset,
+                       cfg,
+                       distributed=False,
+                       validate=False,
+                       timestamp=None,
+                       eval_model=None,
+                       meta=None):
+    """A function wrapper for launching model training according to cfg.
+
+    Because we need a different eval_hook in the runner. Should be deprecated
+    in the future.
+ """ + if cfg.model.type in ['EncoderDecoder3D']: + assert False + else: + custom_train_detector( + model, + dataset, + cfg, + distributed=distributed, + validate=validate, + timestamp=timestamp, + eval_model=eval_model, + meta=meta) + + +def train_model(model, + dataset, + cfg, + distributed=False, + validate=False, + timestamp=None, + meta=None): + """A function wrapper for launching model training according to cfg. + + Because we need different eval_hook in runner. Should be deprecated in the + future. + """ + if cfg.model.type in ['EncoderDecoder3D']: + train_segmentor( + model, + dataset, + cfg, + distributed=distributed, + validate=validate, + timestamp=timestamp, + meta=meta) + else: + train_detector( + model, + dataset, + cfg, + distributed=distributed, + validate=validate, + timestamp=timestamp, + meta=meta) diff --git a/adzoo/bevformer/mmdet3d_plugin/bevformer/hooks/__init__.py b/adzoo/bevformer/mmdet3d_plugin/bevformer/hooks/__init__.py new file mode 100644 index 0000000..aa04ec1 --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/bevformer/hooks/__init__.py @@ -0,0 +1 @@ +from .custom_hooks import TransferWeight \ No newline at end of file diff --git a/adzoo/bevformer/mmdet3d_plugin/bevformer/hooks/custom_hooks.py b/adzoo/bevformer/mmdet3d_plugin/bevformer/hooks/custom_hooks.py new file mode 100644 index 0000000..ef1e35d --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/bevformer/hooks/custom_hooks.py @@ -0,0 +1,12 @@ +from mmcv.runner.hooks.hook import HOOKS, Hook + + +@HOOKS.register_module() +class TransferWeight(Hook): + + def __init__(self, every_n_inters=1): + self.every_n_inters=every_n_inters + + def after_train_iter(self, runner): + if self.every_n_inner_iters(runner, self.every_n_inters): + runner.eval_model.load_state_dict(runner.model.state_dict()) diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/__init__.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/__init__.py new file mode 100644 index 0000000..64eaac4 --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/__init__.py @@ -0,0 +1 @@ +from .modeling import * \ No newline at end of file diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/datasets/__init__.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/datasets/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/datasets/nuscenes.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/datasets/nuscenes.py new file mode 100644 index 0000000..9eed59b --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/datasets/nuscenes.py @@ -0,0 +1,360 @@ +# Copyright 2021 Toyota Research Institute. All rights reserved. 
+#import functools +from collections import OrderedDict + +import numpy as np +import seaborn as sns +from torch.utils.data import Dataset +from tqdm import tqdm + +#from detectron2.data import MetadataCatalog +from mmcv.structures import BoxMode +from nuscenes.eval.detection.utils import category_to_detection_name +from nuscenes.nuscenes import NuScenes +from nuscenes.utils.splits import create_splits_scenes + +#from tridet.data import collect_dataset_dicts +from adzoo.bevformer.mmdet3d_plugin.dd3d.structures.boxes3d import GenericBoxes3D +from adzoo.bevformer.mmdet3d_plugin.dd3d.structures.pose import Pose +from adzoo.bevformer.mmdet3d_plugin.dd3d.utils.geometry import project_points3d +from adzoo.bevformer.mmdet3d_plugin.dd3d.utils.visualization import float_to_uint8_color + +# https://github.com/nutonomy/nuscenes-devkit/blob/9b209638ef3dee6d0cdc5ac700c493747f5b35fe/python-sdk/nuscenes/utils/splits.py#L189 +# - train/val/test: The standard splits of the nuScenes dataset (700/150/150 scenes). +# - mini_train/mini_val: Train and val splits of the mini subset used for visualization and debugging (8/2 scenes). +# - train_detect/train_track: Two halves of the train split used for separating the training sets of detector and +# tracker if required +DATASET_NAME_TO_VERSION = { + "nusc_train": "v1.0-trainval", + "nusc_val": "v1.0-trainval", + "nusc_val-subsample-8": "v1.0-trainval", + "nusc_trainval": "v1.0-trainval", + "nusc_test": "v1.0-test", + "nusc_mini_train": "v1.0-mini", + "nusc_mini_val": "v1.0-mini", +} + +CAMERA_NAMES = ('CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT') + +ATTRIBUTE_IDS = { + 'vehicle.moving': 0, + 'vehicle.parked': 1, + 'vehicle.stopped': 2, + 'pedestrian.moving': 0, + 'pedestrian.standing': 1, + 'pedestrian.sitting_lying_down': 2, + 'cycle.with_rider': 0, + 'cycle.without_rider': 1, +} + +CATEGORY_IDS = OrderedDict({ + 'barrier': 0, + 'bicycle': 1, + 'bus': 2, + 'car': 3, + 'construction_vehicle': 4, + 'motorcycle': 5, + 'pedestrian': 6, + 'traffic_cone': 7, + 'trailer': 8, + 'truck': 9, +}) + +COLORS = [float_to_uint8_color(clr) for clr in sns.color_palette("bright", n_colors=10)] +COLORMAP = OrderedDict({ + 'barrier': COLORS[8], # yellow + 'bicycle': COLORS[0], # blue + 'bus': COLORS[6], # pink + 'car': COLORS[2], # green + 'construction_vehicle': COLORS[7], # gray + 'motorcycle': COLORS[4], # purple + 'pedestrian': COLORS[1], # orange + 'traffic_cone': COLORS[3], # red + 'trailer': COLORS[9], # skyblue + 'truck': COLORS[5], # brown +}) + +MAX_NUM_ATTRIBUTES = 3 + + +def _compute_iou(box1, box2): + """ + Parameters + ---------- + box1, box2: + (x1, y1, x2, y2) + """ + xx1 = max(box1[0], box2[0]) + yy1 = max(box1[1], box2[1]) + xx2 = min(box1[2], box2[2]) + yy2 = min(box1[3], box2[3]) + if xx1 >= xx2 or yy1 >= yy2: + return 0. 
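+    # IoU = intersection / (area1 + area2 - intersection) for corner-encoded
+    # boxes (x1, y1, x2, y2).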
+ inter = (xx2 - xx1) * (yy2 - yy1) + a1 = (box1[2] - box1[0]) * (box1[3] - box1[1]) + a2 = (box2[2] - box2[0]) * (box2[3] - box2[1]) + return inter / (a1 + a2 - inter) + + +class NuscenesDataset(Dataset): + def __init__(self, name, data_root, datum_names=CAMERA_NAMES, min_num_lidar_points=3, min_box_visibility=0.2, **unused): + self.data_root = data_root + assert name in DATASET_NAME_TO_VERSION + version = DATASET_NAME_TO_VERSION[name] + self.nusc = NuScenes(version=version, dataroot=data_root, verbose=True) + + self.datum_names = datum_names + self.min_num_lidar_points = min_num_lidar_points + self.min_box_visibility = min_box_visibility + + self.dataset_item_info = self._build_dataset_item_info(name) + + # Index instance tokens to their IDs + self._instance_token_to_id = self._index_instance_tokens() + + # Construct the mapping from datum_token (image id) to index + print("Generating the mapping from image id to idx...") + self.datumtoken2idx = {} + for idx, (datum_token, _, _, _, _) in enumerate(self.dataset_item_info): + self.datumtoken2idx[datum_token] = idx + print("Done.") + + def _build_dataset_item_info(self, name): + scenes_in_split = self._get_split_scenes(name) + + dataset_items = [] + for _, scene_token in tqdm(scenes_in_split): + scene = self.nusc.get('scene', scene_token) + sample_token = scene['first_sample_token'] + for sample_idx in range(scene['nbr_samples']): + if name.endswith('subsample-8') and sample_idx % 8 > 0: + # Sample-level subsampling. + continue + + sample = self.nusc.get('sample', sample_token) + for datum_name, datum_token in sample['data'].items(): + if datum_name not in self.datum_names: + continue + dataset_items.append((datum_token, sample_token, scene['name'], sample_idx, datum_name)) + sample_token = sample['next'] + return dataset_items + + def _get_split_scenes(self, name): + scenes_in_splits = create_splits_scenes() + if name == "nusc_trainval": + scenes = scenes_in_splits["train"] + scenes_in_splits["val"] + elif name == "nusc_val-subsample-8": + scenes = scenes_in_splits["val"] + else: + assert name.startswith('nusc_'), f"Invalid dataset name: {name}" + split = name[5:] + assert split in scenes_in_splits, f"Invalid dataset: {split}" + scenes = scenes_in_splits[split] + + # Mapping from scene name to token. 
+ name_to_token = {scene['name']: scene['token'] for scene in self.nusc.scene} + return [(name, name_to_token[name]) for name in scenes] + + def __len__(self): + return len(self.dataset_item_info) + + def _build_id(self, scene_name, sample_idx, datum_name): + sample_id = f"{scene_name}_{sample_idx:03d}" + image_id = f"{sample_id}_{datum_name}" + return image_id, sample_id + + def _index_instance_tokens(self): + """Index instance tokens for uniquely identifying instances across samples""" + instance_token_to_id = {} + for record in self.nusc.sample_annotation: + instance_token = record['instance_token'] + if instance_token not in instance_token_to_id: + next_instance_id = len(instance_token_to_id) + instance_token_to_id[instance_token] = next_instance_id + return instance_token_to_id + + def get_instance_annotations(self, annotation_list, K, image_shape, pose_WS): + annotations = [] + for _ann in annotation_list: + ann = self.nusc.get('sample_annotation', _ann.token) + if ann['num_lidar_pts'] + ann['num_radar_pts'] < self.min_num_lidar_points: + continue + annotation = OrderedDict() + + # -------- + # Category + # -------- + category = category_to_detection_name(ann['category_name']) + if category is None: + continue + annotation['category_id'] = CATEGORY_IDS[category] + + # ------ + # 3D box + # ------ + # NOTE: ann['rotation'], ann['translation'] is in global frame. + pose_SO = Pose(wxyz=_ann.orientation, tvec=_ann.center) # pose in sensor frame + # DEBUG: + # pose_WO_1 = Pose(np.array(ann['rotation']), np.array(ann['translation'])) + # pose_WO_2 = pose_WS * pose_SO + # assert np.allclose(pose_WO_1.matrix, pose_WO_2.matrix) + bbox3d = GenericBoxes3D(_ann.orientation, _ann.center, _ann.wlh) + annotation['bbox3d'] = bbox3d.vectorize().tolist()[0] + + # -------------------------------------- + # 2D box -- project 8 corners of 3D bbox + # -------------------------------------- + corners = project_points3d(bbox3d.corners.cpu().numpy().squeeze(0), K) + l, t = corners[:, 0].min(), corners[:, 1].min() + r, b = corners[:, 0].max(), corners[:, 1].max() + + x1 = max(0, l) + y1 = max(0, t) + x2 = min(image_shape[1], r) + y2 = min(image_shape[0], b) + + iou = _compute_iou([l, t, r, b], [x1, y1, x2, y2]) + if iou < self.min_box_visibility: + continue + + annotation['bbox'] = [x1, y1, x2, y2] + annotation['bbox_mode'] = BoxMode.XYXY_ABS + + # -------- + # Track ID + # -------- + annotation['track_id'] = self._instance_token_to_id[ann['instance_token']] + + # --------- + # Attribute + # --------- + attr_tokens = ann['attribute_tokens'] + assert len(attr_tokens) < 2 # NOTE: Allow only single attrubute. + attribute_id = MAX_NUM_ATTRIBUTES # By default, MAX_NUM_ATTRIBUTES -- this is to be ignored in loss compute. + if attr_tokens: + attribute = self.nusc.get('attribute', attr_tokens[0])['name'] + attribute_id = ATTRIBUTE_IDS[attribute] + annotation['attribute_id'] = attribute_id + + # ----- + # Speed + # ----- + vel_global = self.nusc.box_velocity(ann['token']) + speed = np.linalg.norm(vel_global) # NOTE: This can be NaN. + # DEBUG: + # speed * Quaternion(ann['rotation']).rotation_matrix.T[0] ~= vel_global + annotation['speed'] = speed + + annotations.append(annotation) + + return annotations + + def _get_ego_velocity(self, current, max_time_diff=1.5): + """Velocity of ego-vehicle in m/s. + """ + has_prev = current['prev'] != '' + has_next = current['next'] != '' + + # Cannot estimate velocity for a single annotation. 
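+        # The velocity is a finite difference of ego poses between the `prev`
+        # and `next` sample_data records (a central difference when both
+        # neighbors exist).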
+ if not has_prev and not has_next: + return np.array([np.nan, np.nan, np.nan]) + + if has_prev: + first = self.nusc.get('sample_data', current['prev']) + else: + first = current + + if has_next: + last = self.nusc.get('sample_data', current['next']) + else: + last = current + + pos_first = self.nusc.get('ego_pose', first['ego_pose_token'])['translation'] + pos_last = self.nusc.get('ego_pose', last['ego_pose_token'])['translation'] + pos_diff = np.float32(pos_last) - np.float32(pos_first) + + time_last = 1e-6 * last['timestamp'] + time_first = 1e-6 * first['timestamp'] + time_diff = time_last - time_first + + if has_next and has_prev: + # If doing centered difference, allow for up to double the max_time_diff. + max_time_diff *= 2 + + if time_diff > max_time_diff: + # If time_diff is too big, don't return an estimate. + return np.array([np.nan, np.nan, np.nan]) + else: + return pos_diff / time_diff + + def __getitem__(self, idx): + datum_token, sample_token, scene_name, sample_idx, datum_name = self.dataset_item_info[idx] + datum = self.nusc.get('sample_data', datum_token) + assert datum['is_key_frame'] + + filename, _annotations, K = self.nusc.get_sample_data(datum_token) + image_id, sample_id = self._build_id(scene_name, sample_idx, datum_name) + height, width = datum['height'], datum['width'] + d2_dict = OrderedDict( + file_name=filename, + height=height, + width=width, + image_id=image_id, + sample_id=sample_id, + sample_token=sample_token + ) + + # Intrinsics + d2_dict['intrinsics'] = list(K.flatten()) + + # Get pose of the sensor (S) from vehicle (V) frame + _pose_VS = self.nusc.get('calibrated_sensor', datum['calibrated_sensor_token']) + pose_VS = Pose(wxyz=np.float64(_pose_VS['rotation']), tvec=np.float64(_pose_VS['translation'])) + + # Get ego-pose of the vehicle (V) from global/world (W) frame + _pose_WV = self.nusc.get('ego_pose', datum['ego_pose_token']) + pose_WV = Pose(wxyz=np.float64(_pose_WV['rotation']), tvec=np.float64(_pose_WV['translation'])) + pose_WS = pose_WV * pose_VS + + d2_dict['pose'] = {'wxyz': list(pose_WS.quat.elements), 'tvec': list(pose_WS.tvec)} + d2_dict['extrinsics'] = {'wxyz': list(pose_VS.quat.elements), 'tvec': list(pose_VS.tvec)} + + d2_dict['ego_speed'] = np.linalg.norm(self._get_ego_velocity(datum)) + + d2_dict['annotations'] = self.get_instance_annotations(_annotations, K, (height, width), pose_WS) + + return d2_dict + + def getitem_by_datumtoken(self, datum_token): + # idx = self.datumtoken2idx[datum_token] + # ret = self.__getitem__(idx) + + datum = self.nusc.get('sample_data', datum_token) + sample_token = datum['sample_token'] + filename, _annotations, K = self.nusc.get_sample_data(datum_token) + height, width = datum['height'], datum['width'] + d2_dict = OrderedDict( + file_name=filename, + height=height, + width=width, + image_id=0, + sample_id=0, + sample_token=sample_token + ) + # Intrinsics + d2_dict['intrinsics'] = list(K.flatten()) + # Get pose of the sensor (S) from vehicle (V) frame + _pose_VS = self.nusc.get('calibrated_sensor', datum['calibrated_sensor_token']) + pose_VS = Pose(wxyz=np.float64(_pose_VS['rotation']), tvec=np.float64(_pose_VS['translation'])) + # Get ego-pose of the vehicle (V) from global/world (W) frame + _pose_WV = self.nusc.get('ego_pose', datum['ego_pose_token']) + pose_WV = Pose(wxyz=np.float64(_pose_WV['rotation']), tvec=np.float64(_pose_WV['translation'])) + pose_WS = pose_WV * pose_VS + + d2_dict['pose'] = {'wxyz': list(pose_WS.quat.elements), 'tvec': list(pose_WS.tvec)} + d2_dict['extrinsics'] = {'wxyz': 
list(pose_VS.quat.elements), 'tvec': list(pose_VS.tvec)} + + d2_dict['ego_speed'] = np.linalg.norm(self._get_ego_velocity(datum)) + + d2_dict['annotations'] = self.get_instance_annotations(_annotations, K, (height, width), pose_WS) + return d2_dict \ No newline at end of file diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/datasets/transform_utils.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/datasets/transform_utils.py new file mode 100644 index 0000000..623bd6e --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/datasets/transform_utils.py @@ -0,0 +1,136 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +# Copyright 2021 Toyota Research Institute. All rights reserved. +# Adapted from detectron2: +# https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/detection_utils.py +import numpy as np +import torch + +from detectron2.data import transforms as T +from detectron2.structures import Boxes, BoxMode, Instances + +from projects.mmdet3d_plugin.dd3d.structures.boxes3d import Boxes3D + +__all__ = ["transform_instance_annotations", "annotations_to_instances"] + + +def transform_instance_annotations( + annotation, + transforms, + image_size, +): + """Adapted from: + https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/detection_utils.py#L254 + + The changes from original: + - The presence of 2D bounding box (i.e. "bbox" field) is assumed by default in d2; here it's optional. + - Add optional 3D bounding box support. + - If the instance mask annotation is in RLE, then it's decoded into polygons, not bitmask, to save memory. + + =============================================================================================================== + + Apply transforms to box, segmentation and keypoints annotations of a single instance. + + It will use `transforms.apply_box` for the box, and + `transforms.apply_coords` for segmentation polygons & keypoints. + If you need anything more specially designed for each data structure, + you'll need to implement your own version of this function or the transforms. + + Args: + annotation (dict): dict of instance annotations for a single instance. + It will be modified in-place. + transforms (TransformList or list[Transform]): + image_size (tuple): the height, width of the transformed image + keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`. + + Returns: + dict: + the same input dict with fields "bbox", "segmentation", "keypoints" + transformed according to `transforms`. + The "bbox_mode" field will be set to XYXY_ABS. + """ + if isinstance(transforms, (tuple, list)): + transforms = T.TransformList(transforms) + # (dennis.park) Here 2D bounding box is optional. + if "bbox" in annotation: + assert "bbox_mode" in annotation, "'bbox' is present, but 'bbox_mode' is not." + # bbox is 1d (per-instance bounding box) + bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS) + bbox = transforms.apply_box(np.array([bbox]))[0] + # clip transformed bbox to image size + bbox = bbox.clip(min=0) + bbox = np.minimum(bbox, list(image_size + image_size)[::-1]) + annotation["bbox"] = bbox + annotation["bbox_mode"] = BoxMode.XYXY_ABS + + # Vertical flipping is not implemented (`flip_transform.py`). TODO: implement if needed. 
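+    # NOTE: `apply_box3d` is not a stock detectron2 transform op; it is
+    # assumed to be registered by this codebase's 3D augmentation utilities.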
+ if "bbox3d" in annotation: + bbox3d = np.array(annotation["bbox3d"]) + annotation['bbox3d'] = transforms.apply_box3d(bbox3d) + + return annotation + + +def _create_empty_instances(image_size): + target = Instances(image_size) + + target.gt_boxes = Boxes([]) + target.gt_classes = torch.tensor([], dtype=torch.int64) + target.gt_boxes3d = Boxes3D.from_vectors([], torch.eye(3, dtype=torch.float32)) + + return target + + +def annotations_to_instances( + annos, + image_size, + intrinsics=None, +): + """ + Create an :class:`Instances` object used by the models, + from instance annotations in the dataset dict. + + Args: + annos (list[dict]): a list of instance annotations in one image, each + element for one instance. + image_size (tuple): height, width + + Returns: + Instances: + It will contain fields "gt_boxes", "gt_classes", + "gt_masks", "gt_keypoints", if they can be obtained from `annos`. + This is the format that builtin models expect. + """ + if len(annos) == 0: + return _create_empty_instances(image_size) + + boxes = [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos] + target = Instances(image_size) + target.gt_boxes = Boxes(boxes) + + classes = [obj["category_id"] for obj in annos] + classes = torch.tensor(classes, dtype=torch.int64) + target.gt_classes = classes + + if len(annos) and "bbox3d" in annos[0]: + assert intrinsics is not None + target.gt_boxes3d = Boxes3D.from_vectors([anno['bbox3d'] for anno in annos], intrinsics) + if len(target.gt_boxes3d) != target.gt_boxes.tensor.shape[0]: + raise ValueError( + f"The sizes of `gt_boxes3d` and `gt_boxes` do not match: a={len(target.gt_boxes3d)}, b={target.gt_boxes.tensor.shape[0]}." + ) + + # NOTE: add nuscenes attributes here + # NOTE: instances will be filtered later + # NuScenes attributes + if len(annos) and "attribute_id" in annos[0]: + attributes = [obj["attribute_id"] for obj in annos] + target.gt_attributes = torch.tensor(attributes, dtype=torch.int64) + + # Speed (magnitude of velocity) + if len(annos) and "speed" in annos[0]: + speeds = [obj["speed"] for obj in annos] + target.gt_speeds = torch.tensor(speeds, dtype=torch.float32) + + assert len(boxes) == len(classes) == len(attributes) == len(speeds), \ + 'the numbers of annotations should be the same' + return target diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/layers/iou_loss.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/layers/iou_loss.py new file mode 100644 index 0000000..97638ef --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/layers/iou_loss.py @@ -0,0 +1,71 @@ +# Copyright 2021 Toyota Research Institute. All rights reserved. 
+# Adapted from AdelaiDet:
+# https://github.com/aim-uofa/AdelaiDet/blob/master/adet/layers/iou_loss.py
+import torch
+from torch import nn
+
+
+class IOULoss(nn.Module):
+    """
+    Intersection over Union (IoU) loss which supports three
+    different IoU computations:
+
+    * IoU
+    * Linear IoU
+    * gIoU
+    """
+    def __init__(self, loc_loss_type='iou'):
+        super(IOULoss, self).__init__()
+        self.loc_loss_type = loc_loss_type
+
+    def forward(self, pred, target, weight=None):
+        """
+        Args:
+            pred: Nx4 predicted bounding boxes, encoded as (left, top, right,
+                bottom) distances from a feature location (FCOS-style)
+            target: Nx4 target bounding boxes in the same encoding
+            weight: N loss weight for each instance
+        """
+        pred_left = pred[:, 0]
+        pred_top = pred[:, 1]
+        pred_right = pred[:, 2]
+        pred_bottom = pred[:, 3]
+
+        target_left = target[:, 0]
+        target_top = target[:, 1]
+        target_right = target[:, 2]
+        target_bottom = target[:, 3]
+
+        target_area = (target_left + target_right) * \
+                      (target_top + target_bottom)
+        pred_area = (pred_left + pred_right) * \
+                    (pred_top + pred_bottom)
+
+        w_intersect = torch.min(pred_left, target_left) + \
+                      torch.min(pred_right, target_right)
+        h_intersect = torch.min(pred_bottom, target_bottom) + \
+                      torch.min(pred_top, target_top)
+
+        g_w_intersect = torch.max(pred_left, target_left) + \
+                        torch.max(pred_right, target_right)
+        g_h_intersect = torch.max(pred_bottom, target_bottom) + \
+                        torch.max(pred_top, target_top)
+        ac_union = g_w_intersect * g_h_intersect
+
+        area_intersect = w_intersect * h_intersect
+        area_union = target_area + pred_area - area_intersect
+
+        ious = (area_intersect + 1.0) / (area_union + 1.0)
+        gious = ious - (ac_union - area_union) / ac_union
+        if self.loc_loss_type == 'iou':
+            losses = -torch.log(ious)
+        elif self.loc_loss_type == 'linear_iou':
+            losses = 1 - ious
+        elif self.loc_loss_type == 'giou':
+            losses = 1 - gious
+        else:
+            raise NotImplementedError
+
+        if weight is not None:
+            return (losses * weight).sum()
+        else:
+            return losses.sum()
diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/layers/normalization.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/layers/normalization.py
new file mode 100644
index 0000000..bed7c63
--- /dev/null
+++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/layers/normalization.py
@@ -0,0 +1,40 @@
+# Copyright 2021 Toyota Research Institute. All rights reserved.
+# Adapted from AdelaiDet
+# https://github.com/aim-uofa/AdelaiDet/
+import logging
+
+import torch
+from torch import nn
+
+LOG = logging.getLogger(__name__)
+
+
+class Scale(nn.Module):
+    def __init__(self, init_value=1.0):
+        super(Scale, self).__init__()
+        self.scale = nn.Parameter(torch.FloatTensor([init_value]))
+
+    def forward(self, input):
+        return input * self.scale
+
+
+class Offset(nn.Module):
+    def __init__(self, init_value=0.):
+        super(Offset, self).__init__()
+        self.bias = nn.Parameter(torch.FloatTensor([init_value]))
+
+    def forward(self, input):
+        return input + self.bias
+
+
+class ModuleListDial(nn.ModuleList):
+    def __init__(self, modules=None):
+        super(ModuleListDial, self).__init__(modules)
+        self.cur_position = 0
+
+    def forward(self, x):
+        result = self[self.cur_position](x)
+        self.cur_position += 1
+        if self.cur_position >= len(self):
+            self.cur_position = 0
+        return result
diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/layers/smooth_l1_loss.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/layers/smooth_l1_loss.py
new file mode 100644
index 0000000..b5448d0
--- /dev/null
+++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/layers/smooth_l1_loss.py
@@ -0,0 +1,80 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+# Copyright 2021 Toyota Research Institute. All rights reserved. +# Adapted from fvcore: +# https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py + +import torch + + +def smooth_l1_loss(input: torch.Tensor, target: torch.Tensor, beta: float, reduction: str = "none") -> torch.Tensor: + """ + Smooth L1 loss defined in the Fast R-CNN paper as: + + | 0.5 * x ** 2 / beta if abs(x) < beta + smoothl1(x) = | + | abs(x) - 0.5 * beta otherwise, + + where x = input - target. + + Smooth L1 loss is related to Huber loss, which is defined as: + + | 0.5 * x ** 2 if abs(x) < beta + huber(x) = | + | beta * (abs(x) - 0.5 * beta) otherwise + + Smooth L1 loss is equal to huber(x) / beta. This leads to the following + differences: + + - As beta -> 0, Smooth L1 loss converges to L1 loss, while Huber loss + converges to a constant 0 loss. + - As beta -> +inf, Smooth L1 converges to a constant 0 loss, while Huber loss + converges to L2 loss. + - For Smooth L1 loss, as beta varies, the L1 segment of the loss has a constant + slope of 1. For Huber loss, the slope of the L1 segment is beta. + + Smooth L1 loss can be seen as exactly L1 loss, but with the abs(x) < beta + portion replaced with a quadratic function such that at abs(x) = beta, its + slope is 1. The quadratic segment smooths the L1 loss near x = 0. + + Args: + input (Tensor): input tensor of any shape + target (Tensor): target value tensor with the same shape as input + beta (float): L1 to L2 change point. + For beta values < 1e-5, L1 loss is computed. + reduction: 'none' | 'mean' | 'sum' + 'none': No reduction will be applied to the output. + 'mean': The output will be averaged. + 'sum': The output will be summed. + + Returns: + The loss with the reduction option applied. + + Note: + PyTorch's builtin "Smooth L1 loss" implementation does not actually + implement Smooth L1 loss, nor does it implement Huber loss. It implements + the special case of both in which they are equal (beta=1). + See: https://pytorch.org/docs/stable/nn.html#torch.nn.SmoothL1Loss. + """ + # (dennis.park) Make it work with mixed precision training. + beta = torch.as_tensor(beta).to(input.dtype) + if beta < 1e-5: + # if beta == 0, then torch.where will result in nan gradients when + # the chain rule is applied due to pytorch implementation details + # (the False branch "0.5 * n ** 2 / 0" has an incoming gradient of + # zeros, rather than "no gradient"). To avoid this issue, we define + # small values of beta to be exactly l1 loss. + loss = torch.abs(input - target) + else: + n = torch.abs(input - target) + cond = n < beta + a = 0.5 * n**2 + b = n - 0.5 * beta + a, b = a.to(input.dtype), b.to(input.dtype) + loss = torch.where(cond, a, b) + # loss = torch.where(cond, 0.5 * n ** 2 / beta, n - 0.5 * beta) + + if reduction == "mean": + loss = loss.mean() + elif reduction == "sum": + loss = loss.sum() + return loss diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/__init__.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/__init__.py new file mode 100644 index 0000000..dd76a61 --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/__init__.py @@ -0,0 +1 @@ +from .nuscenes_dd3d import NuscenesDD3D \ No newline at end of file diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/core.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/core.py new file mode 100644 index 0000000..4830248 --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/core.py @@ -0,0 +1,217 @@ +# Copyright 2021 Toyota Research Institute. All rights reserved. 
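+# NOTE: unlike the upstream tridet implementation, this adaptation bypasses the
+# detectron2 META_ARCH registry and feeds pre-computed backbone features to
+# `DD3D.forward()` directly; the backbone is built elsewhere.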
+import torch +from torch import nn + +#from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY +from mmcv.modeling.postprocessing import detector_postprocess as resize_instances +from mmcv.structures import Instances +from mmcv.layers import ShapeSpec +from mmcv.utils import force_fp32 + +from .fcos2d import FCOS2DHead, FCOS2DInference, FCOS2DLoss +from .fcos3d import FCOS3DHead, FCOS3DInference, FCOS3DLoss +#from tridet.modeling.dd3d.postprocessing import nuscenes_sample_aggregate +from .prepare_targets import DD3DTargetPreparer +#from tridet.modeling.feature_extractor import build_feature_extractor +from ..structures.image_list import ImageList +from adzoo.bevformer.mmdet3d_plugin.dd3d.utils.tensor2d import compute_features_locations as compute_locations_per_level + + +#@META_ARCH_REGISTRY.register() +class DD3D(nn.Module): + def __init__(self, + num_classes, + in_channels, + strides, + fcos2d_cfg=dict(), + fcos2d_loss_cfg=dict(), + fcos3d_cfg=dict(), + fcos3d_loss_cfg=dict(), + target_assign_cfg=dict(), + box3d_on=True, + feature_locations_offset="none"): + super().__init__() + # NOTE: do not need backbone + # self.backbone = build_feature_extractor(cfg) + # backbone_output_shape = self.backbone.output_shape() + # self.in_features = cfg.DD3D.IN_FEATURES or list(backbone_output_shape.keys()) + + self.backbone_output_shape = [ShapeSpec(channels=in_channels, stride=s) for s in strides] + + self.feature_locations_offset = feature_locations_offset + + self.fcos2d_head = FCOS2DHead(num_classes=num_classes, input_shape=self.backbone_output_shape, + **fcos2d_cfg) + self.fcos2d_loss = FCOS2DLoss(num_classes=num_classes, **fcos2d_loss_cfg) + # NOTE: inference later + # self.fcos2d_inference = FCOS2DInference(cfg) + + if box3d_on: + self.fcos3d_head = FCOS3DHead(num_classes=num_classes, input_shape=self.backbone_output_shape, + **fcos3d_cfg) + self.fcos3d_loss = FCOS3DLoss(num_classes=num_classes, **fcos3d_loss_cfg) + # NOTE: inference later + # self.fcos3d_inference = FCOS3DInference(cfg) + self.only_box2d = False + else: + self.only_box2d = True + + self.prepare_targets = DD3DTargetPreparer(num_classes=num_classes, + input_shape=self.backbone_output_shape, + box3d_on=box3d_on, + **target_assign_cfg) + + # NOTE: inference later + # self.postprocess_in_inference = cfg.DD3D.INFERENCE.DO_POSTPROCESS + + # self.do_nms = cfg.DD3D.INFERENCE.DO_NMS + # self.do_bev_nms = cfg.DD3D.INFERENCE.DO_BEV_NMS + # self.bev_nms_iou_thresh = cfg.DD3D.INFERENCE.BEV_NMS_IOU_THRESH + + # nuScenes inference aggregates detections over all 6 cameras. 
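+        # At inference time, detections from the six cameras of one nuScenes
+        # sample are merged and deduplicated with BEV NMS; see NuscenesDD3D,
+        # which implements that aggregation path.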
+ # self.nusc_sample_aggregate_in_inference = cfg.DD3D.INFERENCE.NUSC_SAMPLE_AGGREGATE + self.num_classes = num_classes + + # NOTE: do not need normalize + # self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1)) + # self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1)) + + # NOTE: + # @property + # def device(self): + # return self.pixel_mean.device + + # def preprocess_image(self, x): + # return (x - self.pixel_mean) / self.pixel_std + + @force_fp32(apply_to=('features')) + def forward(self, features, batched_inputs): + # NOTE: + # images = [x["image"].to(self.device) for x in batched_inputs] + # images = [self.preprocess_image(x) for x in images] + + # NOTE: directly use inv_intrinsics + # if 'intrinsics' in batched_inputs[0]: + # intrinsics = [x['intrinsics'].to(self.device) for x in batched_inputs] + # else: + # intrinsics = None + # images = ImageList.from_tensors(images, self.backbone.size_divisibility, intrinsics=intrinsics) + if 'inv_intrinsics' in batched_inputs[0]: + inv_intrinsics = [x['inv_intrinsics'].to(features[0].device) for x in batched_inputs] + inv_intrinsics = torch.stack(inv_intrinsics, dim=0) + else: + inv_intrinsics = None + + # NOTE: + # gt_dense_depth = None + # if 'depth' in batched_inputs[0]: + # gt_dense_depth = [x["depth"].to(self.device) for x in batched_inputs] + # gt_dense_depth = ImageList.from_tensors( + # gt_dense_depth, self.backbone.size_divisibility, intrinsics=intrinsics + # ) + + # NOTE: directly input feature + # features = self.backbone(images.tensor) + # features = [features[f] for f in self.in_features] + + if "instances" in batched_inputs[0]: + gt_instances = [x["instances"].to(features[0].device) for x in batched_inputs] + else: + gt_instances = None + + locations = self.compute_locations(features) + logits, box2d_reg, centerness, _ = self.fcos2d_head(features) + if not self.only_box2d: + box3d_quat, box3d_ctr, box3d_depth, box3d_size, box3d_conf, dense_depth = self.fcos3d_head(features) + # NOTE: directly use inv_intrinsics + # inv_intrinsics = images.intrinsics.inverse() if images.intrinsics is not None else None + + if self.training: + assert gt_instances is not None + feature_shapes = [x.shape[-2:] for x in features] + training_targets = self.prepare_targets(locations, gt_instances, feature_shapes) + # NOTE: + # if gt_dense_depth is not None: + # training_targets.update({"dense_depth": gt_dense_depth}) + + losses = {} + fcos2d_loss, fcos2d_info = self.fcos2d_loss(logits, box2d_reg, centerness, training_targets) + losses.update(fcos2d_loss) + + if not self.only_box2d: + fcos3d_loss = self.fcos3d_loss( + box3d_quat, box3d_ctr, box3d_depth, box3d_size, box3d_conf, dense_depth, inv_intrinsics, + fcos2d_info, training_targets + ) + losses.update(fcos3d_loss) + return losses + else: + # TODO: do not support inference now + raise NotImplementedError + + pred_instances, fcos2d_info = self.fcos2d_inference( + logits, box2d_reg, centerness, locations, images.image_sizes + ) + if not self.only_box2d: + # This adds 'pred_boxes3d' and 'scores_3d' to Instances in 'pred_instances' in place. + self.fcos3d_inference( + box3d_quat, box3d_ctr, box3d_depth, box3d_size, box3d_conf, inv_intrinsics, pred_instances, + fcos2d_info + ) + + # 3D score == 2D score x confidence. + score_key = "scores_3d" + else: + score_key = "scores" + + # Transpose to "image-first", i.e. 
(B, L) + pred_instances = list(zip(*pred_instances)) + pred_instances = [Instances.cat(instances) for instances in pred_instances] + + # 2D NMS and pick top-K. + if self.do_nms: + pred_instances = self.fcos2d_inference.nms_and_top_k(pred_instances, score_key) + + if not self.only_box2d and self.do_bev_nms: + # Bird-eye-view NMS. + dummy_group_idxs = {i: [i] for i, _ in enumerate(pred_instances)} + if 'pose' in batched_inputs[0]: + poses = [x['pose'] for x in batched_inputs] + else: + poses = [x['extrinsics'] for x in batched_inputs] + pred_instances = nuscenes_sample_aggregate( + pred_instances, + dummy_group_idxs, + self.num_classes, + poses, + iou_threshold=self.bev_nms_iou_thresh, + include_boxes3d_global=False + ) + + if self.postprocess_in_inference: + processed_results = [] + for results_per_image, input_per_image, image_size in \ + zip(pred_instances, batched_inputs, images.image_sizes): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + r = resize_instances(results_per_image, height, width) + processed_results.append({"instances": r}) + else: + processed_results = [{"instances": x} for x in pred_instances] + + return processed_results + + def compute_locations(self, features): + locations = [] + in_strides = [x.stride for x in self.backbone_output_shape] + for level, feature in enumerate(features): + h, w = feature.size()[-2:] + locations_per_level = compute_locations_per_level( + h, w, in_strides[level], feature.dtype, feature.device, offset=self.feature_locations_offset + ) + locations.append(locations_per_level) + return locations + + def forward_train(self, features, batched_inputs): + self.train() + return self.forward(features, batched_inputs) \ No newline at end of file diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/disentangled_box3d_loss.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/disentangled_box3d_loss.py new file mode 100644 index 0000000..5cdaf0f --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/disentangled_box3d_loss.py @@ -0,0 +1,46 @@ +# Copyright 2021 Toyota Research Institute. All rights reserved. 
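+# Disentangled 3D box regression: each predicted component ('quat', 'proj_ctr',
+# 'depth', 'size') is substituted, one at a time, into a copy of the
+# ground-truth box, and an L1 loss is computed over the resulting 8 box corners
+# (8 x 3 = 24 values per instance). Each component is therefore supervised in
+# the shared corner space while the other components are held at ground truth.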
+import logging
+
+import torch
+import torch.nn as nn
+
+from adzoo.bevformer.mmdet3d_plugin.dd3d.layers.smooth_l1_loss import smooth_l1_loss
+
+LOG = logging.getLogger(__name__)
+
+
+class DisentangledBox3DLoss(nn.Module):
+    def __init__(self, smooth_l1_loss_beta, max_loss_per_group):
+        super().__init__()
+        self.smooth_l1_loss_beta = smooth_l1_loss_beta
+        self.max_loss_per_group = max_loss_per_group
+
+    def forward(self, box3d_pred, box3d_targets, locations, weights=None):
+
+        box3d_pred = box3d_pred.to(torch.float32)
+        box3d_targets = box3d_targets.to(torch.float32)
+
+        target_corners = box3d_targets.corners
+
+        disentangled_losses = {}
+        for component_key in ["quat", "proj_ctr", "depth", "size"]:
+            disentangled_boxes = box3d_targets.clone()
+            setattr(disentangled_boxes, component_key, getattr(box3d_pred, component_key))
+            pred_corners = disentangled_boxes.to(torch.float32).corners
+
+            loss = smooth_l1_loss(pred_corners, target_corners, beta=self.smooth_l1_loss_beta)
+
+            # Bound the loss (Tensor.clamp is not in-place; re-assign the result).
+            loss = loss.clamp(max=self.max_loss_per_group)
+
+            if weights is not None:
+                # loss = torch.sum(loss.reshape(-1, 24) * weights.unsqueeze(-1))
+                loss = torch.sum(loss.reshape(-1, 24).mean(dim=1) * weights)
+            else:
+                loss = loss.reshape(-1, 24).mean()
+
+            disentangled_losses["loss_box3d_" + component_key] = loss
+
+        entangled_l1_dist = (target_corners - box3d_pred.corners).detach().abs().reshape(-1, 24).mean(dim=1)
+
+        return disentangled_losses, entangled_l1_dist
diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/fcos2d.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/fcos2d.py
new file mode 100644
index 0000000..c9c6c08
--- /dev/null
+++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/fcos2d.py
@@ -0,0 +1,388 @@
+# Copyright 2021 Toyota Research Institute. All rights reserved.
+# Adapted from AdelaiDet:
+# https://github.com/aim-uofa/AdelaiDet
+import torch
+from torch import nn
+from torch import distributed as dist
+from torch.nn import functional as F
+
+from mmcv.layers import Conv2d, batched_nms, cat, get_norm
+from mmcv.losses import sigmoid_focal_loss
+from mmcv.structures import Instances, Boxes
+from mmcv.utils import force_fp32
+
+from adzoo.bevformer.mmdet3d_plugin.dd3d.layers.iou_loss import IOULoss
+from adzoo.bevformer.mmdet3d_plugin.dd3d.layers.normalization import ModuleListDial, Scale
+from adzoo.bevformer.mmdet3d_plugin.dd3d.utils.comm import reduce_sum
+
+INF = 100000000
+
+def get_world_size() -> int:
+    if not dist.is_available():
+        return 1
+    if not dist.is_initialized():
+        return 1
+    return dist.get_world_size()
+
+def compute_ctrness_targets(reg_targets):
+    if len(reg_targets) == 0:
+        return reg_targets.new_zeros(len(reg_targets))
+    left_right = reg_targets[:, [0, 2]]
+    top_bottom = reg_targets[:, [1, 3]]
+    ctrness = (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * \
+              (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
+    return torch.sqrt(ctrness)
+
+class FCOS2DHead(nn.Module):
+    def __init__(self,
+                 num_classes,
+                 input_shape,
+                 num_cls_convs=4,
+                 num_box_convs=4,
+                 norm='BN',
+                 use_deformable=False,
+                 use_scale=True,
+                 box2d_scale_init_factor=1.0,
+                 version='v2'):
+        super().__init__()
+
+        self.num_classes = num_classes
+        self.in_strides = [shape.stride for shape in input_shape]
+        self.num_levels = len(input_shape)
+
+        self.use_scale = use_scale
+        self.box2d_scale_init_factor = box2d_scale_init_factor
+
+        self._version = version
+
+        in_channels = [s.channels for s in input_shape]
+        assert len(set(in_channels)) == 1, "Each level must have the same channel!"
+        in_channels = in_channels[0]
+
+        if use_deformable:
+            raise ValueError("Not supported yet.")
+
+        head_configs = {'cls': num_cls_convs, 'box2d': num_box_convs}
+
+        for head_name, num_convs in head_configs.items():
+            tower = []
+            if self._version == "v1":
+                for _ in range(num_convs):
+                    conv_func = nn.Conv2d
+                    tower.append(conv_func(in_channels, in_channels, kernel_size=3, stride=1, padding=1, bias=True))
+                    if norm == "GN":
+                        raise NotImplementedError()
+                    elif norm == "NaiveGN":
+                        raise NotImplementedError()
+                    elif norm == "BN":
+                        tower.append(ModuleListDial([nn.BatchNorm2d(in_channels) for _ in range(self.num_levels)]))
+                    elif norm == "SyncBN":
+                        raise NotImplementedError()
+                    tower.append(nn.ReLU())
+            elif self._version == "v2":
+                for _ in range(num_convs):
+                    if norm in ("BN", "FrozenBN", "SyncBN", "GN"):
+                        # NOTE: need to add norm here!
+                        # Each FPN level has its own batchnorm layer.
+                        # NOTE: do not use dd3d train.py!
+ # "BN" is converted to "SyncBN" in distributed training (see train.py) + norm_layer = ModuleListDial([get_norm(norm, in_channels) for _ in range(self.num_levels)]) + else: + norm_layer = get_norm(norm, in_channels) + tower.append( + Conv2d( + in_channels, + in_channels, + kernel_size=3, + stride=1, + padding=1, + bias=norm_layer is None, + norm=norm_layer, + activation=F.relu + ) + ) + else: + raise ValueError(f"Invalid FCOS2D version: {self._version}") + self.add_module(f'{head_name}_tower', nn.Sequential(*tower)) + + self.cls_logits = nn.Conv2d(in_channels, self.num_classes, kernel_size=3, stride=1, padding=1) + self.box2d_reg = nn.Conv2d(in_channels, 4, kernel_size=3, stride=1, padding=1) + self.centerness = nn.Conv2d(in_channels, 1, kernel_size=3, stride=1, padding=1) + + if self.use_scale: + if self._version == "v1": + self.scales_reg = nn.ModuleList([ + Scale(init_value=stride * self.box2d_scale_init_factor) for stride in self.in_strides + ]) + else: + self.scales_box2d_reg = nn.ModuleList([ + Scale(init_value=stride * self.box2d_scale_init_factor) for stride in self.in_strides + ]) + + self.init_weights() + + def init_weights(self): + + for tower in [self.cls_tower, self.box2d_tower]: + for l in tower.modules(): + if isinstance(l, nn.Conv2d): + torch.nn.init.kaiming_normal_(l.weight, mode='fan_out', nonlinearity='relu') + if l.bias is not None: + torch.nn.init.constant_(l.bias, 0) + + predictors = [self.cls_logits, self.box2d_reg, self.centerness] + + for modules in predictors: + for l in modules.modules(): + if isinstance(l, nn.Conv2d): + torch.nn.init.kaiming_uniform_(l.weight, a=1) + if l.bias is not None: # depth head may not have bias. + torch.nn.init.constant_(l.bias, 0) + + def forward(self, x): + logits = [] + box2d_reg = [] + centerness = [] + + extra_output = {"cls_tower_out": []} + + for l, feature in enumerate(x): + cls_tower_out = self.cls_tower(feature) + bbox_tower_out = self.box2d_tower(feature) + + # 2D box + logits.append(self.cls_logits(cls_tower_out)) + centerness.append(self.centerness(bbox_tower_out)) + box_reg = self.box2d_reg(bbox_tower_out) + if self.use_scale: + # TODO: to optimize the runtime, apply this scaling in inference (and loss compute) only on FG pixels? + if self._version == "v1": + box_reg = self.scales_reg[l](box_reg) + else: + box_reg = self.scales_box2d_reg[l](box_reg) + # Note that we use relu, as in the improved FCOS, instead of exp. 
+ box2d_reg.append(F.relu(box_reg)) + + extra_output['cls_tower_out'].append(cls_tower_out) + + return logits, box2d_reg, centerness, extra_output + + +class FCOS2DLoss(nn.Module): + def __init__(self, + num_classes, + focal_loss_alpha=0.25, + focal_loss_gamma=2.0, + loc_loss_type='giou', + ): + super().__init__() + self.focal_loss_alpha = focal_loss_alpha + self.focal_loss_gamma = focal_loss_gamma + + self.box2d_reg_loss_fn = IOULoss(loc_loss_type) + + self.num_classes = num_classes + + @force_fp32(apply_to=('logits', 'box2d_reg', 'centerness')) + def forward(self, logits, box2d_reg, centerness, targets): + labels = targets['labels'] + box2d_reg_targets = targets['box2d_reg_targets'] + pos_inds = targets["pos_inds"] + + if len(labels) != box2d_reg_targets.shape[0]: + raise ValueError( + f"The size of 'labels' and 'box2d_reg_targets' does not match: a={len(labels)}, b={box2d_reg_targets.shape[0]}" + ) + + # Flatten predictions + logits = cat([x.permute(0, 2, 3, 1).reshape(-1, self.num_classes) for x in logits]) + box2d_reg_pred = cat([x.permute(0, 2, 3, 1).reshape(-1, 4) for x in box2d_reg]) + centerness_pred = cat([x.permute(0, 2, 3, 1).reshape(-1) for x in centerness]) + + # ------------------- + # Classification loss + # ------------------- + num_pos_local = pos_inds.numel() + num_gpus = get_world_size() + total_num_pos = reduce_sum(pos_inds.new_tensor([num_pos_local])).item() + num_pos_avg = max(total_num_pos / num_gpus, 1.0) + + # prepare one_hot + cls_target = torch.zeros_like(logits) + cls_target[pos_inds, labels[pos_inds]] = 1 + + loss_cls = sigmoid_focal_loss( + logits, + cls_target, + alpha=self.focal_loss_alpha, + gamma=self.focal_loss_gamma, + reduction="sum", + ) / num_pos_avg + + # NOTE: The rest of losses only consider foreground pixels. + box2d_reg_pred = box2d_reg_pred[pos_inds] + box2d_reg_targets = box2d_reg_targets[pos_inds] + + centerness_pred = centerness_pred[pos_inds] + + # Compute centerness targets here using 2D regression targets of foreground pixels. + centerness_targets = compute_ctrness_targets(box2d_reg_targets) + + # Denominator for all foreground losses. 
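+        # The foreground losses below are normalized by the centerness mass
+        # summed across all GPUs, i.e. roughly
+        #   loss_denom = max(sum_over_gpus(centerness_targets.sum()) / num_gpus, 1e-6),
+        # so each foreground location contributes in proportion to its
+        # centerness regardless of how positives are sharded across ranks.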
+ ctrness_targets_sum = centerness_targets.sum() + loss_denom = max(reduce_sum(ctrness_targets_sum).item() / num_gpus, 1e-6) + + # NOTE: change the return after reduce_sum + if pos_inds.numel() == 0: + losses = { + "loss_cls": loss_cls, + "loss_box2d_reg": box2d_reg_pred.sum() * 0., + "loss_centerness": centerness_pred.sum() * 0., + } + return losses, {} + + # ---------------------- + # 2D box regression loss + # ---------------------- + loss_box2d_reg = self.box2d_reg_loss_fn(box2d_reg_pred, box2d_reg_targets, centerness_targets) / loss_denom + + # --------------- + # Centerness loss + # --------------- + loss_centerness = F.binary_cross_entropy_with_logits( + centerness_pred, centerness_targets, reduction="sum" + ) / num_pos_avg + + loss_dict = {"loss_cls": loss_cls, "loss_box2d_reg": loss_box2d_reg, "loss_centerness": loss_centerness} + extra_info = {"loss_denom": loss_denom, "centerness_targets": centerness_targets} + + return loss_dict, extra_info + + +class FCOS2DInference(): + def __init__(self, cfg): + self.thresh_with_ctr = cfg.DD3D.FCOS2D.INFERENCE.THRESH_WITH_CTR + self.pre_nms_thresh = cfg.DD3D.FCOS2D.INFERENCE.PRE_NMS_THRESH + self.pre_nms_topk = cfg.DD3D.FCOS2D.INFERENCE.PRE_NMS_TOPK + self.post_nms_topk = cfg.DD3D.FCOS2D.INFERENCE.POST_NMS_TOPK + self.nms_thresh = cfg.DD3D.FCOS2D.INFERENCE.NMS_THRESH + self.num_classes = cfg.DD3D.NUM_CLASSES + + def __call__(self, logits, box2d_reg, centerness, locations, image_sizes): + + pred_instances = [] # List[List[Instances]], shape = (L, B) + extra_info = [] + for lvl, (logits_lvl, box2d_reg_lvl, centerness_lvl, locations_lvl) in \ + enumerate(zip(logits, box2d_reg, centerness, locations)): + + instances_per_lvl, extra_info_per_lvl = self.forward_for_single_feature_map( + logits_lvl, box2d_reg_lvl, centerness_lvl, locations_lvl, image_sizes + ) # List of Instances; one for each image. + + for instances_per_im in instances_per_lvl: + instances_per_im.fpn_levels = locations_lvl.new_ones(len(instances_per_im), dtype=torch.long) * lvl + + pred_instances.append(instances_per_lvl) + extra_info.append(extra_info_per_lvl) + + return pred_instances, extra_info + + def forward_for_single_feature_map(self, logits, box2d_reg, centerness, locations, image_sizes): + N, C, _, __ = logits.shape + + # put in the same format as locations + scores = logits.permute(0, 2, 3, 1).reshape(N, -1, C).sigmoid() + box2d_reg = box2d_reg.permute(0, 2, 3, 1).reshape(N, -1, 4) + centerness = centerness.permute(0, 2, 3, 1).reshape(N, -1).sigmoid() + + # if self.thresh_with_ctr is True, we multiply the classification + # scores with centerness scores before applying the threshold. + if self.thresh_with_ctr: + scores = scores * centerness[:, :, None] + + candidate_mask = scores > self.pre_nms_thresh + + pre_nms_topk = candidate_mask.reshape(N, -1).sum(1) + pre_nms_topk = pre_nms_topk.clamp(max=self.pre_nms_topk) + + if not self.thresh_with_ctr: + scores = scores * centerness[:, :, None] + + results = [] + all_fg_inds_per_im, all_topk_indices, all_class_inds_per_im = [], [], [] + for i in range(N): + scores_per_im = scores[i] + candidate_mask_per_im = candidate_mask[i] + scores_per_im = scores_per_im[candidate_mask_per_im] + + candidate_inds_per_im = candidate_mask_per_im.nonzero(as_tuple=False) + fg_inds_per_im = candidate_inds_per_im[:, 0] + class_inds_per_im = candidate_inds_per_im[:, 1] + + # Cache info here. 
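+            # These per-image indices (and the top-k selection below) are
+            # cached so that FCOS3DInference and NuscenesInference can pick out
+            # exactly the same locations from their own per-level predictions.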
+ all_fg_inds_per_im.append(fg_inds_per_im) + all_class_inds_per_im.append(class_inds_per_im) + + box2d_reg_per_im = box2d_reg[i][fg_inds_per_im] + locations_per_im = locations[fg_inds_per_im] + + pre_nms_topk_per_im = pre_nms_topk[i] + + if candidate_mask_per_im.sum().item() > pre_nms_topk_per_im.item(): + scores_per_im, topk_indices = \ + scores_per_im.topk(pre_nms_topk_per_im, sorted=False) + + class_inds_per_im = class_inds_per_im[topk_indices] + box2d_reg_per_im = box2d_reg_per_im[topk_indices] + locations_per_im = locations_per_im[topk_indices] + else: + topk_indices = None + + all_topk_indices.append(topk_indices) + + detections = torch.stack([ + locations_per_im[:, 0] - box2d_reg_per_im[:, 0], + locations_per_im[:, 1] - box2d_reg_per_im[:, 1], + locations_per_im[:, 0] + box2d_reg_per_im[:, 2], + locations_per_im[:, 1] + box2d_reg_per_im[:, 3], + ], + dim=1) + + instances = Instances(image_sizes[i]) + instances.pred_boxes = Boxes(detections) + instances.scores = torch.sqrt(scores_per_im) + instances.pred_classes = class_inds_per_im + instances.locations = locations_per_im + + results.append(instances) + + extra_info = { + "fg_inds_per_im": all_fg_inds_per_im, + "class_inds_per_im": all_class_inds_per_im, + "topk_indices": all_topk_indices + } + return results, extra_info + + def nms_and_top_k(self, instances_per_im, score_key_for_nms="scores"): + results = [] + for instances in instances_per_im: + if self.nms_thresh > 0: + # Multiclass NMS. + keep = batched_nms( + instances.pred_boxes.tensor, instances.get(score_key_for_nms), instances.pred_classes, + self.nms_thresh + ) + instances = instances[keep] + num_detections = len(instances) + + # Limit to max_per_image detections **over all classes** + if num_detections > self.post_nms_topk > 0: + scores = instances.scores + # image_thresh, _ = torch.kthvalue(scores.cpu(), num_detections - self.post_nms_topk + 1) + image_thresh, _ = torch.kthvalue(scores, num_detections - self.post_nms_topk + 1) + keep = scores >= image_thresh.item() + keep = torch.nonzero(keep).squeeze(1) + instances = instances[keep] + results.append(instances) + return results diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/fcos3d.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/fcos3d.py new file mode 100644 index 0000000..f0669a6 --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/fcos3d.py @@ -0,0 +1,427 @@ +# Copyright 2021 Toyota Research Institute. All rights reserved. +import torch +import torch.nn.functional as F +from torch import nn + +from mmcv.layers import Conv2d, batched_nms, cat, get_norm +from mmcv.utils import force_fp32 + +from .disentangled_box3d_loss import DisentangledBox3DLoss +from adzoo.bevformer.mmdet3d_plugin.dd3d.layers.normalization import ModuleListDial, Offset, Scale +from adzoo.bevformer.mmdet3d_plugin.dd3d.structures.boxes3d import Boxes3D +from adzoo.bevformer.mmdet3d_plugin.dd3d.utils.geometry import allocentric_to_egocentric, unproject_points2d + +EPS = 1e-7 + + +def predictions_to_boxes3d( + quat, + proj_ctr, + depth, + size, + locations, + inv_intrinsics, + canon_box_sizes, + min_depth, + max_depth, + scale_depth_by_focal_lengths_factor, + scale_depth_by_focal_lengths=True, + quat_is_allocentric=True, + depth_is_distance=False +): + # Normalize to make quat unit norm. + quat = quat / quat.norm(dim=1, keepdim=True).clamp(min=EPS) + # Make sure again it's numerically unit-norm. 
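+    # The clamp above only guards against a zero norm; dividing once more by
+    # the now-safe norm is a cheap second normalization pass for numerical
+    # safety (e.g. under fp16, where the first division can leave residual error).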
+ quat = quat / quat.norm(dim=1, keepdim=True) + + if scale_depth_by_focal_lengths: + pixel_size = torch.norm(torch.stack([inv_intrinsics[:, 0, 0], inv_intrinsics[:, 1, 1]], dim=-1), dim=-1) + depth = depth / (pixel_size * scale_depth_by_focal_lengths_factor) + + if depth_is_distance: + depth = depth / unproject_points2d(locations, inv_intrinsics).norm(dim=1).clamp(min=EPS) + + depth = depth.reshape(-1, 1).clamp(min_depth, max_depth) + + proj_ctr = proj_ctr + locations + + if quat_is_allocentric: + quat = allocentric_to_egocentric(quat, proj_ctr, inv_intrinsics) + + size = (size.tanh() + 1.) * canon_box_sizes # max size = 2 * canon_size + + return Boxes3D(quat, proj_ctr, depth, size, inv_intrinsics) + + +class FCOS3DHead(nn.Module): + def __init__(self, + num_classes, + input_shape, + num_convs=4, + norm='BN', + use_scale=True, + depth_scale_init_factor=0.3, + proj_ctr_scale_init_factor=1.0, + use_per_level_predictors=False, + class_agnostic=False, + use_deformable=False, + mean_depth_per_level=None, + std_depth_per_level=None, + ): + super().__init__() + self.num_classes = num_classes + self.in_strides = [shape.stride for shape in input_shape] + self.num_levels = len(input_shape) + + self.use_scale = use_scale + self.depth_scale_init_factor = depth_scale_init_factor + self.proj_ctr_scale_init_factor = proj_ctr_scale_init_factor + self.use_per_level_predictors = use_per_level_predictors + + self.register_buffer("mean_depth_per_level", torch.Tensor(mean_depth_per_level)) + self.register_buffer("std_depth_per_level", torch.Tensor(std_depth_per_level)) + + in_channels = [s.channels for s in input_shape] + assert len(set(in_channels)) == 1, "Each level must have the same channel!" + in_channels = in_channels[0] + + if use_deformable: + raise ValueError("Not supported yet.") + + box3d_tower = [] + for i in range(num_convs): + if norm in ("BN", "FrozenBN", "SyncBN", "GN"): + # NOTE: need to add norm here! + # Each FPN level has its own batchnorm layer. + # NOTE: do not use dd3d train.py! + # "BN" is converted to "SyncBN" in distributed training (see train.py) + norm_layer = ModuleListDial([get_norm(norm, in_channels) for _ in range(self.num_levels)]) + else: + norm_layer = get_norm(norm, in_channels) + box3d_tower.append( + Conv2d( + in_channels, + in_channels, + kernel_size=3, + stride=1, + padding=1, + bias=norm_layer is None, + norm=norm_layer, + activation=F.relu + ) + ) + self.add_module('box3d_tower', nn.Sequential(*box3d_tower)) + + num_classes = self.num_classes if not class_agnostic else 1 + num_levels = self.num_levels if use_per_level_predictors else 1 + + # 3D box branches. 
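+        # One 3x3 conv branch per 3D output: quaternion (4), projected-center
+        # offset (2), depth (1), size (3), and 3D confidence (1). Each output
+        # is optionally per-class (x num_classes) and, with
+        # use_per_level_predictors, gets a separate predictor per FPN level.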
+ self.box3d_quat = nn.ModuleList([ + Conv2d(in_channels, 4 * num_classes, kernel_size=3, stride=1, padding=1, bias=True) + for _ in range(num_levels) + ]) + self.box3d_ctr = nn.ModuleList([ + Conv2d(in_channels, 2 * num_classes, kernel_size=3, stride=1, padding=1, bias=True) + for _ in range(num_levels) + ]) + self.box3d_depth = nn.ModuleList([ + Conv2d(in_channels, 1 * num_classes, kernel_size=3, stride=1, padding=1, bias=(not self.use_scale)) + for _ in range(num_levels) + ]) + self.box3d_size = nn.ModuleList([ + Conv2d(in_channels, 3 * num_classes, kernel_size=3, stride=1, padding=1, bias=True) + for _ in range(num_levels) + ]) + self.box3d_conf = nn.ModuleList([ + Conv2d(in_channels, 1 * num_classes, kernel_size=3, stride=1, padding=1, bias=True) + for _ in range(num_levels) + ]) + + if self.use_scale: + self.scales_proj_ctr = nn.ModuleList([ + Scale(init_value=stride * self.proj_ctr_scale_init_factor) for stride in self.in_strides + ]) + # (pre-)compute (mean, std) of depth for each level, and determine the init value here. + self.scales_size = nn.ModuleList([Scale(init_value=1.0) for _ in range(self.num_levels)]) + self.scales_conf = nn.ModuleList([Scale(init_value=1.0) for _ in range(self.num_levels)]) + + self.scales_depth = nn.ModuleList([ + Scale(init_value=sigma * self.depth_scale_init_factor) for sigma in self.std_depth_per_level + ]) + self.offsets_depth = nn.ModuleList([Offset(init_value=b) for b in self.mean_depth_per_level]) + + self._init_weights() + + def _init_weights(self): + + for l in self.box3d_tower.modules(): + if isinstance(l, nn.Conv2d): + torch.nn.init.kaiming_normal_(l.weight, mode='fan_out', nonlinearity='relu') + if l.bias is not None: + torch.nn.init.constant_(l.bias, 0) + + predictors = [self.box3d_quat, self.box3d_ctr, self.box3d_depth, self.box3d_size, self.box3d_conf] + + for modules in predictors: + for l in modules.modules(): + if isinstance(l, nn.Conv2d): + torch.nn.init.kaiming_uniform_(l.weight, a=1) + if l.bias is not None: # depth head may not have bias. + torch.nn.init.constant_(l.bias, 0) + + def forward(self, x): + box3d_quat, box3d_ctr, box3d_depth, box3d_size, box3d_conf = [], [], [], [], [] + dense_depth = None + for l, features in enumerate(x): + box3d_tower_out = self.box3d_tower(features) + + _l = l if self.use_per_level_predictors else 0 + + # 3D box + quat = self.box3d_quat[_l](box3d_tower_out) + proj_ctr = self.box3d_ctr[_l](box3d_tower_out) + depth = self.box3d_depth[_l](box3d_tower_out) + size3d = self.box3d_size[_l](box3d_tower_out) + conf3d = self.box3d_conf[_l](box3d_tower_out) + + if self.use_scale: + # TODO: to optimize the runtime, apply this scaling in inference (and loss compute) only on FG pixels? 
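+                # Depth is re-parametrized per level: at initialization,
+                #   depth = mean_depth[l] + (std_depth[l] * depth_scale_init_factor) * raw,
+                # with both the Scale and Offset modules remaining learnable,
+                # so the raw head output starts in a sensible depth range.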
+ proj_ctr = self.scales_proj_ctr[l](proj_ctr) + size3d = self.scales_size[l](size3d) + conf3d = self.scales_conf[l](conf3d) + depth = self.offsets_depth[l](self.scales_depth[l](depth)) + + box3d_quat.append(quat) + box3d_ctr.append(proj_ctr) + box3d_depth.append(depth) + box3d_size.append(size3d) + box3d_conf.append(conf3d) + + return box3d_quat, box3d_ctr, box3d_depth, box3d_size, box3d_conf, dense_depth + + +class FCOS3DLoss(nn.Module): + def __init__(self, + num_classes, + min_depth=0.1, + max_depth=80.0, + box3d_loss_weight=2.0, + conf3d_loss_weight=1.0, + conf_3d_temperature=1.0, + smooth_l1_loss_beta=0.05, + max_loss_per_group=20, + predict_allocentric_rot=True, + scale_depth_by_focal_lengths=True, + scale_depth_by_focal_lengths_factor=500.0, + class_agnostic=False, + predict_distance=False, + canon_box_sizes=None): + super().__init__() + self.canon_box_sizes = canon_box_sizes + self.min_depth = min_depth + self.max_depth = max_depth + self.predict_allocentric_rot = predict_allocentric_rot + self.scale_depth_by_focal_lengths = scale_depth_by_focal_lengths + self.scale_depth_by_focal_lengths_factor = scale_depth_by_focal_lengths_factor + self.predict_distance = predict_distance + + self.box3d_reg_loss_fn = DisentangledBox3DLoss(smooth_l1_loss_beta, max_loss_per_group) + self.box3d_loss_weight = box3d_loss_weight + self.conf3d_loss_weight = conf3d_loss_weight + self.conf_3d_temperature = conf_3d_temperature + + self.num_classes = num_classes + self.class_agnostic = class_agnostic + + @force_fp32(apply_to=('box3d_quat', 'box3d_ctr', 'box3d_depth', 'box3d_size','box3d_conf', 'inv_intrinsics')) + def forward( + self, box3d_quat, box3d_ctr, box3d_depth, box3d_size, box3d_conf, dense_depth, inv_intrinsics, fcos2d_info, + targets + ): + labels = targets['labels'] + box3d_targets = targets['box3d_targets'] + pos_inds = targets["pos_inds"] + + if pos_inds.numel() == 0: + losses = { + "loss_box3d_quat": torch.stack([x.sum() * 0. for x in box3d_quat]).sum(), + "loss_box3d_proj_ctr": torch.stack([x.sum() * 0. for x in box3d_ctr]).sum(), + "loss_box3d_depth": torch.stack([x.sum() * 0. for x in box3d_depth]).sum(), + "loss_box3d_size": torch.stack([x.sum() * 0. for x in box3d_size]).sum(), + "loss_conf3d": torch.stack([x.sum() * 0. 
for x in box3d_conf]).sum() + } + return losses + + if len(labels) != len(box3d_targets): + raise ValueError( + f"The size of 'labels' and 'box3d_targets' does not match: a={len(labels)}, b={len(box3d_targets)}" + ) + + num_classes = self.num_classes if not self.class_agnostic else 1 + + box3d_quat_pred = cat([x.permute(0, 2, 3, 1).reshape(-1, 4, num_classes) for x in box3d_quat]) + box3d_ctr_pred = cat([x.permute(0, 2, 3, 1).reshape(-1, 2, num_classes) for x in box3d_ctr]) + box3d_depth_pred = cat([x.permute(0, 2, 3, 1).reshape(-1, num_classes) for x in box3d_depth]) + box3d_size_pred = cat([x.permute(0, 2, 3, 1).reshape(-1, 3, num_classes) for x in box3d_size]) + box3d_conf_pred = cat([x.permute(0, 2, 3, 1).reshape(-1, num_classes) for x in box3d_conf]) + + # ---------------------- + # 3D box disentangled loss + # ---------------------- + box3d_targets = box3d_targets[pos_inds] + + box3d_quat_pred = box3d_quat_pred[pos_inds] + box3d_ctr_pred = box3d_ctr_pred[pos_inds] + box3d_depth_pred = box3d_depth_pred[pos_inds] + box3d_size_pred = box3d_size_pred[pos_inds] + box3d_conf_pred = box3d_conf_pred[pos_inds] + + if self.class_agnostic: + box3d_quat_pred = box3d_quat_pred.squeeze(-1) + box3d_ctr_pred = box3d_ctr_pred.squeeze(-1) + box3d_depth_pred = box3d_depth_pred.squeeze(-1) + box3d_size_pred = box3d_size_pred.squeeze(-1) + box3d_conf_pred = box3d_conf_pred.squeeze(-1) + else: + I = labels[pos_inds][..., None, None] + box3d_quat_pred = torch.gather(box3d_quat_pred, dim=2, index=I.repeat(1, 4, 1)).squeeze(-1) + box3d_ctr_pred = torch.gather(box3d_ctr_pred, dim=2, index=I.repeat(1, 2, 1)).squeeze(-1) + box3d_depth_pred = torch.gather(box3d_depth_pred, dim=1, index=I.squeeze(-1)).squeeze(-1) + box3d_size_pred = torch.gather(box3d_size_pred, dim=2, index=I.repeat(1, 3, 1)).squeeze(-1) + box3d_conf_pred = torch.gather(box3d_conf_pred, dim=1, index=I.squeeze(-1)).squeeze(-1) + + canon_box_sizes = box3d_quat_pred.new_tensor(self.canon_box_sizes)[labels[pos_inds]] + + locations = targets["locations"][pos_inds] + im_inds = targets["im_inds"][pos_inds] + inv_intrinsics = inv_intrinsics[im_inds] + + box3d_pred = predictions_to_boxes3d( + box3d_quat_pred, + box3d_ctr_pred, + box3d_depth_pred, + box3d_size_pred, + locations, + inv_intrinsics, + canon_box_sizes, + self.min_depth, + self.max_depth, + scale_depth_by_focal_lengths_factor=self.scale_depth_by_focal_lengths_factor, + scale_depth_by_focal_lengths=self.scale_depth_by_focal_lengths, + quat_is_allocentric=self.predict_allocentric_rot, + depth_is_distance=self.predict_distance + ) + + centerness_targets = fcos2d_info["centerness_targets"] + loss_denom = fcos2d_info["loss_denom"] + losses_box3d, box3d_l1_error = self.box3d_reg_loss_fn(box3d_pred, box3d_targets, locations, centerness_targets) + + losses_box3d = {k: self.box3d_loss_weight * v / loss_denom for k, v in losses_box3d.items()} + + conf_3d_targets = torch.exp(-1. 
/ self.conf_3d_temperature * box3d_l1_error) + loss_conf3d = F.binary_cross_entropy_with_logits(box3d_conf_pred, conf_3d_targets, reduction='none') + loss_conf3d = self.conf3d_loss_weight * (loss_conf3d * centerness_targets).sum() / loss_denom + + losses = {"loss_conf3d": loss_conf3d, **losses_box3d} + + return losses + + +class FCOS3DInference(): + def __init__(self, cfg): + self.canon_box_sizes = cfg.DD3D.FCOS3D.CANONICAL_BOX3D_SIZES + self.min_depth = cfg.DD3D.FCOS3D.MIN_DEPTH + self.max_depth = cfg.DD3D.FCOS3D.MAX_DEPTH + self.predict_allocentric_rot = cfg.DD3D.FCOS3D.PREDICT_ALLOCENTRIC_ROT + self.scale_depth_by_focal_lengths = cfg.DD3D.FCOS3D.SCALE_DEPTH_BY_FOCAL_LENGTHS + self.scale_depth_by_focal_lengths_factor = cfg.DD3D.FCOS3D.SCALE_DEPTH_BY_FOCAL_LENGTHS_FACTOR + self.predict_distance = cfg.DD3D.FCOS3D.PREDICT_DISTANCE + + self.num_classes = cfg.DD3D.NUM_CLASSES + self.class_agnostic = cfg.DD3D.FCOS3D.CLASS_AGNOSTIC_BOX3D + + def __call__( + self, box3d_quat, box3d_ctr, box3d_depth, box3d_size, box3d_conf, inv_intrinsics, pred_instances, fcos2d_info + ): + # pred_instances: # List[List[Instances]], shape = (L, B) + for lvl, (box3d_quat_lvl, box3d_ctr_lvl, box3d_depth_lvl, box3d_size_lvl, box3d_conf_lvl) in \ + enumerate(zip(box3d_quat, box3d_ctr, box3d_depth, box3d_size, box3d_conf)): + + # In-place modification: update per-level pred_instances. + self.forward_for_single_feature_map( + box3d_quat_lvl, box3d_ctr_lvl, box3d_depth_lvl, box3d_size_lvl, box3d_conf_lvl, inv_intrinsics, + pred_instances[lvl], fcos2d_info[lvl] + ) # List of Instances; one for each image. + + def forward_for_single_feature_map( + self, box3d_quat, box3d_ctr, box3d_depth, box3d_size, box3d_conf, inv_intrinsics, pred_instances, fcos2d_info + ): + N = box3d_quat.shape[0] + + num_classes = self.num_classes if not self.class_agnostic else 1 + + box3d_quat = box3d_quat.permute(0, 2, 3, 1).reshape(N, -1, 4, num_classes) + box3d_ctr = box3d_ctr.permute(0, 2, 3, 1).reshape(N, -1, 2, num_classes) + box3d_depth = box3d_depth.permute(0, 2, 3, 1).reshape(N, -1, num_classes) + box3d_size = box3d_size.permute(0, 2, 3, 1).reshape(N, -1, 3, num_classes) + box3d_conf = box3d_conf.permute(0, 2, 3, 1).reshape(N, -1, num_classes).sigmoid() + + for i in range(N): + fg_inds_per_im = fcos2d_info['fg_inds_per_im'][i] + class_inds_per_im = fcos2d_info['class_inds_per_im'][i] + topk_indices = fcos2d_info['topk_indices'][i] + + box3d_quat_per_im = box3d_quat[i][fg_inds_per_im] + box3d_ctr_per_im = box3d_ctr[i][fg_inds_per_im] + box3d_depth_per_im = box3d_depth[i][fg_inds_per_im] + box3d_size_per_im = box3d_size[i][fg_inds_per_im] + box3d_conf_per_im = box3d_conf[i][fg_inds_per_im] + + if self.class_agnostic: + box3d_quat_per_im = box3d_quat_per_im.squeeze(-1) + box3d_ctr_per_im = box3d_ctr_per_im.squeeze(-1) + box3d_depth_per_im = box3d_depth_per_im.squeeze(-1) + box3d_size_per_im = box3d_size_per_im.squeeze(-1) + box3d_conf_per_im = box3d_conf_per_im.squeeze(-1) + else: + I = class_inds_per_im[..., None, None] + box3d_quat_per_im = torch.gather(box3d_quat_per_im, dim=2, index=I.repeat(1, 4, 1)).squeeze(-1) + box3d_ctr_per_im = torch.gather(box3d_ctr_per_im, dim=2, index=I.repeat(1, 2, 1)).squeeze(-1) + box3d_depth_per_im = torch.gather(box3d_depth_per_im, dim=1, index=I.squeeze(-1)).squeeze(-1) + box3d_size_per_im = torch.gather(box3d_size_per_im, dim=2, index=I.repeat(1, 3, 1)).squeeze(-1) + box3d_conf_per_im = torch.gather(box3d_conf_per_im, dim=1, index=I.squeeze(-1)).squeeze(-1) + + if topk_indices is not None: + 
box3d_quat_per_im = box3d_quat_per_im[topk_indices] + box3d_ctr_per_im = box3d_ctr_per_im[topk_indices] + box3d_depth_per_im = box3d_depth_per_im[topk_indices] + box3d_size_per_im = box3d_size_per_im[topk_indices] + box3d_conf_per_im = box3d_conf_per_im[topk_indices] + + # scores_per_im = pred_instances[i].scores.square() + # NOTE: Before refactoring, the squared score was used. Is raw 2D score better? + scores_per_im = pred_instances[i].scores + scores_3d_per_im = scores_per_im * box3d_conf_per_im + + canon_box_sizes = box3d_quat.new_tensor(self.canon_box_sizes)[pred_instances[i].pred_classes] + inv_K = inv_intrinsics[i][None, ...].expand(len(box3d_quat_per_im), 3, 3) + locations = pred_instances[i].locations + pred_boxes3d = predictions_to_boxes3d( + box3d_quat_per_im, + box3d_ctr_per_im, + box3d_depth_per_im, + box3d_size_per_im, + locations, + inv_K, + canon_box_sizes, + self.min_depth, + self.max_depth, + scale_depth_by_focal_lengths_factor=self.scale_depth_by_focal_lengths_factor, + scale_depth_by_focal_lengths=self.scale_depth_by_focal_lengths, + quat_is_allocentric=self.predict_allocentric_rot, + depth_is_distance=self.predict_distance + ) + + # In-place modification: add fields to instances. + pred_instances[i].pred_boxes3d = pred_boxes3d + pred_instances[i].scores_3d = scores_3d_per_im diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/nuscenes_dd3d.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/nuscenes_dd3d.py new file mode 100644 index 0000000..04a78d7 --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/nuscenes_dd3d.py @@ -0,0 +1,525 @@ +# Copyright 2021 Toyota Research Institute. All rights reserved. +import torch +import torch.nn.functional as F +from mmcv.losses.fvcore_smooth_l1_loss import smooth_l1_loss +from torch import nn + +from mmcv.structures import Instances +from mmcv.models.builder import HEADS +from mmcv.utils import force_fp32 +from torch import distributed as dist +from mmcv.modeling.postprocessing import detector_postprocess as resize_instances +from mmcv.layers import cat, Conv2d +from adzoo.bevformer.mmdet3d_plugin.dd3d.datasets.nuscenes import MAX_NUM_ATTRIBUTES +from .core import DD3D +from .prepare_targets import DD3DTargetPreparer +from adzoo.bevformer.mmdet3d_plugin.dd3d.structures.boxes3d import Boxes3D +from adzoo.bevformer.mmdet3d_plugin.dd3d.structures.image_list import ImageList +from adzoo.bevformer.mmdet3d_plugin.dd3d.utils.comm import reduce_sum + +INF = 100000000. 
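+# MAX_NUM_ATTRIBUTES doubles as a "no attribute" sentinel in the targets.
+# Illustrative example: with MAX_NUM_ATTRIBUTES == A,
+#   target_attr = tensor([0, 3, A, 1])  ->  valid_attr_mask = tensor([T, T, F, T]),
+# so the third location is excluded from the attribute cross-entropy
+# (see NuscenesLoss.forward below).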
+ +def get_world_size() -> int: + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size() + +class NuscenesDD3DTargetPreparer(DD3DTargetPreparer): + def __init__(self, **kwargs): + super().__init__(**kwargs) + assert self.dd3d_enabled, f"{type(self).__name__} requires dd3d_enabled = True" + + def __call__(self, locations, gt_instances, feature_shapes): + num_loc_list = [len(loc) for loc in locations] + + # compute locations to size ranges + loc_to_size_range = [] + for l, loc_per_level in enumerate(locations): + loc_to_size_range_per_level = loc_per_level.new_tensor(self.sizes_of_interest[l]) + loc_to_size_range.append(loc_to_size_range_per_level[None].expand(num_loc_list[l], -1)) + + loc_to_size_range = torch.cat(loc_to_size_range, dim=0) + locations = torch.cat(locations, dim=0) + + training_targets = self.compute_targets_for_locations(locations, gt_instances, loc_to_size_range, num_loc_list) + + training_targets["locations"] = [locations.clone() for _ in range(len(gt_instances))] + training_targets["im_inds"] = [ + locations.new_ones(locations.size(0), dtype=torch.long) * i for i in range(len(gt_instances)) + ] + + box2d = training_targets.pop("box2d", None) + + # transpose im first training_targets to level first ones + training_targets = {k: self._transpose(v, num_loc_list) for k, v in training_targets.items() if k != "box2d"} + + training_targets["fpn_levels"] = [ + loc.new_ones(len(loc), dtype=torch.long) * level for level, loc in enumerate(training_targets["locations"]) + ] + + # Flatten targets: (L x B x H x W, TARGET_SIZE) + labels = cat([x.reshape(-1) for x in training_targets["labels"]]) + box2d_reg_targets = cat([x.reshape(-1, 4) for x in training_targets["box2d_reg"]]) + + target_inds = cat([x.reshape(-1) for x in training_targets["target_inds"]]) + locations = cat([x.reshape(-1, 2) for x in training_targets["locations"]]) + im_inds = cat([x.reshape(-1) for x in training_targets["im_inds"]]) + fpn_levels = cat([x.reshape(-1) for x in training_targets["fpn_levels"]]) + + pos_inds = torch.nonzero(labels != self.num_classes).squeeze(1) + + targets = { + "labels": labels, + "box2d_reg_targets": box2d_reg_targets, + "locations": locations, + "target_inds": target_inds, + "im_inds": im_inds, + "fpn_levels": fpn_levels, + "pos_inds": pos_inds + } + + if self.dd3d_enabled: + box3d_targets = Boxes3D.cat(training_targets["box3d"]) + targets.update({"box3d_targets": box3d_targets}) + + if box2d is not None: + # Original format is B x L x (H x W, 4) + # Need to be in L x (B, 4, H, W). 
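+            # Reshape sketch: 'box2d' arrives as B x L x (H*W, 4); zip(*box2d)
+            # regroups it per level, and each per-image (H*W, 4) tensor is
+            # transposed and reshaped to (4, H, W) before stacking over B.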
+ batched_box2d = [] + for lvl, per_lvl_box2d in enumerate(zip(*box2d)): + # B x (H x W, 4) + h, w = feature_shapes[lvl] + batched_box2d_lvl = torch.stack([x.T.reshape(4, h, w) for x in per_lvl_box2d], dim=0) + batched_box2d.append(batched_box2d_lvl) + targets.update({"batched_box2d": batched_box2d}) + + # Nuscenes targets -- attribute / speed + attributes = cat([x.reshape(-1) for x in training_targets["attributes"]]) + speeds = cat([x.reshape(-1) for x in training_targets["speeds"]]) + + targets.update({'attributes': attributes, 'speeds': speeds}) + + return targets + + def compute_targets_for_locations(self, locations, targets, size_ranges, num_loc_list): + labels = [] + box2d_reg = [] + + if self.dd3d_enabled: + box3d = [] + + target_inds = [] + xs, ys = locations[:, 0], locations[:, 1] + + # NuScenes targets -- attribute / speed + attributes, speeds = [], [] + + num_targets = 0 + for im_i in range(len(targets)): + targets_per_im = targets[im_i] + bboxes = targets_per_im.gt_boxes.tensor + labels_per_im = targets_per_im.gt_classes + + # no gt + if bboxes.numel() == 0: + labels.append(labels_per_im.new_zeros(locations.size(0)) + self.num_classes) + # reg_targets.append(locations.new_zeros((locations.size(0), 4))) + box2d_reg.append(locations.new_zeros((locations.size(0), 4))) + target_inds.append(labels_per_im.new_zeros(locations.size(0)) - 1) + + if self.dd3d_enabled: + box3d.append( + Boxes3D( + locations.new_zeros(locations.size(0), 4), + locations.new_zeros(locations.size(0), 2), + locations.new_zeros(locations.size(0), 1), + locations.new_zeros(locations.size(0), 3), + locations.new_zeros(locations.size(0), 3, 3), + ).to(torch.float32) + ) + # NOTE: attributes and speeds. + attributes.append(labels_per_im.new_zeros(locations.size(0))) + speeds.append(labels_per_im.new_zeros(locations.size(0))) + continue + + area = targets_per_im.gt_boxes.area() + + l = xs[:, None] - bboxes[:, 0][None] + t = ys[:, None] - bboxes[:, 1][None] + r = bboxes[:, 2][None] - xs[:, None] + b = bboxes[:, 3][None] - ys[:, None] + # reg_targets_per_im = torch.stack([l, t, r, b], dim=2) + box2d_reg_per_im = torch.stack([l, t, r, b], dim=2) + + if self.center_sample: + is_in_boxes = self.get_sample_region(bboxes, num_loc_list, xs, ys) + else: + is_in_boxes = box2d_reg_per_im.min(dim=2)[0] > 0 + + max_reg_targets_per_im = box2d_reg_per_im.max(dim=2)[0] + # limit the regression range for each location + is_cared_in_the_level = \ + (max_reg_targets_per_im >= size_ranges[:, [0]]) & \ + (max_reg_targets_per_im <= size_ranges[:, [1]]) + + locations_to_gt_area = area[None].repeat(len(locations), 1) + locations_to_gt_area[is_in_boxes == 0] = INF + locations_to_gt_area[is_cared_in_the_level == 0] = INF + + # if there are still more than one objects for a location, + # we choose the one with minimal area + locations_to_min_area, locations_to_gt_inds = locations_to_gt_area.min(dim=1) + + box2d_reg_per_im = box2d_reg_per_im[range(len(locations)), locations_to_gt_inds] + target_inds_per_im = locations_to_gt_inds + num_targets + num_targets += len(targets_per_im) + + labels_per_im = labels_per_im[locations_to_gt_inds] + labels_per_im[locations_to_min_area == INF] = self.num_classes + + labels.append(labels_per_im) + box2d_reg.append(box2d_reg_per_im) + target_inds.append(target_inds_per_im) + + if self.dd3d_enabled: + # 3D box targets + box3d_per_im = targets_per_im.gt_boxes3d[locations_to_gt_inds] + box3d.append(box3d_per_im) + + # NuScenes targets -- attribute / speed + attributes_per_im = 
targets_per_im.gt_attributes[locations_to_gt_inds]
+                speeds_per_im = targets_per_im.gt_speeds[locations_to_gt_inds]
+                attributes.append(attributes_per_im)
+                speeds.append(speeds_per_im)
+
+        ret = {"labels": labels, "box2d_reg": box2d_reg, "target_inds": target_inds}
+        if self.dd3d_enabled:
+            ret.update({"box3d": box3d})
+
+        # NuScenes targets -- attribute / speed
+        ret.update({"attributes": attributes, "speeds": speeds})
+
+        return ret
+
+
+class NuscenesLoss(nn.Module):
+    def __init__(self, attr_loss_weight=0.2, speed_loss_weight=0.2):
+        super().__init__()
+        self.attr_loss_weight = attr_loss_weight
+        self.speed_loss_weight = speed_loss_weight
+
+    @force_fp32(apply_to=('attr_logits', 'speeds'))
+    def forward(self, attr_logits, speeds, fcos2d_info, targets):
+        # Flatten predictions
+        attr_logits = cat([x.permute(0, 2, 3, 1).reshape(-1, MAX_NUM_ATTRIBUTES) for x in attr_logits])
+        speeds = cat([x.permute(0, 2, 3, 1).reshape(-1) for x in speeds])
+
+        pos_inds = targets['pos_inds']
+
+        losses = {}
+
+        # 1. Attributes
+        attr_logits = attr_logits[pos_inds]
+        target_attr = targets['attributes'][pos_inds]
+        valid_attr_mask = target_attr != MAX_NUM_ATTRIBUTES  # No attrs associated with class, or just attr missing.
+
+        if pos_inds.numel() == 0:
+            attr_weights = attr_logits.new_tensor(0.0)  # torch.tensor(0.0).cuda()
+        else:
+            attr_weights = fcos2d_info['centerness_targets'][valid_attr_mask]
+            # Denominator for all foreground losses -- re-computed for features with valid attributes.
+            # attr_loss_denom = max(reduce_sum(attr_weights.sum()).item() / d2_comm.get_world_size(), 1e-6)
+            # NOTE: compute attr_weights_sum, and then feed it to reduce_sum() works, but not above.
+            attr_weights_sum = attr_weights.sum()
+            attr_loss_denom = max(reduce_sum(attr_weights_sum).item() / get_world_size(), 1e-6)
+
+        if valid_attr_mask.sum() == 0:
+            losses.update({"loss_attr": attr_logits.sum() * 0.})
+        else:
+            attr_logits = attr_logits[valid_attr_mask]
+            target_attr = target_attr[valid_attr_mask]
+
+            # Per-element cross-entropy; reduction="none" is required so each
+            # location can be weighted by its centerness target before summing.
+            xent = F.cross_entropy(attr_logits, target_attr, reduction="none")
+            loss_attr = (xent * attr_weights).sum() / attr_loss_denom
+
+            losses.update({"loss_attr": self.attr_loss_weight * loss_attr})
+
+        # 2. Speed
+        speeds = speeds[pos_inds]
+        target_speeds = targets['speeds'][pos_inds]
+        # NOTE: some GT speeds are NaN.
+        valid_gt_mask = torch.logical_not(torch.isnan(target_speeds))
+
+        if pos_inds.numel() == 0:
+            speed_weights = speeds.new_tensor(0.0)  # torch.tensor(0.0).cuda()
+        else:
+            speed_weights = fcos2d_info['centerness_targets'][valid_gt_mask]
+            # Denominator for all foreground losses -- re-computed for features with valid speeds.
+            # speed_loss_denom = max(reduce_sum(speed_weights.sum()).item() / d2_comm.get_world_size(), 1e-6)
+            speed_weights_sum = speed_weights.sum()
+            speed_loss_denom = max(reduce_sum(speed_weights_sum).item() / get_world_size(), 1e-6)
+
+        # NOTE: move after reduce sum
+        if pos_inds.numel() == 0:
+            losses = {"loss_attr": attr_logits.sum() * 0., "loss_speed": speeds.sum() * 0.}
+            # NOTE: This is probably un-reachable, because training filters out images with empty annotations.
+            # NOTE: If it is reachable, attr_weights can be unavailable in the reduce_sum() calls above.
+ return losses + + if valid_gt_mask.sum() == 0: + losses.update({"loss_speed": speeds.sum() * 0.}) + # return losses + else: + speeds = speeds[valid_gt_mask] + target_speeds = target_speeds[valid_gt_mask] + + l1_error = smooth_l1_loss(speeds, target_speeds, beta=0.05) + loss_speed = (l1_error * speed_weights).sum() / speed_loss_denom + losses.update({"loss_speed": self.speed_loss_weight * loss_speed}) + + return losses + + +class NuscenesInference(): + def __init__(self, cfg): + pass + + def __call__(self, attr_logits, speeds, pred_instances, fcos2d_info): + """Add 'pred_attribute', 'pred_speed' to Instances in 'pred_instances'.""" + N = attr_logits[0].shape[0] + for lvl, (attr_logits_lvl, speed_lvl, info_lvl, instances_lvl) in \ + enumerate(zip(attr_logits, speeds, fcos2d_info, pred_instances)): + + attr_logits_lvl = attr_logits_lvl.permute(0, 2, 3, 1).reshape(N, -1, MAX_NUM_ATTRIBUTES) + speed_lvl = speed_lvl.permute(0, 2, 3, 1).reshape(N, -1) + for i in range(N): + fg_inds_per_im = info_lvl['fg_inds_per_im'][i] + topk_indices = info_lvl['topk_indices'][i] + + attr_logits_per_im = attr_logits_lvl[i][fg_inds_per_im] + speed_per_im = speed_lvl[i][fg_inds_per_im] + + if topk_indices is not None: + attr_logits_per_im = attr_logits_per_im[topk_indices] + speed_per_im = speed_per_im[topk_indices] + + if len(attr_logits_per_im) == 0: + instances_lvl[i].pred_attributes = instances_lvl[i].pred_classes.new_tensor([]) + instances_lvl[i].pred_speeds = instances_lvl[i].scores.new_tensor([]) + else: + instances_lvl[i].pred_attributes = attr_logits_per_im.argmax(dim=1) + instances_lvl[i].pred_speeds = speed_per_im + + +@HEADS.register_module() +class NuscenesDD3D(DD3D): + def __init__(self, + num_classes, + in_channels, + strides, + fcos2d_cfg=dict(), + fcos2d_loss_cfg=dict(), + fcos3d_cfg=dict(), + fcos3d_loss_cfg=dict(), + target_assign_cfg=dict(), + nusc_loss_weight=dict(), + box3d_on=True, + feature_locations_offset="none"): + super().__init__(num_classes, + in_channels, + strides, + fcos2d_cfg=fcos2d_cfg, + fcos2d_loss_cfg=fcos2d_loss_cfg, + fcos3d_cfg=fcos3d_cfg, + fcos3d_loss_cfg=fcos3d_loss_cfg, + target_assign_cfg=target_assign_cfg, + box3d_on=box3d_on, + feature_locations_offset=feature_locations_offset) + + # backbone_output_shape = self.backbone_output_shape + # in_channels = backbone_output_shape[0].channels + + # -------------------------------------------------------------------------- + # NuScenes predictions -- attribute / speed, computed from cls_tower output. + # -------------------------------------------------------------------------- + self.attr_logits = Conv2d(in_channels, MAX_NUM_ATTRIBUTES, kernel_size=3, stride=1, padding=1, bias=True) + self.speed = Conv2d(in_channels, 1, kernel_size=3, stride=1, padding=1, bias=True, activation=F.relu) + + # init weights + for modules in [self.attr_logits, self.speed]: + for l in modules.modules(): + if isinstance(l, nn.Conv2d): + torch.nn.init.kaiming_uniform_(l.weight, a=1) + if l.bias is not None: # depth head may not have bias. 
+ torch.nn.init.constant_(l.bias, 0) + + # Re-define target preparer + del self.prepare_targets + self.prepare_targets = NuscenesDD3DTargetPreparer(num_classes=num_classes, + input_shape=self.backbone_output_shape, + box3d_on=box3d_on, + **target_assign_cfg) + + self.nuscenes_loss = NuscenesLoss(**nusc_loss_weight) + # NOTE: inference later + # self.nuscenes_inference = NuscenesInference(cfg) + + # self.num_images_per_sample = cfg.MODEL.FCOS3D.NUSC_NUM_IMAGES_PER_SAMPLE + # NOTE: inference later + # self.num_images_per_sample = cfg.DD3D.NUSC.INFERENCE.NUM_IMAGES_PER_SAMPLE + + # assert self.num_images_per_sample == 6 + # assert cfg.DATALOADER.TEST.NUM_IMAGES_PER_GROUP == 6 + + # NOTE: NuScenes evaluator allows max. 500 detections per sample. + # self.max_num_dets_per_sample = cfg.DD3D.NUSC.INFERENCE.MAX_NUM_DETS_PER_SAMPLE + + @force_fp32(apply_to=('features')) + def forward(self, features, batched_inputs): + # NOTE: + # images = [x["image"].to(self.device) for x in batched_inputs] + # images = [self.preprocess_image(x) for x in images] + + # NOTE: directly use inv_intrinsics + # if 'intrinsics' in batched_inputs[0]: + # intrinsics = [x['intrinsics'].to(self.device) for x in batched_inputs] + # else: + # intrinsics = None + # images = ImageList.from_tensors(images, self.backbone.size_divisibility, intrinsics=intrinsics) + if 'inv_intrinsics' in batched_inputs[0]: + inv_intrinsics = [x['inv_intrinsics'].to(features[0].device) for x in batched_inputs] + inv_intrinsics = torch.stack(inv_intrinsics, dim=0) + else: + inv_intrinsics = None + + # NOTE: + # gt_dense_depth = None + # if 'depth' in batched_inputs[0]: + # gt_dense_depth = [x["depth"].to(self.device) for x in batched_inputs] + # gt_dense_depth = ImageList.from_tensors( + # gt_dense_depth, self.backbone.size_divisibility, intrinsics=intrinsics + # ) + + # NOTE: directly input feature + # features = self.backbone(images.tensor) + # features = [features[f] for f in self.in_features] + + if "instances" in batched_inputs[0]: + gt_instances = [x["instances"].to(features[0].device) for x in batched_inputs] + else: + gt_instances = None + + locations = self.compute_locations(features) + logits, box2d_reg, centerness, fcos2d_extra_output = self.fcos2d_head(features) + if not self.only_box2d: + box3d_quat, box3d_ctr, box3d_depth, box3d_size, box3d_conf, dense_depth = self.fcos3d_head(features) + # NOTE: directly use inv_intrinsics + # inv_intrinsics = images.intrinsics.inverse() if images.intrinsics is not None else None + + # -------------------------------------------------------------------------- + # NuScenes predictions -- attribute / speed, computed from cls_tower output. 
+ # -------------------------------------------------------------------------- + attr_logits, speeds = [], [] + for x in fcos2d_extra_output['cls_tower_out']: + attr_logits.append(self.attr_logits(x)) + speeds.append(self.speed(x)) + + if self.training: + assert gt_instances is not None + feature_shapes = [x.shape[-2:] for x in features] + training_targets = self.prepare_targets(locations, gt_instances, feature_shapes) + # NOTE: + # if gt_dense_depth is not None: + # training_targets.update({"dense_depth": gt_dense_depth}) + + losses = {} + fcos2d_loss, fcos2d_info = self.fcos2d_loss(logits, box2d_reg, centerness, training_targets) + losses.update(fcos2d_loss) + + if not self.only_box2d: + fcos3d_loss = self.fcos3d_loss( + box3d_quat, box3d_ctr, box3d_depth, box3d_size, box3d_conf, dense_depth, inv_intrinsics, + fcos2d_info, training_targets + ) + losses.update(fcos3d_loss) + + # Nuscenes loss -- attribute / speed + nuscenes_loss = self.nuscenes_loss(attr_logits, speeds, fcos2d_info, training_targets) + losses.update(nuscenes_loss) + return losses + else: + # TODO: do not support inference now + raise NotImplementedError + pred_instances, fcos2d_info = self.fcos2d_inference( + logits, box2d_reg, centerness, locations, images.image_sizes + ) + if not self.only_box2d: + # This adds 'pred_boxes3d' and 'scores_3d' to Instances in 'pred_instances'. + self.fcos3d_inference( + box3d_quat, box3d_ctr, box3d_depth, box3d_size, box3d_conf, inv_intrinsics, pred_instances, + fcos2d_info + ) + score_key = "scores_3d" + else: + score_key = "scores" + + # This adds 'pred_attributes', 'pred_speed' to Instances in 'pred_instances'. + self.nuscenes_inference(attr_logits, speeds, pred_instances, fcos2d_info) + + # Transpose to "image-first", i.e. (B, L) + pred_instances = list(zip(*pred_instances)) + pred_instances = [Instances.cat(instances) for instances in pred_instances] + + # 2D NMS and pick top-K. + if self.do_nms: + pred_instances = self.fcos2d_inference.nms_and_top_k(pred_instances, score_key) + + if not self.only_box2d and self.do_bev_nms: + # Bird-eye-view NMS. + dummy_group_idxs = {i: [i] for i, _ in enumerate(pred_instances)} + if 'pose' in batched_inputs[0]: + poses = [x['pose'] for x in batched_inputs] + else: + poses = [x['extrinsics'] for x in batched_inputs] + pred_instances = nuscenes_sample_aggregate( + pred_instances, + dummy_group_idxs, + self.num_classes, + poses, + iou_threshold=self.bev_nms_iou_thresh, + include_boxes3d_global=False + ) + + if self.postprocess_in_inference: + processed_results = [] + for results_per_image, input_per_image, image_size in \ + zip(pred_instances, batched_inputs, images.image_sizes): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + r = resize_instances(results_per_image, height, width) + processed_results.append({"instances": r}) + + # ---------------------------------------------------------- + # NuScenes specific: cross-image (i.e. sample-level) BEV NMS. 
+ # ---------------------------------------------------------- + sample_tokens = [x['sample_token'] for x in batched_inputs] + group_idxs = get_group_idxs(sample_tokens, self.num_images_per_sample) + + instances = [x['instances'] for x in processed_results] + global_poses = [x['pose'] for x in batched_inputs] + + filtered_instances = nuscenes_sample_aggregate( + instances, + group_idxs, + self.num_classes, + global_poses, + self.bev_nms_iou_thresh, + max_num_dets_per_sample=self.max_num_dets_per_sample + ) + processed_results = [{"instances": x} for x in filtered_instances] + else: + processed_results = [{"instances": x} for x in pred_instances] + + return processed_results diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/prepare_targets.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/prepare_targets.py new file mode 100644 index 0000000..91f76b5 --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/modeling/prepare_targets.py @@ -0,0 +1,242 @@ +# Copyright 2021 Toyota Research Institute. All rights reserved. +import torch + +from mmcv.layers import cat + +from adzoo.bevformer.mmdet3d_plugin.dd3d.structures.boxes3d import Boxes3D + +INF = 100000000. + + +class DD3DTargetPreparer(): + def __init__(self, + num_classes, + input_shape, + box3d_on=True, + center_sample=True, + pos_radius=1.5, + sizes_of_interest=None): + self.num_classes = num_classes + self.center_sample = center_sample + self.strides = [shape.stride for shape in input_shape] + self.radius = pos_radius + self.dd3d_enabled = box3d_on + + # generate sizes of interest + # NOTE: + # soi = [] + # prev_size = -1 + # for s in sizes_of_interest: + # soi.append([prev_size, s]) + # prev_size = s + # soi.append([prev_size, INF]) + self.sizes_of_interest = sizes_of_interest + + def __call__(self, locations, gt_instances, feature_shapes): + num_loc_list = [len(loc) for loc in locations] + + # compute locations to size ranges + loc_to_size_range = [] + for l, loc_per_level in enumerate(locations): + loc_to_size_range_per_level = loc_per_level.new_tensor(self.sizes_of_interest[l]) + loc_to_size_range.append(loc_to_size_range_per_level[None].expand(num_loc_list[l], -1)) + + loc_to_size_range = torch.cat(loc_to_size_range, dim=0) + locations = torch.cat(locations, dim=0) + + training_targets = self.compute_targets_for_locations(locations, gt_instances, loc_to_size_range, num_loc_list) + + training_targets["locations"] = [locations.clone() for _ in range(len(gt_instances))] + training_targets["im_inds"] = [ + locations.new_ones(locations.size(0), dtype=torch.long) * i for i in range(len(gt_instances)) + ] + + box2d = training_targets.pop("box2d", None) + + # transpose im first training_targets to level first ones + training_targets = {k: self._transpose(v, num_loc_list) for k, v in training_targets.items() if k != "box2d"} + + training_targets["fpn_levels"] = [ + loc.new_ones(len(loc), dtype=torch.long) * level for level, loc in enumerate(training_targets["locations"]) + ] + + # Flatten targets: (L x B x H x W, TARGET_SIZE) + labels = cat([x.reshape(-1) for x in training_targets["labels"]]) + box2d_reg_targets = cat([x.reshape(-1, 4) for x in training_targets["box2d_reg"]]) + + target_inds = cat([x.reshape(-1) for x in training_targets["target_inds"]]) + locations = cat([x.reshape(-1, 2) for x in training_targets["locations"]]) + im_inds = cat([x.reshape(-1) for x in training_targets["im_inds"]]) + fpn_levels = cat([x.reshape(-1) for x in training_targets["fpn_levels"]]) + + pos_inds = torch.nonzero(labels != 
self.num_classes).squeeze(1) + + targets = { + "labels": labels, + "box2d_reg_targets": box2d_reg_targets, + "locations": locations, + "target_inds": target_inds, + "im_inds": im_inds, + "fpn_levels": fpn_levels, + "pos_inds": pos_inds + } + + if self.dd3d_enabled: + box3d_targets = Boxes3D.cat(training_targets["box3d"]) + targets.update({"box3d_targets": box3d_targets}) + + if box2d is not None: + # Original format is B x L x (H x W, 4) + # Need to be in L x (B, 4, H, W). + batched_box2d = [] + for lvl, per_lvl_box2d in enumerate(zip(*box2d)): + # B x (H x W, 4) + h, w = feature_shapes[lvl] + batched_box2d_lvl = torch.stack([x.T.reshape(4, h, w) for x in per_lvl_box2d], dim=0) + batched_box2d.append(batched_box2d_lvl) + targets.update({"batched_box2d": batched_box2d}) + + return targets + + def compute_targets_for_locations(self, locations, targets, size_ranges, num_loc_list): + labels = [] + box2d_reg = [] + + if self.dd3d_enabled: + box3d = [] + + target_inds = [] + xs, ys = locations[:, 0], locations[:, 1] + + num_targets = 0 + for im_i in range(len(targets)): + targets_per_im = targets[im_i] + bboxes = targets_per_im.gt_boxes.tensor + labels_per_im = targets_per_im.gt_classes + + # no gt + if bboxes.numel() == 0: + labels.append(labels_per_im.new_zeros(locations.size(0)) + self.num_classes) + # reg_targets.append(locations.new_zeros((locations.size(0), 4))) + box2d_reg.append(locations.new_zeros((locations.size(0), 4))) + target_inds.append(labels_per_im.new_zeros(locations.size(0)) - 1) + + if self.dd3d_enabled: + box3d.append( + Boxes3D( + locations.new_zeros(locations.size(0), 4), + locations.new_zeros(locations.size(0), 2), + locations.new_zeros(locations.size(0), 1), + locations.new_zeros(locations.size(0), 3), + locations.new_zeros(locations.size(0), 3, 3), + ).to(torch.float32) + ) + continue + + area = targets_per_im.gt_boxes.area() + + l = xs[:, None] - bboxes[:, 0][None] + t = ys[:, None] - bboxes[:, 1][None] + r = bboxes[:, 2][None] - xs[:, None] + b = bboxes[:, 3][None] - ys[:, None] + # reg_targets_per_im = torch.stack([l, t, r, b], dim=2) + box2d_reg_per_im = torch.stack([l, t, r, b], dim=2) + + if self.center_sample: + is_in_boxes = self.get_sample_region(bboxes, num_loc_list, xs, ys) + else: + is_in_boxes = box2d_reg_per_im.min(dim=2)[0] > 0 + + max_reg_targets_per_im = box2d_reg_per_im.max(dim=2)[0] + # limit the regression range for each location + is_cared_in_the_level = \ + (max_reg_targets_per_im >= size_ranges[:, [0]]) & \ + (max_reg_targets_per_im <= size_ranges[:, [1]]) + + locations_to_gt_area = area[None].repeat(len(locations), 1) + locations_to_gt_area[is_in_boxes == 0] = INF + locations_to_gt_area[is_cared_in_the_level == 0] = INF + + # if there are still more than one objects for a location, + # we choose the one with minimal area + locations_to_min_area, locations_to_gt_inds = locations_to_gt_area.min(dim=1) + + box2d_reg_per_im = box2d_reg_per_im[range(len(locations)), locations_to_gt_inds] + target_inds_per_im = locations_to_gt_inds + num_targets + num_targets += len(targets_per_im) + + labels_per_im = labels_per_im[locations_to_gt_inds] + labels_per_im[locations_to_min_area == INF] = self.num_classes + + labels.append(labels_per_im) + box2d_reg.append(box2d_reg_per_im) + target_inds.append(target_inds_per_im) + + if self.dd3d_enabled: + # 3D box targets + box3d_per_im = targets_per_im.gt_boxes3d[locations_to_gt_inds] + box3d.append(box3d_per_im) + + ret = {"labels": labels, "box2d_reg": box2d_reg, "target_inds": target_inds} + if self.dd3d_enabled: + 
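+            # attach the per-image Boxes3D targets consumed by the 3D-branch losses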
ret.update({"box3d": box3d}) + + return ret + + def get_sample_region(self, boxes, num_loc_list, loc_xs, loc_ys): + center_x = boxes[..., [0, 2]].sum(dim=-1) * 0.5 + center_y = boxes[..., [1, 3]].sum(dim=-1) * 0.5 + + num_gts = boxes.shape[0] + K = len(loc_xs) + boxes = boxes[None].expand(K, num_gts, 4) + center_x = center_x[None].expand(K, num_gts) + center_y = center_y[None].expand(K, num_gts) + center_gt = boxes.new_zeros(boxes.shape) + # no gt + if center_x.numel() == 0 or center_x[..., 0].sum() == 0: + return loc_xs.new_zeros(loc_xs.shape, dtype=torch.uint8) + beg = 0 + for level, num_loc in enumerate(num_loc_list): + end = beg + num_loc + stride = self.strides[level] * self.radius + xmin = center_x[beg:end] - stride + ymin = center_y[beg:end] - stride + xmax = center_x[beg:end] + stride + ymax = center_y[beg:end] + stride + # limit sample region in gt + center_gt[beg:end, :, 0] = torch.where(xmin > boxes[beg:end, :, 0], xmin, boxes[beg:end, :, 0]) + center_gt[beg:end, :, 1] = torch.where(ymin > boxes[beg:end, :, 1], ymin, boxes[beg:end, :, 1]) + center_gt[beg:end, :, 2] = torch.where(xmax > boxes[beg:end, :, 2], boxes[beg:end, :, 2], xmax) + center_gt[beg:end, :, 3] = torch.where(ymax > boxes[beg:end, :, 3], boxes[beg:end, :, 3], ymax) + beg = end + left = loc_xs[:, None] - center_gt[..., 0] + right = center_gt[..., 2] - loc_xs[:, None] + top = loc_ys[:, None] - center_gt[..., 1] + bottom = center_gt[..., 3] - loc_ys[:, None] + center_bbox = torch.stack((left, top, right, bottom), -1) + inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0 + return inside_gt_bbox_mask + + def _transpose(self, training_targets, num_loc_list): + ''' + This function is used to transpose image first training targets to level first ones + :return: level first training targets + ''' + if isinstance(training_targets[0], Boxes3D): + for im_i in range(len(training_targets)): + # training_targets[im_i] = torch.split(training_targets[im_i], num_loc_list, dim=0) + training_targets[im_i] = training_targets[im_i].split(num_loc_list, dim=0) + + targets_level_first = [] + for targets_per_level in zip(*training_targets): + targets_level_first.append(Boxes3D.cat(targets_per_level, dim=0)) + return targets_level_first + + for im_i in range(len(training_targets)): + training_targets[im_i] = torch.split(training_targets[im_i], num_loc_list, dim=0) + + targets_level_first = [] + for targets_per_level in zip(*training_targets): + targets_level_first.append(torch.cat(targets_per_level, dim=0)) + return targets_level_first diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/structures/__init__.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/structures/__init__.py new file mode 100644 index 0000000..3857649 --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/structures/__init__.py @@ -0,0 +1,2 @@ +# Copyright 2021 Toyota Research Institute. All rights reserved. +from .image_list import ImageList diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/structures/boxes3d.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/structures/boxes3d.py new file mode 100644 index 0000000..0823602 --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/structures/boxes3d.py @@ -0,0 +1,321 @@ +# Copyright 2021 Toyota Research Institute. All rights reserved. 
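+"""3D box containers for DD3D: a generic SE(3)-parameterized box
+(``GenericBoxes3D``) and a vision-based variant (``Boxes3D``) whose translation
+is recovered from the projected center, depth, and camera intrinsics.
+
+A minimal usage sketch (illustrative only; the 10D vector layout is
+quat (4,) + tvec (3,) + size (3,), with sizes read as (w, l, h) by ``corners``):
+
+.. code-block:: python
+
+    vec = [1., 0., 0., 0.,   # identity quaternion (wxyz)
+           0., 0., 10.,      # translation: 10m along the optical axis
+           1.5, 4.0, 2.0]    # size (w, l, h)
+    boxes = GenericBoxes3D.from_vectors([vec])
+    assert tuple(boxes.corners.shape) == (1, 8, 3)
+"""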
+import numpy as np
+import torch
+from pyquaternion import Quaternion
+from torch.cuda import amp
+
+from adzoo.bevformer.mmdet3d_plugin.dd3d.utils.geometry import unproject_points2d
+import adzoo.bevformer.mmdet3d_plugin.dd3d.structures.transform3d as t3d
+
+# yapf: disable
+BOX3D_CORNER_MAPPING = [
+    [1, 1, 1, 1, -1, -1, -1, -1],
+    [1, -1, -1, 1, 1, -1, -1, 1],
+    [1, 1, -1, -1, 1, 1, -1, -1]
+]
+# yapf: enable
+
+
+def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor:
+    """
+    Convert rotations given as quaternions to rotation matrices.
+
+    Args:
+        quaternions: quaternions with real part first,
+            as tensor of shape (..., 4).
+
+    Returns:
+        Rotation matrices as tensor of shape (..., 3, 3).
+    """
+    r, i, j, k = torch.unbind(quaternions, -1)
+    two_s = 2.0 / (quaternions * quaternions).sum(-1)
+
+    o = torch.stack(
+        (
+            1 - two_s * (j * j + k * k),
+            two_s * (i * j - k * r),
+            two_s * (i * k + j * r),
+            two_s * (i * j + k * r),
+            1 - two_s * (i * i + k * k),
+            two_s * (j * k - i * r),
+            two_s * (i * k - j * r),
+            two_s * (j * k + i * r),
+            1 - two_s * (i * i + j * j),
+        ),
+        -1,
+    )
+    return o.reshape(quaternions.shape[:-1] + (3, 3))
+
+
+def _to_tensor(x, dim):
+    if isinstance(x, torch.Tensor):
+        x = x.to(torch.float32)
+    elif isinstance(x, (np.ndarray, list, tuple)):
+        x = torch.tensor(x, dtype=torch.float32)
+    elif isinstance(x, Quaternion):
+        x = torch.tensor(x.elements, dtype=torch.float32)
+    else:
+        raise ValueError(f"Unsupported type: {type(x).__name__}")
+
+    if x.ndim == 1:
+        x = x.reshape(-1, dim)
+    elif x.ndim > 2:
+        raise ValueError(f"Invalid shape of input: {x.shape}")
+    return x
+
+
+class GenericBoxes3D:
+    def __init__(self, quat, tvec, size):
+        self.quat = _to_tensor(quat, dim=4)
+        self._tvec = _to_tensor(tvec, dim=3)
+        self.size = _to_tensor(size, dim=3)
+
+    @property
+    def tvec(self):
+        return self._tvec
+
+    @property
+    @amp.autocast(enabled=False)
+    def corners(self):
+        # TF32 matmuls are not precise enough here; save both flags so each can
+        # be restored to its own previous value afterwards.
+        allow_tf32_matmul = torch.backends.cuda.matmul.allow_tf32
+        allow_tf32_cudnn = torch.backends.cudnn.allow_tf32
+        torch.backends.cuda.matmul.allow_tf32 = False
+        torch.backends.cudnn.allow_tf32 = False
+
+        translation = t3d.Translate(self.tvec, device=self.device)
+
+        R = quaternion_to_matrix(self.quat)
+        rotation = t3d.Rotate(R=R.transpose(1, 2), device=self.device)  # Rotate expects the row-vector convention, hence the transpose.
+
+        tfm = rotation.compose(translation)
+
+        _corners = 0.5 * self.quat.new_tensor(BOX3D_CORNER_MAPPING).T
+        # corners_in_obj_frame = self.size.unsqueeze(1) * _corners.unsqueeze(0)
+        lwh = self.size[:, [1, 0, 2]]  # wlh -> lwh
+        corners_in_obj_frame = lwh.unsqueeze(1) * _corners.unsqueeze(0)
+
+        corners3d = tfm.transform_points(corners_in_obj_frame)
+        torch.backends.cuda.matmul.allow_tf32 = allow_tf32_matmul
+        torch.backends.cudnn.allow_tf32 = allow_tf32_cudnn
+        return corners3d
+
+    @classmethod
+    def from_vectors(cls, vecs, device="cpu"):
+        """
+        Parameters
+        ----------
+        vecs: Iterable[np.ndarray]
+            Iterable of 10D pose representation (quat, tvec, size).
+ """ + quats, tvecs, sizes = [], [], [] + for vec in vecs: + quat = vec[:4] + tvec = vec[4:7] + size = vec[7:] + + quats.append(quat) + tvecs.append(tvec) + sizes.append(size) + + quats = torch.as_tensor(quats, dtype=torch.float32, device=device) + tvecs = torch.as_tensor(tvecs, dtype=torch.float32, device=device) + sizes = torch.as_tensor(sizes, device=device) + + return cls(quats, tvecs, sizes) + + @classmethod + def cat(cls, boxes_list, dim=0): + + assert isinstance(boxes_list, (list, tuple)) + if len(boxes_list) == 0: + return cls(torch.empty(0), torch.empty(0), torch.empty(0)) + assert all([isinstance(box, GenericBoxes3D) for box in boxes_list]) + + # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input + quat = torch.cat([b.quat for b in boxes_list], dim=dim) + tvec = torch.cat([b.tvec for b in boxes_list], dim=dim) + size = torch.cat([b.size for b in boxes_list], dim=dim) + + cat_boxes = cls(quat, tvec, size) + return cat_boxes + + def split(self, split_sizes, dim=0): + assert sum(split_sizes) == len(self) + quat_list = torch.split(self.quat, split_sizes, dim=dim) + tvec_list = torch.split(self.tvec, split_sizes, dim=dim) + size_list = torch.split(self.size, split_sizes, dim=dim) + + return [GenericBoxes3D(*x) for x in zip(quat_list, tvec_list, size_list)] + + def __getitem__(self, item): + """ + """ + if isinstance(item, int): + return GenericBoxes3D(self.quat[item].view(1, -1), self.tvec[item].view(1, -1), self.size[item].view(1, -1)) + + quat = self.quat[item] + tvec = self.tvec[item] + size = self.size[item] + + assert quat.dim() == 2, "Indexing on Boxes3D with {} failed to return a matrix!".format(item) + assert tvec.dim() == 2, "Indexing on Boxes3D with {} failed to return a matrix!".format(item) + assert size.dim() == 2, "Indexing on Boxes3D with {} failed to return a matrix!".format(item) + + return GenericBoxes3D(quat, tvec, size) + + def __len__(self): + assert len(self.quat) == len(self.tvec) == len(self.size) + return self.quat.shape[0] + + def clone(self): + """ + """ + return GenericBoxes3D(self.quat.clone(), self.tvec.clone(), self.size.clone()) + + def vectorize(self): + xyz = self.tvec + return torch.cat([self.quat, xyz, self.size], dim=1) + + @property + def device(self): + return self.quat.device + + def to(self, *args, **kwargs): + quat = self.quat.to(*args, **kwargs) + tvec = self.tvec.to(*args, **kwargs) + size = self.size.to(*args, **kwargs) + return GenericBoxes3D(quat, tvec, size) + + +class Boxes3D(GenericBoxes3D): + """Vision-based 3D box container. + + The tvec is computed from projected center, depth, and intrinsics. + """ + def __init__(self, quat, proj_ctr, depth, size, inv_intrinsics): + self.quat = quat + self.proj_ctr = proj_ctr + self.depth = depth + self.size = size + self.inv_intrinsics = inv_intrinsics + + @property + def tvec(self): + ray = unproject_points2d(self.proj_ctr, self.inv_intrinsics) + xyz = ray * self.depth + return xyz + + @classmethod + def from_vectors(cls, vecs, intrinsics, device="cpu"): + """ + Parameters + ---------- + vecs: Iterable[np.ndarray] + Iterable of 10D pose representation. + + intrinsics: np.ndarray + (3, 3) intrinsics matrix. 
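+
+        Example
+        -------
+        An illustrative sketch (values are arbitrary):
+
+        >>> import numpy as np
+        >>> K = np.float32([[720., 0., 640.], [0., 720., 360.], [0., 0., 1.]])
+        >>> vec = np.float32([1., 0., 0., 0., 0., 0., 10., 1.5, 4.0, 2.0])
+        >>> boxes = Boxes3D.from_vectors([vec], K)
+        >>> tuple(boxes.tvec.shape)
+        (1, 3)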
+ """ + if len(vecs) == 0: + quats = torch.as_tensor([], dtype=torch.float32, device=device).view(-1, 4) + proj_ctrs = torch.as_tensor([], dtype=torch.float32, device=device).view(-1, 2) + depths = torch.as_tensor([], dtype=torch.float32, device=device).view(-1, 1) + sizes = torch.as_tensor([], dtype=torch.float32, device=device).view(-1, 3) + inv_intrinsics = torch.as_tensor([], dtype=torch.float32, device=device).view(-1, 3, 3) + return cls(quats, proj_ctrs, depths, sizes, inv_intrinsics) + + quats, proj_ctrs, depths, sizes = [], [], [], [] + for vec in vecs: + quat = vec[:4] + + proj_ctr = intrinsics.dot(vec[4:7]) + proj_ctr = proj_ctr[:2] / proj_ctr[-1] + + depth = vec[6:7] + + size = vec[7:] + + quats.append(quat) + proj_ctrs.append(proj_ctr) + depths.append(depth) + sizes.append(size) + + quats = torch.as_tensor(np.array(quats), dtype=torch.float32, device=device) + proj_ctrs = torch.as_tensor(np.array(proj_ctrs), dtype=torch.float32, device=device) + depths = torch.as_tensor(np.array(depths), dtype=torch.float32, device=device) + sizes = torch.as_tensor(np.array(sizes), dtype=torch.float32, device=device) + + inv_intrinsics = np.linalg.inv(intrinsics) + inv_intrinsics = torch.as_tensor(inv_intrinsics[None, ...], device=device).expand(len(vecs), 3, 3) + + return cls(quats, proj_ctrs, depths, sizes, inv_intrinsics) + + @classmethod + def cat(cls, boxes_list, dim=0): + + assert isinstance(boxes_list, (list, tuple)) + if len(boxes_list) == 0: + return cls(torch.empty(0), torch.empty(0), torch.empty(0), torch.empty(0), torch.empty(0)) + assert all([isinstance(box, Boxes3D) for box in boxes_list]) + + # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input + quat = torch.cat([b.quat for b in boxes_list], dim=dim) + proj_ctr = torch.cat([b.proj_ctr for b in boxes_list], dim=dim) + depth = torch.cat([b.depth for b in boxes_list], dim=dim) + size = torch.cat([b.size for b in boxes_list], dim=dim) + inv_intrinsics = torch.cat([b.inv_intrinsics for b in boxes_list], dim=dim) + + cat_boxes = cls(quat, proj_ctr, depth, size, inv_intrinsics) + return cat_boxes + + def split(self, split_sizes, dim=0): + assert sum(split_sizes) == len(self) + quat_list = torch.split(self.quat, split_sizes, dim=dim) + proj_ctr_list = torch.split(self.proj_ctr, split_sizes, dim=dim) + depth_list = torch.split(self.depth, split_sizes, dim=dim) + size_list = torch.split(self.size, split_sizes, dim=dim) + inv_K_list = torch.split(self.inv_intrinsics, split_sizes, dim=dim) + + return [Boxes3D(*x) for x in zip(quat_list, proj_ctr_list, depth_list, size_list, inv_K_list)] + + def __getitem__(self, item): + """ + """ + if isinstance(item, int): + return Boxes3D( + self.quat[item].view(1, -1), self.proj_ctr[item].view(1, -1), self.depth[item].view(1, -1), + self.size[item].view(1, -1), self.inv_intrinsics[item].view(1, 3, 3) + ) + + quat = self.quat[item] + ctr = self.proj_ctr[item] + depth = self.depth[item] + size = self.size[item] + inv_K = self.inv_intrinsics[item] + + assert quat.dim() == 2, "Indexing on Boxes3D with {} failed to return a matrix!".format(item) + assert ctr.dim() == 2, "Indexing on Boxes3D with {} failed to return a matrix!".format(item) + assert depth.dim() == 2, "Indexing on Boxes3D with {} failed to return a matrix!".format(item) + assert size.dim() == 2, "Indexing on Boxes3D with {} failed to return a matrix!".format(item) + assert inv_K.dim() == 3, "Indexing on Boxes3D with {} failed to return a matrix!".format(item) + assert inv_K.shape[1:] == (3, 3), "Indexing on 
Boxes3D with {} failed to return a matrix!".format(item) + + return Boxes3D(quat, ctr, depth, size, inv_K) + + def __len__(self): + assert len(self.quat) == len(self.proj_ctr) == len(self.depth) == len(self.size) == len(self.inv_intrinsics) + return self.quat.shape[0] + + def clone(self): + """ + """ + return Boxes3D( + self.quat.clone(), self.proj_ctr.clone(), self.depth.clone(), self.size.clone(), self.inv_intrinsics.clone() + ) + + def to(self, *args, **kwargs): + quat = self.quat.to(*args, **kwargs) + proj_ctr = self.proj_ctr.to(*args, **kwargs) + depth = self.depth.to(*args, **kwargs) + size = self.size.to(*args, **kwargs) + inv_K = self.inv_intrinsics.to(*args, **kwargs) + return Boxes3D(quat, proj_ctr, depth, size, inv_K) diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/structures/image_list.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/structures/image_list.py new file mode 100644 index 0000000..f27b3c0 --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/structures/image_list.py @@ -0,0 +1,157 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +# Copyright 2021 Toyota Research Institute. All rights reserved. +from __future__ import division + +from typing import Any, List, Sequence, Tuple + +import torch +from torch import device +from torch.nn import functional as F + +TORCH_VERSION = tuple(int(x) for x in torch.__version__.split(".")[:2]) + +def _as_tensor(x: Tuple[int, int]) -> torch.Tensor: + """ + An equivalent of `torch.as_tensor`, but works under tracing if input + is a list of tensor. `torch.as_tensor` will record a constant in tracing, + but this function will use `torch.stack` instead. + """ + if torch.jit.is_scripting(): + return torch.as_tensor(x) + if isinstance(x, (list, tuple)) and all([isinstance(t, torch.Tensor) for t in x]): + return torch.stack(x) + return torch.as_tensor(x) + + +class ImageList(object): + """ + Adapted from detectron2: + https://github.com/facebookresearch/detectron2/blob/master/detectron2/structures/image_list.py) + + Key differences: + - add optional intrinsics + - add optional image path (useful for debugging) + ================================================================================================================== + + Structure that holds a list of images (of possibly + varying sizes) as a single tensor. + This works by padding the images to the same size, + and storing in a field the original sizes of each image + + Attributes: + image_sizes (list[tuple[int, int]]): each tuple is (h, w) + """ + def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]], intrinsics=None, image_paths=None): + """ + Arguments: + tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1 + image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can + be smaller than (H, W) due to padding. + """ + self.tensor = tensor + self.image_sizes = image_sizes + self._intrinsics = intrinsics + self._image_paths = image_paths + + @property + def intrinsics(self): + if torch.allclose(self._intrinsics[0], torch.eye(3, device=self._intrinsics.device)): + # TODO: torch.inverse(images.intrinsics) often return identity, when it shouldn't. Is it pytorch bug? + raise ValueError("Intrinsics is Identity.") + return self._intrinsics + + @property + def image_paths(self): + return self._image_paths + + def __len__(self) -> int: + return len(self.image_sizes) + + def __getitem__(self, idx) -> torch.Tensor: + """ + Access the individual image in its original size. 
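+
+        For example (illustrative), ``image_list[0]`` strips any padding added by
+        ``from_tensors`` and returns the first image at its recorded size.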
+
+        Args:
+            idx: int or slice
+
+        Returns:
+            Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1
+        """
+        size = self.image_sizes[idx]
+        return self.tensor[idx, ..., :size[0], :size[1]]
+
+    @torch.jit.unused
+    def to(self, *args: Any, **kwargs: Any) -> "ImageList":
+        cast_tensor = self.tensor.to(*args, **kwargs)
+        # Use the raw attribute: the `intrinsics` property raises when intrinsics
+        # are missing or identity, and `image_paths` should survive the cast.
+        return ImageList(cast_tensor, self.image_sizes, intrinsics=self._intrinsics, image_paths=self._image_paths)
+
+    @property
+    def device(self) -> device:
+        return self.tensor.device
+
+    @staticmethod
+    def from_tensors(
+        tensors: List[torch.Tensor],
+        size_divisibility: int = 0,
+        pad_value: float = 0.0,
+        intrinsics=None,
+        image_paths=None
+    ) -> "ImageList":
+        """
+        Args:
+            tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or
+                (C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded
+                to the same shape with `pad_value`.
+            size_divisibility (int): If `size_divisibility > 0`, add padding to ensure
+                the common height and width is divisible by `size_divisibility`.
+                This depends on the model and many models need a divisibility of 32.
+            pad_value (float): value to pad.
+
+        Returns:
+            an `ImageList`.
+        """
+        assert len(tensors) > 0
+        assert isinstance(tensors, (tuple, list))
+        for t in tensors:
+            assert isinstance(t, torch.Tensor), type(t)
+            assert t.shape[:-2] == tensors[0].shape[:-2], t.shape
+
+        image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors]
+        image_sizes_tensor = [_as_tensor(x) for x in image_sizes]
+        max_size = torch.stack(image_sizes_tensor).max(0).values
+
+        if size_divisibility > 1:
+            stride = size_divisibility
+            # the last two dims are H,W, both subject to divisibility requirement
+            max_size = torch.div(max_size + (stride - 1), stride, rounding_mode='floor') * stride
+
+        # handle weirdness of scripting and tracing ...
+        if torch.jit.is_scripting():
+            max_size: List[int] = max_size.to(dtype=torch.long).tolist()
+        else:
+            # https://github.com/pytorch/pytorch/issues/42448
+            if TORCH_VERSION >= (1, 7) and torch.jit.is_tracing():
+                image_sizes = image_sizes_tensor
+
+        if len(tensors) == 1:
+            # This seems slightly (2%) faster.
+            # TODO: check whether it's faster for multiple images as well
+            image_size = image_sizes[0]
+            padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]]
+            batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0)
+        else:
+            # max_size can be a tensor in tracing mode, therefore convert to list
+            batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size)
+            batched_imgs = tensors[0].new_full(batch_shape, pad_value)
+            for img, pad_img in zip(tensors, batched_imgs):
+                pad_img[..., :img.shape[-2], :img.shape[-1]].copy_(img)
+
+        if intrinsics is not None:
+            assert isinstance(intrinsics, (tuple, list))
+            assert len(intrinsics) == len(tensors)
+            intrinsics = torch.stack(intrinsics, dim=0)
+
+        if image_paths is not None:
+            assert len(image_paths) == len(tensors)
+
+        return ImageList(batched_imgs.contiguous(), image_sizes, intrinsics, image_paths)
diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/structures/pose.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/structures/pose.py
new file mode 100644
index 0000000..2746f92
--- /dev/null
+++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/structures/pose.py
@@ -0,0 +1,164 @@
+# Copyright 2021 Toyota Research Institute. All rights reserved.
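+"""SE(3) pose container used throughout the DD3D plugin.
+
+A minimal sanity-check sketch (illustrative only):
+
+.. code-block:: python
+
+    import numpy as np
+    p = Pose(wxyz=np.float32([1., 0., 0., 0.]), tvec=np.float32([1., 2., 3.]))
+    identity = p.inverse() * p                       # compound two poses
+    assert np.allclose(identity.matrix, np.eye(4), atol=1e-6)
+    X = np.zeros((5, 3), dtype=np.float32)
+    assert np.allclose(p * X, np.tile(p.tvec, (5, 1)))  # transform a point set
+"""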
+import numpy as np +from pyquaternion import Quaternion + + +class Pose: + """SE(3) rigid transform class that allows compounding of 6-DOF poses + and provides common transformations that are commonly seen in geometric problems. + """ + def __init__(self, wxyz=np.float32([1., 0., 0., 0.]), tvec=np.float32([0., 0., 0.])): + """Initialize a Pose with Quaternion and 3D Position + + Parameters + ---------- + wxyz: np.float32 or Quaternion (default: np.float32([1,0,0,0])) + Quaternion/Rotation (wxyz) + + tvec: np.float32 (default: np.float32([0,0,0])) + Translation (xyz) + """ + assert isinstance(wxyz, (np.ndarray, Quaternion)) + assert isinstance(tvec, np.ndarray) + + if isinstance(wxyz, np.ndarray): + assert np.abs(1.0 - np.linalg.norm(wxyz)) < 1.0e-3 + + self.quat = Quaternion(wxyz) + self.tvec = tvec + + def __repr__(self): + formatter = {'float_kind': lambda x: '%.2f' % x} + tvec_str = np.array2string(self.tvec, formatter=formatter) + return 'wxyz: {}, tvec: ({})'.format(self.quat, tvec_str) + + def copy(self): + """Return a copy of this pose object. + + Returns + ---------- + result: Pose + Copied pose object. + """ + return self.__class__(Quaternion(self.quat), self.tvec.copy()) + + def __mul__(self, other): + """Left-multiply Pose with another Pose or 3D-Points. + + Parameters + ---------- + other: Pose or np.ndarray + 1. Pose: Identical to oplus operation. + (i.e. self_pose * other_pose) + 2. ndarray: transform [N x 3] point set + (i.e. X' = self_pose * X) + + Returns + ---------- + result: Pose or np.ndarray + Transformed pose or point cloud + """ + if isinstance(other, Pose): + assert isinstance(other, self.__class__) + t = self.quat.rotate(other.tvec) + self.tvec + q = self.quat * other.quat + return self.__class__(q, t) + elif isinstance(other, np.ndarray): + assert other.shape[-1] == 3, 'Point cloud is not 3-dimensional' + X = np.hstack([other, np.ones((len(other), 1))]).T + return (np.dot(self.matrix, X).T)[:, :3] + else: + return NotImplemented + + def __rmul__(self, other): + raise NotImplementedError('Right multiply not implemented yet!') + + def inverse(self): + """Returns a new Pose that corresponds to the + inverse of this one. + + Returns + ---------- + result: Pose + Inverted pose + """ + qinv = self.quat.inverse + return self.__class__(qinv, qinv.rotate(-self.tvec)) + + @property + def matrix(self): + """Returns a 4x4 homogeneous matrix of the form [R t; 0 1] + + Returns + ---------- + result: np.ndarray + 4x4 homogeneous matrix + """ + result = self.quat.transformation_matrix + result[:3, 3] = self.tvec + return result + + @property + def rotation_matrix(self): + """Returns the 3x3 rotation matrix (R) + + Returns + ---------- + result: np.ndarray + 3x3 rotation matrix + """ + result = self.quat.transformation_matrix + return result[:3, :3] + + @property + def rotation(self): + """Return the rotation component of the pose as a Quaternion object. + + Returns + ---------- + self.quat: Quaternion + Rotation component of the Pose object. + """ + return self.quat + + @property + def translation(self): + """Return the translation component of the pose as a np.ndarray. + + Returns + ---------- + self.tvec: np.ndarray + Translation component of the Pose object. 
+ """ + return self.tvec + + @classmethod + def from_matrix(cls, transformation_matrix): + """Initialize pose from 4x4 transformation matrix + + Parameters + ---------- + transformation_matrix: np.ndarray + 4x4 containing rotation/translation + + Returns + ------- + Pose + """ + return cls(wxyz=Quaternion(matrix=transformation_matrix[:3, :3]), tvec=np.float32(transformation_matrix[:3, 3])) + + @classmethod + def from_rotation_translation(cls, rotation_matrix, tvec): + """Initialize pose from rotation matrix and translation vector. + + Parameters + ---------- + rotation_matrix : np.ndarray + 3x3 rotation matrix + tvec : np.ndarray + length-3 translation vector + """ + return cls(wxyz=Quaternion(matrix=rotation_matrix), tvec=np.float64(tvec)) + + def __eq__(self, other): + return self.quat == other.quat and (self.tvec == other.tvec).all() diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/structures/transform3d.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/structures/transform3d.py new file mode 100644 index 0000000..36133d0 --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/structures/transform3d.py @@ -0,0 +1,896 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import math +import warnings +from typing import List, Optional, Union + +import torch + +Device = Union[str, torch.device] + + +def make_device(device: Device) -> torch.device: + """ + Makes an actual torch.device object from the device specified as + either a string or torch.device object. If the device is `cuda` without + a specific index, the index of the current device is assigned. + + Args: + device: Device (as str or torch.device) + + Returns: + A matching torch.device object + """ + device = torch.device(device) if isinstance(device, str) else device + if device.type == "cuda" and device.index is None: # pyre-ignore[16] + # If cuda but with no index, then the current cuda device is indicated. + # In that case, we fix to that device + device = torch.device(f"cuda:{torch.cuda.current_device()}") + return device + + +def get_device(x, device: Optional[Device] = None) -> torch.device: + """ + Gets the device of the specified variable x if it is a tensor, or + falls back to a default CPU device otherwise. Allows overriding by + providing an explicit device. + + Args: + x: a torch.Tensor to get the device from or another type + device: Device (as str or torch.device) to fall back to + + Returns: + A matching torch.device object + """ + + # User overrides device + if device is not None: + return make_device(device) + + # Set device based on input tensor + if torch.is_tensor(x): + return x.device + + # Default device is cpu + return torch.device("cpu") + + +def _safe_det_3x3(t: torch.Tensor): + """ + Fast determinant calculation for a batch of 3x3 matrices. + + Note, result of this function might not be the same as `torch.det()`. + The differences might be in the last significant digit. + + Args: + t: Tensor of shape (N, 3, 3). + + Returns: + Tensor of shape (N) with determinants. 
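+
+    A quick sanity check (illustrative) on a batch of identity matrices:
+
+    >>> _safe_det_3x3(torch.eye(3).expand(2, 3, 3))
+    tensor([1., 1.])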
+ """ + + det = ( + t[..., 0, 0] * (t[..., 1, 1] * t[..., 2, 2] - t[..., 1, 2] * t[..., 2, 1]) + - t[..., 0, 1] * (t[..., 1, 0] * t[..., 2, 2] - t[..., 2, 0] * t[..., 1, 2]) + + t[..., 0, 2] * (t[..., 1, 0] * t[..., 2, 1] - t[..., 2, 0] * t[..., 1, 1]) + ) + + return det + +def _axis_angle_rotation(axis: str, angle: torch.Tensor) -> torch.Tensor: + """ + Return the rotation matrices for one of the rotations about an axis + of which Euler angles describe, for each value of the angle given. + + Args: + axis: Axis label "X" or "Y or "Z". + angle: any shape tensor of Euler angles in radians + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + + cos = torch.cos(angle) + sin = torch.sin(angle) + one = torch.ones_like(angle) + zero = torch.zeros_like(angle) + + if axis == "X": + R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos) + elif axis == "Y": + R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos) + elif axis == "Z": + R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one) + else: + raise ValueError("letter must be either X, Y or Z.") + + return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3)) + +class Transform3d: + """ + A Transform3d object encapsulates a batch of N 3D transformations, and knows + how to transform points and normal vectors. Suppose that t is a Transform3d; + then we can do the following: + + .. code-block:: python + + N = len(t) + points = torch.randn(N, P, 3) + normals = torch.randn(N, P, 3) + points_transformed = t.transform_points(points) # => (N, P, 3) + normals_transformed = t.transform_normals(normals) # => (N, P, 3) + + + BROADCASTING + Transform3d objects supports broadcasting. Suppose that t1 and tN are + Transform3d objects with len(t1) == 1 and len(tN) == N respectively. Then we + can broadcast transforms like this: + + .. code-block:: python + + t1.transform_points(torch.randn(P, 3)) # => (P, 3) + t1.transform_points(torch.randn(1, P, 3)) # => (1, P, 3) + t1.transform_points(torch.randn(M, P, 3)) # => (M, P, 3) + tN.transform_points(torch.randn(P, 3)) # => (N, P, 3) + tN.transform_points(torch.randn(1, P, 3)) # => (N, P, 3) + + + COMBINING TRANSFORMS + Transform3d objects can be combined in two ways: composing and stacking. + Composing is function composition. Given Transform3d objects t1, t2, t3, + the following all compute the same thing: + + .. code-block:: python + + y1 = t3.transform_points(t2.transform_points(t1.transform_points(x))) + y2 = t1.compose(t2).compose(t3).transform_points(x) + y3 = t1.compose(t2, t3).transform_points(x) + + + Composing transforms should broadcast. + + .. code-block:: python + + if len(t1) == 1 and len(t2) == N, then len(t1.compose(t2)) == N. + + We can also stack a sequence of Transform3d objects, which represents + composition along the batch dimension; then the following should compute the + same thing. + + .. code-block:: python + + N, M = len(tN), len(tM) + xN = torch.randn(N, P, 3) + xM = torch.randn(M, P, 3) + y1 = torch.cat([tN.transform_points(xN), tM.transform_points(xM)], dim=0) + y2 = tN.stack(tM).transform_points(torch.cat([xN, xM], dim=0)) + + BUILDING TRANSFORMS + We provide convenience methods for easily building Transform3d objects + as compositions of basic transforms. + + .. 
code-block:: python + + # Scale by 0.5, then translate by (1, 2, 3) + t1 = Transform3d().scale(0.5).translate(1, 2, 3) + + # Scale each axis by a different amount, then translate, then scale + t2 = Transform3d().scale(1, 3, 3).translate(2, 3, 1).scale(2.0) + + t3 = t1.compose(t2) + tN = t1.stack(t3, t3) + + + BACKPROP THROUGH TRANSFORMS + When building transforms, we can also parameterize them by Torch tensors; + in this case we can backprop through the construction and application of + Transform objects, so they could be learned via gradient descent or + predicted by a neural network. + + .. code-block:: python + + s1_params = torch.randn(N, requires_grad=True) + t_params = torch.randn(N, 3, requires_grad=True) + s2_params = torch.randn(N, 3, requires_grad=True) + + t = Transform3d().scale(s1_params).translate(t_params).scale(s2_params) + x = torch.randn(N, 3) + y = t.transform_points(x) + loss = compute_loss(y) + loss.backward() + + with torch.no_grad(): + s1_params -= lr * s1_params.grad + t_params -= lr * t_params.grad + s2_params -= lr * s2_params.grad + + CONVENTIONS + We adopt a right-hand coordinate system, meaning that rotation about an axis + with a positive angle results in a counter clockwise rotation. + + This class assumes that transformations are applied on inputs which + are row vectors. The internal representation of the Nx4x4 transformation + matrix is of the form: + + .. code-block:: python + + M = [ + [Rxx, Ryx, Rzx, 0], + [Rxy, Ryy, Rzy, 0], + [Rxz, Ryz, Rzz, 0], + [Tx, Ty, Tz, 1], + ] + + To apply the transformation to points which are row vectors, the M matrix + can be pre multiplied by the points: + + .. code-block:: python + + points = [[0, 1, 2]] # (1 x 3) xyz coordinates of a point + transformed_points = points * M + + """ + + def __init__( + self, + dtype: torch.dtype = torch.float32, + device: Device = "cpu", + matrix: Optional[torch.Tensor] = None, + ) -> None: + """ + Args: + dtype: The data type of the transformation matrix. + to be used if `matrix = None`. + device: The device for storing the implemented transformation. + If `matrix != None`, uses the device of input `matrix`. + matrix: A tensor of shape (4, 4) or of shape (minibatch, 4, 4) + representing the 4x4 3D transformation matrix. + If `None`, initializes with identity using + the specified `device` and `dtype`. + """ + + if matrix is None: + self._matrix = torch.eye(4, dtype=dtype, device=device).view(1, 4, 4) + else: + if matrix.ndim not in (2, 3): + raise ValueError('"matrix" has to be a 2- or a 3-dimensional tensor.') + if matrix.shape[-2] != 4 or matrix.shape[-1] != 4: + raise ValueError( + '"matrix" has to be a tensor of shape (minibatch, 4, 4)' + ) + # set dtype and device from matrix + dtype = matrix.dtype + device = matrix.device + self._matrix = matrix.view(-1, 4, 4) + + self._transforms = [] # store transforms to compose + self._lu = None + self.device = make_device(device) + self.dtype = dtype + + def __len__(self) -> int: + return self.get_matrix().shape[0] + + def __getitem__( + self, index: Union[int, List[int], slice, torch.Tensor] + ) -> "Transform3d": + """ + Args: + index: Specifying the index of the transform to retrieve. + Can be an int, slice, list of ints, boolean, long tensor. + Supports negative indices. + + Returns: + Transform3d object with selected transforms. The tensors are not cloned. 
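+
+        For example (illustrative), slicing an 8-element batch yields a
+        3-element batch:
+
+        .. code-block:: python
+
+            t = Transform3d(matrix=torch.eye(4).repeat(8, 1, 1))
+            assert len(t[2:5]) == 3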
+ """ + if isinstance(index, int): + index = [index] + return self.__class__(matrix=self.get_matrix()[index]) + + def compose(self, *others: "Transform3d") -> "Transform3d": + """ + Return a new Transform3d representing the composition of self with the + given other transforms, which will be stored as an internal list. + + Args: + *others: Any number of Transform3d objects + + Returns: + A new Transform3d with the stored transforms + """ + out = Transform3d(dtype=self.dtype, device=self.device) + out._matrix = self._matrix.clone() + for other in others: + if not isinstance(other, Transform3d): + msg = "Only possible to compose Transform3d objects; got %s" + raise ValueError(msg % type(other)) + out._transforms = self._transforms + list(others) + return out + + def get_matrix(self) -> torch.Tensor: + """ + Return a matrix which is the result of composing this transform + with others stored in self.transforms. Where necessary transforms + are broadcast against each other. + For example, if self.transforms contains transforms t1, t2, and t3, and + given a set of points x, the following should be true: + + .. code-block:: python + + y1 = t1.compose(t2, t3).transform(x) + y2 = t3.transform(t2.transform(t1.transform(x))) + y1.get_matrix() == y2.get_matrix() + + Returns: + A transformation matrix representing the composed inputs. + """ + composed_matrix = self._matrix.clone() + if len(self._transforms) > 0: + for other in self._transforms: + other_matrix = other.get_matrix() + composed_matrix = _broadcast_bmm(composed_matrix, other_matrix) + return composed_matrix + + def _get_matrix_inverse(self) -> torch.Tensor: + """ + Return the inverse of self._matrix. + """ + return torch.inverse(self._matrix) + + def inverse(self, invert_composed: bool = False) -> "Transform3d": + """ + Returns a new Transform3d object that represents an inverse of the + current transformation. + + Args: + invert_composed: + - True: First compose the list of stored transformations + and then apply inverse to the result. This is + potentially slower for classes of transformations + with inverses that can be computed efficiently + (e.g. rotations and translations). + - False: Invert the individual stored transformations + independently without composing them. + + Returns: + A new Transform3d object containing the inverse of the original + transformation. + """ + + tinv = Transform3d(dtype=self.dtype, device=self.device) + + if invert_composed: + # first compose then invert + tinv._matrix = torch.inverse(self.get_matrix()) + else: + # self._get_matrix_inverse() implements efficient inverse + # of self._matrix + i_matrix = self._get_matrix_inverse() + + # 2 cases: + if len(self._transforms) > 0: + # a) Either we have a non-empty list of transforms: + # Here we take self._matrix and append its inverse at the + # end of the reverted _transforms list. After composing + # the transformations with get_matrix(), this correctly + # right-multiplies by the inverse of self._matrix + # at the end of the composition. + tinv._transforms = [t.inverse() for t in reversed(self._transforms)] + last = Transform3d(dtype=self.dtype, device=self.device) + last._matrix = i_matrix + tinv._transforms.append(last) + else: + # b) Or there are no stored transformations + # we just set inverted matrix + tinv._matrix = i_matrix + + return tinv + + def stack(self, *others: "Transform3d") -> "Transform3d": + """ + Return a new batched Transform3d representing the batch elements from + self and all the given other transforms all batched together. 
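+
+        For example (illustrative), stacking a length-1 and a length-2
+        transform yields a length-3 batch:
+
+        .. code-block:: python
+
+            t1 = Transform3d()
+            t2 = Transform3d(matrix=torch.eye(4).repeat(2, 1, 1))
+            assert len(t1.stack(t2)) == 3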
+ + Args: + *others: Any number of Transform3d objects + + Returns: + A new Transform3d. + """ + transforms = [self] + list(others) + matrix = torch.cat([t.get_matrix() for t in transforms], dim=0) + out = Transform3d(dtype=self.dtype, device=self.device) + out._matrix = matrix + return out + + def transform_points(self, points, eps: Optional[float] = None) -> torch.Tensor: + """ + Use this transform to transform a set of 3D points. Assumes row major + ordering of the input points. + + Args: + points: Tensor of shape (P, 3) or (N, P, 3) + eps: If eps!=None, the argument is used to clamp the + last coordinate before performing the final division. + The clamping corresponds to: + last_coord := (last_coord.sign() + (last_coord==0)) * + torch.clamp(last_coord.abs(), eps), + i.e. the last coordinates that are exactly 0 will + be clamped to +eps. + + Returns: + points_out: points of shape (N, P, 3) or (P, 3) depending + on the dimensions of the transform + """ + points_batch = points.clone() + if points_batch.dim() == 2: + points_batch = points_batch[None] # (P, 3) -> (1, P, 3) + if points_batch.dim() != 3: + msg = "Expected points to have dim = 2 or dim = 3: got shape %r" + raise ValueError(msg % repr(points.shape)) + + N, P, _3 = points_batch.shape + ones = torch.ones(N, P, 1, dtype=points.dtype, device=points.device) + points_batch = torch.cat([points_batch, ones], dim=2) + + composed_matrix = self.get_matrix() + points_out = _broadcast_bmm(points_batch, composed_matrix) + denom = points_out[..., 3:] # denominator + if eps is not None: + denom_sign = denom.sign() + (denom == 0.0).type_as(denom) + denom = denom_sign * torch.clamp(denom.abs(), eps) + points_out = points_out[..., :3] / denom + + # When transform is (1, 4, 4) and points is (P, 3) return + # points_out of shape (P, 3) + if points_out.shape[0] == 1 and points.dim() == 2: + points_out = points_out.reshape(points.shape) + + return points_out + + def transform_normals(self, normals) -> torch.Tensor: + """ + Use this transform to transform a set of normal vectors. + + Args: + normals: Tensor of shape (P, 3) or (N, P, 3) + + Returns: + normals_out: Tensor of shape (P, 3) or (N, P, 3) depending + on the dimensions of the transform + """ + if normals.dim() not in [2, 3]: + msg = "Expected normals to have dim = 2 or dim = 3: got shape %r" + raise ValueError(msg % (normals.shape,)) + composed_matrix = self.get_matrix() + + # TODO: inverse is bad! Solve a linear system instead + mat = composed_matrix[:, :3, :3] + normals_out = _broadcast_bmm(normals, mat.transpose(1, 2).inverse()) + + # This doesn't pass unit tests. 
TODO investigate further + # if self._lu is None: + # self._lu = self._matrix[:, :3, :3].transpose(1, 2).lu() + # normals_out = normals.lu_solve(*self._lu) + + # When transform is (1, 4, 4) and normals is (P, 3) return + # normals_out of shape (P, 3) + if normals_out.shape[0] == 1 and normals.dim() == 2: + normals_out = normals_out.reshape(normals.shape) + + return normals_out + + def translate(self, *args, **kwargs) -> "Transform3d": + return self.compose( + Translate(device=self.device, dtype=self.dtype, *args, **kwargs) + ) + + def scale(self, *args, **kwargs) -> "Transform3d": + return self.compose( + Scale(device=self.device, dtype=self.dtype, *args, **kwargs) + ) + + def rotate(self, *args, **kwargs) -> "Transform3d": + return self.compose( + Rotate(device=self.device, dtype=self.dtype, *args, **kwargs) + ) + + def rotate_axis_angle(self, *args, **kwargs) -> "Transform3d": + return self.compose( + RotateAxisAngle(device=self.device, dtype=self.dtype, *args, **kwargs) + ) + + def clone(self) -> "Transform3d": + """ + Deep copy of Transforms object. All internal tensors are cloned + individually. + + Returns: + new Transforms object. + """ + other = Transform3d(dtype=self.dtype, device=self.device) + if self._lu is not None: + other._lu = [elem.clone() for elem in self._lu] + other._matrix = self._matrix.clone() + other._transforms = [t.clone() for t in self._transforms] + return other + + def to( + self, + device: Device, + copy: bool = False, + dtype: Optional[torch.dtype] = None, + ) -> "Transform3d": + """ + Match functionality of torch.Tensor.to() + If copy = True or the self Tensor is on a different device, the + returned tensor is a copy of self with the desired torch.device. + If copy = False and the self Tensor already has the correct torch.device, + then self is returned. + + Args: + device: Device (as str or torch.device) for the new tensor. + copy: Boolean indicator whether or not to clone self. Default False. + dtype: If not None, casts the internal tensor variables + to a given torch.dtype. + + Returns: + Transform3d object. + """ + device_ = make_device(device) + dtype_ = self.dtype if dtype is None else dtype + skip_to = self.device == device_ and self.dtype == dtype_ + + if not copy and skip_to: + return self + + other = self.clone() + + if skip_to: + return other + + other.device = device_ + other.dtype = dtype_ + other._matrix = other._matrix.to(device=device_, dtype=dtype_) + other._transforms = [ + t.to(device_, copy=copy, dtype=dtype_) for t in other._transforms + ] + return other + + def cpu(self) -> "Transform3d": + return self.to("cpu") + + def cuda(self) -> "Transform3d": + return self.to("cuda") + + +class Translate(Transform3d): + def __init__( + self, + x, + y=None, + z=None, + dtype: torch.dtype = torch.float32, + device: Optional[Device] = None, + ) -> None: + """ + Create a new Transform3d representing 3D translations. + + Option I: Translate(xyz, dtype=torch.float32, device='cpu') + xyz should be a tensor of shape (N, 3) + + Option II: Translate(x, y, z, dtype=torch.float32, device='cpu') + Here x, y, and z will be broadcast against each other and + concatenated to form the translation. 
Each can be: + - A python scalar + - A torch scalar + - A 1D torch tensor + """ + xyz = _handle_input(x, y, z, dtype, device, "Translate") + super().__init__(device=xyz.device, dtype=dtype) + N = xyz.shape[0] + + mat = torch.eye(4, dtype=dtype, device=self.device) + mat = mat.view(1, 4, 4).repeat(N, 1, 1) + mat[:, 3, :3] = xyz + self._matrix = mat + + def _get_matrix_inverse(self) -> torch.Tensor: + """ + Return the inverse of self._matrix. + """ + inv_mask = self._matrix.new_ones([1, 4, 4]) + inv_mask[0, 3, :3] = -1.0 + i_matrix = self._matrix * inv_mask + return i_matrix + + +class Scale(Transform3d): + def __init__( + self, + x, + y=None, + z=None, + dtype: torch.dtype = torch.float32, + device: Optional[Device] = None, + ) -> None: + """ + A Transform3d representing a scaling operation, with different scale + factors along each coordinate axis. + + Option I: Scale(s, dtype=torch.float32, device='cpu') + s can be one of + - Python scalar or torch scalar: Single uniform scale + - 1D torch tensor of shape (N,): A batch of uniform scale + - 2D torch tensor of shape (N, 3): Scale differently along each axis + + Option II: Scale(x, y, z, dtype=torch.float32, device='cpu') + Each of x, y, and z can be one of + - python scalar + - torch scalar + - 1D torch tensor + """ + xyz = _handle_input(x, y, z, dtype, device, "scale", allow_singleton=True) + super().__init__(device=xyz.device, dtype=dtype) + N = xyz.shape[0] + + # TODO: Can we do this all in one go somehow? + mat = torch.eye(4, dtype=dtype, device=self.device) + mat = mat.view(1, 4, 4).repeat(N, 1, 1) + mat[:, 0, 0] = xyz[:, 0] + mat[:, 1, 1] = xyz[:, 1] + mat[:, 2, 2] = xyz[:, 2] + self._matrix = mat + + def _get_matrix_inverse(self) -> torch.Tensor: + """ + Return the inverse of self._matrix. + """ + xyz = torch.stack([self._matrix[:, i, i] for i in range(4)], dim=1) + ixyz = 1.0 / xyz + imat = torch.diag_embed(ixyz, dim1=1, dim2=2) + return imat + + +class Rotate(Transform3d): + def __init__( + self, + R: torch.Tensor, + dtype: torch.dtype = torch.float32, + device: Optional[Device] = None, + orthogonal_tol: float = 1e-5, + ) -> None: + """ + Create a new Transform3d representing 3D rotation using a rotation + matrix as the input. + + Args: + R: a tensor of shape (3, 3) or (N, 3, 3) + orthogonal_tol: tolerance for the test of the orthogonality of R + + """ + device_ = get_device(R, device) + super().__init__(device=device_, dtype=dtype) + if R.dim() == 2: + R = R[None] + if R.shape[-2:] != (3, 3): + msg = "R must have shape (3, 3) or (N, 3, 3); got %s" + raise ValueError(msg % repr(R.shape)) + R = R.to(device=device_, dtype=dtype) + _check_valid_rotation_matrix(R, tol=orthogonal_tol) + N = R.shape[0] + mat = torch.eye(4, dtype=dtype, device=device_) + mat = mat.view(1, 4, 4).repeat(N, 1, 1) + mat[:, :3, :3] = R + self._matrix = mat + + def _get_matrix_inverse(self) -> torch.Tensor: + """ + Return the inverse of self._matrix. + """ + return self._matrix.permute(0, 2, 1).contiguous() + + +class RotateAxisAngle(Rotate): + def __init__( + self, + angle, + axis: str = "X", + degrees: bool = True, + dtype: torch.dtype = torch.float32, + device: Optional[Device] = None, + ) -> None: + """ + Create a new Transform3d representing 3D rotation about an axis + by an angle. + + Assuming a right-hand coordinate system, positive rotation angles result + in a counter clockwise rotation. 
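+
+        For example (an illustrative sketch), a +90 degree rotation about Z maps
+        the x-axis onto the y-axis under the row-vector convention used here:
+
+        .. code-block:: python
+
+            t = RotateAxisAngle(90.0, axis="Z")
+            y = t.transform_points(torch.tensor([[1.0, 0.0, 0.0]]))
+            # y is approximately [[0., 1., 0.]]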
+ + Args: + angle: + - A torch tensor of shape (N,) + - A python scalar + - A torch scalar + axis: + string: one of ["X", "Y", "Z"] indicating the axis about which + to rotate. + NOTE: All batch elements are rotated about the same axis. + """ + axis = axis.upper() + if axis not in ["X", "Y", "Z"]: + msg = "Expected axis to be one of ['X', 'Y', 'Z']; got %s" + raise ValueError(msg % axis) + angle = _handle_angle_input(angle, dtype, device, "RotateAxisAngle") + angle = (angle / 180.0 * math.pi) if degrees else angle + # We assume the points on which this transformation will be applied + # are row vectors. The rotation matrix returned from _axis_angle_rotation + # is for transforming column vectors. Therefore we transpose this matrix. + # R will always be of shape (N, 3, 3) + R = _axis_angle_rotation(axis, angle).transpose(1, 2) + super().__init__(device=angle.device, R=R, dtype=dtype) + + +def _handle_coord(c, dtype: torch.dtype, device: torch.device) -> torch.Tensor: + """ + Helper function for _handle_input. + + Args: + c: Python scalar, torch scalar, or 1D torch tensor + + Returns: + c_vec: 1D torch tensor + """ + if not torch.is_tensor(c): + c = torch.tensor(c, dtype=dtype, device=device) + if c.dim() == 0: + c = c.view(1) + if c.device != device or c.dtype != dtype: + c = c.to(device=device, dtype=dtype) + return c + + +def _handle_input( + x, + y, + z, + dtype: torch.dtype, + device: Optional[Device], + name: str, + allow_singleton: bool = False, +) -> torch.Tensor: + """ + Helper function to handle parsing logic for building transforms. The output + is always a tensor of shape (N, 3), but there are several types of allowed + input. + + Case I: Single Matrix + In this case x is a tensor of shape (N, 3), and y and z are None. Here just + return x. + + Case II: Vectors and Scalars + In this case each of x, y, and z can be one of the following + - Python scalar + - Torch scalar + - Torch tensor of shape (N, 1) or (1, 1) + In this case x, y and z are broadcast to tensors of shape (N, 1) + and concatenated to a tensor of shape (N, 3) + + Case III: Singleton (only if allow_singleton=True) + In this case y and z are None, and x can be one of the following: + - Python scalar + - Torch scalar + - Torch tensor of shape (N, 1) or (1, 1) + Here x will be duplicated 3 times, and we return a tensor of shape (N, 3) + + Returns: + xyz: Tensor of shape (N, 3) + """ + device_ = get_device(x, device) + # If x is actually a tensor of shape (N, 3) then just return it + if torch.is_tensor(x) and x.dim() == 2: + if x.shape[1] != 3: + msg = "Expected tensor of shape (N, 3); got %r (in %s)" + raise ValueError(msg % (x.shape, name)) + if y is not None or z is not None: + msg = "Expected y and z to be None (in %s)" % name + raise ValueError(msg) + return x.to(device=device_, dtype=dtype) + + if allow_singleton and y is None and z is None: + y = x + z = x + + # Convert all to 1D tensors + xyz = [_handle_coord(c, dtype, device_) for c in [x, y, z]] + + # Broadcast and concatenate + sizes = [c.shape[0] for c in xyz] + N = max(sizes) + for c in xyz: + if c.shape[0] != 1 and c.shape[0] != N: + msg = "Got non-broadcastable sizes %r (in %s)" % (sizes, name) + raise ValueError(msg) + xyz = [c.expand(N) for c in xyz] + xyz = torch.stack(xyz, dim=1) + return xyz + + +def _handle_angle_input( + x, dtype: torch.dtype, device: Optional[Device], name: str +) -> torch.Tensor: + """ + Helper function for building a rotation function using angles. + The output is always of shape (N,). 
+
+    The input can be one of:
+        - Torch tensor of shape (N,)
+        - Python scalar
+        - Torch scalar
+    """
+    device_ = get_device(x, device)
+    if torch.is_tensor(x) and x.dim() > 1:
+        msg = "Expected tensor of shape (N,); got %r (in %s)"
+        raise ValueError(msg % (x.shape, name))
+    else:
+        return _handle_coord(x, dtype, device_)
+
+
+def _broadcast_bmm(a, b) -> torch.Tensor:
+    """
+    Batch multiply two matrices and broadcast if necessary.
+
+    Args:
+        a: torch tensor of shape (P, K) or (M, P, K)
+        b: torch tensor of shape (N, K, K)
+
+    Returns:
+        a and b broadcast multiplied. The output batch dimension is max(N, M).
+
+        To broadcast transforms across a batch dimension if M != N then
+        expect that either M = 1 or N = 1. The tensor with batch dimension 1 is
+        expanded to have shape N or M.
+    """
+    if a.dim() == 2:
+        a = a[None]
+    if len(a) != len(b):
+        if not ((len(a) == 1) or (len(b) == 1)):
+            msg = "Expected batch dim for bmm to be equal or 1; got %r, %r"
+            raise ValueError(msg % (a.shape, b.shape))
+        if len(a) == 1:
+            a = a.expand(len(b), -1, -1)
+        if len(b) == 1:
+            b = b.expand(len(a), -1, -1)
+    return a.bmm(b)
+
+
+@torch.no_grad()
+def _check_valid_rotation_matrix(R, tol: float = 1e-7) -> None:
+    """
+    Determine if R is a valid rotation matrix by checking it satisfies the
+    following conditions:
+
+    ``RR^T = I and det(R) = 1``
+
+    Args:
+        R: an (N, 3, 3) matrix
+
+    Returns:
+        None
+
+    Emits a warning if R is an invalid rotation matrix.
+    """
+    N = R.shape[0]
+    eye = torch.eye(3, dtype=R.dtype, device=R.device)
+    eye = eye.view(1, 3, 3).expand(N, -1, -1)
+    orthogonal = torch.allclose(R.bmm(R.transpose(1, 2)), eye, atol=tol)
+    det_R = _safe_det_3x3(R)
+    no_distortion = torch.allclose(det_R, torch.ones_like(det_R))
+    if not (orthogonal and no_distortion):
+        msg = "R is not a valid rotation matrix"
+        warnings.warn(msg)
+    return
diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/utils/comm.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/utils/comm.py
new file mode 100644
index 0000000..77f3bdb
--- /dev/null
+++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/utils/comm.py
@@ -0,0 +1,105 @@
+# Copyright 2021 Toyota Research Institute. All rights reserved.
+import logging
+from functools import wraps
+
+import torch.distributed as dist
+# The helpers below rely on detectron2's communication utilities; this module
+# was ported from DD3D, which imports them under the same alias.
+import detectron2.utils.comm as d2_comm
+
+LOG = logging.getLogger(__name__)
+
+_NESTED_BROADCAST_FROM_MASTER = False
+
+
+def get_world_size() -> int:
+    if not dist.is_available():
+        return 1
+    if not dist.is_initialized():
+        return 1
+    return dist.get_world_size()
+
+
+def is_distributed():
+    return get_world_size() > 1
+
+
+def broadcast_from_master(fn):
+    """If distributed, only the master executes the function and broadcasts the result to the other workers.
+
+    Usage:
+        @broadcast_from_master
+        def foo(a, b): ...
+    """
+    @wraps(fn)
+    def wrapper(*args, **kwargs):
+        global _NESTED_BROADCAST_FROM_MASTER
+
+        if not is_distributed():
+            return fn(*args, **kwargs)
+
+        if _NESTED_BROADCAST_FROM_MASTER:
+            assert d2_comm.is_main_process()
+            LOG.warning(f"_NESTED_BROADCAST_FROM_MASTER = True, {fn.__name__}")
+            return fn(*args, **kwargs)
+
+        if d2_comm.is_main_process():
+            _NESTED_BROADCAST_FROM_MASTER = True
+            ret = [fn(*args, **kwargs), ]
+            _NESTED_BROADCAST_FROM_MASTER = False
+        else:
+            ret = [None, ]
+        if dist.is_initialized():
+            dist.broadcast_object_list(ret)
+        ret = ret[0]
+
+        assert ret is not None
+        return ret
+
+    return wrapper
+
+
+def master_only(fn):
+    """If distributed, only the master executes the function.
+
+    Usage:
+        @master_only
+        def foo(a, b): ...
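+
+    Note: on non-master ranks the wrapped function is never executed and the
+    wrapper returns ``None``; only rank 0 receives the actual return value.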
+ """ + @wraps(fn) + def wrapped_fn(*args, **kwargs): + if d2_comm.is_main_process(): + ret = fn(*args, **kwargs) + d2_comm.synchronize() + if d2_comm.is_main_process(): + return ret + + return wrapped_fn + + +def gather_dict(dikt): + """Gather python dictionaries from all workers to the rank=0 worker. + + Assumption: the keys of `dikt` are disjoint across all workers. + + If rank = 0, then returned aggregated dict. + If rank > 0, then return `None`. + """ + dict_lst = d2_comm.gather(dikt, dst=0) + if d2_comm.is_main_process(): + gathered_dict = {} + for dic in dict_lst: + for k in dic.keys(): + assert k not in gathered_dict, f"Dictionary key overlaps: {k}" + gathered_dict.update(dic) + return gathered_dict + else: + return None + + +def reduce_sum(tensor): + """ + Adapted from AdelaiDet: + https://github.com/aim-uofa/AdelaiDet/blob/master/adet/utils/comm.py + """ + if not is_distributed(): + return tensor + tensor = tensor.clone() + dist.all_reduce(tensor, op=dist.ReduceOp.SUM) + return tensor diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/utils/geometry.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/utils/geometry.py new file mode 100644 index 0000000..d8f546b --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/utils/geometry.py @@ -0,0 +1,204 @@ +# Copyright 2021 Toyota Research Institute. All rights reserved. +import logging + +import cv2 +import numpy as np +import torch +import torch.nn.functional as F + +LOG = logging.getLogger(__name__) + +PI = 3.14159265358979323846 +EPS = 1e-7 + +def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor: + """ + Returns torch.sqrt(torch.max(0, x)) + but with a zero subgradient where x is 0. + """ + ret = torch.zeros_like(x) + positive_mask = x > 0 + ret[positive_mask] = torch.sqrt(x[positive_mask]) + return ret + +def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor: + """ + Convert rotations given as rotation matrices to quaternions. + + Args: + matrix: Rotation matrices as tensor of shape (..., 3, 3). + + Returns: + quaternions with real part first, as tensor of shape (..., 4). + """ + if matrix.size(-1) != 3 or matrix.size(-2) != 3: + raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.") + + batch_dim = matrix.shape[:-2] + m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind( + matrix.reshape(batch_dim + (9,)), dim=-1 + ) + + q_abs = _sqrt_positive_part( + torch.stack( + [ + 1.0 + m00 + m11 + m22, + 1.0 + m00 - m11 - m22, + 1.0 - m00 + m11 - m22, + 1.0 - m00 - m11 + m22, + ], + dim=-1, + ) + ) + + # we produce the desired quaternion multiplied by each of r, i, j, k + quat_by_rijk = torch.stack( + [ + torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1), + torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1), + torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1), + torch.stack([m10 - m01, m20 + m02, m21 + m12, q_abs[..., 3] ** 2], dim=-1), + ], + dim=-2, + ) + + # We floor here at 0.1 but the exact level is not important; if q_abs is small, + # the candidate won't be picked. 
+    flr = torch.tensor(0.1).to(dtype=q_abs.dtype, device=q_abs.device)
+    quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(flr))
+
+    # if not for numerical problems, quat_candidates[i] should be same (up to a sign),
+    # forall i; we pick the best-conditioned one (with the largest denominator)
+
+    return quat_candidates[
+        F.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, :  # pyre-ignore[16]
+    ].reshape(batch_dim + (4,))
+
+def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor:
+    """
+    Convert rotations given as quaternions to rotation matrices.
+
+    Args:
+        quaternions: quaternions with real part first,
+            as tensor of shape (..., 4).
+
+    Returns:
+        Rotation matrices as tensor of shape (..., 3, 3).
+    """
+    r, i, j, k = torch.unbind(quaternions, -1)
+    two_s = 2.0 / (quaternions * quaternions).sum(-1)
+
+    o = torch.stack(
+        (
+            1 - two_s * (j * j + k * k),
+            two_s * (i * j - k * r),
+            two_s * (i * k + j * r),
+            two_s * (i * j + k * r),
+            1 - two_s * (i * i + k * k),
+            two_s * (j * k - i * r),
+            two_s * (i * k - j * r),
+            two_s * (j * k + i * r),
+            1 - two_s * (i * i + j * j),
+        ),
+        -1,
+    )
+    return o.reshape(quaternions.shape[:-1] + (3, 3))
+
+def allocentric_to_egocentric(quat, proj_ctr, inv_intrinsics):
+    """
+    Parameters
+    ----------
+    quat: Tensor
+        (N, 4). Batch of (allocentric) quaternions.
+
+    proj_ctr: Tensor
+        (N, 2). Projected centers. xy coordinates.
+
+    inv_intrinsics: Tensor
+        (N, 3, 3). Inverted intrinsics.
+    """
+    R_obj_to_local = quaternion_to_matrix(quat)
+
+    # ray == z-axis in local orientation
+    ray = unproject_points2d(proj_ctr, inv_intrinsics)
+    z = ray / ray.norm(dim=1, keepdim=True)
+
+    # Gram-Schmidt process: local_y = global_y - global_y \dot local_z
+    y = z.new_tensor([[0., 1., 0.]]) - z[:, 1:2] * z
+    y = y / y.norm(dim=1, keepdim=True)
+    x = torch.cross(y, z, dim=1)
+
+    # local -> global
+    R_local_to_global = torch.stack([x, y, z], dim=-1)
+
+    # obj -> global
+    R_obj_to_global = torch.bmm(R_local_to_global, R_obj_to_local)
+
+    egocentric_quat = matrix_to_quaternion(R_obj_to_global)
+
+    # Make sure it's unit norm.
+    quat_norm = egocentric_quat.norm(dim=1, keepdim=True)
+    if not torch.allclose(quat_norm, torch.as_tensor(1.), atol=1e-3):
+        LOG.warning(
+            f"Some of the input quaternions are not unit norm: min={quat_norm.min()}, max={quat_norm.max()}; therefore normalizing."
+        )
+        egocentric_quat = egocentric_quat / quat_norm.clamp(min=EPS)
+
+    return egocentric_quat
+
+
+def homogenize_points(xy):
+    """
+    Parameters
+    ----------
+    xy: Tensor
+        xy coordinates. shape=(N, ..., 2)
+        E.g., (N, 2) or (N, K, 2) or (N, H, W, 2)
+
+    Returns
+    -------
+    Tensor:
+        1. is appended to the last dimension. shape=(N, ..., 3)
+        E.g, (N, 3) or (N, K, 3) or (N, H, W, 3).
+    """
+    # NOTE: this seems to work for arbitrary number of dimensions of input
+    pad = torch.nn.ConstantPad1d(padding=(0, 1), value=1.)
+    return pad(xy)
+
+
+def project_points3d(Xw, K):
+    _, C = Xw.shape
+    assert C == 3
+    uv, _ = cv2.projectPoints(
+        Xw, np.zeros((3, 1), dtype=np.float32), np.zeros(3, dtype=np.float32), K, np.zeros(5, dtype=np.float32)
+    )
+    return uv.reshape(-1, 2)
+
+
+def unproject_points2d(points2d, inv_K, scale=1.0):
+    """
+    Parameters
+    ----------
+    points2d: Tensor
+        xy coordinates. shape=(N, ..., 2)
+        E.g., (N, 2) or (N, K, 2) or (N, H, W, 2)
+
+    inv_K: Tensor
+        Inverted intrinsics; shape=(N, 3, 3)
+
+    scale: float, default: 1.0
+        Scaling factor.
+
+    Returns
+    -------
+    Tensor:
+        Unprojected 3D point.
+        shape=(N, ..., 3)
+        E.g., (N, 3) or (N, K, 3) or (N, H, W, 3)
+    """
+    points2d = homogenize_points(points2d)
+    siz = points2d.size()
+    points2d = points2d.view(-1, 3).unsqueeze(-1)  # (N, 3, 1)
+    unprojected = torch.matmul(inv_K, points2d)  # (N, 3, 3) x (N, 3, 1) -> (N, 3, 1)
+    unprojected = unprojected.view(siz)
+
+    return unprojected * scale
diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/utils/tasks.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/utils/tasks.py
new file mode 100644
index 0000000..997fbb3
--- /dev/null
+++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/utils/tasks.py
@@ -0,0 +1,97 @@
+# Copyright 2021 Toyota Research Institute. All rights reserved.
+from collections import OrderedDict
+
+# from detectron2.config import configurable
+
+
+class Task():
+    def __init__(self, name, is_detection_task, is_dense_prediction_task):
+        self.name = name
+        self.is_detection_task = is_detection_task
+        self.is_dense_prediction_task = is_dense_prediction_task
+
+
+# yapf: disable
+TASKS = [
+    Task(
+        name="box2d",
+        is_detection_task=True,
+        is_dense_prediction_task=False,
+    ),
+    Task(
+        name="box3d",
+        is_detection_task=True,
+        is_dense_prediction_task=False,
+    ),
+    Task(
+        name="depth",
+        is_detection_task=False,
+        is_dense_prediction_task=True,
+    )
+]
+# yapf: enable
+
+NAME_TO_TASK = OrderedDict([(task.name, task) for task in TASKS])
+
+
+class TaskManager():
+    #@configurable
+    def __init__(self, box2d_on=False, box3d_on=False, depth_on=False):
+        """
+        `configurable` is experimental.
+        """
+        self._box2d_on = self._mask2d_on = self._box3d_on = self._semseg2d_on = self._depth_on = False
+        tasks = []
+        if box2d_on:
+            tasks.append(NAME_TO_TASK['box2d'])
+            self._box2d_on = True
+        if box3d_on:
+            tasks.append(NAME_TO_TASK['box3d'])
+            self._box3d_on = True
+        if depth_on:
+            tasks.append(NAME_TO_TASK['depth'])
+            self._depth_on = True
+
+        if not tasks:
+            raise ValueError("No task specified.")
+
+        self._tasks = tasks
+
+    @property
+    def tasks(self):
+        return self._tasks
+
+    '''@classmethod
+    def from_config(cls, cfg):
+        # yapf: disable
+        return OrderedDict(
+            box2d_on = cfg.MODEL.BOX2D_ON,
+            box3d_on = cfg.MODEL.BOX3D_ON,
+            depth_on = cfg.MODEL.DEPTH_ON,
+        )
+        # yapf: enable'''
+
+    # Indicators that tell whether each task is enabled.
+    @property
+    def box2d_on(self):
+        return self._box2d_on
+
+    @property
+    def box3d_on(self):
+        return self._box3d_on
+
+    @property
+    def depth_on(self):
+        return self._depth_on
+
+    @property
+    def has_dense_prediction_task(self):
+        return any([task.is_dense_prediction_task for task in self.tasks])
+
+    @property
+    def has_detection_task(self):
+        return any([task.is_detection_task for task in self.tasks])
+
+    @property
+    def task_names(self):
+        return [task.name for task in self.tasks]
diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/utils/tensor2d.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/utils/tensor2d.py
new file mode 100644
index 0000000..2922567
--- /dev/null
+++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/utils/tensor2d.py
@@ -0,0 +1,47 @@
+# Copyright 2021 Toyota Research Institute. All rights reserved.
+import torch
+import torch.nn.functional as F
+
+
+def compute_features_locations(h, w, stride, dtype=torch.float32, device='cpu', offset="none"):
+    """Adapted from AdelaiDet:
+        https://github.com/aim-uofa/AdelaiDet/blob/master/adet/utils/comm.py
+
+    Key difference: offset is configurable.
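+
+    Example (editor's sketch, not in the original patch):
+        >>> compute_features_locations(2, 2, stride=8, offset="half")
+        tensor([[ 4.,  4.],
+                [12.,  4.],
+                [ 4., 12.],
+                [12., 12.]])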
+ """ + shifts_x = torch.arange(0, w * stride, step=stride, dtype=dtype, device=device) + shifts_y = torch.arange(0, h * stride, step=stride, dtype=dtype, device=device) + shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) + shift_x = shift_x.reshape(-1) + shift_y = shift_y.reshape(-1) + # (dennis.park) + # locations = torch.stack((shift_x, shift_y), dim=1) + stride // 2 + locations = torch.stack((shift_x, shift_y), dim=1) + if offset == "half": + locations += stride // 2 + else: + assert offset == "none" + + return locations + + +def aligned_bilinear(tensor, factor, offset="none"): + """Adapted from AdelaiDet: + https://github.com/aim-uofa/AdelaiDet/blob/master/adet/utils/comm.py + """ + assert tensor.dim() == 4 + assert factor >= 1 + assert int(factor) == factor + + if factor == 1: + return tensor + + h, w = tensor.size()[2:] + tensor = F.pad(tensor, pad=(0, 1, 0, 1), mode="replicate") + oh = factor * h + 1 + ow = factor * w + 1 + tensor = F.interpolate(tensor, size=(oh, ow), mode='bilinear', align_corners=True) + if offset == "half": + tensor = F.pad(tensor, pad=(factor // 2, 0, factor // 2, 0), mode="replicate") + + return tensor[:, :, :oh - 1, :ow - 1] diff --git a/adzoo/bevformer/mmdet3d_plugin/dd3d/utils/visualization.py b/adzoo/bevformer/mmdet3d_plugin/dd3d/utils/visualization.py new file mode 100644 index 0000000..71e78b1 --- /dev/null +++ b/adzoo/bevformer/mmdet3d_plugin/dd3d/utils/visualization.py @@ -0,0 +1,147 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +# Copyright 2021 Toyota Research Institute. All rights reserved. +import colorsys +import os + +import cv2 +import matplotlib.colors as mplc +import numpy as np +from PIL import Image, ImageDraw + + +def fill_color_polygon(image, polygon, color, alpha=0.5): + """Color interior of polygon with alpha-blending. This function modified input in place. + """ + _mask = Image.new('L', (image.shape[1], image.shape[0]), 0) + ImageDraw.Draw(_mask).polygon(polygon, outline=1, fill=1) + mask = np.array(_mask, np.bool) + for c in range(3): + channel = image[:, :, c] + channel[mask] = channel[mask] * (1. - alpha) + color[c] * alpha + + +def change_color_brightness(color, brightness_factor): + """ + Copied from detectron2.utils.visualizer.py + ------------------------------------------- + + Depending on the brightness_factor, gives a lighter or darker color i.e. a color with + less or more saturation than the original color. + + Args: + color: color of the polygon. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of + 0 will correspond to no change, a factor in [-1.0, 0) range will result in + a darker color and a factor in (0, 1.0] range will result in a lighter color. + + Returns: + modified_color (tuple[double]): a tuple containing the RGB values of the + modified color. Each value in the tuple is in the [0.0, 1.0] range. 
+ """ + assert brightness_factor >= -1.0 and brightness_factor <= 1.0 + color = mplc.to_rgb(color) + polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) + modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) + modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness + modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness + modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) + return modified_color + + +def draw_text(ax, text, position, *, font_size, color="g", horizontal_alignment="center", rotation=0): + """ + Copied from Visualizer.draw_text() + ----------------------------------- + + Args: + text (str): class label + position (tuple): a tuple of the x and y coordinates to place text on image. + font_size (int, optional): font of the text. If not provided, a font size + proportional to the image width is calculated and used. + color: color of the text. Refer to `matplotlib.colors` for full list + of formats that are accepted. + horizontal_alignment (str): see `matplotlib.text.Text` + rotation: rotation angle in degrees CCW + + Returns: + output (VisImage): image object with text drawn. + """ + # since the text background is dark, we don't want the text to be dark + color = np.maximum(list(mplc.to_rgb(color)), 0.2) + color[np.argmax(color)] = max(0.8, np.max(color)) + + x, y = position + ax.text( + x, + y, + text, + size=font_size, + family="sans-serif", + bbox={ + "facecolor": "black", + "alpha": 0.8, + "pad": 0.7, + "edgecolor": "none" + }, + verticalalignment="top", + horizontalalignment=horizontal_alignment, + color=color, + zorder=10, + rotation=rotation, + ) + return ax + + +def float_to_uint8_color(float_clr): + assert all([c >= 0. for c in float_clr]) + assert all([c <= 1. for c in float_clr]) + return [int(c * 255.) for c in float_clr] + + +def mosaic(items, scale=1.0, pad=3, grid_width=None): + """Creates a mosaic from list of images. + + Parameters + ---------- + items: list of np.ndarray + List of images to mosaic. + + scale: float, default=1.0 + Scale factor applied to images. scale > 1.0 enlarges images. + + pad: int, default=3 + Padding size of the images before mosaic + + grid_width: int, default=None + Mosaic width or grid width of the mosaic + + Returns + ------- + image: np.array of shape (H, W, 3) + Image mosaic + """ + # Determine tile width and height + N = len(items) + assert N > 0, 'No items to mosaic!' + grid_width = grid_width if grid_width else np.ceil(np.sqrt(N)).astype(int) + grid_height = np.ceil(N * 1. 
/ grid_width).astype(int)  # `np.int` was removed in NumPy 1.24
+    input_size = items[0].shape[:2]
+    target_shape = (int(input_size[1] * scale), int(input_size[0] * scale))
+    mosaic_items = []
+    for j in range(grid_width * grid_height):
+        if j < N:
+            # Only the first image is scaled, the rest are re-shaped
+            # to the same size as the previous image in the mosaic
+            im = cv2.resize(items[j], dsize=target_shape)
+            mosaic_items.append(im)
+        else:
+            mosaic_items.append(np.zeros_like(mosaic_items[-1]))
+
+    # Stack W tiles horizontally first, then vertically
+    im_pad = lambda im: cv2.copyMakeBorder(im, pad, pad, pad, pad, cv2.BORDER_CONSTANT, 0)
+    mosaic_items = [im_pad(im) for im in mosaic_items]
+    hstack = [np.hstack(mosaic_items[j:j + grid_width]) for j in range(0, len(mosaic_items), grid_width)]
+    mosaic_viz = np.vstack(hstack) if len(hstack) > 1 \
+        else hstack[0]
+    return mosaic_viz
diff --git a/adzoo/bevformer/mmdet3d_plugin/models/hooks/__init__.py b/adzoo/bevformer/mmdet3d_plugin/models/hooks/__init__.py
new file mode 100644
index 0000000..93b13c9
--- /dev/null
+++ b/adzoo/bevformer/mmdet3d_plugin/models/hooks/__init__.py
@@ -0,0 +1 @@
+from .hooks import GradChecker
\ No newline at end of file
diff --git a/adzoo/bevformer/mmdet3d_plugin/models/hooks/hooks.py b/adzoo/bevformer/mmdet3d_plugin/models/hooks/hooks.py
new file mode 100644
index 0000000..56ff7fd
--- /dev/null
+++ b/adzoo/bevformer/mmdet3d_plugin/models/hooks/hooks.py
@@ -0,0 +1,13 @@
+from mmcv.runner.hooks.hook import HOOKS, Hook
+
+
+@HOOKS.register_module()
+class GradChecker(Hook):
+
+    def after_train_iter(self, runner):
+        # Flag parameters that require gradients but received none this iteration.
+        for key, val in runner.model.named_parameters():
+            if val.grad is None and val.requires_grad:
+                print('WARNING: {key}\'s parameters are not being used!'.format(key=key))
+
+
diff --git a/adzoo/bevformer/model_converters/convert_votenet_checkpoints.py b/adzoo/bevformer/model_converters/convert_votenet_checkpoints.py
new file mode 100755
index 0000000..33792b0
--- /dev/null
+++ b/adzoo/bevformer/model_converters/convert_votenet_checkpoints.py
@@ -0,0 +1,152 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+import tempfile
+import torch
+from mmcv import Config
+from mmcv.runner import load_state_dict
+
+from mmdet3d.models import build_detector
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='MMDet3D upgrade model version(before v0.6.0) of VoteNet')
+    parser.add_argument('checkpoint', help='checkpoint file')
+    parser.add_argument('--out', help='path of the output checkpoint file')
+    args = parser.parse_args()
+    return args
+
+
+def parse_config(config_strings):
+    """Parse config from strings.
+
+    Args:
+        config_strings (string): strings of model config.
+ + Returns: + Config: model config + """ + temp_file = tempfile.NamedTemporaryFile() + config_path = f'{temp_file.name}.py' + with open(config_path, 'w') as f: + f.write(config_strings) + + config = Config.fromfile(config_path) + + # Update backbone config + if 'pool_mod' in config.model.backbone: + config.model.backbone.pop('pool_mod') + + if 'sa_cfg' not in config.model.backbone: + config.model.backbone['sa_cfg'] = dict( + type='PointSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=True) + + if 'type' not in config.model.bbox_head.vote_aggregation_cfg: + config.model.bbox_head.vote_aggregation_cfg['type'] = 'PointSAModule' + + # Update bbox_head config + if 'pred_layer_cfg' not in config.model.bbox_head: + config.model.bbox_head['pred_layer_cfg'] = dict( + in_channels=128, shared_conv_channels=(128, 128), bias=True) + + if 'feat_channels' in config.model.bbox_head: + config.model.bbox_head.pop('feat_channels') + + if 'vote_moudule_cfg' in config.model.bbox_head: + config.model.bbox_head['vote_module_cfg'] = config.model.bbox_head.pop( + 'vote_moudule_cfg') + + if config.model.bbox_head.vote_aggregation_cfg.use_xyz: + config.model.bbox_head.vote_aggregation_cfg.mlp_channels[0] -= 3 + + temp_file.close() + + return config + + +def main(): + """Convert keys in checkpoints for VoteNet. + + There can be some breaking changes during the development of mmdetection3d, + and this tool is used for upgrading checkpoints trained with old versions + (before v0.6.0) to the latest one. + """ + args = parse_args() + checkpoint = torch.load(args.checkpoint) + cfg = parse_config(checkpoint['meta']['config']) + # Build the model and load checkpoint + model = build_detector( + cfg.model, + train_cfg=cfg.get('train_cfg'), + test_cfg=cfg.get('test_cfg')) + orig_ckpt = checkpoint['state_dict'] + converted_ckpt = orig_ckpt.copy() + + if cfg['dataset_type'] == 'ScanNetDataset': + NUM_CLASSES = 18 + elif cfg['dataset_type'] == 'SUNRGBDDataset': + NUM_CLASSES = 10 + else: + raise NotImplementedError + + RENAME_PREFIX = { + 'bbox_head.conv_pred.0': 'bbox_head.conv_pred.shared_convs.layer0', + 'bbox_head.conv_pred.1': 'bbox_head.conv_pred.shared_convs.layer1' + } + + DEL_KEYS = [ + 'bbox_head.conv_pred.0.bn.num_batches_tracked', + 'bbox_head.conv_pred.1.bn.num_batches_tracked' + ] + + EXTRACT_KEYS = { + 'bbox_head.conv_pred.conv_cls.weight': + ('bbox_head.conv_pred.conv_out.weight', [(0, 2), (-NUM_CLASSES, -1)]), + 'bbox_head.conv_pred.conv_cls.bias': + ('bbox_head.conv_pred.conv_out.bias', [(0, 2), (-NUM_CLASSES, -1)]), + 'bbox_head.conv_pred.conv_reg.weight': + ('bbox_head.conv_pred.conv_out.weight', [(2, -NUM_CLASSES)]), + 'bbox_head.conv_pred.conv_reg.bias': + ('bbox_head.conv_pred.conv_out.bias', [(2, -NUM_CLASSES)]) + } + + # Delete some useless keys + for key in DEL_KEYS: + converted_ckpt.pop(key) + + # Rename keys with specific prefix + RENAME_KEYS = dict() + for old_key in converted_ckpt.keys(): + for rename_prefix in RENAME_PREFIX.keys(): + if rename_prefix in old_key: + new_key = old_key.replace(rename_prefix, + RENAME_PREFIX[rename_prefix]) + RENAME_KEYS[new_key] = old_key + for new_key, old_key in RENAME_KEYS.items(): + converted_ckpt[new_key] = converted_ckpt.pop(old_key) + + # Extract weights and rename the keys + for new_key, (old_key, indices) in EXTRACT_KEYS.items(): + cur_layers = orig_ckpt[old_key] + converted_layers = [] + for (start, end) in indices: + if end != -1: + converted_layers.append(cur_layers[start:end]) + else: + converted_layers.append(cur_layers[start:]) + 
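+        # (Editor's note, illustrative) With NUM_CLASSES=18 (ScanNet), the
+        # packed `conv_out` tensor is split so that channels [0:2] (objectness)
+        # plus the last 18 channels (class scores) form `conv_cls`, while the
+        # middle block [2:-18] forms `conv_reg`; an end index of -1 in
+        # EXTRACT_KEYS is the sentinel for "slice to the end", handled above.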
+            converted_layers = torch.cat(converted_layers, 0)
+        converted_ckpt[new_key] = converted_layers
+        if old_key in converted_ckpt.keys():
+            converted_ckpt.pop(old_key)
+
+    # Check the converted checkpoint by loading to the model
+    load_state_dict(model, converted_ckpt, strict=True)
+    checkpoint['state_dict'] = converted_ckpt
+    torch.save(checkpoint, args.out)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/adzoo/bevformer/model_converters/publish_model.py b/adzoo/bevformer/model_converters/publish_model.py
new file mode 100755
index 0000000..318fd46
--- /dev/null
+++ b/adzoo/bevformer/model_converters/publish_model.py
@@ -0,0 +1,35 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+import subprocess
+import torch
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='Process a checkpoint to be published')
+    parser.add_argument('in_file', help='input checkpoint filename')
+    parser.add_argument('out_file', help='output checkpoint filename')
+    args = parser.parse_args()
+    return args
+
+
+def process_checkpoint(in_file, out_file):
+    checkpoint = torch.load(in_file, map_location='cpu')
+    # remove optimizer for smaller file size
+    if 'optimizer' in checkpoint:
+        del checkpoint['optimizer']
+    # if it is necessary to remove some sensitive data in checkpoint['meta'],
+    # add the code here.
+    torch.save(checkpoint, out_file)
+    sha = subprocess.check_output(['sha256sum', out_file]).decode()
+    # `str.rstrip('.pth')` strips a character set, not a suffix, and would
+    # mangle names ending in 'h', 't' or 'p'; strip the extension explicitly.
+    out_file_name = out_file[:-4] if out_file.endswith('.pth') else out_file
+    final_file = out_file_name + '-{}.pth'.format(sha[:8])
+    subprocess.Popen(['mv', out_file, final_file])
+
+
+def main():
+    args = parse_args()
+    process_checkpoint(args.in_file, args.out_file)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/adzoo/bevformer/model_converters/regnet2mmdet.py b/adzoo/bevformer/model_converters/regnet2mmdet.py
new file mode 100755
index 0000000..9dee3c8
--- /dev/null
+++ b/adzoo/bevformer/model_converters/regnet2mmdet.py
@@ -0,0 +1,89 @@
+# Copyright (c) OpenMMLab. All rights reserved.
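+#
+# (Editor's sketch, not in the original patch) Typical invocation, assuming a
+# pycls RegNet checkpoint saved with key 'model_state':
+#
+#   python adzoo/bevformer/model_converters/regnet2mmdet.py \
+#       regnet_pycls.pyth regnet_mmdet.pth
+#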
+import argparse +import torch +from collections import OrderedDict + + +def convert_stem(model_key, model_weight, state_dict, converted_names): + new_key = model_key.replace('stem.conv', 'conv1') + new_key = new_key.replace('stem.bn', 'bn1') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_head(model_key, model_weight, state_dict, converted_names): + new_key = model_key.replace('head.fc', 'fc') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_reslayer(model_key, model_weight, state_dict, converted_names): + split_keys = model_key.split('.') + layer, block, module = split_keys[:3] + block_id = int(block[1:]) + layer_name = f'layer{int(layer[1:])}' + block_name = f'{block_id - 1}' + + if block_id == 1 and module == 'bn': + new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}' + elif block_id == 1 and module == 'proj': + new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}' + elif module == 'f': + if split_keys[3] == 'a_bn': + module_name = 'bn1' + elif split_keys[3] == 'b_bn': + module_name = 'bn2' + elif split_keys[3] == 'c_bn': + module_name = 'bn3' + elif split_keys[3] == 'a': + module_name = 'conv1' + elif split_keys[3] == 'b': + module_name = 'conv2' + elif split_keys[3] == 'c': + module_name = 'conv3' + new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}' + else: + raise ValueError(f'Unsupported conversion of key {model_key}') + print(f'Convert {model_key} to {new_key}') + state_dict[new_key] = model_weight + converted_names.add(model_key) + + +def convert(src, dst): + """Convert keys in pycls pretrained RegNet models to mmdet style.""" + # load caffe model + regnet_model = torch.load(src) + blobs = regnet_model['model_state'] + # convert to pytorch style + state_dict = OrderedDict() + converted_names = set() + for key, weight in blobs.items(): + if 'stem' in key: + convert_stem(key, weight, state_dict, converted_names) + elif 'head' in key: + convert_head(key, weight, state_dict, converted_names) + elif key.startswith('s'): + convert_reslayer(key, weight, state_dict, converted_names) + + # check if all layers are converted + for key in blobs: + if key not in converted_names: + print(f'not converted: {key}') + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + torch.save(checkpoint, dst) + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + args = parser.parse_args() + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/adzoo/bevformer/test.py b/adzoo/bevformer/test.py new file mode 100755 index 0000000..ca3a035 --- /dev/null +++ b/adzoo/bevformer/test.py @@ -0,0 +1,259 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. 
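+# (Editor's sketch, not in the original patch) Typical distributed evaluation
+# with this script, assuming 8 GPUs and a BEVFormer config/checkpoint pair:
+#   bash adzoo/bevformer/dist_test.sh $CONFIG $CKPT 8 --eval bbox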
+# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- +import argparse +import os +import torch +import warnings +from mmcv.utils import get_dist_info, init_dist, wrap_fp16_model, set_random_seed, Config, DictAction, load_checkpoint +from mmcv.models import build_model, fuse_conv_bn +from torch.nn import DataParallel +from torch.nn.parallel.distributed import DistributedDataParallel + +from mmcv.datasets import build_dataset, build_dataloader, replace_ImageToTensor +import time +import os.path as osp +from adzoo.bevformer.apis.test import custom_multi_gpu_test + + +def parse_args(): + parser = argparse.ArgumentParser( + description='MMDet test (and eval) a model') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument('--out', help='output result file in pickle format') + parser.add_argument( + '--fuse-conv-bn', + action='store_true', + help='Whether to fuse conv and bn, this will slightly increase' + 'the inference speed') + parser.add_argument( + '--format-only', + action='store_true', + help='Format the output results without perform evaluation. It is' + 'useful when you want to format the result to a specific format and ' + 'submit it to the test server') + parser.add_argument( + '--eval', + type=str, + nargs='+', + help='evaluation metrics, which depends on the dataset, e.g., "bbox",' + ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC') + parser.add_argument('--show', action='store_true', help='show results') + parser.add_argument( + '--show-dir', help='directory where results will be saved') + parser.add_argument( + '--gpu-collect', + action='store_true', + help='whether to use gpu to collect results.') + parser.add_argument( + '--tmpdir', + help='tmp directory used for collecting results from multiple ' + 'workers, available when gpu-collect is not specified') + parser.add_argument('--seed', type=int, default=0, help='random seed') + parser.add_argument( + '--deterministic', + action='store_true', + help='whether to set deterministic options for CUDNN backend.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--options', + nargs='+', + action=DictAction, + help='custom options for evaluation, the key-value pair in xxx=yyy ' + 'format will be kwargs for dataset.evaluate() function (deprecate), ' + 'change to --eval-options instead.') + parser.add_argument( + '--eval-options', + nargs='+', + action=DictAction, + help='custom options for evaluation, the key-value pair in xxx=yyy ' + 'format will be kwargs for dataset.evaluate() function') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local-rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + if args.options and args.eval_options: + raise ValueError( + '--options and --eval-options cannot be both specified, ' + '--options is deprecated in favor of --eval-options') + if args.options: + warnings.warn('--options is deprecated in favor of --eval-options') + args.eval_options = args.options + return args + + +def main(): + args = parse_args() + + assert args.out or args.eval or args.format_only or args.show \ + or args.show_dir, \ + ('Please specify at least one operation (save/eval/format/show the ' + 'results / save the results) with the argument "--out", "--eval"' + ', "--format-only", "--show" or "--show-dir"') + + if args.eval and args.format_only: + raise ValueError('--eval and --format_only cannot be both specified') + + if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): + raise ValueError('The output file must be a pkl file.') + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + # import modules from string list. + if cfg.get('custom_imports', None): + from mmcv.utils import import_modules_from_strings + import_modules_from_strings(**cfg['custom_imports']) + + # # import modules from plguin/xx, registry will be updated + # if hasattr(cfg, 'plugin'): + # if cfg.plugin: + # import importlib + # if hasattr(cfg, 'plugin_dir'): + # plugin_dir = cfg.plugin_dir + # _module_dir = os.path.dirname(plugin_dir) + # _module_dir = _module_dir.split('/') + # _module_path = _module_dir[0] + + # for m in _module_dir[1:]: + # _module_path = _module_path + '.' + m + # print(_module_path) + # plg_lib = importlib.import_module(_module_path) + # else: + # # import dir is the dirpath for the config file + # _module_dir = os.path.dirname(args.config) + # _module_dir = _module_dir.split('/') + # _module_path = _module_dir[0] + # for m in _module_dir[1:]: + # _module_path = _module_path + '.' 
+ m
+            #         print(_module_path)
+            #         plg_lib = importlib.import_module(_module_path)
+
+    # set cudnn_benchmark
+    if cfg.get('cudnn_benchmark', False):
+        torch.backends.cudnn.benchmark = True
+    # set tf32
+    if cfg.get('close_tf32', False):
+        torch.backends.cuda.matmul.allow_tf32 = False
+        torch.backends.cudnn.allow_tf32 = False
+
+    cfg.model.pretrained = None
+    # in case the test dataset is concatenated
+    samples_per_gpu = 1
+    if isinstance(cfg.data.test, dict):
+        cfg.data.test.test_mode = True
+        samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
+        if samples_per_gpu > 1:
+            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
+            cfg.data.test.pipeline = replace_ImageToTensor(
+                cfg.data.test.pipeline)
+    elif isinstance(cfg.data.test, list):
+        for ds_cfg in cfg.data.test:
+            ds_cfg.test_mode = True
+        samples_per_gpu = max(
+            [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
+        if samples_per_gpu > 1:
+            for ds_cfg in cfg.data.test:
+                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
+
+    # init distributed env first, since logger depends on the dist info.
+    if args.launcher == 'none':
+        distributed = False
+    else:
+        distributed = True
+        init_dist(args.launcher, **cfg.dist_params)
+
+    # set random seeds
+    if args.seed is not None:
+        set_random_seed(args.seed, deterministic=args.deterministic)
+
+    # build the dataloader
+    dataset = build_dataset(cfg.data.test)
+    data_loader = build_dataloader(
+        dataset,
+        samples_per_gpu=samples_per_gpu,
+        workers_per_gpu=cfg.data.workers_per_gpu,
+        dist=distributed,
+        shuffle=False,
+        nonshuffler_sampler=cfg.data.nonshuffler_sampler,
+    )
+
+    # build the model and load checkpoint
+    cfg.model.train_cfg = None
+    model = build_model(cfg.model, test_cfg=cfg.get('test_cfg'))
+    fp16_cfg = cfg.get('fp16', None)
+    if fp16_cfg is not None:
+        wrap_fp16_model(model)
+    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
+    if args.fuse_conv_bn:
+        model = fuse_conv_bn(model)
+    # old versions did not save class info in checkpoints, this workaround is
+    # for backward compatibility
+    if 'CLASSES' in checkpoint.get('meta', {}):
+        model.CLASSES = checkpoint['meta']['CLASSES']
+    else:
+        model.CLASSES = dataset.CLASSES
+    # palette for visualization in segmentation tasks
+    if 'PALETTE' in checkpoint.get('meta', {}):
+        model.PALETTE = checkpoint['meta']['PALETTE']
+    elif hasattr(dataset, 'PALETTE'):
+        # segmentation dataset has `PALETTE` attribute
+        model.PALETTE = dataset.PALETTE
+
+    if not distributed:
+        assert False
+        # model = MMDataParallel(model, device_ids=[0])
+        # outputs = single_gpu_test(model, data_loader, args.show, args.show_dir)
+    else:
+        model = DistributedDataParallel(
+            model.cuda(),
+            device_ids=[torch.cuda.current_device()],
+            broadcast_buffers=False)
+        outputs = custom_multi_gpu_test(model, data_loader, args.tmpdir,
+                                        args.gpu_collect)
+
+    rank, _ = get_dist_info()
+    if rank == 0:
+        if args.out:
+            print(f'\nwriting results to {args.out}')
+            assert False
+            #mmcv.dump(outputs['bbox_results'], args.out)
+        kwargs = {} if args.eval_options is None else args.eval_options
+        kwargs['jsonfile_prefix'] = osp.join('test', args.config.split(
+            '/')[-1].split('.')[-2], time.ctime().replace(' ', '_').replace(':', '_'))
+        if args.format_only:
+            dataset.format_results(outputs, **kwargs)
+
+        if args.eval:
+            eval_kwargs = cfg.get('evaluation', {}).copy()
+            # hard-code way to remove EvalHook args
+            for key in [
+                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
+                    'rule'
+            ]:
+                eval_kwargs.pop(key, None)
+
eval_kwargs.update(dict(metric=args.eval, **kwargs)) + + print(dataset.evaluate(outputs, **eval_kwargs)) + + +if __name__ == '__main__': + main() diff --git a/adzoo/bevformer/train.py b/adzoo/bevformer/train.py new file mode 100755 index 0000000..ce20ce4 --- /dev/null +++ b/adzoo/bevformer/train.py @@ -0,0 +1,237 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. +# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +from __future__ import division + +import argparse +import copy +import mmcv +import os +import time +import torch +import warnings +from mmcv import Config, DictAction +from mmcv.utils import get_dist_info, init_dist +from os import path as osp + + +from mmcv.datasets import build_dataset +from mmcv.models import build_model +from mmcv.utils import collect_env, get_root_logger +from mmcv.utils import set_random_seed + +from mmcv.utils import TORCH_VERSION, digit_version +from adzoo.bevformer.mmdet3d_plugin.bevformer.apis.train import custom_train_model + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a detector') + parser.add_argument('config', help='train config file path') + parser.add_argument('--work-dir', help='the dir to save logs and models') + parser.add_argument( + '--resume-from', help='the checkpoint file to resume from') + parser.add_argument( + '--load-from', help='the checkpoint file to resume from') + parser.add_argument( + '--no-validate', + action='store_true', + help='whether not to evaluate the checkpoint during training') + group_gpus = parser.add_mutually_exclusive_group() + group_gpus.add_argument( + '--gpus', + type=int, + help='number of gpus to use ' + '(only applicable to non-distributed training)') + group_gpus.add_argument( + '--gpu-ids', + type=int, + nargs='+', + help='ids of gpus to use ' + '(only applicable to non-distributed training)') + parser.add_argument('--seed', type=int, default=0, help='random seed') + parser.add_argument( + '--deterministic', + action='store_true', + help='whether to set deterministic options for CUDNN backend.') + parser.add_argument( + '--options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file (deprecate), ' + 'change to --cfg-options instead.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local-rank', type=int, default=0) + parser.add_argument( + '--autoscale-lr', + action='store_true', + help='automatically scale lr with the number of gpus') + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + if args.options and args.cfg_options: + raise ValueError( + '--options and --cfg-options cannot be both specified, ' + '--options is deprecated in favor of --cfg-options') + if args.options: + warnings.warn('--options is deprecated in favor of --cfg-options') + args.cfg_options = args.options + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + # import modules from string list. + if cfg.get('custom_imports', None): + from mmcv.utils import import_modules_from_strings + import_modules_from_strings(**cfg['custom_imports']) + + # set cudnn_benchmark + #if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + # set tf32 + # if cfg.get('close_tf32', False): + # torch.backends.cuda.matmul.allow_tf32 = False + # torch.backends.cudnn.allow_tf32 = False + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + # if args.resume_from is not None: + if args.resume_from is not None and osp.isfile(args.resume_from): + cfg.resume_from = args.resume_from + if args.load_from is not None and osp.isfile(args.load_from): + cfg.load_from = args.load_from + if args.gpu_ids is not None: + cfg.gpu_ids = args.gpu_ids + else: + cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus) + if digit_version(TORCH_VERSION) == digit_version('1.8.1') and cfg.optimizer['type'] == 'AdamW': + cfg.optimizer['type'] = 'AdamW2' # fix bug in Adamw + if args.autoscale_lr: + # apply the linear scaling rule (https://arxiv.org/abs/1706.02677) + cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8 + + # init distributed env first, since logger depends on the dist info. 
+ if args.launcher == 'none': + distributed = False + else: + distributed = True + torch.cuda.set_device(int(os.environ["LOCAL_RANK"])) + init_dist(args.launcher, **cfg.dist_params) + # re-set gpu_ids with distributed training mode + _, world_size = get_dist_info() + cfg.gpu_ids = range(world_size) + + # create work_dir + mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) + # dump config + cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) + # init the logger before other steps + timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) + log_file = osp.join(cfg.work_dir, f'{timestamp}.log') + # specify logger name, if we still use 'mmdet', the output info will be + # filtered and won't be saved in the log_file + # TODO: ugly workaround to judge whether we are training det or seg model + if cfg.model.type in ['EncoderDecoder3D']: + logger_name = 'mmseg' + else: + logger_name = 'mmdet' + logger = get_root_logger( + log_file=log_file, log_level=cfg.log_level, name=logger_name) + + # init the meta dict to record some important information such as + # environment info and seed, which will be logged + meta = dict() + # log env info + env_info_dict = collect_env() + env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()]) + dash_line = '-' * 60 + '\n' + logger.info('Environment info:\n' + dash_line + env_info + '\n' + + dash_line) + meta['env_info'] = env_info + meta['config'] = cfg.pretty_text + + # log some basic info + logger.info(f'Distributed training: {distributed}') + logger.info(f'Config:\n{cfg.pretty_text}') + + # set random seeds + if args.seed is not None: + logger.info(f'Set random seed to {args.seed}, ' + f'deterministic: {args.deterministic}') + set_random_seed(args.seed, deterministic=args.deterministic) + cfg.seed = args.seed + meta['seed'] = args.seed + meta['exp_name'] = osp.basename(args.config) + + model = build_model( + cfg.model, + train_cfg=cfg.get('train_cfg'), + test_cfg=cfg.get('test_cfg')) + model.init_weights() + + logger.info(f'Model:\n{model}') + datasets = [build_dataset(cfg.data.train)] + if len(cfg.workflow) == 2: + val_dataset = copy.deepcopy(cfg.data.val) + # in case we use a dataset wrapper + if 'dataset' in cfg.data.train: + val_dataset.pipeline = cfg.data.train.dataset.pipeline + else: + val_dataset.pipeline = cfg.data.train.pipeline + # set test_mode=False here in deep copied config + # which do not affect AP/AR calculation later + # refer to https://mmdetection3d.readthedocs.io/en/latest/tutorials/customize_runtime.html#customize-workflow # noqa + val_dataset.test_mode = False + datasets.append(build_dataset(val_dataset)) + if cfg.checkpoint_config is not None: + # save mmdet version, config file content and class names in + # checkpoints as meta data + cfg.checkpoint_config.meta = dict( + config=cfg.pretty_text, + CLASSES=datasets[0].CLASSES, + PALETTE=datasets[0].PALETTE # for segmentors + if hasattr(datasets[0], 'PALETTE') else None) + # add an attribute for visualization convenience + model.CLASSES = datasets[0].CLASSES + custom_train_model( + model, + datasets, + cfg, + distributed=distributed, + validate=(not args.no_validate), + timestamp=timestamp, + meta=meta) + + +if __name__ == '__main__': + main() diff --git a/adzoo/uniad/analysis_tools/__init__.py b/adzoo/uniad/analysis_tools/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/adzoo/uniad/analysis_tools/analyze_logs.py b/adzoo/uniad/analysis_tools/analyze_logs.py new file mode 100755 index 0000000..806175f --- /dev/null +++ 
b/adzoo/uniad/analysis_tools/analyze_logs.py @@ -0,0 +1,201 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import json +import numpy as np +import seaborn as sns +from collections import defaultdict +from matplotlib import pyplot as plt + + +def cal_train_time(log_dicts, args): + for i, log_dict in enumerate(log_dicts): + print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}') + all_times = [] + for epoch in log_dict.keys(): + if args.include_outliers: + all_times.append(log_dict[epoch]['time']) + else: + all_times.append(log_dict[epoch]['time'][1:]) + all_times = np.array(all_times) + epoch_ave_time = all_times.mean(-1) + slowest_epoch = epoch_ave_time.argmax() + fastest_epoch = epoch_ave_time.argmin() + std_over_epoch = epoch_ave_time.std() + print(f'slowest epoch {slowest_epoch + 1}, ' + f'average time is {epoch_ave_time[slowest_epoch]:.4f}') + print(f'fastest epoch {fastest_epoch + 1}, ' + f'average time is {epoch_ave_time[fastest_epoch]:.4f}') + print(f'time std over epochs is {std_over_epoch:.4f}') + print(f'average iter time: {np.mean(all_times):.4f} s/iter') + print() + + +def plot_curve(log_dicts, args): + if args.backend is not None: + plt.switch_backend(args.backend) + sns.set_style(args.style) + # if legend is None, use {filename}_{key} as legend + legend = args.legend + if legend is None: + legend = [] + for json_log in args.json_logs: + for metric in args.keys: + legend.append(f'{json_log}_{metric}') + assert len(legend) == (len(args.json_logs) * len(args.keys)) + metrics = args.keys + + num_metrics = len(metrics) + for i, log_dict in enumerate(log_dicts): + epochs = list(log_dict.keys()) + for j, metric in enumerate(metrics): + print(f'plot curve of {args.json_logs[i]}, metric is {metric}') + if metric not in log_dict[epochs[args.interval - 1]]: + raise KeyError( + f'{args.json_logs[i]} does not contain metric {metric}') + + if args.mode == 'eval': + if min(epochs) == args.interval: + x0 = args.interval + else: + # if current training is resumed from previous checkpoint + # we lost information in early epochs + # `xs` should start according to `min(epochs)` + if min(epochs) % args.interval == 0: + x0 = min(epochs) + else: + # find the first epoch that do eval + x0 = min(epochs) + args.interval - \ + min(epochs) % args.interval + xs = np.arange(x0, max(epochs) + 1, args.interval) + ys = [] + for epoch in epochs[args.interval - 1::args.interval]: + ys += log_dict[epoch][metric] + + # if training is aborted before eval of the last epoch + # `xs` and `ys` will have different length and cause an error + # check if `ys[-1]` is empty here + if not log_dict[epoch][metric]: + xs = xs[:-1] + + ax = plt.gca() + ax.set_xticks(xs) + plt.xlabel('epoch') + plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o') + else: + xs = [] + ys = [] + num_iters_per_epoch = \ + log_dict[epochs[args.interval-1]]['iter'][-1] + for epoch in epochs[args.interval - 1::args.interval]: + iters = log_dict[epoch]['iter'] + if log_dict[epoch]['mode'][-1] == 'val': + iters = iters[:-1] + xs.append( + np.array(iters) + (epoch - 1) * num_iters_per_epoch) + ys.append(np.array(log_dict[epoch][metric][:len(iters)])) + xs = np.concatenate(xs) + ys = np.concatenate(ys) + plt.xlabel('iter') + plt.plot( + xs, ys, label=legend[i * num_metrics + j], linewidth=0.5) + plt.legend() + if args.title is not None: + plt.title(args.title) + if args.out is None: + plt.show() + else: + print(f'save curve to: {args.out}') + plt.savefig(args.out) + plt.cla() + + +def 
add_plot_parser(subparsers): + parser_plt = subparsers.add_parser( + 'plot_curve', help='parser for plotting curves') + parser_plt.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_plt.add_argument( + '--keys', + type=str, + nargs='+', + default=['mAP_0.25'], + help='the metric that you want to plot') + parser_plt.add_argument('--title', type=str, help='title of figure') + parser_plt.add_argument( + '--legend', + type=str, + nargs='+', + default=None, + help='legend of each plot') + parser_plt.add_argument( + '--backend', type=str, default=None, help='backend of plt') + parser_plt.add_argument( + '--style', type=str, default='dark', help='style of plt') + parser_plt.add_argument('--out', type=str, default=None) + parser_plt.add_argument('--mode', type=str, default='train') + parser_plt.add_argument('--interval', type=int, default=1) + + +def add_time_parser(subparsers): + parser_time = subparsers.add_parser( + 'cal_train_time', + help='parser for computing the average time per training iteration') + parser_time.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_time.add_argument( + '--include-outliers', + action='store_true', + help='include the first value of every epoch when computing ' + 'the average time') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Analyze Json Log') + # currently only support plot curve and calculate average train time + subparsers = parser.add_subparsers(dest='task', help='task parser') + add_plot_parser(subparsers) + add_time_parser(subparsers) + args = parser.parse_args() + return args + + +def load_json_logs(json_logs): + # load and convert json_logs to log_dict, key is epoch, value is a sub dict + # keys of sub dict is different metrics, e.g. memory, bbox_mAP + # value of sub dict is a list of corresponding values of all iterations + log_dicts = [dict() for _ in json_logs] + for json_log, log_dict in zip(json_logs, log_dicts): + with open(json_log, 'r') as log_file: + for line in log_file: + log = json.loads(line.strip()) + # skip lines without `epoch` field + if 'epoch' not in log: + continue + epoch = log.pop('epoch') + if epoch not in log_dict: + log_dict[epoch] = defaultdict(list) + for k, v in log.items(): + log_dict[epoch][k].append(v) + return log_dicts + + +def main(): + args = parse_args() + + json_logs = args.json_logs + for json_log in json_logs: + assert json_log.endswith('.json') + + log_dicts = load_json_logs(json_logs) + + eval(args.task)(log_dicts, args) + + +if __name__ == '__main__': + main() diff --git a/adzoo/uniad/analysis_tools/benchmark.py b/adzoo/uniad/analysis_tools/benchmark.py new file mode 100755 index 0000000..6ed3976 --- /dev/null +++ b/adzoo/uniad/analysis_tools/benchmark.py @@ -0,0 +1,97 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
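+#
+# (Editor's sketch, not in the original patch) Example single-GPU run,
+# assuming a prepared dataset plus a config and optional checkpoint:
+#   python adzoo/uniad/analysis_tools/benchmark.py $CONFIG \
+#       --checkpoint $CKPT --samples 500 --log-interval 50
+#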
+import argparse +import time +import torch +from mmcv import Config +from mmcv.parallel import MMDataParallel +from mmcv.runner import load_checkpoint, wrap_fp16_model +import sys +sys.path.append('.') +from mmcv.datasets.builder import build_dataloader +from mmcv.datasets import build_dataset +from mmcv.models import build_detector +#from tools.misc.fuse_conv_bn import fuse_module + + +def parse_args(): + parser = argparse.ArgumentParser(description='MMDet benchmark a model') + parser.add_argument('config', help='test config file path') + parser.add_argument('--checkpoint', default=None, help='checkpoint file') + parser.add_argument('--samples', default=2000, help='samples to benchmark') + parser.add_argument( + '--log-interval', default=10, help='interval of logging') + parser.add_argument( + '--fuse-conv-bn', + action='store_true', + help='Whether to fuse conv and bn, this will slightly increase' + 'the inference speed') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + cfg.model.pretrained = None + cfg.data.test.test_mode = True + + # build the dataloader + # TODO: support multiple images per gpu (only minor changes are needed) + print(cfg.data.test) + dataset = build_dataset(cfg.data.test) + data_loader = build_dataloader( + dataset, + samples_per_gpu=1, + workers_per_gpu=cfg.data.workers_per_gpu, + dist=False, + shuffle=False) + + # build the model and load checkpoint + cfg.model.train_cfg = None + model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg')) + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + wrap_fp16_model(model) + if args.checkpoint is not None: + load_checkpoint(model, args.checkpoint, map_location='cpu') + #if args.fuse_conv_bn: + # model = fuse_module(model) + + model = MMDataParallel(model, device_ids=[0]) + + model.eval() + + # the first several iterations may be very slow so skip them + num_warmup = 5 + pure_inf_time = 0 + + # benchmark with several samples and take the average + for i, data in enumerate(data_loader): + torch.cuda.synchronize() + start_time = time.perf_counter() + with torch.no_grad(): + model(return_loss=False, rescale=True, **data) + + torch.cuda.synchronize() + elapsed = time.perf_counter() - start_time + + if i >= num_warmup: + pure_inf_time += elapsed + if (i + 1) % args.log_interval == 0: + fps = (i + 1 - num_warmup) / pure_inf_time + print(f'Done image [{i + 1:<3}/ {args.samples}], ' + f'fps: {fps:.1f} img / s') + + if (i + 1) == args.samples: + pure_inf_time += elapsed + fps = (i + 1 - num_warmup) / pure_inf_time + print(f'Overall fps: {fps:.1f} img / s') + break + + +if __name__ == '__main__': + main() diff --git a/adzoo/uniad/analysis_tools/visualize/render/base_render.py b/adzoo/uniad/analysis_tools/visualize/render/base_render.py new file mode 100644 index 0000000..65dbbeb --- /dev/null +++ b/adzoo/uniad/analysis_tools/visualize/render/base_render.py @@ -0,0 +1,32 @@ +import matplotlib.pyplot as plt +from pyquaternion import Quaternion + + +class BaseRender: + """ + BaseRender class + """ + + def __init__( + self, + figsize=(10, 10)): + self.figsize = figsize + self.fig, self.axes = None, None + + def reset_canvas(self, dx=1, dy=1, tight_layout=False): + plt.close() + plt.gca().set_axis_off() + plt.axis('off') + self.fig, self.axes = plt.subplots(dx, dy, figsize=self.figsize) + if tight_layout: + plt.tight_layout() + + def 
close_canvas(self):
+        plt.close()
+
+    def save_fig(self, filename):
+        plt.subplots_adjust(top=1, bottom=0, right=1, left=0,
+                            hspace=0, wspace=0)
+        plt.margins(0, 0)
+        print(f'saving to {filename}')
+        plt.savefig(filename)
diff --git a/adzoo/uniad/analysis_tools/visualize/render/bev_render.py b/adzoo/uniad/analysis_tools/visualize/render/bev_render.py
new file mode 100644
index 0000000..fcc6ffa
--- /dev/null
+++ b/adzoo/uniad/analysis_tools/visualize/render/bev_render.py
@@ -0,0 +1,264 @@
+import cv2
+import numpy as np
+import matplotlib
+import matplotlib.pyplot as plt
+from pyquaternion import Quaternion
+from nuscenes.prediction import PredictHelper, convert_local_coords_to_global
+from nuscenes.utils.data_classes import LidarPointCloud  # used by show_lidar_data() below
+from tools.analysis_tools.visualize.render.base_render import BaseRender
+from tools.analysis_tools.visualize.utils import color_mapping, AgentPredictionData
+
+
+class BEVRender(BaseRender):
+    """
+    Render class for BEV
+    """
+
+    def __init__(self,
+                 figsize=(20, 20),
+                 margin: float = 50,
+                 view: np.ndarray = np.eye(4),
+                 show_gt_boxes=False):
+        super(BEVRender, self).__init__(figsize)
+        self.margin = margin
+        self.view = view
+        self.show_gt_boxes = show_gt_boxes
+
+    def set_plot_cfg(self):
+        self.axes.set_xlim([-self.margin, self.margin])
+        self.axes.set_ylim([-self.margin, self.margin])
+        self.axes.set_aspect('equal')
+        self.axes.grid(False)
+
+    def render_sample_data(self, canvas, sample_token):
+        pass
+
+    def render_anno_data(
+            self,
+            sample_token,
+            nusc,
+            predict_helper):
+        sample_record = nusc.get('sample', sample_token)
+        assert 'LIDAR_TOP' in sample_record['data'].keys(
+        ), 'Error: No LIDAR_TOP in data, unable to render.'
+        lidar_record = sample_record['data']['LIDAR_TOP']
+        data_path, boxes, _ = nusc.get_sample_data(
+            lidar_record, selected_anntokens=sample_record['anns'])
+        for box in boxes:
+            instance_token = nusc.get('sample_annotation', box.token)[
+                'instance_token']
+            future_xy_local = predict_helper.get_future_for_agent(
+                instance_token, sample_token, seconds=6, in_agent_frame=True)
+            if future_xy_local.shape[0] > 0:
+                trans = box.center
+                rot = Quaternion(matrix=box.rotation_matrix)
+                future_xy = convert_local_coords_to_global(
+                    future_xy_local, trans, rot)
+                future_xy = np.concatenate(
+                    [trans[None, :2], future_xy], axis=0)
+                c = np.array([0, 0.8, 0])
+                box.render(self.axes, view=self.view, colors=(c, c, c))
+                self._render_traj(future_xy, line_color=c, dot_color=(0, 0, 0))
+        self.axes.set_xlim([-self.margin, self.margin])
+        self.axes.set_ylim([-self.margin, self.margin])
+
+    def show_lidar_data(
+            self,
+            sample_token,
+            nusc):
+        sample_record = nusc.get('sample', sample_token)
+        assert 'LIDAR_TOP' in sample_record['data'].keys(
+        ), 'Error: No LIDAR_TOP in data, unable to render.'
+ lidar_record = sample_record['data']['LIDAR_TOP'] + data_path, boxes, _ = nusc.get_sample_data( + lidar_record, selected_anntokens=sample_record['anns']) + LidarPointCloud.from_file(data_path).render_height( + self.axes, view=self.view) + self.axes.set_xlim([-self.margin, self.margin]) + self.axes.set_ylim([-self.margin, self.margin]) + self.axes.axis('off') + self.axes.set_aspect('equal') + + def render_pred_box_data(self, agent_prediction_list): + for pred_agent in agent_prediction_list: + c = np.array([0, 1, 0]) + if hasattr(pred_agent, 'pred_track_id') and pred_agent.pred_track_id is not None: # this is true + tr_id = pred_agent.pred_track_id + c = color_mapping[tr_id % len(color_mapping)] + pred_agent.nusc_box.render( + axis=self.axes, view=self.view, colors=(c, c, c)) + if pred_agent.is_sdc: + c = np.array([1, 0, 0]) + pred_agent.nusc_box.render( + axis=self.axes, view=self.view, colors=(c, c, c)) + + def render_pred_traj(self, agent_prediction_list, top_k=3): + for pred_agent in agent_prediction_list: + if pred_agent.is_sdc: + continue + sorted_ind = np.argsort(pred_agent.pred_traj_score)[ + ::-1] # from high to low + num_modes = len(sorted_ind) + sorted_traj = pred_agent.pred_traj[sorted_ind, :, :2] + sorted_score = pred_agent.pred_traj_score[sorted_ind] + # norm_score = np.sum(np.exp(sorted_score)) + norm_score = np.exp(sorted_score[0]) + + sorted_traj = np.concatenate( + [np.zeros((num_modes, 1, 2)), sorted_traj], axis=1) + trans = pred_agent.pred_center + rot = Quaternion(axis=np.array([0, 0.0, 1.0]), angle=np.pi/2) + vehicle_id_list = [0, 1, 2, 3, 4, 6, 7] + if pred_agent.pred_label in vehicle_id_list: + dot_size = 150 + else: + dot_size = 25 + # print(sorted_score) + for i in range(top_k-1, -1, -1): + viz_traj = sorted_traj[i, :, :2] + viz_traj = convert_local_coords_to_global(viz_traj, trans, rot) + traj_score = np.exp(sorted_score[i])/norm_score + # traj_score = [1.0, 0.01, 0.01, 0.01, 0.01, 0.01][i] + self._render_traj(viz_traj, traj_score=traj_score, + colormap='winter', dot_size=dot_size) + + def render_pred_map_data(self, predicted_map_seg): + # rendered_map = map_color_dict + # divider, crossing, contour + map_color_dict = np.array( + [(204, 128, 0), (102, 255, 102), (102, 255, 102)]) + rendered_map = map_color_dict[predicted_map_seg.argmax( + -1).reshape(-1)].reshape(200, 200, -1) + bg_mask = predicted_map_seg.sum(-1) == 0 + rendered_map[bg_mask, :] = 255 + self.axes.imshow(rendered_map, alpha=0.6, + interpolation='nearest', extent=(-51.2, 51.2, -51.2, 51.2)) + + def render_occ_map_data(self, agent_list): + rendered_map = np.ones((200, 200, 3)) + rendered_map_hsv = matplotlib.colors.rgb_to_hsv(rendered_map) + occ_prob_map = np.zeros((200, 200)) + for i in range(len(agent_list)): + pred_agent = agent_list[i] + if pred_agent.pred_occ_map is None: + continue + if hasattr(pred_agent, 'pred_track_id') and pred_agent.pred_track_id is not None: # this is true + tr_id = pred_agent.pred_track_id + c = color_mapping[tr_id % len(color_mapping)] + pred_occ_map = pred_agent.pred_occ_map.max(0) + update_mask = pred_occ_map > occ_prob_map + occ_prob_map[update_mask] = pred_occ_map[update_mask] + pred_occ_map *= update_mask + hsv_c = matplotlib.colors.rgb_to_hsv(c) + rendered_map_hsv[pred_occ_map > 0.1] = ( + np.ones((200, 200, 1)) * hsv_c)[pred_occ_map > 0.1] + max_prob = pred_occ_map.max() + renorm_pred_occ_map = (pred_occ_map - max_prob) * 0.7 + 1 + sat_map = (renorm_pred_occ_map * hsv_c[1]) + rendered_map_hsv[pred_occ_map > 0.1, + 1] = sat_map[pred_occ_map > 0.1] + rendered_map 
= matplotlib.colors.hsv_to_rgb(rendered_map_hsv) + self.axes.imshow(rendered_map, alpha=0.8, + interpolation='nearest', extent=(-50, 50, -50, 50)) + + def render_occ_map_data_time(self, agent_list, t): + rendered_map = np.ones((200, 200, 3)) + rendered_map_hsv = matplotlib.colors.rgb_to_hsv(rendered_map) + occ_prob_map = np.zeros((200, 200)) + for i in range(len(agent_list)): + pred_agent = agent_list[i] + if pred_agent.pred_occ_map is None: + continue + if hasattr(pred_agent, 'pred_track_id') and pred_agent.pred_track_id is not None: # this is true + tr_id = pred_agent.pred_track_id + c = color_mapping[tr_id % len(color_mapping)] + pred_occ_map = pred_agent.pred_occ_map[t] + update_mask = pred_occ_map > occ_prob_map + occ_prob_map[update_mask] = pred_occ_map[update_mask] + pred_occ_map *= update_mask + hsv_c = matplotlib.colors.rgb_to_hsv(c) + rendered_map_hsv[pred_occ_map > 0.1] = ( + np.ones((200, 200, 1)) * hsv_c)[pred_occ_map > 0.1] + max_prob = pred_occ_map.max() + renorm_pred_occ_map = (pred_occ_map - max_prob) * 0.7 + 1 + sat_map = (renorm_pred_occ_map * hsv_c[1]) + rendered_map_hsv[pred_occ_map > 0.1, + 1] = sat_map[pred_occ_map > 0.1] + rendered_map = matplotlib.colors.hsv_to_rgb(rendered_map_hsv) + self.axes.imshow(rendered_map, alpha=0.8, + interpolation='nearest', extent=(-50, 50, -50, 50)) + + def render_planning_data(self, predicted_planning, show_command=False): + planning_traj = predicted_planning.pred_traj + planning_traj = np.concatenate( + [np.zeros((1, 2)), planning_traj], axis=0) + self._render_traj(planning_traj, colormap='autumn', dot_size=50) + if show_command: + self._render_command(predicted_planning.command) + + def render_planning_attn_mask(self, predicted_planning): + planning_attn_mask = predicted_planning.attn_mask + planning_attn_mask = planning_attn_mask/planning_attn_mask.max() + cmap_name = 'plasma' + self.axes.imshow(planning_attn_mask, alpha=0.8, interpolation='nearest', extent=( + -51.2, 51.2, -51.2, 51.2), vmax=0.2, cmap=matplotlib.colormaps[cmap_name]) + + def render_hd_map(self, nusc, nusc_maps, sample_token): + sample_record = nusc.get('sample', sample_token) + sd_rec = nusc.get('sample_data', sample_record['data']['LIDAR_TOP']) + cs_record = nusc.get('calibrated_sensor', + sd_rec['calibrated_sensor_token']) + pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) + info = { + 'lidar2ego_translation': cs_record['translation'], + 'lidar2ego_rotation': cs_record['rotation'], + 'ego2global_translation': pose_record['translation'], + 'ego2global_rotation': pose_record['rotation'], + 'scene_token': sample_record['scene_token'] + } + + layer_names = ['road_divider', 'road_segment', 'lane_divider', + 'lane', 'road_divider', 'traffic_light', 'ped_crossing'] + map_mask = obtain_map_info(nusc, + nusc_maps, + info, + patch_size=(102.4, 102.4), + canvas_size=(1024, 1024), + layer_names=layer_names) + map_mask = np.flip(map_mask, axis=1) + map_mask = np.rot90(map_mask, k=-1, axes=(1, 2)) + map_mask = map_mask[:, ::-1] > 0 + map_show = np.ones((1024, 1024, 3)) + map_show[map_mask[0], :] = np.array([1.00, 0.50, 0.31]) + map_show[map_mask[1], :] = np.array([159./255., 0.0, 1.0]) + self.axes.imshow(map_show, alpha=0.2, interpolation='nearest', + extent=(-51.2, 51.2, -51.2, 51.2)) + + def _render_traj(self, future_traj, traj_score=1, colormap='winter', points_per_step=20, line_color=None, dot_color=None, dot_size=25): + total_steps = (len(future_traj)-1) * points_per_step + 1 + dot_colors = matplotlib.colormaps[colormap]( + np.linspace(0, 1, total_steps))[:, 
:3] + dot_colors = dot_colors*traj_score + \ + (1-traj_score)*np.ones_like(dot_colors) + total_xy = np.zeros((total_steps, 2)) + for i in range(total_steps-1): + unit_vec = future_traj[i//points_per_step + + 1] - future_traj[i//points_per_step] + total_xy[i] = (i/points_per_step - i//points_per_step) * \ + unit_vec + future_traj[i//points_per_step] + total_xy[-1] = future_traj[-1] + self.axes.scatter( + total_xy[:, 0], total_xy[:, 1], c=dot_colors, s=dot_size) + + def _render_command(self, command): + command_dict = ['TURN RIGHT', 'TURN LEFT', 'KEEP FORWARD'] + self.axes.text(-48, -45, command_dict[int(command)], fontsize=45) + + def render_sdc_car(self): + sdc_car_png = cv2.imread('sources/sdc_car.png') + sdc_car_png = cv2.cvtColor(sdc_car_png, cv2.COLOR_BGR2RGB) + self.axes.imshow(sdc_car_png, extent=(-1, 1, -2, 2)) + + def render_legend(self): + legend = cv2.imread('sources/legend.png') + legend = cv2.cvtColor(legend, cv2.COLOR_BGR2RGB) + self.axes.imshow(legend, extent=(23, 51.2, -50, -40)) diff --git a/adzoo/uniad/analysis_tools/visualize/render/cam_render.py b/adzoo/uniad/analysis_tools/visualize/render/cam_render.py new file mode 100644 index 0000000..c2646b1 --- /dev/null +++ b/adzoo/uniad/analysis_tools/visualize/render/cam_render.py @@ -0,0 +1,202 @@ +import cv2 +import numpy as np +from PIL import Image +import matplotlib.pyplot as plt +from nuscenes.utils.data_classes import LidarPointCloud, Box +from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility, transform_matrix +from tools.analysis_tools.visualize.utils import color_mapping, AgentPredictionData +from tools.analysis_tools.visualize.render.base_render import BaseRender +from pyquaternion import Quaternion + +# Define a constant for camera names +CAM_NAMES = [ + 'CAM_FRONT_LEFT', + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_BACK_RIGHT', + 'CAM_BACK', + 'CAM_BACK_LEFT', +] + + +class CameraRender(BaseRender): + """ + Render class for Camera View + """ + + def __init__(self, + figsize=(53.3333, 20), + show_gt_boxes=False): + super().__init__(figsize) + self.cams = CAM_NAMES + self.show_gt_boxes = show_gt_boxes + + def get_axis(self, index): + """Retrieve the corresponding axis based on the index.""" + return self.axes[index//3, index % 3] + + def project_to_cam(self, + agent_prediction_list, + sample_data_token, + nusc, + lidar_cs_record, + project_traj=False, + cam=None, + ): + """Project predictions to camera view.""" + _, cs_record, pose_record, cam_intrinsic, imsize = self.get_image_info( + sample_data_token, nusc) + boxes = [] + for agent in agent_prediction_list: + box = Box(agent.pred_center, agent.pred_dim, Quaternion(axis=(0.0, 0.0, 1.0), radians=agent.pred_yaw), + name=agent.pred_label, token='predicted') + box.is_sdc = agent.is_sdc + if project_traj: + box.pred_traj = np.zeros((agent.pred_traj_max.shape[0]+1, 3)) + box.pred_traj[:, 0] = agent.pred_center[0] + box.pred_traj[:, 1] = agent.pred_center[1] + box.pred_traj[:, 2] = agent.pred_center[2] - \ + agent.pred_dim[2]/2 + box.pred_traj[1:, :2] += agent.pred_traj_max[:, :2] + box.pred_traj = (Quaternion( + lidar_cs_record['rotation']).rotation_matrix @ box.pred_traj.T).T + box.pred_traj += np.array( + lidar_cs_record['translation'])[None, :] + box.rotate(Quaternion(lidar_cs_record['rotation'])) + box.translate(np.array(lidar_cs_record['translation'])) + boxes.append(box) + # Make list of Box objects including coord system transforms. + + box_list = [] + tr_id_list = [] + for i, box in enumerate(boxes): + # Move box to sensor coord system. 
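+            # Transform from the ego frame into this camera's frame:
+            # translate by -t, then rotate by the inverse sensor rotation.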
+            box.translate(-np.array(cs_record['translation']))
+            box.rotate(Quaternion(cs_record['rotation']).inverse)
+            if project_traj:
+                box.pred_traj += -np.array(cs_record['translation'])[None, :]
+                box.pred_traj = (Quaternion(
+                    cs_record['rotation']).inverse.rotation_matrix @ box.pred_traj.T).T
+
+            tr_id = agent_prediction_list[i].pred_track_id
+            # Keep the ego (SDC) box for the front camera. Appending the
+            # matching track id as well keeps box_list and tr_id_list
+            # index-aligned; previously only the box was appended, so the
+            # colour lookup below drifted by one entry and could index past
+            # the end of tr_id_list.
+            if box.is_sdc and cam == 'CAM_FRONT':
+                box_list.append(box)
+                tr_id_list.append(tr_id)
+            if not box_in_image(box, cam_intrinsic, imsize):
+                continue
+            box_list.append(box)
+            tr_id_list.append(tr_id)
+        return box_list, tr_id_list, cam_intrinsic, imsize
+
+    def render_image_data(self, sample_token, nusc):
+        """Render image data for each camera."""
+        sample = nusc.get('sample', sample_token)
+        for i, cam in enumerate(self.cams):
+            sample_data_token = sample['data'][cam]
+            data_path, _, _, _, _ = self.get_image_info(
+                sample_data_token, nusc)
+            image = self.load_image(data_path, cam)
+            self.update_image(image, i, cam)
+
+    def load_image(self, data_path, cam):
+        """Load and annotate image based on the provided path."""
+        image = np.array(Image.open(data_path))
+        font = cv2.FONT_HERSHEY_SIMPLEX
+        org = (50, 60)
+        fontScale = 2
+        color = (0, 0, 0)
+        thickness = 4
+        return cv2.putText(image, cam, org, font, fontScale, color,
+                           thickness, cv2.LINE_AA)
+
+    def update_image(self, image, index, cam):
+        """Update the axis of the plot with the provided image."""
+        ax = self.get_axis(index)
+        ax.imshow(image)
+        plt.axis('off')
+        ax.axis('off')
+        ax.grid(False)
+
+    def render_pred_track_bbox(self, predicted_agent_list, sample_token, nusc):
+        """Render bounding box for predicted tracks."""
+        sample = nusc.get('sample', sample_token)
+        lidar_cs_record = nusc.get('calibrated_sensor', nusc.get(
+            'sample_data', sample['data']['LIDAR_TOP'])['calibrated_sensor_token'])
+        for i, cam in enumerate(self.cams):
+            sample_data_token = sample['data'][cam]
+            box_list, tr_id_list, camera_intrinsic, imsize = self.project_to_cam(
+                predicted_agent_list, sample_data_token, nusc, lidar_cs_record)
+            for j, box in enumerate(box_list):
+                if box.is_sdc:
+                    continue
+                tr_id = tr_id_list[j]
+                if tr_id is None:
+                    tr_id = 0
+                c = color_mapping[tr_id % len(color_mapping)]
+                box.render(
+                    self.axes[i//3, i % 3], view=camera_intrinsic, normalize=True, colors=(c, c, c))
+            # plot gt
+            if self.show_gt_boxes:
+                data_path, boxes, camera_intrinsic = nusc.get_sample_data(
+                    sample_data_token, selected_anntokens=sample['anns'])
+                for j, box in enumerate(boxes):
+                    c = [0, 1, 0]
+                    box.render(
+                        self.axes[i//3, i % 3], view=camera_intrinsic, normalize=True, colors=(c, c, c))
+            self.axes[i//3, i % 3].set_xlim(0, imsize[0])
+            self.axes[i//3, i % 3].set_ylim(imsize[1], 0)
+
+    def render_pred_traj(self, predicted_agent_list, sample_token, nusc, render_sdc=False, points_per_step=10):
+        """Render predicted trajectories."""
+        sample = nusc.get('sample', sample_token)
+        lidar_cs_record = nusc.get('calibrated_sensor', nusc.get(
+            'sample_data', sample['data']['LIDAR_TOP'])['calibrated_sensor_token'])
+        for i, cam in enumerate(self.cams):
+            sample_data_token = sample['data'][cam]
+            box_list, tr_id_list, camera_intrinsic, imsize = self.project_to_cam(
+                predicted_agent_list, sample_data_token, nusc, lidar_cs_record, project_traj=True, cam=cam)
+            for j, box in enumerate(box_list):
+                traj_points = box.pred_traj[:, :3]
+
+                total_steps = (len(traj_points)-1) * points_per_step + 1
+                total_xy = np.zeros((total_steps, 3))
+                for k in range(total_steps-1):
+                    unit_vec = traj_points[k//points_per_step +
+                                           1] - traj_points[k//points_per_step]
+                    total_xy[k]
= (k/points_per_step - k//points_per_step) * \ + unit_vec + traj_points[k//points_per_step] + in_range_mask = total_xy[:, 2] > 0.1 + traj_points = view_points( + total_xy.T, camera_intrinsic, normalize=True)[:2, :] + traj_points = traj_points[:2, in_range_mask] + if box.is_sdc: + if render_sdc: + self.axes[i//3, i % 3].scatter( + traj_points[0], traj_points[1], color=(1, 0.5, 0), s=150) + else: + continue + else: + tr_id = tr_id_list[j] + if tr_id is None: + tr_id = 0 + c = color_mapping[tr_id % len(color_mapping)] + self.axes[i//3, i % + 3].scatter(traj_points[0], traj_points[1], color=c, s=15) + self.axes[i//3, i % 3].set_xlim(0, imsize[0]) + self.axes[i//3, i % 3].set_ylim(imsize[1], 0) + + def get_image_info(self, sample_data_token, nusc): + """Retrieve image information.""" + sd_record = nusc.get('sample_data', sample_data_token) + cs_record = nusc.get('calibrated_sensor', + sd_record['calibrated_sensor_token']) + sensor_record = nusc.get('sensor', cs_record['sensor_token']) + pose_record = nusc.get('ego_pose', sd_record['ego_pose_token']) + + data_path = nusc.get_sample_data_path(sample_data_token) + + if sensor_record['modality'] == 'camera': + cam_intrinsic = np.array(cs_record['camera_intrinsic']) + imsize = (sd_record['width'], sd_record['height']) + else: + cam_intrinsic = None + imsize = None + return data_path, cs_record, pose_record, cam_intrinsic, imsize diff --git a/adzoo/uniad/analysis_tools/visualize/run.py b/adzoo/uniad/analysis_tools/visualize/run.py new file mode 100644 index 0000000..b64b545 --- /dev/null +++ b/adzoo/uniad/analysis_tools/visualize/run.py @@ -0,0 +1,338 @@ +import cv2 +import torch +import argparse +import os +import glob +import numpy as np +import matplotlib +import matplotlib.pyplot as plt +from nuscenes import NuScenes +from nuscenes.prediction import PredictHelper, convert_local_coords_to_global +from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility, transform_matrix +from nuscenes.utils.data_classes import LidarPointCloud, Box +from nuscenes.utils import splits +from pyquaternion import Quaternion +from mmcv.datasets.nuscenes_e2e_dataset import obtain_map_info +from mmcv.datasets.eval_utils.map_api import NuScenesMap +from mmcv.fileio.io import load +from PIL import Image +from tools.analysis_tools.visualize.utils import color_mapping, AgentPredictionData +from tools.analysis_tools.visualize.render.bev_render import BEVRender +from tools.analysis_tools.visualize.render.cam_render import CameraRender + + +class Visualizer: + """ + BaseRender class + """ + + def __init__( + self, + dataroot='/mnt/petrelfs/yangjiazhi/e2e_proj/data/nus_mini', + version='v1.0-mini', + predroot=None, + with_occ_map=False, + with_map=False, + with_planning=False, + with_pred_box=True, + with_pred_traj=False, + show_gt_boxes=False, + show_lidar=False, + show_command=False, + show_hd_map=False, + show_sdc_car=False, + show_sdc_traj=False, + show_legend=False): + self.nusc = NuScenes(version=version, dataroot=dataroot, verbose=True) + self.predict_helper = PredictHelper(self.nusc) + self.with_occ_map = with_occ_map + self.with_map = with_map + self.with_planning = with_planning + self.show_lidar = show_lidar + self.show_command = show_command + self.show_hd_map = show_hd_map + self.show_sdc_car = show_sdc_car + self.show_sdc_traj = show_sdc_traj + self.show_legend = show_legend + self.with_pred_traj = with_pred_traj + self.with_pred_box = with_pred_box + self.veh_id_list = [0, 1, 2, 3, 4, 6, 7] + self.use_json = '.json' in predroot + 
self.token_set = set() + self.predictions = self._parse_predictions_multitask_pkl(predroot) + self.bev_render = BEVRender(show_gt_boxes=show_gt_boxes) + self.cam_render = CameraRender(show_gt_boxes=show_gt_boxes) + + if self.show_hd_map: + self.nusc_maps = { + 'boston-seaport': NuScenesMap(dataroot=dataroot, map_name='boston-seaport'), + 'singapore-hollandvillage': NuScenesMap(dataroot=dataroot, map_name='singapore-hollandvillage'), + 'singapore-onenorth': NuScenesMap(dataroot=dataroot, map_name='singapore-onenorth'), + 'singapore-queenstown': NuScenesMap(dataroot=dataroot, map_name='singapore-queenstown'), + } + + def _parse_predictions_multitask_pkl(self, predroot): + + outputs = load(predroot) + outputs = outputs['bbox_results'] + prediction_dict = dict() + for k in range(len(outputs)): + token = outputs[k]['token'] + self.token_set.add(token) + if self.show_sdc_traj: + outputs[k]['boxes_3d'].tensor = torch.cat( + [outputs[k]['boxes_3d'].tensor, outputs[k]['sdc_boxes_3d'].tensor], dim=0) + outputs[k]['scores_3d'] = torch.cat( + [outputs[k]['scores_3d'], outputs[k]['sdc_scores_3d']], dim=0) + outputs[k]['labels_3d'] = torch.cat([outputs[k]['labels_3d'], torch.zeros( + (1,), device=outputs[k]['labels_3d'].device)], dim=0) + # detection + bboxes = outputs[k]['boxes_3d'] + scores = outputs[k]['scores_3d'] + labels = outputs[k]['labels_3d'] + + track_scores = scores.cpu().detach().numpy() + track_labels = labels.cpu().detach().numpy() + track_boxes = bboxes.tensor.cpu().detach().numpy() + + track_centers = bboxes.gravity_center.cpu().detach().numpy() + track_dims = bboxes.dims.cpu().detach().numpy() + track_yaw = bboxes.yaw.cpu().detach().numpy() + + if 'track_ids' in outputs[k]: + track_ids = outputs[k]['track_ids'].cpu().detach().numpy() + else: + track_ids = None + + # speed + track_velocity = bboxes.tensor.cpu().detach().numpy()[:, -2:] + + # trajectories + trajs = outputs[k][f'traj'].numpy() + traj_scores = outputs[k][f'traj_scores'].numpy() + + predicted_agent_list = [] + + # occflow + if self.with_occ_map: + if 'topk_query_ins_segs' in outputs[k]['occ']: + occ_map = outputs[k]['occ']['topk_query_ins_segs'][0].cpu( + ).numpy() + else: + occ_map = np.zeros((1, 5, 200, 200)) + else: + occ_map = None + + occ_idx = 0 + for i in range(track_scores.shape[0]): + if track_scores[i] < 0.25: + continue + if occ_map is not None and track_labels[i] in self.veh_id_list: + occ_map_cur = occ_map[occ_idx, :, ::-1] + occ_idx += 1 + else: + occ_map_cur = None + if track_ids is not None: + if i < len(track_ids): + track_id = track_ids[i] + else: + track_id = 0 + else: + track_id = None + # if track_labels[i] not in [0, 1, 2, 3, 4, 6, 7]: + # continue + predicted_agent_list.append( + AgentPredictionData( + track_scores[i], + track_labels[i], + track_centers[i], + track_dims[i], + track_yaw[i], + track_velocity[i], + trajs[i], + traj_scores[i], + pred_track_id=track_id, + pred_occ_map=occ_map_cur, + past_pred_traj=None + ) + ) + + if self.with_map: + map_thres = 0.7 + score_list = outputs[k]['pts_bbox']['score_list'].cpu().numpy().transpose([ + 1, 2, 0]) + predicted_map_seg = outputs[k]['pts_bbox']['lane_score'].cpu().numpy().transpose([ + 1, 2, 0]) # H, W, C + predicted_map_seg[..., -1] = score_list[..., -1] + predicted_map_seg = (predicted_map_seg > map_thres) * 1.0 + predicted_map_seg = predicted_map_seg[::-1, :, :] + else: + predicted_map_seg = None + + if self.with_planning: + # detection + bboxes = outputs[k]['sdc_boxes_3d'] + scores = outputs[k]['sdc_scores_3d'] + labels = 0 + + track_scores = 
scores.cpu().detach().numpy() + track_labels = labels + track_boxes = bboxes.tensor.cpu().detach().numpy() + + track_centers = bboxes.gravity_center.cpu().detach().numpy() + track_dims = bboxes.dims.cpu().detach().numpy() + track_yaw = bboxes.yaw.cpu().detach().numpy() + track_velocity = bboxes.tensor.cpu().detach().numpy()[:, -2:] + + if self.show_command: + command = outputs[k]['command'][0].cpu().detach().numpy() + else: + command = None + planning_agent = AgentPredictionData( + track_scores[0], + track_labels, + track_centers[0], + track_dims[0], + track_yaw[0], + track_velocity[0], + outputs[k]['planning_traj'][0].cpu().detach().numpy(), + 1, + pred_track_id=-1, + pred_occ_map=None, + past_pred_traj=None, + is_sdc=True, + command=command, + ) + predicted_agent_list.append(planning_agent) + else: + planning_agent = None + prediction_dict[token] = dict(predicted_agent_list=predicted_agent_list, + predicted_map_seg=predicted_map_seg, + predicted_planning=planning_agent) + return prediction_dict + + def visualize_bev(self, sample_token, out_filename, t=None): + self.bev_render.reset_canvas(dx=1, dy=1) + self.bev_render.set_plot_cfg() + + if self.show_lidar: + self.bev_render.show_lidar_data(sample_token, self.nusc) + if self.bev_render.show_gt_boxes: + self.bev_render.render_anno_data( + sample_token, self.nusc, self.predict_helper) + if self.with_pred_box: + self.bev_render.render_pred_box_data( + self.predictions[sample_token]['predicted_agent_list']) + if self.with_pred_traj: + self.bev_render.render_pred_traj( + self.predictions[sample_token]['predicted_agent_list']) + if self.with_map: + self.bev_render.render_pred_map_data( + self.predictions[sample_token]['predicted_map_seg']) + if self.with_occ_map: + self.bev_render.render_occ_map_data( + self.predictions[sample_token]['predicted_agent_list']) + if self.with_planning: + self.bev_render.render_pred_box_data( + [self.predictions[sample_token]['predicted_planning']]) + self.bev_render.render_planning_data( + self.predictions[sample_token]['predicted_planning'], show_command=self.show_command) + if self.show_hd_map: + self.bev_render.render_hd_map( + self.nusc, self.nusc_maps, sample_token) + if self.show_sdc_car: + self.bev_render.render_sdc_car() + if self.show_legend: + self.bev_render.render_legend() + self.bev_render.save_fig(out_filename + '.jpg') + + def visualize_cam(self, sample_token, out_filename): + self.cam_render.reset_canvas(dx=2, dy=3, tight_layout=True) + self.cam_render.render_image_data(sample_token, self.nusc) + self.cam_render.render_pred_track_bbox( + self.predictions[sample_token]['predicted_agent_list'], sample_token, self.nusc) + self.cam_render.render_pred_traj( + self.predictions[sample_token]['predicted_agent_list'], sample_token, self.nusc, render_sdc=self.with_planning) + self.cam_render.save_fig(out_filename + '_cam.jpg') + + def combine(self, out_filename): + # pass + bev_image = cv2.imread(out_filename + '.jpg') + cam_image = cv2.imread(out_filename + '_cam.jpg') + merge_image = cv2.hconcat([cam_image, bev_image]) + cv2.imwrite(out_filename + '.jpg', merge_image) + os.remove(out_filename + '_cam.jpg') + + def to_video(self, folder_path, out_path, fps=4, downsample=1): + imgs_path = glob.glob(os.path.join(folder_path, '*.jpg')) + imgs_path = sorted(imgs_path) + img_array = [] + for img_path in imgs_path: + img = cv2.imread(img_path) + height, width, channel = img.shape + img = cv2.resize(img, (width//downsample, height // + downsample), interpolation=cv2.INTER_AREA) + height, width, channel = img.shape 
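+            # cv2.VideoWriter expects the frame size as (width, height)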
+ size = (width, height) + img_array.append(img) + out = cv2.VideoWriter( + out_path, cv2.VideoWriter_fourcc(*'DIVX'), fps, size) + for i in range(len(img_array)): + out.write(img_array[i]) + out.release() + +def main(args): + render_cfg = dict( + with_occ_map=False, + with_map=False, + with_planning=True, + with_pred_box=True, + with_pred_traj=True, + show_gt_boxes=False, + show_lidar=False, + show_command=True, + show_hd_map=False, + show_sdc_car=True, + show_legend=True, + show_sdc_traj=False + ) + + viser = Visualizer(version='v1.0-mini', predroot=args.predroot, dataroot='data/nuscenes', **render_cfg) + + if not os.path.exists(args.out_folder): + os.makedirs(args.out_folder) + + val_splits = splits.val + + scene_token_to_name = dict() + for i in range(len(viser.nusc.scene)): + scene_token_to_name[viser.nusc.scene[i]['token']] = viser.nusc.scene[i]['name'] + + for i in range(len(viser.nusc.sample)): + sample_token = viser.nusc.sample[i]['token'] + scene_token = viser.nusc.sample[i]['scene_token'] + + if scene_token_to_name[scene_token] not in val_splits: + continue + + if sample_token not in viser.token_set: + print(i, sample_token, 'not in prediction pkl!') + continue + + viser.visualize_bev(sample_token, os.path.join(args.out_folder, str(i).zfill(3))) + + if args.project_to_cam: + viser.visualize_cam(sample_token, os.path.join(args.out_folder, str(i).zfill(3))) + viser.combine(os.path.join(args.out_folder, str(i).zfill(3))) + + viser.to_video(args.out_folder, args.demo_video, fps=4, downsample=2) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--predroot', default='/mnt/nas20/yihan01.hu/tmp/results.pkl', help='Path to results.pkl') + parser.add_argument('--out_folder', default='/mnt/nas20/yihan01.hu/tmp/viz/demo_test/', help='Output folder path') + parser.add_argument('--demo_video', default='mini_val_final.avi', help='Demo video name') + parser.add_argument('--project_to_cam', default=True, help='Project to cam (default: True)') + args = parser.parse_args() + main(args) diff --git a/adzoo/uniad/analysis_tools/visualize/utils.py b/adzoo/uniad/analysis_tools/visualize/utils.py new file mode 100644 index 0000000..315344e --- /dev/null +++ b/adzoo/uniad/analysis_tools/visualize/utils.py @@ -0,0 +1,131 @@ +import numpy as np +from nuscenes.utils.data_classes import LidarPointCloud, Box +from pyquaternion import Quaternion + + +color_mapping = np.asarray([ + [0, 0, 0], + [255, 179, 0], + [128, 62, 117], + [255, 104, 0], + [166, 189, 215], + [193, 0, 32], + [206, 162, 98], + [129, 112, 102], + [0, 125, 52], + [246, 118, 142], + [0, 83, 138], + [255, 122, 92], + [83, 55, 122], + [255, 142, 0], + [179, 40, 81], + [244, 200, 0], + [127, 24, 13], + [147, 170, 0], + [89, 51, 21], + [241, 58, 19], + [35, 44, 22], + [112, 224, 255], + [70, 184, 160], + [153, 0, 255], + [71, 255, 0], + [255, 0, 163], + [255, 204, 0], + [0, 255, 235], + [255, 0, 235], + [255, 0, 122], + [255, 245, 0], + [10, 190, 212], + [214, 255, 0], + [0, 204, 255], + [20, 0, 255], + [255, 255, 0], + [0, 153, 255], + [0, 255, 204], + [41, 255, 0], + [173, 0, 255], + [0, 245, 255], + [71, 0, 255], + [0, 255, 184], + [0, 92, 255], + [184, 255, 0], + [255, 214, 0], + [25, 194, 194], + [92, 0, 255], + [220, 220, 220], + [255, 9, 92], + [112, 9, 255], + [8, 255, 214], + [255, 184, 6], + [10, 255, 71], + [255, 41, 10], + [7, 255, 255], + [224, 255, 8], + [102, 8, 255], + [255, 61, 6], + [255, 194, 7], + [0, 255, 20], + [255, 8, 41], + [255, 5, 153], + [6, 51, 255], + [235, 12, 255], + [160, 
150, 20], + [0, 163, 255], + [140, 140, 140], + [250, 10, 15], + [20, 255, 0], +])/255 + + +class AgentPredictionData: + """ + Agent data class, includes bbox, traj, and occflow + """ + + def __init__(self, + pred_score, + pred_label, + pred_center, + pred_dim, + pred_yaw, + pred_vel, + pred_traj, + pred_traj_score, + pred_track_id=None, + pred_occ_map=None, + is_sdc=False, + past_pred_traj=None, + command=None, + attn_mask=None, + ): + self.pred_score = pred_score + self.pred_label = pred_label + self.pred_center = pred_center + self.pred_dim = pred_dim + self.pred_yaw = -pred_yaw-np.pi/2 + self.pred_vel = pred_vel + self.pred_traj = pred_traj + self.pred_traj_score = pred_traj_score + self.pred_track_id = pred_track_id + self.pred_occ_map = pred_occ_map + if self.pred_traj is not None: + if isinstance(self.pred_traj_score, int): + self.pred_traj_max = self.pred_traj + else: + self.pred_traj_max = self.pred_traj[self.pred_traj_score.argmax( + )] + else: + self.pred_traj_max = None + self.nusc_box = Box( + center=pred_center, + size=pred_dim, + orientation=Quaternion(axis=[0, 0, 1], radians=self.pred_yaw), + label=pred_label, + score=pred_score + ) + if is_sdc: + self.pred_center = [0, 0, -1.2+1.56/2] + self.is_sdc = is_sdc + self.past_pred_traj = past_pred_traj + self.command = command + self.attn_mask = attn_mask diff --git a/adzoo/uniad/configs/_base_/datasets/nus-3d.py b/adzoo/uniad/configs/_base_/datasets/nus-3d.py new file mode 100644 index 0000000..1548171 --- /dev/null +++ b/adzoo/uniad/configs/_base_/datasets/nus-3d.py @@ -0,0 +1,142 @@ +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-50, -50, -5, 50, 50, 3] +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', + 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier' +] +dataset_type = 'NuScenesDataset' +data_root = 'data/nuscenes/' +# Input modality for nuScenes dataset, this is consistent with the submission +# format which requires the information in input_modality. +input_modality = dict( + use_lidar=True, + use_camera=False, + use_radar=False, + use_map=False, + use_external=False) +file_client_args = dict(backend='disk') +# Uncomment the following if use ceph or other file clients. +# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient +# for more details. 
+# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/nuscenes/': 's3://nuscenes/nuscenes/', +# 'data/nuscenes/': 's3://nuscenes/nuscenes/' +# })) +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR'), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True, + box_type_3d='LiDAR'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True, + box_type_3d='LiDAR')) +# For nuScenes dataset, we usually evaluate the model at the end of training. +# Since the models are trained by 24 epochs by default, we set evaluation +# interval to be 24. Please change the interval accordingly if you do not +# use a default schedule. 
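+# e.g. with a 12-epoch schedule one would set:
+#   evaluation = dict(interval=12, pipeline=eval_pipeline)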
+evaluation = dict(interval=24, pipeline=eval_pipeline)
diff --git a/adzoo/uniad/configs/_base_/default_runtime.py b/adzoo/uniad/configs/_base_/default_runtime.py
new file mode 100644
index 0000000..4e85b69
--- /dev/null
+++ b/adzoo/uniad/configs/_base_/default_runtime.py
@@ -0,0 +1,18 @@
+checkpoint_config = dict(interval=1)
+# yapf:disable
+# By default we use textlogger hook and tensorboard
+# For more loggers see
+# https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.LoggerHook
+log_config = dict(
+    interval=50,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHook')
+    ])
+# yapf:enable
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = None
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
diff --git a/adzoo/uniad/configs/stage1_track_map/base_track_map.py b/adzoo/uniad/configs/stage1_track_map/base_track_map.py
new file mode 100644
index 0000000..cd18640
--- /dev/null
+++ b/adzoo/uniad/configs/stage1_track_map/base_track_map.py
@@ -0,0 +1,580 @@
+_base_ = ["../_base_/datasets/nus-3d.py",
+          "../_base_/default_runtime.py"]
+
+# Update-2023-06-12:
+# [Enhance] Update some freezing args of UniAD
+# [Bugfix] Reproduce the from-scratch results of stage1
+# 1. Remove loss_past_traj in stage1 training
+# 2. Unfreeze neck and BN
+# --> Reproduced tracking result: AMOTA 0.393
+
+
+# Unfreeze neck and BN so that the from-scratch results of stage1 can be
+# reproduced
+plugin = True
+# plugin_dir = "projects/mmdet3d_plugin/"
+# If point cloud range is changed, the models should also change their point
+# cloud range accordingly
+point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
+voxel_size = [0.2, 0.2, 8]
+patch_size = [102.4, 102.4]
+img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
+# For nuScenes we usually do 10-class detection
+class_names = [
+    "car",
+    "truck",
+    "construction_vehicle",
+    "bus",
+    "trailer",
+    "barrier",
+    "motorcycle",
+    "bicycle",
+    "pedestrian",
+    "traffic_cone",
+]
+
+input_modality = dict(
+    use_lidar=False, use_camera=True, use_radar=False, use_map=False, use_external=True
+)
+_dim_ = 256
+_pos_dim_ = _dim_ // 2
+_ffn_dim_ = _dim_ * 2
+_num_levels_ = 4
+bev_h_ = 200
+bev_w_ = 200
+_feed_dim_ = _ffn_dim_
+_dim_half_ = _pos_dim_
+canvas_size = (bev_h_, bev_w_)
+
+# NOTE: queue_length has been reduced from the default 5 to 3 here to save
+# GPU memory, at the risk of a performance drop.
+queue_length = 3  # each sequence contains `queue_length` frames.
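+# A minimal sketch (not part of the original config) of restoring the default
+# queue length at load time with mmcv's Config; `model` and `data` below are
+# defined later in this file, so the override must touch both:
+#
+#   from mmcv import Config
+#   cfg = Config.fromfile(
+#       'adzoo/uniad/configs/stage1_track_map/base_track_map.py')
+#   cfg.model.queue_length = 5        # frames cached by the tracker
+#   cfg.data.train.queue_length = 5   # keep the dataset pipeline in sync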
+ +### traj prediction args ### +predict_steps = 12 +predict_modes = 6 +fut_steps = 4 +past_steps = 4 +use_nonlinear_optimizer = True + +## occflow setting +occ_n_future = 4 +occ_n_future_plan = 6 +occ_n_future_max = max([occ_n_future, occ_n_future_plan]) + +### planning ### +planning_steps = 6 +use_col_optim = True + +### Occ args ### +occflow_grid_conf = { + 'xbound': [-50.0, 50.0, 0.5], + 'ybound': [-50.0, 50.0, 0.5], + 'zbound': [-10.0, 10.0, 20.0], +} + +# Other settings +train_gt_iou_threshold=0.3 + +model = dict( + type="UniAD", + gt_iou_threshold=train_gt_iou_threshold, + queue_length=queue_length, + use_grid_mask=True, + video_test_mode=True, + num_query=900, + num_classes=10, + pc_range=point_cloud_range, + img_backbone=dict( + type="ResNet", + depth=101, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=4, + norm_cfg=dict(type="BN2d", requires_grad=False), + norm_eval=True, + style="caffe", + dcn=dict( + type="DCNv2", deform_groups=1, fallback_on_stride=False + ), # original DCNv2 will print log when perform load_state_dict + stage_with_dcn=(False, False, True, True), + ), + img_neck=dict( + type="FPN", + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs="on_output", + num_outs=4, + relu_before_extra_convs=True, + ), + freeze_img_backbone=True, + freeze_img_neck=False, + freeze_bn=False, + score_thresh=0.4, + filter_score_thresh=0.35, + qim_args=dict( + qim_type="QIMBase", + merger_dropout=0, + update_query_pos=True, + fp_ratio=0.3, + random_drop=0.1, + ), # hyper-param for query dropping mentioned in MOTR + mem_args=dict( + memory_bank_type="MemoryBank", + memory_bank_score_thresh=0.0, + memory_bank_len=4, + ), + loss_cfg=dict( + type="ClipMatcher", + num_classes=10, + weight_dict=None, + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2], + assigner=dict( + type="HungarianAssigner3DTrack", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + pc_range=point_cloud_range, + ), + loss_cls=dict( + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_past_traj_weight=0.0, + ), # loss cfg for tracking + pts_bbox_head=dict( + type="BEVFormerTrackHead", + bev_h=bev_h_, + bev_w=bev_w_, + num_query=900, + num_classes=10, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + past_steps=past_steps, + fut_steps=fut_steps, + transformer=dict( + type="UniADPerceptionTransformer", + rotate_prev_bev=True, + use_shift=True, + use_can_bus=True, + embed_dims=_dim_, + encoder=dict( + type="BEVFormerEncoder", + num_layers=6, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type="BEVFormerLayer", + attn_cfgs=[ + dict( + type="TemporalSelfAttention", embed_dims=_dim_, num_levels=1 + ), + dict( + type="SpatialCrossAttention", + pc_range=point_cloud_range, + deformable_attention=dict( + type="MSDeformableAttention3D", + embed_dims=_dim_, + num_points=8, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), + decoder=dict( + type="DetectionTransformerDecoder", + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type="DetrTransformerDecoderLayer", + attn_cfgs=[ + dict( + type="MultiheadAttention", + embed_dims=_dim_, + 
num_heads=8, + dropout=0.1, + ), + dict( + type="CustomMSDeformableAttention", + embed_dims=_dim_, + num_levels=1, + ), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), + ), + bbox_coder=dict( + type="NMSFreeCoder", + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=10, + ), + positional_encoding=dict( + type="LearnedPositionalEncoding", + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + ), + loss_cls=dict( + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_iou=dict(type="GIoULoss", loss_weight=0.0), + ), + seg_head=dict( + type='PansegformerHead', + bev_h=bev_h_, + bev_w=bev_w_, + canvas_size=canvas_size, + pc_range=point_cloud_range, + num_query=300, + num_classes=4, + num_things_classes=3, + num_stuff_classes=1, + in_channels=2048, + sync_cls_avg_factor=True, + as_two_stage=False, + with_box_refine=True, + transformer=dict( + type='SegDeformableTransformer', + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=_dim_, + num_levels=_num_levels_, + ), + feedforward_channels=_feed_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'ffn', 'norm'))), + decoder=dict( + type='DeformableDetrTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + dict( + type='MultiScaleDeformableAttention', + embed_dims=_dim_, + num_levels=_num_levels_, + ) + ], + feedforward_channels=_feed_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm') + ), + ), + ), + positional_encoding=dict( + type='SinePositionalEncoding', + num_feats=_dim_half_, + normalize=True, + offset=-0.5), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0), + loss_mask=dict(type='DiceLoss', loss_weight=2.0), + thing_transformer_head=dict(type='SegMaskHead',d_model=_dim_,nhead=8,num_decoder_layers=4), + stuff_transformer_head=dict(type='SegMaskHead',d_model=_dim_,nhead=8,num_decoder_layers=6,self_attn=True), + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0), + ), + assigner_with_mask=dict( + type='HungarianAssigner_multi_info', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0), + mask_cost=dict(type='DiceCost', weight=2.0), + ), + sampler =dict(type='PseudoSampler'), + sampler_with_mask =dict(type='PseudoSampler_segformer'), + ), + ), + + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type="HungarianAssigner3D", + 
cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + iou_cost=dict( + type="IoUCost", weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. + pc_range=point_cloud_range, + ), + ) + ), +) +dataset_type = "NuScenesE2EDataset" +data_root = "data/nuscenes/" +info_root = "data/infos/" +file_client_args = dict(backend="disk") +ann_file_train=info_root + f"nuscenes_infos_temporal_train.pkl" +ann_file_val=info_root + f"nuscenes_infos_temporal_val.pkl" +ann_file_test=info_root + f"nuscenes_infos_temporal_val.pkl" + + +train_pipeline = [ + dict(type="LoadMultiViewImageFromFilesInCeph", to_float32=True, file_client_args=file_client_args, img_root=data_root), + dict(type="PhotoMetricDistortionMultiViewImage"), + dict( + type="LoadAnnotations3D_E2E", + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + + with_future_anns=True, # occ_flow gt + with_ins_inds_3d=True, # ins_inds + ins_inds_add_1=True, # ins_inds start from 1 + ), + + dict(type='GenerateOccFlowLabels', grid_conf=occflow_grid_conf, ignore_index=255, only_vehicle=True, + filter_invisible=False), # NOTE: Currently vis_token is not in pkl + + dict(type="ObjectRangeFilterTrack", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilterTrack", classes=class_names), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type="DefaultFormatBundle3D", class_names=class_names), + dict( + type="CustomCollect3D", + keys=[ + "gt_bboxes_3d", + "gt_labels_3d", + "gt_inds", + "img", + "timestamp", + "l2g_r_mat", + "l2g_t", + "gt_fut_traj", + "gt_fut_traj_mask", + "gt_past_traj", + "gt_past_traj_mask", + "gt_sdc_bbox", + "gt_sdc_label", + "gt_sdc_fut_traj", + "gt_sdc_fut_traj_mask", + "gt_lane_labels", + "gt_lane_bboxes", + "gt_lane_masks", + # Occ gt + # "gt_segmentation", + # "gt_instance", + # "gt_centerness", + # "gt_offset", + # "gt_flow", + # "gt_backward_flow", + # "gt_occ_has_invalid_frame", + # "gt_occ_img_is_valid", + # # gt future bbox for plan + # "gt_future_boxes", + # "gt_future_labels", + # # planning + # "sdc_planning", + # "sdc_planning_mask", + # "command", + ], + ), +] +test_pipeline = [ + dict(type='LoadMultiViewImageFromFilesInCeph', to_float32=True, + file_client_args=file_client_args, img_root=data_root), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type='LoadAnnotations3D_E2E', + with_bbox_3d=False, + with_label_3d=False, + with_attr_label=False, + + with_future_anns=True, + with_ins_inds_3d=False, + ins_inds_add_1=True, # ins_inds start from 1 + ), + dict(type='GenerateOccFlowLabels', grid_conf=occflow_grid_conf, ignore_index=255, only_vehicle=True, + filter_invisible=False), + dict( + type="MultiScaleFlipAug3D", + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict( + type="CustomCollect3D", keys=[ + "img", + "timestamp", + "l2g_r_mat", + "l2g_t", + "gt_lane_labels", + "gt_lane_bboxes", + "gt_lane_masks", + "gt_segmentation", + "gt_instance", + "gt_centerness", + "gt_offset", + "gt_flow", + "gt_backward_flow", + "gt_occ_has_invalid_frame", + "gt_occ_img_is_valid", + # planning + "sdc_planning", + "sdc_planning_mask", + "command", + ] + ), + ], + ), +] +data = dict( + samples_per_gpu=1, + workers_per_gpu=1, + train=dict( + type=dataset_type, + file_client_args=file_client_args, + 
data_root=data_root, + ann_file=ann_file_train, + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + use_valid_flag=True, + patch_size=patch_size, + canvas_size=canvas_size, + bev_size=(bev_h_, bev_w_), + queue_length=queue_length, + predict_steps=predict_steps, + past_steps=past_steps, + fut_steps=fut_steps, + use_nonlinear_optimizer=use_nonlinear_optimizer, + occ_receptive_field=3, + occ_n_future=occ_n_future_max, + occ_filter_invalid_sample=False, + + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d="LiDAR", + ), + val=dict( + type=dataset_type, + file_client_args=file_client_args, + data_root=data_root, + ann_file=ann_file_val, + pipeline=test_pipeline, + patch_size=patch_size, + canvas_size=canvas_size, + bev_size=(bev_h_, bev_w_), + predict_steps=predict_steps, + past_steps=past_steps, + fut_steps=fut_steps, + use_nonlinear_optimizer=use_nonlinear_optimizer, + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + eval_mod=['det', 'track', 'map'], + + occ_receptive_field=3, + occ_n_future=occ_n_future_max, + occ_filter_invalid_sample=False, + ), + test=dict( + type=dataset_type, + file_client_args=file_client_args, + data_root=data_root, + test_mode=True, + ann_file=ann_file_test, + pipeline=test_pipeline, + patch_size=patch_size, + canvas_size=canvas_size, + bev_size=(bev_h_, bev_w_), + predict_steps=predict_steps, + past_steps=past_steps, + fut_steps=fut_steps, + occ_n_future=occ_n_future_max, + use_nonlinear_optimizer=use_nonlinear_optimizer, + classes=class_names, + modality=input_modality, + eval_mod=['det', 'map', 'track'], + ), + shuffler_sampler=dict(type="DistributedGroupSampler"), + nonshuffler_sampler=dict(type="DistributedSampler"), +) +optimizer = dict( + type="AdamW", + lr=2e-4, + paramwise_cfg=dict( + custom_keys={ + "img_backbone": dict(lr_mult=0.1), + } + ), + weight_decay=0.01, +) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy="CosineAnnealing", + warmup="linear", + warmup_iters=500, + warmup_ratio=1.0 / 3, + min_lr_ratio=1e-3, +) +total_epochs = 6 +evaluation = dict(interval=6, pipeline=test_pipeline) +runner = dict(type="EpochBasedRunner", max_epochs=total_epochs) +log_config = dict( + interval=1, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")] +) +checkpoint_config = dict(interval=1) +load_from = "ckpts/bevformer_r101_dcn_24ep.pth" + +find_unused_parameters = True \ No newline at end of file diff --git a/adzoo/uniad/configs/stage1_track_map/base_track_map_b2d.py b/adzoo/uniad/configs/stage1_track_map/base_track_map_b2d.py new file mode 100644 index 0000000..2b0308d --- /dev/null +++ b/adzoo/uniad/configs/stage1_track_map/base_track_map_b2d.py @@ -0,0 +1,665 @@ +_base_ = ["../_base_/datasets/nus-3d.py", + "../_base_/default_runtime.py"] + +# Update-2023-06-12: +# [Enhance] Update some freezing args of UniAD +# [Bugfix] Reproduce the from-scratch results of stage1 +# 1. Remove loss_past_traj in stage1 training +# 2. 
Unfreeze neck and BN +# --> Reproduced tracking result: AMOTA 0.393 + + +# Unfreeze neck and BN, the from-scratch results of stage1 could be reproduced +plugin = True +# plugin_dir = "projects/mmdet3d_plugin/" +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0] +voxel_size = [0.2, 0.2, 8] +patch_size = [102.4, 102.4] +img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +# For nuScenes we usually do 10-class detection + +NameMapping = { + #=================vehicle================= + # bicycle + 'vehicle.bh.crossbike': 'bicycle', + "vehicle.diamondback.century": 'bicycle', + "vehicle.gazelle.omafiets": 'bicycle', + # car + "vehicle.chevrolet.impala": 'car', + "vehicle.dodge.charger_2020": 'car', + "vehicle.dodge.charger_police": 'car', + "vehicle.dodge.charger_police_2020": 'car', + "vehicle.lincoln.mkz_2017": 'car', + "vehicle.lincoln.mkz_2020": 'car', + "vehicle.mini.cooper_s_2021": 'car', + "vehicle.mercedes.coupe_2020": 'car', + "vehicle.ford.mustang": 'car', + "vehicle.nissan.patrol_2021": 'car', + "vehicle.audi.tt": 'car', + "vehicle.audi.etron": 'car', + "vehicle.ford.crown": 'car', + "vehicle.ford.mustang": 'car', + "vehicle.tesla.model3": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/FordCrown/SM_FordCrown_parked.SM_FordCrown_parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Charger/SM_ChargerParked.SM_ChargerParked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Lincoln/SM_LincolnParked.SM_LincolnParked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/MercedesCCC/SM_MercedesCCC_Parked.SM_MercedesCCC_Parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Mini2021/SM_Mini2021_parked.SM_Mini2021_parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/NissanPatrol2021/SM_NissanPatrol2021_parked.SM_NissanPatrol2021_parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/TeslaM3/SM_TeslaM3_parked.SM_TeslaM3_parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/VolkswagenT2/SM_VolkswagenT2_2021_Parked.SM_VolkswagenT2_2021_Parked": 'car', + # bus + # van + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/VolkswagenT2/SM_VolkswagenT2_2021_Parked.SM_VolkswagenT2_2021_Parked": "van", + "vehicle.ford.ambulance": "van", + # truck + "vehicle.carlamotors.firetruck": 'truck', + #========================================= + + #=================traffic sign============ + # traffic.speed_limit + "traffic.speed_limit.30": 'traffic_sign', + "traffic.speed_limit.40": 'traffic_sign', + "traffic.speed_limit.50": 'traffic_sign', + "traffic.speed_limit.60": 'traffic_sign', + "traffic.speed_limit.90": 'traffic_sign', + "traffic.speed_limit.120": 'traffic_sign', + + "traffic.stop": 'traffic_sign', + "traffic.yield": 'traffic_sign', + "traffic.traffic_light": 'traffic_light', + #========================================= + + #===================Construction=========== + "static.prop.warningconstruction" : 'traffic_cone', + "static.prop.warningaccident": 'traffic_cone', + "static.prop.trafficwarning": "traffic_cone", + + #===================Construction=========== + "static.prop.constructioncone": 'traffic_cone', + + #=================pedestrian============== + "walker.pedestrian.0001": 'pedestrian', + "walker.pedestrian.0004": 'pedestrian', + "walker.pedestrian.0005": 'pedestrian', + "walker.pedestrian.0007": 'pedestrian', + "walker.pedestrian.0013": 'pedestrian', + 
"walker.pedestrian.0014": 'pedestrian', + "walker.pedestrian.0017": 'pedestrian', + "walker.pedestrian.0018": 'pedestrian', + "walker.pedestrian.0019": 'pedestrian', + "walker.pedestrian.0020": 'pedestrian', + "walker.pedestrian.0022": 'pedestrian', + "walker.pedestrian.0025": 'pedestrian', + "walker.pedestrian.0035": 'pedestrian', + "walker.pedestrian.0041": 'pedestrian', + "walker.pedestrian.0046": 'pedestrian', + "walker.pedestrian.0047": 'pedestrian', + + # ========================================== + "static.prop.dirtdebris01": 'others', + "static.prop.dirtdebris02": 'others', +} + +eval_cfg = { + "dist_ths": [0.5, 1.0, 2.0, 4.0], + "dist_th_tp": 2.0, + "min_recall": 0.1, + "min_precision": 0.1, + "mean_ap_weight": 5, + "class_names":['car','van','truck','bicycle','traffic_sign','traffic_cone','traffic_light','pedestrian'], + "tp_metrics":['trans_err', 'scale_err', 'orient_err', 'vel_err'], + "err_name_maping":{'trans_err': 'mATE','scale_err': 'mASE','orient_err': 'mAOE','vel_err': 'mAVE','attr_err': 'mAAE'}, + "class_range":{'car':(50,50),'van':(50,50),'truck':(50,50),'bicycle':(40,40),'traffic_sign':(30,30),'traffic_cone':(30,30),'traffic_light':(30,30),'pedestrian':(40,40)} + } + + + + +class_names = [ +'car','van','truck','bicycle','traffic_sign','traffic_cone','traffic_light','pedestrian','others' +] + +input_modality = dict( + use_lidar=False, use_camera=True, use_radar=False, use_map=False, use_external=True +) +_dim_ = 256 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 +_num_levels_ = 4 +bev_h_ = 200 +bev_w_ = 200 +_feed_dim_ = _ffn_dim_ +_dim_half_ = _pos_dim_ +canvas_size = (bev_h_, bev_w_) + +# NOTE: You can change queue_length from 5 to 3 to save GPU memory, but at risk of performance drop. +queue_length = 5 # each sequence contains `queue_length` frames. 
+ +### traj prediction args ### +predict_steps = 12 +predict_modes = 6 +fut_steps = 4 +past_steps = 4 +use_nonlinear_optimizer = True + +## occflow setting +occ_n_future = 4 +occ_n_future_plan = 6 +occ_n_future_max = max([occ_n_future, occ_n_future_plan]) + +### planning ### +planning_steps = 6 +use_col_optim = True + +### Occ args ### +occflow_grid_conf = { + 'xbound': [-50.0, 50.0, 0.5], + 'ybound': [-50.0, 50.0, 0.5], + 'zbound': [-10.0, 10.0, 20.0], +} + +# Other settings +train_gt_iou_threshold=0.3 + +model = dict( + type="UniAD", + gt_iou_threshold=train_gt_iou_threshold, + queue_length=queue_length, + use_grid_mask=True, + video_test_mode=True, + num_query=900, + num_classes=len(class_names), + pc_range=point_cloud_range, + img_backbone=dict( + type="ResNet", + depth=101, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=4, + norm_cfg=dict(type="BN2d", requires_grad=False), + norm_eval=True, + style="caffe", + dcn=dict( + type="DCNv2", deform_groups=1, fallback_on_stride=False + ), # original DCNv2 will print log when perform load_state_dict + stage_with_dcn=(False, False, True, True), + ), + img_neck=dict( + type="FPN", + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs="on_output", + num_outs=4, + relu_before_extra_convs=True, + ), + freeze_img_backbone=True, + freeze_img_neck=False, + freeze_bn=False, + score_thresh=0.4, + filter_score_thresh=0.35, + qim_args=dict( + qim_type="QIMBase", + merger_dropout=0, + update_query_pos=True, + fp_ratio=0.3, + random_drop=0.1, + ), # hyper-param for query dropping mentioned in MOTR + mem_args=dict( + memory_bank_type="MemoryBank", + memory_bank_score_thresh=0.0, + memory_bank_len=4, + ), + loss_cfg=dict( + type="ClipMatcher", + num_classes=len(class_names), + weight_dict=None, + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2], + assigner=dict( + type="HungarianAssigner3DTrack", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + pc_range=point_cloud_range, + ), + loss_cls=dict( + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_past_traj_weight=0.0, + ), # loss cfg for tracking + pts_bbox_head=dict( + type="BEVFormerTrackHead", + bev_h=bev_h_, + bev_w=bev_w_, + num_query=900, + num_classes=len(class_names), + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + past_steps=past_steps, + fut_steps=fut_steps, + transformer=dict( + type="UniADPerceptionTransformer", + rotate_prev_bev=True, + use_shift=True, + use_can_bus=True, + embed_dims=_dim_, + encoder=dict( + type="BEVFormerEncoder", + num_layers=6, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type="BEVFormerLayer", + attn_cfgs=[ + dict( + type="TemporalSelfAttention", embed_dims=_dim_, num_levels=1 + ), + dict( + type="SpatialCrossAttention", + pc_range=point_cloud_range, + deformable_attention=dict( + type="MSDeformableAttention3D", + embed_dims=_dim_, + num_points=8, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.0, + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), + decoder=dict( + type="DetectionTransformerDecoder", + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type="DetrTransformerDecoderLayer", + attn_cfgs=[ + dict( + 
type="MultiheadAttention", + embed_dims=_dim_, + num_heads=8, + dropout=0.0, + ), + dict( + type="CustomMSDeformableAttention", + embed_dims=_dim_, + num_levels=1, + ), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.0, + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), + ), + bbox_coder=dict( + type="NMSFreeCoder", + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=len(class_names), + ), + positional_encoding=dict( + type="LearnedPositionalEncoding", + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + ), + loss_cls=dict( + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_iou=dict(type="GIoULoss", loss_weight=0.0), + ), + seg_head=dict( + type='PansegformerHead', + bev_h=bev_h_, + bev_w=bev_w_, + canvas_size=canvas_size, + pc_range=point_cloud_range, + num_query=300, + num_classes=6, + num_things_classes=6, + num_stuff_classes=0, + in_channels=2048, + sync_cls_avg_factor=True, + as_two_stage=False, + with_box_refine=True, + transformer=dict( + type='SegDeformableTransformer', + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=_dim_, + num_levels=_num_levels_, + ), + feedforward_channels=_feed_dim_, + ffn_dropout=0.0, + operation_order=('self_attn', 'norm', 'ffn', 'norm'))), + decoder=dict( + type='DeformableDetrTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.0), + dict( + type='MultiScaleDeformableAttention', + embed_dims=_dim_, + num_levels=_num_levels_, + ) + ], + feedforward_channels=_feed_dim_, + ffn_dropout=0.0, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm') + ), + ), + ), + positional_encoding=dict( + type='SinePositionalEncoding', + num_feats=_dim_half_, + normalize=True, + offset=-0.5), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0), + loss_mask=dict(type='DiceLoss', loss_weight=2.0), + thing_transformer_head=dict(type='SegMaskHead',d_model=_dim_,nhead=8,num_decoder_layers=4), + stuff_transformer_head=dict(type='SegMaskHead',d_model=_dim_,nhead=8,num_decoder_layers=6,self_attn=True), + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0), + ), + assigner_with_mask=dict( + type='HungarianAssigner_multi_info', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0), + mask_cost=dict(type='DiceCost', weight=2.0), + ), + sampler =dict(type='PseudoSampler'), + sampler_with_mask =dict(type='PseudoSampler_segformer'), + ), + ), + + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( 
+ type="HungarianAssigner3D", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + iou_cost=dict( + type="IoUCost", weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. + pc_range=point_cloud_range, + ), + ) + ), +) +dataset_type = "B2D_E2E_Dataset" +data_root = "data/bench2drive" +info_root = "data/infos" +map_root = "data/bench2drive/maps" +map_file = "data/infos/b2d_map_infos.pkl" +file_client_args = dict(backend="disk") +ann_file_train=info_root + f"/b2d_infos_train.pkl" +ann_file_val=info_root + f"/b2d_infos_val.pkl" +ann_file_test=info_root + f"/b2d_infos_val.pkl" + + +train_pipeline = [ + dict(type="LoadMultiViewImageFromFilesInCeph", to_float32=True, file_client_args=file_client_args, img_root=data_root), + dict(type="PhotoMetricDistortionMultiViewImage"), + dict( + type="LoadAnnotations3D_E2E", + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + with_vis_token=False, + with_future_anns=False, # occ_flow gt + with_ins_inds_3d=True, # ins_inds + ins_inds_add_1=True, # ins_inds start from 1 + ), + + # dict(type='GenerateOccFlowLabels', grid_conf=occflow_grid_conf, ignore_index=255, only_vehicle=True, + # filter_invisible=False), # NOTE: Currently vis_token is not in pkl + + dict(type="ObjectRangeFilterTrack", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilterTrack", classes=class_names), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type="DefaultFormatBundle3D", class_names=class_names), + dict( + type="CustomCollect3D", + keys=[ + "gt_bboxes_3d", + "gt_labels_3d", + "gt_inds", + "img", + "timestamp", + "l2g_r_mat", + "l2g_t", + "gt_fut_traj", + "gt_fut_traj_mask", + "gt_past_traj", + "gt_past_traj_mask", + "gt_sdc_bbox", + "gt_sdc_label", + "gt_sdc_fut_traj", + "gt_sdc_fut_traj_mask", + "gt_lane_labels", + "gt_lane_bboxes", + "gt_lane_masks", + # Occ gt + # "gt_segmentation", + # "gt_instance", + # "gt_centerness", + # "gt_offset", + # "gt_flow", + # "gt_backward_flow", + # "gt_occ_has_invalid_frame", + # "gt_occ_img_is_valid", + # # gt future bbox for plan + # "gt_future_boxes", + # "gt_future_labels", + # # planning + # "sdc_planning", + # "sdc_planning_mask", + # "command", + ], + ), +] +test_pipeline = [ + dict(type='LoadMultiViewImageFromFilesInCeph', to_float32=True, + file_client_args=file_client_args, img_root=data_root), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type='LoadAnnotations3D_E2E', + with_bbox_3d=False, + with_label_3d=False, + with_attr_label=False, + with_vis_token=False, + with_future_anns=False, + with_ins_inds_3d=False, + ins_inds_add_1=True, # ins_inds start from 1 + ), + # dict(type='GenerateOccFlowLabels', grid_conf=occflow_grid_conf, ignore_index=255, only_vehicle=True, + # filter_invisible=False), + dict( + type="MultiScaleFlipAug3D", + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict( + type="CustomCollect3D", keys=[ + "img", + "timestamp", + "l2g_r_mat", + "l2g_t", + "gt_lane_labels", + "gt_lane_bboxes", + "gt_lane_masks", + # "gt_segmentation", + # "gt_instance", + # "gt_centerness", + # "gt_offset", + # "gt_flow", + # "gt_backward_flow", + # "gt_occ_has_invalid_frame", + # "gt_occ_img_is_valid", + # # planning + # "sdc_planning", + # "sdc_planning_mask", + # "command", + ] + ), + ], + 
), +] +data = dict( + samples_per_gpu=1, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=ann_file_train, + pipeline=train_pipeline, + classes=class_names, + name_mapping=NameMapping, + map_root=map_root, + map_file=map_file, + modality=input_modality, + patch_size=patch_size, + bev_size=(bev_h_, bev_w_), + queue_length=queue_length, + predict_frames=predict_steps, + past_frames=past_steps, + future_frames=fut_steps, + point_cloud_range=point_cloud_range, + box_type_3d="LiDAR", + ), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=ann_file_val, + pipeline=test_pipeline, + name_mapping=NameMapping, + map_root=map_root, + map_file=map_file, + bev_size=(bev_h_, bev_w_), + predict_frames=predict_steps, + past_frames=past_steps, + future_frames=fut_steps, + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + point_cloud_range=point_cloud_range, + eval_cfg=eval_cfg, + #eval_mod=['det', 'track', 'map'], + box_type_3d="LiDAR", + ), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=ann_file_val, + pipeline=test_pipeline, + name_mapping=NameMapping, + map_root=map_root, + map_file=map_file, + bev_size=(bev_h_, bev_w_), + predict_frames=predict_steps, + past_frames=past_steps, + future_frames=fut_steps, + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + point_cloud_range=point_cloud_range, + eval_cfg=eval_cfg, + #eval_mod=['det', 'track', 'map'], + box_type_3d="LiDAR", + ), + shuffler_sampler=dict(type="DistributedGroupSampler"), + nonshuffler_sampler=dict(type="DistributedSampler"), +) + +optimizer = dict( + type="AdamW", + lr=2e-4, + paramwise_cfg=dict( + custom_keys={ + "img_backbone": dict(lr_mult=0.1), + } + ), + weight_decay=0.01, +) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + by_epoch=False, + policy="CosineAnnealing", + warmup="linear", + warmup_iters=500, + warmup_ratio=1.0 / 3, + min_lr_ratio=1e-3, +) +total_epochs = 1 +evaluation = dict(interval=1, pipeline=test_pipeline) +runner = dict(type="EpochBasedRunner", max_epochs=total_epochs) +log_config = dict( + interval=1, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")] +) +checkpoint_config = dict(interval=3000, by_epoch=False) + +find_unused_parameters = True \ No newline at end of file diff --git a/adzoo/uniad/configs/stage1_track_map/tiny_track_map_b2d.py b/adzoo/uniad/configs/stage1_track_map/tiny_track_map_b2d.py new file mode 100644 index 0000000..c94ff40 --- /dev/null +++ b/adzoo/uniad/configs/stage1_track_map/tiny_track_map_b2d.py @@ -0,0 +1,656 @@ +_base_ = ["../_base_/datasets/nus-3d.py", + "../_base_/default_runtime.py"] + +# Update-2023-06-12: +# [Enhance] Update some freezing args of UniAD +# [Bugfix] Reproduce the from-scratch results of stage1 +# 1. Remove loss_past_traj in stage1 training +# 2. 
Unfreeze neck and BN + # --> Reproduced tracking result: AMOTA 0.393 + + +plugin = True +# plugin_dir = "projects/mmdet3d_plugin/" +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0] +voxel_size = [0.2, 0.2, 8] +patch_size = [102.4, 102.4] +img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +# For Bench2Drive we do 9-class detection (see class_names below) + +NameMapping = { + #=================vehicle================= + # bicycle + 'vehicle.bh.crossbike': 'bicycle', + "vehicle.diamondback.century": 'bicycle', + "vehicle.gazelle.omafiets": 'bicycle', + # car + "vehicle.chevrolet.impala": 'car', + "vehicle.dodge.charger_2020": 'car', + "vehicle.dodge.charger_police": 'car', + "vehicle.dodge.charger_police_2020": 'car', + "vehicle.lincoln.mkz_2017": 'car', + "vehicle.lincoln.mkz_2020": 'car', + "vehicle.mini.cooper_s_2021": 'car', + "vehicle.mercedes.coupe_2020": 'car', + "vehicle.ford.mustang": 'car', + "vehicle.nissan.patrol_2021": 'car', + "vehicle.audi.tt": 'car', + "vehicle.audi.etron": 'car', + "vehicle.ford.crown": 'car', + "vehicle.tesla.model3": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/FordCrown/SM_FordCrown_parked.SM_FordCrown_parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Charger/SM_ChargerParked.SM_ChargerParked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Lincoln/SM_LincolnParked.SM_LincolnParked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/MercedesCCC/SM_MercedesCCC_Parked.SM_MercedesCCC_Parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Mini2021/SM_Mini2021_parked.SM_Mini2021_parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/NissanPatrol2021/SM_NissanPatrol2021_parked.SM_NissanPatrol2021_parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/TeslaM3/SM_TeslaM3_parked.SM_TeslaM3_parked": 'car', + # bus + # van + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/VolkswagenT2/SM_VolkswagenT2_2021_Parked.SM_VolkswagenT2_2021_Parked": "van", + "vehicle.ford.ambulance": "van", + # truck + "vehicle.carlamotors.firetruck": 'truck', + #========================================= + + #=================traffic sign============ + # traffic.speed_limit + "traffic.speed_limit.30": 'traffic_sign', + "traffic.speed_limit.40": 'traffic_sign', + "traffic.speed_limit.50": 'traffic_sign', + "traffic.speed_limit.60": 'traffic_sign', + "traffic.speed_limit.90": 'traffic_sign', + "traffic.speed_limit.120": 'traffic_sign', + + "traffic.stop": 'traffic_sign', + "traffic.yield": 'traffic_sign', + "traffic.traffic_light": 'traffic_light', + #========================================= + + #===================Construction=========== + "static.prop.warningconstruction": 'traffic_cone', + "static.prop.warningaccident": 'traffic_cone', + "static.prop.trafficwarning": "traffic_cone", + "static.prop.constructioncone": 'traffic_cone', + + #=================pedestrian============== + "walker.pedestrian.0001": 'pedestrian', + "walker.pedestrian.0004": 'pedestrian', + "walker.pedestrian.0005": 'pedestrian', + "walker.pedestrian.0007": 'pedestrian', + "walker.pedestrian.0013": 'pedestrian', +
"walker.pedestrian.0014": 'pedestrian', + "walker.pedestrian.0017": 'pedestrian', + "walker.pedestrian.0018": 'pedestrian', + "walker.pedestrian.0019": 'pedestrian', + "walker.pedestrian.0020": 'pedestrian', + "walker.pedestrian.0022": 'pedestrian', + "walker.pedestrian.0025": 'pedestrian', + "walker.pedestrian.0035": 'pedestrian', + "walker.pedestrian.0041": 'pedestrian', + "walker.pedestrian.0046": 'pedestrian', + "walker.pedestrian.0047": 'pedestrian', + + # ========================================== + "static.prop.dirtdebris01": 'others', + "static.prop.dirtdebris02": 'others', +} + +eval_cfg = { + "dist_ths": [0.5, 1.0, 2.0, 4.0], + "dist_th_tp": 2.0, + "min_recall": 0.1, + "min_precision": 0.1, + "mean_ap_weight": 5, + "class_names":['car','van','truck','bicycle','traffic_sign','traffic_cone','traffic_light','pedestrian'], + "tp_metrics":['trans_err', 'scale_err', 'orient_err', 'vel_err'], + "err_name_maping":{'trans_err': 'mATE','scale_err': 'mASE','orient_err': 'mAOE','vel_err': 'mAVE','attr_err': 'mAAE'}, + "class_range":{'car':(50,50),'van':(50,50),'truck':(50,50),'bicycle':(40,40),'traffic_sign':(30,30),'traffic_cone':(30,30),'traffic_light':(30,30),'pedestrian':(40,40)} + } + +class_names = [ +'car','van','truck','bicycle','traffic_sign','traffic_cone','traffic_light','pedestrian','others' +] + +input_modality = dict( + use_lidar=False, use_camera=True, use_radar=False, use_map=False, use_external=True +) +_dim_ = 256 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 +_num_levels_ = 4 +bev_h_ = 100 +bev_w_ = 100 +_feed_dim_ = _ffn_dim_ +_dim_half_ = _pos_dim_ +canvas_size = (bev_h_*2, bev_w_*2) + +# NOTE: You can change queue_length from 5 to 3 to save GPU memory, but at risk of performance drop. +queue_length = 3 # each sequence contains `queue_length` frames. 
+ +### traj prediction args ### +predict_steps = 12 +predict_modes = 6 +fut_steps = 4 +past_steps = 4 +use_nonlinear_optimizer = True + +## occflow setting +occ_n_future = 4 +occ_n_future_plan = 6 +occ_n_future_max = max([occ_n_future, occ_n_future_plan]) + +### planning ### +planning_steps = 6 +use_col_optim = True + +### Occ args ### +occflow_grid_conf = { + 'xbound': [-50.0, 50.0, 0.5], + 'ybound': [-50.0, 50.0, 0.5], + 'zbound': [-10.0, 10.0, 20.0], +} + +# Other settings +train_gt_iou_threshold=0.3 + +model = dict( + type="UniAD", + gt_iou_threshold=train_gt_iou_threshold, + queue_length=queue_length, + use_grid_mask=True, + video_test_mode=True, + num_query=900, + num_classes=len(class_names), + pc_range=point_cloud_range, + img_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(1,2,3), + frozen_stages=4, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch'), + img_neck=dict( + type="FPN", + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs="on_output", + num_outs=4, + relu_before_extra_convs=True, + ), + freeze_img_backbone=True, + freeze_img_neck=False, + freeze_bn=False, + score_thresh=0.4, + filter_score_thresh=0.35, + qim_args=dict( + qim_type="QIMBase", + merger_dropout=0, + update_query_pos=True, + fp_ratio=0.3, + random_drop=0.1, + ), # hyper-param for query dropping mentioned in MOTR + mem_args=dict( + memory_bank_type="MemoryBank", + memory_bank_score_thresh=0.0, + memory_bank_len=4, + ), + loss_cfg=dict( + type="ClipMatcher", + num_classes=len(class_names), + weight_dict=None, + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2], + assigner=dict( + type="HungarianAssigner3DTrack", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + pc_range=point_cloud_range, + ), + loss_cls=dict( + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_past_traj_weight=0.0, + ), # loss cfg for tracking + pts_bbox_head=dict( + type="BEVFormerTrackHead", + bev_h=bev_h_, + bev_w=bev_w_, + num_query=900, + num_classes=len(class_names), + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + past_steps=past_steps, + fut_steps=fut_steps, + transformer=dict( + type="UniADPerceptionTransformer", + rotate_prev_bev=True, + use_shift=True, + use_can_bus=True, + embed_dims=_dim_, + encoder=dict( + type="BEVFormerEncoder", + num_layers=3, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type="BEVFormerLayer", + attn_cfgs=[ + dict( + type="TemporalSelfAttention", embed_dims=_dim_, num_levels=1 + ), + dict( + type="SpatialCrossAttention", + pc_range=point_cloud_range, + deformable_attention=dict( + type="MSDeformableAttention3D", + embed_dims=_dim_, + num_points=8, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.0, + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), + decoder=dict( + type="DetectionTransformerDecoder", + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type="DetrTransformerDecoderLayer", + attn_cfgs=[ + dict( + type="MultiheadAttention", + embed_dims=_dim_, + num_heads=8, + dropout=0.0, + ), + dict( + type="CustomMSDeformableAttention", + embed_dims=_dim_, + num_levels=1, + ), + ], + 
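+ # Each decoder layer applies standard multi-head self-attention over the object queries, then deformable cross-attention into the single-scale BEV (hence num_levels=1 above).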
feedforward_channels=_ffn_dim_, + ffn_dropout=0.0, + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), + ), + bbox_coder=dict( + type="NMSFreeCoder", + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=len(class_names), + ), + positional_encoding=dict( + type="LearnedPositionalEncoding", + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + ), + loss_cls=dict( + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_iou=dict(type="GIoULoss", loss_weight=0.0), + ), + seg_head=dict( + type='PansegformerHead', + bev_h=bev_h_*2, + bev_w=bev_w_*2, + canvas_size=canvas_size, + pc_range=point_cloud_range, + num_query=300, + num_classes=6, + num_things_classes=6, + num_stuff_classes=0, + in_channels=2048, + sync_cls_avg_factor=True, + as_two_stage=False, + with_box_refine=True, + transformer=dict( + type='SegDeformableTransformer', + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=_dim_, + num_levels=_num_levels_, + ), + feedforward_channels=_feed_dim_, + ffn_dropout=0.0, + operation_order=('self_attn', 'norm', 'ffn', 'norm'))), + decoder=dict( + type='DeformableDetrTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.0), + dict( + type='MultiScaleDeformableAttention', + embed_dims=_dim_, + num_levels=_num_levels_, + ) + ], + feedforward_channels=_feed_dim_, + ffn_dropout=0.0, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm') + ), + ), + ), + positional_encoding=dict( + type='SinePositionalEncoding', + num_feats=_dim_half_, + normalize=True, + offset=-0.5), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0), + loss_mask=dict(type='DiceLoss', loss_weight=2.0), + thing_transformer_head=dict(type='SegMaskHead',d_model=_dim_,nhead=8,num_decoder_layers=4), + stuff_transformer_head=dict(type='SegMaskHead',d_model=_dim_,nhead=8,num_decoder_layers=6,self_attn=True), + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0), + ), + assigner_with_mask=dict( + type='HungarianAssigner_multi_info', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0), + mask_cost=dict(type='DiceCost', weight=2.0), + ), + sampler =dict(type='PseudoSampler'), + sampler_with_mask =dict(type='PseudoSampler_segformer'), + ), + ), + + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type="HungarianAssigner3D", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + iou_cost=dict( + type="IoUCost", 
weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. + pc_range=point_cloud_range, + ), + ) + ), +) +dataset_type = "B2D_E2E_Dataset" +data_root = "data/bench2drive" +info_root = "data/infos" +map_root = "data/bench2drive/maps" +map_file = "data/infos/b2d_map_infos.pkl" +file_client_args = dict(backend="disk") +ann_file_train=info_root + f"/b2d_infos_train.pkl" +ann_file_val=info_root + f"/b2d_infos_val.pkl" +ann_file_test=info_root + f"/b2d_infos_val.pkl" + + +train_pipeline = [ + dict(type="LoadMultiViewImageFromFilesInCeph", to_float32=True, file_client_args=file_client_args, img_root=data_root), + dict(type="PhotoMetricDistortionMultiViewImage"), + dict( + type="LoadAnnotations3D_E2E", + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + + with_future_anns=False, # occ_flow gt + with_ins_inds_3d=True, # ins_inds + ins_inds_add_1=True, # ins_inds start from 1 + ), + + # dict(type='GenerateOccFlowLabels', grid_conf=occflow_grid_conf, ignore_index=255, only_vehicle=True, + # filter_invisible=False), # NOTE: Currently vis_token is not in pkl + + dict(type="ObjectRangeFilterTrack", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilterTrack", classes=class_names), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type="DefaultFormatBundle3D", class_names=class_names), + dict( + type="CustomCollect3D", + keys=[ + "gt_bboxes_3d", + "gt_labels_3d", + "gt_inds", + "img", + "timestamp", + "l2g_r_mat", + "l2g_t", + "gt_fut_traj", + "gt_fut_traj_mask", + "gt_past_traj", + "gt_past_traj_mask", + "gt_sdc_bbox", + "gt_sdc_label", + "gt_sdc_fut_traj", + "gt_sdc_fut_traj_mask", + "gt_lane_labels", + "gt_lane_bboxes", + "gt_lane_masks", + # Occ gt + # "gt_segmentation", + # "gt_instance", + # "gt_centerness", + # "gt_offset", + # "gt_flow", + # "gt_backward_flow", + # "gt_occ_has_invalid_frame", + # "gt_occ_img_is_valid", + # # gt future bbox for plan + # "gt_future_boxes", + # "gt_future_labels", + # # planning + # "sdc_planning", + # "sdc_planning_mask", + # "command", + ], + ), +] +test_pipeline = [ + dict(type='LoadMultiViewImageFromFilesInCeph', to_float32=True, + file_client_args=file_client_args, img_root=data_root), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type='LoadAnnotations3D_E2E', + with_bbox_3d=False, + with_label_3d=False, + with_attr_label=False, + + with_future_anns=False, + with_ins_inds_3d=False, + ins_inds_add_1=True, # ins_inds start from 1 + ), + # dict(type='GenerateOccFlowLabels', grid_conf=occflow_grid_conf, ignore_index=255, only_vehicle=True, + # filter_invisible=False), + dict( + type="MultiScaleFlipAug3D", + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict( + type="CustomCollect3D", keys=[ + "img", + # "timestamp", + "l2g_r_mat", + "l2g_t", + "gt_lane_labels", + "gt_lane_bboxes", + "gt_lane_masks", + # "gt_segmentation", + # "gt_instance", + # "gt_centerness", + # "gt_offset", + # "gt_flow", + # "gt_backward_flow", + # "gt_occ_has_invalid_frame", + # "gt_occ_img_is_valid", + # # planning + # "sdc_planning", + # "sdc_planning_mask", + # "command", + ] + ), + ], + ), +] +data = dict( + samples_per_gpu=1, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=ann_file_train, + pipeline=train_pipeline, + classes=class_names, + 
name_mapping=NameMapping, + map_root=map_root, + map_file=map_file, + modality=input_modality, + patch_size=patch_size, + bev_size=(bev_h_, bev_w_), + queue_length=queue_length, + predict_frames=predict_steps, + past_frames=past_steps, + future_frames=fut_steps, + point_cloud_range=point_cloud_range, + box_type_3d="LiDAR", + ), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=ann_file_val, + pipeline=test_pipeline, + name_mapping=NameMapping, + map_root=map_root, + map_file=map_file, + bev_size=(bev_h_, bev_w_), + predict_frames=predict_steps, + past_frames=past_steps, + future_frames=fut_steps, + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + point_cloud_range=point_cloud_range, + eval_cfg=eval_cfg, + #eval_mod=['det', 'track', 'map'], + box_type_3d="LiDAR", + ), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=ann_file_val, + pipeline=test_pipeline, + name_mapping=NameMapping, + map_root=map_root, + map_file=map_file, + bev_size=(bev_h_, bev_w_), + predict_frames=predict_steps, + past_frames=past_steps, + future_frames=fut_steps, + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + point_cloud_range=point_cloud_range, + eval_cfg=eval_cfg, + #eval_mod=['det', 'track', 'map'], + box_type_3d="LiDAR", + ), + shuffler_sampler=dict(type="DistributedGroupSampler"), + nonshuffler_sampler=dict(type="DistributedSampler"), +) +optimizer = dict( + type="AdamW", + lr=2e-4, + paramwise_cfg=dict( + custom_keys={ + "img_backbone": dict(lr_mult=0.1), + } + ), + weight_decay=0.01, +) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + by_epoch=False, + policy="CosineAnnealing", + warmup="linear", + warmup_iters=500, + warmup_ratio=1.0 / 3, + min_lr_ratio=1e-3, +) +total_epochs = 1 +evaluation = dict(interval=1, pipeline=test_pipeline) +runner = dict(type="EpochBasedRunner", max_epochs=total_epochs) +log_config = dict( + interval=1, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")] +) +checkpoint_config = dict(interval=3000, by_epoch=False) + +find_unused_parameters = True \ No newline at end of file diff --git a/adzoo/uniad/configs/stage2_e2e/base_e2e.py b/adzoo/uniad/configs/stage2_e2e/base_e2e.py new file mode 100644 index 0000000..86a09fd --- /dev/null +++ b/adzoo/uniad/configs/stage2_e2e/base_e2e.py @@ -0,0 +1,696 @@ +_base_ = ["../_base_/datasets/nus-3d.py", + "../_base_/default_runtime.py"] + +# Update-2023-06-12: +# [Enhance] Update some freezing args of UniAD +plugin = True +# plugin_dir = "projects/mmdet3d_plugin/" +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0] +voxel_size = [0.2, 0.2, 8] +patch_size = [102.4, 102.4] +img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +# For nuScenes we usually do 10-class detection +class_names = [ + "car", + "truck", + "construction_vehicle", + "bus", + "trailer", + "barrier", + "motorcycle", + "bicycle", + "pedestrian", + "traffic_cone", +] +vehicle_id_list = [0, 1, 2, 3, 4, 6, 7] +group_id_list = [[0,1,2,3,4], [6,7], [8], [5,9]] +input_modality = dict( + use_lidar=False, use_camera=True, use_radar=False, use_map=False, use_external=True +) +_dim_ = 256 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 +_num_levels_ = 4 +bev_h_ = 200 +bev_w_ = 200 +_feed_dim_ = _ffn_dim_ +_dim_half_ = _pos_dim_ +canvas_size = (bev_h_, bev_w_) +queue_length = 3 # each 
sequence contains `queue_length` frames. + +### traj prediction args ### +predict_steps = 12 +predict_modes = 6 +fut_steps = 4 +past_steps = 4 +use_nonlinear_optimizer = True + +## occflow setting +occ_n_future = 4 +occ_n_future_plan = 6 +occ_n_future_max = max([occ_n_future, occ_n_future_plan]) + +### planning ### +planning_steps = 6 +use_col_optim = True + +### Occ args ### +occflow_grid_conf = { + 'xbound': [-50.0, 50.0, 0.5], + 'ybound': [-50.0, 50.0, 0.5], + 'zbound': [-10.0, 10.0, 20.0], +} + +# Other settings +train_gt_iou_threshold=0.3 + +model = dict( + type="UniAD", + gt_iou_threshold=train_gt_iou_threshold, + queue_length=queue_length, + use_grid_mask=True, + video_test_mode=True, + num_query=900, + num_classes=10, + vehicle_id_list=vehicle_id_list, + pc_range=point_cloud_range, + img_backbone=dict( + type="ResNet", + depth=101, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=4, + norm_cfg=dict(type="BN2d", requires_grad=False), + norm_eval=True, + style="caffe", + dcn=dict( + type="DCNv2", deform_groups=1, fallback_on_stride=False + ), # original DCNv2 will print log when perform load_state_dict + stage_with_dcn=(False, False, True, True), + ), + img_neck=dict( + type="FPN", + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs="on_output", + num_outs=4, + relu_before_extra_convs=True, + ), + freeze_img_backbone=True, + freeze_img_neck=True, + freeze_bn=True, + freeze_bev_encoder=True, + score_thresh=0.4, + filter_score_thresh=0.35, + qim_args=dict( + qim_type="QIMBase", + merger_dropout=0, + update_query_pos=True, + fp_ratio=0.3, + random_drop=0.1, + ), # hyper-param for query dropping mentioned in MOTR + mem_args=dict( + memory_bank_type="MemoryBank", + memory_bank_score_thresh=0.0, + memory_bank_len=4, + ), + loss_cfg=dict( + type="ClipMatcher", + num_classes=10, + weight_dict=None, + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2], + assigner=dict( + type="HungarianAssigner3DTrack", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + pc_range=point_cloud_range, + ), + loss_cls=dict( + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + ), # loss cfg for tracking + pts_bbox_head=dict( + type="BEVFormerTrackHead", + bev_h=bev_h_, + bev_w=bev_w_, + num_query=900, + num_classes=10, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + past_steps=past_steps, + fut_steps=fut_steps, + transformer=dict( + type="UniADPerceptionTransformer", + rotate_prev_bev=True, + use_shift=True, + use_can_bus=True, + embed_dims=_dim_, + encoder=dict( + type="BEVFormerEncoder", + num_layers=6, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type="BEVFormerLayer", + attn_cfgs=[ + dict( + type="TemporalSelfAttention", embed_dims=_dim_, num_levels=1 + ), + dict( + type="SpatialCrossAttention", + pc_range=point_cloud_range, + deformable_attention=dict( + type="MSDeformableAttention3D", + embed_dims=_dim_, + num_points=8, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), + decoder=dict( + type="DetectionTransformerDecoder", + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type="DetrTransformerDecoderLayer", + 
attn_cfgs=[ + dict( + type="MultiheadAttention", + embed_dims=_dim_, + num_heads=8, + dropout=0.1, + ), + dict( + type="CustomMSDeformableAttention", + embed_dims=_dim_, + num_levels=1, + ), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), + ), + bbox_coder=dict( + type="NMSFreeCoder", + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=10, + ), + positional_encoding=dict( + type="LearnedPositionalEncoding", + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + ), + loss_cls=dict( + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_iou=dict(type="GIoULoss", loss_weight=0.0), + ), + seg_head=dict( + type='PansegformerHead', + bev_h=bev_h_, + bev_w=bev_w_, + canvas_size=canvas_size, + pc_range=point_cloud_range, + num_query=300, + num_classes=4, + num_things_classes=3, + num_stuff_classes=1, + in_channels=2048, + sync_cls_avg_factor=True, + as_two_stage=False, + with_box_refine=True, + transformer=dict( + type='SegDeformableTransformer', + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=_dim_, + num_levels=_num_levels_, + ), + feedforward_channels=_feed_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'ffn', 'norm'))), + decoder=dict( + type='DeformableDetrTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + dict( + type='MultiScaleDeformableAttention', + embed_dims=_dim_, + num_levels=_num_levels_, + ) + ], + feedforward_channels=_feed_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm') + ), + ), + ), + positional_encoding=dict( + type='SinePositionalEncoding', + num_feats=_dim_half_, + normalize=True, + offset=-0.5), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0), + loss_mask=dict(type='DiceLoss', loss_weight=2.0), + thing_transformer_head=dict(type='SegMaskHead',d_model=_dim_,nhead=8,num_decoder_layers=4), + stuff_transformer_head=dict(type='SegMaskHead',d_model=_dim_,nhead=8,num_decoder_layers=6,self_attn=True), + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0), + ), + assigner_with_mask=dict( + type='HungarianAssigner_multi_info', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0), + mask_cost=dict(type='DiceCost', weight=2.0), + ), + sampler =dict(type='PseudoSampler'), + sampler_with_mask =dict(type='PseudoSampler_segformer'), + ), + ), + occ_head=dict( + type='OccHead', + + grid_conf=occflow_grid_conf, + ignore_index=255, + + bev_proj_dim=256, + bev_proj_nlayers=4, + + # Transformer + attn_mask_thresh=0.3, + 
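+ # NOTE (assumption): attn_mask_thresh presumably gates Mask2Former-style masked cross-attention, i.e. BEV cells whose predicted mask probability falls below 0.3 are excluded when the occupancy queries attend in the decoder below.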
transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=5, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, # change to 512 + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True), + feedforward_channels=2048, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + # Query + query_dim=256, + query_mlp_layers=3, + + aux_loss_weight=1., + loss_mask=dict( + type='FieryBinarySegmentationLoss', + use_top_k=True, + top_k_ratio=0.25, + future_discount=0.95, + loss_weight=5.0, + ignore_index=255, + ), + loss_dice=dict( + type='DiceLossWithMasks', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + ignore_index=255, + loss_weight=1.0), + + + pan_eval=True, + test_seg_thresh=0.1, + test_with_track_score=True, + ), + motion_head=dict( + type='MotionHead', + bev_h=bev_h_, + bev_w=bev_w_, + num_query=300, + num_classes=10, + predict_steps=predict_steps, + predict_modes=predict_modes, + embed_dims=_dim_, + loss_traj=dict(type='TrajLoss', + use_variance=True, + cls_loss_weight=0.5, + nll_loss_weight=0.5, + loss_weight_minade=0., + loss_weight_minfde=0.25), + num_cls_fcs=3, + pc_range=point_cloud_range, + group_id_list=group_id_list, + num_anchor=6, + use_nonlinear_optimizer=use_nonlinear_optimizer, + anchor_info_path='data/others/motion_anchor_infos_mode6.pkl', + transformerlayers=dict( + type='MotionTransformerDecoder', + pc_range=point_cloud_range, + embed_dims=_dim_, + num_layers=3, + transformerlayers=dict( + type='MotionTransformerAttentionLayer', + batch_first=True, + attn_cfgs=[ + dict( + type='MotionDeformableAttention', + num_steps=predict_steps, + embed_dims=_dim_, + num_levels=1, + num_heads=8, + num_points=4, + sample_index=-1), + ], + + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('cross_attn', 'norm', 'ffn', 'norm')), + ), + ), + planning_head=dict( + type='PlanningHeadSingleMode', + embed_dims=256, + planning_steps=planning_steps, + loss_planning=dict(type='PlanningLoss'), + loss_collision=[dict(type='CollisionLoss', delta=0.0, weight=2.5), + dict(type='CollisionLoss', delta=0.5, weight=1.0), + dict(type='CollisionLoss', delta=1.0, weight=0.25)], + use_col_optim=use_col_optim, + planning_eval=True, + with_adapter=True, + ), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type="HungarianAssigner3D", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + iou_cost=dict( + type="IoUCost", weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. 
+ pc_range=point_cloud_range, + ), + ) + ), +) +dataset_type = "NuScenesE2EDataset" +data_root = "data/nuscenes/" +info_root = "data/infos/" +file_client_args = dict(backend="disk") +ann_file_train=info_root + f"nuscenes_infos_temporal_train.pkl" +ann_file_val=info_root + f"nuscenes_infos_temporal_val.pkl" +ann_file_test=info_root + f"nuscenes_infos_temporal_val.pkl" + + +train_pipeline = [ + dict(type="LoadMultiViewImageFromFilesInCeph", to_float32=True, file_client_args=file_client_args, img_root=data_root), + dict(type="PhotoMetricDistortionMultiViewImage"), + dict( + type="LoadAnnotations3D_E2E", + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + + with_future_anns=True, # occ_flow gt + with_ins_inds_3d=True, # ins_inds + ins_inds_add_1=True, # ins_inds start from 1 + ), + + dict(type='GenerateOccFlowLabels', grid_conf=occflow_grid_conf, ignore_index=255, only_vehicle=True, + filter_invisible=False), # NOTE: Currently vis_token is not in pkl + + dict(type="ObjectRangeFilterTrack", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilterTrack", classes=class_names), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type="DefaultFormatBundle3D", class_names=class_names), + dict( + type="CustomCollect3D", + keys=[ + "gt_bboxes_3d", + "gt_labels_3d", + "gt_inds", + "img", + "timestamp", + "l2g_r_mat", + "l2g_t", + "gt_fut_traj", + "gt_fut_traj_mask", + "gt_past_traj", + "gt_past_traj_mask", + "gt_sdc_bbox", + "gt_sdc_label", + "gt_sdc_fut_traj", + "gt_sdc_fut_traj_mask", + "gt_lane_labels", + "gt_lane_bboxes", + "gt_lane_masks", + # Occ gt + "gt_segmentation", + "gt_instance", + "gt_centerness", + "gt_offset", + "gt_flow", + "gt_backward_flow", + "gt_occ_has_invalid_frame", + "gt_occ_img_is_valid", + # gt future bbox for plan + "gt_future_boxes", + "gt_future_labels", + # planning + "sdc_planning", + "sdc_planning_mask", + "command", + ], + ), +] +test_pipeline = [ + dict(type='LoadMultiViewImageFromFilesInCeph', to_float32=True, + file_client_args=file_client_args, img_root=data_root), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type='LoadAnnotations3D_E2E', + with_bbox_3d=False, + with_label_3d=False, + with_attr_label=False, + + with_future_anns=True, + with_ins_inds_3d=False, + ins_inds_add_1=True, # ins_inds start from 1 + ), + dict(type='GenerateOccFlowLabels', grid_conf=occflow_grid_conf, ignore_index=255, only_vehicle=True, + filter_invisible=False), + dict( + type="MultiScaleFlipAug3D", + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict( + type="CustomCollect3D", keys=[ + "img", + "timestamp", + "l2g_r_mat", + "l2g_t", + "gt_lane_labels", + "gt_lane_bboxes", + "gt_lane_masks", + "gt_segmentation", + "gt_instance", + "gt_centerness", + "gt_offset", + "gt_flow", + "gt_backward_flow", + "gt_occ_has_invalid_frame", + "gt_occ_img_is_valid", + # planning + "sdc_planning", + "sdc_planning_mask", + "command", + ] + ), + ], + ), +] +data = dict( + samples_per_gpu=1, + workers_per_gpu=8, + train=dict( + type=dataset_type, + file_client_args=file_client_args, + data_root=data_root, + ann_file=ann_file_train, + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + use_valid_flag=True, + patch_size=patch_size, + canvas_size=canvas_size, + bev_size=(bev_h_, bev_w_), + 
queue_length=queue_length, + predict_steps=predict_steps, + past_steps=past_steps, + fut_steps=fut_steps, + use_nonlinear_optimizer=use_nonlinear_optimizer, + + occ_receptive_field=3, + occ_n_future=occ_n_future_max, + occ_filter_invalid_sample=False, + + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d="LiDAR", + ), + val=dict( + type=dataset_type, + file_client_args=file_client_args, + data_root=data_root, + ann_file=ann_file_val, + pipeline=test_pipeline, + patch_size=patch_size, + canvas_size=canvas_size, + bev_size=(bev_h_, bev_w_), + predict_steps=predict_steps, + past_steps=past_steps, + fut_steps=fut_steps, + use_nonlinear_optimizer=use_nonlinear_optimizer, + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + eval_mod=['det', 'map', 'track','motion'], + + + occ_receptive_field=3, + occ_n_future=occ_n_future_max, + occ_filter_invalid_sample=False, + ), + test=dict( + type=dataset_type, + file_client_args=file_client_args, + data_root=data_root, + test_mode=True, + ann_file=ann_file_test, + pipeline=test_pipeline, + patch_size=patch_size, + canvas_size=canvas_size, + bev_size=(bev_h_, bev_w_), + predict_steps=predict_steps, + past_steps=past_steps, + fut_steps=fut_steps, + occ_n_future=occ_n_future_max, + use_nonlinear_optimizer=use_nonlinear_optimizer, + classes=class_names, + modality=input_modality, + eval_mod=['det', 'map', 'track','motion'], + ), + shuffler_sampler=dict(type="DistributedGroupSampler"), + nonshuffler_sampler=dict(type="DistributedSampler"), +) +optimizer = dict( + type="AdamW", + lr=2e-4, + paramwise_cfg=dict( + custom_keys={ + "img_backbone": dict(lr_mult=0.1), + } + ), + weight_decay=0.01, +) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy="CosineAnnealing", + warmup="linear", + warmup_iters=500, + warmup_ratio=1.0 / 3, + min_lr_ratio=1e-3, +) +total_epochs = 2 +evaluation = dict(interval=1, pipeline=test_pipeline) +runner = dict(type="EpochBasedRunner", max_epochs=total_epochs) +log_config = dict( + interval=10, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")] +) +checkpoint_config = dict(interval=1) +load_from = "ckpts/uniad_base_track_map.pth" + +find_unused_parameters = True \ No newline at end of file diff --git a/adzoo/uniad/configs/stage2_e2e/base_e2e_b2d.py b/adzoo/uniad/configs/stage2_e2e/base_e2e_b2d.py new file mode 100644 index 0000000..a0e156c --- /dev/null +++ b/adzoo/uniad/configs/stage2_e2e/base_e2e_b2d.py @@ -0,0 +1,819 @@ +_base_ = ["../_base_/datasets/nus-3d.py", + "../_base_/default_runtime.py"] + +# Update-2023-06-12: +# [Enhance] Update some freezing args of UniAD +# plugin_dir = "projects/mmdet3d_plugin/" +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0] +voxel_size = [0.2, 0.2, 8] +patch_size = [102.4, 102.4] +img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +NameMapping = { + #=================vehicle================= + # bicycle + 'vehicle.bh.crossbike': 'bicycle', + "vehicle.diamondback.century": 'bicycle', + "vehicle.gazelle.omafiets": 'bicycle', + # car + "vehicle.chevrolet.impala": 'car', + "vehicle.dodge.charger_2020": 'car', + "vehicle.dodge.charger_police": 'car', + "vehicle.dodge.charger_police_2020": 'car', + "vehicle.lincoln.mkz_2017": 'car', + "vehicle.lincoln.mkz_2020": 'car', + 
"vehicle.mini.cooper_s_2021": 'car', + "vehicle.mercedes.coupe_2020": 'car', + "vehicle.ford.mustang": 'car', + "vehicle.nissan.patrol_2021": 'car', + "vehicle.audi.tt": 'car', + "vehicle.audi.etron": 'car', + "vehicle.ford.crown": 'car', + "vehicle.ford.mustang": 'car', + "vehicle.tesla.model3": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/FordCrown/SM_FordCrown_parked.SM_FordCrown_parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Charger/SM_ChargerParked.SM_ChargerParked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Lincoln/SM_LincolnParked.SM_LincolnParked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/MercedesCCC/SM_MercedesCCC_Parked.SM_MercedesCCC_Parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Mini2021/SM_Mini2021_parked.SM_Mini2021_parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/NissanPatrol2021/SM_NissanPatrol2021_parked.SM_NissanPatrol2021_parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/TeslaM3/SM_TeslaM3_parked.SM_TeslaM3_parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/VolkswagenT2/SM_VolkswagenT2_2021_Parked.SM_VolkswagenT2_2021_Parked": 'car', + # bus + # van + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/VolkswagenT2/SM_VolkswagenT2_2021_Parked.SM_VolkswagenT2_2021_Parked": "van", + "vehicle.ford.ambulance": "van", + # truck + "vehicle.carlamotors.firetruck": 'truck', + #========================================= + + #=================traffic sign============ + # traffic.speed_limit + "traffic.speed_limit.30": 'traffic_sign', + "traffic.speed_limit.40": 'traffic_sign', + "traffic.speed_limit.50": 'traffic_sign', + "traffic.speed_limit.60": 'traffic_sign', + "traffic.speed_limit.90": 'traffic_sign', + "traffic.speed_limit.120": 'traffic_sign', + + "traffic.stop": 'traffic_sign', + "traffic.yield": 'traffic_sign', + "traffic.traffic_light": 'traffic_light', + #========================================= + + #===================Construction=========== + "static.prop.warningconstruction" : 'traffic_cone', + "static.prop.warningaccident": 'traffic_cone', + "static.prop.trafficwarning": "traffic_cone", + + #===================Construction=========== + "static.prop.constructioncone": 'traffic_cone', + + #=================pedestrian============== + "walker.pedestrian.0001": 'pedestrian', + "walker.pedestrian.0004": 'pedestrian', + "walker.pedestrian.0005": 'pedestrian', + "walker.pedestrian.0007": 'pedestrian', + "walker.pedestrian.0013": 'pedestrian', + "walker.pedestrian.0014": 'pedestrian', + "walker.pedestrian.0017": 'pedestrian', + "walker.pedestrian.0018": 'pedestrian', + "walker.pedestrian.0019": 'pedestrian', + "walker.pedestrian.0020": 'pedestrian', + "walker.pedestrian.0022": 'pedestrian', + "walker.pedestrian.0025": 'pedestrian', + "walker.pedestrian.0035": 'pedestrian', + "walker.pedestrian.0041": 'pedestrian', + "walker.pedestrian.0046": 'pedestrian', + "walker.pedestrian.0047": 'pedestrian', + + # ========================================== + "static.prop.dirtdebris01": 'others', + "static.prop.dirtdebris02": 'others', +} + +eval_cfg = { + "dist_ths": [0.5, 1.0, 2.0, 4.0], + "dist_th_tp": 2.0, + "min_recall": 0.1, + "min_precision": 0.1, + "mean_ap_weight": 5, + "class_names":['car','van','truck','bicycle','traffic_sign','traffic_cone','traffic_light','pedestrian'], + "tp_metrics":['trans_err', 'scale_err', 'orient_err', 'vel_err'], + "err_name_maping":{'trans_err': 'mATE','scale_err': 'mASE','orient_err': 'mAOE','vel_err': 'mAVE','attr_err': 
'mAAE'}, + "class_range":{'car':(50,50),'van':(50,50),'truck':(50,50),'bicycle':(40,40),'traffic_sign':(30,30),'traffic_cone':(30,30),'traffic_light':(30,30),'pedestrian':(40,40)} + } + +class_names = [ +'car','van','truck','bicycle','traffic_sign','traffic_cone','traffic_light','pedestrian','others' +] + +vehicle_id_list = [0,1,2] +group_id_list = [[0, 1, 2], [3], [7]] + +input_modality = dict( + use_lidar=False, use_camera=True, use_radar=False, use_map=False, use_external=True +) +_dim_ = 256 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 +_num_levels_ = 4 +bev_h_ = 200 +bev_w_ = 200 +_feed_dim_ = _ffn_dim_ +_dim_half_ = _pos_dim_ +canvas_size = (bev_h_, bev_w_) +queue_length = 3 # each sequence contains `queue_length` frames. + +### traj prediction args ### +predict_steps = 12 +predict_modes = 6 +fut_steps = 4 +past_steps = 4 +use_nonlinear_optimizer = True + +## occflow setting +occ_n_future = 4 +occ_n_future_plan = 6 +occ_n_future_max = max([occ_n_future, occ_n_future_plan]) + +### planning ### +planning_steps = 6 +use_col_optim = True + +### Occ args ### +occflow_grid_conf = { + 'xbound': [-50.0, 50.0, 0.5], + 'ybound': [-50.0, 50.0, 0.5], + 'zbound': [-10.0, 10.0, 20.0], +} + +# Other settings +train_gt_iou_threshold=0.3 + +model = dict( + type="UniAD", + gt_iou_threshold=train_gt_iou_threshold, + queue_length=queue_length, + use_grid_mask=True, + video_test_mode=True, + num_query=900, + num_classes=len(class_names), + vehicle_id_list=vehicle_id_list, + pc_range=point_cloud_range, + img_backbone=dict( + type="ResNet", + depth=101, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=4, + norm_cfg=dict(type="BN2d", requires_grad=False), + norm_eval=True, + style="caffe", + dcn=dict( + type="DCNv2", deform_groups=1, fallback_on_stride=False + ), # original DCNv2 will print log when perform load_state_dict + stage_with_dcn=(False, False, True, True), + ), + img_neck=dict( + type="FPN", + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs="on_output", + num_outs=4, + relu_before_extra_convs=True, + ), + freeze_img_backbone=True, + freeze_img_neck=True, + freeze_bn=True, + freeze_bev_encoder=True, + score_thresh=0.4, + filter_score_thresh=0.35, + qim_args=dict( + qim_type="QIMBase", + merger_dropout=0, + update_query_pos=True, + fp_ratio=0.3, + random_drop=0.1, + ), # hyper-param for query dropping mentioned in MOTR + mem_args=dict( + memory_bank_type="MemoryBank", + memory_bank_score_thresh=0.0, + memory_bank_len=4, + ), + loss_cfg=dict( + type="ClipMatcher", + num_classes=len(class_names), + weight_dict=None, + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2], + assigner=dict( + type="HungarianAssigner3DTrack", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + pc_range=point_cloud_range, + ), + loss_cls=dict( + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + ), # loss cfg for tracking + pts_bbox_head=dict( + type="BEVFormerTrackHead", + bev_h=bev_h_, + bev_w=bev_w_, + num_query=900, + num_classes=len(class_names), + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + past_steps=past_steps, + fut_steps=fut_steps, + transformer=dict( + type="UniADPerceptionTransformer", + rotate_prev_bev=True, + use_shift=True, + use_can_bus=True, + embed_dims=_dim_, + encoder=dict( + type="BEVFormerEncoder", + num_layers=6, + pc_range=point_cloud_range, + 
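+ # Spatial cross-attention lifts multi-camera features into BEV by sampling reference points along each BEV pillar's height; num_points_in_pillar below sets 4 samples per pillar.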
num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type="BEVFormerLayer", + attn_cfgs=[ + dict( + type="TemporalSelfAttention", embed_dims=_dim_, num_levels=1 + ), + dict( + type="SpatialCrossAttention", + pc_range=point_cloud_range, + deformable_attention=dict( + type="MSDeformableAttention3D", + embed_dims=_dim_, + num_points=8, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.0, + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), + decoder=dict( + type="DetectionTransformerDecoder", + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type="DetrTransformerDecoderLayer", + attn_cfgs=[ + dict( + type="MultiheadAttention", + embed_dims=_dim_, + num_heads=8, + dropout=0.0, + ), + dict( + type="CustomMSDeformableAttention", + embed_dims=_dim_, + num_levels=1, + ), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.0, + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), + ), + bbox_coder=dict( + type="NMSFreeCoder", + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=len(class_names), + ), + positional_encoding=dict( + type="LearnedPositionalEncoding", + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + ), + loss_cls=dict( + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_iou=dict(type="GIoULoss", loss_weight=0.0), + ), + seg_head=dict( + type='PansegformerHead', + bev_h=bev_h_, + bev_w=bev_w_, + canvas_size=canvas_size, + pc_range=point_cloud_range, + num_query=300, + num_classes=6, + num_things_classes=6, + num_stuff_classes=0, + in_channels=2048, + sync_cls_avg_factor=True, + as_two_stage=False, + with_box_refine=True, + transformer=dict( + type='SegDeformableTransformer', + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=_dim_, + num_levels=_num_levels_, + ), + feedforward_channels=_feed_dim_, + ffn_dropout=0.0, + operation_order=('self_attn', 'norm', 'ffn', 'norm'))), + decoder=dict( + type='DeformableDetrTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.0), + dict( + type='MultiScaleDeformableAttention', + embed_dims=_dim_, + num_levels=_num_levels_, + ) + ], + feedforward_channels=_feed_dim_, + ffn_dropout=0.0, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm') + ), + ), + ), + positional_encoding=dict( + type='SinePositionalEncoding', + num_feats=_dim_half_, + normalize=True, + offset=-0.5), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0), + loss_mask=dict(type='DiceLoss', loss_weight=2.0), + thing_transformer_head=dict(type='SegMaskHead',d_model=_dim_,nhead=8,num_decoder_layers=4), + stuff_transformer_head=dict(type='SegMaskHead',d_model=_dim_,nhead=8,num_decoder_layers=6,self_attn=True), + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + 
cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0), + ), + assigner_with_mask=dict( + type='HungarianAssigner_multi_info', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0), + mask_cost=dict(type='DiceCost', weight=2.0), + ), + sampler =dict(type='PseudoSampler'), + sampler_with_mask =dict(type='PseudoSampler_segformer'), + ), + ), + occ_head=dict( + type='OccHead', + + grid_conf=occflow_grid_conf, + ignore_index=255, + + bev_proj_dim=256, + bev_proj_nlayers=4, + + # Transformer + attn_mask_thresh=0.3, + transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=5, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, # change to 512 + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True), + feedforward_channels=2048, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + # Query + query_dim=256, + query_mlp_layers=3, + + aux_loss_weight=1., + loss_mask=dict( + type='FieryBinarySegmentationLoss', + use_top_k=True, + top_k_ratio=0.25, + future_discount=0.95, + loss_weight=5.0, + ignore_index=255, + ), + loss_dice=dict( + type='DiceLossWithMasks', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + ignore_index=255, + loss_weight=1.0), + + + pan_eval=True, + test_seg_thresh=0.1, + test_with_track_score=True, + ), + motion_head=dict( + type='MotionHead', + bev_h=bev_h_, + bev_w=bev_w_, + num_query=300, + num_classes=len(class_names), + vehicle_id_list=vehicle_id_list, + predict_steps=predict_steps, + predict_modes=predict_modes, + embed_dims=_dim_, + loss_traj=dict(type='TrajLoss', + use_variance=True, + cls_loss_weight=0.5, + nll_loss_weight=0.5, + loss_weight_minade=0., + loss_weight_minfde=0.25), + num_cls_fcs=3, + pc_range=point_cloud_range, + group_id_list=group_id_list, + num_anchor=6, + use_nonlinear_optimizer=use_nonlinear_optimizer, + anchor_info_path='data/others/b2d_motion_anchor_infos_mode6.pkl', + transformerlayers=dict( + type='MotionTransformerDecoder', + pc_range=point_cloud_range, + embed_dims=_dim_, + num_layers=3, + transformerlayers=dict( + type='MotionTransformerAttentionLayer', + batch_first=True, + attn_cfgs=[ + dict( + type='MotionDeformableAttention', + num_steps=predict_steps, + embed_dims=_dim_, + num_levels=1, + num_heads=8, + num_points=4, + sample_index=-1), + ], + + feedforward_channels=_ffn_dim_, + ffn_dropout=0.0, + operation_order=('cross_attn', 'norm', 'ffn', 'norm')), + ), + ), + planning_head=dict( + type='PlanningHeadSingleMode', + embed_dims=256, + command_dim=6, + planning_steps=planning_steps, + loss_planning=dict(type='PlanningLoss'), + loss_collision=[dict(type='CollisionLoss', delta=0.0, weight=2.5), + dict(type='CollisionLoss', delta=0.5, weight=1.0), + dict(type='CollisionLoss', delta=1.0, weight=0.25)], + use_col_optim=use_col_optim, + planning_eval=True, + with_adapter=True, + ), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + 
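+ # For reference: 512 cells * 0.2 m voxels = 102.4 m per side, matching
+ # point_cloud_range; with out_size_factor=4 below, assignment targets
+ # live on a 512 / 4 = 128 x 128 feature grid.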
voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type="HungarianAssigner3D", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + iou_cost=dict( + type="IoUCost", weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. + pc_range=point_cloud_range, + ), + ) + ), +) +dataset_type = "B2D_E2E_Dataset" +data_root = "data/bench2drive" +info_root = "data/infos" +map_root = "data/bench2drive/maps" +map_file = "data/infos/b2d_map_infos.pkl" +file_client_args = dict(backend="disk") +ann_file_train=info_root + f"/b2d_infos_train.pkl" +ann_file_val=info_root + f"/b2d_infos_val.pkl" +ann_file_test=info_root + f"/b2d_infos_val.pkl" + + +train_pipeline = [ + dict(type="LoadMultiViewImageFromFilesInCeph", to_float32=True, file_client_args=file_client_args, img_root=data_root), + dict(type="PhotoMetricDistortionMultiViewImage"), + dict( + type="LoadAnnotations3D_E2E", + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + with_vis_token=False, + with_future_anns=True, # occ_flow gt + with_ins_inds_3d=True, # ins_inds + ins_inds_add_1=True, # ins_inds start from 1 + ), + + dict(type='GenerateOccFlowLabels', + grid_conf=occflow_grid_conf, + ignore_index=255, + only_vehicle=True, + filter_invisible=False, + all_classes = class_names, + vehicle_classes = ['car','van','truck','bicycle'], + plan_classes = ['car','van','truck','bicycle','pedestrian'], + ), + + dict(type="ObjectRangeFilterTrack", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilterTrack", classes=class_names), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type="DefaultFormatBundle3D", class_names=class_names), + dict( + type="CustomCollect3D", + keys=[ + "gt_bboxes_3d", + "gt_labels_3d", + "gt_inds", + "img", + "timestamp", + "l2g_r_mat", + "l2g_t", + "gt_fut_traj", + "gt_fut_traj_mask", + "gt_past_traj", + "gt_past_traj_mask", + "gt_sdc_bbox", + "gt_sdc_label", + "gt_sdc_fut_traj", + "gt_sdc_fut_traj_mask", + "gt_lane_labels", + "gt_lane_bboxes", + "gt_lane_masks", + # Occ gt + "gt_segmentation", + "gt_instance", + "gt_centerness", + "gt_offset", + "gt_flow", + "gt_backward_flow", + "gt_occ_has_invalid_frame", + "gt_occ_img_is_valid", + # gt future bbox for plan + "gt_future_boxes", + "gt_future_labels", + # planning + "sdc_planning", + "sdc_planning_mask", + "command", + ], + ), +] +test_pipeline = [ + dict(type='LoadMultiViewImageFromFilesInCeph', to_float32=True, + file_client_args=file_client_args, img_root=data_root), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type='LoadAnnotations3D_E2E', + with_bbox_3d=False, + with_label_3d=False, + with_attr_label=False, + with_vis_token=False, + with_future_anns=True, + with_ins_inds_3d=False, + ins_inds_add_1=True, # ins_inds start from 1 + ), + dict(type='GenerateOccFlowLabels', + grid_conf=occflow_grid_conf, + ignore_index=255, + only_vehicle=True, + filter_invisible=False, + all_classes = class_names, + vehicle_classes = ['car','van','truck','bicycle'], + plan_classes = ['car','van','truck','bicycle','pedestrian'], + ), + dict( + type="MultiScaleFlipAug3D", + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict( + type="CustomCollect3D", keys=[ + "img", + "timestamp", + "l2g_r_mat", + "l2g_t", + 
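+ # map GT (lane labels/boxes/masks, used to evaluate the seg head)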
"gt_lane_labels", + "gt_lane_bboxes", + "gt_lane_masks", + "gt_segmentation", + "gt_instance", + "gt_centerness", + "gt_offset", + "gt_flow", + "gt_backward_flow", + "gt_occ_has_invalid_frame", + "gt_occ_img_is_valid", + # planning + "sdc_planning", + "sdc_planning_mask", + "command", + ] + ), + ], + ), +] + +inference_only_pipeline = [ + dict(type='LoadMultiViewImageFromFilesInCeph', to_float32=True, + file_client_args=file_client_args, img_root=data_root), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), + dict( + type="MultiScaleFlipAug3D", + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict( + type="CustomCollect3D", keys=[ + "img", + "timestamp", + "l2g_r_mat", + "l2g_t", + "command", + ] + ), + ], + ), +] + +data = dict( + samples_per_gpu=1, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=ann_file_train, + pipeline=train_pipeline, + classes=class_names, + name_mapping=NameMapping, + map_root=map_root, + map_file=map_file, + modality=input_modality, + patch_size=patch_size, + bev_size=(bev_h_, bev_w_), + queue_length=queue_length, + predict_frames=predict_steps, + past_frames=past_steps, + future_frames=fut_steps, + point_cloud_range=point_cloud_range, + box_type_3d="LiDAR", + ), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=ann_file_val, + pipeline=test_pipeline, + name_mapping=NameMapping, + map_root=map_root, + map_file=map_file, + bev_size=(bev_h_, bev_w_), + predict_frames=predict_steps, + past_frames=past_steps, + future_frames=fut_steps, + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + point_cloud_range=point_cloud_range, + eval_cfg=eval_cfg, + #eval_mod=['det', 'track', 'map'], + box_type_3d="LiDAR", + ), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=ann_file_val, + pipeline=test_pipeline, + name_mapping=NameMapping, + map_root=map_root, + map_file=map_file, + bev_size=(bev_h_, bev_w_), + predict_frames=predict_steps, + past_frames=past_steps, + future_frames=fut_steps, + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + point_cloud_range=point_cloud_range, + eval_cfg=eval_cfg, + #eval_mod=['det', 'track', 'map'], + box_type_3d="LiDAR", + ), + shuffler_sampler=dict(type="DistributedGroupSampler"), + nonshuffler_sampler=dict(type="DistributedSampler"), +) +optimizer = dict( + type="AdamW", + lr=2e-4, + paramwise_cfg=dict( + custom_keys={ + "img_backbone": dict(lr_mult=0.1), + } + ), + weight_decay=0.01, +) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + by_epoch=False, + policy="CosineAnnealing", + warmup="linear", + warmup_iters=500, + warmup_ratio=1.0 / 3, + min_lr_ratio=1e-3, +) +total_epochs = 2 +evaluation = dict(interval=2, pipeline=test_pipeline) +runner = dict(type="EpochBasedRunner", max_epochs=total_epochs) +log_config = dict( + interval=1, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")] +) +checkpoint_config = dict(interval=3000, by_epoch=False) + + +find_unused_parameters = True \ No newline at end of file diff --git a/adzoo/uniad/configs/stage2_e2e/tiny_e2e_b2d.py b/adzoo/uniad/configs/stage2_e2e/tiny_e2e_b2d.py new file mode 100644 index 0000000..b6c7a2c --- /dev/null +++ b/adzoo/uniad/configs/stage2_e2e/tiny_e2e_b2d.py @@ -0,0 +1,813 @@ +_base_ = ["../_base_/datasets/nus-3d.py", + 
"../_base_/default_runtime.py"] + +# Update-2023-06-12: +# [Enhance] Update some freezing args of UniAD +# plugin_dir = "projects/mmdet3d_plugin/" +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0] +voxel_size = [0.2, 0.2, 8] +patch_size = [102.4, 102.4] +img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +NameMapping = { + #=================vehicle================= + # bicycle + 'vehicle.bh.crossbike': 'bicycle', + "vehicle.diamondback.century": 'bicycle', + "vehicle.gazelle.omafiets": 'bicycle', + # car + "vehicle.chevrolet.impala": 'car', + "vehicle.dodge.charger_2020": 'car', + "vehicle.dodge.charger_police": 'car', + "vehicle.dodge.charger_police_2020": 'car', + "vehicle.lincoln.mkz_2017": 'car', + "vehicle.lincoln.mkz_2020": 'car', + "vehicle.mini.cooper_s_2021": 'car', + "vehicle.mercedes.coupe_2020": 'car', + "vehicle.ford.mustang": 'car', + "vehicle.nissan.patrol_2021": 'car', + "vehicle.audi.tt": 'car', + "vehicle.audi.etron": 'car', + "vehicle.ford.crown": 'car', + "vehicle.ford.mustang": 'car', + "vehicle.tesla.model3": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/FordCrown/SM_FordCrown_parked.SM_FordCrown_parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Charger/SM_ChargerParked.SM_ChargerParked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Lincoln/SM_LincolnParked.SM_LincolnParked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/MercedesCCC/SM_MercedesCCC_Parked.SM_MercedesCCC_Parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Mini2021/SM_Mini2021_parked.SM_Mini2021_parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/NissanPatrol2021/SM_NissanPatrol2021_parked.SM_NissanPatrol2021_parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/TeslaM3/SM_TeslaM3_parked.SM_TeslaM3_parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/VolkswagenT2/SM_VolkswagenT2_2021_Parked.SM_VolkswagenT2_2021_Parked": 'car', + # bus + # van + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/VolkswagenT2/SM_VolkswagenT2_2021_Parked.SM_VolkswagenT2_2021_Parked": "van", + "vehicle.ford.ambulance": "van", + # truck + "vehicle.carlamotors.firetruck": 'truck', + #========================================= + + #=================traffic sign============ + # traffic.speed_limit + "traffic.speed_limit.30": 'traffic_sign', + "traffic.speed_limit.40": 'traffic_sign', + "traffic.speed_limit.50": 'traffic_sign', + "traffic.speed_limit.60": 'traffic_sign', + "traffic.speed_limit.90": 'traffic_sign', + "traffic.speed_limit.120": 'traffic_sign', + + "traffic.stop": 'traffic_sign', + "traffic.yield": 'traffic_sign', + "traffic.traffic_light": 'traffic_light', + #========================================= + + #===================Construction=========== + "static.prop.warningconstruction" : 'traffic_cone', + "static.prop.warningaccident": 'traffic_cone', + "static.prop.trafficwarning": "traffic_cone", + + #===================Construction=========== + "static.prop.constructioncone": 'traffic_cone', + + #=================pedestrian============== + "walker.pedestrian.0001": 'pedestrian', + "walker.pedestrian.0004": 'pedestrian', + "walker.pedestrian.0005": 'pedestrian', + "walker.pedestrian.0007": 'pedestrian', + "walker.pedestrian.0013": 'pedestrian', + "walker.pedestrian.0014": 'pedestrian', + "walker.pedestrian.0017": 'pedestrian', + "walker.pedestrian.0018": 'pedestrian', + 
"walker.pedestrian.0019": 'pedestrian', + "walker.pedestrian.0020": 'pedestrian', + "walker.pedestrian.0022": 'pedestrian', + "walker.pedestrian.0025": 'pedestrian', + "walker.pedestrian.0035": 'pedestrian', + "walker.pedestrian.0041": 'pedestrian', + "walker.pedestrian.0046": 'pedestrian', + "walker.pedestrian.0047": 'pedestrian', + + # ========================================== + "static.prop.dirtdebris01": 'others', + "static.prop.dirtdebris02": 'others', +} + +eval_cfg = { + "dist_ths": [0.5, 1.0, 2.0, 4.0], + "dist_th_tp": 2.0, + "min_recall": 0.1, + "min_precision": 0.1, + "mean_ap_weight": 5, + "class_names":['car','van','truck','bicycle','traffic_sign','traffic_cone','traffic_light','pedestrian'], + "tp_metrics":['trans_err', 'scale_err', 'orient_err', 'vel_err'], + "err_name_maping":{'trans_err': 'mATE','scale_err': 'mASE','orient_err': 'mAOE','vel_err': 'mAVE','attr_err': 'mAAE'}, + "class_range":{'car':(50,50),'van':(50,50),'truck':(50,50),'bicycle':(40,40),'traffic_sign':(30,30),'traffic_cone':(30,30),'traffic_light':(30,30),'pedestrian':(40,40)} + } + +class_names = [ +'car','van','truck','bicycle','traffic_sign','traffic_cone','traffic_light','pedestrian','others' +] + +vehicle_id_list = [0,1,2] +group_id_list = [[0, 1, 2], [3], [7]] + +input_modality = dict( + use_lidar=False, use_camera=True, use_radar=False, use_map=False, use_external=True +) +_dim_ = 256 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 +_num_levels_ = 4 +bev_h_ = 100 +bev_w_ = 100 +_feed_dim_ = _ffn_dim_ +_dim_half_ = _pos_dim_ +canvas_size = (bev_h_*2, bev_w_*2) +queue_length = 3 # each sequence contains `queue_length` frames. + +### traj prediction args ### +predict_steps = 12 +predict_modes = 6 +fut_steps = 4 +past_steps = 4 +use_nonlinear_optimizer = True + +## occflow setting +occ_n_future = 4 +occ_n_future_plan = 6 +occ_n_future_max = max([occ_n_future, occ_n_future_plan]) + +### planning ### +planning_steps = 6 +use_col_optim = True + +### Occ args ### +occflow_grid_conf = { + 'xbound': [-50.0, 50.0, 0.5], + 'ybound': [-50.0, 50.0, 0.5], + 'zbound': [-10.0, 10.0, 20.0], +} + +# Other settings +train_gt_iou_threshold=0.3 + +model = dict( + type="UniAD", + gt_iou_threshold=train_gt_iou_threshold, + queue_length=queue_length, + use_grid_mask=True, + video_test_mode=True, + prev_frame_num=10, + num_query=900, + num_classes=len(class_names), + vehicle_id_list=vehicle_id_list, + pc_range=point_cloud_range, + img_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(1,2,3), + frozen_stages=4, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch'), + img_neck=dict( + type="FPN", + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs="on_output", + num_outs=4, + relu_before_extra_convs=True, + ), + freeze_img_backbone=True, + freeze_img_neck=True, + freeze_bn=True, + freeze_bev_encoder=True, + score_thresh=0.4, + filter_score_thresh=0.35, + qim_args=dict( + qim_type="QIMBase", + merger_dropout=0, + update_query_pos=True, + fp_ratio=0.3, + random_drop=0.1, + ), # hyper-param for query dropping mentioned in MOTR + mem_args=dict( + memory_bank_type="MemoryBank", + memory_bank_score_thresh=0.0, + memory_bank_len=4, + ), + loss_cfg=dict( + type="ClipMatcher", + num_classes=len(class_names), + weight_dict=None, + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2], + assigner=dict( + type="HungarianAssigner3DTrack", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), 
+ pc_range=point_cloud_range, + ), + loss_cls=dict( + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + ), # loss cfg for tracking + pts_bbox_head=dict( + type="BEVFormerTrackHead", + bev_h=bev_h_, + bev_w=bev_w_, + num_query=900, + num_classes=len(class_names), + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + past_steps=past_steps, + fut_steps=fut_steps, + transformer=dict( + type="UniADPerceptionTransformer", + rotate_prev_bev=True, + use_shift=True, + use_can_bus=True, + embed_dims=_dim_, + encoder=dict( + type="BEVFormerEncoder", + num_layers=3, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type="BEVFormerLayer", + attn_cfgs=[ + dict( + type="TemporalSelfAttention", embed_dims=_dim_, num_levels=1 + ), + dict( + type="SpatialCrossAttention", + pc_range=point_cloud_range, + deformable_attention=dict( + type="MSDeformableAttention3D", + embed_dims=_dim_, + num_points=8, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.0, + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), + decoder=dict( + type="DetectionTransformerDecoder", + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type="DetrTransformerDecoderLayer", + attn_cfgs=[ + dict( + type="MultiheadAttention", + embed_dims=_dim_, + num_heads=8, + dropout=0.0, + ), + dict( + type="CustomMSDeformableAttention", + embed_dims=_dim_, + num_levels=1, + ), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.0, + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), + ), + bbox_coder=dict( + type="NMSFreeCoder", + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=len(class_names), + ), + positional_encoding=dict( + type="LearnedPositionalEncoding", + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + ), + loss_cls=dict( + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_iou=dict(type="GIoULoss", loss_weight=0.0), + ), + seg_head=dict( + type='PansegformerHead', + bev_h=bev_h_*2, + bev_w=bev_w_*2, + canvas_size=canvas_size, + pc_range=point_cloud_range, + num_query=300, + num_classes=6, + num_things_classes=6, + num_stuff_classes=0, + in_channels=2048, + sync_cls_avg_factor=True, + as_two_stage=False, + with_box_refine=True, + transformer=dict( + type='SegDeformableTransformer', + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=_dim_, + num_levels=_num_levels_, + ), + feedforward_channels=_feed_dim_, + ffn_dropout=0.0, + operation_order=('self_attn', 'norm', 'ffn', 'norm'))), + decoder=dict( + type='DeformableDetrTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.0), + dict( + type='MultiScaleDeformableAttention', + embed_dims=_dim_, + num_levels=_num_levels_, + ) + ], + feedforward_channels=_feed_dim_, + ffn_dropout=0.0, + operation_order=('self_attn', 
'norm', 'cross_attn', 'norm', + 'ffn', 'norm') + ), + ), + ), + positional_encoding=dict( + type='SinePositionalEncoding', + num_feats=_dim_half_, + normalize=True, + offset=-0.5), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0), + loss_mask=dict(type='DiceLoss', loss_weight=2.0), + thing_transformer_head=dict(type='SegMaskHead',d_model=_dim_,nhead=8,num_decoder_layers=4), + stuff_transformer_head=dict(type='SegMaskHead',d_model=_dim_,nhead=8,num_decoder_layers=6,self_attn=True), + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0), + ), + assigner_with_mask=dict( + type='HungarianAssigner_multi_info', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0), + mask_cost=dict(type='DiceCost', weight=2.0), + ), + sampler =dict(type='PseudoSampler'), + sampler_with_mask =dict(type='PseudoSampler_segformer'), + ), + ), + occ_head=dict( + type='OccHead', + + grid_conf=occflow_grid_conf, + ignore_index=255, + + bev_proj_dim=256, + bev_proj_nlayers=4, + + # Transformer + attn_mask_thresh=0.3, + transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=5, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, # change to 512 + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True), + feedforward_channels=2048, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + # Query + query_dim=256, + query_mlp_layers=3, + + aux_loss_weight=1., + loss_mask=dict( + type='FieryBinarySegmentationLoss', + use_top_k=True, + top_k_ratio=0.25, + future_discount=0.95, + loss_weight=5.0, + ignore_index=255, + ), + loss_dice=dict( + type='DiceLossWithMasks', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + ignore_index=255, + loss_weight=1.0), + + + pan_eval=True, + test_seg_thresh=0.1, + test_with_track_score=True, + ), + motion_head=dict( + type='MotionHead', + bev_h=bev_h_*2, + bev_w=bev_w_*2, + num_query=300, + num_classes=len(class_names), + predict_steps=predict_steps, + predict_modes=predict_modes, + embed_dims=_dim_, + loss_traj=dict(type='TrajLoss', + use_variance=True, + cls_loss_weight=0.5, + nll_loss_weight=0.5, + loss_weight_minade=0., + loss_weight_minfde=0.25), + num_cls_fcs=3, + pc_range=point_cloud_range, + group_id_list=group_id_list, + num_anchor=6, + use_nonlinear_optimizer=use_nonlinear_optimizer, + anchor_info_path='data/others/b2d_motion_anchor_infos_mode6.pkl', + transformerlayers=dict( + type='MotionTransformerDecoder', + pc_range=point_cloud_range, + embed_dims=_dim_, + num_layers=3, + transformerlayers=dict( + type='MotionTransformerAttentionLayer', + batch_first=True, + attn_cfgs=[ + dict( + type='MotionDeformableAttention', + num_steps=predict_steps, + embed_dims=_dim_, + num_levels=1, + num_heads=8, + num_points=4, + 
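+ # num_steps matches predict_steps=12; at nuScenes-style 2 Hz keyframes
+ # that would be a 6 s horizon (the exact horizon here depends on the
+ # Bench2Drive sampling rate).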
sample_index=-1), + ], + + feedforward_channels=_ffn_dim_, + ffn_dropout=0.0, + operation_order=('cross_attn', 'norm', 'ffn', 'norm')), + ), + ), + planning_head=dict( + type='PlanningHeadSingleMode', + embed_dims=256, + command_dim=6, + planning_steps=planning_steps, + loss_planning=dict(type='PlanningLoss'), + loss_collision=[dict(type='CollisionLoss', delta=0.0, weight=2.5), + dict(type='CollisionLoss', delta=0.5, weight=1.0), + dict(type='CollisionLoss', delta=1.0, weight=0.25)], + use_col_optim=use_col_optim, + planning_eval=True, + with_adapter=True, + ), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type="HungarianAssigner3D", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + iou_cost=dict( + type="IoUCost", weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. + pc_range=point_cloud_range, + ), + ) + ), +) +dataset_type = "B2D_E2E_Dataset" +data_root = "data/bench2drive" +info_root = "data/infos" +map_root = "data/bench2drive/maps" +map_file = "data/infos/b2d_map_infos.pkl" +file_client_args = dict(backend="disk") +ann_file_train=info_root + f"/b2d_infos_train.pkl" +ann_file_val=info_root + f"/b2d_infos_val.pkl" +ann_file_test=info_root + f"/b2d_infos_val.pkl" + +train_pipeline = [ + dict(type="LoadMultiViewImageFromFilesInCeph", to_float32=True, file_client_args=file_client_args, img_root=data_root), + dict(type="PhotoMetricDistortionMultiViewImage"), + dict( + type="LoadAnnotations3D_E2E", + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + with_vis_token=False, + with_future_anns=True, # occ_flow gt + with_ins_inds_3d=True, # ins_inds + ins_inds_add_1=True, # ins_inds start from 1 + ), + + dict(type='GenerateOccFlowLabels', + grid_conf=occflow_grid_conf, + ignore_index=255, + only_vehicle=True, + filter_invisible=False, + all_classes = class_names, + vehicle_classes = ['car','van','truck','bicycle'], + plan_classes = ['car','van','truck','bicycle','pedestrian'], + ), + + dict(type="ObjectRangeFilterTrack", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilterTrack", classes=class_names), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type="DefaultFormatBundle3D", class_names=class_names), + dict( + type="CustomCollect3D", + keys=[ + "gt_bboxes_3d", + "gt_labels_3d", + "gt_inds", + "img", + "timestamp", + "l2g_r_mat", + "l2g_t", + "gt_fut_traj", + "gt_fut_traj_mask", + "gt_past_traj", + "gt_past_traj_mask", + "gt_sdc_bbox", + "gt_sdc_label", + "gt_sdc_fut_traj", + "gt_sdc_fut_traj_mask", + "gt_lane_labels", + "gt_lane_bboxes", + "gt_lane_masks", + # Occ gt + "gt_segmentation", + "gt_instance", + "gt_centerness", + "gt_offset", + "gt_flow", + "gt_backward_flow", + "gt_occ_has_invalid_frame", + "gt_occ_img_is_valid", + # gt future bbox for plan + "gt_future_boxes", + "gt_future_labels", + # planning + "sdc_planning", + "sdc_planning_mask", + "command", + ], + ), +] +test_pipeline = [ + dict(type='LoadMultiViewImageFromFilesInCeph', to_float32=True, + file_client_args=file_client_args, img_root=data_root), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type='LoadAnnotations3D_E2E', + with_bbox_3d=False, + with_label_3d=False, + with_attr_label=False, + with_vis_token=False, + 
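+ # at test time only future annotations are kept (to build occupancy GT
+ # for evaluation); boxes/labels/instance ids are training-only inputs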
with_future_anns=True, + with_ins_inds_3d=False, + ins_inds_add_1=True, # ins_inds start from 1 + ), + dict(type='GenerateOccFlowLabels', + grid_conf=occflow_grid_conf, + ignore_index=255, + only_vehicle=True, + filter_invisible=False, + all_classes = class_names, + vehicle_classes = ['car','van','truck','bicycle'], + plan_classes = ['car','van','truck','bicycle','pedestrian'], + ), + dict( + type="MultiScaleFlipAug3D", + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict( + type="CustomCollect3D", keys=[ + "img", + "timestamp", + "l2g_r_mat", + "l2g_t", + "gt_lane_labels", + "gt_lane_bboxes", + "gt_lane_masks", + "gt_segmentation", + "gt_instance", + "gt_centerness", + "gt_offset", + "gt_flow", + "gt_backward_flow", + "gt_occ_has_invalid_frame", + "gt_occ_img_is_valid", + # planning + "sdc_planning", + "sdc_planning_mask", + "command", + ] + ), + ], + ), +] + +inference_only_pipeline = [ + dict(type='LoadMultiViewImageFromFilesInCeph', to_float32=True, + file_client_args=file_client_args, img_root=data_root), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), + dict( + type="MultiScaleFlipAug3D", + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict( + type="CustomCollect3D", keys=[ + "img", + "timestamp", + "l2g_r_mat", + "l2g_t", + "command", + ] + ), + ], + ), +] + + +data = dict( + samples_per_gpu=1, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=ann_file_train, + pipeline=train_pipeline, + classes=class_names, + name_mapping=NameMapping, + map_root=map_root, + map_file=map_file, + modality=input_modality, + patch_size=patch_size, + bev_size=(bev_h_, bev_w_), + queue_length=queue_length, + predict_frames=predict_steps, + past_frames=past_steps, + future_frames=fut_steps, + point_cloud_range=point_cloud_range, + box_type_3d="LiDAR", + ), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=ann_file_val, + pipeline=test_pipeline, + name_mapping=NameMapping, + map_root=map_root, + map_file=map_file, + bev_size=(bev_h_, bev_w_), + predict_frames=predict_steps, + past_frames=past_steps, + future_frames=fut_steps, + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + point_cloud_range=point_cloud_range, + eval_cfg=eval_cfg, + #eval_mod=['det', 'track', 'map'], + box_type_3d="LiDAR", + ), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=ann_file_val, + pipeline=test_pipeline, + name_mapping=NameMapping, + map_root=map_root, + map_file=map_file, + bev_size=(bev_h_, bev_w_), + predict_frames=predict_steps, + past_frames=past_steps, + future_frames=fut_steps, + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + point_cloud_range=point_cloud_range, + eval_cfg=eval_cfg, + #eval_mod=['det', 'track', 'map'], + box_type_3d="LiDAR", + ), + shuffler_sampler=dict(type="DistributedGroupSampler"), + nonshuffler_sampler=dict(type="DistributedSampler"), +) +optimizer = dict( + type="AdamW", + lr=2e-4, + paramwise_cfg=dict( + custom_keys={ + "img_backbone": dict(lr_mult=0.1), + } + ), + weight_decay=0.01, +) +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + by_epoch=False, + policy="CosineAnnealing", + warmup="linear", + warmup_iters=500, + warmup_ratio=1.0 
/ 3, + min_lr_ratio=1e-3, +) +total_epochs = 1 +evaluation = dict(interval=1, pipeline=test_pipeline) +runner = dict(type="EpochBasedRunner", max_epochs=total_epochs) +log_config = dict( + interval=1, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")] +) +checkpoint_config = dict(interval=3000, by_epoch=False) + +find_unused_parameters = True \ No newline at end of file diff --git a/adzoo/uniad/data_converter/create_data.py b/adzoo/uniad/data_converter/create_data.py new file mode 100755 index 0000000..0adb360 --- /dev/null +++ b/adzoo/uniad/data_converter/create_data.py @@ -0,0 +1,109 @@ +import argparse +from os import path as osp +import sys +from data_converter import uniad_nuscenes_converter as nuscenes_converter + +def nuscenes_data_prep(root_path, + can_bus_root_path, + info_prefix, + version, + dataset_name, + out_dir, + max_sweeps=10): + """Prepare data related to nuScenes dataset. + + Related data consists of '.pkl' files recording basic infos, + 2D annotations and groundtruth database. + + Args: + root_path (str): Path of dataset root. + info_prefix (str): The prefix of info filenames. + version (str): Dataset version. + dataset_name (str): The dataset class name. + out_dir (str): Output directory of the groundtruth database info. + max_sweeps (int): Number of input consecutive frames. Default: 10 + """ + nuscenes_converter.create_nuscenes_infos( + root_path, out_dir, can_bus_root_path, info_prefix, version=version, max_sweeps=max_sweeps) + + if version == 'v1.0-test': + info_test_path = osp.join( + out_dir, f'{info_prefix}_infos_temporal_test.pkl') + nuscenes_converter.export_2d_annotation( + root_path, info_test_path, version=version) + else: + info_train_path = osp.join( + out_dir, f'{info_prefix}_infos_temporal_train.pkl') + info_val_path = osp.join( + out_dir, f'{info_prefix}_infos_temporal_val.pkl') + nuscenes_converter.export_2d_annotation( + root_path, info_train_path, version=version) + nuscenes_converter.export_2d_annotation( + root_path, info_val_path, version=version) + + +parser = argparse.ArgumentParser(description='Data converter arg parser') +parser.add_argument('dataset', metavar='kitti', help='name of the dataset') +parser.add_argument( + '--root-path', + type=str, + default='./data/kitti', + help='specify the root path of dataset') +parser.add_argument( + '--canbus', + type=str, + default='./data', + help='specify the root path of nuScenes canbus') +parser.add_argument( + '--version', + type=str, + default='v1.0', + required=False, + help='specify the dataset version, no need for kitti') +parser.add_argument( + '--max-sweeps', + type=int, + default=10, + required=False, + help='specify sweeps of lidar per example') +parser.add_argument( + '--out-dir', + type=str, + default='./data/kitti', + required=False, + help='name of info pkl') +parser.add_argument('--extra-tag', type=str, default='kitti') +parser.add_argument( + '--workers', type=int, default=4, help='number of threads to be used') +args = parser.parse_args() + +if __name__ == '__main__': + if args.dataset == 'nuscenes' and args.version != 'v1.0-mini': + train_version = f'{args.version}-trainval' + nuscenes_data_prep( + root_path=args.root_path, + can_bus_root_path=args.canbus, + info_prefix=args.extra_tag, + version=train_version, + dataset_name='NuScenesDataset', + out_dir=args.out_dir, + max_sweeps=args.max_sweeps) + test_version = f'{args.version}-test' + nuscenes_data_prep( + root_path=args.root_path, + can_bus_root_path=args.canbus, + info_prefix=args.extra_tag, + 
version=test_version,
+ dataset_name='NuScenesDataset',
+ out_dir=args.out_dir,
+ max_sweeps=args.max_sweeps)
+ elif args.dataset == 'nuscenes' and args.version == 'v1.0-mini':
+ train_version = f'{args.version}'
+ nuscenes_data_prep(
+ root_path=args.root_path,
+ can_bus_root_path=args.canbus,
+ info_prefix=args.extra_tag,
+ version=train_version,
+ dataset_name='NuScenesDataset',
+ out_dir=args.out_dir,
+ max_sweeps=args.max_sweeps)
\ No newline at end of file
diff --git a/adzoo/uniad/data_converter/uniad_create_data.sh b/adzoo/uniad/data_converter/uniad_create_data.sh
new file mode 100755
index 0000000..b9ac04d
--- /dev/null
+++ b/adzoo/uniad/data_converter/uniad_create_data.sh
@@ -0,0 +1,7 @@
+
+PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
+python tools/create_data.py nuscenes --root-path ./data/nuscenes \
+ --out-dir ./data/infos \
+ --extra-tag nuscenes \
+ --version v1.0 \
+ --canbus ./data/nuscenes
\ No newline at end of file
diff --git a/adzoo/uniad/data_converter/uniad_nuscenes_converter.py b/adzoo/uniad/data_converter/uniad_nuscenes_converter.py
new file mode 100644
index 0000000..4ff6ef8
--- /dev/null
+++ b/adzoo/uniad/data_converter/uniad_nuscenes_converter.py
@@ -0,0 +1,723 @@
+import numpy as np
+import os
+from collections import OrderedDict
+from nuscenes.nuscenes import NuScenes
+from nuscenes.utils.geometry_utils import view_points
+from nuscenes.prediction import PredictHelper
+from os import path as osp
+from pyquaternion import Quaternion
+from shapely.geometry import MultiPoint, box
+from typing import List, Tuple, Union
+
+from mmcv.core.bbox.box_np_ops import points_cam2img
+from mmcv.datasets import NuScenesDataset
+from mmcv.fileio.io import load, dump
+from mmcv.utils import is_filepath, track_iter_progress, check_file_exist
+from mmcv.image import imread
+
+nus_categories = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle',
+ 'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone',
+ 'barrier')
+
+nus_attributes = ('cycle.with_rider', 'cycle.without_rider',
+ 'pedestrian.moving', 'pedestrian.standing',
+ 'pedestrian.sitting_lying_down', 'vehicle.moving',
+ 'vehicle.parked', 'vehicle.stopped', 'None')
+
+
+def create_nuscenes_infos(root_path,
+ out_path,
+ can_bus_root_path,
+ info_prefix,
+ version='v1.0-trainval',
+ max_sweeps=10):
+ """Create info file of the nuScenes dataset.
+
+ Given the raw data, generate its related info file in pkl format.
+
+ Args:
+ root_path (str): Path of the data root.
+ out_path (str): Output directory of the generated info files.
+ can_bus_root_path (str): Path of the nuScenes CAN bus data root.
+ info_prefix (str): Prefix of the info file to be generated.
+ version (str): Version of the data.
+ Default: 'v1.0-trainval'
+ max_sweeps (int): Max number of sweeps.
+ Default: 10
+ """
+ from nuscenes.can_bus.can_bus_api import NuScenesCanBus
+ print(version, root_path)
+ nusc = NuScenes(version=version, dataroot=root_path, verbose=True)
+ nusc_can_bus = NuScenesCanBus(dataroot=can_bus_root_path)
+ from nuscenes.utils import splits
+ available_vers = ['v1.0-trainval', 'v1.0-test', 'v1.0-mini']
+ assert version in available_vers
+ if version == 'v1.0-trainval':
+ train_scenes = splits.train
+ val_scenes = splits.val
+ elif version == 'v1.0-test':
+ train_scenes = splits.test
+ val_scenes = []
+ elif version == 'v1.0-mini':
+ train_scenes = splits.mini_train
+ val_scenes = splits.mini_val
+ else:
+ raise ValueError(f'unknown version: {version}')
+
+ # filter existing scenes.
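+ # The official split lists contain scene names; they are mapped to scene
+ # tokens below, and any scene whose LIDAR_TOP files are missing on disk
+ # is dropped, so a partial download still converts cleanly.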
+ available_scenes = get_available_scenes(nusc) + available_scene_names = [s['name'] for s in available_scenes] + train_scenes = list( + filter(lambda x: x in available_scene_names, train_scenes)) + val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes)) + train_scenes = set([ + available_scenes[available_scene_names.index(s)]['token'] + for s in train_scenes + ]) + val_scenes = set([ + available_scenes[available_scene_names.index(s)]['token'] + for s in val_scenes + ]) + + test = 'test' in version + if test: + print('test scene: {}'.format(len(train_scenes))) + else: + print('train scene: {}, val scene: {}'.format( + len(train_scenes), len(val_scenes))) + + train_nusc_infos, val_nusc_infos = _fill_trainval_infos( + nusc, nusc_can_bus, train_scenes, val_scenes, test, max_sweeps=max_sweeps) + + metadata = dict(version=version) + if test: + print('test sample: {}'.format(len(train_nusc_infos))) + data = dict(infos=train_nusc_infos, metadata=metadata) + info_path = osp.join(out_path, + '{}_infos_temporal_test.pkl'.format(info_prefix)) + dump(data, info_path) + else: + print('train sample: {}, val sample: {}'.format( + len(train_nusc_infos), len(val_nusc_infos))) + data = dict(infos=train_nusc_infos, metadata=metadata) + info_path = osp.join(out_path, + '{}_infos_temporal_train.pkl'.format(info_prefix)) + dump(data, info_path) + data['infos'] = val_nusc_infos + info_val_path = osp.join(out_path, + '{}_infos_temporal_val.pkl'.format(info_prefix)) + dump(data, info_val_path) + + +def get_available_scenes(nusc): + """Get available scenes from the input nuscenes class. + + Given the raw data, get the information of available scenes for + further info generation. + + Args: + nusc (class): Dataset class in the nuScenes dataset. + + Returns: + available_scenes (list[dict]): List of basic information for the + available scenes. + """ + available_scenes = [] + print('total scene num: {}'.format(len(nusc.scene))) + for scene in nusc.scene: + scene_token = scene['token'] + scene_rec = nusc.get('scene', scene_token) + sample_rec = nusc.get('sample', scene_rec['first_sample_token']) + sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP']) + has_more_frames = True + scene_not_exist = False + while has_more_frames: + lidar_path, boxes, _ = nusc.get_sample_data(sd_rec['token']) + lidar_path = str(lidar_path) + if os.getcwd() in lidar_path: + # path from lyftdataset is absolute path + lidar_path = lidar_path.split(f'{os.getcwd()}/')[-1] + # relative path + if not is_filepath(lidar_path): + scene_not_exist = True + break + else: + break + if scene_not_exist: + continue + available_scenes.append(scene) + print('exist scene num: {}'.format(len(available_scenes))) + return available_scenes + + +def _get_can_bus_info(nusc, nusc_can_bus, sample): + scene_name = nusc.get('scene', sample['scene_token'])['name'] + sample_timestamp = sample['timestamp'] + try: + pose_list = nusc_can_bus.get_messages(scene_name, 'pose') + except: + return np.zeros(18) # server scenes do not have can bus information. 
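+ # Layout of the 18-dim can_bus vector built below (mirroring the zero
+ # fallback above): pos(3) + orientation quaternion(4) + accel(3) +
+ # rotation_rate(3) + vel(3) = 16 values from the pose record, plus two
+ # trailing slots that BEVFormer-style dataset loaders overwrite with the
+ # BEV patch angle.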
+ can_bus = []
+ # within a scene, the first can_bus timestamp may be larger than the
+ # first sample's timestamp, so fall back to the earliest pose
+ last_pose = pose_list[0]
+ for i, pose in enumerate(pose_list):
+ if pose['utime'] > sample_timestamp:
+ break
+ last_pose = pose
+ _ = last_pose.pop('utime') # useless
+ pos = last_pose.pop('pos')
+ rotation = last_pose.pop('orientation')
+ can_bus.extend(pos)
+ can_bus.extend(rotation)
+ for key in last_pose.keys():
+ can_bus.extend(last_pose[key]) # accel/rotation_rate/vel -> 16 elements so far
+ can_bus.extend([0., 0.])
+ return np.array(can_bus)
+
+def _get_future_traj_info(nusc, sample, predict_steps=16):
+ sample_token = sample['token']
+ ann_tokens = np.array(sample['anns'])
+ sd_rec = nusc.get('sample', sample_token)
+ fut_traj_all = []
+ fut_traj_valid_mask_all = []
+ _, boxes, _ = nusc.get_sample_data(sd_rec['data']['LIDAR_TOP'], selected_anntokens=ann_tokens)
+ predict_helper = PredictHelper(nusc)
+ for i, ann_token in enumerate(ann_tokens):
+ box = boxes[i]
+ instance_token = nusc.get('sample_annotation', ann_token)['instance_token']
+ fut_traj_local = predict_helper.get_future_for_agent(instance_token,
+ sample_token,
+ seconds=predict_steps//2,
+ in_agent_frame=True)
+
+ fut_traj = np.zeros((predict_steps, 2))
+ fut_traj_valid_mask = np.zeros((predict_steps, 2))
+ if fut_traj_local.shape[0] > 0:
+ # trans = box.center
+ # trans = np.array([0, 0, 0])
+ # rot = Quaternion(matrix=box.rotation_matrix)
+ # fut_traj_scene_centric = convert_local_coords_to_global(fut_traj_local, trans, rot)
+ fut_traj_scene_centric = fut_traj_local
+ fut_traj[:fut_traj_scene_centric.shape[0], :] = fut_traj_scene_centric
+ fut_traj_valid_mask[:fut_traj_scene_centric.shape[0], :] = 1
+ fut_traj_all.append(fut_traj)
+ fut_traj_valid_mask_all.append(fut_traj_valid_mask)
+ if len(ann_tokens) > 0:
+ fut_traj_all = np.stack(fut_traj_all, axis=0)
+ fut_traj_valid_mask_all = np.stack(fut_traj_valid_mask_all, axis=0)
+ else:
+ fut_traj_all = np.zeros((0, predict_steps, 2))
+ fut_traj_valid_mask_all = np.zeros((0, predict_steps, 2))
+ return fut_traj_all, fut_traj_valid_mask_all
+
+def _fill_trainval_infos(nusc,
+ nusc_can_bus,
+ train_scenes,
+ val_scenes,
+ test=False,
+ max_sweeps=10):
+ """Generate the train/val infos from the raw data.
+
+ Args:
+ nusc (:obj:`NuScenes`): Dataset class in the nuScenes dataset.
+ nusc_can_bus (:obj:`NuScenesCanBus`): CAN bus API used to attach
+ ego pose/velocity signals to each sample.
+ train_scenes (list[str]): Basic information of training scenes.
+ val_scenes (list[str]): Basic information of validation scenes.
+ test (bool): Whether to use test mode. In test mode, no
+ annotations can be accessed. Default: False.
+ max_sweeps (int): Max number of sweeps. Default: 10.
+
+ Returns:
+ tuple[list[dict]]: Information of training set and validation set
+ that will be saved to the info file.
+ """ + train_nusc_infos = [] + val_nusc_infos = [] + frame_idx = 0 + for sample in track_iter_progress(nusc.sample): + lidar_token = sample['data']['LIDAR_TOP'] + sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) + cs_record = nusc.get('calibrated_sensor', + sd_rec['calibrated_sensor_token']) + pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) + lidar_path, boxes, _ = nusc.get_sample_data(lidar_token) + + check_file_exist(lidar_path) + can_bus = _get_can_bus_info(nusc, nusc_can_bus, sample) + ## + info = { + 'lidar_path': lidar_path, + 'token': sample['token'], + 'prev': sample['prev'], + 'next': sample['next'], + 'can_bus': can_bus, + 'frame_idx': frame_idx, # temporal related info + 'sweeps': [], + 'cams': dict(), + 'scene_token': sample['scene_token'], # temporal related info + 'lidar2ego_translation': cs_record['translation'], + 'lidar2ego_rotation': cs_record['rotation'], + 'ego2global_translation': pose_record['translation'], + 'ego2global_rotation': pose_record['rotation'], + 'timestamp': sample['timestamp'], + } + + if sample['next'] == '': + frame_idx = 0 + else: + frame_idx += 1 + + l2e_r = info['lidar2ego_rotation'] + l2e_t = info['lidar2ego_translation'] + e2g_r = info['ego2global_rotation'] + e2g_t = info['ego2global_translation'] + l2e_r_mat = Quaternion(l2e_r).rotation_matrix + e2g_r_mat = Quaternion(e2g_r).rotation_matrix + + # obtain 6 image's information per frame + camera_types = [ + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_FRONT_LEFT', + 'CAM_BACK', + 'CAM_BACK_LEFT', + 'CAM_BACK_RIGHT', + ] + for cam in camera_types: + cam_token = sample['data'][cam] + cam_path, _, cam_intrinsic = nusc.get_sample_data(cam_token) + cam_info = obtain_sensor2top(nusc, cam_token, l2e_t, l2e_r_mat, + e2g_t, e2g_r_mat, cam) + cam_info.update(cam_intrinsic=cam_intrinsic) + info['cams'].update({cam: cam_info}) + + # obtain sweeps for a single key-frame + sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) + sweeps = [] + while len(sweeps) < max_sweeps: + if not sd_rec['prev'] == '': + sweep = obtain_sensor2top(nusc, sd_rec['prev'], l2e_t, + l2e_r_mat, e2g_t, e2g_r_mat, 'lidar') + sweeps.append(sweep) + sd_rec = nusc.get('sample_data', sd_rec['prev']) + else: + break + info['sweeps'] = sweeps + # obtain annotation + if not test: + annotations = [ + nusc.get('sample_annotation', token) + for token in sample['anns'] + ] + locs = np.array([b.center for b in boxes]).reshape(-1, 3) + dims = np.array([b.wlh for b in boxes]).reshape(-1, 3) + rots = np.array([b.orientation.yaw_pitch_roll[0] + for b in boxes]).reshape(-1, 1) + velocity = np.array( + [nusc.box_velocity(token)[:2] for token in sample['anns']]) + valid_flag = np.array( + [(anno['num_lidar_pts'] + anno['num_radar_pts']) > 0 + for anno in annotations], + dtype=bool).reshape(-1) + instance_inds = [nusc.getind('instance', ann['instance_token']) + for ann in annotations] + future_traj_all, future_traj_valid_mask_all = _get_future_traj_info(nusc, sample) + instance_tokens = [ann['instance_token'] for ann in annotations] # dtype('ego->global->ego'->lidar + l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix + e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix + R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ ( + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ ( + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T + ) + l2e_t @ np.linalg.inv(l2e_r_mat).T + sweep['sensor2lidar_rotation'] = R.T # 
points @ R.T + T + sweep['sensor2lidar_translation'] = T + return sweep + + +def export_2d_annotation(root_path, info_path, version, mono3d=True): + """Export 2d annotation from the info file and raw data. + + Args: + root_path (str): Root path of the raw data. + info_path (str): Path of the info file. + version (str): Dataset version. + mono3d (bool): Whether to export mono3d annotation. Default: True. + """ + # get bbox annotations for camera + camera_types = [ + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_FRONT_LEFT', + 'CAM_BACK', + 'CAM_BACK_LEFT', + 'CAM_BACK_RIGHT', + ] + nusc_infos = load(info_path)['infos'] + nusc = NuScenes(version=version, dataroot=root_path, verbose=True) + # info_2d_list = [] + cat2Ids = [ + dict(id=nus_categories.index(cat_name), name=cat_name) + for cat_name in nus_categories + ] + coco_ann_id = 0 + coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids) + for info in track_iter_progress(nusc_infos): + for cam in camera_types: + cam_info = info['cams'][cam] + coco_infos = get_2d_boxes( + nusc, + cam_info['sample_data_token'], + visibilities=['', '1', '2', '3', '4'], + mono3d=mono3d) + (height, width, _) = imread(cam_info['data_path']).shape + coco_2d_dict['images'].append( + dict( + file_name=cam_info['data_path'].split('data/nuscenes/') + [-1], + id=cam_info['sample_data_token'], + token=info['token'], + cam2ego_rotation=cam_info['sensor2ego_rotation'], + cam2ego_translation=cam_info['sensor2ego_translation'], + ego2global_rotation=info['ego2global_rotation'], + ego2global_translation=info['ego2global_translation'], + cam_intrinsic=cam_info['cam_intrinsic'], + width=width, + height=height)) + for coco_info in coco_infos: + if coco_info is None: + continue + # add an empty key for coco format + coco_info['segmentation'] = [] + coco_info['id'] = coco_ann_id + coco_2d_dict['annotations'].append(coco_info) + coco_ann_id += 1 + if mono3d: + json_prefix = f'{info_path[:-4]}_mono3d' + else: + json_prefix = f'{info_path[:-4]}' + dump(coco_2d_dict, f'{json_prefix}.coco.json') + + +def get_2d_boxes(nusc, + sample_data_token: str, + visibilities: List[str], + mono3d=True): + """Get the 2D annotation records for a given `sample_data_token`. + + Args: + sample_data_token (str): Sample data token belonging to a camera \ + keyframe. + visibilities (list[str]): Visibility filter. + mono3d (bool): Whether to get boxes with mono3d annotation. + + Return: + list[dict]: List of 2D annotation record that belongs to the input + `sample_data_token`. + """ + + # Get the sample data and the sample corresponding to that sample data. + sd_rec = nusc.get('sample_data', sample_data_token) + + assert sd_rec[ + 'sensor_modality'] == 'camera', 'Error: get_2d_boxes only works' \ + ' for camera sample_data!' + if not sd_rec['is_key_frame']: + raise ValueError( + 'The 2D re-projections are available only for keyframes.') + + s_rec = nusc.get('sample', sd_rec['sample_token']) + + # Get the calibrated sensor and ego pose + # record to get the transformation matrices. + cs_rec = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token']) + pose_rec = nusc.get('ego_pose', sd_rec['ego_pose_token']) + camera_intrinsic = np.array(cs_rec['camera_intrinsic']) + + # Get all the annotation with the specified visibilties. 
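+ # Projection chain used below: global box -> ego frame (pose_rec) ->
+ # camera frame (cs_rec) -> image plane via view_points() with the 3x3
+ # intrinsic; corners behind the camera (z <= 0) are discarded first.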
+ ann_recs = [ + nusc.get('sample_annotation', token) for token in s_rec['anns'] + ] + ann_recs = [ + ann_rec for ann_rec in ann_recs + if (ann_rec['visibility_token'] in visibilities) + ] + + repro_recs = [] + + for ann_rec in ann_recs: + # Augment sample_annotation with token information. + ann_rec['sample_annotation_token'] = ann_rec['token'] + ann_rec['sample_data_token'] = sample_data_token + + # Get the box in global coordinates. + box = nusc.get_box(ann_rec['token']) + + # Move them to the ego-pose frame. + box.translate(-np.array(pose_rec['translation'])) + box.rotate(Quaternion(pose_rec['rotation']).inverse) + + # Move them to the calibrated sensor frame. + box.translate(-np.array(cs_rec['translation'])) + box.rotate(Quaternion(cs_rec['rotation']).inverse) + + # Filter out the corners that are not in front of the calibrated + # sensor. + corners_3d = box.corners() + in_front = np.argwhere(corners_3d[2, :] > 0).flatten() + corners_3d = corners_3d[:, in_front] + + # Project 3d box to 2d. + corner_coords = view_points(corners_3d, camera_intrinsic, + True).T[:, :2].tolist() + + # Keep only corners that fall within the image. + final_coords = post_process_coords(corner_coords) + + # Skip if the convex hull of the re-projected corners + # does not intersect the image canvas. + if final_coords is None: + continue + else: + min_x, min_y, max_x, max_y = final_coords + + # Generate dictionary record to be included in the .json file. + repro_rec = generate_record(ann_rec, min_x, min_y, max_x, max_y, + sample_data_token, sd_rec['filename']) + + # If mono3d=True, add 3D annotations in camera coordinates + if mono3d and (repro_rec is not None): + loc = box.center.tolist() + + dim = box.wlh + dim[[0, 1, 2]] = dim[[1, 2, 0]] # convert wlh to our lhw + dim = dim.tolist() + + rot = box.orientation.yaw_pitch_roll[0] + rot = [-rot] # convert the rot to our cam coordinate + + global_velo2d = nusc.box_velocity(box.token)[:2] + global_velo3d = np.array([*global_velo2d, 0.0]) + e2g_r_mat = Quaternion(pose_rec['rotation']).rotation_matrix + c2e_r_mat = Quaternion(cs_rec['rotation']).rotation_matrix + cam_velo3d = global_velo3d @ np.linalg.inv( + e2g_r_mat).T @ np.linalg.inv(c2e_r_mat).T + velo = cam_velo3d[0::2].tolist() + + repro_rec['bbox_cam3d'] = loc + dim + rot + repro_rec['velo_cam3d'] = velo + + center3d = np.array(loc).reshape([1, 3]) + center2d = points_cam2img( + center3d, camera_intrinsic, with_depth=True) + repro_rec['center2d'] = center2d.squeeze().tolist() + # normalized center2D + depth + # if samples with depth < 0 will be removed + if repro_rec['center2d'][2] <= 0: + continue + + ann_token = nusc.get('sample_annotation', + box.token)['attribute_tokens'] + if len(ann_token) == 0: + attr_name = 'None' + else: + attr_name = nusc.get('attribute', ann_token[0])['name'] + attr_id = nus_attributes.index(attr_name) + repro_rec['attribute_name'] = attr_name + repro_rec['attribute_id'] = attr_id + + repro_recs.append(repro_rec) + + return repro_recs + + +def post_process_coords( + corner_coords: List, imsize: Tuple[int, int] = (1600, 900) +) -> Union[Tuple[float, float, float, float], None]: + """Get the intersection of the convex hull of the reprojected bbox corners + and the image canvas, return None if no intersection. + + Args: + corner_coords (list[int]): Corner coordinates of reprojected + bounding box. + imsize (tuple[int]): Size of the image canvas. + + Return: + tuple [float]: Intersection of the convex hull of the 2D box + corners and the image canvas. 
+ """ + polygon_from_2d_box = MultiPoint(corner_coords).convex_hull + img_canvas = box(0, 0, imsize[0], imsize[1]) + + if polygon_from_2d_box.intersects(img_canvas): + img_intersection = polygon_from_2d_box.intersection(img_canvas) + intersection_coords = np.array( + [coord for coord in img_intersection.exterior.coords]) + + min_x = min(intersection_coords[:, 0]) + min_y = min(intersection_coords[:, 1]) + max_x = max(intersection_coords[:, 0]) + max_y = max(intersection_coords[:, 1]) + + return min_x, min_y, max_x, max_y + else: + return None + + +def generate_record(ann_rec: dict, x1: float, y1: float, x2: float, y2: float, + sample_data_token: str, filename: str) -> OrderedDict: + """Generate one 2D annotation record given various informations on top of + the 2D bounding box coordinates. + + Args: + ann_rec (dict): Original 3d annotation record. + x1 (float): Minimum value of the x coordinate. + y1 (float): Minimum value of the y coordinate. + x2 (float): Maximum value of the x coordinate. + y2 (float): Maximum value of the y coordinate. + sample_data_token (str): Sample data token. + filename (str):The corresponding image file where the annotation + is present. + + Returns: + dict: A sample 2D annotation record. + - file_name (str): flie name + - image_id (str): sample data token + - area (float): 2d box area + - category_name (str): category name + - category_id (int): category id + - bbox (list[float]): left x, top y, dx, dy of 2d box + - iscrowd (int): whether the area is crowd + """ + repro_rec = OrderedDict() + repro_rec['sample_data_token'] = sample_data_token + coco_rec = dict() + + relevant_keys = [ + 'attribute_tokens', + 'category_name', + 'instance_token', + 'next', + 'num_lidar_pts', + 'num_radar_pts', + 'prev', + 'sample_annotation_token', + 'sample_data_token', + 'visibility_token', + ] + + for key, value in ann_rec.items(): + if key in relevant_keys: + repro_rec[key] = value + + repro_rec['bbox_corners'] = [x1, y1, x2, y2] + repro_rec['filename'] = filename + + coco_rec['file_name'] = filename + coco_rec['image_id'] = sample_data_token + coco_rec['area'] = (y2 - y1) * (x2 - x1) + + if repro_rec['category_name'] not in NuScenesDataset.NameMapping: + return None + cat_name = NuScenesDataset.NameMapping[repro_rec['category_name']] + coco_rec['category_name'] = cat_name + coco_rec['category_id'] = nus_categories.index(cat_name) + coco_rec['bbox'] = [x1, y1, x2 - x1, y2 - y1] + coco_rec['iscrowd'] = 0 + + return coco_rec + \ No newline at end of file diff --git a/adzoo/uniad/test.py b/adzoo/uniad/test.py new file mode 100755 index 0000000..9442514 --- /dev/null +++ b/adzoo/uniad/test.py @@ -0,0 +1,145 @@ +import argparse +import torch +import os +import warnings +from torch.nn.parallel.distributed import DistributedDataParallel +from mmcv.utils import get_dist_info, init_dist, wrap_fp16_model, set_random_seed, Config, DictAction, load_checkpoint +from mmcv.fileio.io import dump +from mmcv.datasets import build_dataset, build_dataloader, replace_ImageToTensor +from mmcv.models import build_model, fuse_conv_bn +import time +import os.path as osp +from adzoo.uniad.test_utils import custom_multi_gpu_test, custom_single_gpu_test +import cv2 +cv2.setNumThreads(1) + +warnings.filterwarnings("ignore") + +def parse_args(): + parser = argparse.ArgumentParser( + description='MMDet test (and eval) a model') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument('--out', default='output/results.pkl', 
+                        help='output result file in pickle format')
+    parser.add_argument(
+        '--fuse-conv-bn',
+        action='store_true',
+        help='Whether to fuse conv and bn, this will slightly increase '
+        'the inference speed')
+    parser.add_argument(
+        '--eval',
+        type=str,
+        nargs='+',
+        help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
+        ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
+    parser.add_argument('--show', action='store_true', help='show results')
+    parser.add_argument(
+        '--show-dir', help='directory where results will be saved')
+    parser.add_argument(
+        '--gpu-collect',
+        action='store_true',
+        help='whether to use gpu to collect results.')
+    parser.add_argument(
+        '--tmpdir',
+        help='tmp directory used for collecting results from multiple '
+        'workers, available when gpu-collect is not specified')
+    parser.add_argument('--seed', type=int, default=0, help='random seed')
+    parser.add_argument(
+        '--deterministic',
+        action='store_true',
+        help='whether to set deterministic options for CUDNN backend.')
+    parser.add_argument(
+        '--launcher',
+        choices=['none', 'pytorch', 'slurm', 'mpi'],
+        default='none',
+        help='job launcher')
+    parser.add_argument('--local-rank', type=int, default=0)
+    args = parser.parse_args()
+    if 'LOCAL_RANK' not in os.environ:
+        os.environ['LOCAL_RANK'] = str(args.local_rank)
+    return args
+
+
+def main():
+    args = parse_args()
+    cfg = Config.fromfile(args.config)
+
+    cfg.model.pretrained = None
+    cfg.data.test.test_mode = True
+    samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
+    if samples_per_gpu > 1:
+        # Replace 'ImageToTensor' with 'DefaultFormatBundle'
+        cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
+
+    # init distributed env first, since logger depends on the dist info.
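+    # `--launcher none` runs a single, non-distributed process; any other
+    # launcher enables cudnn benchmarking and calls init_dist() so that the
+    # rank/world size used below are available.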
+    if args.launcher == 'none':
+        distributed = False
+    else:
+        distributed = True
+        torch.backends.cudnn.benchmark = True
+        init_dist(args.launcher, **cfg.dist_params)
+        rank, world_size = get_dist_info()
+
+    set_random_seed(args.seed, deterministic=args.deterministic)
+
+    # Dataloader
+    dataset = build_dataset(cfg.data.test)
+    data_loader = build_dataloader(dataset,
+                                   samples_per_gpu=samples_per_gpu,
+                                   workers_per_gpu=cfg.data.workers_per_gpu,
+                                   dist=distributed,
+                                   shuffle=False,
+                                   nonshuffler_sampler=cfg.data.nonshuffler_sampler,
+                                   )
+
+    # Model
+    cfg.model.train_cfg = None
+    model = build_model(cfg.model, test_cfg=cfg.get('test_cfg'))
+    fp16_cfg = cfg.get('fp16', None)
+    if fp16_cfg is not None:
+        wrap_fp16_model(model)
+    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
+    if args.fuse_conv_bn:
+        model = fuse_conv_bn(model)
+
+    # Add classes info
+    if 'CLASSES' in checkpoint.get('meta', {}):  # for det
+        model.CLASSES = checkpoint['meta']['CLASSES']
+    else:
+        model.CLASSES = dataset.CLASSES
+    if 'PALETTE' in checkpoint.get('meta', {}):  # for seg
+        model.PALETTE = checkpoint['meta']['PALETTE']
+    elif hasattr(dataset, 'PALETTE'):
+        model.PALETTE = dataset.PALETTE
+
+    if not distributed:
+        assert False  # TODO(yzj): single-GPU testing is not supported yet
+        # model = MMDataParallel(model, device_ids=[0])
+        # outputs = custom_single_gpu_test(model, data_loader, args.show, args.show_dir)
+    else:
+        model = DistributedDataParallel(model.cuda(),
+                                        device_ids=[torch.cuda.current_device()],
+                                        broadcast_buffers=False,
+                                        )
+        outputs = custom_multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect)
+
+    if rank == 0:
+        if args.out:
+            print(f'\nwriting results to {args.out}')
+            dump(outputs, args.out)
+        kwargs = {}
+        kwargs['jsonfile_prefix'] = osp.join('test', args.config.split('/')[-1].split('.')[-2], time.ctime().replace(' ', '_').replace(':', '_'))
+
+        if args.eval:
+            eval_kwargs = cfg.get('evaluation', {}).copy()
+            # hard-code way to remove EvalHook args
+            for key in ['interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule']:
+                eval_kwargs.pop(key, None)
+            eval_kwargs.update(dict(metric=args.eval, **kwargs))
+            print(dataset.evaluate(outputs, **eval_kwargs))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/adzoo/uniad/test_utils.py b/adzoo/uniad/test_utils.py
new file mode 100644
index 0000000..4be8936
--- /dev/null
+++ b/adzoo/uniad/test_utils.py
@@ -0,0 +1,318 @@
+import os
+import os.path as osp
+import pickle
+import shutil
+import tempfile
+import time
+
+import torch
+import torch.distributed as dist
+
+from mmcv.models.dense_heads.occ_head_plugin import IntersectionOverUnion, PanopticMetric
+from mmcv.models.dense_heads.planning_head_plugin import UniADPlanningMetric
+from mmcv.utils import ProgressBar, mkdir_or_exist, get_dist_info
+from mmcv.fileio.io import load, dump
+import numpy as np
+import pycocotools.mask as mask_util
+
+def custom_encode_mask_results(mask_results):
+    """Encode bitmap mask to RLE code. Semantic Masks only
+    Args:
+        mask_results (list | tuple[list]): bitmap mask results.
+            In mask scoring rcnn, mask_results is a tuple of (segm_results,
+            segm_cls_score).
+    Returns:
+        list | tuple: RLE encoded mask.
+ """ + cls_segms = mask_results + num_classes = len(cls_segms) + encoded_mask_results = [] + for i in range(len(cls_segms)): + encoded_mask_results.append( + mask_util.encode( + np.array( + cls_segms[i][:, :, np.newaxis], order='F', + dtype='uint8'))[0]) # encoded with RLE + return [encoded_mask_results] + +def custom_multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): + """Test model with multiple gpus. + This method tests model with multiple gpus and collects the results + under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' + it encodes results to gpu tensors and use gpu communication for results + collection. On cpu mode it saves the results on different gpus to 'tmpdir' + and collects them by the rank 0 worker. + Args: + model (nn.Module): Model to be tested. + data_loader (nn.Dataloader): Pytorch data loader. + tmpdir (str): Path of directory to save the temporary results from + different gpus under cpu mode. + gpu_collect (bool): Option to use either gpu or cpu to collect results. + Returns: + list: The prediction results. + """ + model.eval() + + # Occ eval init + eval_occ = hasattr(model.module, 'with_occ_head') \ + and model.module.with_occ_head + if eval_occ: + # 30mx30m, 100mx100m at 50cm resolution + EVALUATION_RANGES = {'30x30': (70, 130), + '100x100': (0, 200)} + n_classes = 2 + iou_metrics = {} + for key in EVALUATION_RANGES.keys(): + iou_metrics[key] = IntersectionOverUnion(n_classes).cuda() + panoptic_metrics = {} + for key in EVALUATION_RANGES.keys(): + panoptic_metrics[key] = PanopticMetric(n_classes=n_classes, temporally_consistent=True).cuda() + + # Plan eval init + eval_planning = hasattr(model.module, 'with_planning_head') \ + and model.module.with_planning_head + if eval_planning: + planning_metrics = UniADPlanningMetric().cuda() + + bbox_results = [] + mask_results = [] + dataset = data_loader.dataset + rank, world_size = get_dist_info() + if rank == 0: + prog_bar = ProgressBar(len(dataset)) + time.sleep(2) # This line can prevent deadlock problem in some cases. 
+    have_mask = False
+    num_occ = 0
+    for i, data in enumerate(data_loader):
+        with torch.no_grad():
+            result = model(data, return_loss=False, rescale=True)
+
+        # Eval planning
+        if eval_planning:
+            # TODO: Wrap below into a func
+            segmentation = result[0]['planning']['planning_gt']['segmentation']
+            sdc_planning = result[0]['planning']['planning_gt']['sdc_planning']
+            sdc_planning_mask = result[0]['planning']['planning_gt']['sdc_planning_mask']
+            pred_sdc_traj = result[0]['planning']['result_planning']['sdc_traj']
+            result[0]['planning_traj'] = result[0]['planning']['result_planning']['sdc_traj']
+            result[0]['planning_traj_gt'] = result[0]['planning']['planning_gt']['sdc_planning']
+            result[0]['command'] = result[0]['planning']['planning_gt']['command']
+            planning_metrics(pred_sdc_traj[:, :6, :2], sdc_planning[0][0, :, :6, :2], sdc_planning_mask[0][0, :, :6, :2], segmentation[0][:, [1, 2, 3, 4, 5, 6]])
+
+        # Eval Occ
+        if eval_occ:
+            occ_has_invalid_frame = data['gt_occ_has_invalid_frame'][0]
+            occ_to_eval = not occ_has_invalid_frame.item()
+            if occ_to_eval and 'occ' in result[0].keys():
+                num_occ += 1
+                for key, grid in EVALUATION_RANGES.items():
+                    limits = slice(grid[0], grid[1])
+                    iou_metrics[key](result[0]['occ']['seg_out'][..., limits, limits].contiguous(),
+                                     result[0]['occ']['seg_gt'][..., limits, limits].contiguous())
+                    panoptic_metrics[key](result[0]['occ']['ins_seg_out'][..., limits, limits].contiguous().detach(),
+                                          result[0]['occ']['ins_seg_gt'][..., limits, limits].contiguous())
+
+        # Pop out unnecessary occ results; avoid appending them to cpu in collect_results_cpu
+        if os.environ.get('ENABLE_PLOT_MODE', None) is None:
+            result[0].pop('occ', None)
+            result[0].pop('planning', None)
+        else:
+            for k in ['seg_gt', 'ins_seg_gt', 'pred_ins_sigmoid', 'seg_out', 'ins_seg_out']:
+                if k in result[0]['occ']:
+                    result[0]['occ'][k] = result[0]['occ'][k].detach().cpu()
+            for k in ['bbox', 'segm', 'labels', 'panoptic', 'drivable', 'score_list', 'lane', 'lane_score', 'stuff_score_list']:
+                if k in result[0]['pts_bbox'] and isinstance(result[0]['pts_bbox'][k], torch.Tensor):
+                    result[0]['pts_bbox'][k] = result[0]['pts_bbox'][k].detach().cpu()
+
+        # Encode mask results
+        if isinstance(result, dict):
+            if 'bbox_results' in result.keys():
+                bbox_result = result['bbox_results']
+                batch_size = len(result['bbox_results'])
+                bbox_results.extend(bbox_result)
+            if 'mask_results' in result.keys() and result['mask_results'] is not None:
+                mask_result = custom_encode_mask_results(result['mask_results'])
+                mask_results.extend(mask_result)
+                have_mask = True
+        else:
+            batch_size = len(result)
+            bbox_results.extend(result)
+
+        if rank == 0:
+            for _ in range(batch_size * world_size):
+                prog_bar.update()
+
+    # collect results from all ranks
+    if gpu_collect:
+        bbox_results = collect_results_gpu(bbox_results, len(dataset))
+        if have_mask:
+            mask_results = collect_results_gpu(mask_results, len(dataset))
+        else:
+            mask_results = None
+    else:
+        bbox_results = collect_results_cpu(bbox_results, len(dataset), tmpdir)
+        tmpdir = tmpdir + '_mask' if tmpdir is not None else None
+        if have_mask:
+            mask_results = collect_results_cpu(mask_results, len(dataset), tmpdir)
+        else:
+            mask_results = None
+
+    if eval_planning:
+        planning_results = planning_metrics.compute()
+        planning_metrics.reset()
+
+    ret_results = dict()
+    ret_results['bbox_results'] = bbox_results
+    if eval_occ:
+        occ_results = {}
+        for key, grid in EVALUATION_RANGES.items():
+            panoptic_scores = panoptic_metrics[key].compute()
+            for panoptic_key, value in panoptic_scores.items():
+                occ_results[f'{panoptic_key}'] = occ_results.get(f'{panoptic_key}', []) + [100 * value[1].item()]
+            panoptic_metrics[key].reset()
+
+            iou_scores = iou_metrics[key].compute()
+            occ_results['iou'] = occ_results.get('iou', []) + [100 * iou_scores[1].item()]
+            iou_metrics[key].reset()
+
+        occ_results['num_occ'] = num_occ  # count on one gpu
+        occ_results['ratio_occ'] = num_occ / len(dataset)  # count on one gpu, but reflects the relative ratio
+        ret_results['occ_results_computed'] = occ_results
+    if eval_planning:
+        ret_results['planning_results_computed'] = planning_results
+
+    if mask_results is not None:
+        ret_results['mask_results'] = mask_results
+    return ret_results
+
+
+def collect_results_cpu(result_part, size, tmpdir=None):
+    rank, world_size = get_dist_info()
+    # create a tmp dir if it is not specified
+    if tmpdir is None:
+        MAX_LEN = 512
+        # 32 is whitespace
+        dir_tensor = torch.full((MAX_LEN, ),
+                                32,
+                                dtype=torch.uint8,
+                                device='cuda')
+        if rank == 0:
+            mkdir_or_exist('.dist_test')
+            tmpdir = tempfile.mkdtemp(dir='.dist_test')
+            tmpdir = torch.tensor(
+                bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
+            dir_tensor[:len(tmpdir)] = tmpdir
+        dist.broadcast(dir_tensor, 0)
+        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
+    else:
+        mkdir_or_exist(tmpdir)
+    # dump the part result to the dir
+    dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
+    dist.barrier()
+    # collect all parts
+    if rank != 0:
+        return None
+    else:
+        # load results of all parts from tmp dir
+        part_list = []
+        for i in range(world_size):
+            part_file = osp.join(tmpdir, f'part_{i}.pkl')
+            part_list.append(load(part_file))
+        # Because the evaluation sampler is changed so that each gpu handles a
+        # contiguous chunk of samples, the parts are concatenated in rank
+        # order instead of being interleaved.
+        ordered_results = []
+        # for res in zip(*part_list):
+        for res in part_list:
+            ordered_results.extend(list(res))
+        # the dataloader may pad some samples
+        ordered_results = ordered_results[:size]
+        # remove tmp dir
+        shutil.rmtree(tmpdir)
+        return ordered_results
+
+
+def collect_results_gpu(result_part, size):
+    # NOTE: despite the name, this currently falls back to the cpu collection
+    # path; the missing `return` in the original silently dropped the results.
+    return collect_results_cpu(result_part, size)
+
+def custom_single_gpu_test(model,
+                           data_loader,
+                           show=False,
+                           out_dir=None,
+                           show_score_thr=0.3):
+    """Test model with single gpu.
+
+    This method tests model with single gpu and gives the 'show' option.
+    By setting ``show=True``, it saves the visualization results under
+    ``out_dir``.
+
+    Args:
+        model (nn.Module): Model to be tested.
+        data_loader (nn.Dataloader): Pytorch data loader.
+        show (bool): Whether to save visualization results.
+            Default: False.
+        out_dir (str): The path to save visualization results.
+            Default: None.
+
+    Returns:
+        list[dict]: The prediction results.
+ """ + model.eval() + results = [] + dataset = data_loader.dataset + prog_bar = ProgressBar(len(dataset)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + result = model(return_loss=False, rescale=True, **data) + + if show: + # Visualize the results of MMDetection3D model + # 'show_results' is MMdetection3D visualization API + models_3d = (Base3DDetector, Base3DSegmentor, + SingleStageMono3DDetector) + if isinstance(model.module, models_3d): + model.module.show_results(data, result, out_dir=out_dir) + # Visualize the results of MMDetection model + # 'show_result' is MMdetection visualization API + else: + batch_size = len(result) + if batch_size == 1 and isinstance(data['img'][0], + torch.Tensor): + img_tensor = data['img'][0] + else: + img_tensor = data['img'][0].data[0] + img_metas = data['img_metas'][0].data[0] + imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg']) + assert len(imgs) == len(img_metas) + + for i, (img, img_meta) in enumerate(zip(imgs, img_metas)): + h, w, _ = img_meta['img_shape'] + img_show = img[:h, :w, :] + + ori_h, ori_w = img_meta['ori_shape'][:-1] + img_show = imresize(img_show, (ori_w, ori_h)) + + if out_dir: + out_file = osp.join(out_dir, img_meta['ori_filename']) + else: + out_file = None + + model.module.show_result( + img_show, + result[i], + show=show, + out_file=out_file, + score_thr=show_score_thr) + results.extend(result) + + batch_size = len(result) + for _ in range(batch_size): + prog_bar.update() + return results \ No newline at end of file diff --git a/adzoo/uniad/train.py b/adzoo/uniad/train.py new file mode 100755 index 0000000..1df7f99 --- /dev/null +++ b/adzoo/uniad/train.py @@ -0,0 +1,212 @@ +import argparse +import torch +import copy +import os +import time +import warnings +from os import path as osp +from mmcv import __version__ as mmcv_version +from mmcv.datasets import build_dataset +from mmcv.models import build_model +from mmcv.utils import collect_env, get_root_logger, mkdir_or_exist, set_random_seed, get_dist_info, init_dist, \ + Config, DictAction, TORCH_VERSION, digit_version +from mmcv.datasets.builder import build_dataloader +from mmcv.optims import build_optimizer +from torch.nn.parallel import DataParallel, DistributedDataParallel +from mmcv.core.evaluation.eval_hooks import CustomDistEvalHook +from mmcv.core import EvalHook +from mmcv.runner import (HOOKS, DistSamplerSeedHook, EpochBasedRunner, + Fp16OptimizerHook, OptimizerHook, build_runner) +from adzoo.uniad.test_utils import custom_multi_gpu_test + +warnings.filterwarnings("ignore") + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a detector') + parser.add_argument('config', help='train config file path') + parser.add_argument('--work-dir', help='the dir to save logs and models') + parser.add_argument( + '--resume-from', help='the checkpoint file to resume from') + parser.add_argument( + '--no-validate', + action='store_true', + help='whether not to evaluate the checkpoint during training') + group_gpus = parser.add_mutually_exclusive_group() + group_gpus.add_argument( + '--gpus', + type=int, + help='number of gpus to use ' + '(only applicable to non-distributed training)') + group_gpus.add_argument( + '--gpu-ids', + type=int, + nargs='+', + help='ids of gpus to use ' + '(only applicable to non-distributed training)') + parser.add_argument('--seed', type=int, default=0, help='random seed') + parser.add_argument( + '--deterministic', + action='store_true', + help='whether to set deterministic options for CUDNN backend.') + 
+    parser.add_argument(
+        '--launcher',
+        choices=['none', 'pytorch', 'slurm', 'mpi'],
+        default='none',
+        help='job launcher')
+    parser.add_argument('--local-rank', type=int, default=0)
+    parser.add_argument(
+        '--autoscale-lr',
+        action='store_true',
+        help='automatically scale lr with the number of gpus')
+    args = parser.parse_args()
+    if 'LOCAL_RANK' not in os.environ:
+        os.environ['LOCAL_RANK'] = str(args.local_rank)
+
+    return args
+
+
+def main():
+    args = parse_args()
+    cfg = Config.fromfile(args.config)
+
+    if args.work_dir is not None:
+        cfg.work_dir = args.work_dir
+    else:
+        cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
+
+    if args.resume_from is not None and osp.isfile(args.resume_from):
+        cfg.resume_from = args.resume_from
+
+    if args.gpu_ids is not None:
+        cfg.gpu_ids = args.gpu_ids
+    else:
+        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
+    if args.autoscale_lr:
+        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
+        cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8
+
+    # init distributed env first, since logger depends on the dist info.
+    if args.launcher == 'none':
+        distributed = False
+    elif args.launcher == 'pytorch':
+        torch.backends.cudnn.benchmark = True
+        distributed = True
+        init_dist(args.launcher, **cfg.dist_params)
+        rank, world_size = get_dist_info()
+        cfg.gpu_ids = range(world_size)
+
+    # Create work_dir
+    mkdir_or_exist(osp.abspath(cfg.work_dir))
+    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
+    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
+
+    # meta info
+    meta = dict()
+    env_info_dict = collect_env()
+    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
+    dash_line = '-' * 60 + '\n'
+    meta['env_info'] = env_info
+    meta['config'] = cfg.pretty_text
+    meta['seed'] = args.seed
+    meta['exp_name'] = osp.basename(args.config)
+
+    # seed
+    cfg.seed = args.seed
+    set_random_seed(args.seed, deterministic=args.deterministic)
+
+    # logger
+    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
+    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level, name=cfg.model.type)
+    logger.info('Environment info:\n' + dash_line + env_info + '\n' + dash_line)
+    logger.info(f'Distributed training: {distributed}')
+    logger.info(f'Config:\n{cfg.pretty_text}')
+    logger.info(f'Set random seed to {args.seed}, 'f'deterministic: {args.deterministic}')
+
+    # Dataset
+    datasets = [build_dataset(cfg.data.train)]
+
+    # Save meta info
+    if cfg.checkpoint_config is not None:
+        cfg.checkpoint_config.meta = dict(mmcv_version=mmcv_version, config=cfg.pretty_text, CLASSES=datasets[0].CLASSES, \
+            PALETTE=datasets[0].PALETTE if hasattr(datasets[0], 'PALETTE') else None)  # for segmentors
+
+    # Dataloader
+    datasets = datasets if isinstance(datasets, (list, tuple)) else [datasets]
+    data_loaders = [build_dataloader(ds,
+                                     cfg.data.samples_per_gpu,
+                                     cfg.data.workers_per_gpu,
+                                     # cfg.gpus will be ignored if distributed
+                                     len(cfg.gpu_ids),
+                                     dist=distributed,
+                                     seed=cfg.seed,
+                                     shuffler_sampler=cfg.data.shuffler_sampler,  # dict(type='DistributedGroupSampler'),
+                                     nonshuffler_sampler=cfg.data.nonshuffler_sampler,  # dict(type='DistributedSampler'),
+                                     ) for ds in datasets
+                    ]
+
+    # Model
+    model = build_model(cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))
+    model.init_weights()
+    model.CLASSES = datasets[0].CLASSES  # add an attribute for visualization convenience
+    logger.info(f'Model:\n{model}')
+    if distributed:
+        find_unused_parameters = cfg.get('find_unused_parameters', False)
+        model = DistributedDataParallel(model.cuda(),
+                                        device_ids=[torch.cuda.current_device()],
+                                        broadcast_buffers=False,
+                                        find_unused_parameters=find_unused_parameters
+                                        )
+    else:
+        model = DataParallel(model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
+
+    # Optimizer
+    optimizer = build_optimizer(model, cfg.optimizer)
+    optimizer_config = OptimizerHook(**cfg.optimizer_config)
+
+    # Runner
+    runner = build_runner(cfg.runner, default_args=dict(model=model,
+                                                        optimizer=optimizer,
+                                                        work_dir=cfg.work_dir,
+                                                        logger=logger,
+                                                        meta=meta))
+    runner.timestamp = timestamp
+    runner.register_training_hooks(cfg.lr_config, optimizer_config,
+                                   cfg.checkpoint_config, cfg.log_config,
+                                   cfg.get('momentum_config', None))
+    if distributed:
+        if isinstance(runner, EpochBasedRunner):
+            runner.register_hook(DistSamplerSeedHook())
+
+    # Register eval hooks for interval eval
+    val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
+    if val_samples_per_gpu > 1:
+        assert False  # multi-sample validation is not supported yet
+        # Replace 'ImageToTensor' with 'DefaultFormatBundle'
+        cfg.data.val.pipeline = replace_ImageToTensor(
+            cfg.data.val.pipeline)
+    val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
+
+    val_dataloader = build_dataloader(
+        val_dataset,
+        samples_per_gpu=val_samples_per_gpu,
+        workers_per_gpu=cfg.data.workers_per_gpu,
+        dist=distributed,
+        shuffle=False,
+        shuffler_sampler=cfg.data.shuffler_sampler,  # dict(type='DistributedGroupSampler'),
+        nonshuffler_sampler=cfg.data.nonshuffler_sampler,  # dict(type='DistributedSampler'),
+    )
+    eval_cfg = cfg.get('evaluation', {})
+    eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
+    eval_cfg['jsonfile_prefix'] = osp.join('val', cfg.work_dir, time.ctime().replace(' ', '_').replace(':', '_'))
+    eval_hook = CustomDistEvalHook if distributed else EvalHook
+    runner.register_hook(eval_hook(val_dataloader, test_fn=custom_multi_gpu_test, **eval_cfg))
+
+    if cfg.resume_from and os.path.exists(cfg.resume_from):
+        runner.resume(cfg.resume_from)
+    elif cfg.load_from:
+        runner.load_checkpoint(cfg.load_from)
+    runner.run(data_loaders, cfg.workflow)
+
+if __name__ == '__main__':
+    main()
diff --git a/adzoo/uniad/uniad_dist_eval.sh b/adzoo/uniad/uniad_dist_eval.sh
new file mode 100755
index 0000000..12b2720
--- /dev/null
+++ b/adzoo/uniad/uniad_dist_eval.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+
+T=`date +%m%d%H%M`
+
+# -------------------------------------------------- #
+# Usually you only need to customize these variables #
+CFG=$1                                               #
+CKPT=$2                                              #
+GPUS=$3                                              #
+# -------------------------------------------------- #
+GPUS_PER_NODE=$(($GPUS<8?$GPUS:8))
+
+MASTER_PORT=${MASTER_PORT:-12145}
+WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
+# Intermediate files and logs will be saved to UniAD/projects/work_dirs/
+
+if [ ! -d ${WORK_DIR}logs ]; then
+    mkdir -p ${WORK_DIR}logs
+fi
+
+PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
+python -m torch.distributed.launch \
+    --nproc_per_node=$GPUS_PER_NODE \
+    --master_port=$MASTER_PORT \
+    $(dirname "$0")/test.py \
+    $CFG \
+    $CKPT \
+    --launcher pytorch ${@:4} \
+    --eval bbox \
+    --show-dir ${WORK_DIR} \
+    2>&1 | tee ${WORK_DIR}logs/eval.$T
\ No newline at end of file
diff --git a/adzoo/uniad/uniad_dist_train.sh b/adzoo/uniad/uniad_dist_train.sh
new file mode 100755
index 0000000..313e20a
--- /dev/null
+++ b/adzoo/uniad/uniad_dist_train.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+T=`date +%m%d%H%M`
+
+# -------------------------------------------------- #
+# Usually you only need to customize these variables #
+CFG=$1                                               #
+GPUS=$2                                              #
+# -------------------------------------------------- #
+GPUS_PER_NODE=$(($GPUS<8?$GPUS:8))
+NNODES=`expr $GPUS / $GPUS_PER_NODE`
+
+MASTER_PORT=${MASTER_PORT:-54621}
+MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}
+RANK=${RANK:-0}
+
+WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
+# Intermediate files and logs will be saved to UniAD/projects/work_dirs/
+
+if [ ! -d ${WORK_DIR}logs ]; then
+    mkdir -p ${WORK_DIR}logs
+fi
+
+PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
+python -m torch.distributed.launch \
+    --nproc_per_node=${GPUS_PER_NODE} \
+    --master_addr=${MASTER_ADDR} \
+    --master_port=${MASTER_PORT} \
+    --nnodes=${NNODES} \
+    --node_rank=${RANK} \
+    $(dirname "$0")/train.py \
+    $CFG \
+    --launcher pytorch ${@:3} \
+    --deterministic \
+    --work-dir ${WORK_DIR} \
+    2>&1 | tee ${WORK_DIR}logs/train.$T
\ No newline at end of file
diff --git a/adzoo/uniad/uniad_vis_result.sh b/adzoo/uniad/uniad_vis_result.sh
new file mode 100755
index 0000000..b43a1be
--- /dev/null
+++ b/adzoo/uniad/uniad_vis_result.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+python ./tools/analysis_tools/visualize/run.py \
+    --predroot PATH_TO_YOUR_PREDICTION_RESULT_PKL \
+    --out_folder PATH_TO_YOUR_OUTPUT_FOLDER \
+    --demo_video FILENAME_OF_OUTPUT_VIDEO \
+    --project_to_cam True
\ No newline at end of file
diff --git a/adzoo/vad/analysis_tools/__init__.py b/adzoo/vad/analysis_tools/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/adzoo/vad/analysis_tools/analyze_logs.py b/adzoo/vad/analysis_tools/analyze_logs.py
new file mode 100644
index 0000000..806175f
--- /dev/null
+++ b/adzoo/vad/analysis_tools/analyze_logs.py
@@ -0,0 +1,201 @@
+# Copyright (c) OpenMMLab. All rights reserved.
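+# Example usage (paths are placeholders):
+#   python analyze_logs.py plot_curve work_dirs/run/log.json --keys loss --out loss.png
+#   python analyze_logs.py cal_train_time work_dirs/run/log.json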
+import argparse
+import json
+import numpy as np
+import seaborn as sns
+from collections import defaultdict
+from matplotlib import pyplot as plt
+
+
+def cal_train_time(log_dicts, args):
+    for i, log_dict in enumerate(log_dicts):
+        print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}')
+        all_times = []
+        for epoch in log_dict.keys():
+            if args.include_outliers:
+                all_times.append(log_dict[epoch]['time'])
+            else:
+                all_times.append(log_dict[epoch]['time'][1:])
+        all_times = np.array(all_times)
+        epoch_ave_time = all_times.mean(-1)
+        slowest_epoch = epoch_ave_time.argmax()
+        fastest_epoch = epoch_ave_time.argmin()
+        std_over_epoch = epoch_ave_time.std()
+        print(f'slowest epoch {slowest_epoch + 1}, '
+              f'average time is {epoch_ave_time[slowest_epoch]:.4f}')
+        print(f'fastest epoch {fastest_epoch + 1}, '
+              f'average time is {epoch_ave_time[fastest_epoch]:.4f}')
+        print(f'time std over epochs is {std_over_epoch:.4f}')
+        print(f'average iter time: {np.mean(all_times):.4f} s/iter')
+        print()
+
+
+def plot_curve(log_dicts, args):
+    if args.backend is not None:
+        plt.switch_backend(args.backend)
+    sns.set_style(args.style)
+    # if legend is None, use {filename}_{key} as legend
+    legend = args.legend
+    if legend is None:
+        legend = []
+        for json_log in args.json_logs:
+            for metric in args.keys:
+                legend.append(f'{json_log}_{metric}')
+    assert len(legend) == (len(args.json_logs) * len(args.keys))
+    metrics = args.keys
+
+    num_metrics = len(metrics)
+    for i, log_dict in enumerate(log_dicts):
+        epochs = list(log_dict.keys())
+        for j, metric in enumerate(metrics):
+            print(f'plot curve of {args.json_logs[i]}, metric is {metric}')
+            if metric not in log_dict[epochs[args.interval - 1]]:
+                raise KeyError(
+                    f'{args.json_logs[i]} does not contain metric {metric}')
+
+            if args.mode == 'eval':
+                if min(epochs) == args.interval:
+                    x0 = args.interval
+                else:
+                    # if the current training is resumed from a previous
+                    # checkpoint, we lose the information of early epochs;
+                    # `xs` should start according to `min(epochs)`
+                    if min(epochs) % args.interval == 0:
+                        x0 = min(epochs)
+                    else:
+                        # find the first epoch that does eval
+                        x0 = min(epochs) + args.interval - \
+                            min(epochs) % args.interval
+                xs = np.arange(x0, max(epochs) + 1, args.interval)
+                ys = []
+                for epoch in epochs[args.interval - 1::args.interval]:
+                    ys += log_dict[epoch][metric]
+
+                # if training is aborted before eval of the last epoch,
+                # `xs` and `ys` will have different lengths and cause an
+                # error; check if `ys[-1]` is empty here
+                if not log_dict[epoch][metric]:
+                    xs = xs[:-1]
+
+                ax = plt.gca()
+                ax.set_xticks(xs)
+                plt.xlabel('epoch')
+                plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o')
+            else:
+                xs = []
+                ys = []
+                num_iters_per_epoch = \
+                    log_dict[epochs[args.interval - 1]]['iter'][-1]
+                for epoch in epochs[args.interval - 1::args.interval]:
+                    iters = log_dict[epoch]['iter']
+                    if log_dict[epoch]['mode'][-1] == 'val':
+                        iters = iters[:-1]
+                    xs.append(
+                        np.array(iters) + (epoch - 1) * num_iters_per_epoch)
+                    ys.append(np.array(log_dict[epoch][metric][:len(iters)]))
+                xs = np.concatenate(xs)
+                ys = np.concatenate(ys)
+                plt.xlabel('iter')
+                plt.plot(
+                    xs, ys, label=legend[i * num_metrics + j], linewidth=0.5)
+            plt.legend()
+        if args.title is not None:
+            plt.title(args.title)
+    if args.out is None:
+        plt.show()
+    else:
+        print(f'save curve to: {args.out}')
+        plt.savefig(args.out)
+        plt.cla()
+
+
+def add_plot_parser(subparsers):
+    parser_plt = subparsers.add_parser(
+        'plot_curve', help='parser for plotting curves')
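+    # For plot_curve, the x-axis is epochs when --mode is 'eval' and
+    # iterations otherwise, matching the two branches above.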
parser_plt.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_plt.add_argument( + '--keys', + type=str, + nargs='+', + default=['mAP_0.25'], + help='the metric that you want to plot') + parser_plt.add_argument('--title', type=str, help='title of figure') + parser_plt.add_argument( + '--legend', + type=str, + nargs='+', + default=None, + help='legend of each plot') + parser_plt.add_argument( + '--backend', type=str, default=None, help='backend of plt') + parser_plt.add_argument( + '--style', type=str, default='dark', help='style of plt') + parser_plt.add_argument('--out', type=str, default=None) + parser_plt.add_argument('--mode', type=str, default='train') + parser_plt.add_argument('--interval', type=int, default=1) + + +def add_time_parser(subparsers): + parser_time = subparsers.add_parser( + 'cal_train_time', + help='parser for computing the average time per training iteration') + parser_time.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_time.add_argument( + '--include-outliers', + action='store_true', + help='include the first value of every epoch when computing ' + 'the average time') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Analyze Json Log') + # currently only support plot curve and calculate average train time + subparsers = parser.add_subparsers(dest='task', help='task parser') + add_plot_parser(subparsers) + add_time_parser(subparsers) + args = parser.parse_args() + return args + + +def load_json_logs(json_logs): + # load and convert json_logs to log_dict, key is epoch, value is a sub dict + # keys of sub dict is different metrics, e.g. memory, bbox_mAP + # value of sub dict is a list of corresponding values of all iterations + log_dicts = [dict() for _ in json_logs] + for json_log, log_dict in zip(json_logs, log_dicts): + with open(json_log, 'r') as log_file: + for line in log_file: + log = json.loads(line.strip()) + # skip lines without `epoch` field + if 'epoch' not in log: + continue + epoch = log.pop('epoch') + if epoch not in log_dict: + log_dict[epoch] = defaultdict(list) + for k, v in log.items(): + log_dict[epoch][k].append(v) + return log_dicts + + +def main(): + args = parse_args() + + json_logs = args.json_logs + for json_log in json_logs: + assert json_log.endswith('.json') + + log_dicts = load_json_logs(json_logs) + + eval(args.task)(log_dicts, args) + + +if __name__ == '__main__': + main() diff --git a/adzoo/vad/analysis_tools/benchmark.py b/adzoo/vad/analysis_tools/benchmark.py new file mode 100644 index 0000000..487a348 --- /dev/null +++ b/adzoo/vad/analysis_tools/benchmark.py @@ -0,0 +1,98 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
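+# Measures pure inference FPS over the test split, skipping warm-up
+# iterations. Example invocation (config/checkpoint paths are placeholders):
+#   python benchmark.py path/to/config.py --checkpoint path/to/ckpt.pth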
+import argparse
+import time
+import torch
+from mmcv import Config
+from mmcv.parallel import MMDataParallel
+from mmcv.runner import load_checkpoint, wrap_fp16_model
+import sys
+sys.path.append('.')
+from projects.mmdet3d_plugin.datasets.builder import build_dataloader
+from projects.mmdet3d_plugin.datasets import custom_build_dataset
+# from mmdet3d.datasets import build_dataloader, build_dataset
+from mmdet3d.models import build_detector
+# from tools.misc.fuse_conv_bn import fuse_module
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='MMDet benchmark a model')
+    parser.add_argument('config', help='test config file path')
+    parser.add_argument('--checkpoint', default=None, help='checkpoint file')
+    parser.add_argument(
+        '--samples', type=int, default=2000, help='samples to benchmark')
+    parser.add_argument(
+        '--log-interval', type=int, default=50, help='interval of logging')
+    parser.add_argument(
+        '--fuse-conv-bn',
+        action='store_true',
+        help='Whether to fuse conv and bn, this will slightly increase '
+        'the inference speed')
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = parse_args()
+
+    cfg = Config.fromfile(args.config)
+    # set cudnn_benchmark
+    if cfg.get('cudnn_benchmark', False):
+        torch.backends.cudnn.benchmark = True
+    cfg.model.pretrained = None
+    cfg.data.test.test_mode = True
+
+    # build the dataloader
+    # TODO: support multiple images per gpu (only minor changes are needed)
+    print(cfg.data.test)
+    dataset = custom_build_dataset(cfg.data.test)
+    data_loader = build_dataloader(
+        dataset,
+        samples_per_gpu=1,
+        workers_per_gpu=cfg.data.workers_per_gpu,
+        dist=False,
+        shuffle=False)
+
+    # build the model and load checkpoint
+    cfg.model.train_cfg = None
+    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
+    fp16_cfg = cfg.get('fp16', None)
+    if fp16_cfg is not None:
+        wrap_fp16_model(model)
+    if args.checkpoint is not None:
+        load_checkpoint(model, args.checkpoint, map_location='cpu')
+    # if args.fuse_conv_bn:
+    #     model = fuse_module(model)
+
+    model = MMDataParallel(model, device_ids=[0])
+
+    model.eval()
+
+    # the first several iterations may be very slow so skip them
+    num_warmup = 5
+    pure_inf_time = 0
+
+    # benchmark with several samples and take the average
+    for i, data in enumerate(data_loader):
+        torch.cuda.synchronize()
+        start_time = time.perf_counter()
+        with torch.no_grad():
+            model(return_loss=False, rescale=True, **data)
+
+        torch.cuda.synchronize()
+        elapsed = time.perf_counter() - start_time
+
+        if i >= num_warmup:
+            pure_inf_time += elapsed
+            if (i + 1) % args.log_interval == 0:
+                fps = (i + 1 - num_warmup) / pure_inf_time
+                print(f'Done image [{i + 1:<3}/ {args.samples}], '
+                      f'fps: {fps:.1f} img / s')
+
+        if (i + 1) == args.samples:
+            pure_inf_time += elapsed
+            fps = (i + 1 - num_warmup) / pure_inf_time
+            print(f'Overall fps: {fps:.1f} img / s')
+            break
+
+
+if __name__ == '__main__':
+    main()
diff --git a/adzoo/vad/analysis_tools/get_flops.py b/adzoo/vad/analysis_tools/get_flops.py
new file mode 100644
index 0000000..1b9fb01
--- /dev/null
+++ b/adzoo/vad/analysis_tools/get_flops.py
@@ -0,0 +1,747 @@
+# Copyright (c) OpenMMLab. All rights reserved.
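+# Computes FLOPs/params with a vendored copy of mmcv's complexity counter,
+# driven by one real batch from the test dataloader. Example invocation
+# (config path is a placeholder):
+#   python get_flops.py path/to/config.py --modality image --shape 1280 720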
+import os +import argparse + +import torch +from mmcv import Config, DictAction + +from mmdet3d.models import build_model +from mmdet3d.datasets import build_dataset +from projects.mmdet3d_plugin.datasets.builder import build_dataloader + +# try: +# from mmcv.cnn import get_model_complexity_info +# except ImportError: +# raise ImportError('Please upgrade mmcv to >0.6.2') + +import sys +sys.path.append('.') + + +from functools import partial + +import numpy as np +import torch +import torch.nn as nn + +import mmcv + + +def get_model_complexity_info(model, + data, + input_shape=(1280, 720), + print_per_layer_stat=True, + as_strings=True, + input_constructor=None, + flush=False, + ost=sys.stdout): + """Get complexity information of a model. + + This method can calculate FLOPs and parameter counts of a model with + corresponding input shape. It can also print complexity information for + each layer in a model. + + Supported layers are listed as below: + - Convolutions: ``nn.Conv1d``, ``nn.Conv2d``, ``nn.Conv3d``. + - Activations: ``nn.ReLU``, ``nn.PReLU``, ``nn.ELU``, ``nn.LeakyReLU``, + ``nn.ReLU6``. + - Poolings: ``nn.MaxPool1d``, ``nn.MaxPool2d``, ``nn.MaxPool3d``, + ``nn.AvgPool1d``, ``nn.AvgPool2d``, ``nn.AvgPool3d``, + ``nn.AdaptiveMaxPool1d``, ``nn.AdaptiveMaxPool2d``, + ``nn.AdaptiveMaxPool3d``, ``nn.AdaptiveAvgPool1d``, + ``nn.AdaptiveAvgPool2d``, ``nn.AdaptiveAvgPool3d``. + - BatchNorms: ``nn.BatchNorm1d``, ``nn.BatchNorm2d``, + ``nn.BatchNorm3d``, ``nn.GroupNorm``, ``nn.InstanceNorm1d``, + ``InstanceNorm2d``, ``InstanceNorm3d``, ``nn.LayerNorm``. + - Linear: ``nn.Linear``. + - Deconvolution: ``nn.ConvTranspose2d``. + - Upsample: ``nn.Upsample``. + + Args: + model (nn.Module): The model for complexity calculation. + input_shape (tuple): Input shape used for calculation. + print_per_layer_stat (bool): Whether to print complexity information + for each layer in a model. Default: True. + as_strings (bool): Output FLOPs and params counts in a string form. + Default: True. + input_constructor (None | callable): If specified, it takes a callable + method that generates input. otherwise, it will generate a random + tensor with input shape to calculate FLOPs. Default: None. + flush (bool): same as that in :func:`print`. Default: False. + ost (stream): same as ``file`` param in :func:`print`. + Default: sys.stdout. + + Returns: + tuple[float | str]: If ``as_strings`` is set to True, it will return + FLOPs and parameter counts in a string format. otherwise, it will + return those in a float number format. + """ + + assert isinstance(model, nn.Module) + flops_model = add_flops_counting_methods(model) + flops_model.eval() + flops_model.start_flops_count() + if input_constructor: + input = input_constructor(input_shape) + _ = flops_model(**input) + else: + try: + batch = torch.ones(()).new_empty( + (1, 6, 3, *input_shape), + dtype=next(flops_model.parameters()).dtype, + device=next(flops_model.parameters()).device) + except StopIteration: + # Avoid StopIteration for models which have no parameters, + # like `nn.Relu()`, `nn.AvgPool2d`, etc. 
+ batch = torch.ones(()).new_empty((1, 6, 3, *input_shape)) + + # img_metas = [data['img_metas'][0].data[0]] + # img = data['img'][0].data[0] + # points = data['points'][0].data[0][0] + # fut_valid_flag = data['fut_valid_flag'][0].data[0] + # img = img.to(batch.device) + # points = [points.to(batch.device)] + # ego_his_trajs = data['ego_his_trajs'][0].data[0].to(batch.device) + # ego_lcf_feat = data['ego_lcf_feat'][0].data[0].to(batch.device).unsqueeze(0) + + # _ = flops_model(rescale=True, img=img, img_metas=img_metas, points=points, + # fut_valid_flag=fut_valid_flag, ego_his_trajs=ego_his_trajs, ego_lcf_feat=ego_lcf_feat) + + img_metas = [data['img_metas'][0].data[0]] + img = data['img'][0].data[0] + img = img.to(batch.device) + + _ = flops_model(rescale=True, img=img, img_metas=img_metas) + + flops_count, params_count = flops_model.compute_average_flops_cost() + if print_per_layer_stat: + print_model_with_flops( + flops_model, flops_count, params_count, ost=ost, flush=flush) + flops_model.stop_flops_count() + + if as_strings: + return flops_to_string(flops_count), params_to_string(params_count) + + return flops_count, params_count + + +def flops_to_string(flops, units='GFLOPs', precision=2): + """Convert FLOPs number into a string. + + Note that Here we take a multiply-add counts as one FLOP. + + Args: + flops (float): FLOPs number to be converted. + units (str | None): Converted FLOPs units. Options are None, 'GFLOPs', + 'MFLOPs', 'KFLOPs', 'FLOPs'. If set to None, it will automatically + choose the most suitable unit for FLOPs. Default: 'GFLOPs'. + precision (int): Digit number after the decimal point. Default: 2. + + Returns: + str: The converted FLOPs number with units. + + Examples: + >>> flops_to_string(1e9) + '1.0 GFLOPs' + >>> flops_to_string(2e5, 'MFLOPs') + '0.2 MFLOPs' + >>> flops_to_string(3e-9, None) + '3e-09 FLOPs' + """ + if units is None: + if flops // 10**9 > 0: + return str(round(flops / 10.**9, precision)) + ' GFLOPs' + elif flops // 10**6 > 0: + return str(round(flops / 10.**6, precision)) + ' MFLOPs' + elif flops // 10**3 > 0: + return str(round(flops / 10.**3, precision)) + ' KFLOPs' + else: + return str(flops) + ' FLOPs' + else: + if units == 'GFLOPs': + return str(round(flops / 10.**9, precision)) + ' ' + units + elif units == 'MFLOPs': + return str(round(flops / 10.**6, precision)) + ' ' + units + elif units == 'KFLOPs': + return str(round(flops / 10.**3, precision)) + ' ' + units + else: + return str(flops) + ' FLOPs' + + +def params_to_string(num_params, units=None, precision=2): + """Convert parameter number into a string. + + Args: + num_params (float): Parameter number to be converted. + units (str | None): Converted FLOPs units. Options are None, 'M', + 'K' and ''. If set to None, it will automatically choose the most + suitable unit for Parameter number. Default: None. + precision (int): Digit number after the decimal point. Default: 2. + + Returns: + str: The converted parameter number with units. 
+ + Examples: + >>> params_to_string(1e9) + '1000.0 M' + >>> params_to_string(2e5) + '200.0 k' + >>> params_to_string(3e-9) + '3e-09' + """ + if units is None: + if num_params // 10**6 > 0: + return str(round(num_params / 10**6, precision)) + ' M' + elif num_params // 10**3: + return str(round(num_params / 10**3, precision)) + ' k' + else: + return str(num_params) + else: + if units == 'M': + return str(round(num_params / 10.**6, precision)) + ' ' + units + elif units == 'K': + return str(round(num_params / 10.**3, precision)) + ' ' + units + else: + return str(num_params) + + +def print_model_with_flops(model, + total_flops, + total_params, + units='GFLOPs', + precision=3, + ost=sys.stdout, + flush=False): + """Print a model with FLOPs for each layer. + + Args: + model (nn.Module): The model to be printed. + total_flops (float): Total FLOPs of the model. + total_params (float): Total parameter counts of the model. + units (str | None): Converted FLOPs units. Default: 'GFLOPs'. + precision (int): Digit number after the decimal point. Default: 3. + ost (stream): same as `file` param in :func:`print`. + Default: sys.stdout. + flush (bool): same as that in :func:`print`. Default: False. + + Example: + >>> class ExampleModel(nn.Module): + + >>> def __init__(self): + >>> super().__init__() + >>> self.conv1 = nn.Conv2d(3, 8, 3) + >>> self.conv2 = nn.Conv2d(8, 256, 3) + >>> self.conv3 = nn.Conv2d(256, 8, 3) + >>> self.avg_pool = nn.AdaptiveAvgPool2d((1, 1)) + >>> self.flatten = nn.Flatten() + >>> self.fc = nn.Linear(8, 1) + + >>> def forward(self, x): + >>> x = self.conv1(x) + >>> x = self.conv2(x) + >>> x = self.conv3(x) + >>> x = self.avg_pool(x) + >>> x = self.flatten(x) + >>> x = self.fc(x) + >>> return x + + >>> model = ExampleModel() + >>> x = (3, 16, 16) + to print the complexity information state for each layer, you can use + >>> get_model_complexity_info(model, x) + or directly use + >>> print_model_with_flops(model, 4579784.0, 37361) + ExampleModel( + 0.037 M, 100.000% Params, 0.005 GFLOPs, 100.000% FLOPs, + (conv1): Conv2d(0.0 M, 0.600% Params, 0.0 GFLOPs, 0.959% FLOPs, 3, 8, kernel_size=(3, 3), stride=(1, 1)) # noqa: E501 + (conv2): Conv2d(0.019 M, 50.020% Params, 0.003 GFLOPs, 58.760% FLOPs, 8, 256, kernel_size=(3, 3), stride=(1, 1)) + (conv3): Conv2d(0.018 M, 49.356% Params, 0.002 GFLOPs, 40.264% FLOPs, 256, 8, kernel_size=(3, 3), stride=(1, 1)) + (avg_pool): AdaptiveAvgPool2d(0.0 M, 0.000% Params, 0.0 GFLOPs, 0.017% FLOPs, output_size=(1, 1)) + (flatten): Flatten(0.0 M, 0.000% Params, 0.0 GFLOPs, 0.000% FLOPs, ) + (fc): Linear(0.0 M, 0.024% Params, 0.0 GFLOPs, 0.000% FLOPs, in_features=8, out_features=1, bias=True) + ) + """ + + def accumulate_params(self): + if is_supported_instance(self): + return self.__params__ + else: + sum = 0 + for m in self.children(): + sum += m.accumulate_params() + return sum + + def accumulate_flops(self): + if is_supported_instance(self): + return self.__flops__ / model.__batch_counter__ + else: + sum = 0 + for m in self.children(): + sum += m.accumulate_flops() + return sum + + def flops_repr(self): + accumulated_num_params = self.accumulate_params() + accumulated_flops_cost = self.accumulate_flops() + return ', '.join([ + params_to_string( + accumulated_num_params, units='M', precision=precision), + '{:.3%} Params'.format(accumulated_num_params / total_params), + flops_to_string( + accumulated_flops_cost, units=units, precision=precision), + '{:.3%} FLOPs'.format(accumulated_flops_cost / total_flops), + self.original_extra_repr() + ]) + + def 
add_extra_repr(m): + m.accumulate_flops = accumulate_flops.__get__(m) + m.accumulate_params = accumulate_params.__get__(m) + flops_extra_repr = flops_repr.__get__(m) + if m.extra_repr != flops_extra_repr: + m.original_extra_repr = m.extra_repr + m.extra_repr = flops_extra_repr + assert m.extra_repr != m.original_extra_repr + + def del_extra_repr(m): + if hasattr(m, 'original_extra_repr'): + m.extra_repr = m.original_extra_repr + del m.original_extra_repr + if hasattr(m, 'accumulate_flops'): + del m.accumulate_flops + + model.apply(add_extra_repr) + print(model, file=ost, flush=flush) + model.apply(del_extra_repr) + + +def get_model_parameters_number(model): + """Calculate parameter number of a model. + + Args: + model (nn.module): The model for parameter number calculation. + + Returns: + float: Parameter number of the model. + """ + num_params = sum(p.numel() for p in model.parameters() if p.requires_grad) + return num_params + + +def add_flops_counting_methods(net_main_module): + # adding additional methods to the existing module object, + # this is done this way so that each function has access to self object + net_main_module.start_flops_count = start_flops_count.__get__( + net_main_module) + net_main_module.stop_flops_count = stop_flops_count.__get__( + net_main_module) + net_main_module.reset_flops_count = reset_flops_count.__get__( + net_main_module) + net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__( # noqa: E501 + net_main_module) + + net_main_module.reset_flops_count() + + return net_main_module + + +def compute_average_flops_cost(self): + """Compute average FLOPs cost. + + A method to compute average FLOPs cost, which will be available after + `add_flops_counting_methods()` is called on a desired net object. + + Returns: + float: Current mean flops consumption per image. + """ + batches_count = self.__batch_counter__ + flops_sum = 0 + for module in self.modules(): + if is_supported_instance(module): + flops_sum += module.__flops__ + params_sum = get_model_parameters_number(self) + return flops_sum / batches_count, params_sum + + +def start_flops_count(self): + """Activate the computation of mean flops consumption per image. + + A method to activate the computation of mean flops consumption per image. + which will be available after ``add_flops_counting_methods()`` is called on + a desired net object. It should be called before running the network. + """ + add_batch_counter_hook_function(self) + + def add_flops_counter_hook_function(module): + if is_supported_instance(module): + if hasattr(module, '__flops_handle__'): + return + + else: + handle = module.register_forward_hook( + get_modules_mapping()[type(module)]) + + module.__flops_handle__ = handle + + self.apply(partial(add_flops_counter_hook_function)) + + +def stop_flops_count(self): + """Stop computing the mean flops consumption per image. + + A method to stop computing the mean flops consumption per image, which will + be available after ``add_flops_counting_methods()`` is called on a desired + net object. It can be called to pause the computation whenever. + """ + remove_batch_counter_hook_function(self) + self.apply(remove_flops_counter_hook_function) + + +def reset_flops_count(self): + """Reset statistics computed so far. + + A method to Reset computed statistics, which will be available after + `add_flops_counting_methods()` is called on a desired net object. 
+ """ + add_batch_counter_variables_or_reset(self) + self.apply(add_flops_counter_variable_or_reset) + + +# ---- Internal functions +def empty_flops_counter_hook(module, input, output): + module.__flops__ += 0 + + +def upsample_flops_counter_hook(module, input, output): + output_size = output[0] + batch_size = output_size.shape[0] + output_elements_count = batch_size + for val in output_size.shape[1:]: + output_elements_count *= val + module.__flops__ += int(output_elements_count) + + +def relu_flops_counter_hook(module, input, output): + active_elements_count = output.numel() + module.__flops__ += int(active_elements_count) + + +def linear_flops_counter_hook(module, input, output): + input = input[0] + output_last_dim = output.shape[ + -1] # pytorch checks dimensions, so here we don't care much + module.__flops__ += int(np.prod(input.shape) * output_last_dim) + + +def pool_flops_counter_hook(module, input, output): + input = input[0] + module.__flops__ += int(np.prod(input.shape)) + + +def norm_flops_counter_hook(module, input, output): + input = input[0] + + batch_flops = np.prod(input.shape) + if (getattr(module, 'affine', False) + or getattr(module, 'elementwise_affine', False)): + batch_flops *= 2 + module.__flops__ += int(batch_flops) + + +def deconv_flops_counter_hook(conv_module, input, output): + # Can have multiple inputs, getting the first one + input = input[0] + + batch_size = input.shape[0] + input_height, input_width = input.shape[2:] + + kernel_height, kernel_width = conv_module.kernel_size + in_channels = conv_module.in_channels + out_channels = conv_module.out_channels + groups = conv_module.groups + + filters_per_channel = out_channels // groups + conv_per_position_flops = ( + kernel_height * kernel_width * in_channels * filters_per_channel) + + active_elements_count = batch_size * input_height * input_width + overall_conv_flops = conv_per_position_flops * active_elements_count + bias_flops = 0 + if conv_module.bias is not None: + output_height, output_width = output.shape[2:] + bias_flops = out_channels * batch_size * output_height * output_height + overall_flops = overall_conv_flops + bias_flops + + conv_module.__flops__ += int(overall_flops) + + +def conv_flops_counter_hook(conv_module, input, output): + # Can have multiple inputs, getting the first one + input = input[0] + + batch_size = input.shape[0] + output_dims = list(output.shape[2:]) + + kernel_dims = list(conv_module.kernel_size) + in_channels = conv_module.in_channels + out_channels = conv_module.out_channels + groups = conv_module.groups + + filters_per_channel = out_channels // groups + conv_per_position_flops = int( + np.prod(kernel_dims)) * in_channels * filters_per_channel + + active_elements_count = batch_size * int(np.prod(output_dims)) + + overall_conv_flops = conv_per_position_flops * active_elements_count + + bias_flops = 0 + + if conv_module.bias is not None: + + bias_flops = out_channels * active_elements_count + + overall_flops = overall_conv_flops + bias_flops + + conv_module.__flops__ += int(overall_flops) + + +def batch_counter_hook(module, input, output): + batch_size = 1 + if len(input) > 0: + # Can have multiple inputs, getting the first one + input = input[0] + batch_size = len(input) + else: + pass + print('Warning! 
+              'assuming batch size is 1.')
+    module.__batch_counter__ += batch_size
+
+
+def add_batch_counter_variables_or_reset(module):
+
+    module.__batch_counter__ = 0
+
+
+def add_batch_counter_hook_function(module):
+    if hasattr(module, '__batch_counter_handle__'):
+        return
+
+    handle = module.register_forward_hook(batch_counter_hook)
+    module.__batch_counter_handle__ = handle
+
+
+def remove_batch_counter_hook_function(module):
+    if hasattr(module, '__batch_counter_handle__'):
+        module.__batch_counter_handle__.remove()
+        del module.__batch_counter_handle__
+
+
+def add_flops_counter_variable_or_reset(module):
+    if is_supported_instance(module):
+        if hasattr(module, '__flops__') or hasattr(module, '__params__'):
+            print('Warning: variables __flops__ or __params__ are already '
+                  'defined for the module ' + type(module).__name__ +
+                  '. ptflops can affect your code!')
+        module.__flops__ = 0
+        module.__params__ = get_model_parameters_number(module)
+
+
+def is_supported_instance(module):
+    if type(module) in get_modules_mapping():
+        return True
+    return False
+
+
+def remove_flops_counter_hook_function(module):
+    if is_supported_instance(module):
+        if hasattr(module, '__flops_handle__'):
+            module.__flops_handle__.remove()
+            del module.__flops_handle__
+
+
+def get_modules_mapping():
+    return {
+        # convolutions
+        nn.Conv1d: conv_flops_counter_hook,
+        nn.Conv2d: conv_flops_counter_hook,
+        mmcv.cnn.bricks.Conv2d: conv_flops_counter_hook,
+        nn.Conv3d: conv_flops_counter_hook,
+        mmcv.cnn.bricks.Conv3d: conv_flops_counter_hook,
+        # activations
+        nn.ReLU: relu_flops_counter_hook,
+        nn.PReLU: relu_flops_counter_hook,
+        nn.ELU: relu_flops_counter_hook,
+        nn.LeakyReLU: relu_flops_counter_hook,
+        nn.ReLU6: relu_flops_counter_hook,
+        # poolings
+        nn.MaxPool1d: pool_flops_counter_hook,
+        nn.AvgPool1d: pool_flops_counter_hook,
+        nn.AvgPool2d: pool_flops_counter_hook,
+        nn.MaxPool2d: pool_flops_counter_hook,
+        mmcv.cnn.bricks.MaxPool2d: pool_flops_counter_hook,
+        nn.MaxPool3d: pool_flops_counter_hook,
+        mmcv.cnn.bricks.MaxPool3d: pool_flops_counter_hook,
+        nn.AvgPool3d: pool_flops_counter_hook,
+        nn.AdaptiveMaxPool1d: pool_flops_counter_hook,
+        nn.AdaptiveAvgPool1d: pool_flops_counter_hook,
+        nn.AdaptiveMaxPool2d: pool_flops_counter_hook,
+        nn.AdaptiveAvgPool2d: pool_flops_counter_hook,
+        nn.AdaptiveMaxPool3d: pool_flops_counter_hook,
+        nn.AdaptiveAvgPool3d: pool_flops_counter_hook,
+        # normalizations
+        nn.BatchNorm1d: norm_flops_counter_hook,
+        nn.BatchNorm2d: norm_flops_counter_hook,
+        nn.BatchNorm3d: norm_flops_counter_hook,
+        nn.GroupNorm: norm_flops_counter_hook,
+        nn.InstanceNorm1d: norm_flops_counter_hook,
+        nn.InstanceNorm2d: norm_flops_counter_hook,
+        nn.InstanceNorm3d: norm_flops_counter_hook,
+        nn.LayerNorm: norm_flops_counter_hook,
+        # FC
+        nn.Linear: linear_flops_counter_hook,
+        mmcv.cnn.bricks.Linear: linear_flops_counter_hook,
+        # Upscale
+        nn.Upsample: upsample_flops_counter_hook,
+        # Deconvolution
+        nn.ConvTranspose2d: deconv_flops_counter_hook,
+        mmcv.cnn.bricks.ConvTranspose2d: deconv_flops_counter_hook,
+    }
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='Calculate FLOPs and params of a detector')
+    parser.add_argument('config', help='train config file path')
+    parser.add_argument(
+        '--shape',
+        type=int,
+        nargs='+',
+        default=[40000, 4],
+        help='input point cloud size')
+    parser.add_argument(
+        '--modality',
+        type=str,
+        default='point',
+        choices=['point', 'image', 'multi'],
+        help='input data modality')
+    parser.add_argument(
+        '--cfg-options',
nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def main(): + + args = parse_args() + + if args.modality == 'point': + assert len(args.shape) == 2, 'invalid input shape' + input_shape = tuple(args.shape) + elif args.modality == 'image': + if len(args.shape) == 1: + input_shape = (3, args.shape[0], args.shape[0]) + elif len(args.shape) == 2: + input_shape = (3, ) + tuple(args.shape) + else: + raise ValueError('invalid input shape') + elif args.modality == 'multi': + raise NotImplementedError( + 'FLOPs counter is currently not supported for models with ' + 'multi-modality input') + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + if hasattr(cfg, 'plugin'): + if cfg.plugin: + import importlib + if hasattr(cfg, 'plugin_dir'): + plugin_dir = cfg.plugin_dir + _module_dir = os.path.dirname(plugin_dir) + _module_dir = _module_dir.split('/') + _module_path = _module_dir[0] + + for m in _module_dir[1:]: + _module_path = _module_path + '.' + m + print(_module_path) + plg_lib = importlib.import_module(_module_path) + else: + # import dir is the dirpath for the config file + _module_dir = os.path.dirname(args.config) + _module_dir = _module_dir.split('/') + _module_path = _module_dir[0] + for m in _module_dir[1:]: + _module_path = _module_path + '.' + m + print(_module_path) + plg_lib = importlib.import_module(_module_path) + + samples_per_gpu = 1 + from mmdet.datasets import replace_ImageToTensor + if isinstance(cfg.data.test, dict): + cfg.data.test.test_mode = True + samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1) + if samples_per_gpu > 1: + # Replace 'ImageToTensor' to 'DefaultFormatBundle' + cfg.data.test.pipeline = replace_ImageToTensor( + cfg.data.test.pipeline) + elif isinstance(cfg.data.test, list): + for ds_cfg in cfg.data.test: + ds_cfg.test_mode = True + samples_per_gpu = max( + [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test]) + if samples_per_gpu > 1: + for ds_cfg in cfg.data.test: + ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline) + + dataset = build_dataset(cfg.data.test) + dataset.is_vis_on_test = True #TODO, this is a hack + data_loader = build_dataloader( + dataset, + samples_per_gpu=1, + workers_per_gpu=0, + dist=False, + shuffle=False, + nonshuffler_sampler=cfg.data.nonshuffler_sampler, + ) + for i, data in enumerate(data_loader): + # if ~(data['map_gt_labels_3d'].data[0][0] != -1).any(): + # continue + img = data['img'][0].data[0] + img_metas = data['img_metas'][0].data[0] + break + + model = build_model( + cfg.model, + train_cfg=cfg.get('train_cfg'), + test_cfg=cfg.get('test_cfg')) + if torch.cuda.is_available(): + model.cuda() + model.eval() + + if hasattr(model, 'forward_dummy'): + model.forward = model.forward_dummy + else: + raise NotImplementedError( + 'FLOPs counter is currently not supported for {}'.format( + model.__class__.__name__)) + + flops, params = get_model_complexity_info(model, data) + split_line = '=' * 30 + print(f'{split_line}\nInput shape: {input_shape}\n' + f'Flops: {flops}\nParams: {params}\n{split_line}') + print('!!!Please be cautious if you use the results 
in papers. ' + 'You may need to check if all ops are supported and verify that the ' + 'flops computation is correct.') + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/adzoo/vad/analysis_tools/get_params.py b/adzoo/vad/analysis_tools/get_params.py new file mode 100644 index 0000000..6bf4ecf --- /dev/null +++ b/adzoo/vad/analysis_tools/get_params.py @@ -0,0 +1,8 @@ +import torch +YOUR_CKPT_PATH = None +file_path = YOUR_CKPT_PATH +model = torch.load(file_path, map_location='cpu') +all = 0 +for key in list(model['state_dict'].keys()): + all += model['state_dict'][key].nelement() +print(all) diff --git a/adzoo/vad/analysis_tools/visualization.py b/adzoo/vad/analysis_tools/visualization.py new file mode 100644 index 0000000..7fb9776 --- /dev/null +++ b/adzoo/vad/analysis_tools/visualization.py @@ -0,0 +1,911 @@ +import sys +sys.path.append('') +import os +import argparse +import os.path as osp +from PIL import Image +from tqdm import tqdm +from typing import List, Dict + +import cv2 +import mmcv +import torch +import numpy as np +import matplotlib.pyplot as plt +from matplotlib import rcParams +from pyquaternion import Quaternion +from nuscenes.nuscenes import NuScenes +from mmdet.datasets.pipelines import to_tensor +from matplotlib.collections import LineCollection +from nuscenes.utils.data_classes import LidarPointCloud, Box +from nuscenes.eval.common.data_classes import EvalBoxes, EvalBox +from nuscenes.eval.detection.utils import category_to_detection_name +from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility + +from projects.mmdet3d_plugin.core.bbox.structures.nuscenes_box import CustomNuscenesBox, CustomDetectionBox, color_map +from projects.mmdet3d_plugin.datasets.nuscenes_vad_dataset import VectorizedLocalMap, LiDARInstanceLines + + +cams = ['CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_BACK_RIGHT', + 'CAM_BACK', + 'CAM_BACK_LEFT', + 'CAM_FRONT_LEFT'] + + +def render_annotation( + anntoken: str, + margin: float = 10, + view: np.ndarray = np.eye(4), + box_vis_level: BoxVisibility = BoxVisibility.ANY, + out_path: str = 'render.png', + extra_info: bool = False) -> None: + """ + Render selected annotation. + :param anntoken: Sample_annotation token. + :param margin: How many meters in each direction to include in LIDAR view. + :param view: LIDAR view point. + :param box_vis_level: If sample_data is an image, this sets required visibility for boxes. + :param out_path: Optional path to save the rendered figure to disk. + :param extra_info: Whether to render extra information below camera view. + """ + ann_record = nusc.get('sample_annotation', anntoken) + sample_record = nusc.get('sample', ann_record['sample_token']) + assert 'LIDAR_TOP' in sample_record['data'].keys(), 'Error: No LIDAR_TOP in data, unable to render.' + + # Figure out which camera the object is fully visible in (this may return nothing). + boxes, cam = [], [] + cams = [key for key in sample_record['data'].keys() if 'CAM' in key] + all_bboxes = [] + select_cams = [] + for cam in cams: + _, boxes, _ = nusc.get_sample_data(sample_record['data'][cam], box_vis_level=box_vis_level, + selected_anntokens=[anntoken]) + if len(boxes) > 0: + all_bboxes.append(boxes) + select_cams.append(cam) + # We found an image that matches. Let's abort. + # assert len(boxes) > 0, 'Error: Could not find image where annotation is visible. ' \ + # 'Try using e.g. BoxVisibility.ANY.' + # assert len(boxes) < 2, 'Error: Found multiple annotations. Something is wrong!' 
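+    # The loop above keeps every camera in which the annotation passes the
+    # requested visibility filter, instead of the single-camera behaviour the
+    # commented-out asserts enforced. A minimal guard (an illustrative sketch,
+    # not required by the devkit) that warns rather than aborts when nothing
+    # is visible:
+    if len(all_bboxes) == 0:
+        print(f'Warning: annotation {anntoken} is not visible in any camera '
+              f'at box_vis_level={box_vis_level}.')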
+ + num_cam = len(all_bboxes) + + fig, axes = plt.subplots(1, num_cam + 1, figsize=(18, 9)) + select_cams = [sample_record['data'][cam] for cam in select_cams] + print('bbox in cams:', select_cams) + # Plot LIDAR view. + lidar = sample_record['data']['LIDAR_TOP'] + data_path, boxes, camera_intrinsic = nusc.get_sample_data(lidar, selected_anntokens=[anntoken]) + LidarPointCloud.from_file(data_path).render_height(axes[0], view=view) + for box in boxes: + c = np.array(get_color(box.name)) / 255.0 + box.render(axes[0], view=view, colors=(c, c, c)) + corners = view_points(boxes[0].corners(), view, False)[:2, :] + axes[0].set_xlim([np.min(corners[0, :]) - margin, np.max(corners[0, :]) + margin]) + axes[0].set_ylim([np.min(corners[1, :]) - margin, np.max(corners[1, :]) + margin]) + axes[0].axis('off') + axes[0].set_aspect('equal') + + # Plot CAMERA view. + for i in range(1, num_cam + 1): + cam = select_cams[i - 1] + data_path, boxes, camera_intrinsic = nusc.get_sample_data(cam, selected_anntokens=[anntoken]) + im = Image.open(data_path) + axes[i].imshow(im) + axes[i].set_title(nusc.get('sample_data', cam)['channel']) + axes[i].axis('off') + axes[i].set_aspect('equal') + for box in boxes: + c = np.array(get_color(box.name)) / 255.0 + box.render(axes[i], view=camera_intrinsic, normalize=True, colors=(c, c, c)) + + # Print extra information about the annotation below the camera view. + axes[i].set_xlim(0, im.size[0]) + axes[i].set_ylim(im.size[1], 0) + + if extra_info: + rcParams['font.family'] = 'monospace' + + w, l, h = ann_record['size'] + category = ann_record['category_name'] + lidar_points = ann_record['num_lidar_pts'] + radar_points = ann_record['num_radar_pts'] + + sample_data_record = nusc.get('sample_data', sample_record['data']['LIDAR_TOP']) + pose_record = nusc.get('ego_pose', sample_data_record['ego_pose_token']) + dist = np.linalg.norm(np.array(pose_record['translation']) - np.array(ann_record['translation'])) + + information = ' \n'.join(['category: {}'.format(category), + '', + '# lidar points: {0:>4}'.format(lidar_points), + '# radar points: {0:>4}'.format(radar_points), + '', + 'distance: {:>7.3f}m'.format(dist), + '', + 'width: {:>7.3f}m'.format(w), + 'length: {:>7.3f}m'.format(l), + 'height: {:>7.3f}m'.format(h)]) + + plt.annotate(information, (0, 0), (0, -20), xycoords='axes fraction', textcoords='offset points', va='top') + + if out_path is not None: + plt.savefig(out_path) + + +def get_sample_data(sample_data_token: str, + box_vis_level: BoxVisibility = BoxVisibility.ANY, + selected_anntokens=None, + use_flat_vehicle_coordinates: bool = False): + """ + Returns the data path as well as all annotations related to that sample_data. + Note that the boxes are transformed into the current sensor's coordinate frame. + :param sample_data_token: Sample_data token. + :param box_vis_level: If sample_data is an image, this sets required visibility for boxes. + :param selected_anntokens: If provided only return the selected annotation. + :param use_flat_vehicle_coordinates: Instead of the current sensor's coordinate frame, use ego frame which is + aligned to z-plane in the world. 
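+        (Only the yaw component of the ego rotation is applied in this mode,
+        via the yaw_pitch_roll[0] branch below, so boxes stay level with the
+        ground plane even when the ego vehicle pitches or rolls.)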
+ :return: (data_path, boxes, camera_intrinsic ) + """ + + # Retrieve sensor & pose records + sd_record = nusc.get('sample_data', sample_data_token) + cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token']) + sensor_record = nusc.get('sensor', cs_record['sensor_token']) + pose_record = nusc.get('ego_pose', sd_record['ego_pose_token']) + + data_path = nusc.get_sample_data_path(sample_data_token) + + if sensor_record['modality'] == 'camera': + cam_intrinsic = np.array(cs_record['camera_intrinsic']) + imsize = (sd_record['width'], sd_record['height']) + else: + cam_intrinsic = None + imsize = None + + # Retrieve all sample annotations and map to sensor coordinate system. + if selected_anntokens is not None: + boxes = list(map(nusc.get_box, selected_anntokens)) + else: + boxes = nusc.get_boxes(sample_data_token) + + # Make list of Box objects including coord system transforms. + box_list = [] + for box in boxes: + if use_flat_vehicle_coordinates: + # Move box to ego vehicle coord system parallel to world z plane. + yaw = Quaternion(pose_record['rotation']).yaw_pitch_roll[0] + box.translate(-np.array(pose_record['translation'])) + box.rotate(Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).inverse) + else: + # Move box to ego vehicle coord system. + box.translate(-np.array(pose_record['translation'])) + box.rotate(Quaternion(pose_record['rotation']).inverse) + + # Move box to sensor coord system. + box.translate(-np.array(cs_record['translation'])) + box.rotate(Quaternion(cs_record['rotation']).inverse) + + if sensor_record['modality'] == 'camera' and not \ + box_in_image(box, cam_intrinsic, imsize, vis_level=box_vis_level): + continue + + box_list.append(box) + + return data_path, box_list, cam_intrinsic + + +def get_predicted_data(sample_data_token: str, + box_vis_level: BoxVisibility = BoxVisibility.ANY, + selected_anntokens=None, + use_flat_vehicle_coordinates: bool = False, + pred_anns=None + ): + """ + Returns the data path as well as all annotations related to that sample_data. + Note that the boxes are transformed into the current sensor's coordinate frame. + :param sample_data_token: Sample_data token. + :param box_vis_level: If sample_data is an image, this sets required visibility for boxes. + :param selected_anntokens: If provided only return the selected annotation. + :param use_flat_vehicle_coordinates: Instead of the current sensor's coordinate frame, use ego frame which is + aligned to z-plane in the world. + :return: (data_path, boxes, camera_intrinsic ) + """ + + # Retrieve sensor & pose records + sd_record = nusc.get('sample_data', sample_data_token) + cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token']) + sensor_record = nusc.get('sensor', cs_record['sensor_token']) + pose_record = nusc.get('ego_pose', sd_record['ego_pose_token']) + + data_path = nusc.get_sample_data_path(sample_data_token) + + if sensor_record['modality'] == 'camera': + cam_intrinsic = np.array(cs_record['camera_intrinsic']) + imsize = (sd_record['width'], sd_record['height']) + else: + cam_intrinsic = None + imsize = None + + # Retrieve all sample annotations and map to sensor coordinate system. + # if selected_anntokens is not None: + # boxes = list(map(nusc.get_box, selected_anntokens)) + # else: + # boxes = nusc.get_boxes(sample_data_token) + boxes = pred_anns + # Make list of Box objects including coord system transforms. 
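+    # Each predicted box is mapped global -> ego -> sensor by two rigid
+    # transforms, i.e.
+    #     x_ego    = R_ego^-1 @ (x_global - t_ego)
+    #     x_sensor = R_cs^-1  @ (x_ego    - t_cs)
+    # where (R_ego, t_ego) is the ego pose and (R_cs, t_cs) the calibrated
+    # sensor extrinsics; the translate/rotate pairs below implement exactly
+    # these two steps.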
+ box_list = [] + for box in boxes: + if use_flat_vehicle_coordinates: + # Move box to ego vehicle coord system parallel to world z plane. + yaw = Quaternion(pose_record['rotation']).yaw_pitch_roll[0] + box.translate(-np.array(pose_record['translation'])) + box.rotate(Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).inverse) + else: + # Move box to ego vehicle coord system. + box.translate(-np.array(pose_record['translation'])) + box.rotate(Quaternion(pose_record['rotation']).inverse) + + # Move box to sensor coord system. + box.translate(-np.array(cs_record['translation'])) + box.rotate(Quaternion(cs_record['rotation']).inverse) + + if sensor_record['modality'] == 'camera' and not \ + box_in_image(box, cam_intrinsic, imsize, vis_level=box_vis_level): + continue + box_list.append(box) + + return data_path, box_list, cam_intrinsic + + +def lidiar_render(sample_token, data, out_path=None, out_name=None, traj_use_perstep_offset=True): + bbox_gt_list = [] + bbox_pred_list = [] + sample_rec = nusc.get('sample', sample_token) + anns = sample_rec['anns'] + sd_record = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP']) + cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token']) + pose_record = nusc.get('ego_pose', sd_record['ego_pose_token']) + + for ann in anns: + content = nusc.get('sample_annotation', ann) + gt_fut_trajs, gt_fut_masks = get_gt_fut_trajs( + nusc=nusc, anno=content, cs_record=cs_record, + pose_record=pose_record, fut_ts=6 + ) + try: + bbox_gt_list.append(CustomDetectionBox( + sample_token=content['sample_token'], + translation=tuple(content['translation']), + size=tuple(content['size']), + rotation=tuple(content['rotation']), + velocity=nusc.box_velocity(content['token'])[:2], + fut_trajs=tuple(gt_fut_trajs), + ego_translation=(0.0, 0.0, 0.0) if 'ego_translation' not in content + else tuple(content['ego_translation']), + num_pts=-1 if 'num_pts' not in content else int(content['num_pts']), + detection_name=category_to_detection_name(content['category_name']), + detection_score=-1.0 if 'detection_score' not in content else float(content['detection_score']), + attribute_name='')) + except: + pass + + bbox_anns = data['results'][sample_token] + for content in bbox_anns: + bbox_pred_list.append(CustomDetectionBox( + sample_token=content['sample_token'], + translation=tuple(content['translation']), + size=tuple(content['size']), + rotation=tuple(content['rotation']), + velocity=tuple(content['velocity']), + fut_trajs=tuple(content['fut_traj']), + ego_translation=(0.0, 0.0, 0.0) if 'ego_translation' not in content + else tuple(content['ego_translation']), + num_pts=-1 if 'num_pts' not in content else int(content['num_pts']), + detection_name=content['detection_name'], + detection_score=-1.0 if 'detection_score' not in content else float(content['detection_score']), + attribute_name=content['attribute_name'])) + gt_annotations = EvalBoxes() + pred_annotations = EvalBoxes() + gt_annotations.add_boxes(sample_token, bbox_gt_list) + pred_annotations.add_boxes(sample_token, bbox_pred_list) + # print('green is ground truth') + # print('blue is the predited result') + visualize_sample(nusc, sample_token, gt_annotations, pred_annotations, + savepath=out_path, traj_use_perstep_offset=traj_use_perstep_offset, pred_data=data) + + +def get_color(category_name: str): + """ + Provides the default colors based on the category names. + This method works for the general nuScenes categories, as well as the nuScenes detection categories. 
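+    Detection names are first mapped to their raw nuScenes categories (e.g.
+    'bicycle' -> 'vehicle.bicycle', 'traffic_cone' ->
+    'movable_object.trafficcone'); any name that matches no colormap key
+    falls back to black, [0, 0, 0].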
+ """ + a = ['noise', 'animal', 'human.pedestrian.adult', 'human.pedestrian.child', 'human.pedestrian.construction_worker', + 'human.pedestrian.personal_mobility', 'human.pedestrian.police_officer', 'human.pedestrian.stroller', + 'human.pedestrian.wheelchair', 'movable_object.barrier', 'movable_object.debris', + 'movable_object.pushable_pullable', 'movable_object.trafficcone', 'static_object.bicycle_rack', 'vehicle.bicycle', + 'vehicle.bus.bendy', 'vehicle.bus.rigid', 'vehicle.car', 'vehicle.construction', 'vehicle.emergency.ambulance', + 'vehicle.emergency.police', 'vehicle.motorcycle', 'vehicle.trailer', 'vehicle.truck', 'flat.driveable_surface', + 'flat.other', 'flat.sidewalk', 'flat.terrain', 'static.manmade', 'static.other', 'static.vegetation', + 'vehicle.ego'] + class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' + ] + #print(category_name) + if category_name == 'bicycle': + return nusc.colormap['vehicle.bicycle'] + elif category_name == 'construction_vehicle': + return nusc.colormap['vehicle.construction'] + elif category_name == 'traffic_cone': + return nusc.colormap['movable_object.trafficcone'] + + for key in nusc.colormap.keys(): + if category_name in key: + return nusc.colormap[key] + return [0, 0, 0] + +# TODO: whether to rotate traj +def boxes_to_sensor(boxes: List[EvalBox], pose_record: Dict, cs_record: Dict): + """ + Map boxes from global coordinates to the vehicle's sensor coordinate system. + :param boxes: The boxes in global coordinates. + :param pose_record: The pose record of the vehicle at the current timestamp. + :param cs_record: The calibrated sensor record of the sensor. + :return: The transformed boxes. + """ + boxes_out = [] + for box in boxes: + # Create Box instance. + box = CustomNuscenesBox( + box.translation, box.size, Quaternion(box.rotation), box.fut_trajs, name=box.detection_name + ) + # Move box to ego vehicle coord system. + box.translate(-np.array(pose_record['translation'])) + box.rotate(Quaternion(pose_record['rotation']).inverse) + # Move box to sensor coord system. + box.translate(-np.array(cs_record['translation'])) + box.rotate(Quaternion(cs_record['rotation']).inverse) + + boxes_out.append(box) + + return boxes_out + + +def get_gt_fut_trajs(nusc: NuScenes, + anno, + cs_record, + pose_record, + fut_ts) -> None: + """ + Visualizes a sample from BEV with annotations and detection results. + :param nusc: NuScenes object. + """ + box = Box(anno['translation'], anno['size'], Quaternion(anno['rotation'])) + # Move box to ego vehicle coord system. + box.translate(-np.array(pose_record['translation'])) + box.rotate(Quaternion(pose_record['rotation']).inverse) + # Move box to sensor coord system. + box.translate(-np.array(cs_record['translation'])) + box.rotate(Quaternion(cs_record['rotation']).inverse) + + # get future trajectory coords for each box + gt_fut_trajs = np.zeros((fut_ts, 2)) # [fut_ts*2] + gt_fut_masks = np.zeros((fut_ts)) # [fut_ts] + gt_fut_trajs[:] = box.center[:2] + cur_box = box + cur_anno = anno + for i in range(fut_ts): + if cur_anno['next'] != '': + anno_next = nusc.get('sample_annotation', cur_anno['next']) + box_next = Box( + anno_next['translation'], anno_next['size'], Quaternion(anno_next['rotation']) + ) + # Move box to ego vehicle coord system. + box_next.translate(-np.array(pose_record['translation'])) + box_next.rotate(Quaternion(pose_record['rotation']).inverse) + # Move box to sensor coord system. 
+ box_next.translate(-np.array(cs_record['translation'])) + box_next.rotate(Quaternion(cs_record['rotation']).inverse) + # gt_fut_trajs[i] = box_next.center[:2] + gt_fut_trajs[i] = box_next.center[:2] - cur_box.center[:2] + gt_fut_masks[i] = 1 + cur_anno = anno_next + cur_box = box_next + else: + # gt_fut_trajs[i:] = gt_fut_trajs[i-1] + gt_fut_trajs[i:] = 0 + break + + return gt_fut_trajs.reshape(-1).tolist(), gt_fut_masks.reshape(-1).tolist() + +def get_gt_vec_maps( + sample_token, + data_root='data/nuscenes/', + pc_range=[-15.0, -30.0, -4.0, 15.0, 30.0, 4.0], + padding_value=-10000, + map_classes=['divider', 'ped_crossing', 'boundary'], + map_fixed_ptsnum_per_line=20 +) -> None: + """ + Get gt vec map for a given sample. + """ + sample_rec = nusc.get('sample', sample_token) + sd_record = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP']) + cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token']) + pose_record = nusc.get('ego_pose', sd_record['ego_pose_token']) + lidar2ego_translation = cs_record['translation'], + lidar2ego_rotation = cs_record['rotation'], + ego2global_translation = pose_record['translation'], + ego2global_rotation = pose_record['rotation'], + map_location = nusc.get('log', nusc.get('scene', sample_rec['scene_token'])['log_token'])['location'] + + lidar2ego = np.eye(4) + lidar2ego[:3,:3] = Quaternion(cs_record['rotation']).rotation_matrix + lidar2ego[:3, 3] = cs_record['translation'] + ego2global = np.eye(4) + ego2global[:3,:3] = Quaternion(pose_record['rotation']).rotation_matrix + ego2global[:3, 3] = pose_record['translation'] + lidar2global = ego2global @ lidar2ego + lidar2global_translation = list(lidar2global[:3,3]) + lidar2global_rotation = list(Quaternion(matrix=lidar2global).q) + patch_h = pc_range[4]-pc_range[1] + patch_w = pc_range[3]-pc_range[0] + patch_size = (patch_h, patch_w) + + vector_map = VectorizedLocalMap(data_root, patch_size=patch_size, + map_classes=map_classes, + fixed_ptsnum_per_line=map_fixed_ptsnum_per_line, + padding_value=padding_value) + + + anns_results = vector_map.gen_vectorized_samples( + map_location, lidar2global_translation, lidar2global_rotation + ) + + ''' + anns_results, type: dict + 'gt_vecs_pts_loc': list[num_vecs], vec with num_points*2 coordinates + 'gt_vecs_pts_num': list[num_vecs], vec with num_points + 'gt_vecs_label': list[num_vecs], vec with cls index + ''' + gt_vecs_label = to_tensor(anns_results['gt_vecs_label']) + if isinstance(anns_results['gt_vecs_pts_loc'], LiDARInstanceLines): + gt_vecs_pts_loc = anns_results['gt_vecs_pts_loc'] + else: + gt_vecs_pts_loc = to_tensor(anns_results['gt_vecs_pts_loc']) + try: + gt_vecs_pts_loc = gt_vecs_pts_loc.flatten(1).to(dtype=torch.float32) + except: + gt_vecs_pts_loc = gt_vecs_pts_loc + + return gt_vecs_pts_loc, gt_vecs_label + + +def visualize_sample(nusc: NuScenes, + sample_token: str, + gt_boxes: EvalBoxes, + pred_boxes: EvalBoxes, + nsweeps: int = 1, + conf_th: float = 0.4, + pc_range: list = [-30.0, -30.0, -4.0, 30.0, 30.0, 4.0], + verbose: bool = True, + savepath: str = None, + traj_use_perstep_offset: bool = True, + data_root='data/nuscenes/', + map_pc_range: list = [-15.0, -30.0, -4.0, 15.0, 30.0, 4.0], + padding_value=-10000, + map_classes=['divider', 'ped_crossing', 'boundary'], + map_fixed_ptsnum_per_line=20, + gt_format=['fixed_num_pts'], + colors_plt = ['cornflowerblue', 'royalblue', 'slategrey'], + pred_data = None) -> None: + """ + Visualizes a sample from BEV with annotations and detection results. + :param nusc: NuScenes object. 
+ :param sample_token: The nuScenes sample token. + :param gt_boxes: Ground truth boxes grouped by sample. + :param pred_boxes: Prediction grouped by sample. + :param nsweeps: Number of sweeps used for lidar visualization. + :param conf_th: The confidence threshold used to filter negatives. + :param eval_range: Range in meters beyond which boxes are ignored. + :param verbose: Whether to print to stdout. + :param savepath: If given, saves the the rendering here instead of displaying. + """ + # Retrieve sensor & pose records. + sample_rec = nusc.get('sample', sample_token) + sd_record = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP']) + cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token']) + pose_record = nusc.get('ego_pose', sd_record['ego_pose_token']) + # Get boxes. + boxes_gt_global = gt_boxes[sample_token] + boxes_est_global = pred_boxes[sample_token] + # Map GT boxes to lidar. + boxes_gt = boxes_to_sensor(boxes_gt_global, pose_record, cs_record) + # Map EST boxes to lidar. + boxes_est = boxes_to_sensor(boxes_est_global, pose_record, cs_record) + # Add scores to EST boxes. + for box_est, box_est_global in zip(boxes_est, boxes_est_global): + box_est.score = box_est_global.detection_score + + # Init axes. + fig, axes = plt.subplots(1, 1, figsize=(4, 4)) + plt.xlim(xmin=-30, xmax=30) + plt.ylim(ymin=-30, ymax=30) + + # Show Pred Map + result_dic = pred_data['map_results'][sample_token]['vectors'] + + for vector in result_dic: + if vector['confidence_level'] < 0.6: + continue + pred_pts_3d = vector['pts'] + pred_label_3d = vector['type'] + pts_x = np.array([pt[0] for pt in pred_pts_3d]) + pts_y = np.array([pt[1] for pt in pred_pts_3d]) + + axes.plot(pts_x, pts_y, color=colors_plt[pred_label_3d],linewidth=1,alpha=0.8,zorder=-1) + axes.scatter(pts_x, pts_y, color=colors_plt[pred_label_3d],s=1,alpha=0.8,zorder=-1) + + # ignore_list = ['barrier', 'motorcycle', 'bicycle', 'traffic_cone'] + ignore_list = ['barrier', 'bicycle', 'traffic_cone'] + + # Show Pred boxes. + for i, box in enumerate(boxes_est): + if box.name in ignore_list: + continue + # Show only predictions with a high score. + assert not np.isnan(box.score), 'Error: Box score cannot be NaN!' + if box.score < conf_th or abs(box.center[0]) > 15 or abs(box.center[1]) > 30: + continue + box.render(axes, view=np.eye(4), colors=('tomato', 'tomato', 'tomato'), linewidth=1, box_idx=None) + # if box.name in ['pedestrian']: + # continue + if traj_use_perstep_offset: + mode_idx = [0, 1, 2, 3, 4, 5] + box.render_fut_trajs_grad_color(axes, linewidth=1, mode_idx=mode_idx, fut_ts=6, cmap='autumn') + else: + box.render_fut_trajs_coords(axes, color='tomato', linewidth=1) + + # Show Planning. 
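+    # The five plot calls below draw the ego vehicle as a fixed rectangle of
+    # 1.8 m x 4 m centred at the origin (x in [-0.9, 0.9], y in [-2.0, 2.0])
+    # with a short heading tick along +y; the planned trajectory is then
+    # overlaid as cumulative per-step offsets starting from the origin.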
+ axes.plot([-0.9, -0.9], [-2, 2], color='mediumseagreen', linewidth=1, alpha=0.8) + axes.plot([-0.9, 0.9], [2, 2], color='mediumseagreen', linewidth=1, alpha=0.8) + axes.plot([0.9, 0.9], [2, -2], color='mediumseagreen', linewidth=1, alpha=0.8) + axes.plot([0.9, -0.9], [-2, -2], color='mediumseagreen', linewidth=1, alpha=0.8) + axes.plot([0.0, 0.0], [0.0, 2], color='mediumseagreen', linewidth=1, alpha=0.8) + plan_cmd = np.argmax(pred_data['plan_results'][sample_token][1][0,0,0]) + plan_traj = pred_data['plan_results'][sample_token][0][plan_cmd] + plan_traj[abs(plan_traj) < 0.01] = 0.0 + plan_traj = plan_traj.cumsum(axis=0) + plan_traj = np.concatenate((np.zeros((1, plan_traj.shape[1])), plan_traj), axis=0) + plan_traj = np.stack((plan_traj[:-1], plan_traj[1:]), axis=1) + + plan_vecs = None + for i in range(plan_traj.shape[0]): + plan_vec_i = plan_traj[i] + x_linspace = np.linspace(plan_vec_i[0, 0], plan_vec_i[1, 0], 51) + y_linspace = np.linspace(plan_vec_i[0, 1], plan_vec_i[1, 1], 51) + xy = np.stack((x_linspace, y_linspace), axis=1) + xy = np.stack((xy[:-1], xy[1:]), axis=1) + if plan_vecs is None: + plan_vecs = xy + else: + plan_vecs = np.concatenate((plan_vecs, xy), axis=0) + + cmap = 'winter' + y = np.sin(np.linspace(1/2*np.pi, 3/2*np.pi, 301)) + colors = color_map(y[:-1], cmap) + line_segments = LineCollection(plan_vecs, colors=colors, linewidths=1, linestyles='solid', cmap=cmap) + axes.add_collection(line_segments) + + axes.axes.xaxis.set_ticks([]) + axes.axes.yaxis.set_ticks([]) + axes.axis('off') + fig.set_tight_layout(True) + fig.canvas.draw() + plt.savefig(savepath+'/bev_pred.png', bbox_inches='tight', dpi=200) + plt.close() + + +def obtain_sensor2top(nusc, + sensor_token, + l2e_t, + l2e_r_mat, + e2g_t, + e2g_r_mat, + sensor_type='lidar'): + """Obtain the info with RT matric from general sensor to Top LiDAR. + + Args: + nusc (class): Dataset class in the nuScenes dataset. + sensor_token (str): Sample data token corresponding to the + specific sensor type. + l2e_t (np.ndarray): Translation from lidar to ego in shape (1, 3). + l2e_r_mat (np.ndarray): Rotation matrix from lidar to ego + in shape (3, 3). + e2g_t (np.ndarray): Translation from ego to global in shape (1, 3). + e2g_r_mat (np.ndarray): Rotation matrix from ego to global + in shape (3, 3). + sensor_type (str): Sensor to calibrate. Default: 'lidar'. + + Returns: + sweep (dict): Sweep information after transformation. 
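+
+    Note:
+        The composition follows the chain sweep sensor -> sweep ego -> global
+        -> current ego -> top lidar noted in the code below; with the
+        row-vector convention, points map as x_lidar = x_sensor @ R.T + T,
+        which is why every factor in the code appears transposed.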
+ """ + sd_rec = nusc.get('sample_data', sensor_token) + cs_record = nusc.get('calibrated_sensor', + sd_rec['calibrated_sensor_token']) + pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) + data_path = str(nusc.get_sample_data_path(sd_rec['token'])) + if os.getcwd() in data_path: # path from lyftdataset is absolute path + data_path = data_path.split(f'{os.getcwd()}/')[-1] # relative path + sweep = { + 'data_path': data_path, + 'type': sensor_type, + 'sample_data_token': sd_rec['token'], + 'sensor2ego_translation': cs_record['translation'], + 'sensor2ego_rotation': cs_record['rotation'], + 'ego2global_translation': pose_record['translation'], + 'ego2global_rotation': pose_record['rotation'], + 'timestamp': sd_rec['timestamp'] + } + + l2e_r_s = sweep['sensor2ego_rotation'] + l2e_t_s = sweep['sensor2ego_translation'] + e2g_r_s = sweep['ego2global_rotation'] + e2g_t_s = sweep['ego2global_translation'] + + # obtain the RT from sensor to Top LiDAR + # sweep->ego->global->ego'->lidar + l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix + e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix + R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ ( + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ ( + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T + ) + l2e_t @ np.linalg.inv(l2e_r_mat).T + sensor2lidar_rotation = R.T # points @ R.T + T + sensor2lidar_translation = T + + return sensor2lidar_rotation, sensor2lidar_translation + +def render_sample_data( + sample_toekn: str, + with_anns: bool = True, + box_vis_level: BoxVisibility = BoxVisibility.ANY, + axes_limit: float = 40, + ax=None, + nsweeps: int = 1, + out_path: str = None, + out_name: str = None, + underlay_map: bool = True, + use_flat_vehicle_coordinates: bool = True, + show_lidarseg: bool = False, + show_lidarseg_legend: bool = False, + filter_lidarseg_labels=None, + lidarseg_preds_bin_path: str = None, + verbose: bool = True, + show_panoptic: bool = False, + pred_data=None, + traj_use_perstep_offset: bool = True + ) -> None: + """ + Render sample data onto axis. + :param sample_data_token: Sample_data token. + :param with_anns: Whether to draw box annotations. + :param box_vis_level: If sample_data is an image, this sets required visibility for boxes. + :param axes_limit: Axes limit for lidar and radar (measured in meters). + :param ax: Axes onto which to render. + :param nsweeps: Number of sweeps for lidar and radar. + :param out_path: Optional path to save the rendered figure to disk. + :param underlay_map: When set to true, lidar data is plotted onto the map. This can be slow. + :param use_flat_vehicle_coordinates: Instead of the current sensor's coordinate frame, use ego frame which is + aligned to z-plane in the world. Note: Previously this method did not use flat vehicle coordinates, which + can lead to small errors when the vertical axis of the global frame and lidar are not aligned. The new + setting is more correct and rotates the plot by ~90 degrees. + :param show_lidarseg: When set to True, the lidar data is colored with the segmentation labels. When set + to False, the colors of the lidar data represent the distance from the center of the ego vehicle. + :param show_lidarseg_legend: Whether to display the legend for the lidarseg labels in the frame. + :param filter_lidarseg_labels: Only show lidar points which belong to the given list of classes. 
If None + or the list is empty, all classes will be displayed. + :param lidarseg_preds_bin_path: A path to the .bin file which contains the user's lidar segmentation + predictions for the sample. + :param verbose: Whether to display the image after it is rendered. + :param show_panoptic: When set to True, the lidar data is colored with the panoptic labels. When set + to False, the colors of the lidar data represent the distance from the center of the ego vehicle. + If show_lidarseg is True, show_panoptic will be set to False. + """ + lidiar_render(sample_toekn, pred_data, out_path=out_path, + out_name=out_name, traj_use_perstep_offset=traj_use_perstep_offset) + + +def parse_args(): + parser = argparse.ArgumentParser(description='Visualize VAD predictions') + parser.add_argument('--result-path', help='inference result file path') + parser.add_argument('--save-path', help='the dir to save visualization results') + args = parser.parse_args() + + return args + + +if __name__ == '__main__': + args = parse_args() + inference_result_path = args.result_path + out_path = args.save_path + bevformer_results = mmcv.load(inference_result_path) + sample_token_list = list(bevformer_results['results'].keys()) + + nusc = NuScenes(version='v1.0-trainval', dataroot='./data/nuscenes', verbose=True) + + imgs = [] + fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v') + video_path = osp.join(out_path, 'vis.mp4') + video = cv2.VideoWriter(video_path, fourcc, 10, (2933, 800), True) + for id in tqdm(range(len(sample_token_list))): + mmcv.mkdir_or_exist(out_path) + render_sample_data(sample_token_list[id], + pred_data=bevformer_results, + out_path=out_path) + pred_path = osp.join(out_path, 'bev_pred.png') + pred_img = cv2.imread(pred_path) + os.remove(pred_path) + + sample_token = sample_token_list[id] + sample = nusc.get('sample', sample_token) + # sample = data['results'][sample_token_list[0]][0] + cams = [ + 'CAM_FRONT_LEFT', + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_BACK_LEFT', + 'CAM_BACK', + 'CAM_BACK_RIGHT', + ] + + cam_imgs = [] + for cam in cams: + sample_data_token = sample['data'][cam] + sd_record = nusc.get('sample_data', sample_data_token) + sensor_modality = sd_record['sensor_modality'] + if sensor_modality in ['lidar', 'radar']: + assert False + elif sensor_modality == 'camera': + boxes = [Box(record['translation'], record['size'], Quaternion(record['rotation']), + name=record['detection_name'], token='predicted') for record in + bevformer_results['results'][sample_token]] + data_path, boxes_pred, camera_intrinsic = get_predicted_data(sample_data_token, + box_vis_level=BoxVisibility.ANY, + pred_anns=boxes) + _, boxes_gt, _ = nusc.get_sample_data(sample_data_token, box_vis_level=BoxVisibility.ANY) + + data = Image.open(data_path) + + # Show image. 
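+                # boxes_pred and boxes_gt are retrieved above but not drawn;
+                # this script only overlays the planned ego trajectory, and
+                # only on CAM_FRONT (see the `if cam == 'CAM_FRONT'` branch).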
+ _, ax = plt.subplots(1, 1, figsize=(6, 12)) + ax.imshow(data) + + if cam == 'CAM_FRONT': + lidar_sd_record = nusc.get('sample_data', sample['data']['LIDAR_TOP']) + lidar_cs_record = nusc.get('calibrated_sensor', lidar_sd_record['calibrated_sensor_token']) + lidar_pose_record = nusc.get('ego_pose', lidar_sd_record['ego_pose_token']) + + # get plan traj [x,y,z,w] quaternion, w=1 + # we set z=-1 to get points near the ground in lidar coord system + plan_cmd = np.argmax(bevformer_results['plan_results'][sample_token][1][0,0,0]) + plan_traj = bevformer_results['plan_results'][sample_token][0][plan_cmd] + plan_traj[abs(plan_traj) < 0.01] = 0.0 + plan_traj = plan_traj.cumsum(axis=0) + + plan_traj = np.concatenate(( + plan_traj[:, [0]], + plan_traj[:, [1]], + -1.0*np.ones((plan_traj.shape[0], 1)), + np.ones((plan_traj.shape[0], 1)), + ), axis=1) + # add the start point in lcf + plan_traj = np.concatenate((np.zeros((1, plan_traj.shape[1])), plan_traj), axis=0) + # plan_traj[0, :2] = 2*plan_traj[1, :2] - plan_traj[2, :2] + plan_traj[0, 0] = 0.3 + plan_traj[0, 2] = -1.0 + plan_traj[0, 3] = 1.0 + + l2e_r = lidar_cs_record['rotation'] + l2e_t = lidar_cs_record['translation'] + e2g_r = lidar_pose_record['rotation'] + e2g_t = lidar_pose_record['translation'] + l2e_r_mat = Quaternion(l2e_r).rotation_matrix + e2g_r_mat = Quaternion(e2g_r).rotation_matrix + s2l_r, s2l_t = obtain_sensor2top(nusc, sample_data_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, cam) + # obtain lidar to image transformation matrix + lidar2cam_r = np.linalg.inv(s2l_r) + lidar2cam_t = s2l_t @ lidar2cam_r.T + lidar2cam_rt = np.eye(4) + lidar2cam_rt[:3, :3] = lidar2cam_r.T + lidar2cam_rt[3, :3] = -lidar2cam_t + viewpad = np.eye(4) + viewpad[:camera_intrinsic.shape[0], :camera_intrinsic.shape[1]] = camera_intrinsic + lidar2img_rt = (viewpad @ lidar2cam_rt.T) + plan_traj = lidar2img_rt @ plan_traj.T + plan_traj = plan_traj[0:2, ...] / np.maximum( + plan_traj[2:3, ...], np.ones_like(plan_traj[2:3, ...]) * 1e-5) + plan_traj = plan_traj.T + plan_traj = np.stack((plan_traj[:-1], plan_traj[1:]), axis=1) + + plan_vecs = None + for i in range(plan_traj.shape[0]): + plan_vec_i = plan_traj[i] + x_linspace = np.linspace(plan_vec_i[0, 0], plan_vec_i[1, 0], 51) + y_linspace = np.linspace(plan_vec_i[0, 1], plan_vec_i[1, 1], 51) + xy = np.stack((x_linspace, y_linspace), axis=1) + xy = np.stack((xy[:-1], xy[1:]), axis=1) + if plan_vecs is None: + plan_vecs = xy + else: + plan_vecs = np.concatenate((plan_vecs, xy), axis=0) + + cmap = 'winter' + y = np.sin(np.linspace(1/2*np.pi, 3/2*np.pi, 301)) + colors = color_map(y[:-1], cmap) + line_segments = LineCollection(plan_vecs, colors=colors, linewidths=2, linestyles='solid', cmap=cmap) + ax.add_collection(line_segments) + + ax.set_xlim(0, data.size[0]) + ax.set_ylim(data.size[1], 0) + ax.axis('off') + if out_path is not None: + savepath = osp.join(out_path, f'{cam}_PRED') + plt.savefig(savepath, bbox_inches='tight', dpi=200, pad_inches=0.0) + plt.close() + + # Load boxes and image. 
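+                # The per-camera figure saved above is read back with OpenCV
+                # and stamped with the camera name; the font scale and
+                # thickness are derived from the line width lw so the label
+                # stays legible after the later resize.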
+ data_path = osp.join(out_path, f'{cam}_PRED.png') + cam_img = cv2.imread(data_path) + lw = 6 + tf = max(lw - 3, 1) + w, h = cv2.getTextSize(cam, 0, fontScale=lw / 6, thickness=tf)[0] # text width, height + # color=(0, 0, 0) + txt_color=(255, 255, 255) + cv2.putText(cam_img, + cam, (10, h + 10), + 0, + lw / 6, + txt_color, + thickness=tf, + lineType=cv2.LINE_AA) + cam_imgs.append(cam_img) + else: + raise ValueError("Error: Unknown sensor modality!") + + plan_cmd = np.argmax(bevformer_results['plan_results'][sample_token][1][0,0,0]) + cmd_list = ['Turn Right', 'Turn Left', 'Go Straight'] + plan_cmd_str = cmd_list[plan_cmd] + pred_img = cv2.copyMakeBorder(pred_img, 10, 10, 10, 10, cv2.BORDER_CONSTANT, None, value = 0) + # font + font = cv2.FONT_HERSHEY_SIMPLEX + # fontScale + fontScale = 1 + # Line thickness of 2 px + thickness = 3 + # org + org = (20, 40) + # Blue color in BGR + color = (0, 0, 0) + # Using cv2.putText() method + pred_img = cv2.putText(pred_img, 'BEV', org, font, + fontScale, color, thickness, cv2.LINE_AA) + pred_img = cv2.putText(pred_img, plan_cmd_str, (20, 770), font, + fontScale, color, thickness, cv2.LINE_AA) + + sample_img = pred_img + cam_img_top = cv2.hconcat([cam_imgs[0], cam_imgs[1], cam_imgs[2]]) + cam_img_down = cv2.hconcat([cam_imgs[3], cam_imgs[4], cam_imgs[5]]) + cam_img = cv2.vconcat([cam_img_top, cam_img_down]) + size = (2133, 800) + cam_img = cv2.resize(cam_img, size) + vis_img = cv2.hconcat([cam_img, sample_img]) + + video.write(vis_img) + + video.release() + cv2.destroyAllWindows() diff --git a/adzoo/vad/apis/__init__.py b/adzoo/vad/apis/__init__.py new file mode 100644 index 0000000..15dff22 --- /dev/null +++ b/adzoo/vad/apis/__init__.py @@ -0,0 +1,3 @@ +from .train import custom_train_model +from .mmdet_train import custom_train_detector +# from .test import custom_multi_gpu_test \ No newline at end of file diff --git a/adzoo/vad/apis/mmdet_train.py b/adzoo/vad/apis/mmdet_train.py new file mode 100644 index 0000000..687b989 --- /dev/null +++ b/adzoo/vad/apis/mmdet_train.py @@ -0,0 +1,196 @@ +import random +import warnings + +import numpy as np +import torch +import torch.distributed as dist +from torch.nn import DataParallel +from torch.nn.parallel.distributed import DistributedDataParallel +from mmcv.runner import (HOOKS, DistSamplerSeedHook, EpochBasedRunner, + Fp16OptimizerHook, OptimizerHook, + build_runner) +from mmcv.utils import build_from_cfg +from mmcv.optims import build_optimizer +from mmcv.core import EvalHook + +from mmcv.datasets import (build_dataset, + replace_ImageToTensor) +from mmcv.utils import get_root_logger +import time +import os.path as osp +from mmcv.datasets.builder import build_dataloader +from mmcv.core.evaluation.eval_hooks import CustomDistEvalHook +from mmcv.datasets.builder import custom_build_dataset +def custom_train_detector(model, + dataset, + cfg, + distributed=False, + validate=False, + timestamp=None, + eval_model=None, + meta=None): + logger = get_root_logger(cfg.log_level) + + # prepare data loaders + + dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] + #assert len(dataset)==1s + if 'imgs_per_gpu' in cfg.data: + logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. 
' + 'Please use "samples_per_gpu" instead') + if 'samples_per_gpu' in cfg.data: + logger.warning( + f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and ' + f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"' + f'={cfg.data.imgs_per_gpu} is used in this experiments') + else: + logger.warning( + 'Automatically set "samples_per_gpu"="imgs_per_gpu"=' + f'{cfg.data.imgs_per_gpu} in this experiments') + cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu + + data_loaders = [ + build_dataloader( + ds, + cfg.data.samples_per_gpu, + cfg.data.workers_per_gpu, + # cfg.gpus will be ignored if distributed + len(cfg.gpu_ids), + dist=distributed, + seed=cfg.seed, + shuffler_sampler=cfg.data.shuffler_sampler, # dict(type='DistributedGroupSampler'), + nonshuffler_sampler=cfg.data.nonshuffler_sampler, # dict(type='DistributedSampler'), + ) for ds in dataset + ] + + # put model on gpus + if distributed: + find_unused_parameters = cfg.get('find_unused_parameters', False) + # Sets the `find_unused_parameters` parameter in + # torch.nn.parallel.DistributedDataParallel + model = DistributedDataParallel( + model.cuda(), + device_ids=[torch.cuda.current_device()], + broadcast_buffers=False, + find_unused_parameters=find_unused_parameters) + if eval_model is not None: + eval_model = DistributedDataParallel( + eval_model.cuda(), + device_ids=[torch.cuda.current_device()], + broadcast_buffers=False, + find_unused_parameters=find_unused_parameters) + else: + model = DataParallel( + model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids) + if eval_model is not None: + eval_model = DataParallel( + eval_model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids) + + + # build runner + optimizer = build_optimizer(model, cfg.optimizer) + + if 'runner' not in cfg: + cfg.runner = { + 'type': 'EpochBasedRunner', + 'max_epochs': cfg.total_epochs + } + warnings.warn( + 'config is now expected to have a `runner` section, ' + 'please set `runner` in your config.', UserWarning) + else: + if 'total_epochs' in cfg: + assert cfg.total_epochs == cfg.runner.max_epochs + if eval_model is not None: + runner = build_runner( + cfg.runner, + default_args=dict( + model=model, + eval_model=eval_model, + optimizer=optimizer, + work_dir=cfg.work_dir, + logger=logger, + meta=meta)) + else: + runner = build_runner( + cfg.runner, + default_args=dict( + model=model, + optimizer=optimizer, + work_dir=cfg.work_dir, + logger=logger, + meta=meta)) + + # an ugly workaround to make .log and .log.json filenames the same + runner.timestamp = timestamp + + # fp16 setting + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + optimizer_config = Fp16OptimizerHook( + **cfg.optimizer_config, **fp16_cfg, distributed=distributed) + elif distributed and 'type' not in cfg.optimizer_config: + optimizer_config = OptimizerHook(**cfg.optimizer_config) + else: + optimizer_config = cfg.optimizer_config + + # register hooks + runner.register_training_hooks(cfg.lr_config, optimizer_config, + cfg.checkpoint_config, cfg.log_config, + cfg.get('momentum_config', None)) + + # register profiler hook + #trace_config = dict(type='tb_trace', dir_name='work_dir') + #profiler_config = dict(on_trace_ready=trace_config) + #runner.register_profiler_hook(profiler_config) + + if distributed: + if isinstance(runner, EpochBasedRunner): + runner.register_hook(DistSamplerSeedHook()) + + # register eval hooks + if validate: + # Support batch_size > 1 in validation + val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1) + if val_samples_per_gpu > 1: + assert False + # Replace 
'ImageToTensor' to 'DefaultFormatBundle' + cfg.data.val.pipeline = replace_ImageToTensor( + cfg.data.val.pipeline) + val_dataset = custom_build_dataset(cfg.data.val, dict(test_mode=True)) + + val_dataloader = build_dataloader( + val_dataset, + samples_per_gpu=val_samples_per_gpu, + workers_per_gpu=cfg.data.workers_per_gpu, + dist=distributed, + shuffle=False, + shuffler_sampler=cfg.data.shuffler_sampler, # dict(type='DistributedGroupSampler'), + nonshuffler_sampler=cfg.data.nonshuffler_sampler, # dict(type='DistributedSampler'), + ) + eval_cfg = cfg.get('evaluation', {}) + eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner' + eval_cfg['jsonfile_prefix'] = osp.join('val', cfg.work_dir, time.ctime().replace(' ','_').replace(':','_')) + eval_hook = CustomDistEvalHook if distributed else EvalHook + runner.register_hook(eval_hook(val_dataloader, **eval_cfg)) + + # user-defined hooks + if cfg.get('custom_hooks', None): + custom_hooks = cfg.custom_hooks + assert isinstance(custom_hooks, list), \ + f'custom_hooks expect list type, but got {type(custom_hooks)}' + for hook_cfg in cfg.custom_hooks: + assert isinstance(hook_cfg, dict), \ + 'Each item in custom_hooks expects dict type, but got ' \ + f'{type(hook_cfg)}' + hook_cfg = hook_cfg.copy() + priority = hook_cfg.pop('priority', 'NORMAL') + hook = build_from_cfg(hook_cfg, HOOKS) + runner.register_hook(hook, priority=priority) + + if cfg.resume_from: + runner.resume(cfg.resume_from) + elif cfg.load_from: + runner.load_checkpoint(cfg.load_from) + runner.run(data_loaders, cfg.workflow) + diff --git a/adzoo/vad/apis/test.py b/adzoo/vad/apis/test.py new file mode 100644 index 0000000..3d31abb --- /dev/null +++ b/adzoo/vad/apis/test.py @@ -0,0 +1,215 @@ +import os.path as osp +import pickle +import shutil +import tempfile +import time + +import torch +import torch.distributed as dist +from mmcv.image import tensor2imgs +from mmcv.utils import get_dist_info + +from mmcv.core import encode_mask_results +from mmcv.fileio.io import dump, load +from mmcv.utils import mkdir_or_exist, ProgressBar + +import numpy as np +import pycocotools.mask as mask_util + +def custom_encode_mask_results(mask_results): + """Encode bitmap mask to RLE code. Semantic Masks only + Args: + mask_results (list | tuple[list]): bitmap mask results. + In mask scoring rcnn, mask_results is a tuple of (segm_results, + segm_cls_score). + Returns: + list | tuple: RLE encoded mask. + """ + cls_segms = mask_results + num_classes = len(cls_segms) + encoded_mask_results = [] + for i in range(len(cls_segms)): + encoded_mask_results.append( + mask_util.encode( + np.array( + cls_segms[i][:, :, np.newaxis], order='F', + dtype='uint8'))[0]) # encoded with RLE + return [encoded_mask_results] + +def custom_multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): + """Test model with multiple gpus. + This method tests model with multiple gpus and collects the results + under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' + it encodes results to gpu tensors and use gpu communication for results + collection. On cpu mode it saves the results on different gpus to 'tmpdir' + and collects them by the rank 0 worker. + Args: + model (nn.Module): Model to be tested. + data_loader (nn.Dataloader): Pytorch data loader. + tmpdir (str): Path of directory to save the temporary results from + different gpus under cpu mode. + gpu_collect (bool): Option to use either gpu or cpu to collect results. + Returns: + list: The prediction results. 
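+    Note:
+        As implemented, the return value is a dict with keys 'bbox_results'
+        and 'mask_results'. In cpu mode with tmpdir=None, rank 0 creates a
+        random directory under '.dist_test' and broadcasts its name to the
+        other ranks as a zero-padded uint8 cuda tensor; every rank then dumps
+        its partial results there as 'part_{rank}.pkl' and rank 0 merges them
+        in rank order (see collect_results_cpu below).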
+ """ + model.eval() + bbox_results = [] + mask_results = [] + dataset = data_loader.dataset + rank, world_size = get_dist_info() + if rank == 0: + prog_bar = ProgressBar(len(dataset)) + time.sleep(2) # This line can prevent deadlock problem in some cases. + have_mask = False + for i, data in enumerate(data_loader): + with torch.no_grad(): + result = model(data,return_loss=False, rescale=True) + # encode mask results + if isinstance(result, dict): + if 'bbox_results' in result.keys(): + bbox_result = result['bbox_results'] + batch_size = len(result['bbox_results']) + bbox_results.extend(bbox_result) + if 'mask_results' in result.keys() and result['mask_results'] is not None: + mask_result = custom_encode_mask_results(result['mask_results']) + mask_results.extend(mask_result) + have_mask = True + else: + batch_size = len(result) + bbox_results.extend(result) + + if i>150: + break + + #if isinstance(result[0], tuple): + # assert False, 'this code is for instance segmentation, which our code will not utilize.' + # result = [(bbox_results, encode_mask_results(mask_results)) + # for bbox_results, mask_results in result] + if rank == 0: + + for _ in range(batch_size * world_size): + prog_bar.update() + + # collect results from all ranks + if gpu_collect: + bbox_results = collect_results_gpu(bbox_results, len(dataset)) + if have_mask: + mask_results = collect_results_gpu(mask_results, len(dataset)) + else: + mask_results = None + else: + bbox_results = collect_results_cpu(bbox_results, len(dataset), tmpdir) + tmpdir = tmpdir+'_mask' if tmpdir is not None else None + if have_mask: + mask_results = collect_results_cpu(mask_results, len(dataset), tmpdir) + else: + mask_results = None + + return {'bbox_results': bbox_results, 'mask_results': mask_results} + + +def collect_results_cpu(result_part, size, tmpdir=None): + rank, world_size = get_dist_info() + # create a tmp dir if it is not specified + if tmpdir is None: + MAX_LEN = 512 + # 32 is whitespace + dir_tensor = torch.full((MAX_LEN, ), + 32, + dtype=torch.uint8, + device='cuda') + if rank == 0: + mkdir_or_exist('.dist_test') + tmpdir = tempfile.mkdtemp(dir='.dist_test') + tmpdir = torch.tensor( + bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') + dir_tensor[:len(tmpdir)] = tmpdir + dist.broadcast(dir_tensor, 0) + tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() + else: + mkdir_or_exist(tmpdir) + # dump the part result to the dir + dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl')) + dist.barrier() + # collect all parts + if rank != 0: + return None + else: + # load results of all parts from tmp dir + part_list = [] + for i in range(world_size): + part_file = osp.join(tmpdir, f'part_{i}.pkl') + part_list.append(load(part_file)) + # sort the results + ordered_results = [] + ''' + bacause we change the sample of the evaluation stage to make sure that each gpu will handle continuous sample, + ''' + #for res in zip(*part_list): + for res in part_list: + ordered_results.extend(list(res)) + # the dataloader may pad some samples + ordered_results = ordered_results[:size] + # remove tmp dir + shutil.rmtree(tmpdir) + return ordered_results + + +def collect_results_gpu(result_part, size): + collect_results_cpu(result_part, size) + + +def single_gpu_test(model, data_loader): + """Test model with single gpu. + + This method tests model with single gpu and gives the 'show' option. + By setting ``show=True``, it saves the visualization results under + ``out_dir``. + + Args: + model (nn.Module): Model to be tested. 
+ data_loader (nn.Dataloader): Pytorch data loader. + show (bool, optional): Whether to save viualization results. + Default: True. + out_dir (str, optional): The path to save visualization results. + Default: None. + + Returns: + list[dict]: The prediction results. + """ + model.eval() + bbox_results = [] + mask_results = [] + dataset = data_loader.dataset + prog_bar = ProgressBar(len(dataset)) + time.sleep(2) # This line can prevent deadlock problem in some cases. + have_mask = False + + for i, data in enumerate(data_loader): + with torch.no_grad(): + result = model(data,return_loss=False, rescale=True) + batch_size = len(result['bbox_results']) + + # encode mask results + if isinstance(result, dict): + if 'bbox_results' in result.keys(): + bbox_result = result['bbox_results'] + batch_size = len(result['bbox_results']) + bbox_results.extend(bbox_result) + if 'mask_results' in result.keys() and result['mask_results'] is not None: + mask_result = custom_encode_mask_results(result['mask_results']) + mask_results.extend(mask_result) + have_mask = True + else: + batch_size = len(result) + bbox_results.extend(result) + + if isinstance(result[0], tuple): + assert False, 'this code is for instance segmentation, which our code will not utilize.' + result = [(bbox_results, encode_mask_results(mask_results)) + for bbox_results, mask_results in result] + + for _ in range(batch_size): + prog_bar.update() + + return {'bbox_results': bbox_results, 'mask_results': mask_results} diff --git a/adzoo/vad/apis/train.py b/adzoo/vad/apis/train.py new file mode 100644 index 0000000..049cd5c --- /dev/null +++ b/adzoo/vad/apis/train.py @@ -0,0 +1,60 @@ +from .mmdet_train import custom_train_detector + + +def custom_train_model(model, + dataset, + cfg, + distributed=False, + validate=False, + timestamp=None, + eval_model=None, + meta=None): + """A function wrapper for launching model training according to cfg. + + Because we need different eval_hook in runner. Should be deprecated in the + future. + """ + if cfg.model.type in ['EncoderDecoder3D']: + assert False + else: + custom_train_detector( + model, + dataset, + cfg, + distributed=distributed, + validate=validate, + timestamp=timestamp, + eval_model=eval_model, + meta=meta) + + +def train_model(model, + dataset, + cfg, + distributed=False, + validate=False, + timestamp=None, + meta=None): + """A function wrapper for launching model training according to cfg. + + Because we need different eval_hook in runner. Should be deprecated in the + future. 
+ """ + if cfg.model.type in ['EncoderDecoder3D']: + train_segmentor( + model, + dataset, + cfg, + distributed=distributed, + validate=validate, + timestamp=timestamp, + meta=meta) + else: + train_detector( + model, + dataset, + cfg, + distributed=distributed, + validate=validate, + timestamp=timestamp, + meta=meta) diff --git a/adzoo/vad/configs/VAD/VAD_base_e2e.py b/adzoo/vad/configs/VAD/VAD_base_e2e.py new file mode 100644 index 0000000..10319db --- /dev/null +++ b/adzoo/vad/configs/VAD/VAD_base_e2e.py @@ -0,0 +1,438 @@ +_base_ = [ + '../datasets/custom_nus-3d.py', + '../_base_/default_runtime.py' +] +# + +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-15.0, -30.0, -2.0, 15.0, 30.0, 2.0] +voxel_size = [0.15, 0.15, 4] + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' +] +num_classes = len(class_names) + +# map has classes: divider, ped_crossing, boundary +map_classes = ['divider', 'ped_crossing', 'boundary'] +map_num_vec = 100 +map_fixed_ptsnum_per_gt_line = 20 # now only support fixed_pts > 0 +map_fixed_ptsnum_per_pred_line = 20 +map_eval_use_same_gt_sample_num_flag = True +map_num_classes = len(map_classes) + +input_modality = dict( + use_lidar=False, + use_camera=True, + use_radar=False, + use_map=False, + use_external=True) + +_dim_ = 256 +_pos_dim_ = _dim_//2 +_ffn_dim_ = _dim_*2 +_num_levels_ = 4 +bev_h_ = 200 +bev_w_ = 200 +queue_length = 4 # each sequence contains `queue_length` frames. +total_epochs = 60 + +model = dict( + type='VAD', + use_grid_mask=True, + video_test_mode=True, + pretrained=dict(img='ckpts/resnet50-19c8e357.pth'), + img_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch'), + img_neck=dict( + type='FPN', + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=_num_levels_, + relu_before_extra_convs=True), + pts_bbox_head=dict( + type='VADHead', + map_thresh=0.5, + dis_thresh=0.2, + pe_normalization=True, + tot_epoch=total_epochs, + use_traj_lr_warmup=False, + query_thresh=0.0, + query_use_fix_pad=False, + ego_his_encoder=None, + ego_lcf_feat_idx=None, + valid_fut_ts=6, + ego_agent_decoder=dict( + type='CustomTransformerDecoder', + num_layers=1, + return_intermediate=False, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('cross_attn', 'norm', 'ffn', 'norm'))), + ego_map_decoder=dict( + type='CustomTransformerDecoder', + num_layers=1, + return_intermediate=False, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('cross_attn', 'norm', 'ffn', 'norm'))), + motion_decoder=dict( + type='CustomTransformerDecoder', + num_layers=1, + return_intermediate=False, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + 
num_heads=8, + dropout=0.1), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('cross_attn', 'norm', 'ffn', 'norm'))), + motion_map_decoder=dict( + type='CustomTransformerDecoder', + num_layers=1, + return_intermediate=False, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('cross_attn', 'norm', 'ffn', 'norm'))), + use_pe=True, + bev_h=bev_h_, + bev_w=bev_w_, + num_query=300, + num_classes=num_classes, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + map_num_vec=map_num_vec, + map_num_classes=map_num_classes, + map_num_pts_per_vec=map_fixed_ptsnum_per_pred_line, + map_num_pts_per_gt_vec=map_fixed_ptsnum_per_gt_line, + map_query_embed_type='instance_pts', + map_transform_method='minmax', + map_gt_shift_pts_pattern='v2', + map_dir_interval=1, + map_code_size=2, + map_code_weights=[1.0, 1.0, 1.0, 1.0], + transformer=dict( + type='VADPerceptionTransformer', + map_num_vec=map_num_vec, + map_num_pts_per_vec=map_fixed_ptsnum_per_pred_line, + rotate_prev_bev=True, + use_shift=True, + use_can_bus=True, + embed_dims=_dim_, + encoder=dict( + type='BEVFormerEncoder', + num_layers=6, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type='BEVFormerLayer', + attn_cfgs=[ + dict( + type='TemporalSelfAttention', + embed_dims=_dim_, + num_levels=1), + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=8, + num_levels=_num_levels_), + embed_dims=_dim_, + ) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm'))), + decoder=dict( + type='DetectionTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + dict( + type='CustomMSDeformableAttention', + embed_dims=_dim_, + num_levels=1), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm'))), + map_decoder=dict( + type='MapDetectionTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + dict( + type='CustomMSDeformableAttention', + embed_dims=_dim_, + num_levels=1), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')))), + bbox_coder=dict( + type='CustomNMSFreeCoder', + post_center_range=[-20, -35, -10.0, 20, 35, 10.0], + pc_range=point_cloud_range, + max_num=100, + voxel_size=voxel_size, + num_classes=num_classes), + map_bbox_coder=dict( + type='MapNMSFreeCoder', + post_center_range=[-20, -35, -20, -35, 20, 35, 20, 35], + pc_range=point_cloud_range, + max_num=50, + voxel_size=voxel_size, + num_classes=map_num_classes), + positional_encoding=dict( + type='LearnedPositionalEncoding', + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + ), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + 
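+            # gamma=2.0 down-weights easy examples and alpha=0.25 re-balances
+            # positive vs. negative samples (the standard focal-loss settings
+            # from RetinaNet); loss_weight scales this term in the total loss.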
loss_weight=2.0), + loss_bbox=dict(type='L1Loss', loss_weight=0.25), + loss_traj=dict(type='L1Loss', loss_weight=0.2), + loss_traj_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=0.2), + loss_iou=dict(type='GIoULoss', loss_weight=0.0), + loss_map_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_map_bbox=dict(type='L1Loss', loss_weight=0.0), + loss_map_iou=dict(type='GIoULoss', loss_weight=0.0), + loss_map_pts=dict(type='PtsL1Loss', loss_weight=1.0), + loss_map_dir=dict(type='PtsDirCosLoss', loss_weight=0.005), + loss_plan_reg=dict(type='L1Loss', loss_weight=1.0), + loss_plan_bound=dict(type='PlanMapBoundLoss', loss_weight=1.0, dis_thresh=1.0), + loss_plan_col=dict(type='PlanCollisionLoss', loss_weight=1.0), + loss_plan_dir=dict(type='PlanMapDirectionLoss', loss_weight=0.5)), + # model training and testing settings + train_cfg=dict(pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBox3DL1Cost', weight=0.25), + iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. + pc_range=point_cloud_range), + map_assigner=dict( + type='MapHungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBoxL1Cost', weight=0.0, box_format='xywh'), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=0.0), + pts_cost=dict(type='OrderedPtsL1Cost', weight=1.0), + pc_range=point_cloud_range)))) + +dataset_type = 'VADCustomNuScenesDataset' +data_root = 'data/nuscenes/' +file_client_args = dict(backend='disk') + +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='PhotoMetricDistortionMultiViewImage'), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=True), + dict(type='VADObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='VADObjectNameFilter', classes=class_names), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='RandomScaleImageMultiViewImage', scales=[0.8]), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='VADFormatBundle3D', class_names=class_names, with_ego=True), + dict(type='CustomCollect3D',\ + keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', 'ego_his_trajs', + 'ego_fut_trajs', 'ego_fut_masks', 'ego_fut_cmd', 'ego_lcf_feat', 'gt_attr_labels']) +] + +test_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=True), + dict(type='VADObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='VADObjectNameFilter', classes=class_names), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + # dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict(type='RandomScaleImageMultiViewImage', scales=[0.8]), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='VADFormatBundle3D', class_names=class_names, with_label=False, with_ego=True), + dict(type='CustomCollect3D',\ + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d', 'img', 'fut_valid_flag', + 
'ego_his_trajs', 'ego_fut_trajs', 'ego_fut_masks', 'ego_fut_cmd', + 'ego_lcf_feat', 'gt_attr_labels'])]) +] + +data = dict( + samples_per_gpu=1, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'vad_nuscenes_infos_temporal_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + use_valid_flag=True, + bev_size=(bev_h_, bev_w_), + pc_range=point_cloud_range, + queue_length=queue_length, + map_classes=map_classes, + map_fixed_ptsnum_per_line=map_fixed_ptsnum_per_gt_line, + map_eval_use_same_gt_sample_num_flag=map_eval_use_same_gt_sample_num_flag, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR', + custom_eval_version='vad_nusc_detection_cvpr_2019'), + val=dict(type=dataset_type, + data_root=data_root, + pc_range=point_cloud_range, + ann_file=data_root + 'vad_nuscenes_infos_temporal_val.pkl', + pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), + classes=class_names, modality=input_modality, samples_per_gpu=1, + map_classes=map_classes, + map_ann_file=data_root + 'nuscenes_map_anns_val.json', + map_fixed_ptsnum_per_line=map_fixed_ptsnum_per_gt_line, + map_eval_use_same_gt_sample_num_flag=map_eval_use_same_gt_sample_num_flag, + use_pkl_result=True, + custom_eval_version='vad_nusc_detection_cvpr_2019'), + test=dict(type=dataset_type, + data_root=data_root, + pc_range=point_cloud_range, + ann_file=data_root + 'vad_nuscenes_infos_temporal_val.pkl', + pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), + classes=class_names, modality=input_modality, samples_per_gpu=1, + map_classes=map_classes, + map_ann_file=data_root + 'nuscenes_map_anns_val.json', + map_fixed_ptsnum_per_line=map_fixed_ptsnum_per_gt_line, + map_eval_use_same_gt_sample_num_flag=map_eval_use_same_gt_sample_num_flag, + use_pkl_result=True, + custom_eval_version='vad_nusc_detection_cvpr_2019'), + shuffler_sampler=dict(type='DistributedGroupSampler'), + nonshuffler_sampler=dict(type='DistributedSampler') +) + +optimizer = dict( + type='AdamW', + lr=2e-4, + paramwise_cfg=dict( + custom_keys={ + 'img_backbone': dict(lr_mult=0.1), + }), + weight_decay=0.01) + +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + min_lr_ratio=1e-3) + +evaluation = dict(interval=total_epochs, pipeline=test_pipeline, metric='bbox', map_metric='chamfer') + +runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) + +log_config = dict( + interval=1, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook') + ]) +# fp16 = dict(loss_scale=512.) 
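Since the BEV grid and `point_cloud_range` above must stay in sync whenever either is edited (the comment at the top of VAD_base_e2e.py warns about exactly this), here is a minimal sanity-check sketch in Python. It is an editorial illustration, not part of the patch, and it assumes the usual BEVFormer convention that the grid width spans the x extent and the grid height spans the y extent:

# Values mirror VAD_base_e2e.py above; the print format is illustrative only.
point_cloud_range = [-15.0, -30.0, -2.0, 15.0, 30.0, 2.0]
bev_h_, bev_w_ = 200, 200

cell_x = (point_cloud_range[3] - point_cloud_range[0]) / bev_w_  # 30 m / 200 = 0.15 m
cell_y = (point_cloud_range[4] - point_cloud_range[1]) / bev_h_  # 60 m / 200 = 0.30 m
print(f"one BEV cell covers {cell_x:.2f} m (x) by {cell_y:.2f} m (y)")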
+# find_unused_parameters = True
+checkpoint_config = dict(interval=1, max_keep_ckpts=total_epochs)
+
+
+custom_hooks = [dict(type='CustomSetEpochInfoHook')]
\ No newline at end of file
diff --git a/adzoo/vad/configs/VAD/VAD_base_e2e_b2d.py b/adzoo/vad/configs/VAD/VAD_base_e2e_b2d.py
new file mode 100644
index 0000000..8d59fa0
--- /dev/null
+++ b/adzoo/vad/configs/VAD/VAD_base_e2e_b2d.py
@@ -0,0 +1,568 @@
+_base_ = [
+    '../datasets/custom_nus-3d.py',
+    '../_base_/default_runtime.py'
+]
+
+
+# If point cloud range is changed, the models should also change their point
+# cloud range accordingly
+point_cloud_range = [-15.0, -30.0, -2.0, 15.0, 30.0, 2.0]
+voxel_size = [0.15, 0.15, 4]
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+# For Bench2Drive (CARLA) we map raw actor/asset type strings to the
+# detection classes in `class_names` below.
+
+NameMapping = {
+    #=================vehicle=================
+    # bicycle
+    'vehicle.bh.crossbike': 'bicycle',
+    "vehicle.diamondback.century": 'bicycle',
+    "vehicle.gazelle.omafiets": 'bicycle',
+    # car
+    "vehicle.chevrolet.impala": 'car',
+    "vehicle.dodge.charger_2020": 'car',
+    "vehicle.dodge.charger_police": 'car',
+    "vehicle.dodge.charger_police_2020": 'car',
+    "vehicle.lincoln.mkz_2017": 'car',
+    "vehicle.lincoln.mkz_2020": 'car',
+    "vehicle.mini.cooper_s_2021": 'car',
+    "vehicle.mercedes.coupe_2020": 'car',
+    "vehicle.ford.mustang": 'car',
+    "vehicle.nissan.patrol_2021": 'car',
+    "vehicle.audi.tt": 'car',
+    "vehicle.audi.etron": 'car',
+    "vehicle.ford.crown": 'car',
+    "vehicle.tesla.model3": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/FordCrown/SM_FordCrown_parked.SM_FordCrown_parked": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Charger/SM_ChargerParked.SM_ChargerParked": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Lincoln/SM_LincolnParked.SM_LincolnParked": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/MercedesCCC/SM_MercedesCCC_Parked.SM_MercedesCCC_Parked": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/Mini2021/SM_Mini2021_parked.SM_Mini2021_parked": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/NissanPatrol2021/SM_NissanPatrol2021_parked.SM_NissanPatrol2021_parked": 'car',
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/TeslaM3/SM_TeslaM3_parked.SM_TeslaM3_parked": 'car',
+    # bus
+    # van
+    "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/VolkswagenT2/SM_VolkswagenT2_2021_Parked.SM_VolkswagenT2_2021_Parked": "van",
+    "vehicle.ford.ambulance": "van",
+    # truck
+    "vehicle.carlamotors.firetruck": 'truck',
+    #=========================================
+
+    #=================traffic sign============
+    # traffic.speed_limit
+    "traffic.speed_limit.30": 'traffic_sign',
+    "traffic.speed_limit.40": 'traffic_sign',
+    "traffic.speed_limit.50": 'traffic_sign',
+    "traffic.speed_limit.60": 'traffic_sign',
+    "traffic.speed_limit.90": 'traffic_sign',
+    "traffic.speed_limit.120": 'traffic_sign',
+
+    "traffic.stop": 'traffic_sign',
+    "traffic.yield": 'traffic_sign',
+    "traffic.traffic_light": 'traffic_light',
+    #=========================================
+
+    #===================Construction===========
+    "static.prop.warningconstruction": 'traffic_cone',
+    "static.prop.warningaccident": 'traffic_cone',
+    "static.prop.trafficwarning": "traffic_cone",
+    "static.prop.constructioncone": 'traffic_cone',
+ #=================pedestrian============== + "walker.pedestrian.0001": 'pedestrian', + "walker.pedestrian.0004": 'pedestrian', + "walker.pedestrian.0005": 'pedestrian', + "walker.pedestrian.0007": 'pedestrian', + "walker.pedestrian.0013": 'pedestrian', + "walker.pedestrian.0014": 'pedestrian', + "walker.pedestrian.0017": 'pedestrian', + "walker.pedestrian.0018": 'pedestrian', + "walker.pedestrian.0019": 'pedestrian', + "walker.pedestrian.0020": 'pedestrian', + "walker.pedestrian.0022": 'pedestrian', + "walker.pedestrian.0025": 'pedestrian', + "walker.pedestrian.0035": 'pedestrian', + "walker.pedestrian.0041": 'pedestrian', + "walker.pedestrian.0046": 'pedestrian', + "walker.pedestrian.0047": 'pedestrian', + + # ========================================== + "static.prop.dirtdebris01": 'others', + "static.prop.dirtdebris02": 'others', +} + +eval_cfg = { + "dist_ths": [0.5, 1.0, 2.0, 4.0], + "dist_th_tp": 2.0, + "min_recall": 0.1, + "min_precision": 0.1, + "mean_ap_weight": 5, + "class_names":['car','van','truck','bicycle','traffic_sign','traffic_cone','traffic_light','pedestrian'], + "tp_metrics":['trans_err', 'scale_err', 'orient_err', 'vel_err'], + "err_name_maping":{'trans_err': 'mATE','scale_err': 'mASE','orient_err': 'mAOE','vel_err': 'mAVE','attr_err': 'mAAE'}, + "class_range":{'car':(50,50),'van':(50,50),'truck':(50,50),'bicycle':(40,40),'traffic_sign':(30,30),'traffic_cone':(30,30),'traffic_light':(30,30),'pedestrian':(40,40)} + } + +class_names = [ +'car','van','truck','bicycle','traffic_sign','traffic_cone','traffic_light','pedestrian','others' +] +num_classes = len(class_names) + +# map has classes: divider, ped_crossing, boundary +map_classes = ['Broken','Solid','SolidSolid','Center','TrafficLight','StopSign'] +map_num_vec = 100 +map_fixed_ptsnum_per_gt_line = 20 # now only support fixed_pts > 0 +map_fixed_ptsnum_per_pred_line = 20 +map_eval_use_same_gt_sample_num_flag = True +map_num_classes = len(map_classes) +past_frames = 2 +future_frames = 6 + +input_modality = dict( + use_lidar=False, + use_camera=True, + use_radar=False, + use_map=False, + use_external=True) + +_dim_ = 256 +_pos_dim_ = _dim_//2 +_ffn_dim_ = _dim_*2 +_num_levels_ = 4 +bev_h_ = 200 +bev_w_ = 200 +queue_length = 4 # each sequence contains `queue_length` frames. 
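The `NameMapping` and `class_names` tables above jointly determine the label id each raw CARLA actor receives. A short illustrative sketch of that lookup (the helper `actor_to_label` and the 'others' fallback are hypothetical; the real B2D_VAD_Dataset does this internally and may handle unmapped actors differently):

# Abbreviated subset of the NameMapping / class_names defined above.
name_mapping = {
    'vehicle.tesla.model3': 'car',
    'vehicle.ford.ambulance': 'van',
    'traffic.stop': 'traffic_sign',
    'walker.pedestrian.0001': 'pedestrian',
}
class_names = ['car', 'van', 'truck', 'bicycle', 'traffic_sign',
               'traffic_cone', 'traffic_light', 'pedestrian', 'others']

def actor_to_label(actor_type: str) -> int:
    # collapse the raw actor string to a training class, then to its index
    return class_names.index(name_mapping.get(actor_type, 'others'))

assert actor_to_label('vehicle.tesla.model3') == 0
assert actor_to_label('static.prop.dirtdebris01') == 8  # falls through to 'others'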
+total_epochs = 60 + +model = dict( + type='VAD', + use_grid_mask=True, + video_test_mode=True, + pretrained=dict(img='ckpts/resnet50-19c8e357.pth'), + img_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch'), + img_neck=dict( + type='FPN', + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=_num_levels_, + relu_before_extra_convs=True), + pts_bbox_head=dict( + type='VADHead', + map_thresh=0.5, + dis_thresh=0.2, + pe_normalization=True, + tot_epoch=total_epochs, + use_traj_lr_warmup=False, + query_thresh=0.0, + query_use_fix_pad=False, + ego_his_encoder=None, + ego_lcf_feat_idx=None, + valid_fut_ts=6, + ego_fut_mode=6, + ego_agent_decoder=dict( + type='CustomTransformerDecoder', + num_layers=1, + return_intermediate=False, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('cross_attn', 'norm', 'ffn', 'norm'))), + ego_map_decoder=dict( + type='CustomTransformerDecoder', + num_layers=1, + return_intermediate=False, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('cross_attn', 'norm', 'ffn', 'norm'))), + motion_decoder=dict( + type='CustomTransformerDecoder', + num_layers=1, + return_intermediate=False, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('cross_attn', 'norm', 'ffn', 'norm'))), + motion_map_decoder=dict( + type='CustomTransformerDecoder', + num_layers=1, + return_intermediate=False, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('cross_attn', 'norm', 'ffn', 'norm'))), + use_pe=True, + bev_h=bev_h_, + bev_w=bev_w_, + num_query=300, + num_classes=num_classes, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + map_num_vec=map_num_vec, + map_num_classes=map_num_classes, + map_num_pts_per_vec=map_fixed_ptsnum_per_pred_line, + map_num_pts_per_gt_vec=map_fixed_ptsnum_per_gt_line, + map_query_embed_type='instance_pts', + map_transform_method='minmax', + map_gt_shift_pts_pattern='v2', + map_dir_interval=1, + map_code_size=2, + map_code_weights=[1.0, 1.0, 1.0, 1.0], + transformer=dict( + type='VADPerceptionTransformer', + map_num_vec=map_num_vec, + map_num_pts_per_vec=map_fixed_ptsnum_per_pred_line, + rotate_prev_bev=True, + use_shift=True, + use_can_bus=True, + embed_dims=_dim_, + encoder=dict( + type='BEVFormerEncoder', + num_layers=6, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type='BEVFormerLayer', + attn_cfgs=[ + dict( + type='TemporalSelfAttention', + embed_dims=_dim_, + num_levels=1), + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + 
num_points=8, + num_levels=_num_levels_), + embed_dims=_dim_, + ) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm'))), + decoder=dict( + type='DetectionTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + dict( + type='CustomMSDeformableAttention', + embed_dims=_dim_, + num_levels=1), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm'))), + map_decoder=dict( + type='MapDetectionTransformerDecoder', + num_layers=6, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + dict( + type='CustomMSDeformableAttention', + embed_dims=_dim_, + num_levels=1), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')))), + bbox_coder=dict( + type='CustomNMSFreeCoder', + post_center_range=[-20, -35, -10.0, 20, 35, 10.0], + pc_range=point_cloud_range, + max_num=100, + voxel_size=voxel_size, + num_classes=num_classes), + map_bbox_coder=dict( + type='MapNMSFreeCoder', + post_center_range=[-20, -35, -20, -35, 20, 35, 20, 35], + pc_range=point_cloud_range, + max_num=50, + voxel_size=voxel_size, + num_classes=map_num_classes), + positional_encoding=dict( + type='LearnedPositionalEncoding', + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + ), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='L1Loss', loss_weight=0.25), + loss_traj=dict(type='L1Loss', loss_weight=0.2), + loss_traj_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=0.2), + loss_iou=dict(type='GIoULoss', loss_weight=0.0), + loss_map_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_map_bbox=dict(type='L1Loss', loss_weight=0.0), + loss_map_iou=dict(type='GIoULoss', loss_weight=0.0), + loss_map_pts=dict(type='PtsL1Loss', loss_weight=1.0), + loss_map_dir=dict(type='PtsDirCosLoss', loss_weight=0.005), + loss_plan_reg=dict(type='L1Loss', loss_weight=1.0), + loss_plan_bound=dict(type='PlanMapBoundLoss', loss_weight=1.0, dis_thresh=1.0), + loss_plan_col=dict(type='PlanCollisionLoss', loss_weight=1.0), + loss_plan_dir=dict(type='PlanMapDirectionLoss', loss_weight=0.5)), + # model training and testing settings + train_cfg=dict(pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBox3DL1Cost', weight=0.25), + iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. 
+ pc_range=point_cloud_range), + map_assigner=dict( + type='MapHungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBoxL1Cost', weight=0.0, box_format='xywh'), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=0.0), + pts_cost=dict(type='OrderedPtsL1Cost', weight=1.0), + pc_range=point_cloud_range)))) + +dataset_type = "B2D_VAD_Dataset" +data_root = "data/bench2drive" +info_root = "data/infos" +map_root = "data/bench2drive/maps" +map_file = "data/infos/b2d_map_infos.pkl" +file_client_args = dict(backend="disk") +ann_file_train=info_root + f"/b2d_infos_train.pkl" +ann_file_val=info_root + f"/b2d_infos_val.pkl" +ann_file_test=info_root + f"/b2d_infos_val.pkl" + +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='PhotoMetricDistortionMultiViewImage'), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=True), + dict(type='VADObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='VADObjectNameFilter', classes=class_names), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='RandomScaleImageMultiViewImage', scales=[0.8]), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='VADFormatBundle3D', class_names=class_names, with_ego=True), + dict(type='CustomCollect3D',\ + keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', 'ego_his_trajs','gt_attr_labels','ego_fut_trajs', 'ego_fut_masks', 'ego_fut_cmd', 'ego_lcf_feat']) +] + +test_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=True), + dict(type='VADObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='VADObjectNameFilter', classes=class_names), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + # dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict(type='RandomScaleImageMultiViewImage', scales=[0.8]), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='VADFormatBundle3D', class_names=class_names, with_label=False, with_ego=True), + dict(type='CustomCollect3D',\ + keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', 'fut_valid_flag', + 'ego_his_trajs', 'ego_fut_trajs', 'ego_fut_masks', 'ego_fut_cmd', + 'ego_lcf_feat','gt_attr_labels'])]) +] + +inference_only_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict(type='RandomScaleImageMultiViewImage', scales=[0.8]), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='VADFormatBundle3D', class_names=class_names, with_label=False, with_ego=True), + dict(type='CustomCollect3D', keys=[ 'img', 'ego_fut_cmd'])]) +] + + +data = dict( + samples_per_gpu=1, + workers_per_gpu=6, + train=dict( + + type=dataset_type, + data_root=data_root, + ann_file=ann_file_train, + pipeline=train_pipeline, + classes=class_names, + name_mapping=NameMapping, + map_root=map_root, + map_file=map_file, + modality=input_modality, + bev_size=(bev_h_, bev_w_), + queue_length=queue_length, + past_frames=past_frames, + future_frames=future_frames, + point_cloud_range=point_cloud_range, + polyline_points_num=map_fixed_ptsnum_per_gt_line, + # we use box_type_3d='LiDAR' 
in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR', + #custom_eval_version='vad_nusc_detection_cvpr_2019' + ), + val=dict(type=dataset_type, + + data_root=data_root, + ann_file=ann_file_train, + pipeline=train_pipeline, + classes=class_names, + name_mapping=NameMapping, + map_root=map_root, + map_file=map_file, + modality=input_modality, + bev_size=(bev_h_, bev_w_), + queue_length=queue_length, + past_frames=past_frames, + future_frames=future_frames, + point_cloud_range=point_cloud_range, + polyline_points_num=map_fixed_ptsnum_per_gt_line, + #use_pkl_result=True, + #custom_eval_version='vad_nusc_detection_cvpr_2019' + ), + test=dict(type=dataset_type, + data_root=data_root, + ann_file=ann_file_val, + pipeline=test_pipeline, + classes=class_names, + name_mapping=NameMapping, + map_root=map_root, + map_file=map_file, + modality=input_modality, + bev_size=(bev_h_, bev_w_), + queue_length=queue_length, + past_frames=past_frames, + future_frames=future_frames, + point_cloud_range=point_cloud_range, + polyline_points_num=map_fixed_ptsnum_per_gt_line, + eval_cfg=eval_cfg + ), + shuffler_sampler=dict(type='DistributedGroupSampler'), + nonshuffler_sampler=dict(type='DistributedSampler') +) + +optimizer = dict( + type='AdamW', + lr=2e-4, + paramwise_cfg=dict( + custom_keys={ + 'img_backbone': dict(lr_mult=0.1), + }), + weight_decay=0.01) + +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + min_lr_ratio=1e-3) + +evaluation = dict(interval=total_epochs, pipeline=test_pipeline, metric='bbox', map_metric='chamfer') + +runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) + +log_config = dict( + interval=1, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook') + ]) +# fp16 = dict(loss_scale=512.) 
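The optimizer block above trains the image backbone at a 10x smaller learning rate via `paramwise_cfg`. A hedged standalone sketch of the effect (mmcv's optimizer constructor performs the actual bookkeeping; this only mirrors its substring-style `custom_keys` matching for intuition):

base_lr = 2e-4
custom_keys = {'img_backbone': dict(lr_mult=0.1)}

def effective_lr(param_name: str) -> float:
    # any parameter whose qualified name contains a custom key is scaled
    for key, cfg in custom_keys.items():
        if key in param_name:
            return base_lr * cfg['lr_mult']
    return base_lr

print(effective_lr('img_backbone.layer1.0.conv1.weight'))   # ~2e-05
print(effective_lr('pts_bbox_head.cls_branches.0.weight'))  # 0.0002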
+# find_unused_parameters = True +checkpoint_config = dict(interval=1, max_keep_ckpts=total_epochs) + + +custom_hooks = [dict(type='CustomSetEpochInfoHook')] \ No newline at end of file diff --git a/adzoo/vad/configs/VAD/VAD_tiny_e2e.py b/adzoo/vad/configs/VAD/VAD_tiny_e2e.py new file mode 100644 index 0000000..67e088a --- /dev/null +++ b/adzoo/vad/configs/VAD/VAD_tiny_e2e.py @@ -0,0 +1,454 @@ +_base_ = [ + '../datasets/custom_nus-3d.py', + '../_base_/default_runtime.py' +] + + +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-15.0, -30.0, -2.0, 15.0, 30.0, 2.0] +voxel_size = [0.15, 0.15, 4] + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' +] +num_classes = len(class_names) + +# map has classes: divider, ped_crossing, boundary +map_classes = ['divider', 'ped_crossing', 'boundary'] +map_num_vec = 100 +map_fixed_ptsnum_per_gt_line = 20 # now only support fixed_pts > 0 +map_fixed_ptsnum_per_pred_line = 20 +map_eval_use_same_gt_sample_num_flag = True +map_num_classes = len(map_classes) + +input_modality = dict( + use_lidar=False, + use_camera=True, + use_radar=False, + use_map=False, + use_external=True) + +_dim_ = 256 +_pos_dim_ = _dim_//2 +_ffn_dim_ = _dim_*2 +_num_levels_ = 1 +bev_h_ = 100 +bev_w_ = 100 +queue_length = 3 # each sequence contains `queue_length` frames. +total_epochs = 60 + +model = dict( + type='VAD', + use_grid_mask=True, + video_test_mode=True, + pretrained=dict(img='torchvision://resnet50'), + img_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3,), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='pytorch'), + img_neck=dict( + type='FPN', + in_channels=[2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=_num_levels_, + relu_before_extra_convs=True), + pts_bbox_head=dict( + type='VADHead', + map_thresh=0.5, + dis_thresh=0.2, + pe_normalization=True, + tot_epoch=total_epochs, + use_traj_lr_warmup=False, + query_thresh=0.0, + query_use_fix_pad=False, + ego_his_encoder=None, + ego_lcf_feat_idx=None, + valid_fut_ts=6, + ego_agent_decoder=dict( + type='CustomTransformerDecoder', + num_layers=1, + return_intermediate=False, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('cross_attn', 'norm', 'ffn', 'norm'))), + ego_map_decoder=dict( + type='CustomTransformerDecoder', + num_layers=1, + return_intermediate=False, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('cross_attn', 'norm', 'ffn', 'norm'))), + motion_decoder=dict( + type='CustomTransformerDecoder', + num_layers=1, + return_intermediate=False, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('cross_attn', 'norm', 'ffn', 'norm'))), + 
motion_map_decoder=dict( + type='CustomTransformerDecoder', + num_layers=1, + return_intermediate=False, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('cross_attn', 'norm', 'ffn', 'norm'))), + use_pe=True, + bev_h=bev_h_, + bev_w=bev_w_, + num_query=300, + num_classes=num_classes, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + map_num_vec=map_num_vec, + map_num_classes=map_num_classes, + map_num_pts_per_vec=map_fixed_ptsnum_per_pred_line, + map_num_pts_per_gt_vec=map_fixed_ptsnum_per_gt_line, + map_query_embed_type='instance_pts', + map_transform_method='minmax', + map_gt_shift_pts_pattern='v2', + map_dir_interval=1, + map_code_size=2, + map_code_weights=[1.0, 1.0, 1.0, 1.0], + transformer=dict( + type='VADPerceptionTransformer', + map_num_vec=map_num_vec, + map_num_pts_per_vec=map_fixed_ptsnum_per_pred_line, + rotate_prev_bev=True, + use_shift=True, + use_can_bus=True, + embed_dims=_dim_, + encoder=dict( + type='BEVFormerEncoder', + num_layers=3, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type='BEVFormerLayer', + attn_cfgs=[ + dict( + type='TemporalSelfAttention', + embed_dims=_dim_, + num_levels=1), + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=8, + num_levels=_num_levels_), + embed_dims=_dim_, + ) + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm'))), + decoder=dict( + type='DetectionTransformerDecoder', + num_layers=3, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + dict( + type='CustomMSDeformableAttention', + embed_dims=_dim_, + num_levels=1), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm'))), + map_decoder=dict( + type='MapDetectionTransformerDecoder', + num_layers=3, + return_intermediate=True, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=[ + dict( + type='MultiheadAttention', + embed_dims=_dim_, + num_heads=8, + dropout=0.1), + dict( + type='CustomMSDeformableAttention', + embed_dims=_dim_, + num_levels=1), + ], + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', + 'ffn', 'norm')))), + bbox_coder=dict( + type='CustomNMSFreeCoder', + post_center_range=[-20, -35, -10.0, 20, 35, 10.0], + pc_range=point_cloud_range, + max_num=100, + voxel_size=voxel_size, + num_classes=num_classes), + map_bbox_coder=dict( + type='MapNMSFreeCoder', + post_center_range=[-20, -35, -20, -35, 20, 35, 20, 35], + pc_range=point_cloud_range, + max_num=50, + voxel_size=voxel_size, + num_classes=map_num_classes), + positional_encoding=dict( + type='LearnedPositionalEncoding', + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + ), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_bbox=dict(type='L1Loss', loss_weight=0.25), + loss_traj=dict(type='L1Loss', loss_weight=0.2), + loss_traj_cls=dict( + 
type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=0.2), + loss_iou=dict(type='GIoULoss', loss_weight=0.0), + loss_map_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=2.0), + loss_map_bbox=dict(type='L1Loss', loss_weight=0.0), + loss_map_iou=dict(type='GIoULoss', loss_weight=0.0), + loss_map_pts=dict(type='PtsL1Loss', loss_weight=1.0), + loss_map_dir=dict(type='PtsDirCosLoss', loss_weight=0.005), + loss_plan_reg=dict(type='L1Loss', loss_weight=1.0), + loss_plan_bound=dict(type='PlanMapBoundLoss', loss_weight=1.0, dis_thresh=1.0), + loss_plan_col=dict(type='PlanCollisionLoss', loss_weight=1.0), + loss_plan_dir=dict(type='PlanMapDirectionLoss', loss_weight=0.5)), + # model training and testing settings + train_cfg=dict(pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBox3DL1Cost', weight=0.25), + iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. + pc_range=point_cloud_range), + map_assigner=dict( + type='MapHungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBoxL1Cost', weight=0.0, box_format='xywh'), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=0.0), + pts_cost=dict(type='OrderedPtsL1Cost', weight=1.0), + pc_range=point_cloud_range)))) + +dataset_type = 'VADCustomNuScenesDataset' +data_root = 'data/nuscenes/' +file_client_args = dict(backend='disk') + +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='PhotoMetricDistortionMultiViewImage'), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=True), + dict(type='CustomObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='CustomObjectNameFilter', classes=class_names), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='RandomScaleImageMultiViewImage', scales=[0.4]), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='CustomDefaultFormatBundle3D', class_names=class_names, with_ego=True), + dict(type='CustomCollect3D',\ + keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', 'ego_his_trajs', + 'ego_fut_trajs', 'ego_fut_masks', 'ego_fut_cmd', 'ego_lcf_feat', 'gt_attr_labels']) +] + +test_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=True), + dict(type='CustomObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='CustomObjectNameFilter', classes=class_names), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + # dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict(type='RandomScaleImageMultiViewImage', scales=[0.4]), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='CustomDefaultFormatBundle3D', class_names=class_names, with_label=False, with_ego=True), + dict(type='CustomCollect3D',\ + keys=['points', 'gt_bboxes_3d', 'gt_labels_3d', 'img', 'fut_valid_flag', + 'ego_his_trajs', 'ego_fut_trajs', 'ego_fut_masks', 'ego_fut_cmd', + 'ego_lcf_feat', 'gt_attr_labels'])]) +] + 
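The tiny variant's image branch above downsamples aggressively (scales=[0.4] versus 0.8 in the base config). A back-of-the-envelope check of the resulting network input size (editorial sketch; it assumes `RandomScaleImageMultiViewImage` floors to integer pixel sizes and `PadMultiViewImage` pads up to the divisor, which matches their names but is not verified against the implementation):

import math

def scaled_padded_shape(w, h, scale, divisor=32):
    sw, sh = int(w * scale), int(h * scale)           # 1600x900 -> 640x360 at scale 0.4
    pad = lambda v: math.ceil(v / divisor) * divisor  # pad each dim up to a multiple of 32
    return pad(sw), pad(sh)

print(scaled_padded_shape(1600, 900, 0.4))  # (640, 384)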
+inference_only_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict(type='RandomScaleImageMultiViewImage', scales=[0.8]), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='VADFormatBundle3D', class_names=class_names, with_label=False, with_ego=True), + dict(type='CustomCollect3D', keys=[ 'img', 'ego_fut_cmd'])]) +] + +data = dict( + samples_per_gpu=1, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'vad_nuscenes_infos_temporal_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + use_valid_flag=True, + bev_size=(bev_h_, bev_w_), + pc_range=point_cloud_range, + queue_length=queue_length, + map_classes=map_classes, + map_fixed_ptsnum_per_line=map_fixed_ptsnum_per_gt_line, + map_eval_use_same_gt_sample_num_flag=map_eval_use_same_gt_sample_num_flag, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR', + custom_eval_version='vad_nusc_detection_cvpr_2019'), + val=dict(type=dataset_type, + data_root=data_root, + pc_range=point_cloud_range, + ann_file=data_root + 'vad_nuscenes_infos_temporal_val.pkl', + pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), + classes=class_names, modality=input_modality, samples_per_gpu=1, + map_classes=map_classes, + map_ann_file=data_root + 'nuscenes_map_anns_val.json', + map_fixed_ptsnum_per_line=map_fixed_ptsnum_per_gt_line, + map_eval_use_same_gt_sample_num_flag=map_eval_use_same_gt_sample_num_flag, + use_pkl_result=True, + custom_eval_version='vad_nusc_detection_cvpr_2019'), + test=dict(type=dataset_type, + data_root=data_root, + pc_range=point_cloud_range, + ann_file=data_root + 'vad_nuscenes_infos_temporal_val.pkl', + pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), + classes=class_names, modality=input_modality, samples_per_gpu=1, + map_classes=map_classes, + map_ann_file=data_root + 'nuscenes_map_anns_val.json', + map_fixed_ptsnum_per_line=map_fixed_ptsnum_per_gt_line, + map_eval_use_same_gt_sample_num_flag=map_eval_use_same_gt_sample_num_flag, + use_pkl_result=True, + custom_eval_version='vad_nusc_detection_cvpr_2019'), + shuffler_sampler=dict(type='DistributedGroupSampler'), + nonshuffler_sampler=dict(type='DistributedSampler') +) + +optimizer = dict( + type='AdamW', + lr=2e-4, + paramwise_cfg=dict( + custom_keys={ + 'img_backbone': dict(lr_mult=0.1), + }), + weight_decay=0.01) + +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +# learning policy +lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + min_lr_ratio=1e-3) + +evaluation = dict(interval=total_epochs, pipeline=test_pipeline, metric='bbox', map_metric='chamfer') + +runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) + +log_config = dict( + interval=100, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook') + ]) +# fp16 = dict(loss_scale=512.) 
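`lr_config` above declares linear warmup followed by cosine annealing. The following standalone sketch is for intuition only: mmcv's LrUpdaterHook implements the real schedule (including its epoch/iteration granularity), and `total_steps` here is a placeholder, not a value from the config:

import math

def lr_at(step, total_steps, base_lr=2e-4,
          warmup_iters=500, warmup_ratio=1/3, min_lr_ratio=1e-3):
    if step < warmup_iters:
        # linear ramp from base_lr * warmup_ratio up to base_lr
        k = step / warmup_iters
        return base_lr * (warmup_ratio + (1 - warmup_ratio) * k)
    # cosine anneal from base_lr down to base_lr * min_lr_ratio
    t = (step - warmup_iters) / max(total_steps - warmup_iters, 1)
    min_lr = base_lr * min_lr_ratio
    return min_lr + 0.5 * (base_lr - min_lr) * (1 + math.cos(math.pi * t))

print(lr_at(0, 10000))      # ~6.7e-05 (base_lr / 3)
print(lr_at(500, 10000))    # 2e-04 (warmup finished)
print(lr_at(10000, 10000))  # 2e-07 (base_lr * min_lr_ratio)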
+# find_unused_parameters = True +checkpoint_config = dict(interval=1, max_keep_ckpts=total_epochs) + + +custom_hooks = [dict(type='CustomSetEpochInfoHook')] \ No newline at end of file diff --git a/adzoo/vad/configs/_base_/datasets/coco_instance.py b/adzoo/vad/configs/_base_/datasets/coco_instance.py new file mode 100644 index 0000000..f6ea4f4 --- /dev/null +++ b/adzoo/vad/configs/_base_/datasets/coco_instance.py @@ -0,0 +1,48 @@ +dataset_type = 'CocoDataset' +data_root = 'data/coco/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_train2017.json', + img_prefix=data_root + 'train2017/', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/instances_val2017.json', + img_prefix=data_root + 'val2017/', + pipeline=test_pipeline)) +evaluation = dict(metric=['bbox', 'segm']) diff --git a/adzoo/vad/configs/_base_/datasets/kitti-3d-3class.py b/adzoo/vad/configs/_base_/datasets/kitti-3d-3class.py new file mode 100644 index 0000000..1822af4 --- /dev/null +++ b/adzoo/vad/configs/_base_/datasets/kitti-3d-3class.py @@ -0,0 +1,140 @@ +# dataset settings +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' +class_names = ['Pedestrian', 'Cyclist', 'Car'] +point_cloud_range = [0, -40, -3, 70.4, 40, 1] +input_modality = dict(use_lidar=True, use_camera=False) +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict(Car=5, Pedestrian=10, Cyclist=10)), + classes=class_names, + sample_groups=dict(Car=12, Pedestrian=6, Cyclist=6)) + +file_client_args = dict(backend='disk') +# Uncomment the following if use ceph or other file clients. +# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient +# for more details. 
+# file_client_args = dict( +# backend='petrel', path_mapping=dict(data='s3://kitti_data/')) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + file_client_args=file_client_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + file_client_args=file_client_args), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='ObjectNoise', + num_try=100, + translation_std=[1.0, 1.0, 0.5], + global_rot_range=[0.0, 0.0], + rot_range=[-0.78539816, 0.78539816]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + file_client_args=file_client_args), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=6, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'kitti_infos_train.pkl', + split='training', + pts_prefix='velodyne_reduced', + pipeline=train_pipeline, + modality=input_modality, + classes=class_names, + test_mode=False, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
+ box_type_3d='LiDAR')), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'kitti_infos_val.pkl', + split='training', + pts_prefix='velodyne_reduced', + pipeline=test_pipeline, + modality=input_modality, + classes=class_names, + test_mode=True, + box_type_3d='LiDAR'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'kitti_infos_val.pkl', + split='training', + pts_prefix='velodyne_reduced', + pipeline=test_pipeline, + modality=input_modality, + classes=class_names, + test_mode=True, + box_type_3d='LiDAR')) + +evaluation = dict(interval=1, pipeline=eval_pipeline) diff --git a/adzoo/vad/configs/_base_/datasets/kitti-3d-car.py b/adzoo/vad/configs/_base_/datasets/kitti-3d-car.py new file mode 100644 index 0000000..1e81226 --- /dev/null +++ b/adzoo/vad/configs/_base_/datasets/kitti-3d-car.py @@ -0,0 +1,138 @@ +# dataset settings +dataset_type = 'KittiDataset' +data_root = 'data/kitti/' +class_names = ['Car'] +point_cloud_range = [0, -40, -3, 70.4, 40, 1] +input_modality = dict(use_lidar=True, use_camera=False) +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'kitti_dbinfos_train.pkl', + rate=1.0, + prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)), + classes=class_names, + sample_groups=dict(Car=15)) + +file_client_args = dict(backend='disk') +# Uncomment the following if use ceph or other file clients. +# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient +# for more details. +# file_client_args = dict( +# backend='petrel', path_mapping=dict(data='s3://kitti_data/')) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + file_client_args=file_client_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + file_client_args=file_client_args), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='ObjectNoise', + num_try=100, + translation_std=[1.0, 1.0, 0.5], + global_rot_range=[0.0, 0.0], + rot_range=[-0.78539816, 0.78539816]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. 
client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + file_client_args=file_client_args), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=6, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'kitti_infos_train.pkl', + split='training', + pts_prefix='velodyne_reduced', + pipeline=train_pipeline, + modality=input_modality, + classes=class_names, + test_mode=False, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR')), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'kitti_infos_val.pkl', + split='training', + pts_prefix='velodyne_reduced', + pipeline=test_pipeline, + modality=input_modality, + classes=class_names, + test_mode=True, + box_type_3d='LiDAR'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'kitti_infos_val.pkl', + split='training', + pts_prefix='velodyne_reduced', + pipeline=test_pipeline, + modality=input_modality, + classes=class_names, + test_mode=True, + box_type_3d='LiDAR')) + +evaluation = dict(interval=1, pipeline=eval_pipeline) diff --git a/adzoo/vad/configs/_base_/datasets/lyft-3d.py b/adzoo/vad/configs/_base_/datasets/lyft-3d.py new file mode 100644 index 0000000..71baff0 --- /dev/null +++ b/adzoo/vad/configs/_base_/datasets/lyft-3d.py @@ -0,0 +1,136 @@ +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-80, -80, -5, 80, 80, 3] +# For Lyft we usually do 9-class detection +class_names = [ + 'car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', 'motorcycle', + 'bicycle', 'pedestrian', 'animal' +] +dataset_type = 'LyftDataset' +data_root = 'data/lyft/' +# Input modality for Lyft dataset, this is consistent with the submission +# format which requires the information in input_modality. +input_modality = dict( + use_lidar=True, + use_camera=False, + use_radar=False, + use_map=False, + use_external=False) +file_client_args = dict(backend='disk') +# Uncomment the following if use ceph or other file clients. +# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient +# for more details. 
+# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/lyft/': 's3://lyft/lyft/', +# 'data/lyft/': 's3://lyft/lyft/' +# })) +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'lyft_infos_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'lyft_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'lyft_infos_test.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True)) +# For Lyft dataset, we usually evaluate the model at the end of training. +# Since the models are trained by 24 epochs by default, we set evaluation +# interval to be 24. Please change the interval accordingly if you do not +# use a default schedule. 
+evaluation = dict(interval=24, pipeline=eval_pipeline) diff --git a/adzoo/vad/configs/_base_/datasets/nuim_instance.py b/adzoo/vad/configs/_base_/datasets/nuim_instance.py new file mode 100644 index 0000000..82fce56 --- /dev/null +++ b/adzoo/vad/configs/_base_/datasets/nuim_instance.py @@ -0,0 +1,59 @@ +dataset_type = 'CocoDataset' +data_root = 'data/nuimages/' +class_names = [ + 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', + 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier' +] +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='Resize', + img_scale=[(1280, 720), (1920, 1080)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1600, 900), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + ann_file=data_root + 'annotations/nuimages_v1.0-train.json', + img_prefix=data_root, + classes=class_names, + pipeline=train_pipeline), + val=dict( + type=dataset_type, + ann_file=data_root + 'annotations/nuimages_v1.0-val.json', + img_prefix=data_root, + classes=class_names, + pipeline=test_pipeline), + test=dict( + type=dataset_type, + ann_file=data_root + 'annotations/nuimages_v1.0-val.json', + img_prefix=data_root, + classes=class_names, + pipeline=test_pipeline)) +evaluation = dict(metric=['bbox', 'segm']) diff --git a/adzoo/vad/configs/_base_/datasets/nus-3d.py b/adzoo/vad/configs/_base_/datasets/nus-3d.py new file mode 100644 index 0000000..1548171 --- /dev/null +++ b/adzoo/vad/configs/_base_/datasets/nus-3d.py @@ -0,0 +1,142 @@ +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-50, -50, -5, 50, 50, 3] +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', + 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier' +] +dataset_type = 'NuScenesDataset' +data_root = 'data/nuscenes/' +# Input modality for nuScenes dataset, this is consistent with the submission +# format which requires the information in input_modality. +input_modality = dict( + use_lidar=True, + use_camera=False, + use_radar=False, + use_map=False, + use_external=False) +file_client_args = dict(backend='disk') +# Uncomment the following if use ceph or other file clients. +# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient +# for more details. 
+# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/nuscenes/': 's3://nuscenes/nuscenes/', +# 'data/nuscenes/': 's3://nuscenes/nuscenes/' +# })) +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR'), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True, + box_type_3d='LiDAR'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True, + box_type_3d='LiDAR')) +# For nuScenes dataset, we usually evaluate the model at the end of training. +# Since the models are trained by 24 epochs by default, we set evaluation +# interval to be 24. Please change the interval accordingly if you do not +# use a default schedule. 
+evaluation = dict(interval=24, pipeline=eval_pipeline)
diff --git a/adzoo/vad/configs/_base_/datasets/nus-mono3d.py b/adzoo/vad/configs/_base_/datasets/nus-mono3d.py
new file mode 100644
index 0000000..1363a94
--- /dev/null
+++ b/adzoo/vad/configs/_base_/datasets/nus-mono3d.py
@@ -0,0 +1,100 @@
+dataset_type = 'CustomNuScenesMonoDataset'
+data_root = 'data/nuscenes/'
+class_names = [
+    'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle',
+    'motorcycle', 'pedestrian', 'traffic_cone', 'barrier'
+]
+# Input modality for the nuScenes dataset; this is consistent with the
+# submission format, which requires the information in input_modality.
+input_modality = dict(
+    use_lidar=False,
+    use_camera=True,
+    use_radar=False,
+    use_map=False,
+    use_external=False)
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFileMono3D'),
+    dict(
+        type='LoadAnnotations3D',
+        with_bbox=True,
+        with_label=True,
+        with_attr_label=True,
+        with_bbox_3d=True,
+        with_label_3d=True,
+        with_bbox_depth=True),
+    dict(type='Resize', img_scale=(1600, 900), keep_ratio=True),
+    dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle3D', class_names=class_names),
+    dict(
+        type='Collect3D',
+        keys=[
+            'img', 'gt_bboxes', 'gt_labels', 'attr_labels', 'gt_bboxes_3d',
+            'gt_labels_3d', 'centers2d', 'depths'
+        ]),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFileMono3D'),
+    dict(
+        type='MultiScaleFlipAug',
+        scale_factor=1.0,
+        flip=False,
+        transforms=[
+            dict(type='RandomFlip3D'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(
+                type='DefaultFormatBundle3D',
+                class_names=class_names,
+                with_label=False),
+            dict(type='Collect3D', keys=['img']),
+        ])
+]
+# construct a pipeline for data and gt loading in show function
+# please keep its loading function consistent with test_pipeline (e.g. client)
+eval_pipeline = [
+    dict(type='LoadImageFromFileMono3D'),
+    dict(
+        type='DefaultFormatBundle3D',
+        class_names=class_names,
+        with_label=False),
+    dict(type='Collect3D', keys=['img'])
+]
+
+data = dict(
+    samples_per_gpu=2,
+    workers_per_gpu=2,
+    train=dict(
+        type=dataset_type,
+        data_root=data_root,
+        ann_file=data_root + 'nuscenes_infos_train_mono3d.coco.json',
+        img_prefix=data_root,
+        classes=class_names,
+        pipeline=train_pipeline,
+        modality=input_modality,
+        test_mode=False,
+        box_type_3d='Camera'),
+    val=dict(
+        type=dataset_type,
+        data_root=data_root,
+        ann_file=data_root + 'nuscenes_infos_val_mono3d.coco.json',
+        img_prefix=data_root,
+        classes=class_names,
+        pipeline=test_pipeline,
+        modality=input_modality,
+        test_mode=True,
+        box_type_3d='Camera'),
+    test=dict(
+        type=dataset_type,
+        data_root=data_root,
+        ann_file=data_root + 'nuscenes_infos_val_mono3d.coco.json',
+        img_prefix=data_root,
+        classes=class_names,
+        pipeline=test_pipeline,
+        modality=input_modality,
+        test_mode=True,
+        box_type_3d='Camera'))
+evaluation = dict(interval=2)
diff --git a/adzoo/vad/configs/_base_/datasets/range100_lyft-3d.py b/adzoo/vad/configs/_base_/datasets/range100_lyft-3d.py
new file mode 100644
index 0000000..efa63ea
--- /dev/null
+++ b/adzoo/vad/configs/_base_/datasets/range100_lyft-3d.py
@@ -0,0 +1,136 @@
+# If point cloud range is changed, the models should also change their point
+# cloud range accordingly
+point_cloud_range = [-100, -100, -5, 100, 100, 3]
+# For Lyft we usually do 9-class detection
+class_names = [
+    'car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', 'motorcycle',
+    'bicycle', 'pedestrian', 'animal'
+]
+dataset_type = 'LyftDataset'
+data_root = 'data/lyft/'
+# Input modality for the Lyft dataset; this is consistent with the submission
+# format, which requires the information in input_modality.
+input_modality = dict(
+    use_lidar=True,
+    use_camera=False,
+    use_radar=False,
+    use_map=False,
+    use_external=False)
+file_client_args = dict(backend='disk')
+# Uncomment the following if you use Ceph or other file clients.
+# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
+# for more details.
+# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/lyft/': 's3://lyft/lyft/', +# 'data/lyft/': 's3://lyft/lyft/' +# })) +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'lyft_infos_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'lyft_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'lyft_infos_test.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True)) +# For Lyft dataset, we usually evaluate the model at the end of training. +# Since the models are trained by 24 epochs by default, we set evaluation +# interval to be 24. Please change the interval accordingly if you do not +# use a default schedule. 
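+# As above, eval_pipeline (rather than test_pipeline) is passed here so the
+# show/visualization functions can reload raw points without the
+# MultiScaleFlipAug3D wrapper.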
+evaluation = dict(interval=24, pipeline=eval_pipeline)
diff --git a/adzoo/vad/configs/_base_/datasets/s3dis-3d-5class.py b/adzoo/vad/configs/_base_/datasets/s3dis-3d-5class.py
new file mode 100644
index 0000000..2422766
--- /dev/null
+++ b/adzoo/vad/configs/_base_/datasets/s3dis-3d-5class.py
@@ -0,0 +1,114 @@
+# dataset settings
+dataset_type = 'S3DISDataset'
+data_root = './data/s3dis/'
+class_names = ('table', 'chair', 'sofa', 'bookcase', 'board')
+train_area = [1, 2, 3, 4, 6]
+test_area = 5
+
+train_pipeline = [
+    dict(
+        type='LoadPointsFromFile',
+        coord_type='DEPTH',
+        shift_height=True,
+        load_dim=6,
+        use_dim=[0, 1, 2, 3, 4, 5]),
+    dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
+    dict(type='PointSample', num_points=40000),
+    dict(
+        type='RandomFlip3D',
+        sync_2d=False,
+        flip_ratio_bev_horizontal=0.5,
+        flip_ratio_bev_vertical=0.5),
+    dict(
+        type='GlobalRotScaleTrans',
+        # Following the ScanNet dataset, the rotation range is 5 degrees.
+        rot_range=[-0.087266, 0.087266],
+        scale_ratio_range=[1.0, 1.0],
+        shift_height=True),
+    dict(type='DefaultFormatBundle3D', class_names=class_names),
+    dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
+]
+test_pipeline = [
+    dict(
+        type='LoadPointsFromFile',
+        coord_type='DEPTH',
+        shift_height=True,
+        load_dim=6,
+        use_dim=[0, 1, 2, 3, 4, 5]),
+    dict(
+        type='MultiScaleFlipAug3D',
+        img_scale=(1333, 800),
+        pts_scale_ratio=1,
+        flip=False,
+        transforms=[
+            dict(
+                type='GlobalRotScaleTrans',
+                rot_range=[0, 0],
+                scale_ratio_range=[1., 1.],
+                translation_std=[0, 0, 0]),
+            dict(
+                type='RandomFlip3D',
+                sync_2d=False,
+                flip_ratio_bev_horizontal=0.5,
+                flip_ratio_bev_vertical=0.5),
+            dict(type='PointSample', num_points=40000),
+            dict(
+                type='DefaultFormatBundle3D',
+                class_names=class_names,
+                with_label=False),
+            dict(type='Collect3D', keys=['points'])
+        ])
+]
+# construct a pipeline for data and gt loading in show function
+# please keep its loading function consistent with test_pipeline (e.g.
client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=8, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type='ConcatDataset', + datasets=[ + dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + f's3dis_infos_Area_{i}.pkl', + pipeline=train_pipeline, + filter_empty_gt=False, + classes=class_names, + box_type_3d='Depth') for i in train_area + ], + separate_eval=False)), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + f's3dis_infos_Area_{test_area}.pkl', + pipeline=test_pipeline, + classes=class_names, + test_mode=True, + box_type_3d='Depth'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + f's3dis_infos_Area_{test_area}.pkl', + pipeline=test_pipeline, + classes=class_names, + test_mode=True, + box_type_3d='Depth')) + +evaluation = dict(pipeline=eval_pipeline) diff --git a/adzoo/vad/configs/_base_/datasets/s3dis_seg-3d-13class.py b/adzoo/vad/configs/_base_/datasets/s3dis_seg-3d-13class.py new file mode 100644 index 0000000..39bf556 --- /dev/null +++ b/adzoo/vad/configs/_base_/datasets/s3dis_seg-3d-13class.py @@ -0,0 +1,139 @@ +# dataset settings +dataset_type = 'S3DISSegDataset' +data_root = './data/s3dis/' +class_names = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door', + 'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter') +num_points = 4096 +train_area = [1, 2, 3, 4, 6] +test_area = 5 +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True), + dict( + type='PointSegClassMapping', + valid_cat_ids=tuple(range(len(class_names))), + max_cat_id=13), + dict( + type='IndoorPatchPointSample', + num_points=num_points, + block_size=1.0, + ignore_index=len(class_names), + use_normalized_coord=True, + enlarge_size=0.2, + min_unique_num=None), + dict(type='NormalizePointsColor', color_mean=None), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'pts_semantic_mask']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict(type='NormalizePointsColor', color_mean=None), + dict( + # a wrapper in order to successfully call test function + # actually we don't perform test-time-aug + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.0, + flip_ratio_bev_vertical=0.0), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +# we need to load gt seg_mask! 
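+# Concretely: LoadAnnotations3D(with_seg_3d=True) plus PointSegClassMapping
+# below reproduce the per-point labels (pts_semantic_mask) that the
+# segmentation metrics are computed against; len(class_names) == 13 is
+# reserved as the ignore index.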
+eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True), + dict( + type='PointSegClassMapping', + valid_cat_ids=tuple(range(len(class_names))), + max_cat_id=13), + dict( + type='DefaultFormatBundle3D', + with_label=False, + class_names=class_names), + dict(type='Collect3D', keys=['points', 'pts_semantic_mask']) +] + +data = dict( + samples_per_gpu=8, + workers_per_gpu=4, + # train on area 1, 2, 3, 4, 6 + # test on area 5 + train=dict( + type=dataset_type, + data_root=data_root, + ann_files=[ + data_root + f's3dis_infos_Area_{i}.pkl' for i in train_area + ], + pipeline=train_pipeline, + classes=class_names, + test_mode=False, + ignore_index=len(class_names), + scene_idxs=[ + data_root + f'seg_info/Area_{i}_resampled_scene_idxs.npy' + for i in train_area + ]), + val=dict( + type=dataset_type, + data_root=data_root, + ann_files=data_root + f's3dis_infos_Area_{test_area}.pkl', + pipeline=test_pipeline, + classes=class_names, + test_mode=True, + ignore_index=len(class_names), + scene_idxs=data_root + + f'seg_info/Area_{test_area}_resampled_scene_idxs.npy'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_files=data_root + f's3dis_infos_Area_{test_area}.pkl', + pipeline=test_pipeline, + classes=class_names, + test_mode=True, + ignore_index=len(class_names))) + +evaluation = dict(pipeline=eval_pipeline) diff --git a/adzoo/vad/configs/_base_/datasets/scannet-3d-18class.py b/adzoo/vad/configs/_base_/datasets/scannet-3d-18class.py new file mode 100644 index 0000000..93da1e5 --- /dev/null +++ b/adzoo/vad/configs/_base_/datasets/scannet-3d-18class.py @@ -0,0 +1,128 @@ +# dataset settings +dataset_type = 'ScanNetDataset' +data_root = './data/scannet/' +class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', + 'bookshelf', 'picture', 'counter', 'desk', 'curtain', + 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', + 'garbagebin') +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_mask_3d=True, + with_seg_3d=True), + dict(type='GlobalAlignment', rotation_axis=2), + dict( + type='PointSegClassMapping', + valid_cat_ids=(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, + 36, 39), + max_cat_id=40), + dict(type='PointSample', num_points=40000), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.087266, 0.087266], + scale_ratio_range=[1.0, 1.0], + shift_height=True), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict( + type='Collect3D', + keys=[ + 'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask', + 'pts_instance_mask' + ]) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2]), + dict(type='GlobalAlignment', rotation_axis=2), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + 
flip_ratio_bev_vertical=0.5), + dict(type='PointSample', num_points=40000), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + load_dim=6, + use_dim=[0, 1, 2]), + dict(type='GlobalAlignment', rotation_axis=2), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=8, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'scannet_infos_train.pkl', + pipeline=train_pipeline, + filter_empty_gt=False, + classes=class_names, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='Depth')), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'scannet_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + test_mode=True, + box_type_3d='Depth'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'scannet_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + test_mode=True, + box_type_3d='Depth')) + +evaluation = dict(pipeline=eval_pipeline) diff --git a/adzoo/vad/configs/_base_/datasets/scannet_seg-3d-20class.py b/adzoo/vad/configs/_base_/datasets/scannet_seg-3d-20class.py new file mode 100644 index 0000000..cf73b09 --- /dev/null +++ b/adzoo/vad/configs/_base_/datasets/scannet_seg-3d-20class.py @@ -0,0 +1,132 @@ +# dataset settings +dataset_type = 'ScanNetSegDataset' +data_root = './data/scannet/' +class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', + 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', + 'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink', + 'bathtub', 'otherfurniture') +num_points = 8192 +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True), + dict( + type='PointSegClassMapping', + valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, + 33, 34, 36, 39), + max_cat_id=40), + dict( + type='IndoorPatchPointSample', + num_points=num_points, + block_size=1.5, + ignore_index=len(class_names), + use_normalized_coord=False, + enlarge_size=0.2, + min_unique_num=None), + dict(type='NormalizePointsColor', color_mean=None), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'pts_semantic_mask']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict(type='NormalizePointsColor', color_mean=None), + dict( + # a wrapper in order to successfully call test function + # actually we don't perform test-time-aug + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + 
type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.0, + flip_ratio_bev_vertical=0.0), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +# we need to load gt seg_mask! +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=False, + use_color=True, + load_dim=6, + use_dim=[0, 1, 2, 3, 4, 5]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=False, + with_label_3d=False, + with_mask_3d=False, + with_seg_3d=True), + dict( + type='PointSegClassMapping', + valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, + 33, 34, 36, 39), + max_cat_id=40), + dict( + type='DefaultFormatBundle3D', + with_label=False, + class_names=class_names), + dict(type='Collect3D', keys=['points', 'pts_semantic_mask']) +] + +data = dict( + samples_per_gpu=8, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'scannet_infos_train.pkl', + pipeline=train_pipeline, + classes=class_names, + test_mode=False, + ignore_index=len(class_names), + scene_idxs=data_root + 'seg_info/train_resampled_scene_idxs.npy'), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'scannet_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + test_mode=True, + ignore_index=len(class_names)), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'scannet_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + test_mode=True, + ignore_index=len(class_names))) + +evaluation = dict(pipeline=eval_pipeline) diff --git a/adzoo/vad/configs/_base_/datasets/sunrgbd-3d-10class.py b/adzoo/vad/configs/_base_/datasets/sunrgbd-3d-10class.py new file mode 100644 index 0000000..7121b75 --- /dev/null +++ b/adzoo/vad/configs/_base_/datasets/sunrgbd-3d-10class.py @@ -0,0 +1,107 @@ +dataset_type = 'SUNRGBDDataset' +data_root = 'data/sunrgbd/' +class_names = ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser', + 'night_stand', 'bookshelf', 'bathtub') +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2]), + dict(type='LoadAnnotations3D'), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + ), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.523599, 0.523599], + scale_ratio_range=[0.85, 1.15], + shift_height=True), + dict(type='PointSample', num_points=20000), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='DEPTH', + shift_height=True, + load_dim=6, + use_dim=[0, 1, 2]), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + ), + dict(type='PointSample', num_points=20000), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function 
consistent with test_pipeline (e.g. client)
+eval_pipeline = [
+    dict(
+        type='LoadPointsFromFile',
+        coord_type='DEPTH',
+        shift_height=False,
+        load_dim=6,
+        use_dim=[0, 1, 2]),
+    dict(
+        type='DefaultFormatBundle3D',
+        class_names=class_names,
+        with_label=False),
+    dict(type='Collect3D', keys=['points'])
+]
+
+data = dict(
+    samples_per_gpu=16,
+    workers_per_gpu=4,
+    train=dict(
+        type='RepeatDataset',
+        times=5,
+        dataset=dict(
+            type=dataset_type,
+            data_root=data_root,
+            ann_file=data_root + 'sunrgbd_infos_train.pkl',
+            pipeline=train_pipeline,
+            classes=class_names,
+            filter_empty_gt=False,
+            # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
+            # and box_type_3d='Depth' in sunrgbd and scannet dataset.
+            box_type_3d='Depth')),
+    val=dict(
+        type=dataset_type,
+        data_root=data_root,
+        ann_file=data_root + 'sunrgbd_infos_val.pkl',
+        pipeline=test_pipeline,
+        classes=class_names,
+        test_mode=True,
+        box_type_3d='Depth'),
+    test=dict(
+        type=dataset_type,
+        data_root=data_root,
+        ann_file=data_root + 'sunrgbd_infos_val.pkl',
+        pipeline=test_pipeline,
+        classes=class_names,
+        test_mode=True,
+        box_type_3d='Depth'))
+
+evaluation = dict(pipeline=eval_pipeline)
diff --git a/adzoo/vad/configs/_base_/datasets/waymoD5-3d-3class.py b/adzoo/vad/configs/_base_/datasets/waymoD5-3d-3class.py
new file mode 100644
index 0000000..920ac15
--- /dev/null
+++ b/adzoo/vad/configs/_base_/datasets/waymoD5-3d-3class.py
@@ -0,0 +1,145 @@
+# dataset settings
+# D5 in the config name means the whole dataset is divided into 5 folds
+# We only use one fold for efficient experiments
+dataset_type = 'LidarWaymoDataset'
+data_root = 'data/waymo-full/kitti_format/'
+file_client_args = dict(backend='disk')
+# Uncomment the following if you use Ceph or other file clients.
+# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
+# for more details.
+# file_client_args = dict( +# backend='petrel', path_mapping=dict(data='s3://waymo_data/')) + +class_names = ['Car', 'Pedestrian', 'Cyclist'] +point_cloud_range = [-74.88, -74.88, -2, 74.88, 74.88, 4] +input_modality = dict(use_lidar=True, use_camera=False) +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'waymo_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict(Car=5, Pedestrian=10, Cyclist=10)), + classes=class_names, + sample_groups=dict(Car=15, Pedestrian=10, Cyclist=10), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=[0, 1, 2, 3, 4], + file_client_args=file_client_args)) + +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + file_client_args=file_client_args), + dict(type='ObjectSample', db_sampler=db_sampler), + dict( + type='RandomFlip3D', + sync_2d=False, + flip_ratio_bev_horizontal=0.5, + flip_ratio_bev_vertical=0.5), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05]), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + file_client_args=file_client_args), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=2, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'waymo_infos_train.pkl', + split='training', + pipeline=train_pipeline, + modality=input_modality, + classes=class_names, + test_mode=False, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
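+            # Waymo is consumed in KITTI format here (see data_root above),
+            # so the LiDAR box convention applies to it as well.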
+            box_type_3d='LiDAR',
+            # load one frame every five frames
+            load_interval=5)),
+    val=dict(
+        type=dataset_type,
+        data_root=data_root,
+        ann_file=data_root + 'waymo_infos_val.pkl',
+        split='training',
+        pipeline=test_pipeline,
+        modality=input_modality,
+        classes=class_names,
+        test_mode=True,
+        box_type_3d='LiDAR'),
+    test=dict(
+        type=dataset_type,
+        data_root=data_root,
+        ann_file=data_root + 'waymo_infos_val.pkl',
+        split='training',
+        pipeline=test_pipeline,
+        modality=input_modality,
+        classes=class_names,
+        test_mode=True,
+        box_type_3d='LiDAR'))
+
+evaluation = dict(interval=24, pipeline=eval_pipeline)
diff --git a/adzoo/vad/configs/_base_/datasets/waymoD5-3d-car.py b/adzoo/vad/configs/_base_/datasets/waymoD5-3d-car.py
new file mode 100644
index 0000000..02e2627
--- /dev/null
+++ b/adzoo/vad/configs/_base_/datasets/waymoD5-3d-car.py
@@ -0,0 +1,143 @@
+# dataset settings
+# D5 in the config name means the whole dataset is divided into 5 folds
+# We only use one fold for efficient experiments
+dataset_type = 'WaymoDataset'
+data_root = 'data/waymo/kitti_format/'
+file_client_args = dict(backend='disk')
+# Uncomment the following if you use Ceph or other file clients.
+# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
+# for more details.
+# file_client_args = dict(
+#     backend='petrel', path_mapping=dict(data='s3://waymo_data/'))
+
+class_names = ['Car']
+point_cloud_range = [-74.88, -74.88, -2, 74.88, 74.88, 4]
+input_modality = dict(use_lidar=True, use_camera=False)
+db_sampler = dict(
+    data_root=data_root,
+    info_path=data_root + 'waymo_dbinfos_train.pkl',
+    rate=1.0,
+    prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)),
+    classes=class_names,
+    sample_groups=dict(Car=15),
+    points_loader=dict(
+        type='LoadPointsFromFile',
+        coord_type='LIDAR',
+        load_dim=5,
+        use_dim=[0, 1, 2, 3, 4],
+        file_client_args=file_client_args))
+
+train_pipeline = [
+    dict(
+        type='LoadPointsFromFile',
+        coord_type='LIDAR',
+        load_dim=6,
+        use_dim=5,
+        file_client_args=file_client_args),
+    dict(
+        type='LoadAnnotations3D',
+        with_bbox_3d=True,
+        with_label_3d=True,
+        file_client_args=file_client_args),
+    dict(type='ObjectSample', db_sampler=db_sampler),
+    dict(
+        type='RandomFlip3D',
+        sync_2d=False,
+        flip_ratio_bev_horizontal=0.5,
+        flip_ratio_bev_vertical=0.5),
+    dict(
+        type='GlobalRotScaleTrans',
+        rot_range=[-0.78539816, 0.78539816],
+        scale_ratio_range=[0.95, 1.05]),
+    dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
+    dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
+    dict(type='PointShuffle'),
+    dict(type='DefaultFormatBundle3D', class_names=class_names),
+    dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
+]
+test_pipeline = [
+    dict(
+        type='LoadPointsFromFile',
+        coord_type='LIDAR',
+        load_dim=6,
+        use_dim=5,
+        file_client_args=file_client_args),
+    dict(
+        type='MultiScaleFlipAug3D',
+        img_scale=(1333, 800),
+        pts_scale_ratio=1,
+        flip=False,
+        transforms=[
+            dict(
+                type='GlobalRotScaleTrans',
+                rot_range=[0, 0],
+                scale_ratio_range=[1., 1.],
+                translation_std=[0, 0, 0]),
+            dict(type='RandomFlip3D'),
+            dict(
+                type='PointsRangeFilter', point_cloud_range=point_cloud_range),
+            dict(
+                type='DefaultFormatBundle3D',
+                class_names=class_names,
+                with_label=False),
+            dict(type='Collect3D', keys=['points'])
+        ])
+]
+# construct a pipeline for data and gt loading in show function
+# please keep its loading function consistent with test_pipeline (e.g. client)
+eval_pipeline = [
+    dict(
+        type='LoadPointsFromFile',
+        coord_type='LIDAR',
+        load_dim=6,
+        use_dim=5,
+        file_client_args=file_client_args),
+    dict(
+        type='DefaultFormatBundle3D',
+        class_names=class_names,
+        with_label=False),
+    dict(type='Collect3D', keys=['points'])
+]
+
+data = dict(
+    samples_per_gpu=2,
+    workers_per_gpu=4,
+    train=dict(
+        type='RepeatDataset',
+        times=2,
+        dataset=dict(
+            type=dataset_type,
+            data_root=data_root,
+            ann_file=data_root + 'waymo_infos_train.pkl',
+            split='training',
+            pipeline=train_pipeline,
+            modality=input_modality,
+            classes=class_names,
+            test_mode=False,
+            # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
+            # and box_type_3d='Depth' in sunrgbd and scannet dataset.
+            box_type_3d='LiDAR',
+            # load one frame every five frames
+            load_interval=5)),
+    val=dict(
+        type=dataset_type,
+        data_root=data_root,
+        ann_file=data_root + 'waymo_infos_val.pkl',
+        split='training',
+        pipeline=test_pipeline,
+        modality=input_modality,
+        classes=class_names,
+        test_mode=True,
+        box_type_3d='LiDAR'),
+    test=dict(
+        type=dataset_type,
+        data_root=data_root,
+        ann_file=data_root + 'waymo_infos_val.pkl',
+        split='training',
+        pipeline=test_pipeline,
+        modality=input_modality,
+        classes=class_names,
+        test_mode=True,
+        box_type_3d='LiDAR'))
+
+evaluation = dict(interval=24, pipeline=eval_pipeline)
diff --git a/adzoo/vad/configs/_base_/default_runtime.py b/adzoo/vad/configs/_base_/default_runtime.py
new file mode 100644
index 0000000..4e85b69
--- /dev/null
+++ b/adzoo/vad/configs/_base_/default_runtime.py
@@ -0,0 +1,18 @@
+checkpoint_config = dict(interval=1)
+# yapf:disable
+# By default we use the text logger hook and TensorBoard.
+# For more loggers see
+# https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.LoggerHook
+log_config = dict(
+    interval=50,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHook')
+    ])
+# yapf:enable
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = None
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
diff --git a/adzoo/vad/configs/_base_/models/3dssd.py b/adzoo/vad/configs/_base_/models/3dssd.py
new file mode 100644
index 0000000..55344c7
--- /dev/null
+++ b/adzoo/vad/configs/_base_/models/3dssd.py
@@ -0,0 +1,77 @@
+model = dict(
+    type='SSD3DNet',
+    backbone=dict(
+        type='PointNet2SAMSG',
+        in_channels=4,
+        num_points=(4096, 512, (256, 256)),
+        radii=((0.2, 0.4, 0.8), (0.4, 0.8, 1.6), (1.6, 3.2, 4.8)),
+        num_samples=((32, 32, 64), (32, 32, 64), (32, 32, 32)),
+        sa_channels=(((16, 16, 32), (16, 16, 32), (32, 32, 64)),
+                     ((64, 64, 128), (64, 64, 128), (64, 96, 128)),
+                     ((128, 128, 256), (128, 192, 256), (128, 256, 256))),
+        aggregation_channels=(64, 128, 256),
+        fps_mods=(('D-FPS'), ('FS'), ('F-FPS', 'D-FPS')),
+        fps_sample_range_lists=((-1), (-1), (512, -1)),
+        norm_cfg=dict(type='BN2d', eps=1e-3, momentum=0.1),
+        sa_cfg=dict(
+            type='PointSAModuleMSG',
+            pool_mod='max',
+            use_xyz=True,
+            normalize_xyz=False)),
+    bbox_head=dict(
+        type='SSD3DHead',
+        in_channels=256,
+        vote_module_cfg=dict(
+            in_channels=256,
+            num_points=256,
+            gt_per_seed=1,
+            conv_channels=(128, ),
+            conv_cfg=dict(type='Conv1d'),
+            norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.1),
+            with_res_feat=False,
+            vote_xyz_range=(3.0, 3.0, 2.0)),
+        vote_aggregation_cfg=dict(
+            type='PointSAModuleMSG',
+            num_point=256,
+            radii=(4.8, 6.4),
+            sample_nums=(16, 32),
+            mlp_channels=((256, 256, 256, 512), (256, 256, 512, 1024)),
+            norm_cfg=dict(type='BN2d', eps=1e-3, momentum=0.1),
+            use_xyz=True,
normalize_xyz=False, + bias=True), + pred_layer_cfg=dict( + in_channels=1536, + shared_conv_channels=(512, 128), + cls_conv_channels=(128, ), + reg_conv_channels=(128, ), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.1), + bias=True), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.1), + objectness_loss=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0), + center_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=1.0), + dir_class_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), + dir_res_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=1.0), + size_res_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=1.0), + corner_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=1.0), + vote_loss=dict(type='SmoothL1Loss', reduction='sum', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + sample_mod='spec', pos_distance_thr=10.0, expand_dims_length=0.05), + test_cfg=dict( + nms_cfg=dict(type='nms', iou_thr=0.1), + sample_mod='spec', + score_thr=0.0, + per_class_proposal=True, + max_output_num=100)) diff --git a/adzoo/vad/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py b/adzoo/vad/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py new file mode 100644 index 0000000..fb9e0a8 --- /dev/null +++ b/adzoo/vad/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py @@ -0,0 +1,200 @@ +# model settings +model = dict( + type='CascadeRCNN', + pretrained='torchvision://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), + roi_head=dict( + type='CascadeRoIHead', + num_stages=3, + stage_loss_weights=[1, 0.5, 0.25], + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + 
roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ], + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=2000, + max_num=2000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=[ + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.6, + neg_iou_thr=0.6, + min_pos_iou=0.6, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False), + dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.7, + min_pos_iou=0.7, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False) + ]), + test_cfg=dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) diff --git a/adzoo/vad/configs/_base_/models/centerpoint_01voxel_second_secfpn_nus.py b/adzoo/vad/configs/_base_/models/centerpoint_01voxel_second_secfpn_nus.py new file mode 100644 index 0000000..efdce59 --- /dev/null +++ b/adzoo/vad/configs/_base_/models/centerpoint_01voxel_second_secfpn_nus.py @@ -0,0 +1,83 @@ +voxel_size = [0.1, 0.1, 0.2] +model = dict( + type='CenterPoint', + pts_voxel_layer=dict( + max_num_points=10, voxel_size=voxel_size, max_voxels=(90000, 120000)), + pts_voxel_encoder=dict(type='HardSimpleVFE', num_features=5), + pts_middle_encoder=dict( + type='SparseEncoder', + in_channels=5, + sparse_shape=[41, 1024, 1024], + output_channels=128, + order=('conv', 'norm', 'act'), + encoder_channels=((16, 16, 32), (32, 32, 64), (64, 64, 128), (128, + 128)), + encoder_paddings=((0, 0, 1), (0, 0, 1), (0, 0, [0, 1, 1]), (0, 0)), + block_type='basicblock'), + pts_backbone=dict( + type='SECOND', + in_channels=256, + out_channels=[128, 256], + layer_nums=[5, 5], + layer_strides=[1, 2], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + 
conv_cfg=dict(type='Conv2d', bias=False)), + pts_neck=dict( + type='SECONDFPN', + in_channels=[128, 256], + out_channels=[256, 256], + upsample_strides=[1, 2], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + upsample_cfg=dict(type='deconv', bias=False), + use_conv_for_no_stride=True), + pts_bbox_head=dict( + type='CenterHead', + in_channels=sum([256, 256]), + tasks=[ + dict(num_class=1, class_names=['car']), + dict(num_class=2, class_names=['truck', 'construction_vehicle']), + dict(num_class=2, class_names=['bus', 'trailer']), + dict(num_class=1, class_names=['barrier']), + dict(num_class=2, class_names=['motorcycle', 'bicycle']), + dict(num_class=2, class_names=['pedestrian', 'traffic_cone']), + ], + common_heads=dict( + reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)), + share_conv_channel=64, + bbox_coder=dict( + type='CenterPointBBoxCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + max_num=500, + score_threshold=0.1, + out_size_factor=8, + voxel_size=voxel_size[:2], + code_size=9), + separate_head=dict( + type='SeparateHead', init_bias=-2.19, final_kernel=3), + loss_cls=dict(type='GaussianFocalLoss', reduction='mean'), + loss_bbox=dict(type='L1Loss', reduction='mean', loss_weight=0.25), + norm_bbox=True), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[1024, 1024, 40], + voxel_size=voxel_size, + out_size_factor=8, + dense_reg=1, + gaussian_overlap=0.1, + max_objs=500, + min_radius=2, + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2])), + test_cfg=dict( + pts=dict( + post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + max_per_img=500, + max_pool_nms=False, + min_radius=[4, 12, 10, 1, 0.85, 0.175], + score_threshold=0.1, + out_size_factor=8, + voxel_size=voxel_size[:2], + nms_type='rotate', + pre_max_size=1000, + post_max_size=83, + nms_thr=0.2))) diff --git a/adzoo/vad/configs/_base_/models/centerpoint_02pillar_second_secfpn_nus.py b/adzoo/vad/configs/_base_/models/centerpoint_02pillar_second_secfpn_nus.py new file mode 100644 index 0000000..311d763 --- /dev/null +++ b/adzoo/vad/configs/_base_/models/centerpoint_02pillar_second_secfpn_nus.py @@ -0,0 +1,83 @@ +voxel_size = [0.2, 0.2, 8] +model = dict( + type='CenterPoint', + pts_voxel_layer=dict( + max_num_points=20, voxel_size=voxel_size, max_voxels=(30000, 40000)), + pts_voxel_encoder=dict( + type='PillarFeatureNet', + in_channels=5, + feat_channels=[64], + with_distance=False, + voxel_size=(0.2, 0.2, 8), + norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01), + legacy=False), + pts_middle_encoder=dict( + type='PointPillarsScatter', in_channels=64, output_shape=(512, 512)), + pts_backbone=dict( + type='SECOND', + in_channels=64, + out_channels=[64, 128, 256], + layer_nums=[3, 5, 5], + layer_strides=[2, 2, 2], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + conv_cfg=dict(type='Conv2d', bias=False)), + pts_neck=dict( + type='SECONDFPN', + in_channels=[64, 128, 256], + out_channels=[128, 128, 128], + upsample_strides=[0.5, 1, 2], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + upsample_cfg=dict(type='deconv', bias=False), + use_conv_for_no_stride=True), + pts_bbox_head=dict( + type='CenterHead', + in_channels=sum([128, 128, 128]), + tasks=[ + dict(num_class=1, class_names=['car']), + dict(num_class=2, class_names=['truck', 'construction_vehicle']), + dict(num_class=2, class_names=['bus', 'trailer']), + dict(num_class=1, class_names=['barrier']), + dict(num_class=2, class_names=['motorcycle', 'bicycle']), + 
dict(num_class=2, class_names=['pedestrian', 'traffic_cone']), + ], + common_heads=dict( + reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)), + share_conv_channel=64, + bbox_coder=dict( + type='CenterPointBBoxCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + max_num=500, + score_threshold=0.1, + out_size_factor=4, + voxel_size=voxel_size[:2], + code_size=9), + separate_head=dict( + type='SeparateHead', init_bias=-2.19, final_kernel=3), + loss_cls=dict(type='GaussianFocalLoss', reduction='mean'), + loss_bbox=dict(type='L1Loss', reduction='mean', loss_weight=0.25), + norm_bbox=True), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + out_size_factor=4, + dense_reg=1, + gaussian_overlap=0.1, + max_objs=500, + min_radius=2, + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2])), + test_cfg=dict( + pts=dict( + post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + max_per_img=500, + max_pool_nms=False, + min_radius=[4, 12, 10, 1, 0.85, 0.175], + score_threshold=0.1, + pc_range=[-51.2, -51.2], + out_size_factor=4, + voxel_size=voxel_size[:2], + nms_type='rotate', + pre_max_size=1000, + post_max_size=83, + nms_thr=0.2))) diff --git a/adzoo/vad/configs/_base_/models/fcos3d.py b/adzoo/vad/configs/_base_/models/fcos3d.py new file mode 100644 index 0000000..92ea907 --- /dev/null +++ b/adzoo/vad/configs/_base_/models/fcos3d.py @@ -0,0 +1,74 @@ +model = dict( + type='FCOSMono3D', + pretrained='open-mmlab://detectron2/resnet101_caffe', + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_output', + num_outs=5, + relu_before_extra_convs=True), + bbox_head=dict( + type='FCOSMono3DHead', + num_classes=10, + in_channels=256, + stacked_convs=2, + feat_channels=256, + use_direction_classifier=True, + diff_rad_by_sin=True, + pred_attrs=True, + pred_velo=True, + dir_offset=0.7854, # pi/4 + strides=[8, 16, 32, 64, 128], + group_reg_dims=(2, 1, 3, 1, 2), # offset, depth, size, rot, velo + cls_branch=(256, ), + reg_branch=( + (256, ), # offset + (256, ), # depth + (256, ), # size + (256, ), # rot + () # velo + ), + dir_branch=(256, ), + attr_branch=(256, ), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_attr=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_centerness=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + norm_on_bbox=True, + centerness_on_reg=True, + center_sampling=True, + conv_bias=True, + dcn_on_last_conv=True), + train_cfg=dict( + allowed_border=0, + code_weight=[1.0, 1.0, 0.2, 1.0, 1.0, 1.0, 1.0, 0.05, 0.05], + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_pre=1000, + nms_thr=0.8, + score_thr=0.05, + min_bbox_size=0, + max_per_img=200)) diff --git a/adzoo/vad/configs/_base_/models/groupfree3d.py b/adzoo/vad/configs/_base_/models/groupfree3d.py new file mode 100644 index 0000000..077d049 --- /dev/null +++ b/adzoo/vad/configs/_base_/models/groupfree3d.py @@ -0,0 +1,71 @@ +model = dict( + 
type='GroupFree3DNet', + backbone=dict( + type='PointNet2SASSG', + in_channels=3, + num_points=(2048, 1024, 512, 256), + radius=(0.2, 0.4, 0.8, 1.2), + num_samples=(64, 32, 16, 16), + sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256), + (128, 128, 256)), + fp_channels=((256, 256), (256, 288)), + norm_cfg=dict(type='BN2d'), + sa_cfg=dict( + type='PointSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=True)), + bbox_head=dict( + type='GroupFree3DHead', + in_channels=288, + num_decoder_layers=6, + num_proposal=256, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='GroupFree3DMHA', + embed_dims=288, + num_heads=8, + attn_drop=0.1, + dropout_layer=dict(type='Dropout', drop_prob=0.1)), + ffn_cfgs=dict( + embed_dims=288, + feedforward_channels=2048, + ffn_drop=0.1, + act_cfg=dict(type='ReLU', inplace=True)), + operation_order=('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', + 'norm')), + pred_layer_cfg=dict( + in_channels=288, shared_conv_channels=(288, 288), bias=True), + sampling_objectness_loss=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=8.0), + objectness_loss=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + center_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=10.0), + dir_class_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), + dir_res_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=10.0), + size_class_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), + size_res_loss=dict( + type='SmoothL1Loss', beta=1.0, reduction='sum', loss_weight=10.0), + semantic_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(sample_mod='kps'), + test_cfg=dict( + sample_mod='kps', + nms_thr=0.25, + score_thr=0.0, + per_class_proposal=True, + prediction_stages='last')) diff --git a/adzoo/vad/configs/_base_/models/h3dnet.py b/adzoo/vad/configs/_base_/models/h3dnet.py new file mode 100644 index 0000000..7605667 --- /dev/null +++ b/adzoo/vad/configs/_base_/models/h3dnet.py @@ -0,0 +1,341 @@ +primitive_z_cfg = dict( + type='PrimitiveHead', + num_dims=2, + num_classes=18, + primitive_mode='z', + upper_thresh=100.0, + surface_thresh=0.5, + vote_module_cfg=dict( + in_channels=256, + vote_per_seed=1, + gt_per_seed=1, + conv_channels=(256, 256), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + norm_feats=True, + vote_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='none', + loss_dst_weight=10.0)), + vote_aggregation_cfg=dict( + type='PointSAModule', + num_point=1024, + radius=0.3, + num_sample=16, + mlp_channels=[256, 128, 128, 128], + use_xyz=True, + normalize_xyz=True), + feat_channels=(128, 128), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + objectness_loss=dict( + type='CrossEntropyLoss', + class_weight=[0.4, 0.6], + reduction='mean', + loss_weight=30.0), + center_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='sum', + loss_src_weight=0.5, + loss_dst_weight=0.5), + semantic_reg_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='sum', + loss_src_weight=0.5, + loss_dst_weight=0.5), + semantic_cls_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), + train_cfg=dict( + dist_thresh=0.2, + var_thresh=1e-2, + lower_thresh=1e-6, + num_point=100, + num_point_line=10, + line_thresh=0.2)) + +primitive_xy_cfg = dict( + type='PrimitiveHead', 
+ num_dims=1, + num_classes=18, + primitive_mode='xy', + upper_thresh=100.0, + surface_thresh=0.5, + vote_module_cfg=dict( + in_channels=256, + vote_per_seed=1, + gt_per_seed=1, + conv_channels=(256, 256), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + norm_feats=True, + vote_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='none', + loss_dst_weight=10.0)), + vote_aggregation_cfg=dict( + type='PointSAModule', + num_point=1024, + radius=0.3, + num_sample=16, + mlp_channels=[256, 128, 128, 128], + use_xyz=True, + normalize_xyz=True), + feat_channels=(128, 128), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + objectness_loss=dict( + type='CrossEntropyLoss', + class_weight=[0.4, 0.6], + reduction='mean', + loss_weight=30.0), + center_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='sum', + loss_src_weight=0.5, + loss_dst_weight=0.5), + semantic_reg_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='sum', + loss_src_weight=0.5, + loss_dst_weight=0.5), + semantic_cls_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), + train_cfg=dict( + dist_thresh=0.2, + var_thresh=1e-2, + lower_thresh=1e-6, + num_point=100, + num_point_line=10, + line_thresh=0.2)) + +primitive_line_cfg = dict( + type='PrimitiveHead', + num_dims=0, + num_classes=18, + primitive_mode='line', + upper_thresh=100.0, + surface_thresh=0.5, + vote_module_cfg=dict( + in_channels=256, + vote_per_seed=1, + gt_per_seed=1, + conv_channels=(256, 256), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + norm_feats=True, + vote_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='none', + loss_dst_weight=10.0)), + vote_aggregation_cfg=dict( + type='PointSAModule', + num_point=1024, + radius=0.3, + num_sample=16, + mlp_channels=[256, 128, 128, 128], + use_xyz=True, + normalize_xyz=True), + feat_channels=(128, 128), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + objectness_loss=dict( + type='CrossEntropyLoss', + class_weight=[0.4, 0.6], + reduction='mean', + loss_weight=30.0), + center_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='sum', + loss_src_weight=1.0, + loss_dst_weight=1.0), + semantic_reg_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='sum', + loss_src_weight=1.0, + loss_dst_weight=1.0), + semantic_cls_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=2.0), + train_cfg=dict( + dist_thresh=0.2, + var_thresh=1e-2, + lower_thresh=1e-6, + num_point=100, + num_point_line=10, + line_thresh=0.2)) + +model = dict( + type='H3DNet', + backbone=dict( + type='MultiBackbone', + num_streams=4, + suffixes=['net0', 'net1', 'net2', 'net3'], + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d', eps=1e-5, momentum=0.01), + act_cfg=dict(type='ReLU'), + backbones=dict( + type='PointNet2SASSG', + in_channels=4, + num_points=(2048, 1024, 512, 256), + radius=(0.2, 0.4, 0.8, 1.2), + num_samples=(64, 32, 16, 16), + sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256), + (128, 128, 256)), + fp_channels=((256, 256), (256, 256)), + norm_cfg=dict(type='BN2d'), + sa_cfg=dict( + type='PointSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=True))), + rpn_head=dict( + type='VoteHead', + vote_module_cfg=dict( + in_channels=256, + vote_per_seed=1, + gt_per_seed=3, + conv_channels=(256, 256), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + norm_feats=True, + vote_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='none', + 
loss_dst_weight=10.0)), + vote_aggregation_cfg=dict( + type='PointSAModule', + num_point=256, + radius=0.3, + num_sample=16, + mlp_channels=[256, 128, 128, 128], + use_xyz=True, + normalize_xyz=True), + pred_layer_cfg=dict( + in_channels=128, shared_conv_channels=(128, 128), bias=True), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + objectness_loss=dict( + type='CrossEntropyLoss', + class_weight=[0.2, 0.8], + reduction='sum', + loss_weight=5.0), + center_loss=dict( + type='ChamferDistance', + mode='l2', + reduction='sum', + loss_src_weight=10.0, + loss_dst_weight=10.0), + dir_class_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), + dir_res_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=10.0), + size_class_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), + size_res_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=10.0), + semantic_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0)), + roi_head=dict( + type='H3DRoIHead', + primitive_list=[primitive_z_cfg, primitive_xy_cfg, primitive_line_cfg], + bbox_head=dict( + type='H3DBboxHead', + gt_per_seed=3, + num_proposal=256, + suface_matching_cfg=dict( + type='PointSAModule', + num_point=256 * 6, + radius=0.5, + num_sample=32, + mlp_channels=[128 + 6, 128, 64, 32], + use_xyz=True, + normalize_xyz=True), + line_matching_cfg=dict( + type='PointSAModule', + num_point=256 * 12, + radius=0.5, + num_sample=32, + mlp_channels=[128 + 12, 128, 64, 32], + use_xyz=True, + normalize_xyz=True), + feat_channels=(128, 128), + primitive_refine_channels=[128, 128, 128], + upper_thresh=100.0, + surface_thresh=0.5, + line_thresh=0.5, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + objectness_loss=dict( + type='CrossEntropyLoss', + class_weight=[0.2, 0.8], + reduction='sum', + loss_weight=5.0), + center_loss=dict( + type='ChamferDistance', + mode='l2', + reduction='sum', + loss_src_weight=10.0, + loss_dst_weight=10.0), + dir_class_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=0.1), + dir_res_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=10.0), + size_class_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=0.1), + size_res_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=10.0), + semantic_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=0.1), + cues_objectness_loss=dict( + type='CrossEntropyLoss', + class_weight=[0.3, 0.7], + reduction='mean', + loss_weight=5.0), + cues_semantic_loss=dict( + type='CrossEntropyLoss', + class_weight=[0.3, 0.7], + reduction='mean', + loss_weight=5.0), + proposal_objectness_loss=dict( + type='CrossEntropyLoss', + class_weight=[0.2, 0.8], + reduction='none', + loss_weight=5.0), + primitive_center_loss=dict( + type='MSELoss', reduction='none', loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + pos_distance_thr=0.3, neg_distance_thr=0.6, sample_mod='vote'), + rpn_proposal=dict(use_nms=False), + rcnn=dict( + pos_distance_thr=0.3, + neg_distance_thr=0.6, + sample_mod='vote', + far_threshold=0.6, + near_threshold=0.3, + mask_surface_threshold=0.3, + label_surface_threshold=0.3, + mask_line_threshold=0.3, + label_line_threshold=0.3)), + test_cfg=dict( + rpn=dict( + sample_mod='seed', + nms_thr=0.25, + score_thr=0.05, + per_class_proposal=True, + use_nms=False), + rcnn=dict( + sample_mod='seed', + nms_thr=0.25, + score_thr=0.05, + per_class_proposal=True))) diff --git 
a/adzoo/vad/configs/_base_/models/hv_pointpillars_fpn_lyft.py b/adzoo/vad/configs/_base_/models/hv_pointpillars_fpn_lyft.py new file mode 100644 index 0000000..87c7fe0 --- /dev/null +++ b/adzoo/vad/configs/_base_/models/hv_pointpillars_fpn_lyft.py @@ -0,0 +1,22 @@ +_base_ = './hv_pointpillars_fpn_nus.py' + +# model settings (based on nuScenes model settings) +# Voxel size for voxel encoder +# Usually voxel size is changed consistently with the point cloud range +# If point cloud range is modified, do remember to change all related +# keys in the config. +model = dict( + pts_voxel_layer=dict( + max_num_points=20, + point_cloud_range=[-80, -80, -5, 80, 80, 3], + max_voxels=(60000, 60000)), + pts_voxel_encoder=dict( + feat_channels=[64], point_cloud_range=[-80, -80, -5, 80, 80, 3]), + pts_middle_encoder=dict(output_shape=[640, 640]), + pts_bbox_head=dict( + num_classes=9, + anchor_generator=dict( + ranges=[[-80, -80, -1.8, 80, 80, -1.8]], custom_values=[]), + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7)), + # model training settings (based on nuScenes model settings) + train_cfg=dict(pts=dict(code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]))) diff --git a/adzoo/vad/configs/_base_/models/hv_pointpillars_fpn_nus.py b/adzoo/vad/configs/_base_/models/hv_pointpillars_fpn_nus.py new file mode 100644 index 0000000..e153f6c --- /dev/null +++ b/adzoo/vad/configs/_base_/models/hv_pointpillars_fpn_nus.py @@ -0,0 +1,96 @@ +# model settings +# Voxel size for voxel encoder +# Usually voxel size is changed consistently with the point cloud range +# If point cloud range is modified, do remember to change all related +# keys in the config. +voxel_size = [0.25, 0.25, 8] +model = dict( + type='MVXFasterRCNN', + pts_voxel_layer=dict( + max_num_points=64, + point_cloud_range=[-50, -50, -5, 50, 50, 3], + voxel_size=voxel_size, + max_voxels=(30000, 40000)), + pts_voxel_encoder=dict( + type='HardVFE', + in_channels=4, + feat_channels=[64, 64], + with_distance=False, + voxel_size=voxel_size, + with_cluster_center=True, + with_voxel_center=True, + point_cloud_range=[-50, -50, -5, 50, 50, 3], + norm_cfg=dict(type='naiveSyncBN1d', eps=1e-3, momentum=0.01)), + pts_middle_encoder=dict( + type='PointPillarsScatter', in_channels=64, output_shape=[400, 400]), + pts_backbone=dict( + type='SECOND', + in_channels=64, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + layer_nums=[3, 5, 5], + layer_strides=[2, 2, 2], + out_channels=[64, 128, 256]), + pts_neck=dict( + type='FPN', + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + act_cfg=dict(type='ReLU'), + in_channels=[64, 128, 256], + out_channels=256, + start_level=0, + num_outs=3), + pts_bbox_head=dict( + type='Anchor3DHead', + num_classes=10, + in_channels=256, + feat_channels=256, + use_direction_classifier=True, + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[-50, -50, -1.8, 50, 50, -1.8]], + scales=[1, 2, 4], + sizes=[ + [0.8660, 2.5981, 1.], # 1.5/sqrt(3) + [0.5774, 1.7321, 1.], # 1/sqrt(3) + [1., 1., 1.], + [0.4, 0.4, 1], + ], + custom_values=[0, 0], + rotations=[0, 1.57], + reshape_out=True), + assigner_per_size=False, + diff_rad_by_sin=True, + dir_offset=0.7854, # pi/4 + dir_limit_offset=0, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=9), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='CrossEntropyLoss', use_sigmoid=False, 
loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + pts=dict( + assigner=dict( + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + allowed_border=0, + code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2], + pos_weight=-1, + debug=False)), + test_cfg=dict( + pts=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_pre=1000, + nms_thr=0.2, + score_thr=0.05, + min_bbox_size=0, + max_num=500))) diff --git a/adzoo/vad/configs/_base_/models/hv_pointpillars_fpn_range100_lyft.py b/adzoo/vad/configs/_base_/models/hv_pointpillars_fpn_range100_lyft.py new file mode 100644 index 0000000..9cd200f --- /dev/null +++ b/adzoo/vad/configs/_base_/models/hv_pointpillars_fpn_range100_lyft.py @@ -0,0 +1,22 @@ +_base_ = './hv_pointpillars_fpn_nus.py' + +# model settings (based on nuScenes model settings) +# Voxel size for voxel encoder +# Usually voxel size is changed consistently with the point cloud range +# If point cloud range is modified, do remember to change all related +# keys in the config. +model = dict( + pts_voxel_layer=dict( + max_num_points=20, + point_cloud_range=[-100, -100, -5, 100, 100, 3], + max_voxels=(60000, 60000)), + pts_voxel_encoder=dict( + feat_channels=[64], point_cloud_range=[-100, -100, -5, 100, 100, 3]), + pts_middle_encoder=dict(output_shape=[800, 800]), + pts_bbox_head=dict( + num_classes=9, + anchor_generator=dict( + ranges=[[-100, -100, -1.8, 100, 100, -1.8]], custom_values=[]), + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7)), + # model training settings (based on nuScenes model settings) + train_cfg=dict(pts=dict(code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]))) diff --git a/adzoo/vad/configs/_base_/models/hv_pointpillars_secfpn_kitti.py b/adzoo/vad/configs/_base_/models/hv_pointpillars_secfpn_kitti.py new file mode 100644 index 0000000..85076d0 --- /dev/null +++ b/adzoo/vad/configs/_base_/models/hv_pointpillars_secfpn_kitti.py @@ -0,0 +1,93 @@ +voxel_size = [0.16, 0.16, 4] + +model = dict( + type='VoxelNet', + voxel_layer=dict( + max_num_points=32, # max_points_per_voxel + point_cloud_range=[0, -39.68, -3, 69.12, 39.68, 1], + voxel_size=voxel_size, + max_voxels=(16000, 40000) # (training, testing) max_voxels + ), + voxel_encoder=dict( + type='PillarFeatureNet', + in_channels=4, + feat_channels=[64], + with_distance=False, + voxel_size=voxel_size, + point_cloud_range=[0, -39.68, -3, 69.12, 39.68, 1]), + middle_encoder=dict( + type='PointPillarsScatter', in_channels=64, output_shape=[496, 432]), + backbone=dict( + type='SECOND', + in_channels=64, + layer_nums=[3, 5, 5], + layer_strides=[2, 2, 2], + out_channels=[64, 128, 256]), + neck=dict( + type='SECONDFPN', + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=384, + feat_channels=384, + use_direction_classifier=True, + anchor_generator=dict( + type='Anchor3DRangeGenerator', + ranges=[ + [0, -39.68, -0.6, 70.4, 39.68, -0.6], + [0, -39.68, -0.6, 70.4, 39.68, -0.6], + [0, -39.68, -1.78, 70.4, 39.68, -1.78], + ], + sizes=[[0.6, 0.8, 1.73], [0.6, 1.76, 1.73], [1.6, 3.9, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, 
loss_weight=2.0), + loss_dir=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + assigner=[ + dict( # for Pedestrian + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Cyclist + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Car + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + ], + allowed_border=0, + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_thr=0.01, + score_thr=0.1, + min_bbox_size=0, + nms_pre=100, + max_num=50)) diff --git a/adzoo/vad/configs/_base_/models/hv_pointpillars_secfpn_waymo.py b/adzoo/vad/configs/_base_/models/hv_pointpillars_secfpn_waymo.py new file mode 100644 index 0000000..14873ea --- /dev/null +++ b/adzoo/vad/configs/_base_/models/hv_pointpillars_secfpn_waymo.py @@ -0,0 +1,108 @@ +# model settings +# Voxel size for voxel encoder +# Usually voxel size is changed consistently with the point cloud range +# If point cloud range is modified, do remember to change all related +# keys in the config. +voxel_size = [0.32, 0.32, 6] +model = dict( + type='MVXFasterRCNN', + pts_voxel_layer=dict( + max_num_points=20, + point_cloud_range=[-74.88, -74.88, -2, 74.88, 74.88, 4], + voxel_size=voxel_size, + max_voxels=(32000, 32000)), + pts_voxel_encoder=dict( + type='HardVFE', + in_channels=5, + feat_channels=[64], + with_distance=False, + voxel_size=voxel_size, + with_cluster_center=True, + with_voxel_center=True, + point_cloud_range=[-74.88, -74.88, -2, 74.88, 74.88, 4], + norm_cfg=dict(type='naiveSyncBN1d', eps=1e-3, momentum=0.01)), + pts_middle_encoder=dict( + type='PointPillarsScatter', in_channels=64, output_shape=[468, 468]), + pts_backbone=dict( + type='SECOND', + in_channels=64, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + layer_nums=[3, 5, 5], + layer_strides=[1, 2, 2], + out_channels=[64, 128, 256]), + pts_neck=dict( + type='SECONDFPN', + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + in_channels=[64, 128, 256], + upsample_strides=[1, 2, 4], + out_channels=[128, 128, 128]), + pts_bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=384, + feat_channels=384, + use_direction_classifier=True, + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[-74.88, -74.88, -0.0345, 74.88, 74.88, -0.0345], + [-74.88, -74.88, -0.1188, 74.88, 74.88, -0.1188], + [-74.88, -74.88, 0, 74.88, 74.88, 0]], + sizes=[ + [2.08, 4.73, 1.77], # car + [0.84, 1.81, 1.77], # cyclist + [0.84, 0.91, 1.74] # pedestrian + ], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + dir_offset=0.7854, # pi/4 + dir_limit_offset=0, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + pts=dict( + assigner=[ + dict( # car + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.55, 
+ neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + dict( # cyclist + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + dict( # pedestrian + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + ], + allowed_border=0, + code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + pos_weight=-1, + debug=False)), + test_cfg=dict( + pts=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_pre=4096, + nms_thr=0.25, + score_thr=0.1, + min_bbox_size=0, + max_num=500))) diff --git a/adzoo/vad/configs/_base_/models/hv_second_secfpn_kitti.py b/adzoo/vad/configs/_base_/models/hv_second_secfpn_kitti.py new file mode 100644 index 0000000..6bf18ab --- /dev/null +++ b/adzoo/vad/configs/_base_/models/hv_second_secfpn_kitti.py @@ -0,0 +1,89 @@ +voxel_size = [0.05, 0.05, 0.1] + +model = dict( + type='VoxelNet', + voxel_layer=dict( + max_num_points=5, + point_cloud_range=[0, -40, -3, 70.4, 40, 1], + voxel_size=voxel_size, + max_voxels=(16000, 40000)), + voxel_encoder=dict(type='HardSimpleVFE'), + middle_encoder=dict( + type='SparseEncoder', + in_channels=4, + sparse_shape=[41, 1600, 1408], + order=('conv', 'norm', 'act')), + backbone=dict( + type='SECOND', + in_channels=256, + layer_nums=[5, 5], + layer_strides=[1, 2], + out_channels=[128, 256]), + neck=dict( + type='SECONDFPN', + in_channels=[128, 256], + upsample_strides=[1, 2], + out_channels=[256, 256]), + bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=512, + feat_channels=512, + use_direction_classifier=True, + anchor_generator=dict( + type='Anchor3DRangeGenerator', + ranges=[ + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -1.78, 70.4, 40.0, -1.78], + ], + sizes=[[0.6, 0.8, 1.73], [0.6, 1.76, 1.73], [1.6, 3.9, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + assigner=[ + dict( # for Pedestrian + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.35, + neg_iou_thr=0.2, + min_pos_iou=0.2, + ignore_iof_thr=-1), + dict( # for Cyclist + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.35, + neg_iou_thr=0.2, + min_pos_iou=0.2, + ignore_iof_thr=-1), + dict( # for Car + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1), + ], + allowed_border=0, + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_thr=0.01, + score_thr=0.1, + min_bbox_size=0, + nms_pre=100, + max_num=50)) diff --git a/adzoo/vad/configs/_base_/models/hv_second_secfpn_waymo.py b/adzoo/vad/configs/_base_/models/hv_second_secfpn_waymo.py new file mode 100644 index 0000000..eb9bd3a --- /dev/null +++ b/adzoo/vad/configs/_base_/models/hv_second_secfpn_waymo.py @@ -0,0 +1,100 @@ +# model settings +# Voxel size for voxel encoder +# Usually voxel size is changed consistently with the point cloud 
range +# If point cloud range is modified, do remember to change all related +# keys in the config. +voxel_size = [0.08, 0.08, 0.1] +model = dict( + type='VoxelNet', + voxel_layer=dict( + max_num_points=10, + point_cloud_range=[-76.8, -51.2, -2, 76.8, 51.2, 4], + voxel_size=voxel_size, + max_voxels=(80000, 90000)), + voxel_encoder=dict(type='HardSimpleVFE', num_features=5), + middle_encoder=dict( + type='SparseEncoder', + in_channels=5, + sparse_shape=[61, 1280, 1920], + order=('conv', 'norm', 'act')), + backbone=dict( + type='SECOND', + in_channels=384, + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + layer_nums=[5, 5], + layer_strides=[1, 2], + out_channels=[128, 256]), + neck=dict( + type='SECONDFPN', + norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + in_channels=[128, 256], + upsample_strides=[1, 2], + out_channels=[256, 256]), + bbox_head=dict( + type='Anchor3DHead', + num_classes=3, + in_channels=512, + feat_channels=512, + use_direction_classifier=True, + anchor_generator=dict( + type='AlignedAnchor3DRangeGenerator', + ranges=[[-76.8, -51.2, -0.0345, 76.8, 51.2, -0.0345], + [-76.8, -51.2, 0, 76.8, 51.2, 0], + [-76.8, -51.2, -0.1188, 76.8, 51.2, -0.1188]], + sizes=[ + [2.08, 4.73, 1.77], # car + [0.84, 0.91, 1.74], # pedestrian + [0.84, 1.81, 1.77] # cyclist + ], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + dir_offset=0.7854, # pi/4 + dir_limit_offset=0, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), + # model training and testing settings + train_cfg=dict( + assigner=[ + dict( # car + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.55, + neg_iou_thr=0.4, + min_pos_iou=0.4, + ignore_iof_thr=-1), + dict( # pedestrian + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1), + dict( # cyclist + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.3, + min_pos_iou=0.3, + ignore_iof_thr=-1) + ], + allowed_border=0, + code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + pos_weight=-1, + debug=False), + test_cfg=dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_pre=4096, + nms_thr=0.25, + score_thr=0.1, + min_bbox_size=0, + max_num=500)) diff --git a/adzoo/vad/configs/_base_/models/imvotenet_image.py b/adzoo/vad/configs/_base_/models/imvotenet_image.py new file mode 100644 index 0000000..981f8bc --- /dev/null +++ b/adzoo/vad/configs/_base_/models/imvotenet_image.py @@ -0,0 +1,108 @@ +model = dict( + type='ImVoteNet', + img_backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + style='caffe'), + img_neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + img_rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + 
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + img_roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=10, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0))), + + # model training and testing settings + train_cfg=dict( + img_rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + img_rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + img_rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=False, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + pos_weight=-1, + debug=False)), + test_cfg=dict( + img_rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + img_rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))) diff --git a/adzoo/vad/configs/_base_/models/mask_rcnn_r50_fpn.py b/adzoo/vad/configs/_base_/models/mask_rcnn_r50_fpn.py new file mode 100644 index 0000000..c5d5e32 --- /dev/null +++ b/adzoo/vad/configs/_base_/models/mask_rcnn_r50_fpn.py @@ -0,0 +1,124 @@ +# model settings +model = dict( + type='MaskRCNN', + pretrained='torchvision://resnet50', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + roi_head=dict( + type='StandardRoIHead', + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=False, 
loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + mask_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + mask_head=dict( + type='FCNMaskHead', + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=80, + loss_mask=dict( + type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.7, + neg_iou_thr=0.3, + min_pos_iou=0.3, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=256, + pos_fraction=0.5, + neg_pos_ub=-1, + add_gt_as_proposals=False), + allowed_border=-1, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_across_levels=False, + nms_pre=2000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0.5, + match_low_quality=True, + ignore_iof_thr=-1), + sampler=dict( + type='RandomSampler', + num=512, + pos_fraction=0.25, + neg_pos_ub=-1, + add_gt_as_proposals=True), + mask_size=28, + pos_weight=-1, + debug=False)), + test_cfg=dict( + rpn=dict( + nms_across_levels=False, + nms_pre=1000, + nms_post=1000, + max_num=1000, + nms_thr=0.7, + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100, + mask_thr_binary=0.5))) diff --git a/adzoo/vad/configs/_base_/models/paconv_cuda_ssg.py b/adzoo/vad/configs/_base_/models/paconv_cuda_ssg.py new file mode 100644 index 0000000..f513bd4 --- /dev/null +++ b/adzoo/vad/configs/_base_/models/paconv_cuda_ssg.py @@ -0,0 +1,7 @@ +_base_ = './paconv_ssg.py' + +model = dict( + backbone=dict( + sa_cfg=dict( + type='PAConvCUDASAModule', + scorenet_cfg=dict(mlp_channels=[8, 16, 16])))) diff --git a/adzoo/vad/configs/_base_/models/paconv_ssg.py b/adzoo/vad/configs/_base_/models/paconv_ssg.py new file mode 100644 index 0000000..1d4f1ed --- /dev/null +++ b/adzoo/vad/configs/_base_/models/paconv_ssg.py @@ -0,0 +1,49 @@ +# model settings +model = dict( + type='EncoderDecoder3D', + backbone=dict( + type='PointNet2SASSG', + in_channels=9, # [xyz, rgb, normalized_xyz] + num_points=(1024, 256, 64, 16), + radius=(None, None, None, None), # use kNN instead of ball query + num_samples=(32, 32, 32, 32), + sa_channels=((32, 32, 64), (64, 64, 128), (128, 128, 256), (256, 256, + 512)), + fp_channels=(), + norm_cfg=dict(type='BN2d', momentum=0.1), + sa_cfg=dict( + type='PAConvSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=False, + paconv_num_kernels=[16, 16, 16], + paconv_kernel_input='w_neighbor', + scorenet_input='w_neighbor_dist', + scorenet_cfg=dict( + mlp_channels=[16, 16, 16], + score_norm='softmax', + temp_factor=1.0, + last_bn=False))), + decode_head=dict( + type='PAConvHead', + # PAConv model's decoder takes skip connections from the backbone; + # unlike PointNet++, it also concatenates input features in the last + # level of the decoder, leading to `128 + 6` as the channel number + fp_channels=((768, 256, 256), (384, 256, 256), (320, 256, 128), + (128 + 6, 128, 128, 128)), + channels=128, + dropout_ratio=0.5, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='ReLU'), + loss_decode=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + class_weight=None, # should be modified with dataset + loss_weight=1.0)), + #
correlation loss to regularize PAConv's kernel weights + loss_regularization=dict( + type='PAConvRegularizationLoss', reduction='sum', loss_weight=10.0), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='slide')) diff --git a/adzoo/vad/configs/_base_/models/parta2.py b/adzoo/vad/configs/_base_/models/parta2.py new file mode 100644 index 0000000..6c5ae9a --- /dev/null +++ b/adzoo/vad/configs/_base_/models/parta2.py @@ -0,0 +1,201 @@ +# model settings +voxel_size = [0.05, 0.05, 0.1] +point_cloud_range = [0, -40, -3, 70.4, 40, 1] + +model = dict( + type='PartA2', + voxel_layer=dict( + max_num_points=5, # max_points_per_voxel + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + max_voxels=(16000, 40000) # (training, testing) max_voxels + ), + voxel_encoder=dict(type='HardSimpleVFE'), + middle_encoder=dict( + type='SparseUNet', + in_channels=4, + sparse_shape=[41, 1600, 1408], + order=('conv', 'norm', 'act')), + backbone=dict( + type='SECOND', + in_channels=256, + layer_nums=[5, 5], + layer_strides=[1, 2], + out_channels=[128, 256]), + neck=dict( + type='SECONDFPN', + in_channels=[128, 256], + upsample_strides=[1, 2], + out_channels=[256, 256]), + rpn_head=dict( + type='PartA2RPNHead', + num_classes=3, + in_channels=512, + feat_channels=512, + use_direction_classifier=True, + anchor_generator=dict( + type='Anchor3DRangeGenerator', + ranges=[[0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -1.78, 70.4, 40.0, -1.78]], + sizes=[[0.6, 0.8, 1.73], [0.6, 1.76, 1.73], [1.6, 3.9, 1.56]], + rotations=[0, 1.57], + reshape_out=False), + diff_rad_by_sin=True, + assigner_per_size=True, + assign_per_class=True, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), + roi_head=dict( + type='PartAggregationROIHead', + num_classes=3, + semantic_head=dict( + type='PointwiseSemanticHead', + in_channels=16, + extra_width=0.2, + seg_score_thr=0.3, + num_classes=3, + loss_seg=dict( + type='FocalLoss', + use_sigmoid=True, + reduction='sum', + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_part=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + seg_roi_extractor=dict( + type='Single3DRoIAwareExtractor', + roi_layer=dict( + type='RoIAwarePool3d', + out_size=14, + max_pts_per_voxel=128, + mode='max')), + part_roi_extractor=dict( + type='Single3DRoIAwareExtractor', + roi_layer=dict( + type='RoIAwarePool3d', + out_size=14, + max_pts_per_voxel=128, + mode='avg')), + bbox_head=dict( + type='PartA2BboxHead', + num_classes=3, + seg_in_channels=16, + part_in_channels=4, + seg_conv_channels=[64, 64], + part_conv_channels=[64, 64], + merge_conv_channels=[128, 128], + down_conv_channels=[128, 256], + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + shared_fc_channels=[256, 512, 512, 512], + cls_channels=[256, 256], + reg_channels=[256, 256], + dropout_ratio=0.1, + roi_feat_size=14, + with_corner_loss=True, + loss_bbox=dict( + type='SmoothL1Loss', + beta=1.0 / 9.0, + reduction='sum', + loss_weight=1.0), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='sum', + loss_weight=1.0))), + # model training and testing settings + train_cfg=dict( + rpn=dict( + assigner=[ + dict( # for Pedestrian + type='MaxIoUAssigner', + 
iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Cyclist + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.5, + neg_iou_thr=0.35, + min_pos_iou=0.35, + ignore_iof_thr=-1), + dict( # for Car + type='MaxIoUAssigner', + iou_calculator=dict(type='BboxOverlapsNearest3D'), + pos_iou_thr=0.6, + neg_iou_thr=0.45, + min_pos_iou=0.45, + ignore_iof_thr=-1) + ], + allowed_border=0, + pos_weight=-1, + debug=False), + rpn_proposal=dict( + nms_pre=9000, + nms_post=512, + max_num=512, + nms_thr=0.8, + score_thr=0, + use_rotate_nms=False), + rcnn=dict( + assigner=[ + dict( # for Pedestrian + type='MaxIoUAssigner', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1), + dict( # for Cyclist + type='MaxIoUAssigner', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1), + dict( # for Car + type='MaxIoUAssigner', + iou_calculator=dict( + type='BboxOverlaps3D', coordinate='lidar'), + pos_iou_thr=0.55, + neg_iou_thr=0.55, + min_pos_iou=0.55, + ignore_iof_thr=-1) + ], + sampler=dict( + type='IoUNegPiecewiseSampler', + num=128, + pos_fraction=0.55, + neg_piece_fractions=[0.8, 0.2], + neg_iou_piece_thrs=[0.55, 0.1], + neg_pos_ub=-1, + add_gt_as_proposals=False, + return_iou=True), + cls_pos_thr=0.75, + cls_neg_thr=0.25)), + test_cfg=dict( + rpn=dict( + nms_pre=1024, + nms_post=100, + max_num=100, + nms_thr=0.7, + score_thr=0, + use_rotate_nms=True), + rcnn=dict( + use_rotate_nms=True, + use_raw_score=True, + nms_thr=0.01, + score_thr=0.1))) diff --git a/adzoo/vad/configs/_base_/models/pointnet2_msg.py b/adzoo/vad/configs/_base_/models/pointnet2_msg.py new file mode 100644 index 0000000..222ab88 --- /dev/null +++ b/adzoo/vad/configs/_base_/models/pointnet2_msg.py @@ -0,0 +1,28 @@ +_base_ = './pointnet2_ssg.py' + +# model settings +model = dict( + backbone=dict( + _delete_=True, + type='PointNet2SAMSG', + in_channels=6, # [xyz, rgb], should be modified with dataset + num_points=(1024, 256, 64, 16), + radii=((0.05, 0.1), (0.1, 0.2), (0.2, 0.4), (0.4, 0.8)), + num_samples=((16, 32), (16, 32), (16, 32), (16, 32)), + sa_channels=(((16, 16, 32), (32, 32, 64)), ((64, 64, 128), (64, 96, + 128)), + ((128, 196, 256), (128, 196, 256)), ((256, 256, 512), + (256, 384, 512))), + aggregation_channels=(None, None, None, None), + fps_mods=(('D-FPS'), ('D-FPS'), ('D-FPS'), ('D-FPS')), + fps_sample_range_lists=((-1), (-1), (-1), (-1)), + dilated_group=(False, False, False, False), + out_indices=(0, 1, 2, 3), + sa_cfg=dict( + type='PointSAModuleMSG', + pool_mod='max', + use_xyz=True, + normalize_xyz=False)), + decode_head=dict( + fp_channels=((1536, 256, 256), (512, 256, 256), (352, 256, 128), + (128, 128, 128, 128)))) diff --git a/adzoo/vad/configs/_base_/models/pointnet2_ssg.py b/adzoo/vad/configs/_base_/models/pointnet2_ssg.py new file mode 100644 index 0000000..58b4c24 --- /dev/null +++ b/adzoo/vad/configs/_base_/models/pointnet2_ssg.py @@ -0,0 +1,35 @@ +# model settings +model = dict( + type='EncoderDecoder3D', + backbone=dict( + type='PointNet2SASSG', + in_channels=6, # [xyz, rgb], should be modified with dataset + num_points=(1024, 256, 64, 16), + radius=(0.1, 0.2, 0.4, 0.8), + num_samples=(32, 32, 32, 32), + sa_channels=((32, 32, 64), (64, 64, 128), (128, 128, 256), (256, 256, + 512)), + 
fp_channels=(), + norm_cfg=dict(type='BN2d'), + sa_cfg=dict( + type='PointSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=False)), + decode_head=dict( + type='PointNet2Head', + fp_channels=((768, 256, 256), (384, 256, 256), (320, 256, 128), + (128, 128, 128, 128)), + channels=128, + dropout_ratio=0.5, + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='ReLU'), + loss_decode=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + class_weight=None, # should be modified with dataset + loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='slide')) diff --git a/adzoo/vad/configs/_base_/models/votenet.py b/adzoo/vad/configs/_base_/models/votenet.py new file mode 100644 index 0000000..129339d --- /dev/null +++ b/adzoo/vad/configs/_base_/models/votenet.py @@ -0,0 +1,73 @@ +model = dict( + type='VoteNet', + backbone=dict( + type='PointNet2SASSG', + in_channels=4, + num_points=(2048, 1024, 512, 256), + radius=(0.2, 0.4, 0.8, 1.2), + num_samples=(64, 32, 16, 16), + sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256), + (128, 128, 256)), + fp_channels=((256, 256), (256, 256)), + norm_cfg=dict(type='BN2d'), + sa_cfg=dict( + type='PointSAModule', + pool_mod='max', + use_xyz=True, + normalize_xyz=True)), + bbox_head=dict( + type='VoteHead', + vote_module_cfg=dict( + in_channels=256, + vote_per_seed=1, + gt_per_seed=3, + conv_channels=(256, 256), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + norm_feats=True, + vote_loss=dict( + type='ChamferDistance', + mode='l1', + reduction='none', + loss_dst_weight=10.0)), + vote_aggregation_cfg=dict( + type='PointSAModule', + num_point=256, + radius=0.3, + num_sample=16, + mlp_channels=[256, 128, 128, 128], + use_xyz=True, + normalize_xyz=True), + pred_layer_cfg=dict( + in_channels=128, shared_conv_channels=(128, 128), bias=True), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + objectness_loss=dict( + type='CrossEntropyLoss', + class_weight=[0.2, 0.8], + reduction='sum', + loss_weight=5.0), + center_loss=dict( + type='ChamferDistance', + mode='l2', + reduction='sum', + loss_src_weight=10.0, + loss_dst_weight=10.0), + dir_class_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), + dir_res_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=10.0), + size_class_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), + size_res_loss=dict( + type='SmoothL1Loss', reduction='sum', loss_weight=10.0 / 3.0), + semantic_loss=dict( + type='CrossEntropyLoss', reduction='sum', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + pos_distance_thr=0.3, neg_distance_thr=0.6, sample_mod='vote'), + test_cfg=dict( + sample_mod='seed', + nms_thr=0.25, + score_thr=0.05, + per_class_proposal=True)) diff --git a/adzoo/vad/configs/_base_/schedules/cosine.py b/adzoo/vad/configs/_base_/schedules/cosine.py new file mode 100644 index 0000000..69cb7df --- /dev/null +++ b/adzoo/vad/configs/_base_/schedules/cosine.py @@ -0,0 +1,20 @@ +# This schedule is mainly used by models with dynamic voxelization +# optimizer +lr = 0.003 # max learning rate +optimizer = dict( + type='AdamW', + lr=lr, + betas=(0.95, 0.99), # the momentum changes during training + weight_decay=0.001) +optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2)) + +lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=1000, + warmup_ratio=1.0 / 10, + min_lr_ratio=1e-5) +
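# A quick numeric reading of the schedule above (an illustrative sketch of
# how mmcv interprets these keys, not an extra config entry): warmup_ratio
# scales the starting LR of the linear warmup, and min_lr_ratio scales the
# final LR relative to the base lr, so
#
#     warmup start: lr * (1.0 / 10) = 0.003 * 0.1  = 3e-4   (first 1000 iters)
#     final LR:     lr * 1e-5       = 0.003 * 1e-5 = 3e-8   (end of training)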
+momentum_config = None + +runner = dict(type='EpochBasedRunner', max_epochs=40) diff --git a/adzoo/vad/configs/_base_/schedules/cyclic_20e.py b/adzoo/vad/configs/_base_/schedules/cyclic_20e.py new file mode 100644 index 0000000..704740e --- /dev/null +++ b/adzoo/vad/configs/_base_/schedules/cyclic_20e.py @@ -0,0 +1,24 @@ +# For nuScenes dataset, we usually evaluate the model at the end of training. +# Since the models are trained for 20 epochs by default, we set the evaluation +# interval to 20. Please change the interval accordingly if you do not +# use a default schedule. +# optimizer +# This schedule is mainly used by models on nuScenes dataset +optimizer = dict(type='AdamW', lr=1e-4, weight_decay=0.01) +# max_norm=10 is better for SECOND +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +lr_config = dict( + policy='cyclic', + target_ratio=(10, 1e-4), + cyclic_times=1, + step_ratio_up=0.4, +) +momentum_config = dict( + policy='cyclic', + target_ratio=(0.85 / 0.95, 1), + cyclic_times=1, + step_ratio_up=0.4, +) + +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=20) diff --git a/adzoo/vad/configs/_base_/schedules/cyclic_40e.py b/adzoo/vad/configs/_base_/schedules/cyclic_40e.py new file mode 100644 index 0000000..4a711ac --- /dev/null +++ b/adzoo/vad/configs/_base_/schedules/cyclic_40e.py @@ -0,0 +1,31 @@ +# This schedule is usually used by models trained on the KITTI dataset + +# The learning rate set in the cyclic schedule is the initial learning rate +# rather than the max learning rate. Since the target_ratio is (10, 1e-4), +# the learning rate will rise from 0.0018 to 0.018, then decay to 0.0018*1e-4 +lr = 0.0018 +# The optimizer follows the setting in SECOND.Pytorch, but here we use +# the official AdamW optimizer implemented by PyTorch.
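# Putting the comment above into numbers (an illustrative sketch, using the
# target_ratio and step_ratio_up defined in the lr_config below):
#
#     peak LR:  lr * 10   = 0.0018 * 10   = 0.018    (after the 40% "up" phase)
#     final LR: lr * 1e-4 = 0.0018 * 1e-4 = 1.8e-7   (end of the "down" phase)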
+optimizer = dict(type='AdamW', lr=lr, betas=(0.95, 0.99), weight_decay=0.01) +optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2)) +# We use a cyclic learning rate and momentum schedule following SECOND.Pytorch +# https://github.com/traveller59/second.pytorch/blob/3aba19c9688274f75ebb5e576f65cfe54773c021/torchplus/train/learning_schedules_fastai.py#L69 # noqa +# We implement them in mmcv, for more details, please refer to +# https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/lr_updater.py#L327 # noqa +# https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/momentum_updater.py#L130 # noqa +lr_config = dict( + policy='cyclic', + target_ratio=(10, 1e-4), + cyclic_times=1, + step_ratio_up=0.4, +) +momentum_config = dict( + policy='cyclic', + target_ratio=(0.85 / 0.95, 1), + cyclic_times=1, + step_ratio_up=0.4, +) +# Although the max_epochs is 40, this schedule is usually used with +# RepeatDataset with repeat ratio N, thus the actual max epoch +# number could be Nx40 +runner = dict(type='EpochBasedRunner', max_epochs=40) diff --git a/adzoo/vad/configs/_base_/schedules/mmdet_schedule_1x.py b/adzoo/vad/configs/_base_/schedules/mmdet_schedule_1x.py new file mode 100644 index 0000000..13b3783 --- /dev/null +++ b/adzoo/vad/configs/_base_/schedules/mmdet_schedule_1x.py @@ -0,0 +1,11 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=500, + warmup_ratio=0.001, + step=[8, 11]) +runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/adzoo/vad/configs/_base_/schedules/schedule_2x.py b/adzoo/vad/configs/_base_/schedules/schedule_2x.py new file mode 100644 index 0000000..afde799 --- /dev/null +++ b/adzoo/vad/configs/_base_/schedules/schedule_2x.py @@ -0,0 +1,14 @@ +# optimizer +# This schedule is mainly used by models on nuScenes dataset +optimizer = dict(type='AdamW', lr=0.001, weight_decay=0.01) +# max_norm=10 is better for SECOND +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +lr_config = dict( + policy='step', + warmup='linear', + warmup_iters=1000, + warmup_ratio=1.0 / 1000, + step=[20, 23]) +momentum_config = None +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/adzoo/vad/configs/_base_/schedules/schedule_3x.py b/adzoo/vad/configs/_base_/schedules/schedule_3x.py new file mode 100644 index 0000000..115cd26 --- /dev/null +++ b/adzoo/vad/configs/_base_/schedules/schedule_3x.py @@ -0,0 +1,9 @@ +# optimizer +# This schedule is mainly used by models on indoor datasets, +# e.g., VoteNet on SUNRGBD and ScanNet +lr = 0.008 # max learning rate +optimizer = dict(type='AdamW', lr=lr, weight_decay=0.01) +optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2)) +lr_config = dict(policy='step', warmup=None, step=[24, 32]) +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/adzoo/vad/configs/_base_/schedules/seg_cosine_150e.py b/adzoo/vad/configs/_base_/schedules/seg_cosine_150e.py new file mode 100644 index 0000000..04b44e5 --- /dev/null +++ b/adzoo/vad/configs/_base_/schedules/seg_cosine_150e.py @@ -0,0 +1,9 @@ +# optimizer +# This schedule is mainly used on the S3DIS dataset for the segmentation task +optimizer = dict(type='SGD', lr=0.2, weight_decay=0.0001, momentum=0.9) +optimizer_config = dict(grad_clip=None)
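# Note on the key below (an illustrative aside): unlike `min_lr_ratio` in the
# cosine schedule earlier, `min_lr` here is an absolute floor, so the LR
# anneals from 0.2 down to 0.002, a fixed 100x decay. A relative equivalent
# would be:
#
#     lr_config = dict(policy='CosineAnnealing', warmup=None, min_lr_ratio=0.01)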
+lr_config = dict(policy='CosineAnnealing', warmup=None, min_lr=0.002) +momentum_config = None + +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=150) diff --git a/adzoo/vad/configs/_base_/schedules/seg_cosine_200e.py b/adzoo/vad/configs/_base_/schedules/seg_cosine_200e.py new file mode 100644 index 0000000..6a49484 --- /dev/null +++ b/adzoo/vad/configs/_base_/schedules/seg_cosine_200e.py @@ -0,0 +1,9 @@ +# optimizer +# This schedule is mainly used on the ScanNet dataset for the segmentation task +optimizer = dict(type='Adam', lr=0.001, weight_decay=0.01) +optimizer_config = dict(grad_clip=None) +lr_config = dict(policy='CosineAnnealing', warmup=None, min_lr=1e-5) +momentum_config = None + +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=200) diff --git a/adzoo/vad/configs/_base_/schedules/seg_cosine_50e.py b/adzoo/vad/configs/_base_/schedules/seg_cosine_50e.py new file mode 100644 index 0000000..975a8f9 --- /dev/null +++ b/adzoo/vad/configs/_base_/schedules/seg_cosine_50e.py @@ -0,0 +1,9 @@ +# optimizer +# This schedule is mainly used on the S3DIS dataset for the segmentation task +optimizer = dict(type='Adam', lr=0.001, weight_decay=0.001) +optimizer_config = dict(grad_clip=None) +lr_config = dict(policy='CosineAnnealing', warmup=None, min_lr=1e-5) +momentum_config = None + +# runtime settings +runner = dict(type='EpochBasedRunner', max_epochs=50) diff --git a/adzoo/vad/configs/datasets/custom_lyft-3d.py b/adzoo/vad/configs/datasets/custom_lyft-3d.py new file mode 100644 index 0000000..5a95d89 --- /dev/null +++ b/adzoo/vad/configs/datasets/custom_lyft-3d.py @@ -0,0 +1,136 @@ +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-80, -80, -5, 80, 80, 3] +# For Lyft we usually do 9-class detection +class_names = [ + 'car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', 'motorcycle', + 'bicycle', 'pedestrian', 'animal' +] +dataset_type = 'CustomLyftDataset' +data_root = 'data/lyft/' +# Input modality for Lyft dataset, this is consistent with the submission +# format which requires the information in input_modality. +input_modality = dict( + use_lidar=True, + use_camera=False, + use_radar=False, + use_map=False, + use_external=True) +file_client_args = dict(backend='disk') +# Uncomment the following if you use Ceph or other file clients. +# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient +# for more details.
+# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/lyft/': 's3://lyft/lyft/', +# 'data/lyft/': 's3://lyft/lyft/' +# })) +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'lyft_infos_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'lyft_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'lyft_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True)) +# For Lyft dataset, we usually evaluate the model at the end of training. +# Since the models are trained by 24 epochs by default, we set evaluation +# interval to be 24. Please change the interval accordingly if you do not +# use a default schedule. 
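# If the schedule is shortened, keep this interval in sync with the runner's
# max_epochs; for example, a hypothetical per-epoch evaluation would read:
#
#     evaluation = dict(interval=1, pipeline=eval_pipeline)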
+evaluation = dict(interval=24, pipeline=eval_pipeline) \ No newline at end of file diff --git a/adzoo/vad/configs/datasets/custom_nus-3d.py b/adzoo/vad/configs/datasets/custom_nus-3d.py new file mode 100644 index 0000000..af81f9b --- /dev/null +++ b/adzoo/vad/configs/datasets/custom_nus-3d.py @@ -0,0 +1,141 @@ +# If point cloud range is changed, the models should also change their point +# cloud range accordingly +point_cloud_range = [-50, -50, -5, 50, 50, 3] +# For nuScenes we usually do 10-class detection +class_names = [ + 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', + 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier' +] +dataset_type = 'NuScenesDataset_eval_modified' +data_root = 'data/nuscenes/' +# Input modality for nuScenes dataset, this is consistent with the submission +# format which requires the information in input_modality. +input_modality = dict( + use_lidar=True, + use_camera=False, + use_radar=False, + use_map=False, + use_external=False) +file_client_args = dict(backend='disk') +# Uncomment the following if use ceph or other file clients. +# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient +# for more details. +# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/nuscenes/': 's3://nuscenes/nuscenes/', +# 'data/nuscenes/': 's3://nuscenes/nuscenes/' +# })) +train_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + dict( + type='GlobalRotScaleTrans', + rot_range=[-0.3925, 0.3925], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PointShuffle'), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1333, 800), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='GlobalRotScaleTrans', + rot_range=[0, 0], + scale_ratio_range=[1., 1.], + translation_std=[0, 0, 0]), + dict(type='RandomFlip3D'), + dict( + type='PointsRangeFilter', point_cloud_range=point_cloud_range), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) + ]) +] +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. 
client) +eval_pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=file_client_args), + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='Collect3D', keys=['points']) +] + +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR'), + val=dict( + type=dataset_type, + ann_file=data_root + 'nuscenes_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True, + box_type_3d='LiDAR'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'nuscenes_infos_val.pkl', + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=True, + box_type_3d='LiDAR')) +# For nuScenes dataset, we usually evaluate the model at the end of training. +# Since the models are trained by 24 epochs by default, we set evaluation +# interval to be 24. Please change the interval accordingly if you do not +# use a default schedule. +evaluation = dict(interval=24, pipeline=eval_pipeline) diff --git a/adzoo/vad/configs/datasets/custom_waymo-3d.py b/adzoo/vad/configs/datasets/custom_waymo-3d.py new file mode 100644 index 0000000..4100e13 --- /dev/null +++ b/adzoo/vad/configs/datasets/custom_waymo-3d.py @@ -0,0 +1,112 @@ +# dataset settings +# D5 in the config name means the whole dataset is divided into 5 folds +# We only use one fold for efficient experiments +dataset_type = 'CustomWaymoDataset' +data_root = 'data/waymo/kitti_format/' +file_client_args = dict(backend='disk') +# Uncomment the following if use ceph or other file clients. +# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient +# for more details. 
+# file_client_args = dict( +# backend='petrel', path_mapping=dict(data='s3://waymo_data/')) + +img_norm_cfg = dict( + mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +class_names = ['Car', 'Pedestrian', 'Cyclist'] +point_cloud_range = [-74.88, -74.88, -2, 74.88, 74.88, 4] +input_modality = dict(use_lidar=False, use_camera=True) +db_sampler = dict( + data_root=data_root, + info_path=data_root + 'waymo_dbinfos_train.pkl', + rate=1.0, + prepare=dict( + filter_by_difficulty=[-1], + filter_by_min_points=dict(Car=5, Pedestrian=10, Cyclist=10)), + classes=class_names, + sample_groups=dict(Car=15, Pedestrian=10, Cyclist=10), + points_loader=dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=[0, 1, 2, 3, 4], + file_client_args=file_client_args)) + + + +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='PhotoMetricDistortionMultiViewImage'), + dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img']) +] + + +test_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1920, 1280), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='DefaultFormatBundle3D', + class_names=class_names, + with_label=False), + dict(type='CustomCollect3D', keys=['img']) + ]) +] + + +# construct a pipeline for data and gt loading in show function +# please keep its loading function consistent with test_pipeline (e.g. client) + +data = dict( + samples_per_gpu=2, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=2, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'waymo_infos_train.pkl', + split='training', + pipeline=train_pipeline, + modality=input_modality, + classes=class_names, + test_mode=False, + # we use box_type_3d='LiDAR' in kitti and nuscenes dataset + # and box_type_3d='Depth' in sunrgbd and scannet dataset. + box_type_3d='LiDAR', + # load one frame every five frames + load_interval=5)), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'waymo_infos_val.pkl', + split='training', + pipeline=test_pipeline, + modality=input_modality, + classes=class_names, + test_mode=True, + box_type_3d='LiDAR'), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'waymo_infos_val.pkl', + split='training', + pipeline=test_pipeline, + modality=input_modality, + classes=class_names, + test_mode=True, + box_type_3d='LiDAR')) + +evaluation = dict(interval=24, pipeline=test_pipeline) \ No newline at end of file diff --git a/adzoo/vad/create_data.py b/adzoo/vad/create_data.py new file mode 100644 index 0000000..f2b0cc1 --- /dev/null +++ b/adzoo/vad/create_data.py @@ -0,0 +1,305 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. 
+# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- +from data_converter.create_gt_database import create_groundtruth_database +from data_converter import nuscenes_converter as nuscenes_converter +from data_converter import lyft_converter as lyft_converter +from data_converter import kitti_converter as kitti +from data_converter import indoor_converter as indoor +import argparse +from os import path as osp +import sys +sys.path.append('.') + + +def kitti_data_prep(root_path, info_prefix, version, out_dir): + """Prepare data related to Kitti dataset. + + Related data consists of '.pkl' files recording basic infos, + 2D annotations and groundtruth database. + + Args: + root_path (str): Path of dataset root. + info_prefix (str): The prefix of info filenames. + version (str): Dataset version. + out_dir (str): Output directory of the groundtruth database info. + """ + kitti.create_kitti_info_file(root_path, info_prefix) + kitti.create_reduced_point_cloud(root_path, info_prefix) + + info_train_path = osp.join(root_path, f'{info_prefix}_infos_train.pkl') + info_val_path = osp.join(root_path, f'{info_prefix}_infos_val.pkl') + info_trainval_path = osp.join(root_path, + f'{info_prefix}_infos_trainval.pkl') + info_test_path = osp.join(root_path, f'{info_prefix}_infos_test.pkl') + kitti.export_2d_annotation(root_path, info_train_path) + kitti.export_2d_annotation(root_path, info_val_path) + kitti.export_2d_annotation(root_path, info_trainval_path) + kitti.export_2d_annotation(root_path, info_test_path) + + create_groundtruth_database( + 'KittiDataset', + root_path, + info_prefix, + f'{out_dir}/{info_prefix}_infos_train.pkl', + relative_path=False, + mask_anno_path='instances_train.json', + with_mask=(version == 'mask')) + + +def nuscenes_data_prep(root_path, + can_bus_root_path, + info_prefix, + version, + dataset_name, + out_dir, + max_sweeps=10): + """Prepare data related to nuScenes dataset. + + Related data consists of '.pkl' files recording basic infos, + 2D annotations and groundtruth database. + + Args: + root_path (str): Path of dataset root. + info_prefix (str): The prefix of info filenames. + version (str): Dataset version. + dataset_name (str): The dataset class name. + out_dir (str): Output directory of the groundtruth database info. + max_sweeps (int): Number of input consecutive frames. Default: 10 + """ + nuscenes_converter.create_nuscenes_infos( + root_path, out_dir, can_bus_root_path, info_prefix, version=version, max_sweeps=max_sweeps) + + if version == 'v1.0-test': + info_test_path = osp.join( + out_dir, f'{info_prefix}_infos_temporal_test.pkl') + nuscenes_converter.export_2d_annotation( + root_path, info_test_path, version=version) + else: + info_train_path = osp.join( + out_dir, f'{info_prefix}_infos_temporal_train.pkl') + info_val_path = osp.join( + out_dir, f'{info_prefix}_infos_temporal_val.pkl') + nuscenes_converter.export_2d_annotation( + root_path, info_train_path, version=version) + nuscenes_converter.export_2d_annotation( + root_path, info_val_path, version=version) + # create_groundtruth_database(dataset_name, root_path, info_prefix, + # f'{out_dir}/{info_prefix}_infos_train.pkl') + + +def lyft_data_prep(root_path, info_prefix, version, max_sweeps=10): + """Prepare data related to Lyft dataset. + + Related data consists of '.pkl' files recording basic infos. + Although the ground truth database and 2D annotations are not used in + Lyft, it can also be generated like nuScenes. 
+ + Args: + root_path (str): Path of dataset root. + info_prefix (str): The prefix of info filenames. + version (str): Dataset version. + max_sweeps (int, optional): Number of input consecutive frames. + Defaults to 10. + """ + lyft_converter.create_lyft_infos( + root_path, info_prefix, version=version, max_sweeps=max_sweeps) + + +def scannet_data_prep(root_path, info_prefix, out_dir, workers): + """Prepare the info file for scannet dataset. + + Args: + root_path (str): Path of dataset root. + info_prefix (str): The prefix of info filenames. + out_dir (str): Output directory of the generated info file. + workers (int): Number of threads to be used. + """ + indoor.create_indoor_info_file( + root_path, info_prefix, out_dir, workers=workers) + + +def s3dis_data_prep(root_path, info_prefix, out_dir, workers): + """Prepare the info file for s3dis dataset. + + Args: + root_path (str): Path of dataset root. + info_prefix (str): The prefix of info filenames. + out_dir (str): Output directory of the generated info file. + workers (int): Number of threads to be used. + """ + indoor.create_indoor_info_file( + root_path, info_prefix, out_dir, workers=workers) + + +def sunrgbd_data_prep(root_path, info_prefix, out_dir, workers): + """Prepare the info file for sunrgbd dataset. + + Args: + root_path (str): Path of dataset root. + info_prefix (str): The prefix of info filenames. + out_dir (str): Output directory of the generated info file. + workers (int): Number of threads to be used. + """ + indoor.create_indoor_info_file( + root_path, info_prefix, out_dir, workers=workers) + + +def waymo_data_prep(root_path, + info_prefix, + version, + out_dir, + workers, + max_sweeps=5): + """Prepare the info file for waymo dataset. + + Args: + root_path (str): Path of dataset root. + info_prefix (str): The prefix of info filenames. + out_dir (str): Output directory of the generated info file. + workers (int): Number of threads to be used. + max_sweeps (int): Number of input consecutive frames. Default: 5 \ + Here we store pose information of these frames for later use. 
+ """ + from tools.data_converter import waymo_converter as waymo + + splits = ['training', 'validation', 'testing'] + + for i, split in enumerate(splits): + load_dir = osp.join(root_path, 'waymo_format', split) + if split == 'validation': + save_dir = osp.join(out_dir, 'kitti_format', 'training') + else: + save_dir = osp.join(out_dir, 'kitti_format', split) + converter = waymo.Waymo2KITTI( + load_dir, + save_dir, + prefix=str(i), + workers=workers, + test_mode=(split == 'test')) + converter.convert() + # Generate waymo infos + out_dir = osp.join(out_dir, 'kitti_format') + kitti.create_waymo_info_file(out_dir, info_prefix, max_sweeps=max_sweeps) + + create_groundtruth_database( + 'WaymoDataset', + out_dir, + info_prefix, + f'{out_dir}/{info_prefix}_infos_train.pkl', + relative_path=False, + with_mask=False) + + +parser = argparse.ArgumentParser(description='Data converter arg parser') +parser.add_argument('dataset', metavar='kitti', help='name of the dataset') +parser.add_argument( + '--root-path', + type=str, + default='./data/kitti', + help='specify the root path of dataset') +parser.add_argument( + '--canbus', + type=str, + default='./data', + help='specify the root path of nuScenes canbus') +parser.add_argument( + '--version', + type=str, + default='v1.0', + required=False, + help='specify the dataset version, no need for kitti') +parser.add_argument( + '--max-sweeps', + type=int, + default=10, + required=False, + help='specify sweeps of lidar per example') +parser.add_argument( + '--out-dir', + type=str, + default='./data/kitti', + required='False', + help='name of info pkl') +parser.add_argument('--extra-tag', type=str, default='kitti') +parser.add_argument( + '--workers', type=int, default=4, help='number of threads to be used') +args = parser.parse_args() + +if __name__ == '__main__': + if args.dataset == 'kitti': + kitti_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + version=args.version, + out_dir=args.out_dir) + elif args.dataset == 'nuscenes' and args.version != 'v1.0-mini': + train_version = f'{args.version}-trainval' + nuscenes_data_prep( + root_path=args.root_path, + can_bus_root_path=args.canbus, + info_prefix=args.extra_tag, + version=train_version, + dataset_name='NuScenesDataset', + out_dir=args.out_dir, + max_sweeps=args.max_sweeps) + test_version = f'{args.version}-test' + nuscenes_data_prep( + root_path=args.root_path, + can_bus_root_path=args.canbus, + info_prefix=args.extra_tag, + version=test_version, + dataset_name='NuScenesDataset', + out_dir=args.out_dir, + max_sweeps=args.max_sweeps) + elif args.dataset == 'nuscenes' and args.version == 'v1.0-mini': + train_version = f'{args.version}' + nuscenes_data_prep( + root_path=args.root_path, + can_bus_root_path=args.canbus, + info_prefix=args.extra_tag, + version=train_version, + dataset_name='NuScenesDataset', + out_dir=args.out_dir, + max_sweeps=args.max_sweeps) + elif args.dataset == 'lyft': + train_version = f'{args.version}-train' + lyft_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + version=train_version, + max_sweeps=args.max_sweeps) + test_version = f'{args.version}-test' + lyft_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + version=test_version, + max_sweeps=args.max_sweeps) + elif args.dataset == 'waymo': + waymo_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + version=args.version, + out_dir=args.out_dir, + workers=args.workers, + max_sweeps=args.max_sweeps) + elif args.dataset == 'scannet': + scannet_data_prep( + 
root_path=args.root_path, + info_prefix=args.extra_tag, + out_dir=args.out_dir, + workers=args.workers) + elif args.dataset == 's3dis': + s3dis_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + out_dir=args.out_dir, + workers=args.workers) + elif args.dataset == 'sunrgbd': + sunrgbd_data_prep( + root_path=args.root_path, + info_prefix=args.extra_tag, + out_dir=args.out_dir, + workers=args.workers) diff --git a/adzoo/vad/data_converter/__init__.py b/adzoo/vad/data_converter/__init__.py new file mode 100644 index 0000000..ef101fe --- /dev/null +++ b/adzoo/vad/data_converter/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/adzoo/vad/data_converter/create_gt_database.py b/adzoo/vad/data_converter/create_gt_database.py new file mode 100644 index 0000000..7317ced --- /dev/null +++ b/adzoo/vad/data_converter/create_gt_database.py @@ -0,0 +1,338 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import numpy as np +import pickle +from mmcv import track_iter_progress +from mmcv.ops import roi_align +from os import path as osp +from pycocotools import mask as maskUtils +from pycocotools.coco import COCO + +from mmdet3d.core.bbox import box_np_ops as box_np_ops +from mmdet3d.datasets import build_dataset +from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps + + +def _poly2mask(mask_ann, img_h, img_w): + if isinstance(mask_ann, list): + # polygon -- a single object might consist of multiple parts + # we merge all parts into one mask rle code + rles = maskUtils.frPyObjects(mask_ann, img_h, img_w) + rle = maskUtils.merge(rles) + elif isinstance(mask_ann['counts'], list): + # uncompressed RLE + rle = maskUtils.frPyObjects(mask_ann, img_h, img_w) + else: + # rle + rle = mask_ann + mask = maskUtils.decode(rle) + return mask + + +def _parse_coco_ann_info(ann_info): + gt_bboxes = [] + gt_labels = [] + gt_bboxes_ignore = [] + gt_masks_ann = [] + + for i, ann in enumerate(ann_info): + if ann.get('ignore', False): + continue + x1, y1, w, h = ann['bbox'] + if ann['area'] <= 0: + continue + bbox = [x1, y1, x1 + w, y1 + h] + if ann.get('iscrowd', False): + gt_bboxes_ignore.append(bbox) + else: + gt_bboxes.append(bbox) + gt_masks_ann.append(ann['segmentation']) + + if gt_bboxes: + gt_bboxes = np.array(gt_bboxes, dtype=np.float32) + gt_labels = np.array(gt_labels, dtype=np.int64) + else: + gt_bboxes = np.zeros((0, 4), dtype=np.float32) + gt_labels = np.array([], dtype=np.int64) + + if gt_bboxes_ignore: + gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) + else: + gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) + + ann = dict( + bboxes=gt_bboxes, bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann) + + return ann + + +def crop_image_patch_v2(pos_proposals, pos_assigned_gt_inds, gt_masks): + import torch + from torch.nn.modules.utils import _pair + device = pos_proposals.device + num_pos = pos_proposals.size(0) + fake_inds = ( + torch.arange(num_pos, + device=device).to(dtype=pos_proposals.dtype)[:, None]) + rois = torch.cat([fake_inds, pos_proposals], dim=1) # Nx5 + mask_size = _pair(28) + rois = rois.to(device=device) + gt_masks_th = ( + torch.from_numpy(gt_masks).to(device).index_select( + 0, pos_assigned_gt_inds).to(dtype=rois.dtype)) + # Use RoIAlign could apparently accelerate the training (~0.1s/iter) + targets = ( + roi_align(gt_masks_th, rois, mask_size[::-1], 1.0, 0, True).squeeze(1)) + return targets + + +def crop_image_patch(pos_proposals, gt_masks, pos_assigned_gt_inds, org_img): + num_pos = 
pos_proposals.shape[0] + masks = [] + img_patches = [] + for i in range(num_pos): + gt_mask = gt_masks[pos_assigned_gt_inds[i]] + bbox = pos_proposals[i, :].astype(np.int32) + x1, y1, x2, y2 = bbox + w = np.maximum(x2 - x1 + 1, 1) + h = np.maximum(y2 - y1 + 1, 1) + + mask_patch = gt_mask[y1:y1 + h, x1:x1 + w] + masked_img = gt_mask[..., None] * org_img + img_patch = masked_img[y1:y1 + h, x1:x1 + w] + + img_patches.append(img_patch) + masks.append(mask_patch) + return img_patches, masks + + +def create_groundtruth_database(dataset_class_name, + data_path, + info_prefix, + info_path=None, + mask_anno_path=None, + used_classes=None, + database_save_path=None, + db_info_save_path=None, + relative_path=True, + add_rgb=False, + lidar_only=False, + bev_only=False, + coors_range=None, + with_mask=False): + """Given the raw data, generate the ground truth database. + + Args: + dataset_class_name (str): Name of the input dataset. + data_path (str): Path of the data. + info_prefix (str): Prefix of the info file. + info_path (str): Path of the info file. + Default: None. + mask_anno_path (str): Path of the mask_anno. + Default: None. + used_classes (list[str]): Classes have been used. + Default: None. + database_save_path (str): Path to save database. + Default: None. + db_info_save_path (str): Path to save db_info. + Default: None. + relative_path (bool): Whether to use relative path. + Default: True. + with_mask (bool): Whether to use mask. + Default: False. + """ + print(f'Create GT Database of {dataset_class_name}') + dataset_cfg = dict( + type=dataset_class_name, data_root=data_path, ann_file=info_path) + if dataset_class_name == 'KittiDataset': + file_client_args = dict(backend='disk') + dataset_cfg.update( + test_mode=False, + split='training', + modality=dict( + use_lidar=True, + use_depth=False, + use_lidar_intensity=True, + use_camera=with_mask, + ), + pipeline=[ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=4, + use_dim=4, + file_client_args=file_client_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + file_client_args=file_client_args) + ]) + + elif dataset_class_name == 'NuScenesDataset': + dataset_cfg.update( + use_valid_flag=True, + pipeline=[ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + use_dim=[0, 1, 2, 3, 4], + pad_empty_sweeps=True, + remove_close=True), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True) + ]) + + elif dataset_class_name == 'WaymoDataset': + file_client_args = dict(backend='disk') + dataset_cfg.update( + test_mode=False, + split='training', + modality=dict( + use_lidar=True, + use_depth=False, + use_lidar_intensity=True, + use_camera=False, + ), + pipeline=[ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=6, + use_dim=5, + file_client_args=file_client_args), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + file_client_args=file_client_args) + ]) + + dataset = build_dataset(dataset_cfg) + + if database_save_path is None: + database_save_path = osp.join(data_path, f'{info_prefix}_gt_database') + if db_info_save_path is None: + db_info_save_path = osp.join(data_path, + f'{info_prefix}_dbinfos_train.pkl') + mmcv.mkdir_or_exist(database_save_path) + all_db_infos = dict() + if with_mask: + coco = COCO(osp.join(data_path, mask_anno_path)) + imgIds = coco.getImgIds() + file2id = dict() + for i in imgIds: + info = 
coco.loadImgs([i])[0] + file2id.update({info['file_name']: i}) + + group_counter = 0 + for j in track_iter_progress(list(range(len(dataset)))): + input_dict = dataset.get_data_info(j) + dataset.pre_pipeline(input_dict) + example = dataset.pipeline(input_dict) + annos = example['ann_info'] + image_idx = example['sample_idx'] + points = example['points'].tensor.numpy() + gt_boxes_3d = annos['gt_bboxes_3d'].tensor.numpy() + names = annos['gt_names'] + group_dict = dict() + if 'group_ids' in annos: + group_ids = annos['group_ids'] + else: + group_ids = np.arange(gt_boxes_3d.shape[0], dtype=np.int64) + difficulty = np.zeros(gt_boxes_3d.shape[0], dtype=np.int32) + if 'difficulty' in annos: + difficulty = annos['difficulty'] + + num_obj = gt_boxes_3d.shape[0] + point_indices = box_np_ops.points_in_rbbox(points, gt_boxes_3d) + + if with_mask: + # prepare masks + gt_boxes = annos['gt_bboxes'] + img_path = osp.split(example['img_info']['filename'])[-1] + if img_path not in file2id.keys(): + print(f'skip image {img_path} for empty mask') + continue + img_id = file2id[img_path] + kins_annIds = coco.getAnnIds(imgIds=img_id) + kins_raw_info = coco.loadAnns(kins_annIds) + kins_ann_info = _parse_coco_ann_info(kins_raw_info) + h, w = annos['img_shape'][:2] + gt_masks = [ + _poly2mask(mask, h, w) for mask in kins_ann_info['masks'] + ] + # get mask inds based on iou mapping + bbox_iou = bbox_overlaps(kins_ann_info['bboxes'], gt_boxes) + mask_inds = bbox_iou.argmax(axis=0) + valid_inds = (bbox_iou.max(axis=0) > 0.5) + + # mask the image + # use more precise crop when it is ready + # object_img_patches = np.ascontiguousarray( + # np.stack(object_img_patches, axis=0).transpose(0, 3, 1, 2)) + # crop image patches using roi_align + # object_img_patches = crop_image_patch_v2( + # torch.Tensor(gt_boxes), + # torch.Tensor(mask_inds).long(), object_img_patches) + object_img_patches, object_masks = crop_image_patch( + gt_boxes, gt_masks, mask_inds, annos['img']) + + for i in range(num_obj): + filename = f'{image_idx}_{names[i]}_{i}.bin' + abs_filepath = osp.join(database_save_path, filename) + rel_filepath = osp.join(f'{info_prefix}_gt_database', filename) + + # save point clouds and image patches for each object + gt_points = points[point_indices[:, i]] + gt_points[:, :3] -= gt_boxes_3d[i, :3] + + if with_mask: + if object_masks[i].sum() == 0 or not valid_inds[i]: + # Skip object for empty or invalid mask + continue + img_patch_path = abs_filepath + '.png' + mask_patch_path = abs_filepath + '.mask.png' + mmcv.imwrite(object_img_patches[i], img_patch_path) + mmcv.imwrite(object_masks[i], mask_patch_path) + + with open(abs_filepath, 'w') as f: + gt_points.tofile(f) + + if (used_classes is None) or names[i] in used_classes: + db_info = { + 'name': names[i], + 'path': rel_filepath, + 'image_idx': image_idx, + 'gt_idx': i, + 'box3d_lidar': gt_boxes_3d[i], + 'num_points_in_gt': gt_points.shape[0], + 'difficulty': difficulty[i], + } + local_group_id = group_ids[i] + # if local_group_id >= 0: + if local_group_id not in group_dict: + group_dict[local_group_id] = group_counter + group_counter += 1 + db_info['group_id'] = group_dict[local_group_id] + if 'score' in annos: + db_info['score'] = annos['score'][i] + if with_mask: + db_info.update({'box2d_camera': gt_boxes[i]}) + if names[i] in all_db_infos: + all_db_infos[names[i]].append(db_info) + else: + all_db_infos[names[i]] = [db_info] + + for k, v in all_db_infos.items(): + print(f'load {len(v)} {k} database infos') + + with open(db_info_save_path, 'wb') as f: + 
pickle.dump(all_db_infos, f) diff --git a/adzoo/vad/data_converter/vad_nuscenes_converter.py b/adzoo/vad/data_converter/vad_nuscenes_converter.py new file mode 100644 index 0000000..338051c --- /dev/null +++ b/adzoo/vad/data_converter/vad_nuscenes_converter.py @@ -0,0 +1,1005 @@ +import os +import math +import copy +import argparse +from os import path as osp +from collections import OrderedDict +from typing import List, Tuple, Union + +import mmcv +import numpy as np +from pyquaternion import Quaternion +from nuscenes.nuscenes import NuScenes +from nuscenes.utils.data_classes import Box +from shapely.geometry import MultiPoint, box +from mmdet3d.datasets import NuScenesDataset +from nuscenes.utils.geometry_utils import view_points +from mmdet3d.core.bbox.box_np_ops import points_cam2img +from nuscenes.utils.geometry_utils import transform_matrix + + +nus_categories = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle', + 'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone', + 'barrier') + +nus_attributes = ('cycle.with_rider', 'cycle.without_rider', + 'pedestrian.moving', 'pedestrian.standing', + 'pedestrian.sitting_lying_down', 'vehicle.moving', + 'vehicle.parked', 'vehicle.stopped', 'None') + +ego_width, ego_length = 1.85, 4.084 + +def quart_to_rpy(qua): + x, y, z, w = qua + roll = math.atan2(2 * (w * x + y * z), 1 - 2 * (x * x + y * y)) + pitch = math.asin(2 * (w * y - x * z)) + yaw = math.atan2(2 * (w * z + x * y), 1 - 2 * (z * z + y * y)) + return roll, pitch, yaw + +def locate_message(utimes, utime): + i = np.searchsorted(utimes, utime) + if i == len(utimes) or (i > 0 and utime - utimes[i-1] < utimes[i] - utime): + i -= 1 + return i + + +def create_nuscenes_infos(root_path, + out_path, + can_bus_root_path, + info_prefix, + version='v1.0-trainval', + max_sweeps=10): + """Create info file of nuscene dataset. + + Given the raw data, generate its related info file in pkl format. + + Args: + root_path (str): Path of the data root. + info_prefix (str): Prefix of the info file to be generated. + version (str): Version of the data. + Default: 'v1.0-trainval' + max_sweeps (int): Max number of sweeps. + Default: 10 + """ + from nuscenes.nuscenes import NuScenes + from nuscenes.can_bus.can_bus_api import NuScenesCanBus + print(version, root_path) + nusc = NuScenes(version=version, dataroot=root_path, verbose=True) + nusc_can_bus = NuScenesCanBus(dataroot=can_bus_root_path) + from nuscenes.utils import splits + available_vers = ['v1.0-trainval', 'v1.0-test', 'v1.0-mini'] + assert version in available_vers + if version == 'v1.0-trainval': + train_scenes = splits.train + val_scenes = splits.val + elif version == 'v1.0-test': + train_scenes = splits.test + val_scenes = [] + elif version == 'v1.0-mini': + train_scenes = splits.mini_train + val_scenes = splits.mini_val + else: + raise ValueError('unknown') + + # filter existing scenes. 
+ available_scenes = get_available_scenes(nusc) + available_scene_names = [s['name'] for s in available_scenes] + train_scenes = list( + filter(lambda x: x in available_scene_names, train_scenes)) + val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes)) + train_scenes = set([ + available_scenes[available_scene_names.index(s)]['token'] + for s in train_scenes + ]) + val_scenes = set([ + available_scenes[available_scene_names.index(s)]['token'] + for s in val_scenes + ]) + + test = 'test' in version + if test: + print('test scene: {}'.format(len(train_scenes))) + else: + print('train scene: {}, val scene: {}'.format( + len(train_scenes), len(val_scenes))) + + train_nusc_infos, val_nusc_infos = _fill_trainval_infos( + nusc, nusc_can_bus, train_scenes, val_scenes, test, max_sweeps=max_sweeps) + + metadata = dict(version=version) + if test: + print('test sample: {}'.format(len(train_nusc_infos))) + data = dict(infos=train_nusc_infos, metadata=metadata) + info_path = osp.join(out_path, + '{}_infos_temporal_test.pkl'.format(info_prefix)) + mmcv.dump(data, info_path) + else: + print('train sample: {}, val sample: {}'.format( + len(train_nusc_infos), len(val_nusc_infos))) + data = dict(infos=train_nusc_infos, metadata=metadata) + info_path = osp.join(out_path, + '{}_infos_temporal_train.pkl'.format(info_prefix)) + mmcv.dump(data, info_path) + data['infos'] = val_nusc_infos + info_val_path = osp.join(out_path, + '{}_infos_temporal_val.pkl'.format(info_prefix)) + mmcv.dump(data, info_val_path) + + +def get_available_scenes(nusc): + """Get available scenes from the input nuscenes class. + + Given the raw data, get the information of available scenes for + further info generation. + + Args: + nusc (class): Dataset class in the nuScenes dataset. + + Returns: + available_scenes (list[dict]): List of basic information for the + available scenes. + """ + available_scenes = [] + print('total scene num: {}'.format(len(nusc.scene))) + for scene in nusc.scene: + scene_token = scene['token'] + scene_rec = nusc.get('scene', scene_token) + sample_rec = nusc.get('sample', scene_rec['first_sample_token']) + sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP']) + has_more_frames = True + scene_not_exist = False + while has_more_frames: + lidar_path, boxes, _ = nusc.get_sample_data(sd_rec['token']) + lidar_path = str(lidar_path) + if os.getcwd() in lidar_path: + # path from lyftdataset is absolute path + lidar_path = lidar_path.split(f'{os.getcwd()}/')[-1] + # relative path + if not mmcv.is_filepath(lidar_path): + scene_not_exist = True + break + else: + break + if scene_not_exist: + continue + available_scenes.append(scene) + print('exist scene num: {}'.format(len(available_scenes))) + return available_scenes + + +def _get_can_bus_info(nusc, nusc_can_bus, sample): + scene_name = nusc.get('scene', sample['scene_token'])['name'] + sample_timestamp = sample['timestamp'] + try: + pose_list = nusc_can_bus.get_messages(scene_name, 'pose') + except: + return np.zeros(18) # server scenes do not have can bus information. 
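+    # Layout of the returned 18-d CAN bus vector (a sketch inferred from the
+    # fields handled below, not an authoritative spec):
+    #   [0:3]   pos (x, y, z)
+    #   [3:7]   orientation quaternion
+    #   [7:16]  remaining pose fields, 3 values each (accel, rotation_rate, vel)
+    #   [16:18] two placeholder zeros (downstream dataset code may overwrite
+    #           these, e.g. with the ego yaw)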
+    can_bus = []
+    # within each scene, the first can_bus timestamp may be larger than the
+    # first sample's timestamp
+    last_pose = pose_list[0]
+    for i, pose in enumerate(pose_list):
+        if pose['utime'] > sample_timestamp:
+            break
+        last_pose = pose
+    _ = last_pose.pop('utime')  # useless
+    pos = last_pose.pop('pos')
+    rotation = last_pose.pop('orientation')
+    can_bus.extend(pos)
+    can_bus.extend(rotation)
+    for key in last_pose.keys():
+        can_bus.extend(last_pose[key])  # 16 elements
+    can_bus.extend([0., 0.])
+    return np.array(can_bus)
+
+
+def _fill_trainval_infos(nusc,
+                         nusc_can_bus,
+                         train_scenes,
+                         val_scenes,
+                         test=False,
+                         max_sweeps=10,
+                         fut_ts=6,
+                         his_ts=2):
+    """Generate the train/val infos from the raw data.
+
+    Args:
+        nusc (:obj:`NuScenes`): Dataset class in the nuScenes dataset.
+        nusc_can_bus (:obj:`NuScenesCanBus`): CAN bus API of nuScenes.
+        train_scenes (list[str]): Basic information of training scenes.
+        val_scenes (list[str]): Basic information of validation scenes.
+        test (bool): Whether to use the test mode. In the test mode, no
+            annotations can be accessed. Default: False.
+        max_sweeps (int): Max number of sweeps. Default: 10.
+        fut_ts (int): Number of future timesteps per agent. Default: 6.
+        his_ts (int): Number of history timesteps for the ego. Default: 2.
+
+    Returns:
+        tuple[list[dict]]: Information of training set and validation set
+            that will be saved to the info file.
+    """
+    train_nusc_infos = []
+    val_nusc_infos = []
+    frame_idx = 0
+    cat2idx = {}
+    for idx, dic in enumerate(nusc.category):
+        cat2idx[dic['name']] = idx
+
+    for sample in mmcv.track_iter_progress(nusc.sample):
+        map_location = nusc.get('log', nusc.get('scene', sample['scene_token'])['log_token'])['location']
+        lidar_token = sample['data']['LIDAR_TOP']
+        sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP'])
+        cs_record = nusc.get('calibrated_sensor',
+                             sd_rec['calibrated_sensor_token'])
+        pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token'])
+        if sample['prev'] != '':
+            sample_prev = nusc.get('sample', sample['prev'])
+            sd_rec_prev = nusc.get('sample_data', sample_prev['data']['LIDAR_TOP'])
+            pose_record_prev = nusc.get('ego_pose', sd_rec_prev['ego_pose_token'])
+        else:
+            pose_record_prev = None
+        if sample['next'] != '':
+            sample_next = nusc.get('sample', sample['next'])
+            sd_rec_next = nusc.get('sample_data', sample_next['data']['LIDAR_TOP'])
+            pose_record_next = nusc.get('ego_pose', sd_rec_next['ego_pose_token'])
+        else:
+            pose_record_next = None
+
+        lidar_path, boxes, _ = nusc.get_sample_data(lidar_token)
+
+        mmcv.check_file_exist(lidar_path)
+        can_bus = _get_can_bus_info(nusc, nusc_can_bus, sample)
+        fut_valid_flag = True
+        test_sample = copy.deepcopy(sample)
+        for i in range(fut_ts):
+            if test_sample['next'] != '':
+                test_sample = nusc.get('sample', test_sample['next'])
+            else:
+                fut_valid_flag = False
+        ##
+        info = {
+            'lidar_path': lidar_path,
+            'token': sample['token'],
+            'prev': sample['prev'],
+            'next': sample['next'],
+            'can_bus': can_bus,
+            'frame_idx': frame_idx,  # temporal related info
+            'sweeps': [],
+            'cams': dict(),
+            'scene_token': sample['scene_token'],  # temporal related info
+            'lidar2ego_translation': cs_record['translation'],
+            'lidar2ego_rotation': cs_record['rotation'],
+            'ego2global_translation': pose_record['translation'],
+            'ego2global_rotation': pose_record['rotation'],
+            'timestamp': sample['timestamp'],
+            'fut_valid_flag': fut_valid_flag,
+            'map_location': map_location
+        }
+
+        if sample['next'] == '':
+            frame_idx = 0
+        else:
+            frame_idx += 1
+
+        l2e_r = info['lidar2ego_rotation']
+        l2e_t = info['lidar2ego_translation']
+        e2g_r = info['ego2global_rotation']
+        e2g_t = info['ego2global_translation']
+        l2e_r_mat = Quaternion(l2e_r).rotation_matrix
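+        # The key frame's lidar->ego (l2e) and ego->global (e2g) transforms
+        # are reused below to express every camera and lidar sweep relative
+        # to the key-frame top lidar (the sweep->ego->global->ego'->lidar
+        # chain implemented in obtain_sensor2top).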
e2g_r_mat = Quaternion(e2g_r).rotation_matrix + + # obtain 6 image's information per frame + camera_types = [ + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_FRONT_LEFT', + 'CAM_BACK', + 'CAM_BACK_LEFT', + 'CAM_BACK_RIGHT', + ] + for cam in camera_types: + cam_token = sample['data'][cam] + cam_path, _, cam_intrinsic = nusc.get_sample_data(cam_token) + cam_info = obtain_sensor2top(nusc, cam_token, l2e_t, l2e_r_mat, + e2g_t, e2g_r_mat, cam) + cam_info.update(cam_intrinsic=cam_intrinsic) + info['cams'].update({cam: cam_info}) + + # obtain sweeps for a single key-frame + sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) + sweeps = [] + while len(sweeps) < max_sweeps: + if not sd_rec['prev'] == '': + sweep = obtain_sensor2top(nusc, sd_rec['prev'], l2e_t, + l2e_r_mat, e2g_t, e2g_r_mat, 'lidar') + sweeps.append(sweep) + sd_rec = nusc.get('sample_data', sd_rec['prev']) + else: + break + info['sweeps'] = sweeps + # obtain annotation + if not test: + annotations = [ + nusc.get('sample_annotation', token) + for token in sample['anns'] + ] + locs = np.array([b.center for b in boxes]).reshape(-1, 3) + dims = np.array([b.wlh for b in boxes]).reshape(-1, 3) + rots = np.array([b.orientation.yaw_pitch_roll[0] + for b in boxes]).reshape(-1, 1) + velocity = np.array( + [nusc.box_velocity(token)[:2] for token in sample['anns']]) + valid_flag = np.array( + [(anno['num_lidar_pts'] + anno['num_radar_pts']) > 0 + for anno in annotations], + dtype=bool).reshape(-1) + # convert velo from global to lidar + for i in range(len(boxes)): + velo = np.array([*velocity[i], 0.0]) + velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv( + l2e_r_mat).T + velocity[i] = velo[:2] + + names = [b.name for b in boxes] + for i in range(len(names)): + if names[i] in NuScenesDataset.NameMapping: + names[i] = NuScenesDataset.NameMapping[names[i]] + names = np.array(names) + # we need to convert rot to SECOND format. + gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1) + assert len(gt_boxes) == len( + annotations), f'{len(gt_boxes)}, {len(annotations)}' + + # get future coords for each box + # [num_box, fut_ts*2] + num_box = len(boxes) + gt_fut_trajs = np.zeros((num_box, fut_ts, 2)) + gt_fut_yaw = np.zeros((num_box, fut_ts)) + gt_fut_masks = np.zeros((num_box, fut_ts)) + gt_boxes_yaw = -(gt_boxes[:,6] + np.pi / 2) + # agent lcf feat (x, y, yaw, vx, vy, width, length, height, type) + agent_lcf_feat = np.zeros((num_box, 9)) + gt_fut_goal = np.zeros((num_box)) + for i, anno in enumerate(annotations): + cur_box = boxes[i] + cur_anno = anno + agent_lcf_feat[i, 0:2] = cur_box.center[:2] + agent_lcf_feat[i, 2] = gt_boxes_yaw[i] + agent_lcf_feat[i, 3:5] = velocity[i] + agent_lcf_feat[i, 5:8] = anno['size'] # width,length,height + agent_lcf_feat[i, 8] = cat2idx[anno['category_name']] if anno['category_name'] in cat2idx.keys() else -1 + for j in range(fut_ts): + if cur_anno['next'] != '': + anno_next = nusc.get('sample_annotation', cur_anno['next']) + box_next = Box( + anno_next['translation'], anno_next['size'], Quaternion(anno_next['rotation']) + ) + # Move box to ego vehicle coord system. + box_next.translate(-np.array(pose_record['translation'])) + box_next.rotate(Quaternion(pose_record['rotation']).inverse) + # Move box to sensor coord system. 
+ box_next.translate(-np.array(cs_record['translation'])) + box_next.rotate(Quaternion(cs_record['rotation']).inverse) + gt_fut_trajs[i, j] = box_next.center[:2] - cur_box.center[:2] + gt_fut_masks[i, j] = 1 + # add yaw diff + _, _, box_yaw = quart_to_rpy([cur_box.orientation.x, cur_box.orientation.y, + cur_box.orientation.z, cur_box.orientation.w]) + _, _, box_yaw_next = quart_to_rpy([box_next.orientation.x, box_next.orientation.y, + box_next.orientation.z, box_next.orientation.w]) + gt_fut_yaw[i, j] = box_yaw_next - box_yaw + cur_anno = anno_next + cur_box = box_next + else: + gt_fut_trajs[i, j:] = 0 + break + # get agent goal + gt_fut_coords = np.cumsum(gt_fut_trajs[i], axis=-2) + coord_diff = gt_fut_coords[-1] - gt_fut_coords[0] + if coord_diff.max() < 1.0: # static + gt_fut_goal[i] = 9 + else: + box_mot_yaw = np.arctan2(coord_diff[1], coord_diff[0]) + np.pi + gt_fut_goal[i] = box_mot_yaw // (np.pi / 4) # 0-8: goal direction class + + # get ego history traj (offset format) + ego_his_trajs = np.zeros((his_ts+1, 3)) + ego_his_trajs_diff = np.zeros((his_ts+1, 3)) + sample_cur = sample + for i in range(his_ts, -1, -1): + if sample_cur is not None: + pose_mat = get_global_sensor_pose(sample_cur, nusc, inverse=False) + ego_his_trajs[i] = pose_mat[:3, 3] + has_prev = sample_cur['prev'] != '' + has_next = sample_cur['next'] != '' + if has_next: + sample_next = nusc.get('sample', sample_cur['next']) + pose_mat_next = get_global_sensor_pose(sample_next, nusc, inverse=False) + ego_his_trajs_diff[i] = pose_mat_next[:3, 3] - ego_his_trajs[i] + sample_cur = nusc.get('sample', sample_cur['prev']) if has_prev else None + else: + ego_his_trajs[i] = ego_his_trajs[i+1] - ego_his_trajs_diff[i+1] + ego_his_trajs_diff[i] = ego_his_trajs_diff[i+1] + + # global to ego at lcf + ego_his_trajs = ego_his_trajs - np.array(pose_record['translation']) + rot_mat = Quaternion(pose_record['rotation']).inverse.rotation_matrix + ego_his_trajs = np.dot(rot_mat, ego_his_trajs.T).T + # ego to lidar at lcf + ego_his_trajs = ego_his_trajs - np.array(cs_record['translation']) + rot_mat = Quaternion(cs_record['rotation']).inverse.rotation_matrix + ego_his_trajs = np.dot(rot_mat, ego_his_trajs.T).T + ego_his_trajs = ego_his_trajs[1:] - ego_his_trajs[:-1] + + # get ego futute traj (offset format) + ego_fut_trajs = np.zeros((fut_ts+1, 3)) + ego_fut_masks = np.zeros((fut_ts+1)) + sample_cur = sample + for i in range(fut_ts+1): + pose_mat = get_global_sensor_pose(sample_cur, nusc, inverse=False) + ego_fut_trajs[i] = pose_mat[:3, 3] + ego_fut_masks[i] = 1 + if sample_cur['next'] == '': + ego_fut_trajs[i+1:] = ego_fut_trajs[i] + break + else: + sample_cur = nusc.get('sample', sample_cur['next']) + # global to ego at lcf + ego_fut_trajs = ego_fut_trajs - np.array(pose_record['translation']) + rot_mat = Quaternion(pose_record['rotation']).inverse.rotation_matrix + ego_fut_trajs = np.dot(rot_mat, ego_fut_trajs.T).T + # ego to lidar at lcf + ego_fut_trajs = ego_fut_trajs - np.array(cs_record['translation']) + rot_mat = Quaternion(cs_record['rotation']).inverse.rotation_matrix + ego_fut_trajs = np.dot(rot_mat, ego_fut_trajs.T).T + # drive command according to final fut step offset from lcf + if ego_fut_trajs[-1][0] >= 2: + command = np.array([1, 0, 0]) # Turn Right + elif ego_fut_trajs[-1][0] <= -2: + command = np.array([0, 1, 0]) # Turn Left + else: + command = np.array([0, 0, 1]) # Go Straight + # offset from lcf -> per-step offset + ego_fut_trajs = ego_fut_trajs[1:] - ego_fut_trajs[:-1] + + ### ego lcf feat (vx, vy, ax, ay, w, length, 
width, vel, steer), w: yaw rate (angular velocity)
+        ego_lcf_feat = np.zeros(9)
+        # infer the ego vehicle's velocity and acceleration from odometry
+        _, _, ego_yaw = quart_to_rpy(pose_record['rotation'])
+        ego_pos = np.array(pose_record['translation'])
+        if pose_record_prev is not None:
+            _, _, ego_yaw_prev = quart_to_rpy(pose_record_prev['rotation'])
+            ego_pos_prev = np.array(pose_record_prev['translation'])
+        if pose_record_next is not None:
+            _, _, ego_yaw_next = quart_to_rpy(pose_record_next['rotation'])
+            ego_pos_next = np.array(pose_record_next['translation'])
+        assert (pose_record_prev is not None) or (pose_record_next is not None), 'prev token and next token all empty'
+        if pose_record_prev is not None:
+            ego_w = (ego_yaw - ego_yaw_prev) / 0.5  # keyframes are 0.5 s apart
+            ego_v = np.linalg.norm(ego_pos[:2] - ego_pos_prev[:2]) / 0.5
+            ego_vx, ego_vy = ego_v * math.cos(ego_yaw + np.pi/2), ego_v * math.sin(ego_yaw + np.pi/2)
+        else:
+            ego_w = (ego_yaw_next - ego_yaw) / 0.5
+            ego_v = np.linalg.norm(ego_pos_next[:2] - ego_pos[:2]) / 0.5
+            ego_vx, ego_vy = ego_v * math.cos(ego_yaw + np.pi/2), ego_v * math.sin(ego_yaw + np.pi/2)
+
+        ref_scene = nusc.get("scene", sample['scene_token'])
+        try:
+            pose_msgs = nusc_can_bus.get_messages(ref_scene['name'], 'pose')
+            steer_msgs = nusc_can_bus.get_messages(ref_scene['name'], 'steeranglefeedback')
+            pose_uts = [msg['utime'] for msg in pose_msgs]
+            steer_uts = [msg['utime'] for msg in steer_msgs]
+            ref_utime = sample['timestamp']
+            pose_index = locate_message(pose_uts, ref_utime)
+            pose_data = pose_msgs[pose_index]
+            steer_index = locate_message(steer_uts, ref_utime)
+            steer_data = steer_msgs[steer_index]
+            # initial speed
+            v0 = pose_data["vel"][0]  # [0] means longitudinal velocity m/s
+            # curvature (positive: turn left)
+            steering = steer_data["value"]
+            # flip x axis if in left-hand traffic (singapore)
+            flip_flag = True if map_location.startswith('singapore') else False
+            if flip_flag:
+                steering *= -1
+            Kappa = 2 * steering / 2.588  # 2.588 m is the ego vehicle's wheelbase
+        except:
+            delta_x = ego_his_trajs[-1, 0] + ego_fut_trajs[0, 0]
+            delta_y = ego_his_trajs[-1, 1] + ego_fut_trajs[0, 1]
+            v0 = np.sqrt(delta_x**2 + delta_y**2)
+            Kappa = 0
+
+        ego_lcf_feat[:2] = np.array([ego_vx, ego_vy])  # can_bus[13:15]
+        ego_lcf_feat[2:4] = can_bus[7:9]
+        ego_lcf_feat[4] = ego_w  # can_bus[12]
+        ego_lcf_feat[5:7] = np.array([ego_length, ego_width])
+        ego_lcf_feat[7] = v0
+        ego_lcf_feat[8] = Kappa
+
+        info['gt_boxes'] = gt_boxes
+        info['gt_names'] = names
+        info['gt_velocity'] = velocity.reshape(-1, 2)
+        info['num_lidar_pts'] = np.array(
+            [a['num_lidar_pts'] for a in annotations])
+        info['num_radar_pts'] = np.array(
+            [a['num_radar_pts'] for a in annotations])
+        info['valid_flag'] = valid_flag
+        info['gt_agent_fut_trajs'] = gt_fut_trajs.reshape(-1, fut_ts*2).astype(np.float32)
+        info['gt_agent_fut_masks'] = gt_fut_masks.reshape(-1, fut_ts).astype(np.float32)
+        info['gt_agent_lcf_feat'] = agent_lcf_feat.astype(np.float32)
+        info['gt_agent_fut_yaw'] = gt_fut_yaw.astype(np.float32)
+        info['gt_agent_fut_goal'] = gt_fut_goal.astype(np.float32)
+        info['gt_ego_his_trajs'] = ego_his_trajs[:, :2].astype(np.float32)
+        info['gt_ego_fut_trajs'] = ego_fut_trajs[:, :2].astype(np.float32)
+        info['gt_ego_fut_masks'] = ego_fut_masks[1:].astype(np.float32)
+        info['gt_ego_fut_cmd'] = command.astype(np.float32)
+        info['gt_ego_lcf_feat'] = ego_lcf_feat.astype(np.float32)
+
+        if sample['scene_token'] in train_scenes:
+            train_nusc_infos.append(info)
+        else:
+            val_nusc_infos.append(info)
+
+    return train_nusc_infos, val_nusc_infos
+
+def get_global_sensor_pose(rec, nusc, inverse=False):
+    lidar_sample_data = 
nusc.get('sample_data', rec['data']['LIDAR_TOP']) + + sd_ep = nusc.get("ego_pose", lidar_sample_data["ego_pose_token"]) + sd_cs = nusc.get("calibrated_sensor", lidar_sample_data["calibrated_sensor_token"]) + if inverse is False: + global_from_ego = transform_matrix(sd_ep["translation"], Quaternion(sd_ep["rotation"]), inverse=False) + ego_from_sensor = transform_matrix(sd_cs["translation"], Quaternion(sd_cs["rotation"]), inverse=False) + pose = global_from_ego.dot(ego_from_sensor) + # translation equivalent writing + # pose_translation = np.array(sd_cs["translation"]) + # rot_mat = Quaternion(sd_ep['rotation']).rotation_matrix + # pose_translation = np.dot(rot_mat, pose_translation) + # # pose_translation = pose[:3, 3] + # pose_translation = pose_translation + np.array(sd_ep["translation"]) + else: + sensor_from_ego = transform_matrix(sd_cs["translation"], Quaternion(sd_cs["rotation"]), inverse=True) + ego_from_global = transform_matrix(sd_ep["translation"], Quaternion(sd_ep["rotation"]), inverse=True) + pose = sensor_from_ego.dot(ego_from_global) + return pose + +def obtain_sensor2top(nusc, + sensor_token, + l2e_t, + l2e_r_mat, + e2g_t, + e2g_r_mat, + sensor_type='lidar'): + """Obtain the info with RT matric from general sensor to Top LiDAR. + + Args: + nusc (class): Dataset class in the nuScenes dataset. + sensor_token (str): Sample data token corresponding to the + specific sensor type. + l2e_t (np.ndarray): Translation from lidar to ego in shape (1, 3). + l2e_r_mat (np.ndarray): Rotation matrix from lidar to ego + in shape (3, 3). + e2g_t (np.ndarray): Translation from ego to global in shape (1, 3). + e2g_r_mat (np.ndarray): Rotation matrix from ego to global + in shape (3, 3). + sensor_type (str): Sensor to calibrate. Default: 'lidar'. + + Returns: + sweep (dict): Sweep information after transformation. + """ + sd_rec = nusc.get('sample_data', sensor_token) + cs_record = nusc.get('calibrated_sensor', + sd_rec['calibrated_sensor_token']) + pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) + data_path = str(nusc.get_sample_data_path(sd_rec['token'])) + if os.getcwd() in data_path: # path from lyftdataset is absolute path + data_path = data_path.split(f'{os.getcwd()}/')[-1] # relative path + sweep = { + 'data_path': data_path, + 'type': sensor_type, + 'sample_data_token': sd_rec['token'], + 'sensor2ego_translation': cs_record['translation'], + 'sensor2ego_rotation': cs_record['rotation'], + 'ego2global_translation': pose_record['translation'], + 'ego2global_rotation': pose_record['rotation'], + 'timestamp': sd_rec['timestamp'] + } + + l2e_r_s = sweep['sensor2ego_rotation'] + l2e_t_s = sweep['sensor2ego_translation'] + e2g_r_s = sweep['ego2global_rotation'] + e2g_t_s = sweep['ego2global_translation'] + + # obtain the RT from sensor to Top LiDAR + # sweep->ego->global->ego'->lidar + l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix + e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix + R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ ( + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ ( + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T + ) + l2e_t @ np.linalg.inv(l2e_r_mat).T + sweep['sensor2lidar_rotation'] = R.T # points @ R.T + T + sweep['sensor2lidar_translation'] = T + return sweep + + +def export_2d_annotation(root_path, info_path, version, mono3d=False): + """Export 2d annotation from the info file and raw data. 
+ + Args: + root_path (str): Root path of the raw data. + info_path (str): Path of the info file. + version (str): Dataset version. + mono3d (bool): Whether to export mono3d annotation. Default: False. + """ + # get bbox annotations for camera + camera_types = [ + 'CAM_FRONT', + 'CAM_FRONT_RIGHT', + 'CAM_FRONT_LEFT', + 'CAM_BACK', + 'CAM_BACK_LEFT', + 'CAM_BACK_RIGHT', + ] + nusc_infos = mmcv.load(info_path)['infos'] + nusc = NuScenes(version=version, dataroot=root_path, verbose=True) + # info_2d_list = [] + cat2Ids = [ + dict(id=nus_categories.index(cat_name), name=cat_name) + for cat_name in nus_categories + ] + coco_ann_id = 0 + coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids) + for info in mmcv.track_iter_progress(nusc_infos): + for cam in camera_types: + cam_info = info['cams'][cam] + coco_infos = get_2d_boxes( + nusc, + cam_info['sample_data_token'], + visibilities=['', '1', '2', '3', '4'], + mono3d=mono3d) + (height, width, _) = mmcv.imread(cam_info['data_path']).shape + coco_2d_dict['images'].append( + dict( + file_name=cam_info['data_path'].split('data/nuscenes/') + [-1], + id=cam_info['sample_data_token'], + token=info['token'], + cam2ego_rotation=cam_info['sensor2ego_rotation'], + cam2ego_translation=cam_info['sensor2ego_translation'], + ego2global_rotation=info['ego2global_rotation'], + ego2global_translation=info['ego2global_translation'], + cam_intrinsic=cam_info['cam_intrinsic'], + width=width, + height=height)) + for coco_info in coco_infos: + if coco_info is None: + continue + # add an empty key for coco format + coco_info['segmentation'] = [] + coco_info['id'] = coco_ann_id + coco_2d_dict['annotations'].append(coco_info) + coco_ann_id += 1 + if mono3d: + json_prefix = f'{info_path[:-4]}_mono3d' + else: + json_prefix = f'{info_path[:-4]}' + mmcv.dump(coco_2d_dict, f'{json_prefix}.coco.json') + + +def get_2d_boxes(nusc, + sample_data_token: str, + visibilities: List[str], + mono3d=True): + """Get the 2D annotation records for a given `sample_data_token`. + + Args: + sample_data_token (str): Sample data token belonging to a camera \ + keyframe. + visibilities (list[str]): Visibility filter. + mono3d (bool): Whether to get boxes with mono3d annotation. + + Return: + list[dict]: List of 2D annotation record that belongs to the input + `sample_data_token`. + """ + + # Get the sample data and the sample corresponding to that sample data. + sd_rec = nusc.get('sample_data', sample_data_token) + + assert sd_rec[ + 'sensor_modality'] == 'camera', 'Error: get_2d_boxes only works' \ + ' for camera sample_data!' + if not sd_rec['is_key_frame']: + raise ValueError( + 'The 2D re-projections are available only for keyframes.') + + s_rec = nusc.get('sample', sd_rec['sample_token']) + + # Get the calibrated sensor and ego pose + # record to get the transformation matrices. + cs_rec = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token']) + pose_rec = nusc.get('ego_pose', sd_rec['ego_pose_token']) + camera_intrinsic = np.array(cs_rec['camera_intrinsic']) + + # Get all the annotation with the specified visibilties. + ann_recs = [ + nusc.get('sample_annotation', token) for token in s_rec['anns'] + ] + ann_recs = [ + ann_rec for ann_rec in ann_recs + if (ann_rec['visibility_token'] in visibilities) + ] + + repro_recs = [] + + for ann_rec in ann_recs: + # Augment sample_annotation with token information. + ann_rec['sample_annotation_token'] = ann_rec['token'] + ann_rec['sample_data_token'] = sample_data_token + + # Get the box in global coordinates. 
+ box = nusc.get_box(ann_rec['token']) + + # Move them to the ego-pose frame. + box.translate(-np.array(pose_rec['translation'])) + box.rotate(Quaternion(pose_rec['rotation']).inverse) + + # Move them to the calibrated sensor frame. + box.translate(-np.array(cs_rec['translation'])) + box.rotate(Quaternion(cs_rec['rotation']).inverse) + + # Filter out the corners that are not in front of the calibrated + # sensor. + corners_3d = box.corners() + in_front = np.argwhere(corners_3d[2, :] > 0).flatten() + corners_3d = corners_3d[:, in_front] + + # Project 3d box to 2d. + corner_coords = view_points(corners_3d, camera_intrinsic, + True).T[:, :2].tolist() + + # Keep only corners that fall within the image. + final_coords = post_process_coords(corner_coords) + + # Skip if the convex hull of the re-projected corners + # does not intersect the image canvas. + if final_coords is None: + continue + else: + min_x, min_y, max_x, max_y = final_coords + + # Generate dictionary record to be included in the .json file. + repro_rec = generate_record(ann_rec, min_x, min_y, max_x, max_y, + sample_data_token, sd_rec['filename']) + + # If mono3d=True, add 3D annotations in camera coordinates + if mono3d and (repro_rec is not None): + loc = box.center.tolist() + + dim = box.wlh + dim[[0, 1, 2]] = dim[[1, 2, 0]] # convert wlh to our lhw + dim = dim.tolist() + + rot = box.orientation.yaw_pitch_roll[0] + rot = [-rot] # convert the rot to our cam coordinate + + global_velo2d = nusc.box_velocity(box.token)[:2] + global_velo3d = np.array([*global_velo2d, 0.0]) + e2g_r_mat = Quaternion(pose_rec['rotation']).rotation_matrix + c2e_r_mat = Quaternion(cs_rec['rotation']).rotation_matrix + cam_velo3d = global_velo3d @ np.linalg.inv( + e2g_r_mat).T @ np.linalg.inv(c2e_r_mat).T + velo = cam_velo3d[0::2].tolist() + + repro_rec['bbox_cam3d'] = loc + dim + rot + repro_rec['velo_cam3d'] = velo + + center3d = np.array(loc).reshape([1, 3]) + center2d = points_cam2img( + center3d, camera_intrinsic, with_depth=True) + repro_rec['center2d'] = center2d.squeeze().tolist() + # normalized center2D + depth + # if samples with depth < 0 will be removed + if repro_rec['center2d'][2] <= 0: + continue + + ann_token = nusc.get('sample_annotation', + box.token)['attribute_tokens'] + if len(ann_token) == 0: + attr_name = 'None' + else: + attr_name = nusc.get('attribute', ann_token[0])['name'] + attr_id = nus_attributes.index(attr_name) + repro_rec['attribute_name'] = attr_name + repro_rec['attribute_id'] = attr_id + + repro_recs.append(repro_rec) + + return repro_recs + + +def post_process_coords( + corner_coords: List, imsize: Tuple[int, int] = (1600, 900) +) -> Union[Tuple[float, float, float, float], None]: + """Get the intersection of the convex hull of the reprojected bbox corners + and the image canvas, return None if no intersection. + + Args: + corner_coords (list[int]): Corner coordinates of reprojected + bounding box. + imsize (tuple[int]): Size of the image canvas. + + Return: + tuple [float]: Intersection of the convex hull of the 2D box + corners and the image canvas. 
+ """ + polygon_from_2d_box = MultiPoint(corner_coords).convex_hull + img_canvas = box(0, 0, imsize[0], imsize[1]) + + if polygon_from_2d_box.intersects(img_canvas): + img_intersection = polygon_from_2d_box.intersection(img_canvas) + intersection_coords = np.array( + [coord for coord in img_intersection.exterior.coords]) + + min_x = min(intersection_coords[:, 0]) + min_y = min(intersection_coords[:, 1]) + max_x = max(intersection_coords[:, 0]) + max_y = max(intersection_coords[:, 1]) + + return min_x, min_y, max_x, max_y + else: + return None + + +def generate_record(ann_rec: dict, x1: float, y1: float, x2: float, y2: float, + sample_data_token: str, filename: str) -> OrderedDict: + """Generate one 2D annotation record given various informations on top of + the 2D bounding box coordinates. + + Args: + ann_rec (dict): Original 3d annotation record. + x1 (float): Minimum value of the x coordinate. + y1 (float): Minimum value of the y coordinate. + x2 (float): Maximum value of the x coordinate. + y2 (float): Maximum value of the y coordinate. + sample_data_token (str): Sample data token. + filename (str):The corresponding image file where the annotation + is present. + + Returns: + dict: A sample 2D annotation record. + - file_name (str): flie name + - image_id (str): sample data token + - area (float): 2d box area + - category_name (str): category name + - category_id (int): category id + - bbox (list[float]): left x, top y, dx, dy of 2d box + - iscrowd (int): whether the area is crowd + """ + repro_rec = OrderedDict() + repro_rec['sample_data_token'] = sample_data_token + coco_rec = dict() + + relevant_keys = [ + 'attribute_tokens', + 'category_name', + 'instance_token', + 'next', + 'num_lidar_pts', + 'num_radar_pts', + 'prev', + 'sample_annotation_token', + 'sample_data_token', + 'visibility_token', + ] + + for key, value in ann_rec.items(): + if key in relevant_keys: + repro_rec[key] = value + + repro_rec['bbox_corners'] = [x1, y1, x2, y2] + repro_rec['filename'] = filename + + coco_rec['file_name'] = filename + coco_rec['image_id'] = sample_data_token + coco_rec['area'] = (y2 - y1) * (x2 - x1) + + if repro_rec['category_name'] not in NuScenesDataset.NameMapping: + return None + cat_name = NuScenesDataset.NameMapping[repro_rec['category_name']] + coco_rec['category_name'] = cat_name + coco_rec['category_id'] = nus_categories.index(cat_name) + coco_rec['bbox'] = [x1, y1, x2 - x1, y2 - y1] + coco_rec['iscrowd'] = 0 + + return coco_rec + + +def nuscenes_data_prep(root_path, + can_bus_root_path, + info_prefix, + version, + dataset_name, + out_dir, + max_sweeps=10): + """Prepare data related to nuScenes dataset. + + Related data consists of '.pkl' files recording basic infos, + 2D annotations and groundtruth database. + + Args: + root_path (str): Path of dataset root. + info_prefix (str): The prefix of info filenames. + version (str): Dataset version. + dataset_name (str): The dataset class name. + out_dir (str): Output directory of the groundtruth database info. + max_sweeps (int): Number of input consecutive frames. 
Default: 10 + """ + create_nuscenes_infos( + root_path, out_dir, can_bus_root_path, info_prefix, version=version, max_sweeps=max_sweeps) + + +parser = argparse.ArgumentParser(description='Data converter arg parser') +parser.add_argument('dataset', metavar='kitti', help='name of the dataset') +parser.add_argument( + '--root-path', + type=str, + default='./data/kitti', + help='specify the root path of dataset') +parser.add_argument( + '--canbus', + type=str, + default='./data', + help='specify the root path of nuScenes canbus') +parser.add_argument( + '--version', + type=str, + default='v1.0', + required=False, + help='specify the dataset version, no need for kitti') +parser.add_argument( + '--max-sweeps', + type=int, + default=10, + required=False, + help='specify sweeps of lidar per example') +parser.add_argument( + '--out-dir', + type=str, + default='./data/kitti', + required='False', + help='name of info pkl') +parser.add_argument('--extra-tag', type=str, default='kitti') +parser.add_argument( + '--workers', type=int, default=4, help='number of threads to be used') +args = parser.parse_args() + +if __name__ == '__main__': + if args.dataset == 'nuscenes' and args.version != 'v1.0-mini': + train_version = f'{args.version}-trainval' + nuscenes_data_prep( + root_path=args.root_path, + can_bus_root_path=args.canbus, + info_prefix=args.extra_tag, + version=train_version, + dataset_name='NuScenesDataset', + out_dir=args.out_dir, + max_sweeps=args.max_sweeps) + test_version = f'{args.version}-test' + nuscenes_data_prep( + root_path=args.root_path, + can_bus_root_path=args.canbus, + info_prefix=args.extra_tag, + version=test_version, + dataset_name='NuScenesDataset', + out_dir=args.out_dir, + max_sweeps=args.max_sweeps) + elif args.dataset == 'nuscenes' and args.version == 'v1.0-mini': + train_version = f'{args.version}' + nuscenes_data_prep( + root_path=args.root_path, + can_bus_root_path=args.canbus, + info_prefix=args.extra_tag, + version=train_version, + dataset_name='NuScenesDataset', + out_dir=args.out_dir, + max_sweeps=args.max_sweeps) diff --git a/adzoo/vad/dist_test.sh b/adzoo/vad/dist_test.sh new file mode 100755 index 0000000..3e2ec30 --- /dev/null +++ b/adzoo/vad/dist_test.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +CONFIG=$1 +CHECKPOINT=$2 +GPUS=$3 +PORT=${PORT:-29503} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ + $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} --eval bbox diff --git a/adzoo/vad/dist_train.sh b/adzoo/vad/dist_train.sh new file mode 100755 index 0000000..141b284 --- /dev/null +++ b/adzoo/vad/dist_train.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +CONFIG=$1 +GPUS=$2 +PORT=${PORT:-28509} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ + $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3} --deterministic diff --git a/adzoo/vad/misc/browse_dataset.py b/adzoo/vad/misc/browse_dataset.py new file mode 100644 index 0000000..e3419f6 --- /dev/null +++ b/adzoo/vad/misc/browse_dataset.py @@ -0,0 +1,240 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
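+#
+# Usage sketch (illustrative only; CONFIG is a placeholder for one of the
+# dataset configs shipped with this repo):
+#   python adzoo/vad/misc/browse_dataset.py CONFIG --task det \
+#       --output-dir ./browse_vis --online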
+import argparse +import numpy as np +import warnings +from mmcv import Config, DictAction, mkdir_or_exist, track_iter_progress +from os import path as osp + +from mmdet3d.core.bbox import (Box3DMode, CameraInstance3DBoxes, Coord3DMode, + DepthInstance3DBoxes, LiDARInstance3DBoxes) +from mmdet3d.core.visualizer import (show_multi_modality_result, show_result, + show_seg_result) +from mmdet3d.datasets import build_dataset + + +def parse_args(): + parser = argparse.ArgumentParser(description='Browse a dataset') + parser.add_argument('config', help='train config file path') + parser.add_argument( + '--skip-type', + type=str, + nargs='+', + default=['Normalize'], + help='skip some useless pipeline') + parser.add_argument( + '--output-dir', + default=None, + type=str, + help='If there is no display interface, you can save it') + parser.add_argument( + '--task', + type=str, + choices=['det', 'seg', 'multi_modality-det', 'mono-det'], + help='Determine the visualization method depending on the task.') + parser.add_argument( + '--online', + action='store_true', + help='Whether to perform online visualization. Note that you often ' + 'need a monitor to do so.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def build_data_cfg(config_path, skip_type, cfg_options): + """Build data config for loading visualization data.""" + cfg = Config.fromfile(config_path) + if cfg_options is not None: + cfg.merge_from_dict(cfg_options) + # import modules from string list. 
+ if cfg.get('custom_imports', None): + from mmcv.utils import import_modules_from_strings + import_modules_from_strings(**cfg['custom_imports']) + # extract inner dataset of `RepeatDataset` as `cfg.data.train` + # so we don't need to worry about it later + if cfg.data.train['type'] == 'RepeatDataset': + cfg.data.train = cfg.data.train.dataset + # use only first dataset for `ConcatDataset` + if cfg.data.train['type'] == 'ConcatDataset': + cfg.data.train = cfg.data.train.datasets[0] + train_data_cfg = cfg.data.train + # eval_pipeline purely consists of loading functions + # use eval_pipeline for data loading + train_data_cfg['pipeline'] = [ + x for x in cfg.eval_pipeline if x['type'] not in skip_type + ] + + return cfg + + +def to_depth_mode(points, bboxes): + """Convert points and bboxes to Depth Coord and Depth Box mode.""" + if points is not None: + points = Coord3DMode.convert_point(points.copy(), Coord3DMode.LIDAR, + Coord3DMode.DEPTH) + if bboxes is not None: + bboxes = Box3DMode.convert(bboxes.clone(), Box3DMode.LIDAR, + Box3DMode.DEPTH) + return points, bboxes + + +def show_det_data(idx, dataset, out_dir, filename, show=False): + """Visualize 3D point cloud and 3D bboxes.""" + example = dataset.prepare_train_data(idx) + points = example['points']._data.numpy() + gt_bboxes = dataset.get_ann_info(idx)['gt_bboxes_3d'].tensor + if dataset.box_mode_3d != Box3DMode.DEPTH: + points, gt_bboxes = to_depth_mode(points, gt_bboxes) + show_result( + points, + gt_bboxes.clone(), + None, + out_dir, + filename, + show=show, + snapshot=True) + + +def show_seg_data(idx, dataset, out_dir, filename, show=False): + """Visualize 3D point cloud and segmentation mask.""" + example = dataset.prepare_train_data(idx) + points = example['points']._data.numpy() + gt_seg = example['pts_semantic_mask']._data.numpy() + show_seg_result( + points, + gt_seg.copy(), + None, + out_dir, + filename, + np.array(dataset.PALETTE), + dataset.ignore_index, + show=show, + snapshot=True) + + +def show_proj_bbox_img(idx, + dataset, + out_dir, + filename, + show=False, + is_nus_mono=False): + """Visualize 3D bboxes on 2D image by projection.""" + try: + example = dataset.prepare_train_data(idx) + except AttributeError: # for Mono-3D datasets + example = dataset.prepare_train_img(idx) + gt_bboxes = dataset.get_ann_info(idx)['gt_bboxes_3d'] + img_metas = example['img_metas']._data + img = example['img']._data.numpy() + # need to transpose channel to first dim + img = img.transpose(1, 2, 0) + # no 3D gt bboxes, just show img + if gt_bboxes.tensor.shape[0] == 0: + gt_bboxes = None + if isinstance(gt_bboxes, DepthInstance3DBoxes): + show_multi_modality_result( + img, + gt_bboxes, + None, + None, + out_dir, + filename, + box_mode='depth', + img_metas=img_metas, + show=show) + elif isinstance(gt_bboxes, LiDARInstance3DBoxes): + show_multi_modality_result( + img, + gt_bboxes, + None, + img_metas['lidar2img'], + out_dir, + filename, + box_mode='lidar', + img_metas=img_metas, + show=show) + elif isinstance(gt_bboxes, CameraInstance3DBoxes): + show_multi_modality_result( + img, + gt_bboxes, + None, + img_metas['cam2img'], + out_dir, + filename, + box_mode='camera', + img_metas=img_metas, + show=show) + else: + # can't project, just show img + warnings.warn( + f'unrecognized gt box type {type(gt_bboxes)}, only show image') + show_multi_modality_result( + img, None, None, None, out_dir, filename, show=show) + + +def main(): + args = parse_args() + + if args.output_dir is not None: + mkdir_or_exist(args.output_dir) + + cfg = 
+    try:
+        dataset = build_dataset(
+            cfg.data.train, default_args=dict(filter_empty_gt=False))
+    except TypeError:  # seg datasets don't have the `filter_empty_gt` key
+        dataset = build_dataset(cfg.data.train)
+    data_infos = dataset.data_infos
+    dataset_type = cfg.dataset_type
+
+    # configure visualization mode
+    vis_task = args.task  # 'det', 'seg', 'multi_modality-det', 'mono-det'
+
+    for idx, data_info in enumerate(track_iter_progress(data_infos)):
+        if dataset_type in ['KittiDataset', 'WaymoDataset']:
+            data_path = data_info['point_cloud']['velodyne_path']
+        elif dataset_type in [
+                'ScanNetDataset', 'SUNRGBDDataset', 'ScanNetSegDataset',
+                'S3DISSegDataset', 'S3DISDataset'
+        ]:
+            data_path = data_info['pts_path']
+        elif dataset_type in ['NuScenesDataset', 'LyftDataset']:
+            data_path = data_info['lidar_path']
+        elif dataset_type in ['NuScenesMonoDataset']:
+            data_path = data_info['file_name']
+        else:
+            raise NotImplementedError(
+                f'unsupported dataset type {dataset_type}')
+
+        file_name = osp.splitext(osp.basename(data_path))[0]
+
+        if vis_task in ['det', 'multi_modality-det']:
+            # show 3D bboxes on 3D point clouds
+            show_det_data(
+                idx, dataset, args.output_dir, file_name, show=args.online)
+        if vis_task in ['multi_modality-det', 'mono-det']:
+            # project 3D bboxes onto the 2D image
+            show_proj_bbox_img(
+                idx,
+                dataset,
+                args.output_dir,
+                file_name,
+                show=args.online,
+                is_nus_mono=(dataset_type == 'NuScenesMonoDataset'))
+        elif vis_task in ['seg']:
+            # show the 3D segmentation mask on 3D point clouds
+            show_seg_data(
+                idx, dataset, args.output_dir, file_name, show=args.online)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/adzoo/vad/misc/fuse_conv_bn.py b/adzoo/vad/misc/fuse_conv_bn.py
new file mode 100644
index 0000000..d4e2201
--- /dev/null
+++ b/adzoo/vad/misc/fuse_conv_bn.py
@@ -0,0 +1,67 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+import torch
+from mmcv.runner import save_checkpoint
+from torch import nn as nn
+
+from mmdet.apis import init_model
+
+
+def fuse_conv_bn(conv, bn):
+    """During inference, the functionality of batch norm layers is turned
+    off: only the running mean and the variance along channels are used.
+    This exposes the chance to fuse BN with the preceding conv layer to
+    save computation and simplify the network structure."""
+    conv_w = conv.weight
+    conv_b = conv.bias if conv.bias is not None else torch.zeros_like(
+        bn.running_mean)
+
+    factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)
+    conv.weight = nn.Parameter(conv_w *
+                               factor.reshape([conv.out_channels, 1, 1, 1]))
+    conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias)
+    return conv
+
+
+def fuse_module(m):
+    last_conv = None
+    last_conv_name = None
+
+    for name, child in m.named_children():
+        if isinstance(child, (nn.BatchNorm2d, nn.SyncBatchNorm)):
+            if last_conv is None:  # only fuse BN that comes after a Conv
+                continue
+            fused_conv = fuse_conv_bn(last_conv, child)
+            m._modules[last_conv_name] = fused_conv
+            # To reduce changes, set BN as Identity instead of deleting it.
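+            # The fusion above folds BN into the conv parameters:
+            #     w' = w * gamma / sqrt(running_var + eps)
+            #     b' = (b - running_mean) * gamma / sqrt(running_var + eps) + beta
+            # so replacing BN with Identity keeps the module tree intact
+            # while the fused conv reproduces the conv+BN output.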
+            m._modules[name] = nn.Identity()
+            last_conv = None
+        elif isinstance(child, nn.Conv2d):
+            last_conv = child
+            last_conv_name = name
+        else:
+            fuse_module(child)
+    return m
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='fuse Conv and BN layers in a model')
+    parser.add_argument('config', help='config file path')
+    parser.add_argument('checkpoint', help='checkpoint file path')
+    parser.add_argument('out', help='output path of the converted model')
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = parse_args()
+    # build the model from a config file and a checkpoint file
+    model = init_model(args.config, args.checkpoint)
+    # fuse conv and bn layers of the model
+    fused_model = fuse_module(model)
+    save_checkpoint(fused_model, args.out)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/adzoo/vad/misc/print_config.py b/adzoo/vad/misc/print_config.py
new file mode 100644
index 0000000..3100fc3
--- /dev/null
+++ b/adzoo/vad/misc/print_config.py
@@ -0,0 +1,26 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+from mmcv import Config, DictAction
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Print the whole config')
+    parser.add_argument('config', help='config file path')
+    parser.add_argument(
+        '--options', nargs='+', action=DictAction, help='arguments in dict')
+    args = parser.parse_args()
+
+    return args
+
+
+def main():
+    args = parse_args()
+
+    cfg = Config.fromfile(args.config)
+    if args.options is not None:
+        cfg.merge_from_dict(args.options)
+    print(f'Config:\n{cfg.pretty_text}')
+
+
+if __name__ == '__main__':
+    main()
diff --git a/adzoo/vad/misc/visualize_results.py b/adzoo/vad/misc/visualize_results.py
new file mode 100644
index 0000000..302adc5
--- /dev/null
+++ b/adzoo/vad/misc/visualize_results.py
@@ -0,0 +1,49 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+import mmcv
+from mmcv import Config
+
+from mmdet3d.datasets import build_dataset
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='MMDet3D visualize the results')
+    parser.add_argument('config', help='test config file path')
+    parser.add_argument('--result', help='results file in pickle format')
+    parser.add_argument(
+        '--show-dir', help='directory where visualized results will be saved')
+    args = parser.parse_args()
+
+    return args
+
+
+def main():
+    args = parse_args()
+
+    if args.result is not None and \
+            not args.result.endswith(('.pkl', '.pickle')):
+        raise ValueError('The results file must be a pkl file.')
+
+    cfg = Config.fromfile(args.config)
+    cfg.data.test.test_mode = True
+
+    # build the dataset
+    dataset = build_dataset(cfg.data.test)
+    results = mmcv.load(args.result)
+
+    if getattr(dataset, 'show', None) is not None:
+        # data loading pipeline for showing
+        eval_pipeline = cfg.get('eval_pipeline', {})
+        if eval_pipeline:
+            dataset.show(results, args.show_dir, pipeline=eval_pipeline)
+        else:
+            dataset.show(results, args.show_dir)  # use default pipeline
+    else:
+        raise NotImplementedError(
+            'Show is not implemented for dataset {}!'.format(
+                type(dataset).__name__))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/adzoo/vad/model_converters/convert_votenet_checkpoints.py b/adzoo/vad/model_converters/convert_votenet_checkpoints.py
new file mode 100644
index 0000000..33792b0
--- /dev/null
+++ b/adzoo/vad/model_converters/convert_votenet_checkpoints.py
@@ -0,0 +1,152 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+import tempfile
+import torch
+from mmcv import Config
+from mmcv.runner import load_state_dict
+
+from mmdet3d.models import build_detector
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='MMDet3D upgrade model version (before v0.6.0) of VoteNet')
+    parser.add_argument('checkpoint', help='checkpoint file')
+    parser.add_argument('--out', help='path of the output checkpoint file')
+    args = parser.parse_args()
+    return args
+
+
+def parse_config(config_strings):
+    """Parse config from strings.
+
+    Args:
+        config_strings (str): Strings of the model config.
+
+    Returns:
+        Config: model config
+    """
+    temp_file = tempfile.NamedTemporaryFile()
+    config_path = f'{temp_file.name}.py'
+    with open(config_path, 'w') as f:
+        f.write(config_strings)
+
+    config = Config.fromfile(config_path)
+
+    # Update backbone config
+    if 'pool_mod' in config.model.backbone:
+        config.model.backbone.pop('pool_mod')
+
+    if 'sa_cfg' not in config.model.backbone:
+        config.model.backbone['sa_cfg'] = dict(
+            type='PointSAModule',
+            pool_mod='max',
+            use_xyz=True,
+            normalize_xyz=True)
+
+    if 'type' not in config.model.bbox_head.vote_aggregation_cfg:
+        config.model.bbox_head.vote_aggregation_cfg['type'] = 'PointSAModule'
+
+    # Update bbox_head config
+    if 'pred_layer_cfg' not in config.model.bbox_head:
+        config.model.bbox_head['pred_layer_cfg'] = dict(
+            in_channels=128, shared_conv_channels=(128, 128), bias=True)
+
+    if 'feat_channels' in config.model.bbox_head:
+        config.model.bbox_head.pop('feat_channels')
+
+    # old configs misspelled the key as `vote_moudule_cfg`; rename it
+    if 'vote_moudule_cfg' in config.model.bbox_head:
+        config.model.bbox_head['vote_module_cfg'] = config.model.bbox_head.pop(
+            'vote_moudule_cfg')
+
+    if config.model.bbox_head.vote_aggregation_cfg.use_xyz:
+        config.model.bbox_head.vote_aggregation_cfg.mlp_channels[0] -= 3
+
+    temp_file.close()
+
+    return config
+
+
+def main():
+    """Convert keys in checkpoints for VoteNet.
+
+    There can be some breaking changes during the development of
+    mmdetection3d, and this tool is used for upgrading checkpoints trained
+    with old versions (before v0.6.0) to the latest one.
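+
+    Usage sketch (the checkpoint paths are illustrative):
+        python convert_votenet_checkpoints.py old_votenet.pth \
+            --out upgraded_votenet.pth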
+ """ + args = parse_args() + checkpoint = torch.load(args.checkpoint) + cfg = parse_config(checkpoint['meta']['config']) + # Build the model and load checkpoint + model = build_detector( + cfg.model, + train_cfg=cfg.get('train_cfg'), + test_cfg=cfg.get('test_cfg')) + orig_ckpt = checkpoint['state_dict'] + converted_ckpt = orig_ckpt.copy() + + if cfg['dataset_type'] == 'ScanNetDataset': + NUM_CLASSES = 18 + elif cfg['dataset_type'] == 'SUNRGBDDataset': + NUM_CLASSES = 10 + else: + raise NotImplementedError + + RENAME_PREFIX = { + 'bbox_head.conv_pred.0': 'bbox_head.conv_pred.shared_convs.layer0', + 'bbox_head.conv_pred.1': 'bbox_head.conv_pred.shared_convs.layer1' + } + + DEL_KEYS = [ + 'bbox_head.conv_pred.0.bn.num_batches_tracked', + 'bbox_head.conv_pred.1.bn.num_batches_tracked' + ] + + EXTRACT_KEYS = { + 'bbox_head.conv_pred.conv_cls.weight': + ('bbox_head.conv_pred.conv_out.weight', [(0, 2), (-NUM_CLASSES, -1)]), + 'bbox_head.conv_pred.conv_cls.bias': + ('bbox_head.conv_pred.conv_out.bias', [(0, 2), (-NUM_CLASSES, -1)]), + 'bbox_head.conv_pred.conv_reg.weight': + ('bbox_head.conv_pred.conv_out.weight', [(2, -NUM_CLASSES)]), + 'bbox_head.conv_pred.conv_reg.bias': + ('bbox_head.conv_pred.conv_out.bias', [(2, -NUM_CLASSES)]) + } + + # Delete some useless keys + for key in DEL_KEYS: + converted_ckpt.pop(key) + + # Rename keys with specific prefix + RENAME_KEYS = dict() + for old_key in converted_ckpt.keys(): + for rename_prefix in RENAME_PREFIX.keys(): + if rename_prefix in old_key: + new_key = old_key.replace(rename_prefix, + RENAME_PREFIX[rename_prefix]) + RENAME_KEYS[new_key] = old_key + for new_key, old_key in RENAME_KEYS.items(): + converted_ckpt[new_key] = converted_ckpt.pop(old_key) + + # Extract weights and rename the keys + for new_key, (old_key, indices) in EXTRACT_KEYS.items(): + cur_layers = orig_ckpt[old_key] + converted_layers = [] + for (start, end) in indices: + if end != -1: + converted_layers.append(cur_layers[start:end]) + else: + converted_layers.append(cur_layers[start:]) + converted_layers = torch.cat(converted_layers, 0) + converted_ckpt[new_key] = converted_layers + if old_key in converted_ckpt.keys(): + converted_ckpt.pop(old_key) + + # Check the converted checkpoint by loading to the model + load_state_dict(model, converted_ckpt, strict=True) + checkpoint['state_dict'] = converted_ckpt + torch.save(checkpoint, args.out) + + +if __name__ == '__main__': + main() diff --git a/adzoo/vad/model_converters/publish_model.py b/adzoo/vad/model_converters/publish_model.py new file mode 100644 index 0000000..318fd46 --- /dev/null +++ b/adzoo/vad/model_converters/publish_model.py @@ -0,0 +1,35 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import subprocess +import torch + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Process a checkpoint to be published') + parser.add_argument('in_file', help='input checkpoint filename') + parser.add_argument('out_file', help='output checkpoint filename') + args = parser.parse_args() + return args + + +def process_checkpoint(in_file, out_file): + checkpoint = torch.load(in_file, map_location='cpu') + # remove optimizer for smaller file size + if 'optimizer' in checkpoint: + del checkpoint['optimizer'] + # if it is necessary to remove some sensitive data in checkpoint['meta'], + # add the code here. 
+    torch.save(checkpoint, out_file)
+    sha = subprocess.check_output(['sha256sum', out_file]).decode()
+    # note: str.rstrip('.pth') would strip any trailing '.', 'p', 't' or 'h'
+    # characters rather than the suffix, so cut the extension off explicitly
+    if out_file.endswith('.pth'):
+        out_file_prefix = out_file[:-len('.pth')]
+    else:
+        out_file_prefix = out_file
+    final_file = out_file_prefix + '-{}.pth'.format(sha[:8])
+    subprocess.Popen(['mv', out_file, final_file])
+
+
+def main():
+    args = parse_args()
+    process_checkpoint(args.in_file, args.out_file)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/adzoo/vad/model_converters/regnet2mmdet.py b/adzoo/vad/model_converters/regnet2mmdet.py
new file mode 100644
index 0000000..9dee3c8
--- /dev/null
+++ b/adzoo/vad/model_converters/regnet2mmdet.py
@@ -0,0 +1,89 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+import torch
+from collections import OrderedDict
+
+
+def convert_stem(model_key, model_weight, state_dict, converted_names):
+    new_key = model_key.replace('stem.conv', 'conv1')
+    new_key = new_key.replace('stem.bn', 'bn1')
+    state_dict[new_key] = model_weight
+    converted_names.add(model_key)
+    print(f'Convert {model_key} to {new_key}')
+
+
+def convert_head(model_key, model_weight, state_dict, converted_names):
+    new_key = model_key.replace('head.fc', 'fc')
+    state_dict[new_key] = model_weight
+    converted_names.add(model_key)
+    print(f'Convert {model_key} to {new_key}')
+
+
+def convert_reslayer(model_key, model_weight, state_dict, converted_names):
+    split_keys = model_key.split('.')
+    layer, block, module = split_keys[:3]
+    block_id = int(block[1:])
+    layer_name = f'layer{int(layer[1:])}'
+    block_name = f'{block_id - 1}'
+
+    if block_id == 1 and module == 'bn':
+        new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}'
+    elif block_id == 1 and module == 'proj':
+        new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}'
+    elif module == 'f':
+        if split_keys[3] == 'a_bn':
+            module_name = 'bn1'
+        elif split_keys[3] == 'b_bn':
+            module_name = 'bn2'
+        elif split_keys[3] == 'c_bn':
+            module_name = 'bn3'
+        elif split_keys[3] == 'a':
+            module_name = 'conv1'
+        elif split_keys[3] == 'b':
+            module_name = 'conv2'
+        elif split_keys[3] == 'c':
+            module_name = 'conv3'
+        new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}'
+    else:
+        raise ValueError(f'Unsupported conversion of key {model_key}')
+    print(f'Convert {model_key} to {new_key}')
+    state_dict[new_key] = model_weight
+    converted_names.add(model_key)
+
+
+def convert(src, dst):
+    """Convert keys in pycls pretrained RegNet models to mmdet style."""
+    # load the pycls model
+    regnet_model = torch.load(src)
+    blobs = regnet_model['model_state']
+    # convert to pytorch style
+    state_dict = OrderedDict()
+    converted_names = set()
+    for key, weight in blobs.items():
+        if 'stem' in key:
+            convert_stem(key, weight, state_dict, converted_names)
+        elif 'head' in key:
+            convert_head(key, weight, state_dict, converted_names)
+        elif key.startswith('s'):
+            convert_reslayer(key, weight, state_dict, converted_names)
+
+    # check if all layers are converted
+    for key in blobs:
+        if key not in converted_names:
+            print(f'not converted: {key}')
+    # save the checkpoint
+    checkpoint = dict()
+    checkpoint['state_dict'] = state_dict
+    torch.save(checkpoint, dst)
+
+
+def main():
+    parser = argparse.ArgumentParser(description='Convert model keys')
+    parser.add_argument('src', help='src pycls model path')
+    parser.add_argument('dst', help='save path')
+    args = parser.parse_args()
+    convert(args.src, args.dst)
+
+
+if __name__ == '__main__':
+    main()
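+# Usage sketch (the checkpoint names are illustrative, not shipped files):
+#     python regnet2mmdet.py RegNetX-3.2GF_dds_8gpu.pyth regnetx_3.2gf_mmdet.pth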
diff --git a/adzoo/vad/test.py b/adzoo/vad/test.py
new file mode 100644
index 0000000..1733443
--- /dev/null
+++ b/adzoo/vad/test.py
@@ -0,0 +1,277 @@
+# ---------------------------------------------
+# Copyright (c) OpenMMLab. All rights reserved.
+# ---------------------------------------------
+# Modified by Zhiqi Li
+# ---------------------------------------------
+import argparse
+import os
+import torch
+import warnings
+from mmcv.utils import get_dist_info, init_dist, wrap_fp16_model, set_random_seed, Config, DictAction, load_checkpoint
+from mmcv.models import build_model, fuse_conv_bn
+from torch.nn import DataParallel
+from torch.nn.parallel.distributed import DistributedDataParallel
+
+from mmcv.datasets import build_dataset, build_dataloader, replace_ImageToTensor
+import time
+import os.path as osp
+from adzoo.vad.apis.test import custom_multi_gpu_test, single_gpu_test
+
+warnings.filterwarnings("ignore")
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='MMDet test (and eval) a model')
+    parser.add_argument('config', help='test config file path')
+    parser.add_argument('checkpoint', help='checkpoint file')
+    # NOTE: parent folder for the json metric record files
+    parser.add_argument(
+        '--json_dir', help='parent directory for the json metric files')
+    parser.add_argument('--out', help='output result file in pickle format')
+    parser.add_argument(
+        '--fuse-conv-bn',
+        action='store_true',
+        help='Whether to fuse conv and bn; this will slightly increase '
+        'the inference speed')
+    parser.add_argument(
+        '--format-only',
+        action='store_true',
+        help='Format the output results without performing evaluation. It is '
+        'useful when you want to format the results to a specific format and '
+        'submit them to the test server')
+    parser.add_argument(
+        '--eval',
+        type=str,
+        nargs='+',
+        help='evaluation metrics, which depend on the dataset, e.g., "bbox",'
+        ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
+    parser.add_argument('--show', action='store_true', help='show results')
+    parser.add_argument(
+        '--show-dir', help='directory where results will be saved')
+    parser.add_argument(
+        '--gpu-collect',
+        action='store_true',
+        help='whether to use gpu to collect results.')
+    parser.add_argument(
+        '--tmpdir',
+        help='tmp directory used for collecting results from multiple '
+        'workers, available when gpu-collect is not specified')
+    parser.add_argument('--seed', type=int, default=0, help='random seed')
+    parser.add_argument(
+        '--deterministic',
+        action='store_true',
+        help='whether to set deterministic options for CUDNN backend.')
+    parser.add_argument(
+        '--cfg-options',
+        nargs='+',
+        action=DictAction,
+        help='override some settings in the used config, the key-value pair '
+        'in xxx=yyy format will be merged into the config file. If the value '
+        'to be overwritten is a list, it should be like key="[a,b]" or '
+        'key=a,b. It also allows nested list/tuple values, e.g. '
+        'key="[(a,b),(c,d)]". Note that the quotation marks are necessary '
+        'and that no white space is allowed.')
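+    # e.g. (illustrative overrides; these keys are referenced elsewhere in
+    # this script, not required on the command line):
+    #     --cfg-options data.workers_per_gpu=0 data.test.samples_per_gpu=1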
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--options', + nargs='+', + action=DictAction, + help='custom options for evaluation, the key-value pair in xxx=yyy ' + 'format will be kwargs for dataset.evaluate() function (deprecate), ' + 'change to --eval-options instead.') + parser.add_argument( + '--eval-options', + nargs='+', + action=DictAction, + help='custom options for evaluation, the key-value pair in xxx=yyy ' + 'format will be kwargs for dataset.evaluate() function') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local-rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + if args.options and args.eval_options: + raise ValueError( + '--options and --eval-options cannot be both specified, ' + '--options is deprecated in favor of --eval-options') + if args.options: + warnings.warn('--options is deprecated in favor of --eval-options') + args.eval_options = args.options + return args + + +def main(): + args = parse_args() + + assert args.out or args.eval or args.format_only or args.show \ + or args.show_dir, \ + ('Please specify at least one operation (save/eval/format/show the ' + 'results / save the results) with the argument "--out", "--eval"' + ', "--format-only", "--show" or "--show-dir"') + + if args.eval and args.format_only: + raise ValueError('--eval and --format_only cannot be both specified') + + if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): + raise ValueError('The output file must be a pkl file.') + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + # import modules from string list. + if cfg.get('custom_imports', None): + from mmcv.utils import import_modules_from_strings + import_modules_from_strings(**cfg['custom_imports']) + + # import modules from plguin/xx, registry will be updated + # if hasattr(cfg, 'plugin'): + # if cfg.plugin: + # import importlib + # if hasattr(cfg, 'plugin_dir'): + # plugin_dir = cfg.plugin_dir + # _module_dir = os.path.dirname(plugin_dir) + # _module_dir = _module_dir.split('/') + # _module_path = _module_dir[0] + + # for m in _module_dir[1:]: + # _module_path = _module_path + '.' + m + # print(_module_path) + # plg_lib = importlib.import_module(_module_path) + # else: + # # import dir is the dirpath for the config file + # _module_dir = os.path.dirname(args.config) + # _module_dir = _module_dir.split('/') + # _module_path = _module_dir[0] + # for m in _module_dir[1:]: + # _module_path = _module_path + '.' 
+    # set cudnn_benchmark
+    if cfg.get('cudnn_benchmark', False):
+        torch.backends.cudnn.benchmark = True
+
+    if cfg.get('close_tf32', False):
+        torch.backends.cuda.matmul.allow_tf32 = False
+        torch.backends.cudnn.allow_tf32 = False
+
+    cfg.model.pretrained = None
+    # in case the test dataset is concatenated
+    samples_per_gpu = 1
+    if isinstance(cfg.data.test, dict):
+        cfg.data.test.test_mode = True
+        samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
+        if samples_per_gpu > 1:
+            # Replace 'ImageToTensor' with 'DefaultFormatBundle'
+            cfg.data.test.pipeline = replace_ImageToTensor(
+                cfg.data.test.pipeline)
+    elif isinstance(cfg.data.test, list):
+        for ds_cfg in cfg.data.test:
+            ds_cfg.test_mode = True
+        samples_per_gpu = max(
+            [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
+        if samples_per_gpu > 1:
+            for ds_cfg in cfg.data.test:
+                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
+
+    # init the distributed env first, since the logger depends on the dist info.
+    if args.launcher == 'none':
+        distributed = False
+    else:
+        distributed = True
+        init_dist(args.launcher, **cfg.dist_params)
+
+    # set random seeds
+    if args.seed is not None:
+        set_random_seed(args.seed, deterministic=args.deterministic)
+
+    # build the dataloader
+    dataset = build_dataset(cfg.data.test)
+    data_loader = build_dataloader(
+        dataset,
+        samples_per_gpu=samples_per_gpu,
+        workers_per_gpu=cfg.data.workers_per_gpu,
+        dist=distributed,
+        shuffle=False,
+        nonshuffler_sampler=cfg.data.nonshuffler_sampler,
+    )
+
+    # build the model and load checkpoint
+    cfg.model.train_cfg = None
+    model = build_model(cfg.model, test_cfg=cfg.get('test_cfg'))
+    fp16_cfg = cfg.get('fp16', None)
+    if fp16_cfg is not None:
+        wrap_fp16_model(model)
+    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
+    if args.fuse_conv_bn:
+        model = fuse_conv_bn(model)
+    # old versions did not save class info in checkpoints; this workaround is
+    # for backward compatibility
+    if 'CLASSES' in checkpoint.get('meta', {}):
+        model.CLASSES = checkpoint['meta']['CLASSES']
+    else:
+        model.CLASSES = dataset.CLASSES
+    # palette for visualization in segmentation tasks
+    if 'PALETTE' in checkpoint.get('meta', {}):
+        model.PALETTE = checkpoint['meta']['PALETTE']
+    elif hasattr(dataset, 'PALETTE'):
+        # segmentation dataset has a `PALETTE` attribute
+        model.PALETTE = dataset.PALETTE
+
+    if not distributed:
+        model = DataParallel(model, device_ids=[0])
+        outputs = single_gpu_test(model, data_loader)
+    else:
+        model = DistributedDataParallel(
+            model.cuda(),
+            device_ids=[torch.cuda.current_device()],
+            broadcast_buffers=False)
+        outputs = custom_multi_gpu_test(model, data_loader, args.tmpdir,
+                                        args.gpu_collect)
+
+    rank, _ = get_dist_info()
+    if rank == 0:
+        if args.out:
+            print(f'\nwriting results to {args.out}')
+        kwargs = {} if args.eval_options is None else args.eval_options
+        kwargs['jsonfile_prefix'] = osp.join(
+            'test', args.config.split('/')[-1].split('.')[-2],
+            time.ctime().replace(' ', '_').replace(':', '_'))
+        if args.format_only:
+            dataset.format_results(outputs, **kwargs)
+
+        if args.eval:
+            eval_kwargs = cfg.get('evaluation', {}).copy()
+            # hard-coded way to remove EvalHook args
+            for key in [
+                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
+                    'rule'
+            ]:
+                eval_kwargs.pop(key, None)
+            eval_kwargs.update(dict(metric=args.eval, **kwargs))
+
+            print(dataset.evaluate(outputs['bbox_results'], **eval_kwargs))
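+            # e.g. `--eval bbox` arrives at the call above as (illustrative)
+            # eval_kwargs = dict(metric=['bbox'], jsonfile_prefix=..., **eval_options)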
+            # # NOTE: record the per-sample metrics to json
+            # json_path = args.json_dir
+            # if not os.path.exists(json_path):
+            #     os.makedirs(json_path)
+
+            # metric_all = []
+            # for res in outputs['bbox_results']:
+            #     for k in res['metric_results'].keys():
+            #         if type(res['metric_results'][k]) is np.ndarray:
+            #             res['metric_results'][k] = res['metric_results'][k].tolist()
+            #     metric_all.append(res['metric_results'])
+
+            # print('start saving to json')
+            # with open(json_path + '/metric_record.json', "w", encoding="utf-8") as f2:
+            #     json.dump(metric_all, f2, indent=4)
+            # print('save to json done')
+
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/adzoo/vad/train.py b/adzoo/vad/train.py
new file mode 100644
index 0000000..d880d4f
--- /dev/null
+++ b/adzoo/vad/train.py
@@ -0,0 +1,237 @@
+# ---------------------------------------------
+# Copyright (c) OpenMMLab. All rights reserved.
+# ---------------------------------------------
+# Modified by Zhiqi Li
+# ---------------------------------------------
+
+from __future__ import division
+
+import argparse
+import copy
+import mmcv
+import os
+import time
+import torch
+import warnings
+from mmcv import Config, DictAction
+from mmcv.utils import get_dist_info, init_dist
+from os import path as osp
+
+from mmcv.datasets import build_dataset
+from mmcv.models import build_model
+from mmcv.utils import collect_env, get_root_logger
+from mmcv.utils import set_random_seed
+from mmcv.utils import TORCH_VERSION, digit_version
+from adzoo.bevformer.mmdet3d_plugin.bevformer.apis.train import custom_train_model
+
+import cv2
+cv2.setNumThreads(1)
+
+import sys
+sys.path.append('')
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Train a detector')
+    parser.add_argument('config', help='train config file path')
+    parser.add_argument('--work-dir', help='the dir to save logs and models')
+    parser.add_argument(
+        '--resume-from', help='the checkpoint file to resume from')
+    parser.add_argument(
+        '--no-validate',
+        action='store_true',
+        help='whether not to evaluate the checkpoint during training')
+    group_gpus = parser.add_mutually_exclusive_group()
+    group_gpus.add_argument(
+        '--gpus',
+        type=int,
+        help='number of gpus to use '
+        '(only applicable to non-distributed training)')
+    group_gpus.add_argument(
+        '--gpu-ids',
+        type=int,
+        nargs='+',
+        help='ids of gpus to use '
+        '(only applicable to non-distributed training)')
+    parser.add_argument('--seed', type=int, default=0, help='random seed')
+    parser.add_argument(
+        '--deterministic',
+        action='store_true',
+        help='whether to set deterministic options for CUDNN backend.')
+    parser.add_argument(
+        '--options',
+        nargs='+',
+        action=DictAction,
+        help='override some settings in the used config, the key-value pair '
+        'in xxx=yyy format will be merged into the config file (deprecated), '
+        'change to --cfg-options instead.')
+    parser.add_argument(
+        '--cfg-options',
+        nargs='+',
+        action=DictAction,
+        help='override some settings in the used config, the key-value pair '
+        'in xxx=yyy format will be merged into the config file. If the value '
+        'to be overwritten is a list, it should be like key="[a,b]" or '
+        'key=a,b. It also allows nested list/tuple values, e.g. '
+        'key="[(a,b),(c,d)]". Note that the quotation marks are necessary '
+        'and that no white space is allowed.')
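+    # (note: --deterministic trades some speed for reproducibility; it is
+    # what set_random_seed(..., deterministic=True) toggles in main() below)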
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local-rank', type=int, default=0) + parser.add_argument( + '--autoscale-lr', + action='store_true', + help='automatically scale lr with the number of gpus') + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + if args.options and args.cfg_options: + raise ValueError( + '--options and --cfg-options cannot be both specified, ' + '--options is deprecated in favor of --cfg-options') + if args.options: + warnings.warn('--options is deprecated in favor of --cfg-options') + args.cfg_options = args.options + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + # import modules from string list. + if cfg.get('custom_imports', None): + from mmcv.utils import import_modules_from_strings + import_modules_from_strings(**cfg['custom_imports']) + + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + # set tf32 + if cfg.get('close_tf32', False): + torch.backends.cuda.matmul.allow_tf32 = False + torch.backends.cudnn.allow_tf32 = False + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + # if args.resume_from is not None: + if args.resume_from is not None and osp.isfile(args.resume_from): + cfg.resume_from = args.resume_from + if args.gpu_ids is not None: + cfg.gpu_ids = args.gpu_ids + else: + cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus) + if digit_version(TORCH_VERSION) == digit_version('1.8.1') and cfg.optimizer['type'] == 'AdamW': + cfg.optimizer['type'] = 'AdamW2' # fix bug in Adamw + if args.autoscale_lr: + # apply the linear scaling rule (https://arxiv.org/abs/1706.02677) + cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8 + + # init distributed env first, since logger depends on the dist info. 
+    # init the distributed env first, since the logger depends on the dist info.
+    if args.launcher == 'none':
+        distributed = False
+    else:
+        distributed = True
+        init_dist(args.launcher, **cfg.dist_params)
+        # re-set gpu_ids with the distributed training mode
+        _, world_size = get_dist_info()
+        cfg.gpu_ids = range(world_size)
+
+    # create work_dir
+    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
+    # dump config
+    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
+    # init the logger before other steps
+    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
+    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
+    # specify the logger name; if we still used 'mmdet', the output info
+    # would be filtered and not saved to log_file
+    # TODO: ugly workaround to judge whether we are training a det or seg model
+    if cfg.model.type in ['EncoderDecoder3D']:
+        logger_name = 'mmseg'
+    else:
+        logger_name = 'mmdet'
+    logger = get_root_logger(
+        log_file=log_file, log_level=cfg.log_level, name=logger_name)
+
+    # init the meta dict to record some important information such as
+    # environment info and seed, which will be logged
+    meta = dict()
+    # log env info
+    env_info_dict = collect_env()
+    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
+    dash_line = '-' * 60 + '\n'
+    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
+                dash_line)
+    meta['env_info'] = env_info
+    meta['config'] = cfg.pretty_text
+
+    # log some basic info
+    logger.info(f'Distributed training: {distributed}')
+    logger.info(f'Config:\n{cfg.pretty_text}')
+
+    # set random seeds
+    if args.seed is not None:
+        logger.info(f'Set random seed to {args.seed}, '
+                    f'deterministic: {args.deterministic}')
+        set_random_seed(args.seed, deterministic=args.deterministic)
+    cfg.seed = args.seed
+    meta['seed'] = args.seed
+    meta['exp_name'] = osp.basename(args.config)
+
+    model = build_model(
+        cfg.model,
+        train_cfg=cfg.get('train_cfg'),
+        test_cfg=cfg.get('test_cfg'))
+    model.init_weights()
+
+    logger.info(f'Model:\n{model}')
+    datasets = [build_dataset(cfg.data.train)]
+    if len(cfg.workflow) == 2:
+        val_dataset = copy.deepcopy(cfg.data.val)
+        # in case we use a dataset wrapper
+        if 'dataset' in cfg.data.train:
+            val_dataset.pipeline = cfg.data.train.dataset.pipeline
+        else:
+            val_dataset.pipeline = cfg.data.train.pipeline
+        # set test_mode=False here in the deep-copied config, which does not
+        # affect the AP/AR calculation later
+        # refer to https://mmdetection3d.readthedocs.io/en/latest/tutorials/customize_runtime.html#customize-workflow  # noqa
+        val_dataset.test_mode = False
+        datasets.append(build_dataset(val_dataset))
+    if cfg.checkpoint_config is not None:
+        # save the mmdet version, config file content and class names in
+        # checkpoints as meta data
+        cfg.checkpoint_config.meta = dict(
+            config=cfg.pretty_text,
+            CLASSES=datasets[0].CLASSES,
+            PALETTE=datasets[0].PALETTE  # for segmentors
+            if hasattr(datasets[0], 'PALETTE') else None)
+    # add an attribute for visualization convenience
+    model.CLASSES = datasets[0].CLASSES
+    custom_train_model(
+        model,
+        datasets,
+        cfg,
+        distributed=distributed,
+        validate=(not args.no_validate),
+        timestamp=timestamp,
+        meta=meta)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/asserts/bench2drive.jpg b/asserts/bench2drive.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..2a8aad2cbfc9f88c0d38b0429fd962fe03faa5b0
Binary files /dev/null and b/asserts/bench2drive.jpg differ
diff --git a/asserts/bench2drivezoo.png b/asserts/bench2drivezoo.png
new file mode 100644
index 0000000000000000000000000000000000000000..f8b1b2a9575f2671489a01ea55822ba34fa426eb
Binary files /dev/null and b/asserts/bench2drivezoo.png differ
z%UrUxiWzAeV#XF7V{zvSJoT|dzYlMnYB?6n%B9tMBEvhli~s(G%MUUJLhxwRZk}~F zDqQhT@Y8Tet|H52)@sVMfl|MN+>cd2=xa%ZZV|pB(+h0Jcd+!m%3V>A_n(L|A*%j?TszA?TC@CW+1D&=f5T1^=SI%6v0|7#1KrS7TNL)(SQeOf*S942Xr;^h}sHF3$N?~L5ai}quVtdi-*Y% zIrb~pw?1+}3=M+Tav@!7I;vS02xVTLESwE6AygDmYR1sJ5_;D~oi7SXs24_R94fOZ zmri{b_8Qv!MzbF028(K%Bh%}gM)PeRD9%WTW%VthCjWvjWIGyy_3Bl4*@SJ>SppzO z2E0bE>Cs4r+3V6_!}I(Cm;Ezx=awGGoVT5wEKmyUXp z%Dme)ij`~RFi-`Z(cT~Em=yX!OH{F zqmf=!NM+#jsh#LEyxbU=7kJLXy%9dtoM;BW4rb+tr9P@knw>sN(-Jy^@oR}(G)DF# z*H`r077Gr_rsYIn{m$qAjyhH&P1uf=cTzLBl;pWl1Dpk!r-QDdE6NwNRI0z3PvB}K zS}rl(e4AHV9@pzbu7vOe#8_(qXE0Amz&t%8x1cw#TeTA6IgGv+ral=iK4oJUHTPxYn8J2PL?- zzigzteNsB_TKENQDKlgL5~*(=d|%HL*SpQ>G`kwLSLTmnfA;yU>A_$TGdbB)>DyWv zn(vl{_Rlz@eu)_}(9U>TS5S3)~PmF)C1 zdDe*BWz{jO+s)oKwHVFeNUVon%KOV!VZZgn^V73-nT&3V#^CI5-rg%2jdvQ3jSH^z zJ>e;T`7EU&<@csX0mKWQe?k4J{dhC`i2QuzA~54B@|^_sR6f`CV$;CNL*z_B+;yu- zFyx`W({?%ck4^^GFuymPT2~!&c=W@Qidth#*TqHK$ANy zqAr5|pnJ2=K??b@*0Q2XCnETqv@Mx`xX-hSry8|tgXHwI%DdSQ(JGK)>o?PY(m}3V zjp`U2v-|zkWA_@|j16Hw=9R9VDD`d94lAb!^WgC(^ef?%!_lP7q&5CI{QF0&-5z-lj%WF?tYw@b*5IZDHgBFoLrR^BZJqFkiIAMxyxhk zR*5WVzwyy}6#~8Oi#b)~8D_)X;AI*fGF$Z zW^8#TMmGFzKzp6twhkT6UwUhAwK#28+_*5KKXzD8naKkTJj1~j;|G!9me5Pywq5KR zDI*=2xU*^5l?=a9rCKBAF)^=cv-KbMlMFnjC0)NU_H7fsTcuaYNk8edW>}}l zEG7?TE~eLXA%lP3ikh51$=ZMYcU5QagA&+cDg?F6NDF^x!XADa>G5~!q**a|KqW7h zZb4-~ksEf$ENd&VWs|B+V5Vkv7YV^i3x0M&kFzwPoPolmqeL6?QT<&&xbu+sO;m16 zefTpjOiYHf&ceBBoJe;%<+So@#%{RrnpjQV->i~BlP;y(1m9i${pEa=3DX3Y32h|o zyNQk5)05zJ7LHH}SR!+CcC%9swUHD(%yoVTDw&_?<%_M#*R=ricKqGh6HMcVL`m?P zU7%Q#_t9a8b4Q0}D5w|ti-1dj2X0_Rh@w*T$(cv7rv@Fu7C&7^jenczdGh8N(h67t z_BfYcN;or4Y5jup^zgUVoQWVCcI`S}M5hNdT&C1@d=oqW-BXmLjm%E}sy3+Fg}C;a z8o&z_(?-gPUwJtY_#%;`;D)6daL&hXvVGEKWq$H#Mt7G^&&>MEY29rh9KD_sYTQIbxp;+y<7DB^Py zcP){-7VB`07W`k5q}r0qDHab=#lF2SFVMd7-bs)iHvB0_HcP|P!PX>GH6yg~9$81Q zaLFj2o|1ClbQ`!U(q=P7U&(SRC_+dV+`$V8=H+XG*F7!tr_Wn=kdMC=97va$_m}@l zLax`)`|fwsT7~IUP`IRztEp7Jc4(r-m{C|#3LjM%+CQ`zEpRc$c5YGs6a{VPd|TZr ziYDoo5w^!G?(N)d4t^Bvhl}7OZI!Ij-x}>)L64b(A4&o_LUNT>PmBxl&u>@kCHfw1 z?1t*MRU?Pqt0Tz$dsb@?)?4pvB5>hL)4MmV)jrCPM-hXf;+nbFZdECore{(T`H%K{ zAv7KCkL^po4ORUsB?znj+TevN}6kQvH9qG@R85!{CX$z)8|6&uQW z|6?=trorD!7L_k?BIgUHWJt~;XT2Nu=;2PI*QOH5iimrW|JA;k6zP{(G1IMFqK)z#Cj%}6Cx zp5Ud)2%x(*Jy)JFUs}143@{v~?RV>aEVhFtWwuZ zD=@B`D zrUq+8PO}5-4ef&Nk8A_^k7yWwKUfaJ*#>d8H;8@atZD~G!2=}AQm&~dYRlaqk6B9l zk>^_?FoXWC;XeK?nkm!g9izxe_*0FS2H&$SJ1iFsiPM{uxp)D!sx>pF{pPIT#P;Lv zP;$iA`@i%aEBJSh&d0=XXiDexPzEI+$M>#*WJ5S?OYznh;sL?n5ukD@UGh>lM)qRb zYV8O)_A)0?VJD-XDE_hx`|BbZL}Jx-+<=elgEms!-PhfnnKB4NP_3fy# zfwwU5VB;U}Z;Oqu*9p9=im zgUH!i3<^{!r8>%Z)+{--Rhx;Lt$hLsPbKkGeKf|112ya;%}FS9 z8HLiN=FJ=J^YeB^D1spsn?I3^JXY{?jHuCoacv$>RbUg(tLw!o5(=)Z5VXAx&)j0& zG}mEA6=Tc6Ug_9aO6-yvcbwuqf(w~VQbs&Bn=o`ivtRe5^0o6C5+u^`Vq9N0dtm9f z5B`^PEeNRLW%|psaWX9p?U5G>^|S8UrQ}XNMIw$q>bSX0*KgIMylFrL0RcY(h__I$ zd&;peTI0C*_M6kEbEX)d-*p`;i2EZN0FaHf3-<^@wu^>U;eJcA-wo-b+!o+*d?TU( z+j*SDV2f!HD7%PKES=xFK!CwxP<~v}to&T(c`5Skx9e#%tCq}vu=?xiYl1WGooEM9 z>s^tA#P|F6%Y6`+CB3St@09*Bts_|?xf@8L%Mbc$C@tryb7D7BsfJfZ%{@oPgB`A_ zp=dkClD!WQ53H zSDGh9YhJ5LX8$?}Ig^WVbZT!K;vaK9^!8tw@}s+;5B8N#t>2ufeT)03`VKzojNQ&} zkI^@-TDjWe%6(c_al)Fj!ZrVBhiaC+w(OtB!84`w&W%CMTGlF*lQV0zkt0u;|lHN8Qan;tTs3A?8Gu*iD2- zLu*?_@R|EajvtGzz4X)D-zt-q^1^s~3OU(fruhdA&()c8Ahu69o}B_O*ef<~Pl{)t zYzuqi-8N}1Rf)8Z?ioJz4p1iy{KrLx(jeOn5uXhj~s&^3K%R) zQnN!QYfN?BR-a2QT4q0DX|XNVRN?2F!br5E-P}%amnwc%w7f&+A!ZTWEq2}V&}icb^UV+f zzq?7lDb_e=!7oV>54v0Wcd_WHvC_W3qFo=4I<%L8^*JXG>W|+qC;TSKgse`K{B|m% zzjXiCv1e0E*Zh}vA^_U?6*X`2iH95SVc(hPx-Z%#31fe#pnAi_ghGO=dy!X;Ihfr* z&9@EkBn~B zqgwj_>GA~1k!c%UmDzLp22x6ct)}Ywl8kgfk|+;VBJD(uSy?>%E#mR41+yU} 
zOd9ud?eS){kK{KB>fjSA2twNf6>=^q-lHf;6R^#`{L;IKnVL_ zz59iS!SJB4KN(+wW0Dz$x$jiF1$9^bC*F)T69|;F&~Pv8DU*#6_rrr4c|RhbLgdjG zs=;F&^H1~L(B|em6i?#3w>?A^M4@m7lqA~_&WHc|B?d>q<3VM?v}nK7>T1!`4KyL= z9l`Ofd4+@+%#S-0NI5L}&mXcpVtrb?$t%{LKr*3yDc(WeVm@n$j{ZhJLj%=Vy@j3( zY4?%LE2Ag^e8CZN4X5*S!``GGMJJBoqi}od?u*K!>wQyxD*K?`f35l!s$ytxlz0Dm zPpzoejkjvng&4|kc~QuOwq6#r>(#KdZEeW9O6+jU5t^X@bBi-MzHd6s*5l$7SP25) zTcn&okMPRDohtPY(D`MC^>?D>yxfDWNrd{|gd-}=_-`Zcd3^i%dKKKEjf*8@05S6V zeycUliI{;K(0}AB@i$z?PCN;NFf!P4!oNY*vskS;IEvHy&b9?*G8hFeKW#^Q_T#vb zNxmBT(|yJAWLd3zkFQWdaBb>?VBp|6`;3dPJfRtWkId|YDJdL*^7>BX8jUeYC;@%3o@J)#3|jzJ+{%!tjSevg_z zp#jm@dx}a!R%x9-!5oJSLS>n*d17+U7u@4l4)2P`sRga}C%+`PIsrcV_TNj2o^5=d z%me5py^MB^8NcR*O!Cj1F)@b-CnLfy9g!km8ha?X)6~0e9cM`0oin%ud%CN%udK+t z-A!MmP{QbI@Z8M$k2OP8VVy6;KEVZh6>~WEBiw_1LGTNJrFk;=a~|;$;0sT_FhX?F z0rL#mm@5Eo!Ny5Fl#3KQT*tM!dWBcF^a~e9`v;Dn(%Q17a&t5N5o8h3g{P-zrk&gm zX15weKb0Wvg}>|nx3#k=pb18aA2u+zR{z^bjXJg8h;t-jm2od3^kHsB;mc+|v0|ue zN2>Vpo?K)=)`6!Bg}<(R9RJmErzIpxj@t3^#qm*D|LgjZB}*0~!a6 znXKRmlJ9_NuhOD#UM!?U`dy*PR5 zwdRiWrq}*s9&R7OO}qDHCd3X_?Gh2wl)Xu}(QkX&HeUsZ9ZSbYN09^c`&qq-Vk?^vZjdzf`)7Q+84}R;q`WMiJ z-bv9^E-Bqn3_rJFx)}#&85{uJ08qML*+Y-cRtpmcyN3`{ZG;5jHmgWyf8e1>Q^yM#%%769g9}ZCE8fUPSqWKp$gVt*A^ee-q5})`P zJUD@eZD2@e)NM9&weGNl7Y{>uyRF=EdHaCt*A`4>Q3!V~4tXzj&o5#`1FWK`@lkgg z%*_)O53u^EzWzC+eka=TvfL~nkZsA-c@WrIv)i=*(Xe`pUb0Jw;!e4|j%GW)dN*=g z((`(>RW&8tjt*GDB-tE)Xg0JaWv+1P+TPJv=h8h!qyMAmydSCl|2SItM1^jpvZ)jz zglz65A=%l)4T;R`b-78A?7iZe*>3iBDO`JV?Q36q-HUs#JHEdE!25^SkMDCH=Ly`! z@|ldtxy!%Kp6_Hdn`*g5=G&#gSu=jrZs9ujHTrOM(4stCLP=-JCb&DsMEZT)u*@q4 z_A(ZoT=&pJCm98uDb*%B2pK`Wyh4{=a_&1V#%o9>9|-YlNvnD>%YgIEYba#H%G@j z$K_9AnGMy*wHXrfx+|aff$8d)gZb=|Xk|2l8Nne-H^>=@CC8o+1zGWrA;IpO^Rz`s zDJwH_QoIt>iJ%>7!wvpWOb`0MK_EcCKah{-0 znp~C2%_=tFdxES-DsHQ;3aJtM`CP{TE!H^<2s!P{%ePyCLWuAw&=2AKQQznUa&v6m z-0_yJYmvl^I_@9J;zF};G0c8P$yWy!eD}B~3mJ-UxSf4;X>wij=e~ydDKB28bX5lM zEl~QDZ;L;*=jp@qO;>6$!J|bOLb#hAwkgth!4OIrAmX|tG&!6-)Jo&%kk2Jzps4mr zlmO00VaK)SK(E5am7WX7zIX?eT&P3@r|0<`nO%Lq za$nYa$umHccjHTcIpEajX26~c{3<`EG~X3!>W|(lr!^iZV@K@=nv0({VZ2*QI;Vfl zqfYH?4Z~hv-9F{S@;w;#xE_a-2t>7ZOH^E`es~2NmpSq%=0j{XEk-)VX>{Kxpi8%F zN7GXYpf6wnCNkcr;NjC8G`YTeVy-xJ1xpio&mYK1Z^Wf*^jr2=BWbK0_dDpHaQD4` z6(}Nmi(fi3IG$x>r%#!n5dfJ*a#O1ZR?M@t)Yr708RXbM*gru%C69jE{w1n}ojRc( zRH;83&zit#2#D>{^(-c|XsZ_c88jjTTz8~J6}kq%v3P#pOe^MfE2(wuKv3I$#&@af zMHX5GYW0MNa&BCWlUza7z&&m2n;5BS70VEUVL&;Bjr#+H(^Z<1$e12TUWNB{enq;= zE7iL$w&CAE36QvA*Pg91;icENEmDUaXO=7eK45qGZ1d=HJlT*u z$Sga=`EDb1+)9}$H*U3%Zc+qDR)%w}oteyaoWM&HVE1@}G^O?~*W@tEBbGVadR1-k ztp4JO2qzxogS+~>%Z9KJQ7&G|*WNS8^@IT)JFVT~VKcxJ#5XnO?R(16b=$R@W6i(< zfQ}RGp53=MbdFZ0ih(~buR(dd6Jv)7EJuG=fnN6m$Q|8}cOZWF^Frz|hzh1gy@3{d zc&U+>kpJkXR3%kT^piuqUS>+LLZwTzXwgl85B;5?uNzk{f9;_+k6?TGiQJ<@^V$P8 zFg6u_hBuDbdrcerC7|#rIS2PgdnJ zYWXJe*h;mPv<2sJGhY)=R#F+}HE!V-SM|D*obYz1=>EI@D=K=}uZ4=u_I(5KQdT!Y zbal#oAB>0Gx6f=S<=^?{{(UN(;y?4Fy9TD(%RmcQ7t(G|y^e5t|I!wJnQ9o8@sw|} ztC*;eV)a_s>~28@!K1qI*B46Ull$9C3SU6-b@(Ml2jEBt>^?ga*Q{bd8-3Eb$phKf zF@y@^?yCWT$WbV2n!v5c_hmEg#o0%`BhW-Wt}^IsJq58`IVME;2NL3SJqeCylss1> z?Yk=@)y%$(}O}VW~V& zOPLJfF~}pD+T9I+T0OpcmU2@9;7~@@(ow>yQAb4mY--RiNzPVmmIV3WV*D?No#js1 zru$b}!lW#tIdd7PvE(r^XphAB%JTVtHxsi~5@qh}-XHqo(i1kdx`Ie@9#+0gI8@KT zkG}F#_jarwH^_LtD|7j`zFWKemGjrVHnTAdW`QYp&ohwuTkMK@^EEaj17YL~4MnOc z_|Q?N81M{t_w3aaT{fFKtEKvwUGW^>BP$`lSF;LcoPjibf@;(l5Ahoo?{=nz%x7H7 z0T4sJeWd&neUc%3Ivkz@5Fw)c-GlLO&@-~00VK|}w3twwdyK8H3{>-HTc7H$P_Rvp zGo?2$>l!?eitdJ=?tbyyoa}XWyn|K_)KGwmZw*@(X8+oHon+AOkCO6!k=+_Gh&HC# zf3C}X?$`SbQ0JXRRKySbN-bFht@cH`wH(loq)n(TqMjJ?xlt0Ty3_mrv-wH`)9!5N 
zSLUJY1{5htfCmqod@&Il~7zrG)RH(YEtn^UO+icM6bbl9LW1=8Pm_gt>3imEwGj;pVcyc^7ZYNq;tp5=XfKN{;xgK zpn9=4f{u4E!@=MemSHQaL5Ae4S6Qp8@?_Z`WGc?yArRI ziKn$nVBYr_|FfMKk!#JZaJ35tkNf2Ls~9G2eQo)rh3uTvsbM)EjsH>C%YUn*&?zf1 zq(;c02cqoWkx2xu6dCw39d)8Je@{VXLfL;jkK5xBkYS4;3dnifkoOiUApszjrk$+a zCwu&sEno`?3^5km4x>H>JUf@+>sPpnt6}RaH^qi2b%C)VLty8j!rd{mL!D>o;K8`S zIY$W=Oz>AsyR7{;&BzKt{qYC@x?R_^FAY%qey7pylggnwYPtHb=v>Q!R+hSE{+ZCj z>=p)cj+rgdEc_r)O2~<+!*eg>^iMabN5LQvZ=DWQPEdF*QrwzjSXzYP$(PV)eZOx8 zc0Wi6B94i0l@H&x;682-5}_+w=e+oAhs5RF+OmUorM62kG!aaMEuS{+tUoV>R%fJ)le!?S1?IEzk3Fl3Qu$RmnN3mcC$RIw$TLz$n2uQYi>oy+ zQ7zA+TY9$veth*_Av>9cgZ{$K&nkMh_H%P8KobBmaU834cEg|p%w?wa^^L(A08Z!I z$kZ)|<=K{R5M-P}>UD41va3F6gyeKCvUV?fNVyjOWDWM=cW1r-{p9oXrHf2^O6U zPh0AP{ni-#G6&~6V7HV|(DvkvllyMP>0OJy1f7SR3eiogHklyn2r;NN(|T|Hly6^Y zo6?r-p8?xiWw4FO>KFHur|4M$^^{$aP5!KTk;iE~NDa(E3SWU*n`A5ZDF{f0_(7JL zs3421ApH6<={R8cXpDF6iK_}1+I^LI@QT?=A4@UJyC&ANzaO_8?g$Y@frBekvtx?+TU$2EN{N10xv zKK{=uG4?8TWsk0I{|kch_Ut;X|I~Bl>`&bArc1Tn3!Pm54(6M+Y=ih>Kd~>1#{J}8k)Xc0 z0=vQO%uZxoPhHEi<+TX?sNiJL+HMME8H#tM?x7ua!Qu~|hA_13v&Y?jd$Z7`=}d%c zRkmev*qBAS{q$SX44J6#D zd6y4@j85~7lHb$xecBASY$)U1C<0y2n775hEPa^|*{;8)7w0{qZEWcs&J3nsmMVBP z&L_Xx+0RaS)m*$Wu)bnv`4zFD1d+};dvqG*O+3`2`>(A=A+ns^p;k}Xc-;Yc=_=fh z8K7IIi$W*wi1OP4al=QGaDw~usV*f#5w2WTc@pXeoRS#wB0Vxo<`yHHzum`38RdtZ zzxsVN39dU%08A{4Lh2`Gu9b!3RGA%cwu^rBAkw2@uXV@@C)fzOGcLuiA47cvDD$e5 zn@{w|=P}K$Nxk+av@F;Jjb+ICub$BfN1E~Jq95pq4OiAWKBLpBP6ll={rA>oWU^(l zo8gVdZNu&e(VlHt<~miR1hBfi=ku?-+X=TKhh0NQRXNnOb0qynJO0CkEO6m;Tw){yLYh!5Rncz&iAEN>0}uiEz(DndR7R^Ej;DJTKc_4a&fyK@a^O} zM3S!tSq2<-b+5oEL`Ljv{f$9E$~5kn#%}gS{mp)>;cv}I zd+zI`D$>g8+w*R|kFBmHqkF4T%&dSzjMjn@Hboa}=~PU{He>;IbRn&@;W3lK!SxxX zvCK~{H*PXVZXvyQxC3sqwD&f2*x>kF8&6|qiPGZH^scBQk4`;qyoyoR=dMA>a{lou z1t1HKk>XmU9O9?p?&pGtBM?#?iqkV*`RbBVJD`_y?nA9oRh@S%Luu})?E484-QL+F z7=2>!0-o5!s(q!OqR6Tx7RgSXpfY;R&ZjJyljW97_>8N5 z4M6c~md}20#f{@eMUFh}T4Jn5%aJMQS90At1CIY~yx#$D)J>VeVap>c{U3ErUx!p6BpyFhFvGgm0g*)q%5VKn0 z5*O>*EuyAO&1eJO3uJquhxzqikf&SQGF{&R*nxYqVM#W+N%oyujO@^@;TpGt$kU1d z$|j$7CwO3CkeFvmRAD~XI5TWMR3qNXvD=`MT6+hB? 
z;f3fuGkfK#Z}8yR4p8O%u=XG(Tm&1=x@mc)(m-cG9!T3m1vyLL3Z0BdC%*Vq?rslQ z$MHRQS4`urv}kBC6LSp2qOw~JoJzp(=GzRWNSCnW`c3m?=NxiEGMj!T;gpGh98zmkf8J z=H>6Vc0=qLF13sU9UP7{=Cf#i!;lP_teWjPvcsQ5H+{)rI=4U?M2W|QkWK`*?hJhI%P zUW)KscIVDYMs@i_1sfN+(Cnj1q+67>)&colAo_)ixz4iEeyZYKTa(BJ;mpy0UOQK; zHO}(nJjTw;KwtVlM`s*d?0X*QXdWNll^Ii~lYw1^1Q z0NpS0yjflz+Z#CQ7CtE+9Me(@c<#zI=YN;ce@9wGmeeJ#x8_mN zbChI5v;2+y%Ymx|*f3No2c|*3}ZR9-CD=w&w8r9ZwX)KCx5V#T^wzH`LMf{0n zzLfohVvm*Xu#B^GG0uXwLJ|B^oySS3<3WeS(}=zi%92nww2|NYTh6{pZh4klr{WPRw2$<{ zXV^Z3ju=(y1zDFl+oPv9yr0(eRDox+{9~5M2PQO@)W<&W8T!OyqY_@FMfN_vak)>> z(De+S4ahTi#s7X>DO9%n(oGRo+3P|7SMrGc>vN^|kEOKYF9Wy?E5oQ$AuXd1d4uQ zFYh}{ij+*Vd#2s~>pr=ZoDLOsG8!OCw4T@#O6QCT&HX+UCVfP}7xanzjk!a~g$Ldo zpttc+NasC(QxUuKRhLV#ie1Z{mIqH`H*H?6&`WYZekIh|rIX|^esk~*{STYMQ=9Jc z*9$iKmiF6UEdsvxZHAD%Tn6f27ea71n9hrEk2`o)H@`B-L6)5ihAs>3At||^kEeMS z#^mOvaM!cVb>(@&S|BT8ds@n5V;fYd*9q|*@yE=Y#AY|8Eylp%!Q8tR(cqN&4xxkC zZ=J)hYMEmvKSip~&M9Y5-WNLN17}eWXmv;edYmMdJy?j+u5HiaTXy*&ZZ+SmLL)Qn)gJ#=$za1N&da%Pr!sTtyHyp zVNUCDF6R7YlFx$DFt3vs6%jkMb%6v1hPpdb4+H+S6+}09rLfwPaPd$ZUgi@4?A`djRp8eMUQfz?H|FD^%*kj2tBFhcZ?ah zO1WLjdjm|S=o(g*Qo->y3Is6QlT}|10*hB$dyA2&6v$Oi){`8Eu{)PSJk5p=TEk9(+hOvV4*MTV+~Q|&uVt_ws14KxWu_y?PKSf1HD@u_ zU84WTkYK+!Z|Z{CSpeH>KWF`xOuy|n@H*Qhb(wIRrQRunFDS8nu0Nvwa8%kCd7$FQ zzFy^&$@;JUr|(WbxB!#)w;fwisX2^IMXmd*heubzKAk}~HgZ!(~eI1X;P&*vn$u$ThB@70yg`TYEE^p3-D=`JYf zR_+1VPx!v6!skiWvJ-#N62ZSS2I=*I6^@_RbLQs{5IlM#TVEi*g1U60`sRRSFgfoRc8=OBp(yTZ+Bfe)Dp7TxJquY|FD+~0NCqY+&L0~NPv!9<|1B9@8kxB|X>mR=` z8<5-=)rCWLDwMc&j4R|8i2(;4yW&ADq`aw4(9g4&Hz#rw%BM>o6dsgj@wBy)bM;@k zTV5&(YgVtLA!x8a1(;$F?@r$&+QQ6QxqAIL8jlAHDM&nOql38V_$GQCV)NFox(PCG z9E%}Ra!ZU;W?LT50d?)6rSo0eHv~d9tJkmZP_Z9zn(DkQ8%QJn<((O`z z5zd@!6!o@QcGAHNq={+hkzM~Dtuz@6Ea-NsFPBm4W{yZ7Z3J?>arncVqzQKmb>6Mg zl{Dte99C!)`48f-juwP*jNozDxF2$1V8LFIBS+e+g>vzOk=RO9^i?)J0493Y_iJ z0r03OT-PFU=XbTL`8B3I?<441Li-9hBMtYK^(LEDSDc+i@t={sS%<4VEE%a(1HZKz z|Kse^62Ri!(X0D+T$qGnC1sQJH94Dl^o+VnXnYxic(X;DMul6`5`TZshA548(>tEe zstQ7qc654ax#mv<=q%;-^)oh#wYhqi%O#m&nJtvWR3bVdVi|+Mi)NQ{^rN{*)&7&( zBX3?NNx+jQ>DJQah+@CnS9O2 zr{C+y>4f390QPxhLF=`l0Y;ndzo7!}l@%ad4riNJ9f>u; z6}FNj47#K~5dWnf+ts%2e9OVP+RZ?j2m&2doNqUC?$L=Cyz9O1=2Or6*Lj5X1FIbI zyE9p(?VnyzNbi5ldzOMLR`m0qHmv)n(-AK%-shoyon12X&!+Pj$k|<)WzOe-7PL** zJ#b5ii1=ysmiRY}!ctD~;dYh?dO0;dLGhTe2iD9D$BRaDjuh{1cxByRY*9+B+?h8q zn*4b=ke%_5QeyAF!twVn!Q6@{$+vKj@s1AW$e7@CDn#Ml+sb1k7oE*&p`N|yL`JR~ zPXY2R$CY}$_ouXZUz>`th?6)o?CwwSU1te#T}l*A4D}4!GAFXwBcW zndLp5p&gEX_+m3Z#CrSzBID+tJz=No_6!aII(DRpTpmSS2u$lk}qFg#TNpl zL|)K5l^Py-kF}BQ zKK*xds={V_^js+K2{Vq6|N7L4a>SDIXq!EHY+j~>P|(AB_8*-%hnqrL_y(a7uc?Bp zAGFK7*d76ZZ#YB(A+|-={eMv27V+lLFY;{JN4k$TdtZCp=ZW(@M_`7wd`aw$5yc^i z;)NFVB%F}nPnu7ge#hX4{+YRF!4|)<)0aAFnhfR;9u-O?X20Hl=d|ywB$o|aA`tEe z{fS9&1Igao6{!^)Mr|09VPG-1Unk&ss^q|QuUIt&pW*e!BGVQK9T9b(EsVa+^vp9d zZBLwT%cjM_bgm9Yd?VNnoqnAg$^E+(Nc|r9Q)^eogGM(K5%{e1c8!~Gn#aNd^eR5_ zB4t;w*8H~2Ol3-cMz?0c)#T~rbm1GeR~Ym0)KrMJ0#1;#RV5Aj1O_#-seKAZ9BXW{ zI$Bm3SSo>V@*vWaDEdcHq|x@Amx`pu-kQ3c#dOxL?yyIal<#Kbb<;J}#cz zNK~y=7eCM3i9e=$E&OaXQy)CXq5oi&sab78f$NKrLeODx)vCTgQ&%9HGJ}cp_RHyf z#b?KGT2pR)v77N+AB*+@fzP=;Uv;NT2MN|gMhR_kJL5jl<8@&>I@O+0KoS(tX@BIC zyDt+)6TiJ|@zAfItIa6pXFgFqRv@J5sX*Ia^+XgGxXLH1H$Z`b^G2z*DfH?MM*&cB zq=WiGV2u~};qkAhBIhVi#pb@jywVT%p%vUc4|7Fcn`75M3YMo=#OH}E92I{5yqZKv z;tns|Kcc^+u;mS-Jdc>C4`?}L{I+sgr&c_VcMT!D!P*VuQ*GySe)PPw(v|n}iIN7z z=uGz`Ska=XPnbBPFF^>2Jd$yF2)DGh-+PdkE$e-Fx#&3j`s5;(52`yDWc2hrh4mVz zF?|W_*OjL=U);A3tt}6~Q>?zNH>($KzLC1KzWEY8YgzpK#+CJvSRchW$o^Zedc_Zf zHy>5Ocf1r?^B3Fh{1%=lmEYQIpEA?XK6GG*D}pRI>{vrMq|V;EdrCwcUoAqEq+B8& zKs&2vtC-WeHyh@k&-NI(GYh0WCWZ|`WmVr5LXS+*YQ3a5yxqueD&>aD%*NuBT(-#x 
z$Jqu)e4pJL3)pK@yUzk<0*%($6*CT&3_ICtb*IdML$mj1onGz<(lJ;nT>EPou@T22 z1wZBy+hDnbJ+1853UMu-&}DXTDiB<{bzV!Xu&;tPJ@ZorZhBiqvY-Bjlj4G^9#awl z{V`X4JguMIDQ)8_-NS4r^UoT7hW4_fI2ZA+4%CITU8Dl-xb%bARx`%r6AG?3bx$DJ zW5{;Q=kZ6yQn@Apn!=aJx_IO6v`kS$;nt6dzM1W?)HD40+Qx!M}?O0cNhWG$)11H?gh~Z$Q5!h zdK)uNmX!*ilsuu>LH|;h!(hV0!;l54F$fP`^C80~k@c|(7cAr!nTK*KhWJ=+y$NwG z`QK^*aC4}U?kG}AcIdeH(-4#IzFLNR8yiBKOj1(>QO1!p#zU;jCAUkkx9&IWR!+=e z?hDIuj~mACxdvH|VkaT|a;<^-%z=1)n|yXu1jGx}``MFz8M?J&wZYGD_=&j zrNjoEbOo|=8TOU0>J}rwUkg2t9!Qjxu{dTWM$E_~={&{WU_X7FE}*V{7a-z{1M$($ za}TYz9L}cK22q&Mg^!?8P5BO(WM$=zs$xhxM4ap9_91lR5?EB~*A6|ycbHSazaTHv z@TCdT3;?$SgU-^PdqjWmn^v{33o?o&=r#|$%6d@=+or+1e#X8h@Ily~`Bg(-Tp|3ZvL_FneATp%u@+v&E=(2B0 z#2a1D)3Xt&sDJnAcprYl^Y^lf~)f1gb0A zH-WAVEgX~n8p?jjJs+7Um(UgGxFW!72D2#4ke;R6?X+#6c^RetUC-V?flje;Xw}7v zi~s6Z>p}SEQU!i8Fver$S;-~#D_0u1f|%WNTC591@bSfN!U>|bl4RSkxrQ%~d_N_X z^+a$r3-bu=EfJYSdP?ggHu&x~zg=>gzu_sL0p@O>Z2KK9+`pKRQ+U{AyYrUWM}J>} zdZnrBKy5Di%RYlRN5rM0bKs?2zJ7Q{pi*4LfH|1^@z7_3wX7kfz(8Z9Jy%d_N%}=;ID}0D#2f z7Dv{%SC=emvrhqRr688oAB4f2i50mN^^%*50e|*QNc&7u-ES*?H%s@-JfG_-MhS$a z`la5!T$MmOb&O2)US#oE3S^A28`dL64obP%hP4y>*;)$F2mg5@B$zwAe;}ex!xEMd z+(I8_wg?Jy9&C02i`r0ElAV=I+@xfg$;s<91b$fEd8ddbN<%GDRKqUHRMitAlSaXM z-xMzWqUuQ_Wt!Ew7*^Synm^E6rA7?ximElN3qSQq?ME=woXxtf6|9~>2HNh`Y^DFv zyIOBCN&zAh4lDq6vS?aa)nUIDnSU~K9_)6STz*;S#kxC~>H66q*^1R8e}HL7fTi45 zuhz1j)7H=zm{b`+TbQ}LQfNwGl6Ru>p5RuHqeZ#&xS%r-}DS3XDipYQzK|B zC1%sREI_nDDl0&?9`!L=;TT(mm~VxSAnD(Q&rf;@pSk@*cVosTf<0Tq+#vR#P1y+c zIxn93eL9lxCYhDI6fQruFEwkevn$B%Nb(m6zh5{a2wTT3b_5M<`l9tJr9Uy1Mw4$B z_VR73g2LMz+h9t&FtJQ$JjVgHGP@gAF0;Be?SFN{oqhlLGvMo-PHOCuYp8JnBs3XFI?TdH5HCPtW-RFME8>F0?ewakh%I7X6(& zmjqRU=g43p52SVV$KYL7S->R^$(Z_$&r5lfgHHJJ$L49N>MfS^SoWQ+vw~Zlsol>e zKLqt*T3r{hqq9=|9?=NyIq4z#o)67uzu5!qtm*(3BQUGA%;;1Mr{H!lgnGA+f$~#I zR1+AyyFS|+xH%M}8+0cEr&ZO~TkcTLDBryL@6KjB>bFL`_Me^lGlWG}7Qupg)}I1u zBQQ~iL6By7>T+q{8)ymcA2(P8T=ajNe(O7$0M{XcfJeHP#1@LXU*NfExDE+>eT!HE zeOumpnr2X$X$D_4iLZL?z9&{SO~vN!y}<>nn{35Iq&yo^>q*$e=wsg6Li{uL1df%BFD8}( z1yVx-<{xUeBbB{-)x}yuw@KaHw0kyMGvA-Z&*Ee#t(@V7Dm7} z!(hVJX|531p!M9~FTdBlVZM5X=@gxoYtL=IclNkux%R&0hOG_P3nzkShXy3P+##iv zU_;;9k#wGw?v~(#YYQmCKSms#28Oxd)- z%ckE=7v%eWgY+>30^t~+d$qUEH>g=26xb*a6g-514UDtpKVAI3lLtcdo|BiPLh(Y! zg=&+CTd!B1A_>k;Z8BeiJPp|)`0c2dtlSxz9?Md%_-znxeU3M}uIau<=7>kUpH+>k zP;ZW8{qmZk#O;v7+0p!-W77!na#*!Q)8-1E(@iJb`!O!7F2_+=d0W%W z%r$1>V|Ysz`X{%emeOXNviJJ*i%;K=*iEjUYessAOK>m&B1Os|J! 
z7QbxP=4K1tMtLu&V7qvCWHxw$;4MyBV#v0B*B+t0_4*MT6=LEXU=&W+kV600Ea%Ng z;TT`ydESOvZMU+-g%QlxBZJlln-)Jf`E`ha(chi9wi6N)e**Xf>Vt*MId)GaDo(i% zwgjCc^)mck!ClZe4i=1@Kqx106VEZ;?s}3*__#So@M$`*4|#m@g^j$VfzCyPb(NT?Pu=5w$AB z>~BrGfST87zp&D!kyEtiW-o5}&!VE^=Zbr; zkS}E8{w%{})h=+!-A}p#K@?Iwn%Vl#<=L*kJJ$BFE#w3r2II`@E6ln*)kwO-o$;}n zj&KjUhscm-i<1#ve8=3QT{$FW!^nL)-`xA33i8BM7j<_T$|1FF%ujSvctn)g|0)PO zCGFNg6h08Da3wniBkt+LEcIm|*g>0iz`nHwJzah)dBk5EQS`JT=M$Kk3okIjWY0u* znArT4#%N{m{#&)=Dg4%QKIiQl?!A4{_rex`PXd6!rYPs1R~x^!joE>Q?oJ(je5&BS zP;cq;eYoAa|8gKjhCE47jfsp$x_TL#_htkEFE%W0sVk@YSQz4aAef>kbZ5DGN2Fn@ z+FE=s4c>=p#r(M+-F*$ST9~CKkE6)>)wS4KaIP`F9uh-@@~*XFM9wUJ10yc~T`Iz> zNhB~y{VWtWRD))#0~WTAiJrIooa`ITj36ntaL$zK%eVMAS#A@9j?ojmS@(nD84YTrRPa6ZUesHR4DY&qE zkB3+O1}dcVpJ3!ePutRDq1M{jZMk8?vroy+yrS=9Zq<5>IMJ7f!X%#a8SlVJGUPnr z&WnMQ5|)@p6yK+?UB3_Hfl+TL@@?Zj4QriPTaOR>T#OqdXz2ME9V3Z%h_dZ zyzPa!m_X2Xnt4n%J-xMbqCB^huo{F4cWf%vla7X&oV17MP%r)IXH(cM|7o|A)UesM zuxK@`mj&&=IOlNSQS0XnlTST6Tv#^(uI6sug#0ybIyYJIwSijhWuzWWi*Z8B=#BAt zCDCdQ8{<>*Vc5d+QKeUuni^m1rFz~hbd0eciUZJcDZ2oV%f>b@ows8qB5iQWzvawJ(8$lT_YOI|}0kfUNON2*~TzRc-5d67ykujmdBM)S4W2Y#~J zbH>LC5WkK*Phm7mXRufW4;t-|`5@0TQQk`xl|Pa8A7i=G8C3q)g<1HPEXeG9Wp&4E=pEqULt4I25<;`FUD ze(hf1mevD^wuiKzs= z1#`)DWsB#!1KJE~T35hn50f75am@5NU&|Yb+{y+g&-HFEA&`eUp-Qd-6jjiP;|k&F zEoH&4QFxU0YQ1JiTm@aCe!XJ#$a^gGXSztPL< zthpFOiQ!aJND7-gK#TFG9pTB3cgR2dcBE_5mG5q~7w>*gF_Od@IWXX93$DvP%oG)f zyMmmJADMWeC085$#X(^s-jk*mq0D9&s`s=+PzvQa}HC@0G}^vB`i8S9#hK4VwHEsV12YOKGBbd zz&nH}2kd%2T4=%2&HKxCn1qNXlv|&y_mMrxK>BnCuD^v^)I8`>;JaIu0{-4l#-~X> zhCM&1%o^hlR4vfu>*z={_RMikkK`sbJ@-A8m*hZb1YYuX@Q&%X^DZ$yZzWrEsb`at z;O)=Y#@wj#)76hd+=g(>RffYG>IsB|l01rwo8DE*=jcV}_jqW{3teC9bdeapLF=&N z9?pN_`_e(yXEuPxSXsMw_VXrPmxb#>pJyZ*v}ZZa*usd@G{%wG6bJ6MUkr5i0*kmO z+I-}h)a-8U^fWo9x2b+7M;FXjAJ@)}I@SILd!y`gtL+0m5__Mp;59m$v&L&(yoFOM zUTPh;S*MRSuMlVOKdsG`&D~a+gXsJr_7-o!N6!oS46h|4E_meRu&g!of3KvQ}Cqh+;J z%NSh%^u&hjdXiz+?XtQj%SRi2U!v)HjU#(lU+1A3@vKGElixhEBoE8p9!;~te0=O| zg)we)^{fS#}Z5ZU;uz@-Ocxs^I zqdS`~)PpN!vTC?OzBF~q=$(L%(+}8q+Va#63B`AQ!@V}ElFxnF@}=iWU_D+@?q@Ow zzVx-Ak@x;eGI?fWSIpFP3+nOJ)b!SjWu5QeI5vKsj$Dy?=0{A2XN=cdAOGQeBofec zL^D8m+R2B{>$7^{F`M8GAE}8hJ?@m1EY^j5G{?WS^fZ{1t82n6wvOn(bBF60PxrrC zeMB;N&v9P#X`6TZz4DAr(~4~R1Ud0-c&iZ6EJ65!6%0fs3c_!{EERKJdm^N#XQ-)W z`v;8?D`Quuc^k?sQj=;Mqt_fmPefTdnlZm<8S%)!(g${zzej7x9a$g= zxrOCK0}o4nipx4=#sXh>TGcZ=efj9L>a3-BJD6PywN>&hsIRhMjy|W9^jY1ij5wa= z-(kC>7d1|2=si3;MR-+f;zTc{a31GDXde_1r1jUVS zZoN-Kf0G90le|mn7cIRzw#ECVz&zQd@vro@{PG5KGve4r%flvNo4Vf=LdjGA4LMSJ zPf}97Qv@VB{gVbA<(UOS``|cfrPY(mA(tp#B*Bt248EFcJaQec@2(LOpbmXSN8@`x zZ&ZpE-6IuA1~w@9eE)5}7bjZ)>nqX7JC0Kd=o^W{A&*8xHAJtEOfY2z{ta)u15WW< za`C4YKUXhLAL>|=;O6)9ILwl|Z*IiEDOX-3x$*QwVD5ASMtr@O;bmdmiaqt4ooZ!5 zmje<*cEf}IZu7-Y=Snhi4lByLZe#uU&c6A5-z6V+ip!oHy$cRoT09_!Lr+>;WE2Dg z3CI3^l;xfEt$vYDhS%4E*~fOlAf_5en11+rA6+vfDW%aeaJQ63M-An z@R+3i7RD(naK&4PuuZPP=(8(wa5+Gqlk?5J3%Q(g<0N1wQttF2GvcG>mcC+&@QWs4 zJZ5O!C+ps;$ki6V?1(@zes@{^-D$|Xp}mY8Zc@p{Cq+a@(c@N)q#E0wqoW{+;*Y@8 zf7l2aUk5xZXbWx+Zoc0=(7wiS1O1Foe-vc;o_Xq5MraDF zriysgCKE^^HyL_EsciE23_v}g?xV@&ygTsn$V)U_PsO&9lNi&YuG}=yFtmmIouiM% z@7*LS%U^W;7vIr0l(=hIODeh_3rOk5R!d%BW4`eH^@u(Z ziUD30%Wh0NDZ}HQLL@wpM-f8*$|_wns!# zCOSBnxFdsH4YH4qX#&?#ac3Ds57NzAx#tl>Crlrt>-;?IxZsKzzKjc(Y&VJ>P(QN# zu;obM)4iq`!t-1;p`|2TGI|&Xh2TXC(wp`{sbkWXESz5FCP7FKaZAuc{U4S`8c`w$ zZd+ET`-3i!YfZ?t95=hD{T=*&NcacGKr;k!Vn}a)ABtEn$1f`~1_pCv71QHt!FD>6 zGDZ5wD%hU`%1+$aWDbjovy0+R-~(AB_NpaO^h?)Q=zr64e?Ut)4Y=3|np626UJ%5B zxt33?a~QY-q%Le-T;ofxM7mH8?`=dwd(Hbtd(wmkM?M zQ#RepzB9N}zH>Lyx0@pp+%u*eKtT{jnc-3Z(lA2Udr3L*S_+{K`_u zSFfETujPQK98lWosu>gxT~zcbSk;w?SyoA43@M~0XGKGA%pHl=Jiv89>zJ*5a1KCa 
zu+{R_vc<^;o>0h^p>-j7Dnw_qSLB}Z_7J4N>Jap=WDb{bc5x?e4=xAeazrZ!XlQ?M z@=nJ*Lhk^8b#J?HaU!1G0ujd&gQ=^fgx!le3X5krViK`g^xuhe$~^w+$@N5Yph=FS z5Ox+fc=Sl#pKB*!w5xf;DvUVd_Y0xvzZ!Cqfh_@`z;g;fy@t++i!0@%#}6sv(|B=z zPX$b)3|s{;yCIc(&%F&PBVBTe;r;fBc@Kw#OSpE?wHBgT@`8HPFkhBE5(Ah*P}(q} z9>QHelgFGY=F=_$I5%-~q&iXmbZgb;ocL8A{B%(vNj@N^_m4?oGj@ES588hya1(5? zVA8lh*akbCc~ZYIqRE>R1~sIrJ`)9kZnd$ zS}bLWY*R_dGTFCTl1j2nlE^le?8{iQ&x9EJkQn=5Fm}c)hS`38zu&+2oO{o?_uPA* z=XpP$&+DbbZ$EJaKR3(y23e!Kc>2wx@$;sulJQ+ zlWHto$lM;dp5Fw8h_EI4VCzg5Fad#vGCSqxgbZtzoAtuZd<{t$HEJz7ey0Mdk!&`6 zE=0{EHZ9;ZWL-*nO?mhAL;hwYU0rA%cJCwGh4+A@h`x2|Y0Yo$v2~p7Gj(TB=~;(0 zN`0S96ywSsk(PRRv_~FG3R8)a^)Ee+km4pxEee{fxH$CnlZRs_b(!A%Y19mOf`x>4 zzE)J!(F2K~rO;196m)Vaoyz!aE)-C{jU5W$2+jNy4nXfdaUr2cJV#N;YSTST?OC!% z>}m29l~-Kt)7GoIg@`0*DLHc#rV?D|EX9@v!?MrMIW=-_Yx1t4Os48ZWUSRyoJ`3B zt{|!fV1BUI&4$v7ewkCohO#d2dhJgBsUo=Ycr`dahm$$5DkZMwauiRhdOwHw&dl2 zl~?EW7l!1w!G>zK4CSU4`7P*Tod85tyEShC5i3Ou1rf8%oI=w2+XP4v57R5%8X@&> zG1}}D;y|)M_3$KCQ9}cDG&S_vDPe1j2(AdPw zQt#0OxyShGCZuX_>@|uD@rx&!5na>-?oaq(nPp9Ox5^5W?m{d_xuCJ>q5!amH5eYf zpQj~qQ`7rnnw!!Ni2#{Mo;T@p2bI7gHv|83?27GXL1wFnXS4-5b8m6q zvEZfBI4?}ZW)7uesIAg!P7hQ;a*JMlxp9{%$39Ie&OnS`NbA4Iybj{P_3oLXjR~5+ ziE2;$dkse4LMJS@tlYlNHtX(Ktg(JK-167RFlvWo<`6@P>iMu#ni=)5UPjSEB_|of z%8P*taww0{Jp7)&`hMz(9WTK_77I&q8;W z8A=@a2()K?Jb>R|vq?K1{X$gJsm4b~9^q{+K6+q1`Uu>bWTQhBNK(ADc459O?7sf} zA(>4HRWeNAZRevO9B#^K>{nb%*ze5A_`*m_aJ*r^a~AMhsKIYTO<1X z?B|JJkG)cIB#@?C6+_xPGwLFa77Tgk7k!W8k2D^afc>ssg+%#hYYPhhJu7eWBdIL+ z^|;>|viz0*h~9e!n=QsWyQT?hs+4>Dwk6L3NEv5LqkS_jhM5CoWb6Bq( z&Y8nt$t~MgpJ-P+XMfD=GE$`nU37ixR?~Si_Hbl4uIadV)ds<%!E;5P%SpRA zII|Cb?|J_nQKg!~dFBn*f?`apk;0nP?y%ZQN zwO`L16y4d%BZTu&7`>Kb!02j$D~#*2$K2)SFFa=CX?>gz`yfWncdd>DCUw+Fe2F6ocq5jGXTyW%h}vRV)M7~&_s$V<1&A5 zl@zoyg91aaoD`rc0n7;>1neCO4(2_6hY;K22qM0Lzz?tB39uL!4miE)2e^gLDEjfMz9{TiEIZ^ z^W0*x7~Ee;3Lp03`GsHC+UZ_J5u}URvhDHt*C*>fb_9K~IMjFewtEPyrPWCmTexux zJD2l=jxyY<8H~T6IK$?H0M`9d4SXy;Pl#Ro}iZ zztH)cvPinm=n>W473Hs{>B&xGSek3hD5o4Wuv?RL6{bI+aR0$G$>IcPj|=`=JHkS= zHOWc#7A?~^RsX-whRDydXB8=4JB3Kco{n%N?F3g!5q0EfD6*)ow_8}3vtcjj<9PFK zo^_0zS~|TeVs?m6hj7LDRFbYPh-cMQeLSM*?A_Pp$IC7CM30{EG)@;g*<^G6zRBxs zVf}?pY32Dh)@ol62{*(p89Qz+r3CbQ z`sb;O4fG7mV&JIg-aOc4bS2{(@z=8<%vG)>U2|S1_TQ++uJcK&oeRbeN1p`VQOd`y z=myDU*VP|fNRr4>s5c;GDp;;j*Y5j_qy|YvYs|jx`h>6_d7A2HXECjL{6w$R#F>`% z!}hP>{*4+9%WEtJF{Jb>tYdt%OLp?&%$qY84OV*CJRPC48d|KYpwI^c%^REpNr*%D zk`t76?G$vBelrSwOuw459QA+d(qIlAF?Hbn4B05MJJ%NbcgmorRj|G$6|{6qqvw-j zP~28l-f_LHlm=LQBpg?!D2*2cB|(2l_aDt-M*pZx{Syfp)9)(YP)R>ecA|8Vk4jbE z^f0#lvSq|swC-Xp)k=-*Tq5DE(irbIN|`eqlA<7|yf3_q;K5!p)nP@ArG8DNlg<7& z35LFtc~I<( z9kcGQ;LckzzW3-e&tu_>_xAX&5E=g6 z)N0i7Gv8x_-W8YA85$sss~RvVhc@-LI$nJIB18{jB55{rYYMV6@%Fo-U-XcfZzB1B z6kp1}Emb&YoNB#dV!uSB;p_GOJYc16>agBb{Egw#Z1hEy8c5fzSh|e+UlG=GEh$P zqFVcHu2bo#81%2r&U!9~Api(?)~sdejpFu~lIvIX_WGC>SBIOV(XxIeUBUH3*CA^@ zg{%=JpglV812)J8;W2(6bNt=hS`D0=Q>+3#X4P;=W_^yZ!>Y(Ew0?vf?FH4)ZW_pt zOSh4I;K4%~4`EZ`rz}5nuiz6eT)WDV;$FdElB`CliR$;VrZ{i$MZ;D^hhbpCK=qjx z+?x{QavG}E2=)4k7<%V$Ws5p^@tdoB?ykhrXvS!H1M2Aj*+_bbHU1x_3%>TVP6_NI zbLyTzWzl!Wh|GyT3Ar<|z(rlZYP|=?b0XJ|uK)6$_$JHK5RS>LKh9DmGTpR5oV?T> z_bDN<5-T+m&cC(3x{bCG|HiI#RB2i{Xdh@i=+O(WmfRp$agAMD_aRr((RXv@6dIdy zf*${!5hz}+#_ZkEiA~5Y4H$oo?!HyiErJJL@Km+=tcQ$4$M)qIO)6O91Qh&+J|?e1 zXEb*-N(3#|9eRkgpau>^bDJpkO&aBq+jiH<(!uj`M)*q>FRrek7oDWxHjr5Ic4R%P ztlc2&nr(J$FaDSz54pp|z^=z(&i-W(`B^^*}msQZ~N74hax%;np7M%dc>r+-z zkID+&b!g-NCHiyQnoB>aIt08Lb(EP2TYE=OC+14=1?n-EjMRN#jA~06a)J_&RQm`X zO_RNhdwS+jlsT zWaMtw2`hVZ?qSj|bf|oia^H@`8rN7XWH!vbDak^BpMS;cJAs{O`kV^G6IhrFRBp2{R`*Z7wv~El4G_3EUslphgqAj_Vp)^>-#N{xk^$| zkSO_3asHH8YqXqcSp+(z&#d5S 
z0{&3n?T*lHLl&a?0T{ha^*ohymi=X3-YZ})G<9LxZPfXnx&b@bB+)_w3-5aqFXF;z&)*7 z1^OW>LVxe3wS|kQf)lkTK3zKR+~9fs_oIU1)A=K1g`-|E8xfR?Wa%%rt&5Zp1^em` zX&>oFyw#GnWu;Svvq*CgdAEt*N+Tkto2P|Ze8;e}+!Bp_)A6-M-jrGmIJ9=2&?6)QxJi*P{1_e*eC))<84XcEYl z-Xlu9#&`o$iq6UsZ;1L%4Wxu6<1U`=V*$7R73NjNSU{QB*e~t-!a^Xroqs-vi0n0U;=XaV1G_m7R75kutW12@B3tPGp~T$ z?~<_t`(u_*Hii%V`@>*uSrF|29CMmDmjXhpiGKO@{Zz6PI}bO;KdJ}Jpj&4tb52?a zr>U*p$1*UqSD_aC|H|EW-52+Ll+CmNat9Kl#ICka{N0e=NQL;-ZPrnlGV;Mr^tg1M zlf7RZqtE)QV=pO*-9s`8FeF7ce8_D`#(PfmV`6bhhx>A`!)0j$A(8=WJ0LG2O<)z$ zTu4oqioqdGt-FZFt{yQ8UJ{xji9y<+gM6+$@JMcPUqn-jp8VbQ2gr|^5PH{+OP}vh zxU7~+LE5X=lI7p!y?VmowaX3TO-yA8d@G@k^vewHv&Zer7-i%k{(|N_0_Nl`H!+lq zG2%#p@Yr8>SxW7`D3P-;=$?k1l)7EWF3OEiy(FThyyFCvc5KW+yTS(PF+l-5fuLJSaD?MvjR!2ip9+O;+r(vNZI8v=FaNivx!>cR3$aY zJvbC>gssqe#!Jepdj-B7&A#UdfmGvIyWZhaL5gEwU$ic*ZA`W<$7n)l-C;*s_`?1r9w;G(_G(H2CO5NYPQn~k{#5J(K zZ=C~-J9B>^I0v1cL(o9EU%(~Iu*u-O_8WU~N6#yg^m0zcWxoOP=`qJr;stgl1|@Ce z6mNw4){5D(D=#m_X$MrLkwFtD@U&0$ltjM?9xXFuR-fseM8{^hPjR4CF- zhzzz`(^)*OP28x;!I~EtW5<0B@hoPiIe-2B>yS@zw+*SYJH4p(1Oz47OVq`jOSh(= zB@@Yl)3yqfP@TU4-OyVrF~)12PHCP*WVxcMdN8o@^M_L@BJgku{&QLB=c-+W2~SlLsSOSTfgQ(MC>t=V0CP{C4d~dt)+KH4({T>z1dY)qgHVnU$nv?G!#^)EvmS zufM8#g={_$6EHL**x?!h8FCRRaX!jgY&puAY~|BQd?4?MA#zRh=~2(tDbA?_C;!H_ zpNp`6EI8XHrzIS0i%&r z;quqWbOFU2?o|L0wPB~@{6im6otn)@7HyG_qTqu%h%V^nK`+!qbX{Kf0&AMDg~Z2? zEDlx;K!0H5Uaxi~n{6n)x?jpOy!&qDs*vKGe7qR<=Ck6X0)Bt@f2$Uh)c$APG?WCJ z{7*<&<>ktYH{q<|wu{;|#m6oN*WehVj57%B1;7R;*R1pAqW2(1s&sCm_RR#nMX1P3 zU*&hV%U4Vl`w}*3EhXJvBJv|w?CY!`F{>zodck%21pI*pyscrZwW%81{0;d}`J}w` zb*eRc`}JA_-`LhyiuN~{KWs51naCrYuL$5t*<$L5myEVI66&5Miu091hImzr^2tRc zkVeMR?lf)x`WXB0OU@~-BZRK`EozQSbth_0{B;kbyhtcwwOAlpeYBpc+?dQT!*(t; zb-EbqOq0mHlXzk@X$NYvrm>fuyW)?56m86k(qzvgMx{dZMD(TDXYghTVf2kH%_e6Q zv4jBW^BCDn`)KVV^bTEWL;2DvFey47HBZQ~r+jS>>d%^O)*M{Fy)|hTW9MB)Hslxi zz6%|}Kje>WmGd4r@h9G75ibYmfP*=r&nZ!{e_qW>?l_VP#Nl^MRzuc(&ImQ=1acgG zl|0DM7l}Za?$nA=C*`>{Jrq7j!W-XO*^urxnXI@=p$hjy@KxF(MNnMkgg*X^RMS^8 z$q!8kp^BNHSmDpmotMoS7DDVZv`&+BMZEyr_pxNEys$>zXrN>xf0`OZzc&2E2U zlCg9<0k_4{w3=Ll;7ha(F)aog4XHV6Uq6%KJEA(1kNXT4wYOpBu{`Wxpq|}}P6L7Jv4u{}GIPahZSO&sLoCDIX+glCKiJTu>;gu_`j=Dg)Uca()1==pecuPm$lidU zg_N|9!fj<&rUDY9S<}N#oG{uh01IU|ba2^M>%6)0j&G8Q>;Y@`EkkJ!PcEosNE^~e65+Ym z-%?FewhlQaIF~2};57;c^_CmG^t#k2GLE+BQ>X%9$e0&?%bpz~iz{Ia!(si|kWytA zfuSDsnDr7tSKB%@V`IYQNO0MMPUKDox!!D@<}JJ>P!ZIQCs*lq;Gv(KN)p$vn`W8m zWl%D~6~WJXpbQ*@wTw&`@->YBqFE+I^B!U8 z8h!>wSTTIP4dUYRVyl3o2en-P#T03|Z9hJg-R5+`bmAXx&TQybUPL zOa?f~TvNmB34q>Lo<z4m3U;GuH>i%q@88(iJEeDAcn)cV~JJG3^S5~>^s*g%|w2%{NWc_T# zMBAQ=SaIZ*b%_kD<0gGGnCM!lm2dJHj}HJyyiK z<{`P=yxrGG!}3>~<(}EMI(Clz*y)JKN3;Jd#yI}US#f>*%&hQcxk-Nr;$%dQAWy9U zx`EuLL-=UECNg^#va?c{;pV7x4?K}KDB6Z@N!C0BV_xjI>aoZaU z7o&2*PTjHo_}BEec^(0BqWO_JOA&bEaD-;b*wv}&<`emAYCffR#myvsf0(|ZCUV1C z`)HzZ&9jmy7n9_!(^)Nx1WU7{KNL@+ufbkCQJU=!{3vy>7a0wl9eh*=Uon}^CtcB; zNBQr7#Bh_mkM!*qiyuTL-4OmQNjw5s0hxxYCNJ(e)A}B-<(n1AGF4jNk3jtbF1oz^ zuXJMbuEJQt%v;xgeg7ewI-B*YUws-|S$R~CJ#c<(iKtD!p#1o&<%m!qA;B?IXJMf* z3mj9vv~&4c9>84X%F;oW-{L=a$CnM6ybyAU2H^)M6n^5EIHw@ysWUQWejUs_v*CVb zr76y9KugGfU-pwshux#2a-0Vm$ZSDB(9;FJLP}?g!9Op9L9KL4`ZxO-)V=#xp06xI zlLe-OzoHn4YLdWuwuGwT2S;stpyu>{GHVT9ZfsDUs1%%Yea;n8j9TbU$?5v3rsDVP zG5`d89uI_VuoQQjWVI652S3l+2wfzxvK z9&`QI`Rt3&7JMq;#^feVS*vCqlD&N{_PQs45@>V-+?^xM8*pmvy*c7tl1F4?WH8>p z=3kqeLwq|~dPKChJ1^NQ6Wkuwe?DqX0hxHMd?i>p@A%%;$07g|--G@rA3{q)wEtPI z(a{6w=#>jynHx=RDG~MQV{X4yDB(K|U9IvXE8F|leqw4%bbI}Sz89aSV1jarSHn`0 z$kLZ#=KP~KE``3_3Ini?Lt@K&A-tEiiQ?n0bpN&|t+Qhp_?gb}+6IXTkuE^UznfrI zKpghxJA%K}_Kc~aV%zPZ9ix#YP=!lxUX_f?;K^~oeHct8-KWMP#Yv346^CIo;9T`j 
zwVydd^E3P|vcsS%x+h^(zSQM|zpNgv1)s8NyMRw4Q;lvc4$&`W_l06m7F9c#Zs+`y ztqn<~C(|B~1=x9w0`XzN6IvAC7zf0VP>Ns?DKGUhf!kf%U)vOsZ0H_tlb@(n+iU?h zqT4wC`0Lx#d?!#3*uLpLU)=Qiwbp~tt`6yL%YIsyq>1ZKRZn-wIc7YOd&R1!GT~~} zg2BgguT{%8?JNe5c`%yT2CL*a?~nbxjl;LRBwQwb;JmUwdItPZrQMqpXiC7s~HKhRt5ltZ`@kb6*`5%8y~{%?Ihs`URTTjH?U)ekuj4l)@EX zbDkcR@9UP2157GA4$M3`&>6lt`PgsuVQYY26<9&Fv!8QIW0}$syp-gkMd@KKLANek zb<YB&gOf>Fc8AZ4UV9~&7LyYc zvnXxmz7nVo1_Mlgmx8R^*@MP}3I^J}cIAgua zr^{SMK@#YcO{;ge;i~}ZFXsKHWl@)JQETq%<>eF~o_pw<=UMNp`XG$G^a>X$6Z%K{ z#XFkH%a@*p_LCU>SJkw9Gj%+xK31W$;L(R@7vvnhlzD&b&C!gBws6F2>6P<0E#DW% zUu1|4aZk^8Q1T?McKMIRSyTj-E@8$g`*H2HJWfC`ZwnF33*XwaZO#iVLlfn(kJ(Er z^VA<0CO;(jF~56{?&C6+jt~Q47x*p(y_$s00_0z7lqA3^%@?7p_he61vZOV4%S*3z zex-*r75uD7?Pxry*LfoG(t2+};X(vvDa^X5tfKE!npKHbxpcDnu(PnfW_(a_@RNvO zOrqc^&CDY0@_WH2&7Z&M5ZuVNxneuGWX2r0>z6%*_oo?s_wiVlfW6BCnzHY|AA6K= z?#XxK$$5is7!a-8cz|YYxM+?_cN4nU%|6zNOfB&TFFOEWS;Oi+T@(p2L5+lj zwMGk8N1lU>qEEj%pk=>n_kHE_OT2=gaw6i@JNuPo6VSHAE+cR=lpMZ9mupDhrp|sM zm)(1xOx8C2+~S?%y=k!}t=(=@QzZ(dKh47-S!f=cPbOjY8u{vgo~HGm2{K#E-mcTW z+H=nWe@EZO){n%5UvkF%cpIK$(`h{#XZ|*^fP307M!5Jcw&HgCra$mKl?H&kKKYGx z=`B7{)#og(S-+vZ@gGs`=A83UFmQ8jLpIZ)4vD#~n0g7<(mCJU=xBlY5o>;32X%W&7p$_h9>O#ZLf6bg7ErC8;;FtRqhi2Ef#+ z@dm1g&SqX7@>Znw7*ezq(#C&GwIs{QDoAI^ytixKlve=Ae)S`vJMHww|gx!_Zr${C?jukjF}%yIQ;h z>AcK76wu`LR#RWt_~TW&*4DPLgLnSGPKjnZ_%gDZR4a=6vpllm#4p)t_>8lw&7sKS zk%>IxSFEX7{(aoxDdC951x@Rbkn<+2cE6ouGB+d7E0#IR4FY>pPcOK*59iz0lX4|7r$R# z*Br$UOkMJO?_d9!G>+#d^O+cW5%(-z=NbSw^F19c&6n8~&*gpSB?z!tQX{cQ3v?Qk zb;pbuIx4&SCr>r^_hZDcV@chq2JTa{F~#TL4#MiCNgJdlx58jCX+vta$i0kmvMYEzv=0EFe2k2b1@x`J$QQk+$D)VV z-Z~G(HW#$CbcQv$UPdQ|0=JRPBZGb7|cQJ%2cvo^;!6DZ0b_9K#^F|%{ z`uuvzWJGPw#up2Hl&zVTkVxq^YUH}VP|n7`D6SLLAt+Xjc*wr{^a~ulx~mBr87}8Z zFPb2_U?CRt;teJDL^6NDyUMxE3pu-V4?VMEX|WS77Siv#3~Smx^tWd387kLHNl>M@ zD5_@p@+CjmOVbVq$3GYzZVeX__Uunfe_m{NMo^+-JaWY)-ShO6ZBYm!f2AHb|YF6iBk+1ap;AeO5#P!_)ZwFizWBB(nxP!+w;2c88${xTIIhJKaAN1}} zvU1^~81>JkRfV0fw$?%8;)~S-r~AfCzXSo97ezEfOEf)a#&?dVH27y2j#j!ekJ03H zs~pcFkKj6qKBd^KdWL*}sZ+z)ZH)=9-$m?5Un>$WL2uMZ3!rXWOtJAfiH8X`=-@*p z-(Zffd_U}y0JT8d00lPm|YC?2%eJ{{ggI~eh)1y4vLG} z{7rxfT+c6aZ{Uchm95l~$>l+Ek(ws|Jt`(nOCm|1+3*gpi{NW?ig1Y_uD58O{kUU2Ub2oj$wz6$g_HYO^q{?O>X z_^q!^9U^US64^z%H*RgfFdq*v7~(nBW7YqC3q6D@ERAVnK$VURxSaeGu6aHwh!&vJ z&iqpd&%cKonnRszKIFCyd{oqy1b9V=iwQ7ger>#!c9_N8v}27M9s6t^ykEnQs`(6j z9Da!Cb6segGJ6#1#y2xuA~m}mSP>&7u*-A`Sc6mS=6B;27})MDdx+c?QmWLS2M;4n zp{syF6rC(4?m^8nK=)Js>on=~lvgJ-PjjK$1n4dJfufTdtND6Y+}aHjDxv;hjj_IJ zZZZYaJc?y$O_63u8?hTKGCEL%&zHPDe1%)t>TVly;^DW*@qa7-JkkyPkQX1v@kKL5 z$mlrvCS+SZE3XAtyTk8gQ5cNoh}h0?5Vx36>T4@(wFFSwY5Gc^MA zVEMLV%xj&L!O6!@U+UI3EfjZEZh)A(#WDVt<&l0KX`V;;K%X+J`zx@LBiXEMYm`7 zu3Ak^H1yy!mVEFXtA8vEBGCG5?P=|jxN7BjJ-*(XOG1xKwejOQ*rqZc$KsadR*z<)kXnjpn4xvlZG{c5hX9T_R%wMTXfiByJRK?8 zd09GX!sx#rel6QR#=e-o?=trR<&J!Q@6x3=dTM^S{ewvXxigAy0j`zE*nFo>iYX_n zwXnOCsHaERx5N0-lu38=*X!#1sX~|(Ve60GX*IBF=4BYq?pG*40XgkBa+v4~SbO&k z#90FQ52lo&$wsH|NkNf8xc{SPwxVTKkSsELem(-4Sc6BaU;zAlw|x302bGwoip1*j zm-4#1yHUwg$m1Kp1m0r~;@cT;=|(6Z?pT6>@$NM*Ke2Ynq!?24+8Rrx0}E>^!)d!LoyxMTKt|H zuRs<;RrAsb-i_`t#6y6&kJ!ZKxyq56Fxq?FXjzI-)Y6MmzTda0p`Y3-8!f`}jK7^- zI;wb`=s#7UgG;$`GVWFQuf`-ULS_H4$bc7?uoaj1q8MmQ-V?0od<8FfL>6qcaar<> zj4eochfx8-)BL5i7j8ljuS2b$_JVygjms~`s2e2&#qEPH=UvK==M?aU2Y0J_W=}ZM zBgLs0t-i`P%L&(>&bEFXPEv2gSJEz~%Vl?Y;yglkF??12I4+LNf6zNw0QPzNMN+f>&cTWBL&L)wNUnna)t7z~#%lL65-$3(p*(KY9ydy_M?>_apd3bZ5 z`EGfFrX4GPvrp>I`@8D-=vP`d8@Eo$^H5b;p)gMK4eyXhx2Kj$hR&nf%Zh($-lq2l z;2_SKN6p#?KAQV!#BA*zG#^kuA#;2A_5H!iC%>88IDbxS*Cc&j{FIOxr(N2rf#Uyd zX?u?_#{733yLch$sLa<8jwcMo>q^00_mQ;Z3Evh5eN~RuKkB(s*@KR6Ag8E{H)(#9 
zn>2RWwS){wJbbOE!EQRN5+L4@LX_j-H`4w@nViz6QcaG2k~EUVKIo`uRFN;m^z%-_ zzA%pS4$e%Hd-uo*jiVQ3MT{3thKTyx2B$sSyuu?ha{Nv2F$HKt*TOu<7J@s*S8khx zSic(~`T07zD>3E9J#_N8pRqeBx%kOe5MmRj2Rb7T>PPQ}5z>KN&B;Me*-*fq#MlDy z@Xg{7?CyashdHxO0_e>*j6fg|`Sob*^QRZS`bvniowf^rZWas7UPy-}ggTB-dkzGK zv?E?|C<_GnH(8gFpMI^Vr*D<~gPCQPgVqnYHfuv~5AXF^-ajzcz&XnZ-MXU|TM+Xc z*{^(;e$}7k<%~Eq1ynA~`Ujy}8ZMzZ>(5-%mM=V*HVdl3old)AYT=?( zuVBYexqIcj3w0)AsXp$PXhsUPU|LzQG9}|Ovu4nkbUXGs2Z5)QaWu%lFY)4;&Url$q zb$RHl?0W&#<{MTEutpE_%xz5|@@xgbWa2XH9ZS(QI9E_h_CqO4yh6ijHnnqYe3uOM zJ7+bx-^szIy2DK`umQjO%$_N9rRs7pR1Q`A-zx9ApojvS;vH$*J{$y@ww-D(+E#82 zxkcm{L-t@dh4`=j=$&m&%)ghFC9dw0dz!6*?_n0eJWpO>$#PvlzQ48af|s1%#J)Jd zMEb8&${2?3$e~gux~~K{Z|(hsG|`^$W;;-7T{2f8Q}_;eqQ-F^8q`D0h%HJ*u`RnR zIFz^As@?_HsQD~o2X;sP6T78BnjXilU?ENY-;F4qsZ-;T4lizz&>yq|f!Yf*Fi5+$ zq=5Sp5Od$+cxc)Ok#C+Cg*pOb?dU0>_ zDd{d9fFeFTC9Rq13YLST@5P-{!mDnC-FPfp@yU6)$^ye2$X(%IiNZg7nC5TcL-d|@ z6=H_VuBx5&U&7VI+1jp&l%#1%j(GQ&w(1Me;!NY^R6EcjE=w#+b#9kbUons!Y^_Cfpr^-q3H74w<+0E z)aIM<&2iU|OHNJs$7F{kr~3cxBW0so9dL*0XN|t-`V}D)LEZ8)ls#1fe6Li-Wwuxm~5lRvshi!dhU0YB;x&k5ed??xpiQqZad zRd4~1HovqN%<`{1#@(CW{fzu87niM-%;-IHpN;V0h=g3QW4P&k)5miSAgLH)E^NTEWn-0C@7Nu%= zyXgJPiE}BtPMr1bZ*?*TYgA&TYZy& zfKOnqt^D~cm*RcpI$$%q8lH+3!d|{j{xYAG*{h3g%)a!DM501u+?57;M`U>G=06-C zk@u-u1R*{I%belIO$#7oawTn3sE?xH&G-Tk>Vw_vXR~W(Zzwh!0H6v#ty~hd#j}*^FPkL02l<@&mSzZv zhl{*p%RZ1se%-vELwGSVF4b&Epl)Av|K>0wj5@M~qH-&KUxDJMWK$3^4mEGg5Nq$# zyxx^-wo5gMNxAYaL%rNT?v0+)h{5hjU?)~!)DrL#d>gaT0IF?gNIR3)E(nM)HcVFC zewJborP@0Knk*%HFxx{=nOH(XQD510+d5xDDdkl+4N4olyHwydOnm@c(?ko=IB;M1 z$3={JPFwDeQusG9Spga35Yn?~d|XHo@MU-G7g^S%dgB^+BYOly*atfk( zKOmJwIT)+6)g1rvfq|^XsEX?^?8P;w=EZKu{`%M;{yrVF`lJBc!S%`RYPg#n`zyxA zZ8>+udm{ODpZqX3r9WfoV{8K|)?cee+o1GA3f=|9FNV^k(Z61&nUl)Ls>bSaWn69) zhcD}F*`G*8l{sdkW1i0QuxFE?D}eP*NA?w~)l#>%;80Si>sy<~q3!@g5B9<-~+ z-Cto|+c*JXq!O0ZheONvykoz8XPqQy0WulURk+agBUuffPszly?Ps$k-ltgrbQ_VKPuV|)5i_yI;pn3LIht7TZdDd`oG{X;8r5{?Ms zg#-r`L~sauiHj;&cdz4VX?`mLtOwz3Lesg^{Fx;}qGnDYh{4|k6mEy}6VWQF1l&I; zA52}_k5iy-LY5NDc8IjvKbM_7avcGI-9o$&KF^T9lG2_tMXp3sE2q!;+xy>K59|{b z+aMWcYh!11BPpLqtS9?r^WAlwx`3Msh(F~|6;yH6_Vk3-)R0p)8wl)@y@f5zB?~U- zsr!$RS8X7=d-ma!osN4H+8`0sfkX_a>JIHqftLwsKOw6JHs^y+0I@&m>i^UvPQ#O4`>uO=w{)$~tn3xEkp2(`dtW!g=h@wW`ugO^LFN z-r@4qDqMBViwy|@!*F0+r~NBwa5?%~r|z6Q;%zgyC8HksIUP|RWvZx0i)GC34@?$P zqhCTkqZPq=8l;(_W?loEa;9J9weKYPZbbIt=BkaCZq+!fG9a2)Ij@b_D;|(df->U{ z27d&=C+}@Qa7$hzO$5HXo(&QaT0K7aaWSWyKa%Rrd2&+pZ@xAPpWsO4F&{6Jt7S@;PK6c~48;FVao@*|xpd~1tsfKrLL3L$A7s}F z2iY8kpZ^kbH;tzQld0>LqFv0}VZg#h_AM*N->!~;3W`JS&oIA(7s{93afVNjY#za> z22;}7=XVWD?hi6Wj+@O5po7=XON3bQ>1M6Ca~k7M944m7^1nj(hyK1CP%GSaQeW6GFo4Rz}oX0fx zkkwd;0S4-EJdLPC_X6^&RHwM7mr*0OS3KZ|@r2lAx8Q$J`;Nsyh) zDEK>yAQ>WAvoP7L9iTOX4q<8FV#N`I{9eR=xJc@7DXfEVOxoLkzwr$`j+qCZsDC!h z-RjshdcHo|K$4<5T}6aLUzFmn%cgL(T_t19%0?U5PARyZ%P9IoF#c*tA(S2g8LNj^ z@-<#`XW@uO3rKtA_?7hlW4aF+VIqpH@Xcp35L5hmtqwYf-6f9j>(AAF zin8|scyyPZTe%`T2x6$bhjL{2bk?n!0SxWXN8M^2gg@s^8ZTP;S-g=do|Lwr*{R87 zG|3?jUo$Okq@<%D#Qx`w5$PXR3BRhBM5t=9l$dB2#Q#uq-j7iDe;ijTpHftkvM#A) zWbbtvD7)-=NkYaYd)!?zvrZ_LaaJiaGBVFTS!Zt!hr>DR4!7^?`zO5Lzr3FB_v`t5 zJnzQP^-4YIAWMVQq+=iaXw>qzLR^8=O=j&vzw5wR0$ zvE>zV7MT0`y;=GSPOFu$7w~Wl^1zOpJAtc-5`L;$dnv?)6*bAv&0XP8F=9=E&m1vY z9cc^2ED4?_KL{bjhaW%3Jn;~=x#{WTk85jgyC!%?<6jIyFZ>b5y11nNwlMGmUzZPBRg;zNDyCaDU`x~pWwOK$b}?8sAF|R4=9S6Wnbubv z+`9tv<2gTE`VbKGdSSDHZGWV1(%15hP|`Vl)`wOY?qFApp+Y{h1U@)$>xSdv7`=Ff zeM6rugQhU=h}x0~85)W+&JeV#Hq^~rg|ra=mNXgbpK@^5zi5Ap6)>sZ(nS@2_yzp^B5*bR}Z)e!bKI1 zb}sLX3txY@vt@55qigLe2#wU<4!zKQXU){bW8&hrp?x5#FAK9P8TN|)k~wtZ z*7i@-j}LQMh@ZaZ7~`E2lxe9(j#?Nm6J=zetQbF@{CV4XNJ$I;6_My 
z>`o26J%s*lAAqmjjJ8#4W-a?gOLMB_oWlMciDN351PT%y(HtqPA1l71CVegYVbfV5 z;;@WxZQC8wKc0-i5c5s)GW0*W&QO__F3hCkmapcGExFM_XPP&*TRNF+(wN)SJ=IQBVZ*xaq0knv8HhgN9xf)ie34J(T7+hlKh z2flh2Vi>DpIh@Tw%Jt7ke)_a0(k8g6yjP<62QzT-|_6=05N|K=#>eP<2K^JmO4(+=z4NKbB z-}lvg7M{-B%}h?!YHGbwQN|**F#G zbHVYrr*vJ1y-$bv5SXPA8$}3K9bDq_M)MMAQxp8$Xy)TXsZU;;>v89k_QLbcHiQ_U zX>P+O(~i_peMM{JdT@{Q_dLm2V`-t^zt%o^zCxiVLzx0XA5VgSaYs`*V*mB<=B~O3 zuNltFW^8tV__5Oeeu3}&x&|?$H!Zm}_zRh8;CG)kkfX0B`OeKORB`aHG(9<^*0N`K zB59r2eSny>5%f8I<4xJ;r!`M_Bqly_3E^3TMXbU(($7qOsWF3|>h`-Y&utF9xb~}i z81(+m2%lNDv2KD0*R7x)Gt2OT6Mse7B~nBlhC_;h_xlT9I=(kMFLIs;hMhe4kw2(c6-A)Er)~n)$$Vtj3J#&kOh} z2?QL&&bb8L@vISNd#rDvHJ$q>nh3@YrqG*(}g&@yHgdNp}1k!Xxe8|1utO0O*2F5 z0CdyGs6s=NrLxGAY68IW+-GRyzNpG`nRE;vV`;fN)rh4wZcZ`1eW6-v`Y_Bl%^Cbu zbvX$a%h+^62?rP&hk{A@YE zwRHh~oNDIL2Xv=3;j1NmYFHb!p;)>5H>{r~1lJOXfoG&2qMr*9?Ndxw5^dapzEFJn zA(E|q(l3%LR!zO#x5!0evJvWGDSp6SOWiXk@KjrZrmcUiKMqKibt&3R3q+~&r3W>u z3bn>aLZW-l45X%X?3+T?#UK|AJh=se zJcDn_+6eP{$V92n5%K!ktb2ZMx$CxOEBoBEvlW5c=I6S4)3B-5=*ygvRxQ@@--1T& zl`6VbF91Hm@;mO$x^z5-gcrMb&xj#yHM{S-2W8ci*}!oCk;(m9#a z;Y9%ihrAc`9o^)BEpy59dvVI?X~=NI@RR4++(gi6^+%5uLu}H*xF44m(wkKipVJ2m zAb;wK+MkbI^go+%!BA^C%%`x&O_Ip{RgUw}Pe_}(=0IXr5_e%aVkUdt&HSb*q9;Mm z_kov2ONiN%L31NrL3}1hqOhbeyxKVh;2yu0UC}T0i%ExuEaMrhH+wsDWcV>qhjPC` zlMzk{q@em_z{h=T5LRqoT^&o;0eX1{Bn>yMI1Z66NTX#cGGx%l{;hGu@ou09%qPq| zuFqU{d#efh6Atxh4MiFZjI_y!EfH(mxTl6M%@cB}z(b3lAu=IS2(A5_WHgcf^Gu&8>CjX9wmeWWD3Uq+HImNJ2A~*NP_9=VwOD&X7&bz4yTB zPA*^`FVaHmQm$?tOI;1EJ5l>*=xMp{_>b0_9A5U3heo9ekM+?K2qI z4no^3Y`xECl_4J={jF$4q689m>>nk(T-c~G82Rnx~UaCO8<$NGz=6Yrp>u$ptU9q`)^WBfQmB$M;=P>=trjQCH`r?s<(-r7}M+IR&qR?_!93vrZwZX|z1=?b&?#hv^^2 z%2WScDKqYGkS%4Zs<0l0*i6+2|Cg6ZI*oU4$#cj6%DlIAlK0hYhkkffdXtiB2en{b z=$4qy;>T)cUra(6O~k6mt86#8&X#7c1)3W`FuEBxALFOlhL4nw&-l~OK;$L zufjQ|Bzu%75OCuYn(v0umPK(fz%m9=#eDacZ!{w?MV|V8EGR7SM*m4ifdKI(cKYu8 zBZx%d8tjL9>kn-g3F?)JNCy%BC(p|s!i6mql;2wUW3*vSwkrO=O6*~-ZF*?{>m%G4f* zP=CL9fjvP=vvP`ltAI~Aw<`KN`EV?Z7N;`uXz!bMfYfF)6z3+K05!DEdoiloUwK1i z$Qt23!yz%LHm=DPm|02Lk<#OOC+%{7Gn-1*$o|ra@uUQ7S!^HZLx(XQr^B~;|$a<_FAc&9{53hrwiv4e+K7QJ` zfk~i_@&NJo^YMG3!+IgJZhHBq|Em5h$k)>J-g^;56IhQw92Hl@zUUoS@sJS0RZy)I z9aS*(e7Oe>Rh_XzQeyxj) z?hlKDocYSK&>tCs7r&jN^2;i|B*n&X(q79zY7c_Gv45I2Uj=s$W0$Onlcyu9T;B{L zo-gX1C@3*!`7uvP9MK!50ybs~82{jc%DsS+@I>x0pB9pW-RgNGJ4O}zv!O|mE>64U z0TZiywW!J82zB0v{?k~5QmnLuxl;aUv3-WZsMT%<^IE z!<>dxoH$c1*ATS+Xt^HDk7s|NaI9Y~>-P99f=oX znG@IZ?lG&+Z-F6mM*WvC@E2fJq^wn6{qHe&G!Kb&Tg*gUs)7 zcku4Wik0T|xTN6Y{%61nPpm$~j&ajkVqPie8uu9u-m4*&+0ZX% zO9O7Nb>hN$J@QR_%+^;9{R^8(;t|!6cjqCZXYt|ccr2V!%N2PEl4mM#_-<%Y?r0s) zyGf)xSbsuRgF3Suf`wPaI4sNkVkCH}cXo_B4~Gh!@D;r{8w!_zae0=@`-IolpH(@o zTZL>H0aul3gbh{=hjsM|Nb7rckrUD6f)8ZCp3nQuiV?aSVzhybmYWMVH6uk#5Ck+E zUjJUOfpMB1#jdNi{2t;rXKr@nt_%;h47*a?KOX=-C(Gn1-~L|vHDI+yH%;Re|G^pE zJvkta(@8QrwN_^!b*1QnZhSJoZ2d6g))i%$q#xqM@VCd61ze_|yuA(w3Rs7XbAm3z z-FLqy?Hgg4HLj`e-lk=q2cN> zn#mdjbD=Q5j1lH7HviDw?sH#Pf!Feq1_b!&Ycb13e!H) zgZJFnsCa30o#7mrY-=BGkP{MldG`X_IEb!vPQLqbMycfJ-?A2dM7^#l@KFY0|Gh5Z z!g;V`*6(ke!+{kl25 zcg1}vA1l0XG~JUWC&H3G8MtAx_t@^$`Y!Chmtx8l{Yk!Tp}ZUu+Nvp;E_@Cqy8U0i z-?d%U;#0(Gy=KQ7?aX=DTh(^CC+Q;pVY|Pu!TM~TKNTv7La0C07~$>IAUyVWge2!B zgy~S8@U*b)T82Wy8X)Gu)Wd_r0~`X31|jF2KH(DBg3t%LYzx$*op?>iI2%FtoSWv+ z=>7AuTSqz_?dJ|-0i$N6RHITOqhGZNn5YXF`t>MW;x0?LY4TN>DX8M_vcerHkFK46 z?0I2uXjxdoA@EApbc))pVXLg#b%{|$jLWATmGmo!;YM(QpbH6o%3q?nyqbxMMZ6rf zGdGRSj;L955yC|dKSBNp?7#aJyfRJZ{4P$*dd?NuVBy5kc(T&H{`DBp`h9GDimrVrpZ#GD{I3MK^p z%|b+PjuZe!;;B(E1M*OPN0fTR?4q1dW4OrPZ0^LyJ*F7v;4GA=eW61XtL`@N&z}#v zOHNU8kD&yRxi3_@aY;@zgfP8v%rRJhEb?5>i?dEIKS9-`vr0ueVkCutwM-xRs 
zhupt_OQ^|&(H+TbY#k#LxY~FWPWF7$rM~YrZ9hF6BgYJ7QiMgkCn<3Z%rKa}Unw|n zLUp*~4Q2cQ4Tgg>&@l*WP;+F6ls9Cw33_`i&AvF$lOKGJRl&O8zj%h|LJ zH+}#T4|HD;{!*>q}q!J(G%bB=;{83Z*W{P09OLy>uHIgx4{dd(3Jfb|`r}9FX<@_}8%S7#P z(EKmEqfNn5Sa&1DQoB^A9$A=sr$ER>v+XW0N$u_y^_yCsIx3O}(o#2CaOtZ;6a2q0 zFVE1pqBH6ZqVAI#D@11&I@a$9^4Wd65V%x?eygp%>yLDJ;B$oQ{-}wz(^B-Yr5y3DdvP| z6{edV2V+l86M3*juWp^lN#{6JQ@>j~y#Dj>z%_ycZwL(=%BP}IOCzOwG~nPZvGZO| z5d_0Gjo)N~Saqypl$rHSR^N}t=AWWke;>KG-Hl}HB0aupo&oez$=#Rug|yP?4Rw|{ zJy+|ygK@}DJZoa_{$16y;p{4D-)zAo_h9`zqP1FkF#>bQ=V)r>>T|SNh%!aIuG(v4 zK{q#ugwY}yS4Gn%&HXE}{!33phG}?qP2COyN!y|6&$St+I|q}6rLIUx1cAyjVUiDj z+CRB*sQS(9wA9dQiROp&r_P=S0G2qP#_fXjc(80vR4CuL&#u6K7~7V`=-}_XTtV+l z+@q>*OEaS>-3X6mbWmlTMjYc$;`Pi6mcY^aWc0+fGSm<*3q-x$1AgqokshBAgzvuD zHFarIlJ8?4)%D$0m72TUM4>6e@gfA34>&_(V$V1+q<=lA`x-Fkyv@@cu?B1A-F}~Z z0vnVM;Yl0FesyQ=E{>#IM<4Lc2+hxM9V2M{9Zen@lXX%mF+9w+AGGi|W-(@?uIhi?oVAgsE z=sE8zeJ`Mo94s*YIj{&ai#%~9XI9{S+IiU(seLDkvG~&N>??(Jlorw%#X+irTS54- zD4tedsDENX%%bQc9lCB-drKsTl=BQq^MZ9n!N+xw=M-)cqzgUc|A2UZoAPK?kd<42s=-E z5VV@1UQsRF)3i_I4aD-LU?%!(;mxdbv*6$H?T3gy8I?UT)H^N2g-jr(^ZrgdDmi4a ziBX}&Ogiu*S);5OZx>d|o)`@2oGdj~ZU}Ll48t>Q+AW z=SNO%Ws2HWbiwo+`Hs74SL@J|E=#D&I!=LJ*lqMi=||$8B{r}IUMusBn@B+=W@$wD zGF3LbSo4grdTA9z)OD?$wUE;U8{jOc04UF$PwbVUcoXaioXNN>o)KWIufm3hg+>e8 zMX#d#p>pbXud-R(-+%k6h205=s~m3FUiaI+V>Ahy zfT-j=DI5rA)?~A1Adi^2;S=5id~3=A?JC&Y%u=*U0;-AK_6CUjEIhk)eaS(UOP#? zhJjV76;u}w*bL0hd~A-S#TAI}{P!*WKw%2dSpGEhp$48NbS!R$jbDGGzLeUcx~X$x z20`tGSjmt^ikLQ7khQQCyM%IKWn)WaR9}^cODs_RBDU1QN6NLL!)J)F0?^@?18ZbZ zZ_tWZx{su3jjPmaqcw+dl#wVdtqX|#KtmPn3PzQ$B;)I)xsT)MT-lmX6s0&d&W^2Zk&s*y%B$BJ553e%uYj;=l7Nek=82X-Je09}$iFk!D(J<`5AuMD*{3_pME2qZ64jEM^h4URP<{53V!I?M%Q(9eOtR>l~btTXkG zyL~|nYOza^hsDwEPhRR%Z*5T|;!t2D`BVLJm_EQ#a; z`U+G{rIu39$<)c@yM#aG1piCBa6~qbo}+fzQXV?Nar<&j!m&H5N6?F= z{W=U|{ePD(G+nQ|nCPo}&&5JpQsB#p*I9d{nrI%V*|Z9 zfr0L5_V8XZ$|N|H>r#}J`J>xm!W*8A+S6E?WAoG0(n2=aE2@rcYjiCt*V^CCe7?7+P%%n zsbRpsP!iB=h{Eq)l_iX6c5nO8bKc3FXC1A#a3sog&lU%f?yu#8y>h$eHpj{_Cr35E zx!w|*$Fe@|)GJIPZYp1n4Xj)(%~D(~@301Ry8l@$aMfRV8Ss>PWtOVQVYcxNA;~%>O=ZhwFrH=AWcRhRq#T^L)9Tif!$*+d0}Sow zVVJ;WI7tz)M!hhuuf~CxT$Wt9-zQp26gastr5f9#_)QsgGd3F<7Pin4R$ae9R?YXs z&+aC$H7J@+{VtNs`XqFz*^N;#0*;{F9ui)MFs)Rvmfp!N(-m5@mZKyTcMji~%q^ml zL<3@jeDwja4hBbs9UxDY(Cc0%O7Bsk)unOQ6NkC}qM-4aYkP)Tl{m8*GZSY~KPg3V zJeI<2C|<;iZ)kylr`=HblHuIm^lA#Hko^u%XWdyqm-50ln1<(?Usq6iHhHTtPw5s} z7?>>*>a;n#as5}Hpo0hQ_ltWP1!otEX}2G9EvH1!*AL!sMY!s@GQEjcsHZs?P7l3?PypB5qI} zLnja+6K$)7Vo}-wCXE7hfo-|$>1ie{zPa~7#0U>Wqblg1!g=8zk^0Uk+R3qR`bYT+1u^^;)m=+<*G>WWhqcJRUVVaS&1^KFzGdAram4lmiAQ6q z9QKUnmO+dDn)zZ}V@OZ2Fwa-&cRkvkRfodOGK$=gtLRxeeUMdzHmyU&0>DRb)|;?k z@W8S+tTcCko->ogx&%QDLYIZoGfvSJ zlYB>8i->1jt26p{bpXO%&SN*w)EfyI`I9%E$9DX7-r>$j@Yp>24w}H)!f7hz%JzC! ztUTTcePYG9*Wh*t)+Lfo&1WMOQxMmJCoUFl+dix!&&^vng6EI))cl1tdMsSSO%^#% zu)&73BN{Y&%nll+nrScV&s(ljs@NJV9Gic^xAoB0@PO{Bb-eCsDu)3_ujIW=YJB@G zMAguI^F-w3!abfWslPjWlo4Laxz?jy>^gg+WuL)K*QL?uUDTWHr z)QfkHmwA}x$8ut0DVVX0JRhos!1}|pZHOECmo$pDZ|t7YOWYR~c`s(bo-y*u>uTWb zgj3-=PA%>y9;yt#9-$mv)Ycm7I2jddDiP*S!HR;|$!{A)Td@DY!;l8N%ztB<c#A$ z6Q>IH+>8z(rJ!r3k9LNYUcWhe-TFJiRjw^eE&w_AFf`xAyCfJ+KBwJmX8FHSOq7T{bcvvXodm+sK|IpgPxl1ZaO}*t-=IOG(%G z^_iQyn_ICHu0? 
zKM)pEm{(IhBYRvL;7K(l-dok9Vvq5a@5{-P{Pw7i5-d7)Q&BN9NlBAYG=R z4Kp>;g8H!2djrt&=$1a}+**%VXwEL-@0)ahM_5DmV%;u5#ZGh3T@{f_7+Ng=lw80$ zUN5@wP-Oo1Dx8o+b7cq~aArRjiZz{0p6o~V7J-7w5f`<*xvNa*;qgx@>OpmtEoAF( zH>BW2DBu$?Gyn?@Um=UFILJeVS~Tr}liKpiH1^rU2rS?>hs-0^2HBd~8~TQjkuMph zOg(xthFMH{!DV#bg8Iz&VA_dU+dZ7)vJyTgk$p}LTJ8>=BKS@cUxl~*nt9peMQ^=> zhST$0#;^^j=MMQAgyOfP%Dh0rp9^;8JF3jHqvSIO7fCC3K^qu-MNsb6RQQMmQ_Hj*AL^IE7rCLRRB-z!YyST|s4 zb%zTP1k{<{E|F{Tv&yP^#SvU7ZH7=8V7LeWIdgG;WVIE0B!VARcevr)R2H}JLBLj} zn{11fj6#R`C&}qwr83fPfN!7(>hSY*git?poqLxs@-MJZrn*mEZ35kTU<_lEw}bXI zTeEE6&K^xrd5`ZF;6xu*Eil#e@*9l?tHiI zzMw%Qga3>oR7?r}RzO&+XD+VU#E+puy)TLk2}?FUIEr4yCA0{m6oWOy*vlV=JYI{vTnkCXL2l zl^I7<{ifk!PGFky7C}qj%oWA=UndmFF`++InRc6QTCW&hk=(KaZqZv2;bic-JpNXE zI=8kivWz`i0d{qVlae&NCaZl@Ws*Bg`{B+B`Y(5*+Rq4;INFz0rK-!R&3o5~9*MN} z`3}=Aw>>y+ZE%6NZ09XQWf{xHulyBE@@R0Jzy1gyFac!~mN~qSKpXGpakDzVsu1}u zSS8vQU;T)W?=k_g?wjFrXS1e~HNOFHj~rPSnxp&x)!Cgx@$iiwl#HpY@_zm!KGlVv z7jTF}$L^qw2w>gZ`_G`J@>6B*46YRk{q28r&eFZ*Gy>U-V1*c9DxQJ4S2nNXXd~5m z_&HNf;HN9-8WvSkhEO5fa%1uEY>^V*4Y$R*>4Mu!A zR2}=>`@#A!qTTs()>cONZeJ;f>_e6 z0Y0`Z;3T{I=3M7eG>vW#cwP~57!ZNV45nNN<*ghBYxsvB%=uti3;^Xx)3cE~qbf5K z3Bh{ZmFmON85eE?t`^!(#&-*}v_N&>Z{h~?(;jy}myy}iwR2Sk`x}I`v zgZ58(ZRz}1cXE?pvsVJOqd~b7@5FVVOW?>Wv1EwaFEBagl8WV;;<)xFyoS-`S!*y} z^y4WeOlk^HEi)VJ^R7wn$IIUtPbVjZgT?}mT-mjD**VtNZ(jNU8m@*1@9KsV0JQr7 zUZjVTttBm&ZJ8ZN($e~D2F5az{{?$pQU}=(pdc0aYJXWWyOGNr zp?g8zr`+t==M2A%dk0!>gvmd@w|=mAZuZ}B)F1gaj#llvrHoy}6PY9oh3dQ2XPylc zZBh{~f46P45%4!_melvo=QUr*pY4pF&v z@TWCaElCaz!oGph9W?_F!XygHzg|XA-Q7^LUqZsKxOu;DOZs>OAqqo3IOV>aOiF^l zwl6Vx{~i`&QRUz{W@&|Tl>Q=d<`cYf)r}e?A{0JYeusB+O2b9k9)B`krntPVM*IQN zMH6yO(i9Q7Jy}>_6aI72)8Em&Z<1y{LH_F^k+~RF5yGYO_3kbGHNBzufGsyoiKnL{ z4kW(oT~LgQuG0-_c;kKEKTmgdXPXFnSxUs!wf6fR#-fr?CT+wYK4=SpZJBHbw{{6l z9DYLK%6uhLv_nR{BG^+4QX;L6j8#zIpEYhbuA8n_3Bl^q_8}BE)v51KQq<^&4%$}9 z*jA1mkuBMzjG(=Ue^)`sjmFb%5Y3Ecr88^?Zi<+K4sgusH;#RcPydVP-2^la-)ZwA*K;JI`sQ-HOna%?(& zg|t}eF|Sh+xaKV_iznGHmhClB*0Q#>#zK9e?~3B7fBgFRl>L`MH((CKXI$>_3`_(% z6(xuMH6|KmdKhQ(rPnypdzv7WAS#>+$H#H1?w30n;uAC8eufVh&GkdNPK_`muX(*dxFM zK`qD}$PH8GF#-qV0mZQP+>#;?)mIolb4f{Np#H+4c!!6hM0yAd|LFETa?Wjw(D z0bSD^VZ3`W$_N{rh=;QEaRfC~>Koq&4zG`n+^PQ-Jd{zV$!V|dT(-Uox``OrTX$eC zgrB=>XyXj3ZHlt>X}?qVV9@r@CBZLHfinfogacnWTH^R1<7W-VRqbE-6lbD&$ z@oC?evzrA_{)28|sNOJY%3k*@V+)lEeIrAyEZ~VQbw${`Y>Yq(9U{U$+Z-{*t-14O6jlRyOPMmEy^#50mg>A6!JT-yo zgIzs+u5iC+9J8OmiM)QVg%>Jf!J}f?4V#2i;%)`2gtY|{Kj1s(?`^Ei9G>oHJHyF( zaZS;|)m&dqsOP`bi?4?}+wZ0==NDSQ9u-;qy3Z^Ax-cuBt}N*-UL&0HT=R5X_5QUq zoX;oRg-~m&S?bnFfB)2Le}%^3{C`KRb#vIR7HcADGO<-D^S_wPh@u2{(819WV+=hn z2A|$f=-r_X@LaX0bSTb;&})J(%U!$rgwx`a*8Bgk;z;XNpy^YS>l)?J-t=T1yVJdi8;MfYQko6{6z?dUodpzN8wlIJ*Ky z9{8)<&U!GTC<*stZo81Os_1z zdewYt5^eOLplLJsBy2_C&-S71tW6i8?!=5`PGDmzf35__=&k?yucParC)HPD$#Ki} z^1a&9Qby z7$)S&PzhUO8(bFm7E%;8+ps{?2tjX*(G|AvN#!OM>2II*vhPQ2W*Xm-`6U`W;YbxLgL@EVI2!Momcbfg+1a! 
z`qqeN~+>{*eQ0~Hf^ zt<7tn&P;I&t(quXA(fHdHhlD3$2nMiKw;^!vB|7fa*4oes860D4ZL5CIyw-D3(!7$ zNFvg|;w;C-t{4x;FpC`v64rW_AAj=pH9JWg+YZwDDPEvn_>%z=T zvRp2&E^lZ@TG_Iz(L&mJ>l{~v|HK#7OHJGEKQLhoEE9*9|*xF2QGT9I|v ziS_~m#YjyGBEL*rFQT;4)4qwlex@OHA+O}hVW)wq&B(!OIT8^1~eRzKA|$^gG^ z9H^XcF@0dpc+N9~l}~4O4mJwq(I0Ex$XzsIM`^U;|ar)w~UL`{E=c!0j5& z3um9Nzq}POu$n$U=&D@d^Mz*WMy!~zp8mwNl;WR}n3qQC{h6K$K-$Aali#L^-ARWTa!{ih zPS5?lv&PMEjvJD?e00#r*gAQ7$TxnS2d%Wh$cG*@3p^mz6^dVVjQP)5m!|2WbS=@(@P-gRvonvAL>`}eV06K`zL4)Hr9MedW|)-v5(w_kM1@~51`_SQbtTt2H4 zoT#*E2(b2Lek?!TSvg<1IdEB>>PY3ryeku_8k{_-)AC@RT|8yMe@;|$(N@&(#PqF9 z=3V`90b0!Y6=M$oyNBS(Epdn&MgMbES%Ytu+o{j45K>WJ!Vij_6~CDkeu)+9`4j<#dlBtF(`K-E5wo+huIMZX?)xU%_6Fm<>y}S<~`_h26WyF zcgXXad|aIks9JnzJ6kzY@mS+U(JHrEk?`ejvv;(Fcm!zmPFP4kX8xLX+nbZVC!fM1 z>gD>T`YK3)IZp%sG+R;19DmnVgw>0u!5vv%IhNA)Tggv2zS~O91JnDsW-c`$4%(rg z;vEM*-^DKB3EO*k!V+`!ut$u=E8G(9H_a$QtWuBt4ctJ?b{?gW&Xe%pMEVyu38bbv)>8Jo!BhM0K;b@~>F|n_jQ9WLb{_qbU!p9aD4KOfLR!fhJaEAF%f!MBP&uO*YlR zHM*61r+1Xuytmn$DzVFVrPp-(X(n-*a&$vD3uT_%lod(L z{V)6z%6gyhCzcZ;HZUu@Q0n&DSD*iw$W%#z?_3$%w)ORHC$h28I}Iz1v3@+M_Vfm! zu3k5(i`^Xw64-*1GX9BlKN!pQ(^vr~!xUex&-Gy7M1re^x_r6AIe5O?V@u?RZmi)u z#B4$A$dv*=cF;3W@ldVDsoVE1bW4N3rN%&~C{&do7Mw*q{%qV|0KK>&#;xfasX|PT zyp?{r*=)b-yuhx7Y0%tWnFYwXM{kSbVMMkb zc44WO;ajl<=e(ECYTqaI_w$+7NpFcK9E>a&*_o z^|xg~{cGJ}9X5q9+of~P7NHGbF>v3bT)ZdiryD}Oh`nk#n@VhK8 z#;$Vt7APbZ9Y*JGVGGK~N=h+4Q^;ASIxV7bPD|0iF*^dY1N|*_6?WU(e4W2QruL9` zm1n=bIysSgAC`w%;`v+&4~VFp4AjMNFBz~7J-ZW3zYyR-bhV2OyB7v$u;5Q$3vOu7)=9}bdAgSYh zw$spMF|C6ASxTZ!82u6Au)s`P{WC(;|DFu>P^<}Q%2G37HOGE;oG7PxFchz9+^jp8 z6&g*VkU3uHcQv{IT=cXRV&%jNCKs6VWFjIl%^2ox6IquqT`zN+8cxTkxt=N1Xfz8NKdVd8gk^8%yUuO^SJH@U7IS27B;4k<}A?u8N(%sYmEvMUH(YM-D zuo9+cqXYkq_Z+rP>{?dcE#l`W`V%I4<>576AKx7?D0OwO06715cqs6gTg1+y*N&{T?45~m2ne1*^P2pU)Y^^ za8_Yq(tFJ)WQQsk5`&zi-k|!^;fk#8P}!|%B2Mhhpnndn>SA*dJ{ZKkPMxQB$$0t$ z5U)OkgRe=U>a3v;qNmi>NACKEy>z1G$Rvd`9_AB10L75ojh9=#;V%6VP!O;}B<|^* z$M-Q4e%H`XdI0HFd0qr!s)$>HV>A8K1D#p#?e}ZI^Kd2(C!>F*8Bir}zq*0bW_n}4 zZ%6^F?)1A>Q%>U$8l5>u>(-6>O*4$_eVG&@_MVVhrsmO38}rP*J#IcEFgZ-i2ejkK z5Ckr%M>0<|60`4)%T4a^2w}6brhkP8cD%_OCLrptfP<~c;I!zyP`SEem#02wH~l}; zY`05{>&J+JSPE({`6nFqaGTd|N2KV5^1HTai)j~6kY7o6n`1dff%QtGP{JiPWW9b0 zH_&f+_}!?LVd~ZBB5J>i$O(vdh%jYf)X*7hV}}PNP-(byfB(^CI)`} zBF13@`d$#|M^n4n1AIu_&t;7b4eEvIRaV9^y8A9N z{>$r}z@Oqyq}#4!mmW85!+iO>ZVQ!@h_NH#*9aLldPMkMN|D69^zvm=68M&T_|GX| z^CGmoMFo6X`6za9B6>kYM%XmpS75^cS#P`}WSEay{}R;jY!kzGH!L1kvIcM@?o)3O zDodyf4WZ@fQp$VNL2LBYjpGJeZXS2dG}IYT(z^{evt~v`(`=KlCQM?w7h^w!Ohu3m} zx}uS$kz%1*hR)~LTCcPnU|7pyiA+dv6!lzV-qe>U3QW0*JGK6 zAVhO=SJi5``f89p|OIjJtLqJJ>!w{w?iE^osbmF9&AP z8=|PQ3WX8;#botlZ8^l~q+c(ZRR804h8)B6a>6Z=wU(iN&j<)B$G8(W{VGhFeRS$H z5mG-9zP*$^zzd}Lhs1?pcR#+3!?Ix#QlYbzYh$dhe&X74x;lkDtm_5X9`>Dp{}^e= z9iK@#d<64EwxE!OEk3pMnwu(uVN9|+WI7GmNv`fps~)IrWIgI zs+=lrRhSeozy^gdDha=^U7dLTiV6?Acl^dqA$1ac53mvuQc8gx_+l!}rgu3gR%@8g zyIQz({2UbjY?3TFcQU0l79Fq=cm@i8Iy=yaAmvP2k8p(XM-mQrZg4Je<}Fx~2Bx!fi5ISc9#QFlqt;a@Z2$0$1SXLTl+#OC7cn?iy3vv*sz3|C9}p7{ z8>WE=vE=3O0?g`l4Q&41(m)BdOr^9sKi*MTnBzubhI!56e~PfqTNv8#K5rLl!n?MD zkS?BWwTdSS8^gH=XFn_Z_f}wDPgmEWk7aM$?}8yg0NHo4;LXwyODLIzU;*13lA4PJ zo)44ph4;IFRwqQBngvzk=b60$*bRX%FFUOY`w}**a*vB^i61+i3jOcRQl(KM#9cD^ zu2cEoMXLrq0TDV`y0Z<$9I;`;jI(G$fL*K$Ajlc77so~sZ-Gy~7r6S#iG+C&KcL3B z!Cx>d?NbZo2%f5}2*5ZnhL|Z2Ek^)G96HwszO&C!6YwjQXHx-;4vDquCY5#E%k8WV7a({*J?qdAmG6i<_jAT{@{XL7hb46lv>tR zg-LUVj`FE60g*NJd?EYZ-W6wX3 zrbCZ^VCnK+-qNG;K_Dm55}E;%0%0n)IVpj|wmw5ZlIn{J1aVqvwnd3{!z52cM|`LE zQ}EwUA}78LCuu=U5yq$2ezkb*Qco$OBQdNzEcdBu%AJ&9;d*$9Z?fSMs{0*rg&1}W z>SFOg*Fkx<0T9Y$?M^~jZKpmuRv#86rng;)b)pZ@26?q~tpS#A*a^U-I}i2zBdf;W 
[GIT binary patch payload (base85-encoded, not human-readable) omitted]
z8$G){v^u_%n|sLE$Y_YOHzldVMhDNoloGc3schucp0v(|CvAl7{5y@6&)Pnd&>Ub* ztXdD}qT0%u(fp26wBAGiljHRC{9A1Y&2LA4;{1hmbefhE(1Swa*dHGr5bn_RJGzs0 z?QyETWNW}pyB&e_6l6r!MD%T)6FgqV#3k0|*IDUh2Pjs%bzLeCz)x+tNapfKzF~j6 zynFx74+8kNCslTNb>#~^*H@R`!*RT~=d_{bZ2?z`AMYQRci(=uTwb30Tj&ow1fV&{ zMx1Y?b1{-vPGY0;$P|;uyT|43UhQNO8*;v-{-plpCkOmMxMcCl{kL!4E?<55g?V17 z?fqkpnQX$Dnp{fD`3eDE^d}Dr#9)sj74g#nSLauL<^MnY;#bSR{x|=p<*Psc$@1&J z|K;+({=fdq^7`9vmw)!p{`vCdmtXoP>t28N-Etva9&U4yir?+OSNcak`bs)p`#1OB zz4^}N*miMs?u%WYeg4_<`A>hkynp-N4@bP?CkFoEm&`Sg5UnXm43hZ^{oYWxC;Mx z-;4X`8n07nxcwC!%$Q2Kh z9fv-t39F#u(fJLPvY)X3dlsKghl|HJ0W4+cHm~XEJ?k#SzG?~WT)#P~+5xgbyT(&% zA$ql+t&^}nHvv7Y!ayIhTldhMnK`(ZY+Dro@`Gt5cO(t3^Zwt~e&7bAQ(ZpClF2b+ zlCh>sJNyaRL~;}x0@{*i{VhH~G`?{%<`9cZ*LQnjoJ_w#yV9`Fox64m+RT-E+Li-U z0nbyxd=#|4j_eV%ZJvuJ1RXarU_6YQkBaszQx{sN^P&GbmW>u2AIH2#x=Z%z6b{zX zF~_D61P%V5*89`wR=e2i$Kbey*bdHP{7iBCz*D#=$9Ph_w?W{>Q*7)09})(9bHMwA zxvu%EQ3u(cH@Gz52^}Fa5@yV0qI3T#Kjs;s+t0kwr`rLJ7aYS@n(bWTx>}_TbZh>g z;%WWc%pd78pYxH(7^R>C$n}?l@lv4tnB1msmDlS@DLBE-*z!A#jZO#ik2?I9>z|F; zg@d-m^<{K)Zd(+|SxgX36zv28bF?dLt8ZiD~m^HXwYUv(a50`_oYRBb77smOev z23=r%p*kLNzqntn_~!qO(wDbl@mi<+---TC%OG!*;ykg=sc2aK_B8SsBUC--oQm@H z3x)IPIDneyAi^$7-0A3we*VpR6?3CpEqpG@@0R=6NxfpuC%^W z`MIsSacMn1_(m=dF@LC6=u=0V`=VAWR>hkjj>fd?2mEHJ1 zW3zq^kaa-3k%>-z$U%7}?F--JS0i-xn*zj-GCxdkaVdV$*$)JX2H#?DKU}~cLfbF) zq4VdzxcK+~)4%!~wf?zp0u+&($Y6txayfYnQbg!ZAme=6Gk`qclxEP2k?I)uS-9o{ z0nL2V99|aJ@eIjy4EoL$=hUnFs&Kcc4*vy@ey zBA&=$)6^KGQ`rVIZlJs9+`jk&j}5_w7tqz{SZz6gf8dQ4qQeiir|b7;mhYl zvIR6#T>W*n@Fb)fP4aV+A#C~-eP=*uAzOOvoz-;4FfFC~KA+0J=;#fZhJHFHSpY`qL zU6wo_$fK}{yKVR1j4)aGrtKP3ks~JeMp;M#U!WTV-e^wDd;2>G;!c#k18AU zA@LdOXY+gpdCHTMJ8_bZ<5*nlXK&VXlSh=vx_p>aa6YwtdJ?sWXF|bL_F2iR`kZ72 zoKSKeW7AV~1kJZ%k68t0`AAEj)u5m|X-4HKH#r|@E4eyJUO`*^&qdE+%yAl3TA|!g zy(Ankq(=SllqX!%6kjKV-I}hC5&N{uVzp7Ls{9m#0{`mo=2auibbX^G-?*lnejt|4 zuJ0hfIHWe89YD1cKYm{ep4pIf{T^MuZ@;#&Iwx_Wefe=krJv;`P_N+M@;56!6nW>e z0`*eTpBbmMjZ(N&3P#kbLYBJ_Nt`D^enK(+P z^^Ddh@t#}vU!8w`L|XG7>ljK>wm<(I#|8WppKSe7?cHWh?~x7t+IAnW{&U>LCvm*} zxi+Kh|KdZ;a?Vtbo4kYW8O;`~VkUPG!O#a?HUO@X+(3E#$0f^YP5W1QF6 z|HCi-VR^WFT0Z;YbKl5+(lL?WlE1#VUaoJh{ZIhk;z#D~?JY_z7gzko{0Fsv@0;V; z@$%*-!Zeq;XvBV)z=Hzb|EcWq{Bn8!_T6%O_np5{(>(9q-!Cs;yjWhmd}Voex9^v4 zzvcHyKP*4~>MO0gTpY`{?(QFWdk4oZ#vm8W9+zC~c<{x7d{c$j@$-9|O3O~(pz-_P z{&xAhzx{{hpZ$0LeEG?r{8ag`m;dGe{=Y1*zka>^oB#8_S$_1RFLYjfxxD%A_40Uk zulUV!EkE&k&O7n=;`7g4|L)!U{QPHsx%~8}f4+S7>ZSCT-gj@8H*df5&mO#g|DAMw zSibz?i{(c@{z|@+ukLPrbDwX#+^MgwdC=h1XUmPh?SHoX<~P4te({UHUw-+^zh55Y z7k)nA=IToN{6z9Tm*3Twmp9ARP2OO5`|h2`$<@sZca^UrFh20#W}GF-iWB^Q*!!<0 z+mbB56Z6pOYLPm#wCqL$-PR4+00B&oz>ow6j;6 zpY)0ozoQ#qTse&t*I}4%BJcVhrMiLGI1Uu1+(6DMAV~~anD}kpeaZRL4&gr;g&!_* z{-oF;(gWTis08*a1eY8y(lM34<>yH*?W#`D)2C~OvZ|5WopR+xM;X7&54pz9rVM<3 zfJ_99Ws743 zp{yOVdLB~{6)}#nI}xir2I?YhemG!WFbDx8*5OWP`639uL!5N+$(*4)H66gL+x(M2 zp~Cq5CDU%7u(J;XxK?rcagd+TEnDh9oVvam084w!!pC=qI$ z<50fLJ&)LEi}_$4BwyD{th}X6^{?A>78);n%X+-|g?$U+m_G-Yr#p~1ip_&OeQHy}_~=fM%}kk7J=!++rj^q_23Zb0GS?{fzEZa#)n4ExAYI6b2e9( zPs_^cq^xhADNJ-8sPp!<1JxfY%{Twg6-#&DNT4%mo&Q+{$?z#nltN4`tuv>j^G#jEb=jZOa#>Nqfh1`VxAo)GCB-v5!C&d~0ah7;jpgiI9zS&H>FswLW7N zA9$ahv1hBbIb#lGq`SwW{%{p*aBf7mpOKgTSkkyfj>f;&__Uvj34Yj{Kj}bYL_dvs zKtsWO^Gv=#2kU3Plq;5C+J)<4umIfug%jfrpM)HM@nNRn2=5&)@#%T=V+`>p0%)us zzM}0e_yoXz^R3@k*_+q|z?6<;Az<@w&4YOaKrxPiv3wI7@-(JmDGJaL0>G8BVv`++ zGK-`mgXTK@Ko6ZO0TLikPF^y_!WSMp{wA@W3OEFG71}uzqpNWNQgVU^5rYYO@q>(_ z#@89gO%^Bm{3+`ONIlrp=ShCNETD{UK!RwWNM7XW9EAGE#ed=WjR5F~aqeh16-LyX zBR?T)3WClp3#9)DwFnAfC5MQSY4bu~Bhzj*p9k2aA~h3RPDfv1kTmsEA=5Rs5*?`6 z2OX8Si_5qz(}Ch(`Ah)RwR-(wTLunZu!A2&J|aMiy`u3aCBOw|L|a3g>?J_l#ccyJ 
zVN`!9B%hHV1XFs@XGVGYRP+CcehRnZ6Aut+`_?!SFNUle#}&r<_xVM9jgojH9FNz~ zJs|OFoc!|!4ni#pTL?k#PU(*^!S_r_vJNi{#qP>8a0@Qm%)%5EVdULbN&keEm48U zcl4(1V*4+W15hyFMg5RB%$<`2tvxf2y3lqCGXQ)F>+yS_8^gfQZEqrI@yAO5==h5 z1zhXjJ${#W(gEM8tTwGXQrFJOXB_lX@q%9j%J|Rov8<2>n@_rsLYqA4A~bMaEf`So z-w;PRe~Ov!jUzq=5PPV3BfSJPyqd6X(;@soa){k;h}-;`^$P-jPS^g6AKafAe_02U z>Yq#X8R6?c;x6>i@REOx@xNkMIbIY-dJ!wX%=*LQxRy5j>Cn?fS@gxI6Trhw7yW^; zA&HSDu6+7n2qF^k+tp2{9W@~DdSAfn@*UzdeYBfWQC{Qz{4?;0jTQA3B8^1>(Z4Al z%BEqOp8Whc&p#vT;WB#HYu!Rj(s8W?NT+v`PF`@@-GO|iiMdSwZjayxuIq9T8#M2E zjix#k^VIpWj(t2Cet2?R?%#V*4)>4z8vLEBo8@5dxEvfD7GC#1nM}&@$&uH_yrQ4s z!&B$Wt1D%DeZw~r93I3s^4XNeHP9hCE)Jm&ugTY(DZ5WpcXoQDbG|dz9iNpNHdMu3C;{j&S`NqPLmz4FCppO%Z$Gv8RS zy}ealfBp6H>MO5%^B&drcAuAn!-KN>?5XTL@Y@!yT)CpQT`5P0;w9Vp^uQTU7RmOB z{K5sE-QB(N`MrDP-lzBMBMxP2YwLc4!HwIu%bh!~mmTq;GhDqQ-8MH&$D0f2_w=2U z<1@FL-^u{cpX@q*qRhYUaM6BlPkw1*o&ZsM*TFR4DR!51+Sq~T!o*R&;Rs<2bg(MT zy;-yau6}%;fu1m-Y%!RNev&?7!uPPVV*j}gJtA)MImf7cRG?VDHZka37A8A1 zfVayOSN;u&vZ+w<#_`J@|Ek_ik=kDI3hOy|;;h6=gfxt(;}aZ)fUQmsvFSWFktR}U z4N*4a?QhCV9k}G<6O91v5K#}Np#7>{^k4HKfJ-v*6sV5+8Uu+x%7+e7M8e6Iuh8hK zVy(XBy`HKl=t(jCZVaP#Yr#)$lAiVtainB7e-h&uRQ=cw!$EK;C(-q5s0djXA#mNd z+xk!CW;Nh7`Y*zp z7$_R*s~pQyero-o;Q2yHQwcl=MRFdPpX$A~aX%kI57Tm~&v^d)N2rB0?S>Jj8fSiV z{#p|mDJ+t!%CwQFo}T6v6F%Auy-_lM;f>=K!%;PZ~2K^O@VEXu%<4+aSa8vQ&sRLB&X2J$9~#>WAnnB z|EuddS6ev{W(M920ACTdc|6+B=hT#O0xcP_x5OqIXW-mu%PYj8VTX=7ltU-|`i5Ti z&s1YNK-la+D&`t*Z1SCzi?i5#1kam*Yz!htYlPgigSE62PmE%wV{VUjj2i&tLCIvJ zF=T^VA2`J~@>4wNoqZGh+J9?%t7r;RZ{(7cI{r3#yrP^Z^V!^#?mVF{Ja6hNk%1d< zy=e&@wrX9|a3suiUay>ZJw$N*nfPJ5(#WAKd=uIbyZMz{;oL!Xx%t@iOdDi3>z{T! za2>rX&iTE^NxSpMI*zy!`-Tww&3XV?Zcls#oqVgm73dXHj|}hcxjoE4|I{KY>Rn8% zav*$L`v-4eBz7O5w#tINlDy$8*K=OC(%m@bctcr!G!K)Qw0&|N`r5W$Z|V=utf|8H z(Jmx-J6+1>QwO16w6kyApzYu#rQF*^V}Hq4zM*8Q4~1O-ev3AbHbg-*F)w7QB>9pC z#h-?BI+$?7i1lzE?`5&8f;m^PN&-x+V=?^CT?v4b@%y3l??@eK-Y z5At%Z-=ZPBiQ5EC;w=PDF4I?~o9%?R3eiRX^l|vx4wWfc^+aWeNyl3Rh#i)e?ydae zfAp>2SLb*$5C9NDfVt#>r&dR#vS_DZ<&*eYqXrCg%*0RuBLVQhfS+WzchexqkOnp= zXdUR}CW9~d${;h1fgA~RTz5;A(;=<12^E1M;K%%%bUla4Z^J%Hh_x>b0^!~&fsa8DWt3qF8P4AYNOP|Lt| zTE1`*%4Lid%D6+&nG{p(11LCv63D23(Tgi>(gQR#Gcdt@C4qYAJgjOKJBMJQuBegu z5=NjR>wpaK80d4rENYn1v(@0|8kCJvbFaR|ARubuj!5_nVQP`UA8`xRMIVYlJ)uuW zm=9S_48vLZ4z4C$@j)cFWSRxM*s(2LmYmO18MRf0{yV^bf8Q++swg^fcfp$bgy9y^U7xC zy$t53U!@0qOiwg2_9e~oFOF+eK z^mRV0^H)`vf*OQ_Y~zyGX*KQ4G0Qd%^9LK|U_{Unz%s}{3lJxt3<-Vu1<;c&B5@tE z3CcLnPcJ+AYK+%;Q0WYnkLl~VhXSNL!QqRtE=rl;5&1M!eo_B{2Dhnv0HzwEypqS=o;a|@-0Ms z0&Dn7y8Odm`~p!MJ_WKLX8;tZEe7SQJxC8b&_^{DCr=?#UIgK&oNhbttbubPmt~zI z2kJAeK%`hp9Ook)vdI|T@%%7=WVBepN>M)wrXc`*oZ~g3S-;f)eZ!%QBDFrpEQyTc zXtpE2fqrS2>KF2Co>MB!@(902pOF@1sxA$SK{OCJBp)69^b44Y0{m)QR^+94grc^; zN~1VxiTSDL=;os^^n->J)h$hC7NWfBBqhA&VNAh_v6eb1&ACC8fgk)LAIHnY_4zZA zvoNYQ7^*Dm3KHpl{$brF@0x7zRzl+|-0)9rZEdD3Q_^za|j zv-D1(iTocGl#IK7;3`elW%}`;rb2o5w_Z+IZT=a8b1jcLe7|g5E>7`u&}O-^d9AE% zEc=Q3pZ?^7vRXFE^&8j9>grnA-#;wt8=KxpKRw~XVJYk0{NE^(lM`PQ;x&fH#|LF{ zrh55*ZC%Gkt?$^d=c38-`l^~+{&rs|-iV%@9BIgE!OzC_6O0UzSqw>@D-z#sv`D(ecbFCcg?b?fH z(tCe@U;S)?IpylLtL4V+TV;*67M${#fa9|F^nQ8r@bmKV2OrqJjqR<{R&nyRS&!E4S1}tA0bn<~DCrI4?&>((RNR zIk?~$7kcRbe&9j=qc{3S00t0!jPcAk;M~+>#RQ&^zzsa5)oIDcU!XYUb-e!E-&~>! 
z{|+D)Q5S!VXHX*0Pcj70tTQ-9PZBgbLEv)r3)j^U3fm|fBsYP!)A)5qaKTESIW zHv*00c%ViB-bP8h5aKh3)<%5*A`Uxic^rJLzpt%fztP~^2SBmO$Qwz&q~YO;D22Bs zJ$sZS!NtR|E{00d0mjh3d|@17=;JsT)q`9haEKK=UjW0ni~0_Sv~dzLmwNKkxBr06 z98NW5*2fV1fnHXC@G4Co+PG^#MK6nZUZ}nz+42N7ATv*r@i(n|K;~=Qa59#@{7K1Z zpUCq+WdQXRFCfclTSD~#>d;T8KzZsc8z7%)S18bai{M4{l3T0eCxQA}(bGvm+p6;y z&RN4Mp-@I3O~4Q2VNui5kUZv+Pr|q#gDVOL|iXR&Hw5dsEBDERIQ}m4semoCgoOtcwCca&8J}ff{H1;{z;@F5<-DcxZDtxmf7|E0 zGY&IyF9`z;x)4{NRe@1QoT26|4;by#M65mO590Em+ryARGyjGVNbgmo^o~lmF z&vYJHR&=hrexh(xR{7@t#;MA9>(hY_V@H<5>+@F_fAUjofChiB)$;sL{ip?%pR-}B zb4Sh#6F>A)>v(+9ejF#mAB059&3X4@Ra5S7(5no5&gGGSed@@ffY4WHoFeU;8x*sN z9j{-<7pw)4U484ok0>R}Z(TIG@|+V_d>6U9U<>w<3A^G6YBnQz1;6lA#vgM~yuMt4 zjZu}c$_?$6$?sIPPLJ&`IczXi8t4zh-G)?*5mQ@{gC@+n}9)ew_ zJU*1}Y}SYUSOgv(!Y_7&b#^8H>`zk0IAbkFKeC&dgR8%V?;8a)ml(JDiZb4~#yJ5^ z1WQP$4-x8z;_GHnKXE<%sN%&>l`q`rb1Eg4skkCv$?x-E)p^s~yU&y=?N}+%dlMV> z63!oc1NNEq4L`>9flwpm5TgU}M0xvE`Phg?2XqNKWO^?c{`#|c`nQT0cPoC`%j&4d{YFp_7!}U*Sg(n{IjS-z5{9Li#$Yd zer_5yEUg6wb<8jJqi~Sxd5TW}{J}r^&hM-GO%#j9s!Y3I45glsr3iONt|#!8Lgz-T z87VJe2sP+r#DmZD3MlVL_%aY>Z3wkVL*r5hKO3nArkFZTZZJkA3_4##!U3tHk;tpS z*(hima10vsr_QkgI#HfHum^l%@KB4y5Q$%XwF8|48hWBT4i}ix9!|2vN0YZbU_Wp$ zfQ~Lg%;cezXwU^!4DcWiT?(Coq0A|ipM&QGR_bVgpQ8b6cLlzMK;587jYjz~aIjGo z8pF^RdF@aY*1;X5{+SO{MI!{52EHAygdWe3btC@J3ql|A=?U7VgNio`W4<*`zV=hE zGZVPV#^p1R{3?7|`GV}fk$EwFjE|t13tvKhO!^A||1OfzL&qcRi2gsw31H%_sTLaz zKZBdaGyS1LD^I>PzSQ2!Ao--dA!pX~TA=EA_6_o^?q2gi(H> zoryP&mFie{Q8Hy;06zAiXmCi4LmtIh6;u9Td%%b;kGnJa&gvf*)&>sEk1iPF$MGg{ z9b1vat8tU70pSY&bdZE&28lO{Yh=I4pp1i_eD>vm&OM_UJ(4ry&xC=#kA0paUOq=w zo?sU#Z&p>#;$!8wd_nuA*oL2Roj+&l$1tsT#`cDjqk5^a&=pgVbkVLoQMR#z@Tkew zV9^sW5;8|<(g%!UBHCDV{uBTm&-n-mgZn%BS`YtOnAKxG40x95^ZH;otwY6=o>3mz zU-|sAF7OTTRl`|k6)?!}0bO#=rlBeGsOaJzG7n`?tOCd84ecDXc|kJL1G4cX(@)2*#%BPo?Cg{~cV8*jZ{94E z1JpH0YITu3CPEX6m)<)Uc*>d^O(Sbb4 zUH?nvL^Sk^_0=^$fzQPb3IP8?Tkm z?|)JL^}qU;<<(c;Du4U${LS+G$!GNuYdik<@MLzEO%ddqrCFUD`oTMHSy=E4%x0wb8>tly=BYpbH7ah-MOKIw!on) zEAa|s-r_(+fBs}SXh*Tq~RRBVl_ zzp|F=Ujb2WU~VC%s`55Cs`B}T);76d1Sv}9?9OE*MFQ^o5>1hI@OXrLczd=u&10#VvIwf9!&o3dE zVh>&vgzRY;m-*uQJh07!Uh|;}2A4K#e#-Gz+ab%Rcmkx8ChHUywVzPjCkDq3U{=X% zRF`^Wb%Vqk#m$a{8Nv#e(JQ7Xt7l+LFW_8eYGUk(E8kibeP|w3JcH6xc*^wp^|8uh zvt@V#z%-2W$#@id!+stlo;bzebI7J_$F#9wGyS3J;p==r;!o?B=XbH`J;(vi!=@iU zh>-7Rx+pNy9}*IAh+|%w#z!{ggZxWiM&4*Xst~fAs&V$45kCB?VdxNJKJo$6u{X5l zDw}2Dzl5}JfHCd3{l-x7v)!BhqH|HS<1D0H)v1rfc|G~qjD|!QVypAr)w8m?&TIcq z6oj{Yq;onp{SSqIBnrh#+{DYdo2bl(@u1=+JIXTfq_dxn*7>wn0U!vlHSG;L$m!dh zf2W_!Q~#_Ld@NNt8-nqwap?li ze)SV{Hp4jfJ91POHJgtIQn z@~|Z6Vnbg6zEXfZdgEW(JZ`*DIpbJ-;Nl<9wN4CqS}Vt{tt(O4-;T#t0__LB{_Io9 z3VjA2!P&35$4Ao9?Sg#MqEF})8|@(#o~R#b@vA(6c;!8^v6m{!gBJg?v5%s;Z{tmO zuSdbI{ICmt>a!jjO0)i_zmP^gJkgB}{Hcw1TnerI4L@+%xi6N4 z3Am)C4>tgupUY-bFH2sO#Z3X|PP++*dkf?n_K#^1^yDoR?o(v&9}yDq3o_ve@5)2O z=7%pPr;IglU?iE7b8Z0m?(YlDHvmKbVS_UB-{)J>N~Iq{m?e(;d~9gV-UW| zxGa3=z^368G67KRfAZ1J4Vn&B{}uq`D6&mSW5*~*poMG5aiF$w^DTtbvBH;@?}A7Y zUq%l=uYc<|MKAag@=ZT(zYMPL_$~sER}vMsZs;?9J=SV)89sJC;=qenKn5tbZU84~ z=#c@K);}QHZzeQ8mjU>p3nAtn(#V8I1|Z^4H{}ns4)R^t*wc3OpiJrC(Yi8iT_E3R z9lh}>M{lJqUom5?2h+?0=Fg6w{Q*>cMj;OT3Pk=H%SnF3oejvhe#$moh%3LT(DRfp zH5NJ1nf7}^KmHXr-kogASLC{$UU}wUGp0@ej^|@^g{}tMtz;typOF0rKv(1fFP0zu zj5(k26Kxiumh)HH#^FITa*Z1}8OOf?!gq0FALZs`UePZJ%ys+`Yy3fD-GCZnKXahd zItp7EY^t)%8eHV`!4(QG33a{*5`L=RdjA`U$t!B?Da-aJtr$8((p4~Bmi4iY;qqxa z#`+kS{5nYbRuA~}PqgS-!=T(0_^$9q*Fn=a>+oU57Fq+)#V*#*dCs3zU!Q*=OKr!w zgY}JeykNo1IC4?MAtwS0Og28^CWp$e0)21ToTt7*_On)0$G`nyK@kjHjLy(l7U>$h z{Yh4OfzJ~hT7Svmd?nj3Dl39;b>m}c{0(c% z25LURuJuF~ z=tl170xymo@|m;K^k^6Pit9BPkT{CfezZC3JjYt$T2x&lnrFiwz;kvrho|kgwzdmT 
zj4!XOiPJ^7e&beI-&`#peR!|@@K4__&mTQ1w{G1j+dEgh3BS6$X45mZ+qhUbnOv0P<9+eo@OSum;)Qi4`mw>U)xH=02fMrFVDC_xY7XUX0kVVF-0R3v zw(D!_4-OB@(eY8)+1c?;3%*b${v3Z#xhTn72kamD0QvI6**Q-uap6k(sfyQiuFAgM zr%%hLpM2;y4cvY8)pGCi`{m#K;lD56`|kJ4Z~yjhmc9L>viJOP`QrX3WpjPKeDmAi z^rsMxPYz`FQaL(2kesD*_2xCvzh2h2cgnNJe5T;2oCxQ^=O6n9gURH?I$gVZrQE)I zTmHLMwl>zw{@!!H_W$_!uxxI`T9@znU%PtUe%d?O^M?PuPd+Z6eEe~F`0&2;*%j_a zSzBK#SFT(wufO?~^5$D_msj6@qpZnC%UkPi=A+%?@?8G>;=!ZxSUU0fhQq@n`Fy2p zU)ff>-YD0u-7HtHUoR)e6Y)KE7l~^en5F)tc4bb;E^DBltH6DdF6&A)p37B0p+bx& z5EOX~IMJdR(3p$$OwG6cpT`9Pd;q0EY`myT*_F=6qnb8An|TPEpe`mPee#44(9+j&rlE~B1o#cfM$)~6e9o=TEBzFmApMoWkT_W({hPZ5#3(eekdTO6cj_mZ=9esjj0p zc)p@M0wZ$ZFD`Z6OFS1Wujh*#fY^mK0O_kPT-5PLw#lwQdK>~Vz{zZ{2wWdp2F~_n`gO0w^ZRHr9svpgYG7<^oW1i0Iazh(dK2!L! z(60Y9{2I^npOM_uryoD0jjPBp^KearVjL>D=FI{8BKH+!>77vYxJfM0pY*c#H6gQ72Rf`r_X`iYg-30fc6Z}K^9k8DxcJ~kNLqKa9ku+eqgW)Mnf?@7M7&POYktp_8uW zQ`?_%tD+M8$XH^n?c;+;=tpek$8l7?6kk!89XA(L&p6Lb^Z1@?;i%6gfKzUYpiDT_ zbEAWAwh?~X>v62VGC&H%x88jM*xq&vP77%e`^ve?1Ea7@UHyP`rq zFe6WK)QTX1vMCxl1$nij%z!(4@(uvM1?$K}TXj;Hhsy;ZyiV}omR||r#*-TZeuB$d zKHFme*Gs=%-XgFZLS@mKu!pXlUl!n(A@d1+`d5Z8tD7eFX~1^1J$)nl$DcFgxLMdf zO_#o&{Fm1c5!uFzp!In% zm=GtKYO-!Z=;!-C4@jAR?!XDW`r$l#{w&&Jk)NjR>yAAQ{^Rt^>2Wc#*X^Bp#Jot! z2mJU(s?Hvj*B4ru$S_W$#T zzNzw?Z2#X1)7@&mpYmze;jIam1ANCJ;XiI`43Os*8U8FF`MeC@9_KRLFAFcjH|uiE z6nNbr?~hpw{yr?6DHfFn^tv>e-Eqh3#V-D3AlF2!4HGbha>k!8a48*m>A-reu9^6* zC9mDz-`~|@{G{w29+l(M)3Um`R^Iu_*UEqXd;dxK8^8Nk%CQ#a|K$JvPs_jjr~jh7 z`zP<0`wt(K-N(<%BE z0UO>d{Mn?QOycQwJ}Gb{TE1(~tM;M2ac#StpYk;OW50r*H6(8k;H!Yhg)dL9PsA6U zxPU|1xn!T69{Yj^PcLC_>Tr0-XAPFh^XJc%UMknGT`MbBuV~4AZl64S@U*<~#v78q zA#vw^9Y1=lZLF6&cW;;5ckZYuSIYCJ&n$ztCOm)es66@NemRysJY~PVy~h^Y+)vH^2Mc^43@0DNEAl;giSZ;B-=+Kiez!pFAk{A4~qzUCVv* z^*72pZ@pc<^{sD}Z+z$5<<)n-TCQBX;r@Zm8(W*@)z@FKUD=1Yv4b}87@^<$f6~9vkyce{+0{!;4`ogFNFq1w?)pFQKpR@iM*~kd@(Oz?Yo<|1h@?d}+x3 z!Td?Mw7*nH{Fj9~U%pJA>~S0?elPD59&@4c?fLhBKQko!nfUl60hPYYIGBdCX&8gK z9*?*?xcPV-bz@mm@{Jep^6U93KacwvUTQw*@qam(*RJ!R$9p-7KOaE0!`Qv)gN1(Y z&w>LtdHwS6%lQ76mq?%L!;4_ixOmz2?!oe2NY)?!0DKZ<*7Hlqe35?Rx-V-B=;@dA zU+UfaGwjX!I{#4y&b;x~@-ONO>61a{Mfx|HHS}eFUXbTwjFUdi7M5MXrXrg_0;|C* z{nyUR%KEv^Y0q_zdR~@SkIT~PZdqE{7eh85kA%p^zj#2zIzYUWKQS!5H^b)S#YAPC z=jptevJ1&5r^4o?<>d*E-=sbi(6_Y5CV{5ZMEb*C^tdw#UXSK9Yti2haLQR^s@) zTMA)c>e=^q_l0_H5>ZmN!(QFz!l>SA1~QZ&}zk`yddIK;x|W|f3`FSx<&Ex#RW9cPel{j|RP$wsp` zhsn#|zTv`bu{nIrXVd6Y_Pcmu^QoWwrvF06Q}5*A%}wjkHaxuJJy*r)``U%3Ag>+d z-j1v_4V%=IM+mt?%i~cMb-tV@6X!{D5 zmt&4GroiLXF1$A5_VC*k@?1XLLz?WTU z=%PPJC)vujGvm?z^VpIe>Rxogl|T5ec>}=3n~)lqB&$6;XejWyU2NasZ)buHJ~cf$r%eH_=GXQB z#FbY^A3#i}0`j-g)jLyZ>lf0uJ2nYvKgrOWvKKR?4>O8#JVjJVN9KUXg>-}FVadDrehpKeg~4Q2x0udi(CMVxhA+}iV3(BFLW3CiS^ zI9Zl_NJmKz7o4|J$3Li-%8z*_0{*Jwr{aenl@pD-ndk#A@VOGe_tirmXfCn8jbA`< z(lwd15$S}Qu|VplDrcd|OT5&l=FME8`kQsl)H-5D|LhM5sek2XHt--p`FNi0_388T zfO_bKL()*?XS+2&B#ityGk*?s)^TqCOQ4Nqe`^!PoHH6t3tA^)Z3j5k4{=QjV*Diz zZ1M>a`V!3OH)biI_qDl7X`YneqLy`)D(9!dCM~vnFMexhyIi|=wY>T2D`jnGqrCgx zPs+dh@Bc%2_}PQ<_8ae%tJkjiVjOZdH`e?e{lkN!a(uk!r$J7RdF_5&oY}durLtwe zUY`wfo+>YlWvGj*m~v>Ey^y)L*;4UGBc| zx~7iva(ZxDo*0Mpgj8G)AH=eWBFpOT;18RO&i|w5tv)h6 zA?qXWi&~ARl5vqHbW|}@Vfdu_$My{QIA@24srVVii}cTUG+)6p$zA@d%%8xoB3}vF z7n0;hVz-~{r`?QK>BoBeycoV!ev=Enpy4i{&)D;`k{KEjID|$tADZ06OMhgZxC<`! 
zPmO;y{dod4kFoq^UF3$mlkk{eBxz@{%n2VPn=*sK}UT}`}&B0br?-y#$DXXNh?7dKQ$h=UybLk zOOaLa6TJSTu7>7^5IUv*2y%W1dm@idX7iag$Ic#Py+D7ea{dG#UctfWjM>2+>Qt8U zvyHMpyEdWngM6m_Du1ONI6l-lb1t<07F2^ina96rumVy1IA*?eisOT!@|*DoVN^!E zp#6rj%nyKHqK@Cxw?f*3|LtVxD{eowTtwYGddlUycFgO2EC8qJko2|PB#$`rZT>MR zrf5bdaLEnjW8D`*vnlaXpPKKEf1wXR93gb33|Sfr-P|x82Y!SE(bV>fb^sARK;mY8 zfI$x+^i3Jab6&-nALbGVDL zv32fm{x7ZU%5_i6+0veHkHsFE6CE~1uQBZ>n_;r7^JW#2+=hP#o(w zkhQun584l#!Vhelk>>mo2z*%)Vg*XGW2lUuPWLUm0DPbiIl-5}6YAk3Ee9-{u)0cH zqXq59Ilt0E@HV~>P|;5;E5v$0Jf$zR!9-MCm#aEa1RiS&ZyKr`F5LLb->GzFTgx{D zoC#U$er{amn1ao*2PavR6Ricf`S`^4o!ftk!#v^1Z!mHOKBA$H;}CgOQOi@(s!@}K z|02jX>{EY;H5)c)eMKJG_E-4bZ4d~y)n)Lnyy6-SevLi_iqldio>*6j=UtpE4An)G z%BNJ(C*f7k-{MV>q?DG46v_+Piz-BL;J}VVS$IwsK#RdBbTSa!ByWePX#~ykAm_Y>kq%x zPXE?Hx&`a#|L6b>{fsi|fwOb93u6u0DsxKwmSq$h@L{Jk(^`-lYp}^Z*OtE8{qA}RPE+d7IA#0Ub;XF zKJx54+`|5Cv=6xmY5;Im?oTfGkDCN!Cvp@Jzr4IAy&!=ffjZG5sLF_W*&WY%QJ%M!F$s;0N7b@j>dvv4D3`O6oauU)kmPoywV;ap(KqT zLqore7#l;L0Sp2^&~k@EmL}$BtgT@E7^sQ~ zOq8v>+N)d*QOAK1>3IMST&0OD0pE~bi#k__{?39b{Vk6I?8gt#Q4fFTK@8oPSv;^* z=84;KD~b}?8Dd~j0VJ)GMy_@nhd5|MA;lIhUNzQF;3edVSMq6b;tjEN;uUs`T(ieCW5*g{6#uIBC%#H- zUz88F5+@JkB+l(mz`UvP2c$kdJv0R9hB#na|EWcSe`oy{x@CT3{}{UVn+NX4gEC4p z{^#i#3C4Gnr`$ol`Fu(L$DGi?FWr6vUT@FkF!t}PT>;6zLe;C+Km0QG-#85Zb1eGl z`V5t+^-^A9DC>jEE1yBXuqWz)%lw!6bMQv@X+ANQvkbn8KTB_D^y7Ge{MOzHS)VY< zE89c}e2g=Sd=XJVZtOMOn&^oQRsXTxRWBc_JH~^*n;-?za4e*VGn5|ezl3O44l!|K z8L&wHC=YX&js5S_&PW0?{_N;Sar84zI4R41J3Ia(9`#f7aAo3;11$Ne8q4T;eI4CH zCUwgCdI6Eg#ggahz=1`3~bYV#8Y8DtccR z<%3^bjy{UX2V^|fE23Xbe?rnHBz{8VeQs8B$sulT2>K}Pwk8x`J}qw+`HPx zZ+w>!Pz!bV%u7DUukw?=P2|vb@aJ6~e{N&O>Ec?rok>Ol#-c|bYxMbl8`j~^pgw?vGS{?ujApY!>8 zEL4NW@sFaUtGJ@`Yb#e{{lq#G;Cv0#oZnORVg9Uf%C2SbWnJgbsu7$H9CY9b6R&v$ z%mlu>9vzimU*FK8`K)YgZj|*+o=O)t(Q^@DcW>X{+~nK!uf6tK`OAOlH_P(oX8Fnc zKPex5_=~dt?67QXt(Vi2qjGX|>P>Dg;%%(2mG!mta-CPrpPZD#1D-;<@Dt|NOZIYc zhq{wvHl`2N|K$Vev9`8WRu!1n`EEVHCiUUoZrOddU+&zx!?g=7?zzy$MmiT!L@T~= zu@YaN`=)a?xLJAe_i5R_vQuu} zy5(2(-@JXR+`4tM@Cwlv4v{APpW)8n$He!-gx zHn+CR&AWHXx4!$m^48bC=_fSKWaD%B^596aMf2dnC+;&>wy%||l6m{J*UQeW+hrmj zJbCc2JpTN?-z0#1IY$1PScmiTQ{3en0=NLR!Oa`%>ptx{<1Gqj)JMPcISh@EV?zDN z{g5IWf^ugRhT$JUlwOaSmf%7GZKU!k0{B!JA<;Ve#~13=U)}#Hf+t0hPy8u*_xBiw z-bku+`mF&-`6GIsY>LF+0-}d27kodk+i{Uz|VMr{xf*Ve;g-$ zLgFWk^Ff)^y?jJ-ymFl~R;B4PmK$7r*|XY3b9p^XUTmlBB`xTVpN{MK-XJI1gKJB@ z-(rq*O^bVdT{`@9FwT!)#=coOQ_%1wx9UHwXOs{5iax`SI?{CC-oW@@~7{MuF(bd(0Bo5V(|0``5?WNLJ`&4A3r|$4OSC z%{BT5JTRp2#jt*EijV;I<=p+eGAKx9S)XS%q8 z*^kP4X#EWwLh3`h>e6HgQomML@mqQ2*YXLe!#GZP6=vxAJh+(;dX9fFj>GTB&lqsp z$1&Q(CHy!Z>N?2&TyJbKP4>|$E9i3EXWhvDmY>#lCT{*u$ohnFegsqP2c$k@Ib(ei zV&jN%+d6|6u{V|kt@|~4o8vZ;)N;)4r1zFmK;)<28K0{&nW*rda;Dv7IXVo8>=LQ6x#@8P= z5A#ibKh3XohQFCko!1;%dnoO<1e~0DEyJES=hdvG>_hdrxWOMi&`a_1YWSP1eHrt2 z>;sUJF1FLD8Y}476frXJmD1*iuHdH~v{_D3e!chePIxnT0oz{F*Z^LA;vcXYb)qJ?tft*BRztQY?6vi>yvk&*`js`MRkyZUR+hKa z8Rb*et*)+@CE+O}I@!r+r!&I(Lgj|4PwKIhuM!D=Vnaa26`J00zy-{|(c=Rk`PY2o zhc5)}!ayx=dW_{}oP2BC^pG44lnb>-zxRQ(h7 zC!HhJrJn=8wL{grJ#mr#OMldHl5&ZIf9g5N517_}#;&x#jsF^V{naU&e?dzk z?8o=GUZ_u<=NGTi2LEq}ruG-P)42PPeFe! 
zB9Q&Trz@L+W9RD9D??P1%h-qqgpW1de^mg`a$$Mh#(NRd6ngC6Pu^%d{uyxMYA z^cv$lv9P(lT{gEi%E9wPRU;kR!xp7T6$1<0> zo;A1ZTcJt8Sb z?Emqv%KFMh`EUPM|DZhH+b@6e@BVGs*t{se`?vpgxq15yzb=30%C)k$|Gd2Z+BeF| z#%kGndg#v+?C;($T5^}4z54@yr+-6sU%Ppu+C||5` zBgEN7S>0MKr?PQx_n_?U?UnnF9+&Nn&GO0{ub1zC|JTdw-}pu;o13!Xpqz2hQSEZ@ z{8{dJ=Op1@5Y zT(I<86XY{4_Hf;RPa~Y29m}0@p~@G(#KSl8dz{cGMdA5O>vGQ*6xX-|=7;Qi0nv}^ zP?2si-r{^O`2&jESQPZfZA0+!`KppJ4~1?46iUyg)AT&u&O0t*{V<$If#1n)6YG5W z9?#u)Z*kJK^OdUG)O@2*#?tmUnJ5-;@%d@hIv-{d#gYx`S&sP)K~K6!m_SPxw$e-WhI3eCO>O+LDI zbvY)ni#am0tanlsaHMDJ^M=Uhao=Z{u226|{j+`49`?_S-y)8-1ULpMYS-JSQ$b$W zYyR!x-qy6Uj+X}6A0$7_M-O-?R+-lU)&)F{75F=G1l68?ofOZjiI$`OSYfvR2ZX;;)}ZGgC*sy0yY`6o znI3=4Pw2!sUqH%Bx`gyk`j7VM_@!NJU{PHCIX?c-eqmGgkHj1FG}zy&uY8+-+jyw* z#_~sg80;A22SlFl*C)*Qx|45^PQ&$A>M=CT3t)vus_&leRhwx#v0u#~#lA$;bXu&XtTI@#a0*T5noE`OTZzip9Y=ewDb)O+9n3 zhjU2lXglh4UQu~H!k!14y^;t01>ZmySO?+dS_NCZhb8b$06K<=z3kRHST01C`FpI_ z*nL%FL3#M0r)`xK#s~QDjJ<6(@nwU~e*?l!Ha(?+*G;zLgm3)AAN>`dDi*)kyuK)t ziTtj5@RxOt%?otWMRn?LwDFSsc*>^qggzzaIv>GC zHt1mu6OU2&tMcN4@9>NN8Tj~w@fQ6DeEK!?Ra-HWo|LQm`4#!d z#;^1{*)pLYYYvBp{iKL~k@I%%{{-IBu&TiX4jhfkd?=$==$8WC(BW|;MM7_EkiRrm zq@E-(CzNth{SII|W0o=I{ddVhcL-vPBQ5!nLT}nBV%$s*L04`-$hy#-aSaK=FaGVy z|MtKB&hJaYn{*fugCPhC5GZfLdZ1uh380L)b~pqusjHs612{DgAMo1~VnM9m#E%lX zlB?Ai;ruWvKzQoFwQtuL`F4NciI`U@N|13Z+W>NC)Q;5v9nv@i^cN5RS3UUkKqsIF z9Ti_iZQ5_=S6NQe3E0G!4d}<+PP`U|*SWxs3q;&y#A_h@B#dZ{;{zl1NDS<(D+vOn zseFy4O8|9(+F{8%87c`zbk#oteIkzH#;?LyUejZcI~|YtrXI@ULneQ6PSOqLo^HS9sxNK-A{f_?^F#YhL&_V&pilId>d&f2>fh|>@B^m(*%m%EKP&_j zKlM%i)A`gt$I&1+>$4!^Mf}HjHI}@79QN|rIP=AAaJznG%49Q=m&ZM@?1wq+H$4uA z^nCo_PhB4d=+cm<|0~XnKYRN)!wRB^2^@S$cJodwXgR-uHt;MX2Nsc!^Gce(3jn=9 zLcb~ue5OI?2vfKjPy2J2|HpV4S4g{BJ4vF8DjxM#FtUR>V}8|t?Rd*FXF#`h)whP` zPl3;a0){pgMa9o{bH7r|YrT5l+y)-47#5X3Wu`Z~4yV_jYVmMMjx)6#=MFTrsHhnE zz3QnfT1B0L=P__9pXPzYhc9u~C%;j?>Oa(v<5|7|ifv@r6M6vyUifob9`k+prXJ>o9zn`qK{;8Q+N-Vt%16aU&o7kSBa7 z&pMt=_p2zW*I!m*z{B0aZw?j zie^C^uQ}XU+w?2@mxa5sxn}x}^)2J_je2j&@9oP|7v=hm>*f33`(F9xH@;T>_>X^B z-uv-S%lYxCZvdD~j?1;}?XrKs2Kh685`a;DHktU<{u>*cS{uejGZ$mTT;+%5!}^F6;A;OKDA?Xs^jo=^d}@gu&my6kUOvTi4iC$R|8uRI~eB&KdpYs?3k zH3a9?_m#ok9B(9J3^KovQM%Rz(67no_%ng!25F_qa>aFsRfu^@BTfOl%G*dWjshxP z%O^j8vbYX3l#lU7v7;;#G`_?!zU_y`XNaRbX>%SQ+28D10G;T7DxwER4t&EiGO ztYPM({^9OsNPe|mwo>PFK*q=n-$n8lLCUR=Y#LO4_FW_m=xkEON?R8@N$FZ#6;&~P zkZS7S6ZqK>LWi~_ey@XzRDk8 z7Z~IcryTI(m=jnk3kTGobQNC_#O6%NzTc()2-pCsbm{tUK&`Vs$%S8ClRg_k_+eZ( z%Devr&xGiU*pE-rZsqb=*#MoAU&PICD)+ou?E^P{r;k7GPa`|04gIe$W8|JLz0)Q{t7zGHtRKOpp|d=t`-)LGXk9@>cW zdKWdkGyzQu~=Hsx$(uZ z&dE40Ju7P)r-g6+PnL8Zcd;uq4n@hiv(^U?B37cj!OYlDHl9G2f7CArlcl$C*qCIK zHteot)>X0d%rDb6a$cBxI0p|u#(V-FZ`f5Gf(LcNqYseP->|6{ZINr6l{KwdD69Rw zwGr*;J*@C++u#;Yimah2nuJehG@}n{H62L zPpkdtOnuBLwI9I~Vd6_0p7Inre3hO|thnoZ(}(#mzlL1_!I$wSEtJIuK<$IQPc*^jUJ4Q5KVGXK}$Nd^is% zC#1a0_u&OVn;8xLEzrdcWcZ+4)Ojp)yodG$rg38d0dLYB|CS%Y*nhq=Q^+Q(v|T*- zCF8NKoahkqGcCW7b`awas4-6<(nsmj_`$^<>4Kht(%^V5s(DArPsvUo>A`QLT`U;N zEH^q{gPw;0{=Ho@P**7+=1s*{IVb?X?p7UMJAcve}ORLzdWA!PjZc(G#LY#9_hyTpRx}a zpS1LsKy2*TwQUpAJ+M+i4orL8aiZ9NxmXbHZT<|I{sux$aJTpBW_7e;a!}% zXK3NmNeC#xFA(&E1w$G zAUs!tmjR&A71ObknFk`x`A?ki*Sy+KXOc>Zf87k3*-RH;K~Qn7q4I3U^dCwZ1E_KK zaYNQ+S>ojJSNTL~atHY>J=RU~(JXErO$Or_h;;h9)m47nb?*}UrU&wppm|{M+YsEg zmOUSAmi?OjhC*tPs~{iw(+|d_jVr={Xu83dH8!0%X#S)m7jdRDX_eJLs{(}h2grCl z{!AR%PJYf;>A$#P8T@+w8KwRu#;u`KLapo0pWw?K-EifP*ioh?xbg-z8>o-m3fWI@>z7laZZx7CgZ}IFPtc>uk(~p&~R~y z*IJ&O9+j<~^|HCPRZdU>@|D%Hy0)QsP0K>jXe}0(GA_LNkAc$0`faXmDlX-%uYa}N zy?LYj0Eik9`)J1gJ&w|d#75r$~NA7=cgpZGdHJ5emR^Rm0x`LpxpcD^YY4@cgoG1cgp+k{jc|-j8__%!f@rUJ$dk@O?)_S=qd@g)$ZElwvH*b~6 z`Dr;lbXZP~k9=bRc(g2U7*Hqi=NH!2SA7x3 
zZLUbg_9p$%{nJmXs7^kOaX>BjIi9jVlGd;KbNo?4T?UmgH<`ZTwluslB)^>RqAZ@Y zBd#nw4#+z*3Ib2MB#_H*+(;9|1o$Q}W~ic^c@DzFBM@=^seh4qtfm~JphxJ|K>2`( z)iPPTbw}by`X#U8u^dmvQd5dEO#LkR=DoG~|{V{ZjH2f*2AHl1R zf6ADnlulpaS!x0(SCr$AgNl>zaZ#luj&vYIfMwuy<@Tiu2QZJmY|z~RSuG0@Jr5T+ zgql%qz2va8Z8{(N6D%iVL&SmSq#(;23F`dif<6E>h8~5Stqtdk*ot*rcl#u&|LC9m zQ2<^o%X4Ix*LY_95#en8S6gdgjgc=dh=lKg@i&}Lfdik4kKf?o^WK!>K+rkmd;of8 z5U9$u)wV$&K=g<*6UvDR8nI1I#L@2H2A`0vVBZp`X9Z9x^uu{Q8-Foo`Baw9?YEZ7 z+QxZVUftJepw8o#4up8Dt`pxN=X~EjB7z6J^uysv(sYi;TcyNPayi%MRolFU3_QyP z#K$+LSVz)2o4!B~a9ATyF1*@bIR_3&^p!|se{Pg|A4Pbi;RWV!T&ao*R{eQ2#s2bhsoSB!9lZI>=WlXA8~hm_O|yn^g`;NYbAQhT}lU_FkOp`OJ#YSJYncN1l!kzs2O694oC7U5S?rVpn2%XyYS6?)MglCOf8)1%x$f2vkFd2~-cQI3wN z;Fici|@UaKln%A`+eo!bPdK~oxluzOF;^O2^E7QK|0Sn zI_tpoHa+zc>~a)H+AikeS~P|xIbxBAQ4BCs1|N48|J&j5sti%k(ZH2_XnZ#bx;Nbj z!6)PqF@Z)Ex*ObqRLHE*0Nq0Vz{3n`43H*DeFMUK6-*lXijh>SX$8b{LB|(uP|D@< z8Q*r1+-M-QP(9@KK=LA{EWoO_e6@qZ4rj=W^=AN71`Hfznv_46)*k_AZMMyr_62I* zynu`=sIv#3Y56=s7LY=QnC}Z4U4y0BL+>@bocuJ+ZE5J_+lf;x)&M{E?K)#mgC6H) zU>ZMV5QR)<1=I58>hCD^Pr`9Lf<^wDX}5?6A7uQ?%QvsFeC8AXDep1Kcya|l9jMSn zWBFsefWfaBJDP%8({Os2Et|ujhXwu{AnB*|j}s5lSwD(l)3F*aAbbIhEN-<9iQEwx zDnHN1f?CZXHBeyuv>Wqs0l zKCY>V!XG|{8Fw0gfaI5UWL*81FjJq9{C?T-H@ktxk&=U9=slDNWzDDkJ`MsSJDa|j z!7N_m`6}=F`QU{)s53a1wHpA`0{Ohx{4jkD#x9^%_SB&r6h%mv@!)CTQi$ zsUCb*SJn$p=jW69v^5xvQLpQi@%mF0mZ0(9moxXQYt6*X(ws^9Vw2X4tVdTRlm$6* zxCrAXTeyg&KDVN7lN;y027o;1S;xYBa;gRVsW<*Dhw-PX&CTty!l&HUR?ErRY1!R- zUhdqvQ~vtz{%+YnIV|tK`?K=s;lpzHY`>gN4$I~mPy4TwE7x|!1Gc9uV(l(}i(Wb% z9~_j!z5RF#z{yEjmp?9e`rj9khV#S8`J@~k?3d@e&&$U8TG`s!DSSSTbudnwoX5=& zTqNX;0bD>_Ti=krt7T*BihOZV4)>4B$De*)e)Qf4<>~%mdGu(v>^?s(S9jD7YJ<1l zeyzOn`m5#j*Y1>Cx9^s%t5?h6;XzsT6Da59vroT}f6ug3TrPWiPs+UyKQ7zbTk`M3 zHeHvUyj1`n`APfT{qo@R&&$(?&&t8xVYz$jwsqxItj9;k<;jyr{{H?qzVWSc?Z%C= zes#NCxpAdDeDtvFOTQ-%KPx|Z@BOm2wN~!lzFmIvH-EcamA_XvHst?FIodsvPbX#f z>67yO$>XxWCpm}5qQxyLPQyy?V``R@mI+(|laC zl3DV#zisNrGw9(mNXl}dWimPOlTg%gQFJolm6I}rPT@WsZN!8{YB;B*s35)eyu`nt@>89 zS$2t+dMASD+?0_Y>>3!63P0m};k(~O^~~{aq{jeRFy>ERhu>JQ5rftjs=r(H?D5xC zBl4kOEdTQM3rP9XVDE5^Ffwz&syF*Os6ICs$%xL*%B$t|JU1_{FFOAl`b7XPc3p>W zaz6moL8~WDeL9--AH=SnmLKDjbcE?K%n$g18;}>bsaLu1gMOe3(9B`Hl&{$Nj&?3s z=#(A}?Egh{0z|>rq=DzBa78yOZ<>E!J@^Bt`e%ORzdnYp$!Yr6xaCLZe--A*_b;!q zCdE7s_=$c2{xpJqGR96+3{}ku-Bk~a~Zoj$E@T-B1pJ2)tA-}gJp!zTEULZf~2l{H) z$j;gEKV3h-wEQ6-@&`EsOqGf6435vyt6xeC9gzXO3)k0JK}m8s;bW z>+0tQjZEb=#oS?c3V(%jwTt7jyng1-RIP6cYk6PYdtc=T(&j*je3p8mQ!tL1v5xRL zvU#CP<18`K-w_+K=TcqgwiB&Mk%_O_uohj{ac)YV;HGHg3?5`VIx>E z?E`K+5w5iiIzu3S*0XYvhkPE6&iK^+vuyZ;UZ#^zWQn?=_y~_RAL`PkTyMY!aK&%z zb4H&CI?95UB9*COvBz!9^(Wb7|5?6p;3OPB#Z9Q?*rAmD%8dZziP3|49zBRB=+Osy z{C4h}){)H(1MCsvUlmu-T0|N0e#+eXaTOm)N;3Aa5oUDAs{a}7CRaH~0p5tlx9ZV@ zyf^4AKgwi~^oU~{Hsg6Qa$c^e?Zs>yH%b%O^rw$-MDsWP#n)>m5%^7aw22~gaKA&0 zg!06EoL3>6@kAYMK|b^epHhw=6|34hhz=76ke=en4Itpa!(jd}8s;CLOLz`K27Lh> z{6Rm*w@~ob1ln4*tKY~>w-=vmXPodoe|%5>f;WDmeYpOB{Va>UZdBbwdB#z^Awhic zH~K2gO%xY8H=rE8VO!`H^NDoyO$4>gsiDt-A~3GXvA{R+2$r?(ciM(P$2DF2QhmL& z#0>zv0ie8zZ|pJtL9$Ss709un&ml1HFJ;#1EjN^8(_K?-Kx znyEfwGQd*&3|Lp}BDhiEL;VoKE~Tv_8d07=UV#5xT;I(>+U;3s8<3+urkJ1sND#75 zWB?R98D-%s@!?=OFfd%?6Ocv?`RLQ`m?fZBMTs_bqJ~T4(}vLqu4(o+NSOoJ$ATDu z6_}VzI|@zb%OLtw-z>@|(ba~<;OZwZcK~9hb zZT9_$D;TZgf2sVaOP|iPpP#ZC`iP4uX@1C%(CGqn{_oqX$*bUv^1j_#7-$lpO>olx_TPXm`C|B0$6sqd(xS=Wei62YUNu;+>GlKS5DQQN zk2tOw#ry_b%6%f%`LOrj^!NkJB!Y{&e1k71e+U&N6Z(mMBm1*Uk(Up$KSdsSii3qf z?MKx7_hQI#6q~T{8U_82+0YKD!2s#UA-K85Z!qDXvEzb1WKutl#RE3!zbPE@2_Y+j zpw$^sp~^ROv6K8{=plJ1wtRuny7Crhs4yD;*q{BjM)_lqmue!Ot&QxM{{S2(=;Nr; zx`l!Ef_P{<&T%ZkJpI$4pNoNbQ`9)9f^z~AgYu)eFIS3HUTs6P@C7u|l=~ui_@OELaBXc7xl0n6BNPV-e 
z^3CjJU`Hr04$X^z?VWCOpdAPE{9zob|D1%sK}t10W=KdJ2o$`b4=-q3A)qeecrt^a zoX=082bU7UqVbpY84{{|v%%-;4=zH7L2kxYt0y;5SYyfeJZ?zDzCJ@j;vyr%d>Hxx z95~LapJe+fj`i4L`YE8F&Wt|=z9~HAE1-`vAXhPRyW&{aDOr6Pz!+58B;iAYwo61$*wo8%c6=rZ`aS=j+MYm$cuDgMJ&cb+dcI1uT^|g3qyDylW$6F;RKc7rA zkHuP+uDrB#D!$~Uw-}wBYF=CsEf)+l7J2QyN2GM{Ydqm3o-501;;#j@7V+RtWcRuj z*xVqnzP4Vr-g&3o-nmwuJbPCD{U84O@~7|p#1>3W&dXQ6^3`(n>b5u1j}DH?$>hu% zA3l-!K2>4?isXhey_r8wn;CC*B-? z`<*w-t8ctj-ulYd%I1}8^0)jJOMrvgfRP?|a|3 zZa41UEH|!RD_?x}S-JP&hvnX9pO%k5{-k{Mowv$2zWKfK)_1;Nmfu=)WVS^eK#l-5-lZ<&bxTR~Y$%!Ako>KSizK!W-e^P0&P z9eA9DQX!&xuPbAvZ&1Y)+h>b{wtmxPK#Y%Lt`HDO-$SPxR4Ye)|djr)5tY# zP;3$W1b8lh%-1d{$n`UM`Uqv@lScsVkHDB2#z01l?fHMPBg=3VC_JRXP z#_*5Ay#A*+m}6J&>+%{fBy!?%7ZxT3E5@_+mrmTr;cay zRUJMhsW`?ac8iCr2mbLZzH{s!u6JfgUuT|349%1<3t8GwSf_BYy75wQem;Hn=>fuy zsqts#@JPKB`0wVAY`>w84f6+K_V}YHoaK@ksPk>q!@KYQA&b!VA4sEv$7SFRb|J6I zH|n(9z@v;%!8lGh#C6G}i82Qlb@>M90R&%U%o~3|_$h(&2585j_s4ubK=PVd$5;JB z1%BX}ycA?Fdm{tXwp4lOUF8!s*C+uBN%=mSn=!>61af)h|3%r@(s|q3sYD(q@XCKS z{(17BCu&vXHWYYsGT6rkDdn6i>)g6%Js^49z6S4!%@se|JY_rT7Vlxy{eyV)&wJQ{Sj-*62-E%nmLfr5W8tYfO^W<Elgip`L0@GA~e)vKTsNm(ekt83jr5<&&`!MK5$wv7#EHnPv03atwAMjiU6ePVp7LjUNe zU@Ksg#)11UX(8}70%Y*nJZ>6T*0}+`=Jo$2bI9BW!Zw5p7o1EPe!>YEr;kTGrGd4;TTM7JWQmRYy~;Jd#sMsM1=78cy>_zo}} zrW~-0r_Sw*vAjZFQG9P#u}=zwFI+CdBHa1LPfBB06f3I}V38P2g^3qD!}{=f5}-Ii zb*kW-e{>5^zR07|J+Q#FOBB@sf^F&zzBp`qm9L6~?SeWAnz#xcj6kaZucj*x9}pyu z3BVl|=aZr_sG(80SwiUeWkvZVCQ9eI5T$ziFB&yEF}%qjFFGfAV(VN_0Nuc@>-6`} zG-UfE#0ivBB1IR)rZvNw=BLVLz;vc_vj~$kc1Bsq52OH+bm*wBgcb4$X!GHZwB;mz z1v4Qr@C}%@r`g~1A=5JwUqNFhyp-8wSC}eyyLR~&MtEJAhG{tig#Hw{y!ma~&PLnt zD-X3_)TMr5pDJn`kmxgf#V>fJe!+Xd4+tEmqQ3gW2SZ9b`B@+3f#1L? zrG~!$IIitDv}X;jQ_xrijLw;l55S7+99C-_CvAe$UHb*!Y>%3si3j?C!G6~@f7Sd# zSWwr|zZep4%xBtv4O%~piouqV{_{&p9MAKYhFjyxBhr0{b1S>T zbKa=^C^9T+zfr&V((O0m%lWU$#_G??pXD3=$@zW&uTdEzDxdWs`Dg>=UIvZFm`#Hm ze-W{yA=wTm9iSJ-pQ*ab{AbxU1ZNjFw1wj@qFIpbIj_+dCEwFPwR1Sn%j0lB)+c|) zQ{XfHfU*3{k73k*8eOMDlhs4yXX_Wq8G-xQn5Q7u531_>L*!FV`lOc|&_01<{jjat zSLaqQx}AvPX?=CJqIH!Q-dsFk4X5L&*O`#$C%(m=oAfFdKd{?+P3u4KFSKyx^_{0D z=Vf($Q`5v+IoRDRt7|J|b!p9u@KvwF6tAxNsqd4MQ^{6~tK91{>BEBBuZll7D7-3@ z^%#1v>CbwUi%Zf}I49-wY*Mb@xb6!Vr~7;I#d7(Lzx12s)~##h;e*G1>iy#nKlB=p zjbb*guV1<5n+ALZLt$xksZ35!%GvR8dHnc^pNikwzTzkRPmfPkr}eFLBCr#`zOgR8 zkL|^4;)T&d z+f2$E-+D_vJSqRgPzC*82X%V{!mQ&@>Gobi~0mHeJ1B`2Xhv1(Sm|n4-nOoVGg?F_|uQM zh@%fjkbinn$M%(k~A#hSoAN14F zD~TwYtBQ0L*PWV6z-NravC^U1FaU;;k20>GHQcV8*tw}X3a0%x^q+v-h#345V;G2d zXuqJ(@jopm&R>#-p9sSnI1*y~1#VZ~`qKc$>-2$5Ptrs3%P;`;l21FQdGs_6l)0wU zrz7aEKLi9GdCF%Y%W6EDzf$giH-_Y&c(whSzXtgckNIT#ReH4tRt7&x>wHKz)A?zf zXM&z|({f_{0756X%O~{L=W-2}yrVoqLd&<1^2abOXBM)*ByZvjz;yplKZe-E=^^b% z{p$K}ihm!Ez{UK~|HLc(J>##Ms6fA;hCqn%iY}qou(%m9Na)5@$W8l4ew;J@0v5Hy zEQDU2uSSqXQ6Kqv!8q=x(Ql>#>j&!^`F1Xp^B8?ti6Mmk!|}h`334WNTyV8`uxH{j z>Q_#64tH8sR*%d2md=GYC&E3{x&D#P>kf3vbu1n{Pz8^?E(hB>#(lCHIs)G0WB*9! 
z)ok4R99!aoN_a#;Vu&?I@bZ(iI>xfi=L+T#@$gLr$|D;7vCpJ(EkGbQs_O!WI)FIN zE2AFiyat^IK+`X=lyZ$kiQa1#bM>(jdpTqVOXS9SCiBS5r?2_cH0F|r+)t)YVxyWT z;wL9(oYt2!xs#iDX-9g1+go&46Y_XVne?gsKR?f=AB?cB{&zQza}BiSJd{&pp4c}Z z>E^vZm7%Y1Qc&HnZ>aMT?C{(L#JW{y`7W0>m|@+#Z)klK_*2X*5$w4_K^nf&A)I4d z!(XypW%wGN-t^{X1j-bQCvM<{PAd00K%jKK_5ki|h}-9s3m2x?!#cvK^r7>(0iVrz z=vZ_3HS+L>EsVHuq8R<7zi7>7vwAhKTrlf5tyX0C(;rZAUa?PpSjObFRx@b0pGY0Yc=P!K7GP% zC;9Y0{A<>v4YgmMX-rBN;}WpN{Q(=oZ=q7y>6;i#8*fA4=7+d(0D1IBZWe%sa`E8& zQDyMJ1hEV`4)7J!Yc$Lc*3}U5J)Yd3f`)Q1ER#PKnXWS9jTk&^ZaDW10KXQW0FVw= z1SQZ9JIq8y^eH90k!{*wCfBGuCTYa_jw>q&10wxcF*Blg2G6C(68lVKPxRt6Tw5|LazKce)9WXW>6Nae}ZvN&XL+QNSK@>Xh zL#VVh-()F7UXgI3o+$2=avRWICZir8j^KJau#t&_rf>%=F>T`xu0J}Q|L|wv17HW8 zLNu5{eFEC)#qty2>0my{te=p`yaT*LK5recUe*oPln(&i%RwTV%MhBbvoPpGsD-qd z{YhH17wd}QL!O@qZu73)TCk6hS^4rtiz$C83tE$zva&3AW|^M=GcPjy)G*E~1f46K z_iw!cD4^3rWl>JL3yq)jauMyPyt_mG)xJTx>0A9U#YewHj2((9gq=gEr8OTmv~u$r zXcj{q1HuO_19zao8*n{o_j#A7tkaz^(!VUL|HIR`Q4~H?l(^JG>X5wP0n|It4JL93 zTjqr1sWc^R{PRbwACpo=Gf+m1Sj(i6@M8H<6zSgn#*;P{@Kk5WHbIMLAX8qH@MoGB|6ebQ)&RUhnE10)}Q-}7-m1N^Cv@&IDU!e1oQ!nEQXd(`(;()RfDYfIE--W$yqQU z0DU|T2{m2&&yXin-ZQcZmCsAdkA5?hB_zL>gQ1Va0x$Y1JmUn~fpe2+h*@x3;cvA! z=^8KP@z0=mWXF^&pI0ObWyH~b77M?~`8VQmps#?Xw28*^SB;w2eUGfxSW5wLeRxC@+-7#lo`!&Z37CjFoAOS1L|qNipTk9(>-8{PX}>6LI_)0 zp#9{>JYR{z+D@atsEf!q2gA5-AP4V=SyO~o;k#G~P&VkAy)W+o_h~Wx!+cch^K>xA zLFKZ6g{?QI)qcU3x*AQj=X41E$xFctP{pkb=;P@CT7=Lw_|Au@&p6V}a>f#Ajib~l z@#n%!9|hpkkH5};2Fk~L(-*a@j=zD0>X3zM_E&^N2)n}uUw}JXRkN|crvAbg1E?qV zM!o7-FjM9yF+|5QltnF1$E-?yp8j|L79N|yEX0>Yf8)kA;VqTplcRESbf822g*U3# zg|oW8>Ziyjrz>S+gICy#xBCp=$v-ap`}^hS5V|ArSoOuRkh|*5{|!D}u)bb+Qik;< z7Z7>l!1m^PdFQQn%8i?M%D?{)e^P$%qtE1y6R)A)R{7mmUiC#9Hsi5vZB?r{UN3)g zT=w>M%kj}^+45`qc}4zdIaM8>NZ`pGE~>DJ{`m1@Z&Kg9b+cT#aoo{`m z{MEnwm&&zUcgpI`tHs~MKRzjY&!3d%PoH_ypYyE?>G9~%W8W0OlQlb=S5$scuIyY@ zw>&E!{^Doli+hjc!{f5Eb)|guov)SEwGHt(EI}hhr7~UI7j=t<>`aZ%gOGpC`TWz*6h1E>eei+wexh!5ruJ4lZ}Q6jZMQu)Qfyt>l3r>< zu5Y+Y(#M!@mdo14nsI#1Lv75(8P9X{V|5eylKZmSoNg0+I{J^nb7J7o*L?v-xIX7( zJaFF1*ez-V=8$fT5a&4TgX=*t8uj}1`b)IT2R=8d<)%kY#kZ1GhHzmQ<66f*%O_oB zFqZ0kZn*@;Csjnm&JgAKTg}@C$O5q@o2(wH|LXb^2zgbmOXJr8woX_6P6MuZ2?+dn z0HX4ANc{&09g=^AmUTxM!p!=89FIfNDvi6jepUU0NMrF+;|xhRvk_w?xetLz5qU?ICc+dTg5dI~ z^EEU2C(eWT1?}fxdP=#DMqSFMOlh@W=&d5E%o9ZF6A@mNyHIK6(E%dfU=+myghkBw z2~wD5pe}ep8j$@T{(yFXIV({-iv8Kylz_HTd|av%T^T9)cWy{8zog zKK_(XKht)URsOc!I;YIp01RohTW3@7)L3kD)<82CLX_yz7R9XL{RW{b%es_6f6$RskW7VumJn{7f0{3`5eL8cz>>svdC+ohWE^6jGl8R`ns$3z zAKH%!;c5;a`h<#HQ^%kGpo!7|<~nrg0llhjHQ&Qft~9Dl^+99|<1g$DUW-HJWjy$) z-JBM4iyp22GVpJi)4@d0Ce89`^auV-Gsp}BMuEOx`5W5xPYNQ=^mG_Ql(qTOP-En% zK6RN2v`5Z^0M~zP8@R;a=||Ph_rA8jgHJ;0I}WpuW!3%~+zwqnLPDg?j`_vsGm&q> z>(|svKL+9mW4*E;jQgdr>$yani=mhD%o!r^CjGL`ySFC_=Q=-T^Y%!{oRjj+exBaX zxj1bSE#sbZdx{0I7Zo|q_u0Tc(=qr$CxM<*P!3YgmyD-CT6(bG0uAlL4Z&WI@XB;H z^rW*lI=zb*l;mxKVy`bM+u)JuJbzRr;5vD9rH(Y6mj@wZ8N65@(dpv>S*}0uU;Qb1 z{=6B9Y~kS-+lqc{p$h*ToALKKH_Nj5i*4Kxz>Twt&$!81LFwS5ShmI{F?NHmLg^W* zTS>JYds&iWgL1BpHeJCkw|}%LB>4?@m4poxpX)pvzbfy|RzF3qa>?U?JIX^(bdWxl zEmB3S_uQx1RL19O8?jOx^w{Vh6my%h{+GS_iOpToibXKozyT}u6S+FxqM!oFQH}Dk z+3mKF4D&%5AKSo(9zWu1YS=U;rjncAv6VXZIwd@)RmN7g(&&w@k|%H4Ru!obAF5dM zN807~P&vU*_nW7(mc^6j=jBAQDCZMtzR^Lf{N(vW?V`5y<}@uK^*!#aw(4L3w(N?2 z9`fQQ2KfY)mP8x zudsnnogXgFnSZ21vWL+1iUcutfXgX_Pmeh3 zM7eweF$E5_Tw~t-RB6wF;H%EfORPFk-dp*{|I1(hebv4RVh|FbgQ@8art{({1mJXO ztbj!fIsy%$6A}y&hRbLS`wCn_&C3vF6nJ7kIy3mHPymRH=-dF5sZQo42ot6-?QyPRCe4V;D~NV+JiZc`}-6VcR(g8!WOqCv!w zjtYQ7vWF9 z8xZiL2h$P)|J3?Lkn-{Z;*8vk7r;n<%Qwh+@!_P}j!{d1XFV(b8M;NVK(CjkPrMPn zPe27Td@kdg`oFyYXk(wOcx+@xUEsedRdz;*IAXl$hh 
zfXB8WO6>!Z1gb8TlaP30{Mvq9d5op$d0`f6`7hi5o4ne7W4#vGyqWR$%gY~50I3^?5lCK?j@yq?rRR_UL$g+dzRkvCk4zhzwZ_LYL2I^5yck+P z859Vp^C$I4hy`tc59_IBuUKG4Vfv4L-uwn+yZ};8Yrlw8FvRtkIG;6?|^r zG0gHopT^O^0t^^PvVIz`hqxPAvV3fr^&j0NXD5IZ?Mf3h?i6~=*8Z=`IDRyY8qZZu zPv8Im|MW>jK~w>eMK2-2Lc12ujW2^(6AkDuA>WV$i}Ct)nANMsb^gq@TL|;U9z0%h z{LMnj@7oW4Y6wN%>=DEQHqr^q9dV(CHP5EkJATTab&tq-I)#gkY?O0xg1pzH%EO7x z_>-gKa&da5(G%(8gQHIFML9h>EJyptWp#P2tgmjAwTAqYJg!9n{AC&RGFd;)RCv0Zk=*C{#kzJQ+~vaMWvlC5X{^Fmk1G0=`aQA z&!As$K^vqhui+=pSpYvSzyXW#reRvI0n+c6z#_e7*6-tZ5s0CAvhhG71q^YN2blek z6(oPoUuqJV(PI%*qD#(S!q>fkgib!t$QH}*OY->8=;i@(o9u`=3ot3c<7u}2QePhr zle|eEv~Wu8*VYWZoz0E`rhO6PZ|V)22F4%fP}x5_V7_Sd$-nw<+z(|}Tw|TpBcSr> zHVpz|{98^>t9fz|-PLvL&(JN(r_LoZ$-GySA-}^>U1YnmF<2&e@=a@h?7SQjXK<@{ zN#6_K09x}vv$^-RAjL%_$bvcjUv#~t30Ao`51Z@wLlZ+M-_YxT-9BC@b~!QaPFckR z#}LOC;#kn`*=_soo6yFDbwF?VxNhBb|7`qmHXMHxr2WWz8p zPPE>L@67Y0IIpfh_bbi$tTttVA3FG({qU4L{Ec7GGj8_74k8`NjIK7p2Q-i3*es28 z7pihNC*Uf-NkJKJ7E`Z6Mdlf4)`k?QPB>m4>IWRZ#XzT}@L|R1ddZ?L-q0X2{ZJ;0 z#4>P~GOfk-#{-A%K2BAH^r!fS`6%JvPyn z95;AX{jt_Meoeo}HXHhEVoO=^$bJSkLm=D2jpaJtUMD)%s!64FT9p zTR1Pd;X=lI9DA@M*88+wRYtriYjdu309l+x7JXfX6p7w$5k`I#on=^)ZySdxLI1R% zG>E83mvoGfP${Lmr5Pm>V?yXK$0r-PNY9`~d!b--0xfhZh&IAJfhEXpH14`S10BO31zrBZ4LHM}@cy=7lc< z=?H}iNepg6@PtFqgTEFRZBXmis@%gze^;QS!Rk_;fyL%dZA@H??)qCwuKVYAl)l`q zY0BgMOo7_bR8^K?yjGW7$u{ZkHr0(%a!3+3uDA0%BsZb@e)g%+HTsrPNdXexGAZFe zFVk}5-u@h@%0p5EU#A$9bVpdu_PSsUft>1M!k50!s1xZr*ms7%nayf!rgz+xW9-X$im;1xMgcnXoKje` z-4mKB+(w^i`hEiYfNNJkh}ow^5k+nY-;?Y}_nV*+U_79$lR+@)=xY`f7Y4VBSN)~I zV)WZfpd~@3gFQGJTf>`A}FX>Dh&( zs3ACDNmY1!)zxgipZ)u)z(=w9IoW{SB#+)i)p2U$63={*bjiaE9@W zj0Q6r)=Ui3>u`nI*i!6i6KIupF6G5k-`q|;zqBsu>UlKa6qWk$tN3;fNy8a-WzGeb zP2kjG*T4a}TmIVEse?&^k$;=DoXx14I15OVn5501b04V4KzLlP&=DRyLmGgl~~v=6#S0;B@mD} zr-_W8QxSyf&X*;c8Rv6zW;aX+(<-yf20@iYlaB#U+sEIJtfrT)CfIGL4}JZ&P&xPQ zz#a(GP11)C?36=MJ0hfUN~6_vO%36V#H7fxFS-7co^NL7X5q85q3en`%eS7i?Z#L1 zS0ngiwS};&%EObRERF5fIC~Abk+gS#7uK9(@R>Q=nT?yuN<^p8@u9WA_QiK1y##E% zdNT}OSWproLTm+>%$i2VW;~lbCFL(}Ju>Yd&sPy?4~uj0b}=dcY2cZ^v4ld!>v;^U zmCwRD1R^djIb`9NIqy42!J#D~PMOUQg)=MWe9Dij+nxX!?Hf2DxtaV#TG%xNsfO+s zQxP64)z%!jtgiQLr1&5Z%z&4!Ab%TF`Kv_#EJnES`{p3 z-0Ia8wwn~VDni7>qTa)*LX6zG`BsNS>MpW2kmdW zIX!Iwo%OqJJ56G-o$p6>q2M=HEozP@t?uE+6-_%@|A{&y(YIGcdt&eVz}Lv;%5(2S z_v1aOSz=}9C++T#NK`RNHQW^}$8{M9U41!Y8e;y7WS4sM9YpGE!=V^nz`5Ed{p-wb zgjV_}AZ4Id>+_2%Qk~WO=K6>3k-FC|=c<>dKQSf8(FGp|2a`zOTw&1x_q+EUc?g+f zn|2j;BvlIAYf@A3v-H{fKSK9v!(PhlI!x^(?XJxA`ZbXvD9QEfHpl^ryF z|2d6!oi@@hc>BzHdETkI8h@qUSFSWY^H|wZoutrPd%e4xh#vwF>#jukf5X1#r8f>OlS5wmER%?Q zRY<<}gili4r2b%#rR?KxY4{@>@vsINucF!Mtetu`FMcb_tXm=qx5j^DGA@6-e8>r0%mJP1iw0Wo#AO}tfQS1}=N8sHChT)92(!*y*QT^B@c>0HO%pPdmK9yz|# zA=gW8z0WD@T5AzsIWf|dZVFj%xb5W|#%EW*jBsP=x&v)1&!q+4=+B&WqPi2qL(iN} zjLvy1q-p-d0(n1D4MUiVj!iQkjv1^;eA2UXn_z;4_F7+=%g&^lNWFx17Xo{;at1<% zb70qY8+vX}M>5!QhH5p*2Y5S9ri3KdH@n_n$U(9`M>bUSTB58&GOC0a!7c@?d^Y9(jPT&YwAirDDSGK$4 zr(zu!eA^&BozN#%Af`=9(EiIGp(pICT0gI%w5=q&9BtQzA)sTbrfsnSz0+FN;#0ll z3Hy+=$fWi6n3h`Qt9=b~?`y;-sNdbqCm-F!^eh6Yn|&2W54Y^8YeaXzAy&>XM^4Sjh}9S&>XB zJmwjx1Fq{20#j0lo&AM-GtWLf?=QT&q7?Gum2`8IA?n>Xrc-ClhOOr>O4XYpdv+*+ zNe#IqflvNc+J7l9F8j2MGuY2;lt#PR)Ulg#Mb6z7J&4#TwcBI$JP=&UShS9rjCn|? 
zI!$^-M|qm6Q?xO6n$8LZ#&Sl!L-sugcMHjuzpLLjt!lRZ;N$ka<&qhQ*ewm{Z5>#V znH>!;Pr(lj4knj>&l%t9pZ^d&hed%DnQdInx z%Pfrmy9TF<8Zm4FBmQnmhCdnsb34az&p=GN{A~a7Wa)c6T;Ja22(&letzjdo0c*rO zA^+a7;ahM;b_v;bT{s$c$h7zoj79AaFAQ|U_K((>yrTmffkxsM3;*NJ3h2fV%8`)R zE2)IVaRtBAoXM~9Y?eFkOditbvus|yQhv+&NA&`F>uB!6hGr7)MCj}Q^*%<5{1zV> zXO%iK+ZJ=X@RKOLH~COQ^UoYPg`P-;?Q5}p?p+VS0BrX=i`u_T{LiysY$L{9vo!FZ zq(V5|H-S!OffIBsGQRcALZ~SDbBo;fG=|$1Hf1NfSFLNrl~3b}0-%#KVLLvWGk;g2 zAq6yTunGDNLW9rmn(X%~;n!gT{8W7sHZu#1H&=x#{Gg~G(X%}R7~u#%LJc_HtV2wF9_KG?z|$nu!rZP>{@FEug?byL_bIozaWmQ%EJC`MvHqkMePFp^6KqB4F*rlF|TCHUg zeaN=cUqA?|jPv;D=7F4kCg+R_yd4j{D_mXrkuafB5@ z7Z!#ehKT}+qsww*&K60M5eoqqZ+%$+@A9q0*&6yinLSWy9 zTvpX&|Ja=VKT0)RBMExUv(tyb5KOdEp|CSMFK=ei0f!1@_yUrSGhK_+Zj`Qc8D@eY5DS>gKIT6!{ zoB@K&RFs95-dAUsZT%1=i2k^V)C5*y z>f{wS;d@@HUgyTyS4N$syO+H=KKD!^yj1ToxwnYd(VHWcKu*4%)kUZs>&!K0H@?VHe)Au)t zdOn8c3p421kcE~vfj*%{br;rUan*BQ!^K~ZO@}ABHpY+dzavv$k`S!R4sahsv_#&+ zP^<9rF4Rp9Sn};y0~&=iv3=pvkxEdQe`L&p!K=$Vp?=Z$L)~2sFRWj0HTESJc8y+d zkC8@%-uq(}IZ%b;DzAPSb-Tu!<>D9gVh+RA-T~C+(1G~XE^}8V?`3V-1r~xOQrYfG_WrSj`85M9^^RE8M%FCgH&1KeCT?%s`xSjEB@-18v{5;?Cop<~9`{yj#Dy&KgHQMouh& zqF5wHwQ(Z>u}g7{rWw6Vji;W6opm9jMi&>di=R2B_bVj8qvQ(;P#KXf1fPZ`yHU^Y0@UGG}n0lGl%AHvo3*Kg+j|QJV#ph3C_?0yMXlD zyD|w}m;@ZZyn~QabhFIFP8o}m*fX$*#xC!axl09mz&d1k_^mvCW14zi+sgi=6$Dg% zhA*H8Exw)+Y?k=UMtIg)D!0Ce2(~q&X5%^2x~0;(o@2|(G=%^Pu2IsT_TROu+li*C z!SVP!{pmtBl5KtMIts||{l}@Q!EOML=xyW&fxxyn7<>4$kVX*qxUyZ`Ngm5{mD5qn zy20eK!zD|D+beq&7j}Xq=DDksRb=e z+G>>A!b;N2kdg_5r$;_72STVmgHFqksRH?I>id!RTZh~p45AdDnNT8h#M|~NKfx!U z_!JVKzH;)FUuzLOp@y=mMK(?-9eA1Z{Yol~TpsT`V+U_p z!t0bHxovg^zNFU1CLU24N7{nKMKO2)_E!E9@C!{`H{{>8WCZwmnIvb|qi0~oZ@8*) z8V8!!GRotBdc$~G^Pn_$QjgJN6gPKr1_K#l#0H*WJ{j5+Pw70$+kq@r(HluJx8^=< zw?n<`oeCK+6624)q09RoKY9lcfQ@#zxeIWWyK|n2>yrIw-0hIO`xv_*8B$?Na%s(LK-tU*_!#aCjlV|CKb>P;AL3 ziD24a8);?rR#(^^$f)4KK+#i%bTrZHN-wo%7L8QL6)dZh~(LDrOBNc3N@4cw9#giVo4%ctpvC2%_sQN$a+*rD&P6*vs9OJs7*vP@v+$0v%dN!4sD!iGYrD*- z63z1kJY|ww8{)-^_%q4~8jLsE{mWF?fA^Ol@{h&^)6cW3t1QI>CEC8kY(Dne=xo91 zmOyW#)YSrM)6aR&=sPTB{!^_^|L&Z^ro@p2*<5}Uc`kAvcJ_$6Mr3(@ZNQDIRSl4n z(J<}gUYuYD6O{GS07Uf2NS7R{~$Oat`L+{&UYIv?w33;->0q? 
zQ+2E4g6eks?6@Y*9@qQI`(dWlHn5(wW=_t-mnSbQW)woh-!&eBZgO1CeVaQ7tMu@# zy%wc5f+2UyQ@fA@J&{>{7k86QgU~YpQrkS(H3;VAwOA?uR6 zFJ^lNYi%{sAJZSfpQS>7Ep&k!JMd#m+wjCCO^^Rv+bb)*Gp0MEk<(Bxk?+*YlU<4= z>WSa9hHQGYhPhQ zHy8pS!gqkz0K|$I0DO}CZr)tbtlC!Kr0bd(At7E|VrnwOpy6-8SF2)0=OO4>;&9h> zjyuT`nyb#D2cUjo&mmXi@T)OP^brDcgv@M)S#$+oR4#`cCDwO#T7oh7o9z_iP@&M9BiL#UJ~^qG9Ajth!8RVR^k zVlAWf@E2rFb8Vb#!XpP^AM04U*zP`5o@WyOwf-fUY*#Z;bJm9J$-UPfD8n(5Wyn%z zt+u&v8h5Mt^g^+b3oq6O(|s|w4&FyQ{z$C}^d^@*OM`-jQE>Yqh5kKz8%JD0M*PN0 ziFc=M6oT}q!O&z9CbQY6D23-iA79AjZ9=|~t0J#$J;A3oMR{(jP%dBZFxl%rlwcvi z?(g?T!glOwZfmrCAuglW?$KLpHh!Hl-`c`PQ@lUi8KqOLo1>$UGJHS_V`snEXHi2A zr%aS)_L>cCS=VQ=>B@*8G*Q#7|2ccTqL&6f2Y+ZC9Heyq?f-f5cABZmm3!7ev`qua zR8pV&{zZfAuQ1AAHuKwG%58tj;M24~pd)&)O_{$2T~7jYcOLmnb~d?ED45jG%Z8@x zu2WNqYZO?46)QuLeM9l!cQ7{jEUrz$-t&VPb+i)IfaO)c$}^=1NnAxP?RrF{!5&)| z8;F`=ZeoD~O3Kf>Ti++~;+5>077O9iZ;+hg{rvv?e^`|H?q;dWyL01Rhg_(TFweh^ z1diBu`8Mt2w{Wu@-t~j$iSTC+g6}R{{}|#3+r4n7XzTpG?7a)?u?_q<2cov?kQ+>N zA}7sHkUwPpexAVPYf{~DcVInbptX2Gkb)Lmc2~Nz-`4D}5gpGem_b$1f(_?%+wWAR zndi6bnMHMDipv}4kNPrkD)+0q!FO*!Eam@ot>)hIoYg!lzX}e`lWl3Bi<&j~Hs^Wb zC4Zx4R6YSsBnXOCwmlU}Yp4ux^f8VA2o^524$yEMS(l!5WsyB`qVjy`Oa4{QbMn)Z z!0$Dmxx>Ui*72kLI!k^2)suc6n{Zo+6AO7Dw~zbVwRJ`zn=|^-!WDK#I%mJ}HTaI0 zjI)VrR|b{czxhA(N?sPo^b1#4p|j^*QLYeg1IO$GM8~6ayx>;6(QNjzdC!27tc0~&XVF!0QY*$W(fjSe zhQ2{|O5$oV^EgTcX1?y^d}sV<02*vKUOL@<#|A|~D)WkE_&cIDX~T%ztIf#gMm!R) zjfgWJ1TXE#^r8gGkBmR){(6A=aNk$%7|j}kTVDSDaC|uJAs2R*COG0He}fl#2{csI z!twKO<>a@^uA3hL8=G}huN;P9ryCzg9Wl1QID9m;FW+5;AX9{glV^2)?$^XEUsE~^@1Xw9P3E%5r^W7 zMvBEgOf+otPB)SEi5paq4Xv|zT|6>~dvt&0zxRt1A^Q|vU!$mLyf76$nUXSmk1H0} z@66R}P`&?1y~-?}K`Qh5#Wf#k_2+LS$6dn;wUgs(zeyYFt#0SXxE4r&%dv2506?jvwI7H2te?eXX948gCl&yQtKQ|`Oq z!Xvq>LanbI!{ruP)A*0H>+39v3=-2}LykQ|WReB@KU>V?chWynS>WA&g>$7MBGf{c zVdZwh?Tn+*nqXG7-CcL{zV{Kq9HjV{%297^14#Wk#!~#HY|i(j=*aFv8kdY_dyOV|`y;CZXi%WiF0?VC{T6LbSR%SlW=;dE-Erg(N;R=_>C(_tiSi^Ky54 zYp7~S8wB1=@txgBJ56q7x?n6&#k6ovRpvwOdlI84T}^m5_ma*Z`G?+{J>SX5$jU1H z?XFPSx_T6UZ{1O#e!Kh8_+dA7#vaYI_!H0CT1SfbQw57Mi}X}DN!7)-k&SUfJhj7f zz%=)rPqLqvZq3JxEY{3XbN8aClb?qNm5%V1-QA-i_r{s9@y=#gGs{i?HanMLQU41$ zhMcv4@Ms)zc#tOx3L{J-S8q?~hv;Ie6xM`DFbB6>(V*O)p37$AX%E$fBkq=dB*S~~ z)muJp)fwD#K+HZ;`pm{CC-;~;gNj}yTV=|~#{yv~)C{y1Q#5OaM;=;OSc_lAENCa80eMcZf~???m5~{_^fViJ55vzO0q7fo%ff=~760uPwoTODuUu#z z>XL9nG$CoAap&Uzq2}t9lzr>NZy7Vim;QF!P%x&>ZFc0Krh~Zl3NadC&DbHZ@qZ(>+J91#EW1he*fg#C*O3xSOL`8rxP4Xz?E1>N?N8H4&mQC z=Qb2hIF`xBt55Owk|3nSdpofofIkWy0%=e7?wZG3OClpRgR@7HH_^hjxzxJnd0~gL z3HyUbsVTufh#*thfmb?c>9K#uOe9AlT-@357hI%4XwKVDVv1goMuAD*vur;6Y)3z; zB7Q7g(=8%SIkqzycqqYqBd4Y^A{w3DZ$&}97hvnLDRibK!DRhD@kb)yM%pjZLwOs; zq|jR$FQiVM|3(_Z7RSCGU=-k$@u4GlLhFJ5?xYp| zyyE0CTb6`1=39rl$|iN!=|@SDjJ124Vu6geli!(MDd%VGoLjH*RFHV1Hac`W+{4$+ zn$BYsp%$C_M@9?ck=c$iQl0Csta|6H%elM84opu=1j7P#9-ipw zd4*fB*E1$@QOIO2Jlm=AaNr2~sSn>0+LHKWZ{a#G#NG4)DF}eQT~+eTiQwaI%FCgC zuGa}fnYMoJctI_KfbZrf8Og_S)D3Hd`Gfu?2HH~gxZZuFk2UQ}#uRAN{pxT@;-JLJ z$YQ<<-$A2{BXjPYrnvE2nk_)~s+1I_08g`*^m_ebWj@R7Xi=c3!~%B*5hV(I*$+Yw79!_VGCq zr0RZp3-sR;gnSJo%dt?g<3O&P*S9XYhbcAAt@36bQlh-z2XIAv8x40ezVE4lC8c#z zLe9I|l(Ce{?mSLuB?)6|sI((@#Iiq9^x}qpBo6*2rSd}o_f;Az*A7BdA*a1i6|tXA z=E{4kQ`&?T1VMwBki^zoMyc?gEbP!!pO!75KO+8tj10fn)EhFH`6|gx6Hqk&L8#9ldt(4>BiamKQ|*;i+*m~fUYC%k zPgVFCb+;-!qhj+#7SV9DR!{(n@oZEv%XMStUiX^W`3+gX5JptN7Hx$5L~~={$E&
W?-MwjF zd9|KhQGdO8ec4V?E}G_31m=OL2S@ukSmkrjz8JFN=RuUal_lh7B@0k)T0eXKqHS&O zBu~fIVg}e z_;xLJox$r<>&bEWYx;RHuPf&jZWQPHobZv-Po450jR)&pFY%KL-mtfXeX3?B!rT+DO-}S1E_hjg9v34sF^h{h$Xiz`fO9 zY4NavxBWff;cF=J-Zsd&jb-iO1vmP0u*v#jnt);{ZOTs7kE%Pl`~g3R6`oor>^pL3 zDmaLTud;497qjby80DY76`An>kp7PK$H52dg117qUvBpoBu;ZvZ;|#xTIVUZ9Nd@7 zsb}^@9vqMo*Me;GtdCV*DOZ@4ejDCGTJo3nf%x)qu^)nxKIyhDyC`a(AXvr=OSf~7 zQ28^ge6egJStJr!Cq3hMbIG>Xw$wgE-dGovRr^ZQw-1z`ckka#8QW@o^6~raXTSVy z`{RG*58DTyyw?tP58D!L%Ms%k=egn+%;_wMXQ;6hFj}>#-}P{BQH;29|Yk=~msNln<`xMLcA3Y~v(;wOAF;!Qpe zsPI`&Kqj(=7<>cgR2R$e8DwO>^S1Oyh>2I(ujMB&%Rk&w9^@%wF2js)Lofe{{SCS3 z`4Lspnd)zcne<1vcLk3BP=mfH=$@8x#$C6Y;RK_X{Pi#snDVdOd;VsE*e(rcJ!~%q zjyjO6XVIxqU3$|_+NKfN{c%0DkGM8F!}jrRs7-y8KE${jsAgcw)bjFm}KA zS{Clz%n{Xp9H`>q8o5+TfpOyX;pN3NMv(i`Zynmy95;7f-xXouF8a$13 ziYOJM%F8_*!@@Pp^Vxt-zn_FGk2*kZdM2K+GJhUw%(xC&%*%P0{Y0MqyN5;`Kh?OU z#zJnIUR{umbfN6;-Jq8q%Om4{Qmk=6m8auNX?E69a~VcMu+TAx@kYHU{3TbeQ9H*r z9q0MZFU@`qw6AVve&>ez-kt|sd`WCN_6Sat`PQ7nu$>pR?gcHW+$9Gg5`Ia);fK*1I(fna%+& z4Wk~FdvwB@erQVK7#Z)`h~}0(vcc%I<{p`UFji*%?q}XS(CvX>KjXHwwcEb@>M`~s z%8GHe>oUl^xLrF7UehwoPKU`=y=d*h!@KR<-}<s_kvrO24Hz>!CcP?`!Jk=(e3>ije{!cCkfr*kLnS_X zaKv{8Ph`F96$Z0)VA$L3$8-P1kN&550U*MyAc+9;5ZY8MBe+nDA1QC3P!SYPef-X&DReo3UM3JJLo1?HBaXxcSsE7#sH+s#t+O!b zH3j7)OdKX5%}LuuHgSmteKfA>DfCNEf7GI08d?7836c_^a<`(HFZ^+GT%{gHlpd=_R<}bLygI;Kwq~HA0c%7Z$JC{*y=WHC({Enh)%4G0%J<|X$00+x}X$){IGCJmoK%x7Wp*O;UH z!Yo{+CwxMsLw{Y1ZiX7a`3H}af=ZU37*t{|?19{XtnlfyOh()|CcmZ?=z78d)?Eh| z7wF2&sAJ;t2fJ-`0xP!)M|csz;lI-`I*=&b(DPXg4CQ1P3?Pa&f%u{nI#rKD_)xFY zBn(Pkg_o+Xp=v--W_fM>O3EQud$NM1=(bjL2J}d={2ox{m-0?NOWnR=01STu(q~-{ zAH{8br=v2dq-8kMQ^gUjZnG6R@Cc@-y^3FVGdPgjMiVIWt*bh~l+FC4uj4fRH&qXp ze{i_q(HS_ERY7xvDYgMsx8);k9PyMP@s_)~VW##(kE0FGu-g_`PJ-=0nFfqBD&A0q z^I9HkOK0kzfNRH#t}G-t_^eNH?B!@X6MlMXvhYpmC>> zhlw2!U-*VGboCRI{CCGn)4!aW7RtRqT&ixLY=Cxmonti42;1GS0S<0{W&_l)-EF9w z_O3dK-L5A7H(~HHc~p&dV<`KH^c9rHkllpq;b)XEDM!-Nshml>x1R-ibp0JFaWUmL zPQCnM9(hr(P-QOYq;weWPJqqZh%;nP6{OcxxtFSRdw&oZI0OYk`%pce12=X1pM-G$ z-Vt^+GESZQtaxa%1&;U*I`(3%=HBnqyyUQhrRXh1l$emPk;T}_VlY~?P%YpbT8T~COP}N zNBPG1>RL_IPtT6q!}|{z)6FvpIckR|$BcI_+7gopZ=laHN%YxrpZ9l6?HbO}!4cyd zH{KlokS1(*F~D$6xW%iRd30mI3AfCi$d!5Q?dD{Wf^ra_5Pulp_)~9haMoBF<*ZX* z@VP)SHXIC}1q32zgT-d2msQ4a;>N-FHQp}2lqtF!%5LPGK(Dt(-AMBJ=iU8-wzG4R z4fvxY^@}_?efQ3t_O18dZy$d6ekRIpcIQPbaQKwGw=mb%*OLK=wVBv+&F8Z{mwEA*Go@4~4GAk|hblph~0QfAn8(?mFIlrWSuTCX+@s0p$Y)=VFAnbIt3@@><)xzus2-Mz<5ryZ744##+0A z-Ye*JJnABYn}Ie_*%VAV<^&-?zx~=zKS@_q0H$e~y5K`-#>_SDrOePX4y1KON+?w{)mW7NPA3b{FZaulc6yZRFGRh+pB0Kgvs{8kQk#(FJm29mt%QFdh&$ zh&Sk5lyk$;!xhF;29;fJEl8X8$RY9Mi8v$2z}fL-4&IngZ~5oYhk1t9+B7Dg@Wi-T z`{(O@Jt!?b%RCHadX=jHI~}O$_nm26@N^->Nh8mZ_3N|X_+?T z(WAT6{gQpZJ^A$2ZaeU~^9$Zh3F!ynrl0^;vs z3%o6V+$lZWWTHi94OIy6N7LD+7Bt0ZQ!*beB9fkcnBbu`ah;GMpTDfb0L~M@N7l$kQmUpDtaW0_}z5@a(oPL>#0w>SQpFIH#VBz z)7+pM{IHrF*IHm31+HOZaCvU499#vcCrvEX5;DI2scb3*TKVYVCF5YTF$R>=1>`1&pOCA_CSLO2dgDQ_qXW{jjYmE$gRD)# zKgR-D7xAzaJoj*!eboNWKKZwu^tF(MMwgs=Y5fetjHCFInCZKoGsYM8tL9a|^vx2^ z_}8(8i^sOr)>F%iprJSYZR&p=a5Iu?LyiUP8&A(Y{B#}}$l^6L`8gZIJ9)O>@-U$b zuZoz9m-5jz*D~JN+&~_vZ319Jb8Kz6 zG48vjOkGtWy?|7>x;?@47b%~9qt%;n5e{^ z+Ji9evv>?u8yO?L3ak>uVc1gTUU`~^NyO`5huEEqt{I%>J?2v3^E)6Rj2nFLk-CuA zM)WX&l+l>~NuO@YLaHc75^$tWH)vktdqJcH^K9k zw1?60fspy9x{jUx_zOj6foAhgTKi?o zCirEZ1?V}aoxHd>rg1x?Pr!|_r=K_S;MeBrdi#^V@vpXj^3VQx`^C@pq35K1`o)*+ z2jBh(eDm$I-+bDB{^Nhy?yPOJk3RmmEv~F)^YOrMV(;&#f5>Y=)C=3<8j}^B9!!%i zUo5UV2i>^a=VhKoKjC=Pa&UZS|Cc==_@Hhc?aLq=rhtglXp$3IM)_($(cC{2X($g$-zw+#Uj`!hx4)|f0_n4GWf9=zat6w&gk&D_Wu222>+Lf;Yc(B^JuZJ2lhIIa5nywW%MsVLG^L^?j zIPH4RPJF=(TD4pE2%9lJ;e3J3^?p*88H^QbzboTJXjTUORNflnI@4T1hS!*vZ7`m@ 
zd*?nht!M1B&s=_=@w{Uf$6MKSPrvUt8r+V_6yUI<=NDx?4CgS1dtHZ&@V69By=dGM zsW8C;?aVVh!0h;j{5a0hCkOnAjb^Lbx@a(>{PNB_Lq==hDiFD}%#;uqJJ+lTLc1m8BZ(JW8BC}8{L z_@UNJte9kyG&6t4C;L&p@Stv%;|`BZ2)EM7hcudV1N+IaO82f3R(6nqfby@s zzjo%bp{me(=!i{qo+c0^nI>eY684h>&DebqkuYPT_sZ1*;oQ9k{@(3Y4dY#`U0 z8>=Yl7j5?X=4RyH@^wt?r#co0Ydam;h1NR2p}AytR)vKMuKDXcwG?7X`-w@OqLkHH z5MTJO4(K10v*p}wJ)Zk7{^S42k1%~4*|8Do!efQYgoz50>aAemJUWRaPNdgvfVhcZ zH2fqq<=YEeWzgJw)gRBqmkxayEL9~fMH7Ro1ojM~+Yq>5<)kSMQ`{_Y^w(#pfX0|E zQfZL}?LB>*(&3Gc#|*2_OaPo5`mIAJ+41Hll?!D6d1K?04idZ7Pt0|?Tr<^9q8n_c^sNygn1F9y=cZK^c!I0lSZZwdSssAYB5ZjP4_i|1#OVwt&}kt^ps2J zW`K~zAM=S{$~Bp$6NujFi=dbn^w{<5CiO8XN&)H~u9mq7#)z(2Yj{B(+;B5^U$7T! zjA`;v=(vv?{kN3^lrv{H^qXajU!JV8HOBollU7<#*@InX^{_T7uNl<4uqH3lNb6U8 z4OyYHvY=cAdbT=gkyFj)c^Z#$q?1S#RQwVF9Uf07s$G8rA4!8B9;sq;>x(F*V+Y|7hw#cAj? zy1D}n2wbQe{srL9Z-t2-KV7xo1nYreEJNdB3#tnLCS1o5cFsCs7GG}TqpV*--Oks| zx;uk=*8H{5!`%q)27IMI{B=_=d&$S!pc6eNfZgt|F>GB5m$p)MN*rUL@Ww5MuHl@d ze~)}c27>=>pr;D_CNZo-`J+D4m!2+rLP>!D2d2&)bckdC?@IqBROqQL828=rYoK3{ zGEdauR`tON#Apw$L%CHQ{~Abp7DLzGM(0~#(B=ffHbv`TB@mK=;%4dwns}m(^gCps zQvsyD%J{D%BL&kFy(ur>C_f|pcZVzf4uFpaRbEEA%1dYkltcX;XOv4|QZ@$FPI|Nc zwEz-BY2QfqfR6A+`AtRPS}Hg4B2>V7La_RW1R+0fhAo&n&-jo0Puzw+BMh@({w8(a zxTa5>3CfXynfAgc>QNGEn7?}ct9(_giQfD38t;iq+6;?!4Jw`MH~=?@i<}Of9{YbC z3E#=hPvIR7*3I62cAkuJ*0db2!?bLA{m%qmw~F9+VA5`kpc6yMaCytyO5fp!W37A( z8u@jIQaAW&;XdO##*(aT(zD3qxS6SRy%Xj!X*CNpL{2`{2xBL zpSI!UmYbW52b`obcJsM&pUh(hlc$6|NaOd1U98VWw+lm*#}bnbH?UV$yyfmqST|uV znopY5cZ)qec@^4gfajAeWwDb+^c#DL1;XioD zmqs_p#G|SWk71`BBcfwBc_~J97Z&vM=Chk->uWyezSh3;$w%$`KlpC@?)N@vkKTXO z7H(hWrs`R4;`@x-Q9C%?%Z*ky)|4-4+t{d$KsWwP&rJ-|%HarjTDPNN^@SUo{n>KD zxWOYIyy+{*x|B0`b&rgo?%>Ul@ zes24J_1W**&;R0=?I%C}Nqg}ecb`Ouhj*cS^WOcowzSgj-F=uvE;5_f%-m4SW;Yd1 znZJ0qBoA}mzIof&g|UVCH8a&t zjt_FP)mvMeZt9a6Ulfp6z6x=2e$@Q@!p8b~J3QQPJG;AWh5EJXGvI5;5cSf9uFd;* zh+A_m5Qk@ZfkAG;yDVB*7x3~k6m!fst(S>ohVYYz%7x}hy9y61BVUZL-B;F>K@R{Z zM|rl;RLE$VtDh97d5LV|w}9L!%k`Z@{N~{;a%R(Hxy~a)ZWQ`0L^rynRsKqkbZJ%w zUBu9ScIrl|^|IueY!KE%K2p$XF_OAqJQrnr^3wX?O>P2GH;*Vg_)dK`{j@vKb$o1F zF6REBIv_=BoiNX5V6tt9-bTytk*?Z+n(+>YkgL$OCPse!kYQ>P?Kr zW$MFK+xPH~Z8NDZ%-?EvD9g<|>)?Hz!&EP}cH7qOKIQ*9hhBUJ{ub&qPk1(D>zuaw zj6AsEX?>QDF32Nq{lJK2aB=D31KV56h_a`dnYUSByyj;Gws&^3faK<~ZKAJI__+yF zNuOeUgP}IBzRYlS+xdT=2MgBm6nNp!qN%xpAbF%1m3&!+UemUn!bjH(JaB*;WvIlM zM;*KsrOfyVgM62h@;gK2lPGBqkPBrBS#*w>wu40C1cI|H=$%mxhq(do!KqvpfCOSE zPv*m$ICt;f$*q6C@o)ROxCXSmK{92bxcpP+8Ezz@d*NMU5#!!p)a+xhg3@)y3^uG+^t-?t>fMbfW6 za5{EKJr)D_rNza1dPAIO7qbX$9rM*kv4GDkLa%9=zryF3jd;}IMfg%LC)&S43+nimi3)A@f2@P>8Z-R#n+#JeHE%FLVcx*YHgih0vP7i2ou5aRh+3s&{w2$ed-+ynT zedka9mDZM_`GW8+(}31~W`J26vMMO|ZOb*7%>TL^TtnaUdJV8!QGvo#{7YL9-M=!ui?YJ0ud0_{ zMF()J#v@6+6_S4U{418uhTyzVc4N9ywzTAjzw!6>-hdS2TX0J9oOU&LfCEHZHpbh*oL{mm+1Bx zC@nR|pdF&k%Dg0LR~ph&p^66UP5OrTRZz!OIRNY0Hgg-tUDD-vkrtQwRpTbd$3EkK zd~}k-a2IFw=@%$%3o8rl@BH0=0~!w6&wutM`DtzK-g^7ahaa`mL&l{0`|UTs`W54Y zz4q}(A7ngbUKbdrZohch_HbWe>?}`w3CF%!e)+kq&`i7RI7hm(aY+9v?Y8+T8}fMp z-xOE#qY4;EP5v7W4Ekl$u)7yb*Iz2^@GJg}P;t?(qIVA+N`5-p1giK6@Owa)|Kt#O zqaI9QkX&hL!7!Td#Ftc<_&Dk7{-7)D15A^h>@ykA-!m4_EGiygzG;NDg z4!D(BFX=;)V3&H8mHobD%h;8%W+Ichl%;(zx*Pt^MJ2`Yh;xQKNltkC_vM9+$d9iP zId1SEi*-R6GrnWQyc~rk`)`cL94BN9$Xt3J{@JJIpf`2QmyA>%^0eNqF0U~^V&1g0 z!TeyExikK5S~EZv&+hJFd-?2T+uqti_T5XMPK8eA_>NsmuRT6xrFf-mNBQD&1gpky zu6t+|EJ^1KIbeumSSvVM+t9wFdV?FT>jUi;qnzSF*s`s7>ISy}^mO8#8;a6aUG(fPawIF`Wa#f0^Z z)%NJ&!}iH1-)VpM@BZ!fH~;3}Z2#83`9ElX@CSdbJ$Uay^IP&BvQ8fkjkQKo`+Av= zn-?=px!IO02L+YG`YXuVApft2{A)*cvlf5{S~({wKeI}_1{YiEOWA|mAaSs43|`^o zy>!Rdc|9_7a`3SDC7G>P z!u=G(Q4T1dogTHF=g-@VXUsje_u4N$`=ad}GY6*GdvwpwS1f0qXMI|u4r%scfa@{7 
zZ~kw7C*~Gr)HiDWa-y+@!aw}kPvXD*um4}MFdK32_BG9CrhQ0lFHjR0MtD`5Y_aTrhGF5w2nYur3%ndcrr;X{$#Ww~9J8{YMob1U$A@j+yM$6ABu z^qRNuDTy)Z)+ATEb6DGBliv80IR&=e>^I_P9*}Fk)IeM5VFY+QC@wf6jNz`@bH4r0=KtRIP8{qW_){pn7;~yTODqEV0{wb?&KQ}ozPbbP zg-k8iT4yC-ixEG@+5CwHV?UQ;3y`_zu^<$vbWGP#h|f3_U+AGT12H}^8k3XbF(ETO zH663FvucB9G#1VE8`@gdU3ghz)Q3mZx2e6NkE`<;4@l5GbkLCcV!Mym^33?!e55@h^*;UVZ*ghyIJd^;dtSqR0aP5)MM*nMFw=cvD1vOb#995f}ocAX#9@ z0`)|N5_W?iUkMA_;p)}0VhRuA_{k9R@aDArlDW2BC!*j{nk37rS5IdA5{5j*wPcmw z!N_O6_0K6B@JDFuSb#EpepP0|A#Ey-dGLyJ84M}^idh0gQ?w|%njat^^+S3lYe@)- zOgsU=ry2peRqVc?%u)f;#YPdtq!~@VD>O1F#!&onU`sqIFFGPaM^bK3$Y((^ozJo} zIyN%P=L8(`I~%o6>b9UPV7QuC-vEags1FIml5s;=Y2$@Hl;;-YAO|G)lrg{L0l%c_ zvMEnJyP66mpH2Gd>M;P*D%0X54|H%G7*VdIFAUZcPi61LFdk7XBf|kS6&X3$I|+G# zAAdSozCT2}pgf^bj#6|oTWo>tjGF?vs7Jz@9qsZAIa`(|bZWj=MLn#aG{ZCtkXH z*Ms=I{e#%p+=-ptli2QPVsQwslQA)KEfyDFiPtBwkg)heF)EhB8@v;U^Hl=Y;f>|Ag8H)jr^PXz^VPf7Hbuec4&S z>1@)jZUN!N>2^mH+G)Nh{ra|!ZN=jNNN=II+QhAzIGcKdtlzmTL)b=t_$!OC@!*9x` zY2M_`fm6PLjX&WY-zhHrG7I~3;pmnng&#J>kIaPVgMBGV(WPC5ekOn%IRId{X^WoX z^5!t7%Aw;ijSOwkh{o~$roECQCB}l}osDw104OJUiKgPwH*$rr^IPOnHXbmZm=|8i z7bJ2H!Y2sS)oAQ1W<0#5FW`-1@J)+VIPvFxLU>%~&!-2brzSNSPL+EyGKo;a_IS8jNt_x)e-z0?om_1EslcfR}WxU!H37g*R} z0fO@8;0uQ&*Z?Bmqobp8kGU*7Zz`LzzV;V6OQsI@$6eu1a|?7Id8G2$8=Z0ap<@`& z_plYL;^A9~%&j;y!<-4)j;|7~x^9btr!=H5?(<5URX4<;XWd6W=&wSV>v+Cn0`LtF ziUVsJS5R)~M*2zEq>`-4zz1&hk8~(!jiXAqe&r@z;=)7u)B_hV}E5g|H)+VZPDJ zTv>@LYg~iL+#Ve|IU)bKD94Q#OwDb?wC$#>YT2iB_{p-9VXQ)1s&S>qc(}>5zvZaM zjHWcnP{vEoY7(g9QaAaee)XEB){~*r!8kfrJ0h9hf8~BGKUw$pim%_h8MD(lDx z=F{oQU-Ty@Cu2W(qq9YpC7jmVv zs9J7g{6PAYGdjXEa>XPfdFMCz#Rhu3lWVf>wxHsYf|#D3>6hG`>}2DBYR4_@; z7_dRqb{fhbSzZRcdQ&0Or|-)-7j7A7!V~F#Di4Io)OM3cuUq8$V?vQ-rFj{^dp;Bc zz4|PJC7Gioeg2hy>k;%z{*dD(eAh8g>N1a+-;|e3aW`~@<9g$?aLB69=|~W7nO=*B z&jGaY$!6(qT$aRd(f(9u^V>F`7cPq{!+IVvM5cJb4{kl7Vf-eKS#CZslNr(Yn>KOS zZ6PMx0^~#6oRZ`KEN_P6&zR)sXg|*OHP&2z8VCFPF*&msS6+HG1~tz0K{mxX++L5f zo$csst;fddO6+fM#<=SIgNGl-C%;&Vo3|F@i|>3Xc6N8;_U)JAl~-RAh^+hYudcqlXW?IYv7)uCdM}hYltt{AT}? 
zr_W;P*_zt8kvKeJ-ZKz8hsUwGa}WpJbKy^^KUSLRpJ?x{UB50r+BV^G5bbb3_LV;4 zFy0Ct9T9DfvH8s8i8uQtEa~yS#%f20r=oeRG5CaJ&~>`U!W$hQjW4}=E#7_meoRkK z$MWW8{OM1B?Qz_rXB+Ww=`hx}PeLUW9r46VKew*V$JAsue(;;$i7&kKsy`2Q^VV(i z@N{)G{>T6R@5WDlyc@5*KCk)Aq}pNRrE*icTw{J9dGPi!x0}&VFisoL^TfnVy!+*^ zX-+d!bmPop9Oi^VR$dP^-*`Wc_cfI$UaSdK&T>{Vh0sriTZ#<6Q=fk+aRJ?G`jLUNzO7?No)*ZeBFZAD(Lm&sN4BH;r`Tzdh3eL?l?EtuunPUefG7UJtMO!LFP%61W#o2WQl1@#5Rsvx#peppxgXO2N)y3M~+Qc56H0^ z^BBf)K43pGE?wldF^waI%OOM`hV~dg$BL9;j)6s&F-OV&ndaSbD%sP1nitJkxc)=I zzLiPw&eZN59dP)c!}fdmB7oY3qwb-`njMXAIQYzA=M#-VxiLVt=13>1$0tYm_CK5R z+(gK&UEVuX{mQrgm9Ek`SNkd-rFo(-HuIUc<+H!n555LUjGK9pIb@iV%QMb7P@p=` zA#dL7KT&-WcM>ajmO9Wmoc!PRfjrPsYs5gv^xMv03kd@QF3raKgA9VpRI!7d^(*?lK=@ zeCqrpKlKJ@{*tfc*!-Y>)|Vd7Vu;X!hwjS~^ymL_cn;^1I9ZoIn-(CDj zDHnILvlmMU%LB};m?`C&DOgtqC`g_WJcXhHWHP}d9$ah}cEPEWK-ltxS#DAHUkNf8 zzZl|p;#%BPM3+B*4o{`2I5j>5;mHFKx$*KK+mTt=Azx-L2QF69%zLx&BaQJ}Styji z72MD1229tuyr}^FoTS)afAE=&2(uI-C{X{rF`qmMOYzP$G0(gaOCY}t!-yx!!2tpy zTA={&$WzjH-l45<;@YVnvPzt~SwE z^8)p#JPODG_0Gxorv^edaeML?7$svNN93HHoP3C)4har5)KQjI$A?C}@h~^94sLor zu3oaUViIa@z#6ak2k*j z-FWHEuf|LAyMFgev2f*;n7(>HM(6Iuz}QuBk{kQy$GN~7Z0u7REOV7m8BC>3$ImIm zBjJh56Sd=-7_sZ6U=MJ}#;FEVELhXAK!XJ+HoVR?fK!(*gAU1{Yu@-6)fj>Bj83}%)7b!c%9<@QYrhs+^#d;TY(s}^(`cs>u6tW&#X(02;XDjLp-~||E{Ll-p`DmphcQ0?* z-=-f=)FHWHMbPV>cmOx$)r(*3idVfq6P`z`eD-*;vvK#*Y(o!`wxh6JKx3a8$K{-m zbzBTH2DG;%PkSWb>bOx&7}z*HI6U%1imRHB502xX{r(^LO2fs)D~iDwnayn8fS1ff z|KR9Q6TU-D<}?Y@IFXSTlO-pmB=sae8OAs&CfHyy%H}qdg&6c1A|qIA*6 zVNE^1v#w_fq(0w=om7v(r zUFIps*PHU{yTRj)By=x7O`!55pK9;HV?&KiWDZ!MrEpGp13rAsyoutX@pE2Pw*0!R z0qTs#6O0$cXVnitqF_o6sA(lMrc&o*D@9O&S=${D2A_WZ`;- zoKyFqUHK+aP<=RGQae+*b2tNBkJHc>l_7fT10D)94bva=#x##CP@&K8Lbx!DKQ^xX ztDcOGPkPM8K?xS!d8-ASv-7U%$)Pv@4I-BE&H1o6kXz5lNx>|*kY^4{Re7)h$RZMM z^6Mht^puEsZXh?P7OCQsZ^|0IX+U)Z4E)XifOz#^jyp3p{`jN# z<3IUD9Cr8Os`{AenHg{H504JS)Wj6id;#Ay9*L7PzVdq(Ty@Vuj02U$?6lgsxoPKz zyr&JH;tU>GaJ2lDPv;$-VIvFO5)v{)e~_!fh;JMj&q-jrl&Ah34s}Y;>nn1gG4?d* z0a*yI9tNofO!vcuE@{#p4_l-P6EQE+%0Er&IfjHP9lNE(=r+&PTiU1T$tf@Lkxvd^ zaP24k0BtfrTU9?b$;~l5^M~RfANRB-t!MX1#PW~RZ=T7opEFOctNbn&`aONQ9GofRrE#1xNKy6_@oi#tyU}qiP=xb~fWz@Bb{m|2uy<_+~D8 z#T#iXz|tQ;iV+ewRVHtGmY)~+vHi;fz78pNffDS44DIr;Ri2-Nc}f=tj(Z?RU2gJgv#b8$gmD5V&ZyW5_IQbYL5}PX&f=K2 zxn=L~ZAWKwBhEXFL62f|c0O*t`K1_Nysg3WiQ0he=x%Mrsm7@XTWhiO>}iaMG<_Kd zv_Af5DXv|ah;M%DyRp8y?8DPnu3U*jK6f*qamZmOe*EK~#JO-^d;JxSt)H%*WZ3Crso!Y@M~>tGoC(Kij}n$xAoj+K)cAB!5myZ^fmo^3SW;7-=Npn{ocKo z;%i_3hT5fb@%AL1J$V@Wo7{{6ceE2*FE9%;;_@zvy5bTvMrpXOWm%nN5`r$k8OC~q_l z$L?V_*0#1iEj?75IV2vL8yv`=1H>=gxfyTXzZTQ8qp{sNjz9m|C-J9${$cEBtiQE; z7^^#-*getsU{d^yoX4&ESK{9FxfmT@iGSzc`eEF^eLeO!cH{nQw={;BibtP3h=2b3 z|1^I8AN@S;+|d|A^PlQ@w~x&?Y7R32Vy= zvyP&SipL@UNwqb6R&YXXf)5cCfCdLb87Ctr&lMCttp2m!ct`TIBZ!f)7WJ4lhG9JP z&;0r5hS{H$T{ZGS#_lHd5> zi5)#VEqIZgcze0|FIs1+%U!jbo(n3&mJe$?k}qP*u`Km)$e$aaoKymqWtL@v$G)OL z8C#DeKmH8G@)()2bQwZJWs9Fp;sB?8jGpP361q9V`_#20lZ8& zlIwaCW8$BA{BVqn^HKw4jGjsvRF+Q1?G5SJ5){1$O4<#`o$-rX(2LgtkGC%guwTo+ zjX|6W7vtj2sIrl@{H(_V15bzk)BoVF{z#SO4uVV~E$#&Ahxi$2Q&8F9xZz<@fJ!1E zP?5+S3pdX3P)sw!^wCW8E8nU znKX-zVyi+B#(7^fi_WgyaASVHDQ9uOpup!`J2?UeE}s0Zu0{*aYI6@VeXWK1+= z!>jXD!z)d2=rmOESg^3{D0?L`JR+KdeE(6jAjO};L9Z2thS(KN<&YH)EQQfP6USG` zkaq{k5SoyWgptpJgBum_#sdk-;ezsiUwojtX((WdGKLBUIp&G;1mc%O4R%+%I;3Zm z4e@D^IKWW@-O)J>JLT2`&jgcM5-f3T`H2M}Xh{$ANx9OmvnwQ*L(jMYuuKYpl6D31 zdr<}n=~wHP-KMQ)+&mLcJSLlj^-L3|dgYIxKYf5c*Q5dHl(Nn%0JGutz(r-4Z{g#X zuhZ9~EZ1MW<(FcFPwBJeB)Q9 z7<1)9Pl zukb0oD>qmG?IQNG3Gq{*{S45CTuA>@p(arHAa8CnF4Ao=(g*m___)sED<^(lq7CKfK9R92s8t?U^da z8ZI{6VJ6tWuMa}Tga}8TgR~ctQ*PX-?$Tzfjo>X0-h!n)+1S{NKlp=x5&z;}{Ap}% 
zblmPTCQ!+F5rjA0kB&KAeJEPcPy&q4&;lkLRy)fUx=N37E3)CLJZ=FXZS!5){_#n!$>!T{qrL`idTJ&{*+lge8XZQ-uLk3SyeC_{< zChS~^%J)~dmRGzu`uOQe?CkBw>v8d|KIrD*WzpMz2`i#v53B&h|bLX zti}=Q`}M~{1mAJjqxeuply@fgmc7c3Jb9kSX1~X-sdpGQ-stnQA1dt_#lw&Mt^bk> z?P!k8v;6!_1{sT@qbj#Xe;Zvfx?~4^T&WX&8$s!UZ*P^8^;E3m$9&EC5Pr24Y@D3G z=4LpW;JNP*dCxjyJpG|ZcIa7Xi7S0=@}jVuTfwKw6TY1mJf|*OKC&^+hp6~gKGiFK zt6%<}lS&h6oik1Ac^dp3R^FaSzO#+t&=GV!pAc`Za~!UeX!+hFS2HriACu0ZUo6sk zjw|}e4;`hwL*F69XE8?6Me$7@`D_KBuNavam(HMv$L5y<@Mp9c(j&hasQkN5ix&`1+y+0iKhB4E^rkth?GWR?;AGh$1IC?MF3df04#E77 zC|TB)k!1-9{Q#1=4=52w0lqm!ybkiYDaN@%BVES1se9l_UZ*PedQeF;tqj8Tnn1BR zz=EtuAAO_TQ~ya=Wvwz{VUP_OZ$L{f988+v0ET#2*?1aT+uc}STl1j{-twEAn1~xU zuf_brOk7>K5?tYlo&(3HY8_6Fx2OhdZnB(Jz{FR4Xn~_Y^_K|OF)>WAAoT)CO7k*I!If%x{aOs)vRA={N}d62SSh`$lw4eCrMIr zpv;wLXPz`j4=;2KT=J{3L$8sI)4*T66?_iQxPMX|^k$usfgbYkoD+GgUi-RL%N?4@ z3pxP!wkg$dcybv0$@U8)N(LPwufDz=UK&0`*Q9V`XoKKi?StB)^Rte5J2^Os&7JM| z`TKtyzxmzoxb6=Ne{yD8xEyBZV43Q=aNXZ2j6PE}23C@ON!JPZC-YD|(uN1~smByx z@=Q?WAM|39CQxi=riF0PVC7G-$ulw|3^!?F^z7vQ^Vwki050O!Jbo5bo?86Fm;H*n z52{}&H_5q{eU@V}gUlZarhe+WARa=^us)Oe08Y})@n?qV$Jv7}^lN&VW=2eXOkRjo zF2Po1$$s4v$hiZ>&t3`_vpsr>dSA${M%oNSfju`oR)dPDJSX)FHVPu`FBKiPg(>^oQ?m}fBd)O^zb~vdpqvj zy`#Q%Fiww-)UWW(a}}EMy|c9)AN~A&rG2Dw>Uuo+Ol34OF&$t3t>22{y*-b^C^Phq zaW;qPy_YM|^y;`gs8;+H`DX!R7ze>AdP@(PF8jWt3)b ztOz7;0Ao*HM)a6whZ$m)imI zntk55-#^%ouJHWKrvX?mVy>-p%1_HY&YcX&wuet zcslfd`QQESkMwYd0d@rtka1=K0>q>+Gt8znVI+V&d(dVio^#@@pN1wGt>x^>C>mMV z#HZr+S7x4vPyo}Fn}u)4D;|~1>5w}{6MCvjM0^iEFl;gjo9vw)vmu|AnZ-Ih^UWn< zi6RqTW_Wlo8OoSqF=-h~@mavbpU&+}9ToAsp)W(jU{Q(({Gs7ZCPl0`(5i7x*#rV4 z2m6yef3lqq2jT-x$|WJWYZmZnB<&@6xPoN-lyAz9!2rd>m1~1ya)_NaRX})ggGDRl zMoU&Rp)Ekf;_@$LX1i268h@qJ$jKQXAB2OM{*|A`4Zp-oSMo|!${bG{{S<^+$tu(i zxI%q0df_GQ8lw1Qu>`RIBnsuPmq+Y|u5o*57>cttVDReBPRK?lTP_P+&`rDMy|Pw_ zD+n&YYeVG+oXSs|kK)EZQDi5*+z@W#UW8g!!g2jhT&Gc<>2syUaBa8Ch4Oa6NQkoh zb{D!w8Pl<%7mDw<4P;o5%tbY|AK+4cC>!O=fCD`veMpKQq~NqH3Osu{Ysoq-BFWpUBoA{DbPfL6V>PH+gYp(4S-J6cWcz7CZTM= zyAG(_DQC&I*M`wfib4JB`{WH@C>mf)lqCcpx~!X9IYHQSxFSp?t8g&nF3d7)x?I z<5mLYiNONxm&*W}xRn>kx(foIXa86udtgWPxx{vqWVR{Z229j>$|%L%*LVTF`tj+| zO1FTYF6di7?KZUIlP0!snQ;S51Wc=q_i0em$@a5xKgUl(PyZ`?7X6EGIbXS$?5D=g zzNHnxc>z*jb_29q~C_7 ziR**vb0MUZU$0+%0iFQ4^va>lKag%jm(LgZBJbtVKwZJ3()ld7oG%v@gd4pnb_=bJ zN&0n&XVq(EyGC_Ax3DW-;-ph~Cx~!luko$liX9$x@Ch4Qd?#3K2j4w=^zjqF0kXZ- zQCpMk3-P!G0DkiOheN&^o;SF25|f*DY?Q0{6F`}^?LwK#pD_O3(V2ZL{U7fch;q z(ql7XPcF5x*Tg#NIeId#v*gXbT_r#y_KROI#>sD&oi`0?!cWzq#V*lE(A z4N4yzR&d5+@}~5}KmJVoxru-`KKcG1SMU4afg&W1=*Z30{=5;?JwEc>p(+YAIRr2y z85|umIo2fl)Daz%4FnNlk;E^Koj-u_O_I@ z8|UJAZDlo9SGHn%=O}*pXerjVc{BJVuHU{Hzx|uvhD-oX|jxPhIpILoysfW|Pi!l)j(UJoH%k@di6t z6`b*@i=uN`4yl`Yoj3CuGzt|$4-^VL+@y(zz~6H$dINuKJaP~ub%!Lg?ohtaOFQEu zW6F^-!e%@}_<-6^;^Z6$8mXg7i4>i8FZ8m>t2n9ag*WjstZ@hn*mazTOlViUaW4{# zgP?Dk=#Fw^zVw&hc~luIZcP_J#^|wSExw?U0n$Oqn9Y6jEH88q8S%^_2kl#K=BZ8K zV5Z`vzRIm<^3DN-A?fJJao3+gp-;naRI;J?_=Z2XUT~m*Jo5%1SGvmEi&3nQ>L&6CFdP$qC;GXTz4oH?C%9K^)#aKGfXab8+&bd{`d1Jvf8)EKdnklvHnc zCa+j(__59yKk9+g`n=*rK&}|& zie)HqSZ8=_%x?ivMx?>5VB>z{{UoMmr+n4Ah&s$0o10&d9pI~1 z9VUsWoT=YP04}UM&Q$WBkap&#`U&({^NC@DgSwLo;_@RA0p&y9$XDv7QZjv#L0=tg zJj+U9xU-C;UsQ3*Req#Ly-+l#VQQwCymKoEG)x`6F8+y=@#_tL=^@(axyML^lSfli zZu8HFtjRlR%Zy34SvclXH#2iHvAVVtTbpa~z3=~qc}EwyDi?l9m+^$#ALef6Pc&hN zifKIqWT$634Y&mms(e*1WK?8mn|}?ss$W8VZuq7%;1=+li#FcjIe+j?r-YFmg=anP2cq?$ zI2!(2?lP$Fk6&FWFE(bqHULrUbEQK)>5mE2jeR$Lh8|8Gn`2i14=3@=qJDVJPrun& z{OL!^^QS^3-b%#qnGp+=co*mC0~!mo^6M9wT1nKpo&}?DL|gD_ANk{O<1qCRUL*v! 
z_k_tX|DfIN>}+dDC$cJ%b9ljGFGPHe4jX^c4$ z*Kgd4-Sw6D=#wXEcZXtOb~<)1BYd5aNwQDzgv(-lrKZ>Qt%kkdV-isSI@5G+Q ztPg(uQEYB(#LeqB;?=j_P#<|3tIO+tdw+LF<6z~3zK3yYmyI;_6MV+)@w25^-Qc_Z z>N}LB`NhR}?bW++-4BY^!nb z@D#r@_3z!2*xWsilk;KmHxsw7&&SjdW2LV8_A%=>-}Zm-a6SI`rw?NJ;54SD#p}#O z>@b!Z8;JSoN%@USK8tbp%C-32-}!3%`+w_y8}I-0C-Lm@L*@Bi+`au;{NiUnj}Jfi zF#enW`tQZHg^2sN?!>*9uE)#wUXInLPjd60y%iNZ8Sv@Y=S^p{&)i(WpsP2A$vOb6x29CO&c#;g)r@GdG3oU!zcmvm>-)>;o=V|&jV%q5VR=d zdIfL)YmBV1_Vo0W=kR!n)+~XHlZ(s9pjp_FeVsCdR_O|zYFo1D>M`3(w zkT>-YI{UG+m3FL-zS?P* zC1o$Z(V65^M$h#IxXeSQlTSro#a;CSh-sjWR3z|fod&AD)aSN+DO?D1{PJ8OeX1!y zM$Y2t(Es^w=K%mJA4OLAg{34`Dhmy{2JaREwiP!E13w5t6G>n}-SIWR@dTL08oCOv zn?QqVeBeJPBw1+yDoC#qDU1m`ypxHZ%r0EH%Dw*po z{tR^ZJEH-Xrzg0ogxF-D_~Vy6Q(&aRAHYTw6Hngw5srLWc<`GQ`g1y7a6A`yq7%BP=J7&-EK-8SbaXHZL_B0^B{|8C{Uf=%57dr>I9o zPCcIZO?Uq@j38t3av4-#@RPsBHvclzJs`y(8(gJ+Il&$nS7t~V6u;@w4;osf?Tn<3 z;7g~8jX#^FnQ`>o$gaD#6+zr6UbX>DG~nO!Gui~Se9Q#hU4bWF|Ojmf!}V)p76 zWBS^AF}mFmMY_k19CunQ3P50Hm=m?WW=}%sPOuiIXGIe`f`T&SHyOf*4p-VVv(caR=*Edwn zh=rT!Q}Ar?dys)2a#X(De+?=>W0NsF&XuV%;*DDYSRETPu1^UHAAPdS-JlOLv^90w zcilP-P} zkwsgcd*xlBx~rY}&jq|T?nS&F^vS{Dn5qw%K5Sl;AJ7-opm01(eZ3^P3s4b?-L$*t zUwS2!bSoMSg`l5zL3MIa)en+;5pmPUVi8?&9o?jOPUCO!vw_Wc?&LHMPmW`JY$Wz~ z_Jn&T_j!Eqi;rS_e99XzoEGMdWj2&qAo5!sqL`nXO8tkfho^c@-gCT_?FWl0xrxDB z6CR(^BGo!*NZzmk+S?lDwpnC2bO-Nbu%=;98$we9b}wyg+Q1U7KWNhaCC{WsTTNU& zOJRLAw%TNmC&8!fFYP&->vG{MT!?alTfLPgSl~eyJSNESO+Sza3)0{6kw-y`}?U%0&0KQX~ZJ2%z{Q#5=~KW-~4v) zedg}zaZchrcQ}s`$!=nH+C00>)})1S%F_L%P}!hi9+UFUjWi%PW?4YtZO&us(}-lK zVwDsm98$9?U%%I55p@7|w!<2E<&vpiT7CT_AWdgvu{Ea{kq13k|dH8Mdyl2!I4 z7$QMO<0E8=%oPX2Mi>~-@Ih|bUc6F103TXWx?HJ`JXqi$z7JZ!J9=%N1h{Fp#iZxI zvIe0S(sfmr z`}p9%8`!klW2zf`2bc1qZdgxTud{tZM!flt;VljpHjB8f9Nf;p=|U|JD_oMwyOmn8R^dpbTXz(!tiNb7EjX2w$2Da&ov!PlB$rr zB~I$D;?f?3_i+9DKqdmB*b_XInypkAJ7GbxjZRDrE62_d7VF?pQeXAi(G}K z{BS6Vbnv4*NfCjQF0`Pj{77E;=0CdB^`U?h={1p&Ejc&_PckD8Ns>O4kO?y{9bM@Kae#^;&jp{%rX-AL_%{wK7p@@$Hdmo0e|GM?M^L3M+RJa9|L z$ae-LBvC=NdtfmYo4SxMX^1z+>7(*IG;}MFC;XUy61P8``i6f`EcuY!Vlv;!Q`IN( z21XOf08UGU2Hu`eyx#o5c%lQVe);7E&1X@cI&^Ubtw?HXZ76l|M>_of;3RaonAh>&XlB#qyCe_leDMw_dYPC_K+r! z@f%|l`X@etb*z43du2H~n`?2}*^SvNb8+wWFUHu7n{je<9^L&;46D8#?eD7ItKZVl zaBgPChuu26>+whb{7*DaIE%&U#W?7m#mdr(ulS$Uczb7SFCIR87{B<*uj8F}-i|lF z^S#*L*ovjc5985?4`M=n+q+-+N=#2J#OnHbtUP-fYbz`M{2CiszUo=y5H`hVBZrlj zCrfLwy0PcAA^L*F#fA9dTd&9L+?=oDe(=#pu_0Qsd{%Sjn#b$w8|(4t(UVwPTaW#N zJS@wcgAM=A!Li%7W6?S<<0r;|(<1|M_sUGX{^}jg`M3)FP~$7LuN+2Jf5RKS|3Aw9 zG+5IlyYIuY>h623z4uHvW}m?ffWgcFAch>0AV`sfMT!zdixHA7OJmuNkR5gesF1(- zvp@Qy>>T=3X>pw4%hbG-Nd?RG{TTp|6ZUs`XMSLV{MogQDL zubOWU_YT_YZ{Ka-dhK32Caue~`IH&chy&=@e)PDltzB%Js~hb_`1}xqP!dJ>ZSGeOTYA`_U_y7 zwVStZrVn&p;GuR(nEu0qff^5KdUg8QW5$JkhT&76Z6AN-)yT%)_IBIb^_$}R$i-g! z{vZBPd-IJqGk3C^^04R;^5wkw)A0Y=voBI7l=H}gpo}R;U#1OjlI4e+N#mHhw(D9y z8+ep&WnMe{!wmm$O`~6ZF-<>fqBkZu&bvcD&#u4B<~?-_kLGE;G=;CS)bq4_Ki#Nb zXJ7{E(AC3-VnF_~#!7hd$k@g*S284%MvqS&2u?v_)0f!q>+;;o9DSL*O~befp9v;? 
zuBdG*=_7QFuDok4)s)JYVZMY0oQfyUIGD`0Nbh=!3b{Uil$iWQcCfjk-|-xCn8o>} zjO)~AuFWjw1p(&fD=V4TdsBx8mSZHJERT+MGjCN-IDU3Mkei7dn57BSAt=WzjuGAT zmnV*WEMIPYq}(2mwhnbqJ?M<=pP&!Cwtt`Tj~niXk30ylOZ$J+4tDpOpBQw5{(!N* z*YXQHo{xFff8^nSUEur1{lN~dVYoR|kb?qs2tdDwhLdjpQ9jdhj2=7M_vQev{Xb}4 z_kVm04rr^})2%=F4lf-SYEn;F4}r*Q>so$iJZ`;P7uIjUd;jO}q>f;TGAEC*gRXD8C)ylu(T^b2WFpHq_kinL3?6|GY!(%mB z)&0o3Xe-|9R{g+{xSFCF$ne8P-yQLI1b{Ql(sGia+^v5damrNQx^U5$XGxW_rBmcr)k7z{aDs8mG+qq zRE+b~i*-f)Sk=jTkcc=)Z^l^AQgNgil?PniEW>au3%oACm3(FcK>z6+6C5X!bv5Vd z*%2ZJ;6zmlx3nvr3*R9+gZ{lJrWzb|NKOuDuntWLE5e8piqjrBarVD(DttQpUpzk#3PDx07lH{g3_|5j@HDe|l;x-F49qzy&$hjr-1-fUM3FF;+_vy>#+szyI+IPP5LtuTY(l$%KC%@A6>EA);Z zot>cDe5QT-*10Msw4i@O0!)jJh0cUua1u7h!}xR=?}p z`1aqvp=4nVb5qfU8quJINnd%7{B(s2teE_vtiE0E1XudxiEntjLFjYx!se7cc-??> zIG9Za;GBpHlQ$sYLk^zuT;+m$(3A}TALRjgIu}`h-@=7AvBrZRYre%R(vzvT&j?`O za&%~4Qk!X&hntRW5S`}K`|j>uyGMOK+-6dIL^#&BXq~V4M!@Ffjl3mrX>+Sxxpu9s zZEY~FUP%99e3K z!pD>5r_k1ml{)l>01u$q|0$2&>~+EMxbkC-xw-3v#Vc43cU)0x2fGh)qtD)>qxRt8 z!}fp)|BZ*+?cx4e``9P1wtwlr_Z#g?KmP@$P{-}g9S;Q@WS#&@MJb!a;Tt%81AJl8 zgAY`wY>WqORiX*-6`*u>$=J?D(VI0wn5o%{mCjj7sfG{54qG!zcgOS>@gT)D4) z_u_oXr#6SL6lP@iytX0Q7AoNt( zqBAImC{|)e4T@q4yv*_FLWWiZYrQb0c#n9!&-0-3}6n2FNot} zzC-JqTDB(->v)TXZ>oFH$$WjAPP3q1j9&99O5|_v5#?}GdoG7vPJG4AJfJaeQ(FiM z=$Os&rtIDVK)gm?39Iq#y7AUOrRsE>y2|N;Wwfk=_0hYDuJATj3(2FdWlh>G{>+UP zdg&q1I?y$}yplhvDJx3-;bLAE@-pJD422)`igQwjn31nx!e}F-!gT&;T|_uEY;IDs zJ;)Pap)iNZtk=tzw{j5R#TTA$EAagAct3Tyxw+mR?fdOezo(9#Sz2kAF0Z%s&5hJK z+9``+OUtV{6lELmz=gD0CTR(8WJ8KI+MQnLbS@_^Ewyz+U01kVD>>B4cgvOy0n&Rg z#Dh^DLhwLZ4qg!+9*|xh8YVqaJjnyoc~)`oa~>HqkFNhglDsp|0GB#fqs+TOk*iaY zP2aY%pUBND!b&^ohEW!k*BnZrFHV^#zc}qnl?4yRgkD9D`Vz($Ks+oU4R9^ER0)(i ze#=px!7F*IATyS^J{J*#hiML)4jbOb-(?A%lMN&C^)GyMauPhijr2S!&m@Xp6gd#@ zq%LOEE4Z+>b?H~P%HOnq`5T1NP;%=b6g$F^nMv zYuN_8eR0alvo!fT{sKq6*z$_!1zQcl#QH7gprIS(AaL=XGXzcC_=97(U3Rt5)g!tE z(bLE#ap&g`!%pXm16R2ji!hd;kDDKrpfb^zv3cZ%d_*pj6EloG*6jn+=tm7Kj;8{ zmX4ye!v$N_qvfl%joz|s37guAn>8a~#^vAjVMDYfcjWc7sKf~ILHEg={j~lrAH5vG z699D;{^(%aJd}f9`s8D7a#UBK8|>$ewC>YC8ArI|=Cel++R5P~daToS{e_p>l`s5! 
zJNCxKV|2*DUOT}a?%r?vJKH(Dwz+kwEr9Fy|M9o-mge)%J=?Bay4JS0ciQFam)bMW zzLd>l--5mQ{>`>dpY}^%`!&SzNxOUNM*H6PzTH;WR@-O3@Wpoh+F z!sE8Rvzva-&slkR*6-%8EUmYl`@8Mt?Fa1$`ZqQC-ug<~7>YeaYv?%sY0Y>)d!fg%9~uA>)hVqq7`Bo@cyu?K=Ja<>mI| z=&^|Df+%a!1Q!Z~dvJ2U`e5rkky0jm7wDXYk$RE6MDi?7d zGH)B`{w~H?eh3`xuuOl>`k696;Le*$qaE$LyQwGRN5!SI$9(=BGT9jc-jR)noM%rS0OalK6>tmWp-bPnU!Iplk@nxmS*v7GafM{EW zng$)`yu%T^V@}5|%Hks9WN$NETUl#c8&{%NHrCd13)F>q#*xWEGH3(N^Kc!0*skS+ za|q{u&e@$WNQdJe$83>5(+lSlkq(O-+cMqE7%8KEND3$V7~6Ta^Y{}F3eZl@P&6JK z(C?=G$?*ZtehT0S{d-886xLhRPO#o~a-17B{G33>#U8fl&#ujD9pf5jddKPFcWuOh za@J+!8FYtt7OI;XmsR=37|mbD%BHE$PO0UDO(&hcB0xR`1|Eu2IT5QYI@z?d#+hu=2k^RN|BuI9tyx}C){8y;)8KcWu-6MJLsxBvCO z_)QA*q72te7sL@r3k@J`2iG4eTPDElga?Ne)f+K3iv(S{P2oBo!+e7j$=2`Sz&sR+ zz+zos$#*-X7Dh{rgN__L0Inue#uLMTCISc=qmR&+jep9L@}yE|h`uewqv=Z?>m>LE zCu$hOgjcA+P)dLXpui@R*jfazQI~K!7z#9&j2Li=Q)Ee(e99jcVeNYhGYbc5%*$q#K8Zi^DQl z-=+}QCfz0xYr)aNCX5#p(6VzDi zf28ku`!owbO!y&53GxT)BF2P=!*n{0CLj4S%!GbS@ZqM|zzCIRXb+Wx6+Qh-(r!9$ zCa0xHK$%i!;jtjKI5co1TtvYItl`1YgKSH>vzdJ`PrAh^ErjNZx3~d{Nn43$q^4 zfd8uRat$LD1)9p(sP~>WX(%%t46PStF1E*~bV73r3=DjG$6ElF87I`{f4i^>dsmn+ zTy0C6&$jus=UZF2O4ugpnE_;z+l5T{g$_%JWZ?~J!UX+imugW z;~(oF#f_PqbQUi8jH_X5Avj^_4{S3f8xEN4Pwcz0=oA^5r)<8phMakd4?g;QA*Vps z>7&*ulkxC|GL6Y?^=Zhl1$K}Ko(yVIe)GVW=c>0k(u_V8&d8zgtC(gz_7kxEcPboD z*-8{Ul?`1h#+-)C*f$mUIsK{W$qJP~{`a^iJ3x~6tIJ~5B(qA&6m#s}cbbhJOy6yB=S*o;j~T=Y!-jEp;lFCL8r z*ov=ZgIRjCBTh5iZaUBEu$gVu35iD8LKYHTFmXdbJrymp_$#95vtJ(m^Y)xcGVzwb zZZ6VPo*#feXL_QTk>~mPsN~`oxqeOMXSXpc?`1nscZT^$+Pg_V1X>qdl#Pni;>StS z>Vz=R$zPrLzK}P!U6^oV)B~3LyNB(;{hjvu>u&i@HiZkZ~w~HGWs- zL;!5pY3iuzaf_dh^Th8VT}_~SUzNO5=cGXgdV)TO8|;ITIXCg^4In4a#bI=$*l2j7WRV@{90*oEHBN(P39uNmM#C7OZca$I@UrD`DPPO+6dQmbG0^$%%Ac6 zN#S?%tNM^Sq_+F$VcXr^$=fM@k28FtKFriU#Erhq%bWSu_STgvxhB-N1{PL)GtxSh zq9@e*A;VJ_0<5c|ljNb$wrBl&-M?=UWfRp&CwRP(#?9k7U)h0X7>oFk3cilbn0yM& zav(<+o}5ru)#ur%UT|CvjTbzKe0tC>9313|@G~a!hkJ)@Z+E}FbNhDt?vLMZufKc0 zUBAB7{>ESV^X*Gt{9HQ)#qQpHCjR@8FW-9A+PAq0<2MFfczAr_G@FgJ0i}GAKjnfl zc*V%Vn+$9|iV$*AZ+}{&Sm0~ycM7@K$esL9UVM|@gLo7L-er7Ds%(Oq7tb}XQf{D& zOqp4g+&FhJ+yi+SSQX1};pwC4z?_nY}Q+_ z;G&Gx#>C~qLCIdV4R8>qt4Uu$-e!U1L0WHnv=MWaCd;YSm^R}O@ul7!>-0G+yi1u( zez~H=W;m4Y)K)OL#C2L_z!M znTOOaLNjeM2OQ)TeI0&3K|#sLUn^g$oXVm+G+4PRooIlDw2VE+xL@3c`LsVcf*FHORJ`OKLW&1MEIz;Qy zo{|UiU0z)Q#5n2$-fwKIw)ORud|C$WN>0ASaMZRRdHwKldxSoF=K7VkxZte~%Ck4o zEVPZy)x6>4Ri$a0j(aPY{F%SJf@!$;^#YMMtT1D-4~<&S0#HwW5(HEU$)Vlg!0CJ_ z*0?b0I)d$AbysZ$D0H}ODGb4w(6k+Y!bQ;O1CI;6@;_CIo6UZ3yV+9)*ChED$C!BJL9FivV(ylX~P5#nDz1glW5-)x7QW`_Ll<9#F z#Wq~{6}j=D^3Wfd0%!4|8~jyHAW2hvppCe`WnGxiF^Xx)AUX*d@=sWC)nN;D81dp` z-d%MoPSZK}CYi7a6R&0Pq3D*k6-TaJZq<7^RE+#CNVolRK zW1jT5UEA7dzxu^bv~T|Ff3tn%>%T(#MkhGGJE6`TJ7J)h;*n5VEN99Z>du=4z)$j_ zq~v9Oa6jQ48YeW5huA*mh}DQ+YixzZC0&YZ{?V1@Wu!9E;d-94HZp;$wX3q8Gj=yY zOlPj}{^_cVB2&6X8CGsphJnx#f}nrm$JHO(VWctPfP`Ki2_N)@9`lP!^u+c&{lhIW zqdfMrp}~5wp%Xcfo8NF}&B%CSHNp(PW*T2>Ff;yh_zW*MeJu5-T+7__qmyXqG@bPL zY)PyqDj|6f`_E`5w9L>k^H@Q!eDCHRMG)jnkQ+LB|l6gC$`TZu02(gK_{% z|0H4la`m?9W^BRq^fBV(-#Byxx#1ajOk`yKYaC$RLceS6((9)l)l>H43(?P{q3+!b zNg~>&VT+5tR590fxA)r<`u**D_cHc)`SV|DOE12XL!{obf3o|i9Xz<#?!EhVd;5p4 zw-=s&wyiBMwbx$zZu{YPf6`uk_0{(5wQK0^)7-Lk`SMk0dE9Q^c)#7bd#gR%zTdw1 zr7yNiSFg2u_wTfy{OG&wA$fiJ^Pg{@{QNJpUB)9j5AU}d@4V4&zJEL86K^kajOpgj z;od>M{XNI?k6wGHJtVH?2y@W#Li^08KGB|e_L=tXdpFw;zW4p;y{pi$wzdJTv-a@e zcDr-uLEAex$${a+Bj#}M#dW9?`i|q%3wh)I@x_z2w7S?fDD zcCH80%+s;3`cD2HGiJ&`NZ5PKJjFMA4^NKTwJVpxk447{3s2f3%JXpdtnE;a-Q%dgG?kq(X;{Jz*u=$|iL-e|8}zuf-ZSAVJfFTVLV+dulR|J(L|`P=_- zJ2^ORzy7t)v`>8M=iAZoPW$`+pMTQ+qksP&w$;tXcwwQv^5Qe?um6p|(Qe&%r~QL} z_>XhzL1YdBYmLrvWqKk+pw{8+L#6$gdGhAvEA8`N{Dt=73(w^M-rmEz?ey5PAGGcJ 
z_uEf?{CazM|6y<#=E0FuKfOkCIRySs|Hf~&wh7N4K0r--h}1p|dBdbBDY}^htG|3O zyt^gM^`H1I*Nnf0t~+l=dzdxVZbn08@88moAg7h4+X16%<@9-2q3zGT()TWqvf@pXv9`rCr8I=!_BooK&%|^Z14jzy6@rlFiQ{V88rb-={LaB$`*nSQ zIS6gnH}yTxyvW?f&Hp9mGK&kjJ!^Agoi=m{U9^_5s-F@Zhh=Lo(%S%znxF4DLBAbm z4&w_Wv4xd)lhU|pd>sIQnJ*@;GrI^Xh;;6XA*C~HybYNFX~Knd5MTu}01%$AEJ!$; zOC=I-n9S>VP3JX#H4zA6{Nf{x4S;Z2WSk;lo6Y!x&5iAcWsHt|G+1M2Ils?Tk`3W-C=12SnoPogP3L5St8}1qRdc zr&1``vSP4fj9=NT3<7UhnUN1J5j9v8tn&+#@)zqS&%(R7kem|^zSOCUu1-zin4A3+ z$2pb7$|Oc3ZW%2O@!}7T;3=4@kM74pLHwcG{N;g9X#gE>%%G>rIoy(~M1aJA@^+w@ zlxsxwddNgTc})cF06&-KEE@CkL3avY8xB*Rf)rc)iUF>?6+eY5<2Oh(g}I{*J-r?c zNJohy(?4J;?+Ml!`fE{9Su}k?Oz0v-vJiY^JnP2Ovksb)m0{{_J^~^u-6SovOoBR0 z_iIYFG{aR7@FZECHtEVjB7eFiWYxB7fpnC&25S*U^CevQw9K_gI?X$i%kX}LQGN)0 z!i1?Nh}{5FkHejs2^`Ld8trP<6wiA3j28ZBWSYP{6+U4etzagK5`<&s7m=f+F%$SH z@OhF(e9>fjL+TAQxCLEyR3{9uA7Aua#%>ra(_yXh%m8KidRtz5rp<3W*DfwzA|dgQ z>)ZVqJe;Wn;Nje-B|@*43)UxjIG zvrnas830o^)jE;CsVN<2xNWw|79ADWD1Wp%RVN9bxSEM)ChSAZhvYvsKP}-fPs4l^ zz5$2aq+wbI%H2o6|8cU8c{=^5=cmjBU%^kSs1`P-Mt(kRirzBk2@ClI4SBLW6UZTB zF_X}Vn|dx~^dV+8?N2jL!_CUePsfqIztb^+h5kRT{8k$CJD<-8|Kk|*O8XSA|L`n) z1IOt5(qHLUfrU2n=8-0TK~u_s z!%yBY45`rB$5isf^z)A2Nocv%gBE9uOR8l4c7u1ad6@K0occx~X~H)0iXZDOQjX_q zW3Z3;BaiA4`Dy7L_vg1@370>!eoy(n;HD^2BJa|cGzomA_{vS_Uz z@i@M~pS+-*FozqkgK&WJZ~fSpI%z*{2l=Mnqvkj8EJMD(P2E}_UI&;%4a$3Czgo*9UToTQ?Z)yTd%!Ah_1O&5IL|cCab+rHB7Rywy-hRL@wvwFDGD-xPV%tk(MfMh zRVOZDSw`te`^ZGH>X{}G*(Q=SB{wdFd3~fercn89WwjQ}tba2U-o+*9t#NJ9X*smj zhN@hn@AetZnSAn>y3(o-gawLcd7pCe7rgK{aDruXqw2eixXW+YxNV@Ks}3SJ;F(RM zH+3rC{7aq#);z>ZT>RxGIphfE?N!-e#;+c__}CgY8dtPDTkdFjU|m>Ow@aJ(scXE2 z!`!_p)V@SHK5-)fn3auXsCSdyLmSK$jy!ZV*ETP0wDq;cwzj@fGH}tw&fw2^cbzv6r+-RciX5YlF^64M8_-(!#`GK^R8(m&}EPhx+7}8JiA)n9{{!tgcc`u(M zgW@!82C?Fp|Ih6jiYtq5z z%wLpk_$^=S?Imsg%^cS}4Xg6SkD1)e6B#ntau`$bx{(3t1SY}M_lQoCUXJ1uu+d$n z6JA}U9~zR}gm}~>@Bz{Sm*Nv{8S)p|E4kH;+*=Od@r9PiCuu^L<+e@oH+X8f#%8j5 zPx72^75@vbf6*PJS5}G4CZgB;|B0`ErTtHT``>PBmp58lS!zdKM@zU1@V1d!v!KMf zDM#T*0KmR%#1(rWm4B&x1BS-G$GI+Tg1_k`pYT9gW={ih_eoZ7w?4FVb151mjK8k zFIM0U{lG*k88-67Uqi?rt%A)HQ|joXjK8789NBVkHN`j#>^JK;0kZ8301j+FOg-G z?+O$5z&CYES2Nlmex%EsAyX>9PmX+s{cXfhu9_BV4 zBz^1wfV=JF?65um@+aHU`bOJ*u-%?8HrT&&v)z05jduIyd+pNJN?ToAYwzE9r@isx zH`?mjO8eLgFXooWL;AC4UU(Unt>x2O#|L}uji3CuEibLNFMR12&=sfc!QESJ`~K~A zOxt|*)1UK$`{26K-um&6+MS!X+OfB)Sw4(q$eZ?zZML>9wHxoWZbp>1AYZ7;lZ ztvz@BYP)>#p#5wA>R)ItU0QGdxBusV(f-lz{$5)_-u~pD_;UN?XFt~--QI5B{?_-~ z@BjXH+w;#Z=ca)dUVf(i`ZxY``|ur7-C-+UxH9(XRmVMG#63m^RE1g+s-u$LXTc3i0M_x0 z2SgmNIR-L_@G%z6+|wx1Io3;<10?~G6H7YJoID4cvCGWc*J;C7uUu|dE^RWFUTVvW zi#aS~_W8t-2Vmh@#;)idWzHJ6jXO4X+~pWbdK_0~ET@$&=??AEbll)RTC(c#0a$w4+FBU@#P zK5=3kpXeo89=mRL>B^RCd0}($rI1b? 
zd1$z9prpsVXH6Q_Kbz^Kf50S+b}`&Zd53xWx$C8l>&Dz~{}2DUZ<6T6f>WTX(lb!$ z1<0EWPL}zDr#&8z{}&Wz1jg`;Wvm2oSjh@)uLNunhnoi17fBR^2&%XhDhIUYBr?L4 z6dN@p+mIBQgj0}Aggs5^vq3OAi%SuN5@N4M410)o^5_XW8%P8m!AHi!6FV_ozr|Ax zf-annw-$g%Zbjl3hj1=Fc$%>U65&ji3V6Vhj%=n4%Dl}CGxFpYEDpe=P#EKfB~yQO z@-vM5ET1@|@WLysBxc|(f6XA7bo${P{F}2uQpX;m$kpB#PN+kk=P0LWOvm$N@su<+ zJOaCwR_n<+qHGFt3U7laKCM(ZYf8U#bs3N&Ixc9Wj1r8J5I+1Ri12`RHv?zgJQ%Jq z%m))#Ui`&SyycD#xFh8kS3VOy8|NDgN>$NaWe&~4N@7f9JSjuRS^Vid!0MML+I%xd zB?_#mD(%$OB>gy06T0Qk$kfIRrwt}a_+ZxcgDD!k!m`rv931A~6KQ%UU{u&_o8dWC z1dJP~fj2NF+=yH8!!@kesW#7sD>NMjGL*ZZBEvd842hAI@<&cArj0Nexp^YL$rqc2 zCD9ufM4>iSQYIOf(|J~tO*;tjjL2@3y}qC}F+8NY6WDn-V{21zLO{%P0yS$48@8M1 zL!^7u$*=+I_$(_<$cZ#PEMLiKH4TJlZ#h#%jhWw+33x=B?kOP0s(aI6PHp0pFnf}V?9sVD3U$SRvO z(x|@0LH5-nQZME?_!XDBS64V#SJrFuCmMt@H~?Wl03S}D9A&fXvGT~XZ_tc#s~78_ zm9QXci5XP=cr!*G+C z_(mCkHt46CflFiFKQqC36U88R22VAXQQm?Zn>cxxa!l#Z4FG9h1y`2%eE=mv3l076BQlGA2(k9T)3ShQezs`C6#tOU9_SP7B5D<<>g6(}XKL=PgLW)QJw) z)%A|>YK&;=qFKCdtYmZ2vY6h(X82>J!8EwpY>{R+z=5p1t1qc=%SBoIgi6XmIB`xk zN8B8JorjEhARId{4nTn)lYTc*YaCtk2H$SZRd@%7;hngAJIHFYuW{n-hS~DUV*B)G zKh-||>d&={zRk>^Z&9l^^S$WQr~Q%gNsmmN14=hly;4uUdL8%j(ki&TP7s#~=-k2z zJn`HAi*(uGmDlnMdb2Q~-?IAY8r*y$$Hg~n)UfS={2!kjV~<)pe3ZAZ9QU0Z9ksok z-L}2sw>KZP-}%iw6A>a=i4{_%&!7>+78bwzZ8hkcz8gWjBFbnLj}lliYD!20YZVGWVOlG-%Rz; z12sk2YMeYauT?|Vp<|#(7;OvEf^X0u{^$mAW8_bGh+BPvpx_pdf5=YYL2lXt(754E zIY?XWKt?OyC2Q)iEZCznCh{jP+?wMO&$H!`4>is$S+2Lckr{ZLae?{bCcX6w2^5#I zStiS@4B#O_UjSt+ro0~^i;lOHUl&)*%<-1j@v6s6 z>mi%m2qCS?u<}G%^9IXFEsED$W{&L~`%y0Swe{r~FZ`6gsIGNlCFA1kAL=7Bnr zj&T)b#=-;_zlz6I-vgJM-d(p5fYifx@=|}1i+opxj)YV0`VKlNvv{7>6Z{{<_++KyYlyphgozv@@x#NwZ|p%%6+ z6>&9ZB3_)9l{U5rPy7Un2Pl?2*tO(*)bAzRZ+oa^2|n4L$N^bdNPh%~_3Gpa9OhUu zM;_{a%4{1`>ii?x|K85}LyT$0duOcu@M~Q^QRUHm9HsvQ(R!FA)m|h=2GDD|9KE^Lz+Nx31x2+>4v zWmlQ&GMu~!kq!fV9=sTI47aDRHKz0Fhba+|4n5)tfrZoZ1M9(M^@GojNpedSdN7Bj z(3Rd^W}j~!t|33BpSHa(&Mmdar^%o__U1;$(+7|Cqff6~dm-byz3rWL@^Gi!dH0=m z>-{_J>gIa;^e2C=?bGgm=O6qodiP1YytPh0bJ+He5AwF=GcUZ__8G^X?C-X_ciwC7 zz4r#=*Dtp#AOCoJc<(;r+3#k2`{Ii)v=?7}C0Fvl^V*NwTR-^`V+h}(XDm#4FVb&p z=DBSR0095=Nklb@ric%@|Cvx;6Z!u zt+#VP_}Qz^X0zz_?R)L*x9()4egR(Gz5Af;?(gRYEk+^ve!b(9hx_!sEAwq-=|a1* zvD|*?7eCRy`fI=3-g@UJ?ZLeV&{N;<&uuL5&RYbwA8lvO;5=jRV2`S~*w)v)$&;}Y z_3xbW{{4sThi~3$@4Ua;9@AIe-#uy%_9+pt-n4vl#`xsITJ`(Ob8Y2pp?&>ppKt%l zfB&zvyYIi({=@(M-){fE-+7~5Upi~Q`t>ihuYCDy{oVic@3(vR56vR;R}UvB$WeQ6Q+?>I2l60$ABg}Mdb~y9 zmw)*y88hCxd9&?qZ?{$WvbVoYy}jOUyni!q{wrs;jbrB{N8bK4*A5x`{h4q4`L_Df z^X>5V?X2Iq!RpvKeS@-^KWo%yUFC0B&CIj^%02V<(#ym4#%&|Fy$qYUVGP4jSN8rR z%_W957rj#H3$^W!zn~D;NUz_7A7+i4ni}(zIp2>+Z{e){qn>9n_+yw!|L24C2##bm z#TzS9VPXS-v0S6S7*^BWq`{8IZYMax7t&kYiXmGbtbBElZr4Ma^r`U}-s7UHjO+c7 z`yrx+{}V85ehhb<*GX2TM z@mjtizt%R^w-`6l-k7(0aMaramKZzE(HFVzxZvE3{4;h&H<+iN%*e}XS>vG&iSuxN z*1?7Cf7usRr1(plB7ey|_3#ZIQW7o*f40ZR-gK2v@~gRAp21=MfM(3)?O^p8>{_eH z8oSynY%EzQ+0#}UjbV;;+)p%j=Y)9B*}gRA4SvgQ*;`f8S5#Qtq-DL>7VPEddlk) zwM>aPZwAaT`iC86;ujC*yl%jhXcw|>X1q7%e*1s$m%a(miy>aYVR9)s`9r{K!N~%{ zfcTw2fRV6FL{xNC$cPht`77SMtHKLrSf?G?3?hjQE`Q>601Pdrcc5dK6+IfR_#+_Q zRe=qdic{bb@Wjo+8BLt?1%x1zCoSQ=39DfECQU8cDWpaSqY#Bk#PF?@n7N3^Q@_|r=e8AyD?0(m7wA+zt26)KO2c$?umVt&%U*r zYc%Gu9g+s8i^rp(iB>0=z#(gtt9W|3l4l4D)X>YA`YqS^=~KzvXiWSg z7Xp-$5Fn2QY&HoT5zuOwjela~JMmg^Phbrb&gbG+_}2ZyOucevHvQBHGvTRtEP`Rw zp$q1d?4eBz<~#7t<~23SLmGBE29+EM>Fom@?|14&3xiYN8%zQ zz5Y79JeeBf3x3F1FBQS%y6_?f(;AZYY`P9gN`4|;33JiKXY+AU{;~QXadsj=r7cC!M{92~hU2YebRw0E!$LiI#vU;t}tzM@t)`6*k z!^yF_OrflN<&{^2xlrUH&0{y*)u9e*EuRB|{%l8|Aj;%{Dz9C2px~y!<0nV)qda>D zU%IXaGH%8{VUXd%Ed!mClUyg{hJn|dc^&LAX#z+3tTCcPMRy8r2S(CogR~)g81aF^GY!mc81tvS@(3w*JIhu7?LA!jVkfqA~C>v#v}psNOis 
zYQR@b60gZ`24)A^4y<&OTS=$JLLxXb+Xa*p~Jmpzfz#~M3L(n{uRR{Cj^ z)gND|O_P=krc#F?PCe0U+2m2U=gEESTiL>PhT4exq$pz@TrF5dcCwOZ_@E(mu4z&CZQ z>&TP!h?~CFzTf_qa!Z%8?S`^%V7Y*ox`pm+=qQBxsjJAoVZ_sGXLxe*#5We6;5x=o zUTh=w#dYYQ4kEx3(nOx5C4w#5YELybAQ-?LtO%?CrXwH!Y(It*RGa^#mH*O}y0vY< zC+p69gp_Xl84oDK`11y{vKma?fLAlxtn^5`&yp*wvYv$|o^n$ib!)b&+Z50CX#UD{ z=DW&)q62*ZlJea9*?4#~+12p5jzz|PZ#&Pi5x;tf@R&Q|-uf8LF4)|C$ z>o7zWkAy$;O*PBI7*VFWdGEm!X^gDEBV9i!;k^6!NSU?Va1hsgJ$N74aO_H0bQtoM z&3|Y$K4U%pYU8_j5)19tf67jML3waYc#X{D+IpgGOXO1dGI=uzxpy66odMJ9RFls| zRUjyV5if~Srb!-_V_MfWRxKEf?uZVZ9#SHWI!KvU@8qyj*KLkVQx=;v>C}C;wY-sz zoXH1`w{xkNU2GHm+WK<4a^-Tne&tGAUtev<$b;0~y?3uY`|PvextPt6qr;snR&K8Q zNigUHr}Zc^1tj8Lsl-{x28c7eIfQt;1erd&V&y%*h_o)5l z-}pw`-@8wH-)YB(`>6|?Yj}}*Dm*zL1x>_Y$+Q0Xfj(*TApzFT)J*5yJ-X9Xi8Mv; zNHo@T#@fnQhnWDR(3H;PmfiSR@OFQJbog$vwi!B&G{hfiDS9g2u%6BggdhG%y3$jH ztQ-B)V#Z|t*5Oko6Z=1tEGF`(IUh0joW1bh&^;hUHZ}qNolBei0{|~Is zXnXl5hX@FK3|t*Hm>TEdF{2>3RbS#X-7v#H<4TwHts6QjY!V-*hfTsUdJ~45gXX&Y z^s=6dA9nBqlQxiWJ?f9{@2+umOFT1g|zP64kFycvDS>I?2n``ay@nJhYK4?qmRnyEZF16*$SKFLpLG-oj zJFc@h-nw-CdN$mjwBvU8;C|b=eY5TF?zLsczn}QnEA8}Pr+w$2ev7g4gLe7yrFLcG zQro(^)z&w++KV6iMAi?zDd6DYt#;?`?Y6PC(SG4i{K@wC@Nv8G#_R3%H-1FFwAeoL zxzDvlXubLNJMHFsZ=%N!vdOde=zwv{a=Ux~F7(W`kG=FfW0BSNN8kBDuJX5wxOVMo zyL$O@d-nRZ_UTW5iZnaz*3Gxt{rmUYGGp}h%bOXC++Ym7e^_f%_a5GFckXYuL-juM zxFhx^mL~@?ZPS_NTu7r`pZe z-f7?d!`Ip${_u_VbFV&|gASLUx!hiO{>AotzxVs?cYg0%k?|{6SE*Y@EA(lMmXwgp zEfpN+S7*wO?f0Uec|t-NlfbU_>L)(ge*M?Kp3VCQ-Ue}U(4L$gw(aeQ?H%fAXJ@w^ zFgJ0N)s0vCXKy~3V{T@D_8WisFSWH7pKr(1-5K*($B_Btg+fRCc^#|YINQ~*GsgC1 zL$n2*OE+zYWjYQ)5;hBq>H0_%g->_`M*42#Nldw;S9%KDj5(ja+U3(_Ja5j&e>B{z zk+0ERll;!pZ*(v{rFY_zSN8u5npkKsIyk!#E^fCoW`cGS;h2xW8Q3C49W;5E8pi>H zx1(PaWYC{F!R^JGWJq`&XjBf0KG8UxsqtEEy4xme9OS$kqpNJA%lOCEe8_p8Z;pGo z#X|tzc5$g6p!cxQ+WJykU1qFo{g7vU9*MHzdSgiTAVGM{nDvM;kMxoQZNxE@Wxr6L z5^#JAirj2VqcWLek~8Rat=zeihmSm9Y0*85lN)fQ6`X#Cz!y=pX5tHe!fEpoG=GkB z^Aa6&g`FhKnw90366+Y+Ge4$V@sR)G(kgS0rM9xX-d5IDbCtd~3wj8^ej_*Kfdlmv zzBxX&ZhX-$xQZ>2rxGa%s!?NP8a^8)4gS`iX%bXg6NkVW%O}i78%ax>shnN4FJ@ew>vtg@%F{M&;f0k(@S`e6g|?Od^&E!W0R>yxx_Kkr;YWGGh|a@ z&p}E2(?-~*!h7ne8F*&lLIeLJ&z@6f{;GAOEA2s1Ol zH`-^BVzUNd=DMVnj0_S?$4@4Mp$B5QE%ul*vfG=1A{Y_kj`5YxdSnz`^w9gtBNs>wN zq|(US8N~=4wb)9RDvx1pE$Bkwg`>D${VmJoS=Ezx3Da6ua0P$jb&7A&ll*!??eJZE zp7TV$3LSJOf26uDWiDhS1FR1;ev1C zTFyKUnhBcWU&)yOxRF7;-4t()H!w(QOmF$~RBp=HpJS7Z5ZsYxnVGy|^kdY1apVf2 zkrR2))5jVl4}pa@d0SULfXdt}9~0oz&j(GX#i3*6OuBfgj1G>oP{Fe{Xi|@Qlu2`P z@JATwoW$7?MLy-De95zNhCI2@gS8`32D2y>o!}&aJgf$@u;WdBoqjCT6kg>*8I39W zGl7y9sk=}rbo^S=Mnr~D=Bl94HMv4Qd6l)a73;4SoH*xjbYl{!t*A;>xXstLW4da` zWpG2*c;>+cugSB9{7oaDgBV_04>0J-g+=rexGyfyMsVl+hW~PVJiprJm$vAR3iI&yT5c)bO$stjcdNan709a zqbmmh4jZ!=(vcQ7gNStQ-~^uo2~R0{2;h|X(_`QI@a-Y5FROzD4q7q@BD;K(lOasK z$yJNc9;~X(d&8s=>T$>7s>b->_b@^7^i(570Y<-XpVgaEq#>U?ce;X~!IOP}-<5T+ zQtt%4h3RDkEnxN~rari_94u z`PB?Jl@EyR3x9%pSX?paDj$ZKDV|9lZt&so95=x|uaQ4EGXaPp!lr?16XBPn28g#m zn+98NyNvwnMg&cL0}@_^1~Cu{}m7K6cYU1xEbB(p>x6m%q~XADyS8BKwW(3 z5n1|wS()*_&saCE<}7o@1<))m{w=@b1aakH!~!Eq;`C=@lr);WA;|b4S2+{!$kPq7 z#ifJY)olHZtv z*2U#4KrXIsFyUNj^BxFTSZNoTlxD*jy2E+-0J}g$zfHL_uP^=4(js?I(D)Iq58{=5{oQ>5SbS6%l zj>vZ9+(%g_K@3PqPirVQ?JnN5(;So`KR5H!Ccx`v`vPMx7lqt7&BniVM!f0lKRxtw zu^$pBBW}wDO}2B3={Se{(kH-F$6FaIJ1*ug{gc1Smj{8hd{48t=g?*~)0P2s(E@33E ztR|+XF>VIOtT9BMrj{MIz=ydkJen=zVDxMj1J$|G6xk#V?MxmXo*uSGdq=4UucY@t zfZy}X+eRx;OS-j9^~*vQ;><%Fj~;E)*7riYB49h&TxAY{&PrbbK2Xqi!1FK`x~TIp zZq6qQc(9$%&!Z2BuXUEHA9<96JhO_^k_+I{y3r39`tk%h+dtUP zcTW#Iv_iu4S-~$AjuxsebfbgdU35#Y|Mc6+sPfl^(rArt+Ck_hCb|el>kqsh5ZRu2 zkQ=&zGdKcU<=0pz!Y2RlC86a9w93681!4(Y)}L^*mZqRZXGl-s%qFWCXn%fM#J4=7 
z*ZB*-h3)02dQ~6Nu9LaEs(Mq_9KQe(_~aSh8I4bikhvKb+TfW-RKE7rpKs5+aGf#I zg|@e|9bQUb^rKWnCrUg1amP`PYEz^B#8A&zkJe)!{;Euba%kUP*avcn&T4GWioaq$MPHA@f4XKG||(- z3_Ea`M}Ia8>5T9Y6FSQ;PxM*O2^#tJdYf7e?gXaNXTCMU1J}b-?wRM0GLs4(aHF8J z&%=+ampY9)55XONLU(s3G#7nC-jW_SYgdVNOjU8BEBV%(PakMuR1e{2B8e@1@j;78 zU)1$BXqFb27^kl0Kwmy##dvUTW4X;SMmX3$h>R{W9$MJA)aKSNk(NHme)8fXbE4&T z>B{xCxwX;e7(*OCI&3?)Z?&86+-Y0O%Q>Wm(r&-^JHOlBdFPF`!q_U;F*B~d^31dC zrH_B2Ev{c_yASWRcYpGe_TwLZzwI49Zr}KgZ?vV&t@hyV?e?P|{6TyF-J9)mpZ#?E z)EBaxBK^Z+f``!%+G(eedjwrXn*jnx9P9E z>1eTCyL!33^uqJ);~#&yJ#+1Pd-Lsg+fQD5J#XY+A_XdRjQ;%}zR|w-gNN<@-YN4G#^5U(?bf~h zsDAtUBgO^x8SC`8f9F4I|JT3spSLr{3(u17ul}{a z+`jy!&$pla;0Nu${YT$wufOwl`|4M})HXKPBSF`%Uv2;RpZs3?gYUde-`d*NrR9vB z>~DQ%IS1l^^NRC2B3o9DK+Mvc!FKm| zlcaj0<23_d)Y~vhRDYh}#EsV<=3Khb|7AKJP>era>O|OEVri}WK4S44v*2jC&v|r4))W( zuBRUxj{ZDi9DVG1HgiVX(l}7y`noTE91-T){kXO73|*1+ar}O=&~+%oU0?Esa}WH6 zcfHTbSb_TV_Bd}wTwd{om$kes;DLv=jjg;m;oJ0D!<@g-0a({Me zeamOdk852yes(VUrd3u%x4hzu z9MHLX8UJcqS@kA|jp$qF?D_O0Izl+(4giGV2yzM) zf!6OJzXtkFda`f;K@6GlK479JIB7wGJ$T+|5&)|2%}VZcq7Q+ZyVSFf_E@JZ)-SNq0H8QeXq~-RT7<=_?2Q$xlAVwAx^OF z#wc)>$$&{9#&Q!&eI$w21_aM)ao<17Umdiv60`xN#_(GwS#UtfSw}9+j{2>77pI8~ z?{zD#_&dG9uAeyTA!<`ozmpD9JS4VZn#wd~#2u_XkK%{kG`bLhRTz{_A1p*KcNHj} z&@j>`zZv5~S+W67va6MhVs z%A?9tKsm@D3<{+bVgNCO@sO)-{7V^(6dqUHk~G_o6T17=$1(=ZGL zuX;Bf;iSu?(C7Y};)r>YZ^eaff|W=Y;*IdMt`ZSBBfQ|$C?gMI6K6g}llq`|1GIFf zlLkaudb(kz6QVLHk}he5tqK0rnDco9+$0G947%tnq+w8-wA5St;H`8#Cy{0JS)-?{ zdZ+mAMi9hhhBil4c9#LiW8zMA{od`|GG*|Duh-JAxB11ZSSBU&8{k?$pV`JN92}=bXw|W!b9wUg}1g-(z{XO;!8G=qAN)W%d&VbEYI>d`juw~{pzyl zvmPc=4~QfW2+77)?;PDc@Qo}dRn}~DW!$1If0m>7CAlHSD8?m1MlXBFL0yhN)g5_E z#=#T*s(5ju-}uX-m-#cvatv+~%i*AYvB&B zx~v5S@`NvzM~w1a3kX1xwg5{l>sQVSQdgKtDb9MX7UE#1LlB;BEAbVcH2Jq~`R)A3 zv*ZY#KeOl8{C((hFtz;fFV1={Ih!t?orhJXQooTY@fWoT;-}QDz%xAMe_Dp>yN6CQ zvfAS?Ff$iW~@RLY`Q|J-j;lo_Rbq&yusS3pC_V`J<4x>r* zh_kkoLHRUb3J*=JvciAPhOfFGv_aYBr2N39o+_^F)6Y{I-N9Hxn90K9D0gyCRHwJo zo%WNM{8sypx0`bhn5Umdix_yT{v)g6PT`R0KE@4M%4nxY*9nlWV?p1fwajk%FEP=1 zeDSnB+}>#iJ4fx0{^)z{JKy^*6Qip%S5Fo%GQmJsxH;kG3T1UdnGHbnjrltkGyl9f zD~x)}{2cG#oSo7hdDf~2yf)IE^lXBKL|x)j>sn~!c>=A=5hX=>5AbJSmoW9`Y4 z!DrLgNpSUHZhm26DNg%a(+cC;;CkG^@wsj?fL6xba#wnWOxo8iP!82s@ZfPXJ#z!g zz5qOat3f>_zPvr1{=ML{5sY7+MRe>ojY}darawX1IjxQXPbTM#tGpV0VQHcHo&8*2 z$|TS4+^frd1Lx8HZreL}l(~V6P#zvQK02w5d#b@rBl}M`t}=o56!|8TeI~*iTbJk$ z=HZk5h8vd0ZGRUz*xhc2d;6iyiSXsk>zO2bW5LqmYV$jta|=w4J*?o{v>qHF-yA|t zpN6%FGA$NF;cFz2N99*}a3hs42tC@{ZKsd6k<%k^9JfXA`Nsaed-vMG;ck2D-re@2 zAH3fFg}?S!+b@0P%WdEC&7C1DE`T7%%ajv02i)L=8)M-kbuo9*@dtIU?4Y|mps}#H z)Gkt9?tD(9dzz2Z=sAD>B(WEKsIdeKTa`0@z^iMDWXW4v$3z!WpUS<;#4;*}>JBIl+`^)I!COsyYkVQ&wNr3N zlza}~LtvFho#bYvZvY>GH-}20v&Na|q&$;ip8V7Hp#fS!6?~ajQ$O`)zt1&JPn|^u z%*)Y|a-uF3g^VbhjW>BhI_tCgwMwIou`K%KZN{_YQEL$eqp=K-6naaimK{b}@ohl@ z11zm)vy9T+$|I%r-i?57@F)C=F2=i4*S zKZoo{b96oOSOoXAjP{l#98YQhXinT+qx(rOwWlNX*LKjCFzNPd(S zzXeTUlf3vy`xg&?X=9P0;){#b>e(=ef3aE5k$bb_IpaSwNNF@xAG$?L<(GN3X^?}& zf;RCfdmxHV>OS!R_=c5ltvP0R@`QehvB;&%tL;;tekFMvA0D)&H4j_YVJ`JqI46A| zag*C-s?EGxzceI_d_xc6akT>C>BeW}C~=l6u+Tq&$M&|XuJL)Khnw+d9!o@6fyr1t zFlCcR_+YH@)415+Hzq&PHwZ9rJS2^pmpEwa5Sg<1gpX&}2*iUIc*oHh`}K0`t$3kL zgCE{hj7~3q9*qN$@IK=X{s~uvYBj^{1^XyN`j0g7D)9raR(vzhou5L5jzPP$83ugC z^?WAzr9sUY)17z2W4SVdV$MBa>bYP)%oqX8IpH(SD93O$#`7zWeKKP!Gf8^=N`Ls0 z2Y)cu0dPrQ_(~qpp7u{Z2X4UQDwo5bc;WSRJm!s6|7vV+(i459u^rf-+IOHIyy3=i zw5g8{z47WWZ?uYej-L>?>)qGetvBCp zPiW&;uU=|%rw8rrx87{G-+#Y-{FN8mCh%7-UvAf5c)o31z1G&5L%mPj(f(e$f9sw0 z;QoVl`RX(6vy5MFzIPM-z0=-(^Nn`-%GLI%pZ^>xZL$6Mdp~MB+xIBpA~c`0z5T=X z@F8Pk^uPN3l}~;upZfaNcfQw7&_6Fc|9sopywt8=-fGW2^9;IZvmGDoxA)(Bhw;rp 
z-V$|l>(TB(yK(bqZ?te`&e>!Y_ZKeez?^xBuh+?%!+o?%!$8 zZmzVA&AIm1{^nnA&une9|M!3S-?o47kAK*fBuW@+OsdV zx88iSz5CYd@bs{)Gl#l=??HR}?YA-(a%^EAVSXO|+}?iFR^a*4(t7)?fBvtwg=^Q^ zqg(H_vqR>aIV_I^+VLW9*=r)N=q%ZaEIX!9wo8vYwkTQ8{E>KNI&H@`X(PAYM2A$1 z8m=@|f2Y-yTWuIS(K{X9W~8pldVaOPiZD$0vKeP~f9xdRVR#1Km=7~qSMB~X^%NBp z%g@CBA-1>wNxqiDGz3?O&-@R)P>JFukD+HL^h2xVo4`pU{*HcP1lR4lu%4_-$JK_J z-@qYo#h00J75wBkOs7W_-D{bwrxYvo*n=%=6{2J$@>_Er*HawVXO!#u17oG!ROJf~ zv~4$uJ>i(&Xc!&jmM9 z9E|X+(sj|!(;il$Hk-oO(8`1mCw2NNi<^0&w4m^`v+vP^rW^G4I48hX$x*`R(Rey=CqK zt&0Zbo;l%lodZ0zxKM8=*s`)w@4zV)=GBEZLdzf?jkL%&g=!&rFZSzNG#g4#4)F;{E=&bf{?S?JA#~j5`ms zo)LN;a_T#H@gqpJxI~zf?g;*n70?kLfrD=02*6?t>qh><#4M27SbfuIl*!~S0Hi@l zFVA_~CUNF5aPqADL9Y!li+R?&br&V4t&*pq0`E7L2X70HJk|A?@<>nTmw4<3{PRS6 zQ7;Yfq`M>Ske@965L$dSY}T#CvG6`Dv7pMWAj)%?2QK-HFEHeqhHdUzaM;ekQF(Oy z#F=jy;rWj(#Xs^%Cs!Q7n1VVIPJEO>e+D6)?$m4Yg|}(Amd!`yR1f%*2%NZzH*CRG z{_q$sS*crH@R^J}d~`5Ap*=bQcMsB%+0gOMb_ZwHE!kM8buHCYgmcl;Y@=jy_K^vUDm#|6LDB_Eh@WRXO9l3z)L z={$SlH@V^28T6kXdpdGTzpnKr1=WY37o8vs`mso2T8jq(>~jv8IJ!}Bdg9xhPI}-A zG%)+1E*SVRIJ&@qp)IabUh8ZLoemu*;+1x3Q`cKcp1~hU?DCrSZCT7bbwE3cTxn@* z7oc+vTEZs6A|v9I2lCCn$FxqqZG*`$u*Dl?44&p&XUd&?lK$?e#FQLV!kaP~@hqR^ zv2rDB*(dbhqfZlG=}zm@^x`qju%bhq6>w>YelSmI5|?h#C~qo{;lkfu&&gj|6V$xK zn|#fWC(9gNMG$#dhFTa>hAi*EIg7t&NSgQo1I^%H4q_z_pLI3(L$irGj+EDY3?DXx zfKNJ;0jB7NA>F1`#OAO~^ruff&yj3m%#Xm7WttZfXHFAa{MQmE@6;`%u@6e$1y3Dw zP55NLJE4gX_$;2FHw+vhs{*<^wTaSiJ%j(EdQM(MSI_cYW83bs9GR&w5|@JoqKEZ7 zKBE5eDGlPQ%*r9bA*T43?6NIS?gDx_l0fzy$$KZ2( z$=IXo{Y4M_V0}y4?>fHu+MDe^{!jj6u=x$)<#uxNq-|_C$-CZ;_K(_w`(C#W4Hs>H zmLELnANZHpY&6P4b$X3iM*Z1G_$^}L&isa(lP1S&mXG>#=o5W{-v#K55lNQ`B=s`h zRyMpANqV0gCfTlCi1Cfl(pz#Z{e%4m{k8Jy5ZxFjNz751+xTWZ8}!uK z5|c;!#Wg04&pdl2lX~BbR#tM|9^?M~?Y;Ks!ESqaZ-?Al+gQ8YHaD(C1{M~8h3E4N z>!?G@OJ0t}9Zxxrf(YnGwlX$VF91%RSHE3#;%2mye5=9h-km?mL%-dALo?^4%{ z2%JaZ`t8!)ormr2{X6ZS{&(MQU;5&w+pqt|*Ymc*{?Q)w;-aN>Nm-F$^OT1a2S4=r zc>?l|E`iU!`8%I6hzB^8TWSXya@c}r>%+xMuLZ0*f%;xfSSO4xP9IZWV5F|m8j3$L z#92n@7>5oo+KwF~T2TC@_n84KDhJ1#iyGiP&hMb2v=gV8>kv(Wp zj{xRkY1)o;OdaHEaLZ03a%x7>l=;aS@VN58t5_EnHo)K7$zyL#NWn-49*XJ#k-S}R zO!#6u6e%!n*g9UIUu}<(SMty17VX2f=bLKs&Nm~f|1+KZwXG>9wht`2#zQ8)ruRpdvX(jb2xCzGjBYy9$lzbVWjYSRtvt#On#^J`E?b9pmrP(_508ZWzR!_m)$7e^-V7*lx)SQ;` z!Ydcz)FWQo?%{$O!}(UaoBzxnkL3?Ch8%c^!g;KVx>`w-F9yso#{#B@t|w>I6+CoY^w_p&-CD;Mv|NDewO6qi29_+i;F95X=SagZEm)Wl}peupT#?I`oM^@yuH>t=Bi{I z>*GRsJ8q}cRNFAExRMwb`W-jRTT}F~;F0f1$ggk7rFIReal1OV##oY^bIG^)@f>T~S$yV!mCu8I;WYB12*!$2T@m(1nyhwp?3P1TotjLCD&2MbT9-@JN0qYf@ajvZf! 
z)AWNT^A>-z>L02yXPbY* zwXM;Q?d=`3w|?|`JKQ^JpZUxuvO)FMTW_?RH{Q-|E*@~ZclTa<_>l4M!`-$*ANX^x ze4;I`uC;G77Jl!>{q%Dio0r-%&s=XGd;WTR_JzxBd3hcj@3e=EnU|L-lSN^CdF#&Y zcJtQFwg>L*oxQfh*!qBKm|I~?M}K|BSn=UQbRvD%rOkzQX=|f>?TepjpZM6z?beOg z+WYUk4PSRt$M#W&#|Pk|AEpo4XKv`2dU0+A`D4zpyqVhsUVGzK`@xTIwcGcfv=!zj z*REY@TN{_#jXMw8gI&f^^r`jo%?0`|Z&NsJo6z&h&%NCCZa!?k{}28?RYjf7pSD+D zdA|LtfAe2xbEk*xKlnfVo%RO2nPubK0BZqB^8Y->qmNe`m;}eX_Rz961*Pc4cF=z4*cl?dL!H*|xoX*nae#@3y)-GSU-2S0e~hwbb@*z0bI1FB^^S z6*<*)&7t}-`=-)Ejsa|{>7!|fdD-WJhw5!J^f}O3a{=0+?Fiqrtq!-^UG-s>wc6ci zf9i>9yTrx{OB+gmh|PMLu*1U0p0?IWs{KwaQM1-|YChtMCB3wSZk}VR|HxQH*Ld1_ z>1~k=INNsDX5hz{&vfb!$Dl)pSkXQI=%j$_;=v8hSn_d0FFf?wc)Y!j zOSw$1-VUF@Ctre-c;qT7owPnq>O=WTed({hE^+2JsWZ|H7<{51`m^fMvdJIYwCiUY zFpct)A<`6BKV#wAyzs8CyT&U&ykXEem9#nb^lf<85|&n$GoSac^hL+X%AkjFvUbn7 z)o~j0zr}?W>{44)5S)hRZk7H)+?Gx30If)y=lKwt)`0)YfRrYbz_X?F(($ zIpf?JdZ5Oo7b&~*cIVRBzq5qWP8nQ}(`MZV-p|2jp3pn;LEbPEw%zfhF3O86(7n0} zU9`KERptwf!8SHG+T}~$VsNEh!o76qT3&3qa^-4U-+(^o_YL~h)%CW#MmpvZeoude zHo6K=oM+eP0LY(theN}<)eZhTf1wdOes-R&9ArGi9K!Kp`mEHe4t1+w%sG#=?JyR0 z?qfK0VOb(K6L~6|y=Kbqp{3|k7Q-^|NBB|8Q@uf&;h%Hx;GkK&gr7P=l6VX3q&9U6cqD^hc;%6mqaw8^(1~rY6qDMGpd*jVgV(c0?A@iY$Dlhc3&h!lh%$ zWF8}}nQTJ0ZWf(-oOn~8VWMa%PP*k%24mru1=avZeF!Zq&`Dnd`!eCT!WJL{x+7fh zn9fL^tjF-vj?!mT8Xy;GJT9^kZ1yxv6chq)$#MuF^(dzDR}R8XGfN#MzSqA7eog!_ zAqq=*Q;@;~1%Z>&2>c`o_@FC6eF9Y-hK zzvkE4+~PW&sVD8O&{j90VvRUAaK?bo_EGr4BghQh#A?i0%GGx+;?x4BtCL=={Op z!mw}Pl*}fpTwhe*Evh)v1qblu*LCw99OS8-5>ykRl!bDff;YDTxcOvy(%R>^my?A}>lqf>j%^yD%W5Kr z3rrT(Ay!${5dL_o{B=MhJ}ffmgS_D%YRIcvMM|4`#syj7B0v0rpzl@I)Sr1fMoF+! z&%jyG#7@lcKcW83Yhq@AMt;dNA(~GANRzf)w#;y?S^Tu$ihDYKxMgZFL_E`SR(Q3S zSyOO5Px&B0uOqT# zfge2xSahI^A!1O7LX#T|a4GnZ&Wsh&hl(kM)O9mDduKI>FG%@(xFe#pfUhuo9~P3|`L{icYYyGCVqhKaZQAnpj?2&No)~_jjnWi|xBV z_(8i!pZ(=u`D(jz?P}XU@Vd}5CgsJu%7eN!|IUBkcu)o+6McMAn^4ZDPHMw9{ipq- zvY-V<+9^}LG2uAI2{E}?KFS%H&H*;tD1O_6<9l!yF6G#^)pfjK)L6z1)}wWf%t&M2 zYKA@_Lud3Uu$Cd;nkG+W(h?{S+VO^49e$%-L7%kdP=>fjx3J(w)=1-c3_e*1@MetS z+zJ0Uyy60S_@wQ8s&HJ(5*&`xEHm^wvF;9^J4_f+1 zRLVeh&09%4P@4lWUan5v-F?`$ceZo=vvuM1ty`O$^v9R7 z@#Dg)hq=7_@MfYl3NE@br4Nb!pySJSo4f7AECgwDx)CLlPRy#0ItZPIKVGI zv$larxsx>Mf{VJ!OBMs4kv1UVEU!sDC?J26s(3GYbGQDx#6_P~`|?pZ$d6Zg>bZ<% z(N=wnQ#0gkjJJIj-vni+f0&9ZotJ)$H04g^(t7vSnF~~G4xXV;?%%)FZod0Y``Xuk zwXJw~4E^M%X%6-ef$`u~>5Fy>U7>x{i@4*<^9S6_bM$w9S0lq%0R1y@F(duc#_*ZA zo`2_Nc{K^=dCFLf{?4$013D%$Wz>Y{y-u{$S5+!*ukXUGl$3wywNadURyK|IISwBC zpHbiZjC3DqM#5p9exBtSu|1>V{?zozyX0*GZy95T+vx)~FcW%*Tl}mtA!7uGUH$n3 zcEbOHk^Z<(;Z^?2bUJi|d78da{~sDJu7P(L%TaFnFxxYZHKiH#Rz`QwJHpOC+Y6No zssS=o+}!TR|R7j}*h_cGqJr#d)z)b{uGqr3c!nM&DD z0xY`8?}5X)`b_09#c%`M{-80ZIo;p)>VUjafBUVUFj?AZ*Eg>)7&&YY?%Zx?hvbPa z^euccYr7A3+oo^DKKop*)%U>Y31ijw-uy{Beze=SK%Wg!OfNJ(ssYFI`$` z8ym~*#g|@gw{P4=_w0wqdpkSK4OTN>xO3-jd$_ZUUY~16JRcrCZu>{a;h~%Gd(i2o z-{s32ZISl+@#n9$Z+!hL^h5XC!w2_rvx(z4*OvD8k4aDeN7{@1kfvi={d4HQNAUKI zcki?}-oBN-W^H}0J@eeP<`v`bLf;Pkn;Ylp8>uV(_9>S(7V}o|MdlaN zda?cKZ+xYF{ZIT_`^jr>w*S+=|99I(c=p_t%kXZqZQt8QJuJ1m4|m#+Uccc!XZCw@ zD-U|tgO{p#hMYAHATK{#rcU>>1?eLQKW+0?dGqE>4{Tk(a;06n{w#8G)_(H(+iedX z)S~c$UAfvm_VO$3SAO$1A}{+79t75Fx*cCR z-W9es=6H7eDL;|xYRB|{@Y^tTP4!Ln48cT0OV>dLB|I;tjpxFvnd*;v)?{s={`)gO z{h`LGvu65SU{+i`f4KSR^f9CVgRa8YUE6dFWsBqZ14CO<#iU4i#siN((#V5g?8fp~ zUeZ*4=ucgrMKkc4TV+`B!DH~i*4-Qu&;+WyF~pNkjT;RwxH^Zi5z2akHSRpQ`Z;BIZmq?335tYF6__$HRVdi#a`V>{!!l z@V&vxHsg5C&v!Ye)iC|Y_|>%v=VBfVSY3CG!P`B2OMR_vZLFb}R%y?h?b6mNW7oBI z1>JJ>%2wOl*g$Wsv@4g4TWg!l_czyo0f&BX64==A?S1?!g>M6V>sVcH9dP`=MfxjO zuePgKujN*P_^(|d{z|)Y?ON8ZeB*y>>uRp{Ut`Sd8}n=Ht99^UNm~bqJseSCOYp(( z>Mx^L7~j#RRNdxKK6=2H2Cbiop#tDE{7OY&OU_N7v7R+T{rG}eQt?=aVQup@ 
zrWhveS#iN{yI+RZRd|~N0O)!@PoVCfmtJW1fP!^6nL`B?72R=)BVy#^SU(RH!*-*r zcjC&$3^`S9{8NCsaV&0FAucoKxd~2r@l0$$g1hh=k0YJ78y`B&BmW5JtDk z=tY%SKxH0-d?!F*oWz^E4J36?oj}5gSD>@Op{3Qiev=A|`OfK8hW9)sLd$KxS0Bjslepdx$pNIjre|f@c@h z>!g?wRiDW%xSgLa z0aW?6{!DJCDprkyzC4Jom)*kymMNR_<_%29N|r@W2#&uy;tz~KwHT&}a-M`^Od1s9 zxyn!mH}L|Pi2u+~`6miG{JXmp8 zn&ORq;&H^8MW_j*%JuD%v~5F(pZ@WzG9X<@>7f_xnN3Ruz8xf>RT$aA=;j&uH;0sk zBw6sC)ICOi0%6Fo6J(*IOyncax?;qrwBy&#@Ni-}ocIYIHfFZ`sdx2S4@s11Ec1XX zQ!s|@m^;PVt*3V4*qs9o}Z3VPLNV^LOLtgF0ybI>UJz*kjHHyc)l|SK=7T#Lk;!o+nEMUrK`ENRrnFg3FC<}v={!{u$ zuaUwekWQUE6`^2@Yif-&7?kG@Yh7z5kHgp>nU~O^r659!QT2qa^p-V&6@e>dJ_s|l zf$OyUoK_y3M2xh7?Lpm+uygG%rdP^Gn50em{9sI1Hf@yuqElGwW0GLT)eQg0uRmu1 zC=#A0;MUK`qn^tOqpQ&sSMUGGJGT|Z6fSE&o*rMwp2~kbeuR;~k-hv)uqF4HPS1p_ zVTy3VjnH;3Rq(|4M$!};<)*DfbO38Ch1$@LKoF}i+ zo4`9Zghu^Y!1Eb}oN5%bjJ=g_J>c_yB0_%cut73>5nd~H-;z=@!1q~!t30?bl`V^$+Bi*B@=o( zuqNe^n{;B%oWjL1=NzVwD?i-qsLjrjr&{ol&cR1%k>)JCIaw#I^5ce=8$-hQ29Oii z91Bp6;DO`#Y=9vIPSCx^&bJb;Gf7^!xX}EbUi1ho^fLm!akR6$(+;WUrKS0{%%t1R z{{>H$gD>+W%H+)jUY)-f*;sBD;frseQW*H{rZwepb9T{QMAHqkfg2_7wb*klQS2D z>wBeX>Pd^NI#!`BlW$}VEf$$z+<`3GNBicHZP+Urt;_j2$3FoHt^QS$b<-R;a9b9Q z-noj zUs6g|vBEiIqrlmC&xTIdB}H)ZOIlppo;qmg*)iA1<|v_Qx&TH8z!O@y!Id7GOJ{h< z+cA%uuD+dLZ$X(R2Q#2c6If(@Y`VLsW4rTeT4(5~59%gk7p(m820hz{i>_|eozfoW zEj#&nphlipx$-n)I25Hab9#IT?>r#n0aoCxPs^LJAJ!(PEQHZDpU+`S=k#D1<&55k zCN~I#$-=S`F)~8BkxQ6fJW(E@iR99>h+Z~a*klD?b6b?I^=vqRs^?Hhl+e+i>+y9m)hUKlzUg$*s`uGe5yatnYAig;)RT62tsqEnUH$cgdDs&X9N0^Bjr8?2PYwoE$V{UtneOWbgvR=>?hq=7 zm4#}*8tD|LdQ2mqjXQf>hjj*Aci@%jtB$Ka%pZz)CIBCRx{ia05x=gqPUJOpE3Eko zhk>@x(VvyAdM+I};D?)o;vq_&7;ZlBj=9$&b?=zv@#%%Of3TYcHERdH z;SST&2j487@jUC-%);o%zu7!1eu!O)Hi^eeGU1Dj;;8y7Q*QBIfO;&CP*>CYPzTmJG7+_8BMP|V=Rl-}Wvh;D*oCgH^Nut86^ z1v_H{HgM!2{^)jH@=IKAH)B39U_a#Pql> z&koIIO@;}E*hWyadPgsP0pnUy($AmJ@1Wml?|wSq@xe~JwD_bwfBkxUc5AgA z@7!-2=w;s$&6Ug>%k91QZqUvjx0gQlv9|f_^^CFoT+kl*-hAuLc81J&liG`~e5}nc zuebX*Z?>QO_{Z(#kH6fWeeQaD_uX4<`{54xE@U3|*4sDQ?fct|@7y%rXwP1|N}spZ zzW0M4x8o;A*~GG^U*A|^Tz|QJ{9`Y*WyVGCz4tDBbX-H9NV(j|+uz@Bx9<2Ut>bn= zfBk5mavh&F@5gaT%+D3scl!w~d3TL|>es&VrS{C#t@bXo-???GEjbr}_a`S<(i|Rn z%Ov^?oxOq%^hO0Y(e6>+@BZ+O_QN0FZ+oD1^Z(grpJ80J+TNlbZa=8GhwJx8@Oi<$ zhr04y#wGgnXWi_B*B^g&y?yByKi&T1uYIMhQWyXAfAJ66@BYEJ@;(2x)h+68E_6RS zeB9o>b+5hi{@u3kkO5KiprX`on}2Ydug(a^8eYIn|F^N9N)mI7S{7Hx80cyZrP0UwoEBzhxUd8F8ICKKWd?8RMU^i64G&i9)}aMn7QU zjCnH70){pim$u=-`)ukn$MuFoH^AqY(LIb;SMqlJaz4@F`hYJQ%q@C=z{65?2w)zi zy2#wi!`CaT-U71JmRFaeBUiz*jH|9vZ>_Da3Nz}EuzMnQ{yYsF7Z2tRMg3DLi#^ud|&GY59v9U>>o48wfvwsQe zt%q6LaU8y?-&+t?^Bn7;0O<7v2|t0bklVTB4P$Q0=(wFR{>dr&9i8Ev&OO=8{o!@i zn_#-F#0t-$jj$Qbp%_>fggM`;evU}eINr&4m-=7?n|e{d525j3hP>apw9z);n|j?c zSqFkV3jcY@21jXRdCX1H9VM}iG3sx?eD_pk%FiYY);EcHkx+OC0JbKMWi?d~eVEe2 zYl48=p9AZl0pOYpOw&6^NmX<_x)v(<>^8ztOrbUeB)P-yZ-RVM0)7wNN^gkQX*5&&F1EqJ_6Qy7^f4cQFB>MCb0qG*Cke}9%1 z!^WaTm0e3*#}NS%GXBCLA;KC}c_qoz?LWi-S7~Ep0qL@VWVi+DrgFuo+z8G9u!rh; zDnDuIjlDiFbpehnS}4#?#sSX)gx0(iQ0vsX3Z2Fkjsob<3Tw=SieMQ=N(+|!^!5qu zfSR0$7qTY|D>0VB1~_G8;ZM=kX-{ z6d9)6=|;vd-S2`QaV{#=frZX5>*@oz%XExpH8lom2`gEnE~UGeS`pn0ew3N5AC=DN zfJZzna(HKP1b2DNhZ1y}#b<;7-K&FZ-RO?CplnR!NPR$uCr%fX2*iWUz^o(c>2&K& z1--q)Hwv(TbTjI>@|&9T&-%MH^GVe3Jnar6O_&G;En_n9vP|8C6%#5x+A71rW4T-y z1K*Gr+$^qKaPuE~(Kpc6+uYJ+I;X4c;=($V)U}dz<;*hA!o$x=dBE$yHeTBib)5W8 z38%fcljts{ACT`go-Wd;0HHu$zw>G`UXw%9sTb=_tj8zyZDX%wRj1~v9wrAD+9E2D zsorDtu!}iviX&Ag@_VERhlZTvW^gu3`9* zkMR~Al9PrgZ-PoKa~*p$=6o;Jr?em5vw5I;OV zWWrY8<@US6@t+)0CgFVBPTJ*1<+Z>BE*t*Fn-hG`tpcPiDy!TV;G3iiV?yO2{>YN9 zG#UWi!t3VODBQ%?v%dkw*k{_87ha8Rt$1S{l!C((rBM(Q1I}|sxFj-QNe`x7Xh=lT@9a z30cE*+Na??{kbw3ihL`B(uZqFMvT6ZZIR-G{1Ud@QJ%ULNT439?WSlLb*CwS(i!z+ 
zK4}k6^RXLaD)p$ha?aP^u%<5mlx-bfhnLih_pCehm3ooI*6myE5C7=~&b-yj>Z!mv`NTx45U0&p z)5J0!-qt}9w*nnkt|QVlFl#*lb23Pjc@HZ65_^v_||#gW{BU?H?MUjzE>_?$#s8A%f9ur z(2kFM`=7qj{)2SuOw8SYcLBifg8D{_eT;3y>r$n22|Ru)ubHp%1LvzhR$cx9+% zb*yA{BSWhnwbMc?r9?Pzxsc{-$R$~96!8eJ@DeOti%9|M<95!io=iTZPM`UHFuTJyZxl#KjXW?*%d zLpMfCUUeM&-)Jba%#$;XZ7y!B7MR;PN% zD>dhNh`dl=6ghf3Z^QGKZzIDOIU}Fzppxt)Hkl@~tTXsnb)&wN)m5H4*g_m(wpx55 zz#UfH(5`&saT=c6A1F)V5kI)FktH+9bHOgcXDEc`>R&wQ=_voGIMVDQ!cUfZu-Z#PP6eE@R!ph5xh{`Zd=X!6r=pbj5+^Ms!^@lDSI?B1i@w!i1Mlxt1L4dyGC z{njdWbE~bdZ?vV=dSlTsJ!}JC@n64uEEXA7%y4t9d9=u22Vmn`e?vq|WDlB;i)Tu7#VH+)<4+V%G9 zfATBs`ZHGrY>ytfnP-2Z3}49iyv3PAlY^I?yP=OTO3|OYNv6UMhL$7z%mYThcHkX` zu-;Zb)O0>f(|t(#n1Q1|LzlcMep;vc6A?S*TggY^H36<(Ul7RisAmJkHO;^HZ=DU- z6e8<*I?ONy_tdkrJr!1-r}LS@oHrGB9!9_AvbMTlO4E!xfgkxy@*D0)*U2;=&8+{Y zn2z5K+O9e~o{_%v>*RUD_YwcJD?a>=^R3&yzBz5z2KoRG(0kD!DRb3#$pxN??3G*L z1e5=ubJk30FL!E0+GP$DwsZRf*F?sRW9nAtV`uc~7b&Z6YaZ|Jw4H}L?Fr)}H?JI1 zKD>Xg-M@3Y?cTf3aO9PZ@s}7elyS(0Z zb{vmgZ1;Ef+gtD6Xt(b%S3m^mX6d_-LwEv7=yBu`zlVaAp$dP(yvMy2`bdi(NT^WEnh71ZbV$?s^Ue`6()I*6=0(H_RRHdN|dw zk)I6kMnVr}`{@7=u&yqyW}I%@(XNAYV`C%V>fhR|!>&GWY!S9`DdFCBwc&UCX;;4a zu3cFN7c_XFV0B|HoBQg<6~Y#nE4WsX&mJs#3vnHo^<_8pfd?Dh;P=^eZ{MWPr@K6? z>l_bV;f8;W-NT{OrE5oy;b@L;!!Rs%G2u~h{RYvg{ zm4oRCo28xS4xhAvMgL4>a+o2@hL>+R8t1Mh@8arspE9#y$u{{4ThHo1^1m_n@BEMd z`EOFl7gICDB+IHL9_^fDsSI$?AcAgbCDbc^2IUOS-Kc?GSQ{44=0{q?iWtc4O3(qi zSPgYxBr%r7PQ*1Q+ja;wkWdFBnOGtnW5OFmL}&5DbaV{a;HV^-KxMNZzY}GR>9QDQ zxRX>GZLUxiNZgY&ln+GFFVI#KFF}Z-!!ZNl5JLc}l1Nb#J>AgN^Gn*IpHTdk(vGLX zB2>DImhPO41n?`zqR7x z9Wapr7hgtxnnDd2bos!)-yaFlmNS_G61P9tq9Y9A*Tv)+ddOoefEZVNsKTZ*^}q8b zCZrTTvrKUEmHWzB9uraNW{is7e7m*GwSfR^(sV<*JWIy8aMKO9+g_&Rm5j9-<8cD> zG#x2{9?EaJUjBeR1v_iRZ*&e^^suY0pf6BKH>;+L;Gqe7kMoQk!4C-Y%{%L7KY+o)us`{kjOY`p%|%*l~jPh$KAk??-+)Z#z3|DX=j$rBJADn9W# zI2R~-NEgnAKYGC-&1Z)^jCvuh`BGO}7rThTPyWHrzj7rlU9THBUw~!&+zdJ}{YvPk zynW)JKDXcSiqWH^eR#m+qsf=UoMO!4rUN$8i_dg07@1z6yiD$BE2$Rz$<80bs-0Lj zP!n0Nwilfiwkp?!vnMi0b`THkBX|S~&$B$#KXj~k?IUB%EWzR;On8$UG+<^8@U?+g z@;kAWVp2cntkJ^+ez;{LquLlk#7}7-^{K5|$pBFq^9_(NzGBPFx@8^kmKppZbVSFGr`suu zCW#0`hC@Wz;(_Han32Y|Ih$^ZuhP>}r6x9QEdHL@r16+XJ@}m1v^7o7GkI7R@sIR| zm6?bcsinz$Dqw1B@l~-Jje!b(3k$X zAmxoK85ha}u-S9^S31^o$`!o=IzwBylzX^^ZgRs@Txw+(dfdb{L0xqYCLZ&hHYaZ; zQYMgduSjU`D&4yo-KY9(?l9 z38HfBW_hmWrXS3BK)tE#*$-fSt5ljY;e-Bch=Vm`;K`e)T421|UY_}uQWiF$OZvTL z(o^={qO(BVS`PZ&1jJ3*GPc`0xM%uGFj>edJsDn(9Df!?=OhSwEVR@$PxnZj{xzCP~ zl|!$U&4GUZftLrx$Dk~{@-qB1d*a}g?fR5F4i9VG=D{YBn9@AGnmil)Buf3`rrd-Z z;u?Z!Y-{58;1)G388xgxj)b=?wj23ooA48aBxomuJBD|m%ta5=pq0!Wo+zJ|N8+W2 z90~C7LF5BQg`C2vGI6^&x%Xn|)WBw0N+QdpT)67_2;r6X33s05B#pTe?HORG68h ze*i~q%6OBKObfyLzu)r zUGC7T{qVFOFsbuOGnsoNsbLuXoi8ys>W?a~GC#sc`IDgAKZXg-q4p0PWVk1Ae zCHp+kjw_B|(oL*4*v3D?r}5LO60ScOt|OqjAun+lkifD&=%^0>x4x<>Dz128fRTmo zTwP)6kV1u(DcY*s)PZ`4BH1Sl{b7`T^|hbnu*x#7Iy$Be`mzJ#b##kwjC)km>--Y!uV{@ZVXWA9OW@%iW4b1%P`w_SH0JV2)&w_7*fY45)IlknmfzWk-OxUtdR zfBSZO`$s>_vZ$YRdH=?}tk0lG+S_m60^hBCM;>b0#`0SG#48_b_jh;NYp=i4jvpVl z)ipoCUH!QMQ?sSz&B-c zo&CIT0WY`h-M#jMpZui#@W*>uj=Hk3m@B<~L-*dp2krL#2W@u`iWe`oz2h2}TA$0S zbMUgYKl$a)wQK9k?Gqn+roH^)^LZ=y$3K3v{gdzfAUCaCT%u29?6Ljupq-q}x7Xgf z-QIkEyY2fmF~x!=;ZRH2Pv0Q-E>LHT*Id%_5Ft{)Skc2|b{^}v)I+8@ct^P7hU!D59hpv_Svs@Y zcFj2_*1xp%UjFpSK4VKMl-IoRCWMZtH^Z1x869?n&*&fLz-fPp1g?YrRP$l_D>}4? 
z4gHyM8fD7My248s2*c49_8j-AC9eMUd)_$4?BU(n`f#*%bOLGgB!UHvKE zuD-e%59Fn;l9u%Ivv@VQuBXUw^EWu?3&uZP$KR9uQaO679NJnxGS2)6$)nF`-U(gC z5ln#N9$_gudd$%^uzl>K8{^!Eys|G!n7318Z3G$hFwr7o84n39Eb+`7cxj=o&0omd z=qqg%-R8!=H-fCJ<7R_?t-^HOtY33%yTy2y=XDPN_}n-1d0zF90ng%E1*eAq*1RQP zgE-;AySjcEYdmnKU)yNQjQ#z*p?+-+D0r3s(rR0D-NOUk&f96Di_5;q;Kn($`|KJA z*24haV4Qi8WoAxc+sr|KZ>fY=`D7ULIOkdN(=p)D5#wz6?>NQwN1F6c;jjH6?FXE2 zI{L`!slHDgC4Ttt_#3&IyjUeZgwXm^C-`#83iNMXh9CU^Kz;Vn&yPSm`8hAk+zGz~ zWiDg8h0X}HauC7RPf&!zeDqJ9k||sT$+U5kAb*9sqADHuCZ;38#3vr%NKrZ}4+Ku( zX3YqdKLccp6Euv}Z_NEW|JGmmCYAA`5E0!W$g`GU^OK#)q1&_uu5JU$;B0U)6;DoVR%rH%gWKoSb59#RmYQeT9 zN0d?&m8P4iS5M~+W(qOT6OxMJNhZh6LRETUOY!+APYtlnwAC2q5Jn+0;l=O5Z5BLH zUb-%RxVUP(byjsQp2BT;Qr`sR4Yfp8p5|kjrj*Ulc#d8hb@`G!9`MB@-wJ1764-%K z798tHnJM7~ZWi~{$p2w%gW2eap&UKQBrLc>#?%b(Srgi9ToO`2Q^2WPgMTx%_DbG0 zm2PA<`I(pr%Gojr;^P1fALNg6g_)q}bm<#uVg^73x~x-X8lLO+(EIElhFq%#H0JYMB58>pWS`55;#KnnvrhK@{VWc(rbQgj~haTDp|0wgJ zG1m8lw*%s77i0aI4M7uyKC%3SLBtmk-RPgSW@S5x7r(kdUK`*@f{wAfkLR2WY__?j zD{X%1db_Z=MLw$}acK49gAn>qo8VU8+xF>P(e=wr-aO6d30yl$afgFl4%OdwzFBnFRe#7c25^2aTsod~ zzt>-9VH6xLB4+VjY?iBs8upiv*ZSRJ z2MW@ADqdzjS-6AdEc}sYp229(mG4%|W6EWj^44il8+=E;IS5Us$4rb*Tqt*8j*CR2In(&CCycSUFqISGf*SrdYz*RT>p}S+@ zBhu6}6V&)F>?6G^x_6{2UX*-SS$IZv z#pR=IvFO#G0D%%dkklat1NjHhMK@3KSg->Q$nHLE20*z_+z<&dPlZcs@*Uybbed{x zil6$cZ%AHfep}g%kz04~w4*)r>fu@Y&i7w$@7=f!-RjCZ%`GqM4Foq+z~KZ)ocuXH zRj*|t)5A5!xp?7N(@BxxSx9os#zZPo0j(j4A9>?umEpckE7|#;ynU{?O%F`Qe4z+K zIyJCbFAIqLJN7Po#it=N>O3b=o>W&KI#F_C)6JJvPq{O3l_y@ApH0{fGjBh)Sex=L z$}IRsKkY)Y#`w}aEQh8x=AGCPL3%vF6I|hYmDPk*p4wckF{`#Vm2=P)T;2eXv8?nE zwzS~2!Y(4fd-{Z&t|oIgW*xUxdg-svHh7xeGG$`#=05nWhx%sT)hve9T!k`FU%5t( ziM}$DeCP|Z04j}4kmu%>Ql|8Q(BOAh-Qco3)>CaxIYyT6_Dj}{eUlr1^gDZx+Ue0D zv_EP4d%NxQU_TAmh0~+sqwv<-34Zj0AGeRa@^bscCqI`ralE$F?{@nAQDxo!%D$My zN?OX7l|cF^4-2@^GiO<(n`aNS)r5P_EJT1Z>D=3Rh&zNd40CBNZFN7aXH4|*@Xaa7Rcg@{VF`kCnd-uawfb2#J?M%*(g&! z_|uFx_>_IW2k4?jt}_R=aDYL2t%LgK*|3taa)UnOF`u81uR2IEg_uf!dD8S#TV>0M;oND+9@|r`X$UxWwJqn!TyRkkHzS)j> z9z3*cRhRmcQ@8K~CyU0)wY*2J3^&#yX{9UI-|`?{s*7yhendWvmkyG-aDEav;uSw} zAd^qvub=R+E%{yF+}447U(6e9>JHyn16DlBEwn1@_KD)2yWqhX+d$!nD?SeZJH8|j zzc1>iL~IKSbIZ_1SyH!kxWK~a5U6=jfB8g}wTtUwABBe$^waV(af`%d#9R47hvl^2 zv1Zf@>ZBadf^TUz^gp(10K)6yr3{w~wmqEguk{;c)w*`0-#Jaqg>v}FwqqXnog0~_ zvPdH22ASa5IypKzKt_;P{@grqqu;|w`3|XzkMbqys2k@1mH`>p*w!wpyP@9}<2c{G zP29>G0BK`V9I=V*bZCe#Su!={*5FlcGjD;`ScRcSPk4aGR&IL7+D}faUedn`jBDD! zV>2G;CcIVmI&V`K0;C#^Zb(KYGy239`YU7Ra)iiNY4=(B;wxPxSX|4FTX{%FJr6qy zlxN)zKMCu8jr0zq{-}j^{N$W*3_jUy=-kp=d+w!Y+6rTXmtK6n{qz6YU(eX=jQ&hr z;9OJs>qgI-Uy9%Pnz)O#LO zEp1zZ*DN(I%{`t-fBm*%Z>Hbyy3r$4qCWy)=m!@i+T&d?smHf zKEGk@;m$L{pB(G}dj=dOX3VX=Jv-iShuhmZxav(qJKH<$pMLAxxsv&lpZ>Y_%yZAu z#^>6@hY#~|8bwf40?`{&!e2fOXw+wZrXhqp5CP|4Fnr%ZS6-fQo? 
[GIT binary patch payload omitted: base85-encoded compressed blob data, not human-readable; see the file list in the patch header for the binary assets being added.]
zml99U1s=YhaIw)F4NP$CKVfYgJArOToc{s7jStR&T<@WUM@8;JI`Y7eDiq zfU_zni-o7&2T5fL@Pp)Qy(8Sq4&ZwlN2}8H1#*=gF!1X0ce-hKuKf8Sp6g0)jyHgn zNXumOGz9yd_*D`cXIMSDu(FxH)N2Wc%%+nbm;@;lqn#TK|55L*;i`@4iOd{d?@To5 zmC5Oxd{Uc5Tcqr6r_dUI1FZ6?(urP4ItAF^V)0Bj!x6~YX>jio^WiLlAn<+f|HN$6 zr_RW8LCF|oDW%Fz($WE>EHgy^IZ!6cLq2L@k_osr!ku|HAKfd#%n`0f8WfE(X`+E( z=YTuh+Hv#R^Ks?s3lTdz@#1Gc8o&P2KN8bY{FlZj;`Q%(YdrYSqwybp;S=$fzwcM$ zx%I=)kiaFIRYx>iYeY@TE|c~c=vG(f#K2-w?YLGG{ZOFQoS4_4{=E1vy75jkQ#4QD z(&iYrQ^uT3DMoZ~$D6$b1oan$l5yEqkQG@@MRlE^SfTjQak0?CE@}p?^f|!CA6|#u zt(>X76tWSXZ8l-M{g?;jB_H=&WP~xYa$R9~C_diq>@n&b6<;}3@D%FFOg-v{d&?SUpIi}yc@`X1?b_+AhQU)?T^{ddNt^o>n-%To@Cgp>B zQJ;JMN?f^eS@m@o%gcO?v>#Km^*FV#6x+L--dVn{LEYTKT=X=kUJVaJo?9P@@kl3)L$tfClGR?3hY6U@g+9sMQ!@|_2j(T~(6zM>f<6CHDM zN9~7pN0_2o$%Q@&ONu*L(2Mi4OGJk~o4`(zun~jNy zdGXAQ+vKPQ)U?;5eWe#_EOc6?(||Qb9AOcGi3>d+h-Ms}?7p8qhn}4UQgRlI@y3+% zX3>O}?F9|22jOrSM&*Lf)z5oyYJL^IYP;|uWp?|9-yA#nH%}p=4Z#g5uNVBLVQ0M=zw_kN@f#ocn0RU=W~OHRrHhX}MqaB;XWs$; zAd}(2ly$UQ)seEl)7{!?P@C@)a5%GpzK-~AWk{H60TJvSXAliW+n2CmWg+~=;u&;H_X#KQc7*dQ0( zMkkoik}EjeM~@6ZMtYFx1-Yyb1UoR00}QlS`0zdMlC>o5P+XK+0A?95!8 zzj!*%3#Z2)xi9|lzy8O9qk7qVCiXYhqPMpllcLGE`WkqFjT7)qIo5K7v4mjdz3a-; zuKX1YZpm3=3X@!NR|?W)hLFgrTkPN;fQ~FV!pQol}*Nquc|Y19}xj zyb7!d)dLoqgC{7%D=c;+_c>*N_oMRa^kW+`c{-Jae@ADI+_|5zAdV->tio8eA)HVSmTDS*jvl~48CmA>I|gz7@X zaXFK2*;iCQp`HJy;glLIQPCQm0;fZQ5UY*6Ql# zlqYoNIuCDZQ9jTMo@Fk<6c&9_eo0G}OE_a5j|)VrDe=+7l?lf2RxuVlc;Q%@gy8_S7_J>UmH2O@m$ZYSL#X+4wAng^aPz1C+n`Fvvmt-O?;ks zuHec*1FXj^Fiqu`i|}fra^W3xiIn4v6{HK0kAC1E>I=&l5_v%W9kl^fts;qM#KUZ) z^LS4%Kn3hbTVv8x9`HG`3?9G|gUxW%&tz>fR?eJ?Q>V|yePV7x{9%twr?sthkR^OjtrST9>rfS!U$OQOE?AI%ipW?#W=0pKD-jI zGNdbUY?Z$Md=J~tM2c-Zju(&s-L8@KMn%KW_a^}073 zGgjE%Tvr>~_bC83uic6lU%KPdhtSV1oIh`!5xqznwHHHoccuFviv*MLzSKeWpK7CQ z@Rf|;QCjX%X5;D1+*CYz{~7hQBkB`dv9`V$+dF$AyV`C|GDQ8PR2=C#$QADNpLQQ| z>(+Xx>5CThZ9U(Ge`rH@DiMDhRgW)~7YrPo`ywOtIXx>aaq?8>L42P+WBwP<;Z)Y}l{UltFNfp~wn%PUy$9bko zVTZ9SKKkM(ULS9H@B5^bs$RwWY;I;lC5CAaDf{ko)>iMtOV8hk7hilf{?cFh-q==@ zGm{Y)SEi#uzoz^k85I6pj%6iNPG1-p<=c-- z*@nZ?48tROiSG{$^*@{>=XWZ8vIo?bK~5LkWd`#=rQDTqGT0#p7DgOBy5vQtKvv(SO}DsI_So9{AuPLue0f!;w` ze?%AaIlhxtHu)rD%9{;c@UT`I>*nrOUD70h7bd1r^6KRvo8~8&r zW1r(4LuCF-1+jjqcS3QIvz(mBDVfaaa~dReY@B%==C;@bKw9-_qPxer`a>2AHRVY? zK%*QRd7aNL;{xa)SMntJA!%S)sXz0?D;R3qI;`R{ABQ12<>QwsgEk&Z%CdnU{eT{g zQyMv4hJJm0eK)S%xp`ykJHPE4Kcqw#lCcu$BA9R~j0r4T@q-KqR@OuXiFA#P3RJxb z0hmijz)<$!J^diYhy=1Cc^=G0{)6KT6ePTuL^l!jM}9d8f#R=3?2;K9_avovK*M13y3jSy?&{v$y|I2ADtrKlw}J@mIf~<+$7tB;u$NOmuoLUK!XE zt12#oV4yK!=Yc41E*v&tJ2K@$*qJ%xT1DKdnSHQ%D!AkhR7RJlE z=UJsUePJ-gz;zx>D6m@(+9s3p1#M1FtJ4)YC)n&H9^PFlJ6@ya`YEF|(~5D6or zyL5JW$Tly_Mn~GR1|9HW3RvWe(#g&10+ayy8353nvEmtWWeCh&({MkF+=szJhuYI< zU4sB|*7#I3IQD+wYgp3Ol^dWD3e6 zLRtfsBXwrIy`308&|qhKJ?^gEjMm1j_~l>x(fFw!`{`Jjor;GZeto>|&2J5kAp0Nw z{J)8x{?v6o8nda>mV`rNVp4-Il}_}S;K*mus6H2f( zfB%Rag_~S9q9Fra$xm~x!u4w0+!;ECe!-38vMHNnsTDd|(mH>IOIEu$6)e){FUp2o z^PGGnlUz(a`-^ZZ6D-bjQEIuexFG7)2mYwhH2Bc}I5B7i#xoW(bm=rGH?Yq^K zti9KZ8@JYCeS0ffyIb+#rE@Vg(};z|>6n}xiRYeuHa0gleed36V>(ur&$?cl8f13& zx}H$wh-%8^HX#|!SFIfXKQ22a7)Oy`&#q@=$9q9QSr*EIwcpaXbTbRgIjA)HxY z$B$PT*D8DrKV#q*+bSLqNh@pm3MOfo2sk>Nl*?%a%2|!NC`B>G9kEif#%J|B7vuG% zn3!6O@y47N2SRO<_8Fr!wVly8*keJn7d19Rbk!!&51Ay;NT0<{w^i{!ce{DwQ>|oV zk_p}^vmI362w#pj1ip{-;#gu~s+SQz0095=Nkl!8fstox<3Oy60B3|D`@c>7^PtJh+M+c-~0snc+fd~URH6OkWHp-sYAkd~ zF7EB_#hI1!@#LqTjDPln|0<^V(q23_KeLeijrs}h7KX<-0+(Zsn_QY{SYw`OC-DB9}|9Em)m`SU zvT`aOdgJS2>eQ0TrWR|NI2s+v1~yJ@Y_{SjfBHXq@$Nt{`TEI=lhm!+J+$PAICOX) zKaAW)KEQvn>f4fBxe? 
zsj{l5~)n zn3WX&gMRg{p`J#piFwpWK)CFL`3~iAiYBMb0DPJ#Iv%szx@EIFRIP%-& zRXux>RO32t8W0ZD2RZ`%9sI`;wL3d|WDt#dT_9B!r9p0UY$|bxPkmsST(39a$xiI< zY?XAXjunL+Xf4Jsi<$y};!kw@zTaK>W13w{1LkyGL>5%4g&E|A9X(?UGZWCSrSY zLt#C)GZqe+tZcHdD2$lEA7vv0vO_$Je!*lUn<$|fd_dbE+j9MeH?4^dKCKPf2_M=J zrv-Te6+V>Bb_%cMGm|{D2H25)3};+2gSw-iWbvE6kNhYrUq(%+?_{I6PjP~`1P6YE z4wk2C?+0v#V9Y+s=054#BPzMtJNMu><`!dZyAz-M)YI`p|K?}a^G3`su2|o#iHBH_ zgf<6l@!S4hY_8so`9>}7yRa1B^mShu?|SQ-qz^P>{pRIZU%eh9osQ(J=&o|j$mPiY z1L+6wJoVb#+cm!M8FEXSsY2-IxlXAgzSxt@f*980x4?`#G!F&lRyvRn>XAvEj`bOr! zN2%ioCh-M?oY-&+t;mV`7b3z3j7h6-dGVzj`Q(>ADls64D?^I}r{oLxLC=;9`j!0j zGx8UCEPTSw@$tnyo0@q&L*p)slnK3dmsmf5!ifh#+tHN`fEWAK()tvS3n`--=y;eN< zz@=!eZz`T>-$2%>4;P+n+HqefdL?yLPSFb(Z&=8M&<+YFmo1EvS|u@Ss?hRW%vtM^dWy1yQnW@FEYV>j>Bm8;w=@LuVR5? zT&Pdr>zxxV&@qq^J{d>wSa?JiQP!dX4J2KzMnH~HmWf)zudzs1KZx_o)A5)8(s!uo zfVbp|+BQ1K7_!p%y8tdvf72gzYCi?Z<`0o^D~HMxNZZQW?Zk$-m=aJ7ku%I7D%*eZ zOYtrC)y#>wjy}lSd|C`&!_#37TgS(gLR1AJ4A+gbiU%z8a{0qKmFJMvNaWgj5%Xee}eHuLv`FIni z+z~{uqnfck+TD=q9 zt(|!GsZYnCnPNjiH`>{B;Abm=Dg8CTR zAo}&z_O|CA=)=$_Iq{G@IqeDdSZ{7?d;u?w#O%yeJbd9y)THcXu`ZQF=~vTAqnmnC6NZ z$r7lPu=`=b5RIxbMtzG*s56 zbjZCut}kLvLSq8yUo6CrGmcQ*u4}xpCwX52TN<(8Y#Sg%}obv1VtuuJYqWs91(t?vsKS zB)Sq3M-sx3>0g|VccRFEioaydS2ao6xE|c@;dS1{m!g;ZKo!YdlTx_(l3xCdof9YJ zCZLSBc(*oI_3Om7Ug5ur_Z3EV)-r$%UFf$2$GTdt9d+@@{K9m6`8(bgkG$ngf~9f8 z#vP^7IHl9}B4J}@CMKj;-xdGuZMEaMXP=L6{~!NOY_Lg1`CeQYjnnf}($zH9P#GuG zzcUW{ec$?f<1O!de;jS!5zcICQ~ii%=h6laB%w!4s zM&-;OVE6^XRMC>Ir5>HiVY04p8PUk^4kSs!7?#iFIp2#-160$$vc(LiJ#J3GDbFyb zUF!09e(pEqy5DgcaUG9N180U~po8;AQ{y8I_`++t(rNv{wu`7S z74k*`g!K|%&8=r|+LtV8X$;tr5T`HjM+o#6f_*4C>ucyVMkiesTeMLco>NbyTv=b1 z2b7I!d%vcH>sOc>4w?0DQ3806-&4>kh|)bl_L40S_72s$@A}7I03+qTnuo5vs(72<%0A z;xm9D4h4g9qD|QVU`X;S#Pp&L(MT@^h{-D^9&(X35R zG;$%87^~!DUpdChFEA7A$2)NdD+1=;CX-g_ZAyB=a1&NOLwx*&N@aOw;eTX8u9Z%8 zZh9ZHVwLo?Or=or&yG)tFqM;hlQcI&V+b=LXlzNNGzFLPUQCtVOp@>ZEdWX5baE=X z8jtjnNx$-BB?X@A=O=Ps1;!;5n$j#B-?rRKH28 z=^3naOc|mFEdz%lo8worOukoS2R}_lVceDs8>6irngf6ikjNfK4&f zKOxSWcLgvESK>UVBFamR?-D`g~l!{zB~Cej$G4 z-~U+Lx&C6DU6_kEy!lOW@u4@w$3OLa{K^0KFUG3+Ii_=`l<(BYcuY5Pf^%H(y=Wtn zN&JhfAjAvM3BHe$ ztm;z96es247+-RVJ}fx%wLhB=#%uCtf{g|0@l@W;8^XuDpiVOHt7VauoaHCE<#>Y2 zaf0qd6*u`*JkE6_MoT{m_x-p9F7PH4&NFgL?>>SFdeHo;wl$`@aoLHc9AJ^mQ!aUc zi=LTKCk;6G4qoDdyWV|G4dn%Y+Pd;%(F|Jq9yi5>7l_Ly2=4l0kcZc{kLJ4@+nbv) zH8mdFJ6mz)+$jw}r=1sfm37sL&&|$=U%J6@a}c@PV;3a#BBe$=$wCJMCkBYPzK>ZM zj4++9vU8maxN*c3eH?|#KA0zPlWSt8$!B;6LddmAnxQ}q$5jN`;dhvjcX%fk8#u0v z{!<(wye1U|`Y^f41bE)(KQ=xWBef~Hc_baXI}f@&x!trHjP330`Oa7tht@aOV|!~m zHZ+*NefMsx-n|oh9gY}f5PleI>l<-Xv?Y9FP44QNziQ>!I>I>?3EDDAteSRvXOj^M z8#1CQg2TNCCB-i{zp-$UGB(>yE(+65&=UYe{Zjv;m{~3j0C5BxKjeYs(}2Ed3O%d% zRs9U!OFT@8CtKiE&&a8Ml4GVyZ#hWGiLP|CLyU1U=7bDj7~aXe(@k$f$~eGA{_zvjGxk!0mPvECECm5{+}|#biw7l7-w)2x#NanSxCc-g+5?M9v@I6R zIl2=DYKV8#y~Ka|kG~O5KK*o@Kl_kck0iLt#sV`(Kel{_r1)@>FCEp6C)6$~eR6tw z%CdmL3-^QW>^A*HEBE?);a~YNzi=RVz!##V%|0h_yOv!lGk~^ypNPrV{0}$mf;`$=%?eyf9|)P2Pc7`>+4Z@jC*4m^nl+equVWnlswCR z52UkQGa>7~ls<Ei)$7*GLL$kpP!B8<>h$j;<@-2KlTI4V>(85o{!jB)qu4d z4VA^|0IhcDF@zp%k{}{IZ2&$&PgJP92;#1MCe0|Hy^%GPg)sd@%L{a>pW2k_ zyxtI>)xG(fi8p_3uTrB^@dZEp!on7O!h{_Qx-3L_BY@Jf_ywO$&&;W9bfU@00E`U< zZ)$cfc6N4LE@?(;lLw{`Uj{(+9{q*zdEnuPVsT+!{c$sH-?|e`CSEyWUviVYYm;^7 z3oW4^?a+&X!i~kjNzns%U;ZVJ$JvYbrH(5vACayyuwV|IQ%4x}WF?cuJ+4e(K=0#T zm8bMS>j2_oHm&UInQJXZ)lT*W1L*`#QiK#HUJx!m{$271o!lc_zJ|b$%>g57>-5o; zAB&Iva$D)d<)Q-O<-E}0W!YM;Wn zDL&X~Nfs!-n>W|v?|k1si1lsm&>f4JS?L24lhSXeV^Zn%_I9GvR@)N&Y9i!Am(Iu6 zef9g}p$9KYO!Q)7{f2^eJh@+&?uA~dys)7GFDx7<_@)+&d7Dshx;# z;1iBurrf)Ga@BT#+hS!yd9-N^%GYfYQ6n7LP{8RA$Sy<^xQPN z3YZ-~mA68Gd%C^+_T+vP|Y!4EZEY6Q*U3=*N5o 
zvV|jrb1Ws!6VFdhPKhp}jc9(~eGkOzENujgW@MP@Dy6m=)7q|;q;-9 z%=2G~t!ZRSfDCh1#U;I~49m0dV-AVyeY{xfLaSwpzOGx6I^MuQ=Nx;eUZB77DjNvM zN{}4in*xo6+N1jo`Wu4~Y&KG$KcN3ArQksi#*pJ|4pF?E_d>2=GEejqPGKQG+Xp@Bg5L{dVl{?n))?N% zoHRrFHNnZl#U%bv&6I<_RY>2w4CXL}zX0R(q>+n|L}nXz%0h$^UYY(Q{*|qYV17rY z69+#kiSjuQtV$$D;@A2X(U8FUi=70MOfZm+Vmr-5wLyLgIQ!5i8!as_xQ(j0NoGk7pdU*2 z;_iR+e8vd1TFOM)I@;vM4h!JujgzsoFck|L7c^?)8XL5HoeF)q<%DFwj`}|O->vO^ z$9w3)iZ@bmJq($8y+))OAL+()eKMvRld-GuK3A22f0KRDqQSEAX-I}NG*)2~*T(u@ zOg2PQ(O^n)yFSi5yv9pPv$?CjU3@hy`94?YgcZ)$Y{8?tImFN zw!m%Np9v!8lvL8iP@@c#_=2JG$E=|Yc?4-)xjBVkt-DKmJ@@k=%vCuyrOwnv? zac!1lv~&_Sug%Yl#qycCxPIe?`iXtX-I`!`#RHx|6CHBw#i>F;#BXj zq+wFi^6*!%dM3;t{0J!Xgz%}zX5PXwKdBP?xxmuD(_?t1+~ssq!s+{IE7aAf=$V_f z(fcPoR^>}z3bCg#@ATAEy!(A$CcWYz7)tfs7Vk>FsO-?3YkqFuzNT=1* zKi3<{M>I^|$_&2&pt5+|DdCx~t0RB>Zar&=zlc8!abT+W1PZS~oP2iOs=mEBSRSr> zxs_%leL(djxke$G%P>A}N74n=5AxMN;~~5mn-aA@XMsd=TH&g8gKUKC@{z0*Z0AS* z3ZqX|T_6|fli7^NoC9(Mos)So&;Nj)fHTjZr&0ogkums#7kaA3Brd4l1zW%R(->Kv zxETl*$oY(iPs(5yz_9$$u-yzNUp+d0{*n)6WxT_fR(Lb-t?{w@X2!d+j5*rszt&eb zyM?e|Qn3tfELdCx?wFWdDfdwL4j@9jK*B&{UrLK$(esc@q42+zBNkNPCVWLO zo+u6jGPi5R%Sxv#bbo}S@KHXwa1A?fW%4UkB>*=_0dmxGv4mx<%Mi$G(iC6QyD}F1 z{ddK2Ixz^Hh8ryPf(xyr^{}W$)~W+5-eU}=$%PJEE{j^ClXMNa48)YgG=Y|)De(1f zn(N(Jo8GdZ?$-M_&YOl*#qj_}gf%$% zF>rzB3NDzTEL|7&uEIo(+$|?d81HBy-UvYn^oI+60tEPh!5>@FR(OFU!Nw=?bP;)V zwB%#MtkOCEf`joXc^)_El`zvJe`k8?9v&c4m2ZN|EKZtIUhrX|&T*>rq5(X}WAZP6 z9Kp|-Mln@ZjNknvym=?Oo{GRX85|+gc(N1^T-U2YiVMOOeZawkDaA9sz_7fath^|W zi<)JJ_mBW^1pswGiofdT;elj_WXs_ZM}ya5qP7?lQ)d-AM}~5tork+|WpzwU$KBCPe9~TzB=9}) z<$_B_9d+cY{aW@Nd7_8K5*CCRl({S_XKuamCbcYNo0h&lTlyK3!i5 zQ#hNkSRkL6tQP(ys(o?@ETrtH+p!7B8pZL(4$}*NXd@a5S7f>ylPuY|?!i-O(<(iS z|As4?OTo7bhzU^%InI5gZtLujW^sdoZPpic$?>Sjb^3xVBkAO?v|b#NSCOIcDY(Ob zl!MyDrH|lCPw44ge#*z2EQA;E^1f?$gYs2U$?~|S!LtS`iq<|slWh0(@{+T#;{e$QGXPnffH|Y6hqJ zl-`BI;Za+0#dj28zAhWnU~xo>q!*CHL`TBg`heQA>yCCngQtDTBX_G)M!k#vmJi4w zNTITt2FL^V$ihJM_b2FDFLQ$I~*lkZ;3oJdDG zN*M|*2`YwsEZgMv>te_brFWm8GI_!qF0s&&7g1?{JXCs&k0n!EWpZYD@l-tZ$tV4l zaBZ52yYb))$Nj^uzpS30o7FQq7_up1YN{R!bBkVpW3hE>Ys=Sy@D*pR-tgo{PX4Ga zl!o?FQ-^?l#^wSh`VN#or-Z=k;KSYBAj$?U_mM)CyTUpe=+J+0#enbpC7McyY^KDn zW0i^ag&w!Mx*k`qUN($VYCrSy^U8z!LZ@8qEEK5aC_FbR_}F3T%w5`q8utNTfD5eU zKM&ALa3xifeqbvXS?@c=*+fwZU|@5MC|_68E_rU$CSq}UJ}y1*K#WgMg~Uu8c3Uxi z*beUB=PD5H=wt)I&;8u5#)m)hjQT#|C|SsEZXqD}%1eD?o+gxiU|ww^VhBHL`iZ7+ zsyPjVFK?n#9@N8rPkpV> z5lpo4;=hXy6+{R6CDDd_S@3c83=*-D=OU=`?CPC1E~`9!EHm$l44#%Xl3}!Y#x7W7 zlE)%sB|~ob?ZZcesjlHA{5Uy{?tYAoBK}h9(36f#x_YpyB$R(lG~}x=-w!PrE)76& z_oLf6id`ms`O;hYh+5HVA4QLQyOoE(mXa)G-j1G<1yI)K@ncgP^x%$uW(tW5zf3BR zxv4qf(ex4durY&N7C_69*3=_ku9#hv#W!qt0Zv02V|r$dY(oK>gl*ZJz1BmL3QXwze(TnP}OdrNd5 zm!*P0RBb?IVTY0XE42~IhH^q39BQ1*(YP#3``&izzv_dTKo-AotbJ{Q`{-#~3Rix# zZ**hw@zr(8<)Cw@`k>!ad7MA(fq1Gnp+QNR{C2*=%U7CEZt<mIkGBdK20I7i3p z;*%NmUL*$%U?FD27(g` zk+Xa$k1S{lHX9%GqH-SkIN&|S<-XrN@y@Nyow$7MW;}QGdOUaeRy_C8YJBdwTk-S@ zH{-LH@5t{?ym;k~c;$9nxqdfp-dT&)tsRx86g35~86W8ZPZ$k<`w4=edkw89#_heookO zl{YeiKE#_xc~d?nDF1>{;yQ1Tficvs^0WzLsqzsV0@UL5%hFM%>8Nhd!M*pe0XK;PTcHv+@Ek|OI>*)L+A(UYU^wSW77>~@a8VYfPy$DdGYw; zkHj2t*Z{(b5y)8aR8BlX2jXTs@n$R4A@Tr=AHpu%nBUyZmU5+mwyHsNeA*FZ5@;s& zNaL}Z^m=6>ILSNIAu^3>P(re@qIC1i#u{ zr@JTMB*rFuUs(!zr-mpF~iTV zWdL+9`-Pf&rNw?>W=xIn&l&Ru_EWRV@$Y~BKTFSEi+6tM zyJBK#UNUlB>_H!V;0=Z>CNt(|t_MAkYr53&DbAt#&w8xgx*nhX_%j+0@5JeI7d`%; zlx*S}0CWZJt6y8)L5-ALIEvZ1sW`Q~;5N+0s!p5Z&D$|1MYSj8W3SVUU5zQm)sClT zr^J4eb7G>qYh2}W;IFR7#}4Ab`_D@z*44K+qs7Kf^=rt(*~UWLP=4(E-Dz=Uz)U=P z|B7@rbgf-)3OMMDdILmLy4{)6%W+ydG;6pIV9UVy)| zwrV|ujkpN7dB!0sC8rhL-Pntry`yMMkHxgq_W9YG#tU`nYWr?t?WX#0>50fhbWU{D zjV}Z@nzd-G)K6fprk+BN)?wb 
zBHdkCdP$-(v~4$|s+pv|FWw_eeUBj9pLlo*vUs5K6Ku)rYZBcFcV>*J?Ew zm#Q7Gp#r#`H(Q>IFMi#_@xWW3;2W|y>`3m_eUDBymBUk`Mj9x@Y4{ zF)%KNJ4NC^C_cm@02xq9j~RX*GROy!0CBib3W(uc{oBLFqyB-pS=jxr`%*Jb51Ak@*hjXWi1}Carx2 z*0i&y-F!;gBm-kg_^_$s*b+{g#;;iFxP-l$VeXSSiTj`oVm8uK7K5wu;+^Ql9>arZ zgTQ{>R<>`5N*Lgje~!P7=U?ERG=^2dO_R_Q4tQ7ajPC%>EAxWmrobpW^KAbsOE(PKgSk)$f)rRe;PZOMXGwV zEHpg$KJlh5*?8j34yIkHL+A?)GC5=2qKg5|22lN%s+|LeM94G2HE$>#VOYi+j1!nU zTwC3YTQ}F@`IoN8+HUK{*mryzCjhVkfPqmTvS8(VsZa}TyFzkEKpSBMIC#j-1(PaG zMM%O>CDS{@6Q2AWg@DYfzy=l_hX?(*O2=K<9_VQFM?UT-)FF8C5IFo8zM30@RVoF- zgQuBQl_SO@n(+j0J)2N^r}Kn)OhhWE5}uyGap%myL;hlfG63>~9Tim(+Y>y%sNP{7 zPt5U51*vlIx4~tx#Wf0&xWbb#U=N$Bfq;*)%e2Cu0%Y=x--lh9egm}t!~-0vM`%zf z;G(`9*~~p@3V2y}W=w!67D~{-Pp<1ABjS&r)4M{9txe`dgz}y?hZvYDCb0AfO-~x9 zhYnSodm8dFt$0o^47e+yp9cdRjN%)AKMoAh$q$F;Dqj@jzHyuiR-tbN1Zp`=-%$Qc zn{-gg_3C3_OXpODJKsLMG=2INqo~8LC&P!I;fWO5yrqA^#egt)^JrH;8LfKGfP_iO z1j`Zf7i0RmUgQ!_=#$Jb@R#v~B6#pDzmx&MmZ{TeN74=trD*>Dc>51H+q3Jg6J3?gIdx9u zTQ}d%>TZQrbW)ZrTTaNrgBdJ8g9D()2Y5UfjIF`X@58{nA=xm@j173kU_dgqY)iHz zBx5UC!HTWaQYZE8+c)1^Ih|bT{l35T|DUQ`-7RTg-rH6Gv-f|;wbx#It+nHSta>b} zwNuJ}TJdVepZgUs(k{vn{CStF`h&|N*P`xj3;qiK?338)d&Z@dz}X)t zZ?4A5`huAAjo7^Y)%c^|`L*cvTXD~G?~K=e-=B+Sb1(k#U;AhA(XVx4wx~~SxDw+G z=A>PFcdY9K@4`q3O8G?zW+A*H);FesMV$qc%7_OcwMr#wV|kJiqnP?p9>O!2q7TCY z4L@L)ebVpHQ~?Na1p||*Bcq(ok*&6IIVu=lOw+OUM4nz~2bbW21t9)-RT^Jq>%ki~ zG^@5zW}`JG%LKa~Ce6xItSt7Ex9h3Llb`T@d6K

nOf=APZi@sBoWFQC)qqAAi#T zbwRKf{Y7wPzo9J!A9Z#6B?_o}qC!99gB&o9_B_b;V_yWn>aA+!;yf6WTV`akn)GUM(OPWm(Q?gNGk}EFORCvAA^YYOJqp7+-w9nbQa;eJt>?fHleqFyV<4 zQS=E;`O%*}M_LjCPZEjI#3CF7;w!6=8{k#SEV9#^m5i(+O_>Fb5})?R(*cDsk$!XL z7CA?Lh4U8kjGng|;D9cUnB;G&BW@=6{8#2!t;~Jc35F_QYRv8z4?Z?K}hIl%CVBd?CeCxl}Zh7+$lWFiiP6E}(8{*?t>!jP9 z997vMlhcXdaaZln$!GIHrSYu-f|0W5mleeu6I@hjXJo$my@LWQ{HSI^Db<~br|>++%fbGM(26SK$SufOHT<2yL%*uNAzo7ZBmy%odT-58}Fj5ow1p+o~FN6iRpHHwdajf?ACoe4fvrR4Qq-UU)a=Q$5pw81X zGu{wJU*_}>P0n{<*?>SeJsdg0z?w}ZOd{O-ocn!c?xPPs5}Td;Bn69}eCHb8kdwRl z@V+aV%qp#`y8St+L}Lz*2nO+5Hmp>t z+zg}!W}G|Ph#i#|O1G!DyLv|JQ+lsS5v$3n3m%0D`jIL(C)Cya5EuFqiwNksqtbiX zh{ug0Y%FR>hZB$Ca|V)sY%Um+n>>dr^{DWTUBSd=5BkNB!bLXn6YsKKAf6!{`%yTX zfV??Q<+1^!5AAtzx1szSvFw@7&~UOiWF=J=u5y4R1*1x0@R(b0p3k zKN+uo^|!~XU-=!veOr3LlJL@r?$%bc*Z`n*Wiyo*sYTPZbuq3B1|>WyOu4Hp5eM%*g;CE|XE&Cc-B?^}EB#hnUtEnVH&JM_Mm*=SH5XQu(J7+O;rkT9O~n_2A2sBP}@N3@Du&f(dqNdQbW`{YGD{LDAW-_d1~Z((~LMEP5lQ9Sb|+=S~pQgU^TX`4KkpaXOPW2N&=fsxB^IKcC75Ht`1QhsHDg z1RV$5g6A~g*<)b26Tiv_eqx!7Zc$813N9$bl2WkIZbT;z5BLXdrV0HSF1P~t1S-J- z+&RC6TxG+7G7zlRZ&U_#qU@A;1w8IUvv4N6N-zsij7h5I9Z&cL7|YrwBSQIZ|ruJn0sAKaD>v2f|h=*VuaEyv0E`Djdz#p=p> z+?32>p`F_WdJ;z5F2iX@79ZekYG=V!;^TWbZc~@jm@^k zC_TX@Su6eU__5hIb80r4TbqJ)L}T^USXpVs^mN@<5x09g(drF}9}LI*l9P*)t?S+WsEY55k4euH9McnZZ#<*FwFD2h{vd-8r`$TT+1m96!I=i*f4=W8 zKjEpRB-{YN2I|{S&c(^8O02GLc!R)}(r_n2zb|g*|RIDbT)!v@Ol$CBvRAnUFuoh_v;T>6!8=N!W%ui`YB)jCthFoZu+bI~kAgg%T%zJ8n zF5d9HuaCKtC#86GqqV*sO^r*|MgNzsN@u%xGk*W~-yOg7YwuO*`*BBOBs{HKH8q+7bab*JFC5i>UQv8lukHwGRwnsr%q!}J*9u985B{}^T3n=;Jj=H zjBJ?iAiOk(rO12gtLUBR{>&+`5eZ$GjCRCtfViqLX-SKWmEiX9Pqay|r&A}F zo8v&zxL;617%uVk>xwY1L*3-|cn*JXBtstH%?5lBUocTeIUE6A$V?131tJ&ud`Y|6 z@}apWFJ191d)@y2^^v#!rE&uRxdEua){_7MTu~6ff~2Ah-u=*6j+){A!!F$XJH8sD zM3{+Ie@47Jklw+y!aev=S}XX)I~7KO-~NWb$msU?)PeaabrZiGUlL7KEE9nW#)p{**M| ziA%;J2(>9?QfC?;ob3nSn#6#M0tQfG2QVrxJ0aDK=?2!HGFA)_r?e1C5uR;V8uFkv zJcLmnTl9Pu3+noL<$&EVPa4eT+zYXMz@`#S`AN2i|T2@{pU(KskEA zf#VF|C}3hB;{kjDi|Hbb7mLB6kx`^8<0+l}9r%uL0joTVWoU%(s;{BNVkXoGEJF}le$e& zJCmkxOJ!8wAN7PX*J#2!cK5_%#^$10KQ3NzOcBLzl#%lWelc!0iG2BmxepM7Zw6$X z!tHo^p-=imG4T-w5+;0CcAo``c2nKA7faW##eaU+zlq;_$J?SlF&r;?(M#h;-uzeM zkN)$g;zxh>*JG(mZuCKAzRy)x`x;Cz@vFv8aa3lGJYad;@$7Wo0Y)5TOlaQLKc9Nk^Op3;_8n9br;&$AB(nBljWuDxV&&v1L?`Q?ZmM-c5E(g+`JMe zPaco;^)(Hqm*VufvoWT=+>~%{Ho2xqgIm!8^u0ICH;gry)!>9{rW+HS02T~_odL2> z8A`61Z_{@%vY@e)%d(9K*#`WO+wqA^1ihB;ZXi9Darmg+L?gcTU+&#q*+VR5=o#W$ z#5BIn#ccUA<=5{nU+OfIOjICx<1GJUk z`$lXQ;3~H-edVEe@JnBcZgVT9W@h5{JMW5f=WdUOA9*|;d+dqe+pRNm>gUxlpU`CT zn^o`6BKN8)Trx4$0EO6)tQ{eKJ2mJHkCJOG1I(W*qxF0@h zfBz+&0>?LJ4&}*H$d92R7MS&p{JI6Mv@P50#1rE{0t%mmCi|Kr7N&?dFlB5Ig*(f4 zK29s=q3DO*Hl}AhkEAcC)UcOk5LbqJQNOYP(>~!$M$5S`{-f8_LD4-6y4O34FlD`x zZ|6zcsfo#$o|uc9OY8B~FMS~wF`WYtId)X-qUamZvOnZ}0SCt0lRjutvS%ysg{ook@K*18{_w-! z647s)_m7U}<_h}Z=6W-FTiy8gzwxfPveYU1F@00@P#yg(Zo!umH1v6vm(cr>?S%v4&`Pu4RVVG8)%RLGjzmh zXxh!RtT=W$nFh4taZJEsBMGbHwPNh0_7Ps1TOB>B-^iFx%JLZ%2Am^f)o5$LxxU^K z&Gutybt6_Z;AFA?E014`N56J89=~uiE?ioV3s=^Kd(q44b}Vh~#?qSP-TI!~d$GE; zAFCUEy(^EkPF%XN6_>6r$K`7`;?lLHSl-<7rc*yuFd}s7=hqu4o*;N`p8Gt0}GQN@GTSW->-p=BV-< zZOr-^6Wz`Cu#rQ!f5}Up7k}nYze(Lf z^t!$h?JYJ;Y{bU;s&d_qW3zLjk>o*aZnBP`enY>e&(evv_t<18okC&TY|2oVV$sCk z?NvM$oSB5@bpG=CMqIhM7!N&uDIUGJ88<|qO|k!ubU9jW=YPM19OW%KT4l zCiq!MA%8a1!e6Nl~9FZHi9|X694)Yz+Cc`RJA3{_=R|`+h%K z(qAXWr=%;h@R2f%lYc(xqr#<08!kcMGWf;7qme(v$IpCEIPZymhSk_xn``P{6LD;2 z);EjX+*lP23wtVKZuYqFT#I8T=i}J?Nj>x4+_ov*du@GLSl=@RpFA-auY1)iV{2nA zy4{wb5q}krX-L^xsC7`HcLp~{eW=4edL*5YscwZ9@+i{Q~%Smo}C7IyZx3;C=V!?^~w3YjN?96gkwxM*0&X2+HOzc;O3zpV3+U|dAJ_gW{yJ? 
ze$f37*RhC4d*+kwl%2FMeG~iEhttiZ8$9^oa{7D@j3A=E&KUUhjHvxchM)iWQ{E-M zoh}WucaU$UN#1nW{~J6i6TCG7rCJYSsHt zn-w^qZFsmLi#ZYcB^#8`X)0WAUzv?x{a=13rjC!sOJDh_s4CuAeN;M#^n3A0y1mOl z=M!x31KJw>r#3B}LH)n8wH7xnU5=+NUWt?HcPCHI#>(PKw4|$X^9?e*+ufCWUouZ} zjjQ-&kmt}k0>absaaph%Z#?Zo_CJ?_2Z ztm@P8LO2_QuP*JxXQMSV<|mUI7miLDPN4G2^7WH(RR&`a;tPJtmYVHIs*QRex;Wu@ zVgw@!7`%m7F?;rfXVhG)`v!-$8X6$uoBS20)j0$*O1pWf1sJQwk{nEH9`40S( z$~t~J^9QSN*pzNpUTK9-;gt0Uw*n#{bjq1 zqt6iojk;vVa2n!bw>Ohe&D#I2M%J0k#f@{Lj&e8d{}FbO>&Z}yNp-iAyth* z(WRkIL3p_5p*;bV;+i6)Agk*`!k<19LULaYmLR==Z1;# z7Mx!$)rW&QE>S_l%ze2HOi^$<^P*owyj`XfKDPm9Mf z7qufmIK;NT(TpqC7UIg4g;-eKQu@*BBR}_(Klqlt{k^kgW+l?o6b&HgO0SacjVz$X z1Gks~4FU4}M;KL7xZjNwy(@YuC>Y|A943IB)x)+woxSjPdP^M$5yFuIC!4%V;6PMt5##CTPh|-s%9pmRmtZaR5;`vQ7jhKEPSNJF3bzp?VNT-sDkck|xs? zPmy04#e*mEEDXF|+l9n?FlOP=R z<6XeGOad^4%?S*-eY$}7WjL1H#yJ%s+)shXfhptD?s7X5l4e>Ac^j_M5RY_Zm4y~@ zp;1|wvK;bJV%``yQihqdOr8+W1!%G}KM;>_O1Vu#&RdpI(C#AH{D^6YEN~5yPWS!30+T}d$C5A3R3-x2@d-+q*WVp3_R zt2GS(5i&zj)x)9#3&>3R!-wd#2))YKP}KBPE1cdNj`7-9RHP8|2@fXPAT)TSuU9nO z;7LG2lF%i%5)Y%Ed|H5dxv8i>p2KViQd@X3NjQ|hPueSp33cXtEK3zezR5Fqk`E0@ zNd_k<>+s5Q;*oId(jZLX`Tk^r{Grv%Lum!(E*qHWAB4-(`2b$QzkE%Y99d6!$epwX zU#A4sLjznEfsh}gY%Ftn)j@eC{FIqoEaghVnRTLW#3wNGW`fSASOhu??f7}~gW_@( zU1pH7VMhaC%rG)XbmL7E%CIUNj%m=)YDHYWwj7JsZp6vs8n~Z678jm+GVZU__dgtoYY{rNp)17>Pw$dc}(n~obF~EEFRR6w@{Vr|VCO&bd}eCO-{7BXJTu!9ew%(^aWk82rW_OxZ8Po>NkHfUGZwTMSP zCcvbxl5ZZEqwhzJ5iau#@d(NWk-;W^H%z*1l+Mnizi(^~=Xcmn{lS$?M_MdsmM|3Z z?-nztFW5_S5an>((p}oXdnkNRFQq?(iUBwSbC6H=|MI;^e+&K(CzjiNzVuW1j+*(f z&}?3V-dXe@w>`R``91>`*-Fl&I#-5uI-FwMjr-JxKl-r;ZeRd8D`=`>!w*ufdggcNhRgQMw$u1gpM=WnDq6{42-t6 zHuAfom1;Dm8iF|&`^b|Ao-E4u0!Q+6f@L!(MAz2?%;ufQ6+fz@_!+VfzRsx-zU4Sx zqn=|iIW-ZJGn2v9q&tGg^Dt7-YfSP{2gN6!8`l@&@{?ENZNKsU*xZKC=YY+6toSsG z4+oQG;@N%i8CP9D1(8Amv@&=n1-FVpg=KHj5GPC4Y7QcBd0P z<*~qZ=Bpdg?Cr#*8w+vq#%erzVIi(w--w&b?O1JYM{if{K%1)_hbqdWx*Oy5(HN^& zq9VIe9gFdX=u@!O8)ORLf1)&apneK@HF?eLxW zF%867K%}2|u~+Ts6Q#1DXGQl@6(4898$2~8@*DFb>U&JaLu+i9*xv4&M%b7(Dmb7^ zU$aY_kSFaRe8EXhoH!j{`qD#jWntMmo}(yF!u!Hse(RToS;>u}TUEGmMmSvGYRAjJ z?L~3wXrS|HIC$vS=GqEjx+TBrIY7;&gS65$~ofzSZuA zuk{t4*buk0vMwESBNimj)|%~DTHBPY+>90J3u_y4D|}&DaaNnLs5DC}8?m^s5{rxS zTTwj4UsJnotSe8&-Pqdn=DTK#t9ZMjK^FT*{ArDPV>)&u%aQl!F)Y}Pi3jmrMj=8p zh91rdJK;+>*ik>&*yzQ+aQ?*Om*XG&(!UfB5{XpmUL>a-cj$j=hYc2Ice-(WdMeJH zIT1hdL*EzYPMwLx8&8Sud+JmB!sAwKOTJet!!aR#$!!>Hpu)I^h&E|g_$fSYcb}Ui zaudEkPbfn0SV%OrrTTF?a#gszDtfzcZ6U5LHA7j@?1E8vSN+Bt(y7$G@Su8CyiPhX z-wn>sH?WyNjL7^4os52kPRQ6Fo}3G%-Yg%AfP;FzO|RD*UzH_IEHxOi*HCAeDg{4eOo(~T+&-Igf zKCt=*M*2}v57OiqiGmn26ugI=$xkj|ppP21qVtgt8&Z)=$fEJ_3E_Z^qWiHa8Y9ay zr*C)v{KBJ;$4q@zx*k_F4|}p7e#=c)u7FD*TlKGW*)8adThdS_VJj2;J@ISw z)RV{NVn@0q_W~@eFZ*_pakZr?s{HeWEjQJ$X_VV;Ru-=*XUQJ5?aAZE<0UV6L9EJu zS27#D6`9P&PwL<+y2bOr1a&9O&??aUSZ~6hVf7<;OZn(ev&ocr&ao2woN*I4FXv54 z3}o9upLko2%Aq7hgv`fdJ>~C5ji2*cna(sSbmhi6ba;d63>mKxZaUyyFd$o;Q;{!n z6c|m{dWgf7<=h6x!r_=?wz@{#{hZt4HGk&MN37qJ4$|@l7wF8_JPRhbk#p8Bc`6V1 zCyShfQ72*3;TcblV6b6Q38MabhH!U^5*KL>-w=7=Te?7u+6(>G>39x|^%uu2aa@`* zeTJ6l1|Sx;+Y;CdHFwDecohn`!^QMkX6DdQcn54H8vjOgva97=Z> zmRs=q^~{lSWlX<%=%Zy{VIyZ><6Tb1^2sjdCb*H~++B@$=R1BcTH-M;e$fLlT9wYw zY@p2o*+KIwko}Cfls(-+%P>OU9ls64xf>Z)!mFK=BSuR5)MYk>#?@klDw8o8{YMJ zADwr4f=v?KRMhY$fF0FuOt=CI`X z8<&2v<(sZrU36(SaKKE5rNiw<6F>1Vcyqm8joVI6YwXpL&b6*_8k-A{-^f+9n`i`{ z2}@4dC>+5s70xH4dV;Afyl9B$_E3`SA~0>elZJPX5tSC2L%P73kY|=9?u3dg`Zwzb z2{QNswh!|V*D#|yXQ0bbT;u^<0K8Kyk8ea(B)Sy5n)eaPfvCv8=uerI;ue{~ch=VxLNptA|vZ!Z)LE5N{ zH>mU>L$u69=12rDpxW^)_OqLk1O`U11Kv@S@ecA$IZqp4OkkN0kJ`!sPs`^Z{ei)q zF=`rpRhg6WMviMS;7w)z&?m|#Qu8n?-p}wx`Q|9ST=QX+v^$B 
z<*>;V12_s^#X^3Of%9-USrl$_F5K23RF>hTOmu7Yse=3|*19Nj#$Nc|^_&Ka{_raN ze2@d4N?Nst4}|b%kg|XJP^bKpjONS;0ne#u*%B!I-y_hm*n! zRGu;>E7Dt!b;OJ>F6m!PCeg268KVs73Vv5O)5`UjOsC3OIT!rs*^0$ZDB1fSm}UJvIl#b%e3f50_+a1#j7$s-nVJyRlqn~1 zij$xiQ4#ugasH@UdI)>BWm(Dvx4(I@U0PFCImKFcEU#QuV{rS9m+80#2n(FpB!* zi%ojtSGaAIPJYxAKf_Zzm;;+%$^=%*IvP$IJJTfb-dc_g9S~FIkum(n4|s7#MM~i@ zv!Y^2LuHwXM@Eka8>{hvy4Kc_tSlVz|b4{}~jBw^5c7xx1>qxSdDQMrnp(+dFalUTkH7J{Z~U!aiuZi( zrc0qJlOs}WnM~3<$~C+T{-Vr+>t`)#1Z<_sY5!dKZ!|`uHkMcZGnWIsvRQ%J!M|Z< zO!0sn57Z1h^dP#zE&4KV5^v3kKi-XQm7VFvsT*c;C}N5&C3PM#;ma zn?y%Si3v!OU|!Agr-Cqk=A9y(EQX zCyvb?^DcH|8v`mP%KY6tmAR|IIwxzYq6HT0hLxTxkG=6g$`QN#DKpuVDYwiz?(&HY6uCaTIn|H0hkSD1yD$5F;SD_P#LWCuj1JY}i7S`lGoO9Xr;zWz|Gs#| zYrZosU%VLadiQ(ca}PeKzP9e&+??h^Dco4wimyHXwYYTYYCQJj*Zdvx=6cgR>*tTn zyQ&v1T#e6v>7iIzTaV}7dv8q7O{;HBctU^^#hjAe*WfVazvxuBL(#M>6uqD;`06?h z_6|;FfK_^y3<*?!qwHzMF}{u=9Hf&6?jxD-T0R4Q?iU-{Ro>(6p9gpWc_s;mhfBt~zsv5W7 zc89P0gjX%Eu9}v)@*n=lLM-|49bL+Fdi6c{ofkTkhDElX`c0lp<{EGM9B|NwIFUS} za%zSXhRrqZ+dD1EhN}2dRW)F;p1}{bfgi%3sbBJ}l5b%_*YIamPk9M0JvEN$C=KNz zr*jFz_oBt;_{# zz4dgNgAKby(-wwFKI2PpAy?Uv&Nr&Z^gblHbo|(CG%CaKrtkmGc;k=#jfg%Q{cd`p z(vzGlLhh>Vz_dwQKjsO=Sj>Dx?}Wi)EQ65QdR02whflg-4<__P1IR&OwmzhXv5HTN zDor^!RP9QBF0IESPu+~Ko)=%exDl6@`mwa7^gTg0D*Q{fa$0?)qP)5NrJ-^}2V>)0DPNCK zZjE5UUTrBNh#lx#G}2T3dxCpcfOEb2dV4>vEcN2?^NX>tc+GP3*vw4K&&^oosp6&~ zUums6Gg)i8L#`_MJ^59Gxr5uY*=%=PhFJodQiT{YfW!1SY6qQr=GeV7u7ybK6xb`d-Ot_f8ugH`skDKm9LzS&wlR9@%b-&IllUp zhvUmUUw$|q`RZeee=)9Iy`i@6#&#FoQEju$=cFcMettG)X1P*Wa5bi4;`oVZ%*?7C z8^R~$tIdF=n^){Z%%*MibJ{(x0ap7|C*$A$#_z^wA9+IUHX0L+nkV`B904c)!BM}> zDe?U{sq$X?>Tipez2pVaT3wO(<*bE}Eeqak~jTg@r}>31b32Spe~J z@_c)@X&UiKd3cfeYc8Dd&Ei(K8&6)o5nsD_GuAc$ctD50cdd8uEqW$|px4U8WYkpm ziJ9q`m^u~{({fKti7!ov_l&D;M9ZRMZ+K9HOB`WyK zg;sFDyat?sL60H`z=%Z*(ni z$6vy&Xv!X&c?${!C-7`QWb-n7wIdn;&Xas1O0t)}eDV6#*ii4~oA_+Df&W#;#S_OX z!Dk4rUA-D>%lYY;anThg4DY(_wpdzN5Uq0yOwS8)D#7I%$AZl`7qU}cBXz)y;FUk^ z?D+mk%cuW&)Na-z6^A@r59Q;HMMP9M^h~>eWvc5rGD?8wi_uUH2R0}r`C#gOhv$?M zKgtK($YA5g1CPYl<4yS^x2fZ{^bKghr^PZz#f$c`s0c8J`Yyso-bWa3I6R)Th{}4PV*g21bwo%m&*(`CbzC zGaAAUKWWOvfBVuwXe8$?2R82*^=F6ES7axo2l?0x`U0j1Bc1D~_ac27rKXIdc?{mM zGfp{Aa;vc-rvIY-T#roSG7xZXXFYh^D8HlG$dLY^ztUcI7N*RD{Bf6cAg^rOtjB?I zeVJ^+K%CTD>6kxa0c(Ia(Y67i>}LY(7Nax+rd!>05r?Ggv#GllNUyKfF*Wmo?2q8;&GpR#j$Ykti4Tm%ykt1@9G&*I(55;wj+X46Wbt;C zPxnmaM*pp@QN`xMJ>demPM*i4=QTIBVtJ*da<)}o*K(H)$ZaWzU6l`y-R`PChzCti zP5VZw)eZ4-)kQ-F#T}2?=@~U4v^x}A+yKCi1lmY^$Tu`~XopHnPY5pYE;hifZS|w6 zwq`Cx5<+zu*VFJ-@te}Kl#NNjI)!MxE4{4KRli_;Vt6m=(tYo~^H{8JtjeFyR&gT| zYZgfZiYYn}$`pTJVd8{_X-5s(ReUv{bO-qeOvn%0-9Ws#WO?qxIQX%UD0*4ZU&;k0 z_&tU&c(hRO=%z^(t0pu=_>CwHkdOy9VGL!EWPHXFF6i(wPs1iZ#dGda713i#ljSfvvc!Fyrm#IxpZllttw2ShcNVb75Udw!L zG0%|R(pJ%d5cIB&|s6mgvvchU#?P@1yvv@Ht{8Kc^=5QlK5{7ElIMi|s9O-YBA zZ3?4J6LU(VsN7yy`s-(4$kML^ztWL*iYvLjfkQSR*c`+wUC(R?B|T;19%Nu+bP$}@ z;P+{^EH89ZPlrF7Ig+l#lVABM((^-pw}K-hDDfwPJFy3LF506(5z*SljY zu0PkSaUR{w>sIPRB==|j&<)jZ_oL_WKl2~VS#Wp*J(W-9p$FR*a(wz;6Su zT!J_kzYpv+-GPW4IKJs%v^~Sdd*=Ms8C~q@d1$tnog77vZNoZwZ7MijXPl-Gm64@dT zRnCn~zW60VGY{oQTr2RIj68*}BK+VXA5Y2|S<06L$|p$!cr%UyjLaO6C`fLiP$vW` zgC{CP10l+27+;?W;izFsVd(fKmJ~eHj7HAZ_rHEv;O7AW_~qsR9uyjvMLzt=&A1%)N0vI{dwl^KcW$J%V17U|{N*jtM4ELa{v|UpdN{z{# z0Stc1BZZy3p>#cy%e!yM$J&QTO752g7{Ow9c zgt;XuR^j1zg44wn@KR@o0^8wuf;9d06n(#}zT1a#Z+R3}*q9O@1-ar;(6I7_&d39j zp&Yq=1-;%p3BWt{pl#C6r;ZBT12;^D@161)5i@R1{8f%crOx92q;Z{HkhIQ9&IE|8 zOISfYEyLtLr*Zu4Ww#3xG~k8)Fyd28UbGg|=VbHx>Qy}p(cf5$^@S(mQy+X!Y%DIs z^X@(uFaN$b#l^=j#ozk5Uyg?sC}clo1mApPF2+<>HZZWj!@!P369yD8dAc1ijH!QC zxbj~fwc4n6R3pQ<3PuS9Gs^P;-tz=2bVylDMha#(R{B|3=>dKQ0F-A=rLw#OdZem> 
zj`)B;0Uka(;Kdx3%S0}`I2U(VtO5tLmi+Zj`#?Lq=Ls2S~18l7G@=4Sb3??^0ZFv-Q!2)5cy4W1Q8ZNtgBQRS_yfgH>lZ`8~9DDjTB@m~6?rmjCJWP^Xz+ly23!OniH zc->AGYlOuNV387p3ar&AqUU|>QJd`jV(Ngvgkd^+~x5@}d9`{d7>6i4C1Knpz#<+Cw)AIi8L6^(u6|lh+(WnU#neN~fr>D$Fk~_QT$#yYdXLRG z?T#bV1S2{zpDwsV{j6SZ`1BB`tWBoEJqrmefU4?g6kuIhfEi!#KFNmMU?8{Z%fb~f zv8jO5weC;;oXxoMAI%d-8jP{og5BBUl6%~^!sj=Tjhq;wf3iCq-mavRyjrU1S8Mi`anexbIeY~L)Qc6HPt zJ^>&&RX5*4l+e0w0ka3io>)_C;TgH-cGUwP^TI!MRNklurYGu&{8D8&Ro-ffH;M=2 z&o~UJ1aG{4{bnq#H)CmiGoHM1GtOULj7Ofh8CMq9W4+CUt>O!(e4lfyUh_BA_tl0m z%3>=rVN|@F2{sm1HAqz-;M5h1jOb`1q9<-zsW$394M!iSjE#$qE8@9T(Yat8*@?~O zUR=Ds78fpG@uJY=RKpY6&{;!ip&entcxJ&BxxwZc2J5DG#e=38o6+W6+f7}$iGu|x zjp&T4u4I+M#h1SLaD4c~ACGtZ&b#6_fAgL3_P76L{KngUD}Mdg-xhCsyX@cmy?Dnv z-xKeC-v{D-fApdF!w-KnKJ>9ai4T4J;ZMYS{?q%N$8Y`i@5XQa?mOf6-u0gN@W($EpZ?5e;!6*G#i#UI zsxvgRxV$Eei;q-ji`NSkBYa=%t_CgjCQvlt=>rd?Vta9XrM769M&2W z>XUrSn3HbYXwp-^5zVOo-hJok_@N(sgW5!VthFKfZN+Z4Et=mBb$$1(#zZan{vi`I zkUKmO!OTrTbOjcj$&)c_rBZiUObqqaE}Z(`QeCdzSc&r&ugBHJwb<;ajYPvk;^$St zR&6w-+lZb;H*DnMQwRQ}60)6srM%c|MLUjd|@ z&9ldAP!%Q9*{QjD@ugjU!fLQu+t_1*hKByPblQ>ZC=d z!gv!9v?%x-Mf}d25|xiYHBT^)AV~h;j6Q%D{;9`{C-dmN)RBn;O_M2km^aBzeGo2| z#g)cL?bpS!GkZy8vI)*AkK2-@GA9&GA<9#3g&(Z2gXGNyMSL8oD z#HUx_)#|eSB|SfY z>4DDym-5Se#E#8x;c(NrVRh;mlEm-u(Z@Ee%&rLYMHr^Y|;zI`X9y<_4g(h`lPb)MxCPHu;`3lCl2M7;net@cRVLP_Q4Ovh4W9veYc&C z3F$+8hkm=$Rvh}Tc)av9Zp_=`K#KDAr|UFE8dLg;WX$@t$D_HqA$4WXx7zVJRl3od z^f-Td0MZmJ^uyXXpLmk&5jmQ(m$@yqsB$Glez zG0vQxi$=X3tE=nYP#_gb^@IzL_~sAfNuwIR`KB(Gvmihs|`1>)S2q4{T0Uou+EsT$pnleCK~t;)V?XqrgJ%3Cy3?GdXKrDvibaVVzuK}mL3yroN zP8!nFNY0CV1;6sgAG)IcaENj|a_|St2d>m7inx@a2fm7pK~9hI2{>R8P8^q;I`y{~ zZS?`kB%aTSc1pidV#+{VPAKu+8XgE=8xg ztX$N;L{N;SsHZUtl4uLzBQc%7coWAP;lRm0w3cC@!{L4ucK881b|10X0A9x7DdIUh z?6Se|@}r-kIE-N+ouid`9GFaUknhvV7wl;tO8-pH;q;U*qRYHN5@AJp`iR~SjAIoK zamp|f6fT^D2Y6&3%4d+T;}a+VXoC{(&WPu5xuwZ4qNh`7N;ifMEjM6M&cN7D_Pd@#aq%re3RAuvvB8($o>qXK4DmBSK>qm#GXj1T;|Kq+SR3a>dJyQ1MKaJXVz=iM}F?7 z-}n}ldKP4OS1|(tBVL7qS%5Zd`k@mMNQDR{4CB}QE7_eVW=T(_6t5gCklwa_a*&80 z^<>gfIiL_BLP$p-S>dlpe1AD7K_wjd0|~$cGKC}y5|sl4&Rh8aHiK~(4ZEx%5E_Ju zQV_^DC=erVBg*7W2+hSP21~q)cqCNvJDU)R!XYAv(ccT=Gy%j#gAhkyd%3fbFx&_O zPt=izhw1uL7-cfJmWa!;0TF+KN(p$_xPwrid<8bc&LiWFqdX5x;us_79MA81yh!Cn zq~zS;?Qar;R|#+k&mgFP@&%^cC3!I!k~L_g%jeRW{ktSF=y^ zWE|fnp!D)7!d-y`M|9;l1t3`^(U_Pn8vszs6@$bM2j}Erx+?QB5748&#-u$Hb;Dx~ z@c~Z%^S#&8lI2s%1Nxyn;ZgC*E8h)vQAXAU&x9x&8@>tOLMSC8&c31ku1NIx;r)JW zH@9MK=|=4JHsj`{FT^|F{%>MfgPr^Dzb9V!s@KF*=P$%x{_p;6TyF2HNAAa@_}O@M zGABLYSE@&EPYhgqnt@NHSePEsdqs8Ccu#{xzJ0|+qUxxA$RLuqz&oM=xCfmDopY@j zl{3GjF>L7Dibr~(j>QorQ8@9L@XUox_%Ln;TvXk>c|`T3Jp>DrbY$tgh#*56BAX{m zn1V=eF5nAXO6SRe0;`mt{7@|k(xC-~n@1E6JU)4~9^u0MnKp)Al$g_6Q0TUV?mjZe z87sDLQ{Za5n$n4mc$5c|`}9f5;XAh8^)8z5q`wAyxronXD+7{TXd+W9Fwg~f0T(PV z$Wd;11Ap|t++0z#i}FTs@gPslTG)hsJJ97q2#mW(< zX;P3-Z!nbYW=3o}kyC?T=!CFDqAd3l@)vIL7HxSUO}O(Uo^asr{g3e3feH6FKO-tn zd2e@nv9HlG3v(zk%1yBKyv=R5CPWE^WKZ{&Vyx;sKD~OG{9bwEQdN z$Ki**o(B6+T8H6KnfBICnZMt0muU|h{Pmc}J7Qt;z=xorqmM!h_d%9&K%Yr`!Yh%= zz=Lv~2HU)xG7iT%hwe7-^iJv2&~z?8WDYdW zB%Dtv!sis@*s(c(j)eH|LGohJpxy3>XQ}KhlQI_nn4kL;f|HuetPV+zv)JEN`}3Xcrf73h{Jbgt z-x5DX33s5VpsCV^Re7ZL>*pWgB5x!QP)*K5LOvC%_Frp?`_HLj~q zO&C=y`qQ^K5$0>Xm5sl3FS_FMQCu3V$jp|8bUxj$8rHM!yQ&S_`Y^?JuY8ejLSDRVxujZQK#b8pJCAx zpNQaru3D|#XezHAjXQ^jrAvy=Il0M%^z@wST%Yjgh3>fh_PAZoz4zP~XKuSK?z-!q zIC1V&oIZP-Zx0w2-y5seO>cbCfrVpFUa7BenuN(LPh6>9&-y?)Oixe6 z%=Cm8E8q|47i7jJ6|)Yyz4B6})%U5gaH0oOQb4Kyn^iCg#a|&a;gRqN#x%C!XAX|`MHZ~O6zm%qZN z_s(Ct9FrOkfnVcFl|hd1J!)X_?PG$UO2=rHh%tvgwP=9c^=Ci}1ax za?Z}5h`IVq++4aDi`%{R_uaf`+B)oY@ap0AItNv_^|=iPDo#UgL_m1jo)c37#v|xk 
zmdQ$s&Se>`c-TAQS>BAHyntm){jJ`RzAw{I|KYpY^QVr*xwF&p-GBDSW53^t%|-QL z)ybP@B^M!G@;2OpmD)hF^k?^3=>fpSoE)^Ln#p!#!)E)NHHicq`F>RID<2!|B5y3g zXqygHMtP?6_AlKfY?MpF!=?ttZO^N4fX4n=E@hb(^;mbX8{-UQ)L+ZEgd2yHk-SJ1 zalQtd*D~3&nlb`5Pg$R%PZ7`2c*n>dxc_U@r}1kURWD`xxlcN+9^$y(_`zr46)IWu zO}{J;PS6qZ=CEpv&P~UJVlLdc9uGbArFh?edSAT$^{3^w4M{`piwk3k4&(33@(l&y7(1-JVYp z(!c4SEQ}!khJsD};?o;jzD2A)rSXb*BYc9}`?mYdxb5t`Wc`#kjkenTXiC59h>s$V zhr}yJhIgYPTus!Zb5$ktRgbp#ZKc8?7QrAs!XW_`!q?ZuuT^$qd^DyfYLa^me|x{Z zh5joYKzKWKN_vG*ytB*aVkGC_SCT2r_pEQOTQ}9K^uJm(#QXRhm)MTvek-=5cQRII zE{P2pP3ff^#+a(3KhS2wvEJ&&Mo)Z*CKA0j8e?&MYBW|>SFJzns613n-^3{X&bRuv zMOJ)oANh1@ek!IK)wsE^Y8k=eKRldn=3BVfJg*wC8J2ko#y4Hzk|_(Jz;r~fH*2Qv z*id8jHi5`vHlJRFDf*fRqO#R?=v2%{5qDJZ(XR=SXfzGNyS<1-OBczI|9Lxr(&MQ- z2;$+#woI}P=wFG<6GSV7>&LnQZpv4P`#YH{9{kd zG)|v5Ci<)T6ASBGTiFhZyRzDjA&p^n$M)mgU3bL4`d9xV_7!iaw;9d#W%ajp(T?bt zai-uBW(1e&q{ObTe4G)DQnZzzby)IY#=~G;P~rD%hA?nxbJ*r_VDY2@$F>Jg`Ili1 zcl)K=?ZX4^i3Diaz>RUj!cCZ+W%)8sgYAepoL+8T%ilDI@-YmDA7TP$5oM4pc{OoL zFbDZiFZ0+U%xzHk$sFW?(F;UT$VKPtI^s!0l|jCT`kLEVnRwpWndMWC)iZz6ltFmn zhr6&1D^C&0)lnISwG6@^#}R|Sy=9m5=D$MU160{KU9^4<+YT^I%C?b%vIp@F=@Z5o zo*C1dS5&yej5j<9EFV0wZ3d5nu;svw@w;?I@{p&!OPo_DyY#MTpd|YgZ3CUkHm_us zsGKJeRMI1NrTe2-^C_KfuOla?;ZxsEewvHpd4qK>prb0APB=9p9tmGA`>{75xb3Mk z{RDUzFKT?Lu_vFD-D*p2=+B?{H4f2>#NWg#bIg>qLpG%K24$7&#d{vL+c1Jv@F$w6 zw<}F{nHGQH6@F|WNG~|`rdQUSlNWk!VV8N69e>t`xwoZ-wRq~%P0Ji))SmL6JvM)R zi4R}q2m%t@6kzK^6q6cyGm?9nGciZ+#q#8l# zVN*JZOBlG&i;?FE&;ziFp9A9{!2@b&NQ8>7dcY>3+z`W%-dP+Yg$GxKu*Kxbn0!~X zOe>RHdUB+yGtA$t`@lW zyYK;RsBMyA^AJED2e$K;&7T*~N_?q9G4Ll{8sIxz5O0~D7zkU@u!-0LS?`q30v1=~ zK@Th_a!Ms9Qgecs>?smh3{R1#UR(z5vV#q1tT3(xad8*vOZQRZXovDRz(;AETe|Q! zE))kpM-$9AcrC+Ab10we8-%N3jt6##oAe@&!Y&MOq$Be+0pm?RaumgQ5;mPO#G(BT z!GJN$W&YUtctgvf)ajq_9L7OeKG_T&$Hip&L09)r8pAm-C0@&Pf}3~Z$W9rNVZPp} z$Y%gYX@f(X#7hbo>>>$#zTo!lFKFr?@P+li5h}2g;%@z(ISelBQSZOJCOm<5rZMcp;vI zjd+Qj%FFyJxEi<<8;dtJ_*s+k*pBN@eJQ^9nGXn#@Hc{A^y*i~7e4XT_-jA^o3ZZH z`!O}du44^Qq=@^PNcl0*&#A!N%*29^`ke&asF0l;fSv)WMSoxEX}f*RwgWeXX-bNi59d2^{5D;hJf#bFb)~!NQmoaR!XJ zQMy&ofXR+&`~h6{TktFV$abf*NqNAjxP3O}jEsw> zG}IcZdxC+5|6Re%Brgl;?9}E8QF*FfEUMrq*{OEybeTlZz*kBfZOu(P^kYu`Dig2bWWb@kLv^(ypK;bVrp_Cx*bm04tXNr3!nc|eDI?miMrbO-0kNi z*QNAnfX`wcx9QVQL87mp_FsV=+7vAlA6sf)^&xO>S!g^SHbnX+%z#OjCaLKb158hg zm;A(S#v4C=L=Q)eK`Y^tO_Ty&=W$D#G9Kn=nWe#>d`NpZKIYlds3b>M`~k3sjcAg7 zEbn}9i)NjF(rc#S!Ibuq@>~~%9l;m

+AT;0y3zzmvcDNX9fjfnCu0z#p^WsUr&xFiYOy z^S)I>jWniy!j*`G!`nLDwrHQzq2f>C+3;_l8dAO#HCX(YVPOWk^g@yR$U8TH%Ek`? zfl=LR>SJs!qHgWhrgufJt*r?r7Fc%Vi_EBs-;6t+Wt#e*u_Cy}fJ1p3o}x@-jIT;p zd4&1X2TI{OM`O`CHm!%PuPd-Ka;$O4{ z<)x+86&}oM*bG=K4w26BosRyZwk0iegB(>`ATOzpYARf&+{7TW$&3*s- zb8~ZX{P>9&lboEM<TIDLKct-Blgy zNrbbQOa7pqG{|ah0`P_~!GH1c)%f1;dqX^R{;9aWupC#eT#ujmnV*k;^3Q%Le)WI* z*YSV6?d?8!e{*peN~QT^XnSCQFR|%{TNCmc{i@uR%B0G#N4>6k)FxxRE_tVSzRzDB zYs9$x>f=*-CvHPLgKwxRt(3|!$!#_XR_P~_gG{D_12&xU=?_mNtJH=R#nI9IxOR0V z-uuCi#jpOGUytAXo!^KHS8v2PpDviJN2|$(A@zObacyB;^s=k^-H8A7fBTiVzPO}% zjH>LaKQm!^Rl9)mA(h8dRo{8-t6m;wj?KjC%2h9ZNQ;bab1in%xAw#jYBhK*IDvO0 zzha^UU5s`jU%ur#EFQyRIvcXkWwzR#==ApD%Jn6m&RgE(CIIOQY&H@OxDu8PN-Wgx zX#6l%n~1sj(=ju5BIb{uj2FJ}h4F3Q@@?_rm%KDy`RZ5N{mNIoJYM#)m&Qw8{NlL( z{`=$kFL)rH``qU%&P(D&FMLtF^jlsQuX^RHU5Q47Dob#CK)e&!7+gOW(j1F-`y z!!5&-Z=U4Lcfv4q7m@eC4Gz(Vyyyp?lshLrkpr2AJlxL;dc=)v;-CdQoQ%@1h$B1Y ziJaKNExd{}Y((dYdHIP$Zj0g8G;UAn?)IauvC8SGX~~X_*z9b?wleK0-srgWdo~-Xs!j1B zHq9!tn2$4UA7kArA%fUbdn60$GDw)yta%;4jJmPT_VC&h={?Q3i35 zXVi^yGS0r`FzxG2&!R2L=kpn$6kP&5lZFD(aM*OB&YF{)^5=#ZJ2hXi9 zg)KZPP3c1+xD(q4zG?JM{xqw@4yVm`KgGg*PLkW@chhy@v>Bu=@l9LfCt4ZQlLUnu ztMvZ5=4hnDg&sC#yd(D0+WWsUeHknR`VI^#b;RcC1^qT6su~&Beg$PjU#JY)&?!X}pdL zRst`hJ%JxtqrN%OsCpd1{0ec|$banER7_1zi3cd(&bD;OmiQw{k;&ZnGvaUi-*N7w zHvw#EoZgn)^^LR2ezsAQZc??5#PDC--8TvFnW{6VXN5)h02>t8Ot5dgA6R;-caH^p z>51?x_}^x8LwYAVTtuTjCjKp%E_fM(Z1L%trf_BbPr9u5^HzH&YIW61yvw%65dbFW|riyqBBJ zycte$Sp0`Xc7%NNUlwqB(wULdpc{-sS7h%t*cBZqKI{OTLpy}iUfB9i+YezGF4T%P z(AVJm%1^2h;sJ7YT=z-jK++(Fd44vUFn35@^zXSyHe0f3AU^>r98rcKB`?QOm{O~` z>gl=xmwp)A`T!Syf{%@a`3-T}Wh}TsZ zj@XCW&spQIJjoU}uUX`+X5gm?xQn{vs4~3=wx0|i*k!S}OJjE#aH!CMQx4Kg=RUyQ zp?EgWklx0b-)h?_9oYDDZIw*@h9G_l-3;B`>m81l`FcDE?wIpe7&3u*A=XEbMI3Zt z!#=#1jcD?=T=V*?vSH;{gFmH%gQJOnbLCyOyUUZG415`X=4;Vop@Evl#iS%QxL{1r zCvix}TorTdY&PJtg6Q5O2xi5meael|%-Q6alyVhX6l4s66 zsDc(oEFM|8PsR;`I0=)JtGv@$5J30=vwoBYyfJ)9{1F#h;l%NakSSr|&L%|=VM#v8 z6xFb_jmuC}F?m=i|K6>Zqb0QmWy^0=@jz1Xxc!K%KpIy`lumJlta38Uo(xL#1Sj@X zt_cG~vnXZ6ssYr9JYL}UBwOMI6oFi_Vjxa913JPuUBChZdHX5|I*Z^U4t}0g1XgS} zC`eLSE>mz?u?H7=S2RZfUa$bzZWpRY{dpiD${-F#iDZ23bcqYfB3m#8UY!^5^-!kD zFCSpZ!M*B98i!LJO(FcdOcX$rHwizrBerj1P#B5{`IhbM1#kOMF0k0!`T4;QL+2wf zL2{bRL+^(5*7wrw>ocfR5}R?BhIhrG;rdH03y%gaeV(cwkyNVQTf@|PSU2EQ?pKru`np`UbwUE^E* zJ7Lnhd@pR*6*pyr2)XrcK7;1xce(A#FuyBmxq#-jhyF}B39ARBe%Ir2i-};x-&38% z!ee)MJgW6`F*-h{e5#qRo`hb1JY`(^Z1i&7vPrMt@>rl{PiMgGbUFvbT5S z7MYm!!dK;&i@t(I{(MuNt0H}ZTyexov;O31iAO&0Q_53e8URrqr^N`S%Q%hlVSbH; zbS6!+E5Xy{RGVqoBL|b2YzTpZ2?J&Z7&{>BTo1hpsVRX+ZFj)7BUrU>dSKi`Bf z%yrrt)UoiaG>$8OPNg!~V<)~Rv6L|zDkd5XnuLeo3Tc#vq8-E+NFxdm-({glI1o%7 z)k``}th6K6mzQIDwic&P9QS}2l%LW-rQ7ZL+j7-P!{7Ad^e+o`RF!Rsy8Q}RF7{O(v4dT-QF}xQK z!H3@+1ve&nXtr+|gTLhKJO=^JsIV2suVi-kDayz~pT!seMN>S>PuXSOWjf~v$)T~sF;}yeF!MbLx4#5 z!;jPYT#?QRJLn6ZUt>cHVPdxZUY>Ym5(OU4LII$0QwV*MK@0HGH*z3>tjG-k$rC7B zTvG43$N&_g9>E8nE8iLLa0OYbwWa|>tHLTh5<0jH(>86VY|dRos$Sg7vQ z-|Z!MFxfqkr=&;E79a`(Eu4O;OR$fPY{&i2zdQcc&;KJuUX4Aq?@o6khPd)nB)`qT z&|@3)FYuYhdSCGv?ZV#>gJhiIm-|Y`!Vv>t_=e>huv#7}Kk;zDM4tMZdGRk`XX7%b zSN%x@fy*XJZeC%bo%UqHl<^81vNkuH-Z}5hYbpi*F4yxbZAbZBU097rpIV4deDU#E zTR5Do{Zxsjwu%MdN}&hnL2H5 zk&&bq-7#qi%+@&+2RV#x(-^PEu`}nQHai!ioU{}FhgbNM0noB&mwp4jwixFO14tm(VmRiF*!gVd6F8wF!kcqNaFk8W^`-7mcRgzyc%-nVddmGTSnZvF8xiu)`;Y)VAvD zw2`1emBPKa+Kx|s_RH}f-to?O@4MfnfZ-UQnvA*md8r3u-W2lXuRId}$FILlsUqr) zN%cdusWRXMe95!m1*0RoF*`jG-~O@}#>wL|(b-y4n+PA$uXp;**!BhhK8+Q=1pvI$ z$2jd_-A8p`oX@63>cAL*aXypm+tOpY3R_+8#=`PiEUmU;ZL=R6Vqw6vCtdCI**jxq zc3ypJEVk5Ev$L~t+liC$g8QEv-}16=jqmvO?}*pF`c?73OJ5q_F82cuyfE&4?!9sD z_PhK^lsoRc%imo-E1l!isnanpy^YNSbH`7{=`***nX|XY-Osr@G 
zSH#P{^<{DU?RUh)%uKAUEnAOqKUC8*tn5R4s&BKs8=D$munAyhvg%DSj1lPDoPdEh znTH7tJlt>uU)@80lLx#e^4*7q#JsXSM5f>;Z%5Qt3!&os%{U*srrnnz8_7Mx2~~K zSM_E=2!5RlUgW6^po=SsWxd)V=jU=$1LgQu3Z+MGDo9V>YQ&Y;$S=lq^mD!`4s1vf z7U3$jisVNto`3&y;=BIbUyh;81%J9jRknQRgf1u+R4P3CJ_l<&abP-R&MEmb_%-|x zwsGLvy1aafI)5W$*;}U3Bly!`lSlD5xXcq94@{X(mU&DO$N3a#c?_m(;$WPN_X75! zTo%cQ;`tjqh@;2t#y??Ydm64%9^gm3LT4X5i75GfQAMG0pvAwnu8yfd= z|9L|)cw?m*UwwGPo1bnhwqkRm6I)Ggv)Ps2P_aIPOi(vewa_P1{+IJ|^g|8uz0ksK zQQVRU>}-y#R>e1fUWy-lrA~h02h3}&a&lTpnWJH&_)v_iPIVk)`lA07$FfVgI317Lf=@J5E8^?&KtB0FXHZP8#`Zj;GB;?LZ~3eR9%(QEU0S2hFmWKpmO1T6s7#& zgOD`iY|kfBJ!ppr=EszUEidY7XUbvb6S##QIl)*(P)EDH9h>VNfAaFeg==wRVaZnu&0A;)y4IaDsHgM(m{n1Y!l z6zP|BOL$6SSNUGhtJB%9taHBO%%>=$$d?#)?Fx8yAM#Q#JJ*La_z}^jbR4mZwkr*^ zRT{$qF6cMp#f**WPZ=bqp0tQx907QXLK7gn69r{CrA-vv#N|yn34iG$8X-Mp$s!tY z=@$cZ4f0VLE-Rm-cq+{Sy=1y9iN2_Ku4nq<$O)jb9S)g-CNO2&kS^V~=8tWEXHTTG znc$#)zP&?!Od9z{0Q|CeJc%slh`K%=Iq?hc;fk{uW}-o~TsszHjdO}uGj5o*bo2s8 zrepHu#YnWL-0+Ze(ic2YAwjmQft2|s@gOU~%GJQ@3)f?Hc`0@_H{#~yuf(T5{s&gP z@3`ykxa;2g;~j7NgZM{pdnl%rj5;+m$l#kM7G>ZMuVew9MK6kBK$F+_pr9zW21K}lVYKKclZ=eze)7_a@pD`Q8gM^5 zJL7N3c6;quSXz$x+1Y4G0b()3ivnW`>q;rw?)wV8iTP>oWOf~)7v%}>aBWqTH%DYu z^q##T9~3D1C%8!3*|ThytQx6V$r$pt{xuK}PM3~&5r1Ebkt!v6P^R-yf# z25v2`>gFbml*gW|RDW?9DtCExHLhH_8P!HD&Yn6OfAohRjK`k*n)*R4#^pCXITL&8 zdwgP`)oDei+x0+GIk~;L6#vQm1#4F2JpR_IuKa`(kR)*NnXL+Cn2poHe4M8In^Re+-HD-^^#+`TF6|a2t>&(ZR z>ut*i7OOb1!Y3v)DsX=Eg^KzZ3$|INdLt7uV|w%s??xwvpTMhoeGNv$BNa{boZpx> zT^97nH+fm?WRea;82W5(6cc?Os!YmX$|z`CdE^deA_)#6L)$8MW_C(-tBIEicKO4D zI1vP|?{p-;R6iC7z&1RY6F#Je_iVGc%ZVj!|0qtQgToyOujS?Cc;u@O$0t7WvG|jZ zf6RSzXRjZPMkD5r%`2aV(9Cz>C*y~I^vA_0r(>_PA*$JlwZ#?L`|)eP{?2&p+Pc98 zWWF!TVkr1Srunu7!Qq7rA`Fb`E$x+j>_!C!;ax$tbdAZeT$I^N?zWlXo@uf$v zMzgCp(ml8ueH%W+0+48h~v|9FKyMIb&?u;HlA*ac&$YHJ9^G78i>Bn` zr1GnA1Cqi)(YV5eNa3Ip^*RgS4Uc7zkvU!yZdeo(A4g{Isjqe<1NiirWuzJ%-3UEG zp26oj#-z7N@OgtmL$c))pZZKZ&L=$tGz0XC+T5D~(!2N;e_MT%%@wU~+t=^2p~o_vNm<}yW0I825$O?X zd4VN=wL6pkeCvK+vJX9mPg1dQ6xxMDpp&j|^x`9*{7n3Z|M=VSzyJGRi!Xlhk=PO4 zf8dYQkH7Y~>OSSZSQ!;P3Ff>JLG&%%mv4mo+rN^puYB1{Lw70tQ@5p$SjNB$RW5m!rsOZg$Ble2SJON3@EdGC2Phe0 zq|6aqYKv@h{R>yL7gtX+F~+8zagB+d|GY;kNuD~h$G(>mj#D8ka4}Q0@-HN8tjfy9D(~P1srrB42 zfw$C@FE?Pk;CauLE?t*wS`^(%W=RHcf>r>!ZcNI7UpC*HwXOWMUvTX-|L^af~WXQv;1Zog86xGBVis5Hih7}{M>37>7JX?E7)hkxXaaqj~! 
zjOM~+$(*j+8hyuO4V6JXa$#FWaQR)3dlR8;yX{glO7KYZ^SFpm!Y@Ddb%m_ z@r>mjHb?V2V*g)}-t{Xx>QSmZh>wdt;eN^H9+in6jc&jJ0(cm=?-4Zz1E8E~ApyEL<=Tvo~BdW(8JoLlL^JYXCn|*KD_> zFLYS+5Pb3&ALlbJW70)x(ox36565CmIAEFr+yEyF?cq zWk)*ahU(AUA{!ZM!rNq34#}6U7kt?4#HOfy-zbEP;&#A_>NPb{mCP8ijE8oqL(*K* zF{eg}-|6TPj2wQjyj48&yNVt~CmEuW@d`%0Re32Mbf>`bK`&sTpIFe1Z*5|sUV%Qb zFFsHHmcK5(Yjr@p5d=4B@E>)vuYzR=XYESB- z{A}w{pMl>%Ul^xXp58FPe3)#YV4fG=;ghv${2q9fPK&VCi$dRsc$d`w5$HR$9$dIaQ?)xm_9iZXV0C8fA~NALd@Lx zK*Z+N*l9OaH{fDZFBp*~F*YCx&q2a8!qKgUIL~B1i+Ni5 zXE260>26SdneLk|zqJ4Vklz0pznp)87Sqn>z#Mv0J^e=;==GL(DkqzTaC^u`2T{9_ zbqU5GQRz|Iis!b?H~Njk7_%0hWz5W^#5Zm|MLG;sE`0T5DhJ;}SMqat`Oqg^IR8$M zrbi^bu(`J^E`3FNg$Im*0dr{v&Vo1#{2#<9UANi;IX;MZYx=}J=GJt!?G(QRZraoJ zpg#cip?D7CKl>c$KtBZ4tgSI~O+E}5I3$6t%I&T@l9$}x>d(lD7sD^SmsYVkY{n)4 z-y$Lp@+`+DtS2F#OWpl2?yYg610xuPdp@BI*vJC-D4WfguVP*W-FHl52Xj#>sekw+ zhAaBLK^>;7GF%q&P=>LznKe2vbyY&qOH}5}=~{2fK)) zFf09fPn&m;Bh<+TvF64u!mQnuaQV(hq)GV^5O1!R&FjaLp>^{RJ>AAtgy zVDzp!P!5x1rODz%EK&;c$_NN25xXM+;>Mu!kW$#p>-~wRFa~F&Bd!;Upu0@Z!V(Id zujIs5s~}iCcv5*eATAn_My77Ybv_BZ@&ZV`B|gCgFee^t6*xyo61l{U{lJZ1%61^P z13zV}C-LX^eF;_z0fesN7x~dZSXL?&6&uRXYwuwCOmXHY%(`gvh=Qaf;!)^90z*B zQalJB8p6VqY4c46JhJGOdAN8R#ZcV5{UPJCThs>2Wf79}>iEVM&Wtz6$_exa5Ja=# zUBC2;qOamdo%mKaV3y^1fk0v9iTzw`BKI=0qEnOJW#GnS8WyMqVWgv66**Y6@)b;A zQEv!6=!k5b4>1lNXK~25oxm~WIF9`G4%!NQ(hT4?m0R-AQ}87iH4}6hKjXO&@^tZK z`zn2*K<;wGM{xb{c+{tFi=okz@|U7nocIQgq$BuVmgCImYt+eou%tP`ptMBW-R~<8 zmA0n=3UP*|P)9#@JL^8Vv)kH?Cm#A(yz^aeij!K!d9w zu>H9X+D0_VH*FbYN{%@nw}JkB`b4FG1+FIMlqrKT6#cy3UINQFm!I**#qb*KpF?o%^g;($FunU)oH727pAX8e36}7|=wT9BC^COXX@KurVSrZj=LP@$5np4;seZ1!ht@`hD&9RWm+2u-$f7Vn zgQ7d-!?oNDP}ohsz2CMRqb*>XTv@-Pfoq?UrE;Xz>ABocVcxDZ45Ek&4%fGKWAXZO zOw5kN^X|I~5*ICi6Ag&AnpQ+OsW>vm_rx2bBSER;Jq=>|d;Pfm_IqMagL4+#=}Xkj zr;b#XKXrf-B2YLr=RXqDrDQoSCm2HNkcn8@+ByFe)Q5N*z zZ-uH2%wj|{v`@d=veMSw?ub^&U$v)f(H)B1#^z>RzIHA6HtfvIR6P0Q<@nNL4=ej^ zUo+@k-O!sFkG{h7uK9hvoKWp*z=(We(TCHUYwN4Ava%vNWPxGK3mj@L!Nv~CPHb*& zcv8UMc^3U{tgOeZ%DwZhyW{x$srdA#KN%nX#7E){-}#*}JvAP~HT7ThL(;Gzh=C=@ z5=K?7>Vobg`trsl>L=m?rAD$JzsQoCr+j}lWBz1EVbeG=#lpTHn1I`?crW9Yaf&21 zD@7jfP1#Mrn&rpCgjco}_^#d>(SCpA7V)5sO~wTX!dy|9^T&^ipQ;R%)$X-od2P-09vvHs z@BNVqYfEK`DdIK^Xq% z9_aRNd)ilScz0C-yC>elNuSeaj>Y8EX#Dao|8ud#$!KkykL}im(sCmLr=!Hng=0QT zuqQqbPQiQ94|&t~G%!~A<_{_w{B!c(i&YE;k+I+$nS>>O>pz0uw-N!Z^7AbKDhK@m zUCyZ%=X-)xu=yq<7STjg$W}I-`1||9d%ZU8&1q8;*j(e!VjHp& z`HW7`5neTzr~Z1-0|c%OSVBzt(*?o}lgzm>F?9pzy2)bh>PD=rH)BSF-YF)hC#Pag zbl2^)V_P%>{XvJwUgR}2z~Zq_5U8&pPqupqbvtNEBMg;JPWH9YQpJhWJ<0iD_v7wsvqxRO88hJD(C+e}fwjsJ; zf>=h~fu}w>>jgA#xK`sbv4GwojZY7HEp4Fu6~?JFQI>eat~dIz_|N7`I1!)HQ85XF z2$yjZ3v1=V9}Cppe5Z7bbJ;}cQ~$y--(BZ(Iq*B$vpPN=jfok_W;VS^uT(u!rx$(I zOo4(?{z?n31meK-yStLz7vcl|`Ga2k$Bfk{)yCp+(p8w~Ws;bFx3hyj&Us z8fx%efAP=h>FK!rj@#n?=e{5&8iH$MD;5@S#D@B6MFi2K4+w{wZPAx_#+j4HOfO@? 
z3wk`a0ni!gE9fBz9!LwFH`|O(1-`R$<@hc8TpDy#!EC{sd3YS4df}D)Kl4bwSR@n~ zWH^t1lvZg}KV%gfL6K|FBYhXWIpN55Ar6p|8>jj1xc#o^iywUU;jd~8B7BMe@JR{A zAl~pRK1cr@l8vlm)xisB;z9m2PCviVZw61hs5tWOYYb2ymON3K6EhPrvON-)+Dow^ zz0&%#+y#8wI)y-@6*;BKCIRFzV<>vA5vWjarc&9~0cb}R=8gZtC*Qhu{@|C)Ng=)n zFF@C%%{=L-O1r-+Efb~S2X`(&s!ymaFixAo3n(9nLK<{&Kq0SePWrqs&bUP7ny096 zK~umB{zdc33tFsAM*Q?&{OOpS6aQUb&^TmAdIB30*$9=gjntISW;9?>7M6dU%B7ER zk_)|nGE(8|hr|9$(igB9JZwE>v_ak}gB^5B zd4(aZ{oxm~d52EYk!BEw=a7G4-0#s-9ahi`y8Iw6?NGiKW&70`7f)ILBHY~|4^yJ$NtD zlO^Erhuh`zF1(d)KS}c?|2&-jAkTbP?^3_X4P5ZVJUqar!d$$7f1_u}%kkMbyf6A^ zA$v-);NIJAi;sTf199!j^*DF?>3H31zCG^0`~KM6Sc}CQH=-?Fc;)6oEL^)93s){n zF7|y5`mlIHb8SmHU~~kHRAZGd8Oa5emOD<9{S!e zI5XXlZpP+e_^UXN@B^=@OV_MOrzf5?gxGGQ=W2Z3>c+U>n?J2_#84%cmzJ$#O;62= z_j1Bq_=H~wSa1yQUs~4qqaz+()fi=bMCk>y_&$rt@D_ivMXiG#(`;=^4oh}RXJ@_* zotw1)J{QyzPi29=R^3(jzI6|HH*Ls#nZMiL5-)dt@cPl1o8pE*HtKbZQy%~^VQD<4 zyqJsPR;0FcXLQMJ^+(1;zLit?LVx`7={7c^3IwHxe)Om>zzcZCKsL9!gv`E)b1dNZE|ZJ2HWGRkn&p*~l7+UCAM| zOUp{aL`#gWlvHwx{0LLVyKHcgzhys*d6HMc7Yp&UK~H({Fc$0a2)|u6Nl1rFcQHRm zYGvpfG=N#KQE%X;FLF?dPYZc$rTozCnP)>5QWD?sZnq_G1z%SMvJqxtwh}Ya@80yL z?~B*{w|_16yH{f4%C*?rX{*iA?a;dg0O(VB@Hq218UUG001xvlIQ^pnuy1+CKp`?E zj~MWZqx{1O553bmekqrI~9hxw=!lL@Ws3ALYKF3B)wBku{M;3fD(9qim(FyLBVBu zo5S|uw3sq|2}fa`AwB(@cm$Q}N&YUs?DAkzj!)#Wr`+hxisN|@81IDZaSJiXm+MYGuw4Ct`PE#)iu&YBwY&TRB&Y%?Z#&fK+8BV53j z;9u}c)^PIHq?aAsX%rb8YA5Ygs!*}GzdTw3ej8^c^Zh!ONe6f8XCum10f`g zs5T%Dq^dkA2p(AI4`m_CW5)-`l%Ft7<7)Kf4FcW(Ac5nZ*>dNp3KocVr4T85#j}zD z;+a5EJcy?pz>;s#ZZesFSyo=*p9UPzz&W{^CgYY*;-th&@|IMv@Jc)JXM%QwGEL$i zud=?tQceJ1Y}rX>>QF>3@hjqV3eg6{nMwigY8Sb4U`yM&X%ycD$&nr0Zka4MP5P^@ zM0F3dBiqmpOu|>fh%v)NXyHfx1wA@UcE)9JsCTDzP#VPtPD)5nM61+;cofNkc0r?= zk8J!4{F%6HN*he1Da#t9!wnt5K**2DH{uvS@)J(D!q>De{5e@sZ}6>Og_pG8z*}tR z!6XQI;-(zeLEZpuxQRo!J>{401rUltYwAz&2{PUZSFnwoj#tEWm<@eV@u5}H@<&|_ zGvNhHw##JccgvIW5c+W=C`N&!0^jbzDi(f{uv0IUsdhriLtqGV#vg{0zX>P!glCvi z1(OC+D20^?ae`V@Cr-z3U2HF^RyfPP1-`xG)y-PusdDqGOy%p!DXtV)+K6;a_HZ(U z`6hP0iW|tU6`RX9VuSBOwp#JXgCC2FPkbR7W0kn`{ujh;x8EDT_|JbWe*0s~ah&U^ zlzoNr#V@L8%=7`3>60+xFJAZ+TnvJI0z(gE zSKgzjdID&K0Tq*O?99bqg9Gu{ng=@EKrrqBC=}IPcL8yURvlkm%eGS-p8fa}tOTvi7X#}KCyGv-Z`9_IyYqcY%`-}&0JCfJ!GstCOZgX=fHddG8^x4xfR@0zgbrBYY zbMZvM>wYb_pfgVwm~sOveU4xTkAjuWR#I+;l|NSy4-JnikinJ)NZ?fMyx&ERie7|k zCMAdW)i*^vLp#b#{$20VQr>OQ~vc5>t5Ks_M^u{=sN9 zH-%f3ulOwuYj9$I`-rpQ@C&7eDZ#SYO?WfBH}VS-k4iuM}rdT|~Q+Q=D+% zCIA*{lv1$Khjye-Fwj>sWj&E!H1@z8c4LmdA2F5_b`-WMd5c{b_~tE!va$42mhY_S zbi~y|76cXXkO1Z=j*bhr9`dmJXc>nMVMk!HDg2-{^`Vq|@*C6(Q|4WiV^i8?o<-Ru z47a4m98E`AdiF%SGzRGj!j0`mKZ6ID(w+SqFZu=TD$J-jbhOmD6vks2VTmR?bz-L# zzP!uV{a(CFgoz%uBYI`G8@9YDNJ+^f92ciEY?y9^wESrAB<0l zi;gW5{K=DpFfY2uuz<)3e)>L(%ka1~L*kuWHQH8xUW@gWjp&FUaT0?~HVZ2Yv3TQp zJo@m%@!)4a7k~06pNNMaek3;5w}h+xn3|l9`PpM}Y;HcLCnhBG_{O#ReXA)M(@MAm z3llm&@TcDt<8yPdvwS7SRR8OX3-Rz*9*f`l!_RwqosC$0Ld}*{TpX*9qvlvNxG0EFCPL#V< zAJd=P1)887G-ADro|%lZfo9}n5Db}nj3K@*fE=I2efqHB)8lc3+Mh8F;i@H;!{NEW zQC?xspb>bu26bBkm+xRA=Qyd%XJ-~x+wqyNJ{6Cg--=-=9&GO5Bo>SRE#fK0|RTsWv)!{7l?_*PXIY#>q41;swute!Tg|-W+dz>wh0_dCUJj{?cFm$@pu3{jbMQ z{^VbXzxWq_O7@S(pa1i3mi-g)BY);i@uoMuF<$$c*TntLyEkU%>WKZAu1|Qfw){S< zh%wZRk{@(k$qMixZDD!?r?^fCAV2AN7bCc4AK>LE}(LIPCGXh z2!3y}6TaE_0k1PFM)6c!`URT^*%ZKr4~T47d3)oV+5pOjGX|du(whL2?ykOJdUOS{ zZIzdA)>0XhjcQCcYVlpK{EnEMs7eN36AxLo&f`;rO!6bYR1UWw`6dLF3vZ=dXnjOD z1xMS;k4=c9m3nM-da<^)84Jr>ap~$pY_`RZ)MaL-=3`=JDsI2~-gw>XUKelvb8n8n z`8WP%e9L#dM0{#9F5O&;D~rvzwzL+@;!n$)TXB7PEmk?vzq#p~Gxju20T*o8;1&b$ z%fcm#p4>VDT<8?UnV77{SKZkA}o%U{l?0bU-PtLt(7@+A#R zgiE!Bn#uhi9f+HhPR#S&<$dX_9q}GME{wC*pq)&?hu;5~PBp#FZ zq8)hm?yhJ?<&TT!O*I-ZK30nxYwNKJZA!-YCKKS*7>mAuzB#V8L$5n|;<(aOHNII_ 
znJ_}hXKsLSJ&SoDJ^qXUVZcPc6CKjV-gLu6rDVEqy-_=}z)k%@FE~UlgHQAbA;tB9 zMr66)<<+R3{5x5M_W^yVs(-Q9PE%jtT3@B(Ga)% zz>Sdz@Dy%O8WC>Bqowf=b4l=47WUy^64O4=(dr1##bFeoO?eB~^f%=|pYnXGjN4M_ zkkuk<_bFiXXL=QGbxe=c4f7FlU;_YX03tqTg-v!Q*e=w9_G@_+u(;PM-_4ZF&y&Lt7R%Wvp3F451_6f34POZO41?w z!tc!NOf-(wF)_f1_GJTqIGgujVP|nkh`LbY2$TMEa?+(g+hVmh- zLAK8T|6yDHl#^v(j@sWieVS)U|FrrQ{?G#MTVTD_5Hn-xPwnQm^L%CzLYwlA5=Hxw z7vK_)e|QeGMOh@VDcY`pi#!|%p5;Z}j!Jn&{`d|aA|=8KyO2M`FP>XXfxm&bc!U!u zANP^+;SWa{QMUi7gKTA!`6mys-z|3WI6m&eF5VCDLM3@GY*{YtP<-Q&0_{)z4&?(r z^#_-+?RF%u%qV%4+`y|o==m?bi*vY*Mi_7TukX1Lcse&ua-&yYOohXM@=~8qI6OzK zJacSP_Qp5C3800}!n=_PoZ|WLF?b4NI{Db`aRd5-!n)0FZ0fmj zZ8$X1mt zuYD;78ob3m%7YZ2T_)-zeT0>~dJOD9I1=_OGVOE}o{RpDLs7Jm^4*s*<;g?CgI^Mj z>ullz-#CSH2mrj(u(AhnT$Wro<<3(As%P<@3M)jSeC77(AmwF(F7nB|iZo_$v?Z{C zO9)s2Wj;a~ZQ-4A;6!045GF(sx`n+S??4o)=`dET$%pqr`_W0T$xm+Z3=@EMz&Er& z`vIRL0Ux-^aCVvEhJL(|t9%VVj&FvpH&x!{ z4Dd(hnO6!ZK+ICd}fnSgrn;z+eHC(;V|rnZ8G5%+lTDC#qCh z`V64^lx}+a$NhO5MK$YXMUx1^qrczA$-dpT;&tRFe$W?#-`3!2bxoS@$=t00^r{i{@&} z%IJR7#x&rlF!3qsA7cWC0fh!QN&;+C3X5+l8&5>3Za#g_%Dvv%ee2!1!U43$APU;rRoO)0gdryrnFwN0+l$-s z>k5Ev(Fc=k9)#(S34IUNWHVvzEA{uPQUsJwe^`D)QuvuHRa&L%DUTh|QkVQiHSEmu zX*!e!=vg$k-J{KiY$H$^(6Wi4uaOEYoEuU!b`*YOtG&k5KHHKZl+A#BTnYm6l5byE zI1$rebA<-Kg1y25y&8?(<70}8;=tf1uZ@P&s;w1=1p`45BNQkCW|Kfuc)fOIEmpQJ z#HsnYxZ|!r?u(L z0eoML$!RuJK!d&sKy}9rEy2T)AMRuHS%q`E0UH2DMyJ#*8;Z|2@udiv4KrD&_R;_s zf`+2to@zw-34hxi(GDm7yII}{C+QlBp}wGgu%o`Wy4sAbt*!>hw6Xf6%D!^#dVJ-p z4~dTe%f1&Uy4}oYyT`y?8Ul@h>{mZVmelK_?`p$!T3_Duci&hTL53J4=$Hu>wfPWw zjKZcSXN-ToISUJfNAtb8)y0+A65Pj*osQM@h4`2M`gh`GFTN*U`jY3x(3J4N_r7aP zYzW_?D=)~Z?U=}5;K~LBwYKq@{gbD3V;}aXGar5gA1?D}daxTtk8o_62hZf;oriGI zi}S*i`4U%Q(30#T*UXp7nu*AU{$=MkyE3nXGUZOfkf98@%SX6Re@9fjtW(}DL;hKQ z>Kmk$Wf^$b=-ugLTwh?(yVFxQ;=2%;lqc$8dT3J~CW?zR)YYbd)AhsF`ydQ+*o`5* z;tt9$%5kyc52~kSui#|EkH6jTIKWuO(~rsl%gB39uI{Q#PIwNB7eKSniMN@I?Z-o3 z`Jzu*b6q@ioG0R_ck)k8u8)b&=uP>Hic~(|P)2{*VZu#3Z;UIy#K++;E?(du-?$}) z-ODUKl&9GFPOs%Bbyt2&@=U{0H>li>6^6Ic(o>;(2UKER&2D^RiB+Kx7~^#_`yFFQ|Hda-paCg zMn4vom*d^<{Xl&3(HpJ{la8JR)H5;+UncKS_x*jfGpA!b?r=Lo52~liMu+!FBqjz$ zTgYcVLjVeRPM)2Mzwo2~O+4_jS1HY6Y%kvs59><4avB$Y#-*dTK`l3W0KRl zRY5CW)Dewy!jp`1VU)hWz*BHY0MUMu#BM((3YqlU^=1zGDSinbb)qlsve8NMmdQry z#i?a4ED07i@3lJOeWI})@orAfcl)EUu-cE$eEG?E=&^QeN(fI*i67R6z4*p8ntK}G ziPV$_1K-h@J$520L-m-Qn~hh!?lti@|Mq_$zwkf&Lj2hudvm<}HLr}*x1IILnu*z| z7^{y*WwI8djfx)WACu!zn`*@Di4$@5jyvP|FM4Tw=j-1PKl#<_q~{td_1jZSn>zX4}E)+ znd-x)I^+nvlk03D{RzP{!PVfxm0Bpc_Mh8e7}r+DN1`@9;l*Ze8dRJ0q#Ll&^wP!a z@u3fXG&r*cpGBrlNiLw1_;wid3q6ckm`J53vnhZ+#&!B5lBdw&*eJILKsVwuyU1Z^ zhD9^vv6LC%z&gBaNQGgubyYoiLIi>W21Q}RWYK?-weZ?K;R^X|{TED%2GI}c3)b&^ z%MUs=88hw^v1xqO-{CV&lG%*?R0mjI@)wb);#+4@T=W)XDLhgQn6`CsMa0O>WTg#bqQ&)_u_yh3Pes%#74e!a_k9)>8}%9E zkBth<36Sro@K_m}GJUsN>LV(X%?nNGS)7izaQS*%y15n|;f6dWPfW$!$>Z@OKm5b- zPyXpYhzB0HPwf)1AYQhzwh}jQEQ_Zs#=^=UM4v03q~DbYwR z8iH>~d=GxbX@BT!Ou7O3Y`eW-ypA`dYfOtLNT1>4buOO6mzh5qlN@M>)@EW-edLbY z&ct)>dtt1r?9YGcbJ12?R;8=&sji%eTVHF(%+zF@n40#y2PZLE$U$EEyZ4Mq&3Djq zXawc~ct`U^&cM_07avl-EVgnx1{;TS4g$JhDFB*;H$jWaNc8GCAGPVou=!{JT1a?g@NqDEf-W5x_wApw27+2;IVixP{KpV!ZdilN!5!m^PM3ZU z&e-IPtlE;!%H*K@O{+=-FA&~F_W7iT#$t@wz_%XYq_6Ms`7rT)`mEj*R_H(St@wPe|->SW*rt0dS5yxhy<8S@_pI2gy8>K(D*VjbT(5uGH z&>?N@mh!8bO1Vm&Sx+QXo<1=RTS4bQW-6XqmK1|0>9%~&v#{_T(4XU3H&+?Nr);utpQPXM5Etb@ zHgjPmyn5Wiipsc=hvLx-DH~pAx8wz|dJYvBWJ3p(nfcSRT&UvlD2#`6_)!PfL-=>O z&=@vwE8Q^dAh?Z>5?6A$T=L@z^?Zi*#p{9DWywj|qTgXXijeu04>(4a4Gd|h19(N2 zr9F78*AgH2WH}$+59c8-@JQaS8|ez$?QWp-$dt5EwwrK}Hs#@86G^$NubR zzYt%2C2ukemxRftyRgl&Dia6i(TKF+4+PJe7uhw>}s4K+MAo1 zj{Bc?IwtDqLTl1v)}yQb%%VKk2M-TPcbDwi6%8goQ#vFj%h4> 
zEY6%c75ChAuXUp3n=9rY=nUu_>(c2rn%qvc##&$$p4@lnnsFi$tgN#|baEcYETW(#4o)psvUhHjfd zIX3^f0-2~m2 zE9coPz?iLCgFo-&qK>q_ZShF@NLT!nzp2S;OjIYLx!F=Y)=0R~i8)H<9Qx8j!YBXp zAOf2)Wy7DKHRQ8$1rEk}{#+#UGtyfj4f=C=?LO~3Vv%%v)e8y|6>DrK6h_VC5c$Br zM};qqV~`)_zu;N4!1$2O5vrQ=fOjBY8QU>e$~uqxhV)O5Sj+Wj!B1L?r=0F<|d=wI2L!_eNX)JfB$y1 zmD+QQ!!j$XHT;H62_2Vg~A; z$~7F65APx^^~m&2OPCGelp+3!P7fayZZi-08*Z7R9Asfxo*(%G?t$T@C>OyGPxJtz z@l3t27_2Wtw>2Q;O@+tRU`I(Poh~B}eTqXS= z_e$7Z4`GHd$s*&>ev~7a3>UacT0H1u@D|1^3RAz&G8MwS4E#WOaK%4!{Qd4ww3|D= z3ZGB1FxO8m)?bxQ97<{EqlUz=03YKY&r#y0s_;Y$UgmxnzqeJOur-GW>EKR*-6dJJg4@TsqfhYlQH;#;=d2w5j>BPA?KSD@f_d=rj`LdCoOg;-(9<>taTaT&PCPsGGdL>ihw zCnu_QH2T>S^6X|e1<02mDtBpfVt~af`!k5rAbr=9`YKoDFfqyij^ZJJl7Kx(bJ@TF zAp>g~`I^x{Xb`khI4=IY9pEQT){h4Rs*P`=2@Wuj;V^>3rpSkKl-ZzT8mZ)uUlx*i zq+gy~RG1#`5?40mm9r?NocJLe<-;fOm4%x{el!&2(Ka*>`FRjxlV#;Do`d&+tpp`J z342i=n2r3+44}`VJ`S>#co`&BexA4@zS{%09y+?c@h?*qri_N$KL^GXHpoVBxO_rx zjp+6%`wivWU>}O-Lg~QNrwFkhE)zR13d=h4UfT38x3d@kjLb@k>72GNZ8CF)x7g%C z7&Z%GS$LrIp2-vA(ey`>jq~c4JzPb` zQ_%o{6GcqJjg65`uu|W@4+(L9>v)K3Zc)a^gKKE`E*q1PwQ&s!70zThJRJJ+ zMKxvMZHK-V!(qu zzU7t^@U&!3wC3#-8btOO^dao|#x7Thb-f{>x7|&7N&|?lkVo`tCD+4(1@eSbVS;T2Wh;l`cY8C`exI&0Zi3vadQ4dtgp*G zH5Z$$mhu{o*_lb>j!kY{wYRjk7h9KWMVmh9sCv<_ z@K0Hk?aV1k(wL86gg>|&CppQ7zTM-hYX!4##ja=;Igv4JEMPK@JSN5C>0_f}zSEPF zF*7w4$7bhZer`U_o;@vjc|49yO~-`zB6)J72Qc*|c;KJNA$D4`pv1sub$v_xOLY{V z|Nig)-k84abhK_fql~zi{c?~A zuaRJoow^0p2S*VT_0c$S{8;?GxBgsIXX`Q4S&Qw~S_~^4i}tiP>EW?VB6{MAhN3wB zTqZOv50&TU>qX6j@kS}dA!%a3%=ZS|j-nd5QpwmV|} z)XBK*jyvM)sWb62Kl69uAO7=y65sQNHz@23m9qh4KJl_A8fWo#hqhB&?)2KRzazT! zr(b%qd$HSZ#V|sh!M0$ZnmZA%ea-9QFZ}46cW7SjdH`r4VzT* zG!n7}8Bh_v#wGJRybE7^7nD<5b;(FhlydVCI31HN#NrYFdttJ|DQ(fdXnIToX!6_G z+!B5`@uj{~uSlO@Q7~^3+TX9Lj!Ih_kBb+t#RvcB52FLaQyX%N2jA`Q`Boa?!ei0o z7swp?SF%&-rNS-aQ`!ufc6i)~ow7*pL&On$D0R>Qv}6O-$jfx4NAayGz^8C4e(JQM zG5vAEn6ll+Nud5;(uk-@T_mrQVCp^=0W7>Vs;pap**} z?*1vAfg2jo?}a*-MVUfz>H#VT{jAg7iH_)POFG4sYYXw%#Y?fSZhgnO=fttIr{ehh z$@tSh^h5EMzxtPBeXAK?dHC^o|NH(h{@uU-kMYaD@~iRox4%8!`HpwSyMOQZ;@$7~ zt$5FG{dTKjUj3$`^!(U^rSpXqR}ty zOa^)f)fn?l(M!oU;oF?Xqun!acqnov`IpNF8faR1Ad4U^KAVDkWlr?;ncLz|{^ZlX zih7JiDe{txfsXUmM#{}YCgjIvWyVy{IPkG30C--=Q09zrs>+yqp16&zp}xK$9ipA1mG$mVuekNP(^(D)`aHl+$i#s=-a{5#wjwk_TU z4j|55_si_l#UhJrS|b;haUrka0la|-`SBvWAWw|#qF%i4uGf4QeyJzE?%NLGE&DvA zfc(SI#*|My+AQJ2U+-!^coK+(?ie!@j{e{4viV&yP52+L!T-3;q9K}RvT`rZo;#zs zL-9j@>U-m!m%KWneN*EaP8FkPONY_8(l-tzFnUzKqAt|f@Mk3s>M4^UsXq^%BJ99C z{oRwy$YPr^)mrGggT$((e9739UuI;>>_^A7oi1* zm-@KcKhq(zkhiM03>$w8#{jw#KQd+celnlZRxj3z+$=DV#6T9$A!jZKlx2WdS%*w6 zdf4T14!2|CF3ka-E8#0L8pJO%K57@?hw`-F;{v;aIZEy0F<6-9s%DJ4OcA<5%NFFMXjuiFf{Mm*TOnU5pR^!3SchQHlGX`FzU86)Y9?F-|>tAzv`k z2jJ;z(w|mVH1?M0pFcir-JiLacDE^AiqA34#Ar?OS~{F>J{gm4rF_u;(J1o9SJiu> z7L7>`8Rq;!!}CV9SXo|=+1ZIWapIU4Y9Td^ieg;4%C2-wDTdMQcf>kJB#Vo@q*w5n z0ZzZ0XLImGeVI82Fh4n=x=YTC(XZ8apzDoRJDPp*Z&d{SvnDxSQS+jEy0wI3=8pur z+m;OgW5XKDS^uMriIn3oZoEWIC%)LI^49~&$TnHyw4J=eFQOazHH-L&V|XdUF+X}Y zdB~v7+D|z4&6DJ!e$CBdY$R}2YMX+M*uWD!SK*?-kT{jnBKvY3Ky9X4vUpFqz(5(? 
[GIT binary patch: base85-encoded binary file data omitted]
zFANNEEU2{T=sRHJZMh^lB^?+Wg!ItMlih-CqVLKJWFu3V7)>2f zWDox~_y<2Eo?o;L=ca#%fg79{r+52tC7*xXr8IUraIx(V;?HrCpqLDg!hR-P&Julf1>VOU3BQrY{bR#H#pw{nT7E&n(92Z#^1+@_WA-^G6rs z;yr~pG(8#nu(9ln)4xRM7Z)`dt#B1>(TlJ?M!u^+7Y-FPQ_T@Y2HFx zaeds6h@NX*jHiRWEkAf1ZI$z@i?TP1rKAh3E_Y>D99DXpF)q8^1NKUYi9sNu(W;qO zG9U(hkT>}lw<>(HT~d3*rsoJZ`jNWgiA*R=wl;QRN%DcgR#sVd6s9BnuIY)Q;2WEX zK@n(%NuTH}8?5k`9Q1L3;!h=|HK2S3lplR0Y-P`R&?^g)CDJc88?s%+^RTUyLAuz5 zM)y9YXS+{nP(b$w3V--{8)lfH{!Mn`*@-XcoA746k23m__YP+sMBQ1ikO1`}r@QrQ zM<7urtShB!ojv~qERLGmgyTRAhB1!|&FMxM-O|tY&Gq6isz)xf1KGYkn{AiBS8`F} z%Jj>E3V4K?sA5R&GZ}*cw@t}q@7}{I$Bt}hr6V}@3+ zkAW^0X3#GK?<|Oh679l~R2=eiy-_}JwPCqnm=yq-#bt4)swYM$BHZ7 z>Mg`o7Cl+ku@C{-g6{8yAqrbxZKz$YNvAADpL9OLarU_DLf^p|lYP2ugDzN*8`f0> z2onH+n*nv7r>^p)om{{_I7F_(ON3wQLHW85OmR4RgZ>sz%AtL^Y>KDwUU&ySRR`#o zjInrNUnTKGGvxu!p%Hi?{VUc%1v9dQeijz2(9eO(#}t%}!yu=u$D;oUPTB(HaDSOD z(+`MNe%K3cD;X{bXFAho>Vxp?px}H&Z`2!;Mb1t-0vFURI$DbF@=A{cmPCJ`OW_pC zeM^*2?+k+ZyZop}!sU&M<3i!j?8JcB1?16DcFI+nO6RG=PefyWUOMbm?$d?`)8FdJ z7>H8gS;cbtw!^sV&%B4C<9Ie)WXhd7;GVhd#|$Xm6Drg;7Jeui7Juva@)Lm7;j(n9lH#Ky6?%*Aw`136CJ6WU!^dfd(1o#? z__N^ab(>Hdl>9&ju@ydjqi*va^8npU#%5P7Y|iy)wi=2{dle6CG@Zxd={d2&w%b}| zc+_Y5_p;NjWK2itcbl6rH@g@oPaOA-r{$p`@BFs3ycV~8?dkaB=bnsXXEtMGd@CwL z9G56usI7n`27PJ2EMMCcc{vj2Y1d?M`6` z%XlN?Lj6<65FYI&^)=!0J1%|qlf(MSzdQRBO)L55bK;-tQW*X0$m(pqJDd~Eo_B`N zoMG8H0YG7rUy5XJmCTA6u#g^?E1pk6#Yx|`GP*N zg4DjSUr9I169+x==ZP)y&igK%tDgBwZ@Rmk`c(meBm0)!;mOpOCwl1)n)u=ip5suw z>xsGm78omz_Z{g0Z+072x3ut}Y3yyft^^5jyZ1t#KTj_-mGcv32OrFh1lNm_C7OCR;0($T-~`U(n6 z3K?5m0dH8{FSq_BpyTX7+(o&=}3r&O$>c~|7`Gf%8=^#njB?irw-VT=m&lc|b z)qj?=1M3s`B9H7nf2M57Q64fNU4^0XP>2j~e7C=^JiZ?@mw!7q-*X%Vl}q1JxD<^B zl+|I9nyLQWfOGh=^fEeir-m;&2~ea85*kCg)3H%fGDXoNt8$sqI4FH%BL;hT=Il%v zS_5pYrZhLCaIQ2htUT+xR*w}4+je6)`Y?zP*3hA{rfTT~G3^OAoMTn5*L=~bg@VOw zI-h;3{HRleuLn34*@LeVY$%}X8LfL8NVEVs=$gqM4C*!(mZP(}6rJS-3EH|8P#JOM zYU)gu!W;cn3CD62hsWekCoo4JbfUe!>H&h(S*x@*Q)lzGaV0wOjLocb6yt|MNWL*c zeq&u>BuM)u1dAiPV{rJOaY?&^#5`-9_D20EF7?(U-U9GF!WVh1AeerjdFD?HWk0tSdWH`mGa2A%3n~pR^0a0`{P3&`B?nf-`)}T zJo37|pU8BnKl7$YXp z23xiDyE6^9E?P!@q{#r#D8_;4SkKfo1_MTx6<7UvUAh~=e~Xs#i3gs%a3lW?#0 z$2m(VndrGjX_4eAy4$K)SRSey9$@2iz6*>V@AtTeNjsU%oRa8Ffb5_ldNRj2qPHXA*eXO zmv~I7C&B`&4NBFqjZ?)V46%_9O4amD98%?|j%K&vGiOiN+c7fSiI?7VLkx;X_H>#t zSdlRyB_CeHG{gyYYTD)Y%w2{q&Yqym4j6I~QCKzvQ3vv-Fg3 zlZQX+NP}cjQes*TlgE2Lm#4Gyt}B}Wke<1d9xe}uOJNi2`c8WlKQdjZ4WzC9bcrVO za4Br6;XEbZ^}{!KW8!a;B{FjRijqI@H6_7H5RSEhpN(9+FZNE2#oj%WQ64VEV~;)@ ztF;y7wi)|&?~1Fhd!Y>aLfm)%eIDpwYs`^}v8c*uCa;d#Z@pf3M-`Kam`pG}Iwe3h zV_7;3av%B51jO=CMe0J?6SP>B3G1%N@V?^mOJi(&)VzIAdeak6Jr$q+y9Z)PhPo$a z_ls9aj~pE>%OLNIYp=K@-u1o@M6_1iK}m-v9abK2RQc$%sthriaUeePnZJkKxT?z) zjs>72Ts%uWi2)f3VL7g{fMw(et8C!=;2ze%5db&5@CEVGSKOiyYtdaj9h;qoc)cQ- zLC`&u*NGfFg#_{4LA!}YY17HR(2p)`BIPA`NXH$~@JIZBXfsW8q>S(gI^Ky(Ua6x@ zd`jgZ8l9Q&<7R73xTST{d44q|en{VjOqwnr`PkvzC9lUC{o=_tFn^kq4cEf`} zz0z?TwPk%1UWLD2=~KfKyW`DoewQaNeD#hyq~C~E#lPqnUf&ST1dk_uia*fq$roJ! 
z{gR{1+&=(RGTd3I!Szw1>?X4cx#(IFlW00ylOByJQRs7e3Yu;PlT?E%|xgtfobPR>v#i(e=3W(#zv_ ze&gT8Rj+u548yj=}M2}$CU7z;aW*U7t|B==>9v^uLrKgYsjB= zNnT8(a^ID_jW(AlcZAiufQDXy+Mo+=`YAWyqoxs#cVm*4@>kopeI?%9Y{_P-wc_E& zk9y1GOD;HM{@B;cw&pGKaQb$ze@P?fecl;LdFF%s^g*SGk)l`Pbh`56=#akPOmy!G zLKNM-O?Q4vF6W<4g1E`s zwBa%-ea?JGpFy|W>a=6MT~oGcMKpE6&T;SyV1l=-Gl;G>imlH!*qa+n;OQz2;RYO$ zi||Xn9!}^3ck)8zP<+0#Y8)7H9^!I38n^^z2Ir9-^m8Z`e@igC4g`1J8=Xmi*#G=5OdGOfI3%2Yw6O018cl7k17AE_kF+ zZruT3SOh{_VsS>9)w-g=b7Qc&*<_);P43gCVX>0ZbdWGcsAGAW?GT30;D zaqzF)Nz0P}Xz%n3h*pu+l$goQrGE56$&1aF?y2tB6$!@YL*_65mr<^o%W++EubeX&dBYnj(_w ziMmpD32vpyoh#83IV75(JjC%%Am+8AJ;Nm$QCLHxA847syl;oI(}BUqj3}dGxgr>` z58Iyf2M!;9{E6r^*5ZmAUmRPKm3_k%PfVg+kv4j;;Zs3F#ufpEE~xmKSpLIi*SB#KbR^@2vbi*ZG^D_(eySEug1O zXpOsZsyfN=D�{Dnhs%r0=qzb|G4Y&V{#)`dn<)m!h*W8=a*S(O#aH;FEk5U9zhh zTlcewOd`r;(WiIA26#$#vr86gkKoJoq3`+wA3Pg{(1O~fCrztu&|X-uN6oTRDT{`Y z+ol`A34PMu($TuIc^ma;E^+REI~Go#bp0}MXkyo{7?Di}cSINCSVN9bIr-eP-c@W; zeBr=F7s!UG#&c(u;v*mXM%;RTGb$>}K(#l9hDX)rX@B5{zFyjk;Gn&T7~pNReUHh} zb`#MG)-0@E;TiOe7in{Xp)7vJ&SMS1!6J-qvnLj0-+b|JKNAP{UKD$;z9RZcLoqNi zAzaGrksXhWrQZS>Z9PMdHk{;~u%tU!Wg;!ObsIuv>Z{wP;o`1a0#OdCNDhC(I-h|8Q2AbT?mv1Nt zum)mE;7DMw+P$}@5@Tb#qC7kqr9vf!t79=bzBfijrlLAL=2Z?wR%w{wuxd1&G0wJ9 z6o9tM3O82vu65h7TC2(E)3b~N3>>!cCN2sKnY#l}?`+-6ng7%sGwcvbw236VlJP)i zF*DdGuRCWHe~oia5yX^D@w)9brOg1nlnI4xG#jxj7~1Wo&-`6)VaSOmFic z!0sTCLLI$bzq5*+i4PGNFoHye8MwrF2Ho(}gg+=!cyk97%GRLht)#YJiSqdF=&$Tk zc}ozWz6(V2D}(BLqLWMsR+0L9i&lVpx=k6DQpVSorUKROsALxm(1>J2HwD8v#V45ES#g;7IuTY?u27E*^VgTp8*tPo+gz76A6+Pb?` z?v0VrO7s;rRBsaG<-vIAORtN6_VYg#*IjqH+E8B{d-hnIICI(^NemJ*Xt=e}AFB-% zh0R!AmXYj1|86|+@Z<6EPk%A~@}r-NJ03Y2OU0$J zLv9xS8^o-++FSteBR-B=j7@LWL+5~mu zNeyb7l%1`!8={+Mo_;d!zWc#gJbfxAcTL9CSH2*|h9|rNbG6Rw_u-frABp|DXGCYs zSYB8b&ZW$3iBCwOM{LtPn5Zy3JmUJGBgec><>lPyu5iMHq8GmKdeM9#o;`9TZU3)u zZ2eD165Ci|V6!T|D;V<`;^2-yWF!OU%!qdftSS;?NTD3wgCmMh=aSDjR9HHpaO+ZZ z+v0P~JSMJJ=har-VMJ%O0B~L?ym~VoQ6GvROsQ`4OBwM`JTK8HlhPmnb8Imh|3Np3 zN_@hh-Y|K=GERQz8>SUvDI9592N6z9PNEyW=f^zwyTW0hqxV2p=xX3CuxmEl0OM1i zyC>fB1Fwt8i77$RamQAN^9ohvbS$Hju!1I6^v&iBtej;~_Pl*hE_Gm?=jUJiap!*f zc9(^7I=K9vyYut?$I>GO@O&Q3lqD~Zobl#Mc;0yj?+!=1-WfiRlX+w=`JLx!m)dsj z&fNhZh0k&Kozgf97td$IeEW4S+}oG*&u{;p%B26Z{=8&52C(oEKkDD~2_54H(egcW zvPkil5x5d#D$71(x@hkDR>TpHIF~h3Llg>xA3yvR}nq=kwr1(!G zP8>TKZRNu`|9vvXOO=u*n6bKbYHA`bxOiV0fEO-Uc^ne=4B$WX_!Du@0}n^LyAfj( z#h99zj05|3#ok?G%3se1cE`W|m46xIGgn5>Kr7mf74h4m%1VboS84V|OL`J|(7{6o zqfwiWhaP^+{7;O;a)eF=aA-RPz6kzk>F@^XK{Uc73AjH0lM_Sn;+t-Wn_hM^iNsp% zOl+;Sz1mxdr>fj+Tt75=oRx8|>^*k`qzYA6sXmE_H{s+5pb}ikUr?<}CW>dWT8b5& z?o3y_L=y}$tXXgHIj6eQSr3hFTQFR#7?E#BL_ea`OMGF#^zcle*gFWeLQz&B@T|1V`8Wr1<~Xj=OhaTuWS|1 zAC=sgykLKP^y6vb1sm*vN7(lOdLLz^yuw4ujXZdV*D0)E7543)>H^&(P2MwY3vP5p zobO6RnD9gqcGCe@^9yIJ11&?Wi^BwJ>XK>lp??g@r^=3>H!HEXrOgs~k?0YD$| zaQQuV&?PMjwPSmiE%C`i`*+8v^zKuq&&0yQQjCpH#O}Q_@%R%@MqB9S7?f+Tz9O!B z;SElw(Qb=}3LoZ^s#g-$Kl%?ivqh&n#|nU}JHPgI*$0otSTW_fZ+ap|rBA{O>7HO9 z8F_cx4O9CNdVT*birFX(w8|8pT3RDuDT+=dGFnE{LE4m1wOKb zM$&39kwrSW{8Hy@RtC|@KuPUEACS3}0~^kwK`;;o9!mLhN8Ux*jE=7>Ikh<@#%dz! 
zCfQc0o{}Ava7R1rLhF|IOg6=y+KQI+X8KAhLlx1{SWNGkl)N32T&~2--pTlU+CE&`8d{WcTNDdAQ6$Q(2nZ>7R2wpT92QRJZN->5+GY=(B>j~#Rj)tTWCBVoe7kI4X$n=Gh+ zF8Qvuo7EwX>ZZ;&knKvrrOt5)tfVCy4Vx5NV)edC5l!&|>@Xi6AvaiJQb0syME0U+ zpoNWiE0W zJiP-igL{c5VA8PClZh`I(zWWIj9BH!4F)=xSOVOnAsK*O<+OA$$s4^7w6IdUT#y~j zyL7a6V>RYa&&KlHnP{vmiVoL(WFhgDnCek@6&_gJk`O4^M5hDrNHIF#@`4k=i9V6) zA;T}vjzZg0=3LO4-pya-OgDN?Z>eswjP#nq^X~7|El>PsZNn=90YVNR^VjsrQL{%o z>kLR5dBSqBb=`+2eSif(o13g;r4J|duTYN+d>N0z%f`CsP<)5Mf6g~%(Mwv~lk^eu zAJwlYU%A;s_l(!TpQNK89o?_5aH34AKD`4U^bJ0A<$Z*Se`iU_kq7hvlD#UOKZn;{ z-l>l#>%Y;JzA2o(Q6_W(bfuJTkNrrF3CHx0p(j{Jm7gcF>4Nr2hs74O6Z%@fKj~9; z2IG+pz%nv2>^?eWV?1vtV|E{O+f`WbNME-5uhibim%b75WT2wxpCN+jxD6k0K4Ut{^&QNjdElweF_u~n*%=2(mJh^*FA7#*B0twX@aYbYdER;DE`tD?OLM2qgTDGV(!bC*2?`S1P35 zXs>$DGJv*8f|Q+h_Js@jGXLUFas^$F543+)!;-#tZN`sp3NPYSJk^Q#8G5k$Dn$eI zSFp0VQ(Kk}e>&RI6V@9`vC*oD=eB)ptdE`&Es1=cPRbjaN$r~fO}A&+2e2|Gyy%1B zCU4u@$T7u(2gBb<2fpVWM+78x5B;!Agt9KA!lPf2Znx10CRW>4kdCsk+Kf)49UGDj z6ncDWGDdez$u1a_{w!J7Y{%m2N(`u8o;&<(EH5s`#Q1RRyX?|vt@Xs0zw%K0<;NaZ zTkDC55na=hUf=*PMW^ynQv%mVHzptgjjw+t`Wnr+{PIi17$vt9zB)QB5)xizdle+d(Yeht!7ufx zdNtiESK#m%3T;=si8c(aC3;JA12R^FQR=Zt`rhIO(1uf+;`c zKZF&XCyzueX<;$+%?r_KG~e#b4@56{`^n4P%hA+sV~Xc{YDc!A98@$YTymT(6D*i; zoa!&-vnBhSq!NW^Wys)@r!IT=5pVc{`7nGjE9*J~_)$2v;!Pd*lr`N+6U7HY(9K9NM)XZpO*B^uMi~VJQE5Ve?t%@~mca?Of}l#s z%i9!-3D_T470&0{`I$VGjL%_~8UB)VYN+;ok{JCfNHnmd+Br9B}7!H>^7;VCR@)Nj@@4GI^Q-3FY zU{=OfPWhPf(>LcygjAMXV3K4?mwXeganGL}oX-xIeagOXf8fpj>Dl4@Y|sA}PT^z_ zxSkvj*z#vN>h+xbbY}|_n zV-Vbv4MhKJ_hti>jyfq#Hth7hU0;pH^0LyR{AwQ(T1*;XRxa(gI8=^eX;=m=9UcRM zN!fwZ4JlNFM@LqBHZF}x0V4w##T=D^)eg+cA1IGRd3bO17I!HgMzP``kf{&iP$K`U zY`*hDT&2h?+I424H{$-gz7%7XLR@Qj8{k1qPy;v@_0a&Ddij}r1H{UP*PAn2L?*QXD3!x8&T7B@Zf>?xu1Do zyzP765*5|+%JOPFckHN?gt`okz9fW{+4?3cng(OJ)``VhGfvK}#Mi!lU;OD`ej+}3 z+oN&hfv?QHo*&$9Ybq!&d$b`j@N4C>5ClExQMs!!I?6FNG3gxvUUb8a@sgLmEWZ2IuZioff1%IJZnV_KI_+3p zT8Z|C+Aif$xj75AskV3Q^vU@9+is85xkkMFyI&GF-F#DAc;Tg@v1Ol&&GuCDb4yVz z7ezl^4_4GW8=_|>Hf(t%2fIDE15>zTAPoM@Iin5n5TCmzJYIgql{PewA3bKC;z1^i zQK4*23NIb6%%JX~d<$>zLz*edC;W!aeGFIXC#|4Jc`*@3B3ERIZXk&(`HInNJwoq3 zyHbj-ppcSZi?)>Z_R6Y^OW|Mi;hkp`4myKA;j-ib2SDx!4Ed;zc~=wufd+@Fo!o!x7*Fn(^ z^BYc_J{_ygRqGimXP%9B{OH>i4TD8#2@h=HNm}3reS<~;FZT?13TEe$=}K_t4jdS! z>^XPoDf{3%S04ZQE}ZUu)DOJ+oH)7L`TU(77(E*&M`{^AX+s6PGLn@+b@dPFj4#7-S$9{;u$JKCSx`TXkkxGl;gf);9hyTp4VlQ>;)<+gr}g&&J%` zta&}W)Dr$hX_f}2!ECiE@DhIk_#qR3-#!T>QH#GIRlPV(KaetrDm z|NNiPZmvt77o^L;d!+jeaK5c@LKBNIZ#7K@b`;(#PXvFTF8dbkmDm=5B4? 
zTMQzzNeqqye{g+eRh2fxq7IZc{KR#HL&oU_ysA&+Tr${oNiNauAs^DRT_V1XAM%LK z!wuD8;&7>+(Z^Vg$LcuFMP(8|v(xoaGII;HIJH;+Z$Oa0kK*t|Y?TJ$13&%qF|qFg z$FoC*s)7s;vNiLEPoirA6E@(2c-P{|lkuPa(;tde^~8+i-~Op_@3eOK=t2yNo(hs} z@V*NU?uoD6e}6nxmwX^CrO9Biam*h)jv{4wQTM=M8&9$VUTi$*d%_s{Y|=;8q|@Sn zS5@WjJlpnk;Pq(8K3kk!jAPH9h$F|IqiZ{=3X2rlHPRn@#;Y+TS}e=2SS?3&bS&1{ zfv7qfrIGRIpi?R@23!FY9S7SFxi~++7>_;tOnm(BzUhe#boMe)mF>$tX-suXJ*`RC zY&Oz%u)xVKOAJ^tf#&5m-w?n1fB$A2ya*a*((a=2k)|vBk)Lz-^YsExWqZx#*8&$|{VGY@(Qr^rZ-&td?|lVrh`{H%NHyC)QZGNt6sI?)dkhb+?o z92f=$m5VOnpjfKLT3dF;(t`8^C81Eg?6y=KwAM-6DX5IsU3pbZ&5Xv;<45B7snepx zb?HijmWlKk(2)+!m86HHyj5oTqYnyuwlO(FnhA!(!rPSU>#|EObsM2`ke!6|l;Bre zrTpN9)pCR4H#2*tFBu6^-1>;C>j^= zrR!Xm@MaW!M`!xfH(_33u-SAM?0a@i$GGTieqmm^{i=9kIri<@6OVuE$*4NI2i%n1cpSH07F9bPqUx>S2D%MLnl z@PepHE`ITgUsbw2k|Udeiwo{{}lCux+p4-PmHqoeDwP)RW@do)aI*7rOwT zGXlfheJO(QoZ5`up--IPb7DBcK}P+zJp!Rah<}m z+(vWkN#Lr~BW~qZFD{4Vh?7i5Q;=Qq_uh9RnZ?ptssA z!hjCtulDT$Po>Z+eE`~RwUw86%(uREO0=~Z7aZE{ej{j={uSC4&6b529qDd#3Nawb zAU`x~K0rMK6E?`2J8FEf^AM{RS+(y0PT7&Hg!M{h5jw{OFc^bA#3WO8fFUXD;C{&- z+B!!?al8s`6&zrL4vN2*%GD_AnSQKQj=EcGDletMu2Ri-Z{?S6R(0~e3u{6z@KoLZoTbN zqT6o!Eu#s4CwNw`kV5(l9JF^q3Lo5JmjebW;d=VM$z)x&k%mu+SBO{jxwA?2C_JUU zU%{fb+l#Jh{jaT7jBQLA&@#b0gW&Wik+I|jHxVhV09v5#(y>ZeD*;>w!YPz0%76Mm zpNS{(D_h&A@<%U2&LN55>4YWyJRK(R6W4us=om_ai2%lh@&zvFfm#G6qh3F@({)dF zw8{q`6e0U;cJcJYPt#-)cB)}FL$7qs>XdT(5*uE;gUc9=-(=X%Rvda6E_c~yX%B-`twKbXLZ$tB4=pX zk`hoD#e>ID9jZOux%ei0So|TIRBw_k=s#*6dRBOJj&?29>MOBXI~CpKQ?c1@h{s5O zRC>9dtj3Y*K{N=TalJ!N%G>?p%AZ|KRI%R8s|P*D-xN>Y$?k^!uzBHsq6PC--7;J; z_6Echk1@A($&XFh50NFq9&QLfA7Ay;srOxGf$t6zy8E+@rAE1NM>(Ebz)HV;E-i5^bLPOW+5{@_G1}? zU8qNukvdrgvkJj>XyzqrDioVKF#PForJv!iKH0w33eb+?C1F5cqbl-=y5xK?#>UNyF9%VSXdGax|{yM8MMQ@UX4HUNNG8Q%f^*^ zBw3RtR^*~}n)A%w&o1=l zL-vF4Ae}6I>zwizIkE%5Ti-9@*-ImHW@gS(02IkWo3L_x<9Qnh(iPD{Tqsdw0uj2g z>YeADapy`_7R~SO)KXZdr%T|Z_%NX?k5qUrm;(hodsk_8d`sbBH{MPQWayjk3YzZv zs>Vs$B#+=C2Kiao>6ts=!$im%c+_;93knLe#yxl>#+J{Hb?2hu_>3;%C=C8M!|yU= zkQ$iup2K9n^PDFi2%hu5=jr0~g@6>qZwZ$wqwqsna+h%E>QUDGK=dgN;ay*L2_L>3 zF2&28Vevh^tF0&X`AYcoFe=`Y2{zxG7T zlzU@hm~Fe2lay*!Bx4wrS=lcxYF-9(u5y%V+5kqZ;M6y_1z;9#lOaSAwqkgq9QCzY zREPWH2j2O*c;}D2HAc#V@!Ye|#gk7Tj@h|ckqHJzSxSHum4@)v?TN)&JKF05aqPrg zeBx7IiqC)k&bae|XJc94q0-zz1*}|RHfyoO3Ixswlwm=Ke0d=mTQ`@`jfV^wF_}L8CeZq4LLgr;`ZWrwJ!gd_?}9 zsYvI6cqei3$568IBdiQ~g>xrOrX$gVvNR6Kf^-v3;SV64Zx~DPw$wp|GS1dM z=J({4fA2}g7b~$*d@bjcCWa$|ocv7x7$wS@w9*a$)SAMn{!`hQz%k&N+8*?;l)Z)Y zP<1GVhlbq=QYouV`g=Kshlfp9Y;nsuw8fIzYM~@UgD^~j(Yp=Iey_}!o162sva({M z@4$gQ@lqKduX@ER<3%^#5Eou}SyYr)MRZ>rEXVlpXpB}zW27_`ho3zjfAN?9EAG7O zj_5Ww;+m^2kNx|0#njX;6{8T(9y=ZtW|t3AUMcVpis+V3Bg%qT_W1~A*Tt#;Z{J7tyD!tCj22{)Vx>5c?RYBEsMbNz}^OebeA(FrtzqAtEe z2ax@hzKtP$Gp#_t@~0!0@bC^#Hc>p?^v;j+Fb`5t2wNYl)99Ttu>vTS8jtK(c|vQ% z_4d3G@q2UO3Ahvite&`TWm1F!b%=^7PZ~tjj&lzn>3( z?w-$IUo!k8_npcM-qQUacjt%M$$@ve^Wo0!wzzjXV9Vh3xS_L2ao|y`c=Ptf`sj|?Rz29VoLS~u%vNd3 z(CjS7%F=wy?4F8OzVg*^!;LSFtFON@F1zy5*u8gGjE#@Q{zLm>M$aR99vZ7U|F-BD zB8K0MkBr6i-sza!H60gSa&f%u6}QB9z4p6fYTs_5u{R!hn+pW4Yxy!{<-iTAzleevEOdr!RLt#6Iq3P*#?MyEC_UeOSLEU8A+c7$gS z=3x-49kK%v_#3N?EALCdrcr!lBRLmz)ybzALQi)b*z~ldIb~ zX=LA&HgeKq-U^U+Xwq?=;vmQ9;Kfhjr02MEKxwgkYD4ZjBxcKsm;75a*^T^Nb z9yy0y1roSalT;Nps_@jq#6HPrc^f~%zMeC^RCRz1^wC+0&k7*>68-ZT@D zNsrD@cU((u3xF6Ntynjp9kI*LTAP)m>)y4B&VA(bpJ9-ti@}Luh#$%mZIy=^V znrt@^Bg4{2_1-+K7b`24N6#t^I5de&pl^jM5Z-(V3# zS{WjL0cD4;#% z&m4JDa*=`3-e{@*%pb`{`7uBWEdnbx9DLNP!c|OcT{^IBn;Um2AMUx_#?e-3+u#;n z&ihs`M+~qUn?YOQNabUdFX8gRqM?zh$~B~PlJ3lIW1Wq5jEs$V_nv}m%%6Dg55zzF z*Z(Xoyzb@EQ$H?4xE9N^CxjQ-q&E66bNB42QWo#`F)lqOW(lHHCy z!S=QA<6>#hM>zHMb>f-l&Uo?0{(V!TiK-_BdZL&B2X1F5hfPL1N%o9jfw$`J{yT^O 
z2mR`qegckyqpbA7azr7=w@@zYJZc&YrnvzK*KM^0j@n~l{9yl};x=PNc1}$^oc^L# zM`6X|S!_WYuc%E@4yh4p2k?L8A-b$qRBpAeR;?9FYP*ZNy0Qxyoa-pR!l3vBwk2he zPS&F`fFs*$YHtj(B=}Sw`B8jG&3&1w58Hl}Q-0j759t{cxlZuUBc0s;q>T>4RV z@b{Fb-f0I$KA+*O;8*-WJPV1#;JpeZ)Ujf;rDxG8ygIYp-H!(?&@P$4A^F=QezC54 z*+Lg}8U$6EfSu91`9A)D;Oy1*Raw~bd`YF(GYpTg2}C)*l2D>o9PxTs0GA4<^x(C~ z-}F7DrC*d{`Ju1SKSW=o(@;f&-UNu?ReZiDnq(4G(q|KXGC87uR?c=*c7houIX)qW zphu2=xxPiKoQ;p1@Ua)0tnybIl`Ukwntb(6Sj$3%$CiRG z@{GJ=r!o44C?b3s9?uRu4E9w=OHovL&{e4eCeii^ufT$g0dHwX4&~v}@I9wF%8mV| zZ_tgn6ZOYqzB3D#5u$VqpTe@T8z1`M-Oqs@o?Q&~Q%}e%&S7UD+;~w0CUYQ5TxR49 zb)VI#EExiqxSDWET_w4Hw$Dy}1agw%bHWig%<0Hsi0^VH=Qz4> zc^dgj>)hFoG-*l(jOTREw;~ArI#dpQRKSDyWx@uNI!?`+Zi?mi-IgTv}L)-nDM*yXb-#p4lH?yX(pL@c+Iu9zMJg6H^h@5o|u% z634x*M_Y1<1wf26DLY>I~!rSEx%1Jz*~kOq&+ zU;G6A2;x8;d<9ti$33H+vu-Di;qve2`{(Zb__=$&@c8eHqvsv3)L~BN+unZAkNi#J zcHE&8%865)Z_83)2=6QV3?0#WsU#-8_p;zn_=3m?ft;hQ_zep~H6Wbwwd)DjqGJmr z)vxMS7NYCg{4r0$Y0=6ur`LpMUAnt`JmROzMRH(xv_A2S-+7K?fhnON3j|-lk9VU2 zo~+A{WK6r_A}dO+OLrH6BLIH!eQ$riBJ6cUg|ZMw>BQi0iX%HCGdIOZ zfF&QWu(oRW5=`D!(dE#q(a&>&g`cg>qj-rYb52mK|#bl2s2#3#Lje?Rj|`pPT$Q-8Yr&Ruknm6LijO!7w%B@C4qkI5AD&`^&}6JX9nu>3)=6{ z>C{Tw&3T|-ZNAZ}dq5u=p~KVLYbjsOOk9;=*6ys!;A{rx4Axg0KJS-w$h5#La0xz+ zL+I(1ky;%WY${8CN$F*b6--LdX4I*87zG$Av}LjH;*bp7F$qIDX(wZYvrp+LqFf5+ zSy3*b(s27-*Vl|jdb+~za;()CnriI|JQ%_*8jVP7!YWCfE1&B zLxKQ>o=!EX(Sk#1ijIU%*1iDciB-zOv+rfJC!-|| zsC&>>b=4GyC@y6}S!-*`2s#m~f+tp#XJ4e!fQkM?bPXNKOX>IyRt{VJS6Puhl^R0{CU4}h7SzcmfF{!rcp!Y!W8H%`#%az{X>^%#H*y07jRp5(0c|XpP@&%-=Y7I&f!5aDO>22 z`Y&8UuLP5hILacU8cy1ldarISN432rI=b3CXvZlZ?bV+6*az;2p1vOUom-Mc805;q znfP0!qF?J=!Z*e~N^JH&6h&S9SioI87Y^F?<)(qq{NOM_mp7M(UvW8j*Sj5e$|Uaf z*V{Ol*+pggl*y#$P5_VKkq5AhUvZHplgSlv?Vs?Ay7Z26j342R$OR3`r-78+F4MjB zdb_wgjy#>8`+nVBCXTdnhJ$-?*_Cn1Fy7w|cU**VH4+!`cU)N=*AJb<9!1;wS@{%x zJlo*V-Qjw+A*!T$a~@12ag;goeS3=?ezcI{SjXi!Ve!ARz~F_)(0{wyt1)om#uz$v zBKDlPAr9PpGH$r@);N9Qc$~WB=D79VJL8>q-V*OPeL4o!@37;D39rGyzPSIP2jWAY z_?dXm2i_ZZzU$%Gd-6tE+Iym@vKifyoj?l?kQ028=idA8jSqhC1M&W!`7`m@$3GJP z#sBbsi#s3qsCTWmc(1G4PLfj3i{@BzKim(V}%8`%>dc=VAtb?0fNk(^qb@&K&F zOO{JBfN0=QUc#vJOI?$`MmJyS&)ui#2$qyRq`*(%#WVUTpV9FmBDJGWC17At{g)zH zcBVXLf-v<+f5%t%`TDZ-kIlF;wHRa5E73bV9EXn`k5B%UzoD{`$1LE4z9~OXh=6x3 z;((Dl0krPei8JbVp8eL-@${3=1=t?$tHkX$orueq$KvedV)Uw?@2s?0S7m~3R&<55 z+D0d~{l{bmbqD9VE9=B9xS&kk2}QW_QpgDGRLc+Hk+?+Xzq)PP9ulu^Z%_!>#=Y^+ z@M)GM%^}*w3rNu6?v^;TcQA(42X=QzzY)E+OE+p#zuR}y{Jhtqi^6?+;ztx*$1of@+&XP_7NS(7BOw>VwWuR&WS`G$kW9l zB~DplB9#fFZt0e@vvcwO_dXhj_8*P0vCB#eysDSliKYTq?DFGN2TV5g_4mZNb64X} zo_#4;0z|*fZd(TO;7>$mLZD0kkbIy8Jm9x0G|SONSQ+}$oZCPb=tnm23{SCp3X&ce z=!pX(dt-Kf-UCf6h-9+){JG1vqxuDV@6cfU+^2rQiEGQUT|`f<2vo}w)urgp%hMp* zpa~VsF456Tm*Y!c_BvOGdP|V5>kI%13wfW!Fh?d9afHLpaKQ z1qXF7*eebaZMaWyUoKqhKUeBGdJuQWlwQ=G>W^s_tw zE_p(SNJ1uQm`G`r4*jDazZ&PHlcJxuR%BN*F{`%EL0IKLk;;U;kd6J_vc;6&yQI&) z`=NW|)mL7L#l?Bqd9u$|YPOjun+Z}D<#zQ}l?}Yy9CzP$SA6~P$K&cObSQmU_=aEk zR0oTlIMsnuaB8bc=Y`_Jq5M0gM{}e(_+l|R`X}&}V_+QF&*TH}GET!cm%$Gwr18#9 zHtLjpHyzrB?NZ=mAwYel9?R1G=^OYoAh1y9)>w-hj}6D4{nUr!zx{vw{WyB_y<&4o z!S$I~t4+nq(qwdrpX$pC!s~|0Wx+d>QJl;taFjogpmWkDj2nSZ`iE@G-B&?7a&Fda za5)|b&oDa+9X}Lz!pl2;<%QGTkGtM|f>>Fuu4}@7k`sL?3bf9tGJxY9_v#zbA+RGY zOGVr2Nn&woJl1Dw@y55FiGTkOemjngY{%Z7iu*9=-Mit@J(LG+;y$NEGK_a&lRMp; z<*vdSi_!V9@(dgZzx0EA@7xoeN}2W%GFZ8oZfVP`Te;we@mhGboVb&f=mg6DECK+s26opf7 zFLY5^EO0=Wv1wpKHVl0;GKVn?(+#?b5<>TDlHGh08MylfhlLwuCcR}@^y)F9rCQW# zHQR(Nibub}u`q|-kz6c_2Va~hpl-zzSLiJ(3v7~^h`EW;SezV>mBpFd2qENHR>2Rd z0lEih-=?oty|5wSA=h2)r9b0@x*BlbTmhc^R|x(h7#MjkFMAqBRlj)8%OU#b?DEYi z_y=DBn8zT5Pd<~=MtUzCT!|IoML=72w25E6SVgqTWMmU~f_8+*EfyQGuuJJ$RVPpU 
zyDZTIV}1ao=q}Gaoajrz<+s3rj5s-w3Oy*D-wk9s1^M(KU7}Wbh-VbWI42GsB2&_M zp#gb2Ojkw`FQ|1C4~d_P5=74mv%Wxj3e&r7UZpo5C<0?ez~s+w>f-ZPoGym#?Qgp) zDg1z(L_eYb-(peR zTo+v|Fh%F_LPhnb=t!H`^!_BvR`m5$ZFj8mSqAu40JIllY2Xmtv`ClpX98(2 zy?e3-oXdG~QEdst;DU72pHDebE{=tpjH#7lbfBB`IhJ8D6P31#$1%xcdlQ>4$Iajm z5GON1ct{zw)7aeA%1Q%VB}}<70zc@7_<~zO7Io-|W%;C4;r_`l(+~&ROF8L0NXK1r zET4DELUtJ^WUh3PE#s@Wl$(Z!Q+d?ht!<(mHaM-gucV&^u5vLk zV|VoF#9=Yqt1rJQ9$b#o?|wAarsmaFEZ`!4C3;Z+WTiQ})<+HFV^l`!S#J1zRdUlk4N7Gja9mlyvKQ^mg^e@taS@fNc5S|KgMJ z&p!LA`k)A@YwDxA)Rze_sxR_~c@K;l>>y56vS<_j$k(=)F6IT8`vb`;ni{%3X-9}I zq!2VIJtyO#TeA>NIl4{I%{SK6FR4x}xMp65#j2K)lJoWDjX3khR9rd#M!ffQV1{ zbRQ>N`Lh@8=-$`~EEr5&tqQkw(G}w;*!#$L=AmeF$QRq&u#mUzhOd;Dj-L0@B_9{A zc=Qc$^Z*ZKe)0mwT`tqojzlV4BXiYE{^&7eVhTh_lJm3vNTsx+Eq~`TpZJWP4=Q>_ z5QQ=c=wo@vgo`i=Uk7u+RuAQa-HAx38-j0qp?1WDmf<2zfGL!Kqou4iS-> zL|NA&ADE-?-x;XS0#Oct0^$J&_RCw5HeWFA05Usi;f~}*SY|ECs+fh4Pv(7{?@WxN<2JgREUn;<^KuONEp4zNJ$g4ZGd-cjBr9I$!cL zyhXrIbhHmJQD&Cc$j|tcD`?D*hz9`VLmu2kgOmavu=~HIi6hlJ zVr@$b9LK9maPUpNN=G$%szWg_crf|~`lFlON21g2-hL^MgVEQ`?zVQ(9mjs>@sA5j zi*f1VXuSUFoAL6GUyUFC_?37?O3K+c&&C^P&cyk17h+~+F6QSKyh0mZ!&qeDijQJb z8-sH|hJN7}=ph%jWGz{|BeGAW+jYo#dJH^lEO5Mbb9j*|&UtE31@luPQK6R0W| z-Vxt0pjTh1#iA67C3V28reo4@Yh8J(E%{bF#(@=n(vb;hwNS$_1txWnt(9nQXM%Mw z+BgBAbx`@BJgNiM59No19*C#?FrG}Ho3XO29)3%mTZnF@?(Jf?H?War&BsMCP=nr1{=f(w*$9`QMFCeqrghq!N#=Fcdw7WB^o3{n z9X);|KJu~m$EW`6hhlDgA}(IJ9Mem4(J6zDZ?IR|t1&yXASGlq)ArBf3xogcDXDSD1*F@iH*fABPVeilax4#K8mmV?g;c zDAg|h#9&0BVekr>mM1Y_42T|OtmHF>1{2p@N#~vz*PY3K(y`OT#|5H9qO7Qm81P`% zm1Ubzq<^}IgF?r|C4CKLF!@B9axfF)pNVByw2g9wZ18ENsE{$1C6XT~X3*iba1Wg! z*IgcPDhxPrmbWG!tvjzU5@R=e>`_M?UmeJoK&y;^e8*K8XN2uXc8* zUYqgi**D`4{`gN~WOyjdi4#eGe-(?z}6|b{<49ual!Ur5h3{I)Om2~kn@ARF0R%N@T zQV);FSjS)dit)vF+4%ND!145!Fj?!mAi6}!f3GLA*LE!1KuW^%@ zM#=%Q>~?7R(&!M=2 zXxq1pxO1mUR>p8eq@sR1?}bZ5?Q-RN8UIgl;0EWqrvq<0>5K42IoFq0rn~N5#HUVW zy>OuU?OYjecigvkKRtXWeF1+1?gF20NnUzffB#92{C+xaOBgI`uFKCf2v6zm#k|Nz z_)s*R^qKrf(vn^?u4qef=%c;xSA2jBV=|NhU{;j#J=Nq5(W2yd3-RjeOC`5L@@%`d z^(ON<;p6n(=?-b?muPRCk(EMj#bRu-pxq#*q= z3#^c98w`d@?&@Fiq}s`KL`RVv;+y%k)q0%1?bi79-}rUQH3nN*q*U@2JXh{U0dMFL zOr&}Nk?=R%-y4tp%=_cS=@W{)6>AHVf`gUe3RD}AlH&~Rz_IcKFK*w$KW!SEWS0Cq zn4>p0vD;4Zj7tINI?(5;PLi)4BrN(D27PHSMRxllgP{|W6V9T(T757c*w z^l_2Ro$BlN9Ela_-6EN!3DF3^xj@y7iF`5$4atVu@G%6&EpVFY8DCsnc)~ zp88*MuG$dNlun4v-rDehy(%w{hnwry0*5%RB)g3>2<#jIr;HZ5DMX`^e zed^>%y`mV(!wwx_LPjus^n>q-fvYIl71xo?ZST4Pmx2!p}o%O|j~F{mm1%klr2sUftK^~r~bqb?8ILa4>L%~=QIX-x@Eg{#6Icz2lnrY zJMKCa|Ks2OoAHUi{@3EjE%yqRMRl8JV*JXPSePDHotbQF@@aj1T9ApRo%TQ%R$GW3 zv0Z4}w5b$D|B}{AM~wlT=S)w3vEz6y@-1E2FE;Wa58Uo_xFTH%S0kS?e&HJ7Kg}g? zQVHr{U0S%tCgnKYX0`LS@Vd5$KC%?c(@QaX?o#~b|M@S2(_)V9?}-leWsGgnj`Wk* zv@$$$0t6iiItMh$(6diY(H&BUrqCnSC7YOB;xkDeBLEKM2{==~X1+Gf>k=(;Cod-R z6v42wsD#QO=hkHp_@qAgTlO3$g)y0koTSdSxyeg(ip|gIa!L?GL!2I|`Y10J_}8U> z@yQC)r1JB^1p)389wpxyyFngftFsG}egQgR+yj^hW1xI-anW`xG~)QcD$tBKy|YVm zYHG@-M)q{~`m=c5)n3sqxCK|+QYV*UNp!iou_F91W>mM%!IA!))<8Vyv9p5(b)C`8 zDSNV6yY&o~R@8NL>7vK;1=E} z1DEv9gDy8@I{ZtTa=03vJ|yFD-wDBGF6bgV{_vkeTGdVzhhC(socv!AJ~!4RC&c4? 
zUmIIDKXD^jB7FFi1IdytPHzI|=z*dqkI9m=vzH!6zGSJIZ8evAT?q$qv7n(X`Z>^| z3aGAN!u6M5Szp@>TPa(4Pn19lde@(ISmDn4iyZlqfwH#Ce1vC^B)HI3H{e4d0Q;H+ zmh6Zpx%(#7w^{v~Z5RBMvPA-%jZLJyW^9A&jlV|I(T$iq*g2-FBiRRQMvR7zd%3`wzJ9ZgM;rsTUjK}VYHS@sIitHks z60rEN+EKOMj*XQQ;?zgD)I0qOas>Xs-tdAH)y3EY#~_$~Og^-~KT|`0>9&Gi6&KXI z&Wx=%rR$H#fX3iK^35?Yl_wd4ANl110JXIEfdx$DMSGMy#=Yc)(zbA`x*e6L?wqs) zz}eT+PY{;yIK9fwF+AiMRU(gs(Qr1(*x@_iEW^w99OK)mU+K!{vhL*Je9QD@T+%BJ zd88eTp1|0VaO!wDj>(CW<~4;^JGx@uzI}1-(wp(}vro&8eYe^`^rHB=0mOew3J%qt zzzHM32SvI3r&f+9zp@TlCJzeDKwfsavaY)Hm{A0?^a=Bo>S8~Y-3Uhq;Q?FTQ`%RhAu7I%sNXj9<} zy2G}nA4nTm{ocy5H%@XA4D%3OqUEl>j_BPxWZ2fF3(n5Y$HLs4$1UMsKC2>;8Li55 z_=aQR)s6VA|NMA-;Va|8-kae)t+D|-6c5{Ght6BYYw|Jdx$s1Fq?z@Dn&o3ykyFrB z(uR1A)S`VBbu!89dP*0hZr%XJU9vd(hb97Kn+QF8Cw~a5iIq(b5f;|^d>8`dY`R}a`Xm>km^>*%hf7i0Oc84V| zztttLJG$L2dg0dKgUpk9M$kGupJ_^Ku`eSB(Msj4tmaXl$XrLg}_J{5*s;DoJ?Wi zju7ElZROR{W-Jt4WvhQy?x=_Bud6vb7BBm-V@qa1sZCee}4 zdMVR27Gmg~Iw9;izR5+D|MMnCe#BQeaMHE};++v(N5_|D41qV2%0W!Vy*wGzVq@#dK~ zQNg2-qcR$^OWRrNTst&@pGz4M(h2KE*g7h--< z9juH#zUjNH&Z)l4j#ibWpV&jj+lo4G$qVJVCSl64aU7+yx=fiEi0qQ?Hm)`|g+K8S zoftgDO7>0NIXYLwq#X3T1x}&MuIT9A6CFJVh5BCk%P&x76$@fiLwKc@6$rJIWeNu9 zf)uEw*r-p(i zwFrKd)yz&Bp{}|ji#Ak%l#Q)zl`Y(IBr=0}ZNeo3UMv7$6>%zSIRWog#o}ou zgt-XwK%8WtSH*!O;hCN68008$G!=slC~Y}-FMOi(iJsuA4H@Jv~s(mu#unA zU?{*pP9t7mFoT5yeil3p8dcsHV#s6n@vmZr_qqK!_iwqZELTU?RyD4LY9eB#83c;H?4#Y6Yq8=v^ZM^tuGym|g?Oe)=cO~&ZbQvAUezY-^J zxGDbI|LU))ojS#<8*xT*go*avo_>q7wKdTLilmI*jrDC${393JT07#vzWs6X)Xi3+ zF`D@<68yO)q;IaP{b+DAKq^P>7llVy55kr7vtNEzg1$2iRoW&O=(*=}`>zXE$R+PI zk&!|#xFMM`(AyKk1B0=)T$AFz>7A7H{cWPf#wj%PrRH-vmEks`pD}L=6!q)g4GFje zi9Xy_Rq>kau?pW&Sqm)oFUpaPf2HJ>gkQ(+jSLoIuoa!pekS3 zm7Po+zuUsX7s=dEKLe~yl08b~vRK4Nnr-PBO>0W8GMbsmk^idX14eLDrQOG$GJr#0 z=RpJHyy|GNu66@&+Z*fyZjIwdZir{U`CObichU05<>;?n{h;NDyx2j29H)<9p_FxP z{p}qYijRNvu{e6-xY9_knV%GR8>%z)X8;YH=uhP-Cs9PUSwn|~Mv5{lt4N1Gm)`xT z_D!k5b8se98XP07hsuumj!|`)} z{x8Lmlefs1DCjDD3sdbNf*_xSMVIcLAku@6*jBmIlVkB~|NdXciu$j<&gST^c6&#~ zn`bUWcV9JFB+}hkjhgz0OSAK$+E(LkUFDQqPWDEDUdWz7!rU2{fhKRRT@F2>OZ%zBoLe8U;LlH8fQl5qepeEc8RWx`JTh-1@(w7+sYb*#-p5T>4K}m8EuX&G|ysD>2LjmgR%eMq4@YGKjnND zmX;-Ag;(?lcuH-sAzA|tA7!h&8GnEmz~lJnmH5&h{-JkDA*+w5^Iu#9los7hZq>i;scErn;9Tvv)tH)`bpOMSdM4VbC!e11B%5GD{hvc>g(UI2J;7rJ_s!i4HNblrW^Odzl$x`t!3%+FoaLU+$gL~uH4F}>A zKlkDI#lQ7G#Xax&aP;okpY5y|CZi%nhYqRX{~g=khAtxe3tfBZlGVbmu#Vo2&vx9klj2-nm$ zOxiOE0<3F^*_y@GTCAkl*>ZwIvN%Jg>A|Jv%X>Yy+mze=N z2s(;+RqxjSC{OvQCUo&`GWr4X@h2UW7c!X1lC@Pa%vP&u>4Iber`H|WyHBcCCEB{W zQ-4yvb?H}X1?9mwhGg~{cI&e3GU$_|(dlz7D=1L)@j8i3mC;ePyl|M z2!TJkER)GBG+eGP2v=$g>6YGsuDZ+MRr$h)KiLMH>f#E?(S&YBmG2#`B&5>zR6fp&a@D7(1Id$yoVny6_0}`NDm&YX`ariu z9t4sEd13zMVgZH=wf`o&ixr=-3oqzbdSgfFvrkhzvZD_Av0E8`_yl>&SOMv|sHexd z9EUL`$vDQH)VE+uquW4cob=M*C*{F78g1aQO5MI=RqAw&ar@&GV%FRhsCi#_Lkr$`5V&CNnE=?ZKBd!`k zSJ8&(fua-qbCE^LG2=w_Wg`a-3QXn!HH}YsQrppf*@r>{shiW@8JBRzbp<~s_^1rW zgMM5W!VA8U^X5L_rfl#)`RM$5R(4KDxMJuMPW8^*4Ss~d%bq-T6!@63q%SMZ`i69O z!A!e>Q_{lgjCHbj2|5NI`sLg-)g+m-7B9c@nkSoYeg6kzeQqgQSzJQ>k+q^5@5&aB z0B_Qj9Q9LPxky(q>%SLF_?58oPJGGzf=4m{2kndR^h8-XS)O>zIJ#WT#36TVNkNo( z32%6_;7|{=A}I0)Bx*5ECyCMvc(IL zL=%=@{1tUiI1@(7OFELj`!wuiXa{&5BV~c>t~;9og-6;7y&u|Rycim@9U+CQzOblk zA?*ys?dwoKGTal@p%LR_?8>;uQderL(bg$@psP*1S&NCQvoSC-7<&#LjnRqm_|@P1 zYJB7QdhFQ~(Kno9-IY$tLmr@WrA>lN^P)F5tKgO#Anz&}+4 zccwo?nd;&bPpPf~w_DH|l&Mi6oGBf$9y`W`DGmz+SODEAeq^EPi{F1U4(uC<wu$=@G})tq-!qx2)q6w&GnAHCuwa* zKTAiOrhn;ImZf)l75O=R8Ass`CSLL0xEJ}A$&2!K;_f)|vwuOu<{{oqZ|kP#ZRSJO z2Xhht<0-kQG2?jrNA1j~WLOA*4Ykfm4G4JWP1)d-{1XEEx-dhkJHAB-{=1yt%**NW zJ@52whQCOYJg+dl+TVRK9y|Cg|bKQGEa>GVK>s%$SI 
z)DtYayHhpO>F&1SS!Ek;@I|H>H|b7gDbGg!cD!V~bV&~hW09X9-ztz2T>dpOvny#b zU2*r3a;2Y~qBy`K=Y2XEw!%_RDkro;M?cLI$SzR_P zqiZMz_a2Dup~Di`9kDdC7z>j#QJ3;Kr)R$Nc4Yjul>+EE(1jU+Ng6+}M(_`Zp)Uv{zcwS*E zV{;87&hO;ABK~25Z_~$*uSg-qFlF%prva2Z(LDgKyC;F$F%~3KlsPNuP_n4wmOAB@ zj=re&9TXY{)gHq0Hp(+?L&J%d+}3i)2l4l+6mwQUuGPk3>dM)W>L;b3E;u*iAN=Eg z6%)0Hecf%U3-V4%fD|&0*x_6H91XJ}*xiAe&!r$rAwhtl_@`nS(NS%WzCjtQU9Hj8 z-4_Rr55<4=>Aw&Uy!Y-{mtr+GF%fk|WYDF(wIe1bXJs&~$K29ttZ#S3>u+3*fBA1d zAHVs9XML2ehm}-}K#GdmX|T6Dx}*rSHS3b`(AU==Cr;cDHyl3}hYuc#kv&7vj`AS* zF(f_EC)glUA9p2Li_V`$g7973J+Q9&vih1Z=$y$O6l8yoOrgj<+G0cWMR-91?2LGETD&eVYTK6X{_V=+BBE&iC7vbQYQz$f&N0k-Ac6R*B{CSLmSOYwsjz8kN-_C`#MT~^*p z7WeOZ;J*0qV;_lQHy)3H{s9m8bhJ0eGtYe|e((4HAl~<99*TWK$3#+NPwu09$%{K_{U{I94ZI)+@mQ9CCgrO~A6re|r#7Z!l2AG_ zk5eIX@B<}YcR7@N;-&$Bz9J)0#xe9x--ls~u{PA#6IZ6k;+KB@6H)0OiS61|l?fh1 zvyx*N_AD4M(v-sRHJm+*IteOJ0btiT`vHewE%V4&S^x5F=brh3*K1uFzi@8}|H%%V z?WDgxf8g5fu7@Y@+3%NrxSePFm)YvMymlPOxgOr#;kfJHuXU;P^Kc#9jbx>N>9PB+ zxWGf&^4jhEPJZ0WtC6u@d8YncC5;!8I7PgjdaJg`Q;H*>g6FJ*IUQjNglEZ9ei*>e zE~ink5C>joM=<=$@uTn-&nbSM>+=Oq)WhRh7y@4nYAswWLtgL_G(T2{akD;F2uU@9tvjnfd|ON7vv+ZmfR9- z*40^*itfOGnfe`N%yHJ>t*w%Sew9j(^e4qrJ%>j|;x~W$bK+f2wP7Mix`$xlBpN1T z#kz@OwTTztD1Z2IWKUl__SgsF=#57Nwq*R`v|wAyK2bHv=E!t3i~AWe=Ed!&wzFKK z9VkS8q%24&`zqnGfk*lP)lc}RhodcN7qUc#+Tc_*OIb_4>yqQ>g7nFY>VuY+*5l&X zOw89eVrbui_}Hg@AqMv!c6rEN`YZZrPyWjpUegTl;=#lTGHG3X&S?D0fBr9+>y4`L zdRY4G-hrVwFFli;kl>cSkiq&H>7r__P|B>Hz^A>Pr(6NLU{y&){Xvt1`Bz+yC%Q*} zHB7!BUuc3b5*U@v5FX*QBH$n{3kkuEX#p72Si?OnacHn3_73$%uXHyi5&3p~=kVTW z+jlJLEK)^oqd!V-L&u@#866vs(b1Xs7yt6Nqb5CIZ}A=c#Tq+1DT|$DZLvrC7@u6) zw{K6}dCN`ltN-!eMn~UKK~obS&PzXAjuyq^cw|qs3RWuWohI}ZMpE*ydD*&{?xJJ# za`bEK7xks6EiT5Ak3*K;KQSd;V>V`IW~FCL_}Kh$_1_mRUWy;R_`~?d6Hmvp&weMq z^{sEm(@%XXo_*$9@q+xZHC~f`@y6>cI=tvl49(9i#Ntv-*OF|EHRV06K6xSP;^VdY zig1!Jx{X8+e4Cuf1SSnC;wzD$_=?XqN&ll?UtjV<;rBfJo;Y~uKwLiergZD&SYjqs zbjT+Y=>KUqc4VWM!Q1P|21Kvgp{2PJ58i#J+Z=nA<9ogE67-0OkPl7jXX!V=>($Y* zcsWM#||Boez@k7-KY?!P%PEJM?HF_-yc49G(Ph2Pm(g`)egv3%MJaK<`ei5qEc40 zscw_wWAVpd{6cI{e>u}CTPUu-!~T9KzNA0MmCs1b67_DsE2e645i zJhY>N=rr;pIh;YCf)EsPdA6@~Cfbwy$e$g`8L;W@he^c0F5v?$#+zZco3}Om6KjUwrmHLcS!tr!~Xs8@VoAbnMvu| z>_(Q1;!|nJ3qCyoUjZjN9g|2s)n4JU#gqA*%5?Yr4~R!zjoG=G=&DwQZx%Gu-)u*B zwKIl>hlD3&hRRhQE9#>-IvRVeM{VmBWP)ixc82XGRgIIy&*Ajc9VlLXz+b zM?Lk9m&1AH0k=C|;XK~7iZ*|F|x(n~7CEc+Zu}KNd z(@&dX?&?^~k59%^U;U$a=^N)_FW&-2h6osTsn;cg$Vokp`!mK5vtQF)*@DHqR1S2n zaCJ{kLwy)8V#hc9=0*1CK57%{EMJjJwgc_K?lUg{7VqE(47Rr@uZTx}<|SmVg(Ll_ z!YS8pkVQ4KeE%a&e{F1f_10}G3`KUr80-;g(aws&w)x(O4kO|dwwK2iPO)Y!#X zoVXP0oQw(FbOPv`(sP@EnQ!TXJK+%V#nJYbTm?;oOY}hNpM??w{^1&2%C&3<(>UqC zA~KEWe#`M&MOC`gXSW4^3*Q!2|F^MHQ=6yEV4(&lNmyZ0N_PEQXW%3R;a3W@@9j0wRt zKBY-qbS(AzjHLlHN4cYe(I4gbkW*PUW7C*#lxON$*f@+wrQQo3+Bz$i!HiFKcXzo@ zVJw72A&iGI9^+4fvS>!~6VRbe=!BCCU}J0_=#NthnK&&!XR*Sj34!Cb7hEi|^(O|@ zCoq8ypI9F0qRx~9OiU~@#>(j~;0Allr+p|hYDav3rcJrJ-_{R(7@Sj2>dicc8ch5I z&9az)1<<5Lp6~=MsuIvQbfSzso+U5xf{(dy=*&JHQuR0K2v#2>Z~XwNK-u6Pl7)xi zg=`1P;mUMLo33L)r#nvmD%-T+w;~6pG16o{!kaGM>EF0Ax6I$~x^saSPMsnR zb)amISwo|`L(hB~gtYXFq|Gt%<`^6pjM@5py!pa+0Q?4Dz1T{Aoi_tFHs)%l6(g{@2k4i2Vb>xY2 z>Kjy7AuP+X%ldetxonpr1o4`4go-768vlYR)2cE!(K5Iqu6NNFKWL4<559s9H`M>2 zoAEgY+MC6_ZQ`@u{{E;)Kf*p+uWiI4^C%1R{#+|`xvsQ~yI#IDC7rP|`u81Rr)1BWjOUM;uQ0AJK^wFQJ5X|qT<*3 zH_{h*nz*@@_ktXD9Jqm&oQQ~7q;ue>7x7F6;&#fiQ+z@gGf3jnpBe-dxZR3mELOsi zAB*$owJ3^(@1FBg`*5<@(xPl_^aSy}+fMisrcIkJi*m?H1%PMgs(<8_;oNpA*^moz z<{w?IYw7gpb}C#Mzl;n13a@l-&mx{K>tce#SO6a3Ku@DBf9tP&=rdc}8~y|UKL`Xt zh{3LI!OEH^+?=}%Au$$%@+fR5Kbz8Y8{xFG{()2m&`^g#GJXVm6{W<8y|(Gd)lnFE zlqK}FGHItW8^ox*l^00Q4uE^^(B$r`WkU?7s7en$bH}gp!Z39ws5BDdG!K(B#)IHu 
zr*n&lmp2<>j8^DQN0|#8a@8k;Z%k5csy-+ObfCO%$k6km03TUSM@`{nnbbmX2q0jh zI@viv0hFb3eeupEoKd#!7~`cS;VE>+=VHcz+dSQ-n1ck zK=j}t2~&T-?|0X*2&XJy=Hi(;aEB>qC&dMyOo%y8>IBRRYPifVaZ}#2Ug-#@eU-Nx zNM)&Rtmvl$a#^BlFp>g;i$6Ln;fRja@pLIU6Ewz!+K+eY1r4!6oa(?^JgjfGO1bQg zk$uOaci-`-_6|$I;%LC!E#PDC>2L%Wt2Ezy{an2I+UxP=8yDiWGiUVtYK)Kb?QJIJ z*TkO)cPWGFNEa8Ee5_jTCZ6|h{{?jh7(?v-U0Pa*sma+G8@(E5&s>P}7cRy0%$Nsq zFqTks`Of&}`nJ++M!mKcwWS=yL;-K+3$GH2d>hNVndwOMql2k*bgA9p%Urn&k9$?5 zaN-@$Qk+;tj>5`!3^^r$)v+9@&#JbiT1~JDM%4jB4kONO+pczQ6V)qEaiw4t{xDue ze`~^*^fbwhszR#KH+X}}la(&I0b1yvJisRoBLR}yhHu${Ed_4!{OhqPT=IR|wdJk& z-9P+F{N6V&M8DXqqmpgU=0ZswckCVQQVt~eS^3q>_mW##y{gs~4WjfduP{i=pjx{p zj4>bvdI#cDKllFlv!DD}j0{)RM)Pt0(iy>C3wFx0>wih<)Mcc|6Kg~ z7rz&yvrDFU9CXwv`>(AXr6FZvcXcrK?%f|JZ+b`Ec*9MxchCN)c2&&^@G67;@TR|Q zC|TQHQ5~ewbyXSY17-$Qnb@u>Uk1*Y_!o|)U^I)rT3XkoTrsJlGNo)_hbPaf-gJ&C zhXL+38DAKwz=+XB-vB;vz{=!5@i?m8tL#Njbf|pxfmO&mJ5Mm!Fq{||WN{V~JSgbY z7e2y(L8(QK3h(AYXpoBu9TqhpbC3zboZw;5p2Y*6(DDKYb>?1U03LylS|}gUF3Kdk znmVgJ(cRM@)$Sf${hkNfRuZ$Wsi;okJ<7Se?y^cKQx>w6A#bU$igY z$xi?*i|!Y^lb;>`Gt*NsJvA;Km=nJ&iOx7NLp%e$qHv2RSbez0B$`U++obdJW3jL} z6AN>*;^*^H249LRV`DZBZ#jKi96o+D?!EWE_{7hCOl{Z_m(HJyA4#5E8M_*1-gqPa z;$QxSIDX_v%qacD_+|4Q@|wjkb(PgA+MroFD%BVo+8YnQ>t0VB%+1S4Q2nr9@C4SIYJsb!#R%SH+>6v z!Q;T?4Bbu8Poy--KGuV91>JI50tTcF7nLCl0F+}(D()%=yola0-XuHOttb-l&X!6W zpKL(pHOD7E{?X_i8IJW8j`jj!%Fp~_Q%v|n#wj5ssqT8=&+p&^XZGMC+@aZjl^1ZC z@93<-mFksv6OW4F$^oK=zG7*_JG@oo=>{p@8+Z7V=OV2g>A{uKsP5a8*?@;IE@0#$ zEMaP-@m0iizPjkQ33r8eg&WSqL+S=R$jkZipu5xJ$X~CbEy7X`5r+$0?0PTDE3Wbz zhFK=pPC72|Z@Mb|4GWLlOT2Jb8)7{883&zm-<>yC#`Oh$%e>(W*R3o||CPbcWiO|J z|JZrZcieJUb;wkcy98jDspTI*L z7Q$3@L9b~Xds3OjJxYiCJ7U&hPAUDUIX?z?wVJ+eIhzYb+X|TRVH^73o=Ll|!C%gB)QOCT)X$ zgPiwqyMl*7J=$^4-l6!&hdvlbjvv;0ORPyYHcK~Y74S`xi{Q#~haV%Rg3F@7{U0(3 zo+BR5;80G_RFgPN;IRO~@wZpquTY2V&zOW30+4r-Mc&z@cJaU-G$A=7RLDQnr{sw) z3rlOUs`sl?wU}C1jGJzGM|}Lx{>5nT9#9%_j_}aJi3=R>sywW_Q&$9Fv)~sT(&7vk z^&1-i=L#z_8mMJ&BI4L@Y^K(aoWt9@Ym5^ja8LBIk^~L_`;L%)$hIH zMVdVWU9nu3eYnYjQ0Qw*I#g5i^tHwCKzID?M?M(;>o5N^HL+rQ=l`TYTlFbE+v`gL z6GCA2c!o-#GnM+16k|Q(4{xkTt8^_+yjWk6zG(dx9X)M?smYnRthRXZ``?d0 z`SO?I3!ndk_}$zm(??|%EG_`wfeju&5gMfVrud*AvHeEl2o{PQoy#N}z9zEhP_%Hr6n^xaBluiz1V zBOj!fZ?D12UG8V0^;X5ht}&m~MGMk#dir`}yQ42o-*GB#yzx|g``Kq>bYf8)QD~Ur zQ-5nz^+vzq`?c)YX0U~UJ|?DbICLP6AA|oy>(cGJx+`8hfF2d%?@qx8pRed0UVipF z&vODa;!H`*Xe3Q%zYK%)xbHal5`C3Vq98x=#DcQFNjU@2m6yDW^nPqSBdy_2^kAh{ zLDcKWfdjEAzuDQvoP6l*mEO4F1sr@9?zY=c$9q5cGs!8RLGTkXhSX&^L02 zHkWOJJcH(iU-+EwJzFM4CDsLJTWfcGM>Kh5d@4GnzcV0QlO0K170#dXWKb_<1n>LB zi|@Ygt~j`FPh7ruPPV%AOz}MO1iJ~nojwQstVOtCw`Yg?1SVD2Ryc{NJ&qhc6#MrL z#!D|euksc=FkY#&_*i!)p0GJPy7IGq8^Q&rYw@|K1?jF-mkG(5-qF_=WUn(0)2Z|w z$lR7(0K>8CG%bB&SEWlf(oKM;VX46g^1RO!}*A^ga3zkHOKm5iXufUC3ESr9=EC`dnGsjJoW|>AA`1 z@9&F)hlZ37JjMiqw7bo^(88iaL94v-GcT*6$u-fBbT`!<7_q5+Vw`vbA0|uD4|6y<2v;bkrmw;cHFx+*pYw$#N#~s9O_yp~Af& z9)6LI4IOw97;;rVKK3S<0(bt!sb0Qzcrr6X(I{H^m7-Djf6P{R85Cs5ccFTWOD9bIwk=FJse5KnozM^iFx0#9|J z%@y5s!Omd;jAWqNwq;vyMXO3w+Mp6);0rz$!pMJT|wUEsvz%0W8mq5mN^mftyb!LypVd z6iHc6WHAM}TAg2vMP0~z#wL1tdQ~Q4+u{?Mtt$%)F*h~m@l?iKIT5L)vpE*$X5)ny zU-2Bs(Hn1!{fCdnOX}ah@Rb+i+{K7$KRPIjaKvX`bVJ(-F40U*n2O$nVAGfQ!kd!> z5fp|W&a}?Y@>KAmhtT5K%HXLu@ss%@q?_9hokZ{cwxgN#&y2h4g0ITwf>~$+QJ`T&4`nRd^V8}hn{;oMjp3xwse+jQSz4Tz zJXGH&dUo44!ym$#c*k{uci_$e>_qqcV7123v#|MG56 z9 zR(B=pQXJ-H7vs|TOYz$4Z^RpKoQct~$+$FnHRc!c=qSELh|+?=k&9BcB#>&};WIls z?PIw)9>;_U{i@SOX+yEWm_kWwZ|ihkGjmgM>Ecy&mNQajvvXk=J{_PZ=R`Xg5Y(fq zyHn}gtiaQjOwMAgZODjVfdQ+xQGiz@Aero^ol&M4Ou?a`Ccg@G$^4>T#Gw=RPgTZ8OFVGjt?^60^ylJ+Lwlp9b{-!ejY+jp zPcOTmD#BUB)XakF&=qsjEAems!{_7If9shzJGsJ80n#FRWW@Azwnwk<-_6c6wg14t 
zK-_lQ?Q!}Y?}(9oLq47ggVLurs7)9YWgv<+<(uf-`2+w)5fhEjPen9<0?vXBjluAJCT#k=Isq<`T-`5>R!oy9%lsEuf5;R$y23VFqa?FnNbLgt;~%0N?H zFmTxYU^sk?AIDkd2@t@MVNC2Zc%SrJ8X71t#G`K@9T)GV3`p4V10eW3`P+jK0*(Q{ zG7Y@J=@ooFfp17N(Br{o`T?ab`vqLWLb@WY%IEI5?gubTM9iuy-}vOXCg^y%z8rIN zqUjpnNL&z&^DHG?ahL?}@PK-!2eA2GEwGa}?UM4SkV$f0X(}3Jmk{lTQqF+PJU7o)LV_@!8+{?YQC8;ds|Q4@F%D-opH{ z7a6Rp4%KS6>(tjX5JN+Q@$sMgsQ7y$rlzMQZ|c#Zd>BmJTBojwm*hF_f=y|8&%R7J zaRAS9YL46qR?1&^e^W*lpGL?Eg4ly!lJ?{P8eWKGA^U`{~9-V4e4}S?S!jJ(V zugo*^Z9G%1{(0BG-~%p{djJEkrjLvwFUQsc^ok#)P??gBbWuMjS;m1)0fj-=e1e1I z*x{)YsBdq@pde4zM1f7Rr6h|AgBPBJCeSH3dTwHLD(=4bmN;=_M0rTI$e>+aUQ!;w zldynW>l0*Y2h`N#ZJg^@Ug@kCh#!QCos^AWaKydv$QQZm-^;>(LOc+0&Ap6|Y{0Le zho9uiWDQs5Te>2@M!f(3%Xh^s=kh&ytAWhIH8L*TweMbJ^p-fg9eM5wza}a$?v&S{ zQ^m<7{JXxs*SlTyDel*lC+Bu<sQ7 zAb2iMNU(1UiQ#^4jI@S?{}lr{&p6o|ax5;Iy32@c@&E+xIP7!B00a-Ri>hYlT!&;HKu z878_6c$fiE*Hv{ge9GH-f=0#Uo9>VO%+JJ8761s&^~KrPlq{v+1%$*WFaZw+uRK&= zVD&%ZZt+C6YAxDWjAt=ym8^EDmE}oYbwLxN{2qgq!qjN zn7~+KfNFg!E>FzGu-mk4j)%1+vlSu>9f#5din=qYHCI@dnLvuX5w%C?f>01l6ELOHAzqFkX}f?-4-Vf z?~k6ojyO89H~#9U|8o4|-}rBUM6svTmdknuuWBp$P+jnoCrfE>+L1nRO?lHVs{dUU zoi58x;L|==uUt}J$Zr36FZw-y?tFajd*6*GzxMU``j@^GU;g43;;Vo1NAZm(ACDJa z_PTqJ^yz{Pi#*%EAIoT!*rm_obab;1u?S}b@#RlMu z{?bR+wl}F>6Y-J9KBRW(mHu-sIJu0;0Ax;{I>*A?lslViTk*svl59g7OzLQ@*`weRG69Z5j>$z`az~#*^ z@M);k7#tdsJvc9$XgLOlN8%yrZ}&X#kWz_{vGX<<7m?j&eHdTm3jDgDe-HAjJt%K( zd@8>Fuj-!W$WB*W(Cz%kX{uGwl4c_+=y@CT-V>=E%l|j3f`ghv(l$e+y zfor8oI=$L566S|>iMC2Bp!M9yW^3E-WBhC-$SDJJL0a}PRIM+|7h$# zxYsngpub?B+!1~K)u;~iDA7u6 zEMATE`3dpOLaef&K=fIuNG=O;w25s$cmkT2Eb|FwddxPUjlhxGq!?d;2Gq(9&lu^C zbE}b<^o}DB(r3iXN9|wcooRQ{r-O!^Q@+0CUb-S(LtiT$1>&yD*L1ntmGJ-;m~LOq$Kme<*`?iG-Nuc_LeV8W*(MuDyoWx* z*b*nf_(*d(!f8(?Ki3(vflCBOsVl6l$-b07JGVE~ud!Hy`ol}u)8LA@Ow2K+#L1J4 zv+zk2#*bVJ2`k8ejUvnIvO`se{=UJeh_Q1_c2n|yIabw2)s|QkAUk_u!FDC#mJe!k z76a}b8upk7r{~PiOvU)<#h9NMkCmlGAEmr4`Rd)YC~Irt70F6vqWUudwCV8=n$5P1 z;&Jl^9x$$Fdn+Dkct)1O6W*olIMAMSJLzr2X=^4=X?d&g?uPK#8kMSO4x17^j)fI! z55DuiwYj3ZWRvVePGD6AhShqa`bx^P0&ADRn8wjPM5xddi{m&QlTTX`2U@a?DcQ~# zB6Xw>pii*OcOM{HrhkyXj{sLaxJbt$5%A$f4vd)r3%o^o#zaMf&?NmE z^3g zm{S0NKX-=H$FNYcT_lx#TxUmBBrTz@YIy!lk z$QwtT@|pYY_jEh3;GDnO4qTUQpK@676pmg4?IHW97j=Oqe0qcPFUxRta!K##8Po&P zNSAbnfF>X2YlJqz5AT2{@hEr@jj}KUI#9OG*LsZ7Gu}-zF@DA;IlM4Odi42A=cA|6 z8Nkscq|VPy0-y3Dof3F%O!>4ZK8uRlx21=RC$^WF^q-BjMd{Xald-%s7ZQ}R+hngQ z9iKPj69MS#hE@3?QyBC1<{SCiPq##Efq#H49BqlqILJx08R#`rWxF7*Xgfh{+A2?# zpk8d6^n!f7nudi_`WQA4)fZ~!ZBp`CR@$N_yRs8}E5*Fzwy0F_wnj&HXY}nIR`oVw zzP=Ra&Rvd5Z#51dI~M%|Bk_Y5-i)We{c_CC3YQYG8=J_sR-X>&#UooRP7|P#kJg8P zTezW(xTp_wlsYiAMb>2-R(&YPUPoyBfwtykabCy@RyD- zJZKeM5x6UU=}O)P7mawXD-Xqc!zF&McV!+0Gc2(#3f4YU?N!VQwB*DKZ{Cv%%Tk14V#J0s>>_LS{4haC0Mj3+VzI#R@zIg2j3`f z_`uHNVUz}U0tkB3uKMhDC5)sm>g^;B;~!CW((#;&u9TnYHOseyF8#2ptfp-~0r1h! z$j=J^aAwYY1ULRiM+KA8DU~aD$PLD(LRoM!HwpvJ`MAR_03)j67(fA)LK^^uY!Ilp znWkvOLIB%nEaLK>cU>5gbcmE;KRUvE_9d73`BBdZA|8~N(tCoC4p?4WAWF{&V=E`> z;1SxSN0}>kil9iZ`#Ixt`hnxC403|$Nn`XG=7b>~jn3_Qf~SL3eu`(KjFASM@)w|b z053a@208g&;0xuEunZ@DSzhVn-(UjbGc#u|E(r4OFh7s>BS8&M77w3g$TkFMv>9QW>J8+lpWgV!a-f2gE#!sBU9SU?Y1jW@5cWo119X;WEa1bLMgqh5=C|Mw_6m} z($X0nT|>gJ6u*vMrD>0PZB+(FPX6%SP4Efb)fQ_pdiiqv=%rWVwO8MW^A|41%* zYKUbSNK3N|F+Me6e`vV7r)tuJ##w#o6~3Zp?hG)~iM4k^E24Vk?~^d3kirWrMpzXL z#G{i!iCvib{MrS6=8}FO-g`w&`R+aoVinaIhGdc9&4t z#9#c>hvT99?~d+DTa3?6c;a_UhVP(^Z1o#)@ycvex&~urdNscIM^D5*{&!!EGoy2? 
z@l2;S1cpxPEI#Uxp|EFoZ`^a&J#qW3cX%*jNeY}hCc#^gQRIaS(&W5jN=g(;I;T6d zceUHV;RvyI;hgY}&Z_R6g~1|)OKk)H!MCP&(Gi1u9NEf5xp@=5(+$NPMV&@u@fN&a z4i36agmF(^D~<=VUhvNXryPU-D5PsonRzUL|G*)mM9gdTRs0y|p zyI7dyNB;ZN0LoGsr5wSZ`1T_W3$Bpaf}2jB!3Dkx%Rn*<5jun`Cce1?A9=eW^k*6p zPN2gywnT@`l%4Pq$;HZ*eHH^J&@Ve8Rz%ZGocp`~;=9@TSuX-$GQul#g&%g6da~a* z5}pk+3p>P9;@LD}I98i{IfiJ(6F^)dMAZo%T8YWYi8z1ee0<~SZ^pvXs(1PC-@7;V zA3YI|Jp4eMy6Kkq!WTaumo9S}$oaVAzPsc0+fS?BJu!M|RJ1x1y}dn>{%x`E;DPwa zhaVF?RbpanDi)+zE_;y>lXn!$3Ss>r4+NfJluOpj0h#QpivC#tn8-p-A~WS(q}{Z_ z>Rlo-wl*ZI_}-N=@LXHz~)yfaL}>JMg9@!Fd``~(xzFofd}$WJt7fQXLJcL zL;ZyTc2dCr; z!u=w@>mBtiBl=!m*(TK9U7#-KX}2pMcZ|5=-gt-h%e=U7&Lr;*S3UzHj`HwBChLvI zUB&G<0(6zh%lF1rmXFi@E%hv2`MguUE9`!~bi3o14xqR)j5f?7ulu7xMz-Qj}*jxrehA@5BA_*0*fmhsS~f6l{q z(Y>=sKL#nC8iQGRFc@C)nR$x(N+y#Zx}g#%TYjmxMziQzaeNAu-uXmO+8gLB35ypB zDV-N>sSNY1_=V&E1HI;nFTe#Z==@XH#E*O&qD+)jY0c?!(m5>0)c3V_Nl#ge!-sE( z-}>yYN@lKj;Er<9i9A53%yOUt&un+D!QQTT^uhb%rkjr|bIInVdGU7L#&$C+yj_0k zdWOq*5d`uk$T)AbC7GrT#bBQ!kd8qgha*q2&d6DG4;EPZ=vduT_EN53RoYecIru48 z+cNA(=bPedlBri_7h`g%9w%=(9q<3yPYL$y>yX3r)2cS9+=kA;uq6EO%|@XUO^=Vo zul>7Ui{1{&VU;!1-|KIfzIpD7`+)Ajff(xRj>)O%nB{19r4lyG0+u;%>s+|Pp{soU zQ(c)C;BpnOIIxRNjI2ec@oq(-ncO9Pjov-ot#RwI;W#)lY@ML9dmuUo4n)_%W1u}2 z7i!Vk*)0W2DWq#GNT1`I$e;hhSK_PRc}YDb14i4?#Yoz=`f3%>-Q5|zgPpN&a4`P6 z|Nd{phkyRB2om)TKK6e(t4TYej|<_psRW-0T6JK+9O%S5su2DEYJF90%Hp`DSeTuU z(Q_B#`R_a*PkiIa_`Tozz4)Eq`dmEyo{rNu-x4>SI335N6C6EqG>#lO?CZdR12M8^Zww3!Ti+ZU8q|G%3=NLN{(T3O z{>C_b=s*k&4|?ExdS*T@Tpo?5zVXfY%2&P~&wTTnaqi7?(JfpJ_4h|tcei!_W(F)( zVcLcEVeq@P+9?}k(S06+%X|Xiz>$OT;DZmvxwB{EyFYqWyeFO#4=~wKUtTi(4-WNO z|6wOJpD??4?oxc@XWkblPuv)1-h4A=W~aOmw@W%M$7rrC%YG1U*)7CLX+L=Bg&3Pv zzG^dRjtUb$N$vobyHT|Z&k}1XN9gY%R?!JE4=y8;eB|hZaJ*ehs~Mhm@+%#5hem37 zV2`Hg5MAvZ84_(Zsh)LzhN-8gU$z;Gx>sY*zJu{|KliCPe$$Cqmaee0v@AL(cE>8c z*^{5;B?Gy+$ina|aBgub9{^%D7eCmr^HbZQnK11~bf_J;8WKB)(?O**4s#BmA$ zSOVwPwJrPeQvfTn$JmwF+cy~B`rdb9VtU>*$pBnk_#s=L2u6CrhfM2|O$&<-_x8po zKlWJ6&P>I!^hxTmzOn4Gftz-LpQ%eu0(fXSeHDkgGQ^H8b?;s8jCa51;rQ4`J{pfn zSN`DpKM)T~&pv(nMBH-vWZZDWv3Tb@Z;f~8`NYW+aog>u;>gj1F*q`yI5)-3?>G^M z4(;IyAGEH1*FAT{sT0R#r*TT$Qp_(-NK|jc$lm@qaCmQYiVxRo zERd0YD0nQF-PedxnYaRS@UMroOMn_K(hnVy0M*Gixx9ljTH|_$PhJd!`i{5UTV%Q3 z$)${+ZCpMBPr_nnuj088Z^wCO8Ug-%>%T9aqg&)UPfBSne+|SYfq9Y~;+B&SGFy^zV{JEsM_@WGS7;$1ntHP%F1 z?D%b0|Hq`visbRSYNJ6q>uC( zmDizu7d$Z8j~>X5{y8?tNdB$M=J5_*WR&ns%dNbRL^>V)YrFWLN!od}|MbNN7%P=nL!K8tA@(MTY2yFFn%fjiJ z?u=)2^bWA{NP;rQ*RX}R;B&^})gJ8fZ)-7ih&3P{J*E1a$CSU`1@+%jvI)8Wsd96IWSQogI#%-^A=7lYcPQm2My6?onK86ie8P60A zp^LJ>7yDszmBl53Tlw+n6Wh|{FZzZr_{@TUm+p)X%*hgYGKI%JRR)y=AK4G8Zqyri z1Zzcn!~%)V&JOn_j2)2%Jg^wieWJ=Dth3T(JbO{2+J!}>9?Ms0*!AEA7}&|``UuzN z9U+T_po}fk?=wD2w*H)_7uAWT)NbSl?}8KZOqhi)y?Emiv_`HHc-FmC2D^*V^?{4_ z^W;3Sg!3G)-4@PrV~%Br3I-2l9GqpGF5=MkWzs|DOIm}Xi~CNV)HmTPWf6S9$)-fkF{-3HpK4ObSk;nL zWG6q`A@k)#BF4m#Gn@wZ`k6Om2M)!7kv)=o9nm&0Xj;VvV?!Y1EMgSgYH#wEN6t4% zUNC|baQsZ^U%dek{ zKY99vc==V?YeQ{OQ$NVbz05CoqXTp#8DTrvbrA0TQ@w(gCk(hdM6|P8jol|`K6F22nM$~1d)J^x))yNK-rkx#t zk7Xuu7Th9pt<&t@}?lxmpe%f)o z83&5XulV!TaQMAlg-d+UE<7ua$qiiU4lUu$yWD8Y-}>}ppAmsKP5^MGx)5FoK>Fhl zz0hq6Pnen)6iM9+La^cHe6pTKnC>QG=L3TaC=gQ!+EQ@mGhw`ge=%Yax9n(kzjHT3 z$d8;6HWso5rC>1SZEw7AFqj3B5KkR+q0CrOvp`0$>h8u=XR7*eR1gDZRvZ+@@jnc@ zH`mNObp(NIC3_MHxLDjk>x{hnL4k zG59<^J`u0Kb|zkY@kcQ_df7+f*H_i~Nr|M7BtP?{E_VBS(tnabhdIqq>tINl@Uu}?_q%%)PF)Weex%161X!35vR zq6)6Z%F<*k%wCeh(CkGJS0~2fpZs6H9}B`=zw9j*U2~*aD}uhZZX<$;Y9EoQK9hPN z2kCs7Ku77X_En|uHODPCACFId?0qpb*rQT6V{&Ff_z-@CBaG*B7o~u$!FyftpFa0Q z{D z1MY+`!P%iaIBu1r-kg`(lX8GJcR!#Q(>kgu_=3+BmM2Z9LV}q)G=Z_gx0)Gv;Nlpn zmF0DRpAcH(J~KVx?})Cgumg|*67o@641FoQ$V;Kf{?vh;78s!nCD{OzH}M(VM82v} 
z(V^gW+q6Y*Pj6JatETs~W~|f}qFu1|RJugd7*A>^ zwF6{Z(pTy(`XRFMbMp%}?!s5LZ7OpFIvhDj9=d2xr{`yqLT(aOX^C%$M9=hf{Nztp zB5E^!aOf6*LIxZY3eDzmb@V6lW>Jjf1rv>&C_=rBudTM&SlNuZ)urg}*ou1}x?61` z9$Q{AzSl)x96Ja+&_Kf(F5y@KS%+P&;bDlbamJ5+q;o{x^JT{wskq|acrS!S!(AU< zI^yA%{KlH{w8x3wNs4DfI#uWfcq6+8#85!aKUqCJJ1lmU-|#XG#9CL(L0b7+*oSHjJ2 zDL@?8GFWkl$3jZtx~hc1Z;~SzzPV6ADuB{iR@qNRxGqi|8Q8NYE?v1C&pr18Ky-S_ zqTk3glD}|%3uOru#o4o`Kkm8fwz&Q7yF}K~<(6iB9DBRUGW@17%IBW)8(CxNpo*tH zrHo8Rx*Xp%kRSD?epv_7dvTKeNM8<~rEF#5hjf;;mN&wWT%KYeU2J(RW)>G>UBYT? zb}1$o>T$>2_r=4HeO$4?EimXNoFl_kfcr~8Q3eX8{eTxa&Y*8=T)liLe(l$OBYHbk zu4L2U10yleKM-fmo{zfZd$p%CdaD(mV70WgCQLNNhVr16$V>j?N_>`X*W*ujL@Xkc zzD!r%OYkA9tb^KLG(xG+YKL^V_O|WV+us~F40pz$^tej9^o_1+>^puUntO&K)Yq(m zZt3eX1Fh>$Tpf*>h3WV^fA5z=)r!8(_UM=5Sm|KVW~V>DGc?j0#||8fzxVh5m$?6t zk5HnniHKF{p~3)7>La(&Tc8W|1>Q-<;@72xm>9bn)02}iIW`?jvInkSyb|C0?swx$ zpZ|mS<1c6>qjlQ*4= zqsNZL@X$!~^!9k?SzBwTa3C7;pg4oao6_mg>rHZP|lO48$#qexN=UqmJxl zLBPJa>Bf_B`zg`?EqBJ=k^RDROI*D=9^ZWSnRxEo-;TMtIq`w?Kh=@Nko0r(rsxbb zMn!dAS9$1Fum~q|_4oA0ZMWW{HW`Xb7th7=;*uAWt_opmtINXNmUIw~@!gQ_$byb_ zPiQ=J?>#Z8@|ZIAK(}DwbS~>u5=GE5b(@)-k4qP(tgoZ*c|o1}WBOOv5YTYk;W`gE z>!sj4{V()^;=<*m=*M~qz><+=f#jNUrvAJ$elRcl?ujR!i0O%|A(D?H z2Zv+taIX>whax|4gPhYp!-ROQxC3Ccr`u>gQ_Q_e_dmHfk>2^Z3$i=tP8 zNN9{5FMMLFqk~Tmpesun7%7Pu%RD*Nz;~CWygYfShVf|t#1T-)kNS#eQuNBF>%RHi zw_|F4NxV!y!vqjJ`LSUHEAkAPsP}I1X;pDf9X=e7KJrjpzHm;u_@ZqycK=f{xS$<; zvYBO}d_{kZ3E^1a3Eq)u&@?LwI%IeC_xGxtb$_y9dU`6x#z%FZh%wohb=k19v*P8c zG4sTv{HG_c`iOoO_|MPJc)=@n=g5Jf7~VG={kr!I_eV!>S9FgI#n7Qcv1epB2KMfY zPT49UcsFE6K$Gan`AG(_2d>l3acjCWEZSGj_$7+sx|6!jAu9hgSGE`qd|&U%K~-Ei zwzvB#LjII{;oR@&=8N>j9UXl4dvVu)U+I_r=>(Ht$;E~exr|*Y9=dzG+7lTwss3zb zz83%K|N7^#zPJ%1J!)HpF}X*-jakAX9WP2!jvW2pSOh>hg#ZEN#;pCg>p`@X9$BUb z{Dmhk7$~9<#=;C><-aed-@zv?_0B>MkW2eRKb3=Clcy@dH}WsSoHuPQPe3X41{Si@ zMwc&78>amS_e4*(>?_ro`XOJL=;HHp_+gVXX2ryVKjkO8hA{;e3$W;nQ_Lur_QB7e zeNjCjKhh$rSxCX@9bO#2zM{O9ra%JxPJh4{6fMMfYrVc~{CBBuWf!#?L23Dn%%)@j zHa3gAazRd~PsA%GZzL;`SByb$svmk0rvfms+}YW!cFI%w(5o2Bp+A8R;0=!Q9=miQ z#z!y7ZlCiz{ZF2tpw{156Z@dssSM>u8N#l|JOn_rFeahAtV1b(s>hvU*{uPg00=(r zqDa+oQDMu5y|cSNn%L1H9Mk_&R(VktI;MI)TUH>=lW!nhjtM)DQK&AtYd^<$1WZ8% zy7a&&OE%T_;m0S4h=)uD9)C)Rg#pTww47|$++I=fIi_LRjU>^X#TGfqr@ln(3;fE# z?8evy+E&(GxK%jzF1w-|a*CYj{N+ksg>T6(`=s%j)MW<~3sdkHQ^y9Qu z=oe%%R?0XayONoJXQK!04v$sPEre6#E~h*ahq|z^kLuDtGuG!ZS!hQ1qaM^59x%_! 
z@eXLznQ^$%G44hlvd`=W`TdE%|^Gla^=omlBCWyh?Io1~E zVrz9l@z$bK?JM;;Y0hOo1Nvjuk+NuCN*6r3N zBOZBr5duMeCl|f2Xs|=-44cUnQNQfed83m>Fo6hVU~N-E&cZt@>d6pNi>N7_!5{>HXwAfI#u0-- zsO}U%u&|6E1SBJ!IKE?OrXXg516oaCM8gPpRmxRYCJ6i-_>vz2M?do>Eu}mRF9;Ak z!0dVeKQMz7)xiq06b1w@(LC{wg`jjO6@2!E0TS}^K#5ATp2X2A1?+lpemolI`tjcv zbfEHR5G(fyyy8=T0`z!HZl^rS$8Cde z`JRBe{kWGY0E2;A=TrCrHg)i`ys`tMeMqG;3nA&9)(WR{BRy@$yB7suln~xbSHfg6 z%81fu5gD(1X*k|#H-u&RP8x6_Sb^&&IKrGAg0{%Vc^Sv#mjk3`nu2j)Ok+U!1;=Jm z{7B+3Js8I*P7I()5UUe`UUD#xK?=%Yr}dyZsv#+dd!>-|MpJvY@WHP44$+hpS-vsS z)E<+QlkvpkPscZ({eDbLO~(Aa>Dxo)f=<1~O}mB9}CWh?dbQ&avPKga#|_HZPu6k932 z7lpg1!Htp=JMpx&6cWX7bb_h)!UggVx zqX|z}vk-@|B&|sPs_W8hEKFYzzt>e(lipY3U;oXl2vIFgi6a~E&{u~ zMGA`O83ltumJPw{m84RMT4kiQwl~L~p`Lj3-S@@=58e^oyFUfyAfAwKVS>EFMi8r4g%uCN_4IaI;XxLQ zT)d*#N3*I7CfG5y7$D`FLl{FWEa2FF3=RfZVI!1T%1k9r@<5#Y(5ho_-I7&uW6V?D|H4qM88YRF*7ycT@CP2Gm1C$6^_6|Il-b=$#f>h zQHGfi7qch~USih>0}gGHwS4c??WZ*K-R}Ry8yrVkUyuxu0==?SkBet7#`Z z5*sQXdSDU|TEM{zX%?vN0)fF68c0=BJY)rZCPYua%0NdN2l7=~424t(ED^}-MbahsQe~cg6OAag=e=3Jn(b1YeR+p9=xe*_;X+;`bb?oPMsqRkucxU#_O3jT=>8V6U`UP&Y5QRe$rM|ep~2`@N9;1c@N z$|tP6NOx`BO`D>}Mjn2qJpcFejw{P4a8SfS&O+Y?jb|%)Dkl|KU!MHOp*xyC6kOsL z0~FcihLo3R+TMcCG7Q=j?GtYKFPTLEyueERrsfWz5#Atw#Y0yx|Hw~r19@VI=zj}- zfhM-l?Jb+(yOciwYqQ<{%EN6WUJ+i9H`D?H&N7IBMe0Do>O0V{m|SYDbjM(|C;sh! z_zlC4JYumEa)CkH+`*ALZ_2iCzkl-N(RlFT`&GA=SYMiw4pNh>R6Jk+G(li}9b;X6 z6dkU?#Lu_}htyP#`Y4ST#{$3da6d#e>Lq_{883LImgM35bx9r&k9uuLhT`Xe0>Ql| zeQ|YT+4|Ge!a`h~T8u^MTKB!{;kf6KKchqdM5AT-XWc9_ot&BO~$pxwBE1-cyl`?iEfwh%eo8S#ffxm24RVqTF=YIR*GCm*V)B zVRk(4zDh?Ty_2T&-<_1U$C=*M*0u*#HacWiyV^-@aB#3APVDQCF6mC4(!(m<-LdB# zcf@vkPi%PbM7p24JB)uO{${3U<6r-)Uyo;gbXmO!1H{=5d=tN|qY^`VhT{I)Z;fC6 zNB{3Qc*8x4At*M+bY-83F5cY2(fE2*{_N^+R$s89_E}@0AiE=Lt1*6o-StnzZ~n*M zjL-ef@5M9Ud^$$2T#jDpEhmrP7{^ZB6gS>@B5pqQjyQJgcpN@_*gN(2?Aa4N!aF-* znw38~^qTbRdX3MGN#_-KHyaQQ0?vs6l2tG|f z-;T~UxMv_9eBeIuQFDCfJI}_l^d|=P7+;x_J|)#Ldf4%~Ru}DU#nkLn+<)&~ad7`0 zHrcH=aZ=@1tU9RKEqf7W0>hH0>jSOS~1~{D)tuau3)5(+Z>A(Kh zV?#8%vb+(Cq789(+J0^m<)vbC%!X%pp8qL8jWu}{`Hs`yBrloJaTX_Mh1ID zha4wLTx2Y|at?mNp=t^{WFfn38GAvbBj%Z8VMidq=OkO9>q}Q>w-L|CYh;&?bEXfK zzJSff0+Kw+SbD7-xS=e|c9)LMRmdI+gu`@4k3*;O(Yl*_wq-@QYKxwM9?{shVov&X zMKsF7QWl-#upO|~IQ4rlwC0m+>y0i_G_c>vPjHg@`D>%FW zS75Q@jQ);^5hgh~$&bYu@J)AbzX$)2iS?y<BoYcy?YP%6BbMmwXndG5qjAhLNX)M!iDUBb=k4#eY$Vdr4K4CZS4iKqFn~t-FF&t z+6fxVMx;*e7u{b77W!`C0dL2Lcz{N2?MFM(?7BpI%E=e)?Uc%ZGRksFN4<93Thse; z-(5Q8)7_7|-W!+x>qd{vEBtM5!OldAzsGI)uoW|xC*u5f zeiYyQ(hJd}K761m{0qk{6ldH)#;nqdCY0Wgi7q8NGDH1wnvov?9r>xd$4m8KoXHQ~ z@(%%QJRk?iL}A7w*N!u98)oWH#^goD;xEBc$}3lrJh_Wk$j3VX4G6uASupGVNcyx+aSct^w2ux^lDw|JWl+0u&GBi$FkEiKDq$L_eOJN*H zx|Zsl3z}Bd&*b86qQXnaG@;x10UR}3Sz{x~mTl(TnJr(*5}zhWKI(Ip*`%WSnVwY# zc7ryfX{j-=NBMkYO??lWww7zl-aX3VO@HE0KTfjiV&g$`yT?n~JKMczfX^qcEY?-_ zviN#IFt5ax;ZeW1x)S48FUQ=~OEG!rLe!?O#&&H{ZM_lg!Xcl=g!h?fC4cZEG{kmW zr*H;eru;zOAy;y<+In>9+45Jdw^Cn<_2qflv>lTBLsB=?F6xs#@gVp&h3{6SW8z!_ zRbj}1JlTe3^a8bP43Q-5E98Jw;g(5oCl-J20095=NklxOiFUXX{+=Igx3lu7%t8$uG&sIyvM^CBNv?R2P~(RAFU#K z=hR%Esw-g7&^blG3rkc7aGVpMnFd*AT6g>Mo?{>;Rxz9x>neYa-bQ-)B+!DevyT_9 zsEhLSqI$hE&HxU`&wZTQh_;3%JU&TUm0>xkx4z6K9hG7pF0Gi2g(prYq_WKHw9MA1~|eK3qR`HqC_!CU7{*MI!=xX4_;zoJyzGxNfML$?R$Y15S{q)WTF!W3oW06-*!mGx5 zPrD=KEDZ!?9?Qc{14@C&XGpwZ2yl!m$^>@$Zu)$2yJeN?FSwyis1TV)|E=t@-Y%2= zRpq(r;7$L*v-(o}8T+IB91m9BqI-&Jmu<5po<>Hjt=8k}=#{wjuDheHyVswQLHA@Q zJ>>!@dYC_Tp?h<4jv<$Qv5GLRaxt)86t77a^w@?DFC4YEcA4Iqo>mL`}9sZ*?TzI5!%<@%t~v*hEuQdRini)VAVRHcz1^Q?hAKKiZr*6M3jnl16g8 zi~3RzaKre69_fH}x4qhM5b>keUyt`c_R;9rb2yq-=3`Shq%n2)*EA# zH)ymytp_)=a)#b^lMHz}u|oW$bjEE1RZiG=F56EJ5I5->X|k*=6BtW;mT8DXKKY*UiZER{v8qS= 
z>Eam}gbCV`wxW~6A2{*L{vqq*Fyb^aprau?U6xtAG@Lxlth*>#u7vHa4m=WGyz%V1 zrfcBN=-A0KtByicxQwQ>z*+DC_yy1E$V39_{H4f18*r%VvZ*j1e^%*=j`qRm=-HzV zyg$~3H>qMWNLFKUaZ!p%x6-xx8)c6_{zQE5`_ISt*qC>BCQUa*XLnVGDMuY*V6?{E z!hFok%*sGn@v1{6Ey8S zBN5&-0SI1-6)6O(gMUzM`5 zv=QI`{u}Xce*KT)kDoiIT4%R5(A5<~y?rsVXHaz09s`3zag&Vi+i$xyMuvu5584r3 znt@eTw|BR9_?g}RtcXm77dl8>dw^AbARk5B%D3nQLxTphOhv&g=*5$oiVKhWL;&tm zGBIRGlZtevH+@0;LNSx_#9?B#Ty<`C!DvT;GR>%URW=5W!)Y4tx+70ZU-_}1N|Y|T zu%cqQL|kRYr9>%n6&oHShRUdL3WVUouweld<(ZyZ)2JX1>S+U=0RX}1qy9VcxF>df zp!`)jx@;IB11ulF33*y5WTy{}0zY-b!B>s=C4AyvVDyd&UBqG04hvc^Iys7PL43Tn zvKn17CONGE#gobWG=!mZ$rtxGMgOD={$k?6hM&sAsBpO|kGg|@|aIlY_CSHB@wRr8d*P>a59rZqXqpL-_GUzQ>*o%YC~ebL?D zA45ZXV|IFu77`q4D;dnfj9_6Q3xXr9X zlzM~mR!SUn0m-Kls4$g}W8pwnR(D;{8TC`_9bQpU6ofA; zor1w^o&_j(->-2lq;OPv*J~=~q)WJXN@pM&Z*O&FCdmuelwo)M`>uR-=2h zaGTd|SNf&@wP8E&Wq!r|`e+T0oq)0!>o3rN>yVzi%gl1GuaDx;_M!`DVYkcpJ1$>K zKPR%oKd)=cy8g;EyVI5axNN^h{x(W^Rh&kdZ{r&6rF@;W-bDZn95`72N}3m+SRPZZ z;~`t2f8@K$w5&zKh!`2@^1FCjw4JOZo=_Ku^0>*>Zh7HULzp5J( zOw(7d#xsvU?oUuLS5xV1j@xcI9k0D{E|%)+(JEbPUGU{Mv^Qg3eo~t)Z}@&C^{3ZH zx6CpVFF2!Px4Zs*mxDa*$bvi+m*`vs(T8|Pl4(d6ZJ;|x%!^Rs@SbX%I?@ySh6ZAI zxG#E!hNExqLE%fm!XFgY)?SUtNseS+j?0%X$KU?tUyZ8DQdb&%z18Tcw#K1@`{UTL z!||R6AB?~I%l~_{b?s9@Ld+(8>q<2FlSV38aBMDb#PZTo%+F57?9_Bjj*Z5~@@kA; zybw=(?ZKON+HQdh|%#b;mvN=)2z=_uunC+f-!kLi~_ z1zITe556N`Utv;%g|x-4DEL*#VNobZFwW(Lz79Qs8wT`=#{esM#L?#NYy>wtA_34K z6G0vTUE}0{)R#GB;Mma{;`Gh$kS=k|_Qj#p-NGs*@FzbN zYtj{%P-DR~_98M9IR+m2@f}KbaG@))c$V_8yJsfH;~QW5T1QF;EHgu-j9(wo#^#QEBjPsq+5xjbwU0MZ}5>|0-vx-Df@(J79?tmvUJE&Oj|Eigi?Et4S9}-|y+}*1aore_h?OA^QgUWW)7%AvtOh zlR7LYSX-9hSzn3udM#=T*sSwD0+)%xx@1OvnlA18O$0BzL$lFf1{-`+r{pQ~xt!@6 zS`mRm<5q!nCk}T<&;vSkgC5FfCr;;grQPkyc-P0>ao713`sa@S^>-Z4#brX-?Pwre z;ok_qK3?H~OD=JuIHFrmimBgQtuIH&-e0dR$FKh%|0tOE?{1SVt#<9|=u~xtGwLMT zpkJkH=VY&h4eH=YG~Hr(ZR>I` z8$g8_=YG~55EQ}Di}p1@OfH~{5D1?M9wwC8z(9Z0R#9YykDecmz59l3H~Rbgvbn$& z9EoG%2-cM$Y0Zo}Y)Ph@r{#@&_mQ~LN9c!=&824%s<^iIkZ)pU-AKVd^E~|rcw(HZ z{3Ik5LXNL3GZsV`pX!jVp*|xwH6Rb*1s3IqUzX|f_>7b6DHiyvzrq$~@fXMb&#G@= z9Bz3vciS_`hFoHIx5rUbCpHu?9t9upz3&btc4>R{B@1)YF*bTJCN7W0^yHXiE8o+u zyKeMb*s21?CkDvFtpuHc2x)22A--oZ&-(JB7X)puEXL~cyk%FH-?~p8D zcW;yA^rrf8x4iJ|_OSk)twP@o718IhsmHo6rKyi(;VN>TJUQvg`iv1zn~+$~qHXs% zYBM%!ZL;x3Wm~U+MrmWE<0MPUfWXLuen7zJIp1k3Po}B9p6GWUEPuU26E;(#yiu0MS<^dcU|jzRMIL{{m*;NJ^8sc9ik=!bcweJGc*Jf@<{Wzg>W0i)YQ z&*bk!`ctvSCwd}@(=LSn^-PS=^2@X z1I8Q^Oj*L6#Tw*Ezi$Bb%z^}H7MfSYKL*nrw}grFrq3Y&FS}rTO`& z&(~vdYBpxBT#m_$Z^Y=i*JJ9^`Is5I9CH&_6+RhF>yi`e%hA=@8l9q>?k>qUwLPcO z(Pc5t1?;>?-L@%3k3P+uq6Qbk{@fC%x7=pNpo z-bzE8K(8scL>G!zAFxTmvBLwWst_Q`-IN8?FNQOy|REi9PHn{1Z z%H)^16+EF!-svb`v<`d1Z*1m7Z{^NH;*;_V5S8Fnn=^GkYO=MrB#VWu(pnTD{VnpgnvOOJgz{ zHH|ty#h0@!n3$Z6B`Fu+qN}57g^~Kh3$$F;D?1i;f#uOn^3!EH#vl-w9J@~2acn=z9)>Ln zO)IOwRabVMFn~1AacA|_=;`T+fqucSP8WaZVs>`MM}xwj?QNX}?+KHF+q`2w)3X$4 z_=SN%K4ri}^yX%>kKWsmfr$U|8b_cT(XNlgrckU)ykNyy=^5nkCj?XhJ{f>wwW^M; zR$G&?w;ZeTV>cm3WVTiYRBqknn(oLN18jRWHe_IJY|O>-+*C}AUyiyIt={UM_?7?f zZ^Ua?7otyisC2LbSV}2_SKF;JblH4?0hGoJN9luB1|B(bwSS;Hjvw6@55N1qIQ5QW z!dI6HloD58i1pRxSQg5bmbc=?7vGG3^Q&KsKm5*>n5scQG8m%GyHVJM&as#y`}V~B z_uU(J+TOd>CP&qGTA}Nxa7=ASbapYRt#%MSl&gPHmVg(f zok{PuW+{I%GMLD)L7{l?Bg#EAU|NGw<&b+Rf7zYF0t3F$&jN^yhf$^+1QWcMljj}K zkPU2AedS6DWjbe6z{ zbAO>9@+FUL%Hz^Fo({f*4ploVLnbM`d!;hvOSxq*j$_&wY_?nxZ_qEX3RDFNX7EWJ zkjt4KXT!yKk$-_74@RmkmZNeh51`fL1we*&;VJk6<$Y#4d4~ypV99|CQmY&x3njvR z4+^pS42sLXP!UvCVQezUsWi2kXv1w)vHu!7gHWCYKLe9h8LQ;SA_Vi{VlB>|zZ_3I z^<*5{e=P31@2(ge8jc4Zc$WtsKKI$*6e!T$hIbOq3T7sj7<`4cILgsD67DxVo{*m( zL4v|_ZUeVf;tl5k2kNBDpV6@rrMSY^#=0n*ez=fba5lSD-<~Twmpg 
zvu(?=HYwK74D_CT5;ziO;CrZTTePZYD`#@;-KQxrcvw2LMn#l_{x=&Lq) zH|;O~!~Yi}56W;R)+JY<9n#|H*PK2ptiK(+H=O*91nOjxA}!_V-n>rYjM7M}djjRl zlc#TSrC%Bl>54RE!fUS52}0=r=?5H?g+Fe0yz5=U+_*E*-SAUgs9Pib?)z?k9A%f~ zrz`mvKl=Nf_uWqZ@-|wEt8{N4f49rFHM}OS;+YEr@=tMDUbYwV^ZN7&SGJ+!0;`_= zCq^%``3w%3L7;N?yGdJ=Peus? zBxrh9To!t{kS+K}0n`mY$t@PSa)hRL>yWoIF6v+bBb-?_qL1h*ds3(Sos`1fJa*TdwY_3d7*QluwrT03K9X-T#~v+`S1yKTgZ`r7B8{Z9P$Z+Sdi*^`L#A2ivFcf?Vn;AP3z~^e4W?kE5U6 z@4C;0#1!9oprfl#Q+l3r(RQZAR-n)HM5(fDXZK8@FCm}G)1mlYc87E=zRSFKWLUa! zr+8>HUU}`!_~S4AVN6X<#(@JPF+4mNm7am9abzVK!Xwhp*bT%WdPiSZ9651Y+;RF? zOioS2k6wN?q@H*nV+C7DxM!gq$D^LVFcx>+ell)3c`C*x#-g^cAb1!sX91|>j*O3b zea-Mc`|TgZ%yLUq6>ho~jX-uxO5t{B(Bi@luk3TV(r?G%QC^ux(q?9q@zW<`fK!FR z!n$>Kw#L5U0e_B)Fg`anCz>E;yZnCclYchuy!T!&B1dInhpWGTt$G1Z>P>2A>O%V~ z9CW5F&=gbSWAWtUUyt#zQGWv9_|XB;)37IbkTIqm%OptwpbWiAWP;%(d$liG0dma`8 z)njpfRy3{p3rDmq2p}c($Ht&vLdOM{8DxfE#75OxxYd039434^Z=v# zJcX?(U&>^H6WIvYU>kbDfk5f*?2f^~A%8XwnZx1*Y;@#&Q`5Rn3z(aoRr%CU_7`Ip z;#F|TsUe(X;Nz*Ohj_HbdXefX*~?~``HAtka{fY$pFb0GSI)=UoOB*|U$U^Gd^{E* z+%kUFA$ft`iO#UOQuB`frY%mjoDfa~Yqdx1F5VI@Ieoz2^_LF21%9P}GM;T&EgGRE zTcoR*KYcXp>+#vrJ=_i)e(5=#YL8;R686K&xvAN2m zweH9b;nf+M!t|W_DC0Z1DJN-!a22FDrOm->gMJFQGMs*daW?9JedXwahkt@g z55y_QlZeZ^+f8Al@%WPJ482lLIerbjV2H^z^>|;b`lsSWa(rD869fxPHc#fq{$Og*uTppC1BYjKjAGK0Y6?$%&yCFU6G~ zy%ZN;{C>Rj#FyjwuYDoD{ngLMtIvNkuAF&3>T_e_(Yp2E592;R}3b*CNN z|4Bw{W2`{J!cVp>ZGhZP*^QrfwnJOcuVfwVLUW46)fX_nEm@^H)fVfqD%*+i`^y)` zphX309$1KL}bZNIQQ9q<41zhs@U zRdQ>E#b~BGbui#w_*D&^)+dSxX80q^)}=eQo%+ISyqErESozNVPrH;24n@^SR-KeJ zcznC~j(W{|kpYPEbJ->-7c7$+aWjgaP?TH{Z7Uxx$pHHq=_xy)V-t__02erDnR0X? zZq0wBcKnWsBxL_btTYbn84eJ zLy+o+e|aHPxf2c~+<9j}PE|Fq<=q6!b7>qxfbRGaLP^ZTv!W`** z1mdTHyeCo=A@6d)Q8uz+H0)duKlw<3VFevwbmFdeb^<7?{4Aru?JCtU~q%zKen##LGArC`#O zk>%`H62gu@ar~1P@F$#EE{uA8ootvG-Mjq2D&>i(5Z*TCndDbK?sTn~WD*7sXv3WN z2PokN9H?9i(VcAN5-vYo6Hq?q{n`Zp1?+a2-c`aM%NfjzObBm+r3%=jqZ{9BOHQHjf!uEx_( ze=E*jz8sxhouW0-GeSx{LXjx?W0Ny6F*6&}Gt;rMQjZEoQCqH1b{x?tgRpeK3`WCy zKKfb}XJDzMTQ2yNW3&+>Huz+mFyYssPJux>^2{9`C^Iq;me;Hh^L>-Sk--=o81TE5 zWl*5jHWfgn6(5uHU*Ec~K=&XdV|_a2PSo?&%-9My{gD}1`WSbVT71#2NTR_0@Ae$uNd zQ6$E$&c(0%+E;^f2=-JN$YfW$lv2?%9cyp3%k`xuTRuGn*MPrUn~ zJL1lFo|eKl9Lx1ZD{(V3Q?XRvjH$V$c>T4j@!8*hJihR?SG|*rDe%7Do*1ZBZSaGwpY6&aQ&m7SI5=#v4`-PLVn&PTBcK5&m~ZlP_ZWTD|H zZ+?~;%}kc6G6nCLCe*e}tbjWv**R{^NAy!y)gL@t0aje-0i%RGSvk*w6IQ|Jz_0Ey za8aHicUISvr{ytnSQiuOo>Z5tCvOEBeqb&6ng31fAW%9F7<=H3L1|_`6xYQc|g(=N}@a6!5^iACyd`*Xvj-yjMWgJx04wPj@SorrM z8@FZ3Un|5wK%Lc2<|~CU$O-MQOS$#{EwWTR#_k~u8F+YkxgLBsu~U3LG&B@lon0|B zFr@gL@==dB&YzE~lUMw`W_IJ9Jb5bK{jLwhx4!da%+J-LtD`6Ojwrs`9|gR=yy5|Q zj5dxwBQOLRhRPJ~pi(%XCltB!8#jCtil{g{#y#QkJnx%<@Wiui`Io06%T4 zE{jf^qmy=`5C%ZkHobGALcb#~V@dg#wuCDjikc1B)Ub5PA3x&h!hzeyQxT9i(#o&A zpa#KBIv*e9B*+-ik~~fKy7R+UXR#71$;Bk{AvH2hFp!Y+B}{^x`ZF=%PbA1OVM)E$ zSC-<#f98>R_hUaxgxFrZtRgHOt+&_^Ww1y-WMUY1JF+jz^M&K?{;qJvCvb`y`pNa@ zy^IFD*JbT8?I<5tyuUT#H7*rh9Ct1kW}0x@{X~~x;L!1Q*TJqV_xk%!ay#i8#*Z9r zvHLw86poMlt|=D(63IW=?c_)1JnqW>rwyR*aygMss=PPSV{DerUc166M|d)=mGN(%|dxBiDMZ!aJh%03W#tvy*C%sdm`wfyx zXm|inuyZ;Kiw=+}EF`Q+H#l|j*7%iw@h`|S+65DOf9?)oz}2-PQ^Peh)D>&%_4viV z{ELFO8k_Yg+1o4buSjDUgmdd?(ggkK0|Sxpm3U1uqmFt5KaP}b!L#d4{deJndV7cM z`nqNQ&Itg>SMqbtWZ<$G09%$mHn+GOGmEP+Utf*e-+5o$@z4jA8;c}To`Dz8M!V7` zcFr&9m44hJPq3`imZCO0A5T8@&0swz-&JP^{kGb0W^z>UNhV@PseE>;&(F=ry0Q@{ zSy$!9y#Uwc;;1)ac8!O0el7p;2c2`bU%HfI^wh;&b@BqnHnpB$@&Y?0KrN<*Hyr4R zTaOOLKyP>KJ9sGCditZSTkWYf92js`JNc=iW4XH$KE}I zacKYk_`CnlzZ;K!;?v5T#hcPaH~F+q()_0SP<9*7PEUFeYjI{NMr8wh_nD{TDP5CO z<8k7~8{&cc9*n#1zBf)Dzd82pCf;n1$g~>wozd3-GdWLmyIq0MuOa-rC zl=fs0y`?25>(TkWcnuyGqM#|-(|VlX^vOHwbB#~^Q*s`xTbMu9c$BGDwf+GHsUwWV 
z>N=SV?QPq&5y+hSXgg$F0ay1*+w+rpTOO2Nqm$HuXYFJ+J7{lb%r?5V`Wi0URURas z`DCIWKCn11b%yjkG9B$w{IDbY3D>}({R*7+cN`109^zH|%JMyd%QjKxNoAg@l#?o< zsq|KUhG-k*P&YHJF8R5sy^OzFKl47?hxVt&$omL6IcUqv%WdiQolM9o?v^93I;Iv} z0%-aj%8;m!KI1N4qtnExw89g8lJQGv7P^$~ke0!Tm%x(;Q3Hc6yoV#W%d6t2kOY}Z znFP<^Mj2D)97|bQ-D$Vs%Wa!&P$RG98})XO$X6TE30%t~i)1AfeLEW&>)c6xktEuJ z`RpR}C3f&)M&hLAB`WUd7@^1xW;*X2WnVw|g!56c)UVfSj2vTp` zBtHu?fu8>0H+{6t(N8zgHvFLeQWu&>-{-5meb%XmJ3z><XIM(U(fbaPvDKAIi=%td6@~lpY2$eDWOe+p| zamJ5%%J3018K1P~6oZ*K`UkMCa2w}51@YV+Sj10dkO|jxC^TTbSGpKsT;_SW*WE#~ z41Um05)j{2)XApHFPD`kp6%T61WIzYkwU4O)-a(8Jf=6aCwJA!yTp>GycP0ALdD9i z2>p)lTq<(YiYMc64;uUYJN){be1j_TFekIXY#2YuTcH5s3B=E8T6P5&^uaHF@$k-jU^csgxra5L-F3hA z*3CHbg>^5Q*G@UHfMFl}jnPO0^WbRP-jff;+nKWuwcB^M+pquI-)wgmSEkA$AWNmSAqYA;KFOjBjEB&|%Y01l3$E%-r@`<1k~aJt`pJ`|b?^s& z8%~~hE~7{G4!!`&4#xb#Y&&=MY>t}x*vCHFK1@S%_TuFj*AB!kFE3>wgoC)*sZYvb zjAdc9;u>5!2VQGYkn(~DvMRG^G}Oc!va0-YIe6lrU?%>on+g{>@HkZ8?Idn&uF5J|Bh0E>Iz%AcP7#g7**liXdfb<#gN zG7c^>4}MC=Rs3@50r)+Gj?j@#t44Krjtr{cDTCimS>WoFYgLaF&6Np2DA&BzoV$M`b7-Fz_Ng8Gr*fUG)f4e#($ayMe#P@G6GC zoqvxR_WOsK=)$Q(94L^q=12BPFW#ynXvX3hbjjbrmK~Z3GJm5-uW?0nbt|(I&T9{x z6HV4(@4y8`Cgh(m%d>3DKuVgs-u8}mGpJ$8wAJ9w(-r~`AmgLaElTeDE z{I;{Zgb(3i#XjGA1TGD2@uU2yGqZx7Ok9*QH8tKw=+M~tqWeo32k0(!$!0qob~W~? zcJs!o?FWA7kGH|SJ8f@!BYAic-3o*uL+YVpTLHW&Lm3Fu@_}z_qF(){tf(*5ImD%?zHKRp z%!jDR`C%rL$bDaOIrbZGlPF*)zy_ zP(rx+oH$uHKpNYhp`oeh@eCMI&zx#RE)IC79VNa4z8-t;q^i3l{Ep+`#7sLx2Yu|L zA8&vAZ~wP#lXicce4K=}HJBQ4S4C^6QsS+*$G`Yg`;&j-6QFpi4IOUAb{K+oF6hkh zo!~SaWV_%$^>lD9XtOSchj#w^qh1t=o&B9;%8GH-chXedqy8)ZzHgAe8SsXpylZQC zjH|S(&gyP<*P3pE_Eh}^rh`lAoe8ojC$6+x9GpRKV~>mzSDVA5CdHMLm9yZ=1)|j% zcITj$HJJOAaEUv0nf>;H~=>}B9;YwX}P`f+O_tscRZSfiZ0-|edjiEw+*bV&CboYnaSx)s=x63 zbI9pITUa=k#f@YT{(9y0*V^Yk_k0%Nj!ukaA&Vzdjlz%GirO4bL^;sc>n?!y=i>a}zGLZvbnv`Bnc8-k_XJ zI#Z9n|EOOr;*iGjrQE;`TAgy|9N|uyI6#>RQUaqFI(r9h4t=Gk;Wf!9nTGRiYWdp< zt1;|FkAO!m)^rs7$AbsGmJ8~P%=t7@^&`#b-`$ZjhHr09Zy#1wuA*WwK1NqU(L2=*! 
zo(p+$g#ui#?8+;_@XHJHXvrSyJ# zcbvYDXIkCK_x_o?`Jc>F+|)yNe@ptVE^kg(ces+rzQ5Mh^Ee*wrtOp9Qa3$PHQMK; z)pqB_7u(ZMJlp(|f~RD-P~PvTyBNrRkC_Jx!J&K$A46N|1ta0FPOCbEzbT_{Z!5g& zQ)QRaFN>}5%J?J?utP4x(tev(0uL*L<(E@mu zv;N+>;F|&3nVgggF4M;$aoQ)_H0tR|k5xU(u)VEzL;0AVoNbpco@=LPX4(jP+8u7E z1`gU7Dt%{do4BLx9S=D!F%6%Ov|D#?WrKu^iPV=?I7dzctT1{yj_~#Pto~B<&2{PT zA|uM*Hu5ih)o*DxY*(B(-6f9w!yR?p5i(m7T*9wBRo3baY&HCMb~Xr+hoysY97FEM zFo6gCA|CCdDyL3}RT~WW=rZl(?6L>OVf69Ck3Q7i^NsIor)Ezl4+T{C25je?;3I!^ zqYRQ$Vu)m(+Ao{y>+Q}P*V-@q!l&AAKeiElIx*rniDM$RC5};`GX}fGV&k>xL1p0N z^WiD&1!zxv^|v}(T83Y!TgF6%yQ*7s>}oGLPR|av4}8NTZSnTqcI~?3Dm(4c`3356 z*go>1_q98BZna_PJ2f-W=4L(Ba}<4Ai=V391LY*;DMLX#Fj!j0gWy&B zK$>K1DCx8-c=Xvi0KazuCblUPH#`zvFC5Cd@e9&>KC-0F9~XI{4GmEp2N^)VM<>&s zX2A&c(UuU0p2Q%(WMKvMGi};(%GBPpEfFG5!^nv+rX|k)j-T^`F96N{y>0Kli~q^X zcFPSH(nmW*eePJO_=z}>mwlG-Ab!TvW^mHh-T#P3#x@;O@pRF&V=#&FJ&kb!T8z_v zIw|Yq?~-G$_cmmR;JSC#MVx56<)jmE+T+{&s20E!ulQ17JjpNS)uj(#>(5X8Qil2I z)J@tG@IBA>^D>UOCmxUHqo3rLH(bc+r&rcDHrv(f>y74H<^a#!X!p!rxvMq~7`pZ+ z+@T(Uqr)TGNWw(HCHGY}5Xb(*3psLOrt;%tybF8?8!Y>k?gRF8xJUFQpXIf^wzht| zz2{vI!S5qEv12cvDxiIa2M*9bvUfHyQI_S^DGS;U_JwJ?sh@U1eO`92ZEs_nz)LPf zawk0|M(QDt2Y<9jb+V7h;tEVY+S^kPQ#QKHPoI?ShMkFyhATuA{ncISw$(P)J!w)K z`c#{o@RLVwCL85Gm4&JH2KUjOs1aZsYuoezo!HOkEkKAiqjHhWP(CPksT+1A<=F@H z6I$oaooi>#o@r-JFSLp2>DVrw{<6HX)^6Oq)o$Fnjjgzo{^s=T9QO7Cb;hp3X4oG#V_G!@9=11^B%hTtdqz2C7v=;A|~(gOusyRG-Cma&fuQk zixc&cxeWBpe7a}u=2yDQi}*=vu;J8^`ziPps-Zj38QtfgoJ)`goc#B*4HGkS?L!~^ zSUY{?VG`SC*(5tpx>7GF8zPPT1EzUb{+3(M`hU68za-L&c=7Z{*XPs4%ky#I{b$;f zl+)K!yz3oU^f$53+>CPktiSM8ocsE%aGkc{e&OY(>oQ6HT^_xkF0@S=?s}^ajn~1^ z*D24)fw-%Ud^ltxpO{52)+gSy57f`}ag^_TX4+2&-fLS93?bw4Yjk1Cud=GWE8O?4 z_~3*Sj@}sx*Q%Ixd7n%K8b8xt`7Zu#d(YfVy2@XYyhmxt}qbh zL_#J>RCvci0PrCH*eLZ+K+@^sN;af5Km9#EO(gPK4Ph_LrL;?VSJ~?N>`Eb?g94d6 zlyM2$$6tcv+djn4t^q9_|0PhDhmIWat9*G1e8Le>5QS6!G>d;mk9<7>l3Ox|-$us)rV_c`?MVj8#4J4I_?b)eu$r~*{w1yAOoKbhFj z8B(3Yy0YLwVd?50r1oi(OQjc9u_LV`Tk)&>EaTXDR7O3>6-4Xuk0BALP&E3>JA%&- zadKo6o-P20m*Fzdz;|VaKn$N`X{_gzC#<(JgDAiD-E7v+pp@mGeFgGo8M zNE}8^t6tVv#;`6u-3l*I4m{!A)k!~bo6xx0slX{a|i>&L14?QLAwmb1EckGP{ewv zLxey`iU&1PrPDEGNedp(J;4r4u8vTqLqn9V@Ve_iye2+K=*mIAG5f%k2iiA$@VyyK zTVGuv&Tu<>b|IaqbnM8>&cA8w{7OGpXZj6T2kqDIEwxYm-ly8;%5IyVbrgWWw19{nH15G{7+`%~~uxG&E-0W;lDVU#|&k_Dk z^iNK?dULGJP^RBddf6je{F81JSsO%W= zBb=-7ZJwm&r}(hqR2_ZKBu}3vN$@m&=lYJVGUJIE^0}evM~zzt2D1V)vi50AYaq~1;f zy25)BIPCNt+JS)wq6MOZUr!h{1D3B zD@5p?!MA5C8Z~+H5E;Wi$?Z%ySa8rb>2zmjiuMZS?g@W7d!z7d*+#V$YE9OI9aDDv#y}PRz;@*1i~oT#=U zmb**G`e5&<&7D5qCdVh+Fa6rTZ4(Xx?eArWlx>A76ZEB>d-l0k$?c$h{M)}7eKF9c zC#ETLtL+_>{xrVj*@g*9!3v$oL{My##FvAKwsJIxPaDoh2!9{FR z=$n2bu(()b+krbr>ZrH7N3^q-R=3*iTZ`?n&ppwu-&t-CKJuRSfp7a*`=9*m&$gq{ zg*LFW+}t5m3w_nO&`5Y}mpmqSm%0j$16*9UZh4Z`-FEBF&GxIm_B-wD`Lk_q4x4do ziuQ09{M5og`J(@3rhPkM4IgsJ`_e-?$6j@S0N%F_>isOx3$H^1X~%nTMH+cg{D%(2 zk(OBy5A5dSd)T(Sbkz?xpQ4*AqpUdqt)FR~gwJO+fZyEIMao2WwSeoA(}X0<=g2K^ zctV4`ReFSAo^|=$M_}k>mh^$W>SBvi|*RbbfU|nrdt+fj-k)ho0hFyxnQ9d7ib@VT6kZWMjTuvOnPL(PkpUjqWlA+ zeTDFqvI0Fcuhd2Mhe8MRzXR9OM!l3{xZ!sP#Df#k<>Zv`u2q%qpfh3avL2$3qHR6| zyz3k8rWU8w7ndisFOs3XZ}Q-ea`CVhu-%`-FFr{fZ4K=dhTkMI3OI`4_(lXW`j`h4?9oGu<$ zbPN1z`zAkb-d$>UUUNWc8#clTCX zT3T&8^aD?yJJU}4ZRe>;+Q@UD{-E7_<4$|WBM-Kbv4M8$=AFo#&5`BW?;k)0q&eIZ z{0U|)O6ATK+pmAoSz+Pr_^oV1Xb^a9541V8AI1ug&?35x`Z~tqM3oy0#3RvzAIsG~ z3>}5TaS90y@6?zSdddZ%jx|7E^KeXNV#3ouYJudy&{3P3ooZL!@o>BR&PV-7c-4je zvUv5=Na8U4)3l-1r%q;BTUlwF%WG}r&YkwDfBWh7`=7hk*4JB`n*}ERoZNB8pmp5d zqK~N_bF!GU?uPZ0LtVxsOc8uZV?3dmcyK(#u@OK``(xM$JUTjh1U>+FywUd_Xmb;` zW9YWE9r7KZj)U#&nW;8AJ&E4l0q=+OOJ{Q`j;G5wnQ4Fa5d4Dr>c)se{IbXeSk(2A 
zjgEh7{KI(isJtz#0TvJHtG|EYQ8JXa{24`Dr+txM%_|dt;<9AGH0fja;Up^C---Kb zd&Ie8dFplTT^E}A>Ap-Fj#m36-keCc{T(I`Z3RvAe7Xhw>nBXJ_y*lAtbVFf-gHxe z?YHS`r@ae^p*;{fdFm1E(!s?OyWrCW0-gj|@?QKKoB@m6=xG~H?jAuKPpMk-6jGUk zG-FQUj=4BwG{?N3a#0JywZ6WVPjgV+=!!a_6}}DMOJB;ctYBJNP+s`GBMK#pOq(Sh zj=l5RIc2W+*~_lC`d0}v98c1o!!Y_wbzy{h2@9)5CcN-AX!+pGM?Ose*h08KJR5SRs6gYt~mfZ zXRK*?l`@aE+l7UBc;>LpOpeFy^3!qUisu3!;kMQL_Nz#1o}M6I0Xh2>O>Yq^dejMyvTfA3HqAL+Te#M1WU>M^KRP&{NcSKOEEt-jf${ zmkUg;F5%gGcO^gY>vQpNPg|pz3mK;4jq}mGI{QhFsHH&1vy5cP+GXTr`LUG2T#SZP2v7!(aHZf?weie3=!WPRBtD+!C$DD=O`Jkyy4f& zc=F;V{(bdsx%~y0mRHdFmn2-mZ5reI4o;GfX)0#X2B#l^c{eR(>kq2aRTT$V@$jYo za4UTU$&~R5+8g_oE&y0ubOC^K48zE_g=veBA~I|`^AT@e%6kWch3b5+r(+jgm-qqX z*!gDM($#(X?&c=|{_{WogP$BYHGDbzM58k(Q|6*$>UZ4Jz}nF-e}OJR(6~a_Xmn5= zAj1h3v?R|qDy~fOTQxx_@8B*9sL#i5#LIY@koGR(s4UGccM;*P#abnofhWUIq?srO z;|jlqt%E6whZC<(+BWX4$ZY$@ad4~@UopAlN0o-c5I!1&I0oTlntBmF*-K1`LObj- zp@VB>Or+s7KbfLqD~rfrR)_#x<~48vkLINUw|v417c!pl^(T!JGWfeoK6t0zM}9ls zI+BMwF;fp$oy!PjCES!(Fq>Xj>C~RG^|TE05vO)^iE3greQ2 zMs{K>%lHaY@I}@R4&jxFeql}o2cUp4e9|!|bQex(FTa?V_ueo&DKZabEZ?I>K|{Ae_+SwFE0Fmo~v_&#!QTVw=>+~ig3DlS|t79KS(UMW+=$}4_XrXl|hSSxq#axVN4 zj=aREBYy|L?VPxpb#r64y>^wx`_>|P4Yz3whzBk|*xvEzd)m3vm*@~)YBSSwH85sg z@R7TZTzTm!1nM4PRX1e82q1ezlM?}+@}U8g4m@?P-SuOk*8xU!=onfO3wjElxOOpx z#<$Y5ZyQ zy351?M%mUCj_xXS2mkqVm)hmam)nK&m+5$&r?a&X*k>`C&Y!!Oqjj%bA^hTnoMzyW zq~;fSN4}+_tHr&B=)h?V)&Bksx`4BbvC3*wwVCqjzP z)JyzR7wb8ANa2P7NNT}>kcu|1p0*5XT^tI_<*i<82I` zdbp1~^6tR*juUR^0^?3V^Ko>javnpCnkh%tvAJ^+e5o^~yQ~)eNqeh4E&c}r-hn~> zPNcG1{1k+`+V7#OtZIe4gBkL_yel)q@08)LlXT#q;nu$Teus|BR@!R_0iW8w*%^X| z;1@gk>1+ay?VNUr?MOa5Br}5#PaYAU%XIPwMt0jKIQt_%@ZIeX{-JLn^4;WngwC@~ zA@|f>a$AoE?Ul*6-1|#Bcm5T*yIXH=siO~h=`Payep?(WP+2b3RI%IeBLhC{!;9vz&={y54HsOpBEF5%fO?s555hB`R$ z^V8dBT=HZ0Zdgqin4fyc_6Qh;&|B^{;JuWl5!FmxxjyG+nd%S9s~q?%`7p$GFS;3; z#g5U=7*wZG=8!n8^wEXXB_~0V(SY{4`bRmjBjhA3WLL&wzt_kP@=Mux3}8EuAeVMZ z+FSL<;4bw+*9|NC#K#VB2f35YWqVpr_=0=Bje6*(T>Lzcb^~S52E{JrPr1$xHE`wO?#Q3r>&l2D2o0fGIn>wZ*_q+ER3u@%iDI zx`$W9yS!pP+CpWw6bx zZ2}9oeWf5dCSVR|N6}Us4be}8W-d6>J{>2|m9_N-+q9+S)%N%P?$5XDOG|BT;X?cN zZ~yl8lmDxqZATM+^MAALZNZoDNbFwaRGUctSLc&~{n}m1^+;IT3l|eyedD$E3%~fQ z8N8Ypo59~5=g6JYkn?`dI~X0k`p_+^8IN1+t)S7I(_J zoLe@G=}Kp6KN(M+ln>I@=K|*)KGT;$j$un^q5K=QWYw%ZNx~gRt81(6NP@Df`Vm`YcP7b-@-CJSxkKLh21*Z_XRzxX8! 
zp^&R=d?2j8FMmLt9T&0x<GSvhTCJCLOI@hr_f$}u1KCHh@C zCS09kcvK3PG|K02e9F6mZ27`rT<@mS^?qLF?W1=t?Fsdv>vQ<_VbGN-(5Uq;qqkIlJf15+iqoGT)YbdbR0p> zPjL;ofED<)hbqhoZ5QN7D{3Xpz^k@~`pi=a!ZqT~bkx_A>Kua+pE=D3-d9iUVFYAS zE;gX@FI{c3i+*F+9PYXwpYoFkBkj_I7uqB5dbrJ;JJSZ{z|SH5S85=x9Sg9HPP>PY z45Xj#iBU&wYk9rxEN`_ZKmSbom4EYvcKeR8oG2V?+vtabL--OK(j(qgC)r%$SQq-= zJgtMU*@n27&`Cna&ppYB)`2q7P1xdNqtFfW-bI#`a@P8^!*eXv1UJNGSWmoWvtuYRoy~8azRuW5h!TER#*a>E-_(78I`9*HSG49q9fb^s=HXlW^TU)+TS}NGtH*M?Fi2tK=0Af>By?ISJwRGgeOjnBHvS!ep*4fal8;7gkdwOXGL*k%az_A zotmUCvY(0n?Ul{Yb$o2HO-#;`&rF+`o+Ik1OfapjuVwOFxpNbmV_<&HW@17*p$pJI zwICGwP(;}$qDDSdXQD^t?~J$k?sNU6vXm4vQX5X10<(5p#{f?^RQ-;t;J(GsTq zvDruy9fO{%XF0$h;TVWrt34y1uTkHPEwZvq+eF|$;tsYbca%8zqnkR6)vvl$U18;H z2g%yGkBr+8rzQxchrimn$`|&m6VC9vyqME4k*$ms+Rx26Iua?25c|&JdYc?SYL7m2 zsf|p`wsra&o7?Ld8#6!kjN|8rM?2_E`a#q`ZMV$<_L|QwQ1YaiBg)wK#2HZL!YUV7 ziSt@G3ayYQW!W}fT`b+CPq3{X(0+z@)s%P;?%0<)Aw+_o(XEI}P8L>wrQL@Qh^{T^ znB4|QUKiJX8y;A#%ks)%+uB}fcNcHAmDRyrCn|4p_=~SPoo{aN<-S+m5b2KL7#cxGcr*MTJ0IQ;Kgx?`e`kz| zdS&bcot>ocO`an=Lfz?j+p@qINUOe(Tbn}oGX~Sa5uSjy8(UimpPZj<@BY9C+m-je z9~_)&TWjmsxpfkaZE!Y?#Fhc5uh;&g@EVuu{Dhgiy0UD;PM-*5UpJ#BZyvNs%oqQf zgA|6kg3FX`T%B=yXPIW!<43FzKX6wWbs0}vxa^#eFyZQHF=$zRzTPXLc52y0!p#*s z68LS0C^K^i+`)40(x>JX)PB%Ad@5Y=!ia!(k?&plo7)?^$j?qTi)vSBJBu~j6{AMS zCXsn~55!z2Twwg_F1k&^hp-3vzKWCi84$?{=Un%=+XmJb5YsEP*a({rkFP!Ygq)C*HbS z;mmjLiN{?=!MRE*et>++4rN?lhRkQ$%EQbH(^WrloyiE8Hv_pqYDY_ymh00OT%i-` zD6#Vg-{PjHXBe*X4H?C8=uoBhaAh}$yy1I5R;Udx`sw7K9&S@xuq!8vuTUrQ6ADNL zXnq>!Dhv&J%QvpdK5(X_s!M_356>u_l$J{QM9jbw-?$3TWAJp}Rs7>8ezF4-xIBWe z2HWhQ3q1`chJdK@n9<4USvv4D;lDSo-D&skF14TgAAYXU#<#N<&(a8vl8+sk8U(Sk zsqv<(9s5J+=s39R_cz_?>aGNhb9d#s+qDLPzza8dMLGp{#0pMq^5XS!7?9M}!R|U5ey}axyB9^C!iZ`5mSkKa49Cn&cAboexmAeLF z>Fv_rF*t*Acp>qmkvXY{@Rp(Yms^8S;>1u(U^)3Fr;(T*{Vkr#7*cLJ`Z>@A+A-*lv; znK+Y?j8{?0_4u@*1*d5*EZOMNJHP=aFsQeIGxbV*;E)c9YI7V+2L>^Fl{KXtN)@Y1GIS2 zpsgASYxE=KR-0|X)FFR;J7yRt@fETu4}v95%}-CYk(Yxu4#GM?xDTCofO}($j@0%J zyg`Eiue!)kJY`V@@ocrUJNk(iCvDPUr2VU%XQZPT7hu@Y+NT5Y@H?)wk;$?4zx`kT zl{Pa!-HtZz5RT3Su-HocDMQjZ4*n6;WE5BRy6@ilt`kFF#l1E9TjHzt>Y;A;!~1(5 z(BH$2xcF>XH8TE92YkrXzU4QTL{r4>Yeu57Ew4hxRzff%|L+7(P9sEp_qEkrOEn zzR~&2qF&PFo0;13xp+rbPH|BsV7IpFC`F30`J*ZwY6?Gn|D?lK5~Z$0%4tK&8)fCR z6ZF(R0_((_yW$VgcdI z_Rs#&|EDdkZ?+-m=`Jl#G8q;QkT!|laj_Nekz-=HdV9z=9ZG{~ zVXl)BvH+L7oCFxcE^_iby)*XS~9N`Z9;>?L~Ph!~K+-~<4H_*YmnS}VI zU-+%I4}U-O@O#@geeB!XC;sx!v^H~^{`zJ+1m~0n98TDPTJ?nZ1UKn7DvQAHVvl>+ zealO0IrjS({?#vIqn&Sax?n?=4mgF)Je5gh@$k@jGsmKO%G21`aC_jP2Se*?SD%F!*4wpfH<078cK-BC+t`Gc!jrr3 zznTixK@K8^fRdArIy_~P#ClpWV9Nx(dM*Q+!YV$L-;!4sME8S@URe~Rd~nxxn4OwI z#@yxaqF7HuqF<$qhWP%gH#=E|2I8mklD*AOKg!YGw`i`|VI3@yZD?zMMrTkiaZb4Q zp)k2nNIOSZ#ykd^ddmtf^vpN5sizwa9FW!Dezue7^38-h!H}WlrR8>aeW$Ih?YCe2 z<=<%k&CmUdlsO6#UFfO4GqHB2i%}@HxxoCw+4(kqW}(eY4z-{8sh>c;58CxNUMIi1 znqXiP8q)X^eeT87hax)1~ zyze3tCv+n3GL?2EFv@t+BZ8tmAil}N(jp(y)Ia%>dg#~NZB7;%JxSxk%bV`zg6m(y zzoi?xfPyNjsuri1*ct7W7>!lgCwX=K{AO+nGy_fbvq#mnPWDfv+8#!v{cz3tI<121X za_r2{ea_4}sf?a;jMTDd>coq53Vh_*F;H+MFUw2B;$4~&B<}$PK{#;)4dm0j$^(R( zd7N=cg?$$Ip>6anFl9c0+@xPlgYpeVmk*@9yzO%qvypkxyRO0RbCR(^#aiqz$`aQW zM;Yo_gS3IP%flnw&6kwqN&SeX#Mlma8kA#l8H=L8*ak$mpmgJU%&-PfHxaw;OAlA*VWOYGwww;nNZNA;{Y1+FDy*--xX$eqFri zi5Jl);KEOvZo;p-Tial2~Y9bIpwWzB)>SJsL6co#!9<* z{&bt0nrv$u8*ReLZ4x-gA_{?=&PxT9N!Ahz|M%tv>yN+RMFC{&2X_soF$(Jr*&5r(w(Kb0bo${0^7YkEt zc+zp|si`UQQ9h)scyZiy7#%ZCTQZo%kK5S4>+4(X?yaS^xVT20+~hOb&Z7q(eCSF} zOf!zUR~#6&xwQ!n7ik|h+O<2&?Z%x|Y;o#6Di5Gzkq66n!B+6DRQC1lXkmM5-9)xp zqe#~AS$|I)AK*_IRJzzE)*XMIdv~8#>p8D3;sclRL;Qrp@WcTgc%|AOc~8B}rS3>v z{w$+jlU^XDmJjla?U%LF&$d0aF!L^s_03}P-rKm=$$l#7T@YREy5lkN>hN_0Z5)3& 
z)uzv!Ztwfhhuh^3eu%)~qWC^2&U}U8WF0KGODCNj%Iy!uLdAY5s%Li;jNT$G%uWe27hR*xf zfW32lUHki&#rM6(#|cOL>w|ywr~b$%QOcKN=*iG9p`B0fYW^te7#=dG&oX2wBLTM& zFg*VFiMXuLSEJdv5IH=jI)ukqhWR3p!LkgY;xUE|Hb^7fyeENLl#^r;!u{+aMd&GR`R*VKSd^-D1)*eyGk-_n549H`QhE)T3VserO zlMWq4#R#ArN}y~SXkZ$IvHezk4RCvu6yeqx7;Uh#fPxFNNy!8*kz;l6r{b=V6Lg=g zVtB5ig!I9oKi1bD<&FfI6gDCaoN)P=28giY!79spH7J!w9ceGG$|LX+<+}^E-&rCI ze&c5kWApayTkZPmue7T#zt~=X^`*9S_ikHTTf#uzj=^ibu2MI52U(&6%#(LYNF6Mr z8db=nLRFW5Obr#eHG?L}M_f2S#zkZezYI*LBQ=_Vz=eew47G7OIvCq@2Bs%w+sxE# z4D-#6wb0)|HYWtCGl>yGBe{Hc8AX23e(B%*>%jcTJ08W*-f2%f{cMiGo1gPLMrHJ* zqe17V2CWXDy@Pnx=dev+^gDq+I_@_e2ix52T)XnX`S$Q550U3P+T|;kb9CP9{A`<^ znTChP^Z5jKC7F-Lq{diJBO85nX6j3s4iNc0eGSaMLDVm##}yr=d&fu0fvdxf5PEVa zUFi_c`MWjwMH?vPmA)kE-DWtx1XPjG;L=xVv(Mw*H%kFblO49@Fl zcRENK&A=X`96tx!veOOu(5Q7XLHWpveZn)C*A3o97Pt#~!BM=EN5cdgIKxZ8XWmYb z=qnHA$ad=H?$7LeBA!QsI*Gk_Z>5fgqhS|KS%F+dP&3P@`pcX8^yT-@F&yL#F%jI> zz(6UaKBK~Oy?ej!`UOp)Kj9jH$;;xABg=!A((Say^cC)pB*c=^Uy(knxN zNBpGhXs@oR$tC=N-H9039Np)j0P@27KJx3ps2vOk&==+>+8fu_+a4X#cRuM89a@sk0hU^X#2vk9OOYKk@Pm=|;g!9E!M443 z(6;E{>~P<9FpJPV2Pfbo@fi5H>nGci}6SO4B! zu@m#Gv{(!Ra$nF9hj%CO zZJX6gqOukU&=EDBa_)d8{F-BomGkJVu8t12Azx&jv=*m6w{3Iap#Pq7R3&jTu;Ks# zZAaQRM4xo+LD88*PtZ~o@5w;%a| z?`>x=Z9kw7T=q%(ON zuAUQS^&Sd3rgP*%+)*#(lcipMDz14p8*MF7rHrs_u7ZT+W3||&c*zS^N!UgUyZFy*KurOPY4{<>fxfEY`f_Ys_}j@A(%HU> z--(IpyZI@E9Pug7QVkb>Xv6t=kpbwVp3XooFv!E%@kd>i>Bt~Y>QZgP979eUVchh) zsi*W&c`3u145m6O3=-z83d_0!vTF6-1kb48aN+sK1$cX_2Pz3^&# z=Ba1f_?SDP;bX#7UGk8yMP;d1sc%04AupDGq1U?CaHeLG4ct2?!gz1oWWLS_ZbY6Y2&5;VIK}p?J4nO5_hdzux;DcmPeisw5|2s zcJtO7*h((88E#t}dpV}u#THIrnl}9{WJ`I|IYDXN_3PU}=`rHCj3*F)dUwY6&UQ2T zKtSoIylR`6&V@+P3&^keXK|8peNgs`c8KpeU4zR7=Y=NZsNb6AWTAF&d^m^KQXfh1r`S}9*=M>D7 z@hRR8_lvJio!V=2o?>ueHuk0V-NM3jJA3X7?TwTDvFC& zkfuPciYpJOW9^H`XLZUaJPf?0r>Z{H&*<*>W&DXJJ>jrCmu+EMqN$5WA3k?X*yx7o ziSwjfQ<}e`rY$Eg$j{o>IR%ik($n4M3VcrRp$zetQxJnY;3bZ9k}m|wPa`^3WO(&; zc7iMOuF2nlv^Bwr_=(r^#LFj`}*2?m|wz{&EI$94GpY@afPTZEg(e;7g<^T+$oY zx@+G$>g`6XwGH5>ui)H)pZwZKzi1CSw(IGt)-igsgQs8xC~uTyaan*GTF4a&lNB{e z$3OhY6G#+~(o;AKABOKB6Y`fEe*_a$x7&-i+B+V8kiJfh;Td0hDHDF^3%}nlPt*b) z;=uIUhn^nhNx;g?uBWAKZKG4FP06Rk057&XII%4fO7jnY7qEF3etXc~t#|QMDhPbo z|IoVn2cZP{iF0c2W!mTs>=5XzO))y^=dpmfUnoQQwK#Gto6x4PJ?*DW&(5?nXU|2K z-nx0KZEkF#QxDqI%tV`=o^Es4*t0V;8RK%H%Pw}cG?LD1t1Io5SFg3JZ`^82E1PX` zWsADQqlniL;nI0PT@u>X9D%?{Q`3hxr4xAQb=?v22Lh~Qa2C-gjj6;{&}LYEq&?oI ziwl`*dO#~u3tpDyWi>bO1-m`pDq@v8Uub#cCY0f6&3*2~Lq{p;N_ z@|w7oXW3o(yekhC-lx0ol(oRoJtz1WX7ko=>+>Q|fgn<0l}n!L@LcH(DZbFDH{wjz za2WXhJ)P-# zcT5)^Eoz`$AN;F7{llLm^UE@4l%9-7MZN1}XdAK`R1yFQ$lcD1gZL4M5;8kD2@9~s zH^>srtSUk_IFV%&M4E$AY%!YgQ(>{pFl{E9peu_S5$?V zq*Pg?eCl9a!>w0f)p|^kLLs0f{PbG$sw>#LJhUVdP<1`#C7iJ6D!zB{*FBqkANM%C zW6Wd2sNV9MD8Q?NDgSasJIW?P@gsNZXITH|i-DbgetP95eiAn!1(+@^G~p@O{HrXL zSfZvZFH!~vdT?1z3cO!eacc~nwd#dLw1&UbZ)TQ3dyGJYY%s?}8E!m6RiNIxocg>| zem#{7+_->3NCGRy@naO|372@}n>rP@_5(}dIkL1t%`t!Qw#WaEP0b;sLv&y^+beIp z)|S@R+NqJL_S_TCwkMx|p$!k#j_eq^){!C<_EkEUD}EDRBay~9-~MwD7ok@-dGwz< z;7(EgD2VZ>N#)By9qU6=7hHMyUE0W@_fCNt62%`&T3m}<=~#mUJ-^yOc~30T=MuNv zc_5XBiTM~t7DSY8cJwi+k|$b$^nF_Y#G5ijM+A5s;PCrFqZq|@qBRnn6hC+7OuKaP z5{AKS26Hu1_II{oXxsVqxF=V(3$N73AOtjUGSChsfia%+t^xh3Ya(^xSzn3qPvR7RjWN;rgPoP7Nbmz}1`2@I&o ziFWq<`S#$05449Ky3)?^JiD-v4x7fiI9HABYe?FN(;*Na;>`Sm2jC8FkYn-Juf&x- zGz1eCs!4xcg~v$e%`B2n8bY#*!ArRQ8o?DSS2fOD7VFVFIOC>0kk68cckU@IchYkY zO%fNKW}|4%_*=HT>BS@UwgXi#YBcwqSKm*%=xtz1onv?v&OGJ8!O%=jm?mX=m!B+G zynx#%WZ|)`BOjlIW5@wDaBU}weD!mXpgMlm(eM23wtGvf84woHnFNy;!M#T<1}^+< zucCwT68@@#IJ9hOsGlH;x59f0@HaH}-L&ReG>IQmHRc_$t+Je zq^Eo8@;Q$8)_mUzNB`c(`wvX7vaW}>x%aNm|1IS~=W_r5ET6FB;L0yDdkkCW-jqeX 
zXs7i0q0;)?T_?iQd4#_*V1sa|8?;fB1tgH?5t6&(e}Z^)_y>kb13!_c+FNidFH*`| z=C3>nqB`H39TxQ&R(S1Hkh#?h%BOZ6VI*)DWA3&c3RLnWBXmfPyQ8oYFZ?OPc0|6a zHmP9oq!pU9gb9cGC%eYB@ZWP66jF(7VC9pSnI*%d7u=dWSI)GxY+JyEIvQ{?cqFb; zM^jloe`Y})lec{iU`8c$cVGj?VQ}Yn+;*04w+A17sQtkYeyIJ@FaBa%2OU$xgKc$V zn>K$OJqcW~l}tC#{`UXl|J}aj!yjsw&QG?zjlDK8JJWV4%VuO^bQ~LUCzC|BUE0L8 z2*P$rdMf|4edJ%K5&$N3c3{gk_V~N<=#ngcQ2%hZ9_cGtKPp~V)kmB;&|vk&wWM`` z&cci%-fRL4H*OXn0gH?0vY--uZ~N;vcQYA@zmuQdM_iCdy&Uj#2c7*J`*D7XA$fEY zTGCWqsqGZ<2^&P%!Un6)K*3k?n+xy`Ji1aFZJ@2KZMH8y^K|?4Z$Htd7v|cnde@^ zMmy6c#-;;b4f26w(l}Ty-fXv#IO^cDId?n_{D$RSp3l3N6Z8&%7XK>?Nk?3A;6@m9 z$fPoNWi9zY40XTscW^=5kbvZueB~kKKm?~;(^XlKFN?4oc;7M(9k7UvU1cB9NTw{p zq&1$&KXw<{ltsuK3AE1Q5UABxi8vuTFpS)E+ed z@|Q1a(VYXZ)<=IXbjvB;MX3^%h1Jv{_6zkVhwxVHOOknN+nIA`+JjdvXJ?G|i8xU< z)DeM+GUw;cw6%>@a&+={51V8#yX~gvFDx$IZR_{$A{hJa<(FP;8`ZKr5hUO0$KUw2FXrjONBdl!mf)m> zCmdxbzj_jRnVOzy<6{#%i#y`&?qpZ3FxSrIis#}TCmDuD#@aZ2p9zopS9ij{cUQLB z>egX<@~LOrfAQD;R(oZ6-P*JXU~=)4lV8=ox+v%%dd6?)&Q6cEL3GBsnd$a3f8md{ zxvBB?hCBb47hwx|O@g%Sgq1J$(SutX8`x=1q8!pEq|GB&X>Oe|&ZO?M;_{U5sj6*| zo`6AJ#ING2d`V(=QaT~%g7K0e^Q2#kuJK#DPADo?E=rmlCNbN;37$AsQxLt7MhE?wA0qKctD`dBJ$hud)n_r zfhN3G{F8TGwE>lr@$0=@;pc0+WR4Ts{#keO`KvgAlexNba|;lC`CUHhfF8I$`u!iA zcrLsgtgW}Dt5@5rFTMb6;1}9m^~|nEDx;TNykVP@{tSF<8<%m<@CEpy9QB-zQVZwa zReUM0FH;A;5W12iCHe zte0@%XP&%^6-z8|h$S9{gV#85VFH0CUXQ!`I22z-&jx?n1Wn_iLg6RRTC&me3p z-D_)i7u#n)^@aB2Gq>{X|B*4_kRP?7Mrg0V2X!{5(85fpuZ624^0^2Kulk(TgYv>h==79xNcqa$$VBRRun%r!0&55|hlgQw9c`Qwyk%q6B#>>GvLK$c%~M{; zYFZuv-F8FTC_6*oTV3r@>&8zT4pyv>_;wey3t_jhgIx&k7)egBQI~gc+ZJtaV?UFp zelAojz6HO$hc_uVl?xWs0eY#=jLS>H^t?~+N~9wNd4&q(UFoG~g@;yor-E^HCrQ$U zgy9XIoo5o4+NQi~n|RX4&hA#*-h$teDSD<)Y`#q{KP%yBMClU%$1ZuU zVyihmH;Nqjd0H0GcF4yuQ#Z+NZf&*2+b*cyZ`;_F*>E-F`1>gB?Wwi_ ze2%%DLf^RizZMsjHVEyw%SZM~!cWSze%4bS3f+2|`O96J5~=3dN!(he{hzMLl zAdRXT+!Y?u=RMFFM!v#Fn753p2rH2yYbG@SoNaE>mf|^k=1lv>kA7=A|E~8DO53@- z92T-YsJwOk!&~p2JAM^TT#%?kf5g?h`4t`d@ZJ@SRYx7q%7yjL<+Gg2&7TwHr@b?d zcX8;w!>N@%&Nk`$tIH>3>hg*l8o$C#o_O)=@AVw=B+RRe?=xwoY2ZnI6v1=oiG0+l zmiE2XH~4eyBRh^0>|w90EU&h^i%ZBE`X5=euLl!V)l#p@v!1KGf&$n1y{+r(dBUYW z+>`P|R^9XU!T-fi|L7+%CNGC+VXic`gyhe68(KSwHhKtUIvymptQ<+ye+L1-u&-W3 z2;Al*0Zst!V4;;W?+CUL@O9Ol#+JE6K$4_jP=Pwobx3Eq!icMaPnp#K_#Oe1A*#AK z^TT1JNBNtnL^3>x5gV3|^N~2-`jFfsTt@KS%Gi*aKY4TSzr&yKL^OU~ag-^%F>XW- zd03~~d2RkEy1W-obolb_ow}2`+1Kt(pA0q741FyDbi&4lKYqd-Lg2?egT|&mhO4V5 zKc@y=Icgc=g+`V#)NSeznc6H3HE!Ut-hvTY`EGr@^VWfM>;k3@JF&QHBzyO}=%}2Bz2-ncegedrQQcYMRd%o+BUnVAntso7aTsUBvJ)fo{ zgGbQ<_#@ATE6ZGV9?8$RWgH;a63FsX2lDV)nB2ueSm3}Y(?94-SskB9Kkk0J)!iu? z17}X3Zs#vwYL~BEYUj?MZL@RJ?flt=c6NTD4btgyCF~|W9gq4M8<|XPG;q^#;icad ztGHUA{3kE^T0BI#0RswAKY1l+;9iXyc(KzC4eYqg&P=zN2`Aqt+ho3-<+1wHDa%P; zGj@{RUEL~aB|)QEeWT9X!wC9kzxa#o^wdoI$Vb1qefoEPw|)Atr`z0|JM+ik1CJ-y zXpuf0?!fby9nCB_m~7J+=d*LOZGLX9U3ut%_UNPUXcsSDXlKsO$H=wAoQYNk3@J0l z2bDr<@s~WEd`JTc4$PJ}hyf*x>SB!*%fBx_71uO*=Zd@-P6k=!8(GkcA2Sv2;9B+C zu|=*T{Cy`cm2uSPec#_JIU(&=X&po~0x^1+T(4ZsLmzKKZLI(N5aaE5n} z@L62)o1Yq1>P_%H2w!5jCtvCz{)5vh2p*%1fTgaXzIiVhwXA#}jz8-vZN$xedg+%? 
zATeJFrI)lbbG^yeN*kZF86-41@0FL&T)5B#I)-+YQHA51WO!v~0J5HedEqtR{!X|C zblM91J7TXZIq7c%P-t$?|Agl;CExg9sR;;9VrvrLOBx* zC*~Z)KovxWLR0@Jc$DXHI@ggAK_;*2?3PW^<28eC@|(E@(q!6UP^&jUTp!AA|rFr zYvf8yeJ5Nlxmd_ug*gd;{4=o79I)9#nHroXFp1bp0`}_a+ACnpMK3Hi( z>#Hitprd}1fHeq=k30$$L!A>&KW`Q1ORhfr&vn*6Vc&Z7^|IWFyUfjdfAy}%&%C{S z@54{rzuNTO1hVl>lGxspzR$x8Iwc;rxWwd=Prc`f`@cv2$I%Sy@BP)eBbwJ0J)dBo zy`OL=vpuqVtkc|m58O(W@&=Bs0~Fr)tgP6U)9J?Q@Yw%0JVjeGhu;kO`Ys)u7*#I; z%l{4`ZS<^I2UboCp!};=>Zb0w1Is={GmM*B!nPr{OLwWq-Id{KG45~` zeFt~d2c{^Iu?*akH&{ap+jsG)1-C^{zYS@d;RJhj@UL%Wfcp*K@}2DyANxT2>=&MG zW9l+sIEi@X)Lb3Wx$C$2w%gYJVf&R|`la^p-S@ROzWt4@&7E#zQ?syTDm!|%Z?2}s ziIf9c>RJ~ssW-K2)U^)aWS291HoO#5(K&-)C#mi4~@DA3qC8mX)~Z{ z$CvcM_|K#Q9@^{jAUa>=CT}E>VYJom=5ylT#Z2loCjm0R>foF_z@~IE1ArC=dYfMe z3|{y-&`OwtW=`^l2jr&cIvJBwjZ9A-2fN#C`O3B6IWx1^2FTvV-k|;dhyS?!)^B{O zoxA%&o4@DY_RZh*UF{ox^haBp_VZ3IY5`Z|e;B+Xqu33l4uI=B_+)1e?TNJXgowTE z-FEHTwf1lS-7mD$rx;n3we7#ZLw$N8Wd#i>0+rxZ~ZlYb)2=)?l-}`3n81u(2L{^HO>R}tq-6p&X2Jwt3|isC_6ciMYQ zwfzyV0c98rHrnQ34O#SC!n44v69530#gcVwb@d~(sm)g#**fTid=?}i=bkd+B$zT+ zv~{tfr+Vx)cNT7;r(9@YbD*tmKV@oqs`Z~XbMSh58@p#4J4m^USRhB;pr#9L#wTVn z5I*ZddiqyR{@X%KqwlU=TW|l*|MyR{pZfI=HK(X_<3r#dGPo<)Nj!BFui!W{MOmCk zoEU9q=NH-!ea~NNU-RZSwl6;YH2ujN#DkAkY)Dv>R?wPqr)+pTQK?N@iyY)NLeS$1 zkEFYKW`f=_+irBSq0KBkU5H`5AZpqU+n;I4R?)RXIgs+nvI@`YrRxI{_~$s%S}_Hl{QGxdel_WJ z$fyW+R~9!YYk4mi8T0aPxDOJ*a)@%5js2497ag|2)oX1Yd3))F=iBt;Onc*-zotF# z+K1cf%6i*?hv_E~KYeQQ%Ge5i8AIVNoZUUY@eJ*aYj)g4DK=V<4*^-AgCHIGhv;&W zHVN3iSw8!IHEq7%xJ@SLLLPv?OZD;8P02>c;vL+? z0nh&G2qU+BOH*?Pd@iz8Y<|Rlrf_x){a2x_vdrLgwcd+pL=$+-xz2odsS_ zBHwk_eeE@mzP?>Jf3`jH(4*}Uo-bdz)GlASYR9jQSbx;>R(G(ngxGL?FLf#NKU0 zNJUDv<#~q3owGEVb9Zj<)5VyKR_J4#KSj>6;3-d(Px9aW9(($Bh#-XgSZvo?yX_`20-s{v zBHMmW%Jx@Vdd3~%l+gtNUgB?=^>a+og@8A&UvD?jVfMMkMs1TFA93ueh_MR|oOntb z>evcJS7+M(c_NSHQ3uI8Wi{oNC*)r;tNau{c;O#j@Lsa4FO-)zKBk?KzeY7q-Am_5 z3*|x?D4DKsB1^0|+rApZb78Ox<{W$TQ$J3sAJR{>E#2Bi?@Uxaj@?J6$-w9XdCxQd zp|v&;@*~f!Z-@nHDsS^sZd{Pz{7}l2MzOnuMLJ38X|0}Q=QzG6*U=40ma=+z?U4E~ z+8((3&e-C$_{jX6*HG`c&{SBJ#)TT%8usA|k6I9Dd}X3v0G)|^%SNP(^+oUbF8#RM zR}cRTbw$M2Z z<#n)$O}K%LgMH|{#NvV{)!H9*6G4mJc~o>>RX7>`iUQvu94X zQ)f@NZFKtOn_F!S{j`COvtQ_m11`iH#m38O_{f`AMvhZ<>z0{?49@MO6+=;tz3RGQ3E&_(@0xXxwx_pC2{JHgUa`olv z;`o?+v~$s)@$aJ?;~^#Ek4xE2%8#@#a{2il2uJTnSN=M%rggkL1Qf3D!|b>|P5;bq zp6lM_r%c3;bm&m~DH#)o2riJZ?;Hjrt?Bd97qy?@iE=yi4OUiG+Krnxa#F0i)BcbG zt7#)($8*iQw;hggDA)VGlDjo#xxn(Cw-c$$1K__6QK2>@kW>RXW_ydV|@8^7*S zctf}@yBP6#;6gyVJW4<w6pI$ zX-beZ=2SYCrypD>xW+eUUws!4@VcVJC9vQp55r}oQMn|M{zWTXUwqt=AI?XM(=UEd zJs4gU9RIqJ9eyC+PxQ;Ko)4MK$iNN0@hY<4& z@|CuC9TxmM@YFpfu9$Wn{>mT9U-eB)!U({Lm+`6ssSIPAc6sCma^3^iaGp!1c&5_V z(McG+sRNz7*=aG)pC5btkVqUp3V1j5yG0`EAhs ze8BdO$4)u$-bOt@Hibmb<>v=rT5{C2QsrhzlX%X=lY8&pfB7GWB;A)8U91h;R`J22b zm$-W6XnVsW8_G@5Sv*w+8f{Y;CLXsvgV8ZfC&lCc{Z71tUankJ_BDj8-|qJDTjS9~ zHq=~$jqP-Fo_zX=_B$W={q_yt@Q!wXA@jixeI#+0T+o2=U?(xVg|GJFzZ(QmBgcW9+d6Gt1Eg^ zwGce=aO8|1o+PfW!$o=WkFtcP7y9h zynFmiB?28y1IsE&`E*4y;;K}mk0_~NE0g24z(I43h8XgADL2SqK>MR#m!{t7fS4cU z>RsqVkTBDc58R&4rY{`pIX8+04b?5+Z`)-ZupQReu^m^>)e&u6)P-lBd8WPjEpJ9Y zY_-*!E8qw{$dmqg;yK`{hiG{@*AZjUyZaGiuxekK2XdXP!j(f#&p zj@<1y_7VR*(<>zP@Fn+K@&9sg)3W0J!{`5sPGLS71`Xo_*uK6Vxs%bl`kv?RUJ(i( zu}J5Sco}Tg-^zfWlQK?#(gh|Tk7B3O>F0_@r-(C6N(U3H(Kzw59*Ro+1g{>oh#8Rl zi>G>mXXU{qHQI9ucjQ!9)Ya7O=)_~49MHB``+oBf%o3~~Etw8zek`3?YX4d9ZR2dW z3nuv;mK{*^BckcihbagDX)}&&impx*?0~wy?xb;IM<@BJQ?EO|OMC6J=nY(UU=!B~ zE~~r)*Y0}rA7}eYp=~vzn53mVt07r73N->0DyYpz5n*N z+DlJ8)!y;uN88%+G8}T}>r|V?X7Jcq>EQ&L6Gi!Mto#Ir*j&782gwsDS9-6?lV|Sg z76<74mZA%+v~iJwz>r?*ANxxYFWzea%}{7+A&V&SbGecORiPgHOcNx5CNF!o7p%fSdmQ 
zDM!^tk}hclWEsps{7I`&nIB-?vG1GjUMXJ|psGK3G!F1mPCVk5G?gkiNo?Uea20VP zl6mlNKiYO*JKBXg)(z!KTKb)8Gng2kXm{Lokvf5VLA%rk+EWLJ^H~j^9jKh3590}7 zUK_}o3x>{|J=5mtzr6hN%Wa)D+B*2y=bz00zwoqmtaHlt?7~6@QN>(Yc6MI7-n!xC zxPp_(4v2b!oYy$@+{FRjy(S2=|788wzVgH$Z7$1URnjjLQIt`eH2Q%IwZT#ccelA8 zWFkPlsht!#t|%B-IvP>vj2&+bwBH#tFsJk%XCeu_+7#;4olG<7rSOK(^nG!Te5 zh*@pR5ca#f{oOq|j{TF#4p_Xo=^}>R_VSIDwzhfLKJwAew7>sf{nPf$_2o7W3vGes zrzgW-^@GP1+cyX<|Iy$0{`S^4yuLm8_~+X5&p#LbO_8sk zxKjT6li9}Ah44eUMjFdzYIF+a zR_EH!%whxhl3m)d!Ra$>Z9>nzwzFABjE@pQT`XN((6vh(>FuP43pq?<+or86f9=az zC&uZo<`g{JGMOKTKjkdlQtcEa2~Kg;jXL4ZiV}~iDm;s){^6!oh-aHV%V$(mnf~Z9 z%RKk@4pyhr`}Z$(;7f+F1$1UyK={|s=dZ?9ywVGmU%CDc#wwY@yztdU-`1nq^%J+k z*r6Y?zrN8v^#0$?=@M^#`&$s@vG&wc&t})6c|=EXkA4S^vJ}UQ<#K#yWSlxLEs0n3 zGfe(hK3+Wz>P39HgC}u^U62B79u0&%&sugUX7hk^IF28(@{_~(+y@dW8tY<@nc-iK;d(=L8c1a`av1{O`b=dI% zC#*gCUpq{fF%n$xww&4~;AwrajDFYN(-Vwcb{lbkmy_A3hvH>_OWMYsDK&s2h2@2R zf;Mk?@3@pC^V2KoZ*q;GtJU>$b93#UhaPONdE49C;nIBUS|j0CkP$;%8q9jVDwIqZZHD3@CBKO zu9zT*cHXhX#p$tj@BOFS7;<7fH+;>`&{mBfwwc-SHaj=ore{Xl8Sbxs=$`hv*WM3b z?6Z!y`GxxAjEhajXDB!DT=Ws010QU|4Xd<9%eW|Ubkg^0_4p1l&E zAEQM^6*h`v{pd@o42ye>|5W_4Z;|;E{V&4<34Zw;8L@5&Qavl(!#mTOH|Y@o6F-?`9LJwe4M25kjxt&2LUU7!te($~cUmPgbSE*>&2 zFf|?f)A0@4EXN8{J}xI!#cyVIwoOgWlwCd8%%rm#CS$+kZ@il~Z?xqb%lIB-q1(jx z6tIwY2(G*;(-pUovS;F8MSKw1QAM)ge66mV$rCk%_GH(OFRvJ>-|%p1W zRVNXI!bRvJ!Bc;KWp`BJE1vD7>3OVlV&HSVA76dEFsmyUG#WwgsBg-*;Drpt?R*1K zfN{k)T@Sl|w*1mt!q;Ox`_jcazCveRh3I-%$FAZW)mxL9r#R>ZkA|^b`j*PNCVd?H zqLOrVo_gar@n9>Td2}3Td3mL6Q0LPpwCw0aaZmQ}EwIS*BtAV3Jakxf&NzM8E4dRe zPP)$b(#SvjPkx~L1VAyT1`}jL-76|ZP9FRu9)5u!k{V(a=NK0n&{PsVcr$vK4NT+W z7ow5CZwNx8#Px3Dr7&7dVa4z^z6O9wK}7&8X{?g)duitDkIKqMHMsDbxFjyTgkz&n z1SBMJlGcp#=FP|z?z4I8A*^!V^%NJ&u2DdKczG90N@-d_5T%YvcRNl(hhpAAgp3jgX9*6OLA1{nxL(W|&zS!Z^EmolRC z;i+Y)Zz&l_l-}^{>40BK8t{dX#i!NEYa)K? z$(d6adBbgC;ar=XJ=;F`!H>2NeB@8to#z(Y&b}R$>Z~gZ4j{zv;IgsY#()>RbeA3u z#AJY)_bD2Y$q`pLVpvNb-fOqN^-xmsE)E*hHaO5bLIr{GPGhN&?|`)R)gVifU!FB? 
z%D5`J_C|AfAwI+z9;)MZicSSbx@nl0pR(e_vvsbgofOU@1K>EBm@xfG>VTxX`&ZW1 zFhJ1*bdqN1AWdS>PNjjzfCi^ivx{w-&P7%*y8|BjiF?&EIHKWXe&8u?UZ%@VAIl_~ z;OL6W$Ry#__!C5!ovG=`sT`xWfI&Uyw~OX5GR7QabA_@4H(iH?^6k=D^=Ko%7im3q zx5)Me#wPjw@-P2-cEUXRnn&BGKl5kpi6@`J=$?Zw8cP^8WM2z6q*p^uX4}HN-|C#p zfZ-i?-q{|00Kxb*JiMPnT4N{7<;c6xKn8P7)@23+cnDY)0pf8p6#P{tkT zlLp2A3L|dUZrGk?0AAnVQpUGeI*AUdSy0Ml+hOO!jvT-OC!G=T2e#j9ys_*tR_>}4 z9+!q1bh9MJkCToj_c1-B(z{-LnZmO0qsl8@iDMYg;UDm7aL*LJcJOhT?86NWV@MD+ zb=#8YPyQK%*9hUSc%XK|C7y^U`3ao36Hggd{xc~;zNS^`luzSSo9EqfNjnExYM@nG z+c~u^Pt8oW?ZGbfkWK}BzIJ0PCuZDr@j|RlC0CkO*d z#9dHiee^rh$;XU{<3yKrCvr%+)Jx%kq)2&_Ns3^V^sY;} zd5az)TGe3_$wy@yJmdNluk560$*F>1+yDbscaN)k;^ytvzu=l5uP%)%V13SO=XcBf z_UpFv4uBTmx?A8b=s0f4c&`4>Xui8c_=sSA!jt#guO6n)-oCp0k1qqIna{Y6uCM4$ zc;}Iy~P+vx-X)>m%=N|pf^Y8@W@F2jz1zHq7 zc<6_$n3KN*@?K%)Xidlj2wsPtq!d48vgn;7y3wf_gaCGd)BYq|!s^75fsIC<2z140q;0)}l;bcwb?YzB8)6yI!zqia(q z)PdwH?%mxo**-?E1Su{_L7wU79SkyXV_b6`xBf%t@Bdr>%l2>n^?TZjSJ&Dwd2I@v z1T$0aHfn8vS-7#Kes1lL{_LstQ$P7H+Bbj0H?{}g@-^5eK$<_31)`I4i}}v;KDt6% zNi^M6R1;UWlMZrAZ~Vb8HY;uD=(w~nyxKe4UUj;9+_qtF9|V%l`fn+V&-o8I2m_vQ zeFpi^AwmXE{8qLTW=wmR9 z@-5#B*g0T3u!c;6ZrK;m3A)%1qbR|_@BWKY^&7QEpf5bVapOkY+T3ct_8Y(1F5G!X zyLj;q>{{C+C(>=F{3a_L1^DRjB=V#j15}#>-1^cI2a z)OrvwZ+9FPo=!%RGkA!ownXWxA!%lrqhmtE6vVPx9%UuLmLYP^o1UHq@98$bFh`$iI(-Ev?42mE9!Ae`IkDwcvg%~Elh+;xOgsmh zQ9$ZH2cDg%>(iE9OkWiIJtfW0xp{1VXpIdgeoofq_}BwrDon#O@W+WT+EE10@5j1> zd1_&yO^waAA<8g-Msuf5w{`08Q%^kC{_p?wPqd%^@E^PM16Q(~em}Zka?A-1`R?aU zkY}>+6Dnil`)z4vroHOE3+?az?Z4h0zVEK~_-8-UKKIz?V!w@dy2gn=cf`8G*LvgN zwDnUx??Ox$V|c0y;tM}1FFL_NT{28wsSEt650o;+I{iv;wA@Nr#>UiZwm(x>A_qoG~cM}8-52O~O$ zzv|xsT?J*Cgx;e_S{Fwfmc>~9CAx&wroe8o{~p;#=14;{WX|?k8d)FJN6OX&d6rxh z8AwYR4u@O-My|?eXnNEFM;D5#;RrriA`j-(-dZ#~kjM554V2 zUuJJRc1%vPfF71F3yF|<^}eTr3|#nw>?*d}Y&Wkjw`ZTf(w=I@s>9~LfCHl zJ|3-Zdrzijm|U!z{b_(%yaP_s$jNcX=^QIdeWq+qo(Dhm*r?+K(unw!$@bIG@HF%; zn)^v0?UR0dMViT^oU}n);Yq{Tj?gRGf^tq5F$Yz{m~# zCEg^g%eEFcd)PG_D>vHml`HM3$G*@$_J@zP%U7$&kd8D`k~~1gdL9|)KVzqbQh^gK zn`)eTB)fEXC>P32Q)hZ|s4YxQv_~F!FyG}~zj2jVBeaRo04^@gO|(<EvI4WpZhLcip{gyH>Wx%w@IrkTT&vNaM}(o5c4nnVba>3l#_R|1DgQa zl+6Kt>Ro8$*u#OJ*+`p}c zZrlBtg|>?IHZ^ zR;FC+?hM-M`bt~5eid2vvl{b+xoM{8EbTKXj^L;-Y?~l#Y>t$lGDnBu>$CAA-{9h@ z05xuA9kA9^UCUzKp6oJDJ!1mm$}5>>zYAa$MLfLZoiu`fUIIz0nlkIBS~{+e$qRVA z0g9B$88FpZ+HteflhB7c>Y`(88Fj0NGYpS*_!I!{g@?~6i;vK*^klu;4(Vq~pF;za z2aZKEp1K5VNEdlb5Vo-7Fbm@CZoBYvoc_XEd+^?S!DT38{*p`k73@2@n};^7`o=m_ z<@Gs>ywnTmH0?I!Rl6bjJM>mBakFR#)T@-wvJ@|U7MHLs50<;0GnS{WOrT|zP0mAj zdWiED6VV$2_mmXDWpN`mVCh8awfy5*-UwHFQ5)ELv9-OC4KOa4&O$iin{RXg`S^Jh zPpe2>CjAaHxQ4EO=DADl`syZj<|%NPjvh%K;@gc|XI=~Xsd{r_g-We=_^BJRku-F$ zZLk=ov2K;z)WRO~meW1Gs0-48XYIjCt3PxmePoinl6zO~r+~Y7=1lv#zxW+(hJLJr4_j+1rY4P<*}!lkEh&N5-GL{_ zFTJFPSCSFCn|~0O_?zzJy}t@a(>Z2dIz#}rmC7b@sE>w&xFI<19X&Gkkka|yWgd6p zS^wi`p+oY?d%@P8DLY9&%dTG16@DjOUxwZpJd&{{UZAgf8-kLLfX7!%zV#UyP{@TFod0J!Q$!|jbNb(2rl5L6prK16 z|LAZ3z>lMV@05;x$3}uFlfj5f1zi~wkx~&l#vZ6V1FMV6TTeI}(@dZMi`Xc!YJ@9$ z(oqYm(WwSD0?cy_sFYy#1nF0Wm0(*0pAE4asd6QMJum_be_*X(bcMG- zL(jmb?06Z!1lK3|PztJ0FWeRK@C1L~%PF+P)6cZVQ-H;na!8Ye2!kj8fEfM>BA~l+ z-{P#i$*X&w@Ld2Nfv+V37Nv_Jf#kG0Qy z`qSAFvbTp4r{S!@6LC>?Egu}hfbp(^5dR!mPF{69BdP=$=Ls4VH_a`yAW<;lC4*m` zJcT|fsC?Al#-S@u1juj=E1ro~wA4Q}r1*jhZ6jZ1=5k>hFzc!LNqg|&?f|q#oHUa8 zNiJ^0ck-=v64L;4cXh4d($Y!5<>edg`RAW)&wlYK@}L7tc^$B{b7=m1lznSw3q!lU z86*+GmVA=jv&%z2ZOrNHfcOkhCNw`c7v^A1cG9JqUBW@fU@cHby+ zV$uO}2XoEGQfX-A!f4Lv46f3~NO#BHOV3_uzw-wlYBp%R~b0+TYgw@_}|^_#2EL;&^1}^=Hz8q$w=?g*vtFeNY~|MJZaKJ zN)^j{545l6eVn*T*I&|6C$Eli;Y#CP9^NTRmB-GOe1*pjzM7w%E9FaA4&Hm0?>ak9 znJ{&Lta~px_t|<@b}bv^^2;-v9Jes*@rRmkYwkv0x8 
zu}#P$r~V{Ow-O&`Gwcr6z6_<$h<}T$dk3m@Mn2&@0#f|*Pn_5h6}i%ig^ezZcJe=+ zYsw{CHwW8obz`sXIQV70$ny4qJKc6tMkn}-!T}$><1SYhB}rGw?*ahj#!ixh#)F;B zHZg-Pz^=J)VZOcV`~FvCcBvh1t$~N18leqInV|!ndSwkvxr=MOPu@Bw-Eu1zqzW^o zsUCd4HPx-I54$~1y@%}l^l0?(Pr6>l>MHuDzUY#_?)Lkab6rMV2j=Mg%i?*P>x26p zT)Pj5Hz`xOq&R^N(l*x{YhK17M z`{*QL?oM&i(_KuHB;ehO6QdFx+rcrfUXP}}^BjAvLmACU9P5{Lo!II*>JgO^fAj$&t&FLUK6P;|{6bCBO_ZHD^JynMz#v=*kc z;I56QF3^4!h9}qf{Y2ZaZFJq%wHxin{^oz&UVQS&cID>FZD+7Y`#;v!2im--PVgMV z^wM^iofyFuIA}lpv%l0ng{|@60}rCx4%_DPHf5YbarlV_>_rkeSu~6e7;>^{9A2U~ zrMl%-fjMdJgrnc%c3{WJ5GNDVWww>EvAD~Y8X$3S-L{%iNv@(?_@LO;(Yi%nGm!?` zBQ-dujYfL)8s)FSU|<1P7!Ig;J| z*REY}E6XeSOxEzkbi1~?*{)t)Za@2TA8a!d=i1#5Tx@Ury0^3+`mw*(&b;}JfOg`~ zT_v_-v=1XiJCAcEQ*f}5sVQ8vQOJFrY9M{8^n;B-`_S)xIH&#GdGXG`Ay#Bhxs}H5 z2$Mjbtf3rOUqT{iR_zA)Xal4!bn>FS8(z#Jtrw3vqL*Bkd>dDn$pF&wpSmPG>jIT2 zcv}`1#%AGMKB;6}{E^?Ba$(&OW@K4^>0f0H&PY#eeraNmco0@6WQYqmC@%(@zjccI zEsxSNasa(WlzdBgjAsu1s5`9Nz6;l9!^zi6I=BM_ztAc42alxVF>ks`Cq_Ec^)TT; z?!~)up;sQldTdDhIMq*a=iH-@zLvh**|xd520v;5HsAOjcjvjM`fQ0&2G6~=wl~SS z7VqhQ$AvrEop;}ZY+j{~-fXi|!|n3bt@iv&&$pY%r}mcv_FG%%7U*cbn3|rW{=556 z#OParlixbk{_aoxvJaJ=U_L`*cNSWQwL|SU<$KqXkIVkLHiX~QcVgR<&Ai-U=(}b1 zo516fdLyk~LrbUs<&!UZRICb{g>j8JB+il9oJ@uVXcJcjkwVJ%SV z@36XPc#gWJKENh{mJ}9yXsB&&4%+g{Mq9xS+F09ZpZ?V2?Z5r+-qU{a{U2*L(7n1@ zbjaA)VJ7cW7w~g;nB~)+ouR*WYH_+P&QG;-XBXPr-~4F%JAdm(+TQj?`}CjwNqg$q zr`ja=dgQqE$lbZ_#@m41Cp;u<@wh8m%85n z_VDiATONxq(KEeQ=c>$gg^#NnZ696#(GUK9+qmImE%kNZ3H9oW%e#z;3MC||HdOux zUVfxsmQ)5hx$ zU|(Zb!`HNDkpqFnuB)!;)V7#-|&t$vbX>+s7}7aX^$jQ)iGU8;(>3QD{F0k zu-=xh-)x`!_-EQ@ANROycZiFNG%LN432JO1Q)>s5=^`Agi#>Wpb z!9Tw+!L`uNoIceay7z3m@7_Dx4t>bs-R*YY9p~B`Uwe0Zz2=d-+Sk1G zb?y9J=iBI%`BE2#(DvE!ml|Q>5T_vb@G4933Fu9F%x+rf90Wz7vhd?)>9tjLu8njVn(t>+HgwI+&bH|p`m*R3 z7a-B43(#l>;<^~ZapMSua>U)N`8C6MSuFDhZ0h-uGcT@8*y9|NwZ9ZRWOV1!H2X^Z zbOHM0=w7>VbG6N*$4<@9Vh`1#TxkoHijMT>kigD1(Y|qUh_(s3kvv@B*Xtbho_0rc zAeZ#0eiM$H<*EKgC)=I#UA#(%*FC(U`tPa$84vk4FYc*KRUT8BPw?58`g z1dcE9sP|iO)s9m_^)k(&u-~cRdRh5JCXF@{WkrBQRXIp~TnP%z0+=73@ln8y zpK|bnGPE3}+^awlb{a^7O-Z#iF@*D~Mj%N8Ls$s4N&umWI#ww|T$yWwRZ4@nIF+mo zQxt*l3$?)1^#l=gyA60M}Cr5z1-@|G)^8SalPV95}EH{j5t^rdOE0Q2XG1lZN3f7&bOI! 
z_hP`!=Lo-F{iR=RPkjEFHovq)6FM{F5t=iZ{Pze~SCLL*tVG{{dFqQa;?EVh8bJfv_Z{r9)o z`8n`J-(hSynAht4&0)9Uy6LoQf;$(S^hnmK+FgMJ7ZUX*A+{@ZSXh8t2O6c18gRxCbbyic4pfvgU(>j7YA46ZP0kSaaI5{7|L`BQ zd(a_6gG=zsJ;VnY)Cd65q+vT4%1g_W-#_0SC9a#sfzRre z6Y|^3ZL<7|uF4IO^L7m5_WPG}UHas6GPC~q&qKg)uP>i}U*BH&syL-vb+_kR@m|^K z&xK6M)5`PK_<26U>EyGnkLR6sC2-&~#$2ocpue)5xLit)HWCg2%zG z((Psy{e;UTA)y)cO8r2t$vZkKW%b#d2}>L&&51+XuPi$G;DmX05hG)kBa2RjOP$Xh z-D4P$fJgg5owKb^JI|$#NxN*3Q#aL@rOW#XMdRe;92+FTiCbi^!=L)-;Eip1zp&7V z(RHekM~V=6iJN1Iy$<*g_FabCZeXN zsQdFX?fZZ1@3oJ8_}AK}9)F=tj~}!R$2r8^aX1pp%#5`4!Cvz-uiLzT_NhzlJ@5J1 zhSq4``pw_eCNA99MjeEmoo>67NgX+g?J+j9fIqejIBHYuql>n;HYu-zM$&+G)DdOw z>OFO69ZL*v$XHu6JGzF@pRyvf)&6wx9lDQ#%5qtt-F5U5Sx~=fv)OjL+t$Sh^n;8C zoU$7ol&ndA2OGQ_Zd&C>+~nuh&L(9V!rq!~8*A%G5L$De4+Yd714v*)no zu&+*^ZJWD8?f3rRGwr=U{rm0u`eA$I)ep4q`JwM>f0=9ip@%`vfogYbQT~1CWc#cg zft_idWy63EG0Ra7Y%{-KO2ke~iwv1+&dJvlWA`NY0^%JDB3J;yM2Ts8NJL>~7(Zk6J7?J1W z1uQNgOo?J2DUaNF&SC?+`-%V93%Lu6yCkAc_-uO73`NK@arNM`ujT}UxB`H@#kKM( zN*ZRK0OV%7t-Uamex7>CvJz}~j4kCXg-TwsN0OYRRniDqxWtoYp$p+zIFO8N-;px_ zD9cXbi;wj%_8av;`P5HX_6?l;y>|6-JAeLMd);duX)Bv+fI$ZWUs%>((`vgoab!PY z23udWHrF?x!+2X+Uu#QCr`oF?epT#`r=EJO-CS931B$o4eb}CO{%X5?>3RAaOW8#^ zJw8F3f6&&}2lY9N$(eTU-1)Y2`b_#g(SOjvvN@^iRFk`@+}ZEMdv<40LmbmcKae_` zek>hx(Be-0g}{De6oLbb+5}Bp808L4cQ$&os{JJEoNa@B5hpebv)?u3VovDeIKv)z z!1WR*YU8f`0P699bz8eX7kEIMvVkY_se7F?Yebf;r$pZw!bwg1mQ`LLb_jjZUP)6VWTI$)=*Z)`yKwJfX*jgbilq)S&>pIvOxos^JW zJEo7wMZR>JSk{wFvJ;ja$2@x(-#V|2sVr#kMJGZ_?X-+j5ogc(&4W4s(qs%^dv^;P z*x5Vv2Y&py7Q1%2A@AC>(!8)r9=a?>hX4IZ0At~;vSY)dt!;haE}cme9Q}nZ<{t;n zsk6k98z)`j${6%oGU%=hR`b|{NCWJ@^)Y^ZK)IeBU*e7eI-XholJ?6b=v@!LyITiN z=Gi@Fa9KK^2 z`OKtqA53lPvPxY;6+W{f4x<&4og{*gT#ZwY5EHGw9_rOAGmCw3{QU z%<6@pgB%x)Ycd6#XiKbp^-XK>s(9iH_Ici;&h0zVMgG=8VeW#1{Zac_Uh>l_rmo*X z7u3mYwqu^0LMmZo(G@roSDVM<_%o&fFJ1WH;+;nxeQmo7-4tN(L;hKp)8AJ<>?`q} zQwy-y)^6NryK8Ih>8GA=zxn>pwwJGK4^fvD2g=w_xVa(lhDA5uB-Z&G~7+EY?u5SP1>TH zyRpI%+CLZ4(wK3_R;E2s?%mz2U1S@V6Kzr!A;?5)uPi{;p_BE?Q$zQ+cB;P9uNs}A zuLX_TA@(BqYd5+;Kw3IkpHnVCR608LrHm?jE@qpUnT{#7xw#qKr>0zl;AvRc_6Fcw zkdYG?>~~DgwsHCzZZ2|+MSIpoj4n`7=hPxX^OtvJmr8eKDZA*oia1ah-uB~F7PrPi z;4%J&C!#Fn4@piLjALBoB4cNirF4*dId}$_FjgFRE8j+S#i5YyF761em-Ie!f`*@&z3Fh(6br$SDHK zih3u-N^zxwHi_tDVN#%37xBQ^fA(Zxd9<~$)iwre?ZF4`&VoPngEsn}3xy;8q?2dV z1t}wMwY^+`t*s@nwHrw-8uC?c{4Y0e1Ja?M9Q=7ZAC$&$-Ri~T;Z{>(*+u5|kJnLt; z8<4!G?UJBfy0nSJwa!*sRsNQwo=djnUHVS(CLU$AyrcSc@?UWarm%dryamVp4J;UP zY_H2#iVII(*)HcUA1Y1K(iWJ`Xe7zvYD&S2g*b+9Qs3p}@#KGeDn3k0UZk)3**p0G zFOgdp9x1Em?zpRc)Azi)4NcLPaTDgILSOEvT)4=XjKo=yTA3}FusT^ji>|I;7LCjh#zq^Sn;m~d4g3Ye~h zu-@wv06oJHQ~(#ly%%9DSmAXXrs3&eMomOWygp8T63?smuS#Big`08oaR8AAa5QBu zaEP&}0l>}R2%9y|a1DbeXFw1|n3c;Zm>Oh5ujW1z9} zUS;!cBT?zZ6F>j0Aox{z6D`g(G3ZR&T`I$*=B8Ke{HNhnnPnBLhBVKFtE4i>WCIza zK)w*r!4H7k%m@5Ko1?(w;k~X~UC;B1->1Qmzk|j{oT5PeE(Yx^kEL-Z)%yA%lfHL9 z^oDlz%38bh!prUd_<#PtLdTJ|u(;T!rxw$xa8lBvnd>_`fERsa#74ZzZ<(ZlNB3c{ zaf#U|`8xUUH;vqF<+FGi*U51|0iY6d5Hiq%KSu}8 zZ`kg%_2sp8RLj;w~-GB~@7XB;_kw~*fx)u1En+t_r4R~dF5X53VwwR z)kzK*mf1NF1HKZdEDUFjPG8ACQXr$<2s@exKGM?3T03nS z6opTcH;uUXP>$+wgQF0UcfWJ!j>OHuR`98g1yKmjfIsjoPhn90M(E4d>!P$X8V#XQ z8MkGG;7^z^BlFqaUi>?VzQNv#E6iu?zUl|8;cbHDl`O9|@LB|CM*@;jhUwn_s0KY=f zm*5rHDZJ$~0sb0i8AP{i_(iEH@6<(sQZA+4wEegG^ZiIV*bd4Mw)fjwzJ2M=eT+Sl z#I8uTBj~mLHrU;1et+8f>}dykr0tKUtPVMyT*_MY%1-|Vou!?2_QKir2p!44|9|+e z@uwrW^AfU3TA({oWP7Z4fW$Io~q~gp7NOpqVSyv=)_rw86)6+!(HkOH?P=*J zUe#ObMk`b71LY6=s2|!Pc4phK6A}1!I=x&CJV@e9w(=l-G7zNw-o^7b6aVI^hB86Y z=&!z^od=iTgNHjAwl-aW<4zECvXi6KMJIgJ;l@>m)n}4A{S+Abq5F6*KB)yPHIXMOh?xWjmRrTy>^|CRPNZ+Uh5$VWbe?XZ&t zpU#D$zS_p7bP8y60rcQ`nt%`d!5_7M`LBPfZ7*MM^E1;~FtxGb=Rq7a$?yef+&wyx 
zHdDRp1mCCwGeZv0$baHdUUi@oMzTkm#(oMNEklZM)vlE&$j8}BYdx6XyEQyzw7VO%a>lfgdW^(r%s)3 zXSufhZZ33w@#U5F-uM1)`^2YSZV$iq!S?#MJlcNnM}DBa>D#}t4Y@-~0XeYEu)eqe zcar?63)=MhLod6d@?`{@(M2(eZ`wi!Az{Mm@@m_`4t?^;r`m@<{?Yc9H@+2{empyM zU2Nh&uAhpMCd!lP_grisJprEb+tx~uP6oX4EU6%vHc8~vIwSoFQD)?iePJhh9f`Bu zvz}!_f;2e+gLoD~d&nKj?lN z73?ft0H(=|tIQfEo}M@+-Bo$wpBIh8lgUikh?PGXY-=t0Cf;=+tgr9f&rg;xfYqBeYy}VmjV$YykvRI7w{XH&d zm+w0}@a}?X@~tE3kt1+C)P5n{#R9v#mg}HxlZSTRwVSK$I{my~_?6#k|BwIfe`vq* zvCp(CeoBqM8E7#>nf<13*&W*7$N+drw{gm-E<1njTzlZYJ90A0*S_hs?eF~954SV( z6YX=K`P25q=O1q?tIKU+Zl;|*dltPsLZ3{Xr}Wmqxs!+vsC&X%Ci)^O!}QgiByow6 z@`V7KKxDse2nK#0K;1b}llZm|ozAO1NH}Pk?4& z8gw$A-+i|J<&y;9AwNCEWfXsR*eM&vg~-s-It{I{b8X9WLL9_#qS3l4|6IIaaa_D% zecXq4Q|P}e=Ca+FnD|?LQs=CPw6V%}UuUhG+RwVa{X250n7G)+hY@zq_C&^aO_K(tZfOYgcK>pa9W2ee`9o(qTvUFUilW_>o8nKT)R6B^)OSd_I70 z)3leL`ooX4m!7|gJk7QdXm6Ojx1Bm3w-KWBt9C5kZzc;T<@D=wJJyH9SMS0P>x^>e zBvy@Q6m1Ki2tbkBvY`i@_!1B6ACd~M(6gNkmUc8<;neSdEI#&5T?j0ns>nqaQ+gM# z4q;y$`1P-8 zOIfHr01&(qzV$FXFi#{WdKg*R*=#G9Uv7i7)%KY``9k~PM{YDo(xx%X-3(+|VxLKO z+XV2B?BS;#r@6$wEu0Y#?4;>7H#d$BK)+3nw^{BF-g8HL(`)Z(fAJl!Y47;j*S6DV zrU)Bt_uO?M<5Fktzu4wZokITrX9Qh=eQf=6l0tg#9;kO{mmOPi(F$S8oP3b7B`+m2 zDPu0Er}2uXJdY@tlz5?bA`@nK(h```8Yw{ui>^5lr|hY!W4emNC5cV19_wv~$cVbl z_OA0w4!~2#;wn9&kh%g2EOBu#>EWmTek)zO zW@oU$tNGM|JngVy;;1wH7WY27T$yqah{yO_9&yQMFr_Czm9?Dyvt6`zceUEn6N$9_ zG7&pHgUw%Ul8YYPfnFyZ4Q27M3uiKi!TYX@f@%NufaPSTbP=BQRy)9P1Zh*0l~!K$ zCe5|6(tZH09JqEbeLp}5B?I~wPnAt&#JU%H8?(FXdLGKQv{#`;`AcsXXcc|b9|o7b z;+?0^%D&V5{B%2y4KTk1O-9ufE*>Im!rkXe2ev-^Nm-O5?c<;Ud~Ix=AR+X$Pg1z_ zIHzP51Q`9;rSRlc(t^A8q<1%UXvb}1!;FpYw~H6flfR1&krDFCVkqwE@0yYKjy~$e zln;De5LkAZr%lz#M78_f{KZ8gm3;afErhG5M-GXbh~S`K-OZ!F{BeY^JTeY9OtX7h~g6Wh^X;Tx#rpJT3 zuLc_5iS88R7}rWKVk%}EYMyOuqF^lCiGoSQqj&CkPI_PcA>Z_(O8CPQ^W-(KWtg~_ zyn_|*q_Y5)ryB7>OXQ6#JpFwJuEwlk3GU+LZakidAA^l&*%Kp`do4r|2jiH3r46h& z1jhIwq)+9*ifnlkf1!41#m4~vjYJ`Mkvi~`ItEk;Q8@zSA%#wsHiEbe4*8xAi;i3< zgK-L$wDBKIz%3KY3G4w|rYe_05Lr<^gh9L-eA3@WyyVC`CBRQSF-k@qq%}-%Dm&xY zAuM=8h^+YROY%IGF7=5pFUl+}m7#QsNa}!O>K4!apiWLth)ye79p_U{I^x4m zn437F7(eSvC@JqvZ-BbTOW4FBIQWsyiJznlbE=G{OTEW0I#J$r<<#kKJq~>HN}fJj zW{hY$+77lU?;4-xrGA#j%9b5*WzV{{f3W469p0s3#WO357McZaxx%6HN@LBR^;EWi zaXNQ)jN&XO^--FnodDub+RZkgGiN*phkBi?3w(EUgyx0QtG|eeHr>)5;w-1Mmd3%Q^S4l@Nqk@P)0IhoV89jLt?z`vgXBRxX{Zdj zqqz7ky}%6d5=`-0aLO6F&p32;9prM64Kb)!I~`A2zPGDEs(xz+JMHGpjrPz359Mgx z)s@xAkdyv)n$y7(KX^MlBpv`D?@2molf+4R%^X@;*Clb$%d!-&>=-+Mn({&R4fyGunmxY>Mn^vs}x!TAYszb#b6GcyF+aUI_&ViMcrOqTW?z)A4@ztY%XZ9 z7N$KX6Vp1VXB`qBVeaifFWw#W_Zxvb$kXOvlMeh&J3t;U-f^z|jqiU~d)-^V6&UN# zb{X0aXkXD>$ldU`Wduj^J2s%r&&zTI`J?jYN*v1rB|86jCBCkY2f@0>Tdp|aclY@F zZk?^X%jPLpu)hiyh91K>b|;^|g6qn4bQR;czDoE1bN zwS?=KZ~h&}^QR9Z&*;?&WVhN|bx(FYTL*@nICAoV_R*7#{PsV7hbqh1Sjd8UD&o@f zje91(txr{ci

<|$exj4Eb(Qk2TyrI~hvgz| zm^KY}D!;UNg%+-f9Wt)*+~ec)kMPU)0gGhegS_>@`&9Xf!%gyCs=q<=B+sS%ljp(5 z&cB3v=_q%CllR{Qf8)H&`)lxzycqVF^35_`zI{RDz2lyCKLs z2yM?(fUpz-%CUfQbZ(!_cyx~Oq;mpip7MAM4eAH-%+LVyE&7!uibq$taSy)wX0bMQ z-ma(3AMZV+-5*8|c{0-S#!8!8TLE**MAtb-n6VzZN4?{i-p>O>lo_`;kK^CDb2!-e zbG~q;9)rjB328p`587y_*ehp_WxywTRUX)fpu?K|;jy|9+(~{Mdw?>WM|B=$Q7u9| z%FM;DMp?1Nu$k-|oPUWPg@@Jm8gHFy$B-U9qRz|~6hM=#2AbEz9DYkrKh z=xE1Dj%CkJoC`BdK;jI(;Gw;#o^+n~+&%>ztNxzB9`IOX>sIunMgsgap_>*m4+Ks7 zhYu<1q^+%j%d#>gGY;B3FtuF|_xFg06B?!#4V-L1)ehp>69%MJJJXjdoO8T&?}>K% z$*0=h&OweQcY$&NUR=9&t-bKVv+1YbefO=5;hoo1PnE5#owiF6^yLfk*!q$ej!EGK ze*4tSRZ%Z>`Erx>Bv0r0qu-_89LL*6(q54JoQN78!%NCraQw+VFUlYr*3a7dTDy1q zN&2xT+A~i--LBnmUgrtM;8(z9HVd(S!{0fjt<5XIScXGqnR9YZICH02l=4JiKbZkO zv}Z5Jx1mG3MHy0sk_IcF3{k3nwuB@$QL*6@Y zztgT<-E3P|*Rv^j_u;$r;d|}qaHlOG6StnY6Mj2~e-fRbUmI55g8==G>lX_%tFTGi z6TZ+EeL+ndJ9Bqji;in-XQ!U3%N;%7sf0DxDlFeNmrnWZydQifHVb^}GV4ZJ)JE~y z_8sY@{(MaTUTxF4ggTkc`lTH5WUV7Gy^|8XW;-f7){=+=Pi-4*DBH@|v})Kfv}EH4 z^`6bu*qJu%eBOfbOiluw_Y=6-N(<<~CHUoM7ZniGN}G~;?3^kzi?ntdXuI**d41cg z>qx$=;dlOHTiW(X=e(DAYP($7*b42|gX1_C>JJZhp&412JwQ&i?M{#*qzAZ;1KpT# zMA-*>M{Rw5GxJkxtDBLa^e!A9|#G(U`FA@YsWHaIN$9g9UyzFc2>+D}L z7E#`NpA$cPcPwrt*Zl6NQ!nwApLGWz!YqfoI!Sv3zhl&@W6@A%q`!~P(0OO)`_Sup zlG;36{fHOho^NxgT&)xYrEIURW+%cQ|{ zV}k$>1PTXPG#Rv?zR4k#Pz{Dye)8QyPh8Y}HsLYUZ6+Xdt0aM^( z)Ck)$g~hWn5?W-S84Ig4CesgR&g@D-xeLvdMLz)IF4D@+EEsbS6Y<-qOdA|JPSU0! z?c$+O4a=n6>RR6P_h@E!Bd)HmqEInz$yYprs|b&iIqo(|cn`~`0x)9oR8&`Z(@PwDi3h@Gf3gZ5W)6 zu>cQ=bF#_~M>(t?=_8uE&h}ieQHXdtw7QZ%0kkF&G~D_#&<0+4W2XvFg*wTTn#o;f zY0{$bSTAxQd^ViF2#Plo=cn>boN@r*$ex$9T9z<{2VdQoVf}>0;jzZMVekY) z(DtNpc1m8~blAFj3!S^zZr#4qW>%Ilpix}>NvCpSY4$Ac@H@#wiw0Z0{j|GdXT8@p zx7LH(;o)&U3t=9OiP7;d5hkvFKEMf>MT|3zQ3nrBOs9@P(KlQ39WKh&5R(t?_{%qX z7FUQnq0C*37s{c-w!G{r^$jp^azlbUvNayWd=YtCrflWx_yB_;rx%p`sJJv%rN{5g zAA<|hn4N%&3|^F3d8pB#5v^Qmz?>n3PVx@r7qOD(IBAp*St#pDK1b6~4=UKfzj_0! 
zWWZgpHIYkIQf55Xm3lC;Vin^_yWws*hS=_n#JMPHLUGci+z4Wqk%ah|7(4cY3 zH_!5>+NW_n-thRjz`e?>a_lTB-gYYt=@OK%xFdUFVTbQ?bQ(N!*RCLhAHEYQ50saE z27l^I_0sSB_Wz`P=9N#kJGX99?pfR2eb~PDz3;X!eeF-%PhS05JKm*ZL`NO(onjCk zwY>x6asMDYQQf&Fe2&LIOD(js5`CkJh3ceictZo^79%OOYb)Nh!&3)ex( zo;%o)yI;KiM*GXZ`f2;t55C|2^jqIm1^_90p_P?5=K`3FdO7za(M8ogGWdR7LSs7Gc(S-g_4=Npc6k=@|gA*Fn4 zv78j?BCX0WfQ(aqBP=83XmCA!azt>z12<`ZBRS9FRXch+{_0?^!lV*9Xo_6HUv-Ld zKyUN;zta1 z<&6`y?BhY$(0GV@T>jx1_s8YGoNmO| z?E>zj<&Az)7Uiz~h=1G%0PmD7zr%|}64c?Q9(0C#7f;IS$@JtN`J~JeenPSR)T{U+ zf7JEd(oz=oU5q|CIH4a}Y_GrmW_$O|`|Xo2eX@P^YhP*q-5-6W{pY{`ciYz5QhWID ze(>}(oY;8U?7;LC8ZAO~xi+S6El~i^=pW^R;<6D&w2--?mGaRO)i>0LCVYXOq>24# zKU#Vy3#x>h$7kcBpVe*9V!yp$->qKc*>(^;$=yDjq;ZP46AQxe&5yjTMH|f<&S`7n zrJe&A;r)6H6ZUaqvss!ecfeh^4kB0ny#$Dcvth zl|Ex8GZ=>7dK2YHSHOy+9%3>^P~Ve4#0~9un8Q%@RdV-nSL&qG5Io2mJizlJFOKx7 z2lA$mGdBRys1;2*)0b`oR_eUy<;UzPzb3Z4kFv|w(kDRUbJzY9ROBYo#{+#Vac){o ze=LpCV^*Hi-{LRW73Si$JWYRW7W*!6)0-Zi{;%o;x#)d|-BI9*KVE^kD6Sj&LioxS z*d(m*QBKA> z?caG>V-~2}V#HV=1PCtu1pxEBU*)r>41C$MHPo-r-z^J9+N#9*GaadAfkp?=n6vL~=b^zVkWd z?deAHNx38arSc!=Qik_&O#pT`;627o=o;?w^M}zt?M8XyKJXpSz5K`KA8zD(O#Tm) z)9>T)qxknfR{6sX_@jK~*PCNx+NJu^V}6})E4lV=ze;-AzejJo3Yhs%%S2zgv3~aa zh`h81^MRYXfx!+*Yc3&bfp$H^m}QPOTZ?OqnOz7)Kln0|xTM`5&_)Sgz}E2_N8RGL>=L)Yok=uXEa;w2N$oixQ@;- zpW`p-rm2@h*Qlq4UPP0I2jZMG=g`B=Hw>LesJDL!PM-wMiLtffaA~R+Ot7)(V;o;Ed6Ft| zzWwatp30Hl%+XNB>e_m96M%9ceC6MltZYm93?=y%yT_LCj(cXhdKu2C1jlul2^bS$vH zf0#K!>4?43?Gyfkf_6o8jt|Ne5osgHgWthFKHhJK2YWdoBDBP=vmHR2X>C;a#V_rO zzqAt_;Obs*ZTHb-2`c_rzdGrWJzj=sw}hkN6PJsA`4b1_17|tvk-!nIWrGRw<2;PP z)~Rw~F@8cs*^ZqB&EvGB=qg|inethA7FXNmBK&kt$IX|vD@{7;{QqV5Y$PdItLz{y9oKaPx% z)1=|iy2C%NjEhDdUGSjRO8iKZ-=s5d;PW4N{QE+n&i+Z1qnl+zl8$lZnwV7(r5)sz2hEdWDl+aB2dW^ay*7kuWFWDh^%pX{3@@xFOhGE}$c*@teDX z*6-CAL{_%;$}f;at2pcoY;?u+N1e~5T=Uk0WdcPe#~8C>U}l6q7k~jS7!3;H+gY!# zueX~wueHt1YdIFh@B7KMmPs)$o-IqB3dc#s1qIyR0>0$?72{7@_><1KkYzgP2fP9h90QDEXB)qPICta2$zX;clPZxBICq$!E z8MfowxN&g1_4}t&&u7b65;o&Y>#!sj&B&xW-;EKux=1h zQS9vOz+#PIzk#*f4tDo*pGRS-1dm*tB%RBm%8cJ4Iy~?*0LVb-E4AxxS~m^U0`P7- zIM{D1E2|6!?BEyjEh7yQ-w4~^-2?7kTg+P?#S<4^oy6Y7U~?d2-DsdJGdMVj@!}3G zj9Ux}jaE0qR6BEVzT#t$ssy3kv`$n>zn4Y|5os&7J(Yco8~$uBzCB+G(|WR2s&1?g z+pRpXW6dTAj4*uBcZORBSvaPRRJ$&HS9K2k3$ zoxF*fc#8yv@*9}d`l7eEms|-_nv1_BQyAtJMY2iSU@5Im*{7bOj#_)`-FMs1U;9P-%AftX{rCU=OYMh0{8r$p zx0DGtploeyMAxNVqF1sIKpKrbCq$Jo7dSO4ZMW(Hb(f1DvkX8TAgR-qfVn_^cNsgO zxTt=E&u;Kop$+8Q#n9xSSDksTegnxR>RkhQdBHdFX&(zq8E7ujdD}i#T%1@|HqP5G z-+rt8n?L-+_W$}ff873u@Bg^{ zo9(T)-fq7l-#hO;MDE?yN_&Hr_aEGEZ@l@d_LEnC-oEwyzi5B_l|OI)?oYnl{_s2B zZJ+yd9R`ak*9lkMuYYt^=>8)_dvJi_3%A8=7bSru%_Q^{UrEoA{; zd8vYfPrCn#1X{^uk4BCY_yJczQpq*s)_Y>=ir;_2m0z9r^5MiYNrC{be9_y$v%M(K zWdsxFM1jbT#v!CMh=Q57Du^4qd~F{(`)?$aXC>2}`p6@&bJQ|9)onFVZh5w&t{d=+ zM|2g%81I___Pfz#$dt6z;=DN94|8`%xoF~dkY($wxAkihp>Py7Gy!w8fAw76Y{g&j z!d>vB&Y}mP5&4wZI)Omggwg+G(H}Z&E54<1bh6i0SLfT^d)MhZ-fb_xbgw;eYop!V zL^@~Cw+GwgKY}N|Ekir??T%iiZ?a@DBQAu57Vo;$rCtli)quU?A_k_{Kk5)6l7D8X z#S-cwb5ztu^gTX#Ns4+CsdR|4ni~;?4M6zMW`zJWzV0Rqe0dic3E13|2mS>nane{a zRZz)6nDuL&2#NB+F}w%>VR8pXPFD!20wrzyRXUM@lUnoMuPR56u)Iufnc|TIQ_eWk z{3g!47Bb}*-q?UJ3BP=wzQ%3pIS&6M{KM|L;V(W;o@X4C$shfL7M}^2&OYc2&vpGd zJJ&~Ae+>6oe*(LcPTk&d*O}i{^{(KdQ%vShAN4^KZ8K}TkQtitgo9cEVkqQ zW5&10HEsU;-}_-(UAfY3KXs@5$>;y9ee(x@(Vn=v-2V2bUunO(e;*me?m(`cl+rG@ zuPzxuFW9FM=0MhVFAr?{wwWB6&Ans?2T~c+D8t;XEy1t{ z{SjH$b}&=dbzD9MC-@f5;Z-kc7nG2LD-rg=-gT4qN}NgT!=0da1F91Vo|==*qsOEp zfs58@UzSf<*)(fD`YR`GQ$F;>j^Qs(o|ZINvJ zRv$u@FhV|VTF2`$I0u?`Q#sRu2YO&kt_5e(>#r0%iwb$NL72GY2cO7FaFr*93*UUU z^U7U1#j=Zz3crXi+|&&{&P4`bk+c46-nyNd|9(5q>N*izBTE1|-q|H7YIq91K5U0XV 
zzQ7XgSU1;Je&LS%mdi6qJ#txF#c`NU{JV!z%B~2amId4r<{Ykj;J;q3q zs(i}};rNn*_{e8v3vj{dB)rkKvTxTWvmSs=n2UKw_#J!hVT-@<_B)h4*Y+6yS%Bp@ z9(63?ICpV=4Opk`V=sNYt*@@PSAX%d_T{gAv3=~BC)<^)S2L#ZO(04rdVo@M94p`? z{RGG#V&uCCjPHy{c${Hz@+r6R)9?M7gil|(i~Mz~`X~niNg70O4qEST6?-t{wVu#&U68or;F$5>({v;#{WpypC^~P z$GeVuyi?B;H~g3QA2*dg-l^wDF5RX4^6zrpc>j(0*X@_Mk!R}pkxTd2EPs-9s{F_Y z&#?#8JGOV)l>Hz&%NK|u=d{P@8pb2q(OEDjyK}pxhiOlYi}Lb~XpuL4HhN)}c50XF zI47qVE-puRh|I|m?Uu1gba8(ZE=R_rS9~M8PBeAy#kcFUKODRCvDFDShkY(MNMhzg z^A;ZVfqDtD=CHpU+gg@n+lS1mU?{WZCW#_K{dMA}&FEjUxC$bn10CTeeiem_Ug;CS zH78Z-#l~^YNwujthTC|=smj91-W^d*F0Z}=?D_Ic`0anVH&5pF*!d)1WE z6k7#A-tD)XbBG?zy>x5li<$@W*2UbKOS@KUG3a{-w? zZQ$kHomb|E$P+#R!%gGPU)MYu7kA%E_6=|s^#ED<;pM^39%EqaKI+MZ#`Ir|Z=H_! zWYdG~2kqP=%a2dm^1^&uT3IEXdVRcHL|Ww&nRPBh8BvCPtJXJ?W5Nvp<=WM@zP^G?I_5^EAugNtNF&Ym60)XF-diM2 zUdc1(8DWHVL;~#tUv%;;6w+Kz9lW!}|OD!$8Yd5a7r=R{kD>sD<2^2Ob%> zwXv0Y6qSr!{7_JzNoQ;XCtFRCyP^Or9YMbR5GzyL=2@+4Gvjn8W7Zjdg_O8?HBSpmQZE_ z$#7+}76f(b9ow3VhMw^2+>ED{s+(*d;%Um0KIh}hZ;?lF1+0thwt<2JUDUMbnXjkLtxNTd zC!%W4=9FIAm$sOnXz~qnX?OifxlpbT{ao=8`W+qQOJJ*OYse}xl=ey9=;ryhvbIk7 zi~$h>=W*T8sqRvr_$S@6Ir_A_%-Mjq5ouf9ezKi}f!y^btR53rVLkv?c&SrElzZ=% zK|1O_8_iR$DX2#TjgUMKo3`#HC%Q4d9IncIG)(v~E53Y26B@FxZ@aQmxDso&Y1%ve z16*JSUz{*_4i?C^(pPz;q0$Iv+{aim7h?~4LGoJ|Wm8v%>(6?fFo?^VV%n0t5`Og? z8S6WL1no8Fz-7N}9DJ?*T{wk%_>uKrv7sE-jGf@lGdIf3`kwfT0})xnOMRQ)RO%!= zrpdFokSDO^p5rOY4G{i`0}tDwu+*vb^4lDGbfGP;qC-6`Wfl2db)I~ot!*qfKP$Ad zw%k^?R@>_4N?wZ6zs9AmTUe$qaDyUpZyzBYZhUf+Kwd2b~yOVEiDYMFO-tf^F zLaq)m+Om+3TnMY)06TBnIQzfv+zV?hdeqZ!1O1|UHC393x>*4!`OD@ zLcdPqq6R#4XzDb0D&2P0la7xox(iUH&q}ron0#yAb_`x=3&5`yZyM`)7%d=>kF3%t3TyeM!841bFY** zGg>F6L-{2s+0+S(bY>>ZVj?l_7|r+U4^F6e1~Y`YS?T%bpKGtY{7U=S$DV6fue$N$ zpgnx}uzlle-)evMXJ2i9{3n0XzW9|dw;%uH$L*b8y_ug(l{n^jj*T4R)_JuEfrG5TSzTE!ckH6Ud@ax}cufF#nEl4gd zLaW1G$8@q@qwM|Nz4oJ@zSh3`!ymVA|K(5GxBlWsTz}QR{+B;)-~9eh+E>5(z4n#w z{-FKo4}RFb`jc1N_g;IW{pCCFwAaDyzOo#+1#=1Hgzp>u-nD(yNaAdzL@v6sd_*ra z89LgfPu7(AG_>5b<>1zS#cz5k4x5`>?b@~L?a8}$bLz|z#*A+~n%>O->V|ZqmXD|8 z9it&g5Eg{;L-|U8!TuI52SF4UpZnudAb>j6WzM6Ise|CoFMu9)YJmFg=_&R0? 
zpUC5;E5{jWuLQ{xGvw`m!ky?pK`-SvPwZF6Blh3cpZ1*PrvFJBg)iiB45JSAIlLF9 zWfC^*U-8U6`k6F53omvYcv`-=q`qWE6tTb>c4YED^=;Ex!fFWc&sn;Zz)V zVQCkbztdp*&Rx8~L)p_69Mg$8&w*ulr5A5*z^n58H9WE>Jyer8D-}tWatNbeiL56F z&!(47(pkP&@mN@tZoT;g51p9ResRnleV=^QHnS)}0rqzQ;2sEZ!IP&X&x3RqotCGY z*!`6l=e^QQyU8Xsfu(MokhNwlHosib!Oe28{0ug|6K+ayebx^+p;tbt4=&0toP=XO z!tmO@l$(5WlJYVsEMStRbn*`wNpSRn(QfjlJvhI>{Rlpto>lws?M|J1%{V_`%65wYT4U2>khWaO7mOW8E5?DR1)0%|*{X|FQP&+wZl1`)~jIc8xL1fBj$mFWd94 z{Jpj`?^v>rJILbLRhu~v9(gZ;%9sS#f1-33^>2hRJ;HMJn=fwMyPJd$XI@>;Kb|N3 z*@(Y<@9uIrljq+5P0Al|A5(s0R44BEKdk>HP5srQppd%qQ-g+&=L>p1$|doKxXXAx z#(jXE;YRsA{xY8?Zj^tC|C4lo!~FlhvS0rHT0U`AsRYvwXq(2zwo~tTnU+Rucj;UC z+863_)1g_~EPc3hKQm4y51eh|^zUbk<6VDdU_pP3zH_r7IzV|7)(PWO@vvBr+IGC} zi2$B7qnWahY&+tMghDIN}F=>g6$h0kPl5C)*l2zDa+3Ur{t{Q~iMr1QTIVL6vjnd_=Q~f1vteRkVKw7=k0syS+GuN(ySBao z4b?8Qp`N}uCv9qrMvlQBmiUQBKlNaJDLdN1Zff-fKY5Tv(dCuephj6YZ`^3NpS;ta zxO=ZX`{J|h>1Up6TUV|py|%D({o-ohN9_hC3;@PzTVC-+7wXzYe+KFu{80|F z$e*wwU&SZ$DAP7=`zvLj7C4n}>(CbzYYbst>lEoqUhD0Ic5WWw=R5m=U4zfd;Go@p zkR$$Sr%P_k@r_v9M$Hc|AY1hUmT*d5OV))y>PTL8dABW!hp`q)of-!***xN+xt|V0 z5XeU&(z=PTY-Yy((srgS2w&;KMb<1+`Bv!2$X$QQdJ=`?luVM$1*wC4FinROoWNI1 zZ3KpAU4>`M>_dx`Y*ERdR0Slqtu#Tasfq+(fk`7R>K>%$JC?_53ukYgPxyf-kf#A*=E_pVy=u=WwOVm$Jmg_UA^=gsrS0N1Ksb zH)wj|uxnA-6d?n_VaTO-U38|*@lu|w4_OR7?)y$u*lnQB2QK(yn7SwRXu`m(_LDIC zM(@gqRiM|bNoO9TD{bT{!3s{bt7$jN@a8q}S8fErz14p<8OJw;62C9_HIR;M>LSEPWEXsVHK(W5uR@doca=E6q&{j8>+uEk5vU@GJ z^)2q33xqAV74ojEdRoh3KB2q3Hb)<^+?H37S?Hw8ZL@w}eF2*0zz^dmpGW}5YygUk zTA!ugJoUiO4u(!Q&=>~&xyu7+$avb8Pncs~!w5~foGt*dZjL_3Z@)86e6|s&k7Ppe zg*$izG;ulEhLnP%n%#oa6ldjDagd!t!UeZxm9*Hj=X^{ zam3}#3uK1iI0+D9WHBm;g3V-#5UminJOWZ8C}0`p?h_EzNHM+nb=9auU^p3|kWxPI zWO33PPW}q99Pj4jR}Vype+<0rAgczV^VbqmB<0m73-H2d=_zB%FjGojb&iE&FK2N! z-&eVI{Yu-ovem9`ao^ZP0lLF#A#ciN5bO6Wp+7rbQG$l~_P<7_h~&5(!s%7{#{jW| zr$euXsBy}*umWz?INb=4u%?YtgQnUESRF_y zMCp=F1r+*44CYC%=}%8J#JEfmfgmrL!X1?fe;Sl>23^P9vJ4}s$r+eiUuod-G%dNx zGdws%s8RxALH#DN@l{j8(n&*LQuoSH(BV&i7P8D^;}Nd)3cNCgfNkE$rg?adJah#k z9Dl@pGypGUEhxihd5~=Ki86u{kdlnB991a}q+i6C!oRwCrCq)86iViL!>U8~la6+n z-N)i15K!I25YUj}60#dj6) zF`ctYgJ=x%qb#f{!y1a#k8N*t)nleK@;n{Dox7mKj|=n??KjXhjE@d?+y3D`hJ%XM zBi?8wl<)3s={-6)i1A-QK5Y}JKk~-7NY964fro7;cC_ngavXjM z(bVLUV)q<#O}-)Ud{@>koJzWAl~`7eB_{rfNcS^NKd{oC!!-}_6mN=>;S?NYO_+T{@G$Ixno0lXBS}M( zMZmy;ew=+p;*>~X2v36go$?~0z)L#(X;J~gC4BEX6WT7s6*`jKc=K>in}a6*To86} z=71)}lGo_SGnevRZLIq9$Uk=n;Nt0LEYu6pBmBko0VgL!9At@#VcIY1mg0xBkQ%RT z?C3CxBV|VCFjy5)@kpDOpMIB?I<%j$Uex{W5SJDQMUq*4pm~JF0L*?J+$?`u_t^*8 zme0W5@2zQbS)-;;9`IjbfR=Ck8HerU_l<3vH?MDj&t7}-=6ZYX*_&-`5j;9=kw%q#VB?j(eFg5u;o&(u-SN5_ZCNj-9hHreVT zlj-9EUq_h!ncg2@BJ0TfYE^JJg`Gm<`xZ~<2f3zDs^xm(j z_9rHZ54ZudhjFEDxnJO0G+n^eagTTEd2)4^$3dS3TW{0P+@|9vS9bw5;CUbNr{g}6 zJKYqm!zH@l6-mAPWa;_5MOJ(GBo6u=o9Leovyp0VS~ZlFw`6VW@ak~?w7vD#dx+e5 zyZyvHY^9U-um08lz5V>nU$*sm`j7YDZl8VSlkF6oP7blrfaT<5=@IJ}*xW^DY@QVT z)P%au3D4ltY2vE>gA@=L+|&_+2j>o2(FG3JJAB)-dJ;{+W65dWBGYCSPRc1;AQQma zXXvPmLmZR?OT645;L-0XUO(__qMBL?9y92+mP>ICmeI}XXvZz! 
zMVZEhHd6pFBm>)x$g!Ki&lGdf386mE@h5RP`DkGAHnqjAcq84_MvIp^>q9pQPn&~& zDm&u|AQ&!BymIQyRC&ggZHAsBYbH9y(YoQvx5UX)V=3SQqf=LW2`Xh1mt9=ANSid& zx7Q6LA?YI=Y1gK&{MD6PuVx!G;x(UdfZ&N-b5w6`bbg(gFn9~DO;s2 zZG=esRAFU&Ctl)-9!<=kn7YU7 z&$y>Qila<6_Go}(8G2W&Pk*#`tq zuuw-zqtD_{;Z+CG)9Ih=cjUYLFhSSr!U2E(Dy_QqBri@>I^dJnwArx0xZg3Wi)Wss zsT~yk1ugZNG|H6k^z+UWNU!xSeVJ#X{}NaKhO>{!NKr3>&j2~P%HT|pM0#ZQ?!udAKw%TkMO_9Bse$LzvvUW~H; z^W}(zc|REd4e-K+_4T#&cJ=Djw!XR9mKWjQ_U1R?g>Qf5ra9+9 zYw@bJ&5e5b@75Ey+Y?Ve(Vl+hnfA;x&$a8fZndqgE7`Ch?%Fci!@hJSPFeSWZWmfz zEAaD;o&t~!a`WeHjlM2*MtRniw!^SrO75(0K&M?;v$Rd+&1%(NbIuTYl=&R{p5)3S zzv6>TSiZ8TYy&bdg)K}sESS&Ff*u{XKe)0$$_rIrn+DA%Y-vq&oV-z9D$F!t0RO!lB66al z{?r5h^nKbk@}qQ`*~}$>EJqcp9qFSq7%}Wbt;28}8f|aFw*8uSZ1Qw&BA;$fI%si| z3}wnI7x^6rC;)9hlD~^S`$At%zW|i&ARAz4chQRkYeQK_H7{RsUi5&Q@$w zo>qZ7-1qaSz94blR@YYBswZ&}XI|yjPr;Wl_h`SGG6uCyZzCvLCRG z=_YV+dV?nnOMgUqVP*qH%ApN%uk{GwkftM*H7@>AZs>_{7TnZ@j`ou~?bHze#f6Yi z%fE2q$+PX*c1AgAf8rwzqwnfGjlLkcSw>d!X01^^qBCk7LMmt=BJuV)YIk*(e%ml0 ziIY~YS?v_~r_CH&P+odjs3l%pS##@kmgZ$JF0_*&VpYebNHK|RSf~wDYVni!$QIhBVM~r-2>i{NWy5D_=$_4b zKE}8Cqo-(pGRm?e-#!B-`UV)46a7M5$v?OXb?)qqng7fG=D+yL+05zR&Q8A+qHxG@ zGN$rGgiI|uO$8jkYE0D0suB`G1w%g9$d(Pw9>)1H3A&9^rZtK?IW zCOZ*TzMVPw$&(5BS$7x-TW%YH03z67dK@C@Uh)-aiVSuD@f`c$S-0^%WlND_{ zbq$WBp-}@X-^T3ainoTz;_6~sUtUiqdF|S@;Ov5y$31CkCgtUo5+^6qf6W2qz8 zeG}N7efee+mv4^Nb9DwTIGdlWq2uh#ooK)mSzRW?z1z60Gx0=uN9jpRDUZY{H?v1+ z7;qzDejmRavLMLFQI{c|jlwhzJR*ZVyysJft!r`9Sx-LeD#&{6X`|?YFaI8KWvmQW zu%^nwiN+{H+`M8Snh$(SxmJ252tkTpH@~`xj4icWPe0$Ded%}Gz2{$OZDk|JUrE>a z-jAJ&z5MR>4juh*TcY!Hci+y=b{61IPTX;O5`*@Hft7FapB^D28W0$X_SWL$4q7`Y zWzT1iU|n8b%)ffvm#ay6Xs zHBn{#5TJ~brQVk6yh?;wl>@+a`81#Dty|@AZoY8W2ogr($(v*IdYXmk08z^E>Dq`R*ouiHE0v}ln&HYk;*Q8^;kypD?DH$CXS>{nWlE>Ded9o-eXafC_1D|O?fdOucQ5tjW|Wb^S1KpLC`F3S< zqg`3wXsgJD-{Ifg-)r|DZnu+zph12(=imcGs1fMGZ05F4+zD`Gh2(OJlTyQV`A!u6 zqWe)NPeyq8V?w434GKR|Bi}WM!sbgL=mz3aUU}ohWk^yS@*KIwm6)iv)x+vM;ASBy zvH6WL6ko|pNG4$DF8zjqWuDb3OI&`xeRFfQ-Mn#|GH2To?efl3ciStU{tSl8NGkt; zGORq{%r4+Z&i#yv{ea)8ba0}c6^@(kbdr>lCeG@OeK+9wnH}3-9UEA%>*N;jrDXMx zPCeqc^3_EdtP_@r8sMLvQ8%z*95i*57}9@bkv z9u?-B^BU*Mqyy*Z(o_2lF%VDNPZtRBG9h8wBr~U7&Xffk5%<~*TCySkybn7<8z=T? 
[git binary patch payload: base85-encoded contents of binary files added by this commit; not human-readable]
zvrzV!e{nt`c3iyR$s8(qoHMp8$2fTvKDr)uKugUhW0PY9X#T-cu1NRl03 zy9jL1cvnSn`sjo)=cLJoG>CM*xJKT0 z{NYE=gQ>*$qe5G;NXB>#6_*Iw#!-t8DnJP~6YVn`<|`!$|16;69k96;mr1$z!t-@} zqM;toq`<$PD~sv8m0}Kq&oKr*eE1C51lzQ9L00F?`!qA30?IA0?IY2<;jmMT6hj9z$vrR z%^tl?>*b!j;xbkKsV4~*Y6s$P+IK-mT;8rw$-&A1~bR)kb%?w{?oQTS;xuR1j?uAU4X`nA%Bfg z+OT+`+g;KcJl_Oycz6&8WSh3J;@br%_Q{j?DC4qiY^{fHc|A~gSFg!;GT2E^Uo{(A zk+qnm&bqt!nTg{l8g)}n?tyaPqdu4yobtzRI54vu#ObFqlb-APu`GmxuWxdj>TE9Efll8mQCr(W!H7S!VCG9doTOhG+AnU z_efCJqX)(lJexNTt$4fM8%KebGn02wRC&AwPcP!tKM$OBzOtiTKYBhx>Cq^6=+8(W=MR>jFvD|ozK$=b4A;u9Q?={L&X4G?)xkbC z@wU?h^oHM>>?56Pf1x(1H$yfDTno}sbQMjR)Q}gdN?@-a9VQ^+ht6~iTmoPHK@B{W zwQVm0b_XMw(6DCq7iD{iL?=OJJaBWs0ZG|X4_?kLz}Z^FIgf6|kC6z=5N61`AJK&+>FdYzDQPYHv=P-?#O( zjBY%1QAwuFAK{BXe%g)?4%+(4GHA`W6d#KbZR)$U=!9F@A?v9@bIYlg#QZtlNBEQPl-i-SuZPX|520pkv9O^ z*9zTU&<5>e_0#fnU&u7hjnWQ=?eARdcfs#GZzd4OWOc%r%0&+gQvTJK%G0B~BGosW zXw&Q`oj7r`DY~QncRjHA`xJ=%?D+1djkRT83{K_&k zJP}HrmLQw4>H6N%5ucwp`w00Te}Z`PR6mGbORsVg7l$%AJGDUZ+ky)R07*-~h z;ji@CKCi~vd1X9*p)YMsdW+|^X}$E*BmFz*$rhh1{wVPBr06TU0*{{Is3CWs5Jk?U z=P615B1Z}o5lnUBI&w3wI?q`a6+KcN%S7{F4B|bgT^zCOk~gsYWP(B_K>a} z()z}qg&uWUdh}a30fPP227(}ydeTXB**58SEC-GHw3IKed?SK!K17hs5Xy)ob?KP$ z93!5d&$V5@?|pKf@v4jI(&Ogmb;g|M_~<^>@3;K#ZEdty?mcX~PoB2J-6!qz%zQvdFVsj0}s$t3v>e)UIbRXo%~g(uEuM2|>!O+GFgE^UN>kMJQbH zKwFjPVHoED&wa`ge$eCjTKcuq6&-`#em~%=@`Q07VIUai8~CGj-zCiOj&S4N{#snZ zg%>k|d-!hmJzqjUf1n-%ew=Ue=cC}pdEl+ntiH2+1K)rf@gIfv`zZf8xevT`8s)1$ z6~xEU^)r2ji~5>!+J=@pew`0EU;`IoH?UXEYq(J2;sW7W4X>)O8^Xc6q`f9M7v%;37m<96+VR02 zeUS4-^kvvNpIY}Re{G(&)~EmNI~?<;kJsj7NYmYE^PtPTnU5ro^HJJs?YRqHwxjR_ z-styuF5kiVCC_r0wn!Pa$Xb}_Jf;rL56J17&!m6wEIuML|0F*PI54#+NgfWeM=qYl zBYoPd7)QOl<|cnhD?VztlU@Ds-@XB|t%dc^{ITO$?v-T@`YZcZ z-->we?ydInqnFz1@w2?i;XnRQez{Gc;WM9ny&W7Lx2^3v?HC(*icD8l;VX7u8We$x z`|?*^JjH)L_idu^GM}(@!5`jgiRbBGjVdO<{6b)(5;VIedzzLz1GNMPojeOR|K1M%Fead_dyjDMa zTY~dumLXk^LC65y!-gQb_Cy^?H;uM;Q##B+g_K?PBu#~wioYiwO1bAYp=cX za=VK@P3T*fX=Cc8n~PDi=Gz4HNfzO=zDGVKA`h&ue1qx|VfBaB4HAgxe6fC}g?{d- zi|I8t)AI>0??h>~um(P1YpVSi?>M($!x!{w9=&kGfQ!ER3eu#XVv#=OjZza;?I}Ft ztS-r`$c75~fLnBzLStuu7r#f%Pj%l9px{Zma#g0}aU+rX6q=PGfAGtBO{j}+NRG+} zgyZWP$JH3h`P`J7P<2)2^OOar>g`UzX`9kMlp(m{>%v2d$|g*M_SKEO!a6RPTe7{A zPkfYNJv{rp|0(s^*t*qjF|O6ev@J~5R@>UfB=sEgCRt2Qot3n77LA^Z?k;EJ`H%XB zf25`Tg(Fs+IAQACFUctOUaOwAORAnc zlHw;i#l3(-n+@g5+PFUcbxxgrNZ5Slk+N+?DKqo!@_vd8%TlDOEz)j0T6C1Cv^&ZFGnB)c#1%L!2_%vpnY-A(Il^@kha}{S;@4Q3(QXejzQR z;v6$aM+wqWypjjR2hc-Ib8U>{d6pskz>k34cs?<`);~zN9Ak&0Tyl3Hv z!`fIW6>2DeugX}623CVxTVHKEI~(oRt(~@WYm;$dO(sqJwg?8-^=%hw>_}pWeNqxb zqEXDw2nClq+Q=yb)Yi9%$ZZ&13;x`xFwo=SV833sh#(@IFu5NUpAgzhK;@)V8T?l= z7h}T95{F{~MCvXsI|T2|WrvOx0Wtffh+IaZ}U*Tab`rE`jm(Zm#yTdyFBI_I@in{Tqq;+x=NjY|W(FJ+r z4|-GjQTS#ZvkMvAdz;A}FQnW2Hljt+q%E|AK^^cf#=GPk@@9dw;12j%-@zk0g9v60vIT+$)G|GwX@UC9&G>gVB+ zuS2W}Oc$?x%IXZbQP+|se6uzbHd$Xpk51#bJF#cmQdj*xtrI81k=7}905u!|pByVa zoXg^>lg{=nd7_34wntxwn;ncQac}RIoh#@FJ>3Zak3ew0DCfksQF=bEozRo!=)RNb z=1I?t95l?(CXEK+6JhNTNaDGb-YUbhG~4#NY@thhV1u_9asI?3UCPw5m5D|=I_-iX zv{(;epiEMNU$HrXZJ9-jX|`SArOV%P*rH+ywWD>mStxKOR+}&5e+6F~r_@!T5(!U$ z&86+eQ~$}?U_Bur&JeNbFx724INN_9xzD+d{3$cbN`7_5U>-wZADyqf34cO}@z5+N zx5=-ZQe{V0N_L=`tAa*dhf(}7G`Nn*<3==aoWMH zEq7zW+QxeG+on(7dbb@vgU87CI@eqh|Z9@hpX0(k?yu?Nd2AtQW=eE(ZW0t35CSR+~RzGI5 zv^`4a&~3{tC$8u(oK;8h#)0g$m0`dE(}^Xc_;T)(JOZjpZget_N@6WwfVMz z?)royn$dpo7k;+A{E3%aJKAmM$Ghk(He36RFJM*taZ{k>xj#&0_nxA8Di?5fp@A77<(FO%HA`Q6FY_Z1Q|Xa#SFoN9 zUB|<3?GrQvhA__V@;6V=)j4fK+AFe$aXsqW?|b>V9XNZrMMsrCqiI&Y`P3H5mp6de zKEqGWscNS^A-o}3xm}}8&8)K^>Tzyop^6s2dNxAkjoVCpfx9HacSW^s6* ze873H4r!k%Gto{;cnJ@3vdtJ>bu_OFV4h>jeZmFZw&$!TPXrUM_yx9c+xp~}2&%0b ziZzD^zVNvnE0 
zli%~NZ}it>LOr=3VEnmh-dt6Fn3MDNr!Fc&A-tQP^Qks*`}P|9Rp^Hs_SMFn=_|Tv z0UCnWC-k6IyI(NyD>f4u#%>}PD&%-bHfT$0gB2(^FoOk`G@0+DddV}q0JoYOeCkC% zq$z3CuXLjNk!mZXhUem~WB8vV?@*|emAsTKeAh>xRzQEC(@JhUv(`h$NxWtLgdoib>Q!c0S zFYRO1c2Xltv5_CUWV&*Q??{@IrA+l5-gx%)waO+>EJ9;#-j_e}qU;W2LjhpaKbvAW z14g9Wp^V4?OXQ?Aa?5L!)j4^lyeP+F#29)Oh9RV#O5?>S_o^D{!`}`oR|a0f@8h&ydK&9>WH-6-TmaTjKibD&>Ch#qjqWypXsH$_u9+1 z@3)hu2kq(mkK5_q9=^QK`M0*UvecHLc^RHfunj)+Cq>qJfjU$>gn!mcd0=lZfji|` zxxL-?E>7D&`pw^HXYg&~)^>aN%ByXCWi^WqTa&f6xw_Oo|Ihxg{rHc4y4`<)afABd zQ$X;~r-l8D*D|)&Z(g6A?8U#=aLm&#<-NSsE-MS4P?B$5SX^^X!{HqLG*13=DB#pk zZhP&i_*x((h1t1FCE;e=?0&Rye}ja7%81M>zQVY+d;|B=m-5vMqr~Y2XKD3v7Dl=1 zkKu#m=bX!H_p>mrua%!L@O@Mej$>Bee*fHI80VY8d>$ix+;c=8ABKZ-F2}uLT;HI- z`*9UI&RMn*PngB^`hJM~o;&AqmWeQ~|CcG>)?Yz*o3rO?lZUP0+(-t zV=Qs$TXM2+uDub*CGF;z{NTo(FD*MJqK~%&!0+>|eHZ74ZDnn}Ev#?gSDdsX+PgMu znYNovM&R^85`7eX1;^Tsdrl5d!0BQXVFMlY^?RR{_rnzr~ui{?Yq)nkzIdEpVtNC z%)ip+#bq1U&evuyBg7*n>a^|^fz`WWGIK(acJ zIbPWjJjh_K`dzy}=QNdlHm{K`KKZJ?C62W8ukcad%S~*Kaop&8ba>dl{q{TUh&nuY z@M3%Olb=8-&g&Bsl#BoR4skWC} zPF_$8$86DQ;ea7i_?7;wWFsR)Rr3gSc%O9mmNc)q5KAuI0N}Oo{A-6IGv7uQe8pdh z)F<|=O(`97z8hX5kIY4L@Az^yb|72Lb6GvA=g}gC}i}*X1;mA zGQgJwU;in?_FZEh@8O7PN)$xunN_e3w*4&1BNOEgHOZH1f~NrawaQ+H8QxRlRNh0- z!hKzj)R!s>oY4vDfR5N6)E~<^BTqJ8SzmFJmSUyJlQzh#=0QKCT?*uvG=&7^Mf;N; z+l^yLIFj_F31nLoN2kp<9K`0qyVRB7xJCO|G@A|EAJH_V31eVCsivqDwrBYp*(R*pKzi2js=s}l zdG$Y1f6K_>wR}n$z5(FlzeLVQAuQ-U8~Pa{jZFhg9vjQGBV>*AK<2&_0(fo%Cr4F8 zjo=7^X3#FKf(z#ph|48E$=dOQusXI{=*TNH09>K-*mUuKd%NAed$(IYW6;z)5 zry=IaPA8@)Ps0>^IA#_wCRS&qM?ts;w+@=jOL`m;;WP* zLOh-`G0ruHwL-}?3gVqYP?VgE$|SSr4iN)NT3BfYG0h1ma=fb2dNjg9V$6Om#LAr_np2NMDV1(@OhjK zf_4>7FsJl{Bue@)*1>$DFt-CSk}qj-utKU>B*#-`pMNE6>+(xDVR9 zPv1qJ`&rDnef!1aIXZSX>sb~MEzJR!6Gj?QYai0&EmBs`b+ndnEKbOGGCfMi?xBw8 zhZ-tK;T z{GIpOm;d+=+8_Py=iA}lF7&KqC-nmD={K_?9AS>_NP6U&P+=i7YXj27aa#uw4kU&=2!X-zG%fu)m7KxzlfK`(IIh7=r0|63 z6M1Y8rg4n6Gz;nyV8+`fDwKUNjDAf#1C62hUHFJKhU=n1xbOg-wX6YC>6O=@Z8J?B zQZ5DN&!|hmB@c&#;NVw>NfN@u?9laI{r1oS=C7Ref>kd}tNz8!OxO|n+LRMMd9vMl zT@w;z`+*0aubK4CE9#}~!^JyaX{GP7GMPiiuvy69=lDC4;aggKW1{y zUUVE5#W||XrD{89$PV}mhyLi4{eR*rzWN^G)M?vIvT&ctB=c2V`o!Mmg146^4de3& zmTNzh4HDi-OW+AXOl;yrI1>CJUM5E7JzksC>sUFp4RsZyUQ)?{#-kz`s?+8#od}_= zre7g?dhSNZr8JBA=}B9fUSRXT-7Zc}+t#B8G6!3eX;xfOq_^n?j3C-6T9b2y5vQPJEzr(|$*mgfFFkVZ0a^jOm z{Rt}VBw4fI7~b&%o$7^lJ`20r+Q>ac1b(!uc5YpeFOl(ydV3xdZfHmH(eol-^HjSq zz5Ghy$J@jXJ{2yi5NPF~7&GA5hgg+XyCBJi0o@DDiODq)KoeO)%XT6fY0Za)c@ zdRh4!L0yx#<=-~@%R?M z^#9yC7S9`aq}=+547$0g;;2*J2%!AP4~M~RIZo672aOq5%UAs)`Bm70Pu^!DEA^4z zrh8rbCSl4p6r?~?B~Z!@=`XSskwpflMt+`W%=63*jDnEe5?N9Ntmo$svg4n2?NKQhk4>3 zn}W=asfV8#c=qhDJ$tenyLfP<-;Iq67TPcMo#5-PQt*TATkW-bFShBkgLe4Ljf=-^ znR0Kftphg)+)`Vie7^;}3Lktz(yaluza{BX4`}zw-ZzDwF-D$SowPmbwzRd?zVYsN z+V6b%3vFSt*48(++AFWT+}?fXaogP9&Svw~g%y19C+#c+syZTf(u$SU%pXuD%C6*>vq|lNC${qAxD?eeRkN9yO;iK>m zeBUE{pgtqqr1PV~hrwNsyFvH&!@rzv$nOK^9%i>4{<{1U{arX4Y&-XD7rhb0bQXB14<8IK>Kd{y=Jh>RXyt0}$xVN{Dt#eZ} zwv>L-zSTFoS{>)BeESF%EjDn9AC>+9J6TWZW0hkIYmq8_J$2H5$T!W?9~VFLWVN{l zZMHs5IEh`@Nf)#Mt#TkrV7YT z#oC%YYwd*x_u7LOUu@f3E6D!5ef1mfwO{_v|FibSod<1#-CUm2H(Y32n=9daKE=xs zpOfmkflwbtehr>zp?jStK$&!v0V3+4W7uTN_l}c zl3R2M+R}>2KXiG7$Sb}sG(?uBDc_`~-*@Aux{}3qd@&cp9EF9$`{y|K}1=iU&z}k1JZ~M>o@CDA=-jN#v&e}iy&41SZ17x=Pgo~?>y2%o2ff1@A1nwmp zTEk0&XY&;v=eLS&Zp31I(DAxSE4l}cv=9FB=52LH7>?2x+ZeV%YP-#J-OYN&Msr?X zTg%w(_Wc*z`u1Jg=6PGfR=CEIHxA2N;6e|CQv=K&v`4S24afE+h|@Da@=(4I5uR3T zj#_N>MvdWmPWx_zeO1+8CS~5&xx>VjEm!_#&%Ba+yF$e!Ryr$b8YK-Awe9lP|pY%(& zM;}MQx3YcaSdcGZPd|Fb+DE zIQO8_lg=9fm(j!#p;0U?T`;IyXqy}B?arOsdF}t!_F9||zi;SZuAYXV({l2Of$lJl 
z{7?*?OX}7`RVpD@N$_Vd6dJ-@l}rKHOzS!dsCE_@_r=fTcVgvSv3hTL3M^PFpUN(r zp!}#z$!R$PcEeB(rb?&=U_ewF(uc#6MSmNC@idq?MJ}vzty7zZghS-D<@lX^s!u^G z05L;u@;LFHiO>KkIA3j(48V}5zE4hLCSUL3_|uISOzlyZieKqa%@_Frm)oj$cu6pxxca^+5ta?UIKJwYxcSMhEb~h?2s&EK667B5Cla9p!M6@O;S1P0PBfngX*Rj>XT5T&% zpe`=Ar%&E#Pxjs^n9IeswZ4NRIf+y50LKa3T6`=$ka(Ru@7?X3@9EwK=QO|tlZ zuzyTBE;uhO?F<29T}d8cSj zaekGTZO(R5Pr5oV^b-N~#FM;scW&7c+DDiSm*R{o6J>3E%BjgUhl8;UCWspjN1Ua| z3LKQfU360%mZNBF_w`g8X+~TN$$LG{`!1`zSp(S_Q#NgxI!Lw?h}aikr9Yj4BJjTB zo8eUdwRvQu7^~&Nxd%F9i9Ubho_uqdU9iGre zd?lT7_j{j9OOw#Fj6>>x-Ng~>xu|X^Q`%&l68@|gmx$HMK@<3WSkK9NY166QqW`fi z>By>=CkF@Zkn&yVdh+hOZGZnsd;IlpweP(Bc3WRx1IKbZr<|?Zx7zyFPIk|p*-ogJ zpVe?eoPuRX_*WCo(9rti6#0pRgAK0kPQ5L>XAT}vc}RyQ@|n31?XzW%t?=yuglz zf8rpM*O7e?c$9-zpWo3Ql#SHRxL*8Tfgd&;GzG?PvbwA8nT>`{>C5c`n=Z_#jUXTc-7qg{heX1pcHe z`mEj<)BCju(-2-gA56|qmoXLKryu8KZz`VSW-!kP!g=UFm@w{V`Ujt}W4WC1wjanc zx{`8@Gnkhn6KG!d_*t?S9mYfZX<}dvrpuKs%fOp{m6mkUDt(k407BudZvpmOLqNt& z!4zES_6X5W`+MzY5EzJ4|59GRpVgToa09mRW`h=2n%WNws#iW25c;1#915E`MTI_3 z)v8UnY72EZ*e(ByYZ1#+zp>}3>vv@~uLEg#tDj0*=&=3j2a6Bgu7N9lg$R;S+_qzI zS+@EpUA7a_D_)!y!F{q(mIZ@5Mf+o2#X|!avXPUi8TawoDn)2e9gAs{Iu^7p98d%Iqy?hfE*q-Kb3yC`?S4}?SJsntF7U$O`pEs7V&kQ z09pcGpTZ{++@y9vdv^nnI(Lar=Nn_{$++{jHd$@!4_|I;+<*I>ciP);z1_a?^{=%1 z_wTiJpM1;2;u3Jn>5HVBY>*0W`k(N`J_6jn#?`pP1InOm^ZAro@uwE^?Hja1K2n*_?wQ~&Mjgcr^U`fMsdz97a1xoJs$X!rDQYa-tbdfqQBuHv`)RKHxeU#W>j z%X0&Pi-?oe_1woMQl0p+=!UOMhZOQb3$j3CN@oa}@QW;zn?g$@Cv=E<)EE~YoX%xp zDo;I;r}_)>Q6H(XE!LaO1n)Wk@3Qzzq`GWKImB59&!NIL5jY{?tvp0`GPx2kX|Rn6 zYTI(M8NSP3;_5ZGwms@f_ELDy)WULUKiPmLUo9sMfs?qkE#TMA7zQ2EFU^)$iJ5OAq4o60coTm^{=n;+WQB*J59{JM%n4@eN4)tZLmiSX z`%GVzDv8R)@1SPWu47Hwp5ub5zqF{2B|k%C?V0xH1aZ+t8OoelE(xIgjCY-^UPkxfA%*0#0SG7lzOZKX5Fa7^KDg|+T~N*h zA;p#dqU(t6JI)dg`pWN-A#!VhaTRUMaw7+EsHc_yZg8ER97PvuV@NohF;O;Kg;(7_ zhQ9b^lG*tbJ9$xU&5ikkcMiX+t*9K1%g7tk3_X|}4hZDAkQC>zyp6Z~H{3Fs9} zkhv{>f%;Jcm-*30cnodwil!?A+XX)T68`=^_F{f}tNoMT`>poY?zh{<_D=iE4}GTX z?jE#*-Q)K9tFN|O8#~-Trg2T%d*6At{o4QZYwZ{Q($BTY#)hj=ZFO}ePqyk$#XdVW zl?(C&eRuvO;{@bZ{ftL%MBO)+XE{tuZjTX+BXk)yUjv&?r{7-(H+~$mFs=bohQohK z;G9c;zT05{kEra=XE+nl&j!!qC|$auW#GC|{y68-pK*-P>;D|??G4rfN1SgGv39Ggeo6_ucY2bA*voTlFHp`M`gHWzEf=Qzz-Qu|4Lo;*lJOl&oP z`hVKU^yy&FBBJzS2kb2!Z)HBg{t4eo?%40tI9RVm=<0_v59ObIvyk97E(hrnCLaQ) z9S7GUw#hdXMvlr(zWWFNuKL12CS998;*5=$`AgRYy`x`A-#O0aq3-DifmzniT*ANf z%?)|0!L)56lUH7TslD~iyX^~K_)2^4{nPdz{q0|A%lnVpFZ}#pXp>tTZE>>M_8vct z-uUJw{TvtEFHWgjssknXGWCu0dg#Ho0y522Uj)9P$s*pND4|J6b?3b;r z{hqQV;5!P6c9owc?a?V?i$~!VY3gNsGN7|aOWo*Y!$WmuY$mkw`aRB9`2F|HJvq-w zf!YK4kvRqG6>O9-J*K^*dYitoojM#G(te?JnKu6H*>3x<{_DTh{@Y*wyc8!mO?GE{ zt-bUDee$Q?XrKJV$J(9E^|m@$Xe;VswI<4clb<)!$Xwj=dlTSZ*P+xmnGQS0FR zPsvHz{0k3A3BR~7Zq`@N0`1#8`kH~(iop8_T;GPeaw)f+sR;~-Gan_d6;QH4qiBg6 zzH#QPIWit3w}50zuw5F)7}zpnKa{govKA?~w!CTVlVc*NNq%jl z{%Yc&&j)ptj((}H|JP?&V9a;_(Zja7b~`?<5mgn9inIEfjk9F-8yK_ z+KALQoGd0;$IMYePo9QXuVe}NwT7n8+Jd%0oferi`QOK7>09`%`DkGaM5#IE1FY;B zcr%aAVV@fErH?jHW7*=X+_UY8Kek-F(h!=+nL_2gZPfA{d!_a{8&~BUg!8Z*+tU?a zOs{@h%-(mtq^`tuevdkpFJXPEjR!cmfw7)+TPYvlY?9!*+Iq*K6M~}nxd#|vEFjbt zn2;nMv5G_V<|!&o4L>dv5MwCqSRM{^{4FT7XFMu@tt$+;qPyrA@x?O^?n&_t0B`;h z#^6ykGYX4Q2nFWmUIg5CMph&2XXAUr3?+##Bg9qiHAaStD%KQN*lNAC7`OA2haIA-Q6wU~h zKhczLdK8$W_+-M!TlvSe4YHngQ0a(6t5n?V+_Mw!={C%>zBpkX7ypI#wSS={@*Xm? 
z$Wm(W<-=f0h>=M%y5yg~@VXQ02~&JG=%h zPqcE5Gn~RHL#LE@C&0o(j!bw(5BZ7SfMq6Ff>~#EtvaTVqL_maei)aVesnMcc#W&9 z{#anqq_L?`aig3%s*cYu0&rwPyzX_<3}v*a<9toNuCA_SN9*JF-)+a!`UbR)+?_l3 z+Q#;s=!~y2S0A%8Q)f*XA`jb;4zY_vI@Nx6U*~m=_A)&>Zcm;(qm7{d(BOAtUECLs zijHDD&7;EV|7pd*Mt&AQ5_`0^7Be3^6RKOaLV_i&`S-P zlh01VM~^KJTI?`0Q4CDWrOzaB8=2HXG!>@WPvGO#0p}jaLi9RE5C*j2f zB9ZzZ+AYQSq#3#zYYT%{BD|(!k6q)wxKQ@S`)b$1U4^_xj>uXZF)_xeG?sHY6XL4! z6VUN-IW__%t9qIXj%s*Zii2}ip@Kpqq&|VYy3aK@%G_FJk~(dJ7j;(t<34Jsg8KW!56i4QQLt}9G#=j4M+TFZGUM={uDpXLvB2@i@wvaOjExFT+Fs%|kT5 zFYS^xuYT9yujFSl%)~ADJ#3$rH+06}pm*@nH0vZj zql}m53GmT;S&$&jx)r}nC&D`Fe8*-N&6Xei2TmIxKHFLRgqA0tTq`fgsSy@QJwqB}zQX*ULe#ArXd7wf7DRQaSqoqtt7!x&UGT`!zv&qxXH1S3>o7 zE~vjxz2)2bg`GjQlgh+RkJ}<+kvaN+r{8+3?W}EOW7#Er(bG*P$B^d{dJ= zm0JGoFSLEW-r5ZSZpySj(jI7|+%Vu%wdq#1d(^?SVyySYgKVJU+R5AWx5(Q`o9g#~ zuHqs;Ctzz*-i=G6>`EI?pU>qD7{Ve4GCF3bDesuF9uRSgd;|XMzVB(^mm6ebGR4Gn@O2PXM=NsI#<@Y9>H(QT_*U)GK;rA8eZW zImA%)rfTpmZ&0$|>xbnhfJ+^;UY28d)&EPl*X0*Ef$3Q}WXx-2%_FVoM%BCM&IXdS zU-6bKKwW3j%Ma-M6DRr--yycM>s(Je!#@+&x2dVyu}$Vz$5J-31i5uISP#?VbCB+} z^thN3e}zBavSB^MT@NkzG(T;$=NGLsn13Ag3goJfVU~`4ix0wI>Ox$Cx|up~EtxWa zkO|?G0Zx45v7ta8Sv%tQ{pIV)8UCq@WAJ_d@TeU!o^&Jk<0t#kp%ce;+*|I7Pm0d3 zwiSGjP3n37)}8joi!Zf<$4}eSci(N(gZ;KKzueZA=CjzZ)~sR|mLSf}yDpj8UYGH) zOp~ACDa`eaBA3{e3utmt@tFAO@?tw!x@f=t`Oguz-X6aEa@%=uuYL2&-)eX6J!mi7 zx$kat#?r_6hV%CAwf398@y}txW&4Rg`y=h(=)6sk=j8=H`q6&IrPjmp!tY zHtE=hs2kECe+5$KMhMI0Z|8ji_c0E@8k`WUiyP;XO9|?FxQ5G18(*S&{7vv#xQU-2 z?K65ud@uI{hz9;^N(oOozUZ$P{P-WcV4U<~8`OOlTN#VwQ%%@}#G}jF+KWSzs;~QBX=jX6PWEYE*sL1sxVHfR3H#w&XS74pQ|FV;Grx<) z!p_k)eR34Lq%HD|1Nq$2oPLfr)lH0k-ft;yVaf(Z@Mv$Ho6?WaX81|X*zWiO&<^{Z zn-b1=AEv0!(UqOI??{~pE<{^rZMSn+zS3I$X(QyHu>NT`>yRUqVH-|6<(j|rQ(e5> zY!k}%YEx{M-p2-aJFJZ%Ip!dJkbmhfoqx3qaaW%S1y^v$F$ekM zQf3e0asERd8cy+RrE?K^PBtguqZ>1B-`Z+BcW&Xsowpb83;*bgUuyr+|LL!_Ta(T9 z@ZNemK02ft&d$iaHD5@ zPuhrcguY?cr}CxI_>6tP=lqECvTl%7ch6|wPo6xb&0QehllFVR|3~ew|MK5xfApPa z!Rzw!t@YJ*Z+or1`tW}HkstU(d*#uK|6khv{MokTy6?pDj^~^^zPW~1Gl+=*Nl|Lr zYPZys)asTUlGQES-Qf;9+~KhCKlulTBkTyb+pU&liK0l65Wztt2vPtzVIn{jG1Odd zyz}hO=esh`z3){O0JXz;?>>9)99FI=SFX&RJ9pc5w_1p!3kMTnm#$g|wQvii=t&l! 
zCc4_xk`D*5KP6ies%_>{`a_%7wqefdgFNZcj{AyTX(i-CP98mrWnvpt7_NmY=|Djj zM_KccLmjehnHt}k{E4^h$ZtvE5v9$94q+4+C`BJeel&wJA;c7ZT^QlAN9VVS9^qNe zqpm5NQ1UbGl(rFHR2(2c^0O6t9ztsagii_Kg@0zSy>;FbE8OrKIg6kA6P$a#;XgZn zM#Yhp05gz8CK-1fUA%yWOG1 %Y_?m^-zCi#7*E^!=0=;%)QOt+m+#V$ z2W^DRBP7V53gV&c!iL1dDHlXb4}9b|xJXse#BWZf^PdxboN$Pf^?C3Q?N|zLi09~a zTQsg)mo=@NNZW_iK&Nv-S&-*EGBCJzd|cP$^})ZNJQODR$AVyS(*Lhc!MeCh9R;(l z3&+C3z;*JgW7bho2{st?+#|H5?UIvBm*WSotyLO|cNrT|X3?_nP4bgRo`gq&CJ#jd zM>6Dd;F+)DT|iWCyK`-M^v8$iv3=yJUFNEsc0cWc`oz<*hYz-cpXI#j` z$PnIuB|`Cq|=DviE*Kw!fA*7zTg;N`C^+=b@96B<*+Q#Il>1n zz@_r^IN=(EkqiYhW5e(sK1dfyX`5!TuLg)Uh^y^O++iVdsr+UebqKTcK)wQ7=9r!z z@v1z@U)3AlT>8bI`PApA=&lJ4GOLK^fe3&_K!%j|C!a>ixcUxMQJaP% zk)b8h>!`7D?fWa>q%LS@gd zByb+EeNbYDQVq&P&+?ALx{C30U8*twOl}|I5Fe$MYdF@>=M_Sxj50OFa$Z|joMPzRmzO8aZPNDl4%*JnV+yyI7I(7p7yW_NSJsh zj&)4b_1Z?MxH)+3Ic-I!iTtUDJoT1s$<6khH_4G--eeO7`79TwMVh-Z#L=@e=RT{` zY=b~Z+dUur7B@R0dlzMh9wJM53kyQ{LCT`chkQ7BYdQ9(x#!Q-;u_ zjz|A-;Kt-%CSzkSh^I}ay=k+;ovND}LR;l0C0h~6BcWA3izmz6Hh55X=mkbHq!cF| zRPKojofeZ;naK^*6ijZ@NeeS`jq9hYsyNG(GbhlJeYL~lNgT!wD}BepYSL?BKzNkr5(ov0IIKlrc1-$V zTNU16?5uQ$KFa_@-AR7_FSKKnv3(Y9g7B+4Ef4{A6Pg~o+pM@lT<8lckoZPUziDz(# zLIfOYX-o}v;&>(p>9RJNo(e|I4}RQTB>A~UepyuFAq{5ozu4MCT||exhK7psOhyMx z>Sddd2Hdc(bSK)E;#}9oJd}=`62!Tvwo3%Z6$JWy73Yl7r(7cH$aQ?ox-J{dou`5K z{NOivJW~(pDgSi+qG0Iv1#(1=Q1B{f*T#LFr7hKm-yBtGxmniXG0Nn+Lg6=#9yvnC zS0AJ77<9)Mt#3;^Zm?q4W_m)F@Q@(ZeOZFS~`J&E9r7h7W+u<>`#VMAhcJlC1JGuY3ZEQc&7LI7w zv>o4=bNg$3*nEeYHouTb?X=?DXQC9|_=equTb`aRw%OKZyYZQyX{(PPw{LyxkJ6{? z?d;?eP|~D-XFCj?^dEtPlPH!-WV2 zebXZ!9$-7QqjKIpPx%E;@Q6&1U-W=K_0#k`$ahj-ev(DpV{^F|p7e}K|C1wi2`Ms0 z^iy&6`ROY}kHTpy!UiYRFgjCrxvqOM`)hvS`Z{V#z{h`bEI1U4N{q0Ex4c=UEtG9RD_<6s{~`f1yb| z#ED z0)<1`yPdIKCkvH#JrM6aAYT2`@ADhF!(XrtAHsxod2RhY-g531gyux<8VD!b^FX54 zS3ZSx4EzdT+s~XIbkhFSBqeRwPk&q~BN5V~){I5!`J5VUqHuKljZDw*+bFZNNI%Zm z^c3bgI!f6UaKof!Y_H(sM~J&Q$!E}i5nk@$(U?e*r+E>8+sFf3qs+`_J5w3-N!+?W zZ5+Ckk8fb=gN791+}Vw4NngstnqzmQf~LKLoI-yZDXM`Od(_j?cUYuzHBe!(%? z6u7JC=Em~0y?W=l_RRK;cCxeI-hbn*_VD1Hc4M~HuFbFy)HMO8>8h5JYue|k?MSlm zy%1C2*#_)0&Xoa?7up=+7iP0|hJD#5-=Dqydi%lq?=mjF-d=e1Wqkd|jByX!tFOF* zt=eqI_~D0phqQP3dC~6Od%u0-8?U#I|I{n(6Q6t)bHuoJ|8ZpH!^Z4Rf*&67z(D=E zW$IdR=P19hD2wIXr{4!4N5U-sJho_)sEqNd%WinLYnU7M|E+-aZV zVgr2;zd5fRbQ^acw%S*?u+X+D4Ck-q#drXmmjd*4({Gg>b1Yc%So(_2VR3!xcwb#o zf3%&|&&b~FY7icB?*5Z~mx~x=#e8S@uiTVgI5kRQ~}y&*DzH1W8xnYad;{6VT$PtzctU zZ*DU7JZY!9$mH~-J$LJd)Ud}%rL6GQ1jp^*$yr7xk*`BtSIo$AlRJ3n#bPDjqieEU1!YyaZ6zSccaJR|`a+Eh6#PWP@^qu^L(9X*58HqDkN-dImwxxFZO<-^Fr)uu zc+hTN-)gVEaJ&7&U;Eki+KVq_akI~JGA40;gM3-^fbUuDrrLA(5L~5CUX-6EU(W4E z@fQ4qURc%R$2d)W5KjKgnjcbVZ{`T!!Yl2kdSe{C&$-%W$~dN!`RHugRp+Vmz|~ac zD|JK%xdcG9Cyu}(cosKBl%LA$+)sQ5LH|c}5l5stwuZtShvlx9Xb4plb6Yr$ zZ%K6S!(DK3Cx|S1;m<`TjyZ!rWheN2+qZ7D>(4yL*k%&nYRcHvmv>DU2g`*=9ugos zGRCO;0m~f(4dR}@DCtU;dua{zl|N;bmuC^qfR3?p8dnO!m=Kcs(~i0j13y;=;*h@! 
z;qt-*GR>R)@S@)Kx4$A@UE}OaJ9&2MVqJJ>dZ18m+OhiE|CCnkz^V=sV;=FLys9#A zT5#v6=%6}48Ss|;b8{Q4E3;$ zWhbxdBJ_fs%{z|EFwZCiu+l*hBV+tAzflmuUp@HOp)#FR5L#JX$T$A4U)yS1*VnTH zfD<|n#(i^n!M6%9hBCql1o02O0M=>Bo7Uj~I4rou+hC}{GY87=Mz-UzBW{0hW0095=NklI+0{h#sZZ29O>+|IMx#-Fb{_s zWYFmOr?YD`^;c7(IQe3vY%H-_-j}{ug>)HPCKW>pF2>uqlfq5uAmzFBNY|Jfu5mzg z3^E~CVanl zUdbU$Gpyd z&b{e=9W#SHZ_5r`Qq6&03vaIX9gHFM;j|Rrsvcr;FK3d+He9iRSfe3feEMMDi6XZ;wrkzj>7_;GcZWufaP$~6#gMj?MD`Z$OrNrqZdHI z_CvFS3v}_;?c43zjT`N`=bmY|Zr<$h7Tez5aogS7<9OU2?>vrt-6g~C7G_tAJdl?* z^aF6}PdZW&kuDr4CpfH2-88?eW!?78W5)#m`+M0w&Nd2wwmg}(XI^@~J@?YfZMJ9y(|7|6W(BYmWe{c+MenV86N z38#YX-6Zw66IThGB|rW<7ngjkpt%`wb*emC3POWjegd`d*!6J2(Qdi0?eo#^lZIBi}yc4C}Y)`_D%o*v@U&=1_$+GyYZ z)9+@Ib`iUMroH#Oxny6O*7s2tv5}fg7f!m!GCMd~tMC}eYjBPaj@seQPFsi0TQ_dE z=jqq{yv>7$kFt2k#i63HHeJsG&iG3DS*uR6DXXP+WMu^&<~0=@^*g0D-#v%q$V<5Vx&Y1Q<)m!Ez#>=>KDrZ_ttUZv>XW_#x8@0b zPEwIqJ+}`sRsUwZiDWRJau2&L6O|~Lot+w4wDCh7^oyj)YvE;gi0Gi%>~kzyxLU{J zvNEahX?UJWQfbG$`cj*fNuI>TEovk8R-ccEoG2LE;k8T&eg!iJ@wGT!+-vp@fvDF^Gf4@C?|6V&i+HD(*`!;<5g6!8^ctg8@!ul>a^=X?HVZ1v6 zNJn@lb4mAMmj10<4licm>|#5l^n1tq?Mq+(dULaa?Hf1St-E*I55D(i+rD|defkrh zZb$prA0NWqhc5JI}WvdH9zCaG{+}L8SiT4$(<_D=fo(KB_!@GE zpY)d;_HoYBW_;?yG;&yO@U|X`l-;(z^ryY7the6|QAj5~HGk=^v9GD$f6^^xy#DAi za4ce6eU1m*x~7V^_d;L$p)^6O~Zr8}SyX#H@<(GrmWwj;Xax88oI{n{6Pr~Tfy-b7zZrq0>D{l=T^=YRfZ+rx(s+b(5}?N`8Qy0#Xd z!^Ncfq~Rib&rUwz8NSL(^;yBtYZhKL2boWG5u`yJZGVa+Ldh@idS6}(7=;f`!g&51dtW>)PgeySBdCZf>l%pZWC1+F$$GpKdpAY`0Z>*t2umB)oE$ zj(S77+FBjzuzIa*oi9NlxF7h7YUb`h$|H+j5=i8~)LXrm4@A(8tF@R`?V;|7CAk&= zcneR3KlrI^YFiM#Q!pn{3su-+JL+$J*h=fwrllB85+{=9or~J+l+7->t*L$ai#y$ zEs<}5CyzKT;Gc1Y2R09VnCq`Lnz+=_#}CpJ8uIWLKMq~dS2DP$)E(!<-8rY#m07#_ z>@#iq)*bMu1=kylkEfG0xKnM|IjzWt`qTFi3b%2tWB5;zTV2(!<&j4G8)Tcfc^PG) z{JF<}$$Z#&4(2#mn{36A)fKtR^Ta3%%czs^fMdZo`@zL?Edro_$g_E1*hh0%&hcJw z;!ZiF%@$C~6>K;dxax>8smm+yHA)hM5h@tx8fQ*_qE`S^R@o+=>g2EYc^(c@p@@8< zRy!+NMpj!!I_aCDqh(hsPxxV z<#A(d?VlWw4^^Iwj-PF7Tt}zL<9IVTONWPl))D0jBB@4v5{RO#3jluVbHKQphS4z@ z4J8%8B>x1KiV$vAI%ptFi=ib1fyz^%U}zAz=y3+G$u*Eq8p!1yCDPD@d6JER#CXNN zcuj+I<+hID=GI!fb^Au!+^p|Y+Tl)o3zTb(w_EG2^lwj!lE$&>>UimCfL$?;gScX@ zjCR1TWFSk%5noIVA|1c3U`7xf#;RdPjI#lu`WNHc?@hem-Yge5)CGm#oH$dya7}d8 z7tJ@h8iqKgk)5iL8jErw>aAxQjssNA=GCCuk%SHp_+wRh7)o7fbW~@JDqrO4gBAXj zcaSLT6PJls^I&}3rJ)RIbq*Ram1Xc+z9zp$FTCK=PF^{s;Q%|?EMo=!blnga0VbjI zWHkLWAN%K=rmz zM%`fkt4A;ARZ#8iq@qY0aP5FNp5bY_I7k!uNXDT+n%N?lXgU2C(*M#EHV=Z z9q@9f)hQj6!c!+c-%dmC94OenqIVkb@UskcIpDPt&U(t}u-@*NqsPV@XUCC_03H}2 zoHFq0bvev6aasAfKq_8viLG$pH^dPcP*!;h!vUE*)ETwjO9QJO0|#$EPM0u);H7@s z*2?IspJi8~Z{Z?}r0C+LW|wOXB27Ia!NuJ`BXJIf<_= zBh_-;1h(}Wj4Ir6TX4x8I|$Fye$Au)0yjcjt2c<$CoWy^%8V_hBnYsXFdGufhDb33V!Gj^w3e z^o;U;L#zgy(mCoK$P|v1UX$oR0{4=&ZG`bE^-8bsO|SNr#UF%}6=i~S4gBoi!xs2v zxnZZ&6(0r_(+QH?(a9Fwls(l>MX#+F`n{%X@Z#G3P1rhd+K@QRl;fw)1k;7#S)1;T zD{jx`WV7G#T@mFYM>I^Wg0xDKHotbfREgiDYEmZ7fAT6KOfT*c{f0U~+B zScCD^4nGP;@bl>HH#QqV#cubqiRLJMKx&Lm_2OL&?#!;^Ja`c?f+dUj@cGE^Jy zjjo5vug=vu_Kb7Yk6k!3f%sHu9X;LQ54!5#0&OX@Nekd(DWO4&FZ~%!Ni4dret48G zrQPXBBy{*%F!LgV_`<=)_ zdxF1~LF95fxc9Jq_Yc0*UVQ1Lc7CweCg}3=!b&?jJgvz*@uV;D;s5F3alV16ua^f+ z(gp03y*+$>`aJx`wav|T_cNbpr+Yi?```b5dwBnG-e}$4+yb6{!cy~v-An@caBy`M zTT%-_^#y%c>nDsz1@{x`00&JU`Stf!;)m7X(}!ge9~(!yexYymFHfL*@EAyWgBbYI z?>NncpT6m%-uRYX>@IZcBdWV&@p3*1z@dEYU)|nSJpn9%bntLON&iusp7N2$X&y4G z6S4dBv(lXfXX*e7mxt;c;-Cymuaz_9k-0vX{Eu%1ZmCiYpmNK||4Y5M?9d;sE&q(lE?WF~uU{g6Q4Z47a`hEiWfI&min@26U5505h2x~LdA0A+N#f7* z21Ln4nuD*9Y!~W|6?1R8#D_=9kXTi!^xrE=phALw{wlQ}!3vJdbXDKhNz<2jF(#CEM z=9l~CEqN|yEaY2*MPvC*UdJCq1_QRo;9>e~&gz4>=6n6x3W~)ymwv{_E*M!_QBTGK z0QqBiag`3o1_jfxk=McyPj%Qg%xmJP{8fE7X-OME?(y3QrGwOi9tJYei?%8LvU?f0 
z_^A;W3FVuu(xVJ5&fz&@d~g#_AEqfUZPf|1h)Z-bbungk?u53j9dpv|FuWBI<3;S! z5irEn2mcQqJ!;1X`oN`+vxR9}rOmImY1>{|YoEf_&5+aKgU9W`yKhnFLAySkwH4^u zSe~?*dP5!BIbAkeh!0r66q?=9NZoef2C@YOZOJ)!K|ni$jx%t*cXZTFu?^pT=Z*HQ z``>G8Ti4r9edg2Pw$r}%!?)UJe)?zHOLyy9A8cw02)upRTv>fA5X< z_M7+GfBL`p>+SStq0KfKS0C?ToA>fb7kQ{nT%E2Vd-w}J>XABJL90>i<%wXsF-!WE z9=rr*9V23daeXzehX}G?7m$8jsZ*Fa*Xbj}$d@qS7@j_VROn^U=_j)exL4tSSTNl~ zJ0pM}mw!~!&l)eUeG@LUjP#EPb9%4VLzygKxDq!9^W*R_og?|L);+GT#(h{YeH{P4 z)bIIfUE}&P&a2#A$u{N%o($gGH`sS5SMGy`){H}=i>->UK-+Zj`GUJ(U_bbwEztK) z^am-AtvWe4r2QOXFSSJrnUnHOYac$Xt#1RTVLNuquhRy_hMu?8$uu@IJN~F2l$~tM zVgvRM_VWq%WJu3vHeAub2$>#v2 zfqL9cO?xOGYT*EOR-ePZMSgJYJe=?|){g#EQdmXEn}yul|61?xJG8B`F#aVMHBaPN zlG^mq$m`AtK9KcKKgq*SOl2-0H585f#P7@;S1KhH9@qzGhZjv`kPw%`NBEgfb$@O9$z`f`ju0kwkD z0W!4zHO?0^YR=Gp+rCrW?00IeF!az@saTGMm7_XiuW8@ug4p7N3p!5Q&dx5!PIlVy zp_!k~@?q)X(xg3nxYK_9-~4v_XJ7tCdvE`wO(>VQ-a+daTp!bKe(|@z)Lwq^nRe}( zbgv^LeDXCPWX|XVX$Rm@56i%T`qh6j6%F`KB!Q#;0~Z|fc+e&-3CO@j8OjnAI9Igx zFFs-TU?lRFXG)9PG*yD_$WX&0uJIM}AkH}bd|x);M!wbI0t)j1AKAOhvgyj`koGJV z&ZGH;_x*R@YybEE_Mf)j`0Cg4y21$8*Jtha<`RAG?e!Ck&woYtDe^zCQj6u;0 zg%H^Or6^}JkfscB>9r0jJYNOj=o4_Oe%o;63$BjCi=nyqh`SkMK9CE=c2zr&1LjLo<(0e4~AY? zr22g9C4Yh|mz2EnrMD+_qJR~=3hFS?=!5VwSCWMXU}IKw7kzV4n8meq84q!9-O)$! zD6|Wox{SVMw}6g&<^uwYbP)6n`Jf}CIT9}`Eo_{4K&yWlBT-4&4S}15l;q<-e2^#d z&?qwr|B`3f71}bYPmixdyIFWu9jo9f&NX+P`PcXj)~D>GgUms@>>3paG;+x9AC^0# zewhp4pn%;2yBcGY59r*Biuns?$Cp5$2=oq~S(qRwb4h@CjISP?5T3v<^8k#bH<$j# zXMXB)80G8$P@xpB5ETx9R3<|(khv1cZO$TA>B*}irIDJJOm%)LfLf0q@W#CcK_U32 ztR1Bw7__eXz;m^2Z*R8kYnyFzQ-@=cfxgULTD9Y@)r1H#^ zFd-@!fjbzdGOIjg2x?-`ycS5I`n9nDR_Ojh9d%k?ut}p%V+B?j zh7rimf4ERcTs8{+O5w!EhFA`c3KS*CJ&qK{-Q0HMM9v#%J#-Z!l`BtpP?!-ZpL%`b zCjuSa&=R>D3C^JfUWSMKIcaF7u-f2xTuV%TQ*o5IoYY<&7jEHF&Q1RVx4KuC3}((n zP#-dl1xs4*_90G(B~EkF{g+0tqfU7ezEvs#%0rhy9hQe&4!V;|`3svL;f=a&z+B7E zqRsfCExgL%zww6Bqj4QL)Q_2V%YX_4<3zLV(7HTqYu>AqlKw>e(iE80QK`SCTZd;m z4t3Qu5KLQBFSQS8J0@r1k@7HXXiVB@^nhz=m8a^iG9%V{hb+%8YAZMKb8^Xe^`uId zoGUN&i(?!&+AuIvj}w95Z(f5=rrB#yBb~LvRCU$(Qq0ILm?yI*6c0&~u6d|>3J>D>C&okUAiQDMnHZSLXeF@?}s+K6u<+$oAgyTuD2U^ZntNif1zE! zbEi!=H;PH<+upw4G1M)DpBaditkd5@vmDCI|0G#|VuM@`U%eaXQ$rBYL zZf-$Mlee5p_dCB$n-817xnCKwGmQC$0~SG4p2Cy5IC~v^NTro#e-j&HoQ{+C(Km7E zE8d~Oww*1!NJtxY5}E7tYXvT_$Gv2RR)Uj7k#)8}ta=TeqWkDDKUCoW2p;ONQ>8o* z=Fmr1a9Y+QylF0+0*HX*m%2qh7lP>L6n@g9O)aO+ng?&u)TCK=$x&Wvry1lR^WuNi zEzR2eEFb``cxy`>G&s3-`{vEIIK^Q*IcWdIKm6P6lRy1xyV!fw4))$_XUBUfQ|tf+ z^vQ2wmksE$Gdb|hV_H9g2Zw_&JxBCqvaSZo;{Fy;rZFh`TJ_!$^Cw%m`Pn z;|TWp(X9L`w|VF|7zUh_x7@(bR-X8t$0Tn|tH?U1JtX$>tjv@Px)a-RB@7rN6sUq# zM^A7I#@r0utLWMZ08=Ym+g>N7()}s9IiO8q@Fv1WYTnM+JbX1T@oD4y+aILg@>ZYN zd)i$3K@QKvR-OO>Jo+v~*;cXSNgEJvQt%_ezF&)rclMb@Q0zbuEc@YM+gn*(!w%4A zND2N;K1oA+UojD1;pE_PJ3l6lEO;&Li~sarw_DrS+s%zB_2W;iOyg6W(Qi~c1uy&t zzqz}zf<3Wc7h#jME62nG+ugkUJkxA_sXcp*K4$BB+ueB_pH2TS_7@cPj}F_R+nWNy zLHY9PLLS_^#WqB=m4#{OD*Fj9ptr_R*lBr6tP4N&|D;Qx@q0k3zU}W@d`IzfoW$MV z-H&|M*48p1EN_&BZ|1oxfV9*CB?rY$o%Lt(OndkjZ!Oia+LL7dF&7(|~@n z?3r#Krr$GUCEfC%+Dh+_=e(j0VOb7#(y|J+_i1O+B46NM@apY5Y9X(DHqIcdv>iegmJqK^vH#{KILR8X)#Ih? 
zQ%_LVjz!4poAqfU9gZ^08~@fVe%wok_^hnd3k)@xMm?SX`m!1KgO>D2nX<863byQG z@mE^hCC)eF&1XB0JJ4wg4dfmApIxP-1+kp?={Wr^^<9R_e|-?rPZ>Y%VLMKIr5+sL zfAFw9e7u7XeA4dwK>xvmES$~5d+J@smboZmV|muDPiE~CFTd0-X!8e;9<-hJ?~&)C zZTWBn0&eM3PyE*I3NjH;b>`9oU;dG68gNBUg%30E#J z%a1S;uV%Uul<5;iN{{DZ!25{ePlVt!fIVIBPXfU3RQ+>!Pt)~LVbt^E@_z(=^ZBzM zgYO6AWT4E`3mE5MenR~JedYIo@U;B;`6sTw?Du8(VEtZQRlfx8KM-ZxPlD~R@X}tX za?+VGAmgBH5AMSX?p>@{pL)c`Iu}G6#c&~(^LC=`*gFU;ALdUdle8(@-(s2H+l}c3wtl(&;U9dx{qn#3SMBw0eJ68> zzV+`eiN4I?;s9xIzGJ#N6_Mbf|CC*9RUB})k32fujjYR0!Uv%*%N!k9$_EpU^Ci{K zE2iV4V|3Fw%%gUIPVM*^R1=n`?fY-N+kWNW{C4}-fAA;m{_auRg7$0l0qfwW>;<{H zk`LFl^{ut`#V>!Y{o(I_rQLt;-L|!{N&hm*ywIcFoh;t;Q=Qcz+o%!@_|&PdXB#rVIvLpsD>@=P zd1gB(eDt|p1Y=v3-{BME40ptIK5QR<;o*b5_OCwwh4z2_=fB?G-#etNpXzAs=Cw`6 zH5={O8yoG_7oKUK`NYTD^UvLCGk8D~w(&$CMNoRxJNuZkToVU=6+G}_kBC#3oL?Vp zEV}`iPd^GhbUrM+?{tQyloW5F;>!}37xe(Ge<(}|$Xt$cG=ZE|zU1bD(bNqI^33OA z24R+c5q_QZFD;aDl4y$oBp&4p{a3Nzc?O2_oC!7TsNL?38 zCrF>l9jXfBY5wd`hbu*A;(X!Mt-7an&Vvcm-6%einF%WE&dJFGzzw#0a2gK zS33d1Kl7eA$7{y2y_uKkWwPi=INta9R~V4AlofaSnsa0+na=Nwka0e7%CE-Y^h4BR z+3M?wQpVB-KE9lnx~VbvTDN_H3`(4GQ0J-d%=wm{mp)#!l!d{$4O40XfjPv$LwYaq zv(4Dj)vNLu_yb{KRX*FZaIN3lYQMwor9_{%wGPwG+rADrN8;vk9`ifcY~11o5JR*2A4)=ps)tj#1QD&JTRp4P^B4Y!8!LZ z%Fp2tysm~#KAA$jg(P(lXG16_#+n?=FXUnW*4Ad*+?ut`%{2_h8phAT*$N$&E86SY z35iTBp$yfj*r`?LqJqRA%WRooozOcMhH=m-qYb$xNzs>bimtd4Z+gz+T=eIdr+1ht zR2P8sCG>{wy@sX~qLKiXdWC6R8jCPeKd@0uSIfww;DdrYX{NGeWtR<*!v|MUbn>6} zL&c5LL4|`hJI4B!MWhNqj>wNh@(i539(*cFm0>A!v@_QwzX~PK(zpSKe`VVFl?Fk5 zmQhAID<^O&IGmN&XxRB91o6@jFKvV|_Fhw0It*_{h+L@)n69MK@K95sWgG$@j?-ni zI;Y;?OHA?*Uj<7Z)U5#zZ=yKhm^1hTJ1i_vgo4wlNWI<_T-sh}h9wxKa@L>)xDKws z+MK}$cBK6{U^$_J2lwA^-}#Ggv<=$C*47qI=n8FP+D;BNz@njpg>k`99`B#D!($ve z25vK)(FxAC4>7IFhyFUOeoIZnA3eOE$ywi)P`4yW)a8M1bBXtKIuol50Q?S?gQnTU ztzTEk$H_~mb{iew=+NK+tcMTQSG?{A{0POpLol&$5eMkkk(S0@Xz;pt zH=vAPg4v?K5FQ-5aQ#IF+Q_P7lz%GNMmeRt9gTSui*ifCWtqWK&qJ0KxTpV!b1lzO zkMai|I0!=-mh;5D@P{4ZtX{7&U<@6$LE;ptowcwWXr?dF=Fw;@=0l%TY@u5ZlE|2E zl1cyB#(G;{+h}W>TlrS#+7{P1raHUJQwD96*EH(P_{RQ$4?WPWtPF$`x)claM>DrR zyLB^*3T{4or)}N7**30ix5?Ui$)6u5dQZUF2LWyV(RI~;%yDw$anGxq9b_AfD;ddi zu2Y3FP&PP(u3C0NCb?ZEN8R*@=fC#B0V6IbIc ztgdVS(+0UtpJ_uPzqop&52c{jw!ySX00>W8qn^l1VMpiWX>dyA)CPR&C^i+$Z2e8X z>VNHDN82vjQV&?jOBn}xI(gw6L0W{dyRo;Fh8c9#+u1>H0!D+1BESdjcOC=RI@@aQ^mK-uT_vSO*xGUFJg4x0L(pIIQSvwt9j=*@j zylMV*@X4SLocms`hpTX^%t+@zmN{QJF4HS~_`Mh49EG-C&*GpCbUW9>lV$sTZ1xBN z#((M8)%=DkV-Djl;i_xN8X77TZe4!!!@zm210#rodTw8;tulSIZA)k$Ye}pTnzqVF2Uzjd(j4fAbaqmfykV8O%T{SLDGT;5VVQbyb^8-M(p0UI+X> zoZElzL3`_~f7TA~KWtm;v$lTodc)ajkKTT-t{2G1iNn&gbL{@% z-fmk#2aR{4Ws3i{yf$rjpSufQhwagWhwa|I2kriY2lQX-@%ggQ0UL7$Vd>AbE%ryi z$c{}RP2OpHsn>pyJo4H@AL#PlZd3bQ<1Fv}9_!KJQJ*OH?LHs=Z$xHt@aPcV4jz=> zR1-zYL3=At=dxE$rc=M$_&ZsWMSPL}A~ta`-x1cgv|fYux9lo;C>MO8*KhtNj>56$ z{cwUpdv6f`+QLs@;IXQXIKU^p+w%CR%7owo&O8XV4&YWS=|Kitjz66@V$?CmbzCn} zb}UfIcgx9{J^&f|VAF}9;1++F{P4BLX!^@=gv0EPi=@wTPOjM%i)(%N7kwdK9SlzX zp!<2dUR=4?$AO`t#Rt_k_u3T>*y=y<2fvDk^%kYPa$Ieix@o`cguDHlZvy#n6rg~u za#rrcguLz6L!!afBiGIMYtt5&RcNN~0IcjM0quL0Hjd;nVm^ z>JT_`Ukh(C@=Y6ehq5d*P?i+W;u`7^2Yp$viS5`wIBxg%_OkWA<4qU3K76#(_V)TB z@TEx})GRHmwC%}OyG9%R>}#KB_uqQ2?LD~H&JXdm;m-~1)EaGKlf3JbX%+`8oIo42 zPPCaolP@k-mqr%^*g~{Vp%wW#w&kWRPW>zae1NdW_(I=&9?USGUG-F|SfDd@IcYtQ8CueORytcRRWb*xhaq+^!ar^Bre5w5>fA4R$ z8#mXYORIjK#Rr9%Fqg6L2fn6n;XdDfRbY{pG94k(Bd8SQ^-3Kc@yT8~@Z>SV)$6B1 z6pdq4sc^?3?|wwlCqf$E$SiTf?B{>4AR^-!;p+8|3Zt%$OZR;~a3owghX-7S|DH#_ zYJVR*44iX0?xmIdBg=?R7JwfCvu#! 
z;`jNn+xxpaZ52Dcn)@Tn!Y+Q&KE4C~6#dC;eJehTedh9H(pK=3d`N2l=;xG#p?^?2 zK-y0&(I+opfAM8VOV2@@)fdtJ*&L+KYp>kh#L0erlC)c3WF81zSfP&)ri%~s#mc`i zz359nu0N$sg?=n=`elHmU&)J69x$i+mg)oT3-o`aL0_Dn` zJ}~%69HGYM)h9@YY`3q;OK#v0n<+fYlV`vzSq4O4+7AfBeDl5neekfZ%oUr_GB9%q zS@?+u`-v=6B}qTPd3P6r>9j8GL;5LouinHIFX$z&yqCr)I38z_0oPOX3VE+zUvHC*Y5UG!zTWuu}WR=aiM z8hyxh>X|mTPp{8m`C^n0J>3Go79Kg?uelff7yI7w1M@~Hv>4*Eped0T_wuD=Xt|P4 ze4rkuUV0vWkq@W!SF4QLXRmEp@=|{uTA?%gPwKqSQ}XP9dZ+7~?Hs;MXKU^Ldk@>M zeE#$8|MpA&rhVtlcLKpM1McRuwVTNLg=em{&wTQ=_Nh;NyxrK^%;$Y{1=Y)1NT+XJ z^`wn(U%Qpn{7bc)I;2aqr7<+yUge3GRGzk$c+W=gFOx{rkE=8TaM0W6ad>KeqrK;U zbRk!G8Q&XQP;Ev&fddQ{vNc#1whD<$=Y93#0Ti30&uTm87T!whSYVS13qs1OQ1S7F z2hDjd6(5ds-u_oFvmZ3?iq#bKQegc;Lf*%q3v-QLsZR9F!Y#cA~Ph;_jW^ z>rcwh4oCgWSdaeF@eJ@pC%XigPdvDfj7=*^^syrsDUXCiTcL2yr(~Reg|={sJj%mb z~Lovd0c@nF_y^lzwQr*oQvvOx#i3()ATxA^-Iex17br(#XlGojHY-|1s zT%6REj9)mrC_L>hG79p!S3iZXE{ap&aPKEwvx`J%R3FSsIsZuyB+~5Lq8hR~avn6w z;~E2YgP7~IZS(U}FaF9Kk1DGUQyRRFLPw>`Ukk|^=@>YYGxyL)8S8frqy7jXQ+VQE z5!Fr#AL|sZY^ixR&mo04U_=oDgnw#)2i{gWui=fvhp&Z?_T3W3X(Ngc>b-GBQy&?O zw?9!sF3y+UT>8bI`S|C^Ql9{zvT_&#st5}b`LDEr+=!D*DqGJ+*r#u|F^H{P|VuI2Qq+gJCSrI7{YoR1qX;jtTI_6 zyuhjs#p`NdFd=6J3{#;&G3TE6C-ThiNwabEkOxu31(7Sk#y|>X)Rdl&@}_voo04d9 zR=7~UIx6DlpzEf}a7mqoXDN0Xm*q(JK|i-q@KYS4%6Hn1jpyFMy{q@@0j1E22LtEG z3WGVXI+?)BWEo-`OiTsKIx(vFbDx5Bqr3teXrt3ie$Q9*$)jYEm`cy}6+U(P%p3k0 z3!Q=u|M)MM1=l=z6Wkc`sIygGyh{>Y_u7NU_tRcBx39I;Y$v^vPS*iVa25b~ow$pDI)nl7Evt1LJh#FUwoY*tV$hLm z2>sR*8kM${x4=S+#)i5<(rt)z7L9Y;SMhX0N@rJtzPG;@r&2mxC6;ghqpL0eu#9!N zs!YelvIEa@rl?2yQ=bDG_-iuP>0BVo`zWi|TpBdgu`z^G+CU6nWW~K5K@2W8WYI*s zKp^%;y2+Hudj3=->k>y{$TM-*U&@L%cu{F3BdaWV6^_OY8&U^&&<=gDC;WOKUGZtp z>Lh=qkMJhS?xi?YCsX;Yq3Wok>NLLEf0q%uR6MOT>ycuhbeIK5I>IgLO_hlru)VH) zhN7#&(^p9oG$%@jpXI7vN=4D@{~q0^I! z9iep^vI>#l0MQlM4t88@LV@tdZQq>8^exG3?MJ+mth2RA+t}P}o7Xopkh|q~Oz+%m zw{P8Pcb<8s-M({&_?xu1t-R&G<#Fv=+oJugZTY}|tt~R(8~w|{(Q#J9x)L^TCupO^ z0cTKs7Gv>|Q_`-@g36SYR#YkJr`mZRE}&};A~O(3n>7^8rbUO1Er9UkQl@n-eX0!d zx#Gj`+;}z_%wY6bU{PgyAaBY%xJGEOZ;#O zM|(18+svOy33W9^d>E%=T4~}^L7r4uc_t2np36MF-&7X}#Y60o$>OU%@gKQ3u!4T! 
z%14869MD{asccf00cR(JK79~xl?i3V(?j4H@ZxAMXFz8j2N37&=FJ-nn$X?j{r1zJ zeYO2(|I5GA+Ww<M`;?eO5o72v6thVE~?oQ4xe+k;w=0d29M;-&aDZ+u&z~tJewAZN3jF zU*SsKh7aKRROsNI2&sc3ML&@GL7WIfXR?D*%5#<=w~r%x7O0o)FXzrjwQy7^x)kXD=sfwd zQ-HDPQ8zW#Tz;wfoN$&U$WPl(gQlc*l5++@oYPjR-;7*Wmz+wH*UQ3>3xf@n}Vf7uw0=ee$1X0cPr>EiVE8ocfv-S*j!w2sV9zSe52f)PFIRUpte6$n%9Q&XPaGOrFI~Um!)g7S3&0Phc zHgdHd`7b^8&-J}h@10<>@2Q0k^1}&@Jg|jFP7ExsE@o#PAHd6p^8cz&T3Mz{bfo%T z>vO!ORE({+Ykb3MrBBA!=LFpH*M&~<+ex9|$DjR)eQLfRtW3dG|JXX+f>QYzvSL2| zr+#LQx9nfNP?pk}v7-~czzlBi$GVVs?hWd77J-3h#=7c9Fju7*Z^}c;r94HvPkHJP zLiFc~9{Fa>IlhYg3#NdIPGv$JnHit4n?9`>sFw>T6fF_HA&c=H)I9u^4g5)?ct%$( zxj3N@bByR%x%xWC2DY6s`KqEj;VxB{jjzRD?Koy#z@#qmi5?Q)7%LboXDGg*U&>OP z<(Ub&7PktK7ZJ95^<6xT3m?HpKDpS!PeC}&urD=j(BpOZ=6#n*K9|A$keAnKuik>r zGVMWG`{2N|On6g=?Z`J4$HZ&w-LiaE2hHF0E1`5o{H*8H?TLGe!O0nQD4!nSUVqY% z{udbXG+Q)BKqhEI;3>CxlaWH%4Jv;o_jU>WE*zB}Vb~5sn|w7N>CrRl7Jq9UpKF`z z3VCdkV-BGfIhh~c%3tuZu8eQM_vrYn?HwHEt^Y?meoy58Rcq_;dJXuSj4|Cb;gm6;lfAGxpBoU5 znggL=QrJGa~M-cdV%hDG#$wB6nPBfym`d<%6powRq}d9S_m?mO*2`FsCSJ3KhT z&cZL>PPWRC1C@^sjZn$_8a*Pa&_{$HmDWSb554T+Cnz=#LwQO& z>hj+UfZ?O^7u`QTJXQV!{^STx z1oK6Ko;ngfBtB^XwcLlnHhmudp_LPa@uNd>%=ux^R#&-tuyQ5nhha&4SmN$PYCaZ6 zqlNwCo?>nduccX_?YiK{ho)yI$N5aNfU2?NY;b%P>wEaer#`}L^k!`XT4wOrK1ljpthqv8F@awDx(Rixg4OEzVw-Wl z&GiOxJM<}E`TBR;@BRKCwbx(&F22ht`cD4_+&yS{w7+XB;?}uO_0q9>wL0g4d}wP> zzUlM40lqqrKV@YUhde2v^9$XP9|@696-<7wO{#gb;*pBxCnPx0Mm(>e>nC}!>(PTp zS+sD3-*x}tqxSmu-e~{NfAY`UfB&z4tNq2h57O4PnzyfSv|C$S?auaEd-mq__VHI= zX+QJvkF{rS-)`%(dh1`l*;d>-KRYxL=bPU8pJ_)fCJ~9CD}@G-gu0NE2(K$Hd7^t< zf=K?z6FO}qBcK#Ty8wu%jE%M67>ei4Yo&_ZTmBM;un~co_~$Sqb@&&*q>|D;h7X%D zfo(=Rne_5&lE-x0jBPFBd;PG2ArFcVWiRA$L`$WTVA+gEyWNm~bzSuuPO&eh6#Ux3 zDq|li5k}rVM*-%z$RGV5gBilBqDQzoXNwo;zw?mtmIj?Ut1d77OX(92Q4k%^P?C7Z zBQAPzjOWVLUFU>P4!PgVZPxICi}jryrN#h1HIZsA`j6QD;P zl4kN4`&B4fOMq0Vd^8#X1ZZV6AoJN!(4rwpk-ew+Y_Kz6@T zU(Y;-i+MO|2hZZO@B>Sj{RKg3QZ7RB*^>E!4>+6d;eAjPLTKrDCB|D|3BJ#tDciv4 zVkOGk@G;xEl|yI@F@>6d|feoOZpV->EBiOlajf0~rK zpX>$8e9~%LN*88v%k)FIbsz&W$Pam^ZB9EODf9FIKg&TG@8zenuHe-tP zYYuhd{QS+OU;NouJ_kN`ZM0EN7bld`vqr{fA|gx_3IH0nDpcn(P^q=@ zs`Mb`A$XtD*Z};s)9$iOqw>N2bjE-&Z#BE+|3)6ltWN5IV!eIi8wjiE$m-31S0>d2 zoQB)##v*_b>ayR8I`#DIP18Y|9AK zo8G!LCK{JaVv3i76AyW##zvSx2_BrI5Gub4Yn)DmlW-iTKJZsDHNH5-7~Qc2hOp&Z zl+S87;ZB&gL|)gWZUOh;U1My&AlEYU!7AZS(Lt9+px3ShnJSSH$OF?0GOFY>sB ziXf7Ypdm|b!XqZe+k5_$iFKMSd4%CW#QJSFPAh#oq0@qBGx8n|e zkzaim)*>MbeuoGAM`qNuyR+MN_i)xu_uA_EN_+V5LEB`Yb>sR?9MoAm+DHE6w>_jy zg!Y9@Dyt_VJ-E9%~+lnhy>ENBb)@dtaUVPBG&MIwmY;~o+*LHN^ z7TJ^s=T!z-ZnLF6&Zg^>J1p zYfNP`oLI;Mh~lG>c0?LdSHfuJ=HE>Fmu_wTiM)N>-R*sThoowfS55|u=uLf5@ z#(5g;!Fqzem0XrLr0yO!u1h|3Pf4yihfGrkgbGaW>nuBJAT8QJAxT^4fQow}vtxs_ zD7!O14WLb=9n&d0p=^E!X0}*u#CSv9xAuxFtdpc}59;Iu ziUZr0+p=#z&0Og`aUWCQUj_C)>J!)ldJ8F?b6XEnT*+*Entej zPsj-zJcM(>U^;O6ArF;P77T>?YICqTdQxqrr`VT~ze2SK<)&<bZp((Jzq0$S6?ZsPAAlUv(p5}FBYwWrD1yb5KZCMuMiJk4y zlR_~cG->1PgDY6Cx`XXv090@P7vF*d@-)lR!i4zc@WE}gwL@!bGwdC(E_U0;U%J!& ztN;D~iZ13MWgY|a5KOJdPpvRM6D}9(lyFCT51Gi>lfiy8_Yy;7m>0B0+WGP(qqfK7I-rE!@xP0Ji9O`rG zszbfW^}x~7hZnD)ewcS0^U&*mI*1#5o~J@@d}8tFZ8pahSjv8cp@W9R`@mni#REPB z4U1a`7a8b>rb`%xf~f_o{?F^u_YOaL-|rLG%X?BpMkn5bFwaMQQIHD3A&2x8muI9Zu7j8NM0zO@^QRqDec>UISy4RnEeUS09~r!yj*M7K{;zHnL@lPz|VLMPZ$)3ODo^^jkCD}3!( z*v(CR%*iDAL?Umqz+)$tESCk&76-U}a~_^^7=$C9qwf-ya?0c|@=5=T?#OffR;$#1 z^o<@Qq2yz;s~p#&#d**Vs^B9G^~L)1^~6V*=mU|eqCRi%BRzd7hu7|+aDHBkVuf3} zIlEjWp;e!5tV`Z8g|W$m;>7UHvT z!2Z|zlp-)K>&^|rsv!TganhuW#hbESj{jEp_t zz2hO_Cyk1uC+dua3BzBQ(_mvGjC6URe``JZO7*!7>#KG%@()>>-$`K~YGn*YLAUYF zxCs9f4kf?mV{kjfS3cnELyevNgLeP^wzt}hcIgKz{0{#K@YFW9e0K4T^AFkTA5#53W**3O 
zy^;6M^~fXJsq+*^$nyYN9~_^yufOsAcJJb-%{Fhe*~WI8Y;CsJzxzge_PJ-RZAd-fs)ErFHD&3FG1e#;Z&4;tc;)f6`A%W*c$feD8a2x3j%_?HB&1Ki8J0Yw+!q zak2~QciPF}L7Pl#*Ra@QCw^^kITS=cR3zg@=rU8jQG(oX?qk0X99PT!sF3MS%YWe`Vq7SPFKzgjvu@q^-04xr%*o( zSI%B)pHk0<9V73L2tP`Ga~ugn-X9h|iht;MrCUc`&chRNKSBK;5rUQVn|F?4*=|}M zXW@_#1BtEh(B^5A&7t9;9cH&TY)j@^%|-e-zRSVx9%*N7I+@ab*4p7gEndz7LGDj| zfOK}yP7lTlC{wCh!oK*H_G-Q>uYGn7N*sNdOrf+r`ya=6PP`wH|AhMVpW{FIcY-~m z&0YE_)nEB?%--%UzL1|TsRd896}J7OyX$EeT_opAEBbCVrgNKgZT9Hj^dbCSJOoe= z1*zM4s%1Y#3(oN;p8e5aB9j<`V%&y9|>Z8W+1!p1tM0B4tdUH<+nLF_Uxc=3mEc#!0u;}tG z%aXqAqR&YlDC(|?@o7dZGufEhCJ$&5WeCy42ihK_bF53OWllIlO z-fRE*cmAM#;aC1;b`Nkt+3h>mvD+KLVQqa2zs3FyeVNQ6Go%rpSiiu9pDQl@ltmmXGpbg zlJ^s2vx00V94pY}qS?jM8Z*fEBlPSTBxS`u#~Tm#+HZdAFWVRY;7{8-KloOA?(TE# z`8#*o*7gSetU9dkAfA5q!v5QS4xNNPcPUU(`Yq~!FZL;|EcPd;(^)_r{fWqOt_SK| zDJ%RjX;79KC&jKPeFE)49baEtg`Yn7rwvp0r0?FfL)-n`FaCb}fBoZs(SH3aUu*9? z*a40uFVe4H$Hl+CzS5q%eyzRq!rk`CpL(^u^3vV*%+1@`xyR2W%qH*#I`xC&BZ8!h zP4utyfz%IoyoxT;<}!E3pVdf@bwyu8i`T|+&sjY*!g;fbR#tM}Xj>L(rbBQ1|Fj3n zM3KF%3w<*I`urhvXFQse;xBTi>fAeC4KB(@AKKXX0PtD9T9?mrXuH;J!Gcv`(2;p` z)6&jOu?}Pn+!`xc2wKD&h^BegR?K0fO;QiKiUmsB9X2lG31GTN29SYk@Dpr)(_fn8 zf6eF18}U}3xKuBNEsfEms#?~CL(wnn1?iS)=h`8Si{mAHBF974DgV)DpCc$&eOt#Z zzEpMVx#!!~*6o%ZgRz&BHT@}QV@&9--p*-@qZMn3hVLK$TwFR&wZmdnvM#$@gGlq4 zsSfi)VxT3Ce{r;ZE%*uG*iiJJHYVnIz>f^AOTAK_hRjW*4UyMv{>itvqAtKwbs&=> zE-p5I!SO{M$JIXkcaG9JYk_Ya#+`|rdImr9VB2z@K{B$F9_trP4J~o%iD~(KpmWIL zrEQQ_2RdC(PESi7>UmfqzhFpk%-B+$s>6j*&L`!iH7Zm6#FCa!xDa6%A&$`<^v${Q z`n;~=nZmo?mZz+^OR?}|S9H7NrLG1xNZE3&L)^@(?pB)&s`F6ox#AOoPt`5&(w4c( z=gd+^;S#+$M+c=Let~#a8+H7Y^5iqWWz~IgwOv`i9LV@loO4Jk%ptxl`T~MJLd_dv z&C^!M79M$Dw1}HDO0y!g9qKO)8&EuR0YZJ2m4C`R=cV2zOXrEOjnJ;e9&qcNiido( zuCgILuLmJ&X}KJhYVH8=S{4A9V&o{O7^MEmzcS*L$2$dOHts!ZFnXB75d%1)X_qlE za6uDxkl#*9r_FEaxw6zw*tv)ejKcQTdfUFa**4vtdBe9#fD%WG&Ue*GemYGIhHc(K zZYG=rz!9N?gO1vuZ5$=&s2}$TDRhf%m{N>NLpQz026+?tpXaSa^CaDv+!WlzB{TP( zAC)(kRgXrr01`NR+a1M9w@kqs%ICcXHIo_A!o6vJ&(C<@YSh?=#tvi1BZ2&pqlyBb zZ1x6!hit|~Y#754{5^_HGZc(@gkXh`(%$91ih1vKG|1|~3&cu&3T|MTi%8?W_YA$> zXKO1uAOGSA_j=>(1|L%c?bGTa(89h7D90gt>jWhcX*+gOX1->q`er6z5iU|nG#c96l? 
z+)*H(0}lsPr%omV$3d8F&o`(;2Xtr9MJF!S%BofY+c8!rAw9N1o2c!zI&zCDCwbFZ z!q5B-=kT-AG;|4_H|mNqiK8HQp;hDPglOqQhlQcAY;+@a;t0hC30fycz0qDkCv+KX zLx$KYs;@SvPLzDcgkY5|xaN^(M$3cZA2~XmX&1n&w}U97&GMgQVT3;(IC9ZTl_H_) z5Uxi-e=-y<6}%p{&a!21DZ}A8u+&!x3#>ttok-^~q3XPZYq-3>#NiSq*A%~6CiV97 zi1*sqS~=*I_?3MVrNmTp9crySQFr5ss0(iOM^~+xkiArFXKiV#yp)ylryr8nggkig zl04BJ^!}>7&dNbzB^g!*#qLk z-)!qeUEu&YmF?u35FO)Q$IhKJq_y;}ba%yeN_pX=S#tI|yoUzaTMy94Z=3Lbo~PF|MjxKQi$x9XXQk%RPR-~&7dN#?Wuf^o?!^Mud+Ph04nIvGw>wIJ!h0bd7PmzLF+{kAss(=BKC zKWFWQyEoeZ?!W$r0N!uMhwos|cC!Vz6Kd+RlLgijXS_S3wE!fT%Y>?jT1)QaFq=9N zR`C;_K~oaB<|L<{M0A8pcu$=5lj@?MRSLtnAECqR@=Hj)SI#3Sv#aHwPQMydq7MsK z>-irQo`jofJUYIYskXD65e!tw6z4k;+qr0@J8ig5Ks8A}wl;iS6AMo4DSd zOElG%3@!}FC${jlo!nT}7h0aC9opBpsM5tbuC+Ns-fP&6^;_HRyMO$(_PsyV{~ePVl{)T=LBxx5m{@=+n5mUYu5Fw_>=;xJkSD@WJDDbZ~$_ zw$fgE<+XP6=IwURZ*L#&f$vf~@hw*R8FzBpdHA^P9qhIB31h7FHDEgK!dCg7;tuM2JINE{L%k1XK3VLv<^3`X0E}Ji1bLnOx)pRE& z?O*I~eITAmK7mkA^)J=86#hUV-bGz0b1EN9r%%==Cr*A?&Z@-UvZ7(#H5LUf=(xqL z{`OgOq3k7o$}0XY$EmtS{pvWR7i}xZ0ll-&)Rz@6-yn}KiSLv?iZp5^pY4Dk{@~|s z5Js5Cw=MF9II<7#s4u~`A#IegoW4nXWC`NP_RHWTUwqi19K06K6jw&%D?d%X8CSH; zIjd$^hvU04E`hb>)Aps#g zv9TU$QNN_8^uLu>7VtCH@@?FyZ_!h?IvkA);HP)HgS_Yi%!a62aXO8+A+3`YGW_m4Z!(P z?T0qVZKeGLfI25nrAK=byBM3tcoMviD0hV3|JnDy-`+U`Uo7Go`R~5^YCAq~7sLzf z;K$mF&%e^%{L6RpFmii+v#qaApej96ZEa(z{o23zz4i;g z@U!jCt!w!8C*Z?)A-G|CC$-QbV>~eGqUk|K6RyN9&;jI6T{1y?kPPH4#i$S=_2BiB zfd4(ihm=htxN;<1ssD*kxYVR~p%QfF<$AD@erR|q-$#Z~e}|L)-0(Eq0q?1EX2o;S z&ja4m^$&QX9?wtbxf*^{T7@pve)n|f^`yQ3sPHtMBU~vbM{3L1m5k?rKiSy8H<;!A^yHZKd6eBQGY5F#__y%e$3E#hcx^wZz4O|B z)bX7Cq6?mk%dSNjo}FE68Mev=M?S2s1=>FF*DjVVP2a4|l6uEj;-xJ@p3ZsM?@7C5 zq{AJ1(!QxzwrR5*a9PIk(j*^)r|^?bNnjX^mw)<>72t1VQF4qAs;w{|b);W1kX+G4 zcYIH6C->T_8vk?VkDerXBHK~8^UIntK1Zf;*+WBLXlG`As*H6A7yjV?*dYWa_j#)o zp5|d>9(Fkv)vqdf_$i(AY4GFv^=s`DpLnf(`ct22cb>V!{aRaFId8XS7j28P{o-%_ z{@2?-`Inz>fB2;@z^IG9d&3#F{E+iWJ2-MDywc6XlllaebKowxJ*1!Ua|2)h`k%Ld z^=rS;{`oI|q5b;rf3to2hwrx&@L311t(6()i*{pmxox9Q+bhJOn-l1nBHz5_?}AME zqrSrfyoK;h|IrspTmYd@ z?d>1F8{JW573Jf{yKQg(ue?Z5xkf7|}- zdvCQv$emKpWV+O@Z>_g$>y!5UjjeX)=5~AOxjXdxFSQq*zmrcM%%<={<-sQV@L#%? z;jIf%FZ~$jsNt7wj`Z5;lz1QH%U@AOZ|rv}zq;d5KH`W|2A=a=AhJmm!Lecah~)=W z|6mD=se8%Kz3G+aoh++!0KwzJ!Bp~7Zsb?KnJbc?eU2&*V69L1{sF7zjG(W>x4g0& zKCQupN9G;7;98gMFm^UJN4(`x@lX^BPT?btWR2YlKWMX6hq%@pv-dt{JHzk8k}-a& z-Lf5n4L&5dyp4|NX{qz#ZvD{f%Yo4CwYDI2>${Rqc{wH#58-6Kl5Fani!9@#l53Kk zU)J%lo^RJ~-^t?AMR4&GA6ZaIY56K%!ne&k*JYK`LZlbD7mwg#yg1CSi$^0pN664F zd+#;rfm>t1@v^IS1kAbc5Qt}pBCoc*7JG6m9+QTBTa=&h(3g<%-G9s5*K}Aq%Pvq1 z{}rU^_5&P|AJEZ7M;t1!i*e(!TP?Dv_AMrIk8^&+8#!^QZph~ZrD1on&AI-#g%24On>jGmb<^jzv?=^S!5xr)?$Oz^9^ ze9k@cUk3CpZH6oIX%A8X@!kt}T&vqD>mpX}jjOSvGEc!?iS=-8o#~Ic6qoSE@gMxP ztXV9Jy~<00A}?P!YgR{E;avC$9L|%6BNY^lfls{rGrjsL>#VXwrF_{OWeV)%I}}|J9+994*m)! 
z)8rj#;b)Z#&XJ_@8ZacfJAo!AnAfyYx*>zGH2}F#r-Tcg>1aq+wez+WjVxu&8aVJ5 z<|IA12*~o4HFX1D2cZl*IlBRnXQG-s)~mdzES+q4Xcgw6oRdMc0m<~ix=78-Nt5!k zp@EBXGAU-(^1-t>GaA1kI+%NJ3a614cE&n#A=yT7j*$bg@ZYkgk;{&`-fU67HOzFF zE}S7z9avl2%!G}TumjV|io7(!ENlROS6jJ|Ad^u9I}poxFK=zv z(rC<6@voyD+BS=G?t!v1^VB4ge_Q31NQh8{PJxrG_d0n&)981^isMTS&DkYB{ zCh&A>Y>%$qmydoEP9CnTqO0g^9BOd@-QkFM%J!(h7CUtZ=>+Gc1=^S$68EB^*tNs9 zk0KR(s3+`DpU-_uz?IPq$iRt~Lo|4YMr9+7)&Amm(8kkdXkU>MFx=MA2?*&muk{C4 zufYi_(niIv^t5=N@?1xst^z*nj9)>IDT>68q2sQV7Bc$SzRGGSM#>E}g1u)Ut&%!B1qzYhhxPW?Tx<8FM($2ee^$KXUs`WW>* zi5%8pD0x;}B|L4Htx8a-tnQ0u`EQ;9XTbJ8nYgd(v=7p}FE}2fzT#tvPq_UU!Ry}E zuZB6#0;`{sr{hC`YWr1B)o+HYbfWLxQ>1K$?F+pSM6wAZ79gy@8ne(Qw5`&iOh$1o z9JE=`>$UXue#(*t?S!`FV<)X~hyd~OJvCFh;%@3Um?Q^d!;$zIVHrk?ZUu;g= z{`(Kxw}1Pq?fUw9d;X=5x8=iwHd~*7FE&kkfdB5+xfd?5>~&H&)s~s z?LBzd-umwM+tKdhcGF$xuy-rijZMasRFF64eJFFLyx@Vl17;GOcIQrM)E?d`c+bin zqEzH|rfsxMaO*dh7qFe*dFSo+wVk)xp8B?c-CMub?%aKmG3!BFnatWJKlv$qucfyC z_%Jqg!dP{EI^kN|V_O6mbT~Pe1sm{XeFLn(XLsj*`}TLf)&9Xh_`CU(m;60P$K9FI z-JpDH)x}HdbK1H%OUv*l>JBlKUFE0l4PKK)Ey*H41s_#4CJFsoof-EPu7Wn8O%*u~ zWMh$ep3LtU$Kl||^Aq9Hf2qU5hPt26bB*wH`Sic$8Anea_>B9H3|$9Cy~Ed;<1x+# zVRD+|gn+0Yh|t@{2zJ7nLOe@aD4(_udm*JFhs7ApJyUsFT5Zq(;WT&Bg04V z@9BH%7>B{6oUPNie)QzaxRJ33je3WX-}w#-1!wp<(Y*|oIa`LtQqa6M2lklu6871q$F<_asqrIIh0+{s$0Ddaq{QRix@9oA``O<~`ntjFk z_BOU@l0NI`@UR{3>|wWDNQm87T*+<$+1~x&G5P6nVngsvj(xzj3OU$F+PDwBeBs6R zE`Db&;P>+**aQ2$DLxBGg7SG5G>X5rHhlzj+rG8K+J5C-{z>&GKA=tiL)`Ft9LuNw zhJRTc%29rVwoMf=PmQ_k2OQfLuggwf?dz>CTO3<=`Xl`UWpvs9DS&j*{o$PBe)}%_ zS7D{!rCi3-{o#l@sYvCSArCa+Pv*o(Q)Q*w9@{kMS?U3(d?t}P1mn(8s?Q?6+9L9m zj}|t+pRdw~@ojwhWlg@3ygdU zY5(}2{Zjkye)B8sPrmb)?cE*v>Coic{)_Ntscq1nuFodznd=+v#b@udS6+Itz4F2f z?K$M`>bSCey7Dc>FFP3n)cJPl$ZinSVBOkPeM`p) zp2yr)*%0zr4?df?EW6MGIJQgMmph0l`|OA+@wQpuWUi5W%exDT1Zls>4xH{+s@L)| z1W<;$WrMcH@ojuaxj?+vK7iHD*CzWMPJg@RD>-w>bjLZqIF&`y)ax!FcVBshK4BZ% zt?gWBYa6rpq4o#TB%W1AwX?`mBIU7|^|SHy!XEO|#(|Sr4Rv2P(Vl)CIw_BOas&K@ zWAa*uaFq{uLuKmVI&%r?4EgfrKj*ytzeIet`=>6iO-H6<=TsL0Af&){F;wkPDu0D- ze!xj9@a1zpn-kr{u9M~%DG$uSS5&>0YBVsmNLwvs`ailr`jNu*kpG^e`^Y9Pps*+t z-KKKcY18qxvy+RG>3A+C{Om}OxZ=q;ET^2@;b1HT2<&hR-3?4})iM&VQM7*`*yIet zJ#*KA$fl0+Kk8at@ZP(5zi1m4cgk2_+F0yLjp>RQcoBSvx3A(l&!;+m=JlMc(HY{D z6S(-C5enyd!%yiabL?u`u&e?GaQ#cZ4sR^DxIi!EL3G*v16q>a;aV(u+2tBq{R^E^ zY+in7duf)m;XJdpXzYTeiU-0^W%*Yoy*zoMpX!@iH{r|~(_IMQVW%3ca(8fTK>C~+hPr(Q2d_)C67hV2m7 zQd(n7_d2U7H6*5Soh`V6YtiK!uqO=GRm@WidKx3w)p?k&j9aOh*Fh5CRvc;$!RAQ+aXaZyS5BOsDOyLodjj1(itXHNbEF&Y?&c93war+41jut+WACeX^0 z^e(3il0?F?>VXE_^x!Gaaw5(P0~f+g{rLxto=FOlW0XPX<{FkPhyi-r*=?Q}v z-`oZb*v~+a$gG+~m)!cxx9`G#|ENkf^d8*z^-l&DV$<0HLlg>z|jo63Q-Ex8;o)xb}N~%29wu8wdZf$)w3A^}f)FhCuF z9$`9B;hPGvZ$?K8$6V(28^w(ep5rK=p|m1m&OrF^prCugeua-!r#o#l(X^1d5k;`NzoLZ@&n z{R<8Rua!l77vlU2p*@5OX(Y<;L0(|v#8)oc(FrH|p5ofDs#jf7M}#HpF#z%5-Kf*D zWh*JH-+}Y&;>YQ1scmnqw2iH$wzav`{+s{9|E5i@Ew{tRKWK*s5Arb3H|JL14YkTj zX!i|KbpW)et9Aq^r!)OW==F?lh@ge2t7!MOw^hm{^f;8KRI=;k{5I|T43h}m_1X< z`OlB>1+WhD^vfKi#oritEGs?MGeC={Y1{{JIicIB-Po`5*V%w%88aKl!u=9A(?<>?cfov$OQRV5jL& zY5KE3vh4~Z#bcXXbespQwBx+3>OH=KZ$6ryTu=mED+wJ}v zZ?&VhciW4%ZkNvmo=0vot3QX$UCaY%Wy3*w`8?QV{I>Z2;>6jBcYTEQ^%>b0+OBU9 z`JG(wy1sp_-MV$N-Fxrd_UQ5BHU+;m##!e_XPH>reX!FWJb2KitLtsLGRb#zeUPrL zwQl=0zsIkCrJUv0;n99OrVbz2x&V6>Uo-v;>Dt`t!>aGKU(z<|bvh9wQ%~Xb)i;}+ zaC7@!$4lPJ!|V(I9{PJO3Nya?ZTlYag6H&w)?pvw+w?AG(AU!*$HqD-imbAO72NXS zY3V`LJDa(q+-f}x1vB;1t?>QafWb*#?Wp+M?;_-)2RJTz1%aq&{5s`iv*9`q*uh1d z?3c2jQ#!>#Imb3ohIq>#BR}QnTT?=5dKgfB z5U3DnM_l+xBHtjhJ=k_epX_&*<*~egh2fcgtq4W4Jd;m&Qax;=zxIVW;APQSzI1#9 z(tQY1iyc--z3!Ep<8Ra?e2P2?E|SRD7>u$2 z8GNLS<_n!5M*CM+jd%Bj_<`h?ulh*plsJ{%YX`vr`Nj{UOvXm!P5X&_OD=GT>#m>j 
z0DX$w#k7Qv%w3?eLfg532l&XOmG248Wu``Cm7WV3g8+8#dK zZ*RZ*pxwW}({>&m!Q_RuL76GMn&E?7o2<9jo`1f5>ebiUD)#e-fAQV+;0JHE8FXD= zo3)$ko9%pezpbw>x8uEi#(oFES^lMcbJ$*2Ii|>F;@pmW?0k}^;wq1q(A9f?H#vRZEI~powsk_ZL`TXvOR0h+<}dyYM8f!X|53_CzT`n1V5-2y z7P+dXk$}M~`ww+NnjCL>=0MJsuk<7R7kFeYf&ZKnZw1unS@~NEIp*Gfv*j?|RP?-x z_IQrmcrp*?;L;1{+T&_Sq05jm1wY5rL4W^=@N}LJDf2`a_4j(`@SehB4(F-!JpYIv z@SdV;j)ri1TI9%m_&E;Je&qetBR~Qw_c>p=HbfvlD*TV+-yDah@w=MFA$m{T*EzbR z{KLP_6D|wej`KX9<@C*nqOIsBS*`s*76*{e2b?aH$pa6@(x->JS-|Mqoh$f0R&zw0 z-FjmmJK$FC+LE*5<80-Ba(_Fp@#jk3?sx@&#(C9=EPxjtF7x#~x- z$DDl#m+>opnh#=QujLo~O+QcB@>QIBus@Ns2!nt7H`Bz?{B@Y7osW$nUYURaIB^X= z$|b%=K?w|Dnb*IOQe}HEEzzU{qUIy0PdOsQn*Wt*U>T|t4BXfh&Nbg$(c!c9vTnhU z{+9dT0-fohsLMBcqc)!Tv+s3>(v+iKUkq@;=icEy_IMxuownPzw%eyZ{;~G*t9RQA zFWzX+-&$*TW-IO1?7ZDtJZaBuphL&4{n1xmZ@>JTzu$iA^IvSg`pdu7{`GHusr}mT zeWiWnn}6BfdGAr%#s2HX-Gtsv^llrhHo8ssM z^)%-?j`8hx?PVu~>`;kMaE#s`x(FFPb%)kN{Gr!h|3UlmSO2vA@~{3z`)~h; zUunPg1$I4aw9jkw`pJ&Z&?vNWF-UMV*(=8jL{iL`2=T zEQf6(J`mABC^JTlKA=@U^%sZ#W4-~$OG;Or!`3>$A_&7dfV3a&TgsD%bIR!h>Sx&= zK`r>{$C4&p@z2CbaymU=#xH>%pDDP=BJG2`nMY4v?y)J>p}k6dq=z8zFhNt1hlAiS z>_+HHqY9AltQO|z(-waOqumKy!J*56Ma9Ay3!WWIy1S^?`sID~S}FNP-^!d4c_ zR6dd^>H7JORgd;hvjD~K`^%GjLWR683U>_VCmv4W+uHV4+t}Q0FTVP*cK4-MqqjTg z>BiO;I-@;+H{eHg)S@h$7lckDq+^ax@!Q}%Dg7*JB?EaC+h%d;${c~9>GGjRE;f7 zTQ$V7oP2d>1fNsLQzO(7Bw`6*fjE;)qfo6pz{9FTtK$}6Qg8YO;p^i9 zApW#I0ce;-pWfgiazn|f=j9lErLr-k9_lyVMNDK0o@G}?{)!VMY>D5H&$t@Tb><}y zqvbP)4BUK@C-^&m?Y(?Zw`d!{vaN`RcxLRXHMc$pF@JVbA-(!N&Z&>HIvAc?o;Z3C zqN$f3wM$t`r$Uf63lT!2vZt=xLq}wz48%WqDQb4Ja4tPEf|BIZ_J~*V7-7H&OQj1Z z#7X-hF6t7t_06Tf{Z~HuIR>?NB~ao8Q-_slFu0&V1}XEy1_g#5R%<#HYK%)^MXaZT zB>qthh{iu|8hK@~x-fIK8=b~F9m)0U+im;Wtj*S!+jO0Q*{TygZc}b~7Xs8aK>7AR zh9#5NG)!l+%Ao;nCII9{S7sD6Z^9$g@Cs!trE#(Z223e0uVt*`DgVd*V(#~GBDoVlyA_KL>ocu^#89oag#&vt|tu8iaM9Ofu-0w$ay zI+8ScR}xY8#e$93mF>diplr7G$pbLxilE~8<r+-r`?6b&<}ccxTzjF!_Wft-|6yx0X|e z@`4zvyf}^%El&A9D$eAD0nMrtyeRel!A?76@S=YDZU1H3*@+L~XH%MsldMx5fEu_> zS2o)@4(0*QrYq`vXivcy#r@s==)!3xX?^Gf44nEKIO}^myVSD}{L+KkiVfW0=O&J$ zaQ63bSgF@7)BV2njCO7(7KhCY!&nNEvywn>r?;N&;5vdtX) zOdHd~hNo_2>f7ErHNMfJ{c;8B)d1L^(4DFWJ82~jA z1B`Mq4tqd-zKN@-CsXxghI za!v&{uQE|zy{AfXr0^?et>L(grNRwRQp=Yl|Jyd)w9?5Br-w z#4>5m)T zfr@Mq_n2GTEr04-R?<<{PWlvuET`Z4l$}V)58{wlt@4zI(p9q7hG}PPk7*bA(b*-J zr~+IbD+dS2<`tPy&sYgub`CwW54A0Xm)aL($pA98R)wWr%ZH!fmhS_qkNjD+_*|e@ z`8-E&KjAaQV%s9ClA&_d{>cX?KOruBaY7{eAPhrK zh_mg4^3a@qBs|2vFaUKD#kQ(!ov4k@cD-Jdr|KujtIt!Qn^!$+eapFnyUcIN<42;N3@zSe2-hxnf3{giTGy53Z zE_}|n_{gQ5Rvxt=s~pStJ{}ClPR@4Pt=n7JB;WQsYyZdp=6~I`?p|wc|6w~m+{Mq4 zIqJq~>WUn-1=3)jtsX$X6EVs$ItvdMF0^;*GCWi!{DuJemP>W2qA10g93IIt64V9y zC*>p@fo$Rs(R`^@TI}>$VWYl0(k@t^5o260RMq& zcp(_Cg$v)yrkGxO$q%>I$5~#&lhC4Vk}tLs)4G1AXor7UcR!Pub`kpZhroN@?#4k# z8Llly*07UH>2pFO>GtCu`We#dSw6%c0KeEz+q!(LHc)c6K4A>~H(0N*JW@HA!YKJi z2j+4te}mN%|+z3}tG(9Z*p$j|hGsjbL_VtCYH25#YR+pt`nQ!ymp6KbG6t`exglZ9p^d=~I0;wzatl9DE}DI4zss@7+7}TeyCfV481*PNq~u zpQ0a8Zv*&XRj2KQ@fz9&%}xr+PyN7yeR>X^I`un!K~1phqt;t%v;_TGWO8)u!d3d4 z_zch~`VQRfFq8s8@4LDRhQ7eLL;5x@CXi8;FzX44K&$ zd?N>(8JGB(hYkG>_95|i>1SPp zVS76}R-ws4nz*e{u@Cln;;0S`KfrVj+aI{NBSqRZM|MbqFy*PwQ$Ex}CMl0k_R~k#PC8Dm ztK+sSZC=5k!oZ=m69>&cCP44WUhY=x15{9djq9B}sx+mP3022NB>85Xc1rv`&(><3 zrcJi2Fdd`1^M(sF>Knn@9BBuZ5TQGK;`-pI#$}n9O?z_}lH#2&NvJD6)H;CHJbWRZ zR>WUD^g5;9y#v}gHtQ7LJvumT4;~-4x8Hf(-gxKT_U_}y=%z+zp>3|Nwr?V>v?csIcL+FSJb4~p4?NKY>&0xr z3m>9fG))JsgmIUnZSA?dv!OW#g%-zTes3QeyAR(^fVqcldl$dxJNMpe-#gub<5To` zy`912XP$eOG3g;dF4|9h{9}33ZRgQ$9x%?ZkJIH@yZ_)Z@U=(CQ5)J0(H(G5PRPji zs?AW^>c;PX>5tk!`bYnH#;s?*Asqij-^n%sVJf^)=$3k&emr)$^dXb@>D$f6VMDT? 
zGOzv!a0HP4PP^urI&v>SvU8F0DI;890F^*$zc{vve{PD#i9F&k&u@7OB+h%$v0Utr zah{D-$E;I@t};&?-LI-MM~J<0#HSFBXYuG~<2|CV9#$`Y)+0Q}3c@pZCa%+2$5nXa z`U?EOEVy;`FL%(QeU@sA5~tQqho`Pk2(gt`BC^6j;84s3e$c}xRz6AJ;@78X?L_| zob%=*X|y-zdXEqHxUv7xHrlReyGN&O|NbNJTWD(&Y{$~9o%-N%|2XZ?&m_4p^N9Y+ zh49)3ZG(&Nmi>l2{iko7+W+nC?YBeQBfRvDV(qv6-ogG(+s8lf8`RG2sN43%*}7lf zhWq?EhTcD92qScyOpKaS;-`@zj}SAb*q>6`+Tw)ajJ+PB}|YhV5D zTkUuL^xN&X{@_pBm%sSs_T9hyZhN@9({|l03t!ybC)|N(k2Z*gwIh7#U3~1F{r&dV z`|r0u|IX{}55Mxw_8Y(RrS>m>Qd-mCDZIixmlMEAI$fRGNxY$*n4>@LG zjq|hcOgmb-?0CV4!tl%X1rPLR?EkXkDtyiy7d+JRcptx$xIAAmUZoXLcq-P8&GLMT zWJ;vH@x#CEt?I0{qHdbQle8jnb=h_S&sSE3ZJWdY%Ve}lYzs%9xpRI||3JIG2<(FI z&L8{0(l@sKpwHN$1$@nU8$rKnehbOd_VYp-?Ta65LiH8SK@~MhPh1_*)?%WiMfa%C ze~?+y2d&_aHiEx=o+Zu&O?em$TEzm}SJCG<$|fJ5Q$BLo*0m*>ljOW&8^GpT&M|{y z*_uoCxq@@?)Gc-Z_kpp_t>YVIY(DA)(RIhFj@3`mOZB~U(s*g$0$mn4V1y=R zQLv?_`Z33kg_k^ZLA%^t^hIVsXwRKsc7I0TjpP;^q(BZHUCvp9i7&125uK82{`Hi6Z!x#05^+Qd?VJY8z`#$gH;Y zEe2_f3Kv&x;3%CP1f0sMr8rlVPoq}}3lSYsn{I@uvWIDrP9YgWZ7^OIj&8e=yvav7 zQ^P28*`NoEl;R+$^R~nh*P~@<$^lbYamYzbRL_$KiX^LaDr$ri0P>caz$L3aRGKu7 zDp&@JGAas+d_?5Q~=>Y(RZ7T_r;)kN znlcKB!DmcT`&}MNgRdv)7!seL189mmpAb$x6MO9^G4vc$oLPu@lfCTqeGOa z&e^Quk*q0~|Kzn6ZXhfjRceVcbM7=nPGTayB+U6uS)xM-04eO3!p?Nq%Dakae-+@^FXWDp^rRiY_tnu zkli++leM>ZfPUBsmC;;ZTc^{WWCBz zrkAntt&MzFOlOX3AO0U7+du40WBW{F+{{%RYEEcwP0@IZRGaH*JCNO5ALt$!8hUtGuD$RUPVC z;ZkQEmz*g-?5LquNcMJ#e0<<>>b0JdwCkl9xE}D!?$&hzm&#C_t)mVo_D4FbN*cm@ z%T;>X(L7XHJNeXu!l#CQFJJ7P)X9Ob=UN~m?gg*IgXx9=FW2@Hz%Z{NzChB7*AiH5 zzHl!c5GJ&7N`C^cVPNuSvc=5E!@k-bMAEO5S6vZP`C1H-rovyl>su_wJCUGHPN08n z!wD}bJ!yacfBuW@#ZSCY5C0Dy=AqB|p{tJ_s4*x6j&{Lz<3PZm?bWFts#~kVgLvT( zB}GO_^s>&;5b;XnhX<1bYo5hxdEt5J6Es_w@d=(S;6dHu;3XGPO&yYievTi&buX@- zcwZ}4f$z28J;*k{w+(xdvzHxSk;_p!lzPmQOq^0t&r5^@P3xMMCkXa3cmq)JNfygP zv;GieG9mf|k9xQY&!0};6ZsTca5Fr49_{iHPCwh;p9o#Qer&i3{}Q)Lc*e^Iem&?X zE1LA+0G&x2^62N3y9}1Arf2-sdbn2B$)19UNE)qY!0_CUE=RZQ7K-&``)**!`b>_9 zLuP=ASGLQtk;zTGT&y^Cw61Nx8pYm^#4c9&366j^(La+9R99I|)KOv=vWmANCQY z*S;wN=Tbk=gNyJvRbPnw^?&ty;{(zj%~3lblLsdj7~M?QEupiPAy`*_Db(8HKPUqZU$$HE^M^{dmiJ)mNDYK)NG2jI)t1<Y@}DDb3t?mTmCfpM+e7!?JKAmU?jE$iJbVy3 z+tRe11NSC2Zh3WzTrb*<+qc@Y&%DqM_Z(*;FG17SIPP3s!Jdf^l{xvDMe_WuOk6-& zGO{hXZLM!Oo?y!#JiOQbXaCWE(Dn`v+uFK|ZF~z%iwoZ5S0&UnHKTf&(h=`+W#dbm zF{XZUM?FEVVsjZ^%BV2I_&O>_;hnhN=VcK=g0kfof28W`@qt00{?KNhX1(5WW4 zl#lb&VaT;*>-Nde@t6a7>O5}6kKeLiMi;OwzN(0)4t_om$5B6*VcT>Gm{J~%uG{_AbGnQ8I@!UxWTHS8nGH3LY%G`x>&)&Y> z?mquaww-_e+4c6qbKC8u+q3q}+G4xCGHbW-ldsJd+ZOt>xpLmt8BcFcTaN9?B7GV6 z$X361m43>dXM73D@wD^T`V_uI5e`c$L7B?t;u|T-k8|-?Nx%>C`k=bXkS+{pjtw`_ zIPdxML1lbvqXQSyO&xCSze@Z%eup~*%t%{94x41#Mt&0%Vnf}=&fR}}*uL|Fx7wfl z`RnZu|Mbt>?|=0h?TcUeTKl~}{zm&<&cFMWueUFL_3PY!vwig&-)&!e{jK)a`w!dh z0c`%fOKZHkXF0rs`km3DKs+-}TP;NO(KeXTur^IE%oW4qnG^K5(Z?z8avPJ8yw z?Y5m~Gpm`iQ2$ntwTs`EX@5}={af@v@%qwKK3k^kfwo+Sb`INudKVq&aj`=l+{$!x zEeq?3GpivF?m1_!$-FX>fAw#SMeTQOYZW|`CIGVA=JmmC*B;SL%EiyZms0Qboy?p+ ziYsUFv0gt5G}?1e?*3ige%61LFT#)pGXy#FmtRH!U>Qw&A>FoFbxOW8ry!-Qv&yF~ zuEv7rgxCjkQpbvLa_oyL0I{6qsU-Fl-12E`3KV$Zi%;E$DU{JpScwY>^#9dM^G5H1 zBg~px?fxajhCPv>w0EwZcg~yB)ImWq0hhKb1uL`Koo@T)t@iS(A8#kLrQL(Q?83Ib zxe@xa_yCjG@*6T1jS zUU2A}%b(00IkwX;H_mY+ej*2>3}Ezl@~JD10rFxTxCA!*QZGHi|K#a<=O6i^5<$7& zTYZJP@_WHO-kng+qv;vmt|4n1e7h=3<#c zhF4ku=RyY?X$GH# zQO;?sa+5-)$H;hg!ic;MOr3zr_x^p02AuLxRq|nqvdiKKc&#T>of-0soZ+bk7W{2M z2Kg98p5Om-obEfS@D;T!4urL=w&D+a}DAk`^gfXnob!jxHK@uTW?c}%tJ>E!iqS&4n8vV zvYd@H3sm5%^{YElERr#n9Zn%#dBn%t%T?j#FCO)j#2V75Q^30B1w%%^A-?l9lZH+S zxjGQTM`xVZqp?bXz&Q{SvPJ#Op3)p>A&^#}(gtJ;9)LWI@nwCy~8 z6ur%aY8Iv_e|4#3>)SeckPStq#eT?Z#E=KRgMab@9F(hXdqrosR{rwcAg%`C7^U5e 
zgrYC!{=Dy4$29X7ttz9Z=C!tDm}Z=hac_qj_q-8iGTX3&gVVhZv8n<(zLbc5&lTK7A(`_W1rPh zIBEQTxc0~YuYdmsIO5;ZUnZl1g!tu~fLZb4!&Dtg3DbNIoB{;Qrsu&v1EYn(0(n#t z&lR)=bBRHDtx?tB%1O6sJg?0cJQ$&McrTM5 z{83}#}0gA^8B#L*PWu>cJ(&a>m_XG_o4jl1f19Q>P)g!F}X(bA}P16a+$LSVcnup3vokAnJ@3$sosRq;|%613olX+O+fV zFBDA{uMQ#K+MmXYM*gC}pJ;0bP9x0G_$0<5d>IavM9KCotC5 zs{`KHoU4JHDBCih?IfLOliPC8(X4N2a296Zy?Fr+bU@w=dhJ-k8`n#@A}j94z?a6{ za?{yMQ%y*^QY@1|wyoaWEm6o89Y*QBZ_%f+Jbx5A%vnO~8$^klIn0xTDOp}?`dvyA0!GK`921Gt&Q)ism zw_`fke>nCI9}x(+_3g$uI>m-;OZ>q0&OyE#^wCd087F7;7FIf4I^wFck`%sat*@*t?I z2IqP>)3Oi>D0{9pUg8Qq`YcpJ`_|qw`48$#`d_zfb@Ir$OyB1ByZ$G~$K%n1Lk2Tz z zkz2n-ZaMsy-~&#f{a=H=wdAv^r*6Op2m8~=o_V`XDd5j1DB#B(X6)P5Ionu0jo-yLiKpAon9MEXealFY7t1fOwHtNm z#%=odQL+88wtYxkusb$)x!&BRc znY1`CEIO`8_hFmyyW<;&L>E_0aIrS&0D*n#{My~C1f(QF3oR%RL+e9Z5Cnz8A&h`^m`V{*}>RrXPGp^!VY29Q2 zKqZDdj88{nMtUPhM$%wPuWGu zW9zoui{JB%>y(p%wO)#N6nVW7MjCw0IoH(5U;mH|O!hNV@*TSjBwVc?%qQ`$^ncfY5QayqEs@D}6Jq?!j=iUD|1;t<42J zTnEpcaSr56n#~X{a}0js);u7dZF&n`<}Ll4{XTWoybvWPP5l#F=t_C|uuOd3;)99~ zc~lRyDa&V`Mqwx$_{svq9T$XcM^D|d)`bzZFsgh?$xPvvKQm6_PSO_GwQbz>irUGq zemE~gZK%r@Nvu0#P1E&-?sDP+R@=()s&uaSr=4R<)#piu{@xuX<t|jJMx;IKK1C-x^>2`d7!-zwz#P_~_pF+Bd&4 z9zNcOc6XhD5BPJA!yyFZ(+9QJ-e3N;gJq)X}7@#syX0Z>-xqDm7Dmo!(vx4$A zJJ%kE4Sqj~zepz721IjB6+*fe}0tm+9_ckr!Z$GdR1KF*1A z3TEDA{PhO?%X0JB{4Ddo33u!HSheM=y`E z+s6I-_p#Bv@#4jcaeVA90OzqaUtBWwxp#OU@!gIe|MaKh62JZA$ve^Y%!?wAXV0Ex zA?4S;_6_2Z-SNQ(f07+1wbQ)qiGSJIEkEThPX&y7rf+WltADbOabC|DIq?R0)Jq>g zXHuPZGZ&sA57(5***U(MJmu?e~O$v4_bH?QApmVe8tJEpJC&n{Va`xJl9ht&=()>b~e#_#4)4@;-4 z-#8=rh76!67_wpq7y_X*`zF=0<1#wj!oRgILop&K^W2@?_IEFgt@QOx=pE@7C zg_htTZ~ER1B#SNFI)1gBlGZkmqcTaIoHf*Z7v-n_%+AN)$=pBXGrr=e{iQziByUxDp;7jB)`HYSTL{nf7Cxz4SLOq!WX5QMWG( zCdQLiWnAaCSx{j9l}{>C^ta}rUgO8<|8D%m4mK)QaTp8OZIU7Oq92g9@W8L*{CcBZ zS$nT^RubCo0xf3=04%3IE81#f}O$?HMvMBx^x7xp}y-M z;#>HIUceo3Y?tsm=UFmuByqG`dLkkSZw|F~{C`GO#S@O`| z)suO^=gJS#e9(^%p{)4itqi~k#SCVg_~Bj)!#9JS;40%shj3c1pD;jjuxa3=udFSl zz4q;413_@$WNi2_@1W76inCNIaOz;Zj;$Au=`W_x99T+U@^p-@LQ$84K~a=(m27fq zy=en(W|0F$ptT-)H(tL1V<1RAuI(se*!)W7~o&bdXR;U204ziOSqPSK0Cp-txeoUo8OG&cckm z1pwXh9({pc+e^CJcyx#^NU%BVxcx5t#nswaT&OQTTyLt8EYuoHu?UZ^4cau8jS{c$nYx9udrWk3gAz7?P`$)~zD{YdJqI;B;fsM31L zCm+`10Ko+S@@psRG53*Y2SCvUpvrpR%F2#n*mIc~43w!k_sj zL}zSU(@EC0<*Ltv(|q#oF?3m%ZB}VAS#SH)b=O&)w=&+qhQUd@ZxZx7Ij4^Lw*R^d zK-PR~;r`g#xkujZ@%s4*gQ79E_IBWd>sKe^Tso>f%I?%J^}wI);n8&IDxm#iuNxUWT+rdiP8eJ{aM7P==S#mgbyXbxqWiHc z7t)}oXZ3AM2Z8z4krQj?wf*L5?f`U$)_D}aM;9zLPCp-#w-xCiP3t!or6VXfmmaS} z3p8Zsh>2G)co5g>K_MK|w(?9V9qE)?-E#n$e8>#~Lcc)qfRkR{&w?!QwqJ@aY0lJ_ zYwen&c%9@FJhyCUofF$#epzlp)7oiHKs9);a2I~-R#$_YV`Mw&ouVR3(cRE`P#~=p zkgG@1W?Eo5Ku{*D^4)&%t=2nruKiB3+^H{vb^8mTeW3DH_67{LMFxM`JH_>B1vTN= z3jkJhD@Rk1#T|X@H`sE=>*1t>`Y_v(d$AYrdvtU0h(( z_6zc(j#t^T4P_*Bd2OZSM7{EbBtioN19#yV?&N_j?eFjEku#a`@L+Si|K6i<|Kaxd zt>66Gc>Hi@oS#1*7iX`r&8vJuqy~P_slH+V%Cpp=d{FEa5~QPO?{-nAu~Yn_4+3L9 z%5L88_7h4DQ~wft$WyhQ%y^|nDSIr{A}RkF^-cNGx#FL}9)3H4pniv6=%tJk^VStV zhySm-$_CBz8F=GTxsrKaeIBX}?!eDj0I;%kVU}~yfq$$C5YA`*&jEa{bn%NJx-sS5 z_+21afZctj=%jB+XZfEmqn{h|*f5dlV{@zcN*e~gto*86fk*T9b-`U)#h-^ezzRKy zZI>tj&0T5svr&kWXVIhHmY*;f(ss*UMFcp=+cKXIXo4U!B$v+?VudnUX=ietL z*drd>Cma2@103`z#f@>PCpr~hhOQ<)D%!LYzjat!rPJi#H&1+rR8cNt4ej4;nc>58 z#}{X(uHLU$?2KT;Ny1F_GXCg?)YhYU`OQM-qV2Ex!8si}^jRx1F!u43bKmx^PbU@K z3f6~+exATV0VQTFKfDPKp*jx8T1X=fpkRTYK5E{{o&)&uUxS zgWUuCEphQ#ebcc&5p#jxi_i8AO4>SgCjO|lBBDqeE0_3^rQad8aX~|7)AZ$#WB%B+ zX^c-0fY;LQ1Ab-c!&2qpgbEl^RvGxVMK8b*x_@+wK|gcQ5Z=M5h+UoTu1KlZ_=WPy z&7-<#`mN_FS2`nYX*=~*`Fov?v-+US!8bjV_Ljf=cy7DKPgB0?D4n7i0_Jg+vG}Ps zKgwO?#Mb_miC>z*BY2zRQ+b7pHZ@3u6$| 
zYd1FWD+}`5wv&%4rwt&xj2Fw7n{51&g#(Bw*Zj!?KESr`2YJEM%?7pRPYlETk`Xxc z1K4!%!$TgnLr3l-Z^ucV*=yB-X?EA|kt05Q(4yU0hgS$J{Q3hpg!|icX*loZS<5fZRgsC{5QWo&{Z$YpMDiWl-;d6$V|vJb!68q zaZD(AXgejBw9ll=9W?^fXI+W2Nbh6i^`4oE&t&IVvRvh9zbENl>!%XG*l(ooAe}bg z=4=;EBNwYLo%hOit2#@U)@R@1z3@O@IW=u#Yd`jRd3~9f%b#`1i~8%adELfF{API1 zF$F*42qEK^_zaFTMe5_131c9#pnaC<$a)>xcX!vv1>?>IHl)t@S(@Ork0X!xqv6QQ zPfj_%i^9Alk{9c#YoI&&Y|VlD94$-}5C`k-tTjy;Gy5B{L#{_gg8cz7`0I=q){ z|KsP!)wn#l8mAXW^jjy;bTwYQd5G zUwMCg<*QG|#`b0=;-$UDhVX?xZ~I^mxu^$`>68m+@Kz7Q(jMf``MUSD&(b+_Hbj;R zIY6uGCv4X!K#d>BHIEIYC$>>R9eZA635kDN*QH_Rv(7hB3ey)9+{w>RettHPHxJy+ zH<2&nue*4K&(gi*FmIB&TcmH>fArBudBb!Y`|^0OcQ}6Xlb?*IpME?ZKY5Zl9K!-XAK)ARuU@?x zU;WBA#=VCR#s@$CAfMwnIM{=p%`7G`MDbGq(w2ETet9U4&nX{MJ`O2khPTXW|kz;M@H9PC4h&r!2B)T;HsGXMgT5_ngyJJwhxnbtd`)L`L3= zeATOpqg*IrTy}YWI^3aQL0_@l9ROPOx0y#LuQZz8Wus?_J)k=;(8xb^8na-3sb3{Z zf6@M_?8^Qn}-8|)I3%_<6u-tUO!2?*v1pvum z6Pu=-{`LY`4f&naGB;t5fol^om7GPpzjK)cb{}>I<&a zM~yShnBPVH;em5;_$=pwaSRc`xcA`p_eqKX0RQw!L_t(OqQ-`I_r~tw0@brDkT7UZquf138(rLEYOee`$)A^SGx|)~?{Z$Kkj3g-8xE!EkX>CoF>F1@& zGkiuLNWY^a8TT!?mvK_*pXb`NzBc%rucrQ@#~QU?>znk+x8)L7dNuX-W|cXdgW$jQgegGT4Rh58@hViCbG$b?UEewz8 z@F{ElC@c+Ux&vGd#GqwiXAFT?C{1<5KJ0gb?s~OAfMCRU%1ILOU0)M$kw=FkbE(@& zF1htw!9assyek~fw@`-N&HXqn--J~_zEQEmKukk07&4%0XeEhpsW=J@g;zW@S!Dtu z3!wN$Sd^2mjG1WHta#*K1u#_yO|OZQR%foFj)En?0QXz~00qIXp@{ziCu&ffy{k|b z=_xr=MukIBBQNBp(t8Zv$;7Kazbm)vgCU9rS9A(o;;}ru*x;7cx!+t9 zm=esn^MOY0q7b)0nG5*wOTk7aRsE&cMHDk6om!^`}8T zqi1E1b#0vq-k?8pc*ivIgirHG8yVBic8sL=rzhexrgK{5RaT|{?LMYI3v~VJQr7^4 zKzqN@5E~&~9akH~Q1}Y3yg8BScfRZhq+eW4jHWZd(QfVTkDY`2bbMQ5j{w5A|4)yp zXJeb-;VN&_9Uoog8*kcytDaxKccrXk(Kx$23Xi@G;=pNZb31;jCi0LyZLM6bTU?fP;N*kz zE9iAg-YftpvBP710!4FfI-1^U)oPojCyRu*pCHNma{^#(3wckQQ2v$)Uab^*hV1t^Jvf z9+K>*G{o0|a4#URyx=kTqsQ`GZ$KN^r`*)nB}`t0KAKP3V{>!GpSZ$<6M5dBeS|uB z7cAGbq&&K7wM7HG&jk>x2PP%M0&vnMu%7e4oqU!zcV)VCVDZp(I~9S33u@X ze&zxI>QOF1HSMjz2?hJ`X@4td{8L6beWUDz(6}s*EG(jYQ*4&8Y;?(=X)8Kwur}@W z4f?BoVWuzX?|RB+!~YULVENSfC44nKmQT`*Q}UNv-N8?r67SL`?tuEVfiEzkdb0-$3dnKUmU1sdsxmd&Q9}omk;>Q&K*nCxZ{Fz zkF8g}<(>&h1_LgrQ(S&SU(w`&(Clxc`{a>7?!XfrxMbq*N_Hi0z1i}bV-G}?r~W8H zKH{gV7izj$h4o{OuIr}jDf8}p>CxNW)*>y)8=#@#_PTvHy1elvzsPQ>Na z4^hXB@rW7bIEen#x_X=ZZ0o2lLa6om!WY#CUoD7~J}y-$Zq&B!RW=W`i9zc#G4>_0 zO#B)iD~@)-lhrG`=1FM&(@&EjV>WJH`oJ&RWL{*8JObb}|Dydk^_84T4(hvldUbPB zx4BmJR1+}O#47o$cj4Gm*{FYxx2e-OrTDY&RRG}TmxTk+bBjERey?qV7F6*9vpKiF zFi*ZasSPT3{#v(NmDl(k9VV6DgL2w1_^Ce=HbdI%_ei&(d~o78bpu}GN0b}wWq!*F zah=+xfwHHdcBrmwYhtn%8VKQo({^p)n``glh3c9T-nV>U>azDr&WsJXY5nHS8y)l? 
z?obl?jB_}q-=s4664F*c+LrqrpLaaf6JTn#a@BSn7rN#Brr%0tY+&8$mom8`zk;Gg z^5DsMOu0FUOeVz_8dKkthu2L&Ru=gK|zj=#n1&gAj{TcI2saY|(S*2|sQ@ zYkW%1%G2fHQvn(ylIA_@3mp(?oRhZ1x0ft#@`k=OR=wI%UFQhz=1UpCB3Z1od4LU9 z`_zN0Y9o&e(r=NMpF-(3-^}9zDd&wGhiz`u4jdKNm!EPTqPD}gU6q&kim%EG9mrPw z+}g3vFn)tl=~>%L9WDfDejN+i=jex=V3sbo65eKPv%5R+%}3+(=oCMCZ`|V-JyS+5 zbYcwaXI7j)Nj;1$&reRr#p%h|h9*B#xdXjV9zPgQ9=$c*dGhYKe{U}j?QjCouUD^L zjMqmm#_`GPas2w(I6ZrrH>6*@{B#_jJ{>!LHV!=RzWaFm!neLLzV+>|kMDfvJL5aQ z^o{Z8$y@mjdKS4l?qgi(V6}JV&<3+Da`YZqQK#MWjr|I(T-lVH?WzsfhIOqR?f)X< zOzf(#<$ozPk8i?k|I9zBKV?t0hu{M+`|0I444TvxA=SPK>TjL!BLf77o~i#o7ZNAT zvci)7Nzjt8((97L-DBr(==6p+uiu0&Kl4p_6iq^xpL8tgTnem?{}S{oPrY)%EZ_ZA zmyc8BobqMoO(#dveS@#O7yvy;GwKm8Dn^#^{#USE8j#RR%k=binAvG%X6`zC0{WW-z2 zpSMC8<7z+2lU*+_uorBde%J@n`4pD1BXX)aW9l+qQzpnydlHv1k9-XLj8HL2$*Zob zUyi?3oAIIQbav0um+9}x?#s6&UT!v-A9~ zul!c}ZDS~Z@YpYG$!A+((_{Ns&WK09?#oUjz(4Yo#`JUQKW#s4FW2CVOpPsUr`R3x zaeh&ld8sn_``S3G%g=;5rY+1|<-HwxO$(Inl>R8!x-W;FN0}tfL4SpmzvDNPYWvOn zZR3*1-zty)7xDl7e$bX(Cz(?@muas?QQ|>{IedL;Q83!W!v`x{R z&m=1E&^6;2Ajg&`_1LcA$v#A_<9^pw{S`pq8R{I@K$CBeJBV?6c=P%^z&5QaC*9Hy z{yb!5JYnp%l~@N}!J7xsclw2eam?f`c*vhI7_g2G-5C{yugH&I#&d|*#jo4gl%VcM z2H>!5vk;MEVk~GAe}nZJD@O0AyW~~%n@yeZ#Y^XdyzkBco5T`6gS)2v+>!1ayFYmF z*7(k^|Hk;&|E=F0WA|WOyn2nD`PTdydHt6D4RO$3c5FdZS+M1JeRG>Q7#ojYz;C#N zi?q8yQ6J?e_-=@&^q&dvr3wcWIx~;0RMao^l=dS}?HEA&$&cmG|H#9v+NkZvPd{SJ zZKQgAdrDbo<>-!0*H`p0*Ju1(0C1IsV3(J~WSK*_92bj<(;Oq{GeoO=)F;Qc%1u8* z>x540SN7Gucn146NL!A5Qnuu;Pw;scchk;}_42HA&Ko`i4^5S~1)or}Ei$F~#_a1EvJu+Z)*A3X11&TUs-m2i4iBi(gA&`2d2EfaRHBLpeyP0 z=cZea@)uK9&Sl5L&kwX83azQH+9zWw%E-zJpZDas z(yTA&LtD|`ke30uhAi@}XbO!SW3kEt=(&3S7EX|^aMkr)I;E?iXf^fz9JUxLLr35s_=8wC{*9~Dky6!j{0y+o&O0fotwJ^fLRRRI(m4E9}F(Vi7 zs;~?4UE_!YDys5v5EOoK^2oyNq+MBQ2T%_>ajdEZ_TsmE-SWr!NF-W%_}|DADqzJ>fY$6IfIWt?f_TL!3}4rO(_&LE2* zKqp`Fl6Ug~=|rKxHZvF?!GWl04sdz^>8=&NvE?ox-qROI$NE+^wDsH08`xcFr2g22 z6PI>!8n8K~k@VOnGVHgk#Zhl^Xe&iy>fl}`z>v8u9G=Kq--PwL@O7uH3e-P%bPF7Ce7xnW7T74qwikwG-3H*=>{F-7LuuJ1E^$>e#;aea#B!q z_M2_yX?{|kcFTQq#Ou}->56WvKlCA)a0bd(i4z^c4{H8K5OU?V$d6uKmi+=K3!A9Pm3q zA&(^|ZOC#qRq{tiC8FfKwzVDmvmc`=vTKb4ldbK|v4s!*>ih4E-~6>-86SM|hvREs zzdwHUyI&cfe)7lEc|P{h3*#Q+6<5$kFYRls4EvHc1KV(gobK{h{S$PyoX98WxAf;E zkVksEy-QBAka99Dgb#0*-nZOuU6=q3%C$0h;68N3UR!?AE%OuTt9x9Ee`{y8gGg)& zmNuNsA>QczM|@d8XGs?TSWoH)Ps@42Bthh}#ub$Q8Y@|O^Q%u1X2E3#SawjlX?=Cw z`o4m?AD1F@J|Q&U0qU%YCx6fl?)YWO_kz9pu6<7GXnO<=*V6BdnFEaJ$5#`ulYm*S z$?1BEcVM{ypg|kr9(eAEkvgqQTKqK)fb~y^+`%Qb$S8@ketF87=h`lP*4T?aIQArR zgC_2+)r6vxmimLWb^B}3IDweHg1&F_VC?KY7;8HR6ueI%beJ+3oXvL1Bp3P|zkq1e zz0y0VpT{c#y_@S(xj`n}_l?O5{1yGs@zF{8*CUQ+XZ4`}!igF2(RZXzz!tP&9YOqt zvUT?%^*j@!q{*N7*XYaC3(Bb*u_t|q`p400eVt|9`Y?l9@=IIfo%5+%X^T3)CMGDD zeg))F3(jpX7Y8LDblcYkzip?Df@cdkIsxG>NbYc^AMy%WoC<6U*WEp zQ`7W~`?M#?_H(wilXqdVbsXqPW#fUoQ834iwHp^cZQ7}ykCG2*UZ4jZFIZ>Q-~ADK zKyrVB9@LnE{Y~+%KeRrbd;D|8O}6*+)12w8?4L^?XB=%Sr~dAjs=T!L>vr}R`ID4< z@SgN_+5_E@Kk>ORJ`eCIC*+McH4@@oyy2%1df}hteW6=@%{WUsrylC(QfKU0yHp0& zU-XrXik9Rj&+44?1TuAA{DN}wsZ^6h8|W3hmR`rR3fX@6R=>u1;xSY{c#)_5O_~|H z#JA=G3v~#(^#<{EHZS?(UnRkTy4C4e?7z}qm0gKnJ2jT@+AvZV+tMBSY+Vj1j|RdU%s_A6Kq2o&%bCCka)F{%I{*Q^_`u#4EE=@We14G3 ze&^jA*5kZ_a*%5Odd`p*+R+Q~roWV4#_=J!WKaT<%sL8YkF8Y`b8S1xm$q5s9SkWn zs5jwT-xwT4f5qVVoV<3SqENePNZgRR z%hnP*XoJ+hqN8Aa{SZ4S8oHdZ)DHcRWwE+yTSV)4$8li%ALt!#uHX1we0WmFATHk$ z1$yjn-teh!m?Cw&D$Rbcf2`F6rk}0JTR6sTldpO!=O%B1i90V4I9C9TwGdaDoa4gA z!QtX?=Of(ye?z@QzkN{e8*hG!Qv2F~ci-&S&r47A9sKS_<01p!2o=t=kh-xf) zc(6AfJh(?)XK8aM(=U!s$JyzpEGCn&#qhCGG+>Ei=TaWLLZ~x-A#-IDO zUmd^lE5AJM-#^R)&1>|~PZ>z#?!E1?wOQjy=T00`-8wg7AE1LPp>c289JpPuV_D-P zWA04c3TygJb)+t1TdlYO+lT|QKw`)meYMT`sru_4DV$yK3`oqH`?QJI;InLmmx`)C 
zNLpalh5rA1kO|v31SMq^zg69-PgvrA^Vso=jx&wR^xu4UHcCml;Vo37^b zUy}ZxQ@-q2_-wh@d)k8758Uc;;s^1hOyOxiT=wrdHj+EkppPFHU_y7WiHjG{#?8@7 zU%MRVuaEP&)DlJ{SDix<4HdKk}tB_b>T2>Dz2mcN*B( z;T(B{R;tKMcltSea{6X`^v(|DoU5&eHv#BVo-VFWTt_+Sa1lUwL1OxY>Lb-9eE`?q z0l)m4E8}E*OV1zK1qi)RR5}Tt68~ndgE9*r;H5cyqxXVy{I759xeVo_&j&2v*(3mAU=R)kng0`az4~~h^PVnJA{6EQ-|1J`8Tzz_RTIc8ZcNf(| zfBAB0RE6||{A>vqj8LELrQy{M*3v*#kgUHbTBm(TqepNUOZR+0eLACjSsrc8pS}ci zq|2jerSB_z9HGVdC-veZW3u`*>aXVsNZmmN=aMNb?U-Zc8ucHRcL4?!rC&CWl>^y@ z419NAXv+&+*ox==%yoE=-sPn=@@0&b+iqdP-3#bcx`f8#5 z(?kPyWv2|NE52vyJng-N&}yFIuke#6^$Gf*@~2fF5Jf6TKg&CRqRek$b8`P6tf>eiP-|!4L-2 zKfmSf3U61^0pz^?pR*7`y}${fYE1LOYEC=$#lgA`o~H7m!| zK`2ccdGnn=@+;4F2p#kZUYpmk)dE_4`@-*wh%s-dBtQWVo;q^o=bP181y6zw+#n|_ zEPViHa0rtn2MxIt$e>omD#wL#$V23JXRr;G#Y@S-A-9ZY@`pj4bU3h8l*)d?6_0=& z_?{2@{U#peGhij(zS~HV*!IoPfFBNG#{HxRj2f(OnXfez9Jwu+Gq{ zQ}|y%)uk(CL3~BPOb3-WC&5!&kRnqD__iJ8vl{|!AYY-G^C&#**JmVX3wgDJSUZC! z+QS6^<_E9&??s$>cdaW4m1HEa*qwy2JIBN(){vsjbd%NLUC&s`!ZN7IVyB@ z-+)F?mhDW%lVB3tQQ!4x=VwmL+g|nHOt}f_h^pb6Dgmy&r@k1>8En~MEZhOawzQ$B zzjeTl)kO~R1GZ)CTl=d(P<_fbGvT5Ag}9wi)CSbQYA4(D8lLUw#b3_fj?$ys4qHwf z9>pzhzBO0%_|4!NL`tU<*mX4U(eE38)?iP3Xj$&7+f}}fNsGid8{>=J=FGeR6-sn^6kuMImkJ$)#^c92#;aPjCI*T`Tz;Yy|T-$1@;E(!Y9j%ia z>+^w$ZBBvGh2mFez34?q*;-lT$aUF&d!Mu%Db{W2eHWieuWvJ%=71tW2lDb8yS>n( zaLhqee35OeSgKvcA*dlj*BPHGT?j`JsG}%Ga++qFM$Wb${ao)jpk~>o@{_?xF7nrJmFXkY z+YGwZT_7?^p3bMtNvC+wWx~LF)sB%%iK~*Q9tIEBc&3@JGPO=xPPxi^+DzZWF@9`* zx+HeLsl6&w;wB$5#7@fo0`L|6t7_r9>?mKJ+=M0mt`D6I?HVNa`u^1ChLVkK zkIa)oe)}l&{gHQpEAnKOB)_qT7N>X)?^#NC;hdc3N=(cltEstf-bO>mKwO~ z-_ifI{j@piOkNiQs2B7vl!;&2p#R-D$hQ8Qdpl#(@11X}f5u{y9`fg4r6#G5$i+A{ z0~gX3^yS$(#zlK>(|K9fKGZ)wea&E;0r>gFI6FDbo9;faOCLRHmOd8}N>?u=RX6ov z>rPl81^YeUa?(Glr#|Fp-OqhwWu3i4iMnqemM#3D@jAPWl}|Un--NYZZ9V=1I_hU! zU$&<@UekZ2pRiu=1P662@DrZvKWl+u>B{+ER-sVj+Y$^B1um84w=`2~**vUAjK|18$zM<(duR3-$<>>=!9~@mf5RX67cfu#S z=26*MkGdm{?EVB@PNErusjKziw|up-aB)@Yx+6=+I*vn>i;$Qx^osyK=Z-S57s~o8 znEj~_e8f2yzFtnnPd^g{v>i)!>JWrxVM%x)-H8pyMe0jrH79IahBdZTmL($%%WL!a zV8MDi9yL90ph-_?mdDvwNs}_3eE1zn$GREca_yT%9y4JD^h9?ibfiN+1`@{`V90&J z$@kS8h0>&rX~2Mbwn6*$WaXqueX%dqHhe(uwem$9sb~5#pvsgSmgR|$avuGb7OI;% zWBM)Qerff-vgtZ)wfF!P0|-Yc5AO7P#itV>;%Vse`6e^JUaUe5(W&B_b9Bx6+Aq=) zbzeI&$v77d(+2Qfe%+X)Y#c9?daoJ(na8kWllzD;ldO3|9yyB3hn)%I?c)=hg3~w~ zTUL$Yv$V*>4 z0d79jJ31ao;WM1ThFa?Jx?&#N-0`3BeSc=a?dWX>UoI&7uGmRsaVLi8474kcd{Pe~ z8v~dncpbmeo^3zUr`>xV{Jvm;zH&)6%hf_Gq#gXe3F3VhA)DU`)uNSK76RNwAv|&I z;w0x!_Ro)0e`Vcr0@lIxa)| zK69LQ{2980W&Anubophz#^2YCcgk%^f2TLTKV$u002zP1!LRMI0&&jrU7u~H{3qDl z)#T_$+OO`}oOnTeil1E3@9-a89q8)$s~6+?r2?-buTE^2sOT?DS=} z@n4)DWx@8r?qlN8jq&=`^Kp81F&;mDJT8b&+-b$R3T?su?%|`i$Ah;XkCWre@uPqK z2gICLd1+~Tr+nA;cExD+H+i`P9O|KQPuXDkm`maoapQrqbCS=~wOiu}AMR9aFMs8O z+yzFNIbM}x=O2U`r;`x`%eUMCccv7qQ@SqC>0ja9PYo$Ud(g~H(P3G>UVtbX^G3RM zqhm>`_?#bC_RvIAiBlh{%)DOTvJa~V2zfIXBVT9yw~cSP&dypF_}Wckwpv&#zB7EA z;|TLRZnmuLDcCn<{74@WT-L4q!xyKVx4w`@g8f0ms-MQ+;s`ufUmatoD-Z#aj*kE` zZ)g0;7`7hp8{?CPOx#7n_BwaR2K@IW$BMn2_n-_iaE{-0@LE|KpBRs%-|^ZuA&;`_ zc+2^o@FV@oIxmpeuUgi;C5Yma@}b)l9VR$qul9}h;qqJg@7h(JPajyulWU>Aq2Fnq zQa-X$m-u0Sp*ME#J#d)a`hvIlsyJqCtK#bALiesm-%P&JTcP^%p6>~6K>mX-7nLVK z{Z)@L6DxS943{yS^1eN<{Ir8*JAO9a5R9L*ML%(C@FPEUN{kibQ=W8VJ!xvZ>ZJL! z7}HY((EgOiCVV6gbX-cVyfo;2>sF>5Efah`*dtG9SGds=s)gQlV!PTYOC2pc)NQ(u zEqWBJP{4WJr~S#0aVRpf@3f3`WN{IBYCah|GAN~L@#T2{`P6YA!h3&j?*Q~J4)@uL zvZ**rJU+lT-nyCdz49ad@jvYrO59Y=OMomj1lt6d#Z? z&gW6t+wWx&GsoyQI++<14Wl56VUI}18(}5oAMo@5B+Pu9WO{5RNEE}=7A;JY}QHJly3y&LBBKW zfQrx?+KI_+PwlE;${oK|dFoobZ63QmkDXSZOssqPLDyZduZX-sZGGuGrw>&=xd$%! 
zaxOXc?=4$fy7e2B@zVm!wA4}MYy{;ZFybFEkjC^fP(vt=* zO+^y&5)66jvt_sm^g{d3I7B?t9|bpgnvd?sf^Ti@hif1HK`j7~v|^}^8k>dlkaQNX zF>LG-h{*L%=TVG>0gWB7TSC~ml2^wRg3d+oOASn|JfBmhHw5XYRe>*#{u%f z01Qs6;b08E8CI)3lP;9Ocu>HWT3>@6j$TyDUIYJdX3bCO>nzS^nDtMh&y7_Yq&Qe(KCGa9>i~~udK@#WQ z*YW3|EYrl*VJ+VrB-MruU1Z#FP#*0l0V$K_$;Owi2Fo>chi-oB&>`ppdg4X?5i7_l zAngSJPGIfQVeH^ITy5?{O$W<<2hXBj%VU&U-9quBPX=6GLzlZmEV2T}j!lM3w_LU9 zo7LxM+&_f`I{)i)2k6kv06GRia4%$~7xJ+!>~ycy*BFhRyz-!pV8Aji_Q)z%bR!cQ zFzQYLuIe|dPAA5!cF41i(vzjkOBeEXEGPXF!Xvt4TiV=CV5vWKt-aXpg(H9M6sWW( zl=5I%eW5-(MRmQ>A;Wa|$Th#z$xq{7cgtuUxep4|RUJ~wkw=BI9C_ie(g*Eg-JXlb z2MXkq=IHL&uJ%R_uC^>YFUJ+Vnfk6X%;eJbUIs+s$z<*(wyYu>z@Y=XI|t)(;Wk4D zV`u-XHOxo1MsR5N%?!AZ?z>csTJ02%Y6HqED{v_yM5Wws+@Y`ab!?gnl zuQ^Pv{Ehr&kk1 z1>KQR2fvgDLHA2dr}r9PAUZGKr696p9w&Cq*L8~PbD;f_ZvMVsG+}G&WpB!?>aV`B z`+%ZX`Y02B^{8-bW7Yn)V+I{rfRrr3VY{b3Syj}QNxhV(ZdY{owZ94o zSMntRM5bf2x)J!w%5HrC_w_?vRy+00Rq0IsflQl^(g9_tA7(%R%4%b681&rF0)QPk zx?%9MF%EXu$Gh)58o%_NZ;l7|HpcUh|9JfCf8p21=I+(_^w}Sd!-MUy4V}Juw}JjU zp^yNu>9$F&DA&|KwMlXLHL3!_;Yf;Uo@rT>K(WkNBU$T{tqqk$$N3 zY-tDZ8hK3nYkHOo00ix0LekWONpJ4myUN(kLfC zYqzz9wPqTANK0HnwXp@;O=majH-5XB3jo&E_tU0V_?&TbNLk+)x}QFAd+z~7pv8qB ziJjFGXk;KAnbJR9o$F(E!CGyeH-(|WH>JE@XqQ*?@nu6UFBR|A1R!|SUCYLQkV`o$S8zCf5UN%6ZT2giYTP!Z__^+WpeC5iS$cPEd3f-_ zne9>wE47>Q*IBUS!btp`@^eDjH%hzz@Xdjw+kTW=>I1a(*n&UMCoYP|FM6y6xZY1W zj!H!OYT~peQEg}GxS@U*7+c18F8wz0$pUUlmj3{k{=a3zIL`P``IgS922N^Fll=m5 zeU~$K%>5bD8TWXs75Us(mZl-h=&=(GmR+m zLv^_O;Q#?Z=mY2tBd1INS|=cWeOSi_maoaB+1KFDN=Ik?_RHe8Z&c2)yTWDsX!%Mf z)j=@rmuut!N6=MxBNJqnd|uyFoMav5w}{8<*rKgn*Tx53Ub>QpGKxEs1GHn^uYO#$ zwI51-{;CIXr8{=Akmq`cO-6hz@8>;l$rgIa{ zQ!swf>8L*zrmWQ&A6l0SSG3zj=n(m49$2csvr8M<^lQ?VdZ{N{6I+0>FaIb zOaLk;(&FD84{VV>{hV#A-saOqHU={Hn5+=wgszJNoi7l-^AJ9u>VAJ~$G6Oo8DpPK zA6(hOq`Qz=e(IBLvyVdl>-06o9pZ9Q8fw!gWb11{)66&qohW*pJF2?G<2;XLy;eS< zCH7wQS!T;gOzzhNsyr$;+rbK|J-`^<@}3V66L&}NyRELXgFuy4gf6&%T9hK#^iR~k zw>{t7yDEEg{O0FTGV`nN$pY#r6YSMFl*DC})n2^k;#=p&F3;7|iglfCbe>E85BK-R z!QRez_~3BdKdevL`F6B^$^{PYs^j->&rdGKi?dhxWWnXt$ynRE9`Aka{qd{c|K;(^ zzx>_t-S7S4*x7~7t?EPfc8ISxZ^y0=Ikw=z?McuFw-?Xudpc?S@|J!;A0gf9EzoVS zg6b!dPvjgI@S8Y?a)P$6f3dxCjI24vBxbxrIoqM?qmJG=Gx{u`M}qc#7ZSIbroRV| z3$LYP`l~O6H}OpSTS6N@I`NsKbPCJzP1o#q8c&yB=4c=7DRak#TL9>4Xq_@C3Gm*ayEKg>hv{d@QD3C{1W=K=PsSFgt2!GrP6 zSHDWVo8w3S;-6<>%r3F-CVIWKxmWstKIP>HZ0~Au4pHh<#X#FzyGYg;>vo(I$J&4F z@9*aY4C7g2Mqw>abXs&w@&KbV$3V}d6JzyxAm_Dz1w>= zuGa5}I}5>~Nm=)fw5IRvm%r67mmTTLs86mOKV~v9Y?dtWqsgzYNW5MPB<@1w1gH~D zw|I^7l~L!<8{EW2`cY#L`E@R@-;$P3>G$XY0P(nWxA}ChSp?wo3hs;Fqv__$Tdgu| zzYnx;yIps_$+YTcwI`C3)$}YJlsw~-nI3jBA}in zHmP(xap9kI1{$B~9%8fA4k60>v{CRAKZKvz--k!i6PE-}+jskwnlD&h!;kV5mb6X? 
z4cI}dQ~~TWptxzv7%)44NQd#Wc*+j#PfI3HFK^PLtd@PQ56z{We0CPo-jsvot4+~Y z?5*1?pZ7&ijB~T|6=iF|p?pIdWtIyv%rD8%D1X|PIGp3RUzTO*a1Kgn8AJzCUx)`* z_Z6jhyd!<)qni#M@5_(iwY+<**r?7O2l~>93m;8$rw$)rO0xFccJ700<16hm3x-~6558yipF88>HV;}zq0 z;}f_3uNQ6bT|PJ#Yh*-QBb#)^)Mv;fkJ6s!HPPGI-_o8Hvm(n#q~v5_WnZ6Gm`U7r zXHxU{d?O57N;w-pKtpgk*Q}0^$JhsprViWn6uUep<~bS{mq+;aQ+PPWC(NA%@gpAn ztbx16)WRTe>C11>Gup~{I!z4S+6Jx1HToBwM84Ekypm38J-{!yn7%raoxF~J?IRZ2 zpT3;BZWtd^O?XLNR@Z*V=Vx56=6?%kzwqc>oOe8LT7KUZOZ{d1 zp+VYfE|)r%3y!*6$3rc*l23uO6#eaUUHDw(3zVlaS{4IG)1aPpdsJQCMH*E!y=d#gCdU&X7VJr~LD&cQ0ohO`9~;0ur>PU4L#xpt-LeWt;%?Lx0nI zKBC*tpJ{hNyf=&MAFh4)PyeSsfQ{M#pyi?gNQj0;=Nd-2c4DL+W)^bO42Y`Ztd7L( z&9aq0>40grOLqX^8fRv`XV|x$wG&)f2ylmZ8IE((InZD&sN|Ue zaPGsr!+T>6cK}Fv8<<=XSa_DK)J_H3 zqc_kl&<JF_6-DIw%Npr6@ zPkR27XTEXi?;^A#Y=({99iD<&@tKBQje%~vDIbh4K|8Q|FkJk9wIc(`-y|ZMzT!`RgC6UAw)@*G9k6p4_u;WuJT% z=+)qe`*Q+N2)*PpeHj>#SKaVlR&LB!-2|GwIS2$%sy1??vSMfC;=E;5JX4Cr_6Lld$ zANl+SmdW-lLLTrqsL4Q4KCIuo;uU|>Uvwu(jjm6b$fJ1C=kA+DlJ-N75^yP3h{!^k zEC>OwwgJhJh3&>MWjR`xlNG6teDwyReyy$rQAY3i6ZFbQ5F`bd6tPE+sino2aM6b$i8$% z+7yUybP2xs#BDg550CWs`ldVIAT4=_uXU)P|Oz$#?0Qph!P;qdHcOc2 z+J_4Oxb1@m^)mLOFNmM1iJ#sPp!!7adnCVU>uv!D{(N8EH1yVM`#$eS4x*)fa!#L8 zBtn;$bHXez{efWK=A{7KcQ2Leo&3ryuxtYg;( z9>Hs#YM*62OMLU%^!V$tN05_;dK9ko&iJu#iF^9*S=U%I9zuG?Ia79J2Nfsw7RUAA zP@C=@VWiQ#_+pZ)P0$rCv;#^bKQd6}hFI=qZGUX+yfrp=4#(!kJ#66-ZAsleXpzS7 zWqwLR``UuoQy%O;u|pT0`F-Q_6O*p;kip4T-{wEZM)Ixx^IF{JHig<{9&SLs=hEc0 zviI%N+SRFICBX;zSwuJcIZegt-D=sRx~OjIXJYf@)oa#+fAZ=Kz~@8!?nmq=@}MeL z=q@^&1;5}=9)7;fAsyCXdl`!^uYsbaE~M0nW})i{es6G0n~TMdGHS-*Hs$x+1U2#ruAzPf3me*v?~MeJIU4chHm7MaTS8k0uAtZKQg1P zOy)*r_126Jio92G5_MjZMmgn>`%Y4TyXmM;pp-;?h$_yyR%@$1SXJ6t*q{u&V;yjf ztZJ+WHz{X78QPQ&_`{d<`DQz~uCLsJm$I`DtWSD@kG{sbZ3fFaX;U=SgpPfc^{l~% z^2!gn*5|=WW;~}is$eJ zR9)igiImlSi^{h6l=X@>(jr3uoiOG%pY$uIj4`LZ`_|Dmmd z&#oWucs@IIP-m>u=F*SzOP`l<13c2sXi63Zh+o^x1Aop9X%^Hcb%Oe;9K$QVFPFeP zKyBgB&cxch3DgkUYL^ufmi^$&Gv(I{gD?Q(MIQT?d8E?1r|cO&2l`EF(*ow{dQIzd z>*HFY#W(0L8lBg+4{c_AppMk|LA~A1Tl6_!dcOU1Q>RF^l zKeI-Re2tIyp~}_eS#0NK6S~e%5lhKvVIKrNir9rF6*tL)y5dP)%cth!1I^w6OL>`1 ze)G`B78Fs=LBCYnx=wpdw6THccIizPSj>C{)~gNF_}BUp*UX9Ap9{hLGhp&n_1rx+ z%<{{45qfjz_`UCUJ@ft&CNFO;0O%bI{-x`$e)3IFwnBW=U3gOt%X+^YmTeT8$`9GB zX$v2`gEH~C{e?<_O^mbSasKl8c=hooV;!A3U~J}_|LWG|>B;!u$A3tFwLoXz84n-6 zgXX!N(trS1+E&-*0ViU@Ln_ z7rVj-*eA%dZ-z>zi=lkrmhG}5la1_DWxTPwyX(f2(RtGqmHmr#X3^0Ut@yCisG~bQ z71-OGQ1MH29+mZ&dhK5j(#rQ^8{EI4z4d_^$Ln}E?2i}mA^IP2*26jYpiIV{(BlF~ z=~K={FBtKa@w?DwJ=@Mt;is*?>d>B%5x%Jxthxh$F@f`NHsbb9`DE#f{m%Av9F;}( zc`-(w%Afj@P+uFfJJzu7qOo+#@;$yZ^z%A%m$6s*fDz>f-`phK)+-*;$HQMeho}V% z`e)`Lv0?FBH8~ zE_Zhvi8<7KM#-dbgu+@2)dEdMf>>JykBpRHyZC2y( ze1~6Lp{ew6HQzY(5)MHlufT~;Ao`fdD8#XC)JoeQ>C`T1OdT)A;1%@*~ZyxSH+ z%BurjP+xYS+_XKEEvX643a|O(ChcOIPW!7Drly_*F|maDK)b4Incucv=M}wA*~-Vg zkXrnT-v{#D?n3j|zt}Kz8cXy-a8*2!Vr0ypJGxTshJI(fQudNG%A}42)m%)U>tXi7c=-68Jp4cS z#xIV=^Jn7(oY(M56_d`Dz7~xo1IL^32k=z#nh?G`kuG^JJN5-k<-XEOadR-ft%Ql& zy7)W$tMnVRjqN8-;v&eaw#sMeoL5zcTK=uY$C|LT%he{nocPmjj=`DJ!9IXgMa zq6ruI=G*?thRU4tPy7x_@%qb)83x>0W6`p zd{2)9ZPyvcf-k-~FW{P+I#OlP5Zwl(rC^>wf6-IA<0rtE_XK?i_r)wUAC1=q00qSd zjXv`iyvh5je95VOVcN{ORO;4FR{17KBL~xiXP!%edbhGod`{$NZfOEnXej-zG9=7) zUiwellp-ldp3p!xw5;&*iw;=NEX&!%XOYU|eId^q$=_vdjGR-d)4NaiT)s*F><$1z z6HN1`!dew&V@(6#wt4E@V>uI7!ke~8=WEAW5B<$^;edtU%6x8_odFd}b$YfKXMVQ_ zMRF@cak&CVJf(bwI4~%alc8*dLUAX6%-36Dr}KewR5bzsmq2L0tAzp_r56XGF@U*f zPge$O4A5G38eGoBOGe*R$n}Dt(OHLpb(UeD<2o2TyMkb6xT?KS%i_pEBrytaNG%;w-(lfvGnV=hQ7d z(Cyngn4yngv| z{OF(klkrEt|NGy8@I9Nw>+!(*q?Z!o9Ab2YHeq9qMc*?JIhM zzwmA4Y*=_^V5%56 zCp&m;N99ubbAvr44?2-+2UUO%GQ5^{(e*@ZzF|xLUH~9JP?a*$pL6v`c_xr3kghsb 
zo|bD*(l(UtRz81`5xQSNPszxm_LM>5oN%ixO`#V#guXjsb6H5d3EX{RaRv0ke8h*WQahM847vALK~AUgy5Gnq|r(=+qVT zfFxJpOLO@5I`Wf0<;xL04V~gP-M+tR^{B3sSAGjhF0$a!GEoU`7ca&CvYqlt6WiPT z<&*t+KX!nQ1P^kr{t$emkt>UM_FSa2wLT8-ZI7?K`_}m8H{Kunha2PRC;w>t(l@_4 z{`_C~wej)4_|GBWJnf_IX3_+bt8FUY$U+P=OuOs{&L&wue{&sQ8L%z5#-3!~JIOa~ zw(kpdJ>@gw5BW$C`2`CAT0RXazm#u5+D2WSgJvhy^I#I3kvYHAiN5Ox+@=zbfPc3$ z$KP$0AKKU7Vv5*#67(CI04n z=BWcP@h&GotDFk}>cs2D#pP^2p>XIwyeN4!{#)aac|VZ)ZHMv$oFr%KfBWUsXZvMk zyHNBk2RP7KG9*t1!=*QwAc;;Bx1e)wDc}A=**JjAH$1UV`}7<6)n~(x?^F88o6XI8 zV{`u@=ZEMnhsyCywD0l9R9ej54qe z^RzxuV`OMq>$FVbbnkgCJ@Yx)vOj`p)hjrp!KMO9@+u$Otk6d-lg2N;25`42Gq|N* z?1F1QF962S=WYv5TpJ(y2@@X_iQ7e&#x+i$=)YVvp1^Ykn9^@d z*EX#2kI?5r46p5*J64iBkN)fj7Rnq6`PM_9d%Rkl)Wni+Rso@vyWMUsO17R*qHNK_ z)Qe6B&$9pF9wplkDYx)p6V3M7fQOAu$^y!`e7bK8D|F>g{ltcTCQV*S&eN|W9P@9W z-xasied(xrxTc>|za zH2)HpL4$eZFB4?aFK_b=!ji$1yS5EPmYiE(M59c|SF}{U%E~nJm}Z&&r43S_Z38~- z!b&W~y>8EpHF6C-l7mAcdWjZ@(lD%G0*7O_Yy37ySZqMBE-ICVv;%3QTUk?~} zlukl_biy`JF0;=DfACB`w704Y|30g%d^H|$mknt%i`UwK6X3>3$;a5zhyMD#T+=@} zf06(Rh9h&p?fkR-Cv^0u0UT>=>}=)jUU#r@?3D%B*pShqH79H`xi+kQk}DK6&%63@bTjjutZe#l!M0GT%BWWO9CTxYTO zEp}?$x$dWO$nVZkZu5QPq5%8qO>~g_E+%T--coMS$IUFjif*Ls;0gZaHM_IuZ(YP2 zzl;B(U-qGmK7T>~(Kx-gyRnmpn>)t34-Ur;G@KLrEGX+I0$#p;Mw6_M*GDfHqaSB$ z-|v3+7ss#v+OLi8fB#p;`(J$s?LP36W_9#B^FO{p?F9TSc&G_MqCe6b-=~dLe^6VM zm%ipc5HCujn9n@gq75={C#_DG-U|STe_TN0+_ru~d(eh-rCW@N3~7~5=~(sg#-HkT$|G-W zvaXfmY`>}Wj@dc?#MAoY&?uk9 z2g89J^_SLfT$%i&S-%i{uuR4|B^U8kjN$y2ye8x9L+fW99djB-OJD!=_xeY|+2~_^ zK1LtvgTGqrnKv#;wocLIIDJ(0jTG!-9;AYE?nvAB>%k=1v{UV6wrls-OMCU2{KjkP z`^+bQ(=H+-eL*`zbSQRYa4`Lz^;z!{mU*nJ+N1DPUU60YTKM%fdEgzGvUdCzUsG}} zx~0i9!1-VK6sffO!Xo#&9wGO0EEzWhzx8^qtj!l*s@*JayNN5;^ISPs8<0Vncoc`_ z89$d^*cX=`sS}xx=r%Dsv__OY-dNQ0s^tQPr5sEH7e{&2CwbqjiLGXTFFn)%zKPe8 zv;NXQZL-=x8|rvIdW%fFXMC3hD6?-@$Fn;Vb@zhd@I$@UMYZ9zKc{hd#(0r9VPhMg z0M6RkXruanQbLP9mHe7x$h6Irp?2lvJ`zT)uy;ZA{A8O@? zcA_5$KC)EZB?tKl9`8k7>Zv&OX&|t^NspMjkDiAK>S5DbepFE3Qir&Si!|5rW#)qh zz;n~AEA==pMVj$_>~8dqD2*${a<8tduBD%v@rLw796pZy+a=h|bnQ}x-F%dYUGdB6G0(eZ1Q`{CLL z|K&gU0RsE2qyRQRD!6m4VRgePTq|gYYy)QXv6 zM8SD>W*YbwYvY1^moh!yfNv)3m<@!aLnYrFzRL$a(5usO@FLCC zFt$($Tm?3Nl!E)Aoon-2SA=e-Np?h+#*h{Vwh^)pXzFCiV=AhS7U)L0l0G1xR=_Fe z9K7DP9IRLQ`2e|Ttp|H$fI=yh4Oo*^-M}8R0*t{~8X3G@xA&cw6eR6#nXy*ikoG>Q z&{q?Gb)Q~$p?169+G@JRWjL4&f(w?(HB+G)l<(V?XSQ%1HEKjmDyLp|IE8`pO1-t$PBdXu&+ol?q0 zYtfzUGp%RREUoetd4tjP+F4@C5l9;J`qqsdcop@JE=r0}(TCxQ!6e5w?20bvS@*7*| zzw&@DCy#9VOmM)oTh^vs)KBhJn=6mfb(vfEX5^HuWt^zi8w&ao(|Wb(2J&$6f!i}` z9}XfJK2^I=Pti?z_;6t0#FOCLY?&mY&Rzf@h|`G}q4iN(>we2YoAp}ylv7OzcEH;! 
z8ZF{czn%N&fbz*hV{O2^AalyQ>yM1#wFjP-6TGbq%$G93UuAUKo;x|EIXIDR`5#CeXQ002i2Y=zR8L!8>sa;?`D^Y|_3d$lA zfF?n6^5L(=ug+9m)qe)=)Rpdu{QhipZ794s0c5+7R?sw;Ote=eV47t~NZItufcoRT zZrAR=Z0iK4UrVvz_aQ;>&BV||zNS-HE(GA1?X~B5J^?{l2R7P)lZR=aDQD%E zI!H5r%A-i?mvRnGvX35mFJ6~p*_v&`J!q(M9Kl!g7k$xF?e$I@*ThXtOk8FFfDX_u zWG-KU9VCD7uEmD$9X=U{d%NSo!-Me)zwjRC+VIKv6|M=dboJ9+a7KtRpgaLW!d`lLctxU1w0J{z#Yi zq_f*ifTDSRmk#A&yK{=Z+D-v&tFv|59eFgK&^W(Azmmf|D(|~hHOzKt-i-gGBk7cB zdR7Ynyq>a5y(|2}?QLyMY`;_>4jbl;%FQm14YM6@H%^O>pi^SV(c77b0dMFkA7d8t z_FHR7!+&P+lW*qG_vJyllcAZ>kbtnVC-?ft{CwR?Z8?H*9lNj^YZzP`W)<&`vb z#(J@7M`u~&c6R22;R^=MCtTM^LD%C9Pqwl8z)&e)~o#m)7wA!hLpUzqkt30`is!WwS9fY zL(pjD`lTBeC33Aj#aH9M%zt%V1MIx;iTr&4o;N&@;l<@?zGY~+EU?z^(Jt=7aCUMI zZenRCX=qRVwE1i|V;|!us34q!7iEAyRSkUbpYaEN*#*4apXo0*FpT=7Rmp)0z|s9m z$v(PxRJ&qUQ_oCW{n{^K8!anot0>^7$w(sJOGp<59yVM^2_C*5yp&jQZXAJEH>GPPgc#$Ig;C*Q40Swt*oV)#@b;c4oiXI@j*qy2LE8S+zS z_==yZyan@I+skR6Ok?~EEHixp+|A#4?&kIW7vb!>nugZMfF9Lk{@SiQ9Ah=tbEb6~>>X9q|d#eIT)viZT1G z@H72U@q+)ckECq;2E>@fafV?2;=9UQ#yWfnA@eqC^_J{Zf1no8>VGX~-imb!kGdad zdCvzLUL_uaTUj7$@`%@R{EgoG;7B#q-#fX!cGC;+sEdEpQkOYb2J(o#DR0MIw8vT& zAKEYYUHpTwx4%#Py&ImCeZEb-vop4Lcj<>*pl*E~16GU}nb42cx&4K7F}gtq6t{2E zDQ9>5u$=xm@y%w%)asYAxjL&DR9-zgAzA$lHtrZ_&h^-)$#=U+(adU5$NJdcg+J;} z45r{F*zT6kLw(V6oe6&Mx*J~Vq7PhLxZ|Y{ukh#5NAyYDp_#If(qrYt^#9klV z@9mF=k7^t9TgFwVes}%o^*BC$Jzl(eK8{ab(RaDy)$aKAFMehGt-tj*$6x%5zdpYG zt*?)*1LtjwooGKBbnaxMuXUn5_4@|4;}UEkI~9-?$R`HMkGcR=S=iL2Cs41Pft5VU zF4`6=4%Nxsj)m*x8sI`2TlZTUB7i@a$+S^`s3f4HL!lI&L7 zYs4wz=+$`f;RoaD^wqdQkDovLcpUCv2lSgi`S7Fh>e=h@_|ZG#{=*03lb?P#{`i9* zjkn%Ra_sJH zPUSQ+M?*z0J42PtZSQoJN|?%mu-M{23P+H#qmCkKDy~ z%1^cphh}}Q{QEs{)Aaqj2m2|%kwru`-!A>eU-BiM&?1f6lYSFg5~J{oJR<|-gAPcS zr(EQ`#*JK6VxaT;v#)IbXuoWk9#6ZYQm5M=)b6JELG&ZO&OQ+tUFjR)U)lJ}Tn9%N z0N5`{dtHMI6kG-hw-=R1drkRv>-_)=QQO2slvO6NnHjn~K-={K@4`y~N-X(KxygIm zzk!7?ZKv{b4*iK2Tb(52ex1#DaizOi2ZY_EvNoN2pVz+g8R;py>r`(KT${{1rQizv_#v%x03jj{g@^ z&panLI!Jx!pw}Djm`G-IGja!C`3mH%Or-(2cPtnlIXbtLd*sVlo4$6zxZG{*H+NiA zSo5;(aHb!*k{0kfkMBH}c4~V|Qg*$geQa*WrU&=$M~{4A@6o&Ojlc1?e|J3i{;!Ym z^w~Ja2TO;aUh$|-h|f9je5!6a9}Im80=&{J-wkPBKoxU-q3xmcq3k7!s?H#rmPU1fP0DZ1M_!e!{q!AAQ2nF=JuI zzwSir+y5^5QuU-MdIhaTuR6nD(zQ)#vNrvIw=JU&@F-Ze{fe^Bb;iJEsCXMqOCHC@ zwG$Hg^zF8@N9xMhQ(8)>Q$NH9rr=XPl(l`AG;^d5#i?C;ujr5u`?%uC>oxpm0D{C> zdHKaAWJt-DkC@qj_Ql*D^#ZjztrnOciaF7LkWa$^>^TK($NP~Vy!7~?{ZOTw7Y2|00hLV5Ds^qQkOZU8PT0$SZ^tr9CKS&BgpF9|vX9g$AhhDxA4jetMP{B&rp1oFXcI$Iog9{kVF zx3)j=fm@}iKt3Q`TsZ+&6X`C-b1d$g;ZBT&iOGc3p&DGhL!qo(W>D5uY1x!e!jy%Yo{C1tH zSSIcbQuX}OP3p$&ZrI9w=+u1Iww#2ll{Now_WmqL*X_LT!&VPp&%Jw(XTmuLI0T48 z2<9MCAjKIRC8|uRB-s*WsU+FfkfbUeqEze)U*#&fNL6lfp9>|aa=FS;DVAeJk||QA zM1Uel3?zu*05~|)o_EjF*WLO3{?A%p_uk+kL@BP^Xzl&=`@U;<*0Y{zJ!^Q^J7gh_ zCnaXbwo;i#{x3~Dg3fj>WMR{*15%H)Tx6g(;}^nOPPIqQg9A;G7iXu`&!aGHZ)D`u zWBWWo0y#OL^vR@6PpB7Tap8amdQoGwttvq2RKB_=ntlc+fc7X4wmJ1f2lJ1&>s|b> zXO*dF7TO8Moqg64g;ZALIe`IK>g89wQKq1iR~k}BxOgqu(Lu5(sT5W=>QVt?occky z#|5t;>+&VInb_;^c#5O>_y+8*hHIJ$dp3IXiC;cb>F|@4VIC_`Tn4kM7-X z>o^_Px2W&XH{;+RRVt&CiMZ;xOMmWhD1#3ngAjNi3EV89Ou5^)^zk%Ca(niE9^YcEP@)V~T|rA^6C#n&LUY)|Qi^h*7- z+pBHTAZTB#Cv*T8?@f2oEcvKAI)yygEHOL>m9g8uev-BJkMr2tDsc*=xMLR z-w2?<4@Z2~k(oB+ytms^Hw1;FAUZP`ed1)mvrXD*t2b`oK4_aOaxX0LHji;bUkhJ? 
zpC++9@@0DIjAEvS?AY&xKGp+H<7a(N($+JX&C~6QPOY}y@8KFRe<+>-apal$j0+toGkY^qIN&qt zOS`-M2=1g+yG9*lKa)qW$eHQxv>h7&OdmT=#no%z3$NSZ*+5ZR5KIfs$btH8IcZ@% zH8Eo-+Ac1+>83vaar@RQ?Z%C*cIVc$_S~%vyQzwz6cm_~@~yK>@t80l*Y&>8MLpNc-iNc{;tNQyf~4fHd9bT!*|CtPv{B zo019ZFH-b;#2Fvj5^TR=lp8W(7*~i+e3z>pM%_i%nrODqu^wp_-NjaAf*koY+0y_r z;EqpG1HvrmqFgha^17s9Tni5!<*Q+a+m$eIGDH`zWbp)u84VKpJ^5pAo(<5iLZ{={ zz!7ODTuFa6%;ek9@o@t{QLx+kS$xm;R6vet@8w1jev1d-N!*a*K0&1aV0u20(EEHh z1q7<}HM?M%o9pV0|EZ@VEw0d?E+OOV_-9*fer1!sk3p9Fus&^`_6S%x-u;~ZR3XLR zJYztAcH~>*M{R%ih{5u4zIU2UyO}W2zSO4&>?5V$^tAAf{iPj9PVn{3&Cw4k4=!BW z26fYG>;wLR@`ClOXFyduwJ{a+IfO?~OMEgUU-H-%@_4V`t<0Zf(zik_tm`LWujt3@ zTjS4@U)hKr!zZ7IP(3Su1sn1(j-dtImt7I23xkUbs~NMnP;Z=*4>i^~E`0!=pF%J# zu=q(MugCZ@CmyXR1>B4Z8q)p{zD$pg;0?YoIP0I;Hu;fn7E;d1Stoj&0McetG06XSwc@&f}L3Qo1jI75vYN=_Z?)@CShLoVW91(tBENSP4t z!b`tdy)(F>Mmb5HfDZq(pQ1VZP3(f-+&Ab0#9!dq3GsX?=#07@vkZPf^P+=!DL(pJ z^5-g?ezp1OkA^KGn5L}G@{Bg~EPnDYY1Ctyc1zk={*u;2FjdSKX9DvJ0*YGC=YEfw!=C) zi>Z;c&im2+q9u(ZQ~ar(*Jk?3n9vIvDm~gMg-ITQUSJyrUt&)QzBTV6*4a4&Y{THA zr^S%(jOWlvSl~DU5}))z*SS`2#txaZ&eTI&g*SLh?u@gfW9Czg_76p*wYLWEDOnF( zV^eKXZ-?@%y-fk>>cXLun9?kS5eMQHm$UFSnP8*ot0p-?V2$pPc=}2=d)P;1GTTKl zWXtyC4c5qZ9S!dMl{mSHI-jcPK3?HNzB3oc>L~F+7a##o9~t!gV}APmERG>9!pl4g z=6DgiAIk>r(#J46IT8D&tSJxL6;s{Rl%ts$uZCw%9NUNStUTL4XWR(XOgK|^5#DI$ z)d}HhLw)n@jK1mw{yWCIwzb~Y{PY5Ov`O|m2Zwv~H@h(pN5?zaa5h=FXt(aHx9|Jj z?{DA#N58kd_K}xDLw&-$nFwu{m*`p4Kj%>5H{dhG4wb$n;CF#--O^6W`cjYlqgf?e z#HaE;b~70NRQP@UH-1+Zx+7;Jbh{wW3YTxq_bqii6-N0U{_O^Ue*Ja?DpPh$Qo@&vD_Sr9>!-leV#H;6X^%)ZNR=soK zUEJi0;{rFTR?vP!LE__kW3PrTi?8=J-_`q2sxoc?`&V!1qr+~>v4u-d(*~)?m8~+S8HlMvHkDkf76P-$)sGw*Piq-svoTvOzX|N!jz!r}^Si zp5c>N7@wCMG(x6BNMZO3Jm+`x4D4@c$6P+aW;{R5SJzMZZ90ggtfBBjUxA(c#`k`7 z7P$8*_4mGmjNFq?n;^^t%h$bmpMpUc&(b;hdUq(>s@8--(EKH4CfT!9v`j-f+vAPOP`gmP7Ds94 zPu!FX+f8J>5IqOEH?p))rc<~F55L|GD6y5-R1$jV+}W0`J8bF^?I-VUeaXV4FG?$J^^51 zm5U(LMNULf^|04M+3kg~6eW?=aL&7cY=?_r4k4f76r|J5faI+CtzqvSoNAn=r*zm@ zoO7JloB)7y=i8nRUMU~@H5y1-c0AH?dIb2@kVc=?1d#RB#7n<|cMP$H6^>YM7$S4J zBUZ{%M3QmVdv&4Dx-B9Fs0<@0Wkn1kYizh5Z)%NDZ!C!?OeNhS!XMun5+@MIeL8d< zir&te2rdJuVe^_cgb*%ZmO|QI-76?bo?4L9xvWXD8f5F_jkek7njq0xwJCM)_2SGo z!Yqa2Q^!!eI5h!2+EGV4vH5nH$EUg>;QY|RHx8~7UZHylbKzQTybJ~3wj{nA2I-Rx zEyT?&k_Vim2jt_bqIBhQL5`5)M!T#84tW5TAP$jpI)o^Fe%#w8f3&4`C8{&RP!I;L zJ?IPxQ{^NJRi}P`8rWOl@%3$hb9|w3|*(M+Ko}a)k@NlEYVom@s zvu|N6X9K|b;>+!)fBw(5 zHY>}vX$n83y91^S2!X54!6l6VJhJ;!*USb#3Fq^BIjT`Q`?I|GiG#nvZ{OOmUY(QW z)wSgH=%M-ZjZ8o(U*Qupx11=)SgRa5g9OV&x53werue$EvEpkYAof9d?v9oYg6ZmH z?4Pm^@S5lc*J(MG)>oS)3XdE`7r5onT9@&{F$l_ZWi*DtJAkP7u}tL8z>hfdsW@p5 z{0=_D7C=4oC~t;gv&`dlg!D1Nt;$~wmY;>0Jq91`skf9H^vxsr2Y)WhYL9BbQTUX7 z2##cNLJtU}@k?{7oS(p`AbjGq=kUP3MYA2dN0_z6cK2tR!aW1fERGiatW0|AK-kXW zSd5Q)wM*93@F*53)u#SKvEBfWan(nGJ1YJp!` zmYuFpFmeJw2+^PAWkT^qSXV>k5e9W9>lOKs?=<)_Pp5s|^(!SqbLd8zzL&m-VOFl= zVtnZ=`H3?~&)M<+*qod~Lmmg7=Jk_IHpe&V48jRLy1uJBB`@A{qmK3_hBh6St6|_| z5a-HY&+U9YFfTV4^mwnmzx#v2oZlArMx+z^_4mQUFmSybxW4}h1lB*x-^17d5c$xC zEbhZcF;JJxEWPaqxWCYrY9H-&v?S8X1se2JtkV&ZpFv#noBm5%ZL)f^O;)zr-0~Kn zd^??d>b^6Sj{Lb^zzf6C!EW0>-fgG!L(`+NuA5`D; z#HW`m(ueX>5VQZ9$-IRX&|C`G&>ttNl85#r3-wuUaiZTV%_S3cQio-1pLW7J3U>D$ z$fqoivCQ0si$soPT&Q=`geN8(rdGf%)-d;P{?_;Ar5$VK) zpH}c~|JXkJQa1#U$DfnL;$v6>KQ|@W4!mW2W&0i)2L4$b$DU?GV>*NC^P+E8>{P~F zQVzc)wCWR>-pg;J45jboZyUUqK4Y`-@Hg{WExDF^_-LJm3w%fh3i`(*O&k{IeA7*w z%HME9&rrqDPJ?wtFrw^S6n)^h0Yh92(#tp(-1Vc3A8l6d20Zfxf9hMFEQdxZM_w19 z>_Z~oSu8C-LAnh8z+nEsL;hWm^IO64$Na`yherZik8kEm7bk6kBfPP^_l|d5)U)k1 zmXNl>vMqq0`F~ZPxG3A>Gjie?mM;~5PTdd!9&W~t^g!_7kePM4i2hM5Q#uGTI 
z{l2}BPv)5e`km3&Pw8(7WmIQn&Q8ClC#%w8SUApETwx)yqsT8`>7Mm=N(O>5Ss8#!7AE?ulNpx-%-P78)M0Z@ecNyf*;1T?f;ifKeb|Ei6 z{JE9od_R9>d9&NIr7XxTVprV&u;RiNbdVMeB!4B4j5k_w$tfh z+u41HZJ~cV*=u|F4Ue`Tw6)EZ_K^>Mu>HhO{#g5gANaoZvG0DpEqa`~6Y$yx`x5Cu z88^zRwsW*AzULx7gq!D)$F!HY_Uyj^a;HH>msWsE@oUtz@eMqH`q_YDKD`J=kd@O<|Aexbwv7XR}PS^u}o zHwz=*v-wL#-#=XD54ls{h$ABOmak>wGJZ$K{XD2TJ+G`_$1iptx5xM1ZHL=W@Kq1d zS^B!8llG0Te-r=cq+Q$Cq|aM!Z@&3vd@eTtIM(w8FZJlX2X~`y&h4ykUZY-mo`b`7 zv|kHY>V`TYF6mP#r@l`p>q$)~(PM0YdZXQ?U(uJFETN}M*wX$2mx`J>5*m|dn}lo$ z(|G%xWAN4o6tBgF4dU(3`uvmqls$(>cW{?JLET8b=uW>MKB#e{9@|^`VlM$4bkCP? ztT%Juz;}%0%O3Wn>fX|%PO#G75bo$RCeYH)PzhT)EG}c4Xuk{n_J?LQoYF5Yt}f*h zD%R&I0nt@(55C+RFI$5Tw#icnYAKES)#gDPS)fba)f$iIw_B`Y>BtJMf#x8r* z6Z#2omPtA4tTF!J-({PgvN!&OsbMC6=Bmgmjgo$)T!qq;t08@wjV6R9&|!?BpBhU` zJJKcC`5b~#)zsy%z4fR$Wz97Z2(3$iOgDcXT z^!O4d>r~+wthXJbFR$wO{=&@=8Bd&W?C-=L`IuB6o4&wco4efE9@&s~87Hj?;xcgb z1*nQ7J@ZeU9_YV$q+sPO{D-cMK0BWXKqe#G$k51l8H^J~>8bN`hO#yIX;b;GuV3_+ z?$SaynK#lF;&)r#I@Dd~$}L~|q|Bws(Qiscw}i8xt}>3V6B`#h-+>K`3ASQs^;$L) zEMhBf-+8h9+%Nu_w*1lu8%8nq%1@n$i++VKOKKa!Gh`xhh~C15>qmy7i~O&4)U%LAA@yruhuw z0bAgFXsz8<7^qh0^3icHbdySiFJK2w(V5B$vUz1Pf-TzR%*d>UTKpnihEG;uP{2H7ru2IGr2!YO>+BrFwVie z_4{ndkq8>@v>7}c^kYzy2YyoBkMw(TzPO9!1)cY4n+9B22t16~#0#ZaB; zIJ-Pshv2mZg+mFc&*A%YISatzARncPZFHv!nT7*TLDOK`Sb7qyric>;zLM-x6cDJj3Y zY>YPuby(F~U8L-i#4g!x=@MCGpb?!Xm(LE$ioeD4O#av|rG)3|@JO3Gh@9JLgAmYSYscyLvj^%l*|2 z0J&1gyyBn}RRgZ-3^U+_9xhh6fMS~UfimeuN7IcD4%u`;B0&dK8c06}R}Rw3;jDHm zQ$;uBOPXje#GO+9>GJ7YDUr)_-(W4eiaOF-HUrq>YFn;4FoGFrS3{ggcoF-BgpBmk z4e=`&262__UPqYSBe&8_{H-TCjSkih%i_~anfe5@m=&(-8hplBUWILT+c$8k0i~rF zWE(|aBW2w=IodXK*ASS}q4U=e2P_kc$Hdo3)k2thyfc1(Ow92Uk-7&SS zVc_gFxM~Z8LA~M=9q;{v{obDas(|ahqN_X7=AbjO60lw9dgy7 zbIpwuPudH&m)gU}Uu?H-9k-wSi63mc_up)X``9UTE#G(6pb^B!2@LsH&UW$27ZL29qdKNoY=B~b)o{r`-^G%rP&lKu0C@u{!14p^6*KC zPa9&>_3e8;bsQlTe)N~;^bN`Z#JF%##5dTGgXvlI8~N@jK)hE%Nd#_{9rEKZ{WF6_ z{X%K2{@V{pUPWChdoLS2k;qfW!W&Yi(4}k`F!6CpHcLmmj<>+fclmR%u#OeYL?ZU= z=CK3H}mx`j-hw_EAaR95bx!$BITweibZzvDJuDWR^ynQss?_G z$&1yokEB77jZOm>fL12Hv8o?X3%?mWn+JUwrl#80c?68Wh7ODLZ%9cND_!6PMPv$D z5V<^%oqB{r&E3Wp{NREdXoQ@U9XRE5CiLHc1n?I<=UG|MInrhcS)9~)Jh6gjUi}46 zqA&;YV}05)gL&+)##<4Z;lRHrn?C#wgXQDf@EkcJf9Y7&TXD6Jy+l3Y?XwF9)12s} z{FT3GIop4+sH~JsDm&utg5(80@Dg>4U*rm3xaeK{)Q8Z0@O#;7%k(kOO1ymoc`1IT z{n2tZQ)hz-df=zIBB{~{xRL9NqOn2P;f3_WFSTjqLpcd}l5+NS;Rif2uNzJbfn`?Y zyzrzf_!#F^9tC&qt=oIA`WMoz`hB%)$Wu*3T3)(%pN;C&U3gngyswlY4A^*~R7w4U zf_=dWpQPS=qzrhP*NGwL53E0a8F{S3PmnprCu4_Ob;$=O_RObm>n3M)*ArESZ&%-P z7IwY$yhMMNHg>r=fuG{&F{t_wC`@zRBvoLyata}74$tQBhnK|-{dPkLsmbRisKBJi z88Vb_irT;Nb78lREG92B(+x0L2?@=#v$Q{MlYvCYJqt?P^sMZ4#)w@H>x!%jD?BER za#P!*{90dZH~q?_?2H^2&txR%JBW*TNdG)bZ|uV*57m!a@F{;xy&+G^0+DjHxKi>^ z^+*Y_1H3lIQp{l$XwAjox2eH^$)E-k}Py@PkfZ_?2hLpCeKrVAIkSPeGHHXEn^ zhq6%2HWGZ3$T5>N&Z2n9XJm~&v^IvzV`;S5R@XN2rfc;vWeYtze0ljs{GQkXEXH(t zgzkANU)L{aS}#K|egv<4PQu*MlxI6j^ZB!GH+8uQz(7peUsUSzJ+yH)pF=(uFtjJ$ z)8NoXnTdTcMxE(;39#~)!82A!Ixid}(yScfU4Fb+%9q!e< zj&c}50K%LHd$uOgWb~Bc!aZMG@k0NIBHk@6w{KM z4S7~C%$pZYCN+K$wsESd{ZrY!>bs-6grbl7QntmbFXTwa5m$39)gR+A!RDfC!9X^! 
z)8-lD%%w?v(_g>TJk~8NH$}y#1y^m?(yE&kpapgzcxpOn^9lW?Z!0d)7Hx{Q&+^(& z^Etwy3SM2){}RkILX|IhjqDQUdjG2)>6>a>4dxqdxf%wpqB!7O z%{S709`}X;FP&fyb!Wc`k9l~ut`SlWV_NeN;%9xr@;B&Ia$-Mh`@;*tnKs~RkYhKL zFUrv$2r#UyMVurZ)baa_B!xg%sq9`5FjVs-+04b{PiDdbD;QHO4!yr<+k^m1j_}79v7LW zT~lN{{cq~7JmdcAvty)`r)`F`)%JHf`8?)Hg==2>;-U%nA+I#zim#*ooVk(6wh&4m zO!uWseSbNFPe*@0SzQ6o4$D~bOt62G6>QJqO1p9Mx%Th=xqq+CVHa)x_Twk`dHPI6 zZ`X6&w37gxj8~sxo7#HXX}OUez7#QYKZ5x#pXE4V(bIv*M+2QR9sN?_Z+ZEpyjA%D z18Vjf;byRk(jTctA+CaLpOC)r>2^z> zAw$MxJV7X537!>Ii$?y?ljt6_F-7|nS+WhHqyLhK!p+TXhB59a{7ZIHhWbpaHd(3b zJwmQ@{c8R1%a zG2UbcZ4|DUDW8OE2V8j2xc(NMD1{gzJ|k=)s^euaE)552)md)CvF&6sKX}cPpdqlHyq(N{?-zlQqw9o6eNJvHV89JJ#!GmGb_#xb zKOGVHS&lyuM)3>}41o_$XKPUg7l>Sd_H8)ls$6t)Bf$BwOkgkoT|BNtxiI(^TXv4A zIMxBhDpb19MHjE)prUd`jt%u@y|l0V@+Wex(@9!6t(G;-Kjk1Bmo{;$qcDw=QtgZm zg{DMdSiELIjChqVa7;>u9u}cDx)dWVzW#a>tS&sYjSiM6Cf^Lhpt*y}GUoNF9TN9p z_~KNMHTdxxIC#x5XBcpIBa3@2BG*KavLR2@4}87$S~#%`dD|$D}Q~Pn@ZizI z_U?nnZI5~%J>G8j81VU4#qWLT3+;2i{_E}Kn_KNeue{WbclO)FPXGAOp<*W2?GQZC zU`pLmMp}3TqyrSi&Pl*C=$N-CzF zHNSY6c-u>2?lYl8Yr+TWG(PsS!ySwDZhDver}03qs+s_>K)O z^^ad$A$@$7OoLg92t6NC7ulpPIXZ}3TRRc4jmmsXFCqbJc3^!(VyER~yPE_6XT%BjZ zul5**jgrRR>sVSggJa8@J@hfHHUNya8fS41 zNiM;)Xd}OrZTndl#{5=n-l*kgzvhIkd2igj-Clg@<#uiB18p+*5;aX3+M>h< zNFZzBPP!EE6r_=}+FI=z_Tt$P8iXX)BFcdzrpoxXAE03BX{waI1a@A%2~Y2|OqMnK1CGw2pT# zchM(cXXS&XRki}KG607VbKv#l)USjWf%?dWn> z^s?`DQFWS05Z?edzx12-x1Y^#s7b$Sh;W?DtbWnB`S>tLhfiveg!1}2egklCXP36s z+xy$wyOq!5XZQD;-$L}w&Qli;=|4{n>PT2&YkLpLS8wU-Lyy6j`T=YDK(44{f9QmB z`Df~i*Sxhr0P{TStNZC6Lv$>6!s_jU&%&~VA{ZK4nZiuHK96xi;<;MCn>Uvx^ZCwn zXiL4ZQ~IOHR}&uM8{6McRtoxrQNR907Gy~4YVb?EQaf}A0=2PYR0vf0fxl1D$T`ak?WV5^Uo_g*>}#zbD^OFuAu z7HEMZVv_o81I0)$oD?PhWKkafK0K9nKA+d3hGLbx6dbaV#c$%ywI}c`ey{em^Tn}2 zO(03VqL&j$*&qadSv0L!Qn4TK*Q0E8jY41a{QSh@iOaW)-V?8#k;g7-_?(kTwDlml zL}4dTwJ*V+I=uRs4s~IAaunOA{N#vZ9v(f}$tD1glt02&kej0g(h4RfeIOT^YsTr(G z+DfDJ3G$2}7WzN-3(8&6`%l@-mAo!E3*UALf7nHNQ75IGMoz@ZrY`EYQGdxYG@I(?_D_(?a?)K541_%^+7%Bm-hv6UBXd1T)JqD%AAS>F#` zBPwkKrL~pHE6q>oSJXZ6zv$zg3v8WpP{vDtPt)`iiEK^X->&&p5P7s*aUm*2~92`hOE z$nO(IxqcmR&ddM2488of)boD9e&=fFGBUe>ww>n@couB zWPOIm<+Zpmp2?~$4{u9e@nql!<*`xGjuYnw&BD{p!}j?8y*9-c!|FsY-g@(!ZTIm` zP662B_nFWBcH7(DX}53QN#D15Z5`g8w71`R4;47d27qhVu4Mt}@x$>+x57u?(!MsI zR>E$%>BnbJs&0 zwTR!IxiWOzw0gn9{y}|LSDi<#jcJDHGi7t_up+aa-saWT+9qijpBpL`9>|*gH)&O8 z%O?@MIfBO8)A$dRSzcvagS|MSjn=h@{*Da*;sQ0WInHs~S5YtdoddUz4~_8Sa{>x! zF{R`iKB(;!2jjAN5B#rw>dc=uIrY+>*kGGSoc*8jV_zp8)6?$rh=+AxyHdXM)7IzB ze%e@ev~bBYsoGQT4PIRUj2)_|-m<7yb6>b1W3$+P^$ukmac_u}XMv(F=p`|x-c z-|N-zd*PZi#))Y!G|B?#ER3=xSIJMdkR}u)%`*5YX%l)HJmiCQVGA=qMSRljbLz~q>eC_&;s(toKJ*lbTixA&qQeeTf%R%jd&@htm%VbDC<&E%{=&Rq58 zhHZnYxmuTbi#Ng{kMQ8LG6ZG8>4Z3SyEXuvUF^2w!`cAgru^fR-JAe$uT_#M=xj(SLXxHPO44DW#gA? zUbeHyIBT<^;MFxcMIGAqtCZ94NhuIyRN3#z-*rU(jIKHHXJh`%XMgZAbz|>{yV^Gi zZ-zskv$~i+j^A0lR<7w=NE#;Z1FH**lpNjF|D~}!jl~8l##D(_c z1zF+PsElnAO1})|k>s>0NJIa3A>a*xL|!CJeYUTR(+ofi>{#S7j!+ zl9znrK4o$(ysam^1cq~Jh9PIpv-y(3)eyVG)$|btfBY4$T^3Vs(bs!4zmiV?m?5D9iO>h3QPD{ltF|gk_2d4$7O;kq9)do_czJJp1f8F84kGI$01kk^NZanMv!wQS2hNC7rava49^k5Q+eYKKtetzfg|T+ zk&8gEllDUUGs;YH@Tj*Yp`{sd&^r78r_11dudv5IwE=)6+nspZ3UZM%(_y;ksgOli z;mD{eFC696H>A?pbefH~7-X(I%OYIj$lI?plMYqbJO}r}P5AQ3t8a0c*SZQX=nV+z z7kv7{rwgE_TPE-rfDRF=a+IHxgYWLI3!M_D`5c$?3`W?eEwFuG8C*TYq-qc;($(;gmGDJ(PKG5EBYOH+P>0Fe+J{! 
z*QCc0Gp;7Y@~yz=Os^lCZT`r6$Kmo?w9~H32e8U>;By`K*6+2-RGfBOOIRBa#LGcE zX=il=ocStrKBMDMZl-hE{;Ti}RR(y-z18Um#3oZ#Jyb_$^;?dt^5ZH`J#}QHsdu)~ zZ$i&DNC?}0k8)`jRg#M`m-xDH42Zxen6|&1Kg zX&<+ZYnu#=oDjO%Ui*%ZBGWFOz65=a;O4ZwaC@$O>C3+gi|@7n&ENUUSgif_`0kgn z-Ny{Hb~Ax9al;k51=g_xo&X@8gKttGeM^5Msqoa+n8&u!9vVD$=1RZO;>v4ZU^BZM z5k_0DhRP~kE8pNVX-pD|hQ zvx_zk4CQNVDt5#E+P058hQgulgO|RZbm?~XYDjwLyRAtdiVOv-*&A3DqMMkhyrAlXdW@WBwNxH!<7mZGP!myMVr#Ji})?J;1I5Q#!kG(*E$|@SyGO z9<`mF9V@}F!Y+W1UBP6sjvn(=uvTiK+4ax&wL0ls>E*7Yp+ z+B|ICRJ{cE>0uqiFYe;yLcWu}E(RTvw!6FCo;-Lj3n0?Oh2x_GKZS6RNf7;oqa*rZ zb#;8gE0eqz4RKy>Xbei)QOPEwxhSg(ASoCH7Ou_w9`&pNVA%x z5{7;6u*Wkx&O+%YNx&|Bw@#nqbDK5#r^TwP`E!L%6XZ=_O?`HW+*RU0)Xo4%Od%?WhxPmhszWKO$kkbdHy1z*}It*j@u7N(nCa-%*J9b%7x z>CbQb4thpDsMvVr$mg*MP*^2*rb(&dLq3b994aq7y8%F0RcFyZi`ubajzdZxrJc_R zMDiLhY&SqDtKJVhuHrZ1)qAgb!#VP19%ULk2moOO5BRITI(8YK2T}&jUu`V7Gr3xD z98qmrS*%4y(_FA|qkwsw9K2Y-w*Zvm6E}6_^LHR3|DN2v-;N(`xAUF7*0vAYIx@4Y z&7kZ;e>2DzB91A03@({eCo-EMyMGwkMSp2S{5WJ%@`}6t9YK4ltqz^k8S{`M zf683pPwb`i;|i&&%>hL{z;pxOV(c-pLW$H7zDdtX`UGh(#vRcgu7M9;=`-w`!qDhcue}3KHUnVaOc%~!KABQ4YPh#UzNNpToFO`m-L5M-KtH6sdh(0q(SBhIS9o*SinL!AkzlLyK>yX| zt`%bhLSA)kak)+<@oiY&G*H&INck z0?hgo`XuQY83y0;DeWd+8>TGTa?52?#g*XPk!9xSFH;ubGKWWC+eZ5aPm~eDaVEq2 z2`bcKyR_FUz6A^)}o=_H{%hn zeO#eTJr&fa9@5XvLX65E5JtIv9dQ2b27tOg8)C}{GdwP@#gXw$*6OwK?O^*s+uwdd-@e!$-MiP`d+Xh{jEp?@!kzZU8*j94eC^G)w!E4X zes12p!I*Ekz4g{xZU11Wt*xzPla4aAzl%-A29;dfC)GFh?N^OgC!IqQC*zd4$U4sl z2m7FPf-itxlSg~#oR{ft05Gp@i;biITE-rz*Y?*2$EW)1inQ?bvlf=SpC&B=qxh>X z3M+O&xTGuF!N(15l1RGI_k^C>?Md;*_(l8{7b~4N(T@6pfPMYaQu!;Mw5hF8zl51j zvq1N$HimTl4f_jE&$BN+I^GZd+7i=!%Xl8TIY%pw+VRmJm@ZDSKfsnYneW5so>7N! z`5c^ns?Fxvki5OUbheeR>+FE-3rNBqc-xm%xvDoCj>Fd~=O#kqbC1krp0Cr^c*oI2 z>%yU6f9r3S);9pmYzgvS@T<;|_iA`H?)S@6Z7uonC;a3a^>qF51WnV-o97v<(!at> z8Wz3ATu@*LTcCe~DC1y#6IU53kgz1lYw&dTpq{K?Vtd6n|eI1WzP zPvfOqdhTx0GG|Jj@WuX{XXQb;%si2{<-AUM4(`FF_mk2j_!bVQar_~zESfr@jsBs2tee% zUGV|xq4Se#l0^jinkRhZTJ_ZP7*|`*NSjk{l`C541^}-Y(*tbXA=vG;lOx~$cLTry ze)C~A^dB77;{V>hn^E!m8I$Vw#hyUyVGG8b`M_b!>F9qdbK!SnKiKzj#rt5SA4SNR zxChe(TmC5R*2i-`K>6-^G(z$Y-IC4(`D;jD;5giP$0?K#9@0`hjGR{HG1nmf#Or*D z-6l%%gGl<^q;qlXVLPr=6y6NHGPY>7{!#rD>EtnWE`G_SP6Yo>o4PL;S;tFz0=e_L z^j-c-lS0l5^yD5gq)mZs`zY^RRN@0G0kV%*z?HC;N;j*Fmo55p8LvY|Kg{3`a~i?+GbPD3;~1pb{OVyAyhz_Y$KEc zTnfB`?o}seJX^VV=X?L;tHG5_#^B-LUHBaol&zc^p%v0%QX(gDO2C4Hgov=9TvLQk z#;R8ePB-AGvrp@v1@Pzr%^WjWcn0O72!VA;_+=!TaU^#)ahP@a(2TX%R}6-;6HnEPa?I#NGD zJq;JFi}*#-{pRPa4b%}!Ew26k%=WrzLV5r*av*Qc7DNALCx0S+d($v zyc~=qKVgO^S9$0_$GlTF!GMF4jzYAo6F~0p-`;tgJRYg~{PQoi+qa%?Z+!L5Os3zs zejRLGTtCWW*a^;9cI`t$+o(gs5Fc7G$Y{$s0pRuT`T6!8Kk%dAwBB~MA5#Z|FDI54 zmK(lwoQ`+z-)nEZ_prVB;6c0pXa^h~W-{J^-dk^crTy-|{EhY^1HJG4@N4ZF<+dL_ zK*>+q<_ZHfbX^;84o;C}aPbWwC|5MW)dVXA zXEtsEjTR@ThzvAS*tBb;E%GnUoU#+zTkGJjJScrizdXkvus@GCt_*Gf&|X<*40>?v zoeTHIJ%t~^H1imze3>WtsV8_uw$(8QbT0m!#U?vBKJt$G$2ajD^o}45e^uw`+^SAH zoXeem1oUFSG`|}PGBEDALY#E&+C8sDf7?^=)yu2(sc&h6R52Ym(Hl2gB))?6o1S|v z(ZPgE{VEv6=b>xXKZ5jeVsO-NKJ`u92Ym#ddS*Mo#i#Hrx?G0DM<=R$o@sZPGU1V* zdc;YZDSgBKTkoiIIDm$ltgsI%_@hm;pgkpzLHtS0eVlviCu?LuyGC&HQs_bNHfv)K zX%FxcPd=|Mz=&#wOT=9vvM+U0R z2Jv!1MqlIz{Z}8g!NOfw@*9wDrt?icorCkXzPZs>Hdt``iEdUulPTzs&gZZacsR9x-@WTUyUi>>hi*FuxMr&`uN` z#l8BX>UX3^@l;)?(Az1ybv9{_y|VdgL&L6$KTXHc!QZNnm94Ng zzttwUPDJWYoim!-mf@q6ZS-o}l5f`U&{ow#eyhLgH-k!_rF-&F);>m`#9ipd+fT@I z2E?_sn-pJFWTk1)#FuUoy( z&*mXq#`Ly!JrcHQMfYdw?YNbVa`A?~SNPZ-?T^}ImW4&XizW|=o2=sot+mPW^|r9O z)fQK7fZ8fK={FpxI+;ir^%l0Z^Mi8+wuf!^$$mTBKWTe=b;Q4usJ`{Rwqn2F;_ESe zGqxRnv+T0?YJcd2rF)E1^_vF!UndrF6n26BcKj6w{QG-H(ETJwBkBX6p6+-2@eq%FBcsMB>XrDY2hzt) 
z1IhxHuT#WRLl(eyCH5@?Rf4#g->YWRX!X+x&k-W71SCw><=E%3z^R{_tG1i6;T4hya&gUE#2VUCKs$aVxT(wnT)@EOA+wo(L@sY5b`dkW8`X1!?9A4K1 zQ!Ng~&i8tK)6^rQCyT40$;pv#V5*1Gfcm8wa_NM3hnG{IN5oOE;_GtB^(%1bq|{o-c)@&q4~$g2yMjt?BKZLF@)myA=Uc6axHz1&vU zY9ZRX50Ojj_e~RRts5uwf24yy^VxT(a;Kiwk#f~;0Zsr4&jlfMLS4}|dN7Q`PjOSO)L(`Q$d?;`7uzaw zag3e)#v5O2?|uEP1~b~(lbv>Uf4kj6S669=^1t952Q+aZLXTd%mj!J66oW@ui|6QX z{3N1&x^bSc;zX3bV;1>==QzXtYoSs6X5=2-P`_lQ^n|=l#tT#VoqijW@^mqJI}zUT zsJ(Wf*?P7Ame1!>$Ui2=(@uguo~JA+&iWFW$W*^$|DYL2+dfQB1$mFYs;32`@Gqa+ zWDq8lU6l67ROnjm@bm{K(`{F^NyI_g{$2d-Lk~`|&BzRVDJKqCjx7zs2OkNls!1*b!bBObV0qb4G=1s8?E7&Ua?&KJq zASYc>{J}Z+F8;J0-pCIPtol-D*#_&#+!=6Arn}%XDZ2-L_4c!4zWLst?nfW&C&Y7! z`n9_%i1TVE2UBbx<<}?4=gE`%2lThQJ8g%)`0k_ou|pqz?Zx(&|LR|BKlbB4)Ye|S z)*wb}C%f%z30%>E)9H>>gqGAr|FS5Jfn|U135nT^L)*k(K8b&L=qDZ>1494IzbM1D z*Q<*1enCD`CT#0LJs5JKY&{iZV!|`m_ges;<^Oi!{qj9i|NDh!@f_g~U4Mu3ZQ(t8 z9r^!ouu%+rx+V+xE_*$h#Z*mhcOvhx6GKpdCwn=#-!A@Xd7P zRz0qTuv)lRx6+Rg@5w4|X!6MIg$ey&?3jHTas)lh6FA+cwBFb#?ybu{Rzi z2=ZVr@QubsYBvR%ZeGh7+|;NqcXWb}MgL|SU9=js7q%x8jn8R0HWP+6jAj4})x-ZW z&G}dR8Fg!UX_a#2Q#lS0CqHwgZfRoO9Hl)KkK>cQ_(}So(R;sVugW zAWaeneXPSEjl*A+TAigCKC6p^@8V&9;&mo&$W!Ri-+)(YqQA&c~SN>7nVA@p}mdmqtGSB`ftIe!zrIX4~(hb&WY^SN!&C&n`e`L|1-`I#HS!>a; z(hS27b-tbzgs7`1%||c4KwTI|K{yv_9L}H~0?RGk@x{b93DfxjH6_Y>;lh zke7C-LmHc2_FaX7bvnw1ZqrlusH4**Z6w%cWg+-Lw_4<8{6_t?)>moCt9=ZDl(Ej( z&%!NsM85QTphDu+8=n*OmA$7-VrKn10l4Y+kY(?OZ8L-&SKB~6`tv!pEV%V{N?*&4 zx=A;r%xE_k$5z%IowqpyKiX}&dG()%oW(YwQ^re^(2k!kw`iK8uSBVXWSZ4id5Q0H zfFmBR=UKds8+FQ4VXKo_|FoSv=fni^bb+E+)sfq-V?LJ*6s++x&hd%)4CWhlA!%IA zXRu$$)%?8=w*Jzm%4eKEi1O*VFaFCv_bH50HUOA7-l!a*fb&CXePjEni7H#&-yj`8 zr)~_OrN&J$WM2K*p^7ZGnFOl0r5PMM@zj)z6=^Ak8`3Eu zNf98Q?N}+J0EBT0EBVo&O6O|YaJ2I6a+2MlogFaXUG28c&Q*;8X`yKjB1eeTyk)xPUHKhi$(+?_Vh0A%X%n>aBONhuBpP)$}sNd6;jX{!=4 ze_ox0i!YHsbk>C_c~v_$|E%Box40yrca%Ss);G{jh&`f z7lKEP{?=@E|U2Ab2003QsG~=wt_fy;i=mE8-v=W!*{4l6mw?`;#)l zsx}DQtL;|aeNNfbS9pzJ$E(iR)`C;Araex1^xpxj_x?g#;3=mysp^6h_6tYV@}-Bi zsceC|Exa5q55~>#f)dsf$BE||EN!e8eu?k; zmkBk)sQ;|n`woAmm%VP~tel-hCttx+D7zk~&ca(7IXN3AsN+3g8wQT@rJjYC+oWCQ zrF}q8`R*hY{-fQ+uacoEn|S1W;BJTvW^j_nHjdRE3*Yq(R^gQ#s28b|XW*3MZFRDie$9m?-;_Rau?G4nx;dJv%cv;YXAIjvCi{uaUy4U2qy<`0P2x!sSYBDV zdIG@j7eZro4Vo0)DI9xNbqxN^=-P3!9{X;uJ-TEQq$<6xquhXFh&-LSd4u-M;bTQ~ ztIx4q>1*}-+6J%M0Q&(?0&sFUWt9mUO$^L0y_$u-Ks4g2ZOQ_26Pb`M#L?#jM+Y#4 zYCDrqF@pIn)4^es)47Lk>F){2H)Ko#{XXHc%96K|TLk2=)Lip~kXUa|8 z=m?)Zb~y_!6g6#rVFjF4+x(IX|JPbu+MtVCr|qi%P_E&Tygx*TJOUBl$c4eakFugd;<6t|nw)E{*^lm5Wcr}v0e z2k%Z^9vHTT z&#Th`gr_WIQGqmrbz0Um>oMIR?o!)eyyfL(PT-166-`}8cfwUWE}Y1yfi_s?f__`M z*Um+TJ)JFe_WH$y=YdDcz~{X24l=gYG@^5zpFhH{b_@kfZ8i4aG6wk^Sjshj^>yYl z;iv4Q)5vr_0fG;j&kX3BfUj+}oFVPQo=|;TMz$QsmL8VPb0eP{(d7%hRcuH4DE_ux}#E!0@F7dL2h z4Be#FIrLQ@v+=M8U20!M$mgS~Q^npAPku^=dy>iH`<S@=%o6kMANM;Ew07})TaWlb@0Zc$)&q?VFR1uqPz=y*6GOs z6L`JsNfnMmsAsZx4gG?Sj>GJ!`ccdC;N*48gZZXX+JIE6H~L=l&RxwP26l9lxUm5v z8!J=-VynE8>xM1-Z1TFP$$NQ}evW(9YI?>ve8Ej<=%gj6U!B^gkKX0Fbz`ahiC_4W z?I(ZcN89qv)wZ#e=G_2EM>y+uhmk^_4C-E)Guo>|edA8|w3j*GJNRMtAJP;_nsW;+Y?9 zNq(`0GHQ((s?6%`^l$15zy1PtunbU97bB=Xld@PvxkvZ_rWy$ho&&(!f(de8J8Q+w z(#m@Kqkrs&+UuYAK~Thw(6??gW*#quX>-b+6i;RgusTNl&bDUGgtlJv4LC)m3QxDU zl?lluziH@8J|<%Og|^y3-E-EA9spvwhrUOCAiwwvSkLZweAPs=?6rw z@Gx-A7)u$g_~JYKl{RD!9HXqppniI}0zbSpfM@FCr)(Gmr*G#ei{U?+L!vJ4hRe-1 zUDokmMxOKt=uqv$$3e@b2kd|wC}bq}xuzT{UfgD>#47rqRtJXaBEQ~DO3sE^#I&&LIi z4G8UkLydK%eZr4y6iVr*VZ-oOs?Q;@)7ExVEMd%#1%^Breh2xphD({uxf!f~dT5Qe5#i!>!|LcG1Q0fFQNJ7p|3*rYBxR(=dW>59mk(yYo*yb{^Qlk>}ws%$!pz^TM;j z>rr>XqYEd10_kpw9iio+*+`EuM>x~2b9d1Ah6-`C zOSw*2kQ9#$G&}#|_<@I=v2i5}9gkQ9;5ksWt}}#JnoKb$nPewkLWQ2fFGbhb8)`!W 
zH_AwET&O|tMM#;Vy}_6Elo0?2vd!9L-Ldwgo$}Bt&mp}qOdr>t?k1KFLYOuxdLi}7 z%FO~L>PCRdh~MDN#)RUHK|YQk*{maSLcLCOB`&-yx|uF5lh0@BmCob4uh!+RWF^jE zZ15&&nh8yHqS8FFFmzQm;oI@ae$v&E)#b(Z>MO6c=U)Df_W03uJ9zTAJ$m$rTu!81 zFn~G)F9$?>ZL-QBK|W>RG7lVh3EZXC549itnZMeuf9PWvy7{)ZzuWeA_uGmSO|<)% z0hPz_IFL~M-+lb3y?O6J`{uiEx1EFi_Rd>x$2ocQ&U@{vzx(<2@SAV7S6})-`|wM5 z7z`e_H5{FblY@4Ca>(EWSwn!7^K42|?r3+^O8#Ic>VCN`cx0pb;h#r8IYp#Gmh5xu z02CT(kfo8|uJsKSWy`78@CGMtxO(y$kB*TbPW+wtu|Q9zVR-?mm9lzW9Y- z!B)Q6{_p?S|3mx8>o2$a-~6?9cJTE!aZ>g87>CZ!b=be~gN80#x^QAsee*duiF;&J zK8UD13)cL|W2fU?_ypeSLt<=q<-Zz)c}bfJ1D=b(^3Sqv+|0F~Xd>kemt_n7tgq>E z>_!8{L);S3r>gU5yJB2;4jhAUe5}5ibWw-zmT|!8M7eL$crWif*{Ez#Em))sGO)DR z^`z_DW&d66@b?tnvmM`;bQwvd>;1lmDqnhR7-gh$l>?WQGadZn1N40H)xI?(c2t^j zgc%Z70%`TeYtdMl5sx!$jB8K|Excv8i+wz#$hZuF&#zWu#i zn?(DY^5#3;J#43Y4{~I$Z^EA(`b~5f{`{`;desNLrN3>oZHvoXp}89hogAFvzsVOD z=gngXuJ-IOcx(=cU!QcI`KyOuQ2$JKp3SQha`t~yWHaT=4a<(Fhs`73&982{sb}Zt zG>cnVc)+H3T7jE#J;A`Yn|AlM+urtG+w)ESNB43X(Ua}_*)*6V`teK7PWpl$eqiD0 zXMn(qn1y9*0dc_#e!H-%4ID>1@&rGQ$*tU$+~t!Nq$#H(#Q);w_t-o+BR(Z|DQ_|X z#l>u@;8%7^@s2&Bf0xGTD;F%`N?uyUGaG9^(?ll_vTw!s)c<&7v{Hk{a(Jbwa%fF z)RO!@`^$n@Yy~vIKgl9l#x?WRMvB=E>EN4uF2;@Y;XgaEqiIsNg0FH%x*>88jeV{< zp%U_HJh?jEi&n--M>h@VN2*)eisYr7eWCbnY`Ose-G$D(`)*Px|8ekN98x#FW6FYx zG}yX!llI~7!m}J(Z+%@i)fLCAkzeZquf^q+lv7W1+ZV_SI#)uq$&)p#f6(Lf)Q!9) z;{~tydf{6#)SV3g@F;rDvu_3_Z%PyBdvjtNUu557HiOi1kV@c`I?` zue5P&sBR9qB72_QY~sTB{+=gU9Mv2JZM0sG1@)v0<=RgNz4X#c8EHP;d(=MlsZX_E z{k2cG^|j^p;;rY}26-0H+quWPZT)E4o?BUKTj=i!v|k3Zya5W_bNZ96t{_F5WuWx8 z?UGFL(GIQIS?>q6tJ)1|a0!~+2hY#Jq=XnMCc;^Nj)8x11GbwGI;1@rFHX4*mB9c$sJ=G!` z)r10`Ss15?Fzg?MyXLp1sn5?sTJAJ{3}Zq3wPSR}xf*rh7~SwQHoJRn_&RC_$Mg}5 z`FxAZO%v&_w0(U7kg9SuNLxR#Hx{E@Ohfdu>1e`u&bToK&fE~e_9Lyq)q8`_(Fx?2 zJLra0&RDm~D4%{3Li-SYQolapcuB=z8>LY;qES8@iu03}{#M#Vys%qqz+T0sJa_v} zy9TeqH{^9$c_XXeGGCrs56p~vVzY|&)o3`Q4@12Ej!}-UqIcX+;PsN9?NesD-z<75 zPyU=h!4{)Mx;r$ zXV7&7vV^vVt7*!yZAt*QTuApw^vb7=C^@R|G+ZGh^s?06ukROzqJNt(aC$b(>iKrz z{qp@Q*5BvPX8HSg`d_{M%-3q{Tnwc>&o}d3`~tgOKN{N1c)b?_s>-$2pI8 zZ`g^4_uG44|3*Ihc(Q-eZe73Lj<&bk?|lAu+v9uN_$V7WF~bGxyAC7VmI8`hv;tx7c&veuZc;Yrr6L1H}q;VMtZ&xo_V|KmkplgW3aE1 z4%U-5LTwjxH19ac)sT%!q_{W`{f;h^mwqt*bn=yc+s_)*OJC@56YLm2xf#@X!N5<) z>KILXjb23;@ZKk@MR)oxX{7 zdPJ?$vuru_q5@fT>@+W#Eck(qprpKfrfG!-fcPm_!n5ocQ~Oh%YhwX$l-C{~`2%L~ z*S3f+<)q1IlfMP%m($CHOF1cc6({vHGS0JW7(fDE*@SvA#Qde3FsxkNS8C{Ixs2STN=u zt$T#DjZo_`m+`Y8OcBH{N!~7}hCI_g%RO6kz)gV92?|oUKu~wU-+Y-1h`f_FJ;4V0 z34r}}dgum#ZN^zgj2rMHkLsm?{lgRN8Ta^`hX*BBQ@_OzET|2y<*9rE4=r)zUFhNt zzRFzJJ6~R{Pa{?X5z=U&K`={|DHzUw`0N~# zSL2+M$N-Z-Y`^>`Ra=&)z){krnHvC7R{HgPJy+UjbZ6s#%ZIMYB<@*9!| zoPd^g`z&2eAHS-jhfcHD9qSBS(kWwGuIBgN;B(m*>o55;?P@)QPtSekpZ&>C(TYz< zc!LY__QT0+g~WNUp^rk~P#6Lj23b*ly&*abm4;1=!%_y-0kF2$-3%V_=m0Q_EhvED z!jZ8`++haE03D7{{iLVqqajxtlwDqS=$ZWJKkGg_MVN6G9c02D(+$T6xVy>gh$aS_ z7)cesP6*Jrl~WLWIsvOBcpy18GXf#hA^qKfDSY{hB36e|ZK?_TDyJxGToh&}FcN&W zzDRDxRr|Ps4z{rZ0;E9Fp-LV*8J($eyntLRNF6?px1sa4ze+pb`6wi;iRBeA4M6Y= zZr0V?4fDbWHD|^XZo2|xLXi4(D1FN!N+XU@L~$*+QS_7tGL=tNHh3<)9Q_$ZRQWNC z4(!ixQ0z>M*J~i8bLsRN6TjmP7KK-u4tj`3+LMKLVHYpRl*T8`Kp84bd4u#%T3}he z_!cKbZFVBeh;uq@HwTy|jtY6gpbzy#uJagpIbSjX+!ii(uvdM7nxLZ_Ie|9vufEOL z6SF{VTG93LXC5@m307%Jo@bOkao#&|>-P<#pZvygiR0(-qE6ad{`&Q6?X?fT-ZpRC zYG3^PSK7hu!{Fs0-?vyiO2@YmGeHOmok(8jgNX|lFTiVU^CRuYf9`Lz$<3G1n1#0M zQN2!hP8Q;zliGHP^DT-fDNyWbg>j=mK^#ySL!4@T^AJb^v7hV4Ddki3}A4V5Wi! 
zYe3L;UFwzo0GYDwxzzhE7Em6u2!}jV7jQu1S%BaQovaJC4f%WzvS_;-dy?o*KFcq` zOH1jeEC{!gsH%q&(*KbuWy5E6j~hLz?q&l6znsbk{MhzgeCxS7N9-Jz5T6W=lUK$t zf;(kZCkrFW9T;Y9n8AjCt1r^2*99&>4nBm~28AZ@!Ua2eO52n}8Ryw?li&e8%GQ+Q zF8<*TQL;Ek-37nhC4Y7MTKc8)sr`BM^x>@S?Hxo<2Y>K0Z)s5*VHVoPhMx>rZhrUQ zy6q=quHU#8{QEY}1?TER(`iSkuaY-qO8XSJU>-SuCTG>|ozXv>f#1<&f=(N@t*ucz zN6sd+9S%Ihl{Or%w(HNmr~UAceqVd(`Ip;&`P2W;wu6oDVl@{O`CV6c_Pa}MbA271 zxf&go$%^P*KjE=PTSdOm)01S*xcu(w8hEScN91L{o8m+-^!i3Fgd+Yaw$fQWEF<8d z@%n-5UN-#%s0*%Ul;25x6RwUNr&?|Q?2BJ)KluIM*M@g)xAVhC@O1``_Sx!l07^^x zlx7y0QCIPfUnZv_+~#I|@IgGlu*cVpLOv#a?+o{DduDoG(m*C+clpjmI+J!qx`f-g z%XjWRb7j2k0!IJbUEcDqgO95*N@>{pi)$ahuV?>qS6N%Gs=o!pXWM`Z>+erry|ZRk zA-zmvIJZUWbw|6r`!DZYkiUN>cK56&W$-bT(>B5vUrSG=)JE?gw96c;#me^6l;^bJ zXffmnnWa7Ola+oJK;3s?OQ>J;KlOwV;3CJ$1>mvC*XYv@=qqZqTgR5s#g)!1SA0en zd8$>$iYQZxuHNKTTXGRQ%DUnr##)r6KGEJohgu%~>>ViMDTDQyOWmgJBA!JP+hTN> zvaGFyjAntKFu^0FQkLl>=!9B&I)n!h*}i_E8Js!JHJ;+3&f37ZpS)iX7I zQEepHb@e5-1R11i@vPiPzl@b|r4K+@Z8Xv@s!5Nki%A+BqIYpcXZ2X6(|+)7xy(ve zbypm#Ji0n9!2AX3A9(~-Uc><^8&vp{#s;!?!JEq<^Fm&Pm$>0H>3OX3iHmSd&x>JD zOq-*;y1S2E{DR|E&iCK*lKR}GW2LF9UtFoXgexuuSDPbAz^B5I31lo~bqt>VZ^6=W z59;J*Up)Sm&T^;#I_Uwt7`T3piPl{Q_QLzj6eGP51$KULelBG1_N!U7s;?T2E! zP!9VNZcq}MCy|^lamT*ao^)6FwOiO8*n&@Xp3v5>wktPoMknm=?WK(3tvypb2sJO9 z?}a=5T+$$}qnlS?JY8RK@=({O)0JNby-zRCJMe5{y9FePJW(NHNa-{lT#kaFWpUZ>IRBAa!m1&wCG1q zzSSnR-bp(3S+$$?B{~}Wo_a1UgRg zPzSQGPi)CM_{)FpF2DdRGDdwTem*^)wyyHaJhuD}@0}Nu7Q_UUbJ3pa;9ObihxAD< z)){4s8@S20j5QJ`Iw|vv26`)b)|C!|j?gx|KKBHGpM2jJ0DVv3nVjt$`RCmRM1-Ow z1LzW9T#j&(;VR4Wk0L`E^^Jb-gi!{Th}r?9bMMZ5?-0dFC6BYvnDTjz%Z^aK*{?to zCIO)>mxdFC;U$w(+?`Wggdq~aBct+-|5Vs>;wx0d0}rcf2v{#ERtaJeJ-cd6C(^iH zx68yFCk`BoWv8b)z!-AX`sZ1e1Udft()R|3jfzNUjAkP5e2xL9si(1m!ABe?W~?A- z!?`j*ZGQT>5P-bG8{Sjf%rUg54B(#f0I_&xnCQqK;#=s1b0tj~lt#U1OalN+p`S1+ zTG1!(=80#?O!426RbD=Ic@SR8KhYD*@+7UYc21>F^KKv&p0~tKnYg5(6BP1T+L6w7 zAkX~7GrY!6^o8pw2=O_0zbWkAyVVx{EDw25Olk!`^xd#WlgfBeG-OFGWPpqm) zi9^S_CfHK2Br{iWmap#QzThwM+`T$;;L>g0)`h_u{z{~a0&2I8#{e%lL5(uKe)C>? 
z`F$T~ANlrAweSDopJ+euIZ+MecKQHXnWVkKHILn@a{IadL26gIn0r*Dk5ac zUHvB$l)D|?X*R`PM$D3_^eYaO+KZac$5;;O9Z6wsInS7SDT;i>Cs*~l3vg!b+ji)TMeDXzE z$OO1FN#deYE~|sOk9&4VNCWbT+@GrdBm@7H9$Yehcs!T>!f5K{ zq=Tz7W$qo`;^ZxQng?|j0!_&r^pUv*FFG#L+okQ&>Ef4q&J#%_P#Q&FfsqM0I~~F& zb`~P2>xC;#NF-i;<6eW*z5c@mW%IY5Kqt})s|zg`94k*-3d`WV=UL>4I2uaTS#t2+ zy4jbi@|JWaFg_R~1MmUbX|TYS6c55i=fuxEkP|dO>VkFMlO>MNR;eHE^tZk&v?J;V zHK0wXdsB)wBkc@bHHU6f?nemV6xn|B(c|{|TMsa@p5&;|IdyGyZQM4tueYr$H`<+h zFSOCdR$Cdl;&O=ml74cMqm^99;z<@W4bst8yZz$JZJ+%1XJn`WOqzyad%C#RPSA&b zU33l}$K>IJf$gF4o?ULxLMM+Rr^v9xDjIWzzeU>wNUPKlAEB!{$$H~Om=v z%NU4lzpbxM+Gjrca(n09&9-{B+lJ?pHl04oAitkOlBTAXKj8~^bwPM)uDO>yThDk6 zzP4q?6~D+Su`(H$_u_UUcTUk3Z)2wZ+13X}xr*w)%0oEye(6FJFVX>mXY>aCsf$mA z7vL@~5&2(nB9o;nq6!urAVXdeDuaVH4~gn~2Ul0m!tUr~K2SU5Fr{@ft}B=Lb@9Td z4wnM#vtg0+xE}9{*M}F(GNAKR@AX1)T>Pc}Q%e$8ehQv@{Li#eX$L~@G5Y!?WcDsl zXtyUb+Njlb>V({E)|L=b7t}3S!6;8!)IJ}ct(YD^8a;#FK%BFJ$mqZJDcreKZ65!t)-Jgd8iW(m@km;(h~Kl>-41l@DN(4 zM8H~I(+^OVZpL4JRk1?HT(ZS~rXwsYswfHfrRTr$k__dQ3%G!7C&&(vc=g|aF5TSo z2E5P%$2?=k#AaHh)2tpazhnU1-sUU#{F#@yqx*oSXhl{_Z!2?zR3Mb|>`HO$#02*&?ZMDs-TlA$IK(H>XfG5_7Hcg0A?PWiJlYv?C zR41y7Q!WIn25xz$818D5KDMK2uPrOMsn>1$%-cMa$2i{Ir5YU#?G4L-82LKLZXL7# zs7>!KKf{FUzLBZ;ePZi@ixT9aeKmLYqj$*169B9y4zkSQwYG2SAar#?P5c~;kvr|%vX|Ut zFre>Y9m@bDcln^RZU8>ILQi43A;A27R_-l>b=FB2J-o5mzUA9K*}n8^Uuys1U;dBU zZ@&6l?S`KNgOB%~eI8x6)PC;gf3e-Vb)#Lob|ZQE{qppK$j8qwWEY778E2b@J*VA{ zUvQR>l0~@?3uU#0HGEjG{-1aPzzMd(Y8&CVOnb2e{3UP|_W}*gpn1d@fI`XXaMnqA zMgJ$CL~}P6`Dvd|JcS7Uo)4DW1iNdrz0p?2Ywasvd9@9%jB8L2TWNKm&Osh2pOZ({ zLhFNm(cwIccE(ZQc?KJmb?XHI;k9Uy)?-C2mo9dcci^Ocix@6B`0kH#MgLNwezMf~ zsZ-pA)nDSLESPutdBXubLVjJqKwT2oNC|XM?)x^Ns;_x*i@{X&Lzxy2KVMaKqx6s@ z4}LyNY5q#5@XAY%hr9O*% zu|8-wZ(K_~KFRKNbe#*`p?h{HLm=rbE}p&;eJ)?pKN9!iQ%wSERu4$)qB>#F&)$DX z`r}L(oFQ|Lr_Il%ZU52hv>`j~gYSJ$`@*02Q#qRU7k=p%u;1qygmy8bdd{?-Kw=*{ zgU*!8oTR7ig{Svu>%2+-{MOA|ZF9UHIa?i29(0uYuH?03ruY^TU%1r8o}Y%8w>;tz z2?+me=d)XyA6@OLN95buVSDwpSKFt4^mFZevJb6oi(QOWyVZb#eArHt5`KI4$|Mi^ z8q(FfMqv@O?g|2b3-lGN(Y^-fAZ56Bg~ewB%q%fFXPFHkALXcXmh-Z_PrLqIxOGJx zh1ySr^;ZQ(-hz(w>f-4KmFlgF_j@_ zC0X~VtoI&bTnPlkTTe4+ECr;u!PZ;z&E_G>W;$g{-Lc@Lx4MFMdGTx>d8bm<{pM;1mFc-OXYT*$hWaAaRy;zD@QG@Uvltma=GJ#5_*KXiuiB6N;#myjw~ z8Q-L`uTZCJoQ!sfU)9@A|MIH|q*`F;0-#Jr5MHvG7-X;BcM&1>TI@pcFWDBB{Y&{1 zSto^?@N`te5?S-&dYXsUFUOwstD~E{GAUE^ACd{aC5t=}W*Slir}!D1hq{^EOWd>_ zt1n;eq-~Fj+j=}=cUX3L3X=M%KU43++p?p@DYi=TC>(2oTpeS2o}ZG}vem1tkDshj zW>V<^NL2Z6c)_;FC0~_~cjI5iFvAoV7 z08iZkT>D?Psp0`?3Y4~yxbU3(xvcW0^R!-_{S9?d+FNk&o9$PwUvJm1-E4Dg@}0*! 
zZTreK6UD}Lky1{)6q}>ObeD9~UB*gyWaaIyPuJhe{kHTMVU(T6fZ}IP$&bH^ z(#7kA*a*0ybzM*M*$DBsFNk0I|HctI_kw5K;w72`-gIe=pE$Vw-e;)u(Yn&fr%uSR zZ{cNuk@>`$EF0+E;$Jdh{GxNx2!VX;ujexuq<3+;r$8MY9j4E%PLm$eR2z3=a~*iL9pGTy zTp9pL*4me-O%R7ilQub;<*IB`+eJM52`|k({@4(ypS@jV{~}mIZ%^j1ui-P@Ie1Yg z-Y;M9OWl?YF7}HktNoIkd{6zfO%%`ibhsGU)+4X>uY6YoMG%6RwO8=%{KdxlW!Vd} z!Xx#=tF_mc{@9NIk3ew0zxNAB(**0K=SO|1db)N(x5M+Wdk}2QKJL^gqxpbO_K@kyjgdti=iuN)9499qK1aT^!d_-Px4Xn z@H?M6aJhykP~WI?OPe$z?x%k6M_*p>*t@$6khYN6eK@}%v#ikkvF**`>nP{7LrjI5J(Hn$YkOFDz# zZ@F>|v4bh#s3=(%H9YczM%2QDg-X&`Sw_xXjp>oSim!BaVDH2_Z&@vu`fqu)`R#u3 zTafcEe@gLEH=RT2jEs6>8gJ*~j?jCPr2Q}qQ-3iyKoX{^dwgifRy7x3NW;M!c za_b4Oxz}Z$&`DaEr*tBgzXYlaxMr4R6q2Q{M@8IBN{XOk=2qX{q ztGj6Xq)CcUbt9McuzNS`TqNCbsJD8oeBMm}y*)d_0`9hll-|!>qx<~mx zcyid@q9gI}@qXJsnv=~^TOV&ByW1F6+ii8c(bh(rZFRVT%wsenYf}u&6Ytd1{ey!n zK=ShfF3MS2-)`5Qdl~rP0-mSX1`}wyyo!vE*4qNIP9@2tpESrW*fYy3S-h8BAZsor z&O>t}$OA4bgs#6slMIS+Kc}1mOWh}(lvuL~bAWKR-?ql9ZEMxto(Jv6zWd|t>KL*f z@1S!ZQw|4^YVaYB#yUBYbxl?S)0g-bzCKs&=q}>PCH<`nH)`Ok)@BgL$u>GU2{EDS* zcW&Hiw`t4#p5Fm=XXo+#woV6YFkWkidym_lyVu(6Xt&+IdZqm@{2R~k7Mi)^-p+5`NiEU{3d$YgT$2AD5;u+ zk*vy{wWuZ%q`zhLZo1+y?|uD{Ubz{rzO-ZKKn3?zSHHU>+Hc+QO#9}s->!&v=f6Ca zQ+_{xj(#{gI--+zkU@@>(RN^&XYn_g2N&x{!nBR7lkyn-;mU7qYcbQ}Pu+;+Pr2ob zwsMH#a7dAKJ@UN?E#dk>$pq+@gmuzzj4aeCXRKR@)#z^dil(xarAEK zssx*s`eoiOytNLeEYQ=P7di(NHa0e(?YvEZQAaJ4OVaA%TFWn>V|dWl~a$>8L< zT(xfcD}<0K9_^WDLL8b{_Dn4Do|9K*hpu{P1LS3lO`Zh+mJx#5+)5Ye7P!_0?uc0E zcUsy9`vzGDpEDAO>lB+ZqXBD6?a`A1!cNSv%_*rZ(b3o3+{I!Bq*C;dhbF9*Fmy4L#T5!yQYD}6nU&4J?)Jjq>I6g|o$UOG`x z7x@`IF;b^x+(f=qlJGhG20UEw;XQf`*DqLc0+JlVbJI!ov}M{w0tw%srS=i!P$OmF zR+vLS-%^V<%H}|Cz!y(w`1)(#KtA7WZ@qONUFV>F(R4DwUf?~8bsemvo;qmifU1iJ zqI=O3L+ZBUV$bW~99P|042|a0IoqGZqkU|V#rD$k&xcQ1osN-YzXG@`!56mlg_A4R zPs>y?o_NIflHSrFGLPI%{ag=nr(GZoC$pn0ihKRxC2IzU{^5)9f_36`h+jIDYE&X?SF_hsl-hzwRQP^~rk|=eOm1`CfTkykaM)G2IDn+NYB} z4ddcDi1@CI`A%K(At&JJo{HB!CK#EiNw!`#3fB%?!YrpAm6h;&omFM5zP;_9db=k7 zbGjI9&zXx*)B%>qe28<7&66?V=y=kr2a%EVo4BmUo<5sC6ZL4>BZrYEH+=Z%T6LAW z*m7utIIgYQ%H+N+1F~$}?DxnOYt5UQoDpAdmXAEFSKZQQ8o#;KEqsV%2|Dw|%u_)dyW z_nA83Fd}s(=_`(D>a@O!t3Hcfl^%piTicK1OnUp7!ZLmOX7L8DbTmGM%S1?+Z+yxR zO?7pWY^5=8Nvqr`uhl0n{1PY8Dvqx5DtcQqWy+|<1ANK1WV$P3m%hT`U&2a8Bxun} zIOVTj>4^>oZE^hNc>&Aa`27`^a_ahfmmmEv;XLh3Qt6-_w^W}FmRY2~R~>Zj){SHz zY~lqCY}<8NK!MEk_*Gpjdpmik+YHl^NuEV}>ut&|Ed7ZedI8Vu11lNuZXAE&Cy%_$ zH+~gYG6f7W;d?DgDA*Z~lgId7@Trs2l9l2?@&{C&fp7Y-DOpv3_KRt(8^SL5CBGT( zFcR@ilW@z6r!=kjeV&z8**1-MDHC1x!~p>MASWS<6u7t$*n2)pOiZB*-0;}e+B&i= zbV1g^c}iO>F76J`f+Nyv_YB6PcFhe0H?E|dQ)~up?(xPt@qt4c$536gn0Xr={>iZ? 
z63#MoPrvOhO~s?a`e>K&-~L>2c<%B%gGHrj#{Uz^rN%GMwG zQM$Y6t%Cw-lXb;E`;x|ycJx2#gSd#+&tQ1BE~GC?F|_TZ&*o+=o-Tal=4xzK+ez5F{ScOi@NFV9>6r??Pi|El;ZyV@_?9e#1~>gOAH|h%6bYID@F8jG zYY|nS*+)&>*O&hI?|c6jNO`ZHbm=Rt{{@!k&d;wX=gKZt&{QTYTUBbF7crLT zC=@}wAW+Q~XV6=rPJxWiqkJ!BQ!98NdxEk5T3steg>O0{g zgHM4soq73O{^nBjQ<)o|MxoI;BvLQXHMwsT^C|# zNFVRUQ8;;k&lT57BL$NiPTCs3-W4F@hu&VKEBX@02qBI*syK8;?)>~OauxIKPOk_v zK+mj9;$StYFi_)P!I4+#08p07+Pph{z_;M5qV_#kfq$Fp!ZHcqvoI5p@`aDwl@AJ8 z3f_)C@8XB+X`7A!Man=9$sLz)Fx5?~R>|6BWh2i!ywy)-?(Wwd)wwhPuE+UaZ|5k` zja&EH&AZRGkA3R9+vk4YA8Mcd{2y-L_Jcpx-uv&;p};++$w2@* zE<(;7hf-59Wq^w-lYf*U#=2#&JSsrwr;J*z$WQ9A{IqP!pmNNQy5~KExzdQ|iv>I2 zqB5;IjpC>NI$)sUlvlbKIM6C|z!=E@ZP$rPlw8o#xDt@03n)54L?Ocy2lCVh<7c01 z+ZZ*EzV@~3h&%Gw+M~U;NJq;Z6P9feWq4(Cvu$r~v~|+F@%rm+?{Gf~elS{VyEX*V z<90mtn~1BbCgezoq1@)bq>MW_ZTZWIyXAJlgS4nRuJb=-(-0+&>P5rK0nn07Xczgh zp?45x4ISo0z4hI+(#i=J8y0slVtTk^Fpaoai zo&j_56Q($rG&0INafKmH!hvak)ku~%Hr!M|>vm$6E{Ow{P?Dtn;RpVf3k1>$O3?@d zvHB*Y@<6+TKJY3-1*qI|5M`4kbxP`*f4EQ zxtwJ5o7qmh9G@(<39>gK{z+TQr27=Z#$EaD!aqd5_DT3~@2EZ8J#4#&elH0F&ef*F zaT{U$UA=art#53=3k(@#eR6Wz_Vdk*pLJkmfN@EEP&QA1BQ0`b3T1Rq#14-7LfaxoaB=4^uD|mn zp~Hj7jIt=Lps_m8A;`$59Fl)LIl;IbS@MLGS=$;dwZY# zc0Ac_9-Hc*dYKRMp}6m4SojE|eJlf)z|NJQsz=%IGF3#OobiF_d*r3_@643wuU^Wd z?s+a<{kyxgdCC`cCeK#F_*WQ`UU^stx}W8VOq+{k;mLYg13Q(MaZKajS)YI6n-@+w z7k^#i!o$MFxbcaL&Wyj|`fx807xDWvM)2|C9bEcP8pK~B86q0K2Y=(@q+ikryy6AF zMcdY@Lw37uqb=5X1~%2>T+1UKH`?~im)gqqbD=@{OEdKBlUa^hUpYmN@XPLD!}uq? z%&Sh0LcbVBIimr^i#GHW+u*H-kK4}CLHo5||Bd#4{2PCzZ7;32n>U>JAGGzY^?Wnf zquaCCf*Pyr-MYEm#)HN7!tFcl-~KoMjrQRWy{F9{z0sb$^|kio;TyD(3vFv`M~4oF zy9`$bq(wh~tE;1@N60aD#QDO!bbOH{t_Co$hjT~w^?E$~d@3T&_Cj6SuSzyfZwK@+ ztcLPJSeXDq_c?hOTYs(NFMnAWk-3Y z^(}TMQndkvK~|AxWy)Q`$L-|QBZqAS3cg@6V2fAyF`MKlf91rEr?#aNv3}1vVhk>s zY(o!EXC4cU=3evIIP;(#MJEo&bj;U=IWgku_SH6CAE)ew>uNB1DU;tD8=F~3ke%ts zqn|}`5k}8*VSBV-EnbF)*m@8+w3oN8v@aVO8;B@px}{-cHU7L$R=_^x-8J3BcIMcwv0G9fFdm& z^l(yIyVN5a9b{YuX5@}`W;&fET)WbC4JpIl!3*2E)slB7SV;#bD6_l!$p<`Tt&>?M zu}ra}f#Ys>pY@Yg@@hVrq~0qr`IH`bs*l7^x|PghJ3yCS*5#)(Q`VI^ozL3O)(yX* zF20E?&*7^B9FaK7&(DPx@yML+vhw)!INvXo78}^a zFTeNQ&}6CoXMf{AZU5!p`ES}Y&);jeZrn;0cK~JQ@qW8|=XQJM&hz1&^+vt2W&@9)fdhJMcxas$NEJksstzThqL4t8_%Ae8?z#JDVT2Wzvt(XG7wwL2GLT zH7T?NBy=$&VA>Cp7uKc7xpo>f!@s$Z8h5Px0()t&)J~`zP9}fkJ3ik2#aF)6cK7$& zojZ53ds_L*;-UtrwQFX`zpO8Pi|t?W(gn z{mer?T?}|8q}MK$ueMY_@g6-+dBmmQX}_5sU70ZJ1vE@qtk?D_DHCZ;<2aC$UQVXD zFvc+9i<6UT(iSi{Pjv7TC#5&!x47ym-{sUF>$LWhKL^W=V>M4f@K;}17X4FJ@(`v( zv)txg&*)Si!GpJ=1MlKG91WI$;+Hw@Min& zj;Y=KJ#6>WcHrp&)L91(Z{E7y?%jPRb;Hkjl%A`4t8A+4vrwVSOZpUVQozW^9Q$7S z+OAlymRHZ)V2JLAw#T$V;y0olNjnA|&(Il99^S{CIchJx;~niYpZi?<(HY16n+lIS~e$@95=YSg&no-qP>u^PSFa5q%-ufR@k`?BP?IJ z@I&62Fa_%_l`xSVbUs1IrR~W%`jL32*rxX3JSp(CZ+xkJ+fV#h?15uwuH7on;Beq8 zi)FLqEmxgN&|lU{v!pa;tE_clI{hzQ#kuscrdzg(YpxezDiboTNjK6Yf4#Z~TwwPB zohzB?p1Zp)ZH^$j&^}pb8YgalmHXYGT)Im<`glDcX(ivvE=s0PxrAM$6?*9ppCo+f z5k%2BaTWK%KaOWBTGGjrKK*66vY0f{3!mO)>{{IpJ!cA9amsqC*Mux%zI_0pY24@w5!;N%JGU_F7;F z(;sPcX{(HYf70`_UFa792`@nWPkyy{lTt$NqJ#FI^{J(xEjX5SkU7!k2%<;Tia~Xt2uO!x0yn+@v?YuoC9tjv#5=E^l59B8CY~ov_M= z9MVpfUvFocqR-EI6TL%P@ej^M>0Cj?b$*8Tm+>mEie;VEpMOcGe}(n@s&eucylLWT zLurff*KTpKjx>=cwx_lWX={j=vIw=~sbc5}Muundk)M4X`+}3Hi=58e))sx_@hCR_ zKDNZ>l`T+1Yg4B^HB26+SiQWGrgufB%B%M-IvLjAE1=}zGOh0I?;XF$Pyg!s=tms= zJ0=EP{JX2LD;F86$J~RnSNT_(j?NeJQsp`{ahJTxT~>*cIAq~axwIci)F&){8O?uk z!uth12s5U^>iiz>$UXObIyn)vrwJ3jw5JTwd!3$i96T^9Bj9#2rc41V_rM{RHZ%P8 zp8he{Dp$rOjVG42L9_v}U&+TdC#OH9ZCR#I#5;DUD%-w-n=#DOQwk?XhuIBp{=H7f zf(AS+p*Ca|aMS+SFFrUp#8y3`?}=@yq(X;-BWzjxr}XR6ltbg}e7CHY!F+AE!{O!lo=~_NRTk2xXX;+;a 
z46wE7tCZhr^@Zs#d&;1vISfdV4{_qva)dZhpLsT(xH<)v$z#Ugk@07FgsyKzOF3LpViXV8W8FUu3imJwjNsEN$L=zIUA`;m z1;Z=4>zttcN&B7 zdON4Q4xgSL9<|BNUVC`|Vf*GAueHP3aVBJi=Wc6_0C8ThP&P@>JUSYD(s1D~{hhQi zo(l#nr}C+Ul*AdNTsf(%+$mp{CDK@O$4hb8Qd0)>g1!+S^2+4D&*H*8ojqm4ypWeH zCg}8OBA%}Vr|5;?426|J7eiA1fS-PT21Z*W>B&P=e#v@4Ug~b&vjf2VeHVW4sWOVE#)k0xZmnfE4zbL* z!`l~=8F!^EuDH{cOqDg|gj|lLGilv1CVxGRM*91lg$(dJ0|&I7U{9Dj3fWdKP^Ytt znKI8|LMF~AYmOWj9_bGN{CIyFIodlqZ95M39UQeMdl+*VckcIF0y{_q4XeM1T(vxP?#D!u0lRr8@Ivro9)(%ue62nc6&UT zwZl2|M$Q+gmmZP4fV_*log>@Zv_Tkt)*t0d`EYTKx-h2#Q1-}12we0p=7qrVw>`?=?Rb2wRPGJunD(aUxsI+&vrOANC{AvKbJDpWgRtJn8YQVOWPq5-pSI1BaxzNSZajw6~MqZpo3LA zfb=%pn56?!|M^*m6V~bQRBU4bBb^{z2CFECc_xd1BxlNMz49V;_rJptotbu%$Yta7 zxbsdqDy}ZrgD2tAimUwnPk!1MZ=;*>>b?AoSNEFy$2)b+hvFyyy30I;FV4IPBed#d zitux4SvUdy9{aC7pgC`cEyYFFTcZw)Zny1w@1@P(hu$Sx?MhqUT+a$> zzwLgo_oR(CJU;xmjYrhMrFq*P4clM%-~Dgev(MdW`;YFood>VCgU4@BSLQha&bDZI zRepdQWk05U%y-QQ_gHzG2`6EpWbp_bm5rYNAvwQ(;-dtR%K%Ga8ahd-107$`PGN&_ z3w*=pW#kGTgF<5rHx}8LeRxd_Fdbj}baqr(aIy$gmMjG%pjQ0fbD zk?sn97D}j7wI`sTG@l$DA;TWCP5tHP5!812V=Ezdvq^mh&P6fWFlr{tBA&{vX_c4Q zE|kYM)izRjcOix0)>+#`>zdz=xBg^-y|yy+xBN~lPO%{^*I-CnPCZg?{JesL>DsN* z+Priw+%H;MKb&l+zN$2|92pk?C+$nal{0zbDIVH)_BoAfUpn?4xLZFxrql@x>FLCq z10wFmb)vlbQr1`OpX65yEbS|dM(gdqDNwKi>xyfaP-BMNjf^ zFw%*7fm^pDU(i%Jm%=48U0vtOB)r;|JfmODN6LYRa;aRc4g5x@d?5{KGl52UL9j?b zrp`zL7h|c<(~iI&%aMfABzB^x7HzUy5D@AVaf?neYWFNZ;*@W0)qasb{@Urac!s)< zy{%2`qI8>h1;9-!4k|u*^0@8q@1v`yk!8QT?a`k%ZeDGhv|F}oo?54T*e)l{A@=m% zK^A5yW4CW?whw*qJ?(uTes8;Z`$qFLAQ#Elrn>;&Z~xZdAe0m*$}8^T92AVyo8ab|^>t)fTNE6upK@x1-SMvXez1MygYR#@@~gknzWBv| zLHq;y(_?Ty%m8N=`9fRUgnl8B$mEm>{fPJMGxyqXZPZ*qv*IQY!$lOl#V_q=Z`(l| z^#~D)5BbL{d`IL`c9=_>1%?DVd22f_Jtoj|3BBUt|1W;!EA1!#=pSfp^=g|Rz5(r! z4eJ7FOOHvPlIze3+L%E-7u}Sj5L8>ju(u5uni`D_|>0Jx}U z7nPmhCG{LR;jSJ>zE6*7|7=TL0B|~O3)D-S>_7^=>)EvFS8$o`7#po*f>8c)A`k6< zHdvrr$}S}p0MZW9w??{Oe*y7+I`p#=-vw`*&@;@ApB2zAYRW5VN z5AoI8>OsT86%?MlI023)=xBG;0!{z|$FVW>Zs7wxI$Eg{dY&o+OSFg4b-<173)!qY z7Sp=n*pPYkwp12VYS%#X{5s*%E)Yj4Xx$GUKCRB=*|^x@BtL_jKc3IXvyZNCjX&^gJhR~z8X*^DLrW9$D0;}_MC32L zw5^eU&^~-2er!Jgg|1NDH$S_la=X~?r;o2U$|%A~H+XFbm=RSR?~dzY5Fg&-xq3x~qQ*?O!6}ls#|)wsDo8*pY-- zj`ZKTY(UJ{eAKg9l#b3PMWu}$EsuG(9T5)paNv-qcCEHj=#CzBp_y1@A)8|`0Cz$; z$I|D%lm)APipx(=yU2Mkuq`SbuS^~s?qkP3X@`?T@UtY2xgw|7Zim=@I}f#S_p(^q zaSIo%D0kL*{XAjs2s_OA75i1z<;)3#zxHiTx6;PLZy6jZfrQOfGAVxuv%K6b1HjR; zC9nOx;)f7Gpl`$bi=tD5CD!>`Z>x3SK58(>tX=MiG0p!4XeX8fZoH^L3aOpTGcQc16JC) zCuebw9ql*{@QSntuR^QZjeaAkb9bz$J@bf)lio()34^sHM8r|djMbKj^6z&KD z@ip2N(tImnsRk?4*@~b@aA6dj!ciQEs~>S}be?YbVoW0fdW0Bu3N-)~7XNpRTpD0| z>@i|U!Mpm^vP;+~9UBMI9&`DfZv9mJ3V4)GXJtDOp|Nk{7e&o`ufToQD0CpiW6wOU zG2Z~xP)vgaygqZ;J|-TFZ~ zC{}codm&D_i$*w&p#Zdqbe%sg|4q~71qhtw^=`)8yec}*2P@&FXYl7aixuo_=!e7< z{?Nf&q7(r1Da$9M7f*QG0*eHMSqYTiCqnD-OFD+$-;G-}$-Ex9|VS zKiaPJ%%jq?oL$zXI;|uvP*awIVGE^52i^tcRM-e5me+2KlqlNdSQok zQbGziLVyn!bW|o)i0;~oQVLRColQNL_8LMSmFlsWl&#~DN>1T-W!vDzmz9oo@Brh5 zda-yk&4B4DhLWqOdN)^fL zRa7^qbkp#OFFL0`PD%cYuSzA25qT|A?<|l0P6Vs>{23-4q?-+m<&eHHU==9hDr-&< zY1EnS{Dg~?%9Y=4LK^a%NnhpC30E47wK1}`vDKEhuC%r`Y-jkn+xy|8hwZ`R-K?@Z zIGnagcHb$dGRR3^+i`cHp-;ePHa{ylraVptJHd(?Lcb|??gmpo6%NApcv3}6@KQ-lmthcvWCT+FEOxQnE`*RSj;ER)AkJCNGU_LSrlAR(xMI>I-vuDW)MP+1RES zyV4M19x0hf*>NnMMjp(^ct#P&;LD46lFn+Hqxk7ZhR@)bT2ESsq|cprG#uD2jzF5A zh2QE&X2E?1fQh_>Iqs6rxB27O+5!JugZbpZ$^DbIe=u*4JwhA<=i$zNd%S|WNeB0=y{8?**!w#N?c6%*L<)7(0f2JQfO6GhlW+o>mO<>K zX7C|QZb-}2Pup=x>_6T^d+S@U;4+O&q5dmJX%lRVhM;&EGY03JEXKu6>YCtq{<%%wWze1~{5oeo#EWTyOhv|R=ZKUt@m@0NWAP7UjS@J5ii{gC^+FFW^GkC1 z-N`p$W=-YXl&X3T~)9li-gXD_iyLYa)72y2gpZMYS$?y0? 
zd-Jugw*4n>VK*MN0el_|R4ShIF|BXNA3GUhz8)1w`=-53QJqM!4U`N9N)hEIbvs_f zv%|x?9S8lZeJmbGsFl^kBkPLWPf*cD+P0F;NxV!50W+Q04v)ZsPpQYa6WV@v zS<6c?@wj5Y!M?mQY%8O2+QI90o@*o8!i5#+q+LLMJx=9>S@7gRSe^D*-3wnVi?RW) zz$X2g)DzOVli7}`bwDRxK8lyS^3R|r(vou7_mI9G&sgb|E%LC9#Rjk<)L;%a0J5zu zply(anE-VFD0Ub6H~FRF5c6b)O|sa=qfztQb?O@JGR!lwG*gF&B4daGRfL`| zCQMm!gk2t6hpAnJdlEx*m2tw;PIr;aaGieH86$Aum5hROXagVAyM#qlkPe97xKksmnJHWXMSDfwRIhe0=0YG*v;GD>_4b%S2 zWFPrygNFTti;Ks4S}tvVd7yYnbK{Cb%7ENWW*)O$b<_bNzk%o?5x+Mm&7_wLoTi6| z8R&3=vOj@9+E_OGq1r7@$a}hjb<_7u%z}e>IDjKbO(R@o+&m$5aLXhsvg{|SoRrA! z1z`mDf@j^!wEQu&@+^ZL4wN`(sr=R8zji0|Ut6t{hLDYSzVvdt`@(bW>#u&b z{rNxt?`7f4_LZBNe0B1FgpRm=?MmCex}Cnk_SQxlkf$oa6GR5u5a{PIWgmi@Qfa+f zmDZk;LK=5!SZ}oJvrB_Z+Nvuer{Rur2_3Az$T`7TJb8#FN+I zIdP5WCoL*&S4{q8VJrDdukh1!T%JCVF%JBc7t7v%s$$7S6!1nEso(aaxJSK2X+_`=Lm>{sjAG+49_+)#ZL0Z%EQzngUUOGREWBq-!`>4J2!rgZJUGHuud-nmL z{MFct`9+$f7jUv0KOC=T{Uc6-l;6s26t!W*69=B3{wWxV*M(Jj@6v;3FX3@Habtd# zpzPK}COUq$cg9P>bP3G8_-p*~3u(G5+2H>aDa*g3TWM|GQxW2-p6U9#j+%S%)ZZ(- z>R}=IwEOMvJuL721ypQ-JV~1*;Z0`id{41^SbaL3I+$m}LkG%b*}@>76DfCEoo?)G z+Rk0{=XE-#?K#e(-NZu=2u7N#6Kbpn8e31NM^qEy0xXN7=$ogl7Ge4e%DA8VSzboQ z!7C>q$hioO4|l^%mvV#$6<%d4y7%$asePWh$~OSGcusoZ7F{YI<8_F1I9K==BTy8zeKv&g9cy82hoI`{!hFGWSC4yj+kcH~%d#Lz0w+9FnX6~VMI3TGER@nB|UpbSq$r7maQNvrp3FX zwT)(-zRPpymRrgr?=wC_nD5mWpsl3+vrp#^f;BT7(N%XBm6f z)4!gp+)0pI1zg52`skj9VfqU`#Dl*C^8n7?D?Fh%{p2Bc<+O+xx)JkD__LN_e)p2HqvF-1wDiv9-+@{`(uApq$(C^yT(`$!Bcsl z`X~z`=e_hRy3rq?-Do0?=9;o|S#PXW=@SD$If?Cz98_P$w!*f=c`0|Hb9askx$s$A z-AVtIU#$g%(SbHz(ponps&YL%a}+uu8ANAz3g^fOat z3Tgi1(J=kzHS9?3e;3OhO=k7^*ASvk7fcy>PhUm+DTh1YGe=Au=YFGw33E(C`A&}w z;#EJS=x*AKC&@#pWH`5KM)2XXeq}u8_$-S;J(bE6wmj~CcX!(Mb}5s6X6#YNL?pbr z#r^~oEgmUT;i3EqH>x}f*af_2ej7U4);n(Ff_80N`<5g7ruI>>q1~9VMwm3N&t8t& zcsR;pfQ+Riy>>cfmL~M^vgm+3R>(gK=g3`K-}3R0vU}P0F-<@2rOain$#AJ-{LYCx zlzM&XPq_eKVd519HXBB_TC{>TZX_ z9VOZ>nQ(*2c6K!WSC$=6f}t896w)13E+VMK0E)Zi6*ply@FFe>0|s2eQI7c6_x&Bb z<4*jvS%g{O{+QAfN(tf0#19HkdiHHtHAKLc#u07|GVYml7lHxOg8%&_cH*g^&BG1| z9V=4t?!EGL<&37GE4`|NCd{I78g!7YM!~!TJ;t5cWk(vFnG?TVPLwPjFfL3s9)#n@ zT_xk)NL{(z*=P$AIvOPI&_<))1|a;_kPE1yu}3?(ipbb%Hu;bkZ}pP8qAhmO_O6sz{|YF zST@Q%$p^W8iXtjH%CJrmB^HI^FI;ier*uh34ZPGd7c!^e9awFZcSGcB8R=1Wl+!Xs z9%NDS;5oEWc2&{@M-BqOq`7n#e7SJ-p89LOCb9fik$LRpl$xP|V!R9x+L`fnywWx> zV64MCUwf@B?a$lFY}$5TeXVV~bB0dW!p3G>ymqCng39*5qiJUCYhV3pJ3KyagK;%v z=O|H+V{}E@`i39#c0tFyd3=rZsAF&(Q0?eN-SQ*=1fVrSpB}vJ`HhJu^l=%bkGhCj@ls&@7}=_!;*M3Y#y5&l|lY;Y))S0=RGoq z4smkXyfb}EIqC>{C*$>ZXEu^*c??5exfsQPD?tE0^}~Um>L4n|ln4Cft$CMv^IbfE z1AKR|)6Y&>U5IlEJ*ib$u#f>t;s!?G8PD3HPL*(>sz9k%)@QtkWSG(&hmIC*VEF7k za$7N?dzJNq2n_2qcLGy0)msV%Wg%<$Yxx2NER_f4q|1@G4gzN)*UC$->1f3T17j&l zp=n$4JFe;=zt^X35>Mz&-Yy3^K?Xgl-{bpL4bsWU|6}qz^1J=Wy1U5sk)wThy${cy zAm@+wj@skhNqhL@kay)`hCCd%DKaww#)RUYq3b+?oJP4Vt&N&T3a_lJQ$FPnf9m1l z%81}$TSn$qSBayMhU_{3ksV*lgkem&$R?dJ$~2uFXQ$*Ed_LJfYzx4iDjb9D_R0sp zrR{?En~!$d`CyDVZ$lq-p!Psb7=*__Qs)#*h#^0?d?@1#8rtR(rd~0=vXGg)N*(=T z^Q;8I1yU+Y@~FRT4;&mZ9Dg;9qxRy4Nj+R z@pRT!XunYa-~zwIQ<+qlVm}3$i8l_>(K<5_VP z{?j_O-t!dy=+DH3P+n$EB+D6p?jaF(4d;{}*{l3`hQP54#4&At!S{+S7l`CTc;O*Z z!szN0F$~X1Gcqx7^$8jj*Qcb)m46Ntly_Y5r9687!cRETOV5g9c*-VbAt*Ory+hBW z@9FAeo=68V)gS4x=|>&Ye!?2%4Sjd!0KGbT2xzY5?RN9U57DMxq3k51@+?rFPWJsg zPik>?6*;kzi9h*`U(auC!r%_y7p({6C9jfO^-a7klYhbuT=( zfJAZ-F=+sm$$jLcb`sekgrWqv`rOV`77i$5vEPBEtjRZEWzroQY8Pk6|B2d9f3zXG zCuBqaT)BR?4X<8r8@KMFGRNSB`~;S`cqzZ;Q*9Ck75H+6A;>}rV)>KW{*Ldl3TSI{OpGcad^^qKq;Vl$|I%)M5b31tAh&xp(CZ5z)^$$QZz^sfD&rcBe82|^E)BmB4WHL?O2uUOj171Espu7`qhqc!)P%F=t zCH=2dJ@KI2sU*Hii|U_^=n#*`V_?D>C!Dbh#ZB8NCjm%9-jz?~B9q@di%|vu^heH} 
zw3dg-L#OTn@tzkcH*|@d6Q_7ulbsHLD!<}Z@}n*@lva6dm`^2~(iqRDHSu;%AFL*@ zJ!uC@b9W$)$KxW^18!PKz|WzKdoN6|yaOi}`W8M;t1>LT@bs=eo@f*kBwrEWtF@`?NsFI7O1 z5MC%?ebCMaE%cHUBVl%aPQcSsp6>5GX>hQ;@}76KfA*zcYZG+%yWagWNP(lsUF`0O zFog#-o&t`RgL}qEWD{3*{eg?R(SbodYjJY%BZI-nwQ}X8C{anTUg`5F-4Pv)ttl(b zqx1(5h*lFg#H36)NdZ>_gHC3tQyhdVSUTZ3$dyH45J>r2frc*fR2FP|@>vamE9X$H z`lXh)^nftZ_Y&FEe{e{<10A@GEC135BTelZR9;C3MC+q4t#fie`0<`&P^Al%RlHI# zobaMY!6)M!kpbzeU9Qe^5fAMjb#QZQJ@WiyXNNR?oAfXPe-6rKL4e1m9!=WO-Ygpo z`aYDqgr&POeDlWDc5`Dydp~NUb^5sE>o~;W;XZMV=fu3<^mSse7OY9<=t$l}7U@CR zYl5KK)dJ6zt2lPce_46HfpKD9#RljAMN%{^2^>KJ? zEdfVyQieU5OAuBo>yYvwUvdOF<;ujU?K^(a^ZCAd63R*XPcDAG-#+zYpKpt^U2yW$ zg~Q}ydnvq{Y_D3X&dX*Bob0#mghA$G-idSBE|wR#*q9|5@RCmW;N**#cj?sv`Y3lP z4rycClCK`gPe9%^7zj#z4N$`hUeQa6n9e*aJa=Om#b^5i7855vm#!=Cg)AP{;#ZF^TgUDOqO6n?5Fs;^}fYGw_ysR)#Ov_6>|@RL6!uHSam`2Um-})Va2~ zwq@p5c_jbjM;d9Wmpn3tr_bjv3PfH^)4!M?3{z!{xhpmyXd#=5;XD3C0aPL(Lx; z4ua_xwGoLCc_O{>p>^6E@gqO&lZ@qvKRAhEU;ldk8x|SG#kNO)gehJ{6J^yjr31CC z`@SVncqUdMi1SW)2wPaWq|>>5Mnel++I63kgLnwn0A-N+XxuCm7K8#Fd6SmNm3$|D z+ECDr>~nd#4i-dL9+G-tJJ`0KV%xd^kifzlXTH`SBW5u-{7w0(PcD3Qfvk3lpY}76 zMb^H^f^X`#3zN3DucNQl+vfTfHpjFLuqg%`>!iarLw}|J9)5HM*C8twIRL{8X^;?H zhoAnD8(w%8*Pd@BYxsqiUY6gx;ZnQzhmSJIcPZVcm#D(%pLIsdV;@I2_Thvhz6KVa zB{wALG7260WVi~4UyGy%XkNuN%-4wvxT#xw?k|GN4iy*)&goz9p7CzmkI0z31sDAT zn=on7XI#|d*qrtO%HOs_kt}>| zNA%11kDnSwX6*C$O?qu?cc%OKvN8QaKVjqNriR$&F09qA*B-ZTZhMtJLCQv70{ElR zh(7#!`m^@sSBc}vgzDI+3F`NggM+rWzZbC<_bg0T9-xhS-jfIh+Qz`^@$XNsa}2`8 z)b{iG#c&}9aR3!sM$dQsnb4f8BO-tDh<>bnH8;%7>SPQTnwmUs>sT6_!0R-n^CuX6*pm6k22{`y|lf{q<-q@Y8F^{S!eS;^ci}4U!#nabrl{u z;a>|0;3s#ACRV_w-b)|yQLjZOgP8Nh)h>&(6j5#>C0w?%!r-Y3w+LdNqAV=FzVs*m zz_)yX0^jS>a#2zRS!EqS{A`Rlx$nYicO34H}o!HH3082))D zj%>7lV28E;?t`#1$l$yA^H{6s!AsdAj{Y_l9)ps~#-(cQRC%jim1R)aImiGX`DI{? zxM}<>rz_xiv}q}0Nx0l4tkAf7%y5qFiy%}(mBvVeiW2yq9sa}nwX;v2$D zG&_soQ=Yy1a2Zb;m2$Iey+47-ZxtgGi#P7?JMGxVzf{%kl2 zvx8c(>Pj9y>r|-RB40lEE(D&1*Wo9*tg&VnJdA5tWt(T^>>LFOiBnp4^vp6@N<&P3 zi%U*0tNc*%!1Czr;b^NZk2c!q%Jp{Vxfk2FfBJLn6W{xT?StR*{q4CAeyj~{+=Ul@ z%kL1Nvve}gPw7A)7an7aX*@6 z^{F?YEn>NngG*O+H3P)toe4L}9zzNkh(#1HD%OF4CZEz4*>HCgJqq!#0^s z+vLe^d-&*aTbRw-`e+#W7kj_4G9HfG7&^W6_1D_hzWVjHx;Si`!%;gtpksyHIT)W6 zdholHBluV!k-yY+o>k`ZNV!oLSlyI4tq7=*vV>>qj9v$+>qrMGgj1|e*E#sALJzf6 zHk2(!m+}hMIqnA~=#lOV$XW&qpo6=>PmPBRs*UL7xlDqzYsi5O2Zr4GW?LDp14BcE zxPDF#h3kZ%W!(?{Wj?i7%g-*D;*XQz;crGIFJM@5{E~$NM z0=sr%!vo6WDFhx%pJTW6SBxmHlAFnX?y1MbwWY{s0+btIu%FCf}HeC z8^E)2U^E`Hz(kodN*4^#fuq$SJln+5M_D;}PG{isxNWVqHe5PxSJzJ3J8y5bANuZ3 zwe!hdJAC*?8=kv+#K|wSQs8PaP4onSEJ5KZZd{aM$vohzT#rE0Mn^vy1D%m}Kp1iQ zi&ptuOrxt?-;J{BzI5)srV@%PzuT%lo?Hl*N1>5NNd6(8Q3m5%I9Cumkq=RM9eoB1(jNg~5!!7)i0bQKle(}BX1?TWB|B`QyPY=WA zBv14nU!8On;#6_!xBRUYY zKZ__dzgN8okFvX*vMx{$%w27-=5vQ`Y*4OX1;6QuA#jWBwMRQ`|9IMd{$Kp__MiX1 z{$9JWb*pV`Z-O&%kgubIJ@h)Aqwy+YIcb};bHlCWHeOq7fAP=#Zvk=E4jw&dlfx%% zjrew=PG(2of1HUK+w;-LPj|?F^=o}IT|3o@TRV`F%6cAKKsfhw$wGho>v4*7QchQVTbDCvqD-OlqxC6}Ho5g7{Qw7d ziQ~loDRzOY#6?azH~<#Cr$A7K9HVC%!doDf zlce|LAqR0rnHU;VW_f0RX*3!`TWmbq^%Xng=w=55vfErc0sK8aZcN)hL@sK9{K+=@d`r;inT(_QCAj%1`0iA=l>c z8{{skl!lhm!6wrW*XrBu4m3Cz;VSqHLWK3wQrm#`Y%<9rS{I|(8Mf{4L@fI@P8LUR zNMq>cLKO$u+@)?jshz$VKr3F~@1ic+X302sC9T9a?FKk#r-`ukrQ%}!6n^Yku@ffo zta!F*B+T)dP2NQvV#*J=?a>O?}-+M zyVylLGK+(xyR_u)B)#^vcst0i%#;m@Jb`OYec)&Pk)G00UF2ZT!QMf>Y3LwB%155I zZ$}gSCsTOmq&hZphl>j~dcAKs3Wut%K5JCExWGE4xGM|V=DO^xCXHdrfiRSp>|jE7 zY3C{rT6K>sGsa_il8b_QE>yx3#s|u5N6%SHJX?_7DE+pKibU3%}G3pFC-M zZ|$@hdi&tfleY8dQM>=(t+w-U2mQIzLo4tW5$XGcF=fV&tc4 z#R+j21UT_)h!d^RM;$>=MdMSr=C8(mj}D_wL4am~Ye8+f?rNgjpkQL--J2;XJq0&))S%6S$|5mD^yU*(8fqNqoNZG7*_ 
zoOLtxf%G+S-%p}D<{`DJ|D^R`2VHW?kK#)$UaB_6g{NKFah5xIpaZfK3A&kI+9t~6 z;ug!StZ29PX`QUsX`VS~?;;-^;G%zoVY#qwsth+A6 z-rwJCvm+0l+?KAimnPXe%h2;a|RZe7`Kx2~_Z=kMHT*RQVAby#kr^%3;- zw3a!v@MMfCoBCSateKij_Xjn~m%$SC+oTlJUrw|t5d2W0DLazsDj zv^{(CCUuo|OZ}`rH#_9j{A_D%gL9_hE%o?;@ie-1HrKN4(Io zd%bkv8`p;@@w(p`v+}#Vm$P0!7#kfKK1Kkr_m;3F9GlDMQ4qee>C`>yI^|DV!3Si2K0i#o zQzl*5>MvaL z^u6-$aT15zaW(1KyB?PJx=i;p4Grtx>t5+E6V(fulG3TUx?7h z7jDX8Jnq_olJ62U_!zhI=qJggmnR~tdA#>Knu8N?iawU5M@&EeOMhj^7gJNdippJF zxD3N1cEyv_Y)ghC z`uN_R*dGqt`e;2i;pWzM+wz2KF8c!dIacGOzV>3~HPkVbr#_3g(029@+Wy{tp?5|* zLO(}}IM!poE#nZ5QLn86a~wGKF}&-ijN0e+p79~tIs5VUb#qFa{Rntuzb$PPZLv1E z6-iyw_raxICeIgEF56seR!{1QO=p`48T+~>men_=G^CX$-leTf(DJw#BLT($yv)BA zQ8?khlwJMO(Z%d8$R7*|qp#kV$3+4uLszc8e`H=hM{;>q&X&P1tWAH2vLIsG^vR#} zrc=MyV&8(%!+KheD9>Kt{`%6N`d2>k1p=}Fpclj_A2gLkj6xQo@ z6F7-ygIF19#CWfQgisK{BPqmBL_B8Cnx$a)4Sy%fh+6Ni+KXUv2PUL9lccp;m|u4D z$=oPFgpGF_lUz1&&vlip4jiL592wkx4(c;iRiHSkf~cKjZ|% zQ$2X5o}~fLYCjs3Qj|0}Ls#E9QBv1{XLm|cd)s7AnLv9=`KQy<>>T%7(@$uSclM9k z!`*$Z$8G0uzddrHz``J9rRp*ca+9BJC zexIhtl?|HIuwuA{b#j`C>z*{KT%EZ=H7UvV)EMh*38k$SLzk+$H` z`pe+C>;(UacurpUK4_=X5xQ79X^};0JfEJ*OYy6LUFFq!piEmt>BS@ED%}*FB;NC5 zmpHZ4!^2G4&4_eNncn$dM`|9E^i&o@Rw6k!0n>l z0>`wyWa}=)GnBP_TBfWz>r^H#kw@Es=*IBU(%`gXcuxreP$rwE;)}cIUL2nsw(SuH z=INxZo$a;n{m6UU$KLmz){b`C!t8NdK0X9d$|%oC69W^>$v!fw?o5u65lf`)VR$5( z=fX!il6m2n@VZL2ysO8RPvYrQw>Z%?t*$!ErVVF!uJT>96Lwq!fIXf>-b0Z#mQ9F6@so+EYI86lPq? zEwJJntt!v*GMPcAE)#GwUJcNA7vCyH>=*Ilo_5O5m_UcufqK4R1%V1{)oKO9ufe zIT;!~o6CA4PwX)9+{Mk#bJAw;4m!R)zPypv#jkYC^tU||7T!cnXV6K7O!SXw!<^8j z9d{CBi#B|`eWk5$Zs*fV?t)yqa*aBUjw62bGkPdIwa!O=tKQh@bMitNjr8!(dnsW2 zmZ^!fbH}rN;-E`_Q;S~28Cd9hkNQ`pY@40rQ%B_JW@zbTrIVKq=2)K4!UDn9EZRkH z`x>1%^LV*BT_pAdmsGKHxIx+a$u#rHf*RlfChs2Cw?c=}!K)lS53MpGKAY3-58G(8 zl?nbebi0cK9Hg#x4cbZ{kkHo84guQ-h&7*2kuP_o(zz#nRT1n1@Z8wKHdv$6>_B>)e3WmO zizCFr3Ukqyvh%~e)39lqt>+a^d6jqQm3mH;>UXNAfs2QHvJM(3?4rAqZ!S7eHj!cO z>SPB=Ha7jV&6qYA+Dzs-K2Urd^l(?YHim5?RE}Ibpd!z+IApFY)4>9V|Vt%k$4JUZ1a@i3?5SEs!wPh5Dz zjZ3-DDYuIUET;(fgZwTSOj-g=XZzvlQGQ!l`EcScdI#Esm(X9^a&=|gKKT9*v{&B! 
z?)DG=yZ^5JJO9qV+iu>y)^1$mx_!GnbMF?s3dh^6tK03BmtV?4;h+B7f2aL}pZN!E zcYn8SlCL}eN6_>RdIgQxPA5n0ggSY2;IZbrZTG>$wzIQ?o}8lNXXy2lwyM4Yx$Ia$ zHeB50CoL?RB~XrzG2(1P>hu#Xq{04yTyh{Rk{Z5ID0HWVD{4b$m9}yK;U}~Md+HVF z^z8G`wy!>ZqrLIQgZ9$9?$K|#f=+kfOz|k)?1Ytf?H~1^mDoD6Odip%W(?3mqo&qu9>wjC%ggoi(O zYvDno9z>3StOetV0O}=$Li<4M466tJxpPl^)XU%_c8Tu<0QAx?kwa?}Nkwf9o@7&W zzIXq1x9&Ok;FRaWK~K^G5;8~vocS2PY!{ufe?c}f>A=PGNxFLNocQT_Uo+7?{oX%& z@2+_HuaDn3k&Ejx^}R=+^UF8M0|I(F{-pgL2knX|{E<$CFVgm5Jjr|IBZVy6UD!RQ ziAEhuO4~DdN+oHh?w0oxKEQUdXKXroWZMKCo%DC)!FLyLrr&$)1e&nbwE)(pvYvCf z@E6evF3~g8ZEgF+hyQch7Z-lY$2Hnq7t4yO6K@vH35-SBxG)QvM5mIc^*}wO&R2ZY z8R|{tz=fUaY!}ID(@&=AUr(^~v>^H?lqd^Ffn9BC=~FN9@N#j1<6(|z!VKVQ3vmzi zO2?Hwa4A~`sdMdzs}Dq3`rA*slmXfe(!{@%XctEwDU-r4zFEWMalhD0eA5n4XVWZr zDqnO|pTFs~;e=W8tsUqIFzL%Co`J>@ws3gvQ8TV!%v&^Y;zUt8|M>9h$}AkgbTq3x z3-`n{yEyx)mA64vXDg0z%T_IXdw*TRlCk(+rjdr3-09%MEiMaT8GBs1_z07-U$Baf z!I}3Vs&!|C^CJi_S6K zj{MQ)ho<0_PxAn;FN3mTU&&`}s2VdCZj~ht82TBO`6BC}alj{wvuQt5MrkH?#CIGh zd6-<&NN<1Ly&vxjT*OzJ1y{2sj#5xq@Snf5`F%Qp8Vz9b_Of4L-&k3s9K36*2+w?~ zeFY|!KKw!7nLA#<1wqH$&{NvN$FrmGGAF%RUim8CBz zEFY{g@TCEFkV_e)h2^)L@;RUk<3iQ~DNGSY7p(2ngC$MLxj1=M9{tr9 z;{r9@rJks8`JRPxFt*@8=P=HCCy&j{G#$=Hu}c)m>|Lo&B zmSK5i^duixo3>8mhmw`H;La6RJj5BL<0D!de(kbBFG-ztz-U~)J$Ouk_wD9!z z?tl~sgM8u@-Z)CapM#(Q*lOUwymHhd!k>wLqlFrT@y-($KbaY%B$k(q>pC~4BS@vK zQqI6A0(nYB%S5~>QPMI!6YgZ=gl@n$0_E*Nwt!pEO@9(SDyUQY?=N-uqL zlrVg%4ofkfm+rEB+@X=>k-j-XkF=3-qoxwnl?Et6ZkW$H@8+jKO0k052lwJ(oHES8 zJ@o5*$yeG1#)aXJBX0O949P(amSW%8%P!J-*SIYW3!YXX%g zu&bX8u< zQZ(e^ZiHHiPeqp&YMK1;BaSki&x;U6TDlm;iD*a$%dF3lB=TR4?B;0p@PZ2-tSn*x ztk9@HldPZwzwwBYdmPWu1p%8m-rZf^Yol%8jM~x=BfwpMqxH6g%%9UBIkE3)c24w9 zq4^$lV-Nc59!=UK=nUn* z^Q6s?iz)P&&0S1@uA>BoJK5$b0P4HMp*{kldE&qvS;r|dSH6|DFLvI>l7c4lrK3Csj zFdUl}xF^K%-S6eoT3^Id{!e&@+vL4mcP9>!UG>0;dYtsfNbi*ABibEz`X5o2V{o29 zE06r&N8X=cq`&#_Nqe++03YDTlnxJR&%teF=&t@P^c8#_Z@|;_wuo`DjErZ341MP1 z(flqj2(7^JeEuPR4s=*8rLV-b^T6GDp*SgX&@k;Zv{7E8n{hOzt+w%bne`Gv3>meKHFY>^|ki@{)L|>eBRpiTO?Wl|6%gW=|1FBdfhu* z=sHoUSH$V@iY|{)qpqw0BzMvqDh@;lY=0T#FunTJ%L!w!EM3TdWO8i{BMkqO(*q2- z^EO&tXzQz|?bdLq{ox<|T)TZ`&?dVNb1hNc)IK`1+8WlIEF$p-Nb7&Z4Q83)BI{dKKL{ z+Pv3pyz|3E8n)BJSzE?#TbLlf=)5K3Eve_#MRdePA)PoKGKr?6e%KCN($)@obQx{Q zt8cy4zV_zp?XUgKzk%*NZ`ZC}$3_+w&`dfyh^{@- zcTlS1z%w0^@+TfTFkw`y*b=0(Q?^VUsuk9DW{SS%Nvzz{5h1;kCCM+cik^pjC-A`U zf!hX7J$ij|)aFjsP(JDDAc?%UUPZ@|sj9)(k^>z479t{({8A^mG6+xI^xLkn-xeHb zsfn%FUR<^-8N>mgIxh7|zKN4|J2>Q6J6T6g>gO4h$}B*Heh%)~pE%Z(0Dt=l)-UaxOkhiJso^OP8O<_| z71Q?7#g0xY&UyA5wNBnC|I!`!;66ncyI8_b_xk!cyTk3sUc0`9EN+G`4iMGp0MnEy zeM4;&U`kf$n_cqecQQebIw3YoI%#KoX%+UTuI9nokT%@_`;4iBk$o=i)=_0n{)kh_ zjpbEF!~-6vhlK?qhSy{$_<&2Fk8#phvL2B}c(vHGW#Y^-nYV-Lnb1LY929Xuh4h^q zPTJIyHNdG>>eIHugSnFg&_P+TZz*p5M5+_9#)90@5S@&r3C;RjrZ2N+y zXs6DVJ!$gNQ67u0J?W_mmMHawpR_rqoXVx23=yaU=uReDAGLSg3FIJ*^q9au<=$`Q zi;I2SI+adK*M$V~)5)@$VC`*q2_+9RAwYQ?s1X0~LOmDz6<$L_q7m)@&I&Ykr?Guz zYd)?T`HwcQ(Uwp*Lq?It$T zHh7)x@3(X6=^ApdJ{-|!7`2i3$^>aa9o*mBYljDW=pb||G|KVg;F|B1)5dvH+kj_z z;)yX?{J^tEW+J)4-TJ8VIir2I%bkUi+&W7cm5tfC_V!V`b?bypWU2H5he>cc9!Ls>0(R%TMCT{Is$mmXF&`*tp$mqTvq725+Q zdRmwAN@(5#hs5e;2iY8ys{tqxu6k#^KEghAJj1r?guVh8&%I4s z&32Gy-_^ls1I-iJ%i=fmLukk^{W@IHl&gc~V54}4UeqON=mIUP1QF8)D7Wg%S|EYm zfG+B->EU5Jg13Gj)nj>0D_qN_UY9@lhB!1iKAyH~wDT{$^V#<73wPUBzVhYvv;Xkt z+W+u#|G52=U;4RxO5j5u`cT^#ZROjl+CR1_XnlnY5vC4XX6*p-5Fg8u@*s~c_OOm- zlF&Xacq${If%0QI!qxRTZGhry`&&Ha`_q5okF~%0*Zz7t zpqvie+HSj8&Gw^JEh8sDS*!t+AB`4110~E15jH>k7fXX9xJ z%WKN-e{i*BDLWDveVF$CG^&!I5U8Yaoy#8)?_4wOd z*MIBrl0V(U<=u3`MKOk#p>dZ_aQ6a^VZi6sxq^{+dBVYuM2D97oKsH^@roUUo#3fL zC$@cca>b%;6?{P1XJKt7{Hd$zc;mSfS9@MQxuXv8NgZ+g&B}{*M5aAK!^%s2w|+&J 
zt5ePEICVzd)1&U?F%nOzmhz9BJC(C zv>S8!(#Rc@v?b6(xY~v%=3{$>45?$Z!L*h!VvUC3{>hfiMua-P6vVQ0*P=h#J+i3#zO-paJ;gdU-vGZ&7XML)Yjo;o&}@mR{bX&dFrI2G_U;Q)4lU(Txk$Gbk2aD zhPZ4d3=YXTPwBU#_v0V^D{i(4Jq?r-`#yE=lILIg|HM}%s%ibSPRJ5{ijvkoG(lhQ z4FYb_&S#tPDtq{AnQdn^EN#$CN<8UcUiLvOZ?!$}Ls+IKUD69Ph%XZGP9@CGv^nJT+ z!7WGG=EaBjld&|BXC9GW3l0dEe8!Pa@W@lNT-a^ja&ZN^&>y^ZXFI3E4p*@4=!a&G z3Vccqi{8q2?s%1ab!7m%a#c=ps#_;_Ae{dJ)!hqj(LMSmJgRiI0n+wzUda4?=-}n% zKO=EBvhXAsul0EkVVQ^JFZ%ZVeWenv7mZVQ#DRF~Epd*%5teX#P^L{mJor|)6VF@z zArIo3B+m9T|IF`zhl;;wp`+Oor`a>pgn>Br;g1URYn2a*DwAR$YFU zc{J`87+`CUu$67c2F_K`?!;yXf5Yux)PjK__T;FI*GD;_U=w)jw1d^}u#fA42FghM z9I>yBz5*QkoZ#oF$^BwcKWiSoLSskg0quJFNWsg#lI3#Yh2t{ZsS|Z7r#wdvky-1$mri}5ER+x^ zr_fhklTN)17w~qBK>CWGVZ|$j(_$z#y(8W0OMm(gedr4a*F6QAY(ZS68M!hEaH$pG z07PH~oo~h9O_P5ih*>oO0t&*?BEpivIiJ^3unZ^?-=mj2zI1WP0TmIE$QVvm&;=Ax zzBR8Pp%i@-7gCy$yF#N_{dR-I2Zth-iiD!OqZ^~c!C7(vOs!Ci0-&ayoNMr%M$roO z?37L&X;tXCoDk7SZ-yTyUQQAKktl5pGzou#A>ifr9UT~QqRMa$1mUUoPfs)g>F_(r zuZI&b{M_YYMKFHt{I>&G9i;4BvV)nE35bUgXL*n3gmXVXrhJrFyd8Xy{(VJ@(UCH% zWT21vX|QCKt^Ux&)&8W~;fTq9VOk*&;ygpYQw%@h##k!e^sB4;l`twE zPajZJA};#LPeI~JZvAJq@q$-*HnO}@m`y9*^=yzk0Ugw63l1=W#j*yIa%uzz3T^*Y zK$Huw9O=z_VdfKruf;dI*k9-ce@Gw^DiupH7@T0^&U=y(nt-C3A#eV;?eGifP( zI|GQ0go>A97g>~j`C+-uOFqFU%i=|b`L8UQUpnk2mfr%9rn$f=cmyc%2uFDpfBZ-1 z1Rg3s%780w%})t3jXNPV-jxH?T$SsDFz(_HWiyECj$iAzN7-ig#xYoFjKB;pcL-&I z3>jBGCkF?-pS6R@UYpL3+RPQ!l))W~?xs{XNKaRuow^%mIt4eR&;dnvlU>_rgUyW$ zV!rn3*V^%;C;1lhlh?l4mcV6eW6+j7;+%$J9enOBf!AcxzW$|G;rYCcuUu=R;kY>m zIh~%gqyAfmC{RnUg14$>AYZ+rp(4HHVd%+?2o7v2v+h_({Q-9yY4wpjJBuHBB=tl( z!@V>rCC~k;voqk5t6T`ZGoi?{6UwsDIu3$_n;Y+udz^I*9~*K99;LYha(2#U=n5AH zEG)RYd?gdhC(@Y4XUSuL(Z%N&gR8Epb4R88wPHvs$}5{a>faq3Q)o6jIf7TyHknV_ z0s78wk{(WHneacDp0&NhleT*>X-^K|gS_C`PYdjGdE&tCKKH5L4nA*?ycP&JaBbP? zG&>o83T;oIv*i}QGv&^m>@?sD4w6a_jF_k(Y0!zb3#;gXBQLdxLH((0LI7@_M1a0T zKF?``ve0L7lu7|-g7WfGUPg^((Jssp);Hvm1)ak+tqgk4HdI5B$ zNr*1V=L~>#N$E5`ez$v8!w)= zb<6SrMEz7ikRg4o;)Yl_J47HH7*-_fLUg7&7~ZL$l@pDo72vF{jM~X;(k4f{?Z(v+ z&CNmko{xRF{pfdpM>~A{piTE4p`ZLdusX^K9togM0}NSI`>5#!eDMa~MRh$qX?~{? 
z94(i$O+098dvuoeKu_X`r?Q)S-wyK*>~vbxL*y?kX%>BQDvd~QkPg0PQ33W85|%jX ziVRG0&kic#h@rOt%6{jBJ7vlk#i9=cP?z6EBH$UI?7Pw{N zLKf4;cl65XOlkAX+pBP?INc?AsK@0eeS}x^$w>j0H<652?c!pS-^4jbPW(u)rK1uI0>cMf276w1i#s_)DR-~M}hX?Kc{$Bf;pZmr34}am8!HEvhw#T_!K42|S zf0mBhe72i9;)xh{Z(VKw=D+c;Q+@8V$6xzK+oMyp*j&U$9iS}vHZ*m^2^=H`@weTD zrcwlX%`w%UkRgw-r^F~ysrbMv>2GsqC$1)H)CXR0LA^|!vLorrX=NhqKBQe)q4NQ0 z9fxf*zr7zx5vPt z0f-=8UR9@D=qHYff;%rWU;_$v*aidXOj;(~IKPZ5Ek{RBVZpll6B=nZWa%2M_r zX)Qr`ft3M0GbxWO8=8SE+Bo}U>5Fk$n@(uk?Y~f9I?to*dQYc%v`smwJMf7t+W(ib z{{Z%E%kJ~Ab#uPAbHDexUk(%AL|_8)41fWKAVmU9A^?LBK?x#ND#wy&mxElAXuIUH zinb(*5-8dqv z_E|0$L8x2OAEVCNxO3P5Jt^p}QU}GU3%r=gXv)aKW-j?ly=C9fciF-^ry~s?jNh%d zk0(1#$1b6BWUJgUgw);7lBh-XYWpS>&x2?MCkUZyM2XOc-{Wj@d0v0S;vE3 znx68=ZrbL3>Wz+=2L6TlmC#l=Tqv=-yBU16+>r{rOwEuNyp)sVB4!{Vj za^ykRZvubf;xU5}i8G^c?5vw!-D`l(;|MfAf#oyRsiw{oi1(BVK-%Tr5rw2f?H_}gU6 z!xOXEW|I@{1U&{{(gDq#xpMUAqdhF4Y^~4j8 zXMj$*Q1^%%`zH=!xo{}-!Cp%n8l7i*3V+y~%5&gAohJU9ev}7{F7`Y;+TrfPBk9-u zKV`{N(Hy|?$|QYcO`WO^nyB5KLu@3!TaS#BkGgDO4&4TC&(PPC&ut?n>=zfjqiO-J z^a+K5GX3O%N8PJa%}-u)AZ*@2Kia?9IdR)H+y*I4#5Whn@;?Y_Dv; zkRkZYwmi6xKF}`L9`O()f z5VI$lIM^8bWsu_@xI4;cQC3zwF=T}_z+n5MP6I!)^Dcg+zjlBQhBqiO_*7qS-r1^O z`Q_iJU;4za)fd0`#k%y`Yqbc^e&_?=SWi6pSlU-l7eJCQqt)+@L-px2)*|hB+Cb{c zvnTzjyK@Imp{Ef|ml6TMciTXl5^N=TNluzTrZYH9x??9zf%)KQJ8AD7&D8ShLOuT6 zhcG=4>K8xxYbiq-%*>*D!gEjw9(S-(yI~gCZ{N95CzqG%=?hQLj_k(9>y255b-Me26@DyA1OpW6CGC zPwa1PNZJGq1zcnoT^GLLQuk@&hzn&x9V-30Pe~T`g$Zw)#nS|Ai>xQ!G8;ANHbh{| zQ(0;Mj6JHW!sE7OR?cV2RKX@L@A9&R4fp7#`}`*_Y4?&h;aQ}ook+{?SYl*`a=RZZ z{=lXD0<4;pku&p2xx(PxvLCp%S%gLc3}punWyk>T6|Eq4*Pnu)HL% zpw_1UEbpCE%uQLucV{-JHl1SAMob+GGqj081D7g7SeW_7ipJIU62B zBm2#ii99+kdx$Pj=Qph6EB(mZa#9vqPN5^xw+TO66Y>L|L)u6C5;h3av(}GC7rAOT zVCUL*60B+3Za1OCE~A}tY`JZb##8!0T!(hTKw#(`c`>yJ6$w1s4-^FInFY=M2_T-q z2T}I0V>h=q>%^&tGj_4OvQ~4;^Ym@#Ltu+}TAQ+KeDiL07+|?Cx6S9xU)<5T;UmK0 zz+2*`ALQ6p%U`E4FCP7o_Zy+SS|?z(ZX;h>H{q}x>zFj;VVepa@52>2=)4=AK|bNP zEVdrA;wOAy`)0bS?qj(zOx(e8&i|r7jZ{E zQ7KpbInBEh-L!YH%K?VEs*7zywb9o%H|xgrTeZDmo2)%eeA=G1l@oPhd8Pae;R*V2 z`Mew(<1sF3u+6l9kPKS-pM_Xv zuFOMQ$5n(gwl9&xv+xE~Z~X+E(jk2paP6jsjGK^`e5roXc`^X}acSyb`@#3;1ORH6 z0A);LIgJE`tC(YuVfZR{&D2J0W}_7D`13QaT1o}TC`Yy!1tht{&nw?1gP1Wc2@_CN zwg#^t>ZdS9(M3>HU;quk@f(#vgj^+p^2Dgqxat*R1W`$incPvvJve1NiHvdvQ&FnO zvkM9oIy)O#rA!>lRpG026l0i-fE-*S&gchtF)- zHLU%9`pSvZ(9DU{<$CIwx7A0!^ZV+O*t`n zq;k+O!zcCA2{VAANSyrE*>E-LOz%Q?zWAYK0_ZPhgKot)XZvsvrcuGguBr6_lIY##0%iAQO>y}O;GpMU^fhG*@l(Ga;J(BmrSXlxg94D({m`@Fi6=NUgSu#DlbK!b=O zG3g37XMQjW4v{0eyvR^Up55#_bf$Oq4r>?KHnw+b1KQ+HC)`Qx+Z$VT3tD-Jr^15*EY|4irc=Fx9^Zx!w)QZ|(Yd?SeFxp@=^w(r5AFAnvwd`{-7Zf-a2NhIaW;33 zC>tFPwhuLoFlzUqjFaxB+c7WXe;+!Uhxu+phh0>}_C9z0$ZKQI1rx`0XLGl1Y;M=p zyPI|C_C~#VbF;48TCbaa$KT0*HQn4oEv=rwSn%lU)3va?R+Heq7alkfgx<&c@PvW> zGy*yhUK~3vn*UiS2h&Ou*-kYgcjts0`7u1#1B4^*dT(BXTWrus(K=psgKGvJHjC^M zBfjFHeUgsU{ysct+lCnpEsiiY-}a7o*4f9OsZad!uhtFfJ9+lu@?;A6#mN};aXJ9} zr+gpyCzJc|v>RlB!ZRQtjMhkR_?XB;`Ui3tX)(FM@TsBZ(^AxNT?paCmAlxmW65g2 zR`A>1x?1mh=FxiRqi1X5*7ce}XSqNi6WwHL8znxitGMml`d5vi%tdn$uPqGq7UdG) zz>2z|PY)m#cj4EG;&%ORow{DqLpR(9yqJ4pcJ!3hxX_C~^i1MvFD0D#22$qN=X<~4 z`_5$MVUX>SIQQx_O#^!>-jXE`+A)0xJ9wsDNgB)YHrz+29f;p}880>gj}Ha{^=>}3 z2JJ>W)4aXopZG)llFyhJm&U|<-^O=AOPh*p=QI%be2Qlm1s&E(J^IcMlg|S6#BY!M zk?(yvGEO4N|0=vp@`4{_BD^Q>l3MFFSn8e3(#u`u>IhG5aNt5Xvp54doU~1bl$%A|uZ>?g_w4hJ378H#Bp{=uOWu9+y6%*rcap$~S7b(8&PjC6RAE4dupp-H$Epvha@x)j73R$*pEo0KC`PTe4H|YyVQ{q{+3mp6;m?s^a zIC-L;eCOM14?FH>e)?zYAO7S&tcw>PsS6h_)*|+U14naQ#jU4+h)v~U0s6QTc^3VbN%lYu zUPyniyN8XvLtkb)ye3VQ%-%^Y|2I!k@!Ta*cKTC4`Opa-$M0N(>jG3y!P54mklKO2 zkd`{8Px12QH|wM)y*&9?_~__(7v0%Mt}El>(w#BlT9=a-#5?}c;e#x>!^DBA@TGFh 
zCEaw>?T6=ppaVEidF-l@4Iphjy2$eB=V1UFzO}RM3!G9i`fCqfw0+Qt;od=I+Av&nru0z$v@K_x!~u4BG{`bxRm< zHcwB6XB}T~fYEw-Lev3$pd3M}4|dYEYuD=5&D%8x&eR3ccWZCE*69=Z$piJIebdeD zE&3hX@EbHE-*e~A!xN|LTfhC=>*F8$?pj(w*7>vj6ee|k;NkATv#`WA$pzmSDV!{1 z{!p9f5W=F5Nf%}7)i}t)zxeU4eT|(?Y~G*|>0Jz^4Y{1N26YH=U)D^o#c3>80uUdfQ{qhVJ6nqxvn<#qqJFC{76EQ*9|1 z;bfqlFzY0ZtXJSijuH=hkT(&yS~dSWBHBO=4<2T9h~@_xZL{EPhHEi%AH)Y&~^ONE7&EE*plj0Vw1M> zH020~JVyZZ)0RQ2SU~$F%J7`4UBj6|1TXP2zKcuQ!&qv&E^DYloKZ8`FV9%+;Fo-d zeEu+J_=t)~B zn7D^7lZSJhFZc9i;i-vxyETFC5SK2d>=R$8Kf!K7i9JPb@}=_bA{&d#!XfoB{p~|_ z}?cPjcxooajGHT-`3oy8kWsLgMC+qih`pdiw8sxm;;}!434G%QE?>j{b7cb(Z zocr)l8nDC;AWzGUzTt1QpV}5``US4kllOstyp%12e11@`4mY3VkALW89dvIQi_jKE z{Wl(!A>u_Ih-;1Pqi84D4hpBm^mx31E`;KI{(OpI`@d5F18PNj7>6|{(9<&^X7B6vAEDg*L1V0hL8Buzuh0a z59vb_)0l90_dhi4cVX}N+>?BWV+h8xrM(pn%N@smI-Rb=17APgkJoTYgZ5Y7XtUWH z4t>quwkZ1M0gXe~P|9S$k)lR_G9f6>WG{ckrjl-{)4-qud?O4Gob#VJ;RB958YOU9 zMjj()opwks9s)?y{C>OyQs3Onq2IGWNnA%ZvCZvx7*AN`VaL4i_vG8~D|OEX7wbT# z_6fCF`=lsj1s;?4v*0(#6CTUqroBj*?TY#MnYEkOZq~;3CS_{NS1n+_pQev@?(CUb zr7!11zmwIrNy=Mz6~0XyOxb?QU=BZb&AW@<4WWH9fTt?bFQ?7YKZ|i^`gFydZZMn( z;0XaaMSwgqhQnna*#+Ej02R3cPmxFD$py{gJY!e%)|6*Ov3LBV7r>9Q=E71>0dm94 z95|XGlH+y4Yd_4-dbv2j@-hZMA0ekHyL?f)w}(BsEjII>LO_gBiS0;Te)j879LSGfhKE6a7&X zRP@kz21iioG`dq>1(;kAH;kUFXgo3v9TNw2oSg6vhV;mP5$3>sAIC)g+COLm6@p4M zcRJR_bLB@SjwzTDGidK*yYThozlPRPRwrj*XSjNjf5^)`c#xp_TLD)FTCT^JWFsr{5QXZg$jvlJb?_HDn#=&o(Zg{wDDP=PJI7)0PM8K9 z44%d^@knbKT*7Fh7ey>z!7%b!CQ{&^{;r8QvNODk2Yh9N2XlG!hY(X!$^`H5fI>gJ zXb0V0!Im|V)kIG}`D}gbcm0Wa*SCMHmd`(rMShaviHCB8n&jPsSRS8c1)?w~XIuEd zdjtzuWAO1DNz(B5)jI3Pi`8|2E6yl9x=Kep>N4>T{^a{^T&T2}P;aqI3hLkZ4({}G z1)exdxKtj(p#e{kpD}94!et5gS_nMCJ@UY%S4eMy21*Pjh4Ap-vgExRe&yKCw%;i= zKXL8gN@NWDZ13!l_P91S)=^;lwSMbXj@NwT{7IDlbkgtbZPw>M{ps4>+^Q#@dZMO0 z-GjfZ$edoF&4tFo0u#7vX>n<#?wwk#g>&cYp|j`e%}dv6@Alogb^T_ozw&xb?;qAF z6#nG=5)B6Tz5QB+M|QT?>*W_;A>J+Mc3dY;o~~mWl09H`a#}rmh&;>Z%2jqPDJQ%~ z5z7zot8(TsGV*9oRy6O6wK(z?7aDkBjlny3#UtN|Gk)+40(9a&F`+qtNar>#d+^Jv zX(?5SOMNDl{{#5`V6P^j%h^*awQ8dWQfHKZ2i|BfOfgHtxqkajZC|-wGftj>A9u!N zpqBjnp1*ld(x6g9z*69lA@Z7_A)H-UsTq%%1NSpL&n~WJF@QVTC)^oBgYGVGkG9TE zc=8aBE(kb)r`?z0LYrM++6Bkk2M4uHhu33i*PZy^+N|52Ah5n$w|!q-%r zx(r<}UBAY4y{=umQCDtUt1H*9QXju*dsw&Nsjt28a$UK0tv1#->*n3{+SuBy%QuXB zz1Fu~EHIJN32ts|@$Sh4yTEW%o21{^J*;(Pd~^4JXYM;kwZ;3!?tbl{o3l6oUfzMP zoZ#P4-*ETS2e&qND9?QTOoHF$-^?Wct*yPfzRvrdjkW#bSgfhJ1#s#&y654A#;KnX@be)KKluG{PfYN05vqr@9q^Jm zLweX&0wTPMEJD|`58%Hwc=KVm<+eK-0^umG@#Z_Rc@EcE2zf;pLre9d@$!8r+8Slu z0dfai_Sf%b!Okub7OB$@|Jc7?zwx`jS6}|MPu1kvhw8-XGZ7EJMVv_<*gunUl&OAd z9v1>((DglSitPb1LDi`f@4%&o!!xj^4asHO2iFU?_0-QiQcv$Km+>RLyE*($_w*Ec zV+EWaZPvm1)w;KHr=EZEiF#;l5xH6{zXk2Lzon5UjQF_<4Ki&W@#S&<(TOTit{f}n zpRl{~I3-Fm;cEKm)GyY}N;HVP$G!EPXXTwdlgeu2-CEu`&k+{K6-&!Liv}N z!Un9uq`svdu}_3GcX%`vZa^6C;hguJ9dEfxXz&0&!1kYfAvTI<$C0y zhw9p!uh!n?W=%UNe&8-ZisU&?d~5eoH|?Zeup(T2_4ClCpi{IDdv{;+itRxLLK08> z^UPh$1*Otp@RE+By_@eDgCFT&*%K%rkFdAIyxX=UU&?VZoi=WFcL)1mJrhmZ6?VjX zuwp44B?mW_R@Q2Pwr1_rX>fG1rWa4v%+e{^pfwbblmG7alqM&Ip;YwqZUKI)wsf=`dK zoZ@%zLr1YY!VcK9tTq0vCs-OLzqBdm zT*;4p@5CFWg92B((~d0Cj#F20<04ZhqNHQZKn*jpl zDE^SqiDzJuCZU^hZkweICU23#Me~5HJ?bQTWQ_3a;N@A{)OuPiZFpsC9JI6}oD;)< zKVblEpU$-N3+|9`Q4l;q8h5|-grtiylo{S~_t^ zzt{x>$g2I7d^a3=%O5QpCb#0&6I)Xpk&iSIt{$itM?4?z2Syjzs?!`05$C&pN0fZ! 
zrz`+KKl&-U{Ug6GYM=JFmK_WP4kzW(7lekwDsF@#lXTh_+W3HLC7hs)F1C%sD+8Hw z1Z9ppp2eZO(mGsR!&{-Z@8U=rsAH%T@sS+le=B{WEI=V2`PkhOANatB)4%)L7r$0N z@i+go+Q5c->glKJ^yzbT`oyXF=5PLRoti&UpZ(PD)KC81zn6ur=i&LYCsu0}I$@b+ z(Dfen-SHv#5NFb!I)%punYQ(u0qAM!VcoTxg9GJ|%jb{;{HWtCXN!J{gIYO3gb3~; znO#&Sk$Y$oy~ia#?Nfhk#XS`dav1x~_DG#84ZznS_4m_SuVd%#AMe(C-}4UIf>X$x zdWf_(M&#vSk$Oe^V`LyRz(gGT!0Lv;LBGU)s`Zl1rJ*N8IFT=X%+DRw_Itg%3utO? zHWSx=E<#>%afGyzeoo%2pSRFiM=pAxy>M}a19ppZivR))=yMmMXvZmQPC#dn5X6~} z0xGTT2a0nj(7Xl>=I53(N!;TKlUWR+j+9R3q1<_NsWPdIv@8K7dckkH=Ll@d%dR9R zzpy2;iG#Sxq2mD52z~@*Xl+3otLj(soS?VBG;Y-ad(S84t*3)p_6ZBJWXcW z6zW;JRErDr4kvO{s*3==>BHYx-|&s^NnhJCdLh1glk#{^>p<0{bH+%luYlp-;}3GP z@vJ`SpwGC2RA2kIu>*Lg+9b2?#?~qKJ`+Hko}^Z#zNdzd^o7 zwX$}K;^BFdP@aRKE;2cyKfb-amtFl2ojO@73(Ju;b(%XjZFgl}S;F?tNn5I^SNPh6 z_rlQao%Q4f-Lz?Z5x+zX&qyobw!N^;c06)-_inxakAJi#x3AORrBAxEO$JJ)db{b} zIxCn;9+u&c{uW~!qAd09K%Xgx=!OUIBo)WryWI<3edV5KqXBnlm1KB`*VKcX84g*+ zMjqg4xH^s}5LjG?i8C+L@=BCUbPq{Q2b=eu_cs0EFkT!7%4uZ=SZFSt<>;r=uiq0b zz*+Ax{OjG<`};h+H&RF{uRX{=(UYGyffi3;9s;@?`N%5Cn;LVP98fo5@@C)3RsC*T zrS0b!iHnyApLCIlbn_zdq$dWLw{lBZsx;~?%Yx4GP0I2{;o2`*C`CQ)?Zbz{CAjcU z#v+NY&I~E>1(@)xi?D1*wNsp2i2R`M)lV*hmG`tUR!*M44;?|DTA8$MS7*uJep+Y} zeWU&A*qXZD$&%Jf+Jo3rz--&DY$`iW4yv1!0Y7ai-0nWiIKi;s#PZUoBOgy8k%6Kc zMT5uyFT&u0q#%cPb&z>AT|}6)v<-ldxLbc+hqYcxLN2{>`9~g>FOTOQKhv}$`r^Li^qL87m`6{FJq%=`zAE=IjIbG`{aTX%;=HSO461+07rPf5Q};6+CpFdW$3T z^;yr(!+7CeE_K2K{eZU*xX#Zql9<1)|AV@Y_5Q=^FASE~NbdS}Id665ne=0Fo=5o` z^RoW;VdAgtxDIQab(nk_m6v+z(Kt0qxA)R_z;EC%-+s)iKD0;OU&l|ma_4twK}7$> zn^?fB#{+mv8>Ss;9|r#sp2U&v59*Pkd9)tZJ(FkQp&`y#XF>ye_y?!{8|59n(Diy? zyLPV1p!bF$aAc>uQVu<=dKYeluFGIo0Krfmcjb+E_WLZe`}^49=qi(Tzg_iY-;_Gb z+s5@GJb4dsJgp=61MUHJ!P6nOn-To89kEUIgtxP2&!jxb^w?8R*2LUO?79isVo!f^ zT-gd}1DU_B@zU^%gT4n6Zq5IqB87XGKYSdb#0@-SUh($b2(~%;K$fO!Ut>SWe6(R6 zw8!FXh~wS9=E!sRor!|!ycw@~RveBxjTq_)={Mjq2$Vmyx`fBEDId&9{}f?>-rPx} zog>|yQ;Dr*TQ9smay6>J9}f-3n4h$yT=UUcwsq!ix`EAxJglGb%*ST}#iN1bmwXbI z6pbt8BK4JbY~zt>I&;$5vC*U=OXX?C&ow5@f(gN|PYP^6oUES6)EAhcuQAPg7D^2H z+YZP#>S}pbpyk2z35ezAuddy^T^k$Q>F1fP`sMVAlUdZgwzitZnWDx8E85mh=6htn z@7cA#;N-p&_&xF8r!dab-YsV_T^0in-kcgTKO>BRL;i&ZrU~u2!dKeL zz)ZOG4lhv0l&PN!0RHk1edCW~WL(fVGQB*Y;Itu0?h1+iGDfoGj)(7oSi_lj2_AkS zMvg=fSw_*{v|!BVC=;G7L%}sa2lMmI@zhsFvP_yA%CR=cRGd|IkqiM&iO5AC;DCGlnH;*I+>?|l%3!R%Hc3jp_7R{hRmv4^xhuEnR&w3 z+K2o5`b~lg}u$=6M0$z`a6AlMql70Uq-x#hU)}D$3=@3L*7%p+W z$B2-yfhEZ1PMC%g?@p+hLk7<%lYc=-F}+AZM?4;7fn+q$*}OG+?ZC8h>`UcmMv1C3 zF6Efc=QhYq?_GakC9gc=1k-n6r=ZSBcn7eI z*9{>G%yLL8z3tREA>%a;qUywfyyPH;I~eS&&o0jA`|uv0{meVwSKs-uKUGh>=L5BH z_EBJ=5OA=+zZssCK3Sb^VG5uNGRUtxf0kfffCoUOnep>%9YePPCphsHx^tP>pzIn; zM?v0oum}GEj~Y{!>p+`^yJaPR`DDPWc{->JLS||2Ss8Z}Ie55tu$NBB4wv5*atF`G=H0q^^JcBDZ`AhIM%}%2yRKiq zQCF{Ctv6nOqpn=OQkPzPvo5{)S|;ef`1vo?=Iz_{-uJy5y?|1syj|W;E-lu7{a^mW zy8P-T!k&k%R=~N)a!^P(5J1S_F04a@anc3XcJNL=T<0!45*+Pr+^X8zuNh=(`|jOZ zU0tau6z~l7@W{6{8iy6?yM5y}<-A5+4r>0?8ghzYqrCi{vIBtfb#xWw8Ky#(|J5%+ zGyIqV7;!H_rJFeB9+_4NlQxrel_`&wfV(*>ah#;J>T`A`oMeNSs(wdkJR(;e6}8c^x@jP zyMgMMuBDY_@;;^^aF7c4C`A!h$7uYk9~CeMxP?z1RAx-;k)|3Et`u}Ple@X+-3_$7 z)Tah;%`dLh!ZLSvX)V$4uB;+%OFhv|V?D#QxPX5a6U^5Xbn#pE)5xTgIxZ|YaxfUH z!^H%S(ZimNA8>!T-+KJc`g-l}xVuT(0Hl?dueRH?2t?%Tza)M4s|Lu(YFH)VZP8? 
z-C>r>k$Q$u9`NIqC^|dHg!vv;)XN9noj6-R`-xAuezHzqd_13k$hVWp*G|cl$4e$x%8Y!-h?Z*M_krr0bq`?b zzYb3akN4va$mqi=q zVO54UzoAXi)Fb+&ee&0L!^WScuaD-F_M=h4HKxTG2mGY|fvxjaFLhZ)QZFTTJ3KS^ zg%kBDnFwh|64#CgPq7cW?MPVK$043!X3n#&2N~a52oMq6ClNjHk3AFk@&=oRqByt~ z&mL?boJw~*aXW^~*&Uu@Q&%38mG($c4Lf1zGYN8VV14gJr|s-d)uYe6uNF=|qtOSk z$s8qgu(6e~N0M?UAFgBCMF$84Ns3}Lqz!eqmjffxTHM&ixIppR?Yp&2d-k*c4Bq%^H8St0DPy4zdpWN(>%o1Kml9i(r1?>p*`e*C*~Wa*8omuhFHkN-XP*g!cl zWjcTWJlLS(O>|@-EOn-q_wnJAKl;i)wspjcpXquauyNWvX}-rhfF3CR20E zHA!4`qo-lrAEX=Lwm)LqDlS}@Va-$w530eBFVUIQPaQ@bx&w=CgbkDdFc(HdF@jzE z@~2JBd+%(OxF8#{xu)YVR{yIF0E zv!m1^xm$mE)3n-Zy#Q0{f@8`E^(aG|*Lr)Lwlt`e?E_ zSU`s1L+d22iJEfscL(n5tjBQQgTLm0oBYYM2N{|s?w%GesSk;zixXOzh~&>ci;3)O zgdWrGs&jH(n)ReOn5qL$ZZJKUyPsVEr_tv`y#o;~Q&L9;XgL*ssRQ>c)S>_4!jN?I z#SJ!vgDZ-F6T{+7xpKlkyCSHAddv3Kg(^*jobIov5BLfFC~JPYc?P_!uB~Ok z&r{DnTaP~ZWWDya*Xlp{8~-tKH(8H9_Gn#r=#e^i_H@1NZO_&A=0^SV-}q$xy?^x6 zb#Zy7&do2>*|pO(32~;QYT;$-?I&3B89f*xgWAw516$hT(98t~_VL1x0*xOm1s>oR zaFJsjr4jFw)YlyzS$Ip{e&>8=a~u4^Pn79GW%YX;@*@o*Q}V!!wwcj@0l9Jk`j(3z zL4%*2xpezxaB$(mse0nk3z38UJ!ng~ZKH!s3)Im;CY^)CnUJ>MLirx=>42Hcpq`O- z_R+#0#CK3i8bq$;XYvfs!B-ImOH|jmI0afdcxT^FJu1&zmlbUK)s+*qytp=q+`dyz4^#2D2AR$B7%9xbT!k zVfU^#buiuCTkG}urOSj()gE>A#IoJ(o%EUA6{Q`# zLLX^u#gpqyX8ynpJ{&t+TAZ!to_?&p=>y+T=g*w3&8@rP<$OK`oM%T5d1U9A^&DWg zQh~#|1p)f|1nj;G9|Z}o^)T!{lE7=nH?Epa0-Kj*$n0{mb$0<5vI7ns)Vy%v>AHLS zPW|?$e-B=w??V4sUJ-BV3wM<%i@V3WHP15=QWwu(ghsQ->MnW9mqYQ=r;*8j>ukfq zzxV+;@PVrV-qaiX^{HaUvz9o;U?ZEe-Jz5n^D75bWU=rH6=drLj7u_itn zhg`fjt}J`>6Q1@(({-rj>lb)=AFl-BA|Sp9bs)xhw{5})7-Or6XV4qF+;Y@Q;yti& z7xn(x=HhxnLx15RFmFS?x$`@~(C~L0WBKMJaKc46MR+sKgLp&Y`)=Tm(RG6V5iVG6 zp6i0!*LL)+jN|vkxjv+6JBs;FwY{tsVdewkuP>aUbW%5W%UtvTpkkor1qG zwj4B`vWSE~7bx^#mkuAoyp-GVxvvM|lGeLY9bdhqN&L;ri@3wx_qHjJKl6(J1817# zo4DpfMn30NxP4a!8umnu)4%WG$>FtOzzui|tmL8mby-7s;G>;)aoX>(eNxCMSDnY0 zpDr-n=l2ngb^$jJonPm#>wLXTADD(`UFYRJw>U%Y#~JHmd&cV@ink$gKN^1sH#wIs z_viw{z4xBOH-d;hI<>@f(UPJV0i7+ zN}W1$4*PDGcJHtj78lS#mO=ku&l5weAZ3ML^>1AD)z3{w%N5p!PdCDn=b`TR9eH*5 z5I=e2M*h#u{5vS2c@Q2kM}V)*EP8jteZ%fA(|(kN(#t)|Rk;Lm!AX9t4mU*)mV-P2YiH$jdrwe{@;)6YXnc z@d9n-yr&fmivb*a&>ryF{%(({*ltS$7Y9(6d%!4dDbY<_1L^bLJN=z|hAyyNK0yuB zjGq)SZZ8bz@tTY~@SZV^fhP75(!Q(Pgner2($ru6VHW^QUNEweA}lG`Oq+q4$p8qB zFkzTNejz@f1Uc$~9*ps7gqcSOLMPQc^pi=#YlPX^%dYn@nF**om6Z7`Kn8W!uay!c z8B$46=z6f3*W-g2WezI%9k2t#L=?g~ey7dn?EXj5+W7;2Ae4*n+$l!`ZJaQ+Q30nQ z8RdzBS3jE0g#}LR+u_akwtXjJjyop36Y_Y6NFFOD{v0qd%(~gzcfo<*gZAja^zQFD zkfgYKIngURnhqXG<{(M0k}_@6AvmxSyk<2QP6bJ^VS*qU*z`z!C|?pvHwQ+1QNd45 z+4z~BxP%#=hE`?R7zcjBVF*A=HJRsHQ7&6g1;|Zyr+dm?tU7-xaI|x`JN5Qwho!#uGPDvKcHuccEym-`=S6 z=bxxQ^}~Om9)I^At*PZRq+bS-;W#Yd@Vl;}(ZQiiDnj${B9{SOE1sd3N+SS^*YI#^ zOdRp!b30*`Z^0;8*x9pzcK`*dqrgl(1S4b?w^adh^XU>b2KitruQ+plBR7hkLwzVem& z!tehs{;$+mzVOBR;5WXzp8mjx>*(&y+Pi(HW`S=J*xjx1*Z;5oNqyjhAF8uYzP;`_ zU<_`yx3+6@Yqtl$=F|&Q*{$h!&uVe5rWTg!#AA=v@%nz<0hXhkgIa&>dQFkZ!Gw^qaciC;5XHCvMai4kFnp@AeUVN@6ek zq@@joH0ni84t&Z%gt@2y0yUo!FNq%TN3NvhM&_ZDMBpAdfqt!PTc!lS!H}F70j}K9 zG)I0*>RR|{>TstXdFVvF>(R&RFa4{3y8h&MyuUv5?x*YB&puKge(%%u-QV)wdj9?2 zT5nu_y}tCtFV+UQnw*+VgX{#$ERCQ$^<7QwB(viSF^+@{9-pM4&oaQNIrMCS7mXHI z69RYe0uRf_G`M{#0CA^16~G3Y^x)M-J;(ekKr^1RpnyjFo`b6R6G+?z;@aUQ4h{Z2 z4FlktbZ3(tY8MCqQ;xRVKgcHlY>3^d=j!(}4?m2tv0m%&s(M&vccAx>j=Gc3N5Iv3 z8tU=vxAKpL0Ub;AsrL-=$gj9l&H;_)K5*_&%;tW8{2f4peH&K!acQ+CW|ym$R;uO} z>kynCbCr`ob4$U&F>oCdX4yxR3#j#ZP`QBQ&DI_~CU0n<*$y3mc(?|NQXbB2T^Tbj3|HVyf+V;-n7QZ z0sxX~+)k^$*3IkniC_Ihz3=&NsWazK)%FqfouBJP8gL4n zNXZ@vjGmyJY1=fI$w55PO?3iI`WI5IMi1k+kd1Zbrmnc;}{RWJnKiWonJis#|?gDZyNELdxDJ7?NB=0 zl$~$#h1bY~fbd_gk%u~HbZPgvcZbn(rJ^{Je!^v_aCf<0t^a(ctoU=Y4i*tOy*a4X 
zX~w&cU4P#ji1za-HR2hFppoAsoI*ig$uQB{`^k#1$+bU()Iwi9f;sxw!~jM-VTuMm3F=z3G3&# zaGeZI=S!U!yen7QO;86LBDM#0(%y*OtPKKKPMG^W?S-YK>`HdR-`$i>X8PSX?+YH? zhODJs@pA?E(<`KG+H2}zhc>vibD&HpUwGsM0?HRJ@sA8~Q72wO0WcEp=w5%j+reT7 z^U|TCj-~_Rq&?(`+TssA)jI+&Uy;8Hfyo?rT#PV*4dc##+ZFThqRpY+9$%cDyx>8l z;G}dqwF|Uu=y_H0*k8%Ybi#~kwR*s*eHqIYunGtY`6=0 z$*-{SJ$KPJFItA3TY1jOl+cpCr*%;v>MiO7xgBd%_8in8Db?LJSub6`R^^y`x~=%m};!8fz>4njC^?_v$~2zZ#A z^_zg&^sOTtyl-AQ;w~+eQ~Ni><2}dgg5&JUATK9)?PKQqy!>r#ZGrED9$b(QrM3L$ z0uk==-O@67&AKqaiHVl`^bx4Tv^H3T96$Kc$pUe1TnABPY;|9DRs-)5Wl0G84$?ck zlloAui)YDG-620@EP(3(dzGGg?H%vsxMSkPi_f);@ZEvqfPCZoL^Re%#otr!Li~saz>mU8huheM@eT079qId$91?qjgmrgyZ zSv}+AzxomV3|+KcGB`&St*>I9=kZM?fq?Z zw!J8{srCXqemt$7hcBpW$q&41yDHPu*g+1K*+Ql_Rd(ANWk;1a>ur(K8a>HEZPkWWMYZu`WyICdQ1;K*-<_vD|a zr(|q^H23qPj^ainsK2a>I0MmgM#5bjf4F~){4JF`gVgKxzhV=i-^E+k<*0U%W8wEx zBrD7Fb?(esJ@?$x8N}}+Q2m76Uf@vQJMlOTEFw?Xz1vTWkm^F56u`|z9h3)*ZU7Os z(Y^}P+qQ9ek`U1XpF!aSZCn1}mc|o_TqJKlU;$p5IrC6GgiQa@)~`)^^~RkqN*Wk%CcNYN_pa_eZAB}2v=}tEkiRo z0@e)e;H9*gM3WwgO~wa@Tj;N&den^uv$WwGw@HIsXPnLP7-(aLUV}5OdP66FsZWBJ z;7XgIV2ab*YM_jFXN^H?KkrH;N%^>)iWK|zU{z~Sr{trxE{!A-{0E* z9OH(k`z7&W3+Gt?^YoTWTTefKEWqjMwGm#f+mn zy>sJz#8LDn_v}h0+(~%lAu!y}hj;aVo^`%czVV0@JX%NVI>zx)SS?48M*bPYts42; zCzGd?8+Cc)fcLRq45R}b!rS>bnZbv*xb|-?EwLylXL5#q1iEVvz4*w*bM?w= zm(xc)dHzi8(hg^{C26&_(grF=l=lFC#r1$R56#!HE4{S&z*FK6QO%(HZlhn9-$k?@42t8e?i6W&YiBuAG=Vi zYpd`oJPp74Nxdu<*2d-81p%IlmGLL%0O+6Eukv)}oB%$(RMT0QYnWqD){ zH=KsM=*G0)Tr;RR;^J_vjEV z7br+i`O>)2(=bTJWl1VeN8ie4>J0M6Cpur=^V#(2qfbs7`?*+?kMj9E{ zg1pNJ@!;NmUb3FQkT;0%wc`;YgPM-}xY9Zy5m-CWrnYlPdK-|Wf)O=1h@%4Mkt`aP z>>$!##c3SLfTT>f`DVTT#_RR! ztFL1$yjCxK?d5vurI+i)mtL%|eB~>d^#9sdU*vkBUU~VoguR5{E3dp-mo8tetCue) zpKDhx*WEj}Y5@iL$fFO{kN@~zs#;vC{Tr9+7#OC(dya5XnJ>)LKl!hIx*k5aR%ahN zMT4+R{phIB`B0fDm+s2Qw_3y$_1)jz&9U~U9(uTz$#-viD=WRXsps`;*K1XUiy{^u z3cZta3(#+i`_=WE*^T9Hi+N;W8gx$ExMGya&Y(DrikOBfT=+qk?3~bOi?GY{5IeF6 zY&^#(3E!B4^a`wwn~K}`%||M7BWTJ+a*U9u;RSh+@*J$O#=<_QPQ{3uJlw0fB z|I!zN(+ql1kjFPV8Q>9|@|`lI-fJDJUQ?ICd%UQN)8L4WLCzB==yeE@XL*x)^a6=d zj*uDn9A>AXbby+R4y=QH4n$5lNIK{H`~&~RMH@jh8e$6Y#m^yL2Tel6nn z)YDJYpZ&9cuAYC_JL_M3`gdzVl3r~buf>zlvpdvdykb=g4=*_PTvo0%dn1%Q9?AkJLdXf>ptG&`bP zkIT&|4rz0NEtk(K2H_@baH%7gzH#9X>(T!09P}i}Sfju6H#h;ETHK~h_MUex%8sA^ zp||Z=#~<4=UAIN;XZ`dwe&U)gIA|9X@Dp82A;#&v2HXTN>D%#bM{|#U0PgT+*P{oI z27fclJ9lyRKZq}G%)84N-@AVLb2n*V>B7T{4uB#9R>76S%jEJ4^RBwlmNT3F$}LF)(jBlt=P@vJ5;Kyik5E5?jWqJLScV6jOEi=8byojVtxv ze)5-U3nn>p?vYwPxl-HtOwKkWc7kUo1OHQWu%CVMV*S{U{@L2PeXBM%Zq(lHHrH;G z7v9Z8&uAy8f2p5~{j_Zyz|cSAEI0=+GoT?Jcn@au8}K6T#%KOX-}#IK+D%*WL`vn- zj%0Ek!?nAeba4mi9__W?uhw4f_Cxzjr=1rPkwzP5XJ^0eZfw-n)^==dCrMp==wO95 zdv?}pAEReo7&b{~N}disif_tSr^T)%y%T6-2Q7UNVuU9KXfwbW!ZhuyjY2)*jV8!7|mbRA@uWF&>4E(kswtcn@4yf8G z6)wxlU?(7D&feKQ_AMsC{Y-dEp6l=V&(-`A?LJ}V(-VA7R4xJweOuCYUy{6oFRxzgp_Ebq%T8U!9DWFJk*IVRUi+-VA>QRtpG|AOOxwlq@#F+yad)@%PhXep+74_UP5q#WWxB{` z&RrmrbM@hG{r0+j{dWDzFaC0U@{|9p9=UkFo_X@wI&3_yh6Xc(6D0Av0DNvdvg$w)I z6V&_`w7dD0KR~1ot#4gCV&7ogS?)mB60})b(dLz|+8UGLKgUm8cy{dKDfM&oUk|nk zn>x;8HgohQ_O*RC^^^55{yzN{Y7x8Nooeos5l=)(xVv*b7T5R=06OWeZgn74`LWO9 z!iV`~bO?IR%>oK!=mrkNsV5EOa~9N4{!=Fim-(pg>{DgYJB*+m61>49J&-8h^ms(% zm<+^878hAxU^d?=`xX?Pdhw6}H`2|~=k(->=r)x4D*EJ=*I%Qrc~}cP+lNxN5ASsy z?XQbB>$<%~+b&LkgI226R#$R-@7mHr%`c)qpw9tz0-RZk$i1NEvj;)|&jf$eD~>$F z_S8){dBqVg1Jc}KaK{mP*Hg!jEJXf!v_5fY8Q~+o5(3D?m7l{v=rQ{vgz&tzvsV`$ zdANSzmw$ywhdEKnOyNw@NLzI~^drB+&Aoc;!o@mw>RfHEuSd4D34|>txA?@ne?0+2 z0iz$!eQKj#KI0Ea$zM2;0js0U(s^(PPZQJk>h_)M^}#>!@v3=r?8a?!>~_m|^0Afj znV-&IDVI5^NAluwfyO=cf5L%HoQS;9CUt>wU=~$THn_fPpIEmXGi+OKW1iy3kGjNO zri?5);BKE`^y~h&u$Tme;=6fSRuhr(8U~-^0-Ksid?c#fIc9%?7qCI=w<2lmD@H 
[GIT binary patch data omitted]
z-r8;pM|*7nT{n5K*=7#6+tM5~1U^5R;nr;zkWEsTt%IYs4K5F%(cG2g_CNZ!|7ts# z9JjCk{s(QFu!Zrm3XKa+gl|b!ZHKIjGPf-cD@OoqrK-&rUTq#c;dkTNSuVMB1-I#P zl8113fqQq~#{SSBzR{*|hsXhX?@0!xPuh4qXqOg-q(hdc4%*i0y>_tsw9O5mheCkF zKx+7#cH52`6{0J0s9@VA!{6b!YwS%3T#}-#y)98FQ8Zbzp zB;jt`W+yJzuYP@0-UGjNs=Na~uue_-EnsRjz0h9x_>Z-TV>+{bC!F>vlR)T1^_Ua= z9{Dcrw9X(Hag+zzT3mFUHi&rO&yz{=c`Rb@VDGMN@3nXD+-bl0yI*g6@aqV>=D_SA zrUQ5m!pRS(e%q5gudd9upa1!v%z*yZ=7V;6xJOhcJSdMV0|nqLlQQnC6Yrkx5gO_T zU)Yv+`0SJH%Wnzk_&MLu{X558hk#DoiZ3-YO~`-j7$fSL5N96FT2+X6L{m9{!aL} zqXm!YCx}-0S=6TfrF+8b`6E6!5EnVcN&M#-hzu7c)X{-l2d7;K=|rJzblLylj&y$N zNj~yf+-nyWZ7r5~$cA})gkr+TSeRzfwW4J!-{0OqWKP?a%Zs!pLb%?DGjBSPgjd9+ZMVw&vp%Z_*pF{U_Gg6duOw)K7HI4 z78l#%_3PM%D{cAGO1ty^`|a+7J8d``w?p9HIhtr2yVw)txlTLv;K|dr;V%D;t+s|u zxXShT>7%xZe)h;fcm6+k@UVS!?_RtA@B#iCu@PMuuuAx&M~~X$$4}el)>bC+v=g*Z z{ARGNrYLg&&&96O*bf=l-=*K6&XC6mk4*+G)gm=$BTl%1-1tSmXj4lGLeT*=@LC>8 zA)O_LGGm%z79;acpmAje`G>r0GsF9#v;O>_!XF+?bmYW5lnY*yzWNEi2^0JvW09`B zF^vPqCxGS1{LneaFgMU(Um*VjEH10SySt_YV`*{N zUVZt6_E&%PueK}Ko}+}*wzu`9t=)e>r(m}o!W&uS4G(%`{oXDzO1hjdMBU_lPl}kD zrabDFPmiSj!dMHZj5EVO@6Ac9;K4Xt^yi)E)lR5EL2L|cCqI+1xv|=wK7Igi9=EOC z9r^?F?NguoTpKRaFH$CHZ#-G*ud38ac# zXW>DP=B_a$`7UD*yl23QwBp#K@9o1mL8p9cv%6?Uesi;hJdzU{m3QJt7kHVU17lp` zC*wN~xS^AkH_K4(IQXV+bo}M|^;_-A@=9CVTx)ml+^3(qm(PGr(*H00)^Q7Qv(=lA9zAUDzWZ+A+}YVc zw>;(ADB0&HADXA-WRneT)GRtAI^4Qy-_uWg`Q_W~P5P{K55UPGeLeMSjc?fonI*l_MB>8J?jC&j z@NxUK-}r6dIL<;pI^{VLMwBI#9e(OKG{_#hDO(H85hw{~z$XAgL_|cP| zNO`=6&e_;(d!)@LGUu@|wJDGnC&86ZZ6N#2PQd%Ar~~-U$xPerIrvo`b+OG6x(gvL zxwM}?H$wJDkc7$!+unXA*0$ExVH4;8O|*OcY~{kGYxxZ0*6I`N7sve2Mf8o;9m>kRP1*bFER%i4lwI2$J)*cYyZ*_``q_kVrGnpVk z=g!b~&MwR7NqC;N;h4U$ryit_LLr&Nr;Z2v+QKKaaZ6dum`{zwUiCBrcM>~h1H8n| zr%k0bzvzD8@j1MqKUe%5)9v6aUY71WcKqv=x|lZnSC2l&R?N7i{8#)^`P+UcVW0Bc znZ|5FGXlhC;-imB1}l!KO8;NDXY&c~8mhEXY#a(~(eRuOQ z|Fi?@&dxV{jSOf0qw_0WgX^0OfxSaM#V#hh{?~DJQ7lK>lE_#Ej zNJ37w{{>OvoT}dpB$=NM)la-BXS%d`xN{!sT+f5Sd6&1{#eUET1k@y zE#V*G;GOibfs)=jm@h%VpM{Ftwb6T5umG#NhD46%mwv3ONpF^&v+t&7Gbj19k zC#Ox<4rg?rC#yIy9-Wlx`LF5}4tNIVLKX0y#Y5`lJM|}@go$h11s=_>E}ivCymLn; zWNg;uq>K2GOU$=$6=!+=0vi9!Ch}7FgKH|1xdkq6MmGO06F8OE9tQCU!HmLtqICK| znuI>N_b|$r^#^vp438xa@rDTAl^jM~N~eHQ21h+pG8&W>Wsb^_u!fHs4hJ2L*CaX4=DJt9-^ z^dWYn3xSVmq_#J)?|66e|GoF_w(q_FL0eth%DjM^uEcLXf9%+f^a)=8v%)~W);aUP z$b)u6eX5!F=|RmK&mt4H-?V`~r>(_aDBb2o--NP?cA^4YQAGOgwvW_N{bP|*#b5lS z0XWIxXl$dtuar6Ww7=q+6!u}hk5*M~GW0(;!@cXmlb!(ZlRtXxs}ywG$<2J19OPLL zS!XcQP8NST{v(seJlpxQ@y}`^3dg%cegPqSIWe7lv>5>Q3_^JN8)oy#29^& zpD>>lT*FfV?gc!LB7-Xf6)G}_@hV!W`RAC!~&ht1CzQ!(hf$ zQRX;vo?T_Bk>@U3cW7sK{}}{TVUj-gE??8x*@!`A;lNZhuu&CW1wc6J9w2@9DRmR* z5~4b>px%v}28lYPv4N39g3KV!P3CoK2RwM!Ps|0WJSzy5zYUEHA(M#zlQReRtT*An zRWOj&xDqAM>F}3Po`EYRNKfJhCjP2kbPH+HC1r>f#d0A&Nvv_$W^#?!d8{aK+2BCD|?(SW#5AU}B z%m3~F+WyXe`L|O3{OAUa+9KII@i*6Q+_>4UUcc2|fAwSS-}<+HwSD%ppUXu5`o`0C z|KY<-Hv4Tj7r1OYpt8LY8bW&bMFlY3hTYNXPHwp?Uj7)525mf?pjXhs8jKhB6P@~%DnI^Kc}%1DJq@*ovZHV zWsv;I18*fabtzr7i$EAvUUtHvv$&n{dnxm6JnxZU13Ej~SrzTX^qB)Q(gE1&I7;;k zc-!6yZRQs|vJm3~Smh^=aP%GphtjYP^;rWHF3MD8tD>oaMzRBvtXQQ^13F#ul#|^q z_;8@*>Dp%7r*nAwxmVgNue?TMaNM3geVk7M!~mvdgCQ_X4cfyeYwcfs>z#HCjL*M( zyI`Op^%z-qXG>%2<^a(4sw>mx+X%jS;&=Axe2uPNZu6HewQcxg>Bg-#HJE8ThjiL# zd_2c>PM{Hv#eezvP3Jb0&EBY_Ytn`+jJLmIeoEQ^Cf$s+L z8aPtc)d#M8Q+In}fRjuP-W?tsw7tEZ?6%3tCFC?ngEbSm&B6on=vS;s?+iAnFKO_y zQda-KQF_;R8}86XUWJC`C)mD2EBUqKCvh*#RgOpEVg$&qq;c?XZ)cwd&ZCFxJD^Ui z9PsRC9^t~G4jb$N8S+wR@H8+hJ+>#eNjf3muh z6FipZhwa9dtL-xUI2=y3`SGZ&Ky!~DS{To_MR)o8i47X{dH8&O$lrr`D&X$52}{|GCI$;%9c`CeHGqW=sQ4JDgpFvwW{>vTj5s{r%}EHup%d@O6@Gh%$8A&o2j7P-1UQ*)f8x*oO8fLr|7`m` z48MEG`}~z_#0Q7q#KlC~DB&YOlZPFM(Y{IN23+Z?DYUax#%+vs^hA9-R-WtRmNeQF 
z_0FZNDM!>fr+N^7dPdvINd`|Q+0Ba1;p|jfSy>?8U1)Vc$Lmqs+k66ls5|6yVRSGa z7@$i(sTmz1eEcF4Miyo(B#7K?Lwrpv+fUlu$fObSpE%@`4reN7xWZfSsk40GGq9R} z;$;#VcvQ>28)7nlmV>jL(gB6BS@8xlg#)Bban7l8;x(YD{uX1JKaLJIDkm}3|wK`I?&)GY9^841E-!d+C|%t z0aWDFN^5Tf4f2KggZo3`YMeiz&G^PU@3yah`#bGqayCcy?vZCc2NA=kCSi~Ex9Rup zLHqgkXMXt?+QPzg+uL4)|MoK}oP~3gEgj(`XfHmk8_%Id)%nzc1aY1^a}(!0cZd;4 z{`L6lT^II4uBU_H$>fKeVGbgwyNyQ^ToyV8 z^va~BxC^i7ha3}-Zwy0UPdZ7v?4%X#knNABmWZ#Z$vN)b;%5A`7v%Sx3IP8gfZ?x9 z{HhCPhS~8yTE5n1Jo10eox?NfUy(Vq2ll`qo{K)k_k~2C3+RGV0kiXL{9ayzbCXBr z@OMB{SuzfIa|ps>K-7yZl9HNW2^CNg*RtL*@P;=TgKiSSuqy;pkUaQD$P$7!dm=q zF1GRF&vy3ENh8Xh#QvMX?n5w;X>>gD<>%y1Cyv4weo}4`d-3YEOYN6_@fX{l`g8vS zGD2URuD89djkY|-{!W6FZ4~RFOo|IP1jJtB zr=H8eZB@4VHP+>s_(!`(ZD(sUyW!Wdd;CPlj0=B2&$Kgm+p(WRY-blX z%a=#+xI51669|WkwcLr~K$KR?d~ftztoOS z{3O~=`XD(K(sD#w2S+j`&wRo_I4Co^kDq;@vSaY0Wc%ZR1wC)Sb01sofc}-aImQ&Y zdCqtX<@l{pPmFo;Xf-?O?1#BicyAZFXG4l7jm>6}l6;T_-SEqvHX<}qK4#}!D7V;N zeBm~tcbGDjxAd27b127wNcmS7QZwufUHSsRWZ%2wz?$k(yU?;;>mE4?9jtHX&4c`x z|AZNZ0*FBzo}B=VAMery6Au1(R8mM?8n(m!zkokBKu z*4vBMo@+1CpE%gr^H49Lqa1aO&nbX%N^U!#>Xsm1a0&Nb@$*brOd<6Ww6#qtyRJfs3Y_vLDiGg@&i9{AUo`tC@LmJzixC*QB zR`a49i^|Uc5og@~Ug`b(RGy3^&Ik{?L1*!szLm0OyQ)n;~`JNoOi2pqO&bcg3a8anHq~k%_7M_6QrW-^WIA_oSwa{QEdF-RpXTX(ywWzSSowmQFwhT41Zqrv8V@>Wk1@L#CsD9^?kuRURrit)MP~y0;$k z|5W}Xo;(}A?c}_;H(-CCt6*VNCl_y(zwcccKj4a=FT%bro^5VD)UAU*c6#T+zsduy zzw^X%-M`;edh4y05n!M7^}RnD{Gz^9j*9}468Wo)>%G#EA3R--{K!+iRjgD3pIKtEK^4YY&XNlQ4Z#~SPccO&Wx`LzlL1*z5 z0!SeoX~^CZ&66G$PgRdh+6fcRyO(LSuT7`z+?OLx%SPaGgC%$IBhBg}JvMhgzN(XM zc@*i?TM()YvlA}+p4tm#Lzs^Xm+facL}LQ$RxlQ9IaLX{us((r9LBXxvc5WC5q=Bw zt&(^D)Rp3k_#paOav3bGq=BwrB!A&bqik6kSLX}?z02Eh=_o8k_j5-Y0Ea+$zai`KY_?#6D9_SZczh9=ghlySKVg-26)$y3 z-p1$7Wtrv74}a}M-sGFAgZY#kBp~U#bX~^%X4{I-vpmkTD#`_DC6hc`q-7*s;j5mJ zD*|&$MCVso^G?G4^8&u=UwE*;Wy*rjC!PNz{}xsS#;@}8?t39V`GBv|rItzj)Wf9c zdlPi}B@6hi4|UZh%|--Z@FXVTICfLiVSQ@Cv5BqC?KYZoY<#+Tva=ib9B0rj);_mh z!dZP3<;{M&{S6m&dh)YlG}V@GZEd#?KDyK1=X&($Nt=Q83k#!+Lni?Oxa zwJpd$Lg`#$&4c7qW7z827LIMyr+TXWb8(DgS#@bsR2yU49Qn7NAjJ6-X`23&bhjSB zT64SHZ8y~qlTUdaP6v=@2dh1%P2bp9J@=KmeAFnZ_pAA-=*Q$*4^#3a-FgptqWM1&8=$FfMo!ucqc*lYH&0+kh04#Xumg)GUVA5i`%`BRwoDZwXBUGcB(G8^A@dDTnITOFXGLS1~15 zyuBk7vv!c-Bl1Odx$L;k(+FK^`^U@eN51e!X?R|Y;!8&X7(7D5W925~g)|7=KqWNj z8}S$(9WI6L-ou9&%De4<|9|-R+kf?6)#U$hc(qN<4%+_7LB8ML+FT6VpZtlRZhz@7 z|4-U$ufG|k@!-+@w!gcFupMNO(N)A5Xwawzmdi_5+Tv)wEiH`Om`=sw!aPa}109(B zcJp}NBm3bijch9~#MYlDh5QhhRhaJLPQwiCf*&B~miY3t6Zs}mx2GYLF4WbIzt4T# zC>sdiJu;-PMB^E+#K|QsDL1>i(qI@T9BCx5%4McB)H$vMe^*4C&Ovfz$%#n6T{s$! 
zQMmi*^q5Z?OtPJsbs%^Od>yv^{arc?bc}c&j_mLZi>D`Q(!Q7d0~jHvX(*d2Q{+vV zbj4NZj5;{jJ4^%NcUT5=ta40Rr+?I^Tf4h$XL~>S4aej5{L3%2mtT3kZLIIK@4WL4 z{6iyhHq-VG&)Oj!A-~JIy85`i^Zo~Ing-$ZkH13RL-L+!*RNmaKHau9*4p0Iep`R^ zkn)e)(&gnG1^Umw_Pg!M^EcY7Z+*P2JpV#FqcgX@z11E(UeEVp)lZ)CFiRag+8mY$ ze*9kDAu_t}>NWKTxI`MlQw{_<2rLzxPz*AqXQxN>a#B%wu*0cN8qj&n@rlvnW#k1v z9%o2$8%;dZiIbLo-bBr2<0YTWf$MzkMEwS=GJp%6#JTz{E8fW8K{Xe;$jchtHlE?v z?gFUrqq10fT;3{vG`fCp$CRYG>gF#^t{@%odt4&Pk5wA7xvbvpx1JDaO* zaX8zahsG<5=pFFoVh4}&(a>;^U=CbJ6BiY@(=crY{N~aAp2*-t`yBKdkakEqkMhsw z6_gElGqMjX;@XzX1?$SAGODrQq61KZCv=CVU^qDS(++__n)ptAO{-S((*z#(H!ClI zOPe)~cva zGr+y}+0VDvKJl40zi_3UAkXVtJMDuHK4|aXyWigb#y4BLae_~;=eYn2WUiri)+yDIE`}f*AkDj)F`di;nF}3mH z5;a1X9Ks*ut088)(d%GWc2?V|yaF)JcXbv|(KYE%Lbpu#^V1-d$EjG}%O(J?@`<#~ zWt%DgV;>z&G+452?`*}U^5l{0SC+_Q54@uvcAmDawR`XZ^tD4X;lNA{;MlQLRwuAe zs`Fu48kc2%^ql~THz+ty93CQ)q|vUCzU8=6KWm~h)K4ubs|hn6_KPSHM>PR!UX-bR z;h%A9Rl8T&1*R3U(huq`^Aw%}O3ARyn#ebfZGym>Hc{f#AJtBkN406b(}6KfM~lE< zkZ>m(89f3PUWW;3Lq^$odF!Q*xAF5IZ--B|&?QBG2l!D)8Gx%}XOBqXf*5#%wrhfR zUwI9T;^5fwqzU<3Ch2$gPuey*>j`xI+VB0V_PvkpwlnnXV15Ck9=uQwC4#NbSCx zw{EsuFTRA$hYWbK3;3RNa28$YBDe5lY#(h_dmYl9`YRjc?M|so9->#gs{^H}wq_O& zq)@IfEOg+S#163D!bS|zK=a5Na8t&~(JpN!wuG=Gd^X<&H$UwQzh6yh;puz~hZr`z`KQQJk{oxE^B%)xOx+)f;B@9nou+Mhjal0$dmZM$n3djea- z{GYBp$u2+b6el0kf=|tX584(_#qqdd`Va1Q-aoM3`#Dw5I@JU(`~;5EACw2IxAK=u z6c;Tkj_-ua8lnOmR2-c&X^k5#!&F^4g9H~JmpD-H6iHW?F9PDouBfzHk$*tPV(;4 z=kfS@ca}JL{@nA|+siM%*#7Nb{nfT|28!Sp>rr^NUh=wpYdx%A(bQd$E=ZS8g>Pqjqdj`?I45Jc zQ%$|4uKMI>zR;$}qjs>lhP~~s+5`A}C%kR@RJfT)pUpuiW%Zak1dsA=H@KxNI_PHk zHK=EQPMb>lXk*fb@obxFTb9Wn;LYM8s^tWcmy>1MSDp~5y!ojW`zPiv?n7S6t_9`N zQV%BrfmO;Q4JSE?{s$oS&<>ws@tz_TsDU2;J(!8F4P3 zsbg)MN@q%Y6+ppRrvO+d>)~B^g;;Qv9pDo#TrDy~=bs!vBR`)2-%cL1FaE;M!Zh&9 z=Hv7)%tKo~@xf0P+VCzq)EAp)5hlD6lfTN>I(s*oaeXfM`-y)UDRJyO zBpYrr?BdxWZS$U=Q`-su4`)ZsjU72^z6Sslj(_p}H0X!p6)Yx9G7u2JksYd#z=($^cc z_0`91mU3O-Gd(+K3(J?N@F{wGoBBIxkNkqCEP|vyGuWNlCzh8Jgn0IpLr==g=jzZc zC3E1e>mOu-g>u!S8NWkkyRkz3lW{f2J2Ih%KW*6F?tc2w=I5e@8OMkCWdedelCnO& zw1jRRV8a}em-M$CaGb<3PW!c`Yt)-)Ncf@jWX}hYSCP)mPk+dB{EriCZ9e6#_(+-K z#xv1q8|ephZGp&4|A)Lw_KlxxOhB+N{5PR_#hZ8?Jk@`RZ^p;_^cOr1h(vf+?tb{W z53e*?kdXA2!`0~rap>Kkyo-wviwawj^NT!;Ks%U8zXB+i3%j0mnGj6!z6I13CIdq7 zVjDmt=q?Ocg{3J-Q}U*sE*jYH>+Paicmq#SruB-TAjLgSw zBD|-w@YyyAkGxfW@{T;xH6Gc^k-;T#%~t!s&6ugPHUxKyFn01x(E`z>>)TZIqjVH@7Xzt#rEc+3GQg21 z@oN!7rSI{Zx)_6Jaglx>@yz1@-Dg%V6y2d;boug1yMFUV%JK7+*KS-Ro#STO#uMle z`(r{w?TU?Zr39bxixcn+M00l_(-=G!yk25QSa}K!3@6tDHO{cQClY^ErWs`(BpDXn zq>8eG zfL_=_q~%`uS!dqeKw~~;j-%?h%FzFgN5pk}nK}wH{^BbOw0Zz?hu=uj+i~JC{XU;b zmvV&JGOI?!>FQwN^+0^?;=hY!89jYOkatlWY>-Zx=~7-Id6gf?EqN91c-KOA`w`g1 z*?2~NT^BR z=SaL2Jr29syHpDpU>XqQTc%9iFR-5i=V3#08~6)wHwX?=$sw7 zLWs_)okk0TBEZrCBEnI6B#I(S|J2T&O1t36a}=iUrQFU=N(q~{M2b=;POa1eCV;cj zD$!v5tR@H@rz)LG#bhXi$ar*@lSe8}T6br`5~PH1AUu;;A(Rs<5YCA&p0m%%pIKYth&un_vD)Ii}GN8F4j;P%OqLBO8oelKSGg?xpW8|VprOT zXNA_h6nyJv&cLn$6MQbkKT6U82|T0R5>Nu)Uvc`S0qgv{1X3rwg!i!KG=fQlW@ZE| z9^Xg#Q=_)L{AzpklRrg*AQ zw~yYx)BgH@{2#af^gsQ73jm$#TimCzTWNZBwq3eNDB=EXo%p} zvvQ^aEafOcM3W|SQI`MuX*qzqWA2%J;*X-bx=StN<5c3(Oejbfxs%Af3w)KPzrx9V z19R^1!|CgjMsW=&%j@QFl8(CYIx)Vyu#DeX+uzxY@nd7+x8Sq0$UJN~y=wK+l`yI+RoN)zUw)h8$(-n_Kc~^V%tC5ZeRPY-)(Dj7O&j66`eDQ z(c|v;%PW^NnL3)8P3LZZcPpnyEG^Ep)z$U3c=>WVj;jX;?ST3p%}uut?%ZonHn-uk zg$z#Fp_7(QGG#%BMyV5c;Kcz%m1ynWuGQGt@esOV<@=lo$lB;_bFXSbj zb>SyUh_T5}x`n5SSgX$TBaz0F`TNG*MH@~QtZuHiCr?(J-{z~G@umqg)J43=J@m{% z20H#EB*+2K8Z^z3$HQW_syo@5wRg~_j&{Ko50O3j$Q{Ztl!+5C$jcI>g}O?;pv&%aDkKlnyrsi$ z2gG(u<8m}N$|41M+q|Wp`Pf>SuX!auE)68*Sec~gwzjs`e(Se?yM1*3UKU8GrZ=JC 
zCcNyoaQ!5}8M<<~c&W|to=GR@lu2^*BDqlx@MMt@9g+RxHX!e@JOB6g+GuOLt!(eM zh27n@1T1szh(@00JlO$0p|g1P!V00Yuo>E?8JRn|eyvtqiGl>Zf)LErV zwKe!3TcHN=bIJyI%ZgEWu5f7@WkuenBL(TV_jXi=*d!BeX@U0ObQdPtX-E63ZGZa_ zxYZV@T_NI7*>Z3^aMlQb0K;d{&d2a3S0~5i5}7&>Mvf2niX zbm|JqfEUF<#UTtO`7CZ?eCNIV%E?ReHgyLFy66w^Q*E_A%3Z-PJ_REkGXJ>SZuk;h zlQwbuOJ`EYi*=)%bV$)P)iHp;_{Tn_4nEJK_YY4d+l#M$y3NerXcIdJS%{(m-{7Gf z9V+a|u=*woV4zd%4)`{*W*v!V{-QIs4B68Hh3`G`|H10h_Um8!&Gu+@qfHIwX=jFQ z4?x6=tLdkp(IM^F>4``E@3j|SxY>T?&;2p%jyw3%p1OV9d&*Pbp` z+F)p+PO#H>EIxpB(hi|^#lodFJ6dj2OP9#QNhau~EFt`^?ibD)cmz&g%qaQtk8u8Q zrR!}6@gyXkUDRqv)YtNBZ)d41YQl}SQpTiY?Fn^z+EZ~zdhG=JI??&buv04-Ol2+ zD8uk7rIVMmQU;}o@9D?T*R*d~lZq$Vp%$8=EAvy=QU87?y}H&Nb1npM5N7q!leWFF z-Y&VjZZv4uZavqIk-3jP`lxN}pR~309r_ZK>Earo@pJ+OT2b+2FmWgSVXCPvIHQXD=`gy1^%P_FfCLx_YWR)HT7UdY0HSGG&GK#!V7ih;8J+n%~$BhdFT zUOsaIDV0&-jVyE5&%SQKYZB{xMp=}HeY?x$rztk*qxdNcKd)w=_vKe!X*Xa0808$b z?aj?@VXC}Zye;2^k29!EUBPReKBy9xCeJbQ0yr^S^&A~f&7= z3)`Dn`VzX>AmWtgA#J}4N20fglSM|h6hZwC-vP;cMZg#qd{CnLq9 zCj-pRp0#V&R@!s7u4Om#!QmEsGvBUUT51c6>Y{~G8x$PA=wv06bI>3s)Zpqg0ow!H z9TG$at*_~LR95o9WA`gpHbs28`Z*DGzEhiDM32A>h6{&-+?2C!UwAlbfAjDBZTenE zZM1x;End3X*0GP=2;c<6B<&~UXuF#`ZE1GcKKc6FnN*!nzeAslEu=XoM++C8=+eAY zdCF`(oYN_H<-_ObfvS;p^qm$YG+W<#)c(jH|Hal8XWP;GWAH_a=oJ3stE4hNGxw>- zlThm_yqpp0UD*s`;yORlmF~N6>V@*}uZ#C*op14rO{eRHvd`~=emvAYPM+WvCeevM zZoF4HJbMTB8iT2F@&nH`rrl8{{V^tViQ{_-F>5=dOEnBesH@VRpAR;CYV__5_02HbCcT7kF&33wRs{ zlKwd{i26A8;D$OE3`|bWqTgIN1s=ee{cTScaGbz?j(D%r=Bz6=9=NnWBu$;T_sWC? z_%+-z_QVyLMgPqJ7dneT%64oSn-IKOmNG2OT=-^LnQS0lKVfX%y%TPoG!R#@e(d+f z_Jh`0z)q&pw_^bF@;x>VvS&~xsihZT;i*n8@`)<)^iLNlAfL1mCJb{2Y#w7JbE%_h zTz@j-IYum}onPs~rIbZG%|qHM-=DbxA9-br3%VtpK0IX1Cut~6y(kZb&&}Vw$v2@r zOkTSy|bEoFs)En}q8N8I3-aRl-zocH)K9JoxXzZnXj zeTNimnfOY~(7LaqVYKMduFFpt4D%8v`b7pqf9)E_zT^Y!kC~Zl&Qn)N%AMVV_W1Fm z_Qo5pw=r$CcIqPib$QDESo%BiYgY!@arfe%FbGlsj|(T}o`}48lXLUmJg83Lw&3;Z z>6bjEd+LloY2`jDY^HIRm-dB>Q&#F2jB@XAl3z+MKgS=XGk6p;1!u)cV$1N)qKcaf z7M(QUJz!W0@wE$rV|cdm=+Zht;XSrT$x`9XG|4qTWj66llK)lS1r7`*)6`QwDEnKN zWcVj85rZF|3oeqvo3f$Li5sU~W%@3z&&2P4KJzbS1;L4{GpzCnTs_|VV4U97)?TDZ z`HtO?pEz&~Oq$7u_FFR6NTfyj)nY0qmJ@$-Yn%3o_KtpoCj>a&ySeS>rX8241$%pY zJ2_=)bA7X|bFDsIYwK&7se5P)=7WbUE1maVQxfqp{wI`&CkBlw&g?&`3$)%JQt3F zGs}VZtOGvrOCJVztS-T?KSjiLR6jpe?m}$mlky1w!i#j$oZKZ-3NT;$GvpC{**nWW zzwS+a{?kieJw7?UO@0ByyOXddM;I6<-t7otN*$S*1awdgb2-vV=EmvLS|h&PeTYN0o|1%uqtN(pH6u;WcsO3Go072q%lt$IT&B@bp=>4UkQXe z;v}93aCeN~{I9zMj}Qq$s_@*ASCc6=1P%(@Il_+y!Op6?tz-icmDL;*_-Q}|!CsUn z7-twsQPwIV@0Bi%h4ArTZFQFKERx2%f~ZBQG60r{tA>n9qS>H*7VkKq_o58aZkAAKY)NPgdLC{F{HX z{fGbI|2^T8lcO?FhI zEKU^o?k+C}mEz7S4cBt1(+Q)wsG;u-o^P@42AVfiYr4N@mM=ytK(*F%7U`# zq8pW`@XFursFGJtJ$?@yY8)w}PMpcpcHU}11$oYBEbQGXMY?*fW@x$UD+_clIi-}uHy zZGXJbKKtp+5>JoJ*j(VjqbV6V-?AM*#4v9aC8o7-(+XQz#K4%#Bf8=((o&=CXRI)nBT zZLhV%y$p&R`fXj{J_cV$UKn7T#F@Wwt9{~&UuvKDiLbQZ|LA`EyZ`7n+dekTV6+S@ z4www6&rI9jI||L!P3c&H$Kw-tEHK*!s-JDMsSz|Vj+2)$s0mUh*5WAemO4QX>B93A z^5y3nv^RWCTS`NZdH770hsO@K+tJPw(C#1)mk$5H3LiQc zu7M}L)Cpk>VD>2ZGA0cp3C}SKfiEJ&lQNng2?-@F@~RD{B0x9T-dFo>8ZN1+j8MMi zr5s>S#~7K67Em6WI$d13n8t-73V@e&j%En{v^63(>L2nImUK|Dr?jCaPDiBisABjj zC$<2&dP=OEY0J_kTI30xj!D9hh&FM8K8Iz~S!wDOae~eP5Bun*_1zsu^V>JR z^G^F4f9LPE>CvdoxlnFq*w)q$2y)PG9<(@7^AvtT{!aGVkACU1?dN~)EA7GEJG5)y z@9+pcc1VeXHX79TJu{&jVi*3;PP8LvXW=XHq7JKxe`zXKrB%_s9z|5)yr)Y7)LZeF z*QK?WekRi2sOF(YGXGAeOZ&)2xv-g$8GfB; zTf3X>(Sv*K-S@xM*0$HdRcp(a7Te$wcvJU~#z~koQr@AFc*p>b)c_dloke2&6l2@i zi7Dw?_ITK)ht1^B)`@=G6#kKhi#)5tg=|58S2a~S7u|T05B(P2B!N{$-E@Pl@Bq5` zSov_V&m6jWG;VWCm)hiT9#a(G0cl@U99vxlV8EqQjs)=(afgGGZ&LEh(RA=dU(;45~Xb&FWZ6CaUKc^hc 
z4;EvK4d>?R#A?TZJNui|buc@`rd%YC>bN?Z9$gb*@cpj-9r0X{pV>db=&sVRx|L_uu(e`|#058;wU`&c3KS4;ah;%aA^R9@uS{u<%pqrM9d-Mg?e`&a65;W~l0z|+~OgY2ZiUmhu^ zu11HrU?r^7=V#i=zb@m3)6d_%*SqP|k(O=VxYWf9f3NVZapYs)E8oCq9?kwz0Jyl5 zUl4)o2c+U>wic?F`6h2H)0X70$d&$Er4yM^p|%=G5^TDyDiPFq`BZI2#3ZdWc}ZD0J-k7rTZKDL&FN*-V7 z;u`C3dm^vdzB&10-D3w@AZ?#6CpX9|lccd^Xcv0~VC=(bABo#NWZ1D22LjDcytxD3 z(?_Cjk>A~2^+cVR_MLBkCvD!#ufEWRo?b?U55QZ#Hw#^2he%Ux$jFHcH_@#;=9Cbg z)v|Jg&dI4N*9FgnV-rwhTUZ#j%PT8wOxyg8Z+^Y)>~H1sUdpt#?)(`04*s%_qD`E7 zq4Qic>S7&t7pb@LqI`M%B<)R33JO0O2H!|C^wMDH7>Dv$r<2Uhd4%b4dvN~=_>foa zdkoqDyUwF`4^KAQ*3M(}*f#oTJACZ=#NlAvmX;UWGVRLx`W7@fX-o7~$IHWg9v~(L>5{n5p<61I%9Or^t!YS~^hY zjA_}%A|<91@93KB>P7d!%)p=xHgZQ_B`^lwu> za3x#reLlQW;fnu^U(%6%*&EaqJY)?7Ha~{U0jPz?aue3?X9-w=XuagOW2)@i%V^G5q&%uMNT~d^vH!eevSfuS2xRp zBXm?gw}Gy6^54@!9Ul`Mk>;WuzY*-hLQh{5SLz2B^l0bgBtQD0)1DAOeUBU~qi;AI zU7}wPykyfa5`$5LFUNkw-|-Q8k@D1so?dGI>Ja#kkbm1j$Kz_U9-2WS#~Q5v{=p7q zA4W&}-C~Kqw_m5b%uLq=#$aX+d(A}>)dr}qkAQ!2+WstcP$y!0$Uhmkusx+57rCpW z9gh$$Cq*;Ki+{=wUuw^l-K4IK9#2i>3*v!4^G!T-kHPk|jG-Pkp8ba~oBcxKXA)1W z#Fm%oI{vo3_ex%d_wjhAp7|VG-pwyUz_a{8Jnm_8a&`Xbob$N|=P1Mh`B(XpoIDf| zfGysqj_O%(6uFf5%qDd%moh_rp7FDQv^&6{jZ!et)=(euK`g?- z0`x2R$esw^{8ru7HB>8@SHCMjF0YahD2s=YOL@urS1eQVWfLxsB0@(QO} z?40UYH|azYBA3q6&;DWI&U}SYS;{B%wBghn@Q8CV8DFGK^Vd>#{6M{-NRts)G7n!F z=9o-h|FR398UL%#8Qpav_JC>(cgex$soHsi&qL@C3v=_Z~)1eEQR$ z0JgcDsOu+C)GdzPog9Kwh$>BLE{pofKdEQ&MfgtHF%IQg`h+L%Mdsiy=vCp|!Z*|n zoLj#6c4#1E@aVafzg(v#=Nh>kd!XOZ5~3&ihvQJR>Cm*Z0A zD@>u2myF!W2Y%njzmY%bSGI0!z3{o^lO^#>Maa`;Dt@G$wBl0M%+qqPs|=Tx1*1F; zOvG?=L2yNRvCT-^%Tz6@0)HYsxFy_oaAor9HjPF<_JuwWnEzJ(hM9CvUd$-_sF;Nd&%z4za1 z4<6h_j*n=wXWJruEkC2Hj&i|Y7PEk)^s%6wnH2EdtdTu&mHY4h`S zQuOpJcDtKJiI>j?X&5Q1+L6c$b@vL1Bs56~jb7JI(^94Q5 zG9;Z4a|titBd}Zo9>zC_FcD>h2%(iKVXNc0M6{h&8eW*t_DFksL-bPcGoF0nzVN!$Ufl4XO0cKs=#Y^~Xb2wJE^B#J|we+Sy15 zLKGT4X~AD#d;92j0S8B&&GRMAze<_n;Lx@^0SZx{qCVAu|6ym$VbclW*`cX*F!# zP1Tj}Gxf>@BaMOsyerGgykkV5c#AUN2Lj4RP7s@?N1kqPZG$rn%7V$2()rdSWhi@= z=ZmLx$m_N1-$7S8Nu(2Mzt^ZU&zi{TFLH&KUA3qIFK-+Q1NoIQB<-|EcaGck_DV=t|1^=5PE)yT88CZeF|5=HMASQhOdzzy1W? 
z8RO?*#E1sR0po*q=cBuAUwRMc+SBz_l}-EfXFdflU#Z0;ysLMtV+N0uBaKh0t{!j_ z(YtkXwQY=fS{-GCDtC=5NvtRFlz;upC?`IA(l=Pe7X->Xc~bbvho92oBBC6@MYuRDZF0Dkgm49tWi0_laVo|BY!d8j%c)P=eWx&u0%R&q!O zG=?Afx-#8w_j?kD-=11rzS71^m)mf0B_}P+E-vTFca`yTqGf6pgMxH6uy&SD(I+XN zdaI+|p_d)|9?wg8>W7s@CmHF4kEYvbepGym%scpymEru^`N(cV!rYl4uVzsJsBpq3 zCoT9)cvciE&%~Dq(lc!U?-|q(M@Gh#u7&4mn#2wNt9)>Cepx0k^mq$RNblVaT-qu1 zt@=BpkZ-s^xK|cqq|BmCWH`^lBZ&GE)3!Oh1F6D$gm*9{FT&FH5hlMmnCWQ%wyl18 zVs0>>39pYn_^`eA?eDhrjkR{Ly_H=QQ&>Wi&{&)~8D;g)luycl+hb_VQvN)=w0Lyf zM%#OBh;CiP*mL54o;Y*pRKM*wt1%T`237}q&_{lfK^{5*;M3VbJD^sVo`1Q`UcT0D zz42E2%m42Gx-GwOH4Ah$(J6;V!{%2vJbHca@PK-#L(14K1AZ!cM?+E?_+I!%M&-%w z(&$uHRhpU92U|Hc2soVV*T+R9l1ILfPKiUf@6uCVKcwE9v^RU`5m%yKy$n7vF#XKR z@!@*gUB3&9Quo=(e0su@1T5E8uVHUsR`W`$(31b^@lMIdTluWUq4V&+?TL52LwM49 z%R86$2QO(CxYCKluR5E>2ia|GIA|~<;Yqk!#*Fz3Cd|LP3$MODn@@B<_bOCpI4-of zs8<|u(}9*JO@}wwOpt9r@<|+SggN=F+-WFl*BaXaznLjd{<+*PKmS(XIo_c&FU+&iYNGU(t#e~wURNggjsD09>7Ju=S3aWt!rD@2H^Xf-+jOR)8G0RZSm6e zHpJF&P+=7tV~$RP0|!pl9`9|yKhT%X!dJfh#rB2Iz1{BL`v}~^o9G@pf=DsE1wUx; zJMgLQ1rGHk+!ehhF3SP4xBN?uFJ2PAd&Y$xa!2UXr~kow(dr^y(WLy$qqiF`^6lfE z!-Q;!BL_~LgmLiGqtX3#`qS05wz{z%n@cttjON>oTQ9cfUwOSP-+G}6EX~1H8zOGNH9fn7k$r5!Zf*>7z$Y(!FP}tq0Rm2$Yv{)UfeMo&}HKSY2RL zJM891#K^EEs`InU2Yxw2sZY}$%`IMPlViX6KSSL}E1gI;6*w)AbSgx7iG0j2&U+`n zu@9}2QRVGmshqfUGBD$Z4*I0iTKde6u5DoT%P3R5)zMD-D+3m3H|(F;nLLhdk_iv= zaKF3UK3=hncAh)n_R&ED%%vI}7F1=#0cCZ8x~7i$L`T8j?n;w4+)Z+Jbk;`5!Oa`j z(1+95&~#+6rJYF4N_MN!>06yPpm4Y|#Xgw!p1Qeg4SCM8UF7NDy0Qy8sGHX;ZGsCH z{5Ibd{f1p^GHZXC&c71nZtL&TR(<2!AA$$?MEpR%3_gNW2g=+nYds~FcE5x9Gj#Or zYenwG5uxJS-GSmclZNII-1x4%$}i=Ycf*7e*h_yBjuOoPAFe+tHGM|@`aXraC%V-% zhVdpp_#cnFOW)jm%s*1dC)7H;lz~D0yY$dbya-2ehq>{C=%wp;j{L$amd|s^2$9Jn z16DpJO;XmqyOg0oVS1h|mfR8#PcC`UyOF4)Jf3IbpTk)9^UE^(`U|6X-UC;k#_+I_ zuv`C&vN^7`!Mm=Sicqx3V1Ei7qSKV*|Z#qDiMMk8XhtJ>s*sJY-_!s{| zn;pT!N1N?nW3`<)*}doPZ9fs^$y?M5{?(qKTT5SMx6O~ov=K|#oU_5t2{Ps+u`+oE zy)p)oa;TDUIv@=ULTuHdK1k>yHC5i8{hX~$Y>KVa?a9+8&CgJXY6rUB{?w=1bD#N4 zJESdgC#pwEXOf5Vif`o^>)G)kg_fW6E}Js8E;8BM0L3|`Rfe@=Cu5^HSQi~deQV*7 zgI9$&`-&d#xw*NS^8EI-pN`VT{gKany1Cm%+Le7);|-_suW&&Z;ptBxGG27DZ~#Qf zB5kGvWZ+DAmBGWKvg>xSF&$L9LjO)VeDd^Z1_CwvJ(bPzs={$O+hFTu{p6DjcuEZ4 z@a!H7PkBdvh$9@1C3!M{G9;XPd-cukr}RNS`rv-s!RGaZF?ZjIMK=}fANuWZC6eZY zevS6RBD!;NVKGg{&c+VI5qV{8qvLu`BS4_jw(Smk3qYJSd~W$lZ3&1hy+ls7&!J&PV`fgw_N4$G`rYUu)m^ z=6h`tSg+i8xoyx_Fl+S-RRYGNOv+9J$L*`P+9Lh+fC-P-XBU8&vy_ts9Vt%_U3V8Y z;pAWH0MF_&I1bds(|BX6%lE>mj`j0PZ+-HOcJ&h4q>~oDSNVPX*s*`8E4VM*_rGVMZuk7GE7){Dn1BCHct9z9o;&rae|QO6 z@)vi;$<@WfrNwXni5oR3&n{lyPwnp#F!`MKLrPcV&GwfkGz#%b*E$*HPWc@DpID@Y z78B@QC;UB$iw+Xa**?!l@Cr22206hF(3XIQw3(fxa)MsoJc_;HqCDE`J@m7m2c#|6 zhLE=$OrIYP(8ur_?Y@)ohxoZD-~>D6OnU^`R8Kg0EPk{{)jytIs4jIOjQzjx6ye%b zD(Stw-RRBggNrx*K=Ks&coKWcu?s4joqgIPGj9H%&bCD^rq^b6VnM!}c3hG;9_8-| zCob9^jnNC_?Six*ma?+LAH9*&YJf52a#z0?*9GtSq!s@Ayl)#qpG!M2x|ccui#8SsagG%r_v9fCtV8rbe(D1*aS+}yj^lfxqxeB*xVX|q z$8#AYe3U{AC%td9Ep?ZUN~$(Qr;BivMyKv3JTRIs+)Ey|Q`W05`v=`cKE1pHJ3Or2 zr0dJe^LZxb*F6)|kHb9c5+1MK-}vH60IP1gs!Kgza?_hS0pt9F z8x+7PakNXv3*+{}^S9IYQm#GG$a3k}k$PrIy7?`PSjuztGk?jixbb;dWk zK%culu#%gfXpm&(`u6wPn8x+ZOTE`WTfg%sBk|C~^NhovQWEL=KuO9SS^UXeaem zR(2xn7}c?#Dl`Dk%B3n4xMc^BCvgE3s%Lk3^m#F%rwB-J>VT6Wykx@4Q{}765&!yz zY4odjHTY8%43mXu3L8+!Ncc(2tb~(k%qzt4sE1-AJ9u`Aa{NC(4IjVf>^9$v$RE31t~K;j6s7jO|*UeAQENFWO4OqeT#@#l(S{AEUA5;u8I7!TGU zN~Y-JZfEs0H7r~ZITHZZop`Ew^N^IQl?%6!AF3Udgw3zSeO%PQd#(%(6@AKTNoR|e|C&OpS3T3 z{ukP4`HjGJcIvmc%s-SgLvF%KIq+6B>fqmrI^eW%cBjL}_C{NOwBG*BU;Wj#vA#~C zv$n8sB?GflGj(0R`ds_W=YFL9+%NoGCLlcme!`V~q1R!%va;A-dj7e#vN%p>+Fj*a zn@@wjDMt!`Woi;bIobk4M7mRc7OW6Qw#^`bI5Ck>z&k~IYwkbO8qWmXg^O;*b+?M^ 
zU8JV=@IvxYp_tCReZL65NTuta#ie&*5MRlbHoZLcjdpe>Q+Rk)w4b>ukkr#>nRL6f zG-|`aY&u4EM8uCg>;OkUIB(~~0p255Fr&bfmr!3NhGLVaXQ4;%p?mEwcgKBsg#T+w;s`I|HbQ|*I$ z_u9sT$88!q0DwS$zh8UtrS|m6lh(E$wfDaLUiA^BWjV=k1f0L|>%Z2HXh>$}M{RR^zpbzBw97BtZg0H#R*oe9{cnG#%{#zD{ibO& z9EfwG`e@H1b*I`Kdd5zblhrjCr@o-x;M_@RzkB1iNW(|e>5#f-&=`8Ak&X^DNV>^? z%Bxv`6f$vlLkKDb^iysmXg67T+Tkhr%e-8X+2|nA3oOz}9tjVbO6u6isY8peD{lQ) z4_`Rn3GqUgI4Gt*@u)i|$xndI#?D5~)ul?fpKVyVezU#!`p4T#Z-2Vodi`yl-)fg{ z+-ldJd$BEXzjW<-8!lgJ)AM8U1&7qzi6%(W#-yM3$Yu;!cY7bgkA6Q;9PK=Ql)=Oo zp1T%4b5)<;U=RM}{onwwxyy6b(FGYfmM`x-XJW!d8q|%tST$D$N{iw@{mbx<0q%SB zsO^gj)6}DC@4}=;LpqC{;Q5nYYCeybceQeb+g4DPY|mZ)gmVSshLME^zI1WYIWQ2a zJXp7ar%Fx|)8MBeqk&_665qS{07uqWeKr7}ehMXN;Xwy2j`k1R;qDgvvfB=}p0ur} z_u7+>?zN})KWrQK@3ws!t)i>zKDvfV&S8N0ISVJN{H%h<`VUvGw#o5AyYljj?e?1=Z~wi2^RKj9FI;nh zX}f%DrQLk-jkbN*+IK&AzkPJ~qjs>nLwmE`uHC#AgK~g;I=HjmwHJdy?b?!;M5?$E zN7_YBgq7ik{vll6E<;h=iqCG499la0?;=E>V|P#i7YEoD>>=wLo14ILLisc8>LnKd zpd)C1P7YSv_U1kE-^V_Y7M5e#hi#X($eksA*D>vm!VA69ALY51MS(Nu;#Cf*vSWn$ zY8)$(OUIkaH0_#(s8*TwgZ5lMC|+^gWvULyB!Kx5sgfBk937wKs&`oGnfJ;s&;Ilm zsKn{uGjBVTMs&85*9mZ>a{)lw3O#Xo;vFi08+Z+bXiRGhX4kXRpi?6{w71&wt=BRD zh%pj-)s{p)cZIm$1JxLxq>fJXG@zU^s2U( zKXs>U13%fHvCVN%7K7Gx^BL3~jxV>V!D5@78w0!PX`9e#lj$#+JA<5ad=EZ*y7n~g0`lT@#x>V`nou9C!FSE zT6jnK)UWcVPQIHq@vDrU&*jsUCx6Lr@N@Nd%fH|&el=PjZkrW|phYd};@JrBLuAVQ ztCVq+Pj9Fzp02KDBIx$b+wF^A z{;4dW-FdnWA44xNBJUQytd>0IRdp@g@E?9vHkAVhIE`2GDL>bwl6o}yg*Id#+1sTp zbWqIxPWooT&Y!yl-O=OmoEyOCpx*Y@PJ8!z-)pbD{7U=er$5#9_ndr{--HEz0KS5W z@=#Yko0%|0XTVdQ-avrd9vl{*xk%f=dH5AwChip_@iQLN$5>gxcHPhE2-d+JwmaMP zjcoJR&-O{&)`?5>ThJ5YUApw}_3ftpQU^CPK|vbXOMT}6uWrj-Ognq2|KrDg7av_o z{5coPqO)8ep-##1qFnZsRKg46#pd@$cXl`1)5oiAVR?amIQAlTqixu5Fvuq?CxP43 z1Uxk*dS00UA(J2_gWBQ_bgF!--v@|VHgz)T90bd?Iff;S1|k*+T6+C*-z-d<@R`|^)M zDCEPFx70l*#3>KiHELd#ab9-iUkEFoa{arPDT-%$zZ%>8LtLM<{^5-O%!PL!@&~&< zgT9>ZUEbBJCvYd1=MHaqN1m=U6P+)cMCHkW2rPql@|`71Xp(l}ON3 zIkTV5<@kW|2b^)r*U^5PaNOv`Ph%bAnmF@R6?8H3t$lv#&iaEra)sgHcXdJZljB9` zgA?*NKzeHaj^>T=*RMvrjx2k0^82)`ttev2Q?{(}R6<#16cvT`}E8!vS~u z7bls3*hhakMqnM)FPYGb{jNSlhgueOvM*@c<=9P*Eq9l(HU~NqGNosgKD8Bu(l ze3VbaNC)y#=Vrr%I6!w=hIWNxGuFdX8KjwYSFkg-?jj3m3chARN15_;my#{7XL*O`cy;y$@?YV_FBw-Qytfe&IlNr?n5N{s=T*~KE{{0vuR4LV z;syjnSCGM>I1u>#CRd7e;sVYB$vB0DYYa0xF-2j>1(oOdv=c>p{4^$LN8s=IpRv(*D1dwBcFkv`54Zo3DxG1jxJuFn^(6LO)O{EMVi}N?bSElXhURq zXLk?%gf4+@Gqb}Km(!q#TXZb=EZZ~b6I}dXxKEgL@Bx^8%CkK0xD!6&v%n)P{7K1pBo)OL zeNETbGm+~a{n5iJj`Y(;tFn6B*dEzSDVX~7Jd5MVaOnZBo>u~o|3fq2#4o8yUhqDX z&-wKXuH=*W>8x89Cy4Iq9C(T6vrH6SP~}&Ayt{kvvY;VZ6|O229Jr8|c2W89gel=Q zP$-=5&p zf<@tW;X=lWc~J8%@ z9rn7kT;_wxy!0>cA=<^g*Qdrw8SU)e)F(f_@KuO#TTtwTm^cO#NH8A-%t4O~SZEBP z3Zv9ea#4OLpiEXlI{#~PDUa@AjS_G%K!nsg`4P$8fHHh8DsGwi38Kh2&L&R%i{MiCIHsQu4DwkPr@zNpuOAqx>PblN>p+rZZODS!cQ|9Efn0^3)(D zfD-CGMc}g2-521s+zZCHY!Pgboj(VKsegj0BloJWMAQ(}A5Y56LJK>rrpL>$tdb!{ zR&c`zhth!b`PPjMjf`VF;$)Qw1Q1=dGP4wmMWh(sv%7>#d|7M-s4%QY!G2K}!Sda@ z@Q*(`wF%F=I51YxE?G&tyqhLI#F6R6p{@)BKh@x~3-mqC!;ZDbRvw*=+ZTW8&k%Q9 z%ydH9l%>HZjOJ%jxxg~SZSkne&jlRc`|$m?wYt;(H~+i;ZTtG~ew|oGUl90q$=$^B zi)}E!&|Z1O&`IpHPI3}EAnaa+gQ#yT@78lzqFFfCtiMzYE(>B(hW`(cEvG~oe z+L1xo;qAK8Fk@g@)0!aYfCUYygY`~>EmaINSjzpdOVW{;?8N5e?5#?GrOVbGQr7upFO2@Ux08TCInaImO1f_PGX zedb=rL)B_Q!<8!s(lv%myWbr_rryzbGL89?L*zfQ$0b~Luv`&MHiRiNuGDrlv;#dF zxX7@E5OIat@^ZoivNcZ!&z)&b;-1h5-FWFXZSX{Uw6#NrbI`VSw%ek|rp+uNcN1;x z{SVvs?%Z!zuiR)?mKWO6!nkd&yTWXzEzp>p9UQf*E0^1AFTUKq{mpkWndEWJkDoqi z4<0;h$21(*ZeDFyUwpAWe)I(XZf&px48Y@eAW!y==!iM-sNQj5z#wezF-s1hd(F^* z*w|=r*=bQBhzaW^?TRwUdxTo3amuEPZSe9)C@0v}EZHe-1bCeS$&)-fW>KGt=iWdK zFA*lM748fF`t=4z4A2CrA>|b#bLYMEgs)w`(p)~WTw#Z<>Fkq%M{s*`LskTnoiv`B 
znQJRoueDn*yxNwo-)wWECFlpP=pfHShsBlUw!Ctk@aNle&%e}OdF9pi`p4dASFYV` zGoyK6sHtOE3BGdy%XB*N!fVI(06x34IL?BD0W_<{Usgu_O(N^yTlAN>QgW5oP7e`k z-o4zx&OuQPa@3_ET^$Z_f>&}hJ*I$o;VqpF!?_GAJy(8##(Qwav-s@u%T@8uFX><$ z(@?gP&54(MG-U9T4!kQj`QABi(!lsd|1J&T8eqbt&Ng2Y<{OQ~bvMtYWxqW--^OEf z>Ui9yXUEXZ?}*K}=@Gn4`q|M!8xp>>c&W`$&NR54q|-BpJPyI%5ZRx>NE_3!SVEqL z;CSxju${UK3VF^ziUTI<)^1Wn-Rq>flU=9C&cedwc7!}`0rT3)M0?>!KHFaW!WY_; ziP?5{zk+?MOJ_ z$?Spv4E0h^zn+i9i7*C-yh=CC1MLjMrs1D_x&c~glSmT>9*z;jJq%hqSI1}5Z9E#b zD~luKojM-73s9r>VRX$DILo)mfk!%Pkh(h7cGN-d>U>csu*xBp!*Ge18`n={+~!oTsSk*S&X^zl8QKO&wswu@eD z$81we=51ST(}J_p;%njIA9e0*yN&^b)<7_KRxS0i@5&Yr|;wc zuoG7EjxLcLxmUVs&nDX36m1@Q!ri3Hw4=|z@N&BZzGfGe;eT*29H*|6646N<+6ZYP z8PoylLSe04Ttzd*Ku)P3()Hzrx5cSEXE^#!c2ihQA%A!IL=WPhjvIZ22`3v#FV7ba z&lR>S2bX;e+L5`1725cfHZd5tiP>TJL;B>1Nyz3zOxgtD!9RJLeSUeo`0p*G z=;#st*1ysrj^si90ZCd=L+mm?VWOR%1uJA?IjN;+q$4T&)S}B;c;`X^@vJ_x?!^

9&-fZuuu?r8-jWcayXOqtQ7H!2s zCf&4cW(VDop1@wmY5GMD7l5ko%jP#syGM}J!?9DKo&JaF>tcA=1N#x&T5p|g9g*{W z@fp&164leEPuueHa=UbGDfa69M{DgJY&<_#-~v$xHC)i@iA#Z>tTUJoy@kt@VIq6T znS*5xrn_??{bOL(&U68GuQ&T=^Eeh?AgsdlD_*M=|Jc0V$;*j~^oykrqiLoNyD z7x?^H7UAl4Wxlsxf@gln1;d-$UQ+_%+*l%0wYi)bm@5jTh$fCRdKm71cCX+w) zsZX^RUVDxBlWk{vhkjF?WTc%MJezOmAP$8$$O!(_1F=bg-@D;{{zM(6&d}xil*pm) z^7|1sTHj!+wzc7I++xR$8w|F0%tt~X6T6~%Dr^EEO`vBfP-@NcgL#ZS7-^+Ptwm=0cY#Ml``wmqS1T& zvb$$i{c{%icA(t8Y|oqFHR(fKcfV%9&Ki=v`f7S=mIjZ0E-)H@f4@$5n1Xhzs~c@= zb1QXle9C6loLsP7r;m67fVLt19s1nrXOB0P560s;`lAkNqL0HL^lx3nN-JeQa(|@c8^6{W7N6CNRR!w0Y9} z2%RgwGSPHiPxXv-ta5^-6xAWY|1yH|0bIgVkoX_0#qYn3t5iSa`r`h7x*sSfuO_b4 z{p%**5714v_w?kDI~Qz?$?F(>vE2^PQ!eC^FO)OWX**^x zpPx>dlnrdd(gy-J)3bB%*G$=r!(p3n!T@;WHIq4Jk<$l|b&uG0!cP5>4GQp078)T} z$_5EzpVOIyI7-)R;nPL5c%V>&s8nt1lxkI{ji0ATzYBTZAb4atWR66$}XNL zp04!el>dUecNck(j{FU`j=g`MeF8o;ra>~17#Lh3tGs=_c9Pf2Zs@k-!F2u<_uBVw}FNF{1R>~d`Zp-E3 zu5FVPdU9oFJn8LUSf;#Q_75#ax4F$RBE#^+PU+;vvCKYW%Sf)e4km3j`87rh@Tb0| zCv?Kt<1?p@#;@q!ru_$OW+mom(8hpIog$IsZt*Z#|#kmUu`GxVDq z^I98?hxxqb{?=Ce*c)%Qt2b}9oxSbAs*Z610GT9@;H`ttoYnc}nFwa=V`#3+OXBzS z6+!B=-t~2_x>kzHFH!Sea#Hn7@%maWFTkIV3!(dpM4R&BVhFSqVJzKt9o=Vg7<<}r)l6Tf1yx)O)7`lL8( zR24A|9!9aw>W;LxiGi)`JSY7c7Xi@D2(RPwo{s96X-?_lid|2bd}n*Mv%61!WU|lNL(-oFv4Qh_40d6ag%mZI|7H< zi9;Pd&fof$Q)tCqDFlrr8#*%*cdmA;fu5_n7b#3^9DJ3vHOF&~%s zApXhmY}?s~p%28$e0$@KFSM(-K1+%L6|h|JW5*u>ls|1`WV<+u$TtYVqm#x8pThCp zyYIEX{Wt!L_8x$A%jE`JGJw0wa`Eyc2~R{t!9+C3?h! zz!#3<&iVLX?`*tkqC+__-ygiX{CSoHrL=$e3Q5ezS8_@;{o_H-geNV38eA$8zfGgE z!(=BM!(eW*t@ypl(Oh1ulv)+`$^fI8Xuo!h4hFL{f;3=s zTGLRtat1oZmBuj1Ol+LGnp@t344#x@+f(Z0#E!h^ zQAl&pW;{QP3<=l%@d28;+M!8w^6d1WEibw7Y8+XgI)diVc7z-c_xIb}_HJ9;*=dV= z2W^PoEMX4p%!Dt=$#0Fqw?3z?{G1H6O;*PY<`>%;^07q|^O$zv33_4SrI*_dyzzT? zAGP7-%WeKLMjI7b9GtZ)qsjJ}=a$;|>a}+FqbKdX5AL@0o%QIG19aF8IyvgMot^FI zF9&|oMs~E(&C-sjuI%`MHaVhb0&Lvy6FzZefrXkM0Ga7*FT|=#-tUDogDcD=8lQ1t(5!h^Hy^>{;8`+iiCqKW+c` zH-4k7Z_}PEUuts;W7?a^wuwFAalO{#Xm1TYzR?y26YaTMEA5y5)X!0*r^h^?%zYq7 zPr>u*2~W*|dnjM~P}~D2y5|geEZ5OZXVs-KF>=pEyv~N`ulnv~#VZ^ek5~PC?4E#ml@$5HKZ+~{ZZ zvchaGCDgDgJVq3cQO)<(@prHk1xp_2#`&c-SiIIIusLb8NT`mYj`E_sV&>qXXh~kp zNxzQY2nw!*g?DYJ)G@H@tY5(Jf0Fn~3!S!^`nI2xK5?KNUOYsn8ZUWMt^?gUIZ2tr z?*u%Z(wVFi1H_^JmRa(O@`@dl!7?g-cp%>FgwN4N+fg-5!IE~5_DUO18_hxT(O{e% zBZ9t*4gGZOF({jAH*eic-E5znG(2`fEcRVZnqBl^iigsF@IQLdyLO%a_K!TdO-*OI z47~XcUHS<=7nk}ufW3obWa|kw?4Z5;>Ko{HKjpRC{_#Kg&9=IC)|MAsh*El79E-<8 zU=t1MYme@Bg0OIJp99-7gX{J|v>E81i7W6_{WssafJr;oG~!8p;$n&{=%PK05+SvA zPk31V1YxxpMc#IT%saf2XK~=WJg6OH-j<2K_17O+@f)DwXY)-y-Q6&Cw`CDO$M|>Q zIX&sw%e=W)84log@2w^q)VqP1`lf#1Ad~3C4<0PLE6;R(GdHIe=x^KJ>0K|X9eChM z;*($KXB>j%U+?moWf7vz4sTdz-j!cs6`%9uxltAd_>D{ejI6tG)WLAuPs^E-d_3-`HwT9!=ME>4)qj$Fr@R6c3vs9y~>&gAIQ0dROym17c~ zXh1F6rtX0O3gvP~oEGqs^_kkg!EW8);4ve-LtXMM_! 
z@|}FF7JJX8%_P80zneecz^?e?GijsH4*v%&Zp_#9$) zdjb>fhj4+9lf(TsP5$bpx8D4CTO7@|t@U+^s$*Mc$Xi>~4Hf=U!2ob^D_C`4N2NyW$;XHTBD7>KjY1`R86M;0^4m0#y2X%LWsPh2yC}g$kMiIlG*cEw*k>*@$md6$ z07u99J^bl>vd-}d>IM&<9-yPPb}~tByG^$`3$~Qk9rWkM`f6~Xj#h_d0~Y0~QcoTXo|U)i)5z+HeRS;KnZYRdNFSR^S@HBt$Jo?CjwN_PQTnLpfI0dw3Rb>V zObPvXuw|9r7=b_Zmtnwgh)w2Lf_!*z;Fyvpi_C&2Y#{LGqJ9@CSciPNj<&@GSJCB^ zZ(CsAs=XX@3?kI^rzgS+y^sm@yk)C1Y#%*_*-aIm?B%$RdM4V?TITpFtq}2rZq0{;61?N&3;XlDU#)_AUkRLyJ@|6jltHxdG{SR`6)!p#^ zl7Fjhsb_h&;MeteUF6x<%S-rrxVo#CC1r*n>rr{@Z(Q@2hl_UKmzK+uD=VGx5*?>a zBQKS1>B9q~&*sIWeRNM0?s0daqkQ+Fzt?l}Ax`1N3kRsCx4qT%I5Rx{$zQmI-};xI z<=O_xpYmGTDB=r6m1R6bc_W^ANHfb9|32eMJ;j^8lmTvY+J>%^!_b;cl%Y~{^cTWw z)*ZXS3hJ(QC|NHIU2&$!r6{M47*)i3^No7&#Rf2xfa7SI9Gl>E&*bT)_Lkuzx05mD#=HtW#Ldbe6|G+=`5e*S_Bv9$&d>dS=X5*bx9lFA)(}vGS$VT&t=@e>r>ux z@577tgu3!gG!t}fd{3h|%BTgGrZ=v&qn+y7zBVP#Nk88mBX--G37Pj52K(Z4>7Pl{WJgGk!wHIf+riwbRYpc_%*$FzKRa@tn3ye1kd4 zv%l8!W%}LV&P{ki9voAC#+^HgT#Jp@5-7J}TB5BFbO84ziFFW;4 zkbK|K&h%aQy{S*Wwe(epbvt}%WHa)SXK@)qcA2HI^4T+WeGXGmA>sn6I+tMo$uu%@ zRmuVrM+FWNl9k?Ey)yT`RzDRIlFG7GFgz=K<}+CjNvkUc#lKhFz!am4E0ZW7KSwJQ zCh{ee+2(uB{OqGxLZ45Jf)cIDw^M2+3XX6V-6I%llo!Iyg|a26xUf#C7a_+mriN8@ z@~vMHIDo*UR;h154eOl^Wskq=1gk(yBP`r`E_~}J9SrB@m1tc&TnyTatc8N`4(~h{ z<*jdpdJ~UgsaH6H8wFQccCO15PqItCp&?i3m*5J^6Miu~c;7$JfE-YBCu_mU#JK&; z&-@vxvPcRHOc-8;-4#q~`NDVd?{R|y4y`ewA3u509zK5DK6wA5_E-MOuU3H=`>yiU zz`k+yxi*WkSX#Q$zVzc?Y%jm^a@*V6Al#KaqA>zuSqjz18m=Z$hc|3UXZ zz$Mv5X6Eawv?fzny3(qcs4{XSZ+j&6WV^Ju)Rq^<$-|ZL`CeZ)fNOe{e0H}$yE2@t zul(z~jb)=lpwg`GEZQ+~pvV&C(X5`zry;(Zyl^Qzk_u9<&R~z!NWU`X)xy7aCSa2J2Ab` zrg>j~_^^HBoo{EN;>Fw7+R}KX?QCsRhKmv06$frEUulmwp30%w;pPtZovq!rxxLdC zM&owlg_~_{XRm$t+wZhJjK>A?odxDuzxPHRGo(YKb|`76hm#KCPwuk+YXg(r+r*s% zAF2>>9^=<%X$Kw0C~*>+OSYexrTw_rF2-hwaJgoEAH(&fS6zi#nnyJq-U^2MMuLd54860%@u`$=y(cez319iSigTjpu z*272Yho8Sv`Gl$Pav9E@KjYQ}PwBE=XO>)B5?6;)b&9enMl<**0=s;4Y4bW7>MT2L z8F3u6v@MdJ`J|2X?*>M)z|8>{>xYkqNGc^Pr0Me$6QMb2<43s0cO9+jBupKPboKJp zcJ12rc4>v{(h9hpZ}Wq3KE+@erw+Ezffyd0w(;RXo88!GgPomrg?4_CvK?w|ey5c? zqJjJ#P^?IEXM=E1Gos}51mX5*b?S6P$82Y2(C+RWwnOCV3Gx23zxQkH|MU0$QG4h8 zdu?~uT~y0$YwM^z+1zg@!#Rw@aeMXEm)dAyrrrB!4Vk12iW)CC&~i$eI_BRa8MPh! 
z^hphd3s1GlAfs^C;ie}0#i>T3okdg)xYplYXOd3aqju0z2D-xuvNQN|Nc?&Da(2Q2 zVe@r@X`M9a7bs(gExR{JKZTwEIQ-xn>0!oTCp?sAFK7;~eDYsss}o)`wa+va?a@BC2A|4E zY)j#W#_0&e@aCQ8+DT+)(ws|Ncu|J+b;0T}&wiR^b9blx-nYKfe)H@9vh9+7Vfj)U zEiJVj_+7mOc;p4Isqs(tXj2F6%{N|aU;gpW)9K!U?^c1u^4vYB{(;wyTxeHztUKwn zm!fm&81nD?F59`@g{pe%@}hq6UbMMz(#Hs0jt=KE+kpKtQ5Z?2<1XKBkf^6h^+@|RaGXQE0wU--?#YW5TP(%Abk_?8CN z+h0E*m&wMoZN57|2yGk?=k7p%b*h2YFyz{AlJDOu zb;ac_{h$p`Jev==G!LnVf5o>v!Y!xwhqr5Sf^p@IE{^xE+;bTDk%YYUGpu0oy}3xj z@&k|e(jUo^pW>!-d?6D&!BfGgwWN)G^U9_6vDaQ~fBKjHM4KGWw(Tc(+SdBIiX1Nh z0RQw!L_t(jKss)-$iOoC-@&f^d=}#y%UGjGu`lWGZ|m{fJXn;njHd@APvbOkE`4_QhO8<4#q-_n)Kh;VVyF z+XU8ockc1L+kW<^f4W_}ex0~3wmHjbX!a*v)C`Kiq4js54m%1vN$2Mv>`%(isgs%< zIP3#PcjA;S;Y)qNnV%$>_T({PM2;OB(1v|*?;bLIl2h-L;i0>8!Ly(FNLJ+QfLcFr zUk<#BY@fLk&>Z;3w!K9386-rbnt# zxQq+q*LxPC*}nBv?H=@U156(;`dM8|cn0IeMeH*dZb38NomhDIWUc+Jzx|)L1MHZo z;YwSi&v0zHh$rdRtjsZktb5V|v+6*q?>4ToEM;_g~$CJsS`L zS6xQcHiY^{3T*;)M=K}!rRUKJZ6`YDGhcYSjV~{^HtUIlJ83hmuZi4LWct{Zj{n%e z8ju^g2?D^Ut=5vzpR_4cym)nRB>w+j?g!(WSGivHe#prPI>Wr8SggKi95cVVVwl;%{?p>7NScMYzt(2`zCY<6xeLPR!*YTu7`q%13#{jgQT~O(S1{p-(MR$s);N7~UhWTX- z8~#`4DXZ2|?3u0?tGX3p?N^eC>si?9oMDnI$C zLo3OoudsD^=F#^HUah;;sdP1AYg8Fe zJmaDZaDl(C*G1aiJ^T7BeD!#dujAs$OFVJ?U_L2===N-?y{J!SpXo+?@4(Y%ZY zU&!lOe1KQa_rhR4bJz%p#}#?zvd!K{_u0R7qtw>+cKggH-bQxS8J--ao~=&+WZb38 z%j=vkc4qMFE1|Azt_(V2-wuaIkQY#lQ!H*2cB*7UC=2^or5$$8uuc>hkSFaf@3y_RofEWU!kq;H=BvK(M?E5U)ZORc4;izc zWvSvgePjE%_(?O{NbM8*JdQzl3V?mCe9risu9ddeJ-W2T&>>ZV21Toj@XpRE0!l*5pf-HPOf|SBs>sPl8v)+?3Fr_lnW}(OH|bi;n#vZCNh6TY0GLNEk z-0{mX{wRQ;Ri$CwNfSJ(e8|f?ypS}(pB*Z~eOLQce9G(6N%3I2N3LlKz|?~pQ4R53 z3eE4T>z72pVq6F5L6Q_FdF3POgExJXU*%i0>&oMgHoM?RhQ@_P<{+x#3zmS@%kTd0 z?mPG}(>B%*+lw#0-JXB#j}VK<6zGxYIW`qq)T%A=G(XD}CK}JI+DQSY?e5(>`4;uR z`@i~MwmTo*p`KGY>fi7DUA}UK&haAnoN1r^{Ab%2zWAl2@msq`haN3=*lygo+@8OA zn>^`&)7b5~(*DR39ON?xe8o)}B*IX#((#9NwaW1eMuU?m=v3Tr9=Zm@_ZU^h-3b$g z((+l?)6O^*CLZ*Ye*N=Bob!7gE3D75Xv9m(wLIRHbLEux9C1TPawVD~uCXLXsX#sY z|MKEe@Nj&*hY@g49eWCh5+hFdG=`fZB(J)9SR9sq$V%q9`W~W&#Np9iH`s(Bx|s*Wtn(W@xK9R073P#lPrxZzXcJ2^&~8fwzmNkbQMpgu{zN9XIX z?Hz8mnc+;kbY-bsTUkbL4BMR#A6pij$%@ZnlT3kJ@`5eAo^> z(gi)>@nfz=pPd_lGj{_YwMh*z^pc&cLv;mpmLc7tXG1uLYz~H_HXIJ9Sj%L&EEheM zT7x|4MkCa-j`Z|%0EWuTJs&4xs)YPU6*- z^3@~4Wv9{fhT&KGT3u@zC*W)(!3FmjI*am*9ohqEdj^jk?QFEe%|~tL@!hul;DfgN z=)<=6@L}73^iex_bhqt2ey43dc&Dv?^sRR9_kXj!|J(nvef!;a+QH6tTUxo&Zrr}z zZocqbI@kF=H@tI-LF=h3Frj-p+SQdy`80?F5aCxCr=+;}SS;pr8=hPMU^|itKyj{Q!b@NL%D?cg>+x*|B>G)%ct$=}PUDBpwyo(Gv?;jW z`}~w+of0H1FilL#pYlk2p`nC5T% z0W_Q+4KczNv+%|5{=1myV0#DLpSF3*9Uz-iTYGJ|zSWkGCfhu4cmVXo$sxH3lXZjs z(x|JydWdRLzU8@_d#1U2=opwbiT2bLk;BC{e&yBn?$fpQz57qv8T7rkw%xw*-S^tV zyQ}T;%B^T(;;wKg|6Y0oXqwoBJ;w$W&*{q}GF^Y-Y;dK>vE0|zbCU)0AAnJYVU zG;|iL!3ETzCbOlk^#(5Ef(MW1&qA@pvBQbcI3xb(FwKfT{E2%APIk~sJHY5CFlGs# zA2=aOIELx$jCyUq9q;e9{q0TkA^P%opS=85wF_8~eM)!NUI`#K@YJ`-1jV}#E?xj; z+a7<3QxjToX`4)B0v@d=51Aa{a-u|+>Qhc^nj`68EcG>ay(bc{S6SG&zUPOBEP%>v_eyME)f zHoEpo1M$(JlU9g7Es9L^=e@HpQ(2xfphXz=rxKk>DCa9Iy4l)+}5ui_*M#JhUQPLS`qbEH0V z|H13RRe9z5XKCEK{yB6%e=gk+od^uV=~Fs$$lKNHx7z&DQaWgUJM>`BZ!kM?f}m0X z?Z(_Yo{VjQAC#bJ1E(P%Z|Ifk3s_dcr>!d(kqu~&>r$6}%F8PmT5 zwzU%{AE-0^%!9VOgUk75{1m!^j^NyQu}z_W&gSMbL9RWm-V9%nq5NriWq%8t@^Mh% z{QRqGic|a`kvdQP?m)Qy34(VD@5IRoRHeI}>~g}-$ubukpTssWXWI#Gm%z?#@;gsAiET_O3Ramwf^!fsb|xJH-a1U2u{>8}o?xPTc36x*TaE z9`9!1o8KpPallD8v42e5;}h%;$~xLd&mZ`V-}!|fmy?r6*vamibKuP@ixRZIh2^|_ z?&RTjj!k#uXCRD!;$WSVwZ!*y9w+`?ysOO(4lhm?sH=AR6UPaFOagG7y5rx;Zzs%s zS09>>yn^h2@BZrh0I`qG-)(2So4;}^jpb?O1Al3rH0oIBP8zSIS8k0;4jFfl<}wEI zbCv}(rXz0dT%A0dLRn9qN&$C3!mpE~yiiwjGrj!?!&0^E7qnWSMclhZqeX0G}&-@f(a?&>Mebn}MHe;)JGLpx)do;iDCC;2o94?I7{N)wE 
znu%@i?iNql$Vo40sMYNJ(pFoCl0!fMPRm96duo_+sr&)o&enR{_Bee{ynvsSBPWO+ zK6;G)nrvSIzAHCxw#N^iv~6@i>~7>Xb|B?DX`1nt=)%$oI`Jh;vAd8vZF}Q5ab73= zsDHpa@svMxvw2&#-`{ki-OqNJx8M19v5GLd5GWH}#B+hr`yady9wytDzw$F}9(mmc zhw_enJ?kBtPmLJ#01J9Z9j;!GHnljz!8`S;@rno3O@`T?n^YaEEic^es8<&Y(+p{@ zUcHuGw;w@6ZL&ILqU^uoC2>LiBv(Djo4#1S_0}%45X(tCaRpuNvx(2sqlvbgHeE+`5N+{qI;^jW?Z4;{o@-o%^tz9M<4JGii3u_NN=q6*^|PLPzmM>f(% zcN2y7b>Ru^>PJsk+du#Hf7Tv6q)iws(MP+Q%^()%ajp5Z;J%-|A{XlV{O#v5F0=k* z6~GVksYXAEWqYjth>Tb%0SG8SAz#89ft7IK@DGe6n$fit^W)vcy?f|s7teXv+wAOd zd+m##ZxeIy&c2^$z)l6g>K9ptWec;tmUz^aoA7uqT&1l~Ny5mpVc;V`82`VOvtlOf z?{z5;apr|9+WfGSE<%vg|jTQkHTmmMEtAyy3Y`4S@YA+j@GK-@_jchOxgL zBi`BD%jZ+9p~n+bk<>w!bR5fL_g&bk&XD%DC27NeLw#pouuapJAe<+Y*pJA>v)P%>NF0IaVeOl{b0^dD3GTx>`hxQY- zh6hjQ@0CZNXTsvVcb6yKxqu#5Wx*K4uV?P|htyH$X>rON!O2%yJQW_xQ3h$p;V=ARK;Ht8c4aP09yT}CjAr1HFi>jEBam4nI0*WN! zhWcoSlA(QAH(zLX8>iq^@-Cd|l0GlcDoPW);x9f(!i6rQ*x;MLv}L4mlg>Fj0l*q` zGUPvU4PE>Mpl|UP6Bo`olP>9bI+Muv#ues_S)n6bq!Sy4GV^_ZlKEMt^{q|xC;AaA zukY-)8<(%PrR9YzfKpcvN6V>;mX&d8;T=V$g1lP=X^7S6aUjRS`u?A%sfAH6{&hpdt1c6s3PlS%-mn>Z*t{BM5dPgXHcI)F@Djxq_bYR*ljT~G!(S;;tseAwk3dYYd2sR<93 zB46ll9Pwa&L0z7SllbL=OCLIqoWN`35XMC2RlE>>imzo?`Q}~mI(UMoXA3Z0#SdYr z_p>hgE$zQA^-bHDY7}<6atLO3}rwpp+>?=6t zVYnMkv{U@tq54XjBx5|iqV=p5>HzBV4y-JE)6d{ zgr@izMMc)bnWI95t`Mg$_x$BqW72OQgN8=GpN<~_Jy>k3Tc_>Ee&Ua`g~gZYTWQ#n zfb^D;)ZirWm=9Tcm0|7C7_N#xXz#xJ-S+GMoKky3Sg4X+@*!Z_U6Z4i{ZMxxehO$MsZlA!tG9jD0z9s@+FY< zreRM5bna}ZxcN^5OFRbPg1rLEg@^U-uRqN7>AQ&1`})hJ>{9LEA5@H zf4%+FfATNdKAozUUw^HQFJCU@Z^JO0Z3n=6Ok;6i-Kj5(ODA!*q4#T(A9%>%B-NZD z&8(eEkdkkztKSGn2MQ#3+%Q;!FEN;juN2y>CL7{80hr0S=ydUqKS(cnmF_lj=^P`F z2AE)l%9n@eUf+e)3nCa69U`7hO5tjQ=qWOc>fxV7o4>4j*4)`jJ!mi$Cy&n=jYsg+ zFfxs z7%J+9Jvt6qvFKo$2`R2i2SXc4BCyiKiMYG_1{}3uS-|k!Fah{x_m9W z9^U)zo%WA@?bqAW$E(TX-~h;HJ#D77*I!*~&)s|x`CeI>fGca-B$BvuqEg#4m(x-3=F2Sismilb{(g8!>58-c5rP$d#pgt2At&?p$ zJKg37H6U_)xJ^0-p2g)(J2>9sy^g=iq_7hTa;AI)9)cwQbMq=c9I$AE7yZR3jxgQ` za6(2dmJcL?fBY22@RBx5@l;)wh2B!vdqIQv%MF%CSa^_29?~zm!k^`bPbpSilZMkp z6_y_5KtSoRAkYgs8DAGT`^HusPN{yvs9gD1{*}PN26@%?!bK<^?>TWgZZEtFk1Ski z$6FZx&_KR&A&pj38W`RkDa(X`Z4l|xI6(8nAx}$CCe*Cjur^)$E=%Bv-G1(B+gN*q3~d9$ zL3Fn~V_T?Q;x0bxXD2}NOzN(`SJ#?za2)KWY!xp0tIP zYi)4pa%81IB)>k-*c;M@b|<_5eIx=P#fArY>2vTuozRXy3nqQLd^*4Uq`N6Z5q?=1 ztbWAXUCK`W$^Wr$v;)%?p#!i1Pn|6CSlOu|&6K-1lL`DrzVJ`k@_NDDgr)sWsUC$Y z5Cu!&OW5ScN@G6Z(S+;AyMB83*){D5FLZ(v$lAi$@l6`~+ok{cIneE5NGIpFc6YK1 zOkI)zEp$|LJaE>;ns!&olI`#jm;DzPKRNlYzjEb{RPpVOROJ>6ugb?ZbP(J@XWz9K zwYSFD`kT8O?arOY?aen{&1YlW{i`kH#Fuz*frZ~zw-as0-?}?VYPbtwEZ1F^u><$3 z?a>ZYUgc)xCZ`jSryXTEVK_Hz`@2rOqi^8FCyySs(QwqBfB8mRUEgS5|Lw201IpRo zKBoOcn^{-tZ~`3KjOsP@t*}fPM!jnioeN!&9r*A^KZ)))i0vbINf-IY4!sMJ%u85B zc%ny<#f~U($JrOUvuuJDds^ckx^oVeqeYwcNU>Z_1LUmTO;J7_FD3nO=e@)z6dpLnCSgYCBY(N9 zDGu^wP>}+r=db}O({{!auFog8rJi(`oxMwM|5Gl1P5u~@SB))phj18x6M_Dg%0@I?$>Ti zCdqVevfc)@#;)kl;yZ=4TH?PWO9=OVG&{gg{hYsZ@gBdP) z@<}}P5B8jW&5XmUS30mfrBB=2Ps;eMe?JXz{#}3DRWySOc(pvvY_AW|&AhotF=LvJ z`vRL|K=}h#4xkr*P=GpspA!q}RzLN5;JDO8Ex7e$QBN*XU#Xwnfv2u8RSo*jWb&-H zZLMeYB=s>*$1jda6P-#~9%-Bn6AHwXr$;Tc0eV=Q%CX=P_RuN$A0q3X8f2U4i5}v~ zcEkaH`N_WdLAU77-MM~m--SSfIdv8NPV(!=OI>*8lZzM*-O&R6P45xI`v;rpV>q6a z#Y_;zML4ol7A;68Y-UWiJe7Y)YS!m>RNS``owyQAKQ6#UKS*f-mvH!el800 z5^sayFnDxqzSo@*XdY!5{R)2uSPL(ma6v-=EqN|?!O#6YTIiX0U45vVI4xVgc-cx< znSD9(yqb@nufpXWXZ-LF`KM29SRW5Ri^@g%*!uE(`atv*j6y*v(*zc3#_BX(fZ&Rp zRNBspOFaLl4K^+ECypwM3a@-gM?u0+6Hj^(PMn0BkMc2DClFVr{3C%2aI$zto4@1R z$cOMiC;L6}xb^542}g#5$7)~9TX^$KI_VdDfZIqM`2`j|eFxlBTj=nkHK?@wY&X+C z0Uq%e8%&;Zat%16=E>z4KC#-ur>z}y08{m2s)`wV5njtC58GYw*SCe{EfvBqw(amX z7^ZC7PDunD=#KEKaH5}T{DnaB3vW@j3xU*So^U2!gehY`j!9^%SWTgxRF_z`2#{pF 
z2`_NQ))Hu353@)rUc(u$i(pX2$t$jC5ul%O;-czWc(DH({v@6Y3)4R$j`b0@_RoYz z`g=m9r!~Fx_NTEo7J`G>!3Y~veGGoY18r$9mw_qBwB!y>@50%=$1a06Qm4c-U5C%k z`7B37B=8pYl1pQU;GNB3AoYSi!Id$D!#K!`uI9fgZ{;^I3Qu>BzvVqse!;^%FXUr6 z9sJD=P`rv)VF?{|04vnVo3Ir5|E26tfOO5y`#$XSzW43E``o#+0~pMLjQ|OP3%Fa9 z6q1r9%Qi)Zl5CdZI7&HARg|(*PMlQPu@Yrb*(p_&l$FR*ijGKIi6ly-NQw(ckOaUL z#4^Cl+?l!iy}f_^eZA-R`#R^j*r8>GrN ztx@{2^}^z>vjgaTF zdM75Yth@IH*E{MExPm2kt4n_}n{kPLL9QJj;mdeBb%e#jhxzOEy?M;iK?r|NUgl@e5_bwxHy~w$10*>*Qq_`2;iVR8IdT z+!ML{5Wvw`8*!K$ueM#hQsVjQlNGsS}hm8m3@=`vwLeu>v4jxbaMp|dDg ze^=h-TX|c#LY~MrE(##Ay+APQ&!4nW5o!okgXI82z1K*RxKy2YLQ9?`5EB%}FJ%!= zW3|fi%BxOjg5$aib4Y_Opi3`w*0J*uJYT7FxF@Xm7ptPW!RH{BNar-Qw~E8n6opD8@4J=Vq7Mw}0|m z+KVr}FDqvE_jfT?x7yW<7uu^YzaQg&R0F8H+bJ`h#;zcExSe9clqk1(k$@=oaV@1pp8Q{?diF6wI+?7!C z6s9ge!PRMqsCdFLgonD|*#SN~Od2ePxw`7&ipO}*ql6CH9-VW&_@A9!;0g~PjkWAX zKdIHfHe?zNx>90CQDXUyBKY&DRmbM!KV>CHu|@H@GakAFkLh{?JoyM7SMxExD>6%5 zg_ceNd3v8+>4eK8<{^(-4`X?Cuxxm+ot0e|R_EGqXS+SP_qZ+2Uuf5^U2PYa7Q?Hz z-O0ay*jAS>cD#E`9XHyockZ;WzWG-B=r?_Hd;0lj+s5t|c?=?Ju1vpr;X=E9VYwX- z4%(~FKi?j{^?LiQU;Rwmrek?P!|>5>`{wrI2R_{PkM`T+&7HOnK@Z`vU10;t11nD> zG(KbJm}lyBNS&>Zyl!W0){f}}9jKXE5MpOWUUan^x=xgcDDWIzNOoDgW3YS&y~>c_ zU38;wc%OITj=V@8tc!Hg8qcdduRC2tFsW`6?*{f4ISQiU%+ChAcDqYge99}?m0twK z4ym1O%eLdVxU`bpgX%6n1K@GH(Le51!??EfNGAol%*-#ft4}}EmM>3l}b;%0=t@?1C0gbZbXi|})Q)!6C<_4K=jjQ`MGkIUzmWk*CmUR0@1&{lPg8d%fRz`O zo|CffMk*YuBdiECfR5HYd_X!E2E-}n(Orc5O#OpLV9Y=gP8p5KBJCCM2)UlL=I7HS zagu?~yh)pQrkAFcUwOrl;D$6Z`pn%_rNhUL2n_(^@Cpr4HP+Pnr8c^z+Ux08)QTcM~+lt~NFCXI)D zW+4%I9)r_5Dqo!2&iSrZUk>-+>*@K{=zJVg-UjTqwnsiUpKh05{YZQ5;d*;}`>4%c zyxJz_7u(Jza<`{WI&Qm$NAA+Ks1PX}|Ov zpKiBr-EMP>emiP5?X&|gemmL)D(aswZ*bk?PTEipI;c`c`^3rm3?c!y->J?s<;efi z%1O21-U*#5aL4luFK6)%*v4KY?+nh&&F9Em>3uM8aLR%$zw+^3x=HM(RM`nur3ZhdbOftmT%An3AT0I0=tu<2iVBl!@YJ4t-t){t@e+9 z_7~b75vJh3)hm~CjNazH9olcS9GQaZkg6eP(Jd6KES7YwgzSueW#JdA)6|-ED)>ZreW`Z_mBw z{cUP_wP;|QPMzeh?DjApXfT$YUk>7sDaC}>d8MZFCgo|1TF>C1a3k2t=m`z4y6n_$ zi}#)Bi4VSNQV{zjSD}ZWCf)h3u2UDyw28Sz{Dn4#>^k_ILAe9rBfkJwd7tipk>3n% z?jYf92Xo=Go_nWkSx(>&wYoZlAAt|p4r3EJN$zehZ7@2v#J9hb{uA-{iKAS4vW|;p zC&#DT>cy4xMU;_D=u)t7X2rUbdz697rhKlZ2OoBn0f%SHbu!drUhA9Fbu8^L@yE&E zj=B?9Bkb+r?jG^b*XtYY?YnogGy3L}H`+G#mvj?8>*ua}MQ(ax7X751fZb)CDAdov zsPmkVPWjFcXfx6_rcDMnE(p-J&L`K1YoE@=zvj8MvqSxN+hA)e_UVnAPo(Z|zxjH5 z^PSCxXrX^;>&TxM;SG0ZdmN#*)y%YQB(wllDU~Bgr3>jKoy~Iv70&xUn5rvwk1xTE ze054dGRg7_kH+AC11#)@z2b_a7aNY6fA@`lkC5|!#{^VzU79MbQqN5fMbXPMsFYb+B zJTL#5kMX?*fC_WLp#2AQXxb}NsTZVS;*yW)&6c=%Ht!R7)pAS+t?hFgL)Y6(r4vdn zt6u1E{${6W(`mn+ym6)dr62v*+r`V5+9ABWx&AoEH#=x_k-nv;Z`eOo&PKxlcEwVg zyL1UyoTRq@qimC-3<{OB@j{m1%aZIHi4N;g{A8b2c{{K#hI|bN+nM~|+1;fs>eS7) zySLZgpe-81Mtb$tSK51Dda;eNU|>ITIg9<|w=I<=Z9D5p;>fx6?CrmDwS!wUfUWRy zExMtNC2q;PwvIZEE=g#kYCgc$gKv~T{z z$J2gq?`#I|`39zVN&7CoBqH}bJ2C5mIBBmpp6~au5k2-gHVX8$59I`XA1`$T9{Z4T zi?|m)kNR9(T+Rgi7yjV)iFe!<<`x1&YxL=?5MjjY`+h&bcr^Cw;Ss3%ok{r;Zgy%bcNaIE|iCNSBuv+Twzz zZn+x??mieL-1P30^4NX*SLp|kHnN0l2%$6s1YF1Gosj2V9LtE@hw!{}EYQb_?>p@o z(-i%q6nqBnfq{#%Ozcxz_QBeE8|)smpZWP;M`2=*J@qs?S=-C@k_4d@{36eQ|K+9S z_VRmQY}*@KZJNCO{GCu6CzJRBROqG@f5<~08g-6m&47Y60VENZcsSa{W{3GaiZ*v^ zcOO1zfAU}b!8UebzKu2>5zkMNC0Tq1@=agomYHF>F8|<27<$($&%oBjH%NTdjUj)? 
zorU&!_4z1Y-lwBb5xKrQmjs{Q8KvYLYIR|j-=(sG=0&!mF8VKd8^3cI*Yl}l(H+Q@ zSMTHx-;>T?VbUirQ;KtK7VnKyV-dnqq$+H*+w8Y zmOP1mbs?y2f;{Ay$mBG6K~rr7C(=EINYIbq>Bw1ZH+9-{5M01BgKik@?;#hIJv-O@#=g7lJuT7C6V0K|7tsaz zlp<;5b!}ER+X#y`I=M2SjdCH_;^IPdlf01$AowI>Mc_+5QHPXmYM-R% zF?p%@g&5FM{x!JdA?;GnYhKOCdUz`z@+U0XG&ya8`sUg8)eMQ4F?=87doLDke#yj0 zPmZ{0I8JIaSV4ofDEWf$Jev+@-u z787`2X6=Fvx^|eYT8(kB7AJu_d96(aMG}o@H4<`O&uAY9R~u zlOTBLP8vyJ@%FtOt1X_U{Rbdho#br?VWK#cYqWJ zfhWaXxJGVG^?TojarIfOd~>ir?|>w*nGQ}UJ9UTfd= zJ>T8tpLxEW_$dHS#0o7e6(@KUBizZCYiK7JmhZoDq<46q`giY$&5db`7y5ghNo_*a zkv@P$-{NZlo@tYP@|Q;57zbw>!^j|Znvr~OIxpUJ*TQF&e;#L+Xe9fo%9i1w(l_omt2@a@M4?zY#i!Y@+p0aF4dmm&|gU>&*9+ABnu9^^7N4#v(V*winaV378aMr z$q#QBa3v4_xRySmPP{jni9?qTE}|r!X?xehQ1||LHIe>YL|lLRM0Aumkp?)^5t5=E z2R}KPO?}*~=thp%V|XwNj7UtzfU)OMJ

KFtQDfMu;Am$;8a_9N6+$hPJD4zViB4vwQ6C z{e!>Xe*3q6yN%O$UA}TP_???x%+az_lMC&}(>L3n`2IiLhQr;qzvnmR=v*#Lv=4sh zm2@_CwzeosVJx8&HGO@;_Zl=spu$zp7hc3bAt5*|dYQiB>&!3s?wnB^ATWaro^`Hx zkDt`jdz{JYMP1Fym&${UmYL;!2ZuDxU*~w2*ZKq+-C&q^$nT)s`RH_T2BWx#y=?wQWPAARw@odpdKWD$d zOw%FDfZ+adj&^gB@4e5yM5k)Mz4Q8Al|(u-GmEqB(v_zv@}%8*=e736ormqor=M!e zi*s%H(rP9+kB9rg-{s|{_VE6LcJHfiw71^4)!y^I7uwRwg&f;9b~N5DUccBTT>x`< zlH)iJ#Xb2t$nB>D^36#8T-9xz{Oec8*I<^DtsVtFt*Hk+s5|eJ5j0Eg(>D=q;tSgK z9U=k>r(m)4U83Mg-+N-|OjC`C@eD0JYy)2dV#I}MGRTEjuUJgQEn~m*FY)ruIMTxg zTmCxEsyX$JJO8SIqdw>e2klo^R)|CAJOhS@X_(0{ok{uJqjNnmVSf2yJFqPx%o7vD z<;3J{TY?U*6db{`4(83w&9$A!kK4h6djLs?odjeb-o5O%>n~ku`xv%@FQ($SIYPdK z%d*qh0&hB}ypCZ(cvV~s@nh$NcM(mlr+6h^Jh4*9St+?q+p0_}uf8^zu&5J$I!#M@ z-;>J;d?F3REALIyH61BGxc84RStM-$k<8Ow>&iyyYq*B`l(Aq$A*_|u$*ZHA^^QSG zKIzC!N)vZOxC+wMkQ22ig#yJ=gU8*)GmlJ7g6=V7YHWMAU7+2%K%TSeM>>HS{3FeA zw(s;J!aaKI#040OORdc=v|ZrXf|m}KR@&wFzuGpAC)$_Zy4MaS z=h_@$7gjE1;#3`R`QpV)c+D@(x1GVDeg3ttw%`4&FScvfo@(>xxApDTF0Y=nE0-_l zbdsZ^iA=n^!)juJju852d~$~H+8ttN!F+3Bi&#Dhuh|^S1;yX+3jt0xdYuMm!7cAR z31{Ho60rEqM(emhSsp8?4jJrjx3#s$&2Redfiq8A5f^^PIRm)hItw9-Il>=+!d>x? zLE2$F@YnH$#weF+Q_&O1pH8-u@4SX2@_Ki*Ew`Tiee^QveLkig4<5#C8%;}G3J@2T zDm>v&|GIiBC)`>p=9ulQ@d9Cac|X_g8E2fj>ER_#>R9i~b$X3-=AZ)1VAL3$CY~oI z=Gwx-wRYj!3uHUfj(0PA2slEAU1$a1clpmr+tZy!=E>E8;zCXk2@L842S#x6;b0g& z{)Ml+-ahr2&$bhIehPi#XGr$oV;6vBw=#8*hbGbVn36 zGqRHbA!rl3Q#(z0%wVw3>ND$Ebt|x_YnV~~B_8;b(sAHNQbryW45JxtW6<$W?D~%0 zv1?2Z&TWHcrsvUp?y@4L-+`N%Z{yQ*`OHiv5~*J}(>#)M;XMO}@)553IHSN=@Isxy zD-Du5I4isnM;U|0K8Mf62k;2nA$E=nAmuBMZg-cf)k^!T5kPjvu{wUX#-r#Rh@GEZ zr2TbrNWEqIDTZP+RJo+LERV{Lyo+nXg|`O(h~hx1GL38x2HR;zTnMJk+MIZaWVyax^-D+6dM~ny7zli-o}fe055{ zGtRKU4LrgdJ0Nwiy#@z++IU;of>41p6Eo=O*~#|s-aGBfuieFlXyBv<|Lv#bTie}$ zqV1ge-QE3^l4p6VV;_qI7Vf?3Q*)lh4T1i}*5y zm@eh@`Q+NQN%-ok^v{JO*{c?=b@!&@#=6LV7Sul{sG{b_Yhd-p1!X)Wed3+IAZ;Ng z;awbcQ{`9BF;c^nXK~_4D;?uy3gM9VpkoaV8>TGCm-eOWn*J!^-uE79?D8TFE_{(V zUJoL#)mH~Mlw&O8i?bTYNev?F@{Iilm^Jpv)YxHr{@I)D-~DUIZNoaiJ6(UKic7)r8y{k5PLzq=CgbdSos#loIHZe3bQl= z^3XDKw71`RhqyEC+duiO?Go+K4&k=pE|RtXkk4qq z&wh$bBP|fMN>|(|4C&93r;~-k>0oqr2W8h1b}I>7@bu`x<2JKk|7I0Ec$jw3u^i#eq#b(G z{11Ecj9 zmw~xcEYd1C6blN0E{_$@2W8i1gY%qMwE#f7Bw`YIPR5Coyp^atmreZm@mAZ1pa0>1 z`?C;WqFs9WIq<2jA*S*Lo%ZR!%m3PBQ?y_2d;UErZ@5Og7g&`O%NaYgeG?GiCn!$% z_3AgAJ2p*_i8j+%2jJ135<21b{(ydnIP;j=yZ7H|-}`-^Y}40Q+Wz`o_`7s|Es&58 zY2V2s3ji!9c@|Czj}=d!u>45@o%hD`-gvrt^GCZrUU%(iR41VJdHr#@^l#pEb%AvO zc;*c9+GQ%v64Kj{g!lGI7Vw%pI7m?5Cy5RkmZXsq8S2|YBS+uScXM1LUYcmD6}(X+1k|o zM6zR8o~YqOm~^r)<^-imQT|6ZpdK%Ga zNT2%Rg|@PK5uG@dHcS;+b`df}e8(wV{H1g3&V>ehl&j7X?@s;+gFrjhVq2x_+XL&I zc(wbfrDErYdIO`%U^PP3+%b)qk9z9-Mv|wBANsqQePMWCS#B16YB4xZ>CQ=8G z&bn9oW1p92GwhhmboQxw8`NjV{TzHzPlOkYLv;Dp_8|HjeQTc_6p~fX*XBV)@+VdK zu)1aWRD9&U*}5R2PnW@-PR9U#l9*OL_qE^(pV_$a%Rox6s}XSBCHl%g>st9&9)#q% zACK!l0bsjE>l?hr`HsInpKALGhTzV3+$X;InYRTHE*^EI*QF8+DXQ#Jg5WL8vm$z8^?JA%hi7C`ATQp z)GPc}@zdXf29EIxU$BJz7`~+(!4U?Ln? zTLow4OT@tF;J68dgLj4n7p4hqq&?}lcU=9(jVIcT7hh^8+zX=vUdFXn!Kq~nIQjKX z8m7LM(fx%Mz2~{F%hz>@M;_MuG@krY`kTxvR|U>u9VZgmmIv=u`F-0`aqGV9o+RUH zlHQ*y2xs1=5f^ozXEy*RS|3*L)sw+9F5(tBC!+u2rmJi(*P*BM6>i?imtgD&f<-_FJ{TW68uy{{&uehFHlRja^S9ROP#U)>!yJT?*JB{Dh)w=VX z>?(exh4+)T;FSMU#S?UJ;4E8NJ-(_dW{|CT1y9 z+2~}&^6Bg3gp(jo{O~UNgSrDx^aOfGn?yR-m`3R^;d5a?wZF)ga)o|5j7QfH-~O6& z8TLui#~}Y~>=CcLkKFdW*ONCmQ_jof{aCvL%-zc5AfO_YqA6y~9i2qFhImArLTu;y zBiD>$BNPIu4Al9`1d#xxi`NexSz!SNs#qt5fVd#gIyms-psEOjkP0oG3M=Aj;OwZO z(_peBH73Od9RL-!OjglRS$%1gaL@nzQ5-{2J!bjc519`?b7*&j!vxE8GQMrj{<~@ z3{HVNNA-y~obUv8IV#M4RJa9&yvgyRckVoB|K0D2>Z~ExRV3F~*wMM5JI=EZdRf)l`f~zo5w37j1Gd{9bvJ&FT z@Iayu&c&I}u5LT$tF2p%RwJ82r2DbnNCT#c+DB<+jux`A5qcCm}! 
zE!8VWhLsMGckB=u2H4Qw$w-~=523w$ZCNTi^LF8ZbyUH++G}oMmX01Bw8Ih5tACO@ zgC9njV?|Hc`Ty)TSy8YUxf3yAX|IXiSgYm_7?fLh&h3ikW zt2b^02Tt@{zOvkY`JeyO*65tQeDg;8p>O+S`<@Seq;1{0-M;uMzul$}r`mJRy|*o` ztU`ymHa5N3hRDbs__f34_svhznArGCMfr;t2lgJVKf)LrqPnctWICeKZPIVbp+g-^ zqFJl^EP8SUDtZY#7vG$D;T5mp^<3cvc^uivaXR1@iV_c;H5oiqd|@`-)$(JuUTWJlHsTpJ_q3HQ6d`@{XVyu3mwv@c^*!0xv({j9}4 zI$=Gh#dvI{JZ%1cA1?;3@OLs&JX!-^IVA&J_4O}YlD=yTy!QSy4yC7)8F@u!z%7{( zs5UE|7s2ejyS6)#hL0%NkJjF3Z@lriXm`dJWFf8s zSMNJVe&Qwi>=YJxr{u)C8AsbdxfXw@BGdu(M892Lz1&rDvK`UBXbY=*UCiJhZ;o4z zt=sWTU;tko2$EADdWBZx1#OC+bUN|Icd}X+c))Srt_fRU(k{0yT|J{;T|`I&xd&~I z+@(H6+nw(<7fy027ch>&P*vZmbG?Ue#WP{zBmEg+j~p06SeH-L*Lap+VW;_6HaL)J z(;lM(Y*U4=hqV@T1qEd0Vi+g*%xeOhV5SWQd+qVsW`~!K*SmIDyM9l@Jv?E#(nneA z@}}$qboC+evU?R8tNZ$XfNhZPN7#t&{+|U$+6iC+d&MKGm)rW|NA2}Dz6{P?42+Ei z2?8$wn}4;1wr5jWhoc=+eM&If*`Y4-6L&k?E--Mh7-Xbu+dW={hT4w%hjk?T#Y-z0 zRJ1d_wYAyaeC_R|y?*Uxo1Q|ClFU5(=B-)e_<8CPJU|aH6}lK?-lw8uak@fRoGL^g z7ra$G@t4ejuXmundq)Y>tMrz38`lt!ciI7UOWpZVv?wp4)j9d8yQxcGME@LHqLZ1( zXO|bRrEw+ar<0e{?D~AT_VtKUHjojA|CDq2I?svUm*btdgP%#5`g3o&r?Meuz|*BK zx=JaVc%+vr2=Cf0+pwH;L%1}kffUlI2Z^5n73tQ!lb33Mwgzf#ug>IBn^U`ml8I;k z!K0)dI7+|59X_5rE-e#=2Tt>vo2N~|F1mVgwYB*t+cA7U*xU;LI{}^nD3Zde=m>ci z9AMK=%}i4jj%3#smr~)58A_r4}o*4z30Uj+9v(vjeL6^8JyLkr+ffZf7s{EI0X2!mPI>lY%bls zxC&2je+a$~C`08Ed)eWDb%0I2wZ56YjB>sV zE^QyFVcMk9IV2HA^7XS)J?;FA4zB~Pj4H|wX+-pO8XUn~_lBb^L~z2(v7bJUIFhGa zMCKTg_tMqz4EwwR89K$f5OjuF2apYMSD-DM`ZHx2| zH`dzSyZ77n2Kc1ktKn(?#ky~A4caDn-hof-ubWn20yFJGCZ=%myn3Mb#Q?+=N5CUZ zdmmibZk_k?2|Rp)-m2urEuEZ`=42VtJ#mFTv`+gTKQ}i`TzDH|_igOB?JOAH9qhNY zwKecX8$#bf`@l9LnrsUN@$gPMu*|7YL+%Kp&aU+*~(i0klq{ zSO*tlnBMOiIcAZ~CFl$n2ne%YHnWAwIJNkW>{&nUMD0Gyr%ZH*ICF7oEl@4qE7>gh zJaxSstxp5Hu$v-IFOANjJj z;xPtiXy>#>1%WrVtJBo;0kPWrvr|PAR()Vq8ou>=;ebh<409)4@<#eov0giPM+cIyiWOg zT@QC~nfpEu?F0G6g}OyIzpL(hX(!Fp85C|K6n@i74&yj!7d`~0zTS@62yZ_7i?78G zU`ASsd+;VifvRge%s=cSjd;$uw+n2L*Zy-@rq3s_Lc7YVU@LfQt_(Tl>aqIfop_}1 z+Pg|y{L{7!ObAHyzYxhb}BF zw8hz(cJboHwmlf4D{JG=w2PBw)R(D6$_`x7Ph30l2QIvzg>)T_>-)Aib*8DgR{cy< zKD48Nnj=64d92g1sZ&?SPMmP^r~*;trBBhd&$U^%Yi&l~AmXlKc%W?Hx@ zxvsE;7by9uMg4_i^N1cg*B6ido_E9n(0Jk%Z%bp!R9~kORbJ7p=y>KRi{N@k6}f~j zsHgo#12WGke*?k#=EeKdqFuia2RUltt;`V%RC{K-vZTT-jlkP z+B|-7ey%Od&*qae+S9Y}q$fPrsjJE`G=QdVTn#OORlGPB0Chl$zvO8-;eE=*{Xu?wo5Dcv-I9dEwlQ#?~O-=(@8pHzLQ7qqQ@YR9Od8Ta8&637ec zDqxHKBR-+wiJQH+SI6^|@tYd^sC;s=0Q%kM&>a`rbzWC+6itLLF#3-FiEsSSl^=&W zpDcAL!DLtxNjN0UxtR0x5`iJV!{(vlLba!4tQTP=ah3l{QfDPGuF?$X`Nbpv98KEb&CL&*ovg z8bB(Sbf>QFg{G7h3f)E^M8YtFIPP+f0R>~nI5Zr=bn@K7T+yE}7~)i!ldozTKRG336Kjl~j<3x32WfmTZ_m`)s)oSLy1 zv@C0BppbdTugu21uZqD?AU=l&aB64iN}xiX^xQiknpN@OWY5VC8o9~o74lhXmsg)^ zuYC0Tlb7|$fEPZ;a;Ptocjc241%kKQLFUoZ9@qST{G0!$_SM(FN_xMMJlmF+R!|lw zjM@1%Key1{`}_;-8$R|;D1<4>AEsmc;(MQK*RHR$wMX}(7^JyI_jEej#c%Sfs}WNn zaVbv257J6vf$QMY3LCB?p8U;oLR@9tzl(Wv=G~bbVrfgsZ!K zS2XDqaNp&s4jrEZ9)t<$WNbS~X5-4uMHKF(l@;){*LHTcT2>T8OHWYHIpHCXXQB(* zq>};;T}WZaNgh$*xU$Of9Mp7jT6~C7!O+;vF}VcP4#{o+nCEJ=s1`ka`5{M@$%CY` z)3rMsgs+6#Fw@8z@w|^*OB*7{xt=beoT^cg&MaWiURZ!nj`pBE4fEb$m%8pW7ezh) z!pm)kj?y~MJ05RK+C%w!cBM_w=)CoX*V-T?(DZY8Y9;;PEr3!j93R%$DoVG zimTRL4B%v+y9|XPp9&DC)8N)H`4l{B&p9BU<8!UIJR2exzI{5=8FY@MgD-g|IFWL+ zt$F8Cd~dqw&AwdTWl)Fb(lf=OnSBdJ*fA9ux z`AxgW>ubpT0NN%gIiSl>=2I93ZE}`+Th26f=Oty@uoE+bNuwCl9`RY7 zR|moq02UW6v?rf@61`jicqFkp-fweikkmpm^eV<3%2}360}S+p3ox)-HPTvNulB2R z@`4qL?&#ik`nYt8^*?7s>Xks#rJndM@9a7eSHZ0LR_9jy8DcE|)Y!^{d4zU&@B4Jh zlX!MfSKt#)FSd=ub#XGBfF5hThyn`MmeC6_WjK9S;uAwwUPeI3V^$B^-%tQY$*h#yvx{6#a zrycpHKlAhLU~jxF(^j;F>GsU(R2yHs(!Th`*Dx$s+SCMX3PvY2O*`(6SQn{f=Nq7> zy#>2UiL}sWsa5)-om7TlB>E`;aV#BO_~s`W-8n8^{ltnb{t||)J0iB%Hrng2z19As 
z|M)*_zxL@*weS7TPqs@>Ki%%V{wDI^zz?xqBw=dGsaMZ)eoKw<-6p}Z&yZmRQ@GG=n2osGYK<| zS9~R?Yv~(q`%Dr~Y*D6z3U;^>r|VD{0sajb5k8O&KTb`}adk1#d)oN)Y8IW0IiL|- zRe2u!EqXoZ> z4Cs{1&9%Ya5L)kNCy)bM%E$B=va>h9F8Of#zVH3^cKi10ZFhGaUa9|L(jEX#!Q5X7Bc6YKdllHuRYz&gASALleuBOPSoOUf(_`#Rk7Ape zMVHaeV`vhOcG!M~bdYa?14`(@;a-?5Gt7g2Lpv5sh7qwKF(&02-THK3B^|D8%iNMg z-or)(H`$pg9`cM$rv8Ix$J|9je0SV?`d9|Su~XcEJVcLOzI-Vt_0s_IMHZ?YX%8z$ zCHn&5vv!=%o@8Ws>G=Das%i)NX#v|yJI%X0bY6Mk$tCmHk=xr_ZQw*7c9n}3ov5;7 ze&gDW9BJ$1pt#bOEV~W-cb!%3$_yTN_J(qyj+q#%Z_IiEmGpCPt>_45;TzKgHwq1J zE!)olx#LgJA8l^so8NZyf9H38x2+9N+Po8!)Ul^q=%bE_ox|#CV};do_qyCEA?wn=4gLKkJg>A9x=3ACr%83*=9%zZ`bZXTS_3HD;8cUizVcqx zVVZIT5I237WYVQQ=kjPY6B)0Q8J~??@4Gzg`}9r{@XnAv&pJxAGE7<`K{}In?}4w+ zALM2?PyUj^AL57`ue?j!;C-d@TJO5B5GXNtF8yP;P^EnWDax@nR*r(SuR?^{DM#K8 z&Ldk+9L>OsvomAuTfgPw?YqDK&$Pk%t+unh-gXC@nOt%qx(it|IYc=wGWBTr`3uYG zvp6Q8F_6OSV{cqcR_tC>F z0C?%8SK7CI-*>e~_a9`TO)V~VB2}9XIH;2_n#TOR7KXtJ_^_P9k-*gu>~%tBk#B9p0evgOrQ+_u#R|jr;Qpg_57-tK7uz>|yt5<6A z(8$jnQ7|w#NM_$NC!OIO$IvPTyJEY>(F-g-_36zJeBMJCML`%*z7t$im=L18%qu---9jVli$3;CQ3cIE!jsLfuOYOj9hH?`xf z$H*vc5I9$#*$;vnQ!f{nTa2n9ddzGK0pUvX~5l1_Vpup&V2t#9I0 zqJ$S;*+0UA_Y6vV3Q=X4Jbfp#$L>bvG7t{GA06z{P4qgze&1^cgF!nS?6kw7pAkYf z<#TajUY1W<`8lU5l4r&j1ci1-uw6*w1Uy!cz@}W~!+wxs4_Qb?xL!D&Bye$%6ZtM8 zbQ0Zo+CtVz4P^Ei)E5L`h+gGy5}EWAJ8ehx@5J;R`d}%?_wNq2v)RTIJG4&@4_p9% zJqEm)WTRZix^}j=qZ=HLaA8jtM^eVd_GUgoGdH)`_HCn-3HV35$u?wuf&MUmoTYyNe0@#6V9U?W6SbXS4BAQOn}XFad7m`g7B8zOsvc#d z5SwTFniiLEQ;{#BCQn$(G3;Er(+MRZ$otZLh9r%6BAnvQrk+pJs^;9>QhE2blx>2% zZ5(kUT>UZE%7Jiklk`=-1)A0ZC<|Q1cRIOb+m{7X^0l7~*GRY6Rmh)SCh*?6=kwM&Z2Bx3 z;*SDTpVVIm>`6hM{kT~8ge*OwL)$?Z{j-k2tMny)#?De+;ns}mGgE<*I5mbO{5o-{ zPgIdb)U-3k@8K4o+^NqD?~*25uzZ)=`s|C~q~6EI>=Gy|feDu}E6Q=gG^aGHdjh|G zE>8@Z&Vo-*VjB#G?WrfP1NSt1GKo%cQx=gZT-&7buR6w^zlgT}RRBdFP)_L<9=o{j zc6m)BYt4-k>=EdAT;8WVt}Z-4{~M<^J{aE75od}NA)<1Zn1Mxl$&bD3WltB9>4d-k z>v&Cf2DkFyv*G21$7}ef!%sNT^5C5$=%`louRM(_UF2ayEF;jH-Zbt5fQaD`1mHTz}sM^!4up&p!%rKQ66!Ma0w(K1VQ#-<3;9=yeu=Oy{Vh#dXTX1Lg&)lNZ%11+6a~6#Rpe=zHl#9i-QJzU{+6XrABYJORMf z59PD+!p`s5*^bUK_$sp=_#%BhXSG`@)G?f$p>wVp84BmBw^7Z`9rLoBPWdIRad4)X z%v4J5mLs$A`5e2f?jerOnDf^F|Q-(=t~m>^x;bY!To z4WSA?D}=yP@Yca8^En9TyG-2c!AA|K`Aj&%B`zH-lqp>mYJGA<#eoYQ>{Pp}%Z3}{ zb415#@3`#^j@s_FgOyXz;iSF%>W6@N9@vB*oGaKd2&gdc)fs(#JOx6-QM_38_WGdR zzqi(Y=3o3Qk*3v;Uy&L9HI=I2J&t{yod`6D_1y6# zK6S0uf~p9obp|Sx@w77L?p*6nTr$dvR<9sHK0X-jLG#`0(2@6PE9h*Yz=^XD{m0<3 zYZorIoA?EanSy5q8+QobX%{cfwr~B8kF_VCyO~MjTVHvzZR}6A)u&!)w{|kU)K+dj*_NKX)(&V?rWdE%w|(+k+DAV6;kLKE(Kg@?7T6^-s_2vg?pIp7smS$H#=2+VwD2vDWM!k~`b{c7!8lsdP z@SAi9?`}P6Th#4H!)FdXP6u*qng(YAI*~R<>_G!3it;^j8Z7lmP6j}ZJaOer?|Y>K zj5mq>U>mvY7Vr74<1;%R#iw1vn2trY8-3a=4uHfyry~TVyw7uFsPve-b0@~#$p=sH zEHB6dzIUR)iF+Ghc_us{T~RFQ&;g&T*#~rt_l7%JAVBt@$z8X=hmJgezRI1uK8L&T z0G-aI%ix<%JPNRMKko;-BRaQ-No&nC@Dx|LG5XQE#N(8))YVDINq8;%X&iY~9C_7P zr~x+(11fpuW5uZ33XkDuJv2yr8D(ue^_WHkcyiE!w;l&8VB*`nPn`xANt6-uEIg~z z(NPJbXbl+`FsLVD9AO9nx;tO9&<VO!FZd-PVjRu zekNjkW)T)xAis&Wy1J6KX7kbA_P77$-)jHS-}u{YXWL~SNA3OZeWrc?cYkk=^>w$| zV7S+|wl>o4NPC*1IPYUyfQ!D}k3LJgUiO8HQ>2@_R}c1$L(5^a0i53ZE%Jkr6Kk|Z zdsG2=+S(qq&5iAL>z#+~mw)}O_Ot);(5IogC zR~RxKU-pdkb5f|c1$$R;02j&qbb>pujWh1)1AfXw7@(Q)`t~ttD09lA+2Kxi9#1Y^ zG}>vMJyxz$Ev=2n7mnI&}yxm7rcHQci#4nBW>~sROftPktRNid)jd5BxF!Kg*>2(r&h^3a&JSlFfT`A{nPBlB{)T6NhwTa|G+UgoKDEnD?m1mV* z+oJFeu%%Oru73Ey-J%C{g5BjvoxAoyxNZBTowUk=03K`N2%E~C(&0sc!jAGA|Fpj@ zvhrkT*=g-tk_$efw^^ zed`^X?eTWm1!2^h5u1nE6<9uBY;DQ)k)(yl{c7Lt3mQ;>d<#{!p_mm$76wih0 zcpw>rPtMdtmtWG9Kh4$%=sHmsPo7>4KJ~(5)KgsLQqKExKK+~tWe8?ae|fc%r%wG+ zce$QVuOqDbYWiw3#7FcvxXm@`%oDF|NJV4+kfu%%G%Yf9bWq=ioSSGLc=tar?77gU`2PUeo}uh%k5;D+nUJCm4lsK{n~TJ!T>OLV 
GIT binary patch
[base85-encoded binary payload omitted]
z^_jS3qfBAqwO9WM((U)HT=Rn5+_!RDXyZ;KBysx`05B&n_x|Ua^eKe05^Xu=v21y? z{mj#BI*knDkhRdQ$w%89+}n7)t4H_uE1Wy&;)9pM=;8!>L>oCe~a?4^5iMv zqbRs$x3+O~GJ!K~5A=a$Wn~=!*sZ_uH~wZl_uRK=#xP|Z*7W2|oj7^6PM$thQxlVQ z^w`l1e4RLcqy~n1YjtI*!I&c3RmdCj%X5}bJZZYe$aKj2yMzXlAgR;5aYES^1jV!? zX+#LEZgCda(lpxo^FFon<0@Pf9QMd+EZ^o2!N|@pFQjM7ADZ&F+c9NTHW8WNLs95_ zXq-48I0!B@NZkEDJ2PEF$b&|aM|asF>iD}m!=tB#qbVTmwdzfu4BU-@hG^>2Ku-hA)xqVN7%Q?*yML!gTVkxR*XH6<7kPs#mB%C#2!p%9cY-XURHB z>RG=t>-e!7oA)fs)I7VAe|34au3x-T@4Wn4z4PMp_3j&Q)Rp(%uB$ls%eR)ZQ$mA# z5L!-*j$q@udj&p*AM!0h>C3+boX6*$f3YsUf05GV4|Gb$z>P8Baux^Ip!4A1C=MeA z;Ql}cfgPmO`I9G>Zq3)`!u6D4aHov?p;La(J?HDgA9VVO zS5m(8(#y5ExRz5L#*vlzt2eN;cIw=*S%ezkVj*J^8bm4cCX`Vx1VWd$zf zA3&$&^^NKqAFjT!(dr!;s}1UZ{Sx_xhwJG557!!o{5m>j9~wANlSNDpoY3~5>F$3W zUae^DgZ5ER0*h_iiQ)roHTP@LI5Yk{9{ z+hXPoUU^EK)M#`eGJ46_)W&`4^4UDbDRHLdmsf^ddG63pI=YK3$K%`o(SM8%AFY|= z=ShZauWf;+gB!^scs66dO3{L%&2{lC}>y?b-=BaY-ixg6)E&g|!uwN`L*v zcfMP%z5RC0o<4)qJ6Ve>o3(QQG2j`U>J4agbM8uwk9qJ+pIj3)05VLXaYFKL=s@3b*NKF%e78h(>H zb!dC!Wtz%!u1QSjUJcx**iK%amtU-F zm*1=1oI12$V-sVj^nn_kn60ztAE^G>W0Zk?nEJqnb<_6n5Ou~k;h%$S?xxB+<&qGs z0nT*T^eqIsJ5ZDFN@8ckd4%S>TWhtuxl)G?>=5Uql=hMXh=U{JHGcF&^-j;$KCmef z4)D;xz##q`4~s*1Lz@K{OeeyFXNB1N&`kIm%^XO}v6|RU&8ekqbcH)>wBu%u9YJ4C z=F|#5HImahL_fO3#V+F51p(G!T>HESy4;CG%TAg8C+&7S9xw0F134ydvwg4IMbqwD zw(r{p2VRB$*3E^Qo<32pzxs9!4;|LiPd^22dds83J+3#45ooWvWZ%hTEdlmVPwJ_AI44#ZI| z^@>Avsr5v7y#yYpK_c5MEaXfXme;t%w4}kY7AL1p9Iw--&eYu8jrfQ|S)guO4+TK3 zw>PmRH!~p``GmjPgkR&U^y}&g2Pwh73$n5xq64TG+uR0iDAb*^0^N}{`;*8KxC4c$ zBhS>v{q!I1X3)2lS3Js<%t_~>0Bsy6FYVvl<(A!B@U?@i%8~ZmKDM4mp|A4p z@)hhP^*H54$5EE)qI={aV8}nfj2z}~2Zw>#=2@-;Fjm=U3k%g{p%-Z^%x0%-?_1$M z!8-E4ePM5{t?ku=(ElSJ{cyec#+zxMi#MgIJMxr~hEs6Shi_t`|35MQ$I&Wg3aba>^;)vL9-xB`uAi@fNf8+1$P z4Mp_L^$mG;;4G(E$aerP&pF{Og2Yu$)*#L_w#`X|$TqO3H)RyC7o2%jFL>mwGMK^t zU5_zFp5V#(#pPOB-mG)y&(x_?$0*;$586!XA&_CM(oiCZZ=<+2zYqL@|MUk$`%~U+ zTUS?jZ>E$5jL~nvr{1}sQSJ`1sJa944#6(+I>|zYRAFYn?(_xvEr%zj4 zTFydEi+15a?CC82vntzdvGNf(2e;xzylFGJlh!g9mlw0>)>B{7rvRx70D90Pqxh(U z(9xs%$(G0sOw< zp^2KBI$o30M`~&Ydwupe&$Bf?F^Ru(B%6Ia8E{~5q((iBDLeSbvCT%|bNqdD=itaB zv>3|?0DHZI<#NQG{l0vk9}xkbz1(}0zejB!91b~xS^KolZ)f)of)D*Od{zC+UgW)f z7v0TF*p~1_j_!ACnz)=an}r4FXjsV46}UTAKC+)v-ZQ3!42l!`s6pU!L9n#Q$zIlv zjN8`_(65Xkqn+Zd_Shtc9C5Ywp^Wx^nUTy3Fs&rAz28=yBz0UAcU*-hco7x^$W6Ywpy)o=JX> z<9A|z;nrMsVDn5`CFYdg|BrNd8wvM3!ocuBgkE~{44L@xY{eYa< zNT2RISy3O+$#Wm%N!3J~J1|Q7k2dnqWE2{4udJGXz_I=K7^I^J?U44_Q^&SUhy26X z5mBxgt5*CeTl>r%|Bi#YNYwtJTQ4a{-gKVz`)p|Cx5<9(l`L8qP&TZ~dT9@UNPj2$ z9b?P!_wrb1<=h0Y$xqx{H0+bH06(p1dtAsM?80o~r;GAkrVWFhCpMg$ks1 ztUPrBgK?gKw!7OFEVMXJoA6XH%IWZtyrazafp1R~a&yMWU>~-ji$0*OJS=VHBk)+e+Waz*vf)O-m^|nm0a4HSNnF$FuVV6G zi*@3q4!Q|N8vWN!Q9-uI>vbPJwGU6ZFo2d2=lGmyj33M=0JiaAHqd`t2Yt26^Dgf< z(7ju|!?ok5U~8;)2ghr#e~dVE-0*m9Q>PR9{i74~CFo7c#ih)Niq~$g)w}qBFTe4A zefO0&>z&K^gA2>Gjn6wcI#bg}jsfmC_V564@I8z=Ha?t_n0C=a>K^4gi&ki_wsa=_ zX`PE=vcRP~&dvMS_54Jo|Pn1ll>Fjlm2e5WbC#;yZ13Y%_GWi{hoTHn8K@`Xc%l+SF^SYxq{H$nI`U zPGVn7Pi8D(09&EYxgiv%Sk}%>JAq5@MZoXSiu#2ijcm9KjY4Z-mu}qV$??xlpJaI| zbl%$+1n0!HMG({~k7)N{1`nj+5#l$l}#cMy<9q`*c`Z!I((7A4!cv>o`y z3Sbw=R+bKKc0zQ@@+$IO4c8|3J4mw%l37P4?X89TT*Kumu?ALmk}JqHj`CA>@=6Gh zV-jHs6Us!wBl3xc;l-AnFVK<~BfO>A*)woG|67wdIb%;2X-J4C5N{ z+l!zxWp=2Myu3?;jI+J`fT@w#6qZYxJn~1dxq#K=^InE&1nmlmb@19EJE7qV{Fydo z*jDo3teVq;d~QA~b)p!gU-GHrjOD$~b&GHHBu@v1Fg5L^H=+JPoH!Vm&T8*)q(1%0 zFV~}we?}1CXgWJOn+Q9lHAhW+iDL(g5DDOF!c%qe;#E59sz3Sf{0R(zO`L$446?YB z;@tUjb!7HPoxo{0cJfG_zvpyKjg3;~es+=g{Xv~~@od!wL2n9N9t%ooFMq}J@A8v8 z$Z4*btcU)h#nTfkH1DOkh5O*8Jtg1whHgZcI1-BgMpNi*=U9AL_g(oLA9W*?3PWpI zS6N9aC z)EtZ(8Xm#Ga3?Pv$K+(4I(uJz%yatWOezi<(0a0?MA(S@mjt5@~idY 
z3oq0^{U`rN{ri9J&(&Z2fBePz<*)y0UA}V7#wSRkA)4+)-7tpW*kDhdrCr|u@Ij|` zI*M|j^Bt^3Kc?dmZPF<6NMVXv(iqy`#CL-1qN*~R_Zp<$t4b}m1@rmq$~8KRCy$RE z#K@Z%8>?w|Y;rww>_p9uPt~~-C+hs!lXcu^RYtxnDxXBHC8)8To;Z{e<+D-lp=4r-QtG|U#S>%aP7JW;s{VV_P^aU-QGGV+j zC9mHpzrkT!Jn>-|2f)0p(s!c6^c(Q^@~x$s0RJ=KY66~cMfWU*)Kot@c3q1d2gsuU z4_9f*VGSam9u1p;Yh>B}php@_jMo7=Xcu@6hQ{jM>(^@$`5!)htVT}XQyYCFwS}|c z;EkG%T)}S^q8WoQ*s2(n!Bj`z($?UE=&X9pwcY1R63dE=S*G{N#~X3h#cju+_bETn z1Zvtnb&KYdXPgch`pH%II*rm&T#nK2I59sw?6-n-$Vg8{>7m{V?;zJX{yW4c9(<_M za#T6`$w36aDcRdQRByiZUKSobc>e?D0E6~m2ge%~ zs@m)w-mgFOBi~=E8!NT8d=q%PtJT4kc%&VT7rHteo`F=dhB{V5)2r$3{@H-`i50P8 zL<{PzP)yw9*U}^z&Fnt=&K|d6+4PiQB;j^xqQ$5bWQ3J_CGbuOYR;Jn3F0xfihK|3 zelL!)dk?hdMr!uR`5K!#Yx8S=)8n_P8NJrEQPmkaT}Pb?E@I1CwuW{FE35Zr1elc#TgE z*Xjy1+25*r&K#+){lK#|H4e?UR^g>BU;+?$LVSzU;GGbsu=Aj7I`Jxwq@VcF7O`#N zu_W`UllkrtoZf{$(}+*-nAG`0DGESj?YEU-1qm(x9jB(#cZAO1lz92M*(G)X*aGwe ze%p9ubuB#Rz^*4gjW}uM;w*Uk=+WafbNo!r-1`8kf0&%$8eVO5vJPm0+&VeYCIF!q zAV|ZU2mo#ZyZSY`g^&Ld(!m*R%LA_5XO+6?94Oe`$i4B}xK1z%TOV{B7@MlWkqO6N zqF3awti0yegHDXSZk7IX^j9QF578_P)}xITQMhML zo~*v{@!H+o3fz|6T@*>}iSrnGb)vX8U5=Rt7fuMa1<_PR^)4Q8r*IZ}b@<2rw$W^$ zJkC`Za(pLncuc78JiY7Y+|_#T+U5Gn=by!v@5|>bZtrSFd9?|4?Md3>F{&;;5MJ`T zP(d9h49&i1cAtKV@7u`-z<}=8R`YlTzt4K@#sYfpTuskR<@i@|-`b*opfhZp;zbLGBx|IXQPOf@{^&tJ!@BjV3w~k@gh%X1?mRFbI&A|*b zT7Dlqs$DdMjp&4%QWF->r@G)xo+WU2Gq;UDy9>y&U?gxfKgX9k_g#490d;^pVWg9w z;zeAGJNu5Da0PG1w}cZsxxk5F?ObWxCTa!B01asGstfcs{ltPh4xL;$fc`tR=BEWV zckBJjSI}*8Fc1q!M{W(mo`1oeeBF4vdrcbNwP*5oXrH$7&cWqDa24AYSlUiEbaV$d z*?k5+!UyRz2;93v>-)d(d2H-!*rVXu{N|%0)>k*}uy~*l7cdKbvf6)fE4~^ph{x8y zcV1AbG9%7uYpB$D55KlN^s|-)9ER8cwz1cNbMiuK@svqwXMn($G;=^oy+47jU%WnF zj_Mj2@;dXa$OW4{q@myw;k}}y_2^1 zaa$X0(93&24>32lRIk7JR;_NkKz_K6!^@w0_7mjots6J4!|T`(*uvV(^1TbEo4;Xu zp#d;xyIGcSh!Sm=jAcMO2ie4f@K|pa3BY&m#5bS#ijPJb%adPCr(b3|7k}x0=G)NJ z?V<(0+q|;ATW8Ooty3qDCZBcqy=f;G?T@9ExR5qoT3Vhb6j-+DrL($4-j2Q}bZMVH z(|dm11xgkf-PO^DwsmY|H0}22P$&MUrY6F_`nxWO_4wn7$ua80<|3UtUvJ!)qkcbY zGn8ga@2;Hp4RrfKVc{M;x5W+UUu1P1`+4rBrwCxHVOx8u9*h;;nkbGZ(aMX)CuyPzxPyq`Ac7}v-jSEx+Trp zTG9yPZGS~x0LI+=qJvBRcJ4d!qMjgK7Else-n+y1KAFFjM?SN2r%l;EID)R-fEP#W z=YHYmYjI(tCb6fckDtm(31+eH(*~o{b1I?+h}^)ReCp9BY7+lseR)l>p=9Kyfk{5# z5=PRBFG}IpQ4pzJ*uC1$B;$D>UD)NHylOA(W6*_tgV^cAJ@x1ZpD2u%+Ff5F3HE#Z z^p-sJm^qU*UK@T8jF!NGq3Ak(x9i})Nxk*Pc*{xMdEma&9v>~T3$!*5?QS%&Mqf%x z)7wv|vw$yjGER9R7T(q;@wjKxS&F(ZC?Or{P}Tq++`x;{YR^Ts*a51&&CNaNwwllN zUA=ar-h1zIUA%m?-oJRYuFd0v!*lNBpPZhlqsLCwsncic*zwbK=G^%@a`a^4CZ~?p z^z2dOZ2}PRkB}{OC>oG@kv;0}=^x6~6IG0J$GxA#*gzlpDH~5r^CS`FXnA!L-v`;! 
z=2TDF2--I==yCr}is^GF@b)$M+J-V;!6>sb72k6vHL#@O0MFYclf(e+!%ql-07DQ!vF`L;73S?eAv@1hp{p6px)7f$$kl8t@#5VnM;A2}&KM7u`NALE{iH9l73V`I<` zp9ULsY?L4V@95}ojf@WEI*N@HUwC9V<4MEzJCvbMua?lJZxbF)-nPX|%|3JiE->}U zjp|YG4w|FyVvE=pS%&sP<9W?1#UlL!^wGwTU%tZ?{hp)Rd1tvUe9SIu`ZOnAg+Wo& zj8u*Qia>S0i3~i#fkO}hkQ;@w*=LkV`A!V0kK8q$WAk}-hkTCrwrT3+J3lD^+^v4sYyz>vvFArK%3~i zDBCd-VX{u!M4YgQd+AGz?_D+ntC8Pl>mfb)Ehh^tr6qVZzwa|vf4B|$#3kiWPcLb+ z`ySb9^^w+gg3G`!Jlyx&zNEDdujIE+IlvC$y>Zr^_EC-#4=xP%bW11U51=s}DRK1A z(jU5eGC*4_u#PTT%^2tU%3@C0Sas3N;zF&iEk@2=l#^r2$)8CL^7iiITi}2CUH*an ztr~zIyzkw^wh4UP)6RZ(xdV6e1LZAv4A|9`>V9=pPBBqGAs0^8@1lFR4hCwwccgaw z)_xCH?)Um9YPWx^cKi5w)&D~8dy;c2OSw|;99se1{!<9$H}x5j>PqWuI@>WnnMHpD z{}I$}qa(F<)ziD+)-m=reln0g!O!Qocx!xWwnkj&H91ogvuA4f$cdUbda4FSX7PXV z|8{z6Zeg?DyS`9wU%FNoZ``VV?1V$|`^|r0_M{^ho{f%pG5|P|mcTlo?8t-gO*Z`^ zcj}mY-jy~e57GI(Lp<^x_)I1BNtkETWq!|l7ZCVbq;S_3Xz4TXM4v96%-8HoBgq#T zwH&3%!c1fKR#WRkz&W8)9ihp8(%v2D`-r4{9bF(k)nkzhWAd6ZI{AJ_e%mM9oo7ND zo3lP)rCi`??~PaAw73?mhc~v%epy*0m003+D;F2HT`&aK}N z=8VT$x4608PD8`Powz%AlZOHuzu>1sKmCA)uTUcCyKybFIvh9gU0Ot0tu1LZZKJnA zX4;9jXgxO(#KuO39G^nI1tSZs6D-eijP-Y?4FRM3=qoRz6NcOEI?N{kPX9EdJ`>>p zIiN?-U?@8op2QT-Hq=5hX`cd7+Ug5eCZU`xGpp9P+GdH_<9CdW$B0mnfPnYZW=0ip z5GbS~&a%j_;iO}ep4~X#kt*c^TL&IFrR^?KkYP@Wu@jWYc9iMFxC&DjzO>7C!T=Ly zm%Yl&Hz2<|;6WwC5ju7R!6~_S7dP& zX)20NT8;EZFmQ#=Y6B6SG9vU=!5??_r$+vEnOlDHmAXziOl;bgDdlEo2aMO_H&IfK zYOBM1oxwAWpQN>r9DUN5(Yg}npb1abBm5q3e6Z)h+AdB$#vL8b|M_44Q`I+e#8i|) z9`a_wnKsz5%O|enA|k*#AQ61h)+-mU)SGXrkQZ|lHtox|P^7bbB^n8LCiqou zq|a5MgHG;(^csb%GBZ;rqj=%A#kJ=N3KzP0xFNuR(G-J5XlERQ4ZLI&d7TP zv@v#aVgP*Oi2`zCbIMv7>ICc65Z}W%vTl$68Xg?O@p1>ZMm8J;{K6V1j?8g#gHOS9 zw--hUZO`{q0e5v}9jCG>U(>lGV{~+^KJbC3>IZ)4hwHJYK2#^qp3S%8=NDFL14X>N zyjm}O_qqCC{=q+}|M)-oi}gSJ?Y~*S_)Gr`C(B*u0(>W^GR8siAWa_{Y&4PX(#X|l zV38|Ll+XJ3Aq&#T_J>Dh5hpB(i5Odchh-wp@zgEUXZ1=$yMMe|pX>;A!#RyLDJyTA z4EooiJ?>rJpBTm2!^j$Qu?2Z!Of$`n*U<+vH(?qZj~Arat}5Gc`SREXKM6=HfvtX$W}dA93n&xYPzsz=yac zPTXY@T5y6pYEb5&RSc=b$U_asiIYwtT1vkYEEC>TMr?!nqAeg$4lPTbr5@UCoWG_I ztlR$0f}1XOM?fpTrBMcrkF&r%e*;Sv2I)vrWY_SK*6tK?Fu*b#EZxDmxpwIa^&Hj* z9(=eq*EVaAbi=@Wa%{GSk;VP>wd&j1srB_+l)hDiw83wSg`1E|n>XNqDstI3F<$$~ z>A}!&Eo^Mm<;BJ7nVhQG^Y_*6_)M+f6dd#p*B<%PGv>-2P148>{%Kp!0W?ax)AG)V z7{uG4AX?IxSNz$(gGKUYkpg1B4ixiL@6M9UUBls}?OSv{x(;wc z$KChf`5GUasMXcQI5GX`k)y|^>X&};pVh_JUM-J;K2(PTpWmyTm>jI%_mwYH?@(W@ z;e2lItkLGiAO4dcJh8_kr_|TBQND9v#J)UDguc-S;YWk5Z}{;(51|8X0JrX(365-2 z$NtEKk-R1j`DbWyA;oxBuNlab1&6*1f1Ab^tsd{?gXqE-K_U}SLxh$ek6f}+H+{YR zcHly1ow&dHN2jZ{&~Gj;7Y!5`W819iS#akO>UN>>Nb>vM0(s5^RUYlLv}Zh-bGL3T zE!DNfTlFj7_*R_J*<&ZN^M7T1lX6GX_t=NrxOo-%xm7bqCTo1Ow-)Cwp=0;z(TC5~ zkNwyW(ATZk-ZnIHun1mK&uI^Y7lW&Ccx$^@h)mnh$#>e_;-!-okZpTR$lTuvp#pl=`fTolPODA*Zo>bzgBY&28n|_p+mGBgk{vN)|AQ#V0 z+@^n)XK2I7_+(8TJyT~Mdc1lk&yXK{!>bN{X~$>BzB@I6-~L}zWsuc2Wg)8oiiwuK zeUp~7w#7a~okTtd?>q^jug3{^aO&~tyB?A1K%|4S4vHRRBDH^Lqz2t>Hawa|AM&nz z8s9~`622E@u142h_C5n?!YeF-3omsC{@OqS?@4?2ckTD{eYc&)=fE`Y?)#Y|HHuxk zx4K#T+kW#C{S2-ww-5Ts2cC8jI#Gwtp%e2#J@`~7?qLUMOF5ymMfonoh|NznC!4mg z(Y2xcekmaFybWHQAQs$S_;sX~ZqC;mZ_m{iKL1JV?xS_}>J^;Qy-bW-hj!lnew%o* zeosUZHd!aWg{)=gEQN^9WSlrLR=uSj=^d&qoOi&k4Q#Uaugukb_ngn_{govrBjqLj zH^xdj?NepIeoV7a`ey)`cuOUmhXVTc4!dHr>~JnTC+&!#5hJi(87l&VV^S zuky%0<0r>P>)Q4CEMhoy>THdoN0yd6mU1<_TTh-i4G%Qhy2x4{$)b!6bdkD5P=cVI zEXHZyXKrb7@&_sWgoE-3pp_1VvSJoufzHIH4l zU87^39MPA_Cl>*z%iO_Z%X8c$y2uIojKAP}$^W83+AF)_#(_4|2%mkCJMz?DzEeIj z=-0u2Y+3TI;*W14SIV8`xEO46YXiKj*ZTHe9Xmc-r%#?}{H@PN8S2LH6uR2y=wIoF zqO?ALPKc`mnyiSqkc(f_2TcY$GzHHt9Fpw*8vG>o$GZXg3>OuMXD7ZqHq^yFelEpb ze(tmzA0MNBX?6(g%UQ_mVB9D)j}J=uRK~q0jur%d@doyqC3=!tFoS?TB^o7islDa3ENC=5S% 
zH;aFE>)gp>_4J2682i){D8^>SX_FJaZD72GhQkO-?&U|{b>b7CX%q6?M@AwW%I006 z`WV%^ z#ci!F*TeTeSm#ci$}Z<-*TK&ui>~LtvW~{3YFcqfA4)sBdZ@vni~~1-$*18R8Q$iS zH)D`Qrk|tU;Sn2gP)~pSnK~RBs(oy`RAKwYi8ZMQthwWnrwn=eh zYH$Re8G$CF$k-_M3VgP8Sj#JGb@S$OUAlUs-hKCCUA{I~H(lh49@n3}5LVQeXPo6>$4Ikz#Px3h-d_+Gd;7OFjqoXx63M@OT@cBY?%sO`2+|9Xq|NZweq5s-zuhmO0 zy;RS?@Lav{!t>m}P_MuCdcE=bTlLnP@6^TjFV*#Hepi2?mXH-c3*!PNVR!tqjoFFM z#xLqu`6PNVtyj-zo7!sSWDlDg)S+{oV0Jvn&&CX+bF}$9jbsEGk2%RdG8~3du<{g8JCpyY1(7T z0<@J=#DzF;^0_VgC1Gr`9dH6(fO^tEUbZ>t2^v-EI;phz-1=+d-n3VnjsgNfOL1r4|DG(Dd%|Gj(JbJ;2r!h z*zZP@-i+(JD8)~Vj*hs)-cRB1jEy@ojJ`-e>!u3FTutwZUX(RtS(L#q`!ssNC++ex z1DWXeGyvKNGeBR`Q|BnKj8bNH@k8{!US$I|2QEAyo!jbDVc<(rSQZiVg6~86be#V> zppV#Fx>eirbG5N>wbpK3uhsc0wYqQ(TrSn%!AA9MuU6mYQuUw<`gXC|YqLgB9DS7M zq6ts#wO+?^GyjHsXt%(gR1Zg5J#)pufEeHA+yO%NTYBG6A)XSmo~ZQ%)s=t%mC-N=TY18|H>x#!&;Hmh=~ys9&? zhx~0KTYf@U_~=|xrYEp?ddtA%>FOOnQoFrl)iZLW_6F!DCXRAHUHzjo#1GaEb*`-M z*OlwH>di~n>gK`{^f#49KU^<}R|K=Kl2k$TjjExdwN( z+rHEB9d(v{d(49Ly`+KPvT#oQNR**{0-!zQbK2fgu4AdO_rRU?M8{eW4_*5zc7}`W z%m7r)kJ5p)0Dqmr@-tYLc9jZ=dy8?Pz@)yWe5=g8Ovzo^VOs05>;#{iPBmU{C2dF+ zsKPbAxcSo{e}?XT=O<25na`AC9nL=rzdU0--G6DnX;VpT4Kue?4Flq(Er_BYGBFLz z_<-@z)84o7F8-ir@dec3Jm1~FxuDjc3*xoQ)p?$vHFIPZd)H5fyKr9Fgx*wO{WR`6sS%r;+xXYK@b}A zwRbU+lAbH|X2KHVEo}q)t_()+qkPGQVW{DK{`{l$?Dzd)GPMaN*NOzDw2i_ngJy`c zFn^SfosI_IVUCOaXFvao^=rTRD~TT-8n5B8@q8P3YG$%doIFt{PaUuO?!Tv|rzbH~ zFxC;MmF3mY#*X1O{FE8GQBNkqY#PsvwBSQ#^{%4;qxeN|kmIv}3NnAXL4{>xLd)MN zkkGw$LpR-B`E%c4gid-;(XL6><)u3z3OArxvoE0^GGcZoFh7H$4fv9aG_FKkU*D#ze$uE)hW5KKi^JPii9(xUIiz=PjKJ9Xq}J@mkX^^p&JAd~!G`^uN= zhkxJ)>ifR%rTV~AAE<{eT;O+qojY?5yd9}w3fNu4p`#ss$8~yQs_wn_LOt{3llA>y z{QmmV7r$7a{^Y0Y!_R!E&Ya0%Gqtlyrwu7qaG)Fz*w}L*w;jW&j*63JlhuVnk1juy zvDKZnQ=WsiJDWH}7<-#*%S54Pcr`pQgdsQr-;cxpGxg}BkJJZfi;Dr=5#zU7MUc47 zA{hDKB9(O~X2lh45?_fUboB{;^Q^AQ>dOwk8$SItJX^lO{PL`H&Uf7O90_gmfI2ju zNt-^8(E6cPpXHg~UmM^e+@*i=j%ymGa+-O;eJ0Q~zkh`Zv>NC#qj&>MaW zar)@-8k(4>edK)$I_)9D+m!LHgFVRk@UfFMbM8Vd)8xIuaY(Ej1HU*$RvmnhXS=(L zY;rq&MWZuO+|xeVCbrvEy0w@7%_?xRyLKk@5mzY`99Up0ns-S+T=$u}qhF+}Wmzh; z0EgCNOox%1q0zzYd~hJ#om#hPkb<;Dw2hh$REF)ITmWEvTZGb= z9Tk@@U#m+OZ{!mI$4^Yx3Px=&-ss54V4XNNT;KZUx9j zC?9v?NYd`DGr1F&=e+kC&Ij&B)1+%fQ@0P`SE8ogW}nEjsa^(U750KkD zuOd6Iyz_Rw^xE6iKQe|6JX$xF)@vIW_Wc9^b$R62{M^;@8=S{Z%o4v<%eSt;CshwU zbf$jj2ftKni??du1pwM$@V)&~Hw4FAxgoscaYJE^R|eI)E($Dgr4%U_CRo=qsTKvv`+P6_}RAZ zV^e@%dEaA7m0frLV+fJgfvxQ}=!Go1n@hW+cVHMf9<5#{nXog`9ebre)kz93QLjIv zgi?eGkwHJgLC`oDtwWD~6FqwlHs430ZaS7_jc@}LHbF*PiLOCd!JhrrpGCY z!S(H}dg;yU_1ME_>wfzCtJkiEXVg^=)H~tjH|WC$=|{1}C|7s_U$9||n{A&30MNy2 z!vXpSeJp$~a1yR}(S1ix9IYeR^B(0V;>iu)w7yXOX=O=gcmQ~<+aNx|0;Ki5q$fG) z<%M+buIoRanXI;X7@2Ri8Rs%&M8>YH_aZ7aAZN#k*A zqP~u_6KqSvtBHj9wrm+EOgu*~fpE(}RYX5&hbtdYIq{FZN903{qj9scvGJ`XcczWqf8{cM_j zjaTi01QayEiNuZuZmkQZHTfj8332@vCk8e%R$`pv6G#!V>I8y%jkNr6(n%Rz+uY71dOn9hIW7cvAG__n zi?=eIHi=HX_xzbG(se=e0(O@Z3*x|C=s9Hq4CVM^`%UV|z>_!u9{EkYgeR#}xfBld zi-T;!r#&gZuE0~i0}D+yI&JInsJ!l}0#0ywWUqaxp8#-I>%!tne4yjV`>7Mh!te5T z_+PFS9@_0y{t^$GQ<*5?Szp&U>yNxscQ<{zwx2Jg;rQ_uyXqXF>@0l!4*E0i@7Rko{D^?95EfOi$JL$Z(@C z^z?L{iHXrVcm8BO{@8;x0t}v(=me6yQ6TD7x+NJf6J!b(GTD={WW}{Gst^w3~A@M zgo|VchXyj?_o0tGQ+-Fq@Q3Ef*XeKJiPk`_DxmzLwL9fgd6Z-8Z(Cvl@baIhhR-g) z#{Nx`_S`}@A8zhtA9)Jsd>mxOL>{!jtp5S-9CMRZUWONGq zyL~g?9sN!OtuE6yL8DCGuWUeXPnxsu)K8zp{+osmr|8QjYZRY-cw`ivu20xsYw-T+ z8vZ@k*c>VVU>zD8#l{}1(b1V2M5j6#ot=K@w}JlQ^v{m(*gyO8bPn+odV>zgOrSjI zn#pDCBPR}=KyC}1zzH_LcIW1{V`u0>bi?+pzOvu7_tsNo}m+Civ^H=ISFMPLNeD0-s z{-qb|jW^${%U7<{{LT4VT3v|_mUdalN87YbhDV325w`!^PC2^iw0!IQO=%w}#;J z840CTF) zXi=Li_p=9~zx7du$M0LWu(~^ZKw3J%55A>~$Mt6c0*MCEqn@4+Lr|XO=Qxo3BOjQ? 
zg$a#L;AYTxX$*a6F<;nh{Ed+=E;qi@CPYqat=T~x4!1^*ec5O8a zCO3%NA&oYO8=TZH+KN5kpvOfE@nOLs_|Ue>IFR-^cyj#9eiLmUesA!D8wW}3=S)u- z1O1LV?ZOA(Lz?iekNn~c5(iJgomcX^=pvtw5Wk_LGDz{zK^s_`BRbgrhZove+B+f5 z9=_!c{Q)vG0GmscG|5= zH*VIu*u1LoA^HK(P+N|_?8&3Z-{=^&CAtcJaBP&ur#n>V>vO784?RVIcg;?eHb(o$ z|0X6bd0OyQ>(>qB30}3;JNLc=Pi^BvHytS{-*Vg7v6{4n)Yb0tKY8=AW491mpJgNw zUigtL&j;EZF&^yk3E;b1ubZbr7}8l*`?cp*4*ei)%cjw8JTJ0n-KW?VjbFuE7wS9k zS&r#xKlOC6ka|Qz1M+t06btN>*?DEVJlR6I5^SvUcD3BtY})C{3z6_q7DvIe+JY|V z+JooptK~0GP+O&sS)lKc7wr#Sfat=se)Mkp#1Cyk_A@5cFb15Bz^z?(Lc__O&}iU| z=^JbNJS)dcVQwB-RX1r-y5K}GQ-Tvw;7nd55@etan@0>9bjFk8-yKU?* zFjyY4CC1xA@w;I4^1WaO#+EGzzMZzyo~wONKYHq?DeR0dG6-toylaUy8J!%mN~uHN z6QejQjsemYayo=0p^?_#oGSB}KRb1d=e@{QVTh!zlhhmIl|ZK*K*PY4N1Em+H!Nm? z!+&=8DZe4Ur*}8$8Z%qJadZH}C{Q#!14X=VaB#12B`YvIS$FpcswTt|$eKF=xEzSd$pjKy z#`As3@?LzACv|cO0-Q*n?lUf6ktp=2^*tcC1oX6}P~+MXRZG7Fa`gepus z)w>v%mtM0Q97W2WY!>`QkYiZ%e2qh&EtG8jhZuL4xT*_m68t7lb$5-|&Zv@WSiqe2;0 zQQ8jUBZJ}u+-<`P?)cAuWWWSQVBdpRUCb~tK2nc9_Go?aOJAvnpZ-`KIdO)vdNQc6 zK!>upxLCjR3tz9l@Sprg^o_+RH_4&_yrY<~qe@!1hf(9d47Vvn|1m1Ar1tRzu z?OVIGf%CVCBRo1dLOKc^9Iif(;W>G#PCoNgJ@Dw`^|22;249ZVCi>y+%kR{jE2~Y5 zkta`tHz?bQ#Wc19`=J)JZRr{29XtdKIkp*o_2dt@x4j2X)P3s2>Fj#B_n!N4 zvfRzHlASR&U!7wa?n-F-m8`)Z*b{H+eXaq4p99hIyrJn6aY;csC=eop`QS2TNwWmg z@l%h=o5+AN;$Rf)opk+vPmq5WW!+g#2tG0x)d}7DQl}{JM_Y~GJJ3;5C-~m_Eu-E0 zQ{K}TQ*CsmV(&9p0T3D*Xk&!y} z;G;D-bG(*a43ixwec8d$&#(Ohhd>`5@-F&)Vdg4cqX_V-|<%%JsQ=^PP9=^vRQT?DWZ6U0NXjK}}AL z)$GJzz5Vuk^^5=b=Mb+k%qIKG{pgh0v7vhSzLWL9Blkk7!>e9sJ9cSL z2XE@m49FNnaQgo|(RdIQz6+*AT7Eg9Cm0hqO-AIW*mm+3$+~foCHu*kIh197|D`DY zgqW7)d+L)XJG5!`kbQcan8=Z5$pehZAZ+4QV_Erv_o>SrjpLKkb>!SbIJvYLXESN# zC69Mj2H+>1)O@#(en5Er#hKQA;=iYj*!~>nn7W+&->>y;`h=~G`jv0|W{x@?o}8#- zXV2D+`IXwIp)O`|u+1jC_uktSv0KNEPt*pwr@wa#$9T6s_3pnd3W~I8_%Ln0i7S0{W(^(uc5i*x^hP)M&R-|J*Dz)0(RbbF&_p_k zgO)GJEHCv~HgP=1L8IQ_uLDc7T~ZLV(LMg5SqPxNI@tVY@yZ7P8>u}6t zptT`AqFwsiR${0_h#VM_XRRf?#kK1w3RCz>HX=@4r(Fa~5&`aTw&WuxO&sLN1m6BO zW#C9|yGw;127=C5jw|g$j&&Rd+L5uI*5mhKd$qBM%VIR-&h^h(pb))a+w%u-xWbd6 zlRlDhz@m?l9jq>Vvpw)%cwGC2esu2IjoRJXuXCr*RR5SqPWNXb+zFdr@awlQQwjgc2wqQa$*mns(3HAr#2-`P0 zuzuy*E$sa1?CkWoTJ?bKji1)xIrB z9N5KGY*c`;eiv*y@zwk+`aI}qTlS!jG!Z8Mgiid+>3RGEYcN^|Z_rrZ(0*oQY%1-S zhqM{oNlZz#j*agpH>}qkwwX|*e|OUH-OE?&o6o;OpS*=EX#)Zq_@k-FjIB|g+f`jD z&gI=e25+@R{!ZQvrtgXk54@ga6a7UwX^MHcR~>{4xFcLr3-bIS66Bs-^4yo+B>=S+UxJs z^~H7CUUg(_-I*=X|K>TrjqFdfimDx30S(>YB~7#iK+1M(U#ANrid zFe!#|u{{x@>|nIL?m*1~FzWU6k(ue)$kNs(x{$UwL6*rgkDA41zI6Etc)bM=522Is zO~UMOXZ&J)R;mdpks|waEc?I!0RQw!L_t&`Z4UXA7xKW;BDOU6bFsaXp6xUX{Bqjp z4o5#@=V>eMo71h^gbpYuyC^;4X_-p<^#}@Dl*N5OD|D@r1c2 z?tdf`rM=-#`!D-HbxIq*)7H-?g5VeBI50>U#~X#^z|U6N;I=4jPdTvR)G3Y~YjDz6 z+jeMpur}~_A9?bj8ozK7f%fD9lLAwkEw8%JM#TASIhIYuv5jnZ_sUODf?(Qcp5VtH zAU(xTuTYehANfRgL{hs~k$T;+x%4Qso*zN1X&!&_VPU|$yL1ZNP`MNWREYHE7Aj?T=+Z}WToo+xPlFRgE0U8sxLNEa`?UvItjR($&efUoXD;B7WPOb?M6cbz|;2{x&=d&y9@QkN4H+^kmAk z?2(Dl8XKSFC!R;!X(Y6FeA4e3kLMGHF8uS`#-_0^(f1q%M;_1b0;Z8MXhpnzkCPzT zp^x1vzFn|1l+y^XAGnU;ueeY^+kA+tlX$)xKsTjNhc0gNQ8zlFuike7S5A`hq>*iR ze>(|lKcQ{wZfbOjI7dL#H#u<^YA7%+;!{(hrfPY87v1Krc61&6jbneBW;t2Qu31r4 zB-Bro;RLL6vWg9m)ez5-C`rgQ6V&KDC-0r8_veJT=K8Rc>g#jZ2Z4zEPeuT6x2+w!oDPtV?#-U9X5Kylta9Z{#@CwT~@LK06zcFa(VuA4y zc!zXDJezjJwq>!w0Pjo>{ItpPC#YHnQ}3J6UXua$wHIt*^$=L6k12!A&)*E zI`QQUu(}=H(km|!hmwZh_R$UdS^VgjV|D>!=X$b^wvnF`bAign+G;H?y6I=FHo5nC zbs4)0J-)ZQ2|V@{JK4ygKe#VIRDQtK69K5O0g25K;{m>o@1V{{*W}~@@Z~2n2Bjx> z@zekpe~h`f9XXspPA8Dxsc~c(`E#ebrwVB2`gwuokMg73y_^c@!WUq4ypn!yb7ir% z(97$KH)`YNwOX6M!jJpKo5U~H_R>;q(Jln3_L0jy^!7G**hfxx)>g1_h;#7`cBH*9D|#3R!8_`P(2}>4k3SY>X5VET$#?J#SLYRi5qZ|O`UGCP{)fS| 
z)|-Bs{8~aOgtTo#kTCIVzb^m#nm@^>ttWoP3GrGjshIZ-Zp&@YFVMHxm+$SnD5h=RSjES15xhES_Q*_4 zYMZE+kWJg0`Nk~5;U2vtFwUQJ$kc(Og>HL8$DqZwnAf)W#`pZNi!zp*@}*@L*S>8y z@8UkB71t@VCNTa$sKDCVY3-=eq_Vl3j8E3@U${~PuXc3IG|JYwQZ!y=?F8y;!P(=ht!QSEfiwQ&=D@nA~-SJbq<2J zP7uer22Sg9;E!k8+S=d%0I*6=^N9e;G_ElNjA&krR|m?3O(#tUZQI>;&}Hhx>3Zkl z&H9fZw6w*}l)AYkDA0`-eNHCPvD~_~TnKa}ZLDK>pqw2z*Lg8t=nY&d zH}!@)I(9a8Y6I9Zkd4fC%k;Q39U9@W&3zc4@?CIbRR<0LwZSsW5kDCag3enwlYYN3 z@&mKU!$TP04vG#8jn%^sKUhzE;Bm?vs%@Ost2b`eGRD&8`c^H^&DH<>zy7cFvp@T@ zab)if_SfMhSYdErG&st4%R2N?50WBZ&4|J$orWfN$=^OF5j%-r!>4*lGTSDTs@z+k z;{abx?+{uMNfTTj;IJm5@r`|hL9Ta!N@Tes^LC_N-qLvUzIvR*q%$SSB)A?Q8m&L_ z2met0;1|CT=hGFgs3gi&J_oWgcMpf{(C<`{UG5d{tqSUL@}CD+tPA88eCK4ufRhHi3R|N8&%Ki4MC)22>98q~^B$2f2Sq?s7)p7hY~LIDSA;9;y~(mDW) zqXO?c`PxqzI>#68J74$Te?R(SBCGe;w^jp-yqR{}Pmy;3!`A2QE-?m!uq$WLd885b zP8jA{eIiZaTL7XDTMyjfP2i<(uN*+yT4({;`l$`dZHIi9Ymw z;PPj_R#HOZs9zb`Jy_2>abQsHEL&7spJfIv@;K<>1VI{zoaZ$t8LKT>0%kzxR(|)e5?M8E0f0 zU4A$=Tyxw%|K9t&o2bdtr|aG)pQ-(c>6%;Js+A1D(N_b1A98GmCkAyyq?PK|v0B3c z!qnBBv^d?7le)Ieeks@|PR$_a1W%@sfjgn~T?!g}rLKh^^JxjwcGAQtB1_vj5C7BG zxgy--#N2^U2Y#b*hrVVF$8)VFY4^)hVyp9}k_tIm$um;Dbs-OOL7g^L{J%4(le(b9suYdE8|70z#-Kh88_$Kd`GI&7G5Ph8i zae0>*2aer^-0;`*m%D#_Y`jP^@f4%E{d&>2sxLXxXD9f&iyiIH6_MCw{*)8rDd(^c z-jP1ZM?!w-BaDr_iG5*fgV``co}(B$!QDH?H5-budfK;l_jiC5KI-coMplRG_=)@L z{1czA!=>%oTe5u)h*~>+1w8MH;68qbn;SUBZLy$6dTl~qbX#}^e7L}9Z`Vl`_z6d1 zZh5)3;j{OVmH+N%ex@$nT&wX@XKMDunR?;1%XQ$9qy6L1bTl&l!gJrEf8DPK9yn3A z<}T9GMfhl~{>^{=57Zy|H~v7q{nB^qaAy%c3j8`Zq|mN4w7yjSYj%p|WhXLnXuPs3 zy=?dGAnh6%TVC`|SJtIiexzvuSyJAb;wE(H%{8GTXWjg{-Y$#aKr_#P2JV{t_)1W_)yYSmpwT1Ilb+IGKVeA@JWCqO^*Ceud3W+S z8PkhAYVSBwhy{>(To4?%h_ip{f#2PwzPCkM?2$dn@Y{+xPHuAr)v!`)^hLYy!nXbZ zI369FsNu2c8k#&(gHuOpV0;SQMSb*Rz4mX|l`aC1KYDf@^ne!1l_-*1C?_Y0?KvRo zM4glcPr#>t+(DA)b`3+O-;?j z_EEn(P^#0b-dI{#%mk4^{umw>w}Z8^v=n^#{nypi)ear{%N=-r|2aqVj!cq&xo$1q zq%R!C4rq%o{alm-@4x!>->A{?nfkx{@&8kf0M>MOB6A&kO&jH)kPmPvKL!8DwRNIs z{SN^|N>{$2rwl_7PdW8#Zzwz=jhBG;+b_OW=gyp}Pki*rx;5W+7i-HjzV+Am31;ES zU*yX&AsFX-}Rvz1NPc+=oDgAeFEPoKb2sUQCr{$PFJ>Bs8D?|i2`l6wlgM2Atn z`XvM0?l_`Qe;&cDt>L|TC5sz?G4DID+NVjU44m5raGC~kVBY%;VD02? z|1VFOMmQ|TH2$=8Q?KoCFcMU|Aa!$Vy{=!sUN>&ssD)cgwMsiYvNv+3&b- z6YuW2y;`FivtQNz7M?xq9zS!_rIWsg{i0~I4T z@=#Mw$;n9v=riZ^hu|^OyHj224mgO4Ew{0?SAXmO^_T1T$sQd%>T6l=i{E{rrmzP-{fUp)3O?=J&H2#pu}2=Q6DN<>3UX_I z>x8nqwxq8Hbz6Yc15a$%4m5V6MPFY2@hEIRLFVF^#tVCO`SR8J`Cs_edhdGE0q4&i ztIvJrBlU-V^7q#s^m+HKx3kFL$nmMPO&?j^p{(p+`-uw|lsH)J_n)BP6RmpZ;rAWnG_4$~xE z$L^Q&E!)9w%aL|YUP;%}r%%?YQ>SWaaWM;{1_t}mj*;Q9nwgoYrR9bC&bOYg%U7@D z6J&!!P5?DIYP9pTfcD%5#+_su&cLK6ne8E0SFT*GS6+XYcl*2>jXu6{bFS9WHIvwm z_HT}l**|+Ci#we>X5EEV!ZI`hZZr0SjZHh9oXlbZbeg`XJM|_<2Wo0!pzgo#RGqo! 
zcs=>V12r`?Uf+H7CHk2g^_AcA)jDzhSZ!}?W`a~(0{{be8~5-hMn$Ez2sfSaj|A-t z^<5|AJv6v0#a*G%viY$cdjVV0-QvruJ2gmO`?vn5|E>PSpZbq$5}WGuBag$ogSCx4 z<`w>8*|mO;mHZW18(~rD^xax6?-j5WTQ{r z;jH|&w#W~(YhN2({Q;h{nA`<`9oq|5=^sE2-2sjrI5s_8>zlXg_x;rGt0#W&Q+0Uj z{q$Yh->?wq(6?&ikuk&wv9)8s=*JVUj!EF!wY{RaOfMgMMbga=tx7Vrm!!v*x}VR4^prZq zc8CY%AEE#&0D;SX@FFhyMqTXM51;RY|DB{2=JaFecH1_HezooGlQHT!xe5T5Kxw}Y zhi4pbx34g+={WVDV`mW9*1^xdYmbf(k|wrFD_gvDPld|Z8+L+v!2ZVafCPM)7geXD4x$;RNUu{YISml(p~x5w_2J+Z&%+J&wPh#rbr2!nd2bfTlO4jT;rjg>qqj zC?5SbjDFLv866&ptuTfj)SoktZC00hm505GDf6pKn_g}wL5Ne~+=kziUIbCDeT+fr z6ISO0oP*do+^#`jwmooec$uyhTmTO`!m$;{bKLpu`$(_l0YUm{(}JV)<2*Z#tPi4I z0`f*z(hHOM)vM+WT@`C|i_gkTcMRG*c=3q3b~u>SKUU9}9^P|9MVDWUi<(JU!m6%m z^%;~eZ&lC6XS_<-9rH*Uc~_a0pR5mE0Z+Q2Dg1Tl(yZ1$+194dnm?kce{h&8pK&nMh26S%DROT*(}xGS znkM>8T*=S$&z=J7se&Gpujv>30*|~C|A60KbQ$ITw%{@U3H$IqIt`^x#ce@%CXN)u zZr#ch{O+clfstWKM;^)JcliBmi#k^x@`S!#f8f&eo_q(Zl5Pn8QD0F8b*R&oiQazl z+Nbi&Y+dM$I-F7$*EI%UUBzO0|pIh;BZ~HYq+F#RC2oiaaWzfK zpj>$&#dPTFb2hVu$B2dZ%twaso_e-hzv3j{+Bvp4C(?k#oZMvk#8FMB-X`nnigs-i z#lq<`QLVMd>Q0$b1)_%*#lQ0ufmhJM#y}pk$?2-Qn8JCUd*4Z`pC!NTt%-i$rbrrYf~800F>1gM3EukL%nkk;LIP2K||LHea1N1oHa zfv(tHv^MnZ{DKhLq&#-{NZr=v0@{3l8xy=XduAWI)3F#ocjI_W8`l!^tvu*XJuYrE z2xHJ_=@Iy@-Cpe1W6#D&z99cKpOHz7)-C^ z@V-|gUdHJVHVEQgrcF8^ZYwNs=XsY&9SKAgDm{l#ABoo|1n*}&W-0yyd2AIbYJ zPC{8am)ipfowhdy0~g??!P(v^ENLrvw{9S8AloK!DonPWTJDrZUBo9{XFw(zg1Fq8 z4#Qm7ZwKG2go0o4QhvK{xJ(|?Td&X5ogJ>G2HxyMQOHSevIsd07=w*GelN?N1P%<( z96MRlGiU10{JFoE{0CV8fa42ooZxpAnlLCdmKWxyqZ=9=qu{~1e8pW*+x4IQ=YO^S z`d|Oc)jxQoE}Xw71M8-F;>kyNK39)C`e1$L^B)av+}W}?e=X%`h}txrataLm25I2n zkvxqELaIGbMu!(I*>E9^TH2Q9b}&ss_bR3M+btV|s)f!q<^9eOJ{6Ch^4g?^;ZA<> zZ~ejdZP=*Us?2YlJUEcn+t(Dnt6N>o>|l%oIY*ATW82+a4*0D`Cghu}a8!Q4O^zdV}AsN2R49BMx2Zy}C9ZIn!Cx5E$(22-f&M>xr=6Qhv*9A652NIG_E7t(ENLYxlDI4(MQaahhFyw>)>NHgSU0aXK)rfOT?w ze6$^fJ$>wS_|nNn2eszsE>rbx`OVhR$*CHi!eJj6tMS>%`pp+#u0Q$b{!(39ny)3~ zT}O5g15e{Ix|Xtgp!s3{NG4>)kpE%cEiGKHjav(<8q#gX8O*U=@a@Ut$Li5XAFi|a zoQXlPx4&ImJDat#z7jk*QJy{szLI7Rg8Hm%fhrK}pfUxc8yel(d&6zH~{)h4-gP|Y&q;OzFTaL7A>Dx7fe}vx-LzfSH&cKViQ@HQK zBVTy6Nb?$e)?roWDeI|^;Hv#?4Ck%2otm4QtF49Q`ogoHuA>uEHGh4s5S}_dG+HZj zi}fC~5|^h&r)p|yqQ;?x?RL^;hkAy_a0zjFV_x{nRnWnU(26gF$o|$qkwn0?QG|_V?f^4*^jTIY zlSjUWunl*RA@W5jDThqWULbvtL@bPCx`uk_!Zl^{q*LuAhsVch3DhnQF+AFH=&JMe z+Sy#m4vLA%kvekhX!P!kznK~IYumvae$r6%vx~=YOVlYPH^a<=5enIrOFY*#@^7osGZcPkG7TZT<|R)Dt4QV~2=m`LE+> z^g@L2v_F(^R)0%AQR{oJrU!}McY}k{YNeEcZW9K1)!0@iF#1l~CCJG3ZLNR11D63A zLvCFRWPiE?4i0u4{GLK54b_DQpRSpEpQ*aBf)hwS>g$EilpOmV`#qFJ9j>ouVmRL} zrX7c#7O*XCxd%A)m;<7H^ndnW^Q*`T&gM5?ex?4#|NCbOB~~+M@4>Dasy8n!RG$mn z1|~^AQnwc8>)qF1L~r%h1NR-PYwy2a6XQEI)W2GP_TT-Z^`pP{`|6FCeyjTS*JEdA z*EBq7yVAlg@&H=+Y#POo+~DXv}+MM;`A!H{9{-+8qj$@e-jE zd~D7bq1$;8y80psI-zS@I%y`|x@o+2by!whBUr|@UrVRGybEk6&z!G&?z_KEJ^Wbp zP9Fgl%EW$g^yOe1$5hA7fdhG*JbOD^*%e|xA%3zL2U!fyI2uRLA2Dd>I6lyG=%DU4 zKXv79zRTIOhb(}<{jHTc+**vi;3R`5Y3$HvxSL>ftWEs)Ps~79!V-!TKM#x=bo?a)fM<>20bty9qulD`N6WC{CDtH8MaTzXV(U?&#=8*Ro;$d zh1Y?}0qAyY-a#h1hlVHX#*Lf&t|1pA0O^iWo$PJk+O3yge!Xtq+OD7aLtm}4*xU}b z&EJ@heWpEDJ=6={{IpS{i*}3jXm*P5boGVt;w(A?8$~B~%Wqf{=ira_+e@#!StpJj zsi&WLq81jG0*iSqtBYIhjqYhYPRy9!0rv2O1Cpo|@sVeE&ivxWGO~aWZgLR8?`fYq zcTfFG|Jom^9dP*8%P(a?-0_n~VjoDSwi~xO=GO1TV!<}-+HXAf*6A~6;nB@{=h6+zI;;_Nh2svG z)z}7Af8o#kvHHU2K3^}q_#D233!Bl~odIWht+aV}XCeNHxHP`sT|Kk~c1i%v;kBG} z)!_y4rW~s4EW<$*aW3?S2M%OQGwlMUyGce@MAJ*w6T;bGRmb^7?peA9XU=2D$Kd7?h}fhTGO`J^byZhAxAE`3{nUqXK6N!ibWU;6aLtia+2a;lko_U7Nd6bI3OaV?FxV!}S=xys|y4 zo~15%u3Km8PsF$5J&lHbQXn>O2JqV8x>sc(yL-)>zpacWBgALCB+`&NVu#sB#b?<{ z`RC7{kNvjjCwjR&o3_3bI-q0JLnSak+pU4G7oda?TWnYJ25<58#{e9GIgx}mw)oh 
z^#^|HPu39j^RaUe)WnI?wMIX;x6=kkd(d^go2xYd|E}J+Q5TM%u7CAMew@A>`B}pr z-=uHda{R(0uQ5vX<&f9(`-hFT;UyIma66gQT>xO;Wjgh)w6o3fi)vx_06%GO1G}KF zR@WBl`+oSd^@)G^t97_|IeKPuYYiGV{yIb-8gA=v@u^;OG$w^M{6rUtQeaHz;?)8> zC=A~7zUe7LED6%4u8z)gVL|*^k~AF$KclZ5lkwRT)?8?jX2VPJST;vM_qA0g=O`Fu z=NOA)PQu)eKO&!o2k2|F0gHS2(!O1G5nt;Y&980?e&9zJ)>7vna-Fdr>QrZ>4@4Jj zY_vt2o*?9BYIgV9IE|Y_Y(sxvryrM0RFdOysmleOX>%s_?OWS|fjSrm*S>H5o-%A- zDh-uMZ9B`CSIwi(A{_SV3-h1mJOBR9l8aU2aiijy7s<~4Wv9yHk#KL-7*=rY>q7)o@na@Oh7 zGG;+~$1KIapGL?ht-y`(;dOKwlq3EC9o6gx16@qGQhdp7E%vtsUv`4**X`4dwhO%7 z4U#!+l(#5MTzj0j_KbYw0s#GS+oGOUki1tHimyTJXHVZedE{6&X-v|eOgM%#IUW2B zW1Ec(cNfMxnMmE-cYn&U?X_Q&?zXYXYt!onFY=y(=Hd#2G_fqxZEvsF26~n3fe|Yr z@wRvm|3M$nI7$v*Nqfgveb)YykGB0v6|`~u*LwX7t4KQN;M~5&^x{7DL}XHWHrv!O z6|?w9ePEp-0T0&Qc;5P&E{bnTdh6F_aBRYq`huqSy{9=jIF?bAPX4qjI=sQ9zm)Os z4z60e{gGAvk#-`A(i^;uM?HaAd<#*6i}1aYKdDhNrZ+s&8m#gJ!S2IW9xRob7<`1q1bKhWUlRCcM;cZ|SM+Y4iKkCdFB)$h+QRlT zF6B@+_76?g(D-Z}h!^4lgMI__JJ@Y)cL8TFeR0DXJVX`nV67+(IfCFbVIx2C;OR%TeNt$j*Ji1@hRZKhu?7lJ8`F{M(SfvJW!v&f1Dh_ zZ(GB!TAjzwU#S6PdJucdlP&rN+hULX1Nb30m2QpaghM^i0z4f-)g#lzn+#ygQz0oOCfy@RbER@{RbrQy!uac!=>2mje{W+!jUPmq`BJB6VWk-shgYjLf8mX{g@RNMh) z=2Cg=j=M&;A&=1~(6sr8w%0rzILX--dZW)=B&AM*cbqG7$G=y{G(7oYjq5Vkxf=_0 zb?$mT`+Mf}sXBW6SmZ4IN5%}Az2q-)sGiwJ22!ncw$R{j?e?AR>yq&{ImM#%gRa^d zF3vNLYBctS@OQ>X!%NCI<;W+Q@1<#b@Qxc1Tma*}y301|FGr8jcjdDfe{gHT< zZ?yB#6Y`Y4TW~~M5+rcw`n{pm-FYCSO@}?-y8u9=)fH^5@jL14qcd!kKM2xp8?m!` zUPhz7ruMi3R2Rwqx`NagzFp%$^I#{g!)=Ju-o>s=d{-kZCygiQpeQghQsA&715P`u zq>HhKV3XeO7-;~=Og7$hX}I@6fP>&%jfdr?PZTVokCV?)je4BVzYR3r_&H;9(@zQNH}v=hWx zLs3H4VK5|blR)Zd?cyQvZ3jeyFOBj_e50qo78IB$5y}xCmZ8yPKcF$|Ace;eYp{-v zpRAwym;P9NV+n}SbzW9|d z)#Xd?*3lyuaEfPYWX$hdjn&i7JXH@p^guoQ;C=P6PkpdfR+nmRW3`rV-IA~|5QQ&} zeFtB60(h5Bv{}RX?gaqypvv=i4W>!xUK?%y|1ALc?d7$}tK76pM}dbALSysEBaMFy z-8!UG9Bj&vZ{;YDV3qfEKHNnz=`q~X(>22LF2=1ZtTj3Ey>FCETiAorP|hh~pnoie z(DKS+8=$~Rb|O`J>jZTbi~Q_hlCW8qyz6(rR#tDdHqybHe{^W1$qUZCyRIGd@msyZ z?nIvp52QyH01%wex5L?9+sXofZ4Ba_4IIT?KjqMjg&`+?F;3iZJTWyM_?%GE+37hP zr4)I(Cr4rG)GC60KT#OXufZ}oH5;CJ?D1!+e{=$#+^Bcoe7|n3FJ!UF#W&urzxbd1 zrF!Abw*pt_AgWQ^nXnS~{)m%bp;j!w{k^tZM{ck#;gUugE{*RRom(EHszYw&S{h3rOBMs&W@4svsKCA|i%&Q&^<7=P3S4z0s`Ry4s$+G6_?Dm53whR= z-f#Uz%2v;E6WZ_&?VfbvDaHc$6>p|D-rz)2>(82FDiPBkaFrBU@keO#Eb&`E7w3|4 zUBF?PT{$yJ@&QM;KGSt|fpOhn9w%zj4_Oa#rd}`)|C7g6xUS0f+FcFZ^6k~~;sUyR z3t8B&YnQIn(~mt-$B!JXUF7;;b+ZoEHfwYb+-&UB+wZ)V)|H-rASNVGjS(Vmn z=x)F3+NZðuOr2YFo^=&KhmT|~y{8*nB@j-9G=4?bDLM@|&dSsU9P{VPs@-8N|W z$>)@59YC6Ps_AkC1zkLJ^=D5e=%EQTlOOZlHuxX+skNV<`3z$2z=>&+7}__);aG}F zS{{`jk^~kfv}3>llkLrS>vX6x>F9ztziYURt{p`04%d<6M{DNDY>rl)ot=e`JxK-K zp|R!y0BGG^05CKR57PGz4*1EHk@DM)eSKp%L`Unri`VLBe)i|;)QJ=I$!9+f%{_wF z@9y>0`Fl>(6 zTUzItUt63BU%*RUzB4qQy~AG#5JKFz8oc2M{n6pa@B#SA=a9hRodp196ZaiNgL%`Y z9E#T&&S3QBeIYxIlSi?ipIEusd$Wx!F zv6Bzi!Hvb*TcJz`FZ<}1cALz&dl!XNhwH1gx79t}hMG09=?lXj)RRd!X-%J{ZEy*F z)-%>$fB)xyq5jb?{Ii-kd2b!P?}55LzgAan;<%2@(D1RWvcG)!VqL%T4tC2x-Ft4j zF24Ip&5Z5W#Kd;}-~YnDTc7*%2eSa6XLmjP=P|W3nqLPWxB0utv#_Y6Gf0gNbjOS^ zx`VnURHuOR;IrusgShg!mCKKE6Bis3-_a*6Ie$9k+!m5<5DqRKQEKIQRmktb)?)0Oy{*j*2sl77 zJTg(EQ%BP0^o)4a?PN|j=|i4fgsPM54vf|&cw+|#ReJ{6kv7)vv6fk+;SM8s-`+<% z4QS$LYU0i|7Z;!Jzm#df3{_eNF@mf9m`6uejU;ZNU&|CBK^XXe%0N@EY&2MV-bs#cg&$Zjv+|3{2#%E>D zfx4{?Z5)+D>#tw-+v&%S)RRxT0AMlgqr~`QT|RE|N*;oA4&6I-L_g$BhmW9-SKDtm zK&NbObSClXCqDS057&?V{vV+}(Cyz^e&ytNE`*j(H22KD7Ab!;Z{hF=Oji0s5h z>d@|X-Td^8lvRT&q2aS(xakphEjn>yndE`T)RoHk2z|cw3^}nxoets-k!I}3c+Jin zMP?4_ThF}-Md1y~%cpASAe_i_aPvbSe5(HKf9H?Wo{Q1b!Xy2qwO5~MG0!}K&-luD zC!Iky^abKh|J35;A@iwEqwC}^-uYTx=6Hl-2l9z+mqwW!Fgx+4*VyjWUw09}KC()8 
zq72Z~=$}JurXxqEvNQRY|M{=igAYGYpZnrx>e^*&VdVDJH(ssBA9$$FoH+)F^n<`6 zjrZsSGNFO|N}H{{HZWvc{hnzX1aE^u+q`q}gaeANzV=4_!~gwTkq(0=0DSdJpR6x@ z=`;9Kd$o9TEzML63)SJJ-<=wY;=~uGozY<3yPQ>rMX9r`@4!u#E0izVcD~x@#AAPXOrc8F5&Oa+HM*-3f9^=p6jD`{=F}`o!IQPT+ZxnMdJXym+~Ouv(7g$LAHl z=;|z5z=n<;kcmORQ{PwPIZC`QM?1S%)nkVJ^w7lUK;3)(Tz&b6zFd!^_h7v86H!}P z;30#<)8Hp^Or3XRrs3~*1Z3o}8aypO^g?Ft4!<*Bqfg34PW&X`<_`FsJYK)>8_(Cj z{wM!*U0dF%nbQ~Q+(VDn{K`gctFO@&eeh4uCUzG6`NrI>IypXFKluZ{r;bkJv*53| zkl7unj{Eg^QdK|g0dG#;If(2h64k9aUcA~V9S6JG=uOV|;FZuDUh=BVsczcu-GgRp z*dmDU-da8N@ki>1{>V?@lWumpl*oG8y)4lmQIKizVJj+nb#k2*u=KQ&s(V1l|Ncj`@G-U+(l>5_pi5cAd585 z(7<%Aj&sPf`;NPydyH%G4PN^&dkftkB26jNwg)aA6i7eHNCcjj#nN-*7T&GM{vAuFjs$LUPap_eT%lE?MD9N`vH$OkK+*1(0AHM+Q`w7)VIBbijUw%yN9w;f8hwG7bbT(VTf_oFOJZ{S(DBm?$gbJlbK_Z9a5h z!ymYf%_2Vi+i0kM7Fd00)9q%Wy)9E3~`i6b%PW><8b|anshCC^+ru}48 zuZAB)K&PK|;kC_79HHT=rL%p~&1+z4S&}{k1IInPtxkP|Mr;6!;&4)b2)^X%g!usd zBr@L9KVFCUJ$-|t)x&Q9pSZ_UH?aTuknMhC+)uiAdO<(7-~cv^C%HO5WEdci8*Vq1 zZ}@0%XuQ#mD#Oq4xhFS!)z%yy!S<$UVVJG>4dw>Gqh*gX4(7j zuH4T<%WuzSX#Od!M_wAf%tq&WS9t*oro;_`aUe&)=XY{s&$QkLlG0)OUef_ETAeh&}f?@s#yW9~## zYlR^v6#`CcLnj`%y7=lYHdIzE{|eIg1Rd22)~k}qC$%Zh^iVc+s*mmtck74WcKuf02Om53({u=DoZPh`TBY); zq&CiP00nWmwnq6=Mx98}FlpaSZiYp7pxB11)LkJcN&Tjh=~I5^J(te24TCYvFMg~@ zCNZ7eByl2ErEeX{7Fb$?HGUdlt-lGC_4;kWHV|#0WD*f+StlqgL^fEuMCu4YkjXt| zIbm%3g(k-qkcz4`?nKRkAn>b{?)&b}*)WGqAxzK|Dic zgoK#DasAr$x^{h`{>%UJFOkkLRL72;EWbH6Ha>=MpR5P(yT9(c|3Xb4oy5Rjtetg_ zY1;~Y((WLs(aPh~aw@oKzxJHh{Jb%=h@eVE=X2h9l^)PS zg-C)%8B=${-8ZI9UDT0yc}4hnrq|0WV3p3f&EUTHw^UNYODc^aC&VdW7$bCQY8nS* zuy(dLP#_pDIKK{7IUy_`YUuUbfk#eI9>Bl71MM*IHR9zW@h8T1;$UDX@pJNDCEVS4 z<^AR+jx`;7KlP{iFop`(0c;&K2l&LLJJNy^dCz8BKKEYpjeoM*7r1Ix{+Bz1BMy~{ zO*lmc@0=jKVwtz=B7{C+ZWQ_;`Kx`+r}ZID2n&!|JVt zTHaiv1U-ElG}VZVp{iz3koa%$;(mWyXVH!+h4a%mZ3Bz_7(c_{*>A-8y>W4u zE16{@^@nYXqY!A?bC5vV7UL~-8|6FdPZOYi4+Vmt1g zRXS+odVc;|z4GcS^$WlFEA{p7K3A7C$Xyj3D1&<-E+^G`sjzBsSEet3;$2lF1){u;qCx{tD$qO8wAjSjs7O= zAL2;U7V+2?c?3E9Z+t2L`fTT&PF&n}(sx7XPdf9oeh>amoIetD(#q4)+sAGhmKP&K zdYH7Oq+aQ640KVxH8S&jCv=~?vC+en(DL%1!cuDcpZe@~q_aPFfUR$=)&d68_3PJa zVeTeQd2ii+&jm6Z)av|FEnd4-gUD|`4&KFg-U9laIyE{|(>U`RYshzjUy)3=MJIJw%+uR}EcYqG=DUX!)=uCGmxM)eboqX-$p0;-EQdaQGO~`TB1DYz@{I8Hul45v$m3aseV%?w7God zzQs45O+0eswQm62AX2#}jcse`r4{kr{2lx@Ir6^oEENaOi37>J?eI6a=CAfeQbbo8 z!-amZk&ZHUaX{eFgMBBVM(Izm56(Y~Q#w)mtDBft!sMVGJcxcvF6~^~>Vz;3As|sc zN*5f54=FtVK$!a1ep0)A9=_K(|D|91jk+>-BcD1No;X(5=T>TivIhqzYp>t$lJ?fM zYd30peW}JK)l*fAH?P;zVY7Me|;)<*GSlZ3S6aWzU zwU6Z470{85?h%~byk_v$GIJ$;n%!t%TfFu9187DSeL4XD_tx;kQSJdx#w+--(ePYz)5- z=)|c5ECWMr=f77!;}Ks0eAyOxJKww4PU+CFyLhNAuuxanhfukNlf=n27j9*8%Kx?l zow>QTUboPhQxj9z(aUvmcD5$Zo}oWps+Hv>N~R8Aa!_0v`d#mR2T#GtF7}UuJ*MeZ zpCTK==(k!O$cp|09(BK`fb0QRzEclUcEE=xB!phlO54R-+B8g?uUx%STj-q6e){9p z2Oie^6oyVX@}kYAq$1gAyM0i&MO%XR2FtZ>!`*z*zA=tXifv<@bnuw;k>R1}z4i4q z(sy`|kbBBEZL>vjhLLS|?|E94PO!Y!cKm4Tc4Ua4-tk%e=&Jf%WP5|_#VZ%Ikb*9$Ylxx7yXMn(OLWcjaUd zM0Ox){A33u2oC6H(2=%>t#~IY{l2i=Xx(H@yPIC@UbP8bOlE%q9)^dpi|HTecg82D zYG(Fm7E~x9ppXmwZ(3zJN;Di4(&3IVukcVs`T0 z#%G6a4lIiwCk_*bPpS@9j?ACPy}JbpudV5qMyV;f6* z{nd%d>3lQVBj6u@@)3%1k-&*X9WX%9Zz-SH%l zy}EeudcA#V4w#}&Y683b@kcJyv6IK*+j)wBgQ@xko_x{uJ7hTJq&qI-WQgB%4&Nb@ zCUfVcxOehKc~EEBkNU}q)s@xgK0g_k1v26foU}e390f0)1h$&8?Yn&Tl$8Z|^VY&z z4Ua18Gj;UHk;t@gDI=zpE|D2!iO5VchVHh57Y0A!r!A%*L!6(mGm|>j6U&?=X!>8C zY5E~F?8uV4&7w>37wjWE6+|Ce@P*ldTkQq;AY)3vaOKLCx_J4$nwXeqzG$ah^T;ZG zd$#K*8=Txxj!mmCES$pg-sQ`+u!x@uNa96$uA^u7!MTNLKRULh9QgByUj1t)*K(9Q zacK{241Q^^FjzGZQS%|Z1@7^6tG1BE{(A7C3-#!ukJiMoqx3O;9uNG5ALLze;jMWa z4HcslCC+|(FkW3@rfve_@~kc~E@>zSSW_NRO-8A~aUbJs3wF~6eDUHdZ`GUcT*BMv 
z!}mLbuK;b}d-c-+vOy!OPdPxx4^qdmnVC9;4T1W}r~A-6z^v?oGuyzw__x5>pYcfK z6`ZAS)_z3Zo82J4D?JU@!rIaRu+L7$@qQnBV{oLep8nt?Rl}5z;@YP#kKTk1a3*ti91vq@HJV>v%UrI^9d|%DofX_NDG#A3}#`qMNuG z`mfoU$vSrQNS!`;qAuKfuFjr0l|@3~e^(ZJBHZRGI&lfx|1v(oHEfw1b@}Qwp4$Q_ zZ8SGV?4W0C9}T`1*RXxpYjt_E)>n7%vB5byET?`zyIuOsedy<8qWV|c z$!eim7OeXjv>s@m9uL-W@ft3vU%I6)?x|tu?HJSeX!{1dC%sI$gLZbZj^ZmGo$=Jr z(K>x>x*mJrOnvb2`|6`lKU$yu=u`EXPd!tg{p5%0i_dXgt1o}yGxf#Kf2KbF z?5FD4Pko}E{q!g5lb`w+zmM0)Kk>2p=tn+;P4!eg`Q&5hn+NLt`|qpsXV2ErV@IfC zswSt#(P91IYYqPJ(=aYx1t+vmdp-*r>`%ebe#bW2;p#Y~KlPk_Ab&_N&(MC}+i_$^ zDLBw3MsaXYIhMEMX{Mf*fDHp)kbL4?RN|+?kQ8Ls-SjSi*KZJ~jt`hVJ5kj;+?xie z=)5Bz`Xn1lkV`+8$$#L`PU2Znv|r#}HIS1U)X~KGDKOp80f5(*GH%dk5(=Aj^rI)+ z<_4aDK{~c~!b)QFlll71<9Ks^luf#R;I{`7<{n^oA(Zvl_OuhsINvO;1iAgV*XFT5 zkcZSgDNp!Y8yxp&SJR-2f`8)7ucG1}P0ABBP`5A|CtkAnlsfFQVq;K{HcgHWhL4N%dZR75OHzyDqOw>hv{uAHUZ{MO?(>`^~MxGypC`0&;exu*b zO~+&^;9>3ZPY z@j8EUy3QS+;(D}>Pa#9__T8)>``T&DGWN6Rh&Fa(#TvZyP^zYZY?=n{;EL4pwf~!O~pqFWsy??swM~YVYRt z+F7`PUwX5)ROMd(z8FOBzf|cpRgGonW}O0a^K)266|M3=_^R*C;GPG$?G>3 z>)Mi^?j6pjImf1FYY)H24F#r){TLoso|^1v4`+Os+k6UzIQvYu)o5Frw8>^GA+u!r-yld(sv|>AH2qtT#bOZP15l+j$3y$u6 zv&Y&s<9e|f+WWktE|a$8DX0@~+?{g1r##ZACM1pe!l<_EpOAFgxY}7tdebq{D=rX# zCu0o-UO}Ti6%)wL7)q)()}kbmI#e1p+{c#Un#DNqzc#_@8hz)=GB)jIO-xVM)btGf z`6zygW3JR5#oXZ5t`_&w&Yqy#)?7RH2qP(d4~+gzoA^anb+053Z0a}j30w0O&DBck z*t){s!JBPrr+3gcVNhRLT=+@6p%cKEV+Pu;j&Jx0JW{0$?PnM3wT)%g(aQE;=c)bU zv0JC1OFx70@cPxBKl~$4e3R#o=(xEOPR7k(Ew z))8c*_H=q7zUAp$>Nr}E-Cq!$LTNMQxR6H}9AJ@s?2+TIwcb$4bMIP!q5bw!JrEzAYHryN8oDjb!=tq4JVlac=xcsHkQ{g z0>4my>`(nSbS~}=!U!8^-|2H;)sT*xpx~sQC|dB1Lj%d`#phqBpa0)~t^WN#|8JA3 zzs5$7*Aq`XQT>CXb>hUadi;rp>m$#6s2+Rju^O4|uWOgy%cP&QQL#4%hWbEYcg2qE zAqHHjEq#MK5#j&>@xBiV9FXte!f^Z8k}me7aZu5&=~P?cElAJ1@;7d2xxXh}Cv@>{ zX!JE@!%GXT-Y)G8;?fnBu3+PD92RAFW_rA4W~PbXK@qORSZI7nn@MYMb$pH?qg^hV z*~P(jm1A2#gIu6&GKtS~c8=-X5<(~5_YdM&(kKU6J%$k^W8e&6plqSA4)R@DcP>Z= zCt$P63EA6|58(Oeh)!EHw6i;zGEBd-jbTs6?>FKt%XaGYsYe`$ReW_Ghetj3u&=gA zdw4KJI$^`e8gh^jXM*xv^?vT$xq9Y9AFi1bXH(z%Z@*IuD>z!K8?|`-di~YE`d`&Q z|BYYCJA+V(^&$9P1*{DE6aLfzFF_gFKs>*Y2tE)uqz12+piFk&5ocSaad<8jm{R>{ z8J2la!?e#nN?l}dg`}r6h_h|zfq$L!Qy8oc$on~trqB3Mk5~EJfk+Kw;fcIaH|&{k zH$PdwLB0-M!F}`qos9>fO$`jyeY4Z`WgNM)vqx$dM}Bf-EIW8~(kC&P$8gwP*}t)d zQHnw8ZbE6cy9I{qKj0w^Tt}l^Wv-!$D_hI!OZEQETlMCRxq9Q)a=ojbt+5;<--F!l zlWJmewoaTnSyM-j))uhMk$xSASX08mV~=ne=^sWHOxME~PSvs5*_y!Uc4eSE*^W4F z-@)7K-3>nn7s@7a?j{juuKe%n%kX6fM`A}jDy~lWX&^>Ff#YWEbm&TYznN^Xq^8pw zPH*cS>ytjRdstz4s|O*g;12S`Gd za!s36@YH?l)-4PQ2b;HQZti*^z3{b&GAtj$|JKm zedXA(6Lr*yO+QUBJW56fx}?1;s-5I_fC)!*uzdo+ous4V8mnXIwqu#h`ul(Xf5WI< zsi!{tfx7U}eHn0{811WB()rnezxOx(R{a3;~M2Q@M{0l>R8J9b#B%UkvT`8$8U zp85DA^{rq3N2I+<+w5=A@t!uZ=RhX8K!G^UL`NqyzEvMLyxskCLL+!m27J!Ixbe*< z@LgLd)4@O2JNNSG9l7cLvfF3B-A7)unF!5>LVg7e8ksjXL#MB7y9}uNpdA66Q#@+0 zCdZD~aQ{p_^vtvPeUr6!ZGn!;{!aYU=d}*IO+*t8aZ&*yw8wZ!AKA0VJFacfj(Q(` z3PzRo=Sc-6#-dhMO7wSnz3I(4iL`zK+|-FoGfS88u_ zu1+4GtdXJJT3NhY$EJGf!iAaoPyhVCQ}>>osdwIZ9)0UBPU{x$tz8Yc24jdNBslo? 
zkJ)iRdOuANj^FK^06-v`vKnuj=6kO4A9yxTiy*IU?DP|N(o2I*8q*W+yYBnjf&95x zXUc6s+I4@?OC<2UeYn@y7U=KE8FcpS@tQnx99bBuUEXPz_YDnVLmaIU{N{%JyL^X*fx`sw5h>^!19Ma#(_)e2c5FWM7=By zoOs)5-y9rJzW{SP!fq2<`dwO_*ERI=`dV#ot=G*PH|x@+OR;gbN%P=CkJiV({FPd{ zwOBVUU&4~p2XnWr1JBqxtA3LoyNh_sa)4hw@1$gG4j{^@O4w>n0Bh&j2YB?ZblD}& z#kVe|a3y;8y*phUgjElXpv!;lH@;EVZmrgT`se;sj(B|g^*8fLCFSt|e^BYW&9?AV zu3XwqtVaJX|8$?_jU3epxm?sKZL}9(eD#ew2JJua!G~*UaV_n!e6LERv~|+T{L)>% zatFNw`V^5qt(EVIRK7Q@6B(X-u!)_j4YF2C>(G7Upsvn&@o8VX0z!yLD zy+odcY>%)?@l^u*X#|dc=0aGdBO|P|ZwE zs3!FQV<&Hn`9^hpjGjDk?)0(x$=~~IJ@UkZ*w5slpHtr{8~HSk|A3KV;$P^y zivxM0@s(|FcBhLpM<+%Dwm&m!|3f?_xdvReQ z6L(I2Ew69Y`UuSyt?E=sqwAl_}Q~aFlI4 z(!5W%MUa23Gq!N2Eseg0R$j`r|0ADA`-h|L7f9wJl{47FzS~$|j*Qqp4bUf8j=QK| zeD1sT_FHe&sWT^QWVoGvX7IiI(msWtpA2q!Hox!e`@DBRPd)JK-~M)8x_liyGZ4M& z=`ZWhs_7q39nhxn#Jh$Q^V|EK!v|NVcDqd!y6Jo}|u-O^uK%%X_C zZTJ^C7^2hIU0A9KY|szf_dtE^bKggN)y-?S=;zcA;Kd!4)FuAq`@^0sa8F-=jQQz| z?zj3q0@k)0lvP9bS$!D0(?|TsT_;ZZwrcwLX#M2B_M10Xc$!oZ?0Kj<4>oXmE!E4jevXm_^r(Aayn^(J$ zUnV)Q`CLS_zbA)NhvOO)>+#Z-;o@yid{DP$@t`!}XFnp1>scmo#g z2Ghvro=9TwUVB_7a(CM3=s0PqkCc1xr3;tTpV>S@o}Jw_;ysO_<=xz%FU2mfP5`_> zL%(5My0ucvD@(PoGFP{5F4V%}Tqer5=qKFdv9Y4A)Glnaqm^xp1Rls~jI>9&Q9kmC z1Nc}B#}4g`4a*~@Gf4XZ`hdthKNsMb*NHhN9F~_BYi)T2Slh1h?h)qtDNbn71$4@? zp8yBKK^eA95JjDSlPo4(J>olNB*7DK+>oINxd6d2#%#(Xc#W;ziPPqAXZ{fSP?&r1 zZ`2o_%-GdeQvbvGGGJtvJ_ zG-)~dlTI+Vi4Zjtw1Q7EX(|iytUd3$F8{`tBiKG+w4cf;Hk}2&^0lz!qyYI5+sgQ~ z72J{~G8Wy&RsW%lMWW}T=;YtleuI8z(;Lz{L=7(5$+~TvE_l;tjq+@88PaCtKW&3K zTKVuu`Um)l_tquY7SU{v);{3~tlHZh+9{KU)CXji(M?b5a^)s*zB65y_r<5-5PzIp zdA2?3cSCnBK_A`pp>qIG-!%Lsh_v(%O~=@8_)JS$ddlgJF_>i^{z*G+2k%;$(y_tU zaAW!Qb4quUB^%;(|6n8Un~g?XXFNe%3X0_hj*R>3J5Ya6K^gF(n+IIfXnwM_F=^V? z`qoB2;RDJB2Rf~d^9nDrVUHR31}F#G^Uj5Q=JS(I+Ejk-#Aj`Xt?0Twd@n!2vRP{` z&T^5{rhT@HlHp}aRo)C@RsPSW1z@rbU?wnSE{PQSt?G8+&Ej6tv^KYEWKP}zHcu;~ zGYO8`I~Tf>jLGpz`_U6m+$1#M#4LSJJ}WRdiqb(hAV>BgE+n;CgPtm28>v_OTAdIYq0{%! 
z8zUowH9h4g1cqxJyJ-mY95^1Zv4qct46Q8G;nrHsj1JZ_k33YL{_tbyHhdoai@miP zIoPT3{nZ-WTLEW|Fo6G#ZW0qNnifBf?Pc6p`v+VEucYvr1xcNByvbFHh)eO<;=7?E zA1ZgsbgW1F)w06p=FdByP1r4$oLqd>t)oR#SEro2LpM*xg}F72I@_!G^cpBT@_aX( zUb{Ah66Uy6_njemlZO2AqGfd2PnPcu>Tt@Ik6Pf_J~Ab@So1Ymrj6uR|N5@0BNE7S z;N}jiax90R3qxFVxw^EBY^}r(xq5vbpYl3B(RQ6Ub-GTRIa7X0%dw*D!0+Hd(3`HX zXbSC|KrhYGJ`gSc$psx00V6+TEaIm97Jl$dM=_~R>1(`6BfJlO2;Hl3ck5YwnY5dS z_IXSGA(y*;!b+avowDf8e-~YA6Y1wQJa_8A?pGeWbr?d|-zAu@`}>~YCjkD{AAjVV zfc6nvmx+Ih>F&r-X=l|NmD+$_zuiw6T|jKS0~u+U9b_1|5w0PRGPni-q*n=cj{whG z;f)v2f=ag`rpt&9?-M6OkXD1hAdHsLI!s^4NWSU7oeroeZqtcWH`FU@fcp;l~;#(QQ zZl~&s^*i_gep_e#whyLlxG>)LruB}XLRTSm8Q1itLx@strydd}xj|h1b|a!2lAiXZ zjL??)>~uTz5lvCz7MupZI1n0jw9(_ALj&lwzPVF>_W$-5>#?VQ2usaDYBHKR3@b-9 zydZwWO9bKNM_rrids%J#<{NL+PyYD7SZ`l?ophshWb$;~_s9b^bL429J$teqd;F35 z%%?t8=k7UMTPX3HmoL(xdXy7wqeFKI+5k>|YB=m}Z`L*nRfAZ(sK}yVps#~b z!BvM}j8}#02(GWt*@DFkcF;*DEzb^K>wxH_X#B*vL^eE@O}bl#KY2-~-ct%1|AGln zv9p1ZkaJE-&PpdJc@cP{74mlf&-j96`58S9hR|M-RBWKtA2w>B&*?eu`p!d#mP_=IRC= z_YI8RcZquoL$|+Yvi2OzgMJ!#4kC_?i#z^SuY48Zo zio5W=G|s)4bAY#{k>=KyQJT#3+K+%Lk~P$Yd02Z zeG#5pS*sx~QoNt~o<48HC9gxriyt*4)U3deB0CWiZKl631!^Yy}W z&($CO7ylp-Z`Ii7EY6MiaB%`5#ijS)5@AW%gxj>_ z-JRn;2~MhZY)9ivZ(hHxE}ZVvKgbL~@&bAeOR@3!>vtFqHw0AQoK zIB&L~H{tMh&ozT1@NtW4`MPJVIY4UIL+`Y~*4FQj zjg8jyks~!eH4A(z333()e(|zSfX6zNhj!_4fOMP z{lfznFTw}fat7M(F^~3Xo+@BE^Et3%AGuF|d9bn0^L7o`cT$E9G3aC=mH_{i!|66teyfDO$n`ZG|Aesw`f761^bZ5rzr zJ2i_XX@e`*H_8@=Gr^TY)~?E-AdBPOxC+(zuC%PSoSLO^( z)+@6*{?V!OSdN%Ito5yJaJF8Tu3fK7msipstaSR+ZurtiAFrSIk>6jR`s7Dz_Q+K9 z$MV_=wzGYLiwHKLvpXxnlLN^EqiMtT&U$RjuHV_y@1V5&fW3k2>cAVXTnXENgFndM zE;f)fAcFF3qxfpRQ5SCx7N(!%UOXAPG+*CXs*&+A()Z_`i@ZF-S^0CJ$mrNq%HP_e z@4j_|w#?IitdVaegN)i|^1nFR!gp%}*V><0)}DGn8%M{T)NKnOMn}eCQ#i5XVz=d2Ol_FU-;bD>ioU;0RLXy zoV!&Ei#M`k>*$fmEcBhm-u3&S;TPJVy0ZN4QL6_%o^Xc#r~UDl=|>vgU>N!*@o)OB z@1%4eR?!%Z6(fv3a2Xr@f0aJRC)65MbS{BkE z2kzQlUtOuM|H`lCq=}Dx>ZA4gOYhaM|MD-?x$~#%^Pl}lbb@r(K9g43hFe?oPc%Uu z*FVe7d2s5)ii@^|+vBV~();RA2c4}G>Cc=+DPxeH_t&A*9^ zC_njx8n^VLSr{&!f)jm+rbm4aZouEzgnngz768~b<#2s|#oqKJu<#*ptkBL&SCGML3pF`0P)|JhXkEDHLf(%I`Y9NBzuAA9Evj~zJ9pdCkHwoiN+g13ff0`kf0Zx`Kxo-&iH-cxz~+3+VbdL?Kmge z9FuVOYZd^wQy5vXi)#F$-jI%s$GkeJD7sjFP=YnSjvTWB(-Uy{(!L|@u)C(Zp7Nq6Ix?epg%n8*vbQHM!K zpB+a?zt!JPPeWVK*LMcrk)xdrY%_Rm@zy-eoLU9c$z2Mq#LrAlp(pUAC)zji z9RoT#J6p$(A59-?`}HG-hDSTP!Q=W#2T;<*zT5KPvhbPxytXyHLFnN)QraHB!ge@L z0zOQSepO#+&zaW+V=03)+N~*51>HDzg&>vxT6!X+J6~u!EO*&BV*&V zz7HAM$|3u@CXPa_@ z18H0=Cll=LuF_g{Mbit)o8_x3gQtuGw-B6DOeV@KA5^z;8@+g)HgqD|J(0pCM)gaq zyPL-H-IJFEKhaI{25{uQwX3U-)Ym?DH|^OLLw4TV{#YN>!PW6C>C=4LPLRO~)}MBe zk2~6LSsnO^(a-nZF-EVUiFgFA;J1S(c}@Mbv%AhaZB`LO8J_6E)$xGnchYKqTc75k z5TZGG}W-Q2Z(`H`NKA*KM^h;b&AbkDkSoMzj$1yAYM}sHG z$=v#%E?jW|rdFeVw`F=-lzFn~1>V@CkK1sek)OmxSGve)X?c@4d`95Urwafjwww5) zT*M|mt3Go0H~P2P2-amg9XIddQzpvkdR+Skry{e;iyP{+)79JR?8unPE)~Kk@`8&1 zC?}OtcTN*fRy<0*8?PKXuR{GbFj_bbg%MdJM~~LzLjccJHG#dFE_Bg|LoX z`bmfh^qZgNwwyy`#G~DXeRY+70D2083;7fQ$AiJMqYu;~{x(S((Ic@wo&RZMG(}mY1GNK7-$swVV#IzZJjAlQGs-SL@Q% z8+HB0Rs5!&I(7O?9Y1xtCa0&OGn9ur?V^7B8F1h9UZ;L(2UQ${Y;bk@pAMX!u#_wE zoNI?y?Ac@Q@KdJrL$f)(GdXYDySOu?eei~Tnk=JU@SlBUd@FRop8cDPHd(x@@z0V30(&Wgn z4kO3+OmX6Z^fIH@ZrOVfoHSXaVA>7?g^8&v0&iLoXRyvB1jrzv36OEE{MPR7bAF0i zlt+u}R2bTJ16r&i$lXd#E!aSQJzE7P#k)x%)^yM$)dsGg9&Cgx6r=<*Vf)v>CwE-e|f))UC z6lG@tfE}?5I?|Lw4CCOg1Cu++656vNG=v{o@antvoP{BG;oNYWPK57S4A3c~P5ko? 
z#GCeef@yr8v@N~xgbH`o#II7#58!6wT(A&Wa2U*Q5 zos|XQ%QwkTMwY2jEG!P%`OY0jI&1QKSH2=^G)KqGK|}G{HJrECHo}jVH99dKC&lvG z?wGqf0p+9dwJU%<2F!a6&mNSk2ARf?2Aqb7##?xftA@Pq9Ne}ORrl;dQ@@)wJ~^3{ zUuVyss`Ka1SKsJV-MoG|-*KFqpAXEx{L8;ufAz2Y*R_gq)y3@~c=ecn%ZT2F7db(5TlnsD%JM{mwL=f^?LcK#nv=$zMs_PMO!(wOkJQKRy|*TaAEv!ai?=eN;tu?= z;gQ_?J#=}eYjA5X?ofvVs7lfnqOb5O=#x?@ry1l=$oy<2|8=a z**)BX@8FrO)f$+ZtmCK8WrDx~Y>nm<$Bxwlz;x!ssZ8MQ?krIk&L6zd2jZQi)o5*U zg06-Lw!Jy0>L8oh`UD5!hlZQ3?czs+BmGKbE>YlF-NAbz^1TWoMsIl?>`rwmKhMsemuGGN_1YBOLQ{%Js?u#$hC=S{O z?t7@l=y&JmZbDxt-1=+hV7m?|dti7N9z+k}0L^V;l#TY)m933>Y3^F>4@{!MkJRY# zvvvQ|&!FwcY6E=v{pm1=eX7D^0Oz(t_o!XvoAxgJ3SN3Tcnl9q3pG++%3+PKb^eTT zV!tgSXvUxV(=we1aAIFPYO82xh`Z1@g1$#8i=;mxPH`t!oGXtxhM43>|7gVRuECdc z;mZ-^(#eqN>6tol{8XKQKm9K5@aP1Up<5}^1ugC@b|A{nR}2pLjljJO&iEVX_uGln z`99h=f9tvW`Y-)jJ@Uba>x*Cfd?swBCI)Ma{2Ln!HGlPL{qPsQM5)N%zyv-PGPv(| zWVhZCk1{$A;=7A9|qv?7#cRYG(Yf-h1<9 z;9i9$)+>(0gZ7&Ep}Y=_{B;)qD8Ei-xR595u|K_v+Fa2S;>!^F=GyQs^4jio-a~iW z*N&v@yiU57S3ES%Cs0netZv8?G8h~g)SI24?rEnmHGYv#jLY{x=>y&UpMwc0WO5v?YZ!U zGUTV=k^ah^TpkB{xP=X{%awj!`&U+QnBgJqK2L!GE5KHD;^>Lc=;qCvwM>8P&h6pR@mj{g9;aXUtruRXUwPq1 z{qcYC2kWz+`B;7Xx4u<7!i>(-A+-K{JCQoN{Z0_kk$Z#j-D@|EQPNG(cE_x3_|{7= z=13wi7EB$K=`G#%gLruy9#tf z+wCLW`UV^Xu${!CJcP-g1B(uLX!~W+!;ar9Zr?PPR>GgjPVjackNn(G7Ux|>B+RBY z9vW}&w-W$th&#l$X_&m>L}lEoU%k5krwL4P5>Fbcjq zFn;%hkgCShWt3Atr=TiuVWIVTnp0%gyws=6n$8LGt5>dtwjX-t6E!sG0s(x-AqPzG zEjE{8myJ)g0o5Jo=R`B;S2IJS(N1}d9}auli3aM0j4?PNr5?Dg^T_3RgoP6*I(pU5 zUbKAZr_MX|qYI3}Yn=rKuuy>wF5FYvXhaQa%7G96)mzK(FVYTi;%{SZJ&PQg-9kR-FsHLrZks@ zv+Gx)Q9$?u_~jd~*^w=uP@3)0zIG?kA;3Gqv}$OItl7Z@UP>N&jE{=C{!I*RtSX zcw{WTs(nKib9rh5ZSdE4*K22|QHK{T&pyI>#LJ+YYv4;K5MA^+4BsD_8Lg)tyHID( zo~fDX(Yo(}`)cawGUAtLZE9&tf+OO|K9VlmYvlH!3?7I-p1po(! zo-CMwkpy>w<~#nt-Q*m()5i%TA`8H~d*H&o%^Do@H zyyF5P&F0JFA{`P;Y5>3g7~wzT=x zAMSb;raf%3=;!n)=rr}WzpcHxd22p)hkoSJ;zBJf-Kx18m*U$TQm?0+xqw5R1&K2* z;mIzZI+=+xbZbAb3CG}on@{LtTvROo zN1Du!j*r#xW3#|KQd2Wi`LuxJa+8zTTBI2tAFT;=h70+A-*6bF>NSun&3EchXz@`(9eKHbFOSXJwEwo#5-~g@4G~J;BKFQ8~i0u_V*p z$PXd>3t#uyx6^j>9ngfgPc{qROkm7sr-cRU(;px;G}z(XGE z5bo&R&9=Z=I4sMM_L-Iv^}`IpZ~bel&5n>}<^x-K?>DL~!@gD-@dN_-+Im`hlr3=G z-^P#4+dd7G%#m})q8#_Je1E1fUxTN$ySrfA(B3zG7sp=xwRe=+x$g$))ixCpAKhgB zya_$6>vkUa!ZIl-_=?__p7LS`e)SX|2cHBLolMks%Iu};7`Wyb+ zVoTFET&Y7F4jN6I?Gpbk`X%>;%bka4RfrsJ$z|__2+-oOf!Bw6+GDP~upRF#9XKke9p-^$F_E8 z>h}lQ$fF)dHiZ#1QG}ct7@V&OO>M8T*I6iG-$T(&*0lwKe{_VE!B5if(78hFsB%1f zaHx7`&eYVI6E$)CTuq!hU!xP#Sq$JO0T22{xEZdVA>b2t(8+p*N26RkX=m%dxRA(G zkcUV5YaCrVaELD3U8^Ctd~K~j^PO5P42JO!2WXe z)3=KoX(XAcDYTIvg2TM)@R9@|^sz8BI0|oDma=9aihPCM&It-i z`V2AKp}UCb1fQK3Zi_EEL47HjNbh2NSnLBH3a9I{I*sgH+&Z->&G^izzG|#5*W#x0B;_d>yw}T zG15qDX^P`amc|8kgwj}|ixWDUUFoEO--83ZlEJlq{J;Gl^~=BV3$$%CNAiu0O(1WF zb^e~Sb@t@Rdhq`H>Y+y;Y=c8vs~BzTw3qs^s1*nW4Q``+h0eA$4B487Qw5rK2?z8Q z^V}p&kd@F77!-vjT*TS!V1#KJ{aSv1t&VPxp4KZY>VFm8?I4_SI=gv0G&GvJdU+oj zyDNuxO_s<{TSHCKjE_&$)WpR9m#_bTvMoFB`@rv2&Q-7K<@`Dapc@SWjYuLiKx9B* z7A1&9tPUpkHi`I~`LF%`uhA@!oRQYlr zCF5$VUE&|Xo1-)?vJDqb!Z60yqdDBtUL4vPsQgntz-@P&WPsxp9vy^+&ABzc@MU!q z=~S3)r6RW6GNK@XoN#AfnUAZ3{MMB-bIo2kG+)0LXQQX!9}#CGR;8$KVFTodGE{&Y zTiZ2Eqw3O?i}kMW|ACr4bE*1h6z=V;*5c|?t*&p>lTUuT{-gi>U#wsJ{J%`6M?0j8 zazOSNbUX7vCeypB&*Spi%{IO4byngD(f3iOQ*&nHQefs0|>G!<19=PYe z`rfy`Sub9@5gF=WD8`0|aFUQ2@;_#WhJzu(#(&d00u3;-6u=69_)wk#IPqp$qfSjvXM5Lv@UL++oef`Cza8Ls`rWC`%}zQj-C8%ReU>rG zo$cC!M>a5A>zmtLw`#*@oVG2HiaD=E8e8+bZk_7)oMs_44x``FgEw87c{DH_9dq*3 zTwT6&xz3z9S;yz*>eSq98sj?J>uZZ?)bapq@FDzW$AG#(d5eyc4>h>dPv_10y2CTB-(*f@` z4*a$>jSgjjhP1AUgJ#>nvIJ$HUbs=qM>yvrv(-O!ye`~(Uro)OtM!hv@o|}YjRI<& 
z8RS8j@(A`tpZC#&ge&`bcZWvm57St)-gl=GrhJJ5i5S}X-e)LEKBh{U+!#k3o4*EvS=>T?8o8Q`$$=(^=j5$eZ2_U+-jv$PkXDTb55wOw`Z*wf`O% z_OfG!pM-H`^cJ{%@PRY+;Sas57M5PCoy~b@kDf;^hTu6n3d2+36KdF zEit!;m&epZ2t?$@VwGogi4FdqKjmPjy5tu8|Jz^vdcD55QvG97HGT4It?u;It@R!7 z4*kc+^6jx(uC!fv3i~xyCr?b_z%OGPc4(aT*8}%otS7$z9pFgab*uKbx5E$3xKT)G zVU5LyxbV5v!_@??PLfjbWsR=?B7KrhI*BjSsh`9dShSOe*QNu5w~L9rMrXEs_|bqV zFQm+m*_ekHjx=DCkqP7r>1*S~2A!Q0&9=$|>fd(X;2Xn-b8^hs=vX>nJJ_rh{Jpi+ z4!D3LXzb6R-$zefG!?ZVLv=7ZzIDt~H{yvGSM9hWP`=S02v5QPsYhZGpS01tHbg^P za6n#Z9I)Q5O?+V|hqygnv+H*L?x8o-P2#QX$g~SQ z+r+)KutNHs@YCefH2%b3tx@jA`dSTOKii--U~|uKwvS{!(Pn8ac=red_7!q;vH?po>>Bp8?Z+#h8>UHq-oz$o-T{w5K?!W)O#NXJ!|3ju6yv$An z`<cFkv<>Tb*yTqD9%EyP zBTqXSN_S!ueLgicT4zqrWOqQY3M{)B96f$wHu^RL#44ID?lKuP>}-J87oXOz@pula z=xZ<3Cq86STfSDQs-aXoiZ+rCnuo;uJvAhb-Mu@|Ed_~)=mL2j_k~mf- z+P1y+ZPb?fGp{6!}B@dJn>hVJzLsK=}zr#@Nm#1Y?_XnAW2^#b|Y&xMZfADD=r+22n) z3_Y{64bP<((1ZGEd$qOl?*O=Q05bAH{;^SkQy)g(O<%W7M$4*aVSx`Fdl84)N0FnA z6%IrYzhM+!gf{S@}#um4*8>)-g-_2n=B zZaw+kZ`O-1zEJavw`yf&BP;#4A9~+A>wWKfTfO@okJa1W z_Lh3=(Fg0H#~!Ww9=f-#+4MPR`T}`fzG;iuM9}fU@-O#*PVSA3a@+)*MelU$Zy&61I|!|9 zROa;wWcJat``i>~y&sHhRnRr1&ahHBAgh9K}?1gO^ zV1cks=CtlEYUsyaq)qNS`<9`1#y_H9o6WI*Og+|IEa^iT-L_n>jOfTCcq)9;gSpw` zrejH}vkel%V+L4L@@!hF)xoqGV6_~ImEZjf4}eqgDUPzEg0_}$VOHPfQ-Rh+Sfnq0 zr|B)T`JY}J|EikhllZ%~RRO10KH29PAOfpp`)^tPlBmV$!bK8riT;!yr9EZHH#~5U zd`V`2OnU0&JN*jtmsc_{O8W+e11{=7UHS-vK@d~;1t0L3`B0|;>NwK3Ut{pr1PW<( zY^Y96jzsU=eRjI;J%6I^Ju_QZPfgXunTa|(Jyyp@(4YOZ{op}c2Ida{b+D};K-oO2 zGyHrVKlpQPGWj-o+t1S-9XRQBhdNr11AGeQ^PtAZ{JfFAkNiM=B_@$zJ$R-oJ0L1@ zeXvQLv?tgYX=PhybG2%FiR(rkExumcFFaYRFMqcdUVf(bcb01mxW~tb(GRq*_MkoO zOMPkme%nm;;fU+rBEjL&Y04j}rB!UmvXf&EY156Fm0*q(;#uWT>!n0YKpFWp{$K+wQ`N>VoL>U(9h$s%xZM1Lf0m$r^&8b7k^+4Ns>vCausS2dh@s0Ul#$Vu>~ zK4|Z<13>&V%djloS>NEK={0fHCrtb)PhQ2deCz~fC&g^6Z_u8g?FbFsWx_E-`(E9( zMTD5Q<4nTWCKI8*%`0toilU5m2p>o4>ytF2JgWwcgw6QA%+)^J+wfbI*OoH7!axi0 zm3oj#Y?`>~p2q9l>u<^@!9RJ3zU&Zy9Jn)&cCOdf@9C``eS$Mlh7Ol|gFbTaokVC? z4}F|OK6b7D?GK&&+|jY4^Dzu&$cd~catwY~84Db*I9r7+*vj@ADH0}z2gA|QYV^s7 zcR&@uTilkP_s*`4004{lm$x)hZbzNYU~?iY`BJ8hbq#mw(SyWnjGbx| zknd!0qzrSC5X8$sc(iNFuXV2SD3eljn4VNFqi`U&35R(YhcGiS0q}^^-qB+|lbVBR z)A;PZlcpZdCXL|PsJ0#^p-vVdTs@@{=fuP!i+hDedK*#Y(eUL=UyRN0WPRcjKT?

=IysE-b&Y2t*04_Y3FIAKGMh*HLDuivBrvr~WlZ~V1-;f3dFZr1O^4#86c zb$o8BE}T76SFc{Fd+xnb7tf!sB^u*vYquy#r%<>x4uBeZiz{)Knsf4-FlgHzTfMA@ z^|x+TUPSO5BK*Fio9GQ!ql%M8mq5^^;~P%n`8I`lw6NP}a0;&dOO!_Eb^c-So zAl)78kpU)zbX4SX7NCz!axM&+)|5~aqb#%p+TwAMFw>(bSG>mBcScRl#l$DzO5 zwJz2R&p%twyzqRz{NnTVH-GMbs=xYI|L?WD*0zZC7#r8=1}EJ2E5}rJPyPmJUOaU2 zy_F_uV2D#?(m2%9Ho6fcjf4I8WLd@UH`LC!ihp*t&P`6#X&R&_#zyPv?1_5!o9?ea z{u4h^KmOTI*9YJGuKLlBeXKt7(GS(b_gtyz;r`lLS*qv1`>k4p_jg};IfI0k@42_$ z`N*R+Gc;T;JoDY!*mF%H4Sn!BfJ2mBFA%WC(@nqV2=@%FJQ!Ua4zoVu8ajeo>3M_! zj@(#B-tX#k8WX-8BJRY*2=(#%$0PX$-q2ve%t0&lBK$xs$>^ z^ymX0eizqI8#rLR;e-Kn_!eP{o13*nI=7E%vs>-hZ8NHmY~WCT4gA9VeBHQyBb~sm zoV3BK&RSVpse@zOlh#fNVd!&(1IAI$%WqBF8*gJon`Ni;AWngFRHwuSq>Ihc5F=uhj!{7iwm3Ji9UIxTurTsRn(wDSKo8sMd8* z21aTN{Lh2SXV-7lDz@#|$niQFpRSqHm+RD}yK4jc;NY|~qesvc!e`w_ypdho zJ#~pNa?Km!bWLs-*T!plE-F{s#q|(e7)Ly17QNA=fa!AI-u3ReMGyGW^qdH`lA!+( zkT|?pl2l6qe~2sZxD}O?MeNwjx1DiJ#%Xwr42{9RI-tV>LVnOrUVslAIM>I@U_11& z;of{I`SKVz+D0!=)i=NO?ONa1s&~Ef@k|PkJ%rWHd{>tIgMai7Y6lr`%Wi-ne;i_P zeD=&lee5IetL2sJwcA+%Y8zoRjDQF90J}1$-fdbT__4n5Y#REU>cs||zFPN6SUMJX z9th0VLtI%m)3v%t2lEa;TY~T6Q&0&9&9~ov(kZR(EIsPoJ!T>AAYOvQ=v|Kz4yie|8*wIe+7& z>MUQ&c6M%ayT7$oV>FKKEPUtN9ZtoeD9zqi?Z^qhrC1>Q0@#>;y48d^fl7J^ViDiMn*(JvDpk zLLH6FkZ>2;ZDC{8=j1IM7EYnDUHT``Q2UTG?}*AcQDF$=9P{WXsAweAkd15 zvV|K9H90j=4_v)itLy8DqYlvbC%e=+Jec-RkioAwR8JZ%pD62uhzk+Q#H*A*kLyZ`y8n1efGj5-yDYShy8AwtKZ__$Ult^$d8s zzPL%fwdD@{`~68Wq5kPewhZ2l;)B^SJ&ta5!o?^)h`!bDef@j&5B|wNsl~i#y)yK}_wk*LBfZ@&Q2y<$XmXIq@_QYiZO|US+7;xa^YasJP}p4(u>HW} zgkX0Jb8=s2YdsT1ww>q)Z@B`&!Qf2p;LJ)S@GbI%E1lFp-v-OI0Zqrp|3M!(uxUq| zJNQXYw=~>x@YSWB$cI~xw#fueYJrxA_$$Pdhn=M3gh}m?{g`P}fP|*=)DhNIy@MVQ zxnL-^E%;PN@yk1Hr%_C!eve)1+7j=m2q)>y+>qpDmMu^53g%OUD$ekI{YT&04^dk<2+}<7D9YuC#XXY|- z;-^aF9rIK7xMFR24gR96LG0A&Q*+4NRMN>?!j(z-@>Z^#gjSh|_g}%sC+Ou7m(FX4BZ*e`WDcWhXGsp3zhNF*-qdv`G2RtoY8_3Sh8`tp3 zR#KMjFDFtchs|EJJ~Vl?m8UL%H-mHiDf>PeFS^Uh($Z>OzkW0F&?d++;1j;C2>RY{xf0EHon4A%4&Jm;hmOKo!1^floMaUkR*pWYy% zsg9vj`Ul)U-qrs4-|hwoefwyCAzw~LQ};S3ParxP`GmJj+wbSUsEZSgGS~-TqTj^> znaoIhx2cui7fzQ?Z&%N+`eff94T)+9n->NVC z#uw^~zx7-7)h~ago__l2diB+pYaQFJ9LvW}kRBb8zp=SPqq(2J7aSYW_D`e3W^4BN zi8^!gWX{ubC+hh0RL$7OG6Wn4yO@n#YzplzjryLSQ(CQ+h3mC)^G2;M+^p@D<=R@l zRXZDN_~2Xc%wdgC?=ei(>Eoj{2b$(4j@7B@{yH;xtS-zA)kBx3>(P5=>hXt9*L&V_ zp+4}=d+TH0|7d;e2Oh0YKk@eZ)cYUj{%!T~54@v3@Qz38iFdxaKK#DN>x1w4zIx{) z_t#s{yI0N~C*Q#u!>Qk0Td2*o#oFCmrXB6z8vfVLM(u;Ubzoavn6LF)^L6Xy&03ki zRm+QuwFnl^}nBK(pJ0r(QUE!j^t4yw0yEWAcs zWkx$LueI_xl?C}qM(1CD!_PH_yOli)jxOQwnfsiiYbSY_kbmzYH&{L2rCtKY)wZU3 z#7|l1C#6k9#e}bY4$=t#8gM;(aXB$kqxEO#?znLG(E%3DwtZFZn76&7f3iopes<1w zEOe5A(vKv*+CmO1BHq((r@r=kN<{~|)f4>`)<;_a6IR21N_2#G2itWF8`^p4g<8N? 
zEZuyy1}JNkIQv_jb_HMd3HaFsWlRrAdLHa)xAtpd1lut_PP-7DtN#g1Ya1P|z=Ce- z!>=Bl8mk*i>ovc!2A;-hd<2>88_OcK(J>b$(9Vz$@Zk}->~nHS>S(_JaO!)@Hxipu z9@O8LdAVz{LVWR&aZ+AMfAJvScw^e|clRo8dOA4=Ok@hf$hiC5j>gm8Q75A(Kg;UH zFi6~+cMEG}v?DSmU&H??k87X<=E#qcsJqv8dM_Q-^Wve43-jQb{*5kfqMZoO#4=9f z9zl!93Gu+3uWb|n2l;yMnUY8sejz|`eQCcSucAMC`KurJm)`1M+ueKEecOj?*i&W0 zNsp7`Q#Ijd%b>gYXTlV6Z<{*k5dW!*0QABr$7guUc!59ft*fOAM`GuiJJLkv(K+Hg zc=t_kZ_F0Q)2w34lBaQ6-qtmIM0os1@ELu_U!o*ON(Vo^wuSw#>akwl=^quRBuJmK zW5$ughF9Ow^5Tk4Qbr9=o(I?ZAAi%C&!Obb+wrS`Yz-DO?N%zlnz>F!%RKi8hpPb{ z`14y}+el&)7QCsUS|5c{>}aFWb2r&rX8I1U8X_IA@;72!wcAR|(X%sPQWBlD#32BqZ+HPeIOWpZu`Q1&Q9*W2li&~JN=4NnrPG7;vsYAntbe`!omSDoJ0 ziPUE7>MRSNouy+m+J!RakN2szP^83nf zq&fiH0lswl>G)8Vc#fk)8V8rRxL;mfhi69Xlb`%dJ@ClevU*$x#4YBQX!NbDZq(Pl z@_Y5?|J+}!-~HWhh`8HFD+tZi4v zA@^HcJ3Oz^@ViALcB!*l8>P|q`}}sIsk;YiWNZYdbS#}OcC5OM+y2&Ot=?R!8?W7{ z=U;rOUVZ)Lx_I#{I>GP9jv+?yQ~yZ0V!+7&(FIi4K^6!7z0ZJ6R}{HLr{PpW-)0-o z1<*TkgA+kC@FUH|fp$jhi5H!7>UFRcXE6;3US%K?#>k)>{41~6QPk{KJ4LGcUzv)Y z;+f_kiM6%O1~!9^eAw{_v)UN2VZR?8DlFdbA|o1f%CHl1kOnyNXCr2xI&JNAz`Ls- z9OxOM@!+a4WNW=sFFpTS-I`yhE0-_V`O~fv8>r-7RWvNt%1;u)`r=`(ftzBi#7 z$AITpCiJMwJcL*M>DEfoJ>UV%`-W5dEX}~54Mco|s71qFBBB5SMbqsL*r|I+OC<59 zo^Fm~P@J@}-Ne`W#E%FqrIYrQd-b&OkY5J@kGbaK$dv)=abW`e#EJf;Jl5d*SJP5@K+$*@|r%W+!F_>xVw`A#CXav|A3G z`a)>{Q*Zf3+hUpeOg8@1$%;(#qZ+;p6YezJxMt9g2DkjF&*lAKpIclGa1KRYl^rL= z_^ht*Z1-v@2XV~XdtiW2#}1I&_&WHJN}YBPo`}p?SL7#lQTp+$EJ6rUw*iO@xF9Q_ ztY&6U*U9@It)tDIIJJr4!{iaC>;L ziA~xlq^h2N>E-&h-}#*ynx3xFxw$%kzR$nDR9pHvWBPB%+vq{P^1^q~S4(x~?5V7f zIYz^Ec5Jv#pB%3b{lL4ki_s6GJs)S!@%-~vct){o{wP6 zbh4?lsbhRN>H%d)k8wC`ye$8U#dJXH>-mR2r%g-!Q^=iDovS=Hxg<~U5S8U2??jgL zn<%MoRJ8;gc6!-i42-}bkGSRkBsMHMlyvj+^Vw$J&SF0&Gmh_a?&5{Ia_@b0{H}ZI zXc9Y4`N!0C_z)SG^0N$Fn{S8qbaiZeba*y=IFSio@!b_ku2Hqfr2c*Dh~Upo3w#p& zk|PKHk%>dfIK&Rhk8RKhn$vDLLbeakd#-ND1VJ;VctIE(97RsCO(SDG4^b|417G^@ zc5Y|2s{RjfhR48NKD|US8;b9EkVZ=r26~at8Cp(H*Y1U0htiwxBzJ zYhrY=uHJuNExdNUUVY_-&>*ORzg4Gx8-06|hA`!CZ2|-QZJD>>#$d zvh5UKjrf`DpzR{uPS~`OhzUvZo|K`l{+C8Hu<8N|7zWP_5OPF(YMwdaE?w+lHEb@<*pIh zIQ7X8Ht%51Nnu zMIKj{_jdbv=b$+Cr`-5{;8ohDGXX!~&=ojo54E$kDIzT7ZanGZornIga)vyiTlFWY zQus_gp{!`PpazFB1Mc*f;D@vsxxXEUg!((JGe;XEZ1AV_(+`4{gV+!|6!oX<4~V{p zN7N^NpWHU0_Qjo9w6%M{Wt+;CLBbl?sjT{;bpi`l*Mwk-ujejF9(t2 zDKBx#m$5P0`968A=?Z;C`CH$?`f111C4QqZc1>PVR-n1MQ=RMNTDQJ-n{EfUh4-$j z7wSu2|5jaJ->RpceWhN1?d3W?d!{a3yo%i)sA1a98~DYGi|zulUJLWL@}1*!xOMgF zCjQ(;2m1jZS(k-H;-OE}^scUSQujS~pMeL_;rzKX2pkle1pj+GwY{?*ybt21iBD7s z;qVjiTc`a!bU9(7LOs%WLEe#`>V^FbI@<~7|iPIkE5rY@O$q3nHm|#x5b}xfKXY3EjffAVjtTfOddxL^l=Hxv$7u%;u`#_v%m|w z!VDsI9??_Ej(I6Zz^jZV|889UyG)odLvS~Y{EUs(67|E62k?g#3|D>rKwTjxYvG*Qa$N2kO_Qx|tak}pFW z7~_?c1Jy2X)d{AE0q$AIMwZ`IZ_ubGmj{=CP5!w~p3} ze9#5@>iU#vA0wUGfu^6u7a746+8b?K>nkHUKB=E4I5=8|&U=(aoy8-(&=_%+~wX(8|3@C%z{WihJokWy$%CsLQpp1O2Y7AB~^k>V0)*_#Jxm z={JB2S6?1N<3XNs#cu5y<`+q8{8tfvcE@(IowjvP&lPh$IL>g0(Nb@~kF>Df9y2H3%!4X6lyhQLez_OY4-A5IgV zqPP>o`!z#b>B3B3-E(22-uKok^|5!~U!VSgH`iyM_`dp)kG`uu^PzXuM}Oe0_5E*u zq~7)D{q^vJSL@!p&eo-KC+f_(lXYrtx{hPV$Dz$I4B<`zgXk!Ls@;uFZ7c()y9?ka zZEvjB(GD^t9pEmkHPzRSG<~H{m_9T8qz>MixK+u7?gWzh=y|%Cowt z4gBjHLqGXZTcZ89ZBDJ6j1vC^**?VI+9z-Idj_e6Q&5p%w+wG~8qKbqfX~z;>K5A% z+M37^!J1=vzwHRq>~s1Uk+T8XpV}NhL!+(k>3DYrwO;c1PCgSvcJO{b?QHuG?2dYobazNw`*T3uw~~^DyLti+_yn~@!QV8 zq%N>+sT@(h;qhOncWe#i8RmQ266t49N2el+I2;=lQZCdzSnyE+jkuCkJ z(5#_d#B&cFx;X4jiZI-^b=rEAmOq$9T964aN*7MA@S+q~Kbl9+uS{E!G!F3Qy*>iC zph|5FhSz=$tKZe=@u~Es@E3qDew2N4T{z7_pDcjopj=?`N{P|^t(_qZw9TNAx-2S% z%8E83CANJUurheDADXgxcDH1Al*_miw977smH;n8w$ONhcZMrN{lw7+Nnc3lNcr$q zCcnVf4iv_0fP?Jp0M4A?Z9i%jDOq3A+YXG3^Ul5pC*|y+do_ox56XPi0mPTrl`T$6 
zvDJnKcI(#FSL*Tm&e!b3Q0;Fl;e*o-?`=eLZHVp@+Yhk@3VWA!O-LT+HDri##25|=O$|si7BDFk)jr;7lL>%wi;lQ@< zTdv^=c)qFM?E|>ICAw_d4(6TF1S*Myl=702lh$&gyj`6xM_KsoLHn&i&(|9Rq8Na zhTn~ovpqW*y-iB9t>((J?Ot2`-`WvNHoSeJ#`4LRV0Dao-*QwdzH{NIwk&yF>;KO8 z-1AZ3JDmn0%nB(a7KwaLr;EyyND78`lFIjSyao|uo<*{77$*vidwpa9YfNFj-t&;d z3L6*0ihNQB;+ihkY~|nb>NOfUWcDCKL|9WBa}VRlG(-`>+&3Z!oiTc`0=*^Ve_v(* zD~J)Lz}I8wRwnU*&x0IOcIszp>u;TW8382R^mz-DfeSI%!)u~BO^PFM7;ZZA5`ll< z=*e)>>MSH`W1`+n^u9wQcjK6+SR+>h2sHsG2?7apj4%Md#=CURwp%<~MIBnkg*XI`jvk+@Q)kXnZW}oVH(`^wG|9=JI?no8pn%P6S&6e-S2sC9lQ8Y z^4Q|NO8e$3H|x*-nLk(0zkG#IJ9Gzdghu0p*=eFN=r?J%Hg|AtXw`ExJ_=F5W5k7L zJCKxfx1BpY;Yz&0njM*{zy0HXvo;qN>aYBFKUTm0h0oQ(?K?F}Ig{glYNF{J2VaLq zS_fiGW@I_KgRFHNZ3Hv0%f0r5N_UTjC@Z{ma!Iqg+NlnXr7QPw32O!V!I{9OE+8Y~ z9zS-ZP8~myt)PGSKm4QAv#*|c;pzJQuRdL0d-l0{@$%(*{)Lz8>(5=P=U%>AcNUlG z&dN$%x^WYyeYI|{ZPqf*{aQ%Y_|Pr%9at3RZ5&7y{HA(|>&U=xicaS!@tt^+P7=bl zxSzj%r(S&S<$B+{-dqnn{z#2u1dY>p+A-ij`1Xe1EFP)}=ywR7vqNclaiMPAzD51o z)^&5UN8sP#nwmLUS8v{~TX&Y~AN}0N>XTpmMx8w}Rd0XmTk0d<`$y{W$DSy6r?`E6 z9@zH5|3po7w?@~A7{_rwJaago`#5+0Ox<_!-a32wRCbw|oSp>k*5)+Be5bazq15g{ zZ6V9+ID$?D*hW80j!x7A58YRfzV4wqi7aU_u4RxM;{o1qz(dTnfpzK>Lt+mea0WfR zjZ^MGR3GVjyH1cvMAmQosTY-+v^!lR#DN{`VZAW5fla5ib>c}k*?xFvMD&5n-P*u8+eNNl zx%^Un?aNQs#K@s~|NFl+vs6F-g|F7B6Nl^EJ*R7G68QX_$I+QOa^`sTj}6r}uF5j+qN9+8#+{L)Nvcx9rXLy=zWceJ@mxh0XiFamio49jk33>=e-vms59{M!s<$G ztZmoP6DMojtxEw=&8jYv+enVMdZk{RN|{V(JnNPY)PCAu!+5V;4%%kM`{0Tm0HiL? zO`C`c6@RvWvVXXPll%!V%%feTn0kY&mQdo@9_J-T(}Phdy%i|o5{~E=ufi?-P2Wd~ z2+#N4fWc13=7d8tSC*iwZTHmCega_3wrylA6F!n{4?g%LCHVCK0RQw!L_t(T-Zsa~ zG`CZjy6^aXO_`ParMashR+d-M50}utYvG5-AAPuvot!{U;Z^uj2YzI9s6O?nU#lxG zy%avNQ}xK~Xub2vhwI|KCsoI_yl@A;R(E#o0Is%&;-_buHQTRu&sBWqxrJvXoeW91 zgS5T${8_73xAxEtZ)bbXCDF8dbXxZJ-N6X^z+XIq zj{ZJc2c71eCq8zgZ03=HeHEiL^InVCBM9=Dd!2pmDc%=(wT35o_3HKd<_lNQyYTA3 zNKMThuU+K(*_SV+EhyyP)hZjC*fUqIpeH*}a@Sd!c#h{?BKQ6J!O(c44`vuXUHk^6SwZ7bK{lE^;^I3TlIJT-cQxf{_HQ-7ryvZ zEiB%~-gI)-ezp{K_W&z4JUE;ITWt_^a!`sRsW@r^Z*lDqwhSt-Z5-64TO0NH=NIc= zec{FW)nEH!UA^}8y7bB`^ z!K)MBw(UsWba$qmT3B93Z+B{8eUr8volg7S5$Eb5a`2N>+uNL!H$H>@>K~;Yo~hTp z?tbj{o3*hmQY?%A3N(7t5R&JNj3Pkd|K)AdKLcdJ6Zr7((?D50 zhP=n#4}|8Xqxwr1)Vsy z1LEEiTIR$eo%ys#{6z+gLKwKrd)hERg=5*uz%Dk2a=nVZJv%*7ANsBj)%B}aYh~#U z{MIHqBrk-=(}O#*<35?U!!1Z{A3+Urv5g!lBi5OgB>J3s1T5274|uDqSCeh-ds!-H ztsRX_Q@0k}$`j9?uh!Ya{|%p+Jcj-@c3|!yzgvd!0*j|l=FvU7Eh(o1F1AVH8Cnv# zM;pVvv}f~??xahe4M%xxT<~Ew8NA_U<~i z%v6mGj?}#m++SaP>dWghFki9hl(z4F|TzZgK` zRTx+P@~R9fKMggW5{0Bxh&;GYJaMgD(B_R`(>XzNWMsI;U5QFNva`JzUt0eo16sgo zUUpOl&)|O;zcZhuA(wQ@G0tRdX2m?fmJ!CY_iCzDgS#knu#1KB z>rXvf|LFhokGUPi@8+bvv0a?e2i;t(3+GPPfAP_OzwUkPK@5!b+FM<$)rBjyzqMS0 z1LOw+4jt^^qrg)`*m#tCuXQ`9V9YHfE1UnhPjNH{oKg)@<9-tz9W}EBl_^GQ= zefp9TbQFA#PEBUF=xq6q9lwRFMXpEu)Rz4Xlm#BMTf*>gCRo^q)g}lplTUw5O=IsL zJqFzSSuo?G8W+-x43E^w(`RaQVzQR8w;Tl9+1#q7#rf!nsi~P7qn**dbm7MicJQbJ zWzb7s@c+x$e?V)NUH5(9u9tJHoV&U@ccal60MW=f5g-XLf>AIfk)rI8$JQ)Lv}}2K zJX*GFYsNG7Xf0C~Nr5y#f&m0T5+DhJAQ2lG=tg&=L*@8#ey?hNzyJBZ>V}r>HTQe> z-FweHXP!(li$xr+T+k;M6ivUYF%7gm{{_eWDePaModl(^6*NK-w z0{vw5jq>SJ7(OuA2KDqcjgyHLIy>s3GAg}Qf&BANTS~nLj~sw>LPBgTaMeZArilQJ z<*oMCpLRJDT^hvTKxF<}8$rLLYv;|y6^)zksd@=_6Q(p%I*_mnHxb;=RLAx`PTq>h5-8T>~coJ_sbv8U?Gowwgo`(~%h&+yuI z7DDgxM^{#WhhH&b(JH zd?&6A0VT^DWkz{cW||E$f^EytHb2uDui5Q{nYU5M-}bMxO!mnG`b88B2XAO^U%YjG zcqF|kmkGk#;NoC&(=Y8;SPgx=Dm$SXNz_l<*JhppX5R_Rmtt-jeVmT}80-x*y?+Xl zu#-Ij%38_|!p7#EM^vA#ok#=^^OcrvvsK89O?Mo1LvX#coSI}Pd^iWr+2OT;XFSI8g zc_%+Cp9>43TS5L>Cg5sgT@a%$G%+$+fA*QL8@ZJYd~;O~v9s1Y69*60*uI0cjNNb` zHT9OXLQ17&vNCUR1y6URMU<<5*LE#0EZ5XbUD`=n$R@r!d18 z>E%`56hg|!ZeP|78xVRl{#l{~$q&*so|}eyi;^D)aI>Yo@4z-Yf?RX_(iAOk{zd)- 
zXxT!7M%o;Sl9J|zXIH>?>_~iR!c7!@c$G%UvU4`P+JL%8w|tgW{~a8oZ<4afi>Gc~ z@`S#|_ZoYk(csWE4IV-Eg)^EQ8_7H5DfueX*EaF7Y5(%r>JDuZGHP2RbOJN=47e_k zbj;K2eU7cLXo1}(D{2p7Pr*l>kvrN`aqu~@Ll;63O55QC02h5oyhI592xu@nd4xX6 zG6AIT*cyIW%)(z}y`!tCuduhiZ=bKBjwTE3ORk;xPWtsTkZnRi&?nDU-)~wLNUN>z zy?JJG0MAbO@Sb9v?S1R3j(q@y!?Tuf;R}ux0!KNrQdS~b2nk0);?(6tfr+#W=Ju?=j9YO>{@_h6EIEN|XX-o*vC zPhF`e9)Gev^r7F)p#S*ja0W>fu<6O^+P80iz2^ttRns%Gb^iPX96JZ0h1q6qD-J4O zVCO}vq}q{!1c&B{deedo>pXRi?o*Kc{-g6M7yos@3a>R14Q`R-ONUJyQq*#H0C0HM zZAf}eJz!aApnKa7O=oAOYI1U%wik^rjNCd60>AIwhcWXAExF2boo1bc=n!&ZBhXdR z=Hw>?_Uzps$6Vd8Bf+h@mlo$+g)J=Mxynh651lorZx^hr!vieu80s zej%R>SX^32W$OwAXV597J3FiC#5i{NIF&z6It3gYapmiD-Sgl>weRHJ-?GX^*{YD|GNJ0wmqztHcima< z{*iao-}^_uR6qEEpQ*baxxXf8oL-v0T3`6W7wS7tJ`e7G3SgtwDTg}&WLtmm*#Lzd z%DHQhN;D37)t>38nx2@b-CXzYKaknwuAqPCyWU=Rlh>(JPuKkFX6%9ufjCmoOnxd8 z+M^-2eCFNK@?3r8i+@`W-F6$!6y>A>wDaV-=bwpeWOWI=cE!5eQ1%Y=)dX@njx!}J z<#`p_ojP|p11Z1$J0AxBp1S?`{(9#-f3V*A*0&+Qy>TfX&@yp100E!`_7sepQu4>tNnIk)01`VWRN4)ez|kDT%&&c0#%ixn^`hZZ*VyKun28^^={-doT5 zPNywlWdm0Pv==n8Y*-)@>YaS^697(h8K9vugsf^CW4n2;&CPeBfvemM+UcaDN?A7% z76*hK{2j!m*s1jV^A~CkJ+Y(ct~+n9!$)@4+VUnaNCB;fMtbY?^Uu~7Kl|6fXpZ=i zLo@Ztm)%jf-gcCB_Es&B-pMXD-YrXOP&GU8edXoaouQk?jL+Jl@4I&Hp@)=cXFGh? zy$V~q`oinRmv%=m@*h}Xq-h%M+xOENZ1b_6_puNA<)F#t`VO)`RsDlB?i`s#2rah_ zJ)zs-kQ;rOz=20PoHy=Uc;w7@J`-$1b8c~|o`3H7diweEwXxGz*LsF(&ymA*b$zv- zyKt_qQ-Ac24CQo{`NHLkBt-5JsF}%`d|JhCmJRLNs+Ydxc)jA0`^d`G+>6*2@}WG| zox8C8*PzdJ>MU3M3=H_a`q3H~oxs-aui2wF)$EazHM;M3^-S&~-!V%Tdv1qmVWW#W zau>EN^m8RYw)Oh9I|xxjS-)FHM%UUkx);B~viq}<#((58ldT}Nr1vzh)Pd41^s>Q1 zn%vv)z&5%Szjdc4W!BJLckmjWc8gw5D$i!?w3XHadAF5GjefSpPAKaN#Ze+ge-VPL zl_wLD>*}WSZL;bb7h25OM{~W8CDJNyM-DjL=>Nnc0>)K~L#^SFzAz^69 zPXqPT7$>S-QzO^QxdMnyO-v8h%--GgbHDscHGcHYI{U;^^<8YTI|^LAd<8pwTa6Bn z;X`fnd^Me%c2H-qi?T1fyUqM!CPuj8%gLZuFU^NnBiM&=eGKC6!3Vhc;NIFdJ6@B7 z_d@Ss0N7Cq^}~uy`0m#8%7qgET5C47I_jSzx}9euDp%rfeW0vy z;dPhx+RDgE2Dh-E&T69E(l3LD_(_(<4(IH+0G_QAMqkg&gy%oyzJr(zc=BxRDW@;6!{K!Nl{zm7Dm<=s|3xdMy2U&Hx8;8fefc`0$h0)@LIl{q??g|4^N~bRJ$W zp(@BWk*&{SFFSbzjx-nkjWF{g9`@Y{20Po+}UCo;^?F`>_tb{c55?Q-Jo4Io#ky|f%5{OMof;Bjr+8NwAF<94Ra66(DLi%(bC+g zD|pfEh{++}g$Mt_U-C~d(Hjk0{=VzN=-yLeuHF368+DB~mpi2mP7KzCvzP0Rd+)Au zr_R<#|M*X8g7(eC*la%eA#5#YYddM98f*_Ib~XIvqn<&hoSnKUeAKsqccB?cvjBph z^z_ICDc-$M9t%YL4lhMIlPQ%)4mZ=+)L}&>BQ;Km(#qHBm5I+>hb*f z^8cTs;VZDt9XNp-;1Ku7a`UGN;~E|H=e_SbyvPI`AjqE;RHtqmZigvaV0h|rHZt+ZzTL7yy>_{qoc&~x!ZqYrpXSu6GG$?*wSRj z$%V+p3T@f1ef>N29qLir#Yc~ypglD~9k~Di@UTqywNA$Ex3d}hzqYts$BrJUL$v!A ziMNZg4&i6pZg8MiBFYb>0b6YW+GP3Q0w(s4$XCmTG>S0y^4_-%&o=qMGU$uozw}wh zuGHb1_SD`(({=I6S?ZLttGSHx*zDru7JnYtE)9Jx~Km16Mt1- z`_B0~e)vfJ@DIJG?!NCK(p9amEM}31xVkEC+jdiGZu5a|{BypZk*<7~V_WGe)VdWzosr*{RxIU#Qo;=8^j0AANU0Fmu7DTcl_knYcJi`)~(& z-i2>z=lj;i8X3`!)1RcCcKnH&2%pbMhwJ1AKj2IF51wsMRGVP^-E`0t2HFl$tvr)7F3(G~SqOKrcAePR+YbpVAFg7u>1zZN~PE#Kgp#y%={(X?H(xwcAMVI5qrXAs|A z<@{Fva6a2KK>fL1>-brl*njP+?Mf#dN>8Dwds^a6+87S(>&I@byTg?FJyZwwAI)Hw z{`Iz#S~*2!bbPEvCMNT|v1+?_7k=PeEnZnjn{aAo22pUa6|mMQ)I<3@1$WU1$}K} z3fIZ9Uh|GLT|4U0>@7Eu-LRq29ou30W@F(8c^VeGVk8dBV>`~tCdu4?{ADnd_y!I1 zX&?3DDDyQJt6TuOKt;dHr!M`TzxA}M_ob8Wi}p;Lu&}gLiz^$Mgy9YW0#)ua@d|lx z*DVKb(}zjf<%?~O3`j~FWI?RuGAR`%3PuiwgS$2$_Ox@lV8op*95m1LCw)Bhuq*%B zJx#b?b8da#HH`zVX|rupDs;&{raU`=+q@~KaWg5xG{7e?a&DW6gycbd-kWc>Ue6#@ z2lvjY{O1Zko2T=_J5P5Uusvs0C(o^?)IoK_{KI+le0_1Hj@)#(P8>g4E7%1mlc#SA zOca9bu)$Rxw|dfWuGmBe{N=BBTW=~)ZH2s=Bz>)ST74@$V-o;Oa&Wf(Z}v<(tNi&O zyd3a|K*E9vJXVTK$B!s&BAj`6RjgWj;6~kUWW$k32kv{oSDPaQ5C4m|wn!Cz!Q)nL+b@DHX$^iJkZ5lU7#bY z8=q7c@)U0;f&oLH+0S8W=i1=CeaS9tbBtoSjvl+Y{nv2$wMSw=UIMxU8NlA zfo+ zc}yFdOhnIg)hVlBm=prA{nauyapS+KOLB!!>Vwz}yjt_9Yg 
zg?E{B!+pvrtYoRoVN;=1+DH7MLCL2{fw(k1`1enFZ69gcpW&&`tt`Cm=%{5jpKf{N z2a+OSeM0DL9N#D^{glA=_3LMQ-|}di03d;Mq8;#;kyZ*aQo|8m!9;YCcQO>TBTX7;UnwX& zN!M_YgCHQa`_5rOBP1NYOQVCIKok;B4auM!todQ0u0d3;hV;&ZuwOD7Oycmy27FQO1CDImcdZsbrdp$ zNswh%ffN*B@xPOIz~)~&OxCS5&DZ#GVtCF3hEC@$80HL#!wZZ)4~l2QSEJwPVbvQt za4#>zXPisvqv9#synqjVI8>Xpyta&CovJs!<%etU;d>ekm0!9WwMEZOM4xL{@mu+J z)e;Su>lpUm{H@=pFMjc{Y@0PTY3Hg2x~KNiXmdO8*S+pFIHlJ!NZ8%>-U>J~T={t) z<3rxkvzveSe=-l9I?tWk&Qs^;K6&*tQs;Q_t7vNj(8}`Sm%h=7<9%q^&9f1r8OW9< zq7b~~leBR_QtW$c&z{{>y1QsxwQs4-&(Uzh>CX(Nq)9TMr;7m+eTlGu%%ttkAHr0(O9PKLW*`d}Tu`@TjClvn%m z+;gYut6%hbS<8(H3{*~#e|8y@Y@0S5eU0HmeowwEl58hk9{15*@{low0SL(HId|f^A@(1fPfAzU~>e=U^zc$e_ z*|`PKuY z!C~^BN~P3GgW=r8x%$#$->(1fZ~eQv#`{Med9Z%y$9}3#9Jz@&J9Y8O)pSTjUg4P) zrDn|u6n^_Y3yT>j6 z2WoiG?VtTd@@C}T&fKN>1@vNf-EsTvbM8`Jj`7(>BCip`>iOrL4}Ft5$V%uWhZ?cOU7B{1wj6y_jlB$$mX8_Ck{&~AN%?0*S@x9_s-VH^hos&_1C^bd+WpGKHC(nXd6Y)A@eh@YGOE z?wP7#_*q#OS+`-8W9QEhsuGS^!(`~A3p`VkJGdRwDwYKDpahzxx@@{nu zBQw#<0q4Fx>J{iS*v}P%-XHM*}@g3_O&^E86=GW@Gj}$r#Bw(pw%~~Z&Xkp z)#Dx<=;Y%gHb`4ZEb#2@3?7bVFwtx3SZI|-Ed}!0CNnvxrH+`=l|IUhoyx8*8!NXp zy>Q`j1~;|CCvUx}ZoBnJt*>sC-%58i^~m^8oq6tbed-e*?G^*M7^_Dgxub5o{V4id zHFxP^?DZgWBi$Pw>RKynXvN_*4s&p8XB=H=%Rk>Y`iQWa)WVIIw4H0<;Yj2@X@wu` zxaS{re%tO|Zp-u_5!&RoleYPQ#I>=14LyLwTPr&?F|ivx8_fiOU9N<3GN4K)d@D4K zjgRRW@WxX?2?SJrpXbLzDHM~>I&D;Mk3#dFm=Jea|M zU4!pE^HlAiGd5tg8+&%|tEutfTBB~99Nkr~e8t`9`4Q@oO zz3_T;x~BFWt=)%iu5s$~fk_&hL%XRvoN#2_r0=68$No~zY`ePA>PiRu3!Bm^Fp8(0 z(n*kz6f`{G&Oy@f0-l?D`v$XNBoeOw(STq80-W4US^B|J|&{a=6ovdU1)Qu~g>}+xHBMnZF10wW6{+(zg%R8O9`a^EH z%}oYT9B{Kk)Lo05;JC85RA2tWm+QBF^MjdS`s8y61z ze_ee3+4}C2Pt;ZXIeofA$BxyZBgZJS6Qnk3amfyM%0Rvw_z(GpG;*U(Zf~#i?Dt}q zREPL1L(pb=bhvJz9(>zt9;sVyI#N4JOSN`&t_HBHYd8;ca7+)=j#JYOXmh9sdf_QS zu(a`5p2}nUZgA8zU-v(7Uw!@SU$0Mo>6@u9g~wlPn3E`#vFHzB zO@lbR3Gh6<=*0^`5y|e6_b)^;f1i{Am86ROHeXF;c+}cU# zy+qcgxm$ySPj)D|t?C+t9331&PF`1T8@{=|kk16!aPLRYvvZKRaOhuVMWhelm92US zvkV3(hiNAhquEx`1qaHO&V|%zmeO=WY4)^xwDzfnFb|xTzYZTdf_)#T-~I6Kqf@jW@Tar`PB_V~FW9X# zkhPQ0&^%jxXy(e7m>~e-7&#-rNAU<>#G`}bHMNl-qIk$F@w3Crlo`keX1CmOLVg%C zv9ZIxkIWeox_HBB8f7*(h@*009N&|c)7QRh&^-PBe|Ef(cTN&_a)b}eD?FT1wu#giN+^q>&g{sIcezNftf=Czw>XO%Et0q?ccwzP98gv zPZRE46d4TMl4{HhHfU@Z1ltdho%A zFG%jXEE{E>%k;BrmfYrh3Kh%0_eU_h3^p>gDAsFoJDs4GV6fBDT1 z*QKjY?7pe~>97834USEcC%m?vxb99ey|8hUd+ir|z>aPrcgjrjZAPe1`f7P)o$~Ig z-}>N(>df;`)$r&6d>Z&jIrG`c;cK;LvbX-hFa2EYz4eZ&#S64C{XRYIH((A;P9WGl zwX^0zpLL3DTh|qjmd&~sAH(Vpd2{gJ0Yuwb>RxvTs@zk?^fgIn?W1zVnZ;%9rU2~& zn>39NrXdmZFURCwPT{$-e`I_- zt5p5$g{W_AEYuGE&Rt}ZXc zpVoiT54Bymi7?rI+uGiUkEqRo!r*S0K7o_GX$wUj^uZeqgTFk9ayOeO&Gjji9qR`9 zCA~bPS@&#m@6mm)jn?K#f639(cnCARCRuDL`X(=v2Q1x22jNm=XaJiht!;PZyZzvn z0XpQwNl<=HR6qfbtZUy+pSw`sefpWy-L{PkTh-<|abR;T1B2Ps0QzR2R=FfEWyy-Z?jC1KZC>+H~Cci?>Sx|Hxf3=0V1!S#N+x9j%MDL^#*9@eZjxzDbd!i~&0CX{hIM^?gNecX0TdOa$&F9zT zu^ETA_HjD-T3bmx#K(HoN%(!#+3VC5)6)|*y?ZwD;3sOOrwp|S?Sv=5*OofwVV$O| z!-AB*dq|^GHgH`9?L;z^1~p@FF7OUIrEUT-FuJtUjFjjWQUBn@1DMG{%4af zgUpye&KJv+M|20Cn)XKY9_z{uEE5rKo1={xv^4VGSL);U;p{VKIjk{ZC%d#dp?snWVx)`k>PHW}fc; z1g%J$$pSzTO?^Xm4-iIn8U@GhgaVR_Pn!S$gAPTwM4HH6+6m*#S>^RT*mVt7-gDPx zSQsKhMPFvOf=C<-?D2vy>OdIo;9V=UF)fVYb|~7Z5n;WN#?xm`(sdhYhG%97`ACEo zCs=VPwv8!W0c0e5+j|f5$hBo7Sl)N@Ab`L2+=-X?l#2u{wBg?& z4FTX_8Wbn9LKI}4+u1E=CrvjSj_$pDQtZrA)OWdq0{7B72k}%jm7pIXH9lFj_$sRp zz9yc!*cEk?n|HQHQhWfeun0{-^}A7{lY8pDKlgtDe>?3Po~^Oh(t}va)@6Ztc!bB` zbN1{R9HyQ6wg2p&WP3MP`45k_neKgqL$zo3OugpOSJyo+xvLhJR%&5kuBEpIA_E(J zD&!4g$3eBw@Q=uQ2e1T)PzQm|ZRe?TeE(C6)Hyn@!ZXPwNQsrBo334TfK$+poVW1C zM-PpRbh0vS_nzVk-3hmm>GqR@RN^`fSzT|ZI52Inm?oX^#P^-fgIkh0NH{z)REG}l zr$I9glpY$5SL@QHOZmoswk^jf+o|I0PWkRPRh{JFL;z<%1a?OXrNDbeBD 
zj?uC#Z99C5No{WhqEn>4+VQZwvXpPvE-o&@|K6IJ-B$;W-Bd^Jq(K=2c&*N#KVKIw zUaqH}_-_6EU;gFf<&jksq>-mK@ch=f-$IMW!C}SRtzbI!QD~X?5nvr-%1GD{jpCO- z^g#XayWd$~{oI%8Z$JAP?8s(128L7q-kO_pCaZ(W0~u6v=Y*^pgHC=o*z&{?qOdlo zZIEnH@x)1nKROKQ5FpLypkyzPrdz}@2Gda>&_^^P?|tu&)T6I@v<@BGUn^@%_4L!< z%NE0~Hu>D&{&n@}h{1np5^YA_`wIOH4Gcr1!$q35FrMZP{h5f0&`E32^Uwx@=zxB3y&yT&g9(?&L(?NRf^2NG% z^-^agxfLZ9QydQt96twd{7$~#Z1y|PI(Bi=p}iek4zRmZNWOImj8z=4m1PG=c&1#= zt{o)JAoAiih6nZ^sGE)+ts@5y*1>&yYlMa}5{pbOWR?8cGv{$gC+qmJV|B-Ex50nP zPPy#laFWC@Wp-jx9D>Lq4NV$C=B*Tj;eXfNchnEO?BP0j`zA46h2N~-{?>cV1I|F$7}Dw{k7-do|?i2&ym+Mj(H9A*EJmAvrBWe2@`E-xLw@r z?gQ9>rhEGj9fN-3H8M6@BM!2mD!Qbj!@#vuR_8qp3%5+|!v!q`*tA{A}Rim?9S9zO8K9`NZMw3Ec?*eiEe(Hn#d$AZ>drcz}E< z|8^Gp2?_JHV`z)UjRTQ`{UgZyMqRo%SI6FOemk95c8nf4JXNoL_^!I`&YR%(cCB2Q3+)EcJHL_IDmZWtP96<6X^_Su_=Q%6 zcA`kZ?wxww8j>De7~g+{;v_b`w!0ge|KU|z@zP~Lyottw8#!g7fX;>64ch6ngFiyH zQxp5p;eooo4j=3Yg^}uqN9tH^yLD%C;5fD6u|?vqzETg1A0bY7^85D}FI}l8o_M@2 zF4*CUOpj00#J&Ud%-K_QVd-j(O-)uWdaP7yUoaHWCwVwCFXtAkvdt*+R>)<=Yr=veYk+6?q-<-ST(#E*JfM~Or_4zL>7egn8h~C0rkx`0gpx)$b^VT8ZmzF( zeo-6k4{IDLohRw^4jmc7_731nK|^=r&{oTF>ne9GQqM+4M`FkIH9u?{ot^Qf9oVlcOcT|ke8$2l27nJvms<;sLw49hidoCOda06w~p@LTPLya z4?S{kJ@Cqh^O=|9#}Czky;HS+_f%}>raMPLSywK&f=|CQehl`+`dqs;WZM8bjtmXd zUg}GC$=S2p$#i3N{J@@i_*JjKruEcs{PyqFr@q@XledI_>%W#|4&+UyMgm zMF+XAMt62X_m@5LF!o`J`fRBd^4W(Sd@oni5eCXhBG58lS;?+6QVUx}c?h{Y=o%CGOK8+KYg5Ee0 zGXqyBj<_{{P+M*}JLS3VAgWIxVymxGbl){PBp&Z2OD3AWh$gMG%VNNbPEzBgmmmR3 zo^ixQxJK}_?1m$A!s2E4X1@PBM}ygwvreD~_rkU4s9*YO^UZZ`64uc{RYLlBX3`@- zn%t_-4W_Wg!1_wu#Uw?HoEnSx0O}6NXCPM?b+(M)5KJ(7W;u(Y_u=%jUv}B#W780bBo%&h- z{LF>Rb?Nfe8lXN>C&XnfZF4`FxKV2>>(mvreW+Jg*SKG$F2*jbZ8&(iS__M`4|u;~ zJIVL7n;eX^ovuH+jXm)*I{B2~mb<$-QD6rc)D48&rgOlo+n45mk@C{|UF_VJeA0-r zQlC2cJNMEpw4u#I3MYPqXUJCM8RkWt<)0O|g*QG&Xq$WcL%f=|eNJ|8^4vqa2mfPn zd9yzHi9f+-*o8lPyoSdnYk_)H+?9WQ4l5_|bwc|VWgmr(x7~EImKPStcQg6>PJ2qL z7JYX&19L?F@gK9837o@XbwYU24MHrCvM7^NfX2(%UptZQgXnXCz)B5_?W$YuI#Cxc zox`5v6Z(#{>F+cPYUY$x+0Ak_+r^vI)8p9rT{VBzPXn#iAAIynwToiici-LhlOOmc zaA<9w?3Uyr1Yl-WxIY~bNatiR7Y^wDx@&~}p;s>ohI86(*2RQNKlznDMdd8w!9B3L1Jv0LSE_4Bypz!31t)MF^l?CdQ20q4<>dMR zd6*9e&+5LDu=TAR2<@RN)nD{83#0gLqod>a#RI`ZJ-DV{PX6ZXLL?_*i-Qx3tb12i zHZ!4}2g-|Xr+>jB#IlgQFAy5vF<7usyafAw9Dbp`sXFSoY5k{vyjRX=~|j*+$; zEy&vHVkUg+ySU?2`b)3@gM%aKk6T||ArB-6oUNzS59^WiYN&_;+_R^h{lI z(22Zvxhv_ma>Ko=^o50-)iu)aY}+QETO;4V0IoXZ0+NKGH$EGe%y>3k%i9SANt1g{ zZ8B*xx#s$dCKjkOPBZ|P_bwXCDp~T*q-^<!b*G96{q z$LHWySV5P6(ObgPo)JjPHfb%V=2Ch)@h}7Q@K}G+GLlo$do-BXFyMst5<^_^X>8*T z&~{SS#WDUTbka`|5B)R;_FJDkZAEO^Kqf}D-9GdQf@j)X+8rlR(N;!YT;XrurITgd zB~*K5n(RKOkD;6eKW&hBK@)zFD>8P#Se|Hi^<831^el+4uaQaC=y8()>lyi^{uXW5 zQS5EiAN}bUNkbm$WiJ&RbTbzUUE3f#dEiJ5PRv+IW>TS_U$jp#pY@U)%9-?&Kguk) zU@B{fy!y75s(*W>P9EJ49;?+y+ill2J_b4!7NkU^Q7?ewvz4N^t*s^OhI@(e8r;Mh_;6Jb%o~>SyW7f%dr@T_`rKz@( zH`ohzJ9qc-OezXEIBa#QTDziOj^{H?rUkBcQaR9ers&F*Z1S0R7F)at)b|UmMA%5= z;UShjtIIHzxSjSYf0>Npy{}+|DV!XT*Rd_qozrxkd;}w`9dOBjKBqcvT(|WX9}#Oj zYGED^7j_;MIpefFV1i5p;E7 zzHvrGc_+FllEUjLS_CyYu_zvmm_9{d`3Xx$%)$C7u{j$jSKoVzyhsVfJKu|)N<^#( zNXx-%wss8;8ZS95YzrZ*AQV_>kR+M$TX?YRz)JTr0KX1 zPsG8Pz!PUHB*R-HR-@i{Ctj`WP4Uto%@63J9C!|-6W9Epk+ejva@~amJb&d%J^a9{ z>z)VSO)4u>fD4yQLYYqf{A<_;hjwQ08yZ}`aIP+$zgYkMzxp4|xyD9EYiw+aTpd{I zuf4nX)Q|u8k7tW62eNgxO)pL{9AwmFD1_<2_fZU5SPsMToSPfBou|%0p!g~;IlOvO zraa1_ZX5x4_{`gG5}uvu-STgAR%c@lsvBdO0iit!!jdk9UE;Eod zfJNhGx@Knf)y)1w)jv!ni-T*2XSRF4yj1_}zxrqO_~YLtTsnaev~P}we8jo8L54G% z?+qD;B4m3oZ{`AlOye^!5u4X1jYkXul z>{I5aC#I601FG5%Cs;rcVBr+UIk#cmw)%HM!~FbwaB_D4+R8#4W7>Olkh~sw^cD5~ 
zU;Ksok^kr)*8^{SbIsgzqG}kt!LL@9AT~yshV!P4XNu||omSflcw}s(`f*q+m~SR%iwvnmKUyOcKhtqM1AeCFXmgl14C{0S+mRJ#)B*76wlQ(7HA+M9cM3|uAh3} zyK7;2t+v)zYJ!HSw2;?OxTdD2vr~h+6s*ASr=CAm|MGwQ*Y)_*XX>uIZ>=AG|NH8; zd+v%{tspz|3yU~oYqqySdsWP>Xry^h&rtOZ_t((qP&td#t)iNJlik{(7#;qao-`*2 zJ;^(9=gM<;2JWuj)&Z+s=T%LPPa=oo`9|`XTk~0d{;kjmxBSDygLU6a?y0-)zAJ-W z7tWoBH!HD~X-tsmjqTaEw;s!iR>E{>V=jPW-VTaw!dE8{==ipdh(7dUD-Nu*6E9bQ zyUmn3vW^TXgInth=~(#M*S}f&4jrloU;2`|JU3T<%X)2XrFKvIUBT@PETt0wxY~p- zeB~>3`Ruv6@1eWv?QeZ+oxJ^|M5;Y|_tfO%H29^!8Q9Wh^qL->UPaFxq|awM2GJ>K zHi#S!j1O1e$WV<=Pu1k!+1d*(4sNbfzUMDptY^+$s0|9e2o1-F$7*KJ9teP~+SsWB z#C`cA57z?^-cv7o=)pRE>`)y&j7^#tKz3Z64(&Ep(1YdbMKA0~+E`zv;f-BK7qT5a zb#Fg>bwJPQDxig$w2l-zkenDG4?U!LYnb2!Y2*4^W2V{Ju#114$Rujv+H=DiZ?%aD z)z9JTllHNNBLq6=UX?Y&Ia0p@mmv6=x9zm?YR7<1BJUjd++3$&jjU+59Qe1x$@(nY zmtohm=jwuGNn@`UeW2mgzFn!K+$L6S!~3z(iE3NRZX>^I^~94;WL4DRgZt~|XA^MCW1x=20iarnqgz2@ON(9IL*$+cR(GM5giVHgdmePQXy<1J62Zr>7>d zonzRy?bXHFw`Ztc_2~UIKH3lc?f7ve3H1&-*grHu-F>k39K5Bb_T5~AGe@dMC{Lf; z{5SvB8N9B5cCtpFWU+1m29icmbQpmK4cnlI9ofhiVcOXcP;VbSYb#f<_nherLB29j z?<4?i9=6WOa@*8 zIBWP4Jm;i7%{y>uyKeN*xQ24^Y2R zr;%b9y*_l${k3;|1i$xEJ@e$L`tEn1tPSc%I|{E-ho3t2e7-MelWoXyUR$kx=sYq= zy*oNs`=`h2-dm2;s~>qOw)Q3U&;$3@iQA9Y^vpy}9+hmk1mI@3#6o@2?ljxn z+It=*AS{vnYgdn8>!5=^331X1koQE;{uqzb!4N+IvjU&p;(G@BarDrEy8YH$XzZ`l zr3)8RXUG%FVrO>Rwt(`?D(7wom6-+s?P2Q>Fwcf>ySEK$dAmihJnhhzYRD%nWkSSPfgmWF@fPxp{shNV+Oxag62zc@=$B(3=x z-?)M0yBA&CEpUYOeQ|Q(kUuvbpk22ta-i6lTRXg-YEMwbBzm=)m=Mr3!vv8;;vrrQhy3Ix3DfQ%pOzR@HSg?r*-UJf8$xHMN+t}f=@?^-+fpLQrd zu@h|_(9dLh+FtI!mlesbIHn%fAGE#dckx}d>F3&9<$pRw#tck4*p0RFPY?^?1gSkPAsRSlU6-_*sAOVFi2UfXC06rLI;k;6Rx@m zDN}z$&V3Mw0oMP@d}LYK_ZgqjNg>$5MSR|!8mQm-(C=pO<-pMswR`{Jx_s#hem1@o znQY^KJDD56@pHB*>j?Gpy?5M|&)2y@mDsz3jw=T3<4j#o*)*N`9GAA=a*G$I<&^in zG02_JTKz$YFS(7*p)WiO$yuSL?=W2*Ls3oQ&>d%fXRB^1`ojpb(nb zKGJulG}-ppx?*!fJJ?%SFVnt!?3?xHfBBanFq8r7)s=O8A9ZJ^W@xAV#1Fl?ZoU7m zs^zP7ZDk>Ti9VkEl=s~Uqt<5@J^e3G!qs@T%WyV4pvEKksylwI9)LU2Aso3@Mtnl7U8d;Y7)gYrw8 zAD?a1@BQ}=!Xx}h;%mcKX;){01F(1CuY>CPjOin$ux|6ew6K&ux$e$ZE*cn}0N%Kh zy0!l_@tW|A{^zzwTh=z?8b9E!)5^ck*p$u1z!^FD#pNNhbfGH`ZUNcc4X+Iczd` z%LV-h>p*o+eG)JEOzg%(#WmO<_CE7cSyNThH?=8@W)ZG>TgPzY zVusLJVW4g5z@n2J`UXb`Yxdm14Q-U|WyqPn$lPzyM#e7MF3w;EJm0Xd4SL$2DX(l} zT(j+Bn}u^nS;f{lXy3aFKHBfTt^BUgPTWj5jT7k2m2}GUF5o!m?$-W(0)qqzcd*@d zKR^jX=8=im5NvAg5{CU|oW!*aA+K$_SM9`mdY z0fW44iFJW~>2yE7fR1Yg55FweVge)@ZozJnI!u`I2;=3(9~uHyvFr?5=f z*@(CXblW7cvjVaH$!X+``UZw;YHA8m8mtfh_QwkIKvc_TVi#PAE9vlC z24@e{-or;~*U(rkuWgk*G7vC+nr&KXlL;97^kYI7Y=jf$hX?VO<}cN4Cl1!w0A^z4 zT#fW%Yp^AtD0-o1H@}ptaY{3o=d^>mC-0mn(}b$oe03mlkZUVL(phk^GHLF8)JA%A zbh~pl92zdNsi_EI@Za|hEcd>Moe&pcw`+W3B8isoykxRh-V4eQUMa)!h%*yi^Sa5A zlfQ&zR)%L0moSAL`0zWpwlv8X0{Yx+gZ*6M#}jtZ$pB(4H@I z3S6M+rvqZ=@GrA-0Aa#vpHSik{#jRLF$nK7QDA51Z0}oNb^HVH;*iKmiJq>J$$uHe zg?C|E3rbkRBT%AdI04>}@eKhnh+ChVaJ7cKKn#ObiqF& zr=n0X6()`-u}W)sH2Q!E46Z77F!z~CBhHr{w$R>{S63ER>fP^qf9*SXH^PI{R2bIa zY$^TE8AMw!U~=z~!DAeTi|5bP-+uOQ>rejdlL?=hnT;c=!5tkPshdt5tDkxQPuG&) zLS1m{E1WdtMM4Rh+u+mNOLo#($#!mUZD$qS=H_ZTOyby6Mmswv zCMVOmu)=#Sk+4!RaG~7#*vA`1QT-JW=2M&bMo0b%F9z*#qCn6@J%zZhon*LMyk2cXh-9v|QwL zKsJV%JhwKkgO3vfMr+U&&65)~xqEl**?*uOeA&zD{XhHD^|OEf@7A0D&d=4ouYW^L z9-OZ0>og7*F4r38hUJ~RQtQy;>gDq|&MTCu(c6v|cN|!lzl{ED)%x;kT}8&{pwEI^ z1QUPl%H>+Rc)G5hezvZheXbTSo~?_gpRY@2PSw)l)jIS1nG717K6e`YuV!Mxu3fVW1j4bqpr;s@$0U;A1;f9hOh#%%!c5j%PW9ifve1M|&9#M+8#V_WBJt!};fCY-j3 zR2rM`+zvNqdak3pD>ODQ(HMUG+3(f={K4O-E%^7Ax4x!+{6F|WP0#MjH|gh==V_>6 zr;t4-^vF}U!`A_ATOKxN)=vOzy7L4LsMVE?@IX3gxpbtW*YGy!iSIWBn_a~XShqpd zDz!NEG(dG2jjMCpzE=ef02)TGrY0uPpNSeB8A$`tT}95EI$K*b;Eo?TT({qTC$zD1 z3_6j+Kx`%XLm%DjvcWv)W>+ 
z<{F!v%AnTP_5S+yx4%(OJ^6Iq|G-@}4*lnl%gO1{T3ueOvuDrdQwG-uuGN>m`sEC) z|M>fVvL1Zk{_#0lU4ixt_tAI04`+9&TlwnYon54uc*y z8=%n?TY!A*ARi}gy`^6I&_mQU*X#7zXKQNDp1QWSUbo$PM;$wUyiVS9qW;0J{9=9J z7k{zd{?>QY?YG@pcieGnz4Rq_)O~l~U9WiL!FubP-dOK^%Ny!F?|56i^Q~{FA9(qL zb=&PHYIgTzjgAe~shW4&Zu+l*r#tyl9*%jHVG*-U# z?Qdtr*Wp74>*k|}s5AX0B>2EPcZG2mf~UUyczx@e-w3{Y_KwvLJa}u}dAE~9uGjKq zKdIyPlKKUdAP%zh;x;GcBP^bT0pE8>Q;%-AalLP4?H;CWNB7!=+r8&s>Spi3*~ui} z&^n9dn&JNE?C{kiPP3Ch(aLl-?cg&^?5)}~oHY7jIJoI}5Pm>t^Q3mWCL+AaePENO zn=f^M69BZ+eyZj>Pd53g;^zEYjG}a88+&!Ibhh2@PUt!0sx@X;O`YV^N)`g3g>g!+r za((E3`(SAidR)xQ1vko~>b8qZmD&(9N$pf6m9@fXIZ*C)pbcL1a5 zg8r$tM6&8TIbp{Y|HGuyUx^>DUD7wAL28Gwyt;g4F5+w_z-=dQtM~rQ&+>$vu5Q%w z{Bm}wnV6WtUO${~>$`&C`c5z9a>6Ax4u3;CFogadIdHI!9otuT-En)}ef!OI`-vm< z;7jkVyYIR^pLwyfdJVvnPmk5?)MV}5zqj`6nW;ku4%MFV(YpJlBX!?H_tyTKj#GEc*YEzpN9zwh|K-Tm zn6?>d*3RjND(IO>?hhH19_fHm6#oxSFPa*nCZ1PM>(9tHhn{h4YxhUQ*jV0spN2N| zm^#)=zU~CznT9nm)8>(n*iLCFV0qJp6=>=WM{CDw-?&eZ7|KC!U$A?f3kUF=eG_~d zj3n_A*vgN#PMQ$McZPF~ttU*Iq)a(E*WEn)ywdpiNIm-Um!sNib@`&7X4yeLM}wQI ziS*ZUVxDX(Xv?g_vxQ^OY5AosNoL(PUOHqvpp%F1zHOJ}Ls;kv=tei|A15C6*NNNijQ#(e-}${dbkqJixaTmr(dNMCaM0JC zd2C+^OZcuNwu9Dzh%SD@kI%t__!-`c)9`L}Tkev7+Wq2TRA^xN!k5&~JO@W!3WTKH zevicCzrmds-sJg47%{{pGC@9}M&6UJF*^FAe`FHnRHlUeFFs5YJmp(UpKE^lS<={3 z{ev%|K}W~Yeg}S%CNYuTIRjb31Kr+qBeswvS!O zqyMjOYjSDju67RoI+$qLowR$|PXN%i>cgi^yPG^;+@?^LY>zpp=+(*ZhNZ)gc=3Tj z&CW)5lX1}BEwMe9usPNl4o12I&HTbL&nvV=*K3(~tIKQDCr(tS-Jcc9*1`GnyJ%Zqdh{Wyx7~6ptU@;N zkMxcG{6YM5bLZZ?{bfR+xN$EE9TODfTi=p#JNO&uRl$fnz{6f_K@V*J+Tptfw(Hh= zjzjAuY{g=9I`*3UoxLa>+}$)2R5Ex9R3{Jse`9M`EiP`>M}Ge=YwqfL?Z$ukzz639bMP3M^z&E_d~D%U=r3Qoe7QdMiBHzE&z#0i$)_IL z!K1XZkXxMOllZ6q_#gcuxr6)WQrZ!jq-xQWbDlGJNxRSqtl^RE9Vd&fEmIa(O5?$s zfOocqEt9rYT8Ct|F(R)JL+m9XxVD~3_@Y~(EJz=J;H{3rB48&8@`qgGrwZ5!!1?Y! z^pKuI1AamjAjqYQO{_b2&=dKsO?NWsFl}o6$?NE$^5=?A{VykcxmsI!c2~mwKHA0b z#d=nMH#=U@4po1**LP}gxWC3G#%pq78dyzUwzp}op=a72?SO;z_Af0j<0o>p-z_@@ z@@@>@--Q&)-#UJPi%hmQ*T@%n$Ds7vro|6-(9XVw^cO(3U8Efz#1F+M-L&q(=dup7 zZw^K^yJMSvS6{Oi>LK`~k3<xKn#}7 zLW?}B0|XlGHHOGD!s5ljyT((ZbLBO*QTvyCiE8z!3|r1!@C>vmqXQgO>+so$wu(~^ zW%M(nwg;@MR&1v@!5I45*I}Dt3tQ%Zy_1m<=J40Hlb`hz0sx~x zT)+9JB%HSA#kuus+Mc%W2|z8(@)!n9dv@9PgpBE%^ddjf&3Dp1eGsPMedpQx#HU

dk`kD`gOkFI^v*<@W zWD9X}B5z46fBKy?LSE2XLT_)bra$`V(fxJbJ$F(^ERzSc41Rzny}pAmJi_!(+&*RwTa2vckp=ij84=7KE<`3_6aKCnnvFEU;GF4C!Can9u9*~5B1&J z{JFa0#KGD#)>m6gm#9bWgBL$!BIT1NU>um1$$Bl|f$UXFK@KV>Wp?7FX$%?Y<#WUd0PoVM8o~3(q$z+M(y)p;BJjb^CDBS@f#RAvzn8u&a z-gC{pr^OVW^5E;hZqHr6zR8s@U$}T&v%JzwQurfYFbpE?ls-<*mYjY*HvKg41C8dS z?%}7c%ubSAC95_w>4YQ9(A6A$Y&W8*{5!e8FlAJkZaNH1B5Y$1Gq|49dWuAGV4D5W zKAA=9gEmY%Bdi9;0s0o^d-O=#LRf5{@yVzAY)D~^! z6UO*H+Cxo&`6S<#x9?gy(>UfwTq3ql4qS5rz>)R|0KcOEGnz1~avWh$hlw0r-pOC! z=IVE?5&=g#GtgC71zP|RQRF=uk);4(+-K!JEX+)@2qQnOZzsGNOyh;4dEbnNI8h5Q3J2m*dtEjS#~3Ws{$#?JPSX@H)(}yKEC5W#57`X@bJd_ z(9l?x%NJJMDiJRus0KDVw~7QtTUJAJ<&?>RFB%4N&bRVj``XvmBQJZPE?%6k z#re4m6iQe5V+9h$;n!xbaUd%?UtyS;(J$V438`bH|XI&cplM z`mYn_RapK>g9KSY3#21sxOpn0_s(op8P$g_?Tzgq_Lzf1gZ*g;tggtmA z3#_lL)m7->%;sTN$vGJSr_Al3o%yeU9HMd)Cua%kXlQ)4UE#ms>VU;^8+G~MAb&)a ze;XK(b;8~1-ZAglAD>%e!T6hTwlf2vc7z#t9!{*rs| zr#|bYfwPd#h$p`Fc>U6U_$w)sN51dnOhKIY%+P{gZIIt`OJlEHhQRT3dLaXyhRY~r z9Hg<(bFH_=hQ}g1t}I--d=)yKE4Mmyb+!#LJ3G#wKUb$uorVWnwM!FRl~_TG6<_0H^vd2M^<>+4Ik z;~+{_0ZX4X8mes)hbwKHEo~jTo6BppvbvI4+cq+XX_RWEY@v zHI2^u*w}DnL){)2+^8>n@v-{JpL};sP0WTq4ivlMe}e|4t5W=?{b#@Q#roC%_ixqV zJu~&n*F0M9`HA<}%--2Lf8|15n!gJD{QjU5dK}EeJ|JU$-?kt5c0k<;38TZ~;J#6p zo$Ro@L^(T^o3@ZLNHgVF$4)rT?$;4lx7|j~Efbrp`dvIbVU=axk=Kn5NJ*Ny09{l6 zptpFrGQ@!$J8oCz7m3kRyLV64EjJyj>B%X|eI=b_+Iu@c{0^lndHg=3j+_kzHQR=z zjaVC`uE>zr8>{drxSPNEIk<#V+%ph8bmpF~wPC^<8Xtr9&H!A-&@R{2xy$v~H@;Ob zd)dqCrbGK`Y-+p~u3W9zp_y9WS*%Ze>M!f~u>zxaXLhrM)bI#>HFFD=(3xKGW_ zWCGDL@~%xE8N&`}1E8yod>b``18!S5o=zj32X_Zh9yU;1KjSetK9b!Y+3u? zIFJF$?|k!H@Su-E?KJ4fW3yc!=LsEHA`G=#p> zy|s6CxQ-v$Q}^C^qF(jN2kV`0d2PM%O|P$)-g8$S*gpfFJDJ$B>ZcAO6WSklTyV8F z4S9H*RXa3PT%loSiYt0ud1AZ_pxbClwSeJ5E?OPr>A$W`(e5@Ithr7jpXb(eHd^QE zGcZV*!$vz|1Wb4c&jOopng{$BKO9s7Tuq|QOk)#R(t2cMJX=q?(}lU)S=DMvWE{El z8@TD9rA~Z9J1R>~#6&x4&KIFJ7tx2M^FVKS=#Rdh#6{?yX^X zyS}=}b*TRMV;>7m_w634M<2PX?t1C%AioW-r0o{A(}ss~sBP`kNqGiEML2$6=e$9M zZanV|?_M$bxwiM+j!rU6*bApeyJkR&+pLIEul0ezH~JGkHTY7=B*urN9KZ(t);c^u zuSN%Fu=|t+$K0Pl<3lD{^NCOt(wJDd?K$;K>KfvuoZN768`ll$*e9QSwx0anGw1=b z**l!=|5u^^Q>UJ(9SClxqC00?x_k+Hu~?Jiqv4l}VJ6V)er|?pt={zd`;qs1(M#;< za9<6fr!#wx)Xd?NH8gWGb>tp!nM>+GWg)*^-e030kv8bEHcsQF%<3m3 zu0?Uh7xJcESLfT-;MDWTjRX7++Gns{d+z{0zQ)!%b+}szQy zBlsrNGw7G{y#dV?9XlP>OSfKj>t-jHJ^awa^|l}U$tH7%oP*}ti(d4&f8U`xe*0bZ zvil#ZlP6Bpb$o@zkCu(YD3L6TJM-Ei~4D#VtAK0q_ zY!FQ zWcPTTJbIuWxZ|dJ-NX0S>mRwlUi;|7_3Af1TK7Ku2yXPQ`lFBjS^Yo%;N$fSK9q;u zQmv7_Q$r)o=4w0iD_cjfdSiVJZp|ogXakT=Utut{O=&xEYCjW0wevk^o;kIht&_5I z@+^2?l3fljX>dc=ObWDa9iSdf-K;%RYwTp=>N}kP;aKkqkR$dhCF5E9o;=_crHuVE zm-zGABvZ6B$=v`E)o|rBzU9IXHLkb`$Brq|7}a#zHc4b1;sjSee>FDjCn$F6ZEt=f z&Ec&&f9?$W;0^%pRs}XH1=sitKyhHAtxQUzJ>~UC=Rn|-k9ARq+vt$~!q~$glA}w9 zPSM(P+AUfXXlU}}NHFEI$l-G*MH`$k4S8_QNqN&=qU`N#;VoS|lIBH+&-%skM&d|A z%Y<762-1ra$(13&bmLpz)<$SY8(O@(;nXR5H8}d|?g`>BcRkD=5k;slz>nZ)v)+uMWd~1{L&AUqn zuibkf$v@zzYgU`)rH|nPWJsv#H%Z#OAjE+L;o{qH?T050inKlmcuSZ#doRD5FXemR0fTtyfTg(I zxLgwd`}k*@!WX1V8`(~59$k$dO*AV1ZlujS@4IyDh8f7!Fyzk3_R3kyihB@gbTW*H z-pNnEbZN?sf5Ag`3=D!3H?Ej-FwT0;!N0Y2+7S45e(Elr_R!1EXt{!Cd3BX`{9w)Q zJ6IPl&1EvUw#&)th+9_o+o_iUO|G{0G7!pr+7sM6d4FrGCxeOZRM!UnwK47luv}}@ z(Y85V!JJP7kmoX2KPQl#0;o^dHkZ;~HT@>_xBWD(Xtuqut$!UIRUR8Xn(f8TMi-G; zoi<9}aDDV+e_Zod zmuq(a{@Q!+SY5hw!Nh?stxYVr0eG-Z*_>#3^O2isjJkW*mQaa;UdDcrF7n6ERa8!L zc1OWx|Fs|5QxPIA@0AB9*Q?*LrM@$7@`%la=hyM~Z2Mo^wNXb;9I9RY*J^EJA!Tw0 z0OeZanpKIwu>GG;01$5b-%i}+#r3*yex?4+2ftiL57YMAv#`Sn{?Se4Np?qsz)m1<5w1uxM|9h^hL)gJI|BMJ8 zDoc-kw|1rDi;~#>LPLlG&E1v%`h7p)KJA2?w^6;e2Ke zI^{DNw#CrNVSFHWOq#^Ev@gWnHMX(6+vptF#{aa<&qFOPFV|)KC_h!!t zw3m&y27GtU)Sv1DcgxsAyKS(4to7ePn+;@NpTN%m>XQzSPQ+Jsa=4jfbs;`zKXsJ; 
zr|n^BX+M@W-8wa&Y{X7@$eU1D7Nx%36f9U}s<>9*wsh7%$|o%CHP zP_}#zZMjczI)`D+Kh+v5UzCM12UAJPB{^7zls*4)F>O0qd8K>fW6K~i3q0%SWWjUj zW=q=;2D_tWbtt4W#}QbmGCc4VH-m zI{7-`qSdGRkORp(laAyu<=DsR@) zW(#5*Ev@n*IT~IrezITOd|_Skl#gvdztKwF_9peFy2+DvRK32wBg}#DFAF)O3-O$^ zrY|%)PFfe6&&!l4Pv>KNTus7ESssDP1YX7mDYiQczA(P>i&_C~JHi zPA9`zR{aY7n$f<^TAMpxd!`2J#J)-F%2ni`vvaA97jd=>Nu}&qFz`)?bR}F`N@rrE zP|{3U&?b}E*hxzy`4--ZpJh)P;lNi@n>=CA6WbBkoY6Ppb&uE%uO{HgfO+nXW%9h755lur z9~)%*Fa3AIhE(165GXbvtN)D;jyWywzU(&^PQ6lu^Hk{ zoNXrnyzP}oKY$@SB#N?*BDTLUj4+w#Er$lLHRd`23NC`v6ILl%+|!XTT5=^#4e|T)UkI^-p+jo`LDVonj82K%!ms^9qUqUAT#ZTLGo*apAf{BuBD9ny$-&G!TCP~X52$CzxBS56&sJL`F` zA#vc@Mv8V>{(GG%b=O^Y z)EnRYrh3U8x7F0-NKH&l)bZoTId7_|nHkR6nx2`?w&f>pzpeHiIY47+IE{ycM~>9) zL;LHNJ5JV1AAYFb^W#5OKlAtgUcK*^|8Bkh7v5j@zv(S?;I6xCVDA`2T}FuE$;x6a z&z<9Ojt1mhR+G(PrG@nOlNf8O8;x%?-r@-AEI~&buXZ%8Z!A$!T&wrI=SS-6U;T3X zGysiPS7q8UCu~>IW##euuoBUVcsm&ETE(ZFBq6gxoO|_Ay z>{lOO+h z9XfJ|GWFKcgNJMV`f7dlbDycVyy*?~*0;VL-eE^Cory`ZcCkFJEYRUbj0kKEGbfOw z(GWVhQpuV1*E8tnBorI}It1DY?Us#e2d$MjI|E(mDqnu^2Y;k4EnTfUZarDQ_Rs$F zdd-i&zjmR6o0l)wvrj%#i%Toi6RWk144gT8s-Ah~nR@=&)AijapRDJfJI(XydhYpi zT+h_$^B3ye#f!DPg6x8u+a=y|%dPeDhaaxDQNBlBdUxG?>=^bQ!$~;?2a#!T9~~LQ zU&G0xeDXxc%^B&fflHuH9oM)edRSgH97?$M!{?Y5{vjKA-(HhW>*9Tj6w|LPYR?TP zAYMz-nU{4Y&%{pyg$8_kuOo*|wBwemGH1t=tB#!rFg!X;9O?zqr>=l^HkKXS%!C=r zsXcck+4{zIZ0b<|2)a8QcshGu|HikdL+A3Di<2jgU`HmRH@m=L3SC;pE=`ixhky5X zGoZU?_i(-Jf!pie`<(!AoyN=6*!X-86*`J@$|e3Gp2f4&+8Fz8<`c-{3px+rs3Z17dbG>de*-a=d0Yu{+&N)vhh< z3ciYbG-(=-Tx}qxQ$heD5kOu!$ut=Ihz@tlqa9f1{NA(A*Y{4Jul~`g>KmD^;i;*b zTVAQt_%B-n)a65iHGoW>J97s6F^B$*MUULBadd#XeeYyVjaL2OyWT*(b8{xxOzhrM zqkE20pB$^|cQrS*2|QltXjguaC1lFsCH14NIssD<%^;h!qzu`8Cmma^Q0lI#cUP5l zbelSVZ7n;@xZ-|e1zji14$lq!fpp$le^|GZ&N}}^r-SYpsJAZbyiXi_L3anrZUR1& zr*M?bcokgYi4$D?cKXSJF7To@79Bj zzs;ZM>xaiQ^!l;0O$rjs4Y5o%JRqBYPg}Tmj4|H(C&MbO5H8Y0KGDQP!yDnck zU29t_=)RrDm*WFpK6jy>d*ZwG)YraMpZ=4-tdIZRAJt#{@n6;F{_OMh?JqxG-~P^1 z_3Zb~;}0&>$ka?tPVG)zvU~r5y8X_V)M0#~+wZ)qUiqp=>;6X`sk>f!Z_OUuQ`fOe zSC-c5$!AX0zx{U~u7CCGzfqri?o5qPO1pxrO(hZ}3$64Q?eJDtTfGBGyz=ZMs$2<$ zT+m1R$jN)C7+XWFmpXAEHq@FGA6ehUNAHEj+0yY=`bzfLi%vK(&0Buvdw1xwpWg9% zlZnIow0p2w@~%xNZF;UB={?Q_&kd${hU}?stF%$)PM=PF>;!=L71%91=JolcLu@pa zpB)Z%Of)+y3Bb)VcIf1hxZq{h?S0pd0tmddGih(~l(u2VUUzMCH(mF@D*3X#w|JB* z+~|ZEoY>tofvIQm0$u-o96h?vT|5*nCy*MDSJttKVhnAebheGq;CZOm!jb+jJ`Br* zYh^4xa!aE<5vK1QFmYF=wRQZA@sYYXcPRt5d-v>)9f-{018a-)?aab#7i$|YZqKk9n&=~wvM{-Xwfz`#qE1dQp|K(BRXB3Te-E!Wz%D3>brT4ItNcrr5 z|GuN;0k?=C-@Q*Bc_)g(Ycemlcn}f)zW)%ua^3lLg5a}ow3jWPUVK*d-^rL7-|&ZD z8}v7ha%?{Z&*CC)%$9%c9aoKrfW0Sfbkwkp?-tohdqCKxr~Wss_`oYgDeXq%<`i~= z->$?@bgtrutudTbH{$ury+5AQ&dR+fQALW=T}bq+{nS?M9KM_FJ?lH$;2T@$8+z^l zp8mY`k?j(7!-0Bt5F4MIsb}$zPoKRAE(5eR^jqXn(_zbSwa~+xO zu7FK@-1?pRH7jDZH>*zAr`|LB z?pk6VSt$wKZAU-=#8lbQUTIh4cZd(pl+Zyve*QVA=x&AawbYw1m9*MTgg?B2=7uWX z^4%>>o!Bslulpx|{;4|i+=UvMoUXn5j?(66TT};zxQGW=`+jZLTF)HWf2d~h1rm?C zx+ngj!vUNmk&ou*5pm$Tp(QR};k9jprsf9fqe(2YjS2B z9IwE)#;>$tl__PIbPi0kiT{K`v&BUx$&c2T9($_3_}J;%Gc#Sc-FjF3IRI{aN{MTi;JD%}x&? 
zyW902{)3;b-G?V@`^xFswY`=?xf@&T7_#UDyG#;?zxt5Z^$9Z==&lNI9y;{m$Hsn3 zJbf$6<>9kN6Pw#6qyk;q#j^72!L|~|@2j`^uGQBPRC^H+5=#*1QnuJP#5OnaQm5M1 z=Y9=cT6c#%{9RXg+i&WG|G|-w8phAU=}Yq<<%cjEE9<~@u>#ZrhI-rjg|)%gkagO` z;?B7+w~S5?)a+jTtLa(d57GX<9GkCC?0}JVh7x5YItNEzV^qDURqfW&z+3w`=Q~nq}@v21MPVDVI4O* zHknBS*%H_RbA3$@WKEx2y4t@cDYaqR=Fpet_9-WeNm)6fjnFyIM2XFd?L#-^CwIPU zXM2tx1nuNWR!a&<-O0Db8(lL{NL<7H=CO9n&sE4jC#JZN!kt81RJIGb=pPt`CxiHT zJ>ZD%?kl@wPIL5oi{eScR;QV_SDvFzK6g%@+ZDQlj{|6;ZTsD{ZK8&zB~LG;1MyZba&Mi|DVzPO z+G%xKS~-Dc8yn+S`3z3r z>1nv-jX&B+D}L4`P9BiX&2Ac3Tm{l-BtUWQ`a=mz%y#uKEdAZSBc8Bhr;=QpkY$ae z1HVbp_$Iv82hu_TLOkp|&q7Xq*tA^r-dvDHJtcpeZpja1sndtl)t5|&=;)Pxk%vXH zUi65sPyFuSw|z{ZH)v#%8;50$K#`z~ zt*plBl+BZWj_v6ChF*kjQ~nIvL&xrszE2Xlqp;+1k(YIcd>bAaPI=6KY;2-tc25L1 zd7&*(cI2()kR}dB#)k-MEf3q};^{BD2l$={@<#EI*5VpkQD$*nTwSZ#*#q^5fBcb} zTh^Kjf-J@RuEtHI zq~Rj^-oXc-gQ%cHHRUq1cILlf#A(GtkMsle6MK=u&2WWTpb-c%N#$laK*~-|9i*_lLsaAGtaunL+ww?K9|XV=0uZltvil+k z@Is~O#%F;FW%Hh28$c&c4+2J{7tT(36!cE{TYfJb#`C4P zTyw^1i%1v|&D+1u3|QEe{2OrK%B;j|Wos+oT5=-5ly;{Owi&(V->JOB#sB6T>a&yr$V=r!tMw$G-ZN`tXN-H{ZbByL&g~>!Xo; zt@h6Dt=&Ao>mBc?J^NmWljQ3s#pp~m4Mn-yT_v|zk z)MV}5v!{ls7}F_;!@9oa%H6BA!PP-tPieNXx(pv^_>jL9p7ijY-!8MmeRO1uhQ}~Q z%E4y`_3hlZq3+;44smFRQF$Ttr9JdLE)waw}u9?G}rZ#s5U9XWYtJ@u`}>%aML|3&@H-?lAfbprZ_ zXox}^x7uv0?WjJ16&vA6VdF@_UmY*?M>S9$cTZ1dOQC~%cGpk-*bmqL`R4l3_y0(}{R2N&Z+_bw z>dkL{L%r?2@2ofe-M?FJ_=%sYA9(k>>ZNaZV;#8ro*LY@ziMO>>|{8^*;+x|wI}E~ zbl-wT&LrMkrxCoiP#eq3$oB>fJIdk84rhX5!Cc!3p--`gn%nU>|pYhD>dvmHvh5F;aXmt$3eMX zBSV9=XV1QR)vI1vkA3-zb^6pL=!(9B+om=cr*p-D>+1uxjm$dGz6l;nD@*kgKlX#^ z{LvmPdarnhvgW7cbS7tC!-e*}))Q4o10B(^dXf z*kj}4k#jpblpi~&v{7=Hrlm!O;)!yR*BatbkkAuMM9k9;DjItOZ}Ez zcLiJ;{FK|3!32;{ekuzIO~ zn&YvJPORxs#7++Fva?_bld@Ak{r;b*lQ-X1*P+S9ixMMS88c_ zt`_I7l4iBmR+cll$?q$!E-qxNt=XyJy7$(Zdinjw>s@brS-thqH`dI^NS(QKuD_ zj#BdxB#19P2ty{bGC+d2D>5MkdBG!7bk(r@q#hS+DCD%9ni7 z1ljB#aCd|L=zzAv0am|XK0J(G@oZiC&2N6I&YizpDo^lvlOEt?%5mYIKWFyxUODVJfLBlX=2 zVd7!@_UyIEw`FUu?5Jti#DUJdG@<`EJ#5(XDm=jUqr+XDXbyjxNIY{|N#XdqGAOgn zT_wMD9gT!G{ZpW3$Fk)~^1v~D8gJl^D;%hXiK}$Tn&tMm~*Qm#^M;ptl@S(5v?Q`|kR1Hz| zANrxU*6eL})EK<)n}S!PN5Nu-b1?QK4eA|j9`t}8kt^{6O;=D_AbqWLZX=iJB(+md zTS47nBYShL`AM#T*#MWV6<0U4txKIGxUqpx;p%?uv2}&t!i{|=c@SDqZgB6x-lsDZd%l8=ps@A$*S=XF`i4R#)xi7i0ZU3Fy#Nn44jTw<@`cdpPizA|=1U&oFzebwlN?2-1^ zo30L8Cn01?Qgq!FNki1}2M_G2|NOuB=QT7oi=JWoE}gGuo_Vsq{pGJ@=a#|giCVvW zzUHYf2eA1A?g)Wg4NXns++8Q`x9WROJyYL%=2U&-Yme6-e&jFega7tR_0f-gy*~Eo zC+hQGKVO%hU9DBM%iGnRFV$f z8}88`yTt>vNepP!+Er*Rk4_}(MTqT;=!dp3sYFd(%>4lVfVM7M9|Bq1on25oiLYKp zhj|hQST^&2~5wPgGDI`Zsw89&aQAGUC~-t(S! 
zBCkF5JawrPh(@vXJ=6zwLP$tId16OH1~#FKofd5XS(*da&I8WmXVpTuWi{_sX5)aW z_AZ4JPfy~ICVr6#67v;K!g-+%c8~CrKT@GTpCDEfbiGSwfG6A9Ui`` zk4Ym0-!~fPOi1T|PO3oHH*TG5UO5SSfx<118;7_Bo-!hDksk-oDUB<$oo{L~qK0rmQ-gZ{)@-E_w~!)=@Tj`YNHRjm^M zYAHBL|aL|q#gox)CgMvj`m$1*t@s( zbL#8(d8G7b@{9HAhWjlTYe(u!d z>`?95x4RZs+5~`}toBq^Enm~!AOYO;0|M~E<#~L>-Sr25_?deC%t{@ku72ICUtO8$chn#Yvo6sb--wt}<>DwzWc694^YY*)Q3eL#IjytIFPLY!C_DM=pPZ^-^ zn@?idQL-%^XyZY%e7l=6x1BTbaq$QBPhH)w9ndEmnH;bFq2aowoj})>jmWfpPvqm^ zdM4ai2Kb%Mc=FNz)92k-UXR?)OitD8-hId}HXog{e@z)w$DJH7G&&mp(e3`PTwS1z zele^6C#g3^#>Z-AcDlyL#-g+CuIS_(X)G*)Xd8}U2gWJWVA_86zpO!PchztQD<=x| z4-VG~`t2lPt6V>k>V(1R>8YkK;O}GtbuR6FY+e>ykdKQmwDFP0Ccv7+*pn^$Rfv?j z#JW}(k$m~7#-P!*&ywELPSm%F|ms#NbtC#Sn z`*uOowOU(P#8&xf3UnSsw3;`*z1X(ZS|n1_pM%)?$#sv)Y=Dxfxj)#+^G{s57kTWxxKDl zy_|Qc!-1Q)oR&cnwmdjRJvL1_vyKHP>sP|E(j5fV?`~ahdaH+SS)3eaS{Kat9G^nF z#{1CpMp?k4-4X%-{&#&0Y3{u`tskcENCJ4!;P@QBgtBi_FZjBB#z}kCAd(>0hMxnw z6cV~Rp~iCg)@S>D+%dt$DVEJ$;`AxvTZGmPmU-H@X8p*k;1=3pTlFEdyXnXDU8~3f zKfZa!A&8%Jlt&oQIiC$74{eILny)Y&mq}Rx*67^qTi#J$O2fuKX$;M%vj(s;(pfxA z*Zi2lub8??o)b?lJijvz%I>KK5z?}Z1weehw+@fB?TuC03M z3EK3v1n>QHjte*JN7d$lV`QGB;CYP#vaNClR-dDz@}=2< z_{pTyS~LZaIP#H;LC`~c;G5Vy zP5`-nUDbq#%g=Kuu)i&!h*yn4To`B@1E@#qv=+kUbqbEP|fQp3K z-(mjYH23x*a0QmT7G$6wIHnCu!i>}K6;_j%hD)=D=BYg*;o07Iy!!A5VAP?;SaJ~u z05)fkXX3?>St{Rk;&@0*#573f;F?<$u4UW;O-lZ8<}&kwBxx*oQXCKCSm+q54&v=b zh(hXKJxtrK*T~Cj6L@1G;0Uc1O*%Qa@8Z~m+;rUo_%X1;=H8b^F^);Nib8NTT-)iAVoPzhnUh7mPgG8BY)dL>L{k5ch2r9hcl z824n@DorWHy$JYBjO!Y6>CsT+u!8h83IH~iHFPpgH(!es<6*-KM-#_wo98WFj87e` zpZmZ+;I-QrH(Z@HWf~R0#(W%h0_3kW&yVtJJT6?gP)qa6_32OlMg8Spe!6CN@2=UI zX$)`Mx_*xxxi_DvH^1?XsZ5@G{<(B!(^iPH*D5!_n{#N)59LkgocseH^J_6YJ^s(H zsnn$gnq{Lj4b#v|`s59`Yk-Ysm~yNFxhl;j2%%`0D?oK(?T9dZkjg9H;-^v_9qP~a z{;pu%Gc%qo|9f`PNLgE}`Ag^PEag0Z`g?Wx!r7Xi_nYo+K|hFG;8ajHF&!Qy4LHQO zbWUYZXTw>;>WdxGZmVnM)V308N}eqPQF>1+Yp-d@V@w@Hu_N9s&hs5X4I$}bAfbcz z0Eymq^Yq*%ji+J4QGBg{6~E1`Ygs)#NMmQ1^I!j)e^bBlTfa>yItP*5V&BgUbor1$ zb6|O-!iV1~ow%t#hMC5dttU<%uUn2DtAF-u|EzxS$KOu^Tb)FQo(NLpkjT|HGgVWE zkJJ=#M-LpS9vWhpWZN6H3!3ekoX8+UEnTWwzEs=G^T1fD&4tUgxj2`V{_9JNG$`h{ zF4UTzqo9G|E)DArOhapB!kLy zeKU@xGitLk+J;)3Tr~{}5vELTqW4yMnM8)}x~;AiahR(-*gZhQ;X2NpxjPtS+-$L= zLxcTrro59~kg?6PgnaR{gDG-*T!jhWB$FkYLD1AN27P1?~)S)A0kfboFY@ zPEFO%|HAvTa(CJ9jbCw<+7kTj$Jc50TM4nV!maP5v-MsYXV6M{afO4YgOeUMHr)lo z&jC0gB6y@++Uouq85srsFlk0=l!mwy29_7xjpceRa)0IQ#d_|P6GG<7Z`?bOx4N=i zSFvA<^YhpsR~|uI;A#hzYn@m-A+ib>nn=5qg@syKT&zXPb?L&nI)CPLoqFzkojG$6 zJ}lMHaDUx$;&8p?^{=isz4jHgclS&!EnN=0u~Eu@ZL@}<{a`;DXxYKf@8`$CRu;7- zWF&uL7tkrsNGVsHeWPYo6EsP^$aA*N^93P_n9>(G)~yC7UZ-)TT$%*1AseM;88^eQ&j`Y)&4R~`LMoLulvzqY*Qa#NLhCSn}JcG-AzP zUF3*^t}g)%+5k=G+TM1qYPR=EezZJcaZHgj^)-X1gk_}{bOB7eT8~Ib;dXxh5zyl3 z|AkM$3{J%Jg*H!lR);xZH$2CtY+?7dk%6IE@*Txix4d8*SJQV+8v`0h8xTCVj@^z; z)tgS4MTEHOFJ+m>Ual;!)Hk2_Ze3Yit)cPV)MZoDE&X+VZmupZT&Y21esX%Uu23f^ zvxC&nZZYo)B#WS&&rFYDM{le5{mf6`i%o#VXlUtS*u^Oa>MeGeGiQozwxL| zcE8hXiAfq)^K1Zjo4RTP9rJ#D6}!8##MS5J4EVQi?;`^nHc%b3U(3Cx+qF6a0f+}VL|{S#;5rW?Y$HAY|uf{l+)`P za963L)gfie75y9fI-|pxWID5ZPrc-=c6R&8k2YwcrY_A>2XkJ%aJioN%Gc`SAN@#u z?aN=Q>RK~5)qDY-p%4qB%Xvo-M5 z{bmQ!AwiH^z`FHoI&Fv>pNMfAGr*VjM$iR$ZFugU(cx_s(;^@YFq)B5Tc{s#U0TAlvxx9TgO`K$Wer~b6Q z_1Jf74tR4{=ChJv$cc`}$LiL*_t))r?5zjyJ6iX>{N{S~n_pe8eEXZ~Rd0Vsz2-gd zuGhWqef8jL-cUzxyS?ss=;d|%zK5#!@Np{TnOa?3uk#lc>Z#Kg>MKt_S5G{5wm$NS zzo=jRum8vT%BgdzUCQnUjZ_KOE{8^_zwca`AGP7==Y?ADj#jQ{vSZcBt$yEi2>s74 z2s{r{r;cF9+~$6SbJ)p|eS@6+@#6+E(L!5_B|!i5<{doKZ;EwLMq+rzspn z(e|6C*_cz%<9$}$P#!y0^uzL&_K%j;`%Xv)+ZL8T%4mAu-6(IT-NXyd7SBWY{<>xC z@*=un7>|-P_mtoArhROhoF4K=|Go1pd@q8^$-jV=ZVgu28@>{zbmUa8JK5*x;&j6& 
zOdM+v@wM&AopqAnJ21@1y3@1{0?4b&SFU6L_R^(G^)LVX|Ea$D_|x_L>2vk_ANhk! zzPxe?J3ly-RhvtTE5S)0vv*M6lKKs>gZ)EU;o)vhPJ~7Zlb4hBjIfPtn!mnb@Py~V zLEMO~9OjVx!50}0QTR`O2KuBt0MM9?Hl~)B36k@qv;Iwt7mhAH&C{r0;*&RxZ(Qvp zO2>02jxcj_@6qrwLFc}ciSnl$wRc!jF5;xFI{0tDQIBm5?)AUoi^UHFPv3>F;y~S= zdxXeq%BBz9snZizyWlBZNDuw3J6jrISW}2tgBNjjAOZUNOa`5B;+sYbmm-;uku5C& zJH&x+$xCoDId6=h)3EDSI>dZ z_BQPw%vBpefQ;a($o6CVGtdG?+B^L#kF0#NlJ(xW1gHN9-ZZ}fATU7kui!U0srw6G z`bvHEvBzr!zjyb(1LTQMv8GSb?1%DX$2lCt$D!Q=jIqJ7y6NCy!hj2$XeW8~kf(B$ zU)XOQLb$1wN%_rxUp8JV4DDzg+NP5S+~d62-?yvJ`tVxe29}e!Z@KLzB42|>tKbK} zEfc&yJn63wefTq!>w4XE>{$Ku`+ufxy7iXiySavM2`<-r z9oWP_BE8LP`D*`Lwkg23MAGn|2C;8TH8;OrpZ<$a*QsaE*Pi`{@$DRR#+LQ2*Y26# zde7TlT@StS5qzP!+F6=|SKtOA5HnDhk(7^z>-dW}h~y8>)M-7mHEe%kRO3h4|CwH3 z`}`~%cq+F&ywCRIVSBWfGHU~xUuHE#YaZGIgF8F!Q@4c&CL%5Qr@rQ=O^K`!C%ytO z?6+!@PdCg|!h%OH?b4ANoEWRY@v#iR_$g5*tgbqN0l(fsr(xQcw&6B6T3ahJ)Ul)H zzk$E1y&WANtt$CSic650e2L0Fv7(lPKsaKT#Zj;+s}SQCt;Yslh)*cgMa$* zBfNKqMtAg3z1&qOvW;xn|6t#qde`0ZUvak0nSOW}^6EU zJljkF4S&>i{rqg1&ASwW`_ykL6#CtEIq{wKi+wQt;3jWr-wusHFF&{7qKn~7e&`(> zt#xG29V|x1rh!fQih318vbeHQXU|`$?|t{_`jg-PX#K1I{=cnX|F{39{^~D3Q>ULl zg-!DFh4VE}d(1vCr|?K)y=grVI9{M#@^d^xq>T@JC%p^4 z4FbYLUkQ*W;cH`ah)dpqG4Q=r>jKNu`p(c~|_S?J(SNAipC7xv7`hc2U7pDC7z4k+UXS-J_X=98d(8##? zYvaLh+sX#|0VaN=ah&X?ZHnJdTQJWkhUL%7cjZjK1bXVfxH3KnlZSZ)7vq~Hu*F~d zDK9h8i=(wo8$EbPd0M!DI7l1V(5;o<`oQ)#a*kb8);+Q#L1XdX0DnOI zvJ0(o!Pz>{#a!kY`zx;2yUMNPhX(2@h*=cm8jNHFxGA^dlQB7-9h8OIHGu7Zg0}77 zeEvz|@D5wogMCgvA3l4n;#17*slCTGFy6~qBEW6TNAHgvD zN&AP!vk+@$c%b%A4q~O&$aRhDDpbSQxwa8qvc8s%(iLihCr^~qeu(zju2z-|SZgFBlW2&fc&XRQy8T}EH8~Mco#OP9 zExWwxlpv=r7P4>5y2^WZUdpls(_1FecgibW)E%>FZC9j|I#D`#Dp!vEMQ%V#)pqdS z74=RE$aedLS*Kdx>p%PK_=BGYQoptcShw$$>-M%|5y~qiqR_TOGgwa=SMBQqHrpm< z;7ARZaX9b5PaW#|$lL};>r#?;U~mH8Pc~=ZKkz&CT6nFE1AiiCVnQQxVB6=QU6AFD z>!e>x8eHFXw)dT{x#Q#!3|~zCsmouCB6dDgEQQO zNMqzF>N0{jJZIpadkfP#6GfH)j%=%^o$D~GIrcDEW+fGHdc{as9br?sEQqUeI}DDq zNEup3kuE>ZNA6WZ(pX#dm=W<2pk3Vi4c-i%TRvclzn$?istLMrMHD;~V)Lh*5u#39 zh1!{Usf-9UUdj>tG_I6)TUy$QqXVqb>xX%EVgcVu-~5Sdo~*?XKj!g^9s zHkw#T_2Bs12-IXm({$uDBHRi?<EU@NU$9`G5asb>aNg znwXfzNuK~0QuXhu{rh&;%U||jz3h<(iMO3^wy&+N;^6wdZjC6)LuIW2?dvBG1V?33 z=6bHX{XKbQ`!g#poaIc)&}oLwDHq5}3Z9yQ6K`cqg>~S_3Qc)xaufp#ZmE!fqkLup z2XY&n$)|syZFla9qf`v(n5Y`U!1qz^@sW|5r4ceQIaFh#RFsr+XKT6UuAZ&A%jfIj zh39dsF46#@VZE}3EYK*))>h+@VHH`l{nouSw9L*JURz&*HY>m-9_4lg?-~_Sn;C8= zfel0&JUDVY=+Krc*0pQi&zDPV8V9?b8VOJ7-{afFvPk!nz>fim&heGljC-UNoK#%BF%9TeB9kv0= zWCt3OP9s3w)F{tVVUJG_*Rj0^>;Lor`XB0!2Oc86vfLc8d>ap3cR2uS%NQOBubuGB zZSrg+;mq_7BX`=Lg+*ok$_Af!n@`8$Hc=){oVeHq4Le~}Fq!p^j%I=Y z`Se(G0XLNeS42AH-CCo(z@42MtI5fcdgP&()SF)arn++FLY;l?d$ozYUE9=-5F1?? 
zq9HpuH5rF(V|lIi?>~?R+cL5-L?hZp>K3x$;Gy4?w3EY6bJ+Gk6H@4FTk40;=t5Qz zz{{-8wG!>I{hUP7U;rMmH>IW=qN&%DcI`Y`Zv)z1sjf7_NT0bzi=d zN{KS_o4Bq{am%%w)Jr2n1NFLBy{vxj=iiSB-L5Oxi$b~bJ=Yb!EX(o z`^k%o7oV&3wMFdIP*yXz>cQ`gE-f$880UW3Z;B%4sr#@O8_J`rgrOabb5ewpyWG-S zU4c{B5#`bCOxNn_B_|ZD*TU5$WMrVGM+a-)%v8PZ(L3su5520kH?P)rpZr$MT|5n) zmuq8bsZ&QdnF8q>R0f<3gWW_vojfKD;@m-Nw`Fx@aev<^_#wBh^pl|aF33msuoD9g z5e5)X{R#yLyQ%*9v!1oSFllpo5nb}$(s!Z0b*0RZgJ7e>)$4#HGPnzy*0wYp2%qfG zbM~ivb47}igW@=DYHyHF;-$lze9xS_SXbwk;m-i_G*GvkJeKXGR~MJkVdXa}kM7@J zR~E0-CqDYgnx5#ZTW`6k9)105Em3W)%)!sO)SGUfrC(zQDM-YgX|t|7$gXl=7qr1; z{o1Wt%M~;2UK+%IfuEMga=Dw1vecYW>q)NBX!Qa*<=XrM>kIvnP3(|*Cye-Tq)nq3 z-t@zRLFE1VjwkW157AB_@6AE#gZ;Dt{1zt-0Qf5HL6mX;+d5o(*vUN|So&6}_rTvW zxZ{**x>mNR`NwB!aB_DoPf&J^<1qW z|MF~Ts2?3@lVlba7qFkZ>+k=)pRHrJJ^*Ymz%~_jMtZs8UcX2iXkBv+d65>>_@t(8 z0Vip7jdEEBJ6UiO{n?=2-$cJQR~F&*GJ3#0dY9c+c(=o|yQXY8|+iI z@a6uR6aVyEw%{Ff+oo<*_IysqVC=XwgL?X`G&bDjM|+n62d=gyD8(xytP#-b~|3}d|zu(<&T|=Jkl9BHX zYLoC$)oXq39m;BZp!s){sdH!-Y@B6+9x`g3rJt8ir%>-*nZHnToaZlZ)*^M{$pe%1 zYyadIYxeL|?Yj6>Z7rOuq25jWxt%)o^tbDYZ+yNU`^=x$XFl~u_2tihtDbrK>H5yM zo~-YF_lY`p_WA0)wp3HILp8m7qHekQF#h&!^}5%+vfl9K*Vo(L@s@h?+uvM2@bhJ@vu9@5JtKp+}SMThx+O>OsjUT;ywdMMu zwqe8*_L05+TBAHVtnw@wBYxLCI;c3Clk%-TsugusgHZ#$JzUUM=u=}5_G z*Nsa%zytoa_&KtzDUT>}Cx=%_Z6q9MWjz&V4TltsU!O@g;m7disRt>Jf&)y*}r z2ZPOPb^m=2)X8H<>YTg!@EaT$LWiuAI(bPa@2qFA4Ji-Yi+|p+-{Pph?2)#yxPn;P z&N51SNPO!j{nciBIkmO=`s#vZvt6uB-UWQpG&@ax@|(O{2EYsaP=GklJb40KMDtJF zu_fq@S80?E9$v|p(9@g5wO)2IFfp~);weEntP8S(2iaMd$}_no!M#r#obpgja>#qy z)5s65H)ZIlf!;Cbg>E7D6=U~my7TG7V+d-?7WS4gE0A?69EhiBWjp z=&J2#<>!s^NSt(Hn?^Xm?twq)n7pCST*1ACkw(9zA(pTPBRS>Th|;04dXci(!CGgwx`kL#s z<2N|hRyH!=@8J3pe!Y|6Rw;)&2D-z*DmrAl-^lqrG z5zaw;6(?P5izyervGyn5ZcTd!9w95rY07$?@m&o~Xb6%-3shY@kNa zL;bg%9c(3jiHi<);it+62c+G_s3!xP>vh*{x76?;l-yVbA3uSI-asq!QseBi)5ndT z$#YI+vTN6F^kK>Ojx5~JHLtz)4M6h@WN%_8suq?m)!p~rK^woXu3Wv49YUO(?ZN>k zz~%EVFvAXIw|w{e?dQ)e)HlEWRDI^Nr!tA=?mKR)U;QWl6m?K1;2Zv(cKO;a2c;=T zKXl(&1qW!lJp>N!C`0>lYnb%2b@9R~zS48`Ic(U@_8@tV!Pj{NVXaozF4nRAll6~& z<^44^jqa_qE&Y4pS$OGB+W{|dD)9%YU%7f;v{eTD&MxolZ;}qmwSBWzLD3bCOxBB? 
zP>`jox-r=F5x(hP<}*I%RqA>8uMJ6Zghb!n9jNp#wPBK>AOAx861&G$x!>GcB_4UX ziV#`UMhpxiBa;&~HaS_>u-jxBpUpO_J8F7AO#511s9*N8$V;k*;IMPu9kN{cZ+om4 zIAg%oK5Elj9)2dnt^Oy*r)$<-CeW*um4!Nc?n3$rhVehAXQygxY$Ts%vmC3-E1YY{ z=?LZ9MSH6d8ZFJL#8$)jqLw%h>qi<5$+Pj=9vEuaiTr0Bf;me--H zI^b?1lC+;|A=CL}6RdJzTb#`!TXaLa^y%5p1yA*vgw-8L)t=fGbHRd>+0r&bFH%?F zFT2}F`Z!cju72h#^?*Aba_vR`Q;(@rvD?~O<04nq3H`(YA9vRoLKh~c57p%C!8&l@ zMD06rvZnSOq^&%X39yep`BZ)K6CbY+{^oDgzy9_ArT*;GpRRLf&LPk6Zx}h^r_8#D z!~P`wh3m+Lev*9;^4gs<^LZ57IQF%#rHkBE2m$SLo}ty@y}19oT;CvvFf9sq^fE5|DHj zu5qn)l+E~Y9ll#{%GiJR=ZC^!i?v=eu1x)5pL{^5^wFCGGU#FJfLaVPbcAm z19G~uh@S+{@J8$4{sVRA?YAM9E4VGf5SPu!r~OS-=fH1uvUbyJw~yTa@fg6}a%j^t zV6JbX&w)>pHW%l0`|+V|(CqXj#}}ne!(fos(_bsEeoB)o2CZZ79cl5-*MXu2!Y}i1*exm*0=moryp6SO!KD2Y`V**Cn-Px_xfnIQeuKMh4 ztV}y3Ev!?J&)7b3Q(wT&{3wgKSO*GMoHCZ7NXfGpQo^a1#$Wq2 z2?MZ2)0IvCKCd(QYeY!qIFx>M7JqO}UfKcjrD`WR{Dz+9BahT6>1uzOxahy-kZ#b) zFvk|8nG+t}!FXwP9+YSU4Jw=4)NOsdvwq&F0j|T;b3=aOOk2dEE{eByMuaj6HHfKu z+F=l*P}kEA0j`sgBmw1C4y>N2ffeTr;9ABF>VRSFru{9y{kwkwOlY7#j9~TRgM~7i zYt=V3S^ICgt+vJ|sE$UcqwyQaf1tM?Z*e%Z(`SmFlb?Ou@@|)X{k@}g%^hTDv%ln) z-8HJbZq3&Kb+h$kkBgC@kN)x|X*aPY%0OCb@M{n|8F^GsfEjp-!DH9ab0fj#{!7&fYfG=QM zA!X%sGFv7IkXGtT2Y}}aKhO5Q>(xg-z|$c^z>kDPBVik$^AMS?P?<;|7BAq44-!0U zESe!~8OM--d590P?b`$vR3YyO4Oj2F1K=VB$Q*5(eD8>(;N_lw5(W4GG9dVP3NJz8 z$$OP76}i+)kOHJicMTvgZG>7`TSktSo~yV9ckvcRdp7^BVHH=ShfzL|SSB=lTA0CB zM#gF7ZG$$Z)L@$?GiNOyrxX=;sk=>n;?pKKG$#zkqxTPH>1Y-fNCQ&;D2Z1>cKxy5?dyWUqvPuz>(u3`Y&EcFnP zBQwl`jRYZoikGq&Nu2qeI`dr3U0$qz@!$NnDf8syRQWx071Sc_-@C8g`qsD9(W8gz z(v{0~`SPXkFSF?}8gA8-GK5C_m)C;>?v3n4N#^Fs%I}ILLFhXUpu2K#y%S zN+s>edNaLt{z@y$(~%*{-V8UYhoj*Zv(`u>gTkr|KI!<&4+n(oWLcQITo=!sujQqA zjE0qGZ?=*g9vr1&90)lbv~$a6R1_p>S#K578sobEO@6tKv5xO)QxH@_pPxr;Twa3$ZZ$_z4` z|E$0SUbZpDncZ1kiUTLjm1!NnIH%AeomHG|^*Q_tZt}S!XJFT6M>iRl4(Z4SS81b6 zNM{dcq)H=79KDo|>fjJghaF?mIu)ykU_V0zc^&b_i6GqB(~&e@X0lEyU*5;zlo3=L zBvJ!)+MLCxtPYqTyjhu_ugh01LG#{v`9lxao8J89I(Tq@jo6toHd?cj<25reR)_W< ztarZc4fU!=AEn{xtWGi;IhKu&YGAUtn=s2kOcno-sHk^D_=o(zyniS zaOki~I9}MXCmwcENHPch$qYPKsvo;HF*;tu=*<}RVDG-^`oJ&zY`ys{uOp8wWc)I; zUxp_*rx_%L>SSQYfVvkaNxKeAon&d-XoLV$zUt}Mz{;n;<& z%)>FVBSfFbZ>Wh_93?WcbJ&4Fb<#$>D@^R{Tt}Z57OrMh_OjnAU0<&I?!Kd5`|4NK z@xuq}*;7vk-;rVBV+Y%}Mrk++8~KgR3vZM?J0^9^vC%w-N5m1YunV~f6FH0@8G~nX zHI#&JPR5g_>ROy;;A3Fmr!qiX?QY7A-Qk^`6z%}vXBq75b2W#bhUi0wgrjT?458!T zYy;dK6-I~0c!%yPSIG1*x+o6~d+K}7)pO6EslLI%Obol_#PQmu{ zye7woYaUzS zr!y@zgPHxG2ii9&w2f15hr z0sS4Wc1*geYZ!1@;t*Z3p<;d08Uxw`b(#8bhqz7- zpl)}z_>DNJ>#hIW)y{d}rITJig0ztVNZaB83;WPIWZ*K-+45ighi#zRelXdC*r8;ova2-2-a~(Q* zq(;ZbYZW?N>)9?>S5DEGcZGvn%UjQAqdaY3*&%30WhRayOKG1V88*)C#d5&T0S*+A zGAX~v2u9X0+b#|sE-Wn}(>*nZJ-TpZvF4ZE-ms`U2kXSanfm*G|EFsQzhfKyvJ*huYP5{_6@JES76s( z`G(iko%h~b2k*SAruHAIe(Hw8CeZo|{dSh&<@!ophsT@L3+ny?zR%pbrMfb|Tyt|v z_5Aav>buWAR~If^txtXIPwOB4C;zwltIvO~$vq`^hagCmiHE9+U7rqIXV z{ddR$@A~*Rj9)v9zdHiG$H#_h40)fxwoOe;Aon9R9JsB13OsHyX&1T<*_loKl>TD@0ywDqdEezf^1m)42ee|>e! 
z)mA)MUgXpF*7eH{Tbp6P2UEpj7P&Oz+dK456Cep?-S=wQ>RbWSHAM~`rPLqt1o@!v3l;*nfN(d zTh{_h-*!BcXC2J&8_ok65VD=G?NrJlX~49D#=$^$=yPDgiOcCU0%36#nUr7rV}~dc z>BOTjxtxC-!9EOZe&s^a(%6b7*WPKHai)ANDW&MW<~udx%Td zhD8G8Hwei;+oo8cj!ZP3@g;^&SV28X0#JY=YU&<4oQH>$sm&T2yI!}R zJWwCNU#Zpky0*GdeP#m8_DyX4XJtoU&bE=ho<8L&#kC)<@ipxq%6`rIR*qRAk}`dA zp&h=Ya;AZfo;I1a&9F^5l#fgt>hp>&6RsW_KW#yMO|HW3)8_aN0O{B5ocfn-3t{4s z-a%Xkm0f|lWhshER}w9U|#aNFHJ%P;H4E%P&qw&r>- za=*eAe@MR!J+)6RpJ!ZNkB(1FPSx(|DSV^h>=33eyR_)%B9Ui&Ch-*)cWfLS_Ok*` zWV4UWo%7rUZXkVww#U<7M_K%=my^}pC5|kR1$T18ru4e2m{e-Mh;2#B;pY(S&rsa0 ze?U0#6BEX5( z(EM!M;p^39Hzw8=n<{K*?SrO(ondvQ)t zB!m5CDU)CHj3m%OIhW!Q5=w13eUnyd3zO-o8_dMGo-aDcu-TLr z9$QSDz@`58@IG+;3$i>L&vgEf!5r!x7t5q?lYaY7+J}){fjeMnuy1>(WHn{3E%gjqb>Ic^tnHIp|I>NPjH+v5zLX z5N6-1b*Aqcjfe-0!^Nb5-r!`L(f96lmNvTm7V?5Peg>`iK%V+p(wY3FH9n_26L;}Y zCw!NB10-w%xMPqyVt~9-cU#{OKbT@$CI`4R9c?@{pXO(4|C`MfU3H=1AOvkDxI{J$ z>&l1v+I*eJIk@;dp#LS`xo_>NP%3zAM@P)3(abVQZ`+aXY}t)xne9urtuu^m(~cRh z2Y+bPez55Y$~TB!QeP4WBsp6?P$*aLnk<9e4V>W@wDPv;5NWJPIC|WA&hml#b!{28 zMc@37|M(NNvfkoq!zI_QOt1z1@I*}=y}5Qx?Wv7kcOkr%PviP&I@@3wu;p}8iaRXW ze1Ia6Jy(eLTwg0c0sP1v`)jbU>noS5zx-VAR`seM@=_lNUCXCqp_`IxPrhwJ2bf(9 zl{SmYC(p#wpZXxJ!+UU*uHJ0{IuAOf>Lw`F?~9I<96lu-?^^r>@{%y#CFjVK zH|m@D`ykC4TyK&s@me3Jc(u6g&QP#(Lm+o=qJS!@dT#8;oAi^e5#2FH~J_^BVc zj@`?m*4PmT`0;tRjprB<@_{SV#w(Ox{4Y>+Kt8?!vS=ywfwk?9uUQw9D{1uATAifd zr!CO0-b7X;z4cDB5qk3A%s=C_tA2|0MfYraZm}{^jB+>;!2FEoqD{+{w7E7qxC4Rl zWs=X0hwAdKv%PPB^za7|xI=`5c?zNkb&vL~1v8VmVROPWcYK zKM`lM?*x98mdf zAbYs-jg&CZ%*owMIJ%vc!Lw7zSefa3rSS`VOa_z!76v7Tm;xCp^(~vp0*+H$G$Mxk zT^<{;!n`J)(gm1HIGYYYY!S8!u0Q+pzf{8`djrG@i=d>Jp61&DO}^ljXYogF2sG@% z`Lp%pcfVU7`Mp0(UXwU~uAT@!l-U8vpZbX($B`SX`Ng?fTv!Oc14C94HdxI-Icz0O z8K9+QO~%q!{)vRy*}F zc`0}i1vE1dPPl`W9?b#cKn|4$x5AAPmu3=~gmK6yrgE!5JE)A>(-7^RK?`IYC}5?R!8@Pr=2{O zwaKO2%QjxzCfZZ@5_wTa8eeoWuU~VEZXCvI>(SYfk%6pU_1lmC_&@zebqjoO zQ19}^^R>E2JmpS&!LdIB+1Iiar;wdFs2*D8*gkZOlB7J^x^x7_#z7ZwYAYVNB6G@@ zDrpCn`sQlpk>RoEjkNX~p;(TrXmBl}T)>b3`4o>>ca0e6n?AY3LrLu#ThOdF6t1wovfve!@x98mQb2*;@ zaR%u4@Nhl+$i4N_m)uvEE}p5ce&ZXNtT6`e)}M9`=qO18T}(St-049%QzRvg^=}?} zbR-Fbf~l{hBJ$B>uN}&Qu(JImG>xAW*@n-`BvIv!4R&{4v9m@V4T9(B=y;8dPvzd3 z^*uB&Os{@AdCc!YD@S%pIWfpOP)EPpq3Wz;WzF-MXP>XrXU>(|h`GDSi4#X_|Na?h zx~IPJ1+I?Vq1lUJq4#-$i22f z`PG& zYXHk=?%bJ7 z-f@Q&XSpw4xl|JqW3^{$tbX)|-c_%CQ|J4w%7rFHn*)t>Q=^;PcHr8ftF44ynHDIi z+W^fwN+~~;#S9uiBl##k@jLMW)F*lBnNAVY0jsT&ki@NnAQQcauZ-B~X2ZvcFy1TQ zb|9+v4u(7MzX81diOG7?PyC&{bH;VP(+iyW`T1Ig-e3Ft=j#(6`A}Ute=ZYfPMkbh z`wkw=XBaGhj_L(|!>1xd{;oCuqy)#3#XM9(^=gqIEN8bH|wRZa1^4qP0$ol&7 zTwOeUsxF;7UB~zBtA`%AryhFn{(9+yFRce&_VT**k%#NRJ$KjcJMOOOV>i|Ck;64~ z;BZ|ZA4ezfd&CvF&x~$-sSV_D30`k1m%QJiJuyFryh0m~Vf?{uJB)V?W<~kuKJ)qd zC%^iu^^uSNajjxUBj6s<6(sZys%u9=bq0e^|=+3;L=f_~A~(93LKn4+Awm zI#N>;qct`*Sfc~bU5!Pj?1V_Cpd1CR+o(IpKafHHq4>9U20QuJPWr6=Ak6^RHu=oX zXmwsI3@+LSV5=_iIg}&Y0N54d$Twx6&B7P|9@-4F$B2g{z)NjXvs?NZ!iubmr$F^T zDW7#Hu?(cdtt@%wzxrYx+H+xLH#Xj+ZXwSMWPoS<4uwjvc`uy}a$*BoZ~oxDzG?>% z`a%8n=%X*Ihwi(pPMv;^@D==w{?H-13D1+ic$l|1siWyEu`FUCsfI{AFHFf7Z>)7XflE7*T8MC0*u{RC^r_8|P0$gNA zCygg*5|k3bg2^`u0Z&3wfEH9ow=H%TgLcyoqU?GTMqsyTSk;J$%u6Ra_gN}-f8G~ zazd`{T6bgWAJFDt->`Fj4_sb5P%f+dPQC+FNzZd880&}k4%f)|Y(4q(^R>D{{Ri(g z`{Hf6JL;Ny%dFqzHTYBRE>B<3J7CLK(c>^~^Woq*Vd+>T)Vi!I-|UTC1e@f=V@PslK@iJl7l@KnN-8Gg8*qO)5dUzuXNms zn}i^yJ2-ZAz|v`-6`t+uTWKo~3{TW^&z`M6`oquEAob1c%t37YI5>&7`~(a*lF-!X zkdpv>w^a|^b5C|t^ZW2l3J4ViFMT^QoJuPgwIlu^K@kIGhv(!WzKN!Md6n+eIsSO( zptfap0)q=#_8!<%mo8nb^_{hdyrs*eB7ix$3&R79!P?kyi~64W;BWsG?F8}at4AJs zdA;pjKZwzxOwbq06}YLpQWt?saCG5_?SmfV$(_~ggzl^J=PuRX{PpMR%$etjJhI57i%z?FASW=0U;8G?vU)N{S9#J`5Wm7SU3 
zU-Rop3O=qjv(Hdl)@(M#QgHPbNpH=n?xe3+(i#Cc)?>Zcw0F=e z6Wa-IJ}U1_0^K84K6ALspF8ahaJt)?RI@$lj)qR0)d$fw_=&Y%=%vpfzF9FJSO8TQ z(1CRHgJ%ZZNaIcz?j9o}<=qDM+es(3<>bQ(?cxP|R@-Hn$UQbvGrMQv*EoT69$$8C zd70;aeBcJ#&r%JKjFKGA;@Iq!riJIL{m>&KTFdw`>?F<+JYay-921G_+$>K zsizJenuqm`6T8gEQ~C7MWYWkos^7*JcYVx0_!k};SDS!-SWbW4I$YZ>uF}kQn>5mP zXCM>5#lB?q+%|p`L|H&i^#7bFH!w6-<0CV5;K=csI(Qg=bqbpAs;d{T)Z^cIqCWYl zPu6e#oBy?b`-2~>FFp2He10cLjN|L?o!whgljG1=pVCjHf;+f6$u2uVQI1S>CS1B? z_Z{9VCSJwgPeDX&Ipm3DNLvM2%cmbGXOmO=>7V`E6C5pLxa4t(nHTRs~{c{iTC zH@x-Znnpjzdd+&F|w#Bi;#fGWZ%gd|v#Q%@7{{Z(aJL@yi@8o<=)u|l1tCQ3%snu#_M34js5Fi6W z!bE$(W5E~)JhsPkZEQR^aA#~k*K_BYVa#A-Y;0d_g9$=_!9oaxP)?mwb$3-)I(6#g zoa(;sZ~b>w%a8B9&s|mf?EPPRg>QX%#r@3Fb^gW0w9CBhqz%fF?)VQA$4=Gg@w2ss z4$VX^30=ske$4^^{0C32^Ay4O0O0okU&V3$k>Q)Qxw2BPz4vgPzwfbXKal{cR4cdDP-vHZ%k^^u?F z%(L;%acX57bq74Av7N1N;O~?#?80n5X-GM-Il!QwpYp6vnk2t(@_c#VTfh432YAi` z0Fh_Gk}HP9m=3AQWN3GP3RtU5q|4N%>+Z(!P9<#PNXm`)mM`xu8jfiQF}~&R0DXB# zgUnC5@S3T}+rcLsxf7=%lo5?l`SBk`A;P^x5qFtWW`qbyFQdw?{B8^{^gJiCB{dj2 z!HF>11{XO{Z8}djkTOkYWJD$k4D19xqfKuE8tkN|-5R2Xla!6{s{p)o>ZC=E&`U?Q z31oZEy*0T#`(FInE+?i_r3qxm7N6gQW{glk{6i*q4Ku2oxa>@X$p%Jh`p}8`w(tBt z(z@zebt({{WU+oP%FzTIJgKqW$8@QvL2t=Mu|oZlH4}haFvfuN?UDH9`gKO z4bZ>Cfk7IF)rqnmbAbY27k=ZL@eB+k5(e?FfzvLt74K42IF+rEnoif*gxYe%T-UVL^owuew3Qme(O}Iq}oJ68#M6V2KZZ? zlbkxOjdxXYPDt)&1@qke^(Zd+Fm=F#Z7@q*5+G;VE%3C7AK+iUrfW=xZEAX=W+rFq zhyLuJs_*{(Ka+gx%U9~9tCwqz>%|u?)+1kdvOe+0pQ=xP`qT9b|LPa&pa1MXuYdPT z{{epcjkIL@b9e*l|og6kf_38#=0WdPuaM^2v zjUXtqet%GW*ugS{UkRpl(qsz<+ex4?S9iymLfg}B<-l)?$%yI!(bMWcfs2|RTn71} z>4&DLgi9E8(8Ql;bWl^;*on6jYdu?=bcT`NEgXEmRX6YV>gKQ2^wb1Ai_^5eR!fV^ zb!~1TJ3AZzl7}3~Dx4PwriDY9llRqi@__YuiMu$p@_;x=y&nR`=F3#sjLIq zj8el<7i^2cwslvd!|k{(gf?lTj#XDqOOyHuoxFDVnfxL_`1t3u15mb38B#YpDC$I%`19Cp7Z;p3 ze5A(E9n+IjbGJkJbG3tI;2JymWB0k{udar+ADrL;Es07e2He+tkCK zun;1jfv3giH~FKh3F2NF_}*3YuI9}NB-k2{D0Fa}#5FlNi5_zTvQ6A?IN8tDPHS20 zP5Q5+xAtlo{k;i~sl$a=xHsiJ@aX`eN93-N))NAR$4NmQBxS?)*qnBZk?m;fjbG@^GjP=@-@sgyb`3f0nOIZfx`H}P$jyBIy?Dkp=V3c0TwEM*#_<7 zw5Jy#mtXhI@2QC+_Yk)h{ov03<<-Snnp>*Re+HZW@sC22)jDehWD| z7TnI=xKYo%@M3-GiD&A@iui|p$=;7396d6V3Gtu!TYsZwP9LvbV0W@NgQCc?c7E#6 z;ky69hw7LU{4+)>*61)GhGi@R&ruGZ&2^||^d|LA|K|M~C#qx$WSe56+4oi3D(>25MBs#}$`cF=brH#QS`4laEs!9^6;O> z^tEEQ2>N6Gi!Jme!P8J&a9~ikTc5hW^`D?W_2m)=@~rU4%ekxIC=vR&c1bCSeK4uA z0RO(c41IJb|MYv*-vS7<-0-oHJ^9FxqAAn9z##cueDubLAF8*#^^Ntyi!K1z%!Gjh z@v-xymyf+7QoU!}Dj0d@RmyFD0+$!RT3qgCZa9elO=x*ZE4)E~Xpkb2Q}wNq+Li$tg=$e&Yt zhJ{BSpmb!!%)VP%TCL|VyiiX(^>jUZ{(L7dWO^cI>Em@G4K zJ=vkrnW&dPBBw2VYpeGj9FA*n*Dq^#^S5`6C;8C@>JN-GDZ0u)n^Pv2ccXLf^7Y>5 zk_W6iX?kd}-k=Xi)$`P!g~I3@ckGR!Bh90~oKL(#KNmW=n=G0I+It#-19s}bq2WVy z9{aKe&$jUnWC*y`59t#sr=*u3vjb7)2$g#L(vv&yhMb&9oh_dF8j~cw!MK**IDG~e z0jghH8S99UMfi+dTgv`X{1V0;T3qz~m{dl!xQ?SAR3j9FP9zOknk zd0e+AUs;~RQrfdDLbSgGZJqS6B|t$u`jtM3pJn>xU;e!sof@slnG-cWafm(u`X2i! 
zP_h%V?6aJ8tUO9%fg@0X6T5^C&DMcr#@fXj)xz9IL@7V4S%VN;&-A0y)WNx zt`~cedEvD?LVteF?Jia`UpoSKHk#ty?|EDN(Z@eYls!vDsxk5EvoUqybab~g#`^qp z^t5$Fe_LO)>~=;&Yk%x6uSA0lD(YM7>?mH*04^H5r=CRxh;S-l8Uu(O8*IDqe3L&F2{_~Va* zyVZEp4cEpW{O)hY@vE-`_G-+IosZXE_*P7u#U@wRbIS+o-q!5uJ;*ZfhqlKLT{_^6@(cpx?_wMA9?S3AQFI=tsL5&a56Ayce)VIg^ya9UOPZof zJp5L9Onw7Kaptw9tv%$NwywT-@Av*dG`60V<~P2U+wHjbxif(Db6qaqpv~J<<=^NZGWJdIKC!E-aMgx$%$IHsC>gx0bd}^k7T;?jL+SULKo^!LC-&h~9?| z>kS+6^FROd@e@Dw{qgck=kO6GOXO2 zmIEg@T4)u)^^#tMmbgwNISGj;!LHP9m2@30i?4e9kiMH6<^o< zJ#27JUxIS=Gw0?Pvmw|zqan1ST=k9MW1gwY#Z%tH57OM2>EPZy(e0pVvaOJ}b!cm` z;ZnT-PI%z}&RKlt-~W?8NsfjkbSKEUNy)jrG+%Q z@~RW@3;*~Z$J-x#CwSY82kyK(o_+Nk^4y;L|Gw`%?~Hrzxiw|f_)eKpC+fG`_&+l{ zm&?DOJ9j?DMknGdet!BtwIe$w(K*^4;Wp+<1lHo+0sL_Xp5Fhq`{O5nx#hc$1yy(}6 zFIWCB-;#&OEPg>5aK{^P>F+vkn53>By!V>zTGsF!<3R$NVm zH2Oo)fc(~Mg!|U}JYnKf_n+tEY?Swzw#h9$N*QGUg)1&uj`pU+G{p~5ZwaUM^EM0SY8{?!8F>k^m$0@0E6ZERpOW`yi4qiKV#m$lW)a|%e>oBy-~$a zthD9oK{+EYZ7liJbfqMa-yPsNzr31!;%XqiFcx}aBb8_3tDudW^3I>ipGOX0lSVcD zZ=)!Q@}1A5qxz?Q`fhJ2l*1dlyQoK;ys)( zmhf?POE*{6aFD)f^!fAyn_C@>SeFz1O8K>vEzihL@QG%Kyms22gumC<(-lL#{V_f<8t2bl%xx8{xBJv)%!EF0t3##F6;RHd zyAWrwH(f3_YbUI@6V60vU9RBVnk;&hXRlw&)&+i7bTL2pLq+)JQW_5LZBxnolyNGR z?|tU1vmFQBaJ6X5u7BeROt$tUGPv_u#v&?Se&^N&O{<9u@41j4S2B|}UFgN3zMgpV zo34$UZ@4bzCojg*;!MoWOvVQCR_U}c)S(;F%bAFL=L8m=RpM0RX7Sg8pU{Y&Sc?wI z>xrNGTYo3Umm*GHe|=QZzwSxi(Bw*l#JKnjRs6Ot<1u7*_WW4v>l%pfe(+tfr+Y6h zrvA3u6=HMww)hl9_Hs~a@_FH(#1lu|r}D^wA&E~-!#zo?-0k_Gq+sZ^XjNPBgc4$G`dypO1h0o8OLWj*rCM_uUb{`Wycl{13$3 z%u6}3S-e0T7SE`0X+m*b!OqyIggd*NluX^*bnkr>{0G_JhzWb`^f4cg?O z9_nHIA|73}O!7gFY!}?z!N0e|A9@i@AUZ`(w$mO^ZSG6GmHnb)QXgACo+g`k^*Opn z{!lkbdmE#@mUg_tM}Ax4O7j*^1(#UBT-~d2*QJu7M$O+bz4B#DZTW6{XggM!`MX&L zk8I=S zLu`X?Bqng6u`7P+rwHT9qU~4uuK1E%dn>lrp|API&i|&?7UJ}|n4ek5Z#?x!J=CkM z9qs9x&7sfqf0c!X7UCi7PU^F^)?BS>X$4;c+N#Tp**Eav>;zLcI$%Tp+3hmS&lR9- zpF4kHJb&wV>7y-SLz~=d0=%}j;|rqG+VLMRJ9dn`gLqbPh1ZeGk^KkIwfkcD(6Q(k zJP?2JwMXOU|Nc+MzGEZVj^Y-Cx*h3XtGl6zc266s4qJ3}TWFR(DLBxNb|r#3&m{fu zg+1ht>`XzP+`C!kK~>@ddUcRWL$+sjG`AtE`?C2zNW5Wm`{7w{2( z@&Enp7_j3Qn^i2ITPNVZb9J&u@-=kTk5g-^HmtL{!9lI^e|Ct%!}=}iG3}7s0j}0o ztZ?h&jZN$j-#anKZ45}9?coRZ_Qd`7+?H)c4&GOWofshAT%o63yy*~Y*Nh=GP8`aN zhwvp0sHe48{;Z#}C7sk2xnk@lGJw1~SS@FWq7xm&xVpBK-`XCnJvKkewkzm! zE01nhybGPxDr?mhU|maHQU*3e-kYCjNt}fIL;SPhRx$7~_BU6Rl7~3;HB4np=ixUB zo3LBU;$KKx;y(TS-Sy1h(?%9~Zk7Dyiw0!Ky1II;Z2;EO`l}OMour+3sHw%anL?|S ztIQX>guIFa?>*E@@{s8)!{_Q6<4R#HG_|fp4t?+Ggo|uT=bfkU8xNSK0p6lY&z4JcXQv8eJDGv)=OPf$|GPRIeHXW97vEe=dFwFxz@3% z4gKYc;pE>`iT2Kcn4MdVU;EYHj$i%F&mIY};9k+_&Nf_l?BhTizC(H{2a- z?E}#?v=2Fhj`$$?4Rw@1lIOvneAVc7D~)T>)3O|uSc#swC*$2$H^lu{569~4%h9e* z%GC^s_i{={La*Igq1$9loO~FUX^6ql!ONTn3m^Y_>L+VM>l284}YWk<<_m;Z& z9dv>pY~wC@QWo+Gh_(D`I>^Brq|cSrt*<=h@5CDiX|jx(+^e5_r_6dZHmEzaj|EQi zEP30vQFLVbJk&Gu%z-_HqS-I*2em`gr;J)2Jf4%yNnBRcCCa*b4@KX|VZL%ir;dVqZ9!Z9=Lj1s z3cZpAxR*;joLwYy$qN@Ro{!gFJ{7<72fxP~XaBhWa0!CJN?ddGweiD0@ZnfqaWwOE zxNMYykt1GY*0l8Uo;k~Z7D<8fm9SbSzsqcw1SfIfy)0i20rn;ND0uK=$xmiAVYyNh zo|Upv=VtZfmCfkEfgTvZD3C(_bVioxbiBsLFTh)yF@Qtv404x?USBg{a3IPOzt^GZ z3bfX!+QFtWg#VzMS7$b;808I@gz`NIP6ME0<1^_u)Zdf*QubZMseotPVgLb6o_3V+ zD)@62l}mEjm?nK8=OrbzW=G3*QGx~*8AKr`Ir1IYte*{-rq!Y2uNX;XR%WTGpy6L` zQ?B6YoSkDGFt93iNB61=rC~NCAsfzoQ3hLa1S;Jf(dCk9I6=;Ua=Efjp6UE|c6Cy3 z2f4Vszk5x0Wrv)jtX;CzhP9Sfm-ANkB)r+3qTfS-h zE|qQap}152;Cjw9?mWp(Pu}M;GU^F%EMLiYmf@isc+AEolAasm-)T%@VBSwg{xkUX%ix_@=<&(#!>Z98KOi z8rmg(GZ1Jb;K{Ph&kjJ#fybTMyrMj6y!n>A+M(P2eM1@8IpfS3g)7UeIjhYM2=%Wv z%wjFs280}qFL3;qoLe}dUem_NGyE_xZGh)S_6(teJCKRh(g1{hsE=4L_DdaPV-Xn? 
z)k|I_h;f#!`b2W5N9)TxTXgb6;?XDS1o(`y;b_V?$S~k48741gD)5nXpma_ffW&zQ zlE5J>>Rx48elD~rv@#u%#&_kJqreS3%zM`!D&@$FWq?~pu8SjA2T5BfE#MVLvn$um z$m;9sCE{pBFEz&0#B6-=;Ve)-|J{kB`9{1rFBXF$cfoaxN{LbaM(>^%MA3o(xk z?dtD`?j5nT=Dx&@F*-XF=a(jpXo&$&C z`kUSq?Oj7arCr}f-rD07zU0`&(U_Z_jnNAiV{(S;=y;qye<5Byb2?r+eKwwa`nh=O z*%#xvS6+!%&z+4^q#K``kMpB5apvr3y!P7pIDc^>SNm{Ax`Vn@hQb=mAy2 zrSZjcS9fO&4-Lh>{d?lN>u!vj-gI-^bI(2Twg(@Icf9Le@z6Wp&Gp@J?|pBLJMX=V zclXAd-u&ix)6H*+n{K!%uDtxpIDGiBv=gJ}E+lVw-_utbXX-XIxAvgyTzNx%LONyN z&VPNaYzT#q^{Z^4(-ufuzs=d^oZ5gD{M-6bxK;gIc+4lMiz%!4Q_|L%ylYWiTS=YL z)6q)2)E!3-48}0Jb>E&o{OT@Zy5<<@?urZNN8{<&&L$31Haz(UBw1SC^S{iJew`{Q z`yPpY=}>Hr^{x9<9((uh&s92Z0M8B3|F{0jZ^g7rbo15r#(KPR=G8cJ?p!?c^fU3) z)p&%M@Yxq%it}URF}J+LS8phoTyiEeI?wpk236x6 zS5nbVZDXsQ3Fly>{_Y;^!&|`1sx1yhKax&3(+{szNPC;I@2S!PDS73{Mg;NfeJZq) zt~3&vLfTkypBKL>-(!Z=B?YIY9MdW9X+HzskMdvtwxsb=XkGHF!)3hzjBiaPeA*R# z^1=^3&rU*)@{RQ>x+TBibLB(--BrLad3;xFh47h&-?@CaJk>@lAfG$?TWIyCvhx>P z?y7n=SlUSEvwD5>0n+ZFqwS=3Tek7h$$06-SK_(nULe+gEWXA4^Dn&=ufBFB&R>{_ ziK)4m&27q7&|A*T1b=|5|K%2LuHvTeWh`zx!b(-Ux&!@Nc!sQK-|^dA%3Rsfe-nOZ z`sS*H*oPitdh7^-2wjSvtmI{AL1#2^2%wWntDbunNB_`X~ZP= zf90P~wBz2J&xU#(IWP1M5QJp5Rl`d*r)Xdx0OM?c>mo$_%Vujc-ky8l7wU zsNhUn=d#)A(b?%~=)RI$`I+8T7xhKerOrrHN4fgP=;UPj%uQ|h6XvU5$2FrCEuj6=D^?%PpS7nb25% z#u^}+s5<$vA?&CYX6}&{-6^}9o144h8|dZp`0V=+ABvr3+sD*lrnF8rIIr2n6RqAO zlkND7he!6sQ1_tCl(m_&;@WxwJAbWQiH-c{pRLZ8J<7uy+f2+y-tsC4D78i> z(O8$&*mrm^h7NSW{)_k@5z7t^#a2A>*qM0r$*I^k*b~REJQDAC_Xo(a8C^0J6XO?i zg;2M`P=84GFMaW=@jw6Q6ScBh@h4o)zc=~^2eX65L96Z^UM4$L;I-DoovQ%oH#-Y? zYlAb}ayEJ2j{b1~h!Y^xL*#7y#&2V^qF4%FrTwYVJ_i_*);1w066s5o)lN2YIrt@F z*d^?xOUS!QtNX~iq`d8@4$$=Z66ZYcX6HEN(BEa(vjYcS-9;}OXLVNE6vM==tMu8{CZF!2_l?he<^Ts{04JKb+Nr*=zdg3# zLHXFhv9`^LM_1>RHSDlg?!<2gbh2$M2RJmjQh=*i5^Dm3`k~p?LE9_nX14=e3%9fy z92|&VVw1(iwOnCT{A-tH7M5~Lqo(Gz7{>3ebP}iHR}1IL+H%6uLV4P)rjD*?tMtXO zV<)3yWEkFBihuvhzZB2BG9G(J+zQE6BX*$GdahFADzo}l(q6mm)`ZLGZEiEBR=(87 zW%Wn>0et|jsaKINu5R9f;fCbbUPV)_dav?|n~v zFU>-eg;-sh zi?yolvCG6@#2%~I2mF8r+xDQJ%j!FjL;n~LE!%6f*Ew(({?|v=uIM9v`LU;S_PA|~ z>gtpSs%Bl~b?uJrSGMPt_L_C##}8p$8Tx!P1;S7#jFH$r{!S%@N{F|u4K!-D_%O##`s74h)+y<#);x8 z>smV#ri2d*MOn7v$>)i0bfYm<#zK5A9~m=~JuvWqit7^^8z?)Q;Mn%~bUVm12V?Nt zGw-)+{snM&74P--TzS?*wA6KubSm{kJ4;9MO+SY3^M~m(=Ad5Y3*MDezcpR1{zMw{ za{#nFZeF<}xM``sZ6?`YC%=&1RZPX#HU`xdDI7@d$_pMzXY-)WY0GQn$ar?5TUgpz zA931lZy%p^a1hdE$Bst-KtDJ%{%pv&DRmBsUBNH=CV(kde*ocFxKigP?HIZgdib6C zb$8d5Ta?-D_{^U@BKdOwtV{bh@!kOdJ3HXi0hL4hqjl&&>}X3nDL-{I{7~Z$Wh2WW zZ)3a`3CyqN-OX?uL;KE5>?~b~o31_@O`D7G?@Ia<@`?4dJxBc`K0!+2SJ~7j*SjGF z`4&GS2NR;31Q-e=uDnxb%`flsFg?F&XJZan^`oMX?MTl2@}JMkRo^XTv;qAHBj)$M zPu$fG%Pb@ZAr;mi)gxUe^n7T;3 z*WUX_e35rOQ`?SB;;Z!iGGERbo&YZkPMqLdo8EGyZ~@4(IA!I6>N zGic9Xf1X2wy)iP>6GH>tF~aX*?)Ty#_Haav9U*Pu6gN z-A{gcYC4vd=F_2h{E5fo@u$8OvkOzH5PAu2ySsX`Aao}KYy!qynvAEy>fNC058=ep~l+Z@B;|OY;oPnny*n|-jCYSVT>*$V-?xA@5 zcYTEXC__#~7*dgD9jI28EnD(WCgy39(gQjv0t)=t=*9T_r#~NG|K=mSXoyOsGdp#6 zp}&ovZ+g?4_1wfFW4w%+3jj7M_DAzi}zW%OOa0*WC)RF>49i{>y z2cMKHe|?z(Sj#tsSUjj8m}&l#1V8) z9~-&dD{NtLIhJbykPWlSri~jq@iuxpn_9=IbjB2#3Y)y+00NB}(viWL{Ne~88xL}6 zY+$t^${?ya>8SxGKk}{e%snfuhiTKGbMGJzXKt!c?bOuKvSY*Yyw5$N;RSz7KVj0S z>mbP=1{rlciQ_Gw!xPzo0;G1}m%g^>+8f$ynS255QvXwq-iL)Y*fHd9J5KE+aireH7INM@ z5Wn=x|1!5r(b07!6>*r$yMa&TH(@8gvh4>tfpW4@JRL6`Zge(j?Bo}&oOOvq>*(&j zKJ+gEu}lBS>q|JmDbJijS*Mr0F8$m_MBG_k@)?e^^|FrnzGfG6()SDxXBn=3A>HhF zHTcy2Dn~X#Nz}H)iRlyf(W!kGoxpbkbgWE%eb9Eb<4|@E36Ql24&8`rHO#GQC8Hx^wM! 
z>er$Rft_c*cZ7E{0f3z~cHVS$wZw9DBfjvZuOJ8b9xbG!&W?!RFj(JM1#g?Nwy+cp z=#GWi$yi>RiQ@o`KytrF55^t0-WuQcJs*nieD}NKzWeTq>#qC%GWH)(yJcy8ANbra z=Xi6h9J;HkI%m4cO$G@t2rL9ftw9o&z-UHZwk(frjjbiwn#D-QSeBN=2oM?50t7U4 zH{DHF4qZ9jTXl2(at`;-@Au#Dd#jK=Yx%t2`OZ1}?C|U-?|AOThnP%*=JM6KI(PnB zT|9re7Ut$^VSW+6U={vww;;DWZVgHsZ{lkRI@$2Fb3z>|($hgfJ)B9dPb506v$Ui6 z0uJC;^q}-1mPYL#XpllmcHefcvnN)5#YckT_X8@Y)ZDQqn zx8P5^L?~f+A$H`$w^y%R#=fo9Ai7ptD4rYNaB>$N3EV)IM!kF@edW1c8gC=W-uvJC zo;r5?D6BnDJF5$|xiD9~#MxR|t6}mT8=S13?T&wvdJ2Is8_=nz*Ob}nkiq{Tjdxzf zym&DhJm%+OuFWsk3opM?OUS|G>({T5 z$9CQSj(h5df8@t7^7wnm*uvE-_2#QDQ;NI3fNhG*)%php;066cB6`v|3UGu6Z0~)^ zkp!KGTyDK+b>iCOgB*O6#SQJI@x5s2JiZm)IViB{Lg282EOc*;3=jD!4K!$w!4%-M zbIb;~`f+yufx7cu?&u`2T-|%u9niV0kV!{BcvP-!)L%yiR`83jUAu;T z--Ay&PMh1EfJW<%yYH>h$-VF^4Sw{<>e3?TdOi7-FW1$x=W2OzDL%FN+4<(D8{Cc* zZzlF#e`pwgZIp9jYBt;9j!sP0==fy${dWgi9Gst?-dkg%PemrnB)Y=*0@+{s{Yc%b&tuDR!)yX*e@?yF~>e5$T4EXTj! z!hdw@X}=??U#0%-Zg<<84kBb1pI$k(lR=%^^eC0)SsBfWA`aiVx@;3U^-#6QVB#!% zfdZvJhxD1&t0k4UprH;zH+iopMIlWe&zLg;;9$v&mMlJKL6#f)|02s zkfP}&`L`WPbz|G^=|9;wX9K}xNu#S^Jv_R$(uqa=L+<)EQup3@TOGaQcznp;_^scp zE9hl)j}tgH*LLd6h0As7{FOR=>1thr&sMPE9@bgBDIa#gs+;{R-SE(8c6o8|-$7?1 zbp2ChM!fr7*=BeUyu5I}+bt(lno_-SZSZ(BSF%d2az=@r_rS z-figvXUgz+tJAWZk9l?v;cFd>4K8Y8s+LBg<-?Uoh;_3Eu5e%ZuRj-$7qKkx2kx3 z3bzQhGu{q!JL$)1`_9Zv!-o{W;di6u)$VSDP44aZhqpR<{;c4QQ>sm#((;?AVHs;iD`suGBg7{EIKYR`b}_>Dj5e z{m$F##F66}$Xr8*;T6G0=$5tJT6AX;e71?naf)c|X*-COe+S(4&83gNN|#?9@C$sx zldJy(QGB=S+Yj*?GUUDW2iN4|YGv=mmvnSuyL!M$-mXUVo8NwZ$u^auXeXsM(Ds;e zp{VE^s`YN7L$XgF_%Xcw;}Dh(IB$n5ot0jB>0R?wy-5WcK08PAd2`p^wZGgNuaT4T zfG>gak;Ug9+~U>p(zYZoQa4zPZxngY*XljOgoW_nhIc0K2Pd{|4({JqvollJa0ll$ zvg?(TT{AcW9O+Mhwyu7=S@_537K<>iwn|N1sc(P<< zv?~klvT_3-#Lv*&zz^vq(*Bv5IyiHn^f>J^G5^#*^=f`t>v-o46-!lf1QJg3rj91z zIorl2Zj(W7lHMzhDn3=40I;>QQd9dz>+p#g!dB2XyV!Jn#! 
zSo*)*MR9C=fHtuGHKR2#hF(Mm`sq6L?<7} z?nCF&?!q>YjSX{881IHg2dE2I${Rw54g=GOi$N%Ffc#N@G{>=6;?k-|mR$ort zFhV_)CqGTAALb+p7h&x|mkjD}!lxOOly%@w`wpFau#O*rSkx5lk`a8-s|)kAukfwW znfhi+=)kM}-AVYd%na~eK8^O@MT1}e$&c2@KJeap=pFahK4k9Avlq}w`l{MQCy>}@xLy6o zYy6WA>++RL`1sqjIYzHTpI_*rjDvFn} z#XR~-zKw6D4x_9cDwRq9=D|B{s*{Ht(Dq3GiIa)&JzTkOIqDb>2M8@kKO=lVHjW#J z-}c^cz#xC~Q9l_kpb_Q|=mJ{giDXVh2#nOtzH09syf&R_QcvJ^A)#_>AG(AvPwCsr zY4N?U1H|BPLTWAgN{UiRa= zFhg_DXkuFGYbPA<`my`=iCLx#Y{h4H5pQ;zl_<-tS1>Q#8` z8w&n7n*Re&l7Dt^)1#~r_*f6rhRB?3bA?K@b5 zhmX}x|5(+?WcpeyuZsg^R39v#)B_)F=MASHY}A;xde1_wUVNdx{{8Q)q29IHSX!j6 z@Dgd$ztU?RXlDap`rM#BV5T1aQWeTe-8%PHSNDszJS!!{O#Bwli3lwZ^KNmHpU+MP zA)R?NT!DY$5UXV@@%Ny-fFp5|6NQ;aLgXW9CM_+~_r4?4J|pUo!ENv&&)L`FHTU*4 zhZk*UIH}3St;jL&GS-ByablGN ze=YIUSJbm*L>`nS@0nNn15>E*c4QGIrAo)~l6I*%x~M){j2$X_QlT0MLRt6HGqh2_tdZc+T;25 z(Y<#bsC)0aI~@ip4rjf(osQd|>r@U6y5il{eZ4q1Tt^(Z(U>8uUj5FH3drWW3Lae6!ty!|)mQ8H{_qd#U;M(qu2V0)To1nMp?cT- z@1RUO4`#D~hHdK_ssg1@=eu*GKe;Lbf0bjdv zwf_Ep^_ocnm&ud#wo?7UT7;-}Y_wz&r1+tJmhUg_aq*J*tB^DFFd#TN?Ak zwaG?r{U^E)7YVsS?gYbX##%on_%vi zARXIGw6?|2^=lVvaejfPn>4oUz`2Q2x=>5Yi&^15kJImWy4+?coh;%80n}l(qtKNk zX)u${4n}b+k7t{G;L*|1@T@_X$q5=llo7#20r<`Tg{9?eLzY1x_;e6nUZ8BhH>UF< ze`$2xrb(sS6q3TO!|K3fGd_0yL?J-2Gx9$*Jzn4Tt>0K*|Bc^9xHMRSj&6~rZV^_)XbyoVXOqe^c0ejC zGKDe{LvO2sqbmpEJon_Gj%@HW7!7NVoZBO5>~xI4EBU1w^G}Pgf9u|>-d-4%?nQXyL%S&A9P?}Gx zTjBt7d#^LBgQo15jy$C`d@GKca||rXZ8u+S1JQ+ve+lzOd^Y3Dqqf3O@-sZR#_`n& zHfZMa>6MsydpaTqF+YxPEurj!ATZ%a|I42DkaigO=LW4~>ltuYVIFt(q z+Z+U#nHqyH2Qy%1hk+}dJY7Mij@CZvMA@lqN13`oSy52J7o7<>;&MlRaY6-k8G@hp z?wyJ5Plp>kDV3>xe)5&418L&j!E$-aL8DA$piaRfal~oxCXZM$w2n=PtXO{=XgYVk z#i^!|sMBnI`V=<2ggp~Y!nZn8O}^!Ib&>kg&!Grg_!`!26gKa6dmK@EN7R2xJNC`f%9u1-RlnCi=lzn5NkvCdsOmrhH!^4IPU z(U9*Uzm=8wI`!(yHGlPTboP6(VL$d`KV0AYecw}G`;q(W=#l9f!_U}U-^M;%uB-Fc zXt-alrNzZ`2rCPogXAq9h0VHa2JA@Irc0a14DzhQtX|NuQ$uGqu!mvrLL5H$Kz(6@ z(4)y}!=+Rr`wrcxEoZ)MXI=zqGt_O{z$Op04i0!}6CDG;sb6#)4Yw1?XGO+SnmQ4n zw@)5&(#B@J{`%{@yphf_`5-I&b$Y3le4Z0NK*~bX9VeFnN9+uF4BKfqGFV^t4W9tk z;kvOlUmJ^VL%dqMYi$C+#NZ@i4jk$=Xp_9j3qVspDo2*>>LEs{ZhKb8C7krp17fwe z=9ibV0`ZkMPt_**j9^z~*$o>s!1uw zd!I!IT)l9qUdJCChPSfqC-oJy_`ve8bmm@@p&oVB50nbDQa^_VoBOUF^**QRt&op~ zM=zQ&^v^9SazJ9|Xay{tw z>El!N!4G~ozU}(r)q3*bFJm12gi0U2%vue=FDvWIv4h!4erchuT)&pi2k-S&dItt; z|B++$;ZJ@7f9n7ROG>DMe6BC9)U}Hj>cYv_>gMKdjT3ibe7r`7#*x_>d}ZVf7)B)E> zH3YW73VD5eWIQ@0-^~R{1JF|$mWH0izyn^qBPk;5wwYqZoYF)m*P}i={qx08m zZf-fBeerv<`j&1%+a2`OkCJDdU?YCiF~cJRHG6PhI(#30S9&{q{8pju+AZ4GvOjX1fkkC2JdGa0 zue0tw{d?-b!NXZyxwgDf|NNi-i%d|`wp;H_PHLR&2mv2m$lD>FG+8!>jOOZ>Zdrbc zsvXK2b_X3`XVZ-x^AoS=lhKhH!q$Jww|rCGcH%@`hxWGJ95}Qh;u*X-lJ_$%5@@?GC|3*vohT-@sl&=nGb|wPB(QH*$ z_s7@j99pwvPgral5#keoS7AxV1#}6()=>SzFSd^$&6`I$713LE2HI}WKWP3qw80k$ zyuh!oZW#`oTsZH}edp`s8>i~AM;@=IUwNZmd-H6aJbkXtUc6FQk+Jz@w?@XF(C=uu zpug?Nv&_QRr_2H1qAc58Zlh{Pu#*Da#mqr{w^Me9jJ`o9YTB=W4Vre4Wt`lcXuX*U zgvw&RF>416ILMA!6sX@}e}X$0Xd_7yesI9diPGljz)vr27*_*3N!tDcC+}xq89nQ# zbh3Rc{H)ySH>zWE)t-m0eyT{2tYrtWL3`ti28=##B5@kdojx_`uwV2e%)0 zMW6gkt@uGp(@9q&b06E{fo*N?c*bVj;u8&>)+Ab#Wq;{(7=Y&kF#cGih> zOLG_M+}X?7S@zW-e-F4c<=qB3P*XFK<2I8;}yX2O#_Ssk&cj?i0 zY3(um9q?tjY2yo9Y+B>Vwso?wdRiyK5=s9@UbYUNhH>z$A8CJ<9sR?8W(NAn<4*G2 zg`eC-sMX&%%}>`i6Bf6vUR+)RZBBR@tX=p<*wVRu3l7g#cH}fr2dG2X zD8QC_HhO1=kPyHqhFcd-{R`uC+JBbm|DNE3{NhttKI!xcfZc4&qwyNhsxOF>K@?#j zzm{h!UA`WH{|_G6SNmv-tV7ccCze6CE$o|F$ z8J(_|UprY(fAzWAfAo;E3^D=0t-C{G;Mnz<`$6RT#s=-BU2HpPAGqTkH3;4}RyP`o z(N{uxBlr;?*3Wvw8Ny`cgX!+kFy%GDd1&n-11Or47xLDgE%Vkh9vrLN?>hvl z*U%~3_yV*Yf#VBbda<5-ex(lYAFB_4^uzVhk9~wZdunO*YOO8J2j@=EUS3+sZj=A( zzxi5OI&zfsNA}jC1LSvL23V$R-^@fl 
zp*1lvT9Z@bH90$0Gqd|@in1q>^$7>M37?+cN86h?Gqbb}oQUA3+-A`M)A)Qd_@HCx z`5#JE7Tkg1?2VI#9o~xD=X>_?Z;+p#GKZ8!etYGw9p8xJjQ-JD!35@iAJU-RgOYRcTQ~lVY1N--7LWB#Xr)b42>}DYUz!ce1QILG2(D&3@Y-A-`L1+H%Bc zGC?|ui(Md-VdOIQ4;?4`vs0t>@ejSbe(Xp8a{cwc@e}o3-}48^i^cE9e6-f-itJk8T)*=l8gEx$=kJuuZHzbJvaZ z!KA+hU}zKHCUi$%u=p;HQB-gTNz1>Thhgy2k|6 zTl>~}R?Vf+#+U-EuJZZDO>`xWKnAGwLB$PGT(DsuPA2?AXHSvVbR4!7yVCp*0(d13 zny=pUrh|{Due{3uq6cw|?lty=_v%Y+qdO}l6T&*fSuvpwn(HQr`}C#IiYwu3V+K&7Py|pV>!=-hWM?!Gu zV!vH>_k^#~_vw%Pl?OlNpf4AX+=8f$^-TWYQeVJNJUD6F#cmELHvbdf1z6L+((&!I zXTc?D4SV}xcm}4$pRfU|ysVxue#4h^mS*}w`VqtepXw(k+1gJB7N|0?5^kFjAmC-& z`wic%{`TpiFY$*RRJ9G&R^(eT;!(c++}1hd&2#g8`g;a4^kSLjZF!>6_`9Cq%WNFq z5>f7W>?ahaCih~G4pcw56lUW~M{R9*+UgMG=fkb1v~}W=1O1U52aNNHn_lv}0qi67 ztH1s`zz&>)o)YrJF4`;jYC(_*^x^`D9JzyX)* zpUQhY8BaZAT=NlUwr5&Jj9{IFS9|8YdWkrJul30)59%nBNSm8C_gv`xTR--|r(u{Q zGED?4gjQKKfMO81%}L2o+(IBohRM90N?LI*;?aQQWqKYxQjt!6c1EGEs6=0sla&Rx z00)2yH%^(Ifo@H-u`VKq>X(1#vE+aB&{U0&Pu8W&SF++d4C5dIipkX%&W_VSIG~WN zl?~f;8#YdihAV1}+Z`-ZfoS+M(1KESz(8eZ1Kh#Z>FKGObq29B7!T~P*|G7OnHa;V znW*y@F4e#JrT=gJ{%3y|BR^GRBW^3QhX(veU7Mee zurK4dR_)urw6o8BKWo=LihH=3M3(U z!)qE-m_eR(8?8}jZ)crHzDX#Lq2SJ+KT{`9o~#{s=m?E3JGd`jzEplM>^k&bT6R1B zE%4F!N{7*(6%qg9)-A*{tBvwBtfEmRs^IkF99YL?oY83-NY2=H)tmL5pIgMapQrKD zpNT4_5zk92D>QU)kl|a)6c_TZ&W5!1YJ)|Z`aVj4`dEHC*T_4?bEm@Abp#WEwUJ!; zQdW$=OJk)CaI0eM{4ihHMgAaa6uC;rS)w{qex|^0MfdRRL>pz@Szi*$j5G}Dtz66Zf?*aD%GYKSC&^YJ6PG-+S#lbWcC}r@nf~JbR9XomPXs^ z>KgE)=O}-`@1>EC@L6|_de_iPE5mdWqwqUGT7M54Lajqh11Hajhh~VyT!YsM7EK>& zgM~AlCtyu{dT+f`oav0np<)*4UBen0a(Zt%kLXLv_c@)nfr~t%%XBz3sx4pfn7qtb zdTQx9GGm9F&Sz$g@!ovof74lpykY&?A#St};GOqr5Ll0nTG}Zae&Bc`ZzN)Ru_rNncuzZKzafY=WyTg>{Dh+Lt9a-yZe(u8wM9PEp6n32<2lsiV9y;GT9+}huO|6UZ zLN9cbyXcIKk45M91FsX$+Ca4Q2Wt6?^p@3T{L4>`PIh#tE6l6gXfXfqlqln0mku7n zrVM2(S<;(NCi8V0Eb5yZZ7a9ToHegh%DB?Py6vJL{PwF8T^j$YAC)EZbMV+CX=G4e zCq8KteV1)SaU|3k>VtvdzR=1z>15|>hq){Fw5e&-((oA=91d>wkbVWdaQW={df~Mf z(d(P(=y9-qXKl4kzW#EpF3hDP>uW#u;rf}s^HcTdzxG$^-g}RybAEAkr!HMtsB3e} zHNU)6tI*jxsMlTj;VwAp=-6QREBd1I)4teY-1Mab|K9UH`bK$evJi(E`zi}(%V2RH zmJ>%2EQ#9TJ!Q8w0tqBvPGq&s07n>zu7BiYv`jXGH?vZkdb{oMh8@%kOVMF=K5oG4 z`W_C*Y{R2&v!@;OTbzDk=Gx`?dgJsPp@pAqa&myYC5*xlS))uj$@|Cy`Nb&(E(g^d zcpV(>uOIln?;|4qz~+2yFV5B0%4+q3kHH>%i2)oX9dKZi_2C?n;5VQHx@T)I-Po;r<;-mKxtsmu=FfcLjZ^Tx&VwOU)q=Tr@i z4Fe;MNqA+|6_U{7LmzlgefxKQSKZX-0$-QTyjf>Xy#O2E0$5e67yCd9E&;JyS0}{b*e}eHuH8&kIkw zbJBHuoVkU?Y#qCR?|k{%LY+K&p{l)m}?eFyMM4`h4&1N#rvG`{{r4?a{ML*BpXn?F_G_x;~n zfAz2Zc>UO4`A_PHfB1*$Tfg;F^~taOntJG=2W#)%Sw#vzW3ev5FBi_8t+VIw5nr|3 z({=9LxvW&air+SWeI9=iIot3XoBB{r4Bd>*QQzt_*kF^d<)1$2YCFYAP2xcusb`%i zb^{-0WPF_V$iaI4*=NH#qeBinamAjfXVs_bQagI2xI1+C?R&qOtbAvMjssEXMOi}e z2=7p#*sQGxP&zlehTZ&f4bQCU%;4 z#H8hmx(`gC2m1+LGT85FnaFRVA}h)Zy30-}VN=iPp8*E`1G;R)$w|h(L7UEY#9;m9 zANr4>JG^`SdThABwTWEjNnXJ1ZOW050H%8-U`>zbW!`tMx#vim%tO7A+fMu-xN{`D z#p}{fz0~xxcBiH3WJyf)EDzG{t#Z0?TAk&UXp4^Dmjk`(y=C0u6a6b-OQ&i=RKZpb zyhDd=JB3K&P9tq4p`G<+*yPj|mhx*qynFG|<@&Qf`wBYyBC>Wh{*Toj8FJN}b`RRo z?)BO?Kzc~w{h)gf1GoN`?M?j!{R3f>hq~}Rkq2d!^yUEwpklJ?p4Z86gElEQb z>C^E|D~4d+-J*53od4jx`JhDJ&KEi`#j{~nw z;;ol32}~DQk8T~LAopaHmEOp6c3%w4ZBnTd;zmcu;%BPASF|tC-fi2Zsl22Paj{0| zL7J;quGT6#+|OG&@R!y%yr>*UN8WvgJnqKGcblkjn@qezImPJ zZoWQyB(6X34{+kleA|QwuD&O7^62P+yYB_Ojk*rMYfD_ot({q={dVT`#rn11{9X7=oz`DN!{arx_fYuH3D%iF zCC&hUh_;QN1aR{C?DQ;r$UA9FJQtu?&sJA33ylWQEA9KTUhzA$c|>V_w6?LR6Mxo; zoTz8VUK$MUuomzo_2t=xM^3nL1+>1P{b(b|!#Hq`ySu785lpztz)Vfe+K)R?larI2 zO=V%x^yF+Ng}QT}yYlo6(Z+Ji=RLHk zdG@m$6BE*W7s}Dm< zaG2d+FwFM(DG%;$DIcL>f@>Et*?!wbsXO>DZ%$56MgPn%(xwUiw`x`Y1YWt(J5dM8lb^49h>Wz~p>+;3R*27me)Iwu15QLmAjfUn0!@K30-JB00>nSi%P>d2u3b>CgL*C#*m(fY(kKR{W# 
zwY+q#uF^JGn4gC>_!IERop;}f9y>$x)%Ku0oo$@XFT640xTy(dfFg%7&@?s-pru=~1C!Ffn#TcE*Wz9zE&YzzNMJ!&L% zjEA2jG>-jIS#b={*%sPHemAfs_F2g3>ZsUS=-2O{K6$E}n?2BHkL!ns02pjGGZBi*r+7;nhaJEgtfseeUM( z;J+Lqzo%~ctJ+TDyJKkj#Q-_|jfAEA4*J>x8fj)*%g_E&rlbaj@R9W(+yqXr6|dU& zCS?tuMB+(a^1aN_gI*t}Z1gR#+RmjibFxp1IGBVc~_hJyV{)=7$r%u?n|M{^P&!&TG>L?VUcGX_O0- z#;4`I6A%aj2mDJ>{3C+z8<%vh=D{z!Mf%>wNQB8F`dSw$Lv+$%|-FkPARLXjo4){2Y2OC5;nU`U< zugxR%!2 zm7pBL+CckA!UVZ}V}rN|hO@DJr&GPU>cBuxedaSy5Uc9&p~>32cVAt(avdYBLTDY{ z>A19VVXmgoLwtXEFD1Cjh%j&4$^&I*1v#R`xXQ@JGrbIKUBp8=j)>td06z`|kVd-48xgvs2SJq9e7uvRVr`fOB)#>(beC^_k!QOr1Y> zp++Z%GyB$^3&e>EZf$ingCWkGbcX_0=;mARIGtJ1V;!8qV}qJBM4=%xQlI$vL$$PY z9hmGS+(6;=W?P({t<6s3T7iTaz>(XwgQn^qp1|qcu5b9Z@2c_1(iMn+S zgmspALU2!T(fCF9c4>TXW+3C~r=PCh`mNu_X~TKP(N~6?Xy62Z!^e)*U;OUxp#e8k z=gwT9F}Q-GGe*`hxmX82=^(Dtn&L0Bm%It{1&VgfW20H5AqK2H-f}>^O4nRlP`fcA z_zTVD2|2ob%g=ml^w??V4iY=^7DmNMA8;uZ4vrct*ZaFm>xF9~_D=CrU^@x0ST9AU|nj?5MOO+DcfSM$^e5Ha^^@Z4e`C z$L+O+>$SYL8dx-z6W~~0*nnr2SC`;xc~+y3JW$QdoP*vPNISEHA%#&YaP2)`jq8U?quR^A<8 zwR}6i)eFwxQeJHw+4ypmhm8)uL6)t@97rcxGm^jsPUFm=09|dmzDm56japop=e-9F z8tV1@3$N4{{`9jL;G1>*!o}KIUaz}xD5eh{PD9cah*TAazOC}x<#aNV&Zb9q1zoPd z+q7Hb8S57Z5MnwpL83bFW#yHJYzeF9K?*c**ie3L`XkX`OX|W;$JTg;nRj#)*EZnW z=+>P8&^_c4PY=uUXogLruM-M=;4A5(@y&`?vW=YOY_uc+;m&-w4#Fkv}-3>@%2a2$(S5kuf#@U?^;EGa0xRH#?9Ytvyl85}EjZqIbxHWoKwc*8U ziyNK_e!KN@Ck^6jlPKTt=4iKkTYjWLKEz#nFMn#ZoAYWOPGE6B-%jSC5u9@y10c(- zNB`gd^Unh@Re)^rCRGnkvK16>tX_6V@J7;FJP*xoI)Or7fX|!t$ELN$s5r>C-id1i z(OoM5FSK4=-pt@o<3lUfnK-0ANjh*Ce6?_G{VjV(G259@+nZ0>n+_wCR5epl;$jyb@*5Rs?MCY^Ho2Ot&~w&bzdW zKBP?LNBny?aPrqS*5a>pCpuUrBmfVt9&9T`n>_&U;3E!O zV75NmeODIX<3P-v8xHt!FOB3|;>thPHxnn6k=Xu5hbEWEB)=VSW85ZJ$OnujpDBMm z{W2)|5k7#wF*?@1$!=$&-=%b-g4Y3bql0QrlF$OU%5HUKE!+HGIDf9*eB+IJ@x_+`j>GFNMh%hiW|7{L!5?t^B|_C}xEiPJ+_4#M}4PnV~f>F3^TWgd9Z zZQ5cC3CpS8Rp&te>x(P+BB$%Lolr2)$i!rAhcCbwtn$XC3xzj@e>RpCi1Ovq@>1yI z0Ldpl^5OcXZ~3;mVTT}iKKarsb@{@DbPy>wLZz%K7gRQ2*s0+)hw#cMQHll#cy2fv zeXW#e@?GRDhw;qJ`=;lbkul839etU1#vz#3h8Uh0h6@w*lMf*flV|GXQ)lZGaV}h&s|!~aYJO?C<~a4uRG+IEl(TMq z_$iar1U-V@Q`akZ4y0=vo!~n>G*%ya_j~Ja{q#@QkN)V7*0+Dhch=W@?4xxbbv=CO z5byhu-CgLvTnqDab?MT@x_tR^UA}k)zka@!;R}7h6<0Z8PxNE-+ie`X`%2_dJ;qO; zPlI8{{kDD(&R-l2`qSI2KZk{!jI#L_Dd zyW(tYB7Uqpdbo9*Wy)*O^OWm=f)g>?lX9XEQl7NmA^bqr+HGamRnn&QUE>MqOFgU{ubcFRWTsAjO z)BP46tmoH25*X7qev{@RqPKUbb^P*fS-lwlR2eiyi_@;1JUqfnoW27V?>j{b8u?Q| zH-1y_tUsOxjrp!s0ke)nzl zt_L2dy|dHgF$7N6(aRedbX&&G*#25a|5|@#Sp3@F)}OL)=`DbE65lNS;yea_P9oJe zu|C0HbU`G8v<)MXTf)tIVtg{;?hfb3qNG1ru@2Y8GQ=T9o zJb+&#Qq=?CWEl7!y8l6JY+tP}t%k<&sfF90w4ydC-;8ovx`$VJSCGV2_Yl8whW=Kw zpQ2fvn!dF3fvnZ|%uwBa-@)n|a$(L!K0W0`lrMhiR6Y69Y8{#$tJ%GiHG_|`wYFHB ztBcrX;9T3RC2afJ%6dKd=jx$Ef;G+3%*d#CH* ze(X0s=oUEN*|qI;6W#AlbZwVd+l~%CjSPZwC-1{g>*yOXCaxXOch{B?eJ*GwPuR}U z?{t#66Bz8z()PP*-_@;a*c5l7TIF(edQtv_m_!f4`qy#bzP{yE3eD z3JyO0$#1SD`03T>Uax=oFMhr@Xczfev(0sQfjEAWKwl!Z6WQ2U-$s{iwOs_P6|B?k zzY>3{_5s>BkSI2%kD^9L+=XJ8Ho!z3KeE4m`fvVt{q3LroAsV|KUh7~-Ie`so_eFs zpFK}|@(S{_Q^UxwbB^>k-F3zVeCeMj)8>;}c3UWwgK+J1UI0W^&?>Wlrff#n+uzD* zUDPoFRvDJxiPLPAb`@532La?l)1cq24fiuZANjyX>hR&i;kP?(J6`wSbz2=fa;T0U zJy3VvexmNa^LX8T`yF*R;SWA=Uwz;`@2d}g=zaAyAOC2*^S=9P96og6)7;z@aOuJ$ z>P0!*@W1|ulVF_qa^cb$@CZD}jruD7e`JL^Mpm@@+mVAKhY!_l$B$&Ej-CPgKX`QuolxfE%_-mHV>)+d&S(38CHCCB z#UrO!)R)p0s2?(zWgN@qqEFR)p=MjWM!8U4+aZtx+8H^6`G$wC#NkgEpEt;bz!iW&>9{B2=ngRJFlsV#bnS<2wHzcbYR|h?Kf$R zz_qP(r=Es|;j9N`gI6Cj_$^+!>KmAk$E^Ws(&-C33BcXS)O&5EH2IT3+i60%C6RJV zB4r|QnJ>|#pZV&KXM!&JSko`vx_yyhZ{vAy!JM}y=z#(Kpzh$l`a8Zb1=yc1UGk4)Cm!ew08^^DQ=!RO*pdngZk?|4X0 zpR|>Bpa}*B!=Zt~CH=f@1m%I#Fv0aPZEpSP)Thy*S9#*v{q}z^Fv4jq4$n?9a6-0` z8gI0;Nt8QnvG>Y}Ax&0tZH+*}pW6uCT+ 
z$ITrl94DEX5(XUBOTqSP`lR%~;Z0wt+ky}3D9(~0>65oWt56%x z`rxPGnj`X_TP!@<7*m@5Iq5j?krjt2H5TqUSUk^9q;0PuTIdU=PH zYhm0YKq`bdo8~uQXSIq3p+!-eUl_UM6A)9o z1x2JmAXHBR8e+MML@FAb2eRb)E$ZgrnQBCzSUe>&RE=Gr$i zRnuJE`t&ou_dE5uKmDVc7;~%AQ5uGeIF+mQ+>6iGAOFecYISWn-x-`7pGoJ3!eZl9 zhe2VH-JOhZ6N6{tNI}j325~bWBCe>16DovyW~#3~^!|6E3^%G@BZVWYyOi$(am|WM zYOL|`J019X7#Tl}Km%%Mv_AE%--BSxP(Qy-X5-kpTZVLy;o%qpL!Vyqly3RK9q^1D zscxI~na_MCD>TN&#ChQqS2?z{J%`qVdn6Uq~3^}@w85}YO12I9@$LM4p? zY=HkLL%#8scmmM9a!OO5LtBCjmH*y;bi&`dc`I#8Co)@U(iH-f-+FXV&RLL|wFHkj zIOr+A{ z23+B`18Bp^A6z9}b3o*2X{ph7fX|LmXFQ{{DRWDO%~fUW$}d;vXV3~6k{;$^V;~A# z41znI@?m)12AUIMB4ZjQc%pTpv`!KoCmoP}=p8(8+E_ugfqJ)}wu4+lVVSL5Wi$hu z9S6t(OC5>~N^|vlSWY6ymNPiWZNQxae7F*|2Po$*&(&xC;B)oczx&zxH^1?j`uTtU zztj_tKUr5V&DWX}2R3%=`ocm!x3EUpeo|m+aw>GpR@gWi!lL4A2e=TYAT_!{8C5hm zVmMT`@KD=1f_r#34zdD6+VCn438_QtY=h&I2sQ@z^0v)*Z8}N37%s1Loj*@$Y=;sV zCT*itQ~1(%6Cu}G3NBN&vZ^_0ysp!c)iB7F@U^_vkI+wL%#r$0uXK3vt{HfApN74Y zZ|ZCv$yEn8htC=|>eh8Ujq9ONw2q-K@Jg_je|z69rf>K#O5zh1oB)FXX>8yxy%bHK8)WtK0)Fz^oY<|E3brfD$ixVl@6-=%V9td3LY4}P0- z)a8=~h`LI-j}v2lrpbHt1`Ri7kE44XkZ@(Uv{sgPbDCa0;l>7%Gvm>_PNv)3*$n?m zV_u{(ZGr*iOAM<(FOPztWi#Pex8MW#@8_V5Y1!OZY&OPzS$A-Q9=%+pM-8vdIHXPjLflMOXW5a&i zaw03_X{yy!J?oE;rpp(bw8dG>KC_nFi|eYE%LVeyuMSMcsTC!c9|T#4(1 zK+`YdANXkqzfb87F+N|vHdlTd*#Ssb>aVQ(8HrV7pA&v^R}?#+?6jI+SggyJuVwP# zsZ(tr|B1(+=6tFidF=7}(ii`%{^YY?sLy}y3-za8_+owe%MaHh4?k9qKk-z(MBLY2 zJDG3wUb%dw7SWpyd}zm|v2v3J95&9~fIMB*C2g9PZ;W#jd%3c@6n&izKyWxX)K`mh zbM^J#@QFHc``wuUum@je6?k`7HfyARsD@p+*)!l5hs0FVE0!VuDMuX2n|`}ToO18c zZQ8c;^<9*LWG-FTgsJl=}Oy>F2;ejPbh4=N|}FnlOa z>IdVcB%QX=&Y(kw4%gJay|sbA`q(31sYf4sq8@thd+Pn~dmr)ov&+@B>x*^q%JsSm zPi|nZ=N8xM(I=j(lW(5Ehq+m=zjmrly?&-ny?mN@7wgQav-PXL_^b8z{>%Ryz56Th z+Dq^}_6@FC!A37(>z6riAj3WQ8Uv#fH99$26SD_ua_@AF`ze6w*_w7c+3~&T)7h*N zn;4mlzqWs7e|S)*%o;g3uo4Mcn!dh7vS+X-#l3--*}^5 zhVNg0{bZdvb2{H9^)pu&F4%#2H4}$6*U`i9VNPsgvpM=bPNEyczjJ^=?r*2- zO(2zidA~9Oee>;P>7oAsugT*1+=hCrw=X-6Xg7KstR-v|zM8vu6&TNE zaDQfW61~_14xNCY4wVn}A?$eRjXltp^7AS3r1bY2uRD}$r?L9ni8>qW=wxc+zp!Iu zh}y|N`pw?+B314js=n^YUyc^Po%~Nec5>Zf>CWN%ZUejdOzlWV20o>Ac4Z+Kv&kV3 ztIKT*xl&P_X-7am@mroAUeZUDyxY4>2s1O|+Tn$M4{eAL98*XAdzr=#Abl{mf_Fl- zHbvidWW?2B<7qQYO~8{A<28f*xa;oQ>Yn@VsjvOoPXZ72O+5)x>|B$|)=wLoiF1Uv z6Wa4GZ4QtSY>P+WY1QZe&T~3P;2-0ApSXmpEySxK(H!PW9_E~t7C}J^Yw5@Yn;O2g z5W>ZY&wK2gLS9>kk#SAaxf2F>YZv$pQ@3VN5I%$Xg2$}lP<16WK3V6^v}xbK{hPGy zySs>)i?kNu`dD@tY5VlY^}*A&LJ#aacpxj`ZoB>VI&$>3eBao?YghR=xy^52yFGs& z^s|kUiFfEXz2EHm2d~T9f|3=R==v}nhb!HbW$WV43S_ld;1m6XcJAXRAuHh%h~Yas z&7D-IuGDY3fzD9|-NhvBOVd(R^J}1&onrWU`ctv}S?Sa!0h)Jwft1sDB9pr15B+Y4 z*xS?Kb1*ZVtHMAT`i06y7w0LDKRJn7pS8RbkL!WWjQo@@BjcfP@OX?~&JJafLD zeCnBc;e{8{?!5iZyXwU86IoHb369X3nb_`ku~(t-^4dB!X)E+ZJ3BoD-D78NDg-Q) z2hg^)#g%@d*GvwOd4+{&)?K*7kGv_&PUKe~SZ8_2_NcV*$cjbM=|-zdo%n0pRsEcA zbMNT~4(g@;;Y@oaZ834wrILr?UfOe$6Ap1tSd$kY-yW}X66dYMJUM)oGL(6*fw6zZzk!w2`H8^>yCWwF-q6SLwU zy(+)^=skYwL~T+g zhS^qifIvRnzFGI*ad(YDuXX%o2PExhOxuS<*56z_K^&RLm|dLb!czx%9FQt?6sz(^ z`q!;{@UR=4M(1%qJi4b&+;y-9h6(S-CtqBu!M?%z%EPbMGcT;w;i=&|abjQHar|gj z755)ge;l&xPeJTj4$@?zQnJ3nQb?}Vo;Q15ii!{_#GW?)D?L>aL`=%&Vp)H!&iG*2-B&5?xYKKVuOCVh1B zGdbv2WxHGPByWNjbJGTy-|nXET*}@yohN0Yn|F~Rch?Ldf!RS~C;7?+_UYIk>UQrp z(UE;N2GEl|Tot=apyA#Lms?H%a*z#MBmt!+Oh)~kIJ{9CTdOrb)?fR_fs63bk$(7U z9eshFrZ)DYq;G3v7@kJX$A&YZa%p}({VG()Y{An;4U%?pVmkeO*2QZ+Eki!85Z2aV z5CE_*6Tnue-okp-%jZ_=OV6FJXHU)5oAWp7^4df;;RvxpHRC@}YX)0X-)d>hsEPnOMI=nEA+)iLVb5 zPLICwSszeY*6!K|r+&Zx{=4h`d+)9jM-SK0qX+Bu6G!TpFgmcw8rMCz3kJuT6T)L~8JaM$v)sZ6y z>JIY1=dRo8J@0;?-f{n3_5Sz1tG@0NpQv}e^Zq(8J6-)l{joRt@BoY+wM_vYxbF^N 
zi#z2|Uq^k~L%GUuw{K5>BEGKuH2kDyG^q6_jq%dYMc&=r0L)iEBQoFlsUyM+@|MMM9O_8g2D&M1G&${IejMERlwi`v76g6^>!vrxGLNJ z9v1?r=X0fw4t#a(mJ>pPj~MahbK8$8MDZ{1Jsn%g1WHmEPd*CHJHjh2Dc&<;2#TR+vZ{RFWv#Wo3rwSWJCI)36%c5a)PbfGY`07iX$DxY?0_!WDZflX*)-P~178Nab3j%n|r zH%5m>>vul$`*r2|dg`Vu*tU+30es18WU6|n57fZaf$Ez$5WFGOsBQ3ozS@J{(_f%A zxi=g8Py2en+3p6m7M-f$!@j)Zwu804x=?-FYtYco5s^Q2)z5*kDc;HB$_u!VUC7Ic zis4lrDaasDC9cFw;~Xc@W^$--8OJolb0SpZV9L=Mk^eerq^DQ?@YrP1hn0N?T&@bw zWQqLrheA4Ew|joJBZZ>7!v4lh+bK;(Gv)w)IT=a+LAq-9^eyrZ+}i%q&&W6qbW$gC zA&!2z>E+$l&um{R-~hEh$`>aZBEqs;miPeq#tGN(&wC$|Yvi^Ar|CWA58eZA!X>SB z3jK+s+$X+v$N+0?=D~sQ+P(UYr_jm1#o#h8+hJPqXLJ<96dw)#`j*| zx{oaJ7rUe`foB?za80FcYij=tly>4#UoMdvtcLU7`WX(X!V!+`$qmXWGIOE#r#}6z zPouPsw2lv%%TptvlJ8)hv)vQ6M`3N)_8OqQCcVnSNAl1a6L1iucdXs%@}9^v@Wya%dTV{-X3ZnGzx#*JAxu?=4o%hM^mJXgfI|UVJ<{nK=bx^Lk@2Mcqd)$mT3PbFgYv^D(IE`LNF24DZ5pgA zD3Wz&91TXW?R#>t)P|9G)%aE)0*jJ&&_tU=rEA~JFiyfdYPb*a-LsA{<=k09!GKq) z-9ON1E^)Y6|XLO`@T;UJi`P zBRPVDo?2LNpzLP-gMaW3>e8jFp_|(^`E9bXQNQywQy=^22kS#0eotoCUcQ6_ilMPn zE!((v905f>3Nb8Y*$N32h$LDj*K~NdXX&Ej6CM*5(-#E?cKT;3S8k0UGV_h8>X4Jo9XQ@lPMGy?)zn-+{Vz%`Ko8 zYuWF1BUuW)2FQEqwB-ud7-47!kLYwofy1kI7>-Sk)!3xpA4WJyyR^L098f#PpvM%j z`2E`6p1~UI9|p&B<@W~rUA2J|lHD4ppB-cl!kBh*+Zm?_e#d$9a~fH-68zZ_#w}^K z;Duek!=hZ?bP@uFT0WN_z-hL1L7}_LgR6))F?4OvmAY`%ad*b7-`$r-UHRra9U(j6 z+(s>qAEYrKC%dE!U{Du0A;gYzzxk)L7kxrxosBd?fju1qQP{rHk<`%ej1dOI)WZg@ zaxDG5XoP4$k?z*PEp6#~@s+dn`=9+{{lYK*R(<))j}Sai2lgGq3BRNE9y(S%L;KTs z-rB`6gGjDcZ~~3IuMzRv*XbYxRy8M=yp@5O!+Ca-ix(qI?&h6(TXHo6AK+`Qs617s$H zQMb_O24OT<#X$z3fv?pQ$iagvm{ce75(K3x~4d2IR4=(l|9Z%y20O~=>{5m$h(+d+gLa(h2lXq`BEdp-5l=j)4)JYqpP zq~#zEq+3aeYX`&PsLEg9OQ)pKq0z#Ugh^v!iaBhT{7K%bv_Y4tI^ z@$D?kY)WYyh1juYJ=g>J3|h%^&6y#t9r4;MDw#$De|DV4f#BIy%x!j{vhS`fI%Q<1 zQ^Bdz?SNQxlz6rdz$o5rsB|0Cx9SIelC2k>UdzVL1IH{0GlRnC@0Q#wV~BkpfvrfgVTq8 z?6q^mRc5XP!FO?xv)SHZD%n%}_wB@79?&&Sf&z)hm~3@80ox?|UAs z|N0;NcXb&3c5P)(UAi!bZrHAo@ri8Twz{I8RsNjF=eFDM-BxdHtRWklPHvJHoE$}c z$}QC86`2tpfQGJ&x8b`7yY8U*P2|&+_(LN$E(gl((%nVKEqYzWV(ZG?OZISeGT#QU zFRv|w$F;h?FjsHBdA448_4Rt;`IqX6XP&7?9)7Am_r|yFGt>4*R?kzS`5#Lw4xOPyP60cG?TQpIAuamwIT!seJgs z0g=tME&LW5I>?-ZAn_%mFyt$*H*1wLc4;KV26h^li;L^^-~;#9M?Us7G_IC0!`rn8 zJS%haHHz&W?H_G$^e{PjOg<m7a;oX4Q#l)qOySh(rJ*Pxffex7{#2p>m3!e7v9X>q=W(Un7}nxFc~pR7Z- z-CmpXSL(`{*X#9HUZ{TT``EZ!vBQJZ+ts9$2C(Qyb%AzR=IrQ)tP}&noWiY)1yKK~ zBjFnwvC2Zeccw4HJt=v%R@9NYO0QhOjoyd4yPX}}a75=&ACZ=ZM35(ck`;Ll^p61l z2z~_m0hB;#($K&KH?FX(t5D>vNy~<8tSm_r3o^HFIEpbenjbTgKm6 z@2S^MU93~5F4v!Y@rior?6o?5aiPBOm8Y@4U#j2y-+!-u_wqAae;x25{XUD_<1JMfOWLv-1G@G6~B zx9|&TDN9c=o#K>XCgLH!XHJ5Mm%%XVWXDb75Z9Xes15ciPDQZ1?z5kdvMl`}aVog+ z+JQ@$(In`3aog~l5d+?-tF$i}xR&=)J9Qww1bDJ7ratuh*Va&c`KcZ|Tqh@IYGP)l zKKOwT)dTN%N9{kjH=maC8_4_j?+s7bmUlIgA5#vwz^lHYorCcagJ)@}z2Js6Svs8M zA^zK`X!}AK;zN;abKRZG)F1db_!K_JcR^m#mPBT5-ayCWzx1O^Ek~az{ywxx$2s{q zVFgi4y_HM-r8bB!afQ!#9e;TX-UB3cx8I9(=8XQByr@pjBqP@~V^?g8yC}i7mu(Zj zuNq&UI$1BTWJGzg;RfiInH;G*FV|Cs06S*L0nwXwWV zr%zw1tJf~!9}U&9+m6=*58YqK@46jZF;a`G*lPG?eqpIr*VpUH+;ZB4a9p`FjlP)m zO~*96uW#re4G$?_{iqy?OY7t0DWT0(9>=%PCm6*ybKt?1Y_`4a$MBu#)u&bl(uN~o zb>I3UVndwinKSNvvdEHa|dMXJWy{jD;5OD?k5j@W5=$1q7&Bq|& zNg$(IlCY2XJxPdPR| zR7VaXuj6BxRpCP!DVSCY0yIk+R!NFdAn)O;x##C=X0oU5z57VL>m7Gt&j+yy`p{doys%s=*z@OKc(s1zw?0pq9{8R^<25n4 zpSHm?<+_NZ?b>BuiwjpqNBjf;G7W+U2C!SmOm+-S-=X?Lp5(ekeS6Un;5?rq(_dEp zIMPpJ-H0EN(bysI7m-f{)30!njcq+Y+1B(hGz|a(vPoOEw}xfM-HjTww^vDz%wlg4@9G=0 zy@77gAHYX*rRFC33w4c57q~?yT)t4f-^aZ;6Zy2*Nu(V%*5x%~>vQzVRiNk7`mrMXSdu_8`Kfhd;QGGYC&l})sTiF2D z`Y-G7%R08s_WR{!cMn~xqchVrimyC^pRC**JaW9wzIncW{WpJ=HXitl-lLtW|3NwS z*}E&$^72ZpIe`k=>pOWwpQ9(@Cc)GC0MX##@jZ3q@WGlLAFNM(^b_?z{*V7tz2lzS 
zYi*vk=&93n=FFS5NW1jr&LSQ#Ists#*m4jK+{lN(?&RpmJ~EiW9{Z~tl;d95GFT+P z$O6b51SLK^1QfW@(3y9@XOEu?IY#Ro0LzI)TU+k(rjL#;anS<&CVx43!CiJY-PvTz&k?)Dzd8=ygEp-{%?X77!eO70 zexnorT`0GA?+p6!U>!etxK129n!*3O@3^y0(9Sw>^jIA{bTFSYwLjfS8SZYDeqQ}j z@MD{_nR4NFP*UAYl|%?S+FdYudXbSHcOq0~d*M^~oAT{5bAqzE&HOT8r>zbC&}6*J zpe^O8+Z_nC&qaQ)AJlpYlXc9XB&fBXp`<_%(6-1YKXe0$r(6-`b!py@iaCQC81dPtvNt)Y;l3#DH79mB5ldeZc|`WWUf$X7CLh_xALtu_ zUtBn(zEW!BJx|hc4i2{gZ$I^Bdi#OJW1F1d1Oh)V)a-QhG5BeVaK$6pP!;9GywdjN zy*gfa+n}!fy5yWbW#Wqi61iwD@{F93aeNfzFZ`oCXIC3T7ysew&N?cI4z|Kl<3jczxxuN9)zsucz(5 zvjwc!>i$9Mo>(9gUAa3jUgHOjSO4_U+5lMGPU0n(8tm96c`j{E?CG9f+nerIwShjw z&jYyCwQF_XZ3k-<{eZKPi2;LP*ECLc5f^Qz1MLU+3cVUmdiAqF<{r18)K?7*%-_$I;In{m~{8x*!7GNPTUCsFUq)w~tzzxXtOLIMcbPwDATwmLWI- zi%0xWWsy^P=00Oawnxw#&A)|j?StqhHa=(cjCB=%$f36G#;&}ke)L|vTW@)_;g1?o zhIoNb;X$)Z8&R~XPYsLyLMH%IcZ?x|NI7|yj`s10?;ErSk$Le>1ZZqL%Tg*y=v70X zquB+I_@B7}1=f`~%9Hht4n|%ivhX!IZMaK4fkFDlr$aVUEB>{y7vhnJ{l=XMGZ%V) z<|iKfG;#6?0DoN|Qg}y+Hbi%?62x%tEv$?924H(`Va=H~tfg-U&xRX?yfqXy!uH@e zWy=Q6G?c-coqEpLTwU6(`D?57ho5~h$qyf%!FWz*3r=!Ek&7TI6z-H6TA7|vlmQ9C zOJ0Z`kn*1YTmq-R)QLF25cu8}$2^2J%1?o9uW*{ycy%KULAU7{8=IoBGf`9HQ?++` zx>iw6PdxraJ@w?%X;}MBBUdOm_~CcDEJCBzdrndch(?^=H}K4gx$vQy2W72ONA;P6gU??Y{QtldvFSdrs~-3_txF_ zy`KUVD4A2{r6jmRqu2&6JlcPSCRYIf2|5H9E}Vl#>-CTR@&7=*Zq)eL7={Z)4&&Pi ze&o>M`kue=U76`PH@{HVuU$itK{u3TYe*)?RLC6LlLdIw-o91-ThEOjTl}Q|KRsH! zw;zA*nPhK0QYQG;5gDHtBV2*wYUv9zg3^_Z*8iCQ4=_>?)>2Qcl!ox zVA>J3o=*6oq0x?F(D~V_e4CO+h~E^qvp0izDlquPjw&5|8`3&u>rNE#9tG||mJQBG)#rnlx`4t*QzgY{{7VE%XKdayh z$X%SfaU6txYKK+g86lHJlQv#n?G^L%Q|JiHf0;`!M*uvfC;zpZ$jZ#l%I0oJ1VScxq6pobHs+IS` zZ)ckvD^8VR9cN*Z7p+%!M*#H;Ha<42!6^^eDWY?z?6z{c_SUe}t)oX$9ykcD2@4-W zA3Nk+F{(pv8a#38*WhU|nGYG+LBV^g>&(eiowVcA(>28@9tl{F9WYTapxXLcy8 zmyT@lq<|8yIc)F;WteRo7GQOiv0J>kC3hQiddnFKpAcE+Jsr;-bQ|3G(Rbh#N4h~H z-m#;0=u?Jv)J}1?iELYHw+59H5nQov{!UJtrg5>kK|bifN1uK)gSccHn>#%tx!v3mH=9<-nhhnduFr`CGvt!X~9m55$gEq zAOG>1I(Vpd7O&UbrSo;=;w5;^!5tulf7IOqA-}f9annsV;ECuof$ko_t^PDbwCRfE zftq9mZUPN%l9v^Xpe@Z&45zE*pn%ZWRVBlLUf>iiPE(ZzcDtIyZ}`Y(Q=o_^-pdh_Dt zT3%bpj!^j~DU}=_@f)zy@YYlvI<&8j>^l~JYv1&~8X6t0L3D*XWQ~ru!Tmn`2v=&j zWhoq#&b%G?!!72NDSeUk6*~dYUz_Na9XqwDoB9X1mB;8L4$MDc_;lEFJD3*WfxGUy zr#}Aiuci3vyCzHbHEO*23s|ItzA~XWo7>@9CvhO z@Z7y`JRm$=I`oqlj6*<|Oi7Q9lc(XI27fEVEfb}q^5dYHJAb%x|J@HhP|rO4Wc3XX z-~(*HrvuT+rcoZur)@u|#Smt6B=V3V!Ko!Exx%NoT;&sS z2=4L@+!Ko*dPQvdg1ZbVL+9*1#ak)V-nBT2HyX&u8|Sp}yc!#e592}|S(|H=9X=TvA5{iE-0ho+wg zbO(d9?V-8;j(W><;>GK(lY)^keNug98nAW_ukaKsIOIuR? 
zr#>PdeUgsr82BKKf6yG9darIIU(3=Lbz5=Um=s8RcA5OQjSVj}`pOfT7{Xnnf6KFl zm|uHVD;cf(oR{75yL_T96kqav{72w0EOi&>z-IiEVW0`m1kXEBZ4T#E#?JjvbBP;zV#KyX*71>zkihbS0W>=T1rLdBEb?&!IVCPkm{d zTH6561lRm!f?+rey3N;6ba2(V1Ye2Ujl<=wcbufRVDFXs z+>j)a>^ssSA)Q3s@`($*2)rf7(obdLeRC%qJj4$~zQ&NV!_dP|d8{liAQS84N!`G$ z7y>(HgRI%+(<*hC7P-?txthueE|%})se$3~_-+q><D;4?V*@3Gwo6g%Py z#m$wq>L<${=>5*S?y5uhwQJ}M?VUWA4t9Pq!#yxa%O<$>q2aI{U0%$}OUi3MiPys0 z?k><%`Ho+z^tiHhw~pLCQ?o}$vje~u{))cgQ%_!~C!SlZz1Xz7?>toRxc^Sl(iCY{mPh>0L+VKWPIhT}ti4k|8E$(tlMTRux-tW> z#Ip}_ZQX&>-Sj)TOVO5-z>yy((rn<%xO(4(RGavA>bGs)tu8MDCwST2z~5UUzMtb< z09VErpKGL{ExGPaWGl6>bUmMHa$%N}gsxt_hP{|a2A697`o%i=%Bywi`A1O*m+FHL z-Cv`ly>*Q^3)g7l*&d-TwwGr2P6y|T(A@Q_wMyGX-@~2cZ1eOC?8zCLWrLDS7Q`O~Q!w{lxNIj1{poehZ;&a(Y;o>dBG@<%Q z?iebMXUG)tI&d5KkUtwu-b0kH$6^UZ7hRh`=<}qXaD^l)HBaMS9{R)S@=Mk zXB+z?yzVXgDMZ6^lM821=<$e7jb%&qj)27;| zxr1HE!m2I!a%(-j=)xrj^6YbsUrl=DUVR(im`n+ZTx$39$0DcrpY|tt?`}J`K^Eq( zgD-d!pT>Rw`vw-d&RvI(u^Vfv>!FXd6ejy3^?jAU^rup%?7kw7sI%=j>yow$wzBnE z1q%pilVAM`^{xt7J<#M>pW0Z`8(fi>x=39Stcr4RMQv;+IVsU}nPB8EbRZsiryqpF zXWMYfkhvru@@|994u)zIrBk!<>JQ;j7i=THyY>%(djcgpsl_WerhMV(>b&fdMBRit zuuE^^W|FIV+6RD$?vn1r?dx%Z7h%{G+dIPODO|oc?@R!tq#pYoWPIW~c~^3Zzx2&< zg%rtK{b9RGf53?rZF~y+i+6s$kIhtP!Y7%e0BpfOGLc<#Ez|OiXX(~Ar*-Jk0_%Cx z0dL_Xq=Wa+gJ{B~|7hm=Svi0|^@paWa{`F|pEWehJcP$1p8}PKI=HkiUYhFvIv!H98T8srlifba$H~4Sk=IWIMb8YbCs$TToe=V2; zYH|-p2Cv=VTk$D7S&=u~lKOQgp5~pjGm&MH+7q;P@W7LGS8sF80vqU_MGEjm`qr@R z`ZpPLCS3aDa}YesI|7%?*RDKBv&@Am(iWzz#D zi_Av0h|`IiG@`|;gXES6T$wa)@|4~3)KBS7R!KZB(!m3`BfFHUu8Feb@ZPKfslnpO z(@aQuOa2nWXM&S=J5!bw_KEQ1-6D0aO&(G&NoaYUDB2P6X_AaHa?@;~X`?n{Z(5zq z8{C_|Zd~a7&wk>ePg7u<0MKE!+$RI>Wy(A^_`J`mL7xHKYX)eMRi5h7Wf9S@c-QBWAX*ARCA%9+(~vy;hB`YvIC*<$tb$`2pu0EwYh+@7 zz4M`u)PY0y5~&#q1xzNg-0muY9D-zd#t{;6u5hE+u3oxSufOtI{nEevw~0FH_k@Q> zabDXD!rO1Zt^V3y`>Q0S5j=OjmY0{!hWyFfnKBV5v$ueDM-lDnArE%ppdRfYOjwk* z|L`#L?h$D;P78bcD-Utm4Wmln#o6e+G+4*-YH!fmxwidp?d9GzBY3a>lRi}i7pf{tGsDB7^#2<+Or zGjeGukmty-9hQT>356d z>=w}*?Rin>b7=twbfp$)BrWi~Oyf$vb(K$)M{w8_PdL=8PAGM0olY7BaEMXXad4YW zJHQ;^KYZwT{guD+AJ_eNK2VQ5@o?mBSDwFVrx;iR*&0)E)fAWwUFtwxeD31I@}j5s zi?Jt-4R7D8gFIR%XglTKIQ)5MgGQ%7C2!1*obljyiofusFV+9{FaC9m+b2H$@tQe& zpoYfw(ZDX8aT}aAOgEszP)*OyWIIwj`2t_3>~;|!n)I{)(xDJlILX$l#JJKnz@JPJY5Nfzf9^gZR*^C{!$%a%p z_(u84a$uo+^0k4kerjqr<#~MadEv@n^bhd1NL;h+|=Q402}U z_om(idENjX<=2(yqazdb2Y>v>83@|9|6t@Y-!cd9nSB}*icx7ow&FO5;FLDA;Qfh_`G#3@*AW_r2JG68&wsvVn~>Ker~%7DHr`e9)iz1cU;c zA&q|0x^-+LA3FHxesw^b1?a?+<`7sfaV_t};fL-DXTG^}+T%|A{~PAk=T)kEy2dW6|PPlLxg zH!O70C>PY7G;Kv9@h~r09Y-1`AoUL5#1FbP_nsOV@mrabluZMa`kZ|Ije6{{Kdn)G z3-f#Mz3;7m{15-0eqvp^ypnR=)yC@E>7H%UF|dI<6J4BLseS>!faQt|J5&T*ygE?n zK>xL?SL*z^GxhrGuV)MX$DVjRE1Yo$e3)}=w8lq9GKf9mpjTEc zqgMx=$lzyy2BV*)nGH_|Yt*T^a_Wn@&G^7j!&@d$WS~qrhJ&Gp4SB0657@X0KIBMn z(K2cR@n&gx0e-V1NXj*Nwo~``;Y0QPKk!4?$aQq^jat5Vxz^_9YLYsQ4UQQT z+IospFXSy>80MWUNts9tWl9sv_mzOpr+be-7o(t9rZX z?ELGT0gF?6k`)z{=>#sY18fe2VH;Ya>Ko~=nf(Xyeb!fBd$pc_?)kcS?OL6`a3!mV z&YrtiUwPySe8AtRKYZ-*T0);>LQhYBxpLnXR0G&)`M?1Xg3T9ebl#2cWSEb%mXdNC$=5@)WzxwgA<-c&`tN; zeRqB4bDs?_9Y1!2G}s5qQMc!lH(XokPHa_%wGF_>PuTPQMh^rh6TU2q%;XWGkdpdQ zPkouxNxtzE=C>bjd%nfj9HwtC^^0yocdam?=urpj^=DntJ2p0)Rih62J2}K%HPlb0 zYaN7Jfzg$zZQFM{<+XQh09Rc&F)@*WUnlQ4@miWV_&?;CGDd24YNGbd%+~(Bd+X5N z1Nfb4WN)$PY`s}o00)QMxq>HvH{Juw|voLp-gRz0`6v{-MVo3C8D$a%S5d-+s! z{4n~;??(Cwne~lzbS6G5IFm-&UpqYYkD3po%@vQ02BIdog=e%`!Lj_Ktjl9To%9ov z@&$PSi{Vi(@{9~*yd3J+#zT2$8t#=t%g9#qp-+>WZk$$3<6jSXA^KF`lV^Rez{{a6 zwk@YiAdb_a$JI_!{YrV*Ng7rq9ihl-R*%4UBg2!F@4z|j+vu896Ph}K*=O6C&chl? 
zC$ji+CAF)XE?vG<=gyz6rG>>>pp_?v9>Ee$2*ey|l@@bt}`Yrku4)Rck+Nr_e{u*^Qxq?O-33T(3#4Ucq_++kj@;04{e3_0+!cRQN1H$8TY#A_0 z%kWDlPWRe$VTwsM;my3q*Ra2Y0O_l4?WSpdvplEWG_2vyeDp&yNbEc686Pz|$?tK~ zUf#jCTD(+my!1+)!%wnJeE7uCy8oT;s3Y)z-_h3YuB>g>{Nh|)I(xP@siPAn+$F$; z9F`;BxdW4Y>fluRi>Rl(tl!h!X~8Yr{dC{#)O2j(D$jC?S;lq_8%w*W&qc7(PvCepglfo5BNu)TmGYuKc4;zzlGfozV$(r z87EXUp24=kC)?nX9r1);^HdpqZ{?P&pljc&R=izMIR}{14Cn(RB0b+E7wI2ZP!gF=MLe^c73gE z(agOAEX@`)U&8I8^+Ci}2BoNrddou{?z7~yKBjAjGHW}&4e;59i~lJ7qJQu)@{GSl zUfI^4UoS9f&oUv5v^{pJke3r~JhIamc}O!?ScAOQws2ClJAP~}*Yf;4ZJ#-Gy@UH# z>gv^t_1dei*Bhr_t5c^qPriyj|6;xN%1ib7YcI!7eVO~S_~xfho~qYQoh0le_b=AD zGq2Un&BZ#tzqda0?stQ){$a`eDStHfWw3XkruWX&^Upq4FTZxK zcA<^-^9F4m`+ZmOQLdo_wITY+(q1|VyG46=>%BHm4Ky*_SBH)qsQXSFssHBh|4e=O zeGk>OOV{esg>%s68uh1k&@xNC$tx@6fkoY}&tf_9mQmVdDeG#5tP>}^6W$XChWQT% zLmWr#@w4s?52h4Q;>LdtgST{ML8RyrGHw%t%Zq~kYVq^Mg zfWsZa#8TQ1IgQlH=7kmPzz>YzrmMfDr$_n?!?n;>yGusFCvj{WBt7_3K1FQvKctg* z7Wd=`%z#4}>C~R+<u5k?3sek3VXgHO_aKpwTl|!%Rn9^EO}c(AC$r zX|oeI;nGGLi>55*z?O*LEJ&00Qh%w8Oh6Am1rizI*?uD1(}E@by8TGjKQa=2YWyc= zEUW1{ecR@%$0rQjl`#)xA^z4tPaUNF1(`BYtlLIN-vn3v1bq?9F>mXqczUW^0E;yC zbxqcD&%InUDzeuTpBTbZg`qvPLD0ia zLj(*&|OvCO!^BokRq z!pM=h!C4|3EL_?J>%t@X=5uF+im!a0#An_I|9MATql<6z3{G3#PD&Cs;qXuoNA7v< z_5($K`C4A#Sw!%*y|Z5A=LBNow05Q^_u${MntZf+Q_h7w=brk@kCD{}EYN(rjgBr5 zU4at_!z`@TMS_Y{5nvkcd4Urx-nxeJRz6$}?`~0`VW$A9KvlnrZ~}xyc+$9YHj{+L zVNnPMhh{MdIA&LN>-Cqf*8k)0{x-^|>OJqiqYfUvtzLTRB^V4PGl=jxg9QX+W>@!c za9P35fpQQQ8a$oH)^^Ijc36#6Hxed%o#*Z$Vr3YPE}L|L+ z#|B2Mb+%z7L;{B$8pd&U?$*{Ojn@?#M>JMYX4#sIa-3Oc84}bLK8`JQCjiKJnVYcN zTld{QSwHnR{$lN&B>%NbwYoGMMjv7L)64#JMkUIEqprh5jIR-m7*6Z1)pQ&H?*S}Oh_w)Y?F?wq6?0y=3 z`^%M;!$W=bb>Hyy^-urVKdHrqwYqTeY%R>qgD-N7<0oFgsc?3$mcupWG;{|a9@&B; z2Ykr;rl5@f7NOpLf9r@N`RBv)3_g<&r{7a_cI7Ba|KR@Fbf{YH>Z;$ZQ<<&T=;&ZQ z`@~cAPyX>gsn_2;T?2!CofY?eH9(_FWr)^-Z=D6~KtAOHdM1I`<70^QL`g1X<)>TB zg9pMkaNM1>=6hE@xrM&lON~zqXEyTf$4}G)54^VaMAJhHL@;T3Xr2meuHlG$wL%_}{R0Nb_JfPjXs2kTaEYPnSOOzBfA4tt=fa zkFFt8$y;|zQVG5LeGk;%`1@T{|&h6j9?yfd)doPrEo z#>n8Db{v){P-q%eZ#guyxq>F>f7hui=G2sN$Cn zo1ukHYjokwfex(Akn50{&W5gs@cS$r(QoQ(;4rVq43Bntr9K^8do;cA79Bh+r*-~z z`9!1E@(E2jQg8APE!0KQ-TIPw9B}eAZW>QChJjRZi(XJ?HT%Ld7Y7ErbZLCvaH3vS z$NBA8Jj`^=-F4snb^i5>^*{Wh{|P1Ghs2E&z9Wq-ojiZ<;13b z^lpQv)2U@S0401ryJfzeUxS0_&V0k&Z#7S*)7EVr$GM-JoUX%%kJj?a8ajNS{@FkM zx%%0E`VZ^i(fgpsME&I7{)zgjpZqIz=IUCVdgIMHaNt1g-@mVx(X&^tUQgZaEOLea z`o>Nz&aKqSimO((vP+Af`*G(KzZHox9DUL{R?H;?chCv8xW}+%j;cQc_=&LK)z6)C{LMi5+{&+c=TQ5jx?tA z44q*}-^p(z49~R5G92nz63f@--&Paj?Bnmq+rF1Zp3NupX#8X&IIBO%|H7s7*t*pw zx4V?VRnoujJrCBO{lUMlJ^jG4vsUMye6r3x_k11Ocd!ml><1bm!ta)=98uxqZ-<#& z8Tml2;DA(FeCV57w;pUxwzccup^gO7wVPs zXX+~U`6i9f$?2&&a(I8uUA&ZubNgqfvc={f{?2D=Dqht`)gPZvT=m%L;X!3OZHRVQIq)akZ{2tC`5zuFVdDOuTyuyF&W?G$ zb(poZ%#Kp$z87uPavbPs^wEC%`HGR86H^W@4MuM*FR#|bgnDTt4SfgnTrInW-=go8Nwe^az&UX0 z;8#}5#7+t$`ZtC~{?`6&fZzV$H3RqPh8_4nTgQ^lI?0cAq+k>f|4p4f zce&nm&%Nm4W~JesU` zd5yYQpXe`jg!y(($};~RS979I^t0@Mado8jA{|lSr_Zgpey}1<+EADcj9Yw zKl((TweQPk2jo4(xzFM>wmLeJ{J2^lJ1m=y>aI4ip5PLgWf4DVWFBdkp~F4Y(|yQ; z{*UcRL?s#BNmfyh6gg5FiFxa6n zILa)2X&}BDo8;c3@f^=)A!0egU*K81Bp&r>X?@@LkH6sJmj2MblHR)e-W_Z_k_V^h z-lAtKr;EGr9q+nvlO9AhFs1CznIhdLm~Sf_hNoTwAKN7s)Nr|p>t;sh|>&VzQ9kW1?9z?gn8 zH_f8#idrU8^_VJfA^Cw@J674-El6H!~2p7kf2&_78&Ygo_@bgC9 zxo;@n*uEj0@PKrVUnLc^*ByL!Wl+tj&5?fXXb6?Ba+rtd$WNa2Xn1bds08Tl)pEMw zp*|vay|=EOxmdmE5hq_j z4QdLmm9st2tAm^HSa7Z_dK+H`x8O$j2o^ok*rS)Wm3V5CXI$8^0w7y8Jhfi8zk9Mi z_NhCof9z^qK7TfT`Op3QTu zX!&z>VZP3vIbE9@>zPPq*+clSL*ss$4Ic^u2%Y_Fw)1WvXifyw7W;kUO=NF;Vj_Kc zzwnz+)W7m%KD=aICj?>aPF|L`CFy&41-`^YX|I2T&!J8hz8wZlWg5?I~kMmyOCIi#73 
zeKw(Yd}IYl9+Rfsx_MAAlnh?-Ncr(E@Z|y>WDtRS?&%i7E=BYnOF}YU{xw1cgfcaCBy0GC#cqC54N#l{Gr@FRRec+@T!-H?~ zG2FweeF}z4W3O(_tTA2hh& zGv6bNDIb3GS$}CLi@P>!g*LVK+q6;4!^M!RD~oBL?ZGe9=92M*ii7wk0gF7)7486I+%9gsZ~2282bqOu%ew2s3WqpF&RjGg z9im^*`e}2(Q{Yd1X8Xjk9P3IZH8|vs!sr2X`zG??E`sJKm|mTM-jNSp?zADtX!es9%+Zj1poG z_Mf=!l%U^l-*%VxZuaih^z2l9_h0<3nw%Z4tNLqx+6Lb~lP-u;H{EeXSyS&IO-|_1 zh+2Qj+S~$9!{aqGd#rx*H-E4G#?Sm5eo58ZCcHph?2p>HISe1-S4|wMI&gRGf6vEj ze?U*RL-TrAfgy8{4u96GSCM#rY=+BMxL^U^e$ zy?Iy)*DyL)(i;*c3069BB!*FfHLnS77hg22<=IE;4d%F)l{pbastK$PQ~(2IcLyHZ zED`EKy<{H`4UgX`H@$|-!BRWKcU1;3kt;=RxNSf2C9;h&2PTEXItNx@4|x$TWz+I& zccAu857!6Z|Bf0*`|R1>$hUGFiLqlV8KjlHr}J$m>JMIbZ}io~^nrT!$NnSU0iz5O z2BZXFx9*wQrxB%af30=_BuvyC`pvq0`9l4_{=5ICmX=-p;))x;$=Zjqb~gQBeb;w< zSH0(b57FRWtjkv}lQpIa0}xUu0B`$+@ZhqQ^0qh9!gn3|c_eC!_~(!Rv%t;(VEC3Z z-{uN!X}Fl^D)!TV7pHr-q%> zi_5DxjK1|dckoPP1xJLmPA*v2XwzkzV*hd4(l{qwuI<7+}+778Hq0*9PfzZd5kDOJ*5pd-w}qs-OJHzgj=> zpZ;V$_rxRh{EIKqAd{DH_RtAC;KxZ7IuOpjb#=A_|H@V-#suER+o}65yp&m}KuWv@ zOQ%2%!@DrVz@qTM3IUCeGTFszlsPpAW*r&v{h81FP8~ULs6PDB572PmizB>JLpV2S zphFwmnaX_>D~UB`%8t)n7{rwr?M|!;eDETmw7Q^|9JJ1?HDTpvU4_F&NuM3%Tut6} zbd^)%=36^B(8{3=ZOh7RQhriSN22Lj(?|>J2nUcyR%}U38-c(Y!y^d;J6C?~y>K_! z6HZ<}HmT)VSfxGJ7<2F%eNVi0YIrOcj^&yG2q@64uN~{^du84;u#!h+?&ZFf%Zt=0 zs};@H`lBE1Flw|D4&&Omu`$ye(1s`KZuP*kJY|Di91Kp3*VyPp9XWcm?!5P1^$Y*% zU)0&NXE2h5Os)q8XN!|FObw=-*$Rwq``nNvPJt>$IUqKi@bka-khleU@oEE79Rw~! zm+uUBQbJ}v!~cG>E)6tb6t~)o%q|p1(OV6#;u&R5NsR|BRy<^7Ph#0Y?LdfeNkf>9 zj)M;#e#_qNvP~mxx`SpO-D{U0Oz(h=ji%rO9(G2vuL7r0lV=?Sc3|oza5#H5d@bH07nWh2go9H&^K5z6%wQYc*-kw{@_VOIMIOTFbI`)+ z%00Jwc3V;hAe?nPHa=F9Q?psM=>W}d{r2zF%db3Jvj0ZxmoKMuhx~*XKMx-ofw@YtwM-z2>;}J>88Rdm$T*d zWN%r}GI~U2FH+Bd5Y6HApUtOxRS1-p_Vq9u+ zobo3o@L%SGuNm$S?mJLF`olj`H9Axqb64u*D=*c`>T+o1K&(6$eh$NiY1_RGKyWFr zP3?8yHh~JEMYSTTsGJGR#5)3&iOv~0KeKnXTwUuYEZ%(MY`ylzse1G5d3*{Qb?8GsCFG7H`n1M##r4SeaP2#^ zFRO0VzwT0`AMEN?b)c)?-Nnd;qdFl_35Ip%YDa`sv_q)zH#!Hp>5Hka)QSCrerM4R z1y^UbPMVl@(u<*BiMSj1)_(d(zLnqArTS)eR31EdxIX%!kJg#fXDHuJih%$TI|grN z&={Q>e*@mh035nS{Iwb1qK*g8D0S=6!iC?omPaZgHk>wkdPrN+g`T`IIeB<5eNzLN zK-r1z)psS_`!?ZH-R&T2U+On9HjP{&<#Jn_YG};7SkpnY<_5)eGE?!3)Fasu5^B0=-B2Mr){d5WlF8>rf5D z`+lxt1V3qvw!qlXNR9c)2Y6s=l#}b^h&z*v)i8MNvr`1WPM#YOp~#Fon>dNO7aTfK zBkd;SJ=>_eYZeVccj8!CT&mUOjapq=&8oXwb-_1VTVE%RpQ~EIS96fTEmTQE*y6%M zEnK};*DhSF>leLWtSjd)*H!e*{MGBVczv;!=9WlDS>&}zneo}h4Q-tDm5th5ftR88 z@bEbB57o%fXicK0tl#)BbsHLn)Qx}l?5fMThp1U8LtR`|qkH8JYI)&*>S{-6(_Nn4 zgA;fp`pX2KvJr8lBT0ax*>}TL7~viM#isIWd9*ZHaS}ZvU%^XZICW^#qxysLHd5N4 zLYLY`aI#2t9x{}xF;&47(Rg1UgP*)=hrTdDw!mTD8EoL{fVaC&WFjK4+dhf!(W&2! 
zw3C$)O8{^1QR+szv}>>zZlydnF_zt@+~VDVFJ(cT`iZU;cQjj^3ti`z(An-Z1YhX; zI+=TEc`+TWe$HiiVF{kc9s-B8bBCP0(=&B+|G_$T=m@^ykvef0|M0fk>h3%5tUK>G zk%`#HjvT4Id;Q$saP6O-uEPfo*6h?|O;1eL?96!QeiVLalbxKv?IHke7g0iDx)7=e= zpiy$?kdM-VMCmb(w=GrOzZZYHmDNOSX+SG?*M@RKB-PG||;ErYa9 z(Ai^$mAb1_W{xgNiReUKUpdjoj@7(MhU!)MLq0cF|IbeQRpkBpwd=H% z=W5PLC6w>CW5`09V$x zYGG+5a^g3cF(SYUozf9%Wvwd}Ysd&r6P7Da;v}KAr{0Fx*pnpbMD3*Y&r`g~6H!Fw zlSnN-`A0{^W=T`Rm0j^}+FZGb%yw>bx>u=z<_0=%`GffcMGuREw zwmxLr2cGm*d$!SE(KOMz30K( zvD=5>hi&xNO3g2>)ZF}XJ^JMH_53U6u?v7FIn@8id~Y8-WS#N9?2AzEt-@am@a-jd z{p^`DbqXKtr59hW7oLBip84vt_0_LFQ%^thRn8~tnJ1sBuRim1ef3#R!k>BinR@D} zr?@^{Pk;5Pdh*F9cz&{;=6vcI?;q#-6xYYdrrlmfCfAU&wy34GN%WhQZ#B@(48{x%PH@sDf;SxGmq(h-XcY0e zedpT=qJ(pf4fUW;H{l8V_0`?F^Tbr$b!;F0!y*W!Eh_K8cTVnew;(_LH8*z^-vPQo zbHjTF_tfy%Aoj=4Fj%G&-fmC_cpN+AP9a0+er3dd;2V4V6y^|eFi|H@FVvS`z8)R1 z28qK0;I@je5^jY=VkJOK?+E))h z@>HF_x>kMoXlq;Ivj=&^ufe~GkBaWKe;|V)!UVtB?`L-bSt3t`pVwPQj~%X$e&E6S zAO73Fi|;&>9gt4H@p|a91pSuLg&+g}6Q2;;*p^V2y0eWt=Y^3eOPK}Q)K`0#f6^^i zWlP);U_8RCwgV_8*N?Fu&~GdU9puq;Ma$7_JlkRBo-I^A!?>;815XjG&eG4Z>=xcV zn*Hf75cceq_QOG@~@1xt9jra@4b} zk4{}k9NPI5Ni2K<{YMoXPXkU&rOx3Ao?AV5hKA|WGlh;C*M>?`!f5fSjSqco4_h0t zlSv51;(gl9;+BMO?+eUeexTbe{YmcZeQXq|u@J%o+S5Uu}kIt&{apUgTRz=5y=olKomcyY)%PM~xQ3 zsI9U8T3Q=Ve2c2=geWahwnkM)r{7y#bM>O{Za-`v?M~Zj(EZ#~#)0G$ z0m88Gz z*g>hpB|K$4sdwXxMpJ?~Y4dY$`&l_le>-U$e9hzl?%O9WUt3>%Gw_?X4tQy@;o_W5eaJs~g1TTM8d{TI>iuQzq3C+nY%UbK_-=`8@uR4Zu)y%Ff?4(F3r_%eCA8gRonm@ImvT9J%jKA z&%MJ{gOk-ey)QceY&$7~e3f&@G~89t{$Tr)g`rE^?93rNl-UbGM|-wv`TB)==#E1* z+qYIb%U6lLo^o&A^wTgzM0ZKYkV5~=dmz$-@D4 zTNE9?AM?Y67s>BR6(t^}EcsN)(e>M(UgvlqlBy%R7ZchJP$V4XO#v*kO- zb!gYy*4r^J+b3S3Klld>@`DcY7Qx~)<4cre`#%t&@A>0s{G5vW&U=9tGV@Wt0!!=# z0bRW;Bgk87ee2*)dC6ySePKSy-#m;PrFY(!qQZuj!$7G}!V!dHJv5wd+^9 z^L6qwO5&(jIrJmNN%C&xwS4n0m+lx+yENYQYCe%$^?F`*YM3yd2x~ae)-)QbU%>x` z-oNuV-uu=k063!@yidhLdKlgN1oI5zCraLMRk3oVkc8cO_tufd0e~bPiElBvtVOsh zAX98k3?V|NBJX|zV0C#Hr*^sih4;&I|72zfR`*ga5;jcQ~p zVm*BB-gDdvZ^-QY5zTN;FT^pdA-?mTaA6f6jnPD~k;#YnF{B`q(m0m@0RQw!L_t(p zrRqS4iqMG@nfyR1qh=t6Je*DD*3=s1tjZyd$dE{|OYY;G7@ngMxIOmO-r3>$$cOH) z$x$Z&xJ|iArVR)Rt+tO&;ER79D9zAj#AEzd^31_a-u%{SO01qIJhr^|JR^d*Vw5%!UKV` zdvxh1Jpac-eCOIp<0bID{pdU!{#M>^g?U&L4a+z5*|Bf zH#@LTzUKDL&(#VF#MSz)0`Ysxc4+%8{WQujMy?2ThT8i2R(1r?*|wv>?}GJ{&k(#n zJvm+z6QkKV!EeX?f${aVEy`G}MH-UV=N4;Wd5uPib=IkAJSd+vMT_F~1*ep4x847p7aXER+^9s< z*zI0iwp3viBYEhgbrhp@R8OTmXt%hsQos6Zzf!jyJXn9>FML}K4^P#rCts%Vej}X) z4ic#Ro3r68Vz-1H2rWj32WxC}IGs$+_8z4{Wg~E8$cCE6nesFoz~Ug}UzgLH7JiX_=4?r__ z3g}Izp^Z{!)GM3HON0C`S)zpsfZa>G`umNoUE# zF;?PRUC=43(ZGQ{`Bs^1POeU4I*fGBD6ZA+Ctndw}`0Lb^~R}YQ3xu$WEI&>Uk z3-6TgO*())fYnu%&B-REoqoJ)uf4YNiI}vl(ZCboarS)Av)`jJzRs=&%@WffBA+zW z&~Mq!Oy`oi@UPrTFGNlx$|9c5pmE@4XhCoq{H7&u3nE=sl7>0@Ym@r9lKZyX@2DN( zJ^JNG>eqkumx)MKsZ#pLfWWt0IWel!5=*b_M^1d|!$%J=KfUr>#dh=|s zjlA#>L0ix|yC8%g8hx_WocQ6{d@L(`kOpcCw>)C!y*QEYJi;>>P`0C*Jb52VLg##g zin>azYd@a`yD*D$aGe>o&5cF9u2uQ`9yY7aWL?gfBBcOWv6R;_C$Tnhd)@~@Xg;; z7q8CM$v01Bo8j4mdnvOoe#c8MyjoW-U8}R_-mKGaypah4P9C{6 z;$PXh-M)QmSx&yO?!uPSP+{!AlukeDX>=wz9HZ3yaJ326(%^FduzB4BcE2iI9;2beyYWos{XPo1}TR%Erf2m)mh~C-4m#q~plu*MIU8 zb!~nj1K3UgRG+Kg)JU0WkG!b2guzacHu$P;1Vga>b|&Hwre0Lu^npT0u}C~mBHAYK zDRlCkc}oKblR>-4XedKj-8w5DlpEj1&MQyb3EIPE7qlhmKj!6X48Gy2Q5Qt4&Nf z2s9l28m29TXraldH=znUqr+ZD5a_>@kn+p1bOIc+pwc z=j-yND|G=obnV);w7KTy7Pv0ppV3ai7LC&`8G+wj?KuLE?43DKv(tX_e5R(SXEN~O zCkPH6JjC^I9X)cWjvP5rhmIV~|g<|b*0U->0W)? 
zo?6An*gXK{+6f45R{i^1h?b4dRHekcSo~4G8T#8({qQ zY=r;ted2&4k8ZSfn6F8!Pm<>7zJBYuy}Xqd`6o=6m21MpcdG+$EF;Hmn{WZI?%TUJ z_CdLE8(#-eGXWsjYVgO-OK)tXJmogSc1$O4V3k+&OO$1K_wtqN_0{KJDjVq|lhe_C z_Q5zA#xW6Lby{rw}z)3UvHDJu`LdwkD!z7ztZ26_Sp$; zt@YzJ{XBk)hy0YKvZlXud492;dFEoR!TW34Lx`o{C9606#T9wz%D&e&F;G&suokW! zL^dcxyP3gc^3;;bo6_IGDsAB2ef#U=8>ee-VWl=<&(#fVA!VzlGO^A+A$?fu-s!Ve zq*cqPuj=RizWeU1<0p>QJMTDL|Ih#GKP#+5z5LR1_43Ql)%8nPYkg$}8@gHp@SwD@ z-)#sU5N1D|^o z;V;h|#-MOb6nKm$JFqo)KQ(hjv^&x_PxceCD3pd`bcM^ieZOOQl0N zrKOHZp8TPk*`o2Zb;`fxcoqNF$;g(Og%g3-1juiEVZu5YD(fv<>cTstwYCX)aW>m1 zPa3OR2j6RxfmgqWIJXAcz*Sez1_#6mKLVF9TfRr&L4M4i0tm6MuF|%$Uo&Gt=pJr3 zQ`y|>$D4=X%ahhcKTO`#whG+}Z4>iZonNkH?5-=ET_9oQgtMEayJu<6TAS5y>p;2G zBNJYU6P)T(cI2q>LI*6)Hkm;EkF-#Z6T0=A+Jp;fMI4_U{7o2j237~=U2q}5X(L(x zq(}Db*P?LsvHhE_f)*d@V$6#aewSrCXwRGcEX%$; zabQ^N3SsuO`3df(b(BlJ~Y!dc|<1=O*#&_xMF=R zHsSEGLmfS(*UrGD|({uuw*SKy~_FutgR z(!mY3wf4}Cz=t2)d!%k+@BB=P@Cm=4QXyl?hj!A<>hLTEc}&Ngkw_8BaV0F zb74?cfg?!cPg&BBMTVVtmx(9vPq@VQUX^d}JMBorg_vwY>y|fgDU-rQ_zq#&(S>l| znW#O7V%C&XVwguRZ`IrA%7cYVp+vLLE}lB+ItjbUGNE1^Szd!YdB%5?-u`vtrXDRH z1LY|r+$ziern9oLyI1mvOsnfsdF!TZBbk}ZVCBUDvPGQGC)f6zu?$YLC5H>WfA_Dy zr%eD*Py?U<018l8V7JprpnPsyuFE7m8&5YN5GO#G!`PXvkbHC`NKZcgZlz8ByhEUM zfSl21EzRG?k!AJv4-;{q*4A#;xzqFY`7gds>Yh5dZx-ibZ(Y88B^3SUgzy<6#bubagEf{A5@2`JkqBao94}IcOwfErt$;jEul27HD%!3#4 z+A#Ch0UXw!yfxn2!2aYDPu6e$@1Lo`;endPvDdKsjbgu@c{>iH-+uRdE|)J|%Ps(Z zf74F0+I+W_&`HQHpAZ5cPW()ql$x&^+S zVY`#hFSu2Tig4ROPn479e~^aCFnn$&h2J;w`;;@Y)3x`|!FuSy2kPs;>Fetozv&z6 zTfgnw>b)O$KktX@`RAUg*WP@yR#%oWI&G%_I}t%DPRdSJKRM7ozqFRl2jTLNb*y*3 zs{#mdtn9dGD9E*gwhoTFr&yLVn(~WL<@Bm@%@6#^mcCbLbWDOTS4Rc_EGz0r|Q`8wqob5yYHxXzU!TJwl2(ur>MiK|1ly-S1Bfdw*Ak4h`y zQ2#UsL#6M^M+X7GZFH@lKu`}6-}0%WMl|^fuc^pS-EHTrWvS_fk7}iU4mfyZbtWn7 z2-Zn5u7@3sI+upEMpDC%g;;)@=wOF5c%sbaTtIj9g5Tx8*+XL;-QeKAtgYVP(Xas* zkcRhykE|l13f*&q5XKp|+CgAdnTlF*ftMoQS^!au({2;e<%jN(E#%_1 z@Xng>O0d($;zlm9j`FhGDP^E{r>&6HW@Hxw-gRw*U(zIcaO+AVodS6|xhHe#Zk|C{ zw}ZbmD%9@|mW6K#_u3lDEkEP=laGYC4FuRYF+V?Qb9aG(6nfI)(;%YG!5{vi8<0jWz4@vp@Gw;perQ*>|FT;wSzS z>N-$=@JD}?No!JO@7}%CVY6O-`PI63>3m(kekI%Gp1*Le&YwG5%gYN{eVR!=J6myP z?SL03Il#L2y88zTOP*L;ThBmIW_&{r2a2*v-vMsIA}`XFce~23yxN(p6*<+cMmrT|yiXoVz*)@9);W>FIjlzIWEQedl-5XzZyK=>O_V z&!?{1DZh`W>`6beYdq~u2cVEe9w$tFBwP?9hgG_DkWVt;NVxI9Z3dNHK`x$=@4Po| zZra78rLjB$Nuv&TMNkg-FnTD!xGG;imiqu|RrJNhe7rCMCL7M&LU0VcEs;Oip{0fqV;IC<|R zB4MyYGc=6eA_y7Jsv2POyl-UgUgUo#A#tz1Y-OWcfOF)+oM`7Pn=gViS{2+piAwzTpMXQm#cy_x4^MYq~2dtBB)3$nwfU9XooWzVPKgOZw@V zsT!Yh5PPVG$3}ufS1gRVQhjhZ+iyAmo)vY-V6H=cSAVejhx&OQ>1^BBc3E)u5+`;F z=ZzaqyyTubia)3zfUOMS69TIZJ>@3-0owvY*w>UZ(q9AU$Qn9elFkuRL zotmDh$#Dl996XpJ&3Lx=9UGsmiK*$Dp52?tw@&z*n4GQ&CyGu?W{c(C0dO)jhDsW# zf&P&UCdiLYvRzqrXNlcfT3L(zbuyOUXjZ3XkbVPwgWtcg<<|20ln%a2FL|>0VRoEs zq0e`mpsnu*6PTBSN!sf)_Q7KY>G79HlDxLJmCioGeP^eD&&EfybDxeGO#?h{p7MSC zQlFuPd`V!^nolf(N`tx~nR$NeUTT|0^6HBLKQ!*Mjh?|w@=U&VnxKn`lY7$kIJn?M z0SAkT>$al}HtLP-N(UbD7##p_#}A=sK}lx?aBIT}Ne8KuPvjlpFpc^?oxrxYInCQa zn)c0hKPLzr#M3`9Z_8D`+37h*TSR?5*aI&*F%USN;NoZ5YALw(W-r~U$8 zX?uXzMyH{~>m350&`^Il`a)i$ZF&QJEsC28u`C(P=Z5=S6UI9uavD#L)t02ATHl<* zfd+N9I^99gtxa?;uqqSkR=>DiH`Z%=%^g49s8grjsLeG8q zP9N)aeQ}`{78dLB)$4Wj+B`D9&2<60W0}2aU)a9Mce$yY+pqUI8KvJr!XbFKH-m0| zD(5CXa8K`0zANh|a^i~zkK&LzSyyn}t$X))3m4EXaS|9DRFCaYeRL6PEd4FxCMj3x z#b4yQ8=edjmI)lBYi&XE^xkmesj;D@8JkBEbKjMVq_eHo2{#gaATJ4y10Um~!*%e$ zfeaEm0l+QaahU+cPXJI?%d|XB)c_Kjnx~&Yn8??DEd5Q}5QFGnGDe=Z>no2xQ8$qH zk%=jazL`E3+mTIf4TAQxp|;(b2|hWpcYob=aye}F6Pfnut! 
zMJc)#z%CkC|LVQZ2U4X>BZXcWsFGd;984ygY%)Nk_;hrYW%115J$T}oLB*Rh~tj&EUVT}!$@QjJa)!}e;l^VJL#8k6At5%72$fv04%FZ+RKt3 z;SU{yk2T5bE9`*M5{Ip&ylPDME7IrXcA@A@Rw#lu(^=1n3*};biblTbPW9oP`G!}bHGEqJ z(xdmc8@0*?c!0Ai`6I1iRQ(C;egR(+7c{vZ5KY0jI%H`g0 z>BqBerI6%%2rsz-NjO&63_48UcXWbhB;p{!N5XzTvV{{z1415RRWxM>aBm6MS-0GJ zi*co?snOhD>-7!*@DmdPkinth!5AAJkqqVvR|1W%Yi_tPmgZXV-uJ&RKJm!oem@9{ zP=@3URo;@F0F~%3bPDHN^{bKK;{ycqi(hzOJpTBjaqR3lCEJYqZoEa6*%I%}MO8Mn zF203-Sj7ddvSE|*cr}QZRG8U?dMDZy3VtQz$K*HRucVDBj*fj^NWLuf0QDi5CkVp3 zzkA}1exjcP1v_q8ZV6*?RsE8$q*xvDeK~`_^k1%qA|LT#mYw`S{M18wPZC|3oM@jx zKYS77)pZn1$l&Z^u5$XPCjcCYH-Fie#1Fjn+w`F~W{#hT_2pLVJGf8EOYnBqVuLMQ z#S<0TDG%l=C+h@d499yM2X*$k8PLq6JzPO6_;nGV(Ri#TwYv=?$A*D{pTNv z_kH}6(GjiNLJM0Z6OZ%IX^el}YhD+>^zPq`wJVq63!nT{Jo@?1i5K%oqDFB{3?2Av z`RTK96=(?aOZ?D4QKM9k_&4BymB!!(xdg`M;AnJKg+JkVxxF0A3(Ilt-1&Is^t18I zi4$@0;>DO+1^XraO1kA`XBd`@u>%o11=RZ4B}1~+$1XLaarnT#ICywpjE{}R-o1O{ z&O2Wedk-CmiG7EP)c?DHjw6%>iw4rcfWtsEx)9k~m)>L|2($X}z0fOclkZi)@RE|J z{{?RJPWrymx@04Qc;03Cjz1XPJt3cTS4>Py#J-8WF*-cz?fV(%7ov*#DKUCL za+I)3o>Tw>8a_wb@~5g{i&y~%|I5|@vnLoRUY8x5gjeFzpr#&O*(h)2FCP>;>tY;TocSuP>-O8v+^bhiIzkW_!eUBIko*T?rt)6MchF6}7tr!xY-5?l}$ z{_^&i%8*qNdSQ4o6nupzL;50bww%W=B%TNU=|GTOz;@9g>rNbCP%u7KC)-z*L5Bq9 z*zbjR1_;X?!g$7>`(AAn{1C(Fz<0q$`DII@z%Lz9l*d4>@$y9hUhuVmi4M);Mn4tc zRc7Fve8{h~{{{Z!pYxsi8n#0IvmHmA(mv>2K{~?;U7)*g2d^b;!s%ApaO0A6h27zv zav6xBPLu`qJ>jINnA4PcPr-Vk)Q9v7I8VJ391Npg4T3>=Fwxdq{;L?;ZceWRH-D^R?c0 z*K^gO=?lJr7x>NR1&$$$zA+IEZNxiPJ%JM<`5STdW31eLJM=FoUfK3kQCS}kU@N^M zZ^_Ri9fJ}~oI&rdiPuqqoY_x%X}ObU+OMjeM0#u|Ft8HHXEM{?QC(;sVQ2bks)zFP zvt+QpmPhA}NY4(Bjz}ku#mMM*uwxWE2lNf#tM+>l0sRAgYV(m#&0Csg=QT$HQn#w^ z$qVXnCckA;c(A|cdiKOpd)2nNB^^68Gwb%i(n_wn@2$&cRlfTU?2p}h#^fKgyruZk z@`8NRC0`d7X1s$U+mO#M@|*L~FW66Xd%GpyNA*U&d8QwT;{?E`eQWvjz|7hFgRJJ4 zzsy9w-TU@>ryf>F4yvyB8&i{WF*iLMlb5g9zhD5=dCt#CUy6^gNes}d&D&6#QQ>WP zbSMU!9EmrU{%@~jp)KANZ_};>cjzWoQXM|9C$5n%!OA0__o_BIJ2?#~-9TOX65XMC z(Z*zOGi8$FZJ@8@g<8s>()mlE&hRR2o?2rdmL#87rl;b!e)kV!P4yq%wI|xr^Yjg| z5^NniCmX)G)RwNT#9O}po8r2m3GcwOwzT9Ghyu1KFZCRRHqT2uSj~wXS!cq)1n68S zHunPx&z^{?U|(MF@qZJZ7j57b`WV=?XQ?%#Z%eUNZ^veBIqrJNE%B+3y+8iyBgbP^ zS@bqW;_~EDEXe7UwO7%|zS63V!s<8r4pxgkC19+vy|vmz(ir-OPot|qW)TOzQ@JY& z=kF3fbz`BQmAC6rt}1rDptmznoRr(`dKV$}w{G0#LwJ=ZacGCA1T+MBV^ax9Ii*v% zd87E`fqqdkPXG|?R|7C)Tx9NQ4g?pRwSX_q^!}Fh9tR7-g+EykmdJ{}J}^iX{0@BS_xee6Q42sMj}!VYLP>5)bGZuEVx z6?Li$#_}i=Pt+1Ek$Ln`*?zZ<5>Jec48*Qo z$anJqI#jxqV1HbAWHK_}SQi2x!C!7KsBHSg&@1%mp(mCXmZYOsy%jTXO;1kgc{v8q zyRuQsE&0@v!9Myol=qju<{0#%&udA1@dtnW-ng=`>NMLyyia*75VS3~ zEKu*vn|oI0Vppjbesg}`knA&t1FrPjlHU)$3j7h!Q`F4{7U43UbWc4tQisV%xlGO$ z?-7sL7qULdcBw#9zSz4${uN5x@LZKmcR!TzA>7M`d;4|<>?lWZ+&5J8f99k`kgcC^ zy1CU!eXn;%4)4`(SA5GieoK7Q>%K9*_ig_y-ut1C#)!&ntoz#o3FOJi+aYL!VGFFf<&8_K&OGH5`NTEqf&+>uW9f8;io?4*l8NF(1OC;F;Zv ze15swJH(?sjeM=P)}^y$FBO;mQ+!S82x`Di+Adz|r-07wQ~S49W!I|oxyoUJMiu(2 zDV&Kj;zvc7cqe&80yQyIws^OfMX!1%xZsa1Sqf;1jv#DG&+$F!1a7fEISA-&`z5<2 zy^sU)Hvh|3C9fq-;X%s@`dz`aH*cjJ^3zARlRTjwlJQOeqnpgbN<&-UE9L=8!8;A; zJ#opaD!kB_4vdx?ul!d*O2a#)P+W8q6C|XQg)TrEo@mAmH(e7)Zn!~hYWZ|ygE639D~#^9ZA2Q+RyUgyZFNc zyCWv{#n?SBkKWtAG^!H^quV>|dTee==ZgmTHsT=n3@Cv9a{q}VHcgC5LpuQi0x3F&&#xthBw;WIC&m?e0+wo8&<321*k`8f6IRh^hY?-ig8vP<#qXW^ZGOcEz-N_R0POdkz&vusGN zQbrOlJ_0~~Ep1Z9AlLzbwre>NLR=$Ur0j5Iti&?xHV_j7^uw3dx0GdMvghH-7asWf zhpK&*`*AW^k>teA0A(+wQ7V9fu`Ikea8yW@!RSytzybt3EE;4S-EXgeFRHC%LQ8=p zM_>jTc`6?f9ODRsdwYZ)&N>?u)AmNaIUWyv;&i<3P5&;MvQf>(X54V?wTjQcXLjh} z%$OJo7$4y0nXFRriE9;r6~iMI^f$i)XovDeQDN08L*`4!NdSVCBs9bYzh#xWV#pNI zsO@lCFa_KzUj&31_A_$}F(W66?Ox~%Vyl-jK1}9@tq^a5DIImvCP@%5u>r*>5YBY+!HT+?LQa8(>PL{^$BKVmS8&iiVv(* 
zh`K=}MQ|sR@X9PpCICG0xzEM7e9O1Q#Y@vMEXQ{jjo7u7ICSu6yyX5D#jpO_uNd}I zr_Q=@iDLu}s0;WbUpj%26}JB&xRE9NnQjq~c|Mcr|`P>EPQR^LwBZsb4Lw`s-(HAq*Y*8Y7p#wz{t~ACFs?y-VJJg%t2>^gl zkR29XQ$paO69zh%MxtLHdm%U_oaJ_JIK~KTNA+V%M^xIn@K;u?oi?S#{`P<9lQb!pgy=P^Cd67 zJDtgqgT1K(kp<-~(3p+6od%<-zyucpjvNE(B^`0{mgQu(JIk(*Cq&3uSW^QTd1fYF zYjxH6q1y<+fnLNI`J5f`1&$Li^B%M#q`=bcuI7Ls(`B7j`a?q0SLJ99=#U-44Ag=b zCipNDUcd=fR(H!eSNZ6|mKq^GMqphAQ!+^d&J*2mMvSf0w~42u3s>`(Wnb`#$9NB) zVPplqam=|#@D>6NPQgnCYz+9$_Y5p?J)y=2XNFQ9D>%GOs_@4*=n1~9 zTde<+euy6LfdS{v{lc~?`JVbfzx=Le;n@YCWGpN2xNw*?cYRbF*Ov}uX4=Dk=wH`a zq2x>B!^#SMj{G>KvK8OJvPB2J&?0&uGX>so=+!`SLsIw$2k3@@hTxE0b0@a&j2zPO z!h{lLFZL-8^ky;*vV=SiG#fEAz^X4+Wr+9WxK8Zd6CeG^$Ko$P_9>U&uWB~s)F=Fj zL+e}N5ncwDuFqzlJ2V0P#V@%xo;`Lv9y@VdSlEoaZ@ERNs>J--nL;qkb7%0lvci6#;m_Yo-Oqq$!?V*mg=kBfT;Nh6Ov8=|z*WY{6pc zI9t_Fe5INjhHZUW43uPf3Wfht($R)X=?n|^*gu1zKYFGEoN|#xa0+jgt1dioPDzjn z!-fng-ps;WtmzSYfGf+nvhP)LX@)a2SH3)c=&f(oi)zfBJQbVLJq()i>~kL_ zKeky)LAhpYcnio>7C+ONdV>Q2o%m4|1Qc7iV?=P1*yeWti9mM0c6urn7F)3{Sg**2 zKk&$7@$j)@YAnv87~}H%e2j?iR@*Ca$4xiKZ~d2FiG92F$%kvj!=HE{W~MHCXCZjR z+d5bA0djR*SGvVziqAXvW(pNKBIvLUReoTIL8F9sbxE+y&c$=5&&SEr&&9dZXM8U4 zQeivTstlQ8Yr8S@;Jh}$`PeYnOe9J3*r82R7@U8t&;1Q3SfAw72G#ZS+RZiCAif@Tc zL&MQQ$a!(49rbDx_)Yn&i_Wu?GtpjJj`ng+-kQ36#alctEy<5=wWGtNE>?UpNr^fO z)TSC#97V$U$<5|~2SaEKPv|;6H0}v*yr(YpIyT3A}A8He!e~Awci4Xqqc{gb%>B?%Cb@}g# zAbdz}i7Fgs=VFnUaE7iRZw3_Tq%bdwckvN>Hu0yFP;nBkf<+Ke50_0{0K91Iaz|i* z#CaEQe9KkzEBPtEXGng~dj^L59ashTW?^qjeO2Zsf2)k=eBcj-ac_uKi#|IiwOX_vgIEBB<*1Gh8J9p(wer#yRv=E#Wdsw zJqw4be*BDkiy^)Q*Mi>#Ww~WP!TBqx^C%{E^*YLcj`m_if{U-51|{qql^Migrdw0q`exXA@rBLsJmd;j$6=Md1JeQk+v5+ zNBe|lVaIf~ZZ}A}mVBmn5Uzxw^pAWG;FN=Ik^Lnv(^Xf_x`*$}iLbN=NQ*up;J1>c z+v2hx;NM4rs2w>seX=6B#iWFislO@gcn7Yr^lF^By9 zUfJ=EiNmxv@THlML3!41Dyzg7-LqM5>ZLD&g*IzdHoB)@IA@?urZIO_wPzh`|8T4=on`wfVB;FXj#{H&hbyXw6wJ1odp(Vm)#CyGCvbq7Z&DY zvAv|WW$F*M4~71$4r0Q_8Ux_+G0^GQD6T^zBQZ8I9(#80i^2LxG~_3fmcdGnU%PnW zl8+0Z+|_o=N4Igjm*rETLZ_qZ0j-)t@r0-ivTwL}7M96&H~a&Sic!Q115M#rl$e^GivRpy z{(Gznx6OeO`6mKUIIvu(tykfEX2EjH(WCLzulVxlT@wE07lq%o7*HKH*sC4BF8yAZ zYdgR*1i^*S7Y=LcKSkZMSO@prK-myu(WPM?{LWx>pj29!q`9>7uC)~mjluq~aWFXGXh0Km?n_%oCVeHr}c zUWKW2!O|lgE7&}tQvAl?asnp!D?28Tfrl+@8Fk}jQ7`m4-xPocZCCaI=L9n9nK()P ztzT)6tpN1Q)%j(f`K+*Qx64qxCxG+k(c}4#CWG`1F!8f~DblA7LGS$DY*TSz6Jfp$tU6> z@f3QF^d6v*T;Z3aC*Uh4KI~GvnjKZ@%|TB>!VbDWR%m8o0Xv|8XY|M9l}qvDQ%}X? 
zk3AZ*GgH>h;NXb#z}V1myzus$;>UmDU&mvQeId@CJ|}sV@8k(|)I9YU^aD=drM~5v z=lM-B^^QDQSmRxHSV0QULj>6h2GYtvKBR*-)S0wgnaDsMx#}|Q6!mhyKK-joOI$*^ zQ zS(W~WbJ>y{_hEaGPZ8Kvj$l7fcu8NO&{ueRai}Y(MrCC?120Dd)B3M zg(LGv@}*ZVyS<}89KlJO?ZU+=!K>!9+J&U0dbU$Id`fS+=$-LHzO>&JsYl=Yt4tme ztddJsMOS-eBR%;s`_bt-1=aM4q2rm5!uW-Cn((~7wQ3wwoVPDmESFc z^B}WAWmEA?+pGKnUGM|5ZI}AX6Up@5G}Su-@mzQ61cL*a@6(8bd0VzH)Gs17DMKZQ*(-zTsQHKb}3lBKeXp2+dR{R-Q8C)x&j?ei0s^9rDDu5SS#0qS4o$j)|V7 zm{>U;)pMVUZ~N+d;`XD1F?Zn^(QHaQAU{WZkKlnv>j?8^S=_DHv@9otY{`y?;yGIjpSxp?gO z$#~?^r(6wJG<;OH_3y7YWW&S<;!k$w!gr_tt6CE-#TTZf z@FL8)F6_PvpC}{O8Ghxu#gQb^ryKHfIHrd<^yhF*J_IkS2NM}o|Ehc|-seKb#DVh> zA(CI|lb8Qk*;eC+P;#83R__eE1a{>s9+s8{8ybrwbA zj_QuwsT}EU<(_h+JKI6w-F5^4EJF5oAh*fq)Xj^36nHL42t9!jImlmOV`wPwT@iW5 z_%`Ed;L*DP2#;;~_1v$rFa$j%Su5p^pZx(nv;99i1p#Y+t$-PrgcHSkxbpCa-}q2( zU-f=hND(ct0j6c_It(@Tf$iZ zDRTz&=&%F}j367MvojXsTB)vMA&N2KsZ z<5^X__wK>?s`)>OB*lKlL-wn~W!qyoo zLj@{7F*gIjs$V(=0Fr`)4Wl}A1U>lRgYgw#`8sc>fB4WL$@5m6Ju?|Ey!XC%`;Yuk zyy=_2$+$gv;#4fE@rcvRU>^cZUUZJ*SL7ghuF$42PCzx7UN|r+9=v(Ali+|qXn-%b zmh`#D0|Cg$g)?X3H~!OqjKBTczjhwXs^2xXH^wGLl~d0GuL>ueFU~+Pd38~k zLL4yQP|HNB^i)7}@|FgWLU5`@k^C-vv1*BWF__29VkWFWFFIyWgjac_;x^Gx5p8dF zcUJ%8ksQ>SS)?+sq8DevcruXECm)HU#h)cE(b<&0?Xc@Rat{1iUSk{I2q*l{peyf) z1HK)1Q@U9^P9P3tuwrF?ekO5R@Ch;njLaV9nL#eL<17a#bE{I-rEpBq&%lYU!>pE* zB(fn9ax8Er^nSfa8$ue993x4hKyGdKs|@PmL#o8(aN^1AX+S=I~K2GQAcpdkeVsX=Et z2p_tb4#^w3XArWSSfjd8CxU0O>Nz`OnM_ek*5aM`mv}14nyy#$;MrRXs*G*8Oj_wo zJ)>o)EA{3)C(d!UE<8dyY9LzW+tcNp~oL{9(Ubz zlN{JyZ|lvVEA+HKAw05Tmn~tbviYKPY81FF8;xZEh75_H3}A+a9ifab8Xyz;m5nRE z)75;4U!*{p85A!S#dgU2ufoG~zEfVMKcmm&kt-52T!l*kJt(}wD8vO)=VLgp25FGY zr{D`&3ST)tw=4XR7P_-T1OZDsx3K6ATdGhNQ&mKoZRmho4;+qfe9`^!j_>_$7%b*b zpXNZx7#$f>M9M@DD-k#r^Sey=zR)qiX`WPGcDw;XPbtSGzbhl@XBsTxJUP@W(UM+Z z(ojXb$Lg{VKJfSPvCn@#&Q8z7W%;Hfo&?t88MQ}_?v20t(BDVwxi+RA{%m~u!M}}} zg=O;z9%a88l_O)LYBX`=l6X}OCOV3|MVa7e2|ttb^YUds8)wg5@;SrjE}V}O$46- zwkHtN0E1@WT(%0@B;R2tRKDatX(#3r`3j~M6VuC1geWh zUbR0u;%#@a-~HlW&Ra&GG@4`LDe17j+{K5!2K%+*8QZKz`cqOYMx~6s@FXp(`dDU_ z7P3(KUNiRg;dDRCJJ7`@01s8Zy5j|!ashCSEwLN$f=$=2hToJ z(fJ1ckVzWWbVf4p&#nQi2=JgL`I20rgu`?Y%sqU-=ah_7ZaI0>) zU&(3e!y-RmA}@ukx6=E&Upef2zM7XW3QAk(;GO!A{C%woFDj5S(o`1uPVcN6;75fV zK-c@i-ak);qFc5@fqPwTPs{vNws-qdA=m}(xnh&(>?l`H5f6XeyUtL)h*#DL#N=&$ zOFkDLkwzb&LAUV3w%j2CZBiaZM5V)jqdOFM^iJiJd0)L=EpG?-3CjjFOrNoQw|!M! 
zrmy9;z(?`k1ceU3Z|Ny`TX?gNsxrtk^F_9yS=Pz#l!wbVO`u1{6QoM7LKUnFT(10O z^CEG@i}JT9o??*g$!8CYtER{s{K;x1^rG>heEH!sb6#fpdsltRw%YG& z#ftdkDF&c~5BWvFTlzMZi;{;`PFbhoMR6~>6GiPum#gv<23}?ycpDn=DhK}wMS4Ax zuLlgAUFJ{SGUT&qMPA}-%2Z3wHkq=(yZfvah?D+f$^vqPKJ8T<`NjaK;m~*cVG6|K zXa#(ASy-uB2500iE1%qcTC$?{6#07fjM@O~-bWjBP4Q^YG2xt@1U9g3(0m=gROQH~ zpj-MIMgJC%iO%%_!6jOAJk>yRm?RJ&Bw+{%AJ>G@HXdYKvJEyKBw2H- zrJif9I(6z))&2keYwuIl!rU+Sd%oR$>V4n+?!ETfYuam%Z_FJ!qBerh40Plxj`Z^^@Gdzdh z8}!kICqI0ZmE}!;LV)9?FI>FjqsNz)`0lFOy;T+v@ICTgbhmObwrz_oQdPdPeez4*SH$a>B+HZv(s8hnDE4}LDy(IN1HA4YkPNZ zXQVhTzb9pd6Tqnd+Ui#P?9csttZi(@(YZsCSMgEp-{!_nwA8lXBh0Bi|MJ(qIu3T` zW9!|uto0m zUQ)m9{+N72;osX?iK{CY;_Qo0#=+Ts{Imb#m*OdrYjXZzJaSIF6t6kMfI)rAARphL zX1A9l*tkMlF92Y{8SRerkMfb-ieI)_*dzvh1rJ;V2Vcy%qqD^?AJliC=Ow5vgh8y^ z+hQ?ETl3d|!dF{>eZQ`M0hDTOGt}`vN4xAoXd4d6Pg$&hjdAvRVnM~%^rs$Ke?qo- zwXE!?lNKHkcDLmhvNLdNEha|$@%`Wbz43F&hx;?=n6^qDyK_>=LM?|G-< zHe+>7y#?t@6P=OG;D;T9OFnr`ci`dvFYT0QkbX*2KBD~DPM)mSkZl?pn}|8e){jNP z*ah&#W-?x`+Qst9O02DIh*eBNaFXdzv;}Ma-~r>awy+dG_7guDPd;_gwB~$@Gq;?K zW9qAX=ik38Za#e~KK}8K$I`;Jn4FTn3$7<8OP_)xj=doOAHTMl!dX-{Picp}09s{o zzbU#{?iGiCcp-y>^jPg8d_W%DmLVrfH%1>!qCy!QcTKR4DL?HT>7XH@`mKH0PCuhP zdYA77gf_zcfRR|`JJ)J+*bo99^a&VGMb;({OvLilrTE|n|0>?~;ZJ5>0zM@2sz;CA zpaV1zm?Gh18JDr5J@PJkwQaF0HskrU zk%7wgX*BCg9#>>So^nkW(L}1Ex(S>j5i9D@5dQdf@Sm`~iCp0y?Zm~4i)vqYy#PQ$ zY(1kq`k`!gv0owB#4)CZ+*a63AhsIJxNYB_K=2)b(nB>M@goZlGHKO5~*U&CcFVk-;7XSc{W5=Nxe1yiN_nZrDtkRQ?(|p{%E6`4jZ~}$o z(ezh7(z;v~(w8r=J+x_GYJ8#*w>{&uY|AZi@Z{l`J31d%t}gi7^JB7GEl%PR8R+lX zKSbOm8~C3Ko*SEs@A|$Uk3akKzxH?!le?5Xs`6x!Bm?4Qt0Ui~6RkU59CI&xZH%0} zBX$nl7+v|{ySuWNl8F)V1HDlTBOFsUvz-us1((xiN7paM#P(tw?w*f$7d#XDlm&iY~FKwHG+cNQsf7THN)4aSlUGVGF55g{5x5O*@bv}w; zcrbaNIyEYPup1+j2j!C^U1SvFq$S`exkcWqUxnQBDF-jo zE&OF+5&ULY#{pqde(Hz+LdbzTj!cjO=PCiK5*wv(f9e{_`9~yi*5BUU!=*TnkPb}hsZY+95 zmawa&p)IoysB$?WZFO}m*4NbDh>p;*?CO`j$gy+UpVpX4RXooP`Q2WWQ#LFN4~^VA z^uBj|**(qX=$#~D0f4Uv+^CR1sMSDGLD??7!vKJ^>tG;5>X4fOex`~T1u9KrLnOuz zL#YBqm%Rl5iZ900MnVoM-#FV*gE}mw!5HkQaTpqINq}4KbUps~a{S~^zde5U51$a+ zh*v!822Tt_1Uj-zLLnm+9|sGb1jqwD!V8C0gvDX-?gv)HC?GTXtKYu73gGQJ=(-;+ zb@YlM#8gBFrfBP6x|lXaK$H%rkcmW;fi@cAEG(_Y!os>bnaLo&f9>6ELeqJQafFU; zI8X!$1dRaS^=!9y-64A3bEo2$f9ZSU)A#+2jMIXIWK@pYx;mh@%0Zfl&;Qb|kMRR9 zicsfqR}J>|2*<7i6kNqm^Fo?i3?=meZzqt^VWl}8>-W6pJ@KYDepQT5cH+pvc^?(h zRYQ38*4yG&-u||D)^na2E9!Kff9wgb%4dZrB%{sNdM;@vYzSEsm!;^;>le*v=m~BwDP;+UB|qA`KIhrUYb! zGFWBLcRpZo9q6tDp}Z&XO&(}X#~bCVWI-Re_YZy&=}@oYzyGQcM{ktI-)oaTwc0Ir zB5)MptCdxJCOr-eG@#*Ta0htEQC~)gz#uD~NjD6}HzyErX6rMC7Dp1ffsfRU2|*_3 zn<~>o3Ci!|M7D$*ZHw}v`;tVL1Ha%ExCklJigE~+k2>&HJ7AI^WIp9aL3x2^-R4n$ zahCjjD-$i9b?8spgrVE2H$~NksSgb;opry<>yBmYOVU+!M4E()d8@p(6_ST4tClzY z@nQjPcJ-6sk;i&2c%&uT4GW~-9V^F~ChfO-$}1V=J@NtHuns&|9?{WrTFKe^%Cg~O z+lfQ2{I0-Co98c31n=O33kVJLrZ>9cF7I5pmCqe|1Z(cgyD@|E|f$hj+ zqzFRY>DhHdUD-wMT|0&svC+roF2%YUPk;7ic+3uUHj260P|P>1tZvr1)*XGIZv3d% z@DATf6CQ&_9mykq%S$>rOs7dOSRCYmg+dR=fDF;$;#zkPgM*!C7L18e{K)4;ZF0zR zcWagpPTSi_YmT#prpQ`XW#eQqF^2s>9^C%IO9uR*5m&w4Vql77bl`-yCpCm0GM)~n z(wTO$*WwRlK*tP9I}HCa0*I!Q1lJS@0yrf#j*YGQET&{teFz zAVL=g9)}xaarW#P$@8ig0MHMAuG%%ecYf{K72%jTGs)=1F5)LPgGGc{uOcJR5&Ck! 
zFYG-Jh-Vz|d-4^^k6A45t4%CU|DYpIhh2)y&PBK3(u@5i?UiE=I9!K|$^&-Z3-(Bs zOSmqb?Itu69s)&C1Dr}AJ_iITE!zS3#}!|$kFC)?3-4rkEEjZq7nYU=3jhpOzfs{L zE9&9EXPh`4fA7UFjPHNTH|fPlEIsmQ49S<6oao38qzrH(r|7TF8cw%q?rXM}lskgV z7hMKfckq>m0Sfa>?IAk}#YwOHXSF|)gN<%Y%D)i2#}}94zrXuk@!q@dwjTEMFYVMu z%*-E%AN+wIjbq0T#MMhz-ll=>e?S=fKTpWp=Qoi=6k7l(lv#V5y z%ViN6tDmKM=@DB&8dbwGNxfODFocvTLAAGDxT)Z}%LpL{)z!FX-F?Pe^=mrHC24NfIy!suli%cubkLo0po>GngMiL5 z(1mWoM8}h-<}Lho9SZ#_oLW}xKPSwDJ4mYF^1;bxw25L15s`UZ{MA7J7lO5*(+nlm0j%<+RpI&(<^LQF$q~;6t3= zE1HRy5%~@9p79A^T_4g?F2(5HGE(Sno?tO^VG}+XJmkH&n(rtV*`WTiBIqyWdGJT= zVD11<8@;oY^e2zoG=;2#GAJ4AD63b3 z^4kE27evB8!zNJ%%40l7n1~eVn^7U-$tH$@YydX>d*Xzt>cpU3DGvPfOuNE+>bpB4 z{S_?RROqW6E7*d& za>awn_(onlDOt9iLaZb=J++~36X;8VzxYqzhRKde_00}UHG>lYpv&a+RLsoH$H7Af zm`5ub(|ZDzlb$;l1Xqr1|LxBlE))$fToqP(MwnZY0VKlCqp!uPgQ zr{c@L^tEyA+~pYVtw%$AW&DOZhWfwAI=blRK~S7!zUYerY50@&ko4k@{cy>u#EYwC z&^Cs)9R6j%hN0o?DisxDVSOo%-_edUXFBmSKlv}>yt*z^2j}9U$CqNm@|k+V#4WN= z7wY#)droLU$FcxGx8_&1GseRy(H6nN2e+Ow{<>EukB4$`ovdYh0U4Bkk#$a;O1L%f zj{N8rf&BHJP@n&<_X@HLLIx>M`r=?J*#7xWxcVk}M0tD9z=O_up_gg@_Ue|lEGH8A zWxUeMhh$;sj(ngAiP-ml&v(U}-}L%;;DHCCA$;)n;1$jIk@9_$Qxfab+(SPvA2Is~$K4;SK_&4;% z9@DO{L-|kt=$-NY4}H>~zY`^6tUWILHq0mbW1hHGpa^7_@{uvujkd4g#k1|K@yP{J z=Ap+na{~qK5qN+verT-=pepXzM>eov=dzyi?NyFHtpo8Ch-U(o_7XVIdPKCZ=~?b> z+n}FMyi@_sgWBlcsDrnzZ2m!Ip))5&;$EN=-h8Dr>HFaUaJos;?U;L)J$h1%*VERo zxxPMm5Jgmb!UPyHO&;WeEAl(0zAIxrt~d5UPFws z=E(UgZbd$nk2Gv>yLkDk`tv=1u99&&J`+j3F_=5zJ@??}F=^RfZ-x*|&!@@16Pg45 zF$zL}=8PZG?lXTd+F*iTbyZ&AG47V9=X5*{+n3zI2g{3S#ub?UjHY}8aE8yc2>E$E z_4Cyx|9@Km;JPZ{&k;wx!CM4Ln8Fu6Knr{Te!%wTMZx5rfQ*w3`s25Vt4@PoXt?5| zPgYf1l_%lVHca$WJY{a}Y|00dOsKJ&J2W0Q-Fjo(eCy3IcX&SLkIcul#pPI$J?W@l zKh~a9UdAgi18KKLX+tG|D{JD>)Li_~kG(a1_qYF2iXfWoz%v$MjCe5z zqqn>u4&C{hXr6gq^ruh9s%#E&-x59}BMfXKm&jDgm_%B%%dr#YJBBwdN2|9OCw8tz z zh`)?&>B>n^%%3u+y4`Qal6ZqMk-$U_hl9?+@YjI6}RUY{-JB*yGET1SMeX;Bq4P8Zx!KozB zN%Cvk!8jcQE%XPZ4&)#o3zVEkG_akj{RIW|o_G16y!e&FIi7{6psVOsrH$oB4B;L- zhmGD5|Ho8*TfFAn*TbEDTz>d(;{JR7D2^W3iXZ#FuZhvsC!)1`ImU-ply*hkr&Bo))zD(rubF-Hc2G0j>pI0(-bwN-Bhg=XPm`n zUYlOM98cxs1N;gWh_fJkov|sw!#3NVNKR^d)Wl!*Rlc8i!6yp6Cyy092`A7$%~6hq12`ENBj66laFf;F=Ls4mU^*f&S5u5KOcZ$PW`U zz-SDxjHu#>XOd_Kc>0a(Hg(Sg5rZlcUY`L&JE{*%NViemWMe zt;z_dk;7l6k_0zUieqx7u8^TTR^4-yjlaL$ug(KRuL?v+Cgg-Lb;$Z=rx+!K*@{)1 zf4Z;FtjvNA1fHE%0%S%Cr%g4KFm`2iBbKF*`TZy{L}{ucP9Cq^g=R`zJH~YZ0Kzcd z#LgX&m+=KlNRdw%#kqjlH*q=Sm6ub zy~2^ClZ|8F7Xo-0!mNEG6wQau^Tgp!X4R$w(8JZ1GLDbOVvEOdjMtcFG0 zkeT!g1Vy;{tsFnsrJv3ttA+GOywz1aY|uehVA2g- z$W44YjmVnt(>wCu98@`m#@GoO7&=VZ0RvZ@85OOHvpX0Z$|v2! z{7@PmEB^>p7AYImz=6USr*S1O3kb+J0&PdfFlwG?_o_)cUIhlnJZ^1nS?~EY1CzgW z=BNX<2O6jSYf2rm&g}40HmBi;dE{_ZY5GHwWy4i|;n3+!q|H;AeAfvYfde>`7uw-` z)m5lAz5?9T72^!8y;D+k=QuxjgPvf6$q!!CfsQ0s*H^GvaY`JMSm`_=ZP_ zG8u0@mW}sS4h5mVz2UrdKv&|O@k~~6l><$B213&gWP^{SZ*Hv1p;?ty;9O>X>jDc; zh^XEyvO$c6wd>D!H@hl(jic*C|5mf(2Hc%~$tSx)u*V*FlG6kX@bl*|Bs0hb^;dMl zmzML~Q~mhvGBkwr(2IQ8ABjf|niFLg}MQu0t$Ud~^o-6(Yb>yDKGcHr|23)G*^s0MP^=XZb z#zbe_x?{Z=K~EWM&w(18AQp5y{MdQ#{uk(&pra(5IDWM)C$=tDay^9~JVb9fzOW~I zKcNl-1IxGEazY#>Et(+1iGakQz&0|KyUeawWtAi=KwWdz}>f%zYEL@KDrE78Z>iJl_c3JdYu#b%{vr`}Y&{WwS`B$9C zgWmGpTuzH(#dcFZ7pDR>R1T*)aXgzH3J8nQkzQM#WE*iXXwTUdz~`^P%RU2sg8VIH zojAmb!3ZWVcJUo($6I5b_@~0&DWtUc84N6v2mcIPPMhREL$d!ugWWx(QG&D`yhj#! 
z#%_@lItw>`0~@4Yo&-RA>L7jqgPNR><0@jaWar#|ZnL131pr-j=raV~;AH(4# zI^gu!(@dOphfJAaa2m=!`9BLLfEF-ZrlDjTjB>u@>g^R{VwSZ*SZ*k!Dq_1-m1-I=aT7B>V&X7f3jp-KmP6qFZ^xAzf@xOd!~Ofb&GdVC zOBEcdKA^ibUthXRAEYJ!8(*fL0KiFx?9R^eO;Ke5AN_8&^}DmQfs zTy-H?1?L2mAvqlA8&@SnUXTqbyM2NJb8kQ|abse=2G_ULp~_edgF2L})IP5TF8*24 zP0!F7Smjp(^y5c>7vMy4-z#r6RmC6RUhh~0%_J;-v?MLMTZ|`&|9bXBdzbceH#TT1 z6wgi|+gat|2>!}bWP^p_P5gTK^wZO`apcAu`;}+@v^GOEm(YD(2wRMh7@A~9{tCtt! z;>C;pE;umxjE*~X@YO_Rz#OV1-#uVUG!30Xb7VXwrrR+!IU`?cHYR2#V`gS9X67bi z_P}gRO?UJ>6;m8ZF5B2$+lqhluYS>s8xG0uVyBk}oHr!f@_{DXo%q6+zbbAzb}TN* z7i$WqG4%;pNQDe~0h9%MUnqJ2Ryp1$e5)}i0r`^%SiVdTF(G9w<%{|f&oh@Qi_nB! zYRj9eaqxznc=q#V;%EQqTjPPt+cAD%Iv%~S7G3zJoTRn26f72!GciY-Q;fqd-;>z$xHOI`+f;8W38Y!u_J_!g$wzVfmSE_$ecEZ*gWD0PgZ z-Q12J{NC@3H-7n-#AA;>Du0MRa@XU4IXNa>lf$}y+{+9Q{JO)OQ*qgNLpj(w>!<={ z5=Z}sfq!-oV*7pUzxu3y^@;o9!N;z|2k!Z6^<8)3`$x)69Wrcf zEXAmN`O8mT@Yv`N|L_mTpZ)1O~wTsk`PhRm^v^DwWoR(PaJ^A@f&&g#wcfeyZld&#NjG&K7|Cez)`T|>w&k6Rg{px>;^9$W*3CEt=!B$7U zqUc0h&ih;lEt!)pz&ol!S$1t{UlniJ@;C@%uU*lL9iTaGL`dC2HzkKGEK`0KqEl}| zl}%u9>WU;o_i2}y5N6zjv2s5P7rv#A!T}Z>;R&D7qOW4Q6#ImXP&S{aMgQO-W16O^ z((*ha9|``Vr0&D0^Wn*T(;U3;2f&|6OW82#H+;6Pl2-bbH-xL&=g}x#8NgHa)WO4@HvEa)62espUwb}j@n*!b^ zIzBQixe|}?A34&y(;PQq&;y!M7py(_stT?z^`=E34K%U8s-jq&0G|oQjd<*EF}#Ke zA@>E#B>XE#d)!z7Ip|BFppO^EiWZ)1q>>6?#UcRuAHbwsbf)U-uw>ZtAiI)l#!-z2 za+hQAFu1S2&lX3rORuKpT5;yK({bkPZ83XjHVzy<7)xt>3PScowvNw&BCDM6up7?m&F~1Xc zz4lqLx%e50;e{C8T^7CM&yyejh(-*GLVMsKJ*E=^qdF;ucgP>}n%Eg+e6yVLROd@M z?FWB$dz*1@*{SBF{Dtw@Y#fN)=AroL{TJgWfBrwkg(ogYOdN~ned(Lxna_KvB(f1x zvj@c~wJEzz+cHk7T9OZVd0`Q^Ek!yd60i} z##NJi_`P)9<3tMRFXOAo3$m2sr-YOr2lFv)bm}bfiOdW7uyR48LB6yD0_e1i4G?Qz%Zo*%>AC!)P|F~(y#daKvuS9ZOz z*o$stPx-_Y-{CiptdF9%%40(Wx`)j9r?WV3<8+m2(HFI^q^W3WB4f@gkaL*hRz6)(Oy2rv|bb>iF)N*8WhWmRI!LTuXVeU>ZcOG#WZ9cOSP-fqQibVen}ivEJv zoQz-m`R|GMzyE!)vCcQ|8*%`LV`lb@n14K8^rd$R=wWq;gw2iwb(BF9)qro7=%3*N zvFJ|LL+|00Xm$Cb41`krv$wuAe(4u~*+=ZNx{d_^7>5_U;05u=fBZ-OhVGM3ocDL+ zR@FI%1`J>k(s4p*w+9OVI28d0ert0RGFrJX76>(yHqZz>Ja8?{4#Cl=7>d40eMD zfp0^>cM(91R<0st5|pb4{$!3B#D;IiK@B_bltIrl7CCWm*_OcNx5{8>PP!q97zFBR z*#sZU0INBfgrwt~&MZC{9VG%QEscW;01k3cb>23k)v-f@D`@)4A!T(x<+NMuF324Q zbU5gAdJseTNbB8qW4VG9{m5PS&^Qekw#KVW=%FY;}Yrtlw!DET8kB1hJ8_{m?c$VpL1(yJk%fy-5^l%q7_m&>qItvH?? 
zbmcuig6&4NTYEwNi}EQy3>Qr3>LXbFEc}3noB*3LR8|f=mmpp^4fU$<5n|GVyKyXe zrQjT&V+Te~4&QELP>lg3WCdR2?s9C8bW-n;S?i_rVOVi2)Y($!9hz_T)j?I~&x>}X zL&!d|u)4hBJnP-u34^{HzIBMBH&q^;NFI8BJhFPl;eb5V)2r{K`FYEH}wM< z$w_PEg0>JDVo?C>NZW=OV0)^~OQ#sx10Psh3>eZDs1X1rwug=^SW&0|JO&F}%?Z#9KHot)=ngI{HbU012LwBXDv!k;Qqdv`^Vaic+S7Wl3pk2M(tPCk8lP zKxbWLQcWEg4{oM!GTMNSSx0EaCxaH4BwSrH9Jow5Ez-)qaBu#reA2T3pyEh8oh1fZ zd4KxM&7$#ey!XTRs$<6jfU_PjxqSJe#2S7%n?lx|JS_7_d-0pJPY?DB zumh&4(wL?NR`D11LMdGI$&ikr6az#`T*YiJ)N;8{Z=UyG$uSt5FS>0!YT@YrKKT1f zsPPFWVbC!m-k$)_1NYc!jqka7HSxB5F6q%Q13;pe+dH*$(8fIXyYy$>amR)$;EP#INuwY7 zgM8DLSg+-O`ABNO=#K~W#H)tju#kdpPVdTI?`(IYx4axnqTRN?GrSqSwY9Y8YwMms z*us|5u@rp#C-^@#JszDVC+xIiZfaWo0N-m)8;S48XLpRd(yNk|J5DdE{ej%cqafT= z1IupO2k{$wpx1bI#33@|wY=CjREL;dlLBqJ>KVH{Ccm^L8^WXu$3Qb-gL0FDcFmn# z(6MshH$xM_0fx$To2oJ>({L$I?~zG-JmPA+5OMunUp;%bhTuYT>$QrY4Ig0vD13q6 zsy7P&=%jl9jW*GX0B9SHs(Fgu59&(gMGKdsdz(1FPd}bO-JWjxj3dDIg{)7)2PcPW z^C0H_P+;qYFCH8$_gQYC13`45q0xvTqO#Ml;6lyv4-AjF_B79+S z4o8*Se-Ym~Db4A{mrR#^MEbb{`>po_8WD#s)O+Yp*`!evC&>^Jzao3&1tvJj2G+Qz zu~n(jn@neUCLe(^iVd!DPD2sMFYigO$GQUoA0b1z7CCXb%8NBJEcmDE;K;>c2B=k* z(z_iP;IMa3K6Z&#T)?dI5&a;#zN@~9^8f-=p>@&WOs9AH;a|`%VQ0mz{6OsFuKHcp zSM-oN7q|p?Bo8ZfO0fKX-vmUpyVYOJ{Q2G!U^a}|rfp+EEo~4JxEtyN(02Ben3JdYekv2Z zw80(u2rMY;OijeWgNOAz<$n9*+*BSt%BKg~6VXsRL#_JwpT5flzt|#d5cbH=szXjF z;DhOYEc@5oZ%TgF*My&Z{8jliUeK_?QRGYhBog>DCWoA*9`O0bT+Gco0u%Us#&U`e zMLdfimll>|ZFxz3E%`x8zpCp76MsUL@fi3vLOZm@LY7^(`_R#F^y|q-alc-UIJk;* z=|{?cq+c`M(ogLzJ0L&v;C=1}A3QKGb3Pp7qhqnT)Qu70{=7S$9WQ#$3)JsgiBC^SWmkW>F8%g`$WX=o>~@5oax zmb%L^du%IS{JBTs7k~a2<71DnMq_3o9=o^^J<$W-5gUbE*)GsFsEyC@BS1o{JT{uK z!#cFU;4?TS{@MO`l4dv;gvcAWxwI}B-@W#4FkWjTkoS9G?>XUTsQOv^gM><#{b7H) zYMZW4Q{b0(^?46gA*SSQFJR}g;-qt3RSf!o4Xw5epWHB{m-g*@>ep_r#;*KPj&c66 zAN`?t^{ZbV=N@@DHrLp&Am3Ab0w%cP;{gnR&5Ndr{Q!QggO6ypVNAabJ>V6S6F%k} z`yqde@vSS@mSS;rBOZJ5LVW0>55-fLuS&@=_Jx8y&ZH9ivJFk)FN<&g7%^h(*JGKI@7k|X zZA??@=y3r3up>;m+xIH=drzja4FNv+Q&!L!#tzYi^!w{69eZ+?_j^C$#Ba+(zN5eI zD{>&v*d@jnJ%*$H04Rb|l{MC;u}({qcuIM<(Y+-F_fz^t)Khg_i>? 
z=x;kHIpTyw%0f3sr02x(`8WI~>=r09=Ix#N%EPB;D31jioD4B4y{BJ7|B1ZtizDN$ z6N2GWU{#4tXd!%QtFYhoGp9ZIo9Lt`#nCMJrzS~hM~NrwfP-#%gl4>}V^0JoDk(qX zX+34r1kt*s03D$ho8GXsEULrLW6T#>R94A9{LJ>JuQ>4I6jYu)&f<9j@r6FB>c+iK ziIS`{vCCLA-}?s^c7)HqF-``am}LU`N??)8!8)^oJ2`H{te}_!Q}Gg%ic_b z&N8RP7!oo=|3YMU9~s&5QQgQX6UVAAlk@E8hlkW3w}u6wV`I!6w7if9o70n?aY7ag z<(n=2gtzYcs;GUg#^_Z=au;#4UH+8Z!rH;^5K4vD)46!b~>!@|gfW3v0&#{;CZ4 zkAJ?nycwfo6Y=kV{r?j`^G|Nc)=JXvL`wTdCF_`mttgVF>Z)!F@5!cINn%@H+|mO zm}p#%;q~*;*p=+)2Tz6(MDU@{Cptn~8tUYMXys)qY1^=cqPX}eU#~f0Tf!I*vNJTI zHepD%L-qG((&;PoJ28LwhPblQh!21C)A0j8^-Hm{6ft(oGh^c5nRvw)y)kZi=JVpx zGUG74=!qwN)nQ>_EiNo9MUU_7tBwrG;1@QTU>;ICP~;510sY6Gn6JVO8Pe?lUxF9+ ziLcTE*GE!d{e{<_1E8;i*pQJi?;>la3o_`kQor4Ymo2K}61xp-B{C~Hv)mQhm3+gZ z$Ea)upNE=Mp79~IZ##=IJ+c{}{NR6!#`a_JFMj+xBGw;|&h`Z_0N7f;7DHPb-cZt( z4W{n4hbV%ur+nr@{YX8)y9UO0%tO&Z_;RKgX?Uhy$hkP}x;Pm>;mNxMsEgha7`LO` zo_NWLxqLR)pA1lXB#$-``=0Hi+6no{=pfIYqx3wwYydyS4~-1nJM?@1@#XhOfV=zc_y=Q6dUC_T@p$eXH^h@qTvB=| znw%0k4hE7tV6a^2Y{6B=jN>qeP#K(-`%K)1S zbA`BBUmAeg80MA6w~x_PevV>WS<8uX8$ac@@F3Kvk7NbRjgmV0ZO zs~#`^{NwSqU-^-E|9k#i9jsh#F-bqP5>r@wwx6wyQ88oE8*0c zMMXtC)5=F_QB)B;CcL1}xyQ~M|F8e%Z;1E2>+YDEnUsARis_kockaIIi@!8}`9J(} zJaPU~y#KxLwL`{t_HYuRJz>vC0AfH`+{Kh(JIOJ%-7nH1ivzsIRaO!Yi*b&Xn*mXI-)Daff z69)1W0QjOGH{heIk4sWrth|b2&?6O_!3VWV9Xa|q2E~wgQb6^z{8*Ng);{`%pY+a0 z4XW}u3*?=io$@3^N6sRvNEx)8ot|;MI&$)<*NhxWCdiuV!FX3Ya8%D~SelKoBJb2i zyi)^;QRdr)lm$N2qoIZ!`(jyF`7nTb*$|a<4uUBr3;i+uh@-*#e-T)5Rl~pmEwJ~8 z%5Kr``tVuO{=W(sc)>K>3R^G>PX@sg0m5}K0R414ur2#!vgqXg;O{Jo-uaL^T!VE= zHUc;qkSky)4uK}{jBVs`K&z5>f1rUgEJ0&HM1k*>xk1=zC%?e9!c~y;>2KP>{8jj6 z;l?0Te$~AHPy7Z4>6&0NuI51bXAu!`9W zs+5V2AKwf_<{QoIh~oIL^MFM^z6uu_d=IRv0ig3idRRt#fTQbZ03S|>VVd>52mHk_I=&`Y&J+}q~J;JNdu@~{=y80=RMdK6q&SJva&wQFhH z#T&1HR~gh98%O|u+AHMKyBaNQ8IXM1NVyxfe~`)xlSCuBO4wv*?GBjm!S-+x2Ou(@ zNx^Z^N}2IN^bED@G1`PJIqTpAJ=4A>4S`LaTczGivhaOHY}~u=zS~mzth2Yu)_3B{ zMfn#(4&T7#st|V?4KJPW49SDk6hc1RNp;{EdT^m#7a|A@2o*b!ivS3s7q2N-WmZ^& zpuCo?LHfN=V|ZWl53b^c;Rz?G3_jNf^5m6w19qig4`36`{jNGew%x9WdceH~zh^u-#dH{Bb$vw593b z;*+ow)@~GKE2vd1^^qJ1Hh0-BDm`(F1#IOgba;L((e-Y8spn zq4z`66GBkw_~grkp08eu2hTkj_kZT0c>LlM@xa3m#f3{3ynBWY3A)7YzQe~)#IfTy z#2t5hPMp2{8FBKKdNeHo1x}&}&nbBL3-13t=6kK9e4ugxkebrSo zWL0ZlHWvG}-IKj#0BN)5Pp#x{Io8GZjrAp;4Abw)Ka(x#uBr#BK>1Zp@UJ+{2VFdT z_^|xQW=u|vd#68r5)arj@FTrWyCFOazR(1bU@(i9(1XcS%RF?mX}}i2YUNV|gY$Gc zIZZl4K6}zt34v){l0uSSB(=S-eh-6RZOOyL)TDfywwm@~Sq0g(v0PlqA`Om|c3R++ zd#>;U8Lv8Oe4VDWZK{(y`by^b(hl3k>3+ArswdT>?*6yYD1%V8x9V>oHw>JKFWcfh z&ny7&WFxdOM4*(bgbGcrIPn|Un-T{2CP4?Grm61<{;PYQQ=*VdUk3TjpQi<PCI-?12_Ak8atzO~+qyy;;0$mhi+J<&;P?CRaoUQoLS1M?@GGU|1TAFO zezE*$CJDLsw^Q}L;yZ%oaj$s8WkTQZG95dphzD1pJ&*??EC=o{5LfZe^aN1k2dRiN z$x7SfJ{>lHUtIMANxO$5ursau^@^iBA!*K$vzF(0(T_BSQOd5t@RZN#(NEEv{)(@P zl9u^eL&D7nCOgRM!65TaQzvom6*R38Z0=4v<+kd5{(Q z@yb`~?s|$pJMwueJv{wnU2JCXhyTnTlvOMTiWEeh(W_2*%a*luE zTj8y1tpg`0xxIj=sy8Q2DzSdR60Ipq*}2E21IuG!4SA?)i(2goW9$i9$%ntszg;Fw zl#g=yvbV?t^462x_67%~U0Pg;Ys=SE_>lc^;KEBrWtjM70FOaNCh{jI$75W5^U3L1 z_s^%Mrep5lp*V8L*FgtcxteHptugB{8hEEH?heU^=-xepzh~D_v)Pp$M9wt`IBlSD*1L;%o=_6oW zy>`jZ%a^al#%eA+@Tqa~n^+*hm;(C8I36b*!W;PMQx$shH-f%}&->DU5Fd?+vGJIm zRQtOlzd?1~)$23zxnB9Qm&astDtarcvAVP@{N$GniAJIerxq|6o%%)n(}razidpp3 zWf71s2<2M@Bl|n<^7A~9V|*o^%f0oZ$m)>#PMb?Hb#OCY{F0OLtH1sq+3i0qMj8d~EZo z{+k}Es~EvERbm0E5~}Kg3GF3QjH~swC2wo19zSVFP=5T!e>h(D;uppP_kTL%g~rMT zC&Ni!mCrf_T!F}=sU_=@QKW-U;xVCYJZR5VD5#)|5>*G%GycY;Bx9j?hRbW~v2blU zuB>dtd*AmBvWE7$y)s=xW6 z*StJV96S(*j+~0wsj290EXm%ekGsh@1%;=+^;P%W1i*$H#@y5=kS~c&(>GgQUW{%} zIBn%AHux;`tz1vh4jZ+~*opMlZI0>z9iSiMfuoI9aGIhV$oTMwKN|Oa`XSf1C3|wy 
zO*h5i19S1pSG^{_`7K`)SD(BTk3IBIEb%E4%H^2ucFVk?kA#_qP98@P-F76qIi`T@ z%9cvz8NYze`ANaV#r%-9q<);_fn7?TrRK@#HcbVDcrk`I+GI%n(!Lfr&|N++<-lF7s6w`cn8 z94lj@Txr)=)z4KO=zs1l27qBIdpO$=Vd?UPvf{Jtr{75%0&MT_6+fu2KLLOgN^ZcF z{Cp;xz(N3jf@O3<<*vqsr!Kl|`ZHs#Vv|&NHl!Jb9?8#dSA8$T5GXdi+f@6#;#7I& z;99cl_@U62wy`HZj!CyzSInlk5!DBKR&|~eO8p5w1>jIe+dIh(^+1L^m&bKjx~?|H zV;t~80enrD^4tGxoGb4Yc*m*3yyK(>U)?I3bBo zYI(e|F3bVE@(qdJY$}5HL#oGcYd4|F#WO!5>04=aR@LBRRupgo`V*?x`KdkotWL%B8rAK47`*?iGi%-XmhvY}C zJ|>@KMTwWqH`)icwff;Rq~3_>roUvA1*(Ei8t6wq0-EBBcIC4%x7O_BrzDv}-I6{v zMy7-#`6uGo@tfn1{`8*s_V0fiC^}4>JQGugPsf*h#ka(nXFN9^lf4G-r>-o=LrYFjDy5I9$CxU8aG9+PK6aMPr0rlZ*Xxw@z zpTt?j5EKz|grmA*Oah{-lxblBrJRXZebM*FXC9Yc&Wyw@Ck_Z22Ama8VgeWlo{!6t5z z6pb+VOUrAqw%!-rhrNmy8bWJHglWU~9!Ljx%3#-NHdHxyH$5q%IlL7wf7Q+LuYU0d z}e!}BF5!fiu8j0xX7QFZA^U1!)OTvjUK=mBBT zgwDf*4?d(uV<=wp+SkTs9(*k3=4MQw*_lJJC4>Bb{O>;%cfI8?0s9m{qA s6-_D45C6yydH`g+ z)3M=UFq?*Qb8Fp>4INAm5JPh}Fa=%-$ODSWVhjeskxdpXKo97R{PW#dzK1v79*a3S zS=6T~nZyA`3GJjwekmWGlGpiFCKKw!`(BFYqxaQ-)rOIPLq}teqK>KY1Mc+1jE_3T z(c|drNxo+zyx56K`J{nf=qh{yE^^Ak1j?eZGd&S>(VMc!$AAU9_4)o!V1WNt0M7qE zLK<;8205vvXW=D6D=y_3hacmXat1z*F#nm{A)NyAywD$~fE8!R*${&Xl5Ow28WB9jS3Ilzsyg8TWo(#|)HrZQjtD$s z!SG!$@HBYT&uMqSZC_CRWl=M)U}j@azCA$|`|@eeAUU{P72>1Kcgh5u;CR=g@b+#e zNJ`LiwY%uIgPy^OPNFE2ak9C@=_a2QD26B+8P|;m+b6-4{-mrkAxjC5>5u$AY0;5o!BH z11m=pO4oW0v76oRdOm`>rw%ZU`7S#n)sa|UTlII~JwUa*>JAYdR(8Iu$XH`3*b3~~%VczOa}aq!>om7uyEm<+6kaorS*s z0h7*X!o(loXwgxBpPVBH1wudcQO$`v81OI$GARc~)Ps7uhrZTB>MOb>-}I9l(FOq* zhnjfqsS7;jitMwyp>IbpWmC!A0hP(OJw^wS1M7tX1BmVjsx6`Z4yApz-@>DSkS2FP zw9V7oZoAF(d*^%aW?yuiz4^54{)F0{%PN~Zd-@E1&}f&P)F9-LN~fyr1<840$}okSA!!ur92|r=xaLtnMOFMBe%4)cLG34uztG91ppDxxZ&pbt~b6uzWQ}v z>ULx4+~ac6Ntga76Tox~h~b_xyfZ;~f}02Vi+usMa`~PofqtjD9(Pc=!j+8?=Q~|DZ?s2YJU7E60Zz7$P?O-B&r`Nm_aJ!wA&ea0>gY`kd6P6+P`!XD^qyCG1f z-8OAG7^B~Mt-4VjgNw)<_f`>PkGdjs%CBhgzuFAJ6Hf*zsE`BW$Q8avwlyNRxF`NV zchQSD><6&a=0Yd{0RQw!L_t*WsTt^N$!BAS6cao^#!u_f4iqsE*%9~K|o9dk9O0O;JW8q|JW%OWO&hwWE2@ApZyQh zAj1F`U7;tmO&al}d+4Wt3{h>%NXfsKHiK!cKwMfg9`@qESd{d04ZtX>>%g<~R>6E) z@XjBF-bo!fk z;inpmVCx+rC!{ld)z8eo&vv7i(2p!7?fyV|ssu_-YZz%i6V_hHg$3n7fk9>RrOnI1 zCW<8;c`X;EPWYIe@rEZ{2Yr707Qyhqtm#tdGboqBl79r-e$}DAuh)E^Jh2@RTo#wp z@1cFep0BNLT<_zQuvb=wq+2ZyTJPtHu6iaot`zHfD&TWL>-r4;3L6THJ^jm2>Z|K@ zcqq=v7~3iN|5m7SpleM-y%RR@h|F@#TiOWld0KsINV=DM@rvjDMFV=6@f=`DkARs2 zc)D?&vroi(N_F`XET2?aCQ(Vxi7(c5wOf{L<)seznxrE=@bH5;cAH5w`XKPND|_ru zbV-;IQGBL#J#)ejK6w{^or08KoYxQfF`+2AROQAxt!Q`TXQ+IR37?&vj=A{*-T^u@ z&E(KrG}>b^&S^QNkKAmrxE>#+r9MK(ZLIs z&zq6tFEH`y$se^_wC|h($Bw-*`8uQ<8*N5sSU!d7H{Y3$7eDLHn4g`GRpC1-9O=vT z<-4%JP*k*E;S-m{)0CBcawNI0b_lwpt-L;D9@T|(RbZ{N`XR%Hm-+0=$j<^##vC?x z)}u4K5ifb!srb*o^V{)9@4YX!q{dIkEzDC8)iY8ZRaKR`}y z!AJjnHO{J~Zk*43Ug3favw*GI9V80bCKyxj#(3(<7lo?dyszy)__-XzED04d?r(?G z-M{^QShO&FHbgiSdKIKde;r>cPb${5r;baz1J0!1pT`RTR4??1iEiX(eRU<8$~V^9 zj(_|^-xtq&&NJhapZuiq5Bu~|(%SAyzsa5p!_pV=c)i9tI9~c^AFH$jj8#OMO{YB*ok>1TI_eE5>>g*~6ZXB1 z7v*?wTei6A&#?F^SoD{e=qG?1xOl8(L-MHl;UBE6<^~qVnYgmZ1DNm*1&H^GVeLQB`5&4%PD zeIXVGz#oqrU|j91rLDB=>&~Cb6AWwt6Wk3Dq3bUnu%Uh)>F^V2m*!>;IsW{G^Il*^ zKeZ)!U}rz&_)`X|?Xc?RQ?-#H;Rf%R{AWTR?3G02QeUZ-ek|DW0)5G9j#KqqZbR*J zr^yL6;@`G-X)-2{pf_y-;|1^%S|G~Q2RX`x07<)?!w}D~NlHV15;&H9l|y?+wXq?< z)t%Eqdt?Uq#P8>;G+F3yEne!Ti>dhx#p_RSWJw!{TY*fzpI9r%^uRXxl=Ly+*icG=e;tvj+~8^ zP3pfpSOCB{ga{|v3N=BK88gHk>A;ri)!tc+k?oZ@x^*=Uu0I~_l}F+Wo-rTKzil#N z`7wD}%aV^J(FM9Qo~Emig2oak@kbBvg+(>uFFdNgiS>@QUh&X|PuH-BjPa!IhGbqe zXtrmgx6Zh6OTN`keA~CbHO?@#b&+!MNqN=f->9{~q~bE3vRD+p;Y_ z+1@jm%qh(9a4dFcBgCT3EqQ?AmE}*eV&7Oe;m@+WJ?B7_y}YL`KD`ZFA()J#FbB6S zSwJ??KfsZXQ&u|35cp8;)0wi 
z(QM0zNq}49(U9;huJqM7&c$7K{qOO|@4pgZ4T!#zBrN)W0NCwR6L!V zB!D`AcIr_C23;{0QuOU?SnPmO36*UHRGb9|m`1n=U|C_JgrbLF57BVuGiklbBw0NO zG$!>}jM~UEK)$rRB4e`~lM_tXFj?=@2w0t1Jv<|J%yt;MeLA3nt7TQ9!q zO)rce`-$&}yZ`Ke#^sCWV*0=l;opw-^vQVPtG-elz2hJ*I*Y#Bd%IJ%)d>OToa|Q; zmEk&=PZH$hc-(*g1FGmUKs^h^oMu?25WmeCzqffJMTD})~nVTTo=Ef7tW7NnT*d+98%}@ z_x{@-#J~QfUy9pqyG1gT3k~=V)x}G!EYHs6-x-QXVhF6TQaFK?Gu@8qiSamiXfCFv z*cq$qJAXB~l|iv?82$%-Gw zKRqCD`q_Gyv_;P(9hk)!;?xEiI|e@C8M;#qrpPVli!(4JJN~qXv__pDWCgw1p@mX# z`o0<&951Qwmfmfv5w|Ndw5q)5hRRW=iLfnvD93PcqO6x`&(aoSCp4%X9Pkj^ z%sVxpjwe48fY?mTa0QQK1=5lbi-*9ap68qC%P(G-C%bvRF@Y88~E; z6FTyve4JV6XsSvNJ9|Ef_unNi43goU10@<*qiAL5JjA z%29PbjjZC3*ug}D+8cu9N>}K@eZC~5?$(n4()RFL_0QW3;!EaBzEBQ$IT!gIm*o(w z+rHl`$Dyp$FUj`O0}D7jBNGL0@OM182&df@6v z?%^paE?3YrM_QkGthebNGa;9BZooEd19{c$H417ZQAzDj@}Cs<78(H@MnI?<;*9O;g*9B0s7^`b{0 zSy#r8!@$Qc1()ygRXbhjEE}R)@0LM{KAm0_#lla};^;$vD^L&MlURZh0|`o~+;-+w z9uU!A+~&)vpeS*gsdi^_GscI{Pi`tlGQn^=pnBRXc(G+L2!a zZtT+UZgr)n8v}c{3(eK0vhyFEWD!8Gzv6H0l9$~%_(--tqD@2idf-a^im~yII#3)P zniKrQGhsf|62FA&K<;RQk#*sO{Pa~8bYegN>)!9>=SVOkEZ7>^%>jj!Ex|#LpqJpe zZ9rE2gSMG2^)uy^1zg#tC`NMSiEYIXD-Co5CSswChT(FM4c(^=#@`gKXdL#X-SSb* z?WqY5u(8tu%SvB|-3i-50$sLV3%~82;Fr2v{Pc&b=|NfITOA~YPyTKm&$@??yE`kw zl8nNY^h)RBiuK=hk=)he|5*&egb8@FYX*B?^VOZ!lxrNyv-L#juwSO1`eoIY*q5;X z$$QbS*oOVj1n|W=4^BKGh=#!91P%h}O&90S`ct;sebI_4IcF(@^aOAuPy+CrU%1yD z@>RC79?4?@$)Mrhi7VHKOrLp`(pLr;kQI1gxJqKFG@jYd#5P$^B@5^qvTj&7dIa?| zd4`4zLhS6SFSidr3pVuyCW|~(4c@az8GRra7x>Cxfb`b55LZA9s#NHi@Y7IQhOY|{ zb}tSySlch-DS9KH`Y)Un&q@}P5Fd-k+WwThE=%~7IPQ@r{1@F4lXX%VxD@1Z`(nE4 zSNb!R9uABgMn>!Gb6Vo{+F){7k{>zUakLl)Q96fM2 zP9HxLhi2wtsM}YcNjQl<9Bs{sKc2u84Vi>uLB?kbRW>uAmnknaJQgdSc|BbGk&IL* zGK-v)wuwMHY+oO+L-MDjx9!eq-1(wY@#lZ}7xCMF{Gs41pNpyoCmxZ0lkq>pWl{nE zP|Oe=@r4j6iIe;+`BCyoku8yO;18$BryL^FUgS|vmqB(_d!PP(P?$KQXZz|M&;KFK#|_W8C+tPrFTJ5j%3} zdSizL(`}XV^Vv@8rhI4mmHs487N7ne_CtBlW1og4J%wi2S;xy?q;G_7pObul{NoSD z=REfr@smIO&*Eh-dwDckqw)Uty*nO%_~E$go8Dp(yz%(K`0B5HV|?3pes4VVnfv2+ z{>y(8p1nAH_@I32JbE6!Q5HIgWMg}oI1`GFqx`E^7h>V+HR;ub_}E|HukOl7yzWcB z(mKcFKlNq;k)!_YBdXHK6?kJ?xaz+rnwhQaOaHE2y^sqBRLcW1v+<1E&&D@@<2Ohb z+v?vh$u_X4=bCx2vBD{c>N?3s1{Qe1AMT-T)*;(QxM_PKVPdkSrr}Ebkm`z9^|sc` zch4QjAI=?)$XuqaWdJf|0X^1Ly!Apo;f_qXEWsn6f8uxMD>NWw`5Q!FE#+}McO`GX$+rMBtU@Aw<$4e2@g`4&Gr@sSm1 z2<$EOSJp(wZf`9%*1EB>sy>+Fm07w!A{i9En^_0*5Fb$`(6-cxBk-eL8j~GtsUM5J zGRAEiE@bG#A>T|eF+QJs$okifpS6zouhJ0Z!yhbh$h_*1^fTcShHwIg+gS02FfyX| z(l2-qADQ>zonffm0B^4B%(q`HI@-R-2EuAZm7v^!R|5PH>doTpdYUFW!j;eAfd_WU z^wf`hs-<?*vY?Q79qy7$NYSmC8Wx;p@SPLvF+U+2hq^C>_j{v0;)0_Q0S3 zQN#JLTc(k!KPDY&F#f4~lF>y_w?1nlT=@JYCwv-T@rRR0(a)Cjlg|KP<9vb~ZG#A* zSHf3Um7gakNG^msiwPL-YN?H-Ev8QRjPS^l%nDgX%7|$xgunrQd)xs3Q}D2(AUx2+ zef~dk=77D_dt5c@3Sz+F1V3OR7r>M@iROIDN(rTpERq>*sJ_CZPK+8{X&N}WG*19v z+_)dNKl_$AapQ?-cE;k+v18F3AM->qde$0iD<8Y%(d(3VYy+QPTv^|eZcfJCcYh+@ z{B=K*z*Sabgino*#ReM}b~imQvokUlF?S*+p8e7|dgqtK_VH)NGV?$}xs7~`isQ0R z1e#}+V%`#%%iNM2bapqQAw4l$kuAZZ zZ-mU(Hp7dr!G9-x70bVQMEgd2q`siiK`-Vs8_XvSx4e*HOKtu*JNFxtv2>*$|Mj>3 zB;NjCK596C5vgKY{$R5;8DI95-yDYzorw2*@ZHjX^=&${vCYO5`4{LoexKAThLIh~ z zt92E!qB*jT3{ZpN;c=(q-hF252~U0vBPd(%kUMB5yf`VtS0!q-#-cO2EuV2bnsFf> z{;NNXvqyH~pZvhr#1r>@Fh+WpVq|Ac`Ye5y{N;a&J)g7ys-9y}oviEh?_DIm znk}yq2zO3}Q9LqF-MxTS_bh(k9{RX`^(y@9m^lG_3Hbj&wOpXH`dL5Ab^qoA`i$62 z>#`T$?7<_wO1O9E7ykL@+|w8xy%Ph`ks`I^e0uUg70rbJGAOc295W?e7$B2eNDjc7 zUcm*M^~|aRDLf4->g~eB1R8MUnfo7aE5&3OM8t#yk4eL1Kp~lg&dS z{i=n4OL|d|=+p@#;$&Xloz>l?@V=g=o8y-nx4NCE~jo z)H{Q4uwxB#KuI{ucuKt-H^e|MX{3}%_|lMi$U5soJax8{ii1+hb$R-^^G2g1Lr#Ol zgv^%WW~Mtb>e=X5XO{tSJCFrEnFR47o$c+d4ZYtE`CBp8?8i5K{TIh~{ljmH55D8~ zZU2jfE@`cND{e!S2q>rSW(Kd6Vr 
z0@?{SjgXaGl){06Kb8ZfVZy~Q^kY{%-^W|Hd?~*6uDb+i*vHk6wsz!bF1r(41ICCt zdQpXB*Sov@y%x%nobydzHQr28jdx~ZYP=H%XJ^$p?!<&Tz8v+>E(=y-(z)G|V}wp{ zjFOsmDRa_qqN9%V^iUi-aYKwxPUWP&oH{0(SPjcDS-A^vf7rt_13bW5!FR4Dq!>V` zyov_C*t(S0$g3qxJS*wk=`049L4aM!6IrT!?hg`c(K-24-@y+%KqdWPP{spC=x>FC zAeOq2A7ckTiY86{Rw*(~W6;O>Qr(He;DIKC1Ly-hA`ecsodRA$Cv1n}QrA?5WN8o2 zf}i9X*tru=9W1`zrj877IeHl0F^Iy>7s`V!1R5gbZg{wx9VKj-aKPy1w~!Mjm*IHH zBnD9NyVwGp5J4H0-C@Csk_eWZ7$&Glj{sAS?!k-6{vqpBE>J=jv66NGg_F}j8!3Lx zQ}YvY2NtxDgRCp@ekVr-`tp|NN*`zj4uCw> zI;rALvF>;Jta8q}+i{aTX5IH-?x#f!H4gnXZkBz~f(d2u%HP>6c;~O~uA!mJ!F2F& z9?3XIxte$7ol5iHI_Tm7d2A5!tU7{=2j_Om06l442b^|Em23#_Y1ntA`v#J!$`zVO zZa8ZaJlF{$xa_nX@5+Bpp`ADpmH|T?DR6_X$pdPq zLTG%Hj{q&;E4t{mRIjV7(z#&42JzTd!j?J-45H8xfqtZA@f40$OM23ggFQVr<6SEg ztZ1K1J!Ie#{MXc}Wx)ieyDV^I`09!r_WT|xXw(kAC*1{G=w~NQ0d_HA3TdA!M-FeE3HK+jpmvOk@A-hwO*zmkP+PE`ttO!rdQSZuP$y zkcWAL{CVa3U?ru+cn}26VqNe=9>@(WgW;Y8WdL+ASs^;ZE1zHiLj}8mPT~{M7U}Zz z1pt^;esU#aOuy&cbUMEOuCI$Ped*`%k;+(8o8KUvFz^Bu6~$5X-~s84kMLvQ-arYZ zGSD|EQxoQsYtJG>+Lh8#-YxBrh-G=qdle?ZvVXz7?E98L?C8EPoq_jmfd$K--Ds<` z&4iGAh|zY(@2Df4acnOu@ole_kM2)Bw$D5QD>FH9#6C0n4?WQlXxQ6YH$J?R>!>OT zo@@();+Qu3(Yp7pd31&a0PGYHTo$uYr>4sH_cK>mNV@F7@toY3U1FD<+EUvPa8_H2 zoZ}B7nxJGmslwsG_7=m|G>7bR!S8B%fPK6@VS9rQg>CUMlE^^ik!6R$CXN}Ejut-! zy`;RH@X8J_3E?yRhep_Y>mKj>f~9B70(C>$( z9BF~;-Sw&JEltjPaNu99u_y!D;Zb>Prb4UIliyjz|VAq)!a){qK0YWR8 zc<%J&W1Ac)CobQ0(|e^ePl;36`7Yxt2G|kpXK*U-49{{x9^`}Gi=x!8 z4gw(C(7zrDXuVYO>+meMd-xZ7rfW?%fD8V5WjKTMrGEZFDe6js#LbhXBpb9p6rD-M>b^Zt+!GRu64p>02}#INFHQ-{+|Av z>FPBDJj?pnjtegS8-5ZeS21B@dZu6wLe{s!NqWHt0%-whI|)vbCx@Z~&Q0bksolh(P;s|uwDUVgyUNr1SB(0OqoIvD(KavYBgV@k-#Dn;_2&$!9W;$&;>ZewVz$rO}YDF4JE>ZA5RlIXNC4zho{+v*>v{A-u4#`uW(788@PxwIb5 z;dX4w&)n^*9jC9L_B$8IGl>G7#ZTeIc;lX|2q)bePX*gIRiesxK7%W9DZfJ_jNd5i zGeaqB;++_eY-D|v!15&=MmD4835LaZ#%;6l==q1@*M9$x;-Z@J3vz|{1~h?<3fMi$0MMxs^0%b_KK4}+*io*?Vb-JcSxUPCZpdTWXT27Gs&0_VX-a)7UC0am zyu)YUJu;3O)dj>=?}15MfsR>q6slcHd}=H}s08|_;PE9weNRYwRU2Oy6k4zpj5l$$ zZ0e|TGd(oQa^ba42oxSelC@3AC&&Dcs(!7`R{X&Cd`H}Ob80+%A&t1G2moBWuu~Wz6 z$AA1s;yKTMK`bp@i+BF%pT|G@m;c=SJvM(dUh~SA$G`lApN|iG@O|+={`))PDdl4l zoyp_==7w>p{uB3{8bYz&FsAmIiQN_H=jxjDvAA$KZaj5UJpXww$aw5;xSb|GFY&~ zyaN4&p74M-^b?rWMz$GqbN{Buo^=JfD^&UOiLu~h+A`*#v|@%g^u#(+0=HA1EI)~} zVHFsbJ@ZoXME+8)CrQhDzf=0mD@bAhWpdJiswCOwS!whVduqRq6Bn^xdIxQ4*^DR9 zE`tN(_Wd#r!KnkhXUqcnc_*i6f&5bk(qYTVM_a+}e(q)AWq*O!_c(byO&0$Jys>G( z^QXo@mOg7Q>q4;J3qEa_}dZcw;zP#xDdkY~{s;nq8sB^h!$P*XYFyY1qn z39*@c`U9tIR~lsj858b|{jo!u$$9$VD#dw0msW!fjC$j?h|p5nQRt<6p{Y02*(7s) zY4BM@&-ht>stEfcT~~HCLhuO!D~9Ss9{Rv+oMf>CG~{$(>Kh0q@R;~ z0WO8RPjA|)X#L9*4@6Io#U?*{R(jGG@t`HhoM6Ex0Kl2W{@63fLts3L1puv`ZjAI-V}AQe z9P3|+=JIFa#L%_4>vhkISU(q|y~}Eg7Q|!G)Lxmc_$LBNy-i!eVgbN_wn~{c3mFry z8Gj~-uHJn<+>kFg6q_Q*@J>7C4jhd~A6bg8z3ZRHTB?-gZERvxe3^`9VXF z$QH6}oE2aYLG@5&^4<*6bMd_$YWgxJ=Q=x1KRX`OU$Pc##6f=b6Ab9#w4-q4rlN}s{HzAso`fPhVxeRiDEBM14A zen!fHs%`yEzi(K4WB$sYfuX)i;`hMB{^))HhXnxt^rvpSr@;vTYS6ulPX+{M5(Cp2 z8}&p8O2Gs>284GuZV-cV8#z!`f%6@EDnuNCfoIDB;-);F2-g1$G7$duh2E;N?)->B z2s8p5BQgdiI^Ug`lmPZ)YUW`4`hR*?{DU9kO4#aM0g1yyBxs- z178CK^FW;P;k)@8Ni7Kkje|FFx#!BxAM!AXL*vKlW+r*~jwm#knQq2}I$|4ADsZO(8CE`=ojO>|XAzp>0gT+) z?FyZ(I50OF^Rqkgv;XwR;`XzL;v;vzGp=4dACpq{(b1`xn!hQY@v3iE5o(;&aZ8Q} z-6EXCqmUfwgDfE!iGJeEm9(Gu#3$my`KRJ_Z+b)Yy1OwqGi_eY&9c*fC*Jq|_s0C8 z*?8x>-Wz}Y*B^_+hYu+l#Yp4iflL}DcOG(83UsBRqI2ezhBl(msqYDRCj8+o&JM@U z-F)+n(Op}QANjF=7@z#)r;Qu@YfDCfHL|Nl%>!+$vd`7!d`D|)d?GkDiHZHm$qC80 z@Sc-lo9g(u=f=>aNXkTR$~L=w;Uf+#<*%tj+ibSt=+UF9?|j^H>oelesndE>!eB>2 z4OO@YLjJU{&oE)^x5RVl9240R_>3mNkf#T{-lF_)k*k^*Md_Y_Sz8z-kqqtGQe+fk z0Z9E3jR%d0u6)Z~T+gHkzBK_!spVuoCog4YoO0>5FM5GJYK@S4rKIS`Ne(y>IFvYZ 
zeKimmJWp_egW%9H;XA+#R1?Q!0}Z_6rMFw&dAO-gMo#Lp6Or#!Z76PSV^tjyzQrQF zQm2J-`K}2LGRCi|PBh<9@=;onw}}byTJqLxbZozTT#E2+wEC760uB%IgT8?ua*zSp zV4)0#U%H}-d!kvquwj*BVJB4YaPnwqNLxFsRR@4$2O4_9FAR})ncG!#T9s`0q=CL( zkm2-@wem%@m%RbcpgscLKxH3?0I<|!$ ziZM-84Yw64k}G++`WtrQ2`4Nd-{z&_=mfL!&EH1DnS>6^U=Re)woq3VgEet>$qTR5 zh&l+i0|OnVk#Iq8P({PqqsPDrupaAYX9wEo2R*69ZmFMPam`DHmDpLj7K>M}#g!{p z{Jl($h+9}#iYu2F%g&gUSY2JQer%u*(yP_=t|#}OmwiOG>WUpg4$LF* z1-(lhp>5|AE3(1Uz~O$(0~(X#lhK*zM5iNXTzK<+LTC<8JixfR?Dmu0%`BX_aOtw@ z$pja8?RZBo^)|CXqa*UmzJnKC+fPq`vJ;hm{kS*Oqt3^K114KXy2X_edYw3PW*Js~yCjQ|! z+!e2W;hnL)x*4liFUi-Fja5Yr336lOXU)D~drYx3PNiNlCJ4 zL+z>Puc@6}TlFGR+98hK2X0?_!2K|Td1TfzWRvnlpHG5Fm%1wpmgC(m*$@_{Zpt6Q z9|i~2pEfM{imqYb2;M{o3F zaHQAY(7TiiXg`e2RN;^nlqEsFPaH@p!~m*D25$PB&@SI6XKD8+ue61tBY4+^KP>nn z-knGD4!wi0;Hff9Aj($T!+VNjzz1Y1jPk=Ca7E6y-4R#oC_6#@8EBZAolysS%>Hfd z_p(sZesC#gXm~2?Dfz4VVt(j~JlHM(pTQa8mDUr%Yzu`tyXyj)eK1LkX=NDbpX{_6 z1;L?hknL8S7fQ;GdLjgx(qE$f_G3jsb&Q-}c+!qx7IuqoLE5^I*Q!J(O6{NCLcHtJ zt5Bz}X|q0LO87}j;jH$irlG!rKpR^gflpt^&&7`c*L<&Xloptl5z&?O78m7lSs0fB zyTT+SUicWi7Yy>0u!oOsTxIH1g6b;jn&#L~-dO%5h(p$mJwLK81i+6;8Md%m->nCc z_A{+Gkf`4tlJrNZgX^IHzQ{Ar$OQ`oYP>^{>{*WKW~(I{F<)RZ_j^~c%pCnPSi3u6imVIL>EMWwUQv{oAemSCyIR;jJ#lmuOv)52r0OhAze_(-6{E(M>Ji z4|@UBuub)zz=ijYqp9I8=3%$#luBDoVT7Ra;MI zX|uZ>8~vVqe#xWqZ>;zHty~6lmp9jAQ+(#LVC#zGo8G|kS zMnpf=Pu&*R5xhMyAwO+Q{WSU(^a-dpn~>O;w5xX1Cz_$#l7sE8{I>o|T)B8D9{A*E zVqJL7cFc#(^7d*p1_`fF)0|9YJxhuN3&lU}-A+NHCtsELH zCjbzN$XQhIHx+CzP;%t~7JA8KURQEA+FDhAqph?X@%G>Oow)zpdOW41t=6cue2cP$ zA7d5+IW2Ouxf$R8z26x}56{It_uLcfU3??uRb9wW zzmhSFTv(KS8{hyMT==#xeawtYxgwrPRQy5vZMCZCaF50O^2-HxWn(QCmo{T^ZX&+_ z2Y)D@eBxsK?(h9pJo?C!O1%>=e%`I|^> z8QoJJl!bfCZ3)=K?EkAidft@ca?(|Ib6GZMEI$A9UKU^WhSy6D+j05gQ?cg0#hT=H z)AT1VV=@G2$hgpu2Q*9sd_`3L7*xEk+tOEVX2ID-+yRm*r*FM_lcjSQ% z_>0`5lkOvn_Vhif{Lp?GkDc@f?CTW$0Vk^Nx>7zoSN(;R%1YbM7zeO8nw*ndGM_I_ z?}Y*@PT?1ct9IsRuqS#gG;ylh^x-4ZOy z^_B7ppKRkykirL5lB>r6ktNv?%7P99S<-vvwdzh-=-~-s$%^rjjC$OHv1YYdoaX8N zlE*MrPfjai1s!6d_RJ@PVu(vUIgOH6_`QtFj$rd8b84CeUu+O9zycEI%0%jxlUwAJ@^KcI|(x5^v{^@WF(Ug!s#4e&E) z2Z@7MjDb-h@FAFYDu78%;54OcKFyW!JN?5~OL+uu>Y06YV2}?u=qua@QOkiq3kxp* z$Z;m<`Z%AnbR>bGai$i z*gQAoleie)M98(J zk7dYb=*&l)epcM_iZ{f_O)rRL^?Un6>f?*voD}HKvI%Ez5K?=eat9yTEHV^hyRrqb z5i{K@aj<_mI_r&p|h6nv6g%(d_v(eIxaOBi(Yp)jvP82zwztuiJ$)0|IIKws*13RE_~a*rMkZH zOUq4@pZd}}OSc`!ycpN!qjrRb}jt9|xHb1d5O zP1)4PI2((9{b_XtiLvZL>cM`$r3TuwjN=EdvyPG;=~AYx3ysmgs>_Z?M+6i630X$@ z6*`mN`Yt;FOzfhA>RxejIrhUw?j8DxpSby+(UHcTOs28|x7lDXSHAtuH`_ok z8#bI8;e_L>_pG3RHU;feHsObZnK-Az;z4D__qS50C@UK`6gC?{0_7vRbiCL_$dNwo zBa?gM$$Qx@(G;d{+vxi^Pcg6GOZw6<;kr*m%aFG zeB$FDRDCvN=%-|;J8|&DGvlVuy-PYKyqGx?OxM+dCm~fG>Z&3FML~bUz==>D@B7sK z@!xRn&u8zI{2${TseYyxVaNY4xfr&`;Wg8|K+#-b4*ON-SL^28dv`9 zICSWcoG8AJ#ICb3PxLaG)K;f)PFFihGBh+g3{oT0tg>b|-cWzcbZAfHL^1K+%|AP* zPQkJ0Oizj-92r*)Q(g#3MFwGHc&nka!jTB>@{gkVTQXwse8IqRuQc*(21aQR3f*cz zD3u=BN<*ffc;g)e=vB>M9AD~GWzko?s_-oCy22kO60#=xEBS@~l~>4$CvBCl8XrOq zAR_N6hYcoK^bYw%Zn;{1WD7Re%eTN+)#;Msy0W^E4($4>otD+r#aQ3uJ3uN&&KEqO z^HFzfGqJ#mrgl3U=LyLJI~F=}emH7;yg6$}dqg%0zEZZIbs$ZN~~M=M`Uo3EvnD@`3=-%`j=` zpsT0CGJOtCC_nfzS)zc5FaXIysDznvm^KN!mn-Ng-!P18@@fx%1*Oe^tYlu-+dE(s ztEAvv(-~IXDvJpk%H%y|!dYx1JFl?A@Cn|tLzL1}Cg8n`b{>8>q=be%ZPFmK zIFB4(KU7W)sm}_##)G)3nE$s{) z?QAb{kODk>w7%H0y15oht1Bv}*bmXx9l-5e1O>lXQEZ%b&tzI2Eyh4WPksUpJ&W}8 zuAGpQf&`zE#$Et~Y-~gOd`CQe2=i9>a?PYovzQoNc{vaQtO9(a8YFeKsNpk z#=H6iwWw03gptAUpS}P45Ap5x{~E((oSLxW)nwWx@Yv4v{NT))r{Ig6Ew% z6F>F6-yJWw?e^HA-MDf&#zw?%(h0BBse&(bDpyZR2%ZNqp|R-XHc5E~JCC_0;hi1+ z5v+2^dZ{<0--Q#xdUdz`OWGn&&xKmBLG>jZDNQ?8Xd-MW*;mQEI-l$$VXr)}Hz%KgN6Ro9>z}n{9 
zbxO~7`Zrf4`*ct?w_-zd?8~p(UYBmm?h+^i`%6A_4gAxurM0B~0tv zn^2upKlp*YlzlSI;4gHhc|Z=3cM-gAyTh~UAiDuysDt^VJj3#T8OVdDc@&zmQ*Xhk zGUFe_hsSixfmir6iOLZHHDK#aOn{cM)2h*}&&2|hmv%j%bcD!J9Xs?Dxrj6&e;_U+g zUu97)Vrt$)H+ZXbp2bmVL$XKUxXTgpN+Z*h@}vCr9k@y+Se@_1D?JvF2v%>yS0Cqy z%{R?T9htDN^Z-A%3xx-ePs&=`CO?yn+LsA`(p#@Y|Fo*Q2R`=4`9&?=aPM*z>IAau z?EtLQ&E!MotNM)zW^?_Ks2zNDImMpF4dD*0{A!$b&;RO z)n)o!`iXS`fYAfC=uAG|!9&vLDFB5hw&9|kuFe--^%v3eDf5M+d>K4NSFLYK0oc8A zsYl{u*p((lo+`fllAd{`|Jj!S2|Y{L<4ch*(wWfJ zpU*&1PFie<1GFFH7w!FA_^;rrZ3y{QCnw)m?}{71m5g_!Nqx;U*#}OUEF{VFRnAJC zl)C&)`!q5%Zq%T(fmfL;^sMqg(~^Ee;hSJ0`om=c*JU_w$`)62ltBI}e}u#++mOwga; zbCunG&!5s_vX1YK6Ly+IF)DuwZ-a%q9G6|&*oMmEnCxNU4?VqbnS}&u%ixeF|D~(D zJFD^3W9Qy07x_xey0-fjJ1*frynrzLbk1 zR~A|_m4sr<&`@0f09JeHY%_F44hYC=>KsduM3Yg~qqPwS4s!(WX#9uY`LFS@&s>Wq zlxkKIzu{}* z(v^$x{`b94ezSZ@^($G3O8^_-qd(wh8iP+6Pu^t$6XW7g-ceCMl3tQ1-XJUZ0enIf z`Ky~HLVh)UTwtF+e?D4cBc9x!nr_D{U;eWA-+u51;^QCvtN6`d`*vLLU{Kl<>Y?F{(ruMQPTbp^(2uJWAJa{l(_qx}{(IbbvxRh~-hIt8z1&{K!c7o3v zuqYM3SHMhDXp}S?^4PeVf4fMQQ^n`F54cDVR6q0qdVvpP0DMAXdy`KKU}Kq7X3QDz zg$J&N@CT@UD7^%}{+JK@767FDWnSRdwwSB4O5TXiNp``vKIKFyaBDr7MBWixj~%%l zf-9fUG!-09Q^A&59>-dq6s8Quz1KJLBwT3g4IOJ;M8|O>P3gJ%5gyl(Rd5@l9y2@2 z-R>3~Tu1#Fp8#&HmulnofNQNEz|mK5!49*L$Q!C;r%8io_XVHvUc+F1glx2<*od+K z09|xBiUW^I55EJh-cio}K$$x`<#QWOpL}*eHVW-4975_=rZr5@-zZM$Rf_eE_jSV) zX|n!_1M^z>nNfI}c45eq(&T}!z@TX8L*G?6s{UKOb;&&A)by#48y2+*U(u5>bo#FD zJL^gf;6F1z9NW)=6sUkN&B>P@`&ao*;zUcaRPPj}z*SLh-^nI)J)gli919D8?L~eP z$hKaNOO~jfX`oEh5mARewtFy3f%cm^(snZk0xsmiCXfezm1lnB;EH^CQd@A`-=}Qh z#b@~lmV4|zu!JmqVfcf*U@5%7T*0WcH(UVU#!e&`8hQ9?r)ku%v zEFVa9I``Ps_^}`P<@k%cA5hvn0YGebJ{G_13P&#hXii5QyESh9!mo(-?VlIdFro4r z;3M4tpLQ&{@psqN9#`9mr_xi|ouN&Y+>MF#%W-sPIofLv#mI$E#dm+rOJja?Db}xj zCdNlsl|!X@0Gj|U!lFjiOPm&#^ggEo|A zUG`^fvzN93>nY!CSvKs-#r!-fGQx(lLcoFiKuc&(S~`Ibf@;CoT**(Z zRSl7q!Z<@5NZR}2;xj6;HL8kd7ytEz4N0Hbn|C zZC;K;hlb+c|EnL0BlF_v_T|`GzZS!uXGz@UVB2PQ)z(?RL?=e@%p;dkany6k;`)+5 zJM4+S;u!C%Cy(;Wce*eBT-wGA#0zuR6FR${S3Bf*rK>QQFRQdiD{Ed~MJI4^+p#lp z@6eC_=*{;GH%9Jc<pP_m0toy3Y;}*l`7GcV>B6^@mV<>BeA@;6My-hzlyhf z%WuVoikKFz8$CHc$}^$oW5?&>;9P#sgCE~&BOttntuh$Qr%~;1cim9qFu5@(4V#6; zMB|E5!YmQk!s=+@N{3wU5pMVbiv<@a%&T&wFf`H20MS+ za{9(qhvk)?m7D=2m^3P9yV(@1vii4KoEVG>2D`=XVWIMQFP@M8@UMR&K6T$mVse}X z8so9D-iQ;oz9bHweKnRy)uZ8;xVzJ>`qMy&=DWqwCC)-B*j6+T5K^h3c;LRz#792# z*YOSC^7oZayv6D%{^Sj(V$5off$MQWFszKyckbDc_F_1O|SQfK@$_5n4O*V z&NEi%-F)hlzlU|`$Wa-x8FfTXd)H2v&MYzSC7_ z%El07=WXE)3hy!`t2Hju;+iX{A;4y2BZMv`ZQ+HFDHB1^#J=<7xk+Tg6>IHm)Oqh4(aI?X3SXfUzc&~2mvPnEqGT8?%Z&{VXCRs?JVl(W%N zJS7qQ?JmnYIN|(BF5n?J=?ORlc*cx#-+CR~tK(De`;tw5^_d?IqF0ZqEIPX^ zs=~(WmCJLPhywLgxhxDzUCGez%NZ3qE=$*JNO8ht!{lU2id`StCfW@Kf0-n!tDHHx z#3!3HN4)R|xv}#s*>}UN&Ij#++Zq)_o0*e|LXb8`Z2^JFLd%MO#8o>{0wg9tItS^Q z>bteE66>52u(a&&X0lRzd1*D4maoRz%Br0fY$kee-~IQ;2S5A~IYQ!(Bk!OyWrJ2rG{b)IUDE>x=Hk%7gI+AgqDAaEpCdv~ z**ybo=}>H}GZC6cCVHWR=tVipYb>g`7S|S5)G=R+ci;Olo86lZ&Boyavp#V^D7#!S z#`q^@`c+$A!#){zI^>>v0kj^G-?}4OI2-rkyRvx#;_3Mb1wPpc=qvA_r_&VKsqZtC za{XQ}25|G5PO8#uV~>E7dPX|ro?!mV?s$@@)I+h!Cp~-e6rV!rDlYZM>R1on2n7B@ z-JQZ9^y2bT^aK)!U_&#A8DtM-OZP9h?Y8*EpZbY7sW!Z?c4R}p<`6z;+Aeg)_zNB+ za|gpgG^i7G;4E9`Se}JL+TcuU{g)xY{;(RHBm1Efwp2oSJL2?cc#K4dehmDRW~ZFsA}U)&JSj>U2#xb+KDTMAs$rBL_Fm@3d_h(unTl67 z71oE=*n9ac)6=u^W81E?Ab6*Z?VaRytDpWi#|UOwwB?kQwDSo%x;M=v3oMXh@W;pe zyRfctM$&1MF#`78=!}~W>BHf>nK!_4-h{c^FMa_#qpcsJl?2HFLYEe?ID@x%<3fzGGr;dwBrsVM4apOF*b2+%;!wQLpxm{j^h8{-|1 z7&<-k-tDVk4NgEX+yqWibl>&H&Q@O6gbL8hxRo6h=>t->=m8kttDJJrWF*1xbTzph z@}{s(?^EtaxY8LfC~(~~ zJ?SVz(xT@K2IY6c3wupml?V9eycy7$#4F{(a$W-O=zlrhbxOODC}n+!7iIOX&_Sip zhs7R}27kLf-ie7R1|P>1R|Y^C^aLI`$X@D_q2TUc#&K^vWd~>*YDijTI^#q?PP)x$ 
zcHuUu-rgv@ao_RX_bIB`Yn!+x;2Ag0};_3D9N)`B9v%Vu#Flkw3Meyr<2_ zFXqT%uC#xnltc0Q!-XlenT#Tz%ue$%h zxEUuKEv+w$du!1dQU85+MAS31#bjr*us`am1%pR7oB z`E1#`XtSpN2E4M2scq^@caULkNE%hYfs-rsZWMf}H{%J=W^>y<&XC%;&wSz&v2=MM zjvYMe%?RvtIb3YKK*Sp7h=&L*(i(PhU7!JT+w*I|1zP0+`#7n-yM>kg8;FI znE>Wq_5l#&)3c!aYFLK7=S%dNFyQ3Au}#sX8^=ye#P9#vAH_RA_;6fOO7Lg=kmiEE zB&SaGluwZCH!uc6U?>D1i2jyO4Mo273Op#1ave9oL3I`HsXJi^-xgz6>^gL}+zWum zj9g6CcV|TSD4WLslorFpd;0&>iTr-{K)uo^N#<8xOtdPqA6~Ubd4eG?_*Fdf-a4vh zY=o}>CCG?4o$%%afUFOEQw`vm(kBecQPGqw!eNuqKl*;llEGd2`fC5*@W$82V^2IB z@B6^tI9F4kSqluA3#sf{z>g7?nP|xWR;0Wu>n`OO^?kwBQXoEQC7Aw87>-Hl1SjmW zk%?pYflD9v__335?D&zGo1Bvy*NYE*v zmBncUJ@pyv1C&7es`J3JTnh#KZR8*)C5Nq>+_5Qy31aLxG$B1UhA|LAm2o3gg5e4l z?^qb_UBB36;o}p1cAIf+VKK)~MdP|C_{zey7!&U&XC`GkS@==vH-yiT`T!o*UAH+| z2k<6PCh@&e7XtcX`>M9WeKw)HqkgA_mG(^u`OF3Ryx9SIl=j&R0rcz>OeMIrsu|Me z)Zlih#PLg80c_e);ihND8~)xsA?uL3UE|#LDxNE{k@e5_wf?5DI8Pd644B+|;#u|* z9K|i!AL)W*$qUPXCmD56op`hj7d<$c%63KlMRxse`D6fHC3B3i@g6zOML3ezot|ix zW9Qgx@MP?ZGUyjGao7^<2D&17Wb*@jNgAn*kO(p1&lnM#lO|Xw!oBeKgt=g2`)M=D z&*U@30N*DC^fsN(X;jvj>dfbO>VkgKAYV?SeA@E(ezT9#0P#U4|1+D*g)jI zK;@C};uG1_UlcG+n5;*x;ezEyI_Xoyi;eNPAUKbdPh4x$DvN=bGhXG5Jr`||BtMUQ$-Zr<$5Pn|W%$qWhIJ1ZmoRJCI0Z~x$nf_MQ-E3!szT$E` zNA)2`wab#vTv#Gl&|tgT8o_Wn{S0gGM>=%Cfrdb~VjKAtJ^PZ6;cfiEuC9yHDT@h;lzXnvErjO2Eh>P{L6aZ+TLIBPd3tgkyYjeSB6O2pOQ! zx%fB#_BY~f|K{EPjxfh!^;UO6?WUE0ARozwuO$YtDwuB&H`O6wfRl2V9H;{V13{8^ zkV3jg+{8JBu`(AO6VjPpib@_P`%wZ`JMs-SjzwEt*^t8^oRpMy6=7^iN?|6QKgyq+ z8j~~Fj>Ux~)qTstfWZ6677-MfBhW(SKs(CXkkNs@y|tYCR|=%Z|`gqw8;D$so`O zRMp@U6etG+D*k>dNu*>C-*;d9t6%u#c=!9>8RIJZ)|=17^!$uFZmb49b@Es|_nDs) zle05&re~wmZ25Sqwyx7N6VVtGJtWYLRy%nqdA86J@CFHr&Hy$tE)2AbF37oq9Ut&7 zO2sp;Sb-?oS}a0LPjM z1uHwRL{A{r!Mi$9#7i1L9Ea|PoL-z3Iq%!so1P@xfI1b|K)1gTc1y#W%~y&JG^e5d(_hl)RZfb!V6K+Kq=E zMr06ugrDl+Bk!OY4y@jhhi6fLk4L)ZAhqtrcj{kv4A^)}Hik#2N7}v2mkpEcgZYj9 z(ydat!%cqWrLoHiGL;7&rPDK5(W(GEWTEmu4O`;lqm9Vx%;1-Di({Gjvtg}+1>`eb z)PST713{%7TGuo0v(AG~c1fS*RhSBHu(=^l^9Q5N7_*FFB!;Adn-AqP*fkvQMk5}1A}Bij{z&^7f6dv%@8$wqPl zg;)CO86D#t9cu!rV7%2P<+m6UpPuv(tQx8h4lom*th622dC3tSfaVC!10BW@slvD% zF14p>X9-lDHj3azoZZIAnHzID4#@GEYFmLV*jQ)slFx4Bs%`Ak z<~s0Iru32a583K=_jdJDC(F6YbjL$K?ljVDFr+s;5ZS7_Qu8X!Cy)}18x*NN(xwfY3O7XkJfBBt!E7#u-6jV;d z8kZgbzsZ76@xz}+5d+xKWxo$SlU~9{f^jo`8SrTdpleZ{AldC8Nh%fwpjOc zN}e9jQOg$Zblr8kuIFOEYj7Ii0k;JB4qm*ga>js}zZ))_kZ>!UV&{lcARm6ZVTxaM z0Ra4lCz+O%B|XZiI@;$@I^N}EEH|ZWS2cGpI6r+APnyHO9AE^`7N0bknD7K)tD}x1 zFa_Vcl6Kl|vsefW3=bZfmRS!LWYz@$e528_gYtj_aMW?F3jmZb`xxqbu+x&kA^H=X zwB+xl`kr!BF_l4`__imRh~v516KIn3$huL2X;ADQoX?PWRvP6yz>7Ep(F|H%?^i%@ z^d)IU5CzDr|5?`>D!e`aAbH@vgL1X)}!8MMr=_C~r3+#x!}Toqq!J_`W$mqSQgimlRp#f@k4Ux7HP zn6{4mrF_woD$jV<(JC z_gZJJu*vu^t3+JB(4Shrg1kblv)eNKUV>$cI#+pB8snz-ritFmR)d35DaD?AS^ovg zHiQ%K9G_+Em7lmfjp->_Q*ssYUm zr^pLdpRm_glg?t#C`0L)5YGCeFW@9!Nqw}{Wlr86;6?hlAga#DO3kZOV}wB_>{#<)TQF)c0+-Mdl}Ncc!4wk z^&b8qPYxUh2 z)lnKYvT(8t{a^av9=}m_m>foC>@EmT()-gxq}4tC1oqpfs_CWasp1%qabHL88SJNj zf}hM{78d2t96XRdv+%#;)S?)=?Tj_pAlGFUH9NOxJ*eGnyt1s zW*j(hFizieGG?adV*cPvOwUY-#=Gi&tjEgwc67JYhN&X>GsEaCx+>iA^P?><_7&y) z4!XjB#)a0GRi6Cz#~*yqV@d~S=IqzF&n91qMUS-kjn0_rp8h+u-|!;uo$QaO&cqF? 
zNQ*l1!^IDK0Y0V(?C~0t!2+Nh7Z4*Xm-)WhFXhsEo<(!<5?R<%o6TmN-7)o-nyYcs zt@H8j54=Bq|4;6Ti^|iIPP0hRpQIB=K4rFPT@-Y{Lmu6uGRQdNvy8VfsRG`XN7C!c zGqOaedJp}`PsloDef^VPWL)*DH1M+$IxTi3!KEJ71yq8~`?>(224MJEnX-)3?*V;= zv6UG}R*F<8WrHY&@M*!g877mLyiZ$YU&HYC^0?n1+Ss0{e)MY>mRHmlk2r96F5dKI zuaC>im*U-j_HHi@K`uOzTgC>m4(NyAOJ3md9!$wB8l^JHgFW`*8wKct^&;&EW9!l- z?{ZXq_@rE8gp&qEKhcadOptLLJaV+wlWvqgIFnn9BLKU*#<%4KPx20reTpJ|E!qs> zKywVg>CXTY()hjp5$}Gy`tyu$`jpg>NnyC{%{jZRoV+%_6uc>*`lW)4-C*%DaG1DY zOb#|It3OBi{`{b16BDRpeg`Oi+>h$&lRcq_e$XJ>I?97*dM|uQi(Rvr2?zQ_h#JrI zA3ZnGYQ-yF@e1+oi(_T!T28{cVPV=0yi=~KtXDZ4p&zK4U~8eR>skexApM8u;6-WV z^H|chxNO|=)HnDD|Fe$ZkDX0ELPOJCGDsZbH_(iCRMID)=pKBx=}m&8ew$l-LS;Li zx^yvCR@WqpoRGR2>$0_*vRMb`566M|Suf7w#6tR?oRSP2aJHV8d*}yV+5S*>{0fzg z4N@BG3dqw7R~qD$^nKCI>`;FA?eS9JD2h6H;*4k2-!vN9(_aGR>3`aw5^$aQjFjz# zLY7O~qF)REh?e%v1h?XjZ=0W$%(~@COu)+b=o>rZlULvey;VoqOUgp#X(uhO`jOu4 zGh+$r>n0D&;s7i|OYIEvEYMNG_EbgMzDsBGYDy?xKiYNS*i-w}?XG$(p2Yz^H9$D^ zx*S>G747KL7^LwYL5CzoP2D$yC-TfF3~2yQJEc24^FyE1UvH^h?zBA*N89HGF3Jnu zx^cg*)nMX(%X4o{`J}{qk-Tob;D4+YW9_EtKM)zcLQJ^mVb=AB~>m27d~h z+>_p7)BOoX(GVR2d3-hXrlQSa@tt0buU(4y-Q}3>Jrx@dy+6M3b`E$9zXg^+RUNOT9O~)ds+0WNKgk7xiax!F56D zQx0lAES0X<4&r5>$e&{gyhz#*GG7T&EaHdV7vYp?;UjwD6{=O#d&%MErC3>hDo!6C ziMM>i7ss35@G_aV3$e8Dr10$K*f|ZX>3Ao<{UM-e$#)91{rvlNg>zmqK7fQFaF1ITn;-SurxiH;ov^6UWEq$pI7V zT@hUEAn`6&0Sy?21YkPKgh6(2a@XY8_=I@KcQp^m@F?fdL=2D5$G`oR-;Q7Zjo*tW z9^H-AMe#@yE_7m3j6m3dtP(_a8CwcbpuLelCI?%}DSr#5jdFB`1eBFdENmbbod*~U z-r&P~Gu1|ucR1)$kS!+vb2l|k(CUUdSAsC8yKbaYJw?lN6kk^yPkNbm3sczKQM>Nv968~UuR zbmP*+Yjzl!Wa_CixxSI#bjH!)l!+NShr;K=l?!%MC&p*u?z``frNwJ;=G2LpnwyQ= zZ$BHadey7q>@By&(G$nw#?yS0zZ#Ar+=>2PsLU9IqI}j`g~59n0uNqNhvYvrK_L?) zQpBBPK*p&au|tQ_4WeG5s1AjeZiuA|HVPPseYkKGs{~y32X_UBB?r`5JTfHt^om#o zcizP}u$9I|w=Ws!uE+9~wRF4yS3s!0)xfN(Q?s_d=x>L2d&@o|a(Rt!hOdgo#BIdN z+OqIeBcXDAyteeYBRK>nvUXs0+KXhSC&t|wIdCX<*3K%P$^Gf61ID1E4*vM`jO=gO z0i}R;%gmN1!aif?z%#$3xtdxjY92wH176CjC{|N4SqquaEjlPPK*$9H{TR6&IqGRH zQmc0vxU?a}(91lJU~pl9{+i(Fm<-E?a7;T55gl?G12ASMU_L8=9x$vARjv{i{B`4| zL^xxO(T+QO!%PHu&?@U+kILKYTyVwuv0FT0z|*vIz$3qSEC<|n#toI~tM|wR!8?du zE4`ty6h8SW4afK|4uoNrfsC}Ni32vMuHZ#%O;5==^h|vxU8c>63}BXLy-&C}vxzUy zz_{+c((m09pSIi%LdjEc1x_|tN@tt8-_GW{8pkj13tdZKVN~K!VP?9TpN5}zb;WA! 
zcDug5cD3ipC-#1tq8`ma7>q*&+ntI|3V3Wnj$8t8?*vux3&S80nYjXSlL|C{nNTXp zB`K~J-QjLKm3b5Co%gnaX0ml~&X_VQ zCl_;{F)&)$3Nd&0JK?{p_jb8!&|^TIR2<+P9WE1g@PHt)ACsf9A-tWyxvNK|Od~`# zO>B@pJUCkKfA5E?e{=$*(BW8B(X$ald@h->*X7-y#<9UkRlt^s5{R6iq}6_K_l0t5 z=iPKtCHzr=Du&+4=H#|VV~8&o0<6w^JjraWQ;`?XpQ}^%+*7+BJzd*#GZ~!Bol_IT zb?e%VdiBK@Y7t^vqu~$HQCOoxn0wG*8ZH*tJ$|^39=WG_`bV=mHpi|&CGPt6_|@T& zA@cNIy7`)#oT};B89G7(#G6Rx%GKLW#JNa0N4Gh7VZ)&{+0k2Dg_0=8K(BX}TVDJ~ zdm318%{fuMQafn`l0oilqppQD_gG+-R_F+dx-BQE2f&a=ra;6irtcs+1iZtE9FLB6 zRn7v8Xd_2-S+^!^-Q@n-ow$XwxIbc2A5;5-=Y+OFOm@h<)`l|M+L>pZ>!CocqyFop_)=@yVa4 zpZJNtT2DRw-WnPjulc3TI(z;a11q`v@K@YP1fYJ+$Gn?@F&HJo9ioGnOx%;#=+ICm)18F(dkC0v zR5VJ(5DEvuvU+TI2!&>FXN?UTmv4mq2%e+EBgA(>j*1cWPkt&*rY9bpF!7!v2l&7| zJPFG6ZaoF@w)P@Id4?I>)PNAK9hvjtyvU z1HbiE!_Wp1avqjVVl+#Z~;hQqzII|hq8$) zkDmr_?v8h7xU1@DBmK~{i*E)p*{V_pO;8{59J?a7H!uir?`YfrM)O^P&n%}HCinwyn0R&A> zK1gHLCOG&~L9kO2nWOx{MMcfY#4HHF&k6ptCU+Ox{%nU)P7LQ_FY23i>K*DXHpt+o zc_7zp&&t7ECjhLodRc zJ2f`C2i&jJ(@(y)?tSpVx^{iCzW&^IYR~?WI=Js>_@+951E)(1n>7oatSsNA-aR2^ zg7Ulb+g;kLwXi%3d?@IiWQuN(-OrR=N9!VZhDLg6cRiVuAEs=W-eLR)P=1DK%R{(% z$Lb)m$Uz)8cE>gsE83kr*AVv9aA52x@mMxd^V8G}^9kCv)2|^RC=% zlmE(>?1b(pY@WyQXjNrM^j}pyU3tj3W%H=ihBkq z&W7<)vvuRb8yRrhqTM*iX@gj$ldw&_4)5--MQHQ-BDB17z25ii zyK8*!L~S5bCvVMF|Ii*_piHh*MV?KJ@2ej?|3Xb(nW96szji{~r%oQL!^ig5%P+qK zox;-~vUzA$P{-)F9XhnHMke;augHUX9-Ek`@sWM#D;qU4JCAHuKEvneL@cd%io|?& zA}QB;`{gt1SuEg5EWOGc+Kz*oEF=s70RQw!L_t*4imzViLrqUTc8iYA8uCtB?YRxD zNH5fra@9L=*VjDi)H~z6N`UsIzKveH2>>p5SYBC78p^@&9Pl|lvIm+3{*jS{bx_jq z_(qQ9XiD;v2E8|5zT^NqSw^q7)0tBgaBxvfb_l93@#lbHniTcu$t)gQ=0ZUi(F zynU$t&VTrK>)z9k)a13BMX_scZVCKs)zAO@&)3xKZ0#8zqF#rAWwY)%exQExCq9YJ zbE}^F`d7fmVjVxaADK8(zx6MFx30~s));lzPvs7P+ab$h9e^*d5m7!$UrVcV>2T6i zY71RvLjsk*@eO*RHRzLXuQTwp19^jsM0XUni<_Y}320~dH^`vhAny&rG~p!Bz7xan zqPDwV$;3DDc6gp1aN4HYpbbO@T+)oZ`>l_uMQ2ksP?rwG=G}CYk1~T$@<8*`MyJh$ z4|Eo{#E}w{8qsW!#Fe23Ui@Rj1Co~Mnv}8xzrbi1^0M!Top00DjVRm5uMAA%-tg04 zYp{zm!wXZ3o5e}IDI>Fxm)GQ#G{hP2$qT>0roVU+Zs5l)6LZ#;VS+#1--^GMxA>Ru zpds*W{^G)SjqZ)_5U^AJPB`pVhvHS!smqk3W26J%YJ;xA2^{>U_@*IUQI_c6opkNz z1ut>O4Zfd&<2n&lTc`de^OnBx8ZE$|NzeBzcu0Ma zKmMT~;Of#@!=dT7JQ`hyXZ`pLJicgKkHR2)?YkN3M4b2tqfWt%f9fN~A?*a=9Y6DE zu)zDeFzAQXf?0O*RaemN+n3Lf&(m@C3;v_is+jyNaCS}WPd0U^P z(Vr%T=V@to^CuI&y6JWC{ckd)q2iK!_%vPkd~+v_j2>aE7vSEpP5;+;9s0|uCdosZ z=-S=-6CT^1VNBn0Yc-6Uue`(e+r(>=5Vix|>>v2fANj@TE)3ZG z8=mrtylA9LWAN3XDN_o}DUaonA2zd{yIf7MneYmOcVO(cy9QGi2im5Kalv_~zBIur zMoVatSO98qy70)$thZE2Z0pVw?MRh&?P+(JOfXONaQb2^l=tQHXOIhn_2>TlpUdvC zv(LX$=PsPDm5rr(?16{szT>BBk-pEh3peV9JKWH1KKT9**TB$3^&>NO?HHu*v|9Hc zKUKf}o4;9;b8|ID77ikM#Qce)N07VdpTt{OUaVw#~&E*{!w=bNf)*mGVvW8WzwHMsq)5?`dr*QH0)JV{Hw#DORRcg zLGVkS(GS#XT~sKISRal4aKaBaX-$*-C=c>nd@2vjKUX(mZ+he_@17(`HuOF0H+U+O z;|R88$MPr+Wog%@$q$=8sw}>9I$9<(=-avD$$?$^IcPrg8TQbRbTWN_w7nJyJ&HPY zQ_9l3W$mev@iA~cRL{ToVqLy?qwag)f%=J`{8U}OcD4THzx+&HymmP!k@XEyF7&)! z@MGWT-~AguRgXUQSY0}IzNW6;ti5C7b@J%ZdgZ%6sK5U+KUXIy|DoN3HH@CLfo|wv zuM-~9rKgg4s)47SxiMzVaUb+JX{(?9`~bQJy%`D)ho)X}P9Mut#5Sp;HTvE!fA9Gk zfCk1#hrtzEJ9^fZV=tk5$62L#4gKHnmc}XV541j}@A6R|aN;gJ+p}#ZGE`Exe?#45 z;|vbl)L+^e^`PEqKPTfvwnO!w?RD_GPTfdX^x=De%dvnp^knSi`T0!#Z?4++pQjuP zahs=4ZJ%Zs+Hpe1u?rWPiwpUN3$n97yMGh{bEm1gC%6RoKJen0PA_HNLVrfXuJLi& zRR*G$>Pvt8hc!LBQ2USUuCaalYu};$^}u6K);GWN{krnv`8xH)({=3h({=vpWG%1R zM?z;D>;ta3nx~HyozDBG9h9do`-b|m*=cCR%{0{C+A95&txR?e4cV`DVTw3#GZFQg z1t;<#u}?jMw*lJXmSYkwxToA}^uN?0JxRgk&S`rxEpY8e^aSwR+L;S6Y`^kujO{1< z6SueOpCZS+eKnoPVAJxw^$ShW@rdvRd7G0EKmZlABzQ8>DeN7-897RL31lbj(sEfB zSr}j#VGUi8p8Bn{al0R*4l3N3q-);7%R9PTJ6!^|(5>%&iVNQx>}1eTV%nI<0BBQ3 z^AlFf*Jy)$wZqhtNlPOz`mSl|Xlxytlq0s~YFUa5@p2nF2t3LcpTNg^*Cc&ly;BE? 
zjOoA%4!XL3^0g13U&2@)-LiHb(-8l{Y>@)1FCANBd96R*J>{$IQa24@!14)fmO=bd zj(A|Y@eD&j^QW7-gGM*lZ=S8MeEAD?V9#(p{^)6B)N-w_%mY5U9>8|T+?6wKzzSEAz8v@1 z+Qc1MZ&{JqeH0;g|8F~v<+$T^^?@rVxaI%-$eTf$*T|6L=+5zNBL4|j>-ly=TW=3_ zP5F0`UYaj?sq;F<>O^d=(m`HtY^;FpS$e!J`XWY#SZYbTV5p<0`=o+4O>sYq^8c*R~Twbr?vE6my>aDtP`BE(` ztROeGYTtna^ws!5P*Pr`tP27J^-T%a+b?L@b&Cf3& z4t8XNuXoTpVeA;OGT6CH$NnAf_f(;c)fESQpapO&%}B1$Upr|9B}^Iu?~Rb84e&v~ z`1a1#+D48KkB-!#!-s2rauzy4hv^#zPv~rGTgXA=o@0XcgGpy#*mj|8b0Xahd)C`V z?E;n^TeH=_HBY`PwX(QG__kO;d7{oMe^CF(SR!&RC&fe8x3@ZaF1nCyE@R}-cR)sd zsxR`Ec5K^_=CvK8G>mP{xa~A1^a%9D=1xt51IMk_)^?Pe*c`*(*g{5A-_9@fq3=W< zszYb|2OVf9dPxsBpij2u_&ROdyZT*Ykvs{zP8(ZVT*bJ#5PjNtPsi55X)pD)YZtWD z>p~0Kv27lm#)TLg-_@R{o;dtzgw0X!33XD?4km`adx$(OhX$zvcb_=IAG_=!Ek(mg>Z%ye)9x^9u+;`fxnq# z5;HP9TJQhBhfn~=saXd%ovbn)%L_~)I4H@C{1+dX!0ii;;rV)YaN0W7aWNo2AS4ga z#E}TN=Pp>3W{|b9w3F`C6D+py7_? zE(%vj+hFGB7wfe*F4V(xcn%*s9Kq`D-X)J=L#gc_7^Jgi+rzYc1*fo?;S5IEJ}Z_6VCv?FWS zbp)b!&e)-uNBLixTc~HAeSc{F`7eLPdC59_=vW;*a(+Q7<5)Ozp$7=bsJYYtq`GUx~jGZSRHWoK04uW6|oap*6*e3S8D3oOr1UZM*Tnk z%m12==hZrL^4|K`hkm5~{9pWw^^S)gCcQrV&)4koYR!Wu)3W2?E`&XM_SJ#?2Wnz$ zPlWJbpU07HDU}`Ml*!XE zay&9Pbb`RT6Ni>bljFBZPkIQ20|!(R9^&roZC6xLN85CMtV@roF^nssb6gEkb~vd7 z06SsId5jC3`fpNK?hxE^)jLeiNh1eIHr7|M`4%?PT8EB3Qrf)YAL9;Dg`qY~9$b1? z-J0~aqOG54xg)xnbgn6@yD{83;Jd5nT-oL>LK*dKaFDxYuw7Y~6FYEMiD`JUjL@#q z#&8J9uKNzlO$P|_fmUotbioK?8_%f;#L0;Zq)UeJv+S}9SxS!n2j8$g>c>S02nZU3 zs|;1Jv|Z@n%F3a^evFF=2E+VE>Bx?b4xrQx@jX%_ybohskvTFpkmHBO$A)WcViY$F z@8b@dB;IHi33)g3kf##3+rjS$s`foLI+U>EVPGa8eX+Xo1b>Ez|5^$TwFwnTL|5F521JUbSL1Y-N+{DCih#yzpOLmf^x}IER-X> zV%(t*7|@gSYkc1!0KnoKTC&YZQ?f>7tAjGueeNrbEHNz$LEDLKTf)Go3}d@ahezH- z-cdH7E%~T4W4yNe-nK_OwSC(r2`^1D=;Yw{P)_zjgGsj%veucD@OhG-n#n#wKCcNya?- zMn>RARYN;RbC>MP-~Jx$b-3<%$HTR9YZ^I9=Y{sOjsE1q&7FO~O?>5rydo!~sAuuF z3&mCaYwP-rT6p=DTA!V*9q_w8dRO5o(N&zlMs7t1Y_NxZi0ZDTEJEqfPvixD?He0^ z0(7qKX~)84kVu|JSQiNhpK(G%;%{ZCCNEs5rJ1?%*edy-9Wv=`3w#AV#92ZI+U9!~ zeCmn!yu12Fch}^tTdrS#Pk0BGZDpedXuof~{CZ7ZyIyhQt6b>;fC znwejz*`?)%BjE0bXB|6o7}^`FrDgOiY0h>94QO0Y2*IS?xGOGohD^`3LeNOArogYY zS36!}qSE%=sqcimJG5M#>#BQojkXi#F3p(!Mvex?Sy>z1ssZsmV2N-(|<=16?flrMPP zu#OIbyO6;0x8sKiOP=Ylk-jU8y)Hl>7kEuhPS?Ns;vZN4pfY8=PMtiJ#R}hj?nU&U zrF#77r|Yl(jlY$v5hf>Rpn2QxX3fD*f8pou z81s#7{P0{N1iMtnEk#>4wvSba}qj1=01rDLo-)3ec1ci9CYrj6GzLKS)tCjH~y%GPwQ zw;0ol%xz&?c9uumF!Dd~U>Mz6neqsK-l3o5#cTXq{k{!$U%TIqV8w3m`l{7_+nZri zS257eopfVL71J_-`ZCFoUezr(F(ntmPNKe(B0f-gP4k?@~ttq}5Hr#;H?o!{VJ3 zNxP-oP4n(|7DAI|`;$(EtK-La*Ql?gW#VZ#Tea5>LddwwPk03vIb9}r=osT*d<<^R z4)_adD+u%ed~M=8j_nbKI9bvBhYm;&IdTWr#1Fej82O=cLDQg534GVRgE3{8Y|nNQ z(g5(T?qk?a;)&A%fRNoZ1QlC;guB-th0a1t9YeHCe)u3$g)514;p8!$?qfZ_?Xj-n z^mg3Yr-bZdLIXb5HMnfq`9ql~oHEYEk_IP+~k zX7c8Z`jc;dqk1V8UD6ss&K*B~2-%Kafs9oj=tGW8?Ax6&6Hh?8H9Zx1KRi5!-sNN) z@*mmj0J6n~6$5jqgmMLGu&GYnS#IF;Zhhri7B(&}Apg;CFqU0>nwEWKWukGdUv(Z) z)Y2d^=?P>yc_|&pA00F`&zx{b7zkjpt+{>L915k{AD}8fik~gs-oy zjWi$InmA8eYK)2|W?KQj;4~+BSwHHcz~I1fCjG&Abka^#ZKKyt=mb1doz144-rv7z zzk+v-ecWuqAdPm?A@!vh9I64_+C?ujC1(#qXPCQkz5uX#hrzM^5#YAY-SOwRtK(=+daSQgCc+I2?FwD13)x?Epx(zZMEe9= z8?>p^5&dPylGbTk>K}4_bn~8m8NcmLWJ)jcM=Ongoua#G>n#uE8~QPbkMv>KJNlO< z9vE;*(s8>Z_vr(t3AZ1kl6K<{;xpfTG|KJ1Ym#$|=OeuVM8@m*ZGVPqJSVXPvi$dgzdSH~d+H^B zoo~~ShQikZSQmcX8!QcHDFMbZcKm$rgwYr*`Z{i1|I}UQyXEQYUALqw%$;vxYT-IY zOeDh=B_DecwFN)QL&FJv4OjrKv zX&cOUY~yzQJ4X1r?=ggXo2|cpEu8ttqxCh>!~(#U8%7ZDikmin?HkGhZg<+YlM!02 zwJQs{;hL+w$aWN5kYD_DlTXklGj1W02%DH%-KVQx3s>BmfAmy=H08wiL%<{`-h%MA zJkohLop_Os!THPMhu^W=yBVcQeD?{1)ma<31BiKJi#u#_V|3}h`J2PtKmX+yW}v)x zzC)VN-S6@~6u=!0x*=no*K;VxgVq__<_-yx;cp~TCDGX@A_ zV>@0?{}fY^j%zu&yq0leapaNu%6+ePymaM8z54na$kzTE8XKu2M^DtB{|kS<2GI?#UcXjf 
z|JHYEabc-uS6AxN_g<`_d+x6j4?KiUc_q3J$^^0qjv?JsmgrH`hhP9!u&z`h=2G#!B2zyqv&!n+DIiV!VnN!#*cs|ri{wC?kc7ll81$q2#ps-lM1jiEJ2u827U}D0q{tq zq(WK_n35+6WMwq|4({a$Y-|}?V+6oM12vqG%Ists1mW9!OxJunHee5rjMN9-_aoIa zGzvkt!O%N!SuFnY(s_&%K@eD*Lz0sWD1T$n`itYP4NeH(z8w^>(K%R51+PO~H2!7p zYw)ftn5D;IuP$%aiDO4I(eC}k9+&T(aaQoY{RivYFpf)?uVu2WXJ8--yybu8%xiV^ z+7;?>M?L!JBRS&L9nLpzOxDy49ZO(eoL@r8?T1L5JRD3%wwDfx3a!@!IN|B;X;-6L zoYj@3G%Q!{x`KUlY$A+Eg;`u}*cidDN4vROe$+e2J!xRnJ3&yoVVK&&FR^=6u<2*A zO#A^;lqBohcWl2E3f{3hq*6}!bRO{c$TlaueT2)Savk_~Al!~_2G_yG$}*kbh2>0K z*{G%`Z`Gka`|6$VeQ#ZP^HP2GH-5WL-g|GIe#e7#yLY?(@&E9%? z2!HIMchq0}t3OrGzUST0nyYy>>-^@n(o_xRrIOdas(INFFOb*y(L z9-REoj%eDE_x0!K87fPG=|qBy53(Qw`g6y=j7uiO4{>aNpdL3npiwX(O*=OV7pl_Y zNa%Ea`fH$f2pVXU>kdTd)dy{et3H^H^fNF#l+Ke!rimXXc~h4jYrma^0(O$2=mLBA6qv{_e<^$)swosO!8Z+T*bwS1L&>VrPJi$iSZwh?zH_YF8n(wBN0 z@m}j8O@EEinf56E-Fs-qC@OpRj@5*d@e_RS<$KRW?QtdK?(w+oo*2UrXTn7Qn6a_i zw+FL(qW0ovS}qXq`hbb?u^NNt4wH}f=DACBYFT+8+9V8R zqkTmBiz~vnHiRK<`HlmN9%rijmY=&iSsb|lzzJVvjCIjnoS~l49pH@On)HYs{%AWD zEuE%a>cs>dF9Eba7-=f73&diI?)cH~HQ z@DGd(WT*UX&oR?U+|7i zWDxSMXBYC3a`2OZH}AY!TB9)3`NHZ}edv=vTK#(t)VWLNYJO=Y^3R?3%kblop^>_I z?OMJ5(#y2l0n%K7|BTh~<41C0$?W1v-JG7OxwWM%o^#>bfdhxKFv&aLT|u1*`xWH0 zJAUZ&*hW>{2k6AQz{z`NckkI79%(ay@W7dimpu7F9cQcBuKI1>Neemb2^^??;8Oh; zDwZaUDy;;E&>gr7?PaB}r$&GS`G9iNHVC~dBMsx}G!C$Yeg?{g4Yd z{NBFZTfg)xzglzii}2gSwHJQ;$&dU#C6 zeHj1!1E-W-p4ldL(|Uu?w>^Akuv8qHjq&o)G1QrlZOMAM^H@Fj+h%o*kM+o3O9td8 zFXLfo#UR}lCIBXITPT7o z2z*JV^BAT*@{p7d&jx4L5E$l{MM9P-an(ujOT;@rDWiVz1n$JQ{Q3ir;ShFZSdQFmEH*FcVthCk@rsQW(?I9oCPNSB$7-r>MFs+ zFJHj{+^yO1^XcG{r{SmrhySLTaQe3}_V#tR4t?a?J}qq?;}GR8zT4BuBKfz+v<-)y ze1yqx*1Lf_@OH``%=v9teT`>){{mtsU8@zC;@8Dn#}3`3tu(t`$;J~t?vs=otbxfd z3nNS>gInC;g`H$G*eP5ln|zGg!iPsBY4gL+gbCQ%j_rk?W;Zd zDYH7|ic|L|FWR~b7h#><(<2#o+CBcwz7sUh-FnkeTIhHM-e%fhoOGj6m&HbAOUu$j zCOxs$KO^X6@0bl_=Z*PkN9OAO)A!dsCypbZ`s=rU`!{Qar0?H%s6O)E57hMBR2Fz& znZ8-iz5HrD_SnPq-gmvH7G`E@ls=YM9_`vUQT;nM>uZ1VdHM*;wM6`#$U^&Q!$V^f zb&I}rKYf+M(K!Z@X|u$C zMO~;*wByBFjWj)-HV#=NFZv5^NKltn?{VD6dUep=KKrtZuGOW|pLapPdUPL8ED**e z`s9vp43DV`wsD|k`rZq;OJlh@fOyl>Q<3X#z!;*xIZL_EpSwn1d8~fy$No&6zVD&> zt>66Z`qH<)TqjPQjDBsqx;}Xm9dS>6{3AbHpZMfQqhnq^cQ*a!dyXBgVf2Gv_#gjq zojH5H9^AVdQr*k~e04etopz-{4sD5Zkd*}h%5=x&Je|NXoZi9V=;HRVJmtYf{qjo} zp>J=l*5=|uz4_Y98DAZCavDF&8pN89ar)%5aGZ!T zy9iacY-X|kDN0V)Bz5U1Lezq}JDT@Q+Tfertb}n4#4Gwd4S*!dMgAkhZbInKm4IVT z4AIYbF#7g7@R4Q))k)gxAlyB-Zo33W0vb7tyZJ~R(ub2pefT-nq)x0pHak6=KKs|c z{XF%(SjSJ@S4Z!ANA-?Q)Ri0a^_}Nmt<_z__2m0LSi6QMY6ksu+ktV?k%s5#A1z$F zQagantIXVqXdhdg=CrGkA=sU|BRVoL3Y%l!>d`yk2@b?Nj&kQ%e;SN+iqMAgUQIe#8;C& zx<2uo@uu;JH!IuVQsoKB`0j&IZG zhUxCwR=TwnA4c;VJKw48j(ZZsBy`^Hq?2DBUMyncfq4m>&1!M;0EBQ$V+|b3ptJeC z^@(dMw_#i1W1?=9Cq47$J$)C_u?#I1c?t*KN%?I6b>ZvqA&X!rc~1Ml<|lUJc@2U_J$@y6r%YPP z!koCAIC1Xe(fPnmIpvS0LOIf2ZBxN9f5O@gekUG}w8f_`F8=XpG}%m}Bg?Ok59u~1 z9(f+WcqV^c?%3w9O~8al(nb#ODR0IxI<5ZVT&wf09Q8_O0m~%$PRDy-(DuWS!_Ju{ z$@R7BMUHK*EY{5ROZCZ*eyHv{b+Be`O_!&^KL6Zz>%yC_5wdDzWGu4IvCH?r`vZ0A zf&1&qg$woF=f4YYb{yk&uKJV|<5l(s5O~PJ+mV67 zsSeo2Nu0w#K4FwKZIJ+O)K5c;<)pqvjYgi@8SgM$XdZX@a2D~yvQxFXlc$^STxhQ7 z^~&7zO!(GqdmC6c*AhmZc_+HtE?`pA>7KrerW5_dxdp?7Mu6Q@9i6LlY-DO`vc|@C z*T~pNoq6?aeeb0=>cF9W(Ra|p5B2RL8pB!N zl(P9@MHk>Be!RCn!LB-d;6UojX^9RgVmF#{k)|@Rox&|{Tc5!DMuF7*cc z$i^4eY+5i)aI?1QycvG55Xb4Z*2!l>T*?z@o6-n1+~PL?yD+$6O`X(@q;8(}1o7~0 zbrD=fwasVCf)iMO%>{s1_Aamx$cs6)7UGn;CGRbAS8)s~BS zr@!+Lj!&yI$j7#*3vobBwMAZ;OTs;KtJvzR_W1@}Y#HMTT$@*W9((-2rzzP{ikAwA zA_794h;oIqt3YHn7TVoH0w#Y^!L+hGg4uXZLdgg`S|xW~;b${maWGRQl{jSV#3dl`ZFxk#1~=@M zhh=nf)B#)*7B->7rlNDC8qe^^c)j<%AE}=H5yxG!@>3Ssm~jgu116&5Qo&Ik@dXDr 
zSeJ1}c<|oBG!tS<^X-IBS)FK*5VzXoq=V&a>*VcD=T+itWw)9GnTL)XtwC3}uB~J; zRlmK5kJZg9SL@P+v(-O1QrB+Y%%sVoBL{N)trOyl9?8x7f!(7uK2B$7b~^V-c!!p| z$c57#x*l2Xq%EbOaonzUXEu!r7wJOc5-zWdu(5b7ohx2F(tmw*y;kt^h}=1k0ozz9 z?@;zWT6bU(JQXDT4z9OjVLXPtKP#|D1_pU|(!O=3`gwQ4U;1;Et&POT!D^X-4M(QhoBzd@9EaUc7J>1)!&vfn7%Gy?}?0 z9<6&$-do48T?im!TOqwgC##_aPlj+X-F7(Ogf6&^V6~hwOmN^tU3SQCH6i6a{tBww zPTbQrfEb)~;((h_xbNULE!IvC@1BHV+RJkbwY<0jo%Ci^`Uv!5zT#PB#EE!eTZV3= zorU?inwy!&%wT3~W@@_TpuJmDQ*_>%n|HKNPdkx16NO{$)=a+LWjv!hX2$rlgq@qo zQF2ba&fL0%Kkw9Aj&di>8LzP5-S5ep?fcdg-*oDm|1HwKUNbja_$lH}1JmTqc6a>E zsT(zQ1Dp37*Kc5M*7Y0L>&A^6b^Us?uU)&I_p4X0-Z9s%U8^f7J8yN<=yvUD3+w&J zmoHzg%a<Hrlig{6nOP8;Z&t=?%fiybdCc&9$O@hx`sf$)O9$Pf)Zt+EEU~WD;ZxzbQ z637pFl#d`w_?GUopaU9pA&3htymQ`(y)3SPwmj`*9Rb`19NNUX0}2jog~O^;k}m!1 z>@*lc@U^3F;BABbIi+Ct#NIlvcW>?AGamc!!TojY@WDEM>`2{v@^}V$?z`s%O6UMfOfjHKzmte zw&n5kfZ5rZ+JJv4k37M}NBN}8QwG}3#6z0|!HssMEQ^xQx7V)RkvlxPE8}ux5M`E6 zCOj!=8`SXWh!a1%zDd*eojZbnMVS}{3unNf$%98iq|*Rz&jK9z?xy&6ff_uBx|mz= zcyuDbyAJL>PemUWX3sr!8EfeNCbt?0{dR%)*;S zM<-CWZX+j#qjY7^PbJoswq5y&tW<7h^1~gx04G26k)Jn_+?Bs=fWeP8*rL1T^|`B8 zcyfoG6Xk}fNAZ%5NX$V*<77aXd|X(jf|nC3@QWNbf5LS281w4ZRq{h`F)i!SdNtGN zB0KxS>pd2Gle$~Ey^&L9>@*DC-i1v1p?dk-->h@5zf_<6(I2bZ{X@{s68TcE!0WMM z*@cbV+^KG6O@o&n7e-JH^%9ViGS<{gt-g67ch~NO|0{2)^_0^vmZeKutvJRajg+C4 z!*@OXQ&A6krB}-8Aeg&m%%9TU!O0G3aDeU=}md*xqbQ z4ousY)QzZr@@up~DZy_RZBn*OB7m}Vz9_$VYLf}*01%p8#B>s9Kk5t;DkKzoNT-9e zq&xAfX$!NQ!UoBSXWH7xYIKXV=ky-PRJxYV-%?jrdfK}Btoo;;kf8+P<;{+oVQe=o ztbW2~8KrL*rbs7&$#~G)mP~;!J5?Sf=h3__3|@e@X}|vZ+4|kj{YmXZZ#jMXbiMmM z@2z?2_r+IUfv#`YGtWL#f9LQ0hsg9L^p|DILmz;6E~@*Ve&J{9O>`pf-0OE`{7`T0 zp~L)d|HLP=$m45Y`Qr@!dbHlY1AFR^zxdU9>C7cdUj5);0GdpPoPI9OoNhcz z+Da>Lfmab;J`BWZBW*%KdPRTRy+z#bPW+vGLw zx(N$`jmK+D+mRw{mdSKGakNQXXj>VY2`q*auFiKe;zPUZ)`i>n*z%{O0i@JvaKv}| z;JS7Eau=cg;$1j}J^152(C}w@2^+V_zz+O@xf6%);0_trCZT;7#zq%O6I)V=n{ex% zKhjEc`L6?VxQk2yl7thE*G`%p$5=6O7hd0?m+&$3?HJR0i|KO9EV#)Z-wCgbgO3Yj z(#DOn(x>m{OWD$YiqVa4_Vb%^Esy|Ra${$VePdP*5 z6i^rQObVngT-|ij_a*GMNBPHxcfK8Tm!Gs!V+$T8aZ=&{8IXi#M~-j$rMr0%Hj6m<6AmiIUjgRljw|^0xAPt&jQjz%b)K~Z zrjvN&5f5m9Fl`qZa1t&%r;rzd5S&1>tKe!K&y|f;Hc{E2tg$lz%G`al0nO`$&? 
z9~rGj9=N}j&|{VRi}Xz{)8|u{dE_1MsELvB8lD(0k1ALH9~tw6v%&htx4vHU=t1NC z4n(fj#Gc(*jI2K91fykevDF%S@Z{8F766XZ*HFjbiL7(sz51Yar2g%$D%FBK6w|R8 zQJo2u;0;gFPyfiVo7Pq-KkZtyETdaF!I=#cDkwZ{rxNsyzhiWS4lnZCFs7ludV_Ul zzeK*QTXaicZs{m5;PUXf==OwHk8ixjbkJ#G2l}~iUt8EV*3lbRyl!bbV?vo|-Ed=q zxc16}LDJj>4t8eoqE}kxzE|Rk7vmadWYjAg*Xqr47r^IsJ^c7n=qK;X^=|*-H~vMP zyL6%MJ9SSM{>?7VrJr-`=#lz={NMgT2Jv5h;l)~*ovu?S@2ze6bqlv<>mU8Y{};MJ z)!`AZ`a@R?4+qDTyDhTRfH%sfOv_bJ;7q-L1K2#t$%YZg-GYhsJ#j+n&UX-K7UT4Qt=U{*t^jb&PvX1k)e- zm+2ZNeATG@>7BNNyEGs`j`^%_&`$?H>*)3kgjP1osP65F8jdAO*C`Zfx>?ApAhL*7 zTmVP+no4Ij!|S))jR}FIyiV2#bK77uKev?QZomAsZ)Ydm$IliD2N&MnTo#FMR6ATELich+B^xc2WrXhCbZ9C4G8onEG^^+yENt-t7Hp zvO6EChvwYe#HncQGC2N8&$x}(``w8_HTc);&<0^>SHA7YlCU_7)@7c1fz@03K!w z_cU5KR?9E%|Hq~mxHIP<94QNBh8BpN5SA-E5FVy}ZaZufI`s3@w{#aTOph`} zMgX%wOur^gY~VDE@6=}eo1X@_Ej!;``tr#_KkY8<0GD{rJI2~-wAHc2Pu_&M)n&dd zRQ~zmJ#|U>g~&GBLz_#ML7p$-ov_BqPlKlgYHq%nj=|eCM8N9e6u%RXcjAR+ZA*TV zPxN!++h&DD|K!EHVFZ*!!gF==-Cy~?X`3$w&*ra5`MU4s6-*n7_l|qMsrNYhW7*-m z=p~MqnrL+H$Qgj!SW}SF-?wcL99}fw#>LfzTE2Fno_^}ddg}4hb?xdT1hQ4%|L*td z&DUNc5oy%LYW>;h^zb7O*L&Xkp1OYHT3tDJzHZIlBEPkar5@U|Ki8=`A?@k0%2zk< zxe3oUOV^#_6aHLn6VFEHnXILJjem*{Te$QJr*lpX znV(c_KiKW`f$kiVwqs%PVRe%>&MEHPAe7UU8&dJdPJV_1cfHP&0SnGf4rgNsvPIak z>zt_Wn4g-Q&PG_cNqzlW&()=ilXc|e5#-QFed;HEqTct^Gj(-luDN8~UEz6WN@T338hN;iPr(qmXk`WfpPDyTG3dR*Smt@p_=` z73|g8T$-#M>zLb~8i4$#PBwQa|E$@@Swg+ylzf$;-LbZA9i?7yx1KGx{V3}xvM%*) z-89`jx~GupCXA%1oON@pW_67=fud~PxcgrST$~p;z@zfQd06R6om!nXi&hEa_^(%8 zuB@%pD!Pt3xb4x!e=cH_hi4oZ+HT__)HwR*E;nCNSJJ{dI=_6>O|70brA%n;pl$fc z3Bl<2sZnX1r={;4q8shDGtRYhpe>+vPQz1T9k&x-*0*g--e+2#ES35OEz7IRlx5>; z&*P8o`!t9=Dk7~^J3b}vYeh`2e_A5voh$Nl@S|un0D9{h8cqaZ8 zzE&bS0{o&8rEOP2laP9Lx#9sNi71gC>H=_)hTSUFrrH=CEP*_+fy)p!x-bi1bj6TL zm>r|G`@f|Z1t75c-581rLc-9HWCrFVGiw85Kto!?1Eclqdp=w}11yz&s{aM!My$(#d;@ znw0rlhr4JrDEZmRzqAa_Zf6p{-Mj68w8!An5dy;oU70O{_khi{ozB0>h%35>+_%ce0}H5H|q4!L%_HW&i2=*{_RiIBppHV zlf@6@XQ$HvZ9BxC9^u5EJl76_iyD^k_lVxFe&uWR5C7qR3j?Y8$cNrvf8}rd&DuS> zyRJ{o(CM6SjU8B>;5c&laNYmF0|?QR=_pEXPMUg0--`FU*omTjS^qAcNQV5&KwTje zy1~s3k`rw0m}hY8WD#}Xo$E4=;F&t~ZXTJd6RgH{@={~HEY8g<;xM;ska{z%jpglH zSzOKJ{WQYu#`WuU{_MrNdg*FiyK=2AU%Y~Sy{4|+tee-b)#SCwn!J9aZaEo$?Rw2j zO?4*yr)pt(x@Pr<(HojAUA6MHf{tz#2!#yi{NHdWJvF`TyP%+P`BPW&6@*3^xf zc0zyh27WEy>8YFe<34?}X2{FDXE4*IIo&btO!BnUhgFY@jCl`Js0bm?M@ zabg406&E95U%q?+({W4MSAYi?f#K4H3mD>G!2jZ7tT(>jB)mGku=$i|{HZ-gAP3yGlAIL)=Dz%A^}K4O^iN zxxffs>Y|o`ff^edsJ(lJYTs_}s~@Wa2PbO(f$=(caCaR&cA!q*ceGBQzPBEF=plH; z{dMx>u_(O9jvT7J6XVDbgnqBe1B$}RVonQ~nVkgInYwiTd|kZsChdW;QXj&(w(2Pn ztKnzf&lFh$|FZqrrq}Rm%1u*xTtTnmrmka$OnIipqXOgPp#wPtqkM*gFQCYFkHR8y z%B-)7g>ps3#?xOMWYNn!yvIo}?8bQ-fQyI3nF}`7R_Bl}PTuyw?`NU!+3FvX$M3Ai zpL%BWPEwKJ41Y25awwrmf1W4 z7r0Q_k1|C$RsLXm00>8-U`;+Wcr9;sx2Tx}eXWi=g^m1=f=48hi|kPrsdTvrLqBD^ zofC1@765>63mma{7gDezQi0kWnA{K<#j=S&m}@c4VZ*--%rA$u4c+RCbV0 zbPviEy@~(mP_&5++TA=n1L5aJd&1f=qJu#2=t?*mwZY!-nt?qBfugVJA@I-+?<8^9rK={sB60ZD4f6^7IVU z!s=#SoL#C%-}ywHe9wDoeiFV-S)>K!yy_G+KwBH4SPOOG;swB?;{c3%Cp^((qV`OT z*7>U!>6}hi-_Q`{0@j_q(46giyvAs&i}Ulj&ZA%Y16~*L`Gn>uvUKQ>2AHL54X18(sL3^+E&Ip$(B=NtT*d8>4`e^39n)q zh#pShbSkV$^LOlkpsDnoJ%i;Dw(eY9L>H2@fB7^23_ibDk3IfS{m93Dr1tJVRG;TJ6r`IR!RrQagA30UDFb06BAZ6%puCH>@?Nc_ho-zoPU{Amk+d(U9KM_wj; zYZnO*TrE|v);3(26x7iY#3c;9VX@@&iANvGjJzgB+%IeD3H#}Cu| zFvd5n7;1r=9RONd@}@hBC<2@A#3G2WOA9-9cyxJttiSGC*KT30 zn-;h0*JM^FkJx@04tLzdT*IZ7zm^wQ2U|ogv4ST6Nto`Ragvs9jb_@B!L2S#JMpLo zabaDEk1T2-{H_j&;4WkUHjCVX{n=ZpdTr}yOZV_7zHoUKQKXr z#Uf1PyajcP3F9u??j);ygA8n{=g_tW`-ceF;y`t&M=M?&M<>D#k0yRjMZ(*fv4si) z7|yrv)>j5IFxZE 
z>v5a9`s7DHR)6^~{d7%EUaqrF!c(4mPaLgP(*4F4{jG%yTjG&&-A)}*J{M6E(|^a_q&xLx2Z!BFzm8se@!VP3`;HnO9wOat zd$ls5XTGt5i|nZF82y`{eAXv{;+Dz5?sxDj9S}~v8kh(KZQxFS_>QcX50Is0R5f&5 z)yLDDU*W$yfRq> zV~6UocfY%4mtFj_RfEGLwYa=SpL?UOUOZphi%YeWwz~s;51_~oosfRC$21S3kJ?YO z`keoB@r;{#Jk2vZ>s7-~F?!Ne810Q7`H6b@hBvm8e@CJV``6nPj#O$;&{E|LdF9j63PzXJ~h{>aV*S z|7@MvWsB({&$bxzt{b(5@!M0wq&eP6x5qH>Q^We6FqpV^!iRRqS6uF^>lZxS2~HZot^%!;Q~MLn(XSr3#{2BS~x?so_F z*4g1PUo*8`88%JXHUZqi75oyXi^qc2 zOlb)nzvLbH*!d=Jk}>GrC+%F97RX8O_TEf*M(7Ja?8XC((QIiue%2QCV(|FG7lbQv z8oClYnaGFUvlsw(OBMJ{z<=70^GcGhsVHszuvF%&&JV{SqV1z)oaFxve=Uq@wzQ%K zV1iR#s0lxKZG5Wf1-d8mJPD(VII--+zwH@u0Z>2N3sx7$;xZZW*v|5_Zzlz#r^er0 zY~h{sC7VtXKH{eFVr<{@#sdJ|$$y{t@#9w}ayxG#-k^a0-86zbSS=Iwx3J;c0RVCRBI>q~l*Pm8r3JK&?d-*Lxe zkqz~z4zdm{Zlc$TyU;g(j{9XnnU%(qwyEqTSZk;F^Wg@ewxhgFLbrth#u2W{;=r6- z1Mhdki3AG@OJDt3EiP@<32<}Zz|s0UfA`<3{d@M; z<*DiV`nR7$-zX7;xFu z54Ie`bfO8E)t4G^5^r061Qx(^n<$q@Haa6f(uhSo$B|8M-=4jhI5(^`(s&p$8ysd# zgg+N{45`b3TRB@V^}J&PS--wad!MNttJg_)8QNRBv*3@wZ^1RS0*+&ow>m;w2)Z6U zT%9fpGBO?}+|oI9DdVE-{1S$r5I!Y>p4I*I5KikS- zBI%41A@|5FFsU1FP>#UFw{0oc`T(0U$+3v&sYLe_bCcPWCNR>wG^;-D0s%KqtM~Nu zcnUjQ5N2o^Zr*i~Sffz&tG2M$Ilh2}g5(KO;=vO+clFa&XzRXy-}dg@WaL|iv%udc0L?Rnzy{hvla9ko(LxJ(U_2xF7kHbz86qPQqfGhhhONDw#! zv;|Z717sRk1_1DL^1aKk!1SWp>OSX+4r0pJh?vlB-qB*+r89Ww=}D+9%d z@cC+uS{Qd%xnf9tH7fvl&ru8{;-Fbg_jRZJ5&e`ZzK4em|V;eWxw^?zf-^TOTS!$W4q~CezgADPyhAue)}ue zr$SGzlu0e4N$FV5HMh7*JeciV;?AX-akn=*qg2N+!C zC8G=@#@&@%t}GJY&f)Y4>%ai@Xosy`h2SLWjq8({_;zu}(%eFJT|05QP5wiKmrscI z#f8~gK+rGEE!K>a?+ASFz47=UR7H3N`GJ=#%pud93;-SUBc%F(X8@W}Y*R7NM^mB| zQg@VYOeTyJGPpNk+rn6`gu&l7qr34`z?-tqyX_BrN*l5;vJB4x^w!Ss$x7Tdufdmo zx`|79N#hS${3PtOt0*ecx~sjzYczJ=2`67vae>)tTrA_g$(gLd-9;u2awI+`X(dfN zjvm3tw<{n`2iyB}-sNgB|viJsN$KBzcQZY9_ zo5d3|vvVjvF8H{WlLvCDfu{%LWCh=E+I{V)$4D(QE$HadcAq}T%CK9u;(w=rHfaIa}`)_)vZYvceK+lJigOGfK_;? z@=bae9UIOdjjJXPA30K|?m3ZD10KBZR6X+0!}aLH5A%&ueDBG+=ftr(a_m@*jE|)5 zlrEmo;t47qtL>nilY+_w>rlED&$?%oD;*R0v~41F#G}HalH+4tXu?A^dEmIji2{gI z!35^wsyC`z|!FQfI;Id!Bi=jvqgW3|cSmgp{W)Ev`mRY;Q|*=n6omay|$i zJ@P15A_yn%(zWeXIY0?gB6rY75tKDZpUP9h8>0JU0V1Cn6vfAt;VwS2JWe#qLmkL$ z_-Q9MWHF>|KpqNzk*BEqsup@rcn0gFe|e3Gi({UhIVDnxD>U_8^d!0P6?H*1?Vsf?9nx}d}Qmp60=c8Qg8`KrDXS^;n3E_#oCgq46o zcbNFY_u2p!@zr~9zfI@F)x*+@ICK%tt;?5cg$`l=j{aI-UaQ6V#T+4~BJ0i=>2nR) zxudVYmY}~$+SmB_XubP`AA-gfqrd_{7NuHFbrKh>so%Wz>dT=mmn-MUSJK_Le;@5? zp{`BOQJ|gCJyf;_hx(B9M`~=g= z(xbHO0EYC0F&`6E4AKD7VW#$cQfC58sJzEjlCe(J3v7GvFX}3~1o`BseBhIRxVXb~ z?YtP*x*i%DMGo}?_wDd?@40*V#WVFsU;bL%4{v+&si*6icfF^+|J?KSg)jXH`qY8? z*vCFn|MuVb+ckgl8v52u?U~q}`|jkIfAodV*XO_ZC$(p6D7lRqucQ zd+Qrt`*JPJOwvYo!BbDwg>#qc_rCCac$gD6!xVK8d=Jnr`gRT~snEcb>&iyxmh!dE zZbOqlJK$BpSI{I*={Hbz=~r4%cgz9+=(5!X5vkIKFQVA*>C$BKe@qsI8&XwTued>h;J{2{J( zzJV|Ay21PIe*12CJKdxM!)56#~`fvMTZL%&CX;#Zw+p6EAP~W zx{G@f@f|w=5>LMBf(_{$cmgA^h3-S_DM<5+F`@P@{u{q;Fxog$x8S4IS0~Gk;WzF= z?6IM(m79ZYM}!!r6P}M4i%X`7-?ck_!GvKcL-Z$Q*`2tlqJ~TV^na@%t>BWlSxim# z0i7~wj3*hGmj4D7@|J$H9n`RHyKoxcXc>6X7KV;#FsaqIv>P0)3}_5_58e_^8HBB| z%*HouWi#=+>QJN!e2mq0qbU(gJ?+xm*Ec zz3bjFZ2<`nG~Vi6Svk1>wZzt036)o6sGpbvcVv}I-fCjAM@f#ooaz=WmTun+iwfi z33=yfa9TIKn{IREpS-&nVeagJ)K@t3+P?2R@@>l7T?m)ST7v9^uEJBnkr_#&TTXtV z7j77Cus4(cGmDG$iBEo_PCoQVef1B&SkJxua*ctjV+RkFyNbOdUfdqsySH9F`(~ZH zc($H>_q*%3Cm2m$2W?&<)mwY_9Vj;`eDRB4N^3+7uH#1zW-+m{U42}AXK`t<_V3?Y zM~rSyy;nW;4gYY%fo;%h!JM?rRf@_o+ZlgQNa`l~u6<|9@Aygjo0b=N zTi~X@com$Fx`nhR-rW&pO*h(xj)295sBRoc9~HL@-G8BBg{>FLwr`vVn#Hso-Kl*2LU;Jl(?{oFVZ~aLfKXxJ~3)xp3930HG zbbtM)f2tmQ@Evva;+ypby5+>cNUoM!xHVJ1`13zUKXtMuK=Y_4%h9){5Otff*(VZ` z&>%D_ZXDN>FWRO627FLl_<@^5J=`-P! 
zhlWP38f!6Fy9Y`eBUwax%!V&tH_JJ+qAE{U2YkVs_L7XN(nt$pvuSvVLb^E>e zb>J6<+dCcf-dTgxt#>|4OX|XoFC+otNJHwIjrMkgCX%MyOn8GL+oQ6IJVBQI(3H!v zTX-Jn*Xt{s5P9?5`Fi1%vo$(#pze9-9W}B4Xid#6)un6Gb?N$a^^G2^`=5BCR!QG} zhWWU$%gqBf-*`QBw`*{eL@B%d%)qGZ=uLlM5S`cieyvCADc4Uz(=IUeIz`>QpE*~b z3bgp*rQ5H{#35nTM?C`AqlXO>9gBJpo6=}#Mow$~nA8Jq{B-PA7w!;G*!bg?+}clU zUn!$l_1jbp*WQ%Y82DPoc)-6Y>Mzupn72y&CjIb7+zsywAQRd#TOPxi7I{f3Nhj%d zzDekgowUu57v#T-0W#kJ%67UqF=_CPfARn>?ZivEE&Yt&nTKJ)Yw{E{jr@u_$QU4W zk+SL5@$>OL^_zH>5135Fvf}A`&u#0V;X-`V`)YpVEgWs9cgridG+Za`hFjs$50ml} zFUE41mbrJ*@(uTVb?{>xE5Ncx=JKMGC5RjreoQMzN3I)MHv)I@rN58)M%H1QLF$N1 z!UJMAnrl4au1Om(ZNlU|;cbR3T??EtTTvl>Y;kWk1e8xSob3vJeAA>x8)A~Cb_)Px zDQ^<60Q}>jop24FASuKlw(o|Od9?2i<>RECYj5Mmx50qNU*T-+s=+M3OxhNz`{bQS zK6)9DaMG@R6cM-lv;dZ&g~nUAP8=U_)Oe@3Z1LK6fWjL%q*q~1gttF&c4*v_A5~AI}6T zLOCbre)*670KL<;i|j{s3FY1c`|6_~`B3Ahiwkw`wb$#$jVbh*qjmDcvCJv7&WM1y zd$||Afi?xK(v5uFvFywhKx5$xh6OL1@L0!X!iNYWO_`KCr&E5)uj;Cypo>c)gCS7Gp!!u#UKmN|GM24A;xUn|KOw)aAKf8n%9V>V2gE#wRkz2g0 z|CqP^CU<*p0?UOd+Odgis-F+5i9_@N)Fb?|cY=1j&rqnClV%@troU(0pV_@^DyRMe#$yUIKY zdZeXJ+q8Tsb8Ab{xeNnNZEumQ;3D%f;>u5OzJKpN>cOhB&zG!PlL%V)(T1SzEbwqH z2psg(dhNQsf!A!+8oKZ13Tdv@I(fL8U$g@hHs6kG3%9%M9mfW)w%DN2YpTd_aj`tM zd*e5{=Oy*+d*W^gykp5OB-o6-w&CJC-*;dm|8o)%Y2p`>F+E`DPUZ(ZjP!v+v%DJJQX3!zi z-}VoB%-Fu^QNC>EZP4c1lew2xJSASZjOW~e8@QcMC9OVi!k@gxHnzQE`)bcSpV;$h zUXMmtk(Nre6GGO!jmm*9Ckwk1c)NBf$Ur?n^zl8sYp4mJ#78Jl-G=WYod|gW1Rzo9 zpTy*DK98Jm0YFc$ig81HJ0T640=%vTcYCvNpljj`>l^R7g?i zE*#*!rA;{Nz$_31QG8T%fwCQG*bxQ_7rqAuN9yT!f3SM`hI4AhiaY8h2jYV>-Xt#G zEnc3^OIk(8>NvGZ5lcGQQe-mNa^O3G>=5A#leP1*zU-0vYxuT`^tdyXBq!BoW@fVj z{P@wM=_I-eavcI4-nXx|fb*VH_tu4TZ`2#FodK7 z4eYE#2il$TLr%^^uoC1H9h;fCnVQSd?aL4*%5P`)xhtbBzpH`<`-W;{Vu;S!PzLU0 z%08}g@;GSA=>Ytm+zY-Z3bayJh1p&BhR#9_Uqk`Iq)l>J3r@UPFVYMQp;oD1v1ikV zjA6_+u3bGtS%4s3He4mtWz-}hTw^0+nH&)RPG-7d$KBZrlxtUiPwg4sQ%^npR2?~f zPn~_?mHOQ8{_nMK?}57ifm2y=|Mf3@t-ko&x9Z4V$_T!k;Qzo!K3EeIW7&nc0iz0Y zAwO5+xDw3Cx8)U>pN$#Dx!^*M)wTM$pZ)pz(wD!Q4(&&O_#^cf|LR|(&^zn=<*Nwi z<#Zs{R=0BG+^JKi=!hQ9(POLB{mLq|Ks{&IV3-6H2#sU_%~WV>t?htF%Aczes1w@* z`Qa|zw%a0tL5b+Uc~Ca^=Xdj%P8z%Dm% z!c$Hruf;Ltj?(KtG^O|qUzORz6Ql6U7j19&@3f;W|CDFNh8DC9?>nZ2Z;IxfbV<`P zI)E3ANH5)h-*R-r$Q!jyzjYjZ*UcF6G-+q$^(ZvD1Zb1SP$nK*Z6 z?aGq($vctmwI@rpLi_Yxv%EN8i*(T4)!%mW(|+eVr#(QE3%bv@yY@Y$LjLHH;_e1s zg{GI_qo%E!gKh4zaZt@GDduPANz2m$W^2Kd9H8l((9pRC!jmhm-MCg)FSXMTE?>M{ zmoA*k0)g{q->i%0-mJ?P&ez2&7jp8#`v!OAdVt~K0eDD% zjZTc#?!CL~z@bBR@ZiA=3?4szG*^5aIdY`-A39h=gM;Bw%0X${cCosQK(`gsrp4#V zQd=Zo4de(x`6_ka6o3AyPFFqp7)B#Mus$8Ui>*OsI&kOlgx*b`i1JL_q|>b}up?XCRpM$|1?k$_2Fi^m1xW9990q_%o)2vapFAS%iV|Y0 zT-hc81zgz9^lxn;3bL{msbcxvUF|6ZwiS&1wUv72U60l7eTNga9hq)gq^$DjwG-Hxa3OAOoHT%Y~V-8PCLA#qeIEpg_NGG1BSqzeAdou zZwBYJ<(ID7)nA_79(g2f65QJ94qm~r^kUm_U?PLz;$Hn#ew#XI`m2wN1*}pX2ua-L zo~rW)({&4rgo-{C9S%OH=5|p4Kpme&uAu zI&ENOajky-pZ#KX%|HFjQ}v^N=ErjN&aeH8&(!&g7vuKd{_H<7g}QkDO?co)4UUYW z+d2TTS^xEa^$+VJda1kkU68XM{y2g@^^-sL3EubA_rLuO+UjQb=;;UVuiyKfKdNt? 
zzfyz1N|YJ|Z{zR|Pmsz90OCTt0=Inv>th``;Dm)`4x(ve@|bj(?C?`pSzu&a@DD_c zj7{XE56dh$`?zQ)13wZ0>1QxXr0FlZZRnk_t^FCch0m^6_(y_t>OsF2rW1+h?wjsD z=B3`C?RQHvubrQk7jK<3PFn~wnB|QEltT^=@8`#86CIYg1opup$%?SBOXyY5a zm8w1F-3FS~U5HEYfNI+2ATFZQwI|I)SO}P;gR|}cahL;gn_~PFUi#%xZues~* zo0xB=)kG_A=iPFMV+ELQ7-4N~m~Y{WeC>qu;X4D;*ruLIG~bg9bwFHYlXYi(Cp)9V z+q(6rY;L8&1G}|DZQ_Ogg+U(Pu$}Rb9{cM08^8G*KQw1tzknljYrfLE;q=ihaT{E5 z5+B-eBXZ|yuyo1)|zzom_IYvb@0lg?X1$B8GXZP9o-FTtJo2Y0+XoR5g^Xx{^1Y76LIM~uMuYpdAvZs z_03D?>wWM0Kt25E6ZP_o->WZt=_}Qb4%$zqyXi9zqK^rNllf;ao~!3ydcBSu-cwJ% z=b4&Xn5}j64q+MFeLVf_mtT6Ju3o-gi!|I`pgO$&K>7f~BNG|8wx2Za)c^+$LgVc^ zK~FI1r(aLt2afA%$`LC4C} znVMb#w&hdGNAT`snh29ZGUldkM`5!N8Mo9bc{!2U(iOi1plrFO4R7V2JjzMdyb#{7 zIqi%7LM9tbADQh2h~Y8%174R!Jar%YbS|{;E_w6w8Y%giy8z_}?#?owY4kh?Yxf-3 zQ~g6j^@AV0Tvx7Mui??L`b&S|FV{nlJW=2N&Ufp-`S1S`<=I)s?m3cv$%`+&SbHX@ zuXjFK|KZ>L53)G*GJU-(moC&X`fP^|?W>o*`@Q-fe&JVYg0hVb_M>|Y18aY6t)k~r z1~&nuuMr}og~*F2XE4Wm7IJxo89YWEM7Y%nGr3Ioj9I{=ynA?l=2qRhaXsUij%zs% zq+Y4qHvNWEF-Bb5aoT{X;0W^#kHWFRBD!0fGrWQQYwa5+`DtLKZUHHM8p2u6c3YA} zs#7{bj|m_31cnZ?2{2p0+S9HQ9rkL@KDsLCWweWo!*<4K!*7%o*rp=CS5~N7h`ZKD z6)wsGUNZPinFT7u{aI}5NnwBZ#V^&(TMKpc zRvb$MZt|mo%1>@~~aRp`4Q^v@swT9$2ULBmAa~qBwi(ux-`) za-yJXJQcS4ZeP)jJAJ*B6MJCS?Ha(XpEL%DKSX-k1JrSB_@!|MDCYoq492e+C;j?7 zJum4{cW;R^nJsEi<3~i#n&r@p$tJhQKD0?;z=|@_*>V)rXxa+ue z$CN!v+Huz2UB+t*=QDXk{$2}XeP*MkGyrV2wKo6ZL=|)*J;H{Rdud2YJn6LiZ8Prj z2-{vKuy*$yGDK2Iqw<182pvkhEuM)PzI`_%oh!SQ3mV7fTfC-cb~86FmcjXct1d2_ zx<%(ikG@TLOi^cZYGsf=bmKHXjqS_tFfn=D?bb(cyb-TNF##drPdK8yHggvY!_%%D zhv_&bekWZ18X%chY5bQjoDxnq<0mAK`Gn7r77jb%AuXk5qO+4v%R`3_AkpUxL-^#O zoQVf%7{>a6cc4Rf0)YI)GCICxdRYKVpW3=;ag|QDX*=81yS-ksGdF>8yWaE6)0q?4 zMSgR)Zq?_0|6c`vj$b=YX*}!y=_enreTVj^ul?%FFV^MDS8Dg(@j7}0J!e#&S~-Q+ z`H?K@8$=!uH#80Xy0lsmGJw1MFJs5pj`K&Z*#{@Ab9D0NcAB&D#IY6Vz_1ojq(oRr zX_TLIR?*L#aPHQ(GONi@V#s41BP47#k)nT?k|z-v#&JLqC!fze1An$B=fOl_PUhb5 zgu(u_F=5Hk@w|E6%k1=A7A_t-ybo}9)z`lE)jD^6iad7Jv7;yJCqDJ#_0+qcsdZrc z^4Gs!lT$M_x@V%k_08|p%Kceb>8ca%Q6zyf>b79j4872k!6>pQo(@lX*LC zn}rXn%fNv@?^&#*OtImD2WfHFj{e#0IG-eX@7)-9K$1p$ZTt>Ydo>XtARZEM|r!tzMTaC7E@ZX z%+9G%80b-1+IWml%uLnixlBTa>k81*lM9oA6NAiU|JNC>Qjj-o;(W_NAUk{OW>VRL^!0nMwq zhvv!4LHJG>5Z{Yni{gTS%)$Wt+7W~;Ok1s|?Kghp7YI8r`%q69I!J@V^{!`sxOVgo z1Go1scqEyDiAQ)Vw3SV(5o$M$Jn_fUDV5)Sa1TtK&yc^cn6U&5fzc?P9L>GVFgY2K z1NED9KC`pd3EMUAzU`|K6jblPHND`A)P@fn&Yr*v5?=j``q`6%uPkE>H@fsW( zuCs66sJZ#o;BRo(U>fcC?(x77MV3Y)Av$Sphb)W{qVf*?g*gY`y+2ig` zjvE0_qDm&@9m*2569u|S8*NpQgWO&N;L-Q(iUe1QFME6*DT}1xq22ZPBahYLWA~PK zvw!O=e^jr%@@75!!2Naq!>18cH|wAO+OO6IaR!EaYjk2F6Ls(Tp?6aU%iwj6v|Wi0 z3^Z1cY;pobMW&a|^YrW-<=U+ITl4il|MEYtS6+XkjvPKxAO6tC>hJx%|0oRL2QR%; zi@+t3jM6DRb?<$3`k{vr6i3pjHxG9nh93I|f~&?%6i5nx@{=(Icg=2XuKV1sW#W5B z{^SjhR`+Q5%h|ziuz`M+eDWS?o!8+(7juA5aJjy?TsN*-N2(F|797a>A`HE_LF}M?e`icdlm<3x+)7Mmp}M zFiyWPvx+`9|78h2@*LT7i2Y5UlPh2DnxG}tw*TQSqWoXYZuCiPr z{OX!Js13WAaLd?EmL?y|zJmM860kH_U7@aR`pb)JNpq31%U8soajlQ!wThvB^^0-0 zJMlG6lI0{1jDt50R5f93+lKOZzsnErzRUL`Z{VSWbYMJ1#7SU(4rOv% z0YTd`J;W$^nLkPjID57&zB)^rf8)%VdgbMp z>Q&nM`8Utjr3>fl!kcf_<*Q!TaI>bSZ)H)Eb?Rv>9u;V@J&IqxH9RD5MH!%zm`P_l zddO<8YjMF*ui@aW-2rJQ$zANz7M}n!j#1D%kNC?15VaPW>&yy?^ta(gRnYkk!LuU$2Di+1J_=g`7H|6sl2v3H`ZRUJKctbXvq3w80* zRZzMC9WB+N0|$^3`=TKA^$#cB%=8@irSR}|JGKtq+llo)t)_^RCrXw|c6JfjaltJE z|CHal6?z98Gcaz1RHQTmt*Bh#pIrfx#;PRBNS4laQX}!z!pA z-#XCmB89$mP?C7)B|pKv;gwO^K9XPDQ!hd!B>lAs-lCMEzpSE&A8{8Foq`+ZZqyfk z_t)xydym(9o_#kd05@e+?%HA8LW$eqgzb=cQtmXl2r2}&Wr)0XfRX-W?U-At9klOj zuf1FazjMIF%1M2KN42Vk8*mvJA|_3b@3ar=0vF4A#~mub&B5giHg%L=TMW^&bIw)D zyPySkGKtrf`I(x!a;0`cbNxL7wJPsjUI6#4GwzCM@f15sUN5|dwnkaT_V2Hg_ugMU z$SGw@5ADL!id?KA{f!O}!HZ|>>ZL2t7<>*ncjEYw$lW>0e(3OlxY 
z$lypFKv%M~9`)|srSdfgUfj9l!Uk!}Q#q8e{pef{NO|O%C#Z-IFhRZCh8M3Ox8)Ih z0skiN;fHq4)eC^JI}jr(rKNNfNZ&i$go2b)*)@` zk+qja18D~Yl+LUO-UtA_bn3~+)$`K6FdEME>|9tL^*pn2v8JacsSld3dR#g#w85d# z5!`ws`#i4qrPt5YSHJqbdf@cE^;7@OUxPPp)USQ!Gj$m~%7sq<$$#?q>%NB{uj>~s zz$35L{(bw(YnxmQaPiIa^)vt9|1mfjLMIueeh(iwScmr?tv~azAF7*Iuhtu{z7)7! zD6)V5f%@fN`b^DGCwri6$Fs%;cVsZnd(k`^+uaVbZ1qDsQ|i+4xwcEN@rmuu4%XWw zpni%+>p0RL>?nzE%iM>N#~9WlhK=9k8SzXjbrBTv_-&iO*R&0VryW!~XzejSLz*XU z1Wr_c`BsK?V#bw}6;Yfbils+L1myWf6 z;7hh4y<}i6Vf2?CEQ9Z+(cXncdg|gMi>HA_TFNea=($b6c41W3D*u5CI!&L)LIe-e ztR*L3=sy!y(yacxXOK)7d?!pMCY3_wMKWyyyrrSPkjDr!@aDcfLTq*NfvCe5b8I9u zy-i(bXN7*aZCQu90lyp0t(6S8n4jOi+y2NyDdg@F)84{U?OZqd`nb!>;M(}POE;Q3 z?%uy@;WF6{PFzI12|pGFG?o^he3GVd2xBezZ5nV6j1rk|!Z}D|d^x${<8FG|6x4eR z%_BBtbAjCE?f!&c1IFBQ7#MaGoUWAv_YG{1NHmR2-qG@E!}6I9;e0oLSVrfxgm)MQBgu3ZBrA*E0MYd6*5&{+wvX>V zfIu9f6h7igrvRAtB3=oHbNh+E#@AN1$dNnl@&yCqOU%OQM5A3gxe)Nay=H!j~+;9C(bcLPhZ%;n@Xk^4r8l;n+ zPQ<)<_H5mv4}A3AW3^|`-pJG5p8ia33!^6oeDxdOsA>9WL%==1x>OI{|3Do%a=6wu z*J;}g?&z8ruS>E(?&_iDYZckHy0(!v7(EQWsBN5#Cn*RilX&u79NFsJ9HLyyLLt)z z1)eyj34bO=P9j2XO5Z*XGUr4bJk`76Pu%VzZxwgv^hB;uH1A6HnLV^j!Vk?|ruZ=<8pKPI1pYCu#=0(rXA$ow~RF?%(>` z_3(qIYkuZtou>|61iE+kD7x8T{ra!`a((&5*X!62eKBAh8yQ0XK$oX)Y(-?E6%1w} z5V+1*qise)q%NGe+PSlH-4wW{s!|sj--OPsMHz?-E>_Tw7pAAuH}-yJ^+R>H)+UYV zWBs(2WmRjZjoJ>#Gm}|3=7bDiz@-!(KaKT(v5tg2qKY0Hm zwf~;`YlZspgb5d~t|QAgJw?PJ#f zRlP+!piKdzr&9^Li)Wmu7hkqNfR21omvCaq^!oaSYDez?!PTFLhD@-otXp+77ln8- zWItteQHqlRgV;mp#u~>9hlj@F?xGMkCmAkRm<@nqbaea8=|9l^GQo_=iIdvmPa5rU zoWXHa-syL%-~VaTHlW!5kXEE>=|TR``qQlp5{yPVkRGr?YoW5dr!EceF9CKGdbNB2 ze1zU{iChXkNK>4I7Hv!BDGbsM?}iVd6CWCKvc(YXE~le7_Ufv>`p=7QD#g#JO~n z?@r&>)!De;-Z7BP0b~TpsQbb{2-g;sVtgmB_TApqMLms2{n_38(Wh~@Wa`W<9BGrL zI&;$1rofIxM13H=wm`GjS_s}!Pj1K*H@pVP;+KzR8jtI~H$BT-+Y|aWN^H|G5gu)W z#>OvEu=59y+W4ivV-!gxc}sKgj(fty;qKF2c-l!ceL@LL8fzzYSUNW;v@2~K!xRi% zTRx`2J9Qy#S>H|)@7i>8#%8T8&gS%)!{|B>K6E+@Sr%tz>g>7mb@TGY(6Mse6rJ&P zv*%-vJ(4k(*Uy}(H|f`pjtrKkXL};5w0iBv^?Z9Z&n$@l&`-| z+;fi_Wqn>bMWY^@I9SJz-&;TZ@BEb--LtPQU%ytr``O<|HVjt}a{doK`vy5m?b#)yu}yNmDKfVFO1B3sYmM;u5~B1m~*f7Oj<_TS`-x=TaW zzj#h7rmXFRZ2%xKuOw0?M3w`HW7nQ|W!%-wZ+g09+Ym%*Vj=mf`>0Q^t*?-$`m`I) z=tYBY$C}muT&ULz|IEfTB6(Gr{YhC&PTwM3%WLOV+VBJj+qQM#{Gk|fku8y~_B`{{ zzE6|*Q2~Fe0Fa?OP?<@73W^-1!$OAxKW*J}{21vaox@cdkf9TcVrNz1>+j4=)#wqm{QA(gk-opNj8 zU|h?YS|dO4WV@4C-0A6{yhrmo(c++Cc9}yM-i5U2vlY%6=62*1?pq$}p)z6wY4!hDL@_PA76lj6|#Q zmIerSPNSufi+b_BzOhQV+yzeK0QT(Kq>PyiphPxS8@wGP8EiT&*AQSHJNuf34o}$RqW{laJMb zef#QbU;BFf(wDzld!er#J(~!yJ@w%azqdvX?5WA?H(?we|4*YwL6%-J83PqW;=X|CRdmr~iEz#E$y%H@;ObpLrQ5s}3JMR*ygVWIg!c z!#UQ@yH=ctbtSvyQ&AN!HhxzTr%_Xuo**28bBj5q=lbQVb>Y%^$~_y# ztneSDv#@Vstd6<6fR5Oq{d;PhPR$sdo<7QO_5783%zISnZ%BW zM`&+DgG)2B<*w{$XlnAtP11Kq_ADLmxlGVI;XgMwUGB(sN3_>f%-ou;$(uK7%ALM+ zv}Oq7ZtMB^7OqW{dw>2s{`2Ks?oOCHx$4Bb6Yj=w0YMfOXq=FrH9u^==VvvfHP9qV7)2W4B5B1>$*Uf4jqf7URUd>AF&{-8D}f<6~y+ zF#wBt(*-u^YZBX7Ed$P$!962b9f@dop=~`Qk!@3$3Jm#kDKXwjQ%a} z#h=Xgw5N9H=43KyP2ZZWTQl0ie5-Cz2e-g|OLw}afO~2ZgMABFrf%J=X?Gd;Z5+~= zx<%cQX55H3MH*B1O-}LMHKghH6sBuWO-{!4Xn)gc#=J?3G;>E3FuOovaoz2eMP_wnz1BSmgZ8Wfsi15p2)H}lN$R!4b)v_eo(Y4ZDz4%0@K|>pTdyiV%6vOzk;o~h{E`ouV;>ow{Vp#X^YG~hQ#bAeFeOr@1JIY9X}gfQnJDKG z1L`7XX)X?+pux9n7nciStQ%)usV&;NJ2V_n2@X^5grPpWCqqP6AtB-SPyD6x%)5_t zWPFKUoEa|zE6`KQNP>34R33ADvI3koG=1%Q&0f1!x50TY3EDYZU7}M!++fMFkdNI6 z^$932O862ux&<%&z{fsPL&&f>+LWgX$@hdonK*>(v>o4=yb<0vOF4H>?5_Jx+(YMO zG$-1~FE3x84E)=ht63PZXJQ{6(tTL~XU?m%S@k^InY1TtSsWA?J+jroH5V@p;YQtC zBk&A6*)E7$U-#N36nJ=xog@c6+aL(&AU)%eAoOAT)EycISL#+xkD=^+{rPr4C3PdN zRg%D&c?K5XbHG83DD`1n-2Ik6s6SbrkX&fNPFvHJ*6C!dd3+p+`>oF90HE~i^&9ep 
zEYwWoj(%g;j`rY*Q4WH;E7C=5Z(g`mzx*rzYd!wN!#UaL{(By*Klp<`uK($O`^`Fj z_)tCbt|#k1`p^FJz<=Z7Wzt=*y>xhoh6dmZSL)|}_UG#RXI`%nRH=UI(m{Xq$oD<= zjynCosrv5Mzm9%7ok7l{hYq8cT&~al@pH9K2X%rteV}D@u&s=C5}XnOFXf_aE^6?I zVre|Oeelo|x|5x?iE8oL>Ch2Ro|3^(>Mq9|qBq&W*3ULUi=Gs5 zYYC|5`>if%`tqm1pm*|Z=@~M`X~`yEd6sc~f=h9R$wYhyhP)fjc-BIvy--+8!>vB1 z=}rXsSPM*BNO1^HT5Tafd$%qO(+0>)Pup}fwh3rmKi({!x^l{eLw7(Rle(18$yEmv zoy^iN?T56S)OIr0;nyt~pZ7(|Q)KPw9Z@bP@-w&~OhnD3uUX<}E6c<$el7&i-LZ+> zJIS{bY#B%(j_z=VgfU*5(BIi6(#1o*2@8B49hr%G@^H6J78#I_c^Q7)G6L`BZTNV{ z%?Wt83bti2UBa6t-%e)7CO)MTKRy4&8-76AC|fKYG}yM^x>(`@g?Yh~$%Q}$(I zg>qzp1NlmOTeu;;TigzgY2bI_?dap1_uI-S(trU&Yu#_s!!L_M2-Y2(mVT{w`{cG? z*{ciA(MoW$JQRTEHnK4EDg7%SNYlE`Neh^?5nxitrq#y{e{d^3w{K})H)$IW;IXY5 zBve2c2mI1TLXjGtPNU6)On8AQ`fU=#H%;Q|FD`u&mUwUw6G)7dg-8LZ^K`##M_swl z9`kN~jaLxzPQ)a1hZnUnweU?J^;LXy;^UB4-MdexJ>PZigzbE(tC((=l_hiQNfhvz zJo3m3EqlY6_li0el?fagyYrfD0z|qZJ_)$P-Y4yx*Lif6bLcNS&~HEd@gKowqb{C3 zU+3O9mtA)6e&@63Gb^=)PCPz1n1#aXB(J>rM!j(6d-cg5`>{H3{17mc=e9Dn>fr|; zt#5wg>-ExWuTzGt^vh11I8mqXI~6^{(;mG7&?|^MX>b4j{n0O6jI3U`P9Mo{<-HSj z(oggN%I!d+3pk=5%fq4*P-gtqZ8E58UEt<`V;d}WSFASl<6MzJ33rLZoz>%HLgH5M( z*R(pYI&QaLWZ%l`qvVgaM{8>1Hf6R90SWlm(&x6X?4-;FN`MooiREBV?Jl+N6z2NvNYsZ~@T6(QqtENl%z+qnC zR=8GH{a&q=6;A~8B#@O_T3fC~(qCFyhd*xA;>ubrt*&MQZfS{c-WLh8On7%5FB8^@ zMeA#Ab(^%@fJPg`)^BMU7zw+w#`p4O77`g|CGi>zp495m(JQ1Ytd=eKvrLvPRE5M|fy>D&O(v)6V=^C@LA}<6HqsxF*q#Ytoa0zk+%9Lot^RzCxfA(-d*Bmt%h)) zM;FM3nELUgJ|D+fJz3HA5?)6;2(E>hlC?z#79AS2 z-mPzObw}=5#oz{-7acHjg{=;3UrsznPEjuL9(sq?GCrU@ z!oQuW+$P*}A`|+*<4F(t3nps^UkMgXy1XlIlEX(z`O0OeG0=mo2DIMWbG38%D!6e&D|BZ24?UAj zevN7!wQ(!cQf_Hy5w-rN4~--n<=O^q+>Q$ML+*iFlTE-lJC0==P=|~BlXoKf#Scj% z4~=Jj#^z6%4IbLqK{}=oM6@T+*cJ?< ze0Jfmo8O%~bsW>hD4vq;c!Ycpd|d5$&yxo~O~yxMaF8~MH)02_6t1hpyI@mBAs{jq zD@8w*Yu)eYp|}W)@Tmw!=~QEH)uU@mlSIX zVr{TSNhJVfOanEYHfU$w!Ye$|hv~Pn7+yOo>BHnRE&@5wNU(~Vq{DbWT$hdfG2Mmm5TwQJYbxx zcjDd-*!b8;t}763cTGxAE5!F`;QjYMP~$YTOK+ZoNv+kS%!oJxBmFfp;xW|wbF6fO zW=D1gN1)LV#lh(42!fo33hXu{^S0RiDBMG%Lp6**uwgmzxiIhUWt7Dg6*QPPib>m5 zyqI^7;7}no0Vh=`ER1o9vaO{tS;t+Rs?fVKS_Y$FlQ`PyPwG-4N5ODq%m##!j`V39d+XLScSsr50wF z>wo!Yzf!NCKVJ`?ex&}wPyU7aJOBQFP?s)TtZ#hlxw>)dR#rye|KNS~jz=G>drqDT z)0mo>t~msx%AE~YCY_^9X%FH|p(Jycak&G}cH*w@8&0f{roz=p9d`_m(3u$ZbOr>K zM_%>OA+yywfUDAZ^TxG0`^M|_^{;-lzV_9x)R|Xbjhn0ahY@Dp)S;CDhl@Uq7!#>ssGvT#PsxJgno`~$InUrWsh|y&lPvHyE9$eW7&CkQa-!;p%?EI zcL)02W7}Ow;m&_|pLZs6-KCEE@>08NKIvk|0Bd%yFO#1g1eAU68TV=m{bjE1KwfnR z_c9#?zKyTDdAiUi6FIs;Kkn*O8Hsx)lnHm|+cbp%IElNwWZJs5{Dq?#%G-gd`|U3O zg#|k)3z;1D?)RJufW5dli(lKZFK)!|Jm1}2{8{i2cNbB#MG0~9ebxmE({XbFL|d@n zVgVNrxY%H}rd_mvf-y7Wf(6~$!UX*iX0G$DndY5v&97rRepxt?Jb7Q>ea><;81I(d za^JO=Q=@?Vj#lc!x=y@CFT&ty42#s$Qn#*v#ZFjw75S-rxDZR(mCh9cRNkC6D1WBSdvrVw z>3}H&Gl~a5>Xw&ZW*-59~PSLJ5$>kshe9pa?oNS(k*^X?*(VS8iR@G1u`KWw;Sc|X zC(CM;y}~J9&TbBPs;6`~;jiLHVQYPvPJsNB4uc(B6?F3+7#gan$y?!Lo)lv}?A>>y z4jwy*!h1Zr!j0=UY6^OB5WvBb(Q$d}9^}kW?b~}0rJpuwyT*3%L-DDci~@!Hu+tiS z0#TaX74Rol+^P#GP34z~aYr6>%kAJ3@`F0jMLuZVcpQ|S3`lbb@Bs6+TvThCp~ee&S2gueLcB{((8q`kNx+lI9F`jm?LVJAk@}>IZ zCx5g?_wCKq91bolz)Kw7jW>g}JgrlW z0cYL83;nD<+qW>-*%Hq#dU2PLgZkzttUh*>q*FV&)}Q>P&mCREL!tdE=s`IZ3ncx_ z&;66yzxz;q^5dVXd+xcX&Ye47|Lgzunc(vA#~!O+_=TUZo`K<7nw!iiH{)aDv~y%1 zJlBPK|HXg)pVMKi@`z3sVeH*Ko(|++`qU@FzhC;n3w31w9`fy@O^nx<{_yK{<_h%> z}i(kq0iM^&Puhn9it9?t*i6iVJ8k)mo&`#P)Ur9Ih>qc|(sVwMeK#)r7+oeGG zg$&8hHknfti0ikx8CD+TWUFl^YCdtHpme6wr|IzPT-#z=M7y)g2YhdlH(VH-&MBTy zZfD{ac}Tm-q%dtRJJd;Ujf~a_=K?C@xxm;7=e4d)+*N4U$^BK@sgvbv(Dn*^XPI{4 z_agek3T<~8;8qB;iobDIDZr}XN!#}|_+!!_+$wQ(Ujvr)?Oowh8{6=!PI$k)j;*_6 zehd7pAvfFPK4k$9?p_k^6$gc>yiBwwFYp8GDOb|BsBg}c2w@GtCZ$ZmJaCvW1cY#@gmsMjC(lLvX4HlVe#i;FH!I`45aJ^G+) 
z;)M9dS&4D*AAf*1tZs>qSu<_Ss%0h(^D-Y{vz~kx7aPzy#Yh<}lRZ?+wT8a23J$ER z;2%6%j#d}qR$PiR;!8L9uTs`E=?@xq_}j294geg&IzZeec-G7p|8020s($bTVNp(7 zM$0GNXr!5yz$JZZl;^_FJ8dcWA&rJ7%SYPOX*1$g*a$0Kbm-cJF4BrL((ER;o84)9 zwnuqW+LZ%c?%*LVE@VYG7b=7kwRWYQc;PthTYq5P24Iy=TRN1c^PWdH zERxJ)G~dNndw|UF381gGvX~0s+g-4xe;b}~dyQJ;-2t~OW)jVmSAE<3a%?E{z6{Tp zr_bh&qJu{d*P$baqsRWiXTMN4sEZH0;yW_R1HH*4)hJ6=bycFwO+q^4G^~K zg@b{D zpx%1+sdv>g?|D~Uzjmws$v^og^_A~DM<1-ePTq64JdwdZ_WlEV>#zN#zg&+!`L4Qg z@nXI7(hKOTTXoO9Cu)=W`uZ3Dq`vY;->iPnwtE=86P?A$e+SJo_C&dryTWIIb`mZ! zSvnLy5#p9rTC-)k=-L5XLs?U%;{Y)4$euO(!z&BHb#zh7X`Qv|&I7jj)#!+cpY2lH zbZq0gn*%l;{v|*s&>j8MHlSWWRXM4j`(}C1&dLTLu%a8%|8UYe1LDB0or%oo`s$zR z#rDCM$~&MPrsC|Per_f}#IL3A}FgO?;xb-ukyXZhxr+p)22(a7kdZDF0`rpx| zlt;j={v&Q}KTc9*Y(e>lFj!y3pE9?F8tUJY5Aw*#ZfMOuboUg-jG572@zgWNVwV;- zYjL5C8%39P^UH!KQo7loWAp8GuKD(z{?Z&_XNfnrp#N$!^D8+Kc5ZRGW|vlKc3%Hw z(!^lTE_k&oZj1Pt7xvu38tlubZl&aaPtf6wLsYyu$K~_c%(Hy-wC(44nO9d@PUbb?u_<~>8{kw z+)_=?Ckfq~y8!ualqCiah0&y>yZ znv7qWj&8)&NE_Yvw;E3jc5KoUaHV@~)0bveN#E~w?Iw2K+nDMacx@YL9P4y(!e;oa zG~3>xLyY54>npyu`HA%v;^Af*#&sNZ4co~o$F=P@xqBPY0H2dj*mp)J3IA-So%%r^ z9HSg*^EHjb`9yw2A87rXa9C`=@0#ZJR%5eNukyW}Z~>o@?_x)Gk(44Tk4;iOYVu*3 zjW6gREVgO$(e}}9b4o(l+?!W>Cn(A2Pkw~k8U0C^JI0{uSkcX7xP*^;D_{5Nmamg- zdq}T)+6&B{&@E3VJ*`7ew3NBX+r*`i5U|6;u;ER1B-8U!_PBY0q~@;Qq%GX6AOGYh z>cGCeb!+lQ9fFts{ujSk8^|U5kj||r|J^A1@ehBj?z!(|HZaO_Ca380&&_5d#-Sqz zpgG5SJe7~WH+ec9mkD`b6%OTE#$eIOTo}|&P~qRYL0;PTbYY<7&&E^b4<-xQtjFMz zu#_`8h589`)qur|@+@Pn)-klD%+2@*bYUpRPL#9Ca8c>vL-j-zZ1LO1SW;8aFX4gR zF+24T7jlJ0;NSD%DRXrRy<_+8y*06S5AM6__dfgk_3bw<)oFBzv5|fC@lX7jdi?2k z*3Dbf_4RLk6Ipew9(d%D`u=ynS8sg(+qGlgp?c~gKT>maj+SV5_K9;!9r;A>qivCS z%I-pF^Ut_u76?)2;tv@l$RSJ0sD5EtRdO@Nt1Iz+9M=j7V;oEKif!kc`gftP?%tEJ z_|Ua}w4vMR6xk3!z1e=;TxXg?o!;Hx3PA?whs)Sz{m%(IU3oi_MQ{p=(J`rKjGTq0pzWEFT6$D@#CDXN}9$~H*{>> zlK@~v=!ohkqycUn*U63Sq+@$a6QPXiCXg8r+HdISM?n|?wnb8!Rpfm7;OG|_R|2D$Fk|WXaBRLngMPH{}x23C;5_Ul5z0pA=SSR{>=r2ISxMysK zit_X^H*5FeZd-KhPq;^hhnjBMBc1nTd|O&~VPRi?@6{du@uwgC77g%O6;U7WJxYV5 zuol$D<*xZ%Jyr;S$$mi;*lLZ;E(tgY^Q06}dSxj1ghA~@xv_FUmNeLXTmhm&N&Tk) zpfE~jCVBY|qYk3my92}Bfk`W&F+VDUJZ+B-re#va4p*)fXkl%Ou>~$QYGvVPMMxM_ zm<5n&Z*PG-CyNnEu9_Yg+*|+d-~BJ(0J~{SDhBhE(TUz(qCoh>@4`YNEnO9QfHFI= zkjDnq!B@wF#|b3g8?zGU?$&h0lXv)M)gXf173B`1dIb8KLVJ0xW^c{b)RmjKp=iy| z)S*KM>WQbGuEWQUren5)4%*RE57b5viYT4a&FeSn%U}4T`sH8zr8;x=%{qPh!8&ww zPd)R@yJ$Rpbs2`^fYJ)YIyN*|51oFXPCa;9BxF)>WpyDFsWx~g7@e@QtfQmjwRhh{ z>PNWkI5}~rk}N8EAyilWX4Nk59>?L4g?$R#UQmV7zP2_;`SrH*10$E7z>CLYH9WU^ zG7l4V=UA667nT?3Sln%tc8pwUBP`HAI1OMBY1U=Eh|G z+AsZlz4HCn>)-n1$LsNTJzckS`_*5qU;NzXYJeJg_}GCuxO=2N`0@AGhd=t^dgawK zFqMt!rNQ;=8meVGJd|x{Xe7JVuFp=_{LE7Q@~{1Bz5K%Ydhnrl)ZhP4{^R=CCw{Ch zoIO`x{FATL_rLdIoxJ~4-T&~Zdf=f4YtMnhQHZ2@CwyI{o4aXg_#Wr&O=)`0Y%-3O?L+?@H}d5Y>Y-v+|8|3i47(&Sj%(<+!1;6`ea?ac)qS)y`0_I zay^gBop8r(Cbj4^4-Q9hbRmWv58IEcS;cKvsgtf9WDx(-w{6MyFz?&uD;J;+pd0Hz zMytQ?(vLks8L;fKGEbmJrYjZLJa!_(q-^11Ugrly;)>#u#M?enzwE&V$-`RTsBiQrb* zh>6kSl!Zrz^#7(K@5*`x2S0i^}!D_vD)~NlUSy+iM&%_Z|$J{l8!sEBG`LI zY0DiPO5Y+*2m#gAgW}soCz}Y*c{=XO1@Wb#dGD!Hb^Q3rTmvg^@&gYWQ|UY=h}(?=s*1j_4}XuJe`OG zb^6hF)TjT}zgtf{{Y>Q95b3@A!uRUA=bpaFKiU22iR{tZ&0hymt={ z1g6`-ro2Qv_Ms5C!qKvJapJpmCoQSWXE!%~yLxYjziIRz8QvY7NlVMCp0u+X9_S)C z@#myv@I@Z((CFDQL?^`^^r9OcsPYDFfa$;jM8S9J7yPK>cgs@%xU~M?d(%y8px}l-jl0H85Bnw+yhgiK5jv3ZL&E zKsL0tnq6f!bpE~L*1=ZYeBXIvx@vl|e(*2;x0?U<*J>}ctGr#Hlj5Q(%jvFeaqUWa z+oHH|L8dg&PnGF`Y?hH^=+Y;c${H_4ohp|5^Rnzx(gi zt7p#CoH`Tbb%5{wQ}^bSovEu=b4AQ&e)U)C8-M)eI=pvpoj7=~KJ}xY`2X?s9^jf~ zRe|=pIp?anm2*{B=bY(=CMjS>5l06}A# z<*Hk^>gJr&|GsN~Uv;58v#ZWM-#KT8wbx#8$2f3cUz|E~I!0%wgTp`Et64*%hB$-mNx6UN+` 
zN_i~ql)K7Wb-8$!Ce?8p*fYv6C$OXp#6k3%IK5o%O1I`K-&Xm!bYhe?tj5>5Fyw9} z8Nb*@e&;P0atBAX2G~~6;o3{q$xBmMn3P>bK4^p3@#zs*)P?qJYwy6$=*Z5hk+I>J z8l8@he(vM(_`@&6SKW7CeD^!w5yy|7j$eH5FUHBiGqGdm-uUUCdQW`QcYZfE^jr)L zUC85l`nUH(cN5q!tMR+P^Zxi>zxu0%9$~C$pZQD-V#BKE z_HN%9)9~Q`{jCqh#o6U(z;@}TYTa#3(G733!3#AwS9Y}L5e?8@9X5m=(G8Wss+6(t zLk^Jf&B1OpV4&2{56fK`uw~`*Cgkn5Z9Ag9rvvPyph-F4KSQ;xXq3z=L+Tr0 zQMOj)LHUR~ns;JzRTGeH@-Y{5P>riJ@psY(`a`{ADE3cj?`j*vHl~(yeVfnHUV(#W z5~ocAkizU)=1EWDg!i?St2>&{1kcbncz>Q*bEVzEqa^S4)NYW zuk7H1u5>jsF0hz3^HOFjw~Py%>6alZjc472B0DCCE6$`vI|lqNjmiV+(X;`GCw-ev zeQD616F=H5+R<*6rqAQ|)s0nn&Z#8G;grjk7Vxl>3&j`qc24B+t-Pm$?L@(~a3xqe z_rT?qD*(8^#7C753j77r@xuuZe$i1PmOJK=GTp6l;3DRMc{RzYrHKtG7 zEp0{H7=E`yBs)ut~QiAA`6M_f?q9+uv2bwST3g5oT#CqhoXvz40xt{n~iyi;u+aPW)(OMYJ@>2S5E!@wwCI;>e-? z_@+0%DPI58Ul-S1b88-HBQ4ENjK&k#y>I%qH^+t1$>>4_*iZA5|M^{U!?oAN(8a;n zKwd3iZ>}Ocw)Aa^rncIcnV1aEOjVY7o@=ppi=2}FeU|p+4F^10+Z;T1(ZC!su1p@r z+%qONs&<9Aa^;XKw(%_%T=7Ny$y?vwRek)|k{o7*8mQ67~=LG7e zxc#;p;#dFo`@nB&JoexNF?jxT)I*lnUVBxX#x8&FPybY$c;RexVV7>{>4`3EI%R6z zs#A!lCGFuPZ0(qwjElZX@&vxLiw(4i3phN(J_jB&bKVLT`8JNer|mqut<`!;zFSaQ+`cA!TWHnn1}wc;<+zD|J8 zK2^X^9ghA^yHF`})9c48c+5k@?Ct}xmu{a*)QyMb#5{zez=B)%Pl zamBVaOSUQZRphuU-aUd!-nBoErsTIirZ$2$18JdcahJ~(+r)TPd&2bz;=FRQt(ZDy-1^Mjh>HovM6}aqI7YA1e6?d*%;3l;Lh=nPWxoru_`Y z5{61}a+7sdx29aR7#c=A(8yC-ZW`wE3*t*CvHj>wg(cWSDZ0M{%lc zm~j)763?iK!3xfO&N#G%X){&6r1Xq0to$TzC;R2G58|Pg_Q;G&I|;9N<5#)!I&tI{ zK#4=(S}S~Do+-03f71wiDcA4DPZ%m8TWPf85QUsW_0Sf%3ZZ7n) zp3<8Ae@-?~_P1a&8HC&2Axvk;?a)D^i;L`E&0UdsZ`GgI^NzF+^1c=6n5+;`6{v3tjn_{IP8i_x=vTRi#9(fFS~|8vx*EpC71J@H3> z@Q3ltXFnOu*WDR!_};h2v!t0^z}|tb7Z$(?x_NGGA=c5g8{np)slyKGqz0>_Tyi%V z@9CeTdkQ_!9`KXxsm+o05B#YApZ$->o=frqeP(T~YucOSW&7EOs=oDzu7xFcX>h_4 zwzGb_byy{xNom8Z8)aYOE56%bl4XcLY!V{!OiAFc^dn%kP^rd-mXudZ%Vp7P6KyYz z*=OOT_B@`RI=8q(Qqr!pxR5^mIyS(1P5{83)E3KeZR}&~NntJ8!}fhsQTr%n7v`g; zftE#=IBuQ0%3!OjA6lTzCgQKIET#|O#L7J0A6x<7!*#dc(Em27w}a{=Lal0Pd;}!M zP1&N~PGE4sTT0E!JIlb|I!fg)0)+VTw)o2PM--gIaUduKfBr6ci&$qFms!7-1wvAx z6vTTu(3gVAtO)s=hn)2we2E~PaRi5kD}^{9CER89IdLnnBn!l^E0O1_ zEOQ01&X96N;te<7OB*+X8)v(f!PHcI+;f$mR|fO(@(aOR^-~J=Ql<^+ zb+eOFMk<3ZWwwj&oE5T$rI*YA@h%}0m2#%fts>xWS2%grih|LtA7oe{jar1de6Z)x zp=gGHhA*Cv(c!X<`rbqPVr&1N#Je-9))tn-8Kb$?9N$|o@Vjwx-2%y(qRy!4?(XL5 zq3s$WzLgl8oXkO;ocRZHI;%%vsX}jr@N$L^LRF_GXYNu@?{3jwjyFIsiL87NVb_&2 zv1%dsT!ol;Ds88NNXMV_PN`PV%Y$WQyaNe~(31ktfe7!w(hQu780K?J(Z3Y|cI&Ot z*wz(pJ+?Hn5TEP#hih2?G4w(-u-*PQ%fon9bz@Mbf~i>j+6MIMF*a;;}FANf#R9%xjp!D z8PMwL>dcuwo;m33y*!?PR&dhDxl^a&smC9UhaUVAb$d9TefF8`h*zL`j(>Y=2ei|g zD?jr%2nfn^6Wv@fIIWfGKhm9 z&G=s6qwE+_=`?^p@{ASgoBYK)dC#+db2h3D8TUMk)Nk?|W#nvC;%pd)_{MXFqR*}b zuErIfET{6EE5}V=QDo+2-pZTOA*n2YmMSvA&M9t}o>d1LCpmKy*_pq6pLzH@6Z^mN z3{2%!dG?=iea;(yGOfm?cjKC_aw(_yF3%Ns`A!|m8I-((@XHF*g1_okWI4Nzw#?2Q zp50-=nZM2&bPNBo!-DS?a#E2-S()MPH4lGPE_m$H%xAaGcb5d{E02qqb*BT5d2r{6 zGRxha2qq_I!lN482_bhdn3&;R*$u%J2a{7C{ZP_6Gj+zDI&x|hm2NnEKX)RSJ)o^wy$)M2@Kip7&`n^uiL)P z*i8BdrlrL>i6QIa>~Uv$sz00@=Bf-=mX1zLCha(Q;=oT^S9i*$ZT(wQM~opyTruYn z8jJ94fB&xN=pe0noV>a_J9A=zs|s9EW4<1xPy9|I ze{XE*>x*7HlV~e-UoCpU6|fr^jt$`6_gMx7^EK-%JXv92uwH(z#ZeGRV<(Tr?CDd{ zl{S;`D?goNRqQPXHAG>VV8dOK0oQ4pVF*^8Tr%ai_LBA$C3EShtB=*AD0E<=vb3j8 zngisn=9wD47*psIG&FR)nE0}DPbsJFGi};pl&21`eI4MktI8vfCb(|D{mxhb2X6m3 ziM`U=QC98rKxa?~ZARJ9%E=R_QK2=t6F_%Ydu;04OdGCe9oyR5fvqlOfV&o{!~6TU zfD@_)`xKp9aYATgkI4}+xnl{kM4Y*kRZAX+q@CENWjksWqC+u82`rfK<@c{u>?U{`LnNc%cSJ~1&1H1G?~(q|sKKOXx0 zqjAg4H^jI6hi}hz{M@I$7@zp;XXB~^SH_)p-x2Tn>HiKy5!1-nXI^+V2kEyU2hN^8 z5fkGx@$Mh{k?h=bC7m;yxAgTyGcw|~8?KC-uDv#%dHgYC0eT`k@DIe}4?PtRJa;l2 
zx@?9PTX7;=kuC0~(xiNYF}C!uKYJYAT!TW8VSggCe8NEfkHE}e$|UKWv3U-k3vsHp5qYnt*l<>8<|$me1`gCu%F8+GZVm5h zXzW?ac6GGzSKu30h2#6j0`J^9-qqNq_1KTwiR&c=yz1Mvc~;00{qXHT7oCtf_t{bW3MmgkeFt*jb{eV#f0 zb>6M|uC|S5@4TyTi*>%O)wa++t0XjKvT((D1|*v^gE)*h!Q4M$_2B)0906-^`&`=2ozK;UjmI zD7z<=-3sJ~OWP>>8{W;`5a6riM}E(9zqGRjyeseI?i9vH$K|dFeDk+{YB@pyI|cqy zmnvCu#f_2#Iiszngz>L>%6I%lZdKkXiyU8phg1L984aH*1B<;#PUNlJO1=M;3R+;G4yl)&xW(OFDb|pUXD*zIJMXzKuD#}( z_}ph6h-0VE#X7R+6}Q|TGovHX)9NmP$f~(%=(;}651xfzYU0V~o``+B_F+5pMtgf( zE?jY;$L@W5b4R=fzj%KRvJa2U#MUjnacJMZaGP!gy*{`rM8$}>*VnYJ?po)nVhTW? z+7a%NVCbu~Uuk1t)(@~FQUH@cY`^;?ci_Pf(l)m5#roL>HkZD!M-I8V+br$hNgSGw zf4I`_;L2^uNw58-&0%o0RZi64joQ@q0kEPE)iIQldYBiMt?iNhYP4srYNL>}%i#@o z38?zbwuP(Btdkw5X(tiK&f^*U#Cc?`p`pbcoS;R0AM!qqf7XUv{qDDXFQT(8E)0#v z|N8l#jlqkEw=C20j`Jh=)!U0)q;q&*0y-=(Z}Kg zANl*}YmVsR*_{=&S2P;SppwU57s^VqfQVAr0`G$pUh(#FY*ETg=UqEJi0ZHh9zB2x>h!jz<<^$JEEl>@#Hs+n5@0| z3UB3!_O{L49$gx6r%h>fX?`W?%vIEX@t1##jH``Z`wqsDYi>rTw8n+8>0FVmyq>~V zz5SK1jBN*x#PH}O^)mf@(%$K#&&T}5!D#B~jRx9NDJ1WUlC*<`Svu$P=urt}#yaB2C^+kF}n0 zc-F_sJhkARc#txKD+g`LmH6~t8OSkCITotaPttafHS3elm2VDMW7Nz06r5@Ee(Ay| z5dJ#v)&VBQ|8d3UhW)LTt869DyNts->uNfFdtqdi^x~Jic%Ue83BD*J>*4*CKj?(3dyg^E=~kDL?aHez(DlRFQK@2Rz#brlZZ03@UspJu7F6 zee-YcNetY}&38!}$VO#y(KE;e^n>rSPafUj0A@8&=4KY=$Fusx9v0JijIcWzs=K;6 zJ4rheOXtr;U0YjRf5VNm+kA9(Hpk55bUg66&qRGoJ76n)q}`3|r@i}@o1M#=a$lY5 z4kY^hy}f<0bI;!B?&w5rlP_(ZfPjax<4nPywuh6#atsGvOd5sO{hkB8@SOcP_M-~9 zL5p@3xU;X_EU61<6)Kyj_`iI&$~h63`epwL-{b@Bl&k~qy~HQKlD}*&pS61p_ASjp zN6KiT<~J8$=Fwz}*e*a*hm1IL<_vXQjVrD^k~@dok6riN(HF7%>SEvCy>Zu_uZY|4 zy^Fdv#v|Aj{{lX@?cNhTeOuyh{^_H!IzAfxH{23?uDKyj;Agmlm@;@phaA1>s`0Eo zmvY!i57xmkI46s)F7X{Z0tIT#XlLyT($;8yd6|~P%7!FB0(CK-yBdl}X+qnfrJ*Hy zyL*v|;!QhP9DyOS*DguCffvgc?{&ThaV?D-(M+1=y4h%08;|ArandhU=-cAVujExG z&})j9-|Br|*lx*>;6GROD|?VZYpdX!C?4tKx%(tb+~lM>_}Yns?k<2Xbd3E@`#wZU}yPb_`8>Sj@Tgzy0{i z0gs@xzikz~LBYARc5|bBZ^!~-=k6TRCdl;2D_7~eE128*J3+Nodlz-XBR0A6obrni z$nT9jV#P@X$`QxGg}lTXB{a~C0+&baE@E8@_h1F>myZ?v{H<<@fUDlj@a z9%HTy1csi?-LbWQdk+5BW{0p_P0?Uf|Aey%n1Mu&Cm}<4Fa(UfTRHGA=AompdB1J89^ND7aFcE+o}`jyd!(sDr1j#0NhThibGrlxjhWH-hl z^%3qbJoKgbi$DLz*tTtRyzOn@6K{FTTjS*MlkwMo`>}ZBk;h`&)~#{fb=StO{rjT5 z)5!?5Ep+2*6bFWKWiWUsbyaY?t%rlHl!s8Cna0sYp-SKV{e39aZs2rq(Du`Ln2EW` z@p$f;r{i;<`gA<>r7yefPsF)1r+6NSp`nX8@b7l?Zej1P11oC_&Obe`Ymic1X>x9fLn_T^QK6yoIBVm?z(^HX`6mjlZ3pYQAt$gSXcXL*^QcfOZ* zUUnRC*Ej_Z(>Ni)j*V4}z9q`@UCxvuj`4Ev+Weg*rlPR(A@dggl269FR0bDq=d*%6 zw>mVQidW^6_j1{GIiO>{l}-!cTi5VJtP-g4XY;z?M68ip(1e4MR>HtAdwa+`3JDt9(;Wx$BDj?$(M9#t#o) zjI(D>$3^n&?Cy*${ho(u=aG|x+@ju9O*-__in?P3J%Yr+nn9Fh$j(;eO=?_T=3piC zq8@j0U3M5^_@klFg_MiqwFX~!q)dB9TN*4*xNv(;kCsu3j*g9`T(+G&7Q~%bw(Z>$ zt>}=g{oCS&6UXA%i3_3E)XtJa#5-yZ+kPy=`l>&~RNH?lwc+$a|xyKHO^Ci2$`Nc4TOL7Xw_# zGo<~&lg^TzHMTG1IqP})>_ChkeIdF{Pdhmnt&Gdbm}@#=(1A{u_%g4|Qah}?QCIm+ z+A)9eVePXVajT(56z#Y?i;ufor)xRafUNP012K(L*lgSl<+>}(oS_R{LT^chi9=`= z{Ds*9j}r`Mk@2JOUl)$#o`VPTJn58b&RVSfXww?P;yln?YznI$dmF%Tf;L zNHk`hJ(|RhLeI7HNS3yij`;k;55#+a{ljtV)qCRY?|26?q#+)Cv#A%?^XKow{ZI?7n%>h9po?$S>*PJ*ZfYQ2n7I(g<}Sv`#j!ZMI35ElQ*mK+BF?W)#mM?XjAKvCU@uHT z8`B#rF$ESTfqWiZ!1;O2fzLVaOWYT^7Wh36-h8)GgS}nrE+viN3w?p0T7jQ7e9wJ7 zT0lb!IL4=GXhV*1IiO$9`=<5?Z10A4R~)wCS2>v8#^?5^Z|US)NBQm5)*W@N#_c33 zK3H34)OvPsdtWqnbw(3DmggEb;Y&1i^hIkI?>l;tiJPOXx1W1owDoL?_8#P3Pk(gu zZHuu-MTA!`gcV4*6q=|&ELDCdkfF~JEMzx7iqkA z;}4p?qi-i=Y|GzmeLQa>Z=Nl;y^r$nJ=^>GE6?7yTzYQq+7vB4z~0#%E!{oQ*wr0P zPA+fniUxi+P|tdP*MXzDHtyi14%=b_*;B){2ELZ@x5U+IQ){eYSFH1HCGQ=vT#x*( zZHc9N-Wj*Co#&=lscVTv+GLUY0=%>UKQGXJi^vKm5-h@d3$*DR{4~$+8Dz!6dP7XX zS99>&6f`}F9vi{W8RtHRJv5B|9Kr{=fG)eZFc)VQ#$sS*B2G>Z$C=5|I5B=PPK-j2 
[base85-encoded GIT binary patch data omitted — not human-readable]
zoJq;=4cW4*)SKkg(t2Zj^!6k1tH1EGvb|khWb*(8$g*Dn4%NHV^Or0T_U5B31wcaBAM#l z8955*1CW?_PIl~y>KWVM^n35bQ%`?4{`BjA9PgaD>hD0mR7zK=!^dl}O8|X7@K{H& zJ<8$%IG=jF^Nu^>=YH*5{c06_m@k_QS)N3QrBU7q`Y*vsg6 z_?uq>_&v2H|IlSh4p)#iV4IjmBJhM6{i97K9}gdaxN2k6uiKjBgzal$@CUt<*NfV* zvrR%GZTX+#rRNM!{C$gvMAuZC0%U~b)vC$|N(~H12%vil@=qZPLIhinW1QFpfHv14 zUgnJda(_=OD`HbmDV9aMXOA9>ciuWFd#o)!@{z~liN~IdTMpe8dxrPM$mq0ez=e2! zWHMfP>%I8))33zw=bjT@v&wNXI(FX`kA3vx@%YC+5kK{Jz7%6)>+z@G{EPU5uRR@8 zGm}vq*dD#xdSh-{KEk}pJvAv~qZZ9w$Wmb{nqezjpCgB4OGwznisYd}sm^3KRMp0r zxY?Fuqw7Wl+)!C93+X2o&Fb{c=*SgrVP!1?67-LIvatByTQ1iY#5=-4^xI%SY<)@m z#6aOn+_rab{L;_-L_GMwe&==l%(=LF{*vU*xa$<(dv110{Iwco>6L%|+rJ&B1-`e9zVy=mpYyb zYI=DvTlM*2@S)`aFWx&H#mg$_=8))B!1cNX>gee`b?tV;Gi`@&^e9UhAwBqGnc1;%V!WD+ISK)CEX4nE4;9Q zbVyA3rZ%s8y;RYpOVpmSx-@>uhVLZjtA6Cq#7o*7E4$Og)~p;^JY*4m1h{bQ06NL} zNd7iNNjkGPhIZ_X-Fx=NUH9D^J0vTHcJ7IR9ov(gBwJ`?ayl-JjmEpD&c?;d*Cc;S zamSrU;?~>uN^f_GmTmU==A_$BzWr`|``h1(vu~b=*5MuUA8XMle{p?!E?Q)>w8?f^ z^dPZ(H0e?X%^9~?UX=YKy0G&UeiZ$QtcbxjqrP0nvX59i3;$6T7Wy)QVs&vLO40|* zlOs_*cP=*MzyIA|`=$8&$3GIM_56SN$NwbON8VJtE)jnte)ZS>LHxuQe>(bxdZMkp z5UmYuaq-gW`0i8RkGJ1F9&aBz9us5Zu_|351X2|U0X$CGszrVfXRolqh%AZ%cds)$S-j} zfOln3NM*vrd?lk;vO3^tr!W3f9kNR|I^OzX1-U{%d;--wqug!}v;pF)9C9*xdpd0! zuPjyUZ`btBVWex-T!b@=3YoZ@CR{hKRQ&60nu(6umH6^UMbDw8xOnTFX<2HoYHfJ2*8;M!jy^d_;}z1BwS_3 zT5hFuJn7qbfb#|u0QMhUTW@?YMF3&l=p%@N7lpOAluXVwFlFrD6c%On-aTOa7kqyZQ)uSgsThzfD2c(&$)=zN3L$R8ge zAc8xL3~(j@IE)&bl66?@cTZTz2$pXvxCt3yFp9*qtnlC*d7pRDAP1*782fL1SlH^y7sD_u$qv$1-R{8rSs5FVaMrMdvm zoY}lJt@5c#!7yWGSvd2JcT^24k}#_CIBW$<+QwDN3tk`|4TEiY7^FY|Q16_f%-M9} z7Jnxmod>1%4iDgWcD1|neR6yz&YeFW@11+jE%Btl3BiG;T}H6u3ZZC z+U1M!#?e=zGB;^wlr2k$_^jrrBDyH(b_ZeW&Z4JXM?K`!xEUga%nA2@XZ3;fkj3I!x z$)O~!aB%Y3cfw_DAOWo$NWv)@-vWd$3G4fR7~B{x#Z8bKt2vJ8lk)X*5`r#E*8V9o zKfia}{3poq%b{NG%;3HwA69>c`oxrk%MJDMVIY33^9E_;_iiHzK0FD9B@7)ac>w6Aci@R$G30pV z_-`r4RyyTvhIq&v^fd!Bx|41~hbBF$Sf-KXr(_2+zUfFs#(wmPkH@e4>d(jA!eYGm z{3~(k!bR^+GQ4X?+WC?%vvnB6=Hd5-h)(h#IWPc(tJhV=j>c~fU}}!$zya8I*F~Pt1}DH(bExocJ7b< zz8>ipL>`j|n1Xiv_mbmeEW-h=14$tGU<~rE-_`0fX~1b8|5>yAan#CSr7Q zB4%c0V@WaTWC|ek`8Wes70sV@7c92 z?zr`iXcfMr@4gYU=g!CS>~s`FuafB8ruu9aAIXfez2LT}XV$AFcdq{Bb0 zxP!(RwG4uQfNTOe;Ei%D8uhJ3bZThrjF4lxTA7RLq#UZHxmY)k=$T0*`o@kBHTWDl zR%JH`w>sZ9raED!@gjo))kS)T?Tl4%NaR?~)|O&oVI{`r7NSzyh)@6Ym*UCKe<4N` zcCi85r$OmAMpvmlO5!bM#ur-4p2SCoym?0WvWr7!N6GX!fAYO}_4U`|?71s3Gd`)Z zpod%1#J~2o7#bRg!?)fNyZ0ZEGua>8ckhvtCP%r2)o2CPd+NYjT%5CgwkAD7r#Qz{ zU}v#g#>J~weYQTwWzYtwPc^v2J2LFCp;@t46b}w;8;b3mJGf_m>_52AD+9U)`$YeO zD!i&+Ef!RMPsXf}CU%@niA+`S-*F;(JvV1GLb!GB*>P!8bKG?}>}G zmAR4`yFMB0n#C&zdJ7%%+Xp#26P>Ufs)-#LM z`0BU68)qd$?%lJ~w)f?W=VPe5Gup&s1@TR%=-DfI(k41_-Z<@}A(Pjpf%pmkq9z-$ z1~Xun3TJQGFK2Z@cIb+DlNFb9m8xV!hqtSnRy&!J%~~AT5%)au@#q*Djup1Em0aAh zf42ukimX1Bt&211fjH5=)ZU()&Nf(C`72ABTViwhCki81%x#7?$61G-kR=|Kr0@!{ zb=kqBp++KpP+1hNT3NOIo#eITq4;ffcHR?Um|QhK$3&y0SP;#ylNsD6V>J!^#CL7% z!oe!pl>VyZDL-Ta1K!xstaj(Sci@t2kTml!1j@H)a{2m}Xg7AtT_gR!1fH*g&mEz{* zMXKvL`}P5-Cz^-PuhjehCy>X>;lt?j9|Jezm(s^aaL2HSkn~l92dfKh9jPNpvX1^0 zYzg#(x7e0!#h>-Sw|?kbo{=B)si12a2=!U63QrCAU0aLVb@G*=F_V>Y`La&iMvo{w zt8AdR<&pT;zM%NqHjX|Zj@%B_9@E+X;GfDvn}Dyqf>3|m@uzm+>eH2YZWjPHfb zJ9aCiql-K@uoYAWbcFE`y=dp?DJBLWi^vhZiM`a(E*q<}U3$3N_^`lWY;w}Ov`{|N zMzXuFuQyAtL3Tt{NRYXdq;-{^%sQ?wmr7P5|f%l3w~{_>`v7a!`p|v z8oR%5Fm~)5h`!gWQFH7&wNSBR|OvM|=UynDBACGt5c}up=M06C?7TP;pC*{_XWFp7uv_!k~ zT7k);;t_bTNBSFi-Phd{KmM7|#lxTYXtav2#wX_D?UScta`LL}qh{Gz-CeydV_HZt z>D?B=d+#j=Vs>mIzWUWajkE8_-VxqR04S(kc6Z925e`gzZ4qyyD=8`yd0=I4nT{Tl z4erTndZu0h+r(}|Y6I3G$bR&H;(2}^*tY=HPxp0uT|j@J!_cN^7rH`E(o(hh`NjzV 
zN=C`CgUA-)Xj{Wqbho~vx)MI%sg{&2bYO*C^5pdnc_4h&Wyo-pvd_b%Y=l#0-sweQ zi>e3eg`5znfQzEzZ93R zUyO3^V5~|u7K&Z5rnKR)m38rBZP|Uz5;H1FT6qgz)Gc&fZ){a5rSBy}tIA8nRKcL7 z=r8`Yem02yqJ3UB+`E26L+XNi*NK8-`(R&quwh;8UVOhM`+ZIAo57?albBRjEse|K zx`fI@)zSXpS&fe$5_-gA7|crH?Cf~>YbrD4s2{{EIP|=apu%}@h|_yKbLKk zPu&rar*Nh_$UD8rUdDD~mkYN~l@t8A{{hJ3XZhwk7}e9Lr+-sEDl5Q_ty=-T>T+y>8=q4@+Yf_e zn|>HTyA%&z_O0qCmnFGD=Dt5cKJb?VHZ*Z;pXKj!!%a{hcqa`64e!W9>E!)Z>3xbz z%>zUs|4sW^^_AnGXIPGO#Ssoy2gzODqYwS4C~k|H?~>{x^!h94jeul{o8lNz|L*aC>eWoWIE2AyX2#cC#L3O z+m3-ad|-DB4)v-%(f5H3vJq!apHDjmy>-HU46#%Vj__0l-kpdsFA0EXVxpaFNLD6e zCxRzCeS`C=WN4$pY+yfN`w>U!Lo;>`z$RF#&`+}L$*xRZYFSu(%-ECfmcS##@-AK(UiNXHpk^ zR9#)&KEjYeb&m7H7weK8vTyIsxaYn*&Pd^u{Dq9pf zVw-HT5=SbC78Pti@m>py2c-WBrEbZ1(M~r0f^1ZD4Sfh+EUWrP=cXOh=x>6@L{qn= zY~VZd8z~vQ1a39P*jS(GevnE{V$6|sS z_BNAy@DCXL=LmqFjmhX-y%t}3;&2?^S%_5I6Zy@NAM7s#j&@{tc`! z5EPJ?%OMSY;-2VpJC}L&$^pDumBr;`|0R zt|l=AX?oiPWkOmIjE;SF0ASWPMqA#gY~EB_7TVB9X)_Z9`OYK*Y}Pc1Krd(_OaR~s zyley~!Rsv>klms<=2A+Nv=Ia%n($9&Jti<*P9=H62ep{!skF|Lzx}CI|i8h4ZUNsW#o680FK1>ThK7bz#xnJ~yxg;CnNqI9uPRbha9N|6!nb$`|lz zFDYE)oKjK-+~^0d<@Z~_`C~oS=?Fea$bL9%E!&N00eL5yHHg6BvirKRz4h66N$zBH zNa0Z|AD%zqQhk0f?;LF46Q4B6E0bHg$*wm;mX>J1WTRxr|M2`jh}&km@E`SO8l*QF zzCQ|5{sbc)XYkr^snafVuuOAmm2z#xVM~Yc7Tn-VAkV+f%NgC7X!D_9`h&-2Tp-0y z@6x@}&(iWco?ZWme*8A+pm=qfLL=U1`eSP*f}DOBj%m_YQvAqE&q9-sQBTk%n zCypOK?!n2=edg2g@O=+>1ts_|$slIicJN>oHJu@#xhz?V4qKXM0Di#}#g=PJK0=@} zzZi?vWj)Wk({*WXQFcOebd{!gL)z_o-r!v zpvTE!2bt6<&n^c=*A2Kb39LhT@}7?S8)=;ew`f;5WW>S16uD$eAbXotmXgwMmz|yJ ztV!kDSX+!P8I`@ombh*Ao|riKPE4MAHyWnqqPyIIWfCPB;taCUyqE!t3{Bh%OW9rU zJv_j+l6Hc!j;=HTQz6i~st@>jeVG}!>8$U{3_Ci^B!gd$D7s3JRL zE@}&`;G2(mIj?l)QKyI~^c?0hJl!UKW>Uzc!q3y$tX$Tcn|#*1Jh2!T&yKKiS~*>v zsYSEu@C!flH{#H}_r^k-Yz)~}H90D*_GN3nQfE4YAKX`zFZ?+-J?DCbFE}EJPW!2e z$ru}*j_03w*6ricrAx9O7K9JZSwjr<57;)@vwvUQtGxCd*dOhxLppJL2Zy42FdZet z7Ns^^RWRzztCRG~I~L7N`S=WW7{JBIZD{zAU5H4(Rk)~7s+COkZ2>yd*VO(f+nm}d zG-Zc{%jeF;n{uvSeeI=~m>dx=scnhAEwW)MOt=vbGMQmncIDXkOe{&Z;8LZ|&Bo~N zEX19+9E@8J9X3s{-&rNls&d1dwUtFXgOqIn8Mnp+k>=RGZBJadaw-1x@BgXluR(IA zCwe-%W8~WP=qyThh~8c0QgjQCvf}cieXx3wi8A6m^N!`B+BnWwReZao_S;hEj8)Yc z-7C@5##lhC%ITkzt;%j7*t}B}@lkVo-2LPy;+{``E-sE#Y&ZAq7>qr8cA2J_6rNlx zepc3Ep@M`2+xz3JF{lA$1rll3rqhGZUKG))*QRKIvjs{gO9wCBxwcNb+VAZVr?4 zeE$B!()cjQ(^Y*KAPn%M8V!A<#R^-@Q_ z4tcnn;pTk)x8x7!rJV}sUnwwHt9!O8#SZC^UbWpKTZqZ(yerDq=rllrQ)nkR5E@Xe%r?D-byyhDd?iT=SJ z;S(`2H5>C4$s*~2S=pD^GJOMmX=3I;kFeU19U++|IXhEX@Qyo7qA1e&t-L5RlL%W{ zRhE^8s7T+_4?(*bl#RDAHydrT>1Jg|uFQ?b^>ZiVEC23y;_CHtaZBG|Z0qatQB18c zJ9eCO4Ljy=BvP?hb{^;%R_0%m4LTwFW=*=OBJtOvDA+PzdiL^z`(I z4>~+~Z)9v#<*CH%!lERubcCt_{j1vc5j&i*&(`|gy&bV@&u;0#{^;uMiNV1^(XroY zF)^SlJDc5rT4f*E7L+{zZ&*jDZs?z)pN9#I^V73FU-$is@5c$@`quHc;>yT%*=GsZ z(HWugKKOJwM zIVt;SDSCxJ6FP=f_HsvOOi#?lTc=LRH(88L|5oqMQtT| z#r~be#^`J>DDO_$qASXN&3%XL!b!iRJ9EXymUGg>^c}#TE`VszxSnhSe!Sb9=u4kf z_MU4lNAqS6{q!C=P3L;r!awN_+K=l?4X7>Qn;qIY6#vOD{#@L0U@$P_Ca%uL+0&=u+WE8IA+8{M8Xt^> zMBo3;x8j8#d@E%Nq)O9OIjF8^vEZ?;bnz(FJQ7}fBb>_MNt;9hPbxYC>JpuAJ5j*S zF!?|U?bqq*Tb^Dnzwz>z*O{AB76CKTuiShKyc{jgL`Q}$9iQ~^;lj47q z25l z>0Oo=cx|bwdcJiKUJkB6qUk#QZ+bLcvtYJ^`3_`scV8q|KXH|=5Z%_~W3Y>}2isKl z^!b=im4CXXy4{2fvHEork{t~}^+RCg^dl&8L9~L6Xur0V>7TJPr$!_oKY%>lmm$dW z#+M;|Y5^=^kP9Qm8tiR(+@T)CDj`WYu6qesrPlBu-4q(c;Uy~z{&LN<=^YgiO( zcE?6{PwYN?B<_CbvAFH7d*Z;Yw@25|P|PgV;^gV~8rudDasC^i>Sgvpv8IFDQOYTdUc-OlvrMvjpeJ?qi^S)`1}9aFU3Gtmk0HK z>tFoym^t&F%8g!C-7k#@Cnk2Z#^F2fj4yxoi*e5bcgMpI+#fr4ZHt+isknCiN}N7@ zB3^&>jX3_+u{d{{e#$xN=c>dxyVF2d{M22Yn2Ts8+3*K0ojw%iPWVep2~>q718N? 
zZY-#|Db8voT2xQH8{^TnHX2`e^g!IUw-}=r-i+pzS@GC{njt%cLz~pU;00MKJMB+J z^0ft8>uUWgeMsAJ^Rb;pA4@6+J5m51`z1VOp-m}0=)A7>W_wk2L0K8Eb~yc@ob!e5 z78rV?xGai+mhui1P8CV|qC>0ve`HCM>L1^Rx}hHFdnY}Pj3PMskrw=lE@wgre82*3 zz+r{Arou=VS?V)Jlq!_~a%`W<3jbCKg;rTU ztPo_THz+g1h>oYmh7~p7tl}^%m4=ZQE@Y7k0|B!45LE9e8)4wp6i#8t&xIDZW-~!e zP+q|XCR|E0J*RN%-u6}L$Vj9iGXcOy05r%*Z0U}}w?0B`$N1QcDvX&Qi8c&o*9Smv zf>ars*SsQgKtH(ipkdhzV%owaYW!{h`r&-6%Axc93aj*1>|nV8GvLFwb>64CAsoVv z!G6wGrmz@m>8#H*o8I-Mva_llCwW=UEC%b+H#d`?W>Bj>P~~5J9osc zT|1(?yVv=!TL2x5ID6C?jgCnXO!h#qxJ*jZPQI?-430R7epVR05&r31WglIzDtx5a z81T>R28V^#bi(HhP5!KMV3vB1oH{x<*b>BBcgjIphb{_=0(}4c#d!VoW8Px0r%;SX zAAcn7edM7ix3u z50qyU@;Gh_Tf$|p6CS2}_TnbkBzayo0hR+mxojk4bpMf%=e;GJ48SYZW1@GeH}2E( z2O*CyUJ(xP-zGhito{IOsgF(G%N3FzGq@2qAWyrFdtUwwTz|gdFWJ_nV*Hx&q~mni+q1T| zNasrzpg)!*|2PkkZ_5h|_@@CQwnCquUy4d~+N;tQ7phUMD6I4rXKD@h4+UrEcXsu; zLsX(TILB;tqiWj(`QOY^T$M-S-W{mO?Nu377{vTASeX>SS?aJLdU084kY2UA>XlyT zp8BdDl>yrE!;$e%GM)VLPK8|VQa%P@F|O%Uf%fPy_<{J)-1VWlM?^BSnyrD`NY`H{)X_SRp(+S5diH^`pANA!Tw1c8&x{a}z6(=*(r1w-!r!4}UdF-=j}i z-M=Ipr&PA_`D!c(|9KhSkAC{oanF+vM@_hwdiy=-s?xeWGFaWy+bi3&%{+x3XEF&# zGF8Px?W%iLXO~KHf?HZ+T6+E5xpVREyYI&I^$GDK-HmBAwUYNBz~0?^ylc?5?K^zV ze48Bbw(c}x#YYXPUf>%B3)lh~zO|Dho7v|JW8W%G97&`}iGgYER9;vC{-zIsRqm{e zrtMLGOx#?U<*d>7;=~(o#f9_ltIdwZ27@3)>J7VJ^&&hc#}{HobgC-e(&CEvUN(u^ z0Jd^j_?FrlV%N}c96Yci`Z~MS{!6wOTN~H)tzbK?sxq*fQ=y|AbK)tsKHez@8=Lyl z$aMVnSN=3qV{u^LcJW1XRD>Jb0+%GqaOBJ4lMe9~J7M5LdV&kKI_E2kM(~STkZnA* zK09)&;yt!XTvIz;A}GJcwwROLX^@RIi;cP36vI1r#hs6SEQa>&k2BYlZ?WBj{(JZD zi>@xU#U|`H*@9vNbR%uQrNE9-8&O?M@&SH;$2krGOcd054AIam7edrhTT25W4Qiim z7Yg6q(dHc%M#jcsYH~*MPx5Xp9gT%7LM|ex=nQ3YC_5@JNtJrxcqn$yLgs9U_tkP^ zdM51_!7&b`!LeHGjJ2Y6z^)(+Vvr`g1}t+`T1=<~|8?Pqy}csm9J%42(oMRGACSFQ zZt2H;1{55~gZ{OU>B0}dmNY*UZjN_z_~h*KKB0vC_05ljd_bF(fdKNCyhMFJ1BuTz zF{lsBXIG#xs%jhh1|8^pVH8C=04$lz?XdMhV#R4-;6sp-&gi$bjM=@yrVdS&M?68A z`2c?o3>Z00fbYtyKG1&y2%ma>!e0+*a6|2i3$LX=k`1DKv#`cpVQtG8cZFA@qtgqp ze$&sUgq<$I;eg#7&ZM#$33%Ijo^E4c%Zw)5C( zW4h3!Ycw%G>vVv2Oqx*)Kcj&+A;+C`Z+@SBqfrVr8d)Vw8v3iCE|1D#UxoCY$7ULI zD-oJ&0#@z-Tl3jk_8&rr`ZfQ4bGRJfZ@#gUK$^VBge~+D6Bv8?TBD=8BRb37KC-B* zv(IOGw@G(;GNs}$IFF7%FXa=a(NoBlYAx@uPx!kI=&{4rz(x^xGQRL7ysVBWN=Qmt zOIJzWt+DGOHdJP3(5C}aVz)H4MWI|0UY(M`vcX!KB(Eg*=I3H!MtW*?E{ffq-U6QE z3bEx&UFDdNOq`gSj>)MR+g<2!wwYzHpA}c=d2BT%g3_l#{|4=WEzQ{#0Gk5*k*SrL zSu{-yiB|KI6KcM)F=of3Ao=;uD=)=2zVdsqp!~|>!`(x@(Jh-gklC^TWGZ+kmjZSO z_E9T?r>!v}eK*CVMwNd}b_P5%DP7PhUHH(Qx5TF(zF+dbHKxWUGkQ0RZ`Wh{_U*A} z&%Rhz-rI(ED81GgotTc-Uw=JLoq8|MU%Vb8akSp@WKnPj#ia7VC$<~-c3TkS=nn$*rd~* z!G~p$5)nh!NVzB;Y%Qk_UD2aOe8_}B>`X|`j#M2b`rt$>eQz%nWtTRHH@o8D`|gPc zAG$XV96Sh<#n|XnoH~0xmR6_j#~^FjWo!4YeY)R>>(@u)-Lt1v&k@~9bH|~BQJI{M zKmO{U#QD>wtPh2{?2Ygi?Hwu`yO&Y#*jnC6Q}khC4Yp-!aKz?rj z;$zwZ1(eirTW~L@-dXku@+*LdfQHC*#WGFZeiX)X10N21TdBdHPG3F%*Y!*;DVTo9 z^Q`@Ek<}#b^a#qRz6GFjBB1zM>5&il3rCKo;Ot`Lu*Dd7>&FBD{DzY3_Lc^lya!)>hsLY2Nfp}p5zgh&GrBZEeof$S1`3wl zXXW*??z!e=1JZr6MF7irLH-PEy$AC6dEcYQK1eUm%L{V&vf2KiH5TC3a85SI%aF?M zcA7&LHs?9gA?*}D%TM3ZHHUPczvDt)DNbI#t!ZpN1B@2{IbS%8>KoXMlb#bK9!p76 zhO`(je_y{QqGSLcr1Syjt!MmL>^IkW7M`R?k7mYa`ad#oqbp`LWF0!3qiK~Lc?X28~_p8-B%6mUcG zuSv42GBTp7UXR}1?)Z)W0 z?@%Kbw+zr9l&>t_oxn6rMno4XTmI!(VGA^w}}1HxLc#j$twAD z%tmSp9>mV-5WeM^+9X}K6{VE#^oUNSLE7%g`ZL!V%gM%bc;D14`)H#@Heibv!8Reg zJOP0A1og#NW?%U<4Q*mQ`T#d40QAIE^sHTv&pxs*?%Gw1>5+Hzeoj@kD1KW|nbxDW zQq{eZUB~VfPJ;Y6Bj)hHXO;oV+Q$Ge*FfPoPal2aT>?~|v|WGFK};G@V|N>zT=GDjwsaq$8yyN^wDjfg1jV$gB1^yqgZD98Makzmb1I4@axg8cL@8+ z(sG=5_q2Ye;`F6+ad7`0pLh3@KlY{AxqDa4PS3=Zv5|Q0g%{%o&mN7QzFs>ug>^nACdQ*8di4(ss7(~3qoW+XeLbcRvm#$Remu@z95WpNW?a)bK|N8Q 
z%@*|fps;i()(5pQ6a?@-!X@W(_zpuS@ngn&2p2st0fY7iAVdsEGYDW#g(rcOcAf__ zf%7&>`JJTF(^LAM8Mb-;AA~GDw~dt7jh$^7fQRHrj-1OaAV2@4V4N@Ixxtv$N6H~c z-=P_T)6!27Q~nJt?7#tzeoWsDIes@n%3t}=zB1to(EffTWOUn1Hxu%bgE@Wj@_%Sr zj(;Og9{BMT5dkz3q)yljD8HWhLduCol1;qkWxbApjCPr_<@x4%tD^;NNtd+Lep5Oh z23zsX>n6_!Ii$9YV@*Y3Xy8NwS-s^n%kd{YjO+9y`cYR+GNf`4ULWSH0Lg&Q@-m?O z`OZ&eg*WIV5txs1+L+0BPG#PT4Y<_LCUdzq^V|At{!=n9wJ#aXsMl#!%*VD%^~uGUr-&FgG2&k+OQuq0=JRo%-gY=OR{_e(K9tNZz7 z$-6Z0wsZS7$?HAVC8UqRx3DCgBx4`FP%4(C2YMyLq%W8tCwh=JD|gunm06gq!tp?= zer$cp7N*b{{Loi5rH8Ju-qb(WgXo7GKu1aP%H}{mB%6e7nY@jY=x$p@?T=MwW3o$ z*olXZ**v~y|B|*G5P*BzV=d`yIJWfXozK)`fFH{o_H($_R2Sm)HQI~tqn)-&CpNeD zxV|_W&9G*&6lOKsY9(o{({X@1bW@l9X`PrxmA6CG1%V{)z zvP&veW-4RH;Dm*o)rA-x9rrOAy?x!$<4*R~IQ8Z`@%CHC;_Wxzjj{33SW>wbRCk?< z)uC1hPj8nUGO$AqWp{gY_V=oNrJaRXg$G_~P@+Lje%dBbEn7cqVA;xg&j2p=5))u- z$(g1Nv8Z;0^E-8YRCe5KT)cQWE}S~$V~@tiWp}7v5fyE$b`-xBN*qJ5Xqr~59EXr3 zBmPoRyhn{An@ghM?%|=>wR1Rz2m56Q_N8_xyAHdanc@poIliJ#O?AU|vYdUlvfdcI z!^2{Q=6Lq$=i>KYKM{MC=FUC4J>WDkc~yMBBR6MY|N>hbPjBfLq~3p&wTl( zVsdE!pg>>0B~G5c5S_z=(LKB?4jw)zTv%bh8tonIq@Z@8ALk+Gwv=V6+MI1yaFI_@ z6u(&8=n*otF$o|4Lyu(+h88rVALY|uyFL*oPrfIbRik&PKRSw?s#4ist8+Wt9D{PJSqacg^9~{1(e}2sqxGDc13OD1QQV`b*g*+{QvZkw=!;geyW-tItK-IJU z(QOChspK*gs81TuJGM}E7_|+PdJ7%MHz)sJhr2d# zH1d(j|K*N$*)Odglph+}7CW}@a_4GmF%9ZtH!E2Fue<*jx zHqnR%uq3vbnVq$wiN9}z^o+&gpMucz zkh18H4;olI<33WVUvdc^`e zcri|&J|(kNwr}&QCpbV8`f*mpJFm%iIsVScIIvf~Lr=SpIA8~|ul)X3;+gkO#E@{I zTwOw>i=%#I-<4&nuyYf~3}I8l$2ODo4}QQ7vQH&`W+w-vT-pvl`3(NqwpOWi^|Lau zjMC3Gw;{E=oND$pEK^e2O3lwnHe~TtA8D&`wI|Zg0BvP?jWF9HFP&T_$F~BnxB8%T zsOJ*^@Z;H4qM=o?Np<7=U7pm3OMMlD1ATtpa#-@bg%zaH9UE=pT+wk=?Xx7iuq^wg zTXy10&mE8d=YR7*sA%};%D1^Q4&HNbJo4xh@$t`nIl6}TMX9sk1vuaM_6za+cfJua zR->W67#ls?qfb2E-q{!IBDE^|HMg|KqU3~kK~SCTkbgR;wtVOQ-SL@EJQ{m;HOa<~ zxHK8@!b@+&bFaM?*T-k#;>c7?SE@1(OP&OhJ3;l;Z7U$Kp5tr~h93J*WJ0c}^uI9Yg+UGC=Y%vGv%|g=6(ueAYTvw;Ny`7GN13{lb)%2)wl8|U3@v-gAek(tySr? zf~S0Lt#;m;>WjyXX5|-J&#+% z`JKu_8r7Q8tkNNlt}chaZ>F2WmfOvH zwNr;N?`;CCcH6BBbQO?? 
zgU9@REztytn1ktbgZ)7INp>KU^8B2S@Fy(>;dx$NS+lRcyo%phjjDV!j>zS@#BOcX zn)`(oS(ol&QGk4YPiWSA`pMDbNnGhV}_ zT#di=w|+Wq-8&Ss72+q|e(LT5Gas2pOF@5z~Y|K|8dU_&C`e6_^7d;7p_6Z*J zBdn<|43u4XP)>B3c`z}QJ}ye9m)zE8r>0}0Dmt{4Z4)j{PegTM#Jd{ArAzV9&;DHe z`hWf#F)=z4^D5i#{+oXp^Vi4Xw}0oK+cx>F-~8=(<;CyCxfh>Sx?7GvY3qr59(o|| zxb3iH{O;IyU~e2ccp!G}7>bTAjtmkWjG3`Zws!zXi)B|hihz1RR;wz!P?X6Q`mGn* zVnMR6QY*#Ti`V1KxpQ&!2d~7l&wf8Ys>!S(m<3@u}=l7Qo_jHpOz(WD7elk%eH=3c^Y9y@ByLCIHMwtLmetVLE!&N8)pj z?1?*em!mTFUbL;wC~w)wsy8M`un@%ZVO@Te=-{IZvNF*Y(7}|0FkVou_L9?6>Lgku z-_arHD!yT-U{|pKias+Wr*Yj7UG!UDRebTkLU=ry<04EWa6m373~77f8x|5GyL_fXb{P~rYv`P&H zOFU*xQ3q^WGB4+l0dTf+V@5f3xKdkphd@tHpJ>n&-96>#?dpy~TT_(UCCns`tG{#4+2c6z?N{c-E2YTY8A9*4kd*lJH*kj=7^7WCpc6B6PdiABKE;iVCER@r^ zykJH=c0NG(qs+=a*QGZG`nSc<_8rmZqX2r1Z^!|#lGtdDxtS^Bc=^J)cW;Q7q|wxX0}So%3O$ZDVeXiH4+O;h^@AAaiaSJN+14JJ=2lmZ~Eea(=46M>kos32RtunMw4(lIo$A!Y*E^&j5%&uJElVudxX4c zcT_4uwrfI07yYHy*fG?PvMskkS#2^(x(Gc{x(Lx~OV5-pPdr_>imVrd#u?0xEvG z7wys~gZY;BcSFTsbf>(5#52I=y98OiQcnPR=J|ifv(qsx{V3o@@1$!|z3`mT8X$h2 zHb8toZ@x=q6a54^|MD(RV-t6M&%SNO&+@OA6C8M^?)9@y6h7pc)hXZUw4&Xo0o}AZ z3kUt)`|pb%`_kv4S2F3fmtTqV=Pt#C%a^>B_s{?BzZ0MP!pD84*trw$#q+Pc;6eX8 z?z}D7rc0S*x`Xq$)}k^!>+Qf<5saQ@mlW8E31v7H;D@|kSgy(lT5u;MgIBCttxPRg zMz2cmU?24N560d-yR2IdqD$)9@S@fc8;<%#iV z#ZqKzQdMP3LyKp)ay&w;rwN#@W8nr4$Qkkn(6xy#^j%@3N)?6yeae9zlI_FxZVHZV zv9iRO`_taq+m4O^UE>ZPe$4I_kfu9>TIfWREKqu$T!5WNedr1~z{ed(axTFEc1*K` z>FkkBQ0j7r9-a0a4WJ5&RoNQr;-$tlIdpO!XVKeq28iddi*V{0u%@2Xip8UJF09A_ zNrp6aRue6%N^6lpa^bbg1O(_)Y?9;G5EsX0W3npSr_d0Of9z9n=4tbm{cI{b~Q_hhC?3lo=5;#zOy`9n5(dn&#FI{{; zPMJaWxS04js4H7lkf4^eS_MY^R>y0jCL%49LFd>giJ3EO@dZj;lLjMI-n> zb<(MhmEW3(iCqV6W@o2kV(gk%Ely8P`Nt~9nrO&8O;3_wb)w=nO73F6tV{n*&d!VM z$c!`>gn@19mwGKJFIN2R-?1~cZyOLTy8VsLQ;zO}Z(MejsTI4adBx`rGtp&Mb+qlk zVe$1+JpIDac>SHz(IcDaz~Lj}krvsP6EQP&U9?i!#NSK+D2g`=;*E|LwNG7HEy`*{ zbbPa97kZ6$zN+>^GhJ&|8&I2CS36r=T#aS%XSug8rbXvz;W$^T%2D4Ix8D6g+M&X0`8w&9(z`_Q4-d*Gm0RXi)T(JFbvgsirr?7r3}4^$<6o2{0TOr>2>etzy8 z6%Oz@CU12~d6m?*MHD0f^t&P+V8CLzy6P2;gy-C3Pg+pAi?fqanVghdTn^3(93LHv z%aro{O`uJ;iG7lN%&soTA=4+5_dv4kh)sBXcP-OG73>C-$OxbP{*(Rp&uJHCmA+~er0R#QNOB#UiE@*}Rk zCd3825I^uP+c|^m2**ITze7l6=ugg+@9<6HAwLVT9Pbp5FsdQ)-AWL{<}=CVsN@p> z$kW*f36~%*aCYAyyE6gw(yOdPSJG4XfJefKu;7=K!0-8v%p!gGCJUK?bYzCh*8DxN zMV=WRgrV+90dRWynO%L)^B3U`~l&%@x3gK%*-hoeC@=UL{K*BZQ{Xsx?BO8%H z^zU?b_1K>-$)D!DetcZEk?-#5jDE>wjw^u2QS~iM{;f)%N#9_5!4u#O9@Ht@oYI~- z+a5W?8MzkJ`gOBAPrLkeeY-j~79*0AO{GE%4h_Z7wr$bTFaKORoE@YlC10;zzaGoj zg{`b&Z;$TIUKK6TguV_IXs`gFN#C*iu+dl~LAmhNYtogSlHV;WwU`*a9wiyHy)tvy zvFU~H{#E?Jzxmg)&ni(Ay>H#OCrX7T*}?O%PQQe7{oLHV&$KTp{TjR}r7^R-7&H1l zCmUs1%KJ#nwI+-_W15!fl4K;p;e{BPg3+vQ*vA$Cuk zkj{Vis?blTpeh%NlLZIuon_l#E2?5_KXx%(mc7p)3%0BK zl~gBQkR#p5q*?eFJly}F@Ej$Ua|rf1_2qsdmBV$4jk&R6p2%%C+Iyj>dS`+k1O6?g zSeRSz*8lV;lsn3C_~71n_`ZkYAlt~Rj+bP&j9j0TPQ)jnKZRTyRgQABD6L-k3A0nP zaq85W7@ry!@AY~D-S&O^Vt#5azV+R|h&SIm5$*I3%5I{qbjkMXDV975ffpQI!^92B zPQPKYvsB*9hPHf>-3^aYHh2T`-hCMQk4$p^jKcCAARa$f$XSk0aw7lUn1zSp zJiC3Wp6pX7U277;30Hr2TjgD9>mO*JS$`-rO5c(%t_b@1CatK`OL)oh58B|H;QNp! 
z`ReEP4Ly{%Cuhi(rtD1&yo6;KCKIlGHcJFGU&rdRU zRRvyer0tgD&@D$|-@!f6Q(%SRMjYO|J^CbH7-(7%@77db?P|*_qTkefE#5kJFRZox&!mqPhot|Rdu2J|C^sARGPirStKvZ+<`a4s}PNvpt%Y*W>sbuf*^C z=5MJ=u<-!-?U`>5jj3NxconW5z!Lv~ZwvHM+hqX=cyWezj;l&&e~blFY2rBpSFMiE z&LS+HVggpVTyi_%=oR{O*;TI#A60Z`SC~>8@6(_e^??t^;7NFWEX|!C^x*0~$?QIr3I5Y7@i}}(9Eay6`5@z6 zPSR9bjnzbRJ*T%>y=PaOBdH+u^!$zXMxIUCgdU}Z4+Y;QxtMI{JSb&zxp?U`Q_(!8 zAr=I8Nbd0uFLGS9nfs0OaTD60J?Gu*RA9%3pa1JJTMzuqz*fxNs^sP!Q5$QD03m0~Pc2=`@C;vXKJtbX-w#H&YNiKU6@XzJ+H`+`K83Zgt33$lNt zOX=4^p3KT-kzt^Bt*%G6Q`j$?BqF4Bza%+L_(Ci%RBXSXuf1rK<53itm04K~YD?P<-#@7vrzK@^2-)O7Sm#^S_flpyXwT9eert_`&zS8zT9=m>hupwWfMoP(9aJOrrXn zQ2m~{I1=YBRpR2cYf+mX7wsj-RWBi%3&cw-PElH=&dz9)O@=+t)!i#nHS5|P?99l* zStbNl&^_W8{2BPArh2Dc$qJI6oZ1k)4WC+{%0^n3;Z>`qHbH+J^^70NbB(RyGiG;E zv|n_Obb=?U%AZEIyAO?aWjWcP_bL90c%BB~UC#8A8c}@uHWf^?(W?wtFhDISFRIn= z`J;dghT{u?0C_hu?kSr`I)NP%*VG2u8d&g=+SqzaqxCfDvCBe}d~209n$$L06{f3k zHU`#48*>kIFv@Is?tP1TkRtc!smInae(^#jjDjWU=IuhEZ zMdPB0Vd^MfOl)cJm>fq(7lq-iFDZzNgK5{1uS{ zTe^8cNsCTOAG=q^h2t9+h0DQhC*2ue_#hnzk?NgyjC<$uIfY4e$5<%!lRuM&Cts+1 zwh>hK=vPk^BBavG(*#odghS#lXed0^8(wYrNB`;JqYC{Xjm87=bcTtr4AN78w8ERr zol=HC>I7#6nnW#WCkDtE$H+raAxCF04X34rf&Vlsqd8-C^22CirvT29A8ZCjhhb5fyNfQ8skIKr~ zR-EV03cGZ+{+gX+fKFw$h#MUlw-e2ZPPSl0L6pi}?gVDW5{jza!A$FJIr{DH$i|?= zz~t-<0HabZM#`a}{OJTwM-MEoOdBui6}(bifm`AO&d^|%GWEGwm6Dc1rs2?W1)r|e z8sq%?7vk8P@5adJM9fQx_x5$h!QF@BuDkDu&wl3fGK5>xR;iWwn6FI6+wY!=7mmIt z2SE;C@2~_t&ccd2ET~_Udsz-QopLS3_UIcJh|Z2a5Bl%gyHCz+pUc8_*U`usV+?dP z$N2SYrp5C=cs53^T#W&RXT=v|A0wmJd`9r%;=BhA;9<6&ZI{xebEK=cJ1$V=#SFnT)@pB^oA$hP*=N>{fpYkaZ^k5Lu>FJx}x!p+`lBVj+ zPF|Kj@l;ynEgEwr8P`8FPyB2iR>Db6Koev=a$Y~OraZz0S;~)@`RE4jk>?y~!Jst8 z>`(sWPsW3fJnX@!s$}J#ee+v!`sAtj=*OOjAOG?f;=sOru{bvyU;D<_;lPV1ovl4i5Im-hI2HTRMiVYoUc#8LzGgR~d1l1Lg77v7#pqCA^cjH@1q0 z4(JyKUZFFyx7h)Kb3YS*JE~}@X#6BkNP~%Trj@RWrF7B`HmQ0xV%e)(RBx&?oHZ(d z4sC2193k`|+t)H1oZS$RztkCN)?`1C$I{Haw`3o^dMRc`FGrW~Khob5KlAY?qf-X& z!nF}QCrlS*SBN&@PMz8QT3a^ntE9&11{tNRdCHzO{7z?5YFFe>IWU0N4EGOu5PM>D z%)0-bV{fXCuf^o_RLrS582aQ)eX&;5;5l94U^E(%jdR_+Vn%c-%7!TyiqYAX&UtQz zo}y!8W7%zIW^P*anYRt+&I8f9MfJo46w_RFye`_ZWgvD7J2J8IZee*UmR4{$UEQ+L zg@fv?q`cXYVDHYIv2Ca?hWZASroxEls0SwFdLpBqE#vgDK{AMmJ&me^4%stn(0Eqz z;;Y|`Q!`7kuhbHIkK7hzIs6Q2j9$B}HnI|3Dq}(X$AC~-#!^ZA)6rZM4V12XcWB2V zh`?=JdC5wZohJDu`z1slR(DlYwpO*bD>Ie2rhJP%ebF~K93TDckE=c#;`F%-F($s= zws&tl_K_!I`|h0{46?424dtX&E_@zjuJcpI$Q#0V#gv{~kS}_R6WJ`EfrW6?7xWQ-2H?0EHN)Rmf#v-c$l^6{L;;{@%C?r# zKaa;+67S@91I_}M3&`TT#^FT#Ov0x4;FQw`@B{^}rYUi9J%SFA`~r_mH>D4$-SM24 zl<##KAq$*-vIqUDUhkw)k45J7I^&*fwvt*RRK+auM^7pL+}9vp!kIRe@=F06$yd%{ z<_z5QK3%o~Sy4`a^_AYtqZt z$3~*PuRnGjV1*dR?6kYnm=4;p(Q)ZsU1w%J(QfCiebL5>eA$O{vlTCDD7UkQSjAVn zWM@HEa3QJOV%!| zZ&%9;d0zV0C|XPBb_h4oovnJ&bFGDD>vF;^GT9P8)p`)llXjHsKt7}IEJvs$_?R}P z=hQ}$4FX;Zi?gyTWY3gKseRQXXPAt*NVsNizq);RAnv^5mbmloyW;SzcLzr;v6346 zFOFP~(a90j!CG|4uRuEN*|j6uyNYf@moHzAx8Hp$W~S%k;GqMuCz*iK6XUZZ@q54b zdvR=XDh^{qiSEl4+3d16ddh9lMPDBFvg{061Xr&h6W+F6d8_~Vz#Mijwk54lf3mif z8cUK#DmyaWAJaRxZ|pTSFzY-)E>7zUozNkQDhpf>N!Qt~7mb}Rv?dbol$|K2IQo{! 
zDn|zD1OHqpQU09nofh~iE^!#5Kn{_<3w|I2(7|c3V9BxlhMO9(^MA9o`eOR~F*R)eG^JuYEhd z`pv(Jx%V$eERH4gO6^t+4V`HKy4=m~Zk_J$!cS0bh}Q>VpnrSxh(`zedSi#|@lSp3 zN!zyLmoCSfM_-9cXHP^=e_tF>`#*5&EitrfSBy^A;`D`)_}bIokLTVv6$=ut8*AeE z-T~2(#Wm=ZR9^sfY(UpgchE~@wA`h7^vA0OA?11{npAg7voq0ME=0T9&7SQ8@ug2a z9*^9Ahe)y!bK|QqK5{vJ@a!}G*49~w&bE&D%Wr%w{{3(L4=JykfC8ZrVjpb{J(s7VVJTMA}q$!rW=)`uIN> z@@P5KcaRb>4Di|J6@6lMlT3h)$OeAyf1%xprzsEpDZHa?5y9^i&ipDsPSH1>CJd5Z zny5qnne9aEkWAmXzP4Nwq;?2iq{)4HM%U)J1H4Pm&WA86iuI>%(?Ws_^et_>y!p%9 zp>;Cv2<~!{ZkApKX(hVKNHlPA*!0aZgrK?tCIiHzP15FVBOnV>|EGa`wxE{-dAi?7 z^rIgl(VTt_>OQ9nVc>V<`j$A#hiil60in`6Bj!43YDx<+{LIz32?_nUrf+$AAg9Es zV&19Ev2&d3T;QE<=_?Qdj1@J>R^SL}{3?RLwS>%zm{L}me7zZRKK$z-CwLAi9?hH| z)*XYW>E_~W05WAC1gqp!W=1)FSj|B*)@j@>&4Wy4gX*jb9{nfW+=?45Y=sy-wM*8OBwtIVU`HSN zxK^tRQKf!l<1t=U2a13mbe*e=9nwLHBS9GrvIASY`l4Mjw0B_8b`kQkP?8K*JENZk zzneu(mS@6;BluRtl-jSdM)uAZQ3%>?Faj6Mc^*=kV@XdXBY7tB-asMPtL*Gdb$VOy_@JXO6>EgeTC~cFOfc1J6 zG$VgiAmobdyT-P5;U;@gb-Huseg&v0$fC+z6TXt`vYS?fyVGfAa$cLu-^2pgjiu;q znvI^7$@u&uyW)WZU9mWMMmFe_SU(+wD0AO*%XI{}c!ynVV#-Ikb=}ZA>jq_otU_LR zak%J=OlwnpqB9v^M&|f<@=T86uY)UoCW|IkYD@q~l7zGxu&fk^(5W6I4@D{#%-BrI z=trK6$wkY&`QGIufJ)6|4%#hwk_JC$>TF3HTI+dLbX}J}ZW~X}?!y=Ej3+r8(VDpK zr_Xeok1g0peQuTy?sG<0f^V7_;d0dL*$=)mT~$~44kya#)rNoc>qm|X@dxu&_$4_h z7&q>~mN5rvMnaX57bX!utvqrCDSa~l@0FPgjB!cD2tg(QP%*5SX~hA`DvL^z?-9;B z8;G>AVmx5s0Q@_*JzE25MRDPFm?XF@h^}O-e)+$~QL>m__$|1K8Hhc9mIa7@ZiacBD&P?$kz@(|NIu0uis0CxQmX^^7=$@R|NfbC!JBsu}zn*W{2c$Heqh zupRW=G^;_yYie*f*c~06@#sSj$DQ2>_cshOaOr$)`dOmrF>n7q(=cX$> zLfc3NChv3p#)vhy_?qXTL|q2b1}HxUZl0Iv0d6w!kPUv9g#~haavW}kln!|%ndAly zsE+FNP!fc5x|sl{CQ?3$e>@|N=RAG}DNahukkQ-d=)GqI>S2vpi~c~{NH`JKSAQUm zZcIROS3I)Qa|)XR{NdQSW7sRlIk$REP7P;tA^X}S+dS(~ z`iltwt1^lb2~f$zC_uV4m{k=uud+|fxVC!Ivd7bvsN zs(xr8-ldH0RF|yq>{!($w1bpDLu0GxqqgdeVS~O?74)0i#w^9Kt;LO+Ke(a%812* zWH%*|x~@|ZKPjAKwrEJ&Y&F-|EQgBl1v(VvFvvz+mlH{V2JyYB z*ah$lGwIU|d)X_#7mbtwGk>um*zE!HW_fMdE64!Oudd1QU2Q4E{6*_eE*2%d@|yg8+k^z*L-M{H2H_dM{BZOwBhPkX{dn|PgxEu6pGuC~`$+lWrt)+M!DCQiWTboj$V zurd=e-Dw2hstgUP!==^5Skm)?>bGZTS4>LIjEnY_wdUA+_?CF`bDxg$=Pt*Hod0$4 zY>LCS?mQEKVyg+1wCCK#u%m z&8pq=wmjsZDA{GQ($+ZYRSyE3!GTn{nYnJX!c4QqKkI#9vBUre?6YA#8X*a256Mkd%{^D-GvUd9l(3}?cmvc%4R5avZK#%?A^~XQXTg5TT7iWJ~&8ko+N1*ts<+9jm!treVw>X9xml2{@tv=KExz{qzZYxsb1~T6 z9s`~2p3H~OkKM~IMr>Gvo{?~mW%1-jp~YM9FGzPUEUiREx`(5Suy4xJSHnu*%;Wn*gV!# z*dOu-=+k0lB6*}fcV~ATIdn_ha@(!3ZTo=3u1HR?k>Onj;_`)y@s&UR!;lkTXpKU%YDYE@ZLW>MCkickU<1&ZOHsj-J~XbWP07wz-C@tjNznhAnpLJt^rI~r zRQ~jw(x4rrXGuD>N4G;gV~3|k!cVcS>-1d>C4EofSa?Hh58w$A2SZMgrrS3^64TWe zrI*RM8-doQFqw>F!WYI2{c)Tp%j$n<%mf=`Cwzhb17EovDE|=M8bl+GRoiF~-?eu} z`#^uRF?na8H{QQG5go(5vG?A))UH}%X|5WlPo0ZzKlMVq^wLrJjqEz4blbaikzHLb zdPO?B$>v+b(?^J%4G*=djlhRY)@&E zWAdxox_aWst+&OKpZ|Q62lvF)nOeMd_Hz8mx4s?Aa~n|{7>pGbeTmobNuf=WwYsBX zERU(4Oa`mIeI$;E&#_b-7f~p%Tf{~*%l;c?u&ARY9=iYT_{lFmF55{qyZoV%OIPCQ z?>!rxV!wfH1CoVpas1fP_@Do8|E1}GPA#geEI`7}XFF_GcXo)s;l(x8FZbKG4aFmm z$acN^-Z*&RklK8w+9-GfRQeAG<18NYpoeT~2CyjN#(JA=c-maOPXgvb?O1Ry`yoJ_gj@cc@=y2a<<={95Mk1lcFa3}P4}Rdr3HTI zdM^aw$9LhBf471Z@SOM0f;ahNLm=lp5S)SWBuXmUmMi(@X{pSj2Wch0Dlc1JKPml` zR}LhS;%_db%Icw^lHNN^0!%^dfCQWG0n(*9r1^ZGY#reQ9UMpUN9`A=w?Q0ff$;I;cFV5NiTJ!nM*+?W+>vpjt2jV)fmD<~%b+;4T}`qVeMrKou9Uvo zC-L#c8SiM=XxBTdbB`TEHkk^l8{O+i-#W)zE>z~dMLrYX@qH&ICSq*-T3o$)Ip*hP zV@++cIy>inGV5WA4*!ylGG3Pdh~GZHkj5&OtHMIEv9WC<4jj5GzW8^3Ci-{miP?p< zcUlMh8lTRG-umg1Q||C2cO z%nPwNGb4M2ov72~D0YfmUQ(LMJNZJY(`<}OL^R&iHoyX7^u5bckk2mPdPxzJJN0pO`!s>MT4$9XhVd~4FuSRt*dRsPt z8|l!fl#i;({adft-w@6sm(x`JV&CNxKP{()4~x$Lp6QoHdIFRSo7;sXY(^s>l_8bG z3k_5j%F6@*@bb5fhF2T@%ip-=Xv0R+gAj-nN2~xUszG3IIRgbokugx5JN7sis)H

Y?6So^p#FtO zf>f6hBGeu6IOD8QKemrXA+&e*Mc=?sY~Q{s2DfdG{=Q*5@^rd_KubY9tE#4q6*<>0 zzxaGiO=h#2RX62g!HV|mnNwm7>NL%GWNWRSo}SpTW4Fq(5_7Zj@$zf0$HnoP=<8vd zt>)ONJUN5=SAOl+V&A^~F@*vZji?udQ7+`bojR})1Tq5ipanx}EBd5+@gB_2ck!p& zMb@T)#J5SfOj8a!IgC27nC4C^MdDK_!L(_{(x|B{u%H+waFwW(z_JEN$dL! zKPxkE(NFm?KnI6g8LTFH@T)5ahf6zSu#F?vydCLQ-)W<4o2&P0=}huGFVf(eWJ0=6 z&%{glBta|QbN+lwd8hQM^}N!%^bI+X;^*aK0AX3YP6u2{gCB;I?Jw|&kBO)BWG*5ZtK%Jx;XujoH@i)IEzG1KsTGZbg z5-z!;xCtq}RNtw+Q9hjS8vq$eP|M0x9w6UBIPnbcF`z{M^=iHRIatmrJoXZ4aoq}e z`JImF3FPV0A)fAYd7ZAt`G&B_anjlxATT&A0ByE}&+u^B1m-WUs^?rcna1c*-4fIM z#|`f@x;h`VD-=628h**k?*Zw|@Jlnf)kb|KKc%a945qpx&%Bkw(#fC*?qK=MCqENk z`tp~&I`+)lC*t)tj>j8sy)AivE&lO8`p5C<&wVP6zWj2$f97<&@#bqWKfM(9+bX9FJ=BB1B=aX)Lw`HWtaAp7;odNGIR?%zvE!X@^*)XoziVglnb}v+x zVzE+@CDP)VhcRc0zI{=_Qx6Nlnt75P1F?0n)029n3c-pheB|M zIeCyTde_@UHDz0|W$lg5B*OD-gR(JO0k3E9$tNx%(+Q9KWnkR&)wBLcCcx{o@ztnQ z7CnfK!M->vM?yLZhY4q?+$O!Cy6Bayw70J-K6c0L(ItJqGBxR~XPI2VIn3mTQ3%dW znY@l{OXs|8!6TL$aGhDFtlc$2WkrAlY9 z=(ji*YfJ)=Gg;NugWZywbc9!Cm!heawonVs5l%C&#pf%`vRA&!bBW0(a-f$9BXYDB zJ4LRxn3L?8sIA1+srk6HG9RsK)A!&1Kz#f&pNyS{4|=P(s_fD6xv985Jr&E;ubjv2 zyLZH{y?bn*%r7zk1U(vJRq}jsL5}|9RLqP|##(h%xK`4R4f0C*yZfW7)M*<7UcGSb zdR)2we%cXWuHuz@bF-QzFlz`Om(;ppq{iLUl?aQ5-yJQFWv_bGAc4B)8H z?T;NK^pZFKg_7olBF&dkSmzxQ06eE(`}$hK#t-8~OK>Q#{EE?$YnxoP846fe+y z)uwWmm2^Q)R8jR`!g*9V*lreeVLnwox)V!0BZ;g06{fW$|7krgjZZ~mxi{t%c64D* z4t#Ii_pvA9z@aD3fB4QBAF>4(l{me)%)) zK6f~2H>I{_=4-m1=qTZwx{dYac>DF&HD%A;+^s>w|B`OE10K|^+Ln)5!=BsUGkPyGRS+{ov#^CXS5}X z1nz+KqVM%VkQ~A-u*qH@4vKnXkd%%cD*<*vmemaa_a@NCEPrUrFUct4^L{fOY)n4k z&v5lXi|jT!?fh9ZNt{gtUWg9j4?2SoX_Ql;{n=@#p6m6i?{pgAxH}na)a)COcT6T5 zApf@c3?QTUZJIjZN5V<>6OI+``G9i{`J-{<&rOi(2iZ;7dg6wJl|b)r{LUAF@J?Z} z=L}4L6H%P!JcX;6nDV6O6n;bcF9+WHlfWzWeV5s5CYk`c-;h_jCv@uQ$UyPIRGv%r zthNO1_}>b~mWBq?U7&CE{s#X|_yD9^C#k}3^|y()73y`GeTUb!(tq;|-8`4%?iNALv>S%3cFfJ5xH~0GdE$zmw+)+=NdKOyH#-r4Hn0gs?wTItC`_HHydirnDrD z$S8fH{b&50 z;XO4otvVcyaoN4BDD3R+@L(_tVmJc9tMNobcI15X-Q#iPn)JKc$KGw*WBcCyQEDm2 zv#&fKzxU@~x9zh-2sf$xZkH{MF<30e&VjzD$e&pe3>*|iuU~i0G8H?{c{X+cqzyG6{zjQe< zQn@QSdIy75>n#PvlfTB!R`eydHm!<>L}$^vCYy70Wl6l#7%j55Sl!-B8&!OE!~*9< z*^{$V<1sTfqHrr}gB>xjZ7AM*?^K+=a4{wpYjOGdRP4R|-uSz}@+~y^N{0p%rn{9BoD|$Ny;xE7X)%dsn;=eNusSA$F z>g(%`viOq8E-kWw3+=_Y9@wuV^GC@ zD$MD2KpI@(TYbYog_=3Nbxn4cN~Ldkm^@$WDSb}?X_KVY4gg+Q#s6!X3O5r0_tBlghg_etw_hmhjDRl|RmNP6zTy z>1X(I=jSY4JTE;HWmWCOlUK6uIk>4yj->Yq2L=&SKB-@X{!ez4O?Ik7@kjTnJFdzn z-6Xt>pVOu8xu!|c>l@W%H-KA43g0_$y3en?BM5y`=vLBRQw5L?{WrGTb_Fm8D?nAKw{w-+627Kd?vsJm=I)*2qu4 zdTlJ;c>T?I>U-ahnRBv9q?cKQyCNG##*YWYiySl5l#buSc4!d|)+8_1mPJ!|1U^+^ z?b}r4M+>*cj-upN8OC&&)#{RNtHG{;Jzbr#B%NNJn80z4kj_5%v%eUB@1OmXxH38( zU;XM=QKXV>G8|KN|~+3$Zhsu!<_XtNMG z8oP?IQI*X?UsV-qX29;uvO$as0G_HJ7O1dG;088{?267^JEPp&EBjG4r07{H_gDt8 z(-J#0EJ?>>%cNrn#hao5{ZFj4$IfBbaQKUQg~u5Gf-ZU6*Vc=War}SkwWz~=D2A9`{b_TRZ^cz2e z!r17it`e@aN9xsak%QUTD&Pf1&8mo|bS&l0eTSm9yci9&Ic1Y<#uc@XRr$Uw@M&bM zS2Cipp%9JBb5Uqmj^2jp=-Zfx&ps%-?@(VXO`eMK`W0car0k@ZB$F6llvFpbk|uiB zvOstu8@&rwHnw6ps`4_xjeSJ>WJgdYn7R$?J!8<&-}=Ec5hWp_@LUydtgT_2Dvb0# zJB`ySDJ%2^99Fd8N`DLsC=zXxZd~73u$@NU&c{)e9(EE}z2Hh+AdUz_T25EM#7=;| zT!({iRL^-h*CBbS{&ShOk@ewvk}BoHr?01*XvcHHi8@c;99Nv>F#-_f2!Q|gAKiUa zl=1`sJA};=tZhXb0SE=~Au_X7NEC2Jstm4qM@?1^8R8fgsZw&JNUK!>q_s_zg5Xs? 
zY+>Fa$w(gFa!Uyio{C`bKjkGv1*9RWm_Tg7kQ=R(L83I>D~aB(S^YpW~+M}6ok2e zE&W(+)=XIuKnObtD&sEWbJg3T=zc{dR=E5q1Fv90xCs)y65p%~H=l`tW5#<#pm?QG z>9-Z-u(8@-%4)A1zWeUEHx3`UO^$Y3a7@63i&v9^mGWU?!o<{^x8P^x)#zy2x|daM z3_z?S0O&1O`t9wVR`{&$L*X&2zQ8uoo!znLzyUd`yJf7G?D#cgj#7h!0|Q&I-&HBK z=bwJoM-I>tixT1e85H}a3qHq>PIdBNcLH_**tTtFEJ$&kI{SXSbK<>NtZhW!K)3k0 zDURH7DE`?${jcNBd+)L0zI^GbjQP}IYyMX0JWwph7+liKDZW#u@Pw`qLVy7X=TV_t z71V5Ny7H{L0N`Kqq5$EMWD;~w9e3_hRagrvC6H!L>3!Dzb3;7+pwH$rJm_c+$31n8 zQKl4}S4z|0g$`l#(a-)QJ|!RCdlwM+0q0F0`6+n{w!K*x+ zPWH{S>jdc=@lqbNGk(;&zlks1_LZk+nyK7=?|h*R*`Pl8mT1L~F#O2N_vEQ$FpBEm z2#Xvey!a)(2PYJ2N`r|Jbeb_}!Ml7y1T)xjJb8};lZOXZ*S1cGNQ1iEuj;#x4xv1{ zhnM_qgVh8w8kA;=CquzkC+GJ7Y49G$51LWuyn}`=i^`Yk4NlP&-r}3$Y{r8Q3hKJh zSE_~jGNcTlSRT*M!l-$4BIPycirI6M z>&9{vRMwB&a%&98kY1afiK3W|b0nMP0GcnPqbyHZfu%M9`{le1UmGVF1Vp@-r(G^Z8X&^v8)rf?Xg z%8V67@F(blHN{IVp+82Yy=eLpkXUNm15k1??Yyr7e)mv%6cO+yF!+FDaA71Fk)(~5U_aw;a~ z=3-9ey+`)My${|OyAB_aLtKiPs`PuY7^8F3KCib`@tCa8)zuT_u6FBWXv~BV&c(;x zVw?SinVG0e&BXkSXt}fyjjB6r0_fi*yP_%_t1E1$pZI}}_=SaKZygA(=}f}3ZJ;N@ zsy!h;;qeCL2mdu~V56$-vvN??3Xci{suw9H`trk`6-^Z$+qRkd6t9caF)&b$y>f!L zdCSmF;lCkYo}-^=i)x@MOR2M<^peA~B0Sp)qN!xXg7||$hnb4Xrn)P2bj8)HSK~X+ zJQFi>E77NT#jcKc;QmMA>c~i(JbTXc=h!c3Q5K&RqWemr&_I2`Z3cQ&p~PDiI5{2|G- z!|~)NKcTcZu0%3MZHIWP zy!}?pPRc)6UWneF4kf!0m(HJ$Kl#HyigWM3?R?6GE|GH4Q{-rL^Lx(0xFv1U%Cg};ebpKA0QkW znk29EK2s}tpJWHHj)UT_2`o42DH9I;O}(TyDPhSMU7%wM4^4H+I}Wq-(09-;@9^Ek zAf@cIx8RN4l0a7&U03w4{YB9_{|psKi#DI?GJCJY5>BK};{bQ?ltYjF%)$V+_moEp zD+>UgC12}^FW^y`@A>7;a_k0Fb1 zdJCXEkTfN*8E-4ppJM2Q{Ixk>n^*XBK4}a<2 z@4-IF^BjlN0*RN?J*W4L<#HPEiLNOo-)YZTUF3MW9H}n!QRSt-Aw8Rhd3gO?9@{f| zr{JJK+ou^z?6_svuhEsf_!rp`00!qldb(ATl-L9NSy{mL22!WjhB~>kcfy9!7$;be zMIQLP5C@&T@lGP$$83@+UTnE9kl~Js>(;nc2hqoisWg~6d?(MEQ zvW7u?Ce|~MiC&tJ?NpU6=IDUi@4nmi?cDNubPjBfisbIak+B#Vn~Wvdr5y3c?o!x+ zOcJmxZ7Il(lX~B?n5qk)@>69sAv0B{l(Ys!rjnwR9!yVnsHHbS(oPsm}xbNPSQ%_`BGMs$0EJeFn-` zx(j=p0pp@*+27w2hYlQw{kwNXf8U^OUOJN7no2QpZQQmH_5_3fbfVW*7Oe;I8&;%K zTIgt(Z&7B^iSoE6I$yhXH5RHf(cYpuAKo54-Gkn3;E(_08}Z_)cS6>kCquTgs!_CP z6;0YptwhFvSdCkkpn{wqxFk*YbkF=Y*46rXJ ze+QXCUl{M)niNbl0GezBg375X{OEjlG6)cNimrF_s;4#S|Kh~j+)7JOm zWx%!~I2cFe#ek0Li*t0b?|6sZNPS}SdpB2=6}u=M+0`0_VxLTS(Npi%OND6b?ekF< zmuJV~&%XMdxS~25+%^!~cOHnHdv-@>M^ChgAF675Rkg$Q^%m37KAqaiT5ZmKh-yM1OBb+;wDM{Me@-ksn+Ze}!%Tr)3K_$dB2(Z-??} zj$hNe7kqgtufdSkIS!DJtBWT`Q*pr$A0X`;_z*^$S2>dG-Vf+#ftoj zH4h|VlMqjyo7y;iv8f*hJZ)D=j^eu!f-~pWHWHs=>-wA86uh%8K|}ol#KXU^q?LT8 z?D>GO^_bq#7T`zpp}(^+I()7=Q}lZNfMtP7ozj7RdZ}x;Y4~)X{@ZK!6(`aaq2(O1$&w}dAPgR+MoNBlm#^KicQto*6=xgd41H&D?b)GAx^-Md*_(h z4|)EuGkGF^oF@H{rf;H0o=={BZpY;H>xsch-yM(Oq?`N=UiBe}X5U3(73r+(?@W7mOOV`6kX&PXP| z{K_kI1dIOVICx}t3~V2aJMX?Tw)gjXH<{~K&c`YF_9IuX`Pf5jph6SlQkB$y$kEiQ z&<4>M+t>9d{Mt)J>5XC(yJSOPo5)sd>Flr#w#LFM;kc&q&Ck*QHKRIQ742!?>#|>2 zFe{y|u-;~0|DXw5!>hz*Rqysa?E4X(&NXhxYF zR(0)f)MMs<^6XNHmhRVs9?|e>!~g4_+;_C8x%oj10~>oyG`vEzwqdSFpy|C6RRPIN zWiapx#_)N80t+q^D=7p^Jl?BPaRSmLfNYiq4M*==TUBY!lgT+kRB@;v7tS49n~)cl zc*d1y0%T_pxg*JpsxVt70C3%eEN#W}jsdz^-;e_=g#go1V$KBgHZYAc@*6v%x>kto z+YiaX*c;;`<1T+QGpz-5;>AW=YFjz+u@-4ab%x@qXBvJ+M;Z*kYCgb>k;UJj+=PcO;2@%Rd92l9I+z50U5Dl6i4i}F7z(8LN4)n(0(6G0i<$T&hhYqP28&R2C zjJf&wn47IeWvL<>G~2PD+zehFJ^FH7ymHN}-S!(~d6ldQ%6`k$kp_=HABU@XH zQ>V^&yPw{!ZV$k9vb78y6}|mRdmzr7e?MM+^^F)`tcnBIV|eF~71ifI|JnGb|Mb6d zo}Bw~{rYv&&}~w5Vt|!_z$8!*4z#~y5HnK^rDR#B_6W};UP$dCDZ3PB6L?DPDM1Pk z-%vNyF?5B$)OwSEZA*#+VdHQlr~C-PyVOqkuAk#`)pycB4$vH6#f|Kq4#G^`{VL?r`_H2Laugin5kA4sJng}Z(!AD$ELJg0X_Rsfst66E*f z!)n&_l)}41+Ts1O9=4vpu;L>$A#%m5P@Rn-^|FQ9ZXt@7SNQTD;($DeA zp^je;S-#K#_}`5GenEtdMAa`78cfRV9WO0(^7}4-LJ6ELkn)C;46~3h72VTnK z*W@HHaL 
zNS^dp8n{L0Fj0X#DF|o{4Ewb+>uyrPe-Pd+u0V2S)Jlcg+WE)a8YmM zjUfjOeTKSh4_0D1FnEf-7)VLJWHJhaPIcg|mQ)Uohhf%jo*s~{I654i!yv942zH&3 zK3u3&f-PP-?}?R0$N}^jTh12cXmLDFxrM>xq9p2iw8@ER5C%GqA_LGBW`A?!k;<%p$ZPZ-U_+REq@#;8q0>ES#&J?a1`B9Ajp~%XTerR%yE(BGW=x(7}vshp*U<4hM8`mK_xqVs>&S z#;%Qu|7TQ}4Ct;nP1~;Um}-aqBxnw;;d{#m*H2neS6xn9zSBvL16f;?Ers1Cy~jCi zOrYYsl1m~;RRG%4+`w2HKO&1I_aZjo1mP{U`a0ecC1-ht@UOHO6H#cB)M6#ctcr$gh^7WS5#3yM)Y& zkCzr?%Ly6s-wNPIXBIMQO%5LInF$H-ApVDWQ9O_bMh4O^VbE(gi8WfZ?%_SB$P+BX zeMWPo!&dJz^k#Wl&^f69bU=I9c1z^&{~NHy8GYf#Sg@ zjZDPQBiMUzgdez(4rym{5qXCl?OT6Jd820?0@{JMa5Shq>3ddAl_w3x^GmoUen}HU z&<)788)&2Oezp&oQ<8W3rfVMN25^1kVYk9f;RLQ{1}_9o3#boZ=hu{l&yV(lpPrqxei9+sGmOErY7aRDZA@Fg zz5(i!7gYAm?}-k1c@r(SLZU0_gM&LKGa8c~`Vv$a%wdo#pOCN>|CA;=Q;5SS84rm*@p>?=Tt7NgK|t;f=s?_d z+ikID`|jxM>xgBQb82!rs+DTY%APrL$8B*~zI>&&79E2-f_1zXu3V1u*T*DtIS!{S zcI@62ooq4R*5S@rJw=P@@v5{(bnD|n}n5NNw;#00h0`u&@*~h zm7T$&lR4Q69U}A{yZ6P%9(gc!4iClLOj^Ot?itN8G#IGZ(3OGnj(%23m)xJiw&mzE zFSL=ZX1`T!0v1{o1?W(%R9WH16QI`y+g5W+cr%3cpPMjYjlw|}D`AS~(A&ONayYyV)H0;E1V z7>f9W_v`Gcf~9K)#O25X!V{(;TL9s786lFPPM@shjPu5DPdIQCfG4lWUu+OB70c~W z?&|kW9LAqU0?6w1(V@k{MhvR`4vLprMI%=8As@lDy`>l(z1!lpTMoyb0|!aeurW909YCVB zTTkmTIXx4<{qKH1zVW>u#Cx(or`LL-Yxk}3<)8hTm{?ee?|<*HB%Khx)b=$u9+{Yh@#~h6xapch9_}QQT+i~|@xA}bn{7WX|rir*| zvWeQKd>4(H-#rCIqJ!KAE8c)d z)}{Z+j~^>8SLMqfpXqM|2M;<(o)MPG%W1Jjk{JolBqw-hn;D-KnNK=dA4N((^)s_j zgfQ&LNtm=#0sWiF*Jl9;_xuQtET)_(PjDt(`ox*Ep30DZ;Knu6yXZ(=0r$!1J+xAZ zd6WINK)u~&*E(#r+y5x|K>B&Q`89_;pPTbb_Sg^o{@3EqH4g-cu;K@I96Lz%EwiB* z?*Q_CVE^>b^2U5bv?SNeSNyW?`48aA0OxwPpumd-~>x*8@5a1s%0en$c`#)y3g_Fz4e;nbI(G-w$`*u z50lLZlkbL&PwqY{)f;1Rr9wj#z(+J%qI+m|BaXlPN}M}?I-Y&@>3IFsW6@Ubj=LXy zG=Bc?|AXis+!lZGCtr(W$4|vWkKP|oJa)f01ANrpRvX;!aPqx#@$yqI$9r!d_X_^j zzMfcKlN>Iz$A7R6)bDkYm#kh%kZK5x%8v` zV(Zb;n0VJ(Ta4vV3CZb+h|0SWgLlQ>{6GCy@$rv78oh%9@xn`Q#Vf~NkE1WY9x*Z& z(KisK{=T^5)pf07!>m@S;DO*OR^O2?5FneijvFhbfNIwY(Cqk|t@H&%`Ux z6x`4$3Z8&SkuGlR?LH8U>fImha^w&C)(q)`d=!*{u zir(hQwmbG65uN6}y+1mLvaiS{l2kXo?3&RaUe!aitj$N8=r-6q6MZojpL=*;Jb1V- zmL^U{+v>$=Yee^}?o?hZ9Mc&R>Bm?o@}Byne``(j%4IlorOe<#d zAnJj8&j89Q1p(5gf~L;-R9XC5AgC&ZjyK{kkwBjXDP0!;W0gWE92gOnxULsZ07zjW z1Vl8!GRDY>w+WeHt~?L~az)js$p_<4zia}4EJ@{8RwHYP;cbVaXWRam7@aU8uRCJ@fV2tF{c(uO- zC-se>;T_H&4b+2|lu_?6t}q-~na6qX#82C$V7z6k3slY)92hfm>M(>4m^|VM7{ZUq=8wFTP1He3TB94P`24FpmYWk`x5HzaNB#%xST3eD_{%tTC#MvBpl6eEd4k)lXZ zBta4&AsU#5Yc#si4ru?n`@MeNm-p_gs{Q}|dCyn%UIP+$cOo`oPu2bIy~pIqlP6DR z9($RTTbNrAomXOJW-88}J@4;gb0puE6wjyx=)r>reO%tE==_;PAY26@0k14c5vylsxhgKK7GT!7!v8l>nEB$uK6P#Pbf@ z0nhPh3*zg17mB)|Pl&^FH(XF=`0FiT>;;uBkjo#>Ga6{=x!?(z;UcDCo;kSM>o_+cW zwS}(WoA2E+;F|S*57_Y?u6ysjS29|9OtPKthd%b?6PB%i`KSI;{Ll}5P;%?Vc=B6M z#>I;lCBobBp7*{pjvm@4Tj8jum0#`0N3cn*BWL+c0Qb@fL4Q*3=s)CT+8@2qWWpIb ziq<$(BJX9O0ewcj!v`DEw?fnJCVQN&bKj z^~g8R-HxP-dzg4o-Pe>a6QlLTxp?0__r!s^bkXc=G~CYDWK5^r2NCkAK^yX`+5^@N zjw7gOX1K&B`HA+WtOYBL(B4qzSLH!}v(j)wSX<=u3v@}+--{YmCKfZ)RAyv?YFtS5_?X?<`Q=zs z=X3?vK0Qa&Nrz{qK#w(cxI2!=M6BEvduOU`HBU zz7hRHEI{EX(d1Xtpp9&!Lzt%{^okHjeRybbp2Zb&!eL&|)9#FKh!;5CY-w@Le2rtW z!Mc+{EVV;U#GnI&Ta`V*0>I^Y2G+zcET~X^oVEe)QPb#9r zlh9l0JgN^KS1e?p&^kn}a z^@Fm7?~(ta-io}i&DaUQB>Y=r`{J_rbQaplru)z*ekA&aN8^Q8&cwXzGLEQi4GhPf z_uL!rc<5bm_~=0|KH>x_;4n6Xo|P(hp6tAV&VHu0R9;*|{K#U#x%pY|OssQ6^ZaC7 zy!Lv${NxK!m#xkqcUv9eKl)F965oF8OHNm#ZK+-F>g$iDI?vFKx}tuOi@V2$Vsv;c zdKo-Xy-iF^#q|7y>s3W`+wxtF3Hwy6m0 z*nKIrN$Vy=gZ$-fK60JQkqvS2PY0w5h5$36Z+$M{MA=e#ktbDh=S`g}g5UF=@X?uQ z3Y_~y#G@a>`%U>}TJl4-Aj-%M7_>n8Rql_HCX*{1a4Mto#tExJvzuJS=Xy%=sP2JZ zMa?o&o~j+lQC#G$-)*m^e0c--lDEsWoqx*W$}zLJ(^cgs{>`qGW80PdamBvs*nh~p zolbe1e$@g1aC12%gR@*!eq{jwbX7dtm!zxeX&c{yPq76{`piFR=Es}TlOGQI(ELbw z 
zFZ_T*CF9EC7ScPMd@4RD@!j4^ec9d?Z%_`hs`AhNfFBhXENHKIB>jMy@UooFPxR>O z&fVh3v76Fo6hYXw%WrKD#UE@bPTmV0I+PU*;jceJNI^ba&;TrPX{Qqlzwjql`uYac zpTY)4pUIw+&5ax^^#nfb;NqR}2@j?TUp6`vUR0pc>KR;>20MJG!*&=N9PuKz2OfAJ z?!WKN-dS6#^~aKIwh7t31A4#Xp8KO;afbFD@WOy8*^_hgOFj{R&qdMiAKAAjdYc1j zgGxu0orgW`76Z>KgJ|Wna{czjf z-SO`G?~joI*_P5PwuwYbXw+!YudPR2HqC(aE#Jy-G^Ago50#DWIrsxxZbM4n6K(oq zEX#UC6o5D;o7S3@p6k}qqh@O+v6SD7j*|t7>?5OS5 zZ+`3X`2B~!8jIq#J;JR|eV%^7=QBh7Os0r7{p=oB_`=+5j0_EVcVcZr_L&R;YyoI% z{crwmdtm@&ag$TKXHg0~$3&BP556RJX=uWGyIreIE6(=fqe_l;kB@^G&nru#jq>|sgDlEOQ%o8g8C7Mjve<#m;L(=Mng8} z9E)&fIgN8%d_bS4srFv?xZfsyAAxO3J`4{I#Qp?q!mIu-$5ZLNQLr zuKbA~{h@gK+jn{HYwF5OeCIn)$I8r{Y++8i7>UK{srX0#;P1qhGw0*Bg9nu6L_GBF zcf^nV#E-|}!v|f5i|WT93$PD40hPu1$UgeTr7ucbp`MY2@M|vk%Ebc6(p4GBgfB_H zFs8*P1>8?ozjSFnPxb&7-_l-PR9|l?C*tV?EG;d>ylhzPJMC`=;NHpHbyswKr&W6B_te zZ4@UiQ`3T`m4{uS$4x2aN+j~zi6iz4VLR~$x1Dizx?9U#$(2t0ZTa=R9qG7k#jiNw z;_`pL@LOERp1LK!9d^^UD|zDtABCoVsg#Z4g16q0g_g0BVU_(LNo;-MaSX{a>u|=y zBydrjs zc;^Gc37yS2Njq+gPsf>)m*eYS{?nK`cP_RDnv#1eoOHO5QJGZ^#$%*^u``ebY!E^> zaS{M(np#dd&!|4BLSCEb8eI?QQQZ&4s8OYcmtEu`tP3sUj|1irPJQ z$It$)zZv^v-*h#{d_w7Subhh4UOyc(lBL)Ieb^w4p4cP1hQ)s@Duphb-fTOHzO!(l z-Yh5E>mHk9?8NxX-pL)_$i;)eDhf0r?S2a4_*V*Egl>6vl+M;vURW2K z^zQ|S@m@3l6bCmpV_;?49Us2^UUY-5m#y-YH!%8MXBV+nvy3U2SY#wAfOctckz7nFA0ajjhF~ zOAqbsnTxTl>+y;A9gBC~J{Bv}ugG4$7|pI~0RW9knS(duevDC}bFhn2|1s{T`h~90 zS3Zmdrv0Ou5Xv(;vWsnW!2*ah(_CmHq1z zq7;{fzLq5w4Aa7Rf=8CwJg36>bV({zb(?uJN9{K9RM#*4kMDa@ksoBmijv7QkNEUp zx)3Ub%|g*(5P<+9q%k_$GE~TV_^f155&zpob9o|({KF5HHKsmTAB2y2m!UyF7cf#~czOSx| zxdk)nm-R;njt0TE_X+Egf@2c9-ZN;y(v(u7UN$8_S~Omzt24k`8Gv+l`Q9x9)O1eC zlTx!##_&|2wv0lFc;O|Q(r`rs29{Hps2){^HgE*Iu8xHTs1z!Su&-}O3bGZA`hfiF zqKjyvc&ysu2snS*K>W6{CPQE8pgpUw+8fI;zc{CQ6I?Y22BGH`IBt18&YpWc9{%#z zlx8)?q%ihyazJ0*D~b5l9Fw>$b@0L8yTDX_Dh5FWPP@i-s}plu-1orSyj!1@`>aHT zj;+1{(Pmxvb;abw^|*HBQatzEbJ3?T1QnAJQ`6&d{rc6IxG^riN0q>18*$rhx5?-~ ztj_Oh%+9UHSHAH`T%DhH=SN!76IZ+U49742!r%1wWY1kXZ-v250NPr=I!Ha@e+CTN z${%5aaB=-1NL3z={6Zll|Em%(rr=Mdcl%P6WYfsDZGot}+pppw2x$}Wl@$vds=F7J zATh;Z7-gyGgRCdZ1Lq_t6ow>k+j|~qxA(v{&yT~#4CG*z17~67B>EUBm zdxApwCO8Ay)CaJ5FB4|un|>yK2Y(Dq?^eY+kUWu3##c0_r*p15xcFYt$44?*fgl?o ziul*X&HB9cg1)}P541~+HqrrCHxC&2l^?l|fi7n`CNJ{Ug?uUB|1q!Ns`%x|9OuJ( zf#(|7hEYx;5hCd?J%Z~T#U~i9j|z_QbXkz2dR7N0%T-*;cP_&>A7`H6yVE7y#CMzj zOZ-ZwC_N5&$9n-$ffPMxOL%5L8Nl)CLdTcC=vLZArbq9ISHM&Fd!VQ)UKv1w4~wIH zyImMgMs$bY0|43H!Lu&ogvrN}FKv>Eb^LmJS}yPKNPi5-Xx=lnD{f5Qh|hiQ_v5p_ z_j}%v1BTMFG1NcglT|+ci64%B$+@~ZAiDB`Ex}vF0 z$@|~*=1^BCRwpK+F1F}WUCQpuK{)9GIE16a;2GW0RSQ}m7_$C)GYC^-)c5t3!gkL^s2&!pL-CQk8*X zY&RSW?^yH#&jVwFfi~)_sd)4nLO#ElZ2y{iTIiS?2h6`OmYEP&ODH*(LF~ z_#9coNfUfimmQ?oDnn{xbUv^jv-6`6+1d;i@crC@#(;R50YB<=P4z1{;_Ez88q=rd z{P`pfZA+9ld|$W0eDYAQ?(D|v5xhS1pJc_b>X&byHf%7W`*ewc!HZK@H>5w? zl6f0SFXF{|cQY2H6Rypw9QD5V=*K@Ey~D%t%IjBRVt&cTKi~KEx5nN>2jZ^#-W+d# z*ZnauwJckZMGmVTWOnCF{EmF_IJfEx{fBKyonim7+mFEm1_#=*b&xTWS1!l59{EoXirpQPjIgW?LF3GT5O~w>D{ydXU}VQ%lfa&`$B);*y*yo0HHm zE#G+W5&deSBjqm6G|-i8JawobT5fd0LMN^&`lxFX}0s+O_wx=_+{m`;JX! 
z9}|%0OrJEdx8xG~-+0PpA2RVJpF%$z_tZ<|TlrC6{)8MZ)3JjHw@ssT1r9Qkm|jed zv;9K;D2UVrxzqi7I43C~v~c}OR-|8rx9w*|1jkOH^;m*Cbl2THKziKH^xXBnBfTB( z^i_+4|0;e@S`l8skt=jfSLmGiyRIp7WiJ;0f|vD6GMt}pf@^=;8uSuA#xwivw!iJN z{*tEPl6iLUClA*_DSvU>`QXaJ7GT*1CLOSxmblyWgH#=UbiUb+I#tcRf>r3j1^~lG zpX<*4AWfK=ulDAsh*0>){c+q9%G=PWR{thqFU7EqhVG#gSY)7k)6LQvlnnC~YNDPh)#_rvF zZKrX{6LrRkjzn+tGtrUsWSkD&#}3K>bwG;0IyM_NmnsB)vAqXD8EKXS7I^a+8ytMZ zggG#Qu{58yqn|n1P<8}15RB;Y9pT9&cj_8{RbA{Qq6^)y%UfH2bF)+w3@%cFY z`k8p`?CY`tHsYQWcf{@YzQtpYd>{Fje(6`@m6K?ilI(xC_RQuJrhkEDa1lqcUS zf63`e962p5)jZv;IuXuk!>9d?e>ug}@v1@PDskT226UIbRvdV2%LhG{Bz6U%ZD&2y zllZznjjhUgOsYrj)=#Q;>YjF6qrZebNa4hy*fiLd%>mh%{VmCpS`6+Uj1`q*S$5F$ z%zR8v%|+knV2qCLa({@VeCBF)X371(M(zcI2Ww)%CKT%nJ|uu<3IOtcr(7i5A;{aP8O(ZvZ~;t|FMSG(1&8$&)F>g?=R z{Oac)i9dbpsd(?le=Lqa@UD3HE04sDGuNV9Ha$FPIpYOXjL#yEQ>JdU)wUSK*ZZ3K zMoiqW0N@}e6buc=r~dp8#t(n;xHq&ePp`yt&p#6@>L2VGQ$PFQ-ne@DLj22r_K)Mm z=N=agTJf_#^H<~V{=L5+`wkw6mBnc|y*``(0RQw!L_t*9mu=BxE@oyIJkd2J`7tv! z>j`-#o9K&Sced5vr#*4ue9GSn3tH)~sUN(s9J6yvv8?-&XzYzyipv--3v5;-x5V^L zn-Ujyi~cV6n|No3vk=(n{`2~8+kHR(|FiP{|5bW9;=aK@+nf2v4K<&A*Q-qqzcWr` zUw#xo{~3>>Ws58L5w;W8|8MXI$C6L-)So>)`4SfxC33+Hzk<(y$uav&+yc0y`?IFM zwfx2X8Po4X9VZ!w43%cg_o|%8Vam1DtBBMa7cv69TRLU{Lsm;?c*2eGVlL<*f5u3> zi9-67#p%f1jm;kGqWXaD>J#_(H)CYim`|AFvz*Ax97k$-*ZbhWNDNC}4!3eXV@SGw zes&_xou7>V^?&`};^de9Bx=X+k5B#V--u6s`bXoH)7Rp2pZ!m-n_N{A@eE^2I-mr@!_U;o2iP-;Sw;Zp#h4CF5HCLy|$OvKu5HRVGhh zN|}H?S>M-??vTwPeZ*oz@?iHppP7Pg z=yd4IdJnOC!cyQtzlH^Mz-NYxjSL4QbJ~@;vFdDg zx1qn{>V10wfarqVjqxJ75Px+4-ni}99Wg&6g(sdsU$T*>4gU1!n5z)~@OcD;TSIhi zY_7zf+Dz<@$@s*(kHv#`j>PiRtI^uH9JS2_)up1UY-OH18rL)~7Ix~*z`Q{zGLAfYVo^V62g!w+^AytnG@TXg`sWx-N^`^p|uX`|?3QJqe zZKS4}QoD8EqbtWP!3TSpte_iXpRNz3A*}Es&siVTOQth^;D)TI>W3R)rBCd1;!z=} zFzCQ|5Ar1CU!e~Uc%IV||6scr59&xZ3-rRg#dZ~$p6dE{|Lj9g)@t1kI*~30Bx@oZ zgLd{)88P&5;KGsT6kH^6hNS}vL49{GPE{ZRBs`IT$uT`Qglzc?0EQ)wZ|Wwos6>p= zY>1@;%d>KJ81yL%07}Qu-xnc{h{Agki!`K#xE1aWOGjrH8Bd4Y9qu;MvdUA!zPpPB z0FIyVwafZ+qUMv8>aU>l~$u4;6ii>wTS#1#*+8GZuI2JjG1P)(WrqQ!IjY%4m6gx0C zqNR>Tn+Y!oI>V8|VWG-udp7RGkA8kVy zc>sg@K)B&32W{VV1md(0p6Ot6sSSu?Zl}pl#3h#eX;;LNN47t>Q}`*LPQ5!m33386 zjV}BUn6$+doMO@{Ic+BK%y=miXn!d<3mz3bI?xow>B%$m#tS%Q0GVTNY|t1lg_5Vz z^9~J2OIrt~)9a#@m-jLOZKWc)(bbbeUMdVQewMG}WEqn#+;JRHjOu*!-;;5dN#!>^K?5Q})uUoliBe}15oC;3xOmAi_EgOnLo;o5QOm*uD& z1_4gA?WSIw1{}aC>(6yVx%FNq|8D~8e2Jg@S@@4~*k2v-BFAwKt9T?`B~Rt&yupdO zuF6x&Q1S+!j-T%389G>(sNEP=MQ`9ZPK6icz-2mMaVZJ>W4|gL29RCq2lD4mdovE^ zJ;MDXe`GV0Bg%&|kly@Q)u-wzX`lE*6TeIDAjfDIz$)dVY8gnw6~4uUVbHrS!S#|hJbZAS?u>gQU4-6f`sbA>Ev+V$t*K%C)9$AV0@{WZy z8O1WF8GOWfg{WNTTU=_doKz#iSpL^*1I~k;&$;3|C!5iu;KZaF^n{Kc^bE;<-O;B^ zRP#>#v4Wd8tl(xs7JfpH;RmjUC4I}vR>#vbcXn>ogrj%j3g=$&%aF>~SYC>E-*a~y zR7YWLay(k%8|;?4_{FP?MF-nZN)K%E!ZG=bNs@9j^D4CEzE#Ov{hI^xSx>Sp+ET*6 z<#RwR2tl8&t>gj#!`sB}Dic;I&ubzS_i}8%zqv(uRR$l&M_9G!leW+~PR*GMf0~uj zcDcg_Si#r3PuNkuQncm=08;Pp5Ig(D$hl}?jU6(Jo;2TBS&J3E-KOHQOUD!D*jDPK ztS&C)LI7o`+EgCwUf+t@B`lV$SY}ZOcCYM&b>TR>ybw3$=3~ak#0lPDJ>K!YhvLM& z_o#E+8*8`SX z#+p5G`1t;K;NEw{{NjQ->`%*PneuKdYJ%gLSC?fwif-_K6WJ+#9j=S7M56)OGQ*<1 zI796@7c9Xm?qIL3MYB(BRPAL|dGmeQu4Z4XiPl%A#hXJT{!Bx2U?g67{X)#EgTJn_ zwq#!&IdMGhyzlNfE_pP%dyf}{)avT=iN|cS?y#45N3VNARCz;d7HqJqfs+gvXkB8# zsp{fSKL3Yt^}-d&PSv4w_p{&rc6{gA$5qyJvM5EtF7JB9zDDMg$1q0@kMx_?OKMlA zUp*VEsw4Q1gC|XQy@6NOg@Ebe&gF3?!4mrzUufICDSMnYqk@geq71_v% z6W+~H)ki5lyZ}TdfYTv^3C#2*zs>|c98$qB={tm|@~`NVWxV7GD)KD#(a9e@ znE0lBx{Xh|Dt?C^6`amE8MtXNX%3t>`+5awAMZb!*?Cf39zu3l1)FJO_OB_qt zUGzKhoA?y~1~S2d1JNj@B-n)r9e!9$1Q&wco#@$R=j7*ZB7H6xuSJCs4qcRut`+;;3}tSG*? 
zG0xu@kMmcq#gbTgc-O8N9osAWMiN>Dud^Tw+e4+;SYC~O!D^`rrzfvPduA^73`rO6 z8i`%4-Z=T4C*uph`sRCG-u-dn(7srho|K)k5>1t7ePvd*pXyZQLDyod zV*_vsL0k1QkL{v*T2&iZk`C>XZNcXg*4EczsMUzQBV%#rkt6=5KF8HEIk~tv8_lM8 zK{{tmGMW<<`R@8)|DaFz>K~Log&#b4h`o({0pD+Vu_oh6=pQJ@fI0R{>PYn^gd4hn zy239{453Zw89syw&>iCIwsbl1%9tPT@EHpU&0$hZ)G&O)B3y9t#vjqQcepnOdk5nD z<=5ie>*r!Yw#%^e{-Gm>V)y=o;*myt>ucYN&wu3+!J-vy#xQodAPzQqqb@$_*Lz(s z>&m;u!V~cu>&pAppTV|*gSxkpCky{uZ^6H6KlsDz_(P4tOYk8V{?rq9%0oHOiP;{B z3wx$~vPB@cex@A>ryPUn@re)A17J2Pc~%wxl)TabS8ajz0FG5UT;e8YS*mz??JP>5 zq(aWB79a~vFUS1S(p2TEMnY}e~;QWJk}g+ zMz8o7*q5(e_3l%Dw_p8+JMX(EjvhTKeo{HbZ)2?iwU(%anG#vD`#W(?uI`xv8jIL=YIb; z;*WmoH)8jmJ!+rL_|&I^p+ipKXMfFWz zmo}NZO^eQxGY`mB)$h8ttMqiol?a6=%AG!}TC3V@@@>iibRm3E_!xZbiriH`j#uf= zjGMgG;l-O==I1yahhNGdF56GtmHkk;jvmf8UHQCi*Jb{hUilUy_m=#OE6?s6E3VTE zmh@JpD|lzR0*5sCm-Mvbd@tqA@xDx}*DAb2uN|(m;}SO+tE5jn(>}mu`jS6*<#WNW zDqn~GRT))pDUYglEm!i(^yyddD;r8W<5rTJ23F~N@=G|GZX3TUo#?ln5BJ2YQ^s3d z$6saVnG=}M7G=K2<6eqVj=3)7wCnJJ9NvbJc^5w2cH4F4H>0LFPDA`E9pkO=vic>C zzadScpqvcMc-DljEAUgB%*S-9f9f^MvNJ%gN|OLZ?;UgmuI*kH;G<+ZbT@MH!#2}% zMz(FhvF5GTsPx}~80sI6{l`wkz4soEyHDH^NA9>QKKI$r$8Y}ff0V9NpJt#x{_a2g zAL4;`yeoe7H$NNCKmTmJ_x%sWJKpjD92b2OSW7Ew>i>7et0!NJCm;D*oPF)3h?V*1 z-nBcrtRqDL^qKnKJ&pdfPgMVmwJ1No~xfJKm zpO4w;nPBIBPj8N6VEcN4lsuVyM&HujL_<&fqlZOXPEuuYS5K`SJ&fyZ%|{cP$F>Q4 zSa&%Yt7DAU+itJvco(k9%SZg1*V4X#AL&oJUR96u%ftrJ)kN3VV~i634jmI8%=+{x zQ9c&{2mx~E-S}#&YbbO~>w-h>ySbcszK=Sgg;SidI~f2{{vdR>wGr z=9ZJR3osPDsRMAa4)g*^(Ms=bbBx=|rG161qI~tnLJn-oo;rM&L~#3tF0^RdX-cK? zBwS$*?+6dS3oh|DMF-n?zy^lo1oLv_!PtkaV8g~SzJ((XpP~sZky(02J|U|TXL-AM z<5#tDXwUewqHmwvUdeLk4=$9MaT4K%oadAh)0nubL&sGD`M1@Kd7`bPQ$}5aK%j3NtvJ2o+H+ zy!B4JBy=)LDt5hl;14(|k1In=0qiaedNoJkzb<9a-e|?(@WI%B;7!2-N>--O_?Y-+ z$5Pbtqyk)#|CwJZ&ZHp@JcyvUVrt6olMj?1I~kXkmn2Y_q;DD?9boVDSEbSU1UIh; zk^#K9v?5$Ky%MOmug4Sq9IrPtI2waP{mF=;ANhGjj12k>rGv42e3^L`2_>eljtm56 zI%J9rV>q1hU<88?E4?vHR}j`vePh)V(TH<&n+H=R)%Zp)!kE?5#937*na*EJ!o$ZT zGpM_^-QncOEmktk&o3BXnil?R;4A)sFDOr=CBYy(ndsr${S5j;+}^H+)3P!QK`}Eo z8?T){6_+nx40h9K`tb8}Z23zZP}zDX^G4!7=&IKr!WJKzVdvDE95$>rOZw ziSw5)#TOoZlreecIb072Q25r1b$7ZBAHn21@1R&NqCt>nv=YDH&amD{u_y0lu z7L(ANH^GHZRH=H{&NK~CwdhI#l=vK>Z&xWSw|CLkCq3XKz);YgBX8|W-j#DcDsLs9 z{LV9Xr>Fg=pr+o$66wVFbotFLOt@LFR-Fbtrg2JIZVBJ=Logf|295&sFWKb$$RFiYm1oC=vjVH? zs^ia9(KX|dGw`$i(%(0SLrU_YK9fY$PgOn~b))hK6Fn9>3%==-c;P4)D=_o9x~uGj zc8O12e(0t6(9d)LFTo*c;R}wDc1MeNDwq8+jzuFT0dm4vd?y$Tgn57sCweiE&3oyv zLAxxobcs9flGn&-frni&XuyNTa>T;Gh5z$=QL0XX;Z%~|=3pG!wlb2lc-TiZBBws^f%nIqcO8$hvE4B@J{f1uUXIge&IYFs{KC)wOg!|ycgN>{@Au+c zk3Z@iQG8?lz3+TSj12e328@91&9gz3wPr1~Mqs zYW6ssRl=$FM3Y7<_zo$jAW>z6^F6{ob*gd!7Imh&mC#22K~HoC_<*Z-yH{<>JOF>d zH?D8$lD49B>>ic6Q3UifRKMS&%L?2E_XfI=rh+jm9)s;E<>s|H>x}1(2NAYc} z_r#KTg8^o6?`ie=TkJ6H<=N>NpJyS!YFwTdio*5Ix7;2NedMFD@7V2eU11Z-b4_iQ ziSaI#aYJS4lO4v6C|=i|tFEou#J`FWJ+cirrmmO4SkZvRvXsZRi0Xw; zWSNGQe4+l3BiKg7TW>GrcZun+iAb~$9PJ!OTOuCcxTC%~6~H_y(!MJ$R>{m(D5z>>Bj-XCDW zMNcdWcaB9~$KLE6jCXzLBQd=1Ks@@yGtsWKVq#`0b{)DccJJR8hmRkP_kQ5rvHQRt zaX^n`+N|X#3!Kz2%?I>HU^^zFZC{F3(3Z}9TefSnQH!PZrC6MvkZimdzwcxzgF5k1fMgQ?mnReQrDf6mu*;$USw7%0%1)s{^=MfzF_Gh}p z$wnWyL|t+7Zezl3+y3;86YI87?YJ_j&7v(_;)~OJl?RI%$^?FKxfmehdLmnn_X<0? 
zf-kToA!=2>RxvW2%EV1@isO;IjDyl-y#PngUTCR&I&|2+7y6}3c-sT?)Cu|HeqLoaOS4~`&|&*cO|zA10n zg1IC(*jPAt4;v185YMcpI|H7=Lb+p~uuxq#mGnbRwv~^5RsY?Ce1d5k)qJH3eF_Yc zfd+SqAv!=w52*T{HdpqySkfe2fn6cDvazaGU+7EndVJ^|55`ab)t`>r@3}Xge(h9T zx-uKn(lhry@Rs=S4}VNHK{KvS%*E8)a-6zwIcBA6Zaa27hWCznAeW=DnLtNgVJEFF zuEoGWD+Z*uuU?{+#ylYh z9Pq>6c7tHrwMB0hF6s}vyU8XK$r>h#ajPl|yhU3D)`sj8763>SDJ&QGGT`3d=od^q zGjQ9p71NW~tEPh%5rQbz0$FEm3hnIkE189i=j zRrc%bM8xbhT^A!JFU88lcr4FM#MN^bV|jiqW+oP61%6Ta&6A?Xh-|E(u|f5nWcx48 z%LeBF>djabb8?*iYY&55y1s^v}g>SEu9D>zAUZ zZ$M?Fe+CUi7S%fo5?N@xDVfx3*{r%o)=8!uRC&f^-@pIeZ;QY9=?|FVjIUlebvB-U z@|ie#U~e3}ZBNWxn~cx=*007Fe(%>}RB3uQ>#?vr?=jV7^;OeleRS%rs^2%dtmjVr zWSLU&+phTE>N@er2hM@##nYYfCnXgh?_d@4Lb*!5$)CkfuA@)o;A7G5W=Ah(2U^zI zI~?O7c7ku+!v&5h|F@m!Vm)2)zu?%k;?q}(xNTo04-N~HLPOB%s+Il-f9&xMwFm9lt*SLrD$!9Ib=uGlkG zdo?XcN7?jV=v<9M0m<-PzDizXfaCA97}Eh?F(^Y7(eIVLUh;RjksZVXY03!xroA3? zH=0%XJL9wkK6E8c;a6bURpV7vd8tR#J@ul?~K z#Pa!zQ9FJtx`v0Ny|OABMfN`Sl=|o+dk;i!s}FHRXlO9-|RN@uEsYl;|+vBHx^2g!_fA9z6 zt`jG`Aaml{^|*TDitMy&@!F}Au{y`;Ka7E~VL%y)w9G-(>S?byU*(6Lk_+s+W1!U& zt(xqGF6odV)RVE5j=vG2eEF96`=2OwcJwiU)$m}N8@q_EYMfA7|^KLN0( zXEOH0bbRtXx5qo~9F2{+Gtt_*7F#QmVwqKyAmzL54{y?dX|Sp2e^g__EGRKfdEb&w zb-Ogq$Sk3$`eP%z`$sB~?Ha*zyiPkTPGy8v^f#zm#i7m^M~2?zT}3lLxHcI36-=KB zTWx^%f=w3F>6y81Y{Z*gp}*3pjR>sEv$=`>f?nXNI3=3u#N`(48N;OdY|G0AK?dP^ zWJ9qLjV4D}>O=MS6gm5RR70iTbg}8j3kYrVN)EO+p6dGjfBms1RTh5&fPux#pF)D1 zGG|8;4MB8*7(Glz6QeT_qfVsqLSW~FPL**r5CuW}9S|9u5QT4?qcdojHje3}QgN!V zE|^A?CcY~VdML8*z*f2*RV42nN)-bF7`pt8W~hsoqS#OGxpG{2VE9r31P~yqfh>QI z-;+8te(=FCW|wy}28PuEx$Ryxesx;Z?$%_$K^N$SL6NI61RcVZ1vp z(YG8gg<{9B6+qzwLNJKpi!KO?Wp(}(B)yrqHTs4;K_Q7F{^Uq5`ARs$Sf1>H=HdwI zVVNVMygPoy^|vlVn+X-~Y7$WB!HQQ_g|NVaN$*CzKbnIr)gR22b?KF-YwWIM#TAMg zX|Wk=>oT`jx7^@3CK`BrqgGV$YW#HxsGM*TKEjWQ@V4SZFE?`Fs~%u*CU}U)@h!k9 zh5$Tbvbeq&9N&of`FVH5>4cHyHSr8y(x_v~POgzBqBmT^_)vGdQPw%JDMbI5RsH{e2_x;k`fj>Wt8NzH;SC+_-wp-$9$7nu+0&K`F62oOfTN8PC4-YCQ7dbLwD7 zXf?7^xu$X?el&iyBMAl;W`HZoh#ljkCoGL1eAQN&OoGl;`?CRMKCGA!o}&SYj_C;F zmo}^Cj>0JM%Uw@Gr|dl1xpUrpS{+$wqYGyNmyhZ6Lh~SRHweX-phf6rsPJ}Lcu_bI zRMJ-6j4D zGFxDr4!I3121n%BX_0WnzrF(-!!Q*!%2qy|PH5&O?^5k@>6hi_8P4|!4tj=f?60)u zA^gM_7@wB=q*WU>zv-Fi?g-YTTnVEs*}tN15#AU-ltJYL9upvj%S4D9VU=Dfov)=I zuZcKCBR;(Ci96{b=dWPn#Xx4SUfh`wT$4xdYNwWu$aV5XZb?Q-vQ}`5flYdHEdxLz65t_R6m@C62ow}3$h0ppL|FJNmMN4VjzZHNQ; zF|LZASNFOeRCkt*@D;X=B8p+4b(ds5{8v}BUfDr?O4C?fiF@}Qh~vX70GRR#0IavJ zD;$=S(Owam*2| zEOKLFl(xdeJM=K$n%AU5d-9V2JTutG-4p5RWSL(T->ZQ|t1c-z(?X5~01PP7h8gI_ zfajeBNt|-TDO5f6UcqdLDEQ~eLz_M_8<`>6tDd`Mzc5h?&umIha~co}@KzUA)VEoS zHQ52U_R1m#ugaJwOYjX$=+@KSZ+m2Ub;*O(OR{M=My|WnP{&RNzHpdP{6*>WDe>{_ z(nbvI+ZA`*|Mu8-^hm5X>oKQ1IX$gS-3V_<1h=a?3p53XNzMfsj<&s2Hq%OV*CYIU z(f6`RnJ9#=s*tQF(StIH5=paCm6Ai)$3!LG7RT86B^SxCu!ar>W%Nl`;#2CDPav>p z2V=(r_^NkKA>cdpOfoMi{wj+H+6)S;xxRdIK(iKuBYWf8^-J;Wv(LusvS*lNP~SS< z@~-#9i9-kCwUcM!$)}%^ow_dnnitA&$_99D$u0qXj_+yAk2$ zqx;k*Qod1Q+1aucYif%ubmcP;-3{R*STn2bSe4AL4K(AvcfK=5_uUcSed*;`k(ihj z-hCq@vG?Gi7#bUmd*6P4y!FAiiFA@T;$P^A-HvRdonsewj@%ZHTGvS4XImD%mKJRD za>CQ}c8(sFeF~j=sn4`;k^QCJ(oR`u#DjT)cBp!`TtjT3&(P-zgT~pmvhACd zDyB&}2p{QLX@~JJ3@P@}7Fe3kFm_;A8wGm$_4(v?(gS z2A+6+lL1S)Dtqx|%Ea;s0Q|G8;9TKIxPo`W-wvxL_7gkds(Asx$um9B|UyO?U;qoy(AP{9djs0I=(nXS-ZD z`jI$K=*aENX9o$k(+mGXKhsTihU@hrHS!){lfLQ>+U<^gCG0h z=;?39{7Nk@-nbE$Z%oCqaOoc&h~4}5dxDXZ3t9uxozOygcq4#HH`uDj^-Jeut8K=e zNA^gwuEl7h9*=+ZPvbxSAHNdkUU@lUaXyah9gF?DMq}>AWYpHA7lkXk-mtIQRk`6Y z>aeEt=yH}aFwnfXvgq%tufs2@tYztFu&B%4IS2lAcw}FeZp3{4g#|a(05`WsFod{%`!gd4k)mEIna#Y*F^1`tY zo(28nPkz*q`wF62PB4fU#Um^L=n}6nIngiMc0}#MJL1%ackSQno$POU@E!4{cf3>a zltHUMVs%m1g6XxowjdcGn^iXR=KOs0$hO*;yc+XY&&JB!l~|g->`!_v&&>H~{N+Wp 
zm>lPU!0^vl0Khf#1oo4(In`O)Pd!$@Nxxn3)HagkiyW%Il)q+<%Ka%-O=_H>F^(63 z2*2kOW*DEfWUH;1p6$j^wzjW$c<%VIm{m+E#IbZ@DV~1fiTF4F@dqUpMzOmwub6H| zx_j;2r{nK_$s1zr>XXscSaO$}w{(_tCe3&<9{vw^ zbx&!1Cc#y1M}>!?bGg@xb>ZlV=c~OzJh?oGEA~x_2Zd(0dn`IZSPcfAwt= zr0y*omAwvQPGiO00iPA!@q;1k)}N4&eAp4d@lCZ|(PKM9hotGqIEYUW>Ba%wB7<5k z=M!iHH4R(GezRRPdbKjEo=15`kTS(FRvFWk)EtRRS7qLkmh_TL(cGk3(OIrNzf)0?#83XjywL8sv|PD?57f0z zE6K3i2tbhH&zr8h=mQ|m_2nojuU)4v6AJytGcFtYsj=%Cvzw?wYXk6wd=B6RpVHyo zc*@PODh>kmjWO3i)(dWiu;U15W2UphIQ-Z<{T_22>;{Qn z=h`uw@_WvTIdomu7mM&^%ykSIw@yvVuQLBTi0<*D8kcXzt3T-t@lXEakHz~Rc`SbK z_us47X4Vh>7nY93;@rIAm}$>5{^$??pK$nw7VCg z6Y6_+cujXFy7b-ZOWDE z4?QzCiVVFF#dw5Uw!@2pJR|iypM*kvd)O<@qH85{`l{|S7g544onOXq}MGf z?Esar*)B?&$racVzLerX0*C4bty3wd1|6(0&OGU45g^L3LNQQ;!HQnT8|2MNa!+gt zzf5yf7JNE3N)psqv3(DjR4;qZ4ID~JJ^(O@Ycxpzw+baqksCLSiQLtpZ?jO@5XS)xzidHYCyQr z&j$<8FF@&hYYam~WjH0mW*UgMlpu{@UiCB31dv;mW0kkh=|pu}7geOZGYGS6CC|DY zhX+SIobc9Hxd31&V_sB1yeA|5ny>!y_`ZMlUGa{$zbgiEiWvEx${^J3?M>$;z++q1 z)pfqV%7ZKo2E>oI?p<4xL(Ml08O*VA90P_PQR27A_!NR0qFl%xwxIuP2R7C2#*JH> z@&3mik2~&qAin4;za~E6(?3&z3dHd}7=P$)$>cF0T_Sv{~{fZj|4QWbr2ugn;wKT>zMPAjIlc&mKkb329t&6hIZGb z`{c2Gj{tfq)WNd@lY)1IV+1uVX->XUR*4_KXD1KtdM&%-eojQFlxz&&Wg8x=qq(M> za5|rUK+QLvztUgJ`5}pp^>-L*8f1WVHWMJ@c|m*t zpVHE|WIgpBf4Rq7xhAI3pVUqn*|ji944mnn`zB|}`jNY6vSf;U+_xwL4}^2l(WY=nC%Fqp%Fpzxk-8pe21@gyPW%)wt`D|IGEcUf#GjCIaGH3)e;TA8 z(v(}|i!-kv#?PuE%0Jq9`L#SXe(f1F&}S0O=~cPTv=R4Ahoct zU>xWO8h9hvD=pOzm`-;SDR+K6>~~=3;9M%i|0|ukp%uWUaczX4EW(*pT)?;B=>I4; z+*Y7B>`UK@IW(|%@b;7OumAFw#iv}jGbVaFarL>UVnzYO;^JIPcPC=~`mN|Kuf*<_ z;sMzMbW$%n0Bng5o&fs+x7f#z4n!ky!jRR!qW#3_6B_89Hf=nR8>(-EW{xZvtmh1J zwg#J`TehYD)decOj0SKjVqn4?pyzYx2AP^{LG%%iMKU(KB zWd>kJjtP}k@muj@Fv1C4+@rq4fqZc-_LHjL@r%G^sHa@N6gZwTp2PJO103^LhEgY* zH&i*OtyLJ$4<4>7{ReQ3fw#^(0Egv&kiHR5N*9lg0}QmrJSOCkiHwB2JF!=HOep0h zwnwZPUQFjm2QLK&@gtX)1lI}F{IUmET+e)bA zs{T?pBkv+mTJ1NJXK2j28hm52AT8h28>NjP@s9H~UKYrERbG{j5k1ad<<`h`baZ)M zBmJ|WmZSZRwLRKOE?zlQUxWV;c;Xls=b^~C(G~tfc&Vq+cLDO$G}FQLOVw5Jlb8Dy z59g!0DxKp#%Fxt%D7}Dn2A}ftoac49Spb&C=|8Nu3Z&QZ%&5L9r^8V59rkl0|B*1N zH-UVI(|}q&??G}CWSd_1M0tsx2&M(qM*{Z$`Y|ypaU1|^NLBEF+Oeq9)bA7fDvdc7 z#w7!l^}|B?f7+Jftz&mT7+?OEzBb+q!W%KKi~tiJ$n9 zAByG8tMSR_?~bLGIz+{bJoukc@X%pA0>=ar5(lvrpWgAzhQKpbB< z5-V3O#g5|AnHgD9u{XCZ*_7ueL|w91Y+4s}-oV!gq-k$?UEdT2k7Xl2Rb`ari@Zv? 
z#Fy#lE2einG45Cf`=$*sS2!`F*t8YXow*p!NM>{0m_Kqfy2qDP?{u6zdpv&iSAR8r z?ib%CiSj{Iq(TeXi~CL;jcd5C$GOOitS~O zyBd@Ad-2Eb`mK2HFZ{U5Z$?XT>ZEL?Jw2=VjV+*=M`0e6I9OvUi4dDZ8Yy$}R2N18 z_zI~Qxv`LdkgMDgWS`sg>l};z>8{4R4BItn$g4v0&?jMZUJANxk)~_nM$v^=ASLGt zz%?pgZSf#vERlAUJSN?g&HUBR7SaXOuROMrQUmoV{<3_O;N#wm%8q!*g{*n)`q*`M zN&+i#^4O7c+7RbFz{O{{KG}c-bpI_3D=lc0!b>U0-v(telE((aJEPn{VZKy|zA%AS8;1tsOu-MN> z1T_w?Q1c=WmzTVOCQil}Ir#55)Orgp;ZMK1o%DkjeAJ690rMw7H)Av;9FEsj&gIjd zY)h1BWKrps9#ltT6ICwwH6Q8U1WmfilV9~%=EMP%lRlvz8}$67@=#}`Gv!oy9zp#C zCwa3=%K91~-5e8-`#a9d!UEKfJx=- zpYzuL-KFE|``afBRgVo}2M!0Qc25 zw`ia2#KLpB0r7DrHRfHrx)VL`K5@3RIC5zYavAlRgo6sF)^#TBTSppSSD~N~l zcV3L8)8{>hMz1kVdr>^(_(=lV-rw;80Ip~BL;ezR%Vjd)P4Rg8JAi)W6IFSj*rl9qY?cWp&u9^NjV*V*MWz{h*{UH z+?Z)~f|XV%_vq5Gc*Sd9ACErza9n-%QoQl=-mHuf58QK61Fa=FoP1acP#5O}&B(!v zGjzQ-!JCL_aDa^xV`T>f%ujT=lPC|+0S8Ec@-ShGJb_vl>>&H93*HV)BTNHCHyn>J z$=>hBo4@dL;=g|XzlnGL?(fCc`l{qhW0LMM)@?a>$qkvTZfHx4#8TY0J0W1KxdTS#-_~;XH{;rqBH+|bb zip686VqY~SgS+uA_#0sbpqIq zeT3@rr%A&#pSou`71@awCN#24;;n&S9rWRoYF@y;v(N`LIx1=C_;4Cf z&pFyYdDXq}#?Nj15L7(%SOGS8fLV3U{kj04g46UPbDn1!yt;k{0D_9Q@(Xb0?{Vsw zWeUhJ@J8g|8{kLwAOy90qmj-9umrtE$GPSz5yJg{w1N} zFc&3ny7x!PXrExu1JH#2){Q2F}qSwYLr5vy`wt@2}1h|$=cz_Sx(9ePZ0xe_$P^W3o zebeTJg#h*o?(BQXXP;jn`8Wtk-=6zNCMtM)L<(I_g z-gQ^Z4EEyIr59qjwi?GzAB(BZRP@xZtzW;UzE^#6j|mmYSTt-24{z}AGVtN;e$|QZ z;^8iR_Z};J#nTt+014R(H-g_~tBw@43!woIZV8 z91j&Iu6Y4$TkYrU>EnJgkvAIN_wGN8zyB@Y7;9=H)~R^FwrLaDM$t>g=IkC-ZQP*o z1M=oqb?SE7bgT3&_!Ipk5Bq<$1^0XQ^C=fk%#*INCDC%MfUIagK<1%7pdLq%_X~Y! zzTy#Iqqt%WO6+C&&>x2LB2YbKs^cl=DMn~0^-&&xKg&+-8bhFe6CLbmW?TZtNSy1w zAFfuC|4cZ{@4-SZ;;n%!SM_O{034Bkw%y1HsQI8@;7LF0?9X2S@Ml9c4Cql=m|KT>73I7K~;?(=YoF+Y5aYN02T1XYB_hR=cyPUNq^4s`!KA|A_ODCEwWPZGFpuxK*+8YOfbpt}$;Dikl|l?sKQ(<#(Nn{h{JA z>5JW_c6RzPKR2iTn2A)?pt3@udKI^+UnN3ep)fQs`OddU@hL12&~4kvq<+j>QFcPH zZ^}9SSDs6{McCON9|Zbrk6*13PB_w+bFV%K&v>12a`x+rPh>8B*i0$?AukIL*rk94 zvCM%fEbxO8`U%GeVsA>+6Jt`A1pw>)9l!N9IlmNxso8jXV>fPp-KWL>@n3!YxLjkm&XJ{QCU8rfL>?C?;XeI;tM|EfjBeQj%!ap9?Y!rNhUr+IyKRb z*(qCrWWi2zJC41P@4zwVlw)KTwd3Q6ZTp`1&dKk-()$_%Z7H@KY7EK(5Z*z)r5JWy zd5Lp}((#^XkIqt@xa(y~oAZK<$yP`9qFAscn-$Fqi*t&R+wnPH@L6%{_|aHhUiOZp z9gX=~;&*F(Lu1xXY_2cIFaPNOCvHCWh-`Ax3)yM!#FUfLI}`6-I4XN3j&@vWTSMl= z$ISVvC62j9Y;p>nOs2l-gSz~wO^7$!tz?QCc|e#UYWk`XkT3gcaO5Xk6Hiq?z-KZ5 z-h}mibS>avHdbf55*p%Krx*JpFTgo8G;ON(JDz_c!zf>+*Y@i+torbu##QnQT!by; zOC5Qy-iOc9#~YAuJs03Rz**xRz&4)#&CoOopvaB>iTt&HYw9aUjYah*&kf+_y2!)z zA>XW*ZMXA*Y?gQbHZJ%vFrDyP?nA#nU*di*p+3MTZJb^ApOn{aYH}|7S56W)`D@jt zRdqG;DRngZgnqRZyjoIU>&yBNLtRjylKz_uEGSpUjalCQN_9ED7F6D{-lR84z-IAL zve$7&MGx1e87T5lsr>8q!nyQU{F<+I%t75d2Mg&_@|Y#-FCl1Z=W&-zS@g_!NH z%7byP^{k)EtBik+XUL#?$f(3WRVLPv4)(18^!k_&G0lha5x3=M2(li3LfFk3Ji-l4 zMNS9fdifd9s6PAyb)oa|{Ns7q`G&kSTV9sqfwVv6qpQgyC*#Zh`Zvbse#Kvjr=NQ^ z-uA1%88h9bID7t_`-j=tZY-TR9*=zF!|^M>@QV>`9t_VbPgn6kzDe#~283@yeJSlP zDWJ-fm+$FldsN;%^(8IQ$D+X*#Q?j5?HJs;5}Q|_iv9JgF}WjuELlvepJ1;=TIYqO zh*zI$$C<@;98rI>JJ|AWm^$%pNGvF1!P%^mI*QF^RVQ;Y{VnDncoUq5$igWyW~McM zklct_pbb8G7DX)78|bJ20di3dFOKm7!u#hUUZ*Dgv2B0+Hr<- z;66at*e!Zse!@+e1!2m+MfJkFbZQ|M&mM~jHpE>#6YW#?$KL$C@n3%Y9r0W5dom`^ zT#WwQ(bxocJL5HfCY=Qb_IZ11IXWU&;bu1@+Bk`e%3=UyfKF#TS5|fan9dylh&lb>U$nQ|9i_p`;bdi?esWE^fQ{TAjZpPMo_TqoQq?}PQ}vFiCCCF7PH-X zIgq2#omD+-Bcg!R4o6hZDA+uDRDtNNRSgzy#Q*ivKO-91Y2{4JsT>UiMW+2>xDyJv zyH=_vCFyKv-DHp)+#k{qYd)Nk_;GhbD^q* z!}`J&2msJgPP8(tx_I$IeDX(FC3)IACorEUa^CoU3bztE$8kn?A3a_T1SI{Np%EQuHDe@TAaS^_V~=t`8?IZ zn~*!P+24%IS1!j#ANp{-@XWKqyDRw#&wgKZ9Y(&4m;p5iUy6EqzZaEss zZDJCBhcP3^8FJ=Bbx?bI8iLpv_eBE>8hSdiT?f4PC3N))(g$sYt+j^(z8R>&jQHmv z0?v{*vAsw_dV)K2XEo{qonq>w(Vx&h(Gw3zCcRAyHjaMLBjuEta{A}h_~poCJ~$~$ 
zJ~9yqQQj*Xr3jbzKaLewUi)o(+EJ6HHYn$3({Q5$gadoPC+KgzCb((pr7k1qzD|5k zcnFG&%(?QfZ83k&&%t*E-17hi{Mf&))xjqQ{VllMK3rF(=RYol)g7p=-5B!Rq&xrk zPf&0`ujx>Ia;r{H{&dQ@=_0Svl$bxABWL(T9;rzElfGpisY9-9v=ua^YI`3)ZzOaD}V1VJzyIKL0qZ8bdZFwN1bcMK1 zo135ZBDEXKR~2M$`pxu(BYe-D?bRcC^7Ysg5C1!g3i*~J3%@?}@Z+($z8xL)hf}<9 zEsXg|8`VV}l{ea{4sIQivu#enfwmY zDPmidWQ3bE0w>xV?E^oZc_dldi2_3~(bg>wZaR?EQ9lppaM-%6eJ6K_ouI*pgnN&G z5Pzb=#68}@14&4BJ41FI+KnT#t$6U<*?853+tpt7W3aA1X}hNZStq8{-wo+c*ZIb# z$`TI=o5@uj4q#>G@MIsSH0U4`aTRTx1&}PxfL{hHv~`1o!!~7X$HfFP{VQ^3Q6ADx zhUL%f)4*Lky!~hbG@uG{`;B&#rCjWMzzr4Q{tI6qPmS2 z%HRW9Y{%SB{kYF%!bNm&Co>X%qgVAqAW=R^FE+L|)ZYwZRofQ;KgbPYMd}oT!5O!0k1dquxKoA5i4b5Z z`Kd-sx%3@QS2FzDntT*4T#9g>kk{a&%|Iu^ z@bQ1pT+V$g^v@N>p|7G$$MBA4R|Nrtb9Pc60Qp(mByjyl062<N~i45aq5}(&k|?xgzD>LlPZ??bz+$&=ByR^(^gm@nm$+jE8lRq<#=l4#%1+) z6Y)u(@~Osk>4i%&^_@6-@{B|=p_q~}&SbRJ|85f>NIrwXR@{Eq5yd7macg-wT6{}Z zeH0Ua+x@fw(R@dvE!CyZ)4SE8FM%c7Cs)pI^Ja=A$CNyDw}h8{PLW%m;Mp6DWCB9tz~ea zXZIW22DkD7%H)@z)VGkxLlI&;zB}u>h09wDi&hpkPqx~z&m5a#$eH<0Om{l|{3-R# zv?k*>fBWrmQ+CSy)wcXlOXaYkxy6pgF5B^eM;?wVSC-?E=U3vR&u+wrFZJS)%j@xh zORMq6`aQO?6;G}7;-Q3Y^bfCa+=z!Ywt0ACJ09x>(ztiW}1Bn&R)>RyX>x$;s|w>`l+a9uwZvv;Oqd#0-<(8Y@Xpj0cB1 zTh{Hxd+&(5?mlPRSzpt*O81YbjbV>NrFFY=@z9_AQ9S&6?~tzhl8<68#iPgxzt-2- z%eE)}ha9n0{bLui9oZ$jw6g2f4s$ZLU(RLUOyn!C%+9-+Zu?s2I_{`1!+vOIz714t0-*9=U_AYz#Y-{p?Eb>2>v!Eom5+d6ZW**e>iBq`TZ* z%pRlsuqD|TX&91p%?oC{e1)}vLlRB*E}3vmPmpD)i=X@rEJ=P7$bn+ zWx&V0p4tW1JmhB}0euxWjo7&Jd4g&2VgPVKgMOr|e99D^=%%jG*RIO%@e}_n0^mow zY3EvWC9Rp}Mr=QzYafMsnsYeg2-P}=7ya`y8vdDn()hu1;xu2PbC;bygcI6QB|4H{ z==~f9|D=DmJKeWl(|MVWec1uK>SB}Zu7M8FbJjqxkTvP1O=W#w=NK~O;0?nj zU38Lp&&X5`TKrQ9rTN^nf`PgUpZ@8O`~n192P#jJagv`r$Pe3-Z?j#a2gu;uc$Epf z#z$R%^2k3hmQGqEPwC(CbUyKtjH+F#6rfgmL{9MOPn8iV&SU$aer!WWVg^upVF~r=ANQEOqg_Mmj1V&} z;M07&8k>Le8{Zfwj~|bxpMEOV);3~cVNTLzUS&IXJXWb^p1&M}jV(E+p8E;jq-GJA zzq2m9%xTq-qyD@YM7rjjzGqrKadJmEnUj!SCKV%0ss9ee8H4p(x>hVD!L`)Jr*$7% zng0TBOY%L^o{D*P2b7}^ zkSILciWvq2$qK=`Ju6@FJG>k`HPwzSUNTX?g!xiV|Mds`V&^OlOP_%4`g04a2QG|L z6%IugJ212s+A)7*LH(BEt*-K`uNqFx#a??Ue(sllH+no|pF1ku%&HFM;dvPHV2mVO z#H*7MN(=ujvR?#=%+ZsM-VsW_*5r1~P7Y#mW<4$(pNccbCZ)f%*zVmF z{R7u0B2ze`!7WDhdmcoB&9;b6;SV*bKK-MdcQA1})NoTgi8jiS5B*XId+-kRqLZq) zcDicASGrBx2S`fxN0yLmLv{^mrXx%Euj?UQbs_hZhSQ+ce|5o``$TxrwS6aYM}07C zJi13mj~u|S|73F=y1*W}!wW=?156Y4l-Gb|CB~?KMll-;Zl`46{E!2_0K0Fs*zJj( z0I-?3H1X8C|JHkT`}IO$L;w|gg&jiX+CK`wAOypN1jY?{Z1Ukxy{e+#ciChbZeRf- z>*R7isK)g>$w5NE1xWn^od<5%>+Udbc0{!p=@-n#%W^~{W4;< zY?@T#VtfDT){&U*E-1)5rUrO6I^No8QI>*F;0>_39Pb&87x{ ztY`uMzzYQwKoc;ljAbrT=#aK&&CD=V7@KWwv50zU=e}_ zz7)fT69A|{SHHlaHl{>mok2#$Df&uVaGLJdI!UYNs4geW(k84|(FpA|50G?I*T{Gi z_rbIT2ppxmJ|+|m)vl6G@>SgKHOh4}@jgaFLfV?!+~Im&~CLINNc8Gs?aURuUC#)as%0sgPg-#bM^iYpG>U2=zPjm@S zQ$HOF1)zaWO`R)p9O;su5!zlTU&}&Yc4V2K;E(bRAcA63+=m6f`KqHQas|+zeptBG z5Jqyr{;4N+Cc%Mg+O=L3#XwJfyFU8XIG*&yjq> z4=81IM&Ixa5iwuhKo4e6gU&I64b7xP)SAFGQjOQ-B5LYi< zjK<9JsxhwwhmwjcNZcWGL)$1{(K7{^nW_~t$D=V>n^-657Z^q;x>0*J;K>Zp) zIXeIh)wj}@$}EC6Y-ry@>27WaRCMQ<}x;$uefmfB7Zbay?eI5o>;=-{EE+t55MD=V|V$f=qT=JsXet@^9p?Dqqni9<}ne=SDubRe=C-{ zvyPkiTHNyTa@@Q%jKQ}0cM&5|+l4cii~Wn6Xtxz4D+V;py2ti-#An&hs$EH8EG)51 z*vd`?^MNTx^oXxy@>qq1FBe;>XWnOlJu$mdtU zykYZPyK?}jkMrcE$q(&7ldsC5!gwLX|MUfA1L~u90&A#0Lp#pbvg26lIt6@p@b^m3ZwEb&Rc+cg0!tFFY@aL zJ-$D(*@JDM9I>-;Srk2$E9Dx|)!?aF%L)2u+PU;&0H>iI6Ts25FYwhi&4Yq+KjDx( zUWE4$s82|aD&86==5?NfKaUlUC-QaQ`@fVtf(yDea~7RqNwVV|QFZ zA2u$#C%N)j;{3wYi=ok@@HXe;a7cc-HhV9X7rQu|3@xUo>O1MjUnbxK#-Oyn^zX=w zc#mAAT~GmtbR3s6{hJ?>Di)xh9xA?KQke&EEM(hNpMCV~9r5+w{4MdxpYn!y{)KDt zhadQ(xaaN%>ys`{G?E;?~N#V=elZ{;J{`#c}(a z>oL*q#Za-(Bk%u{_{F#WU$MWj6?dLI8FSL(?A|cCe3C}TY0>XDjTA?tH>(VhE5dQp 
zv0PNYXdu3z9(H!zQh$2&=1TMxOLW>3G2e=K#of2X?MLTgccA*2@7rP9)uA5-5Yh26 zk+CUG;5RKf<>wUlBoh|dVQG?~VhHjxAx*p66P~?b$0`WQzJ_yj@UbB8quY!(82;lg zYzJb@Wo5@6yK>68@_DCB*Mn!^$eUJ-cC(!k11Z#q8NTH%TIn8!3JzH)!8nWQ4O3+yB6CrJq~q#;MtE+;`h;!DE`u)tic) zG-4y}lZKeHV?hISAuD3K`lfBeCp4F9AJY`aAo*(M0X!ZR{gO#7pZD|VPacUw9vb+~ zPgR%kCBkZX$Ec}kCXZ($MEmYcE82=7W40ZgBS-zYwzbU-$zwbI*>`@sA7C?%8t!fT zTl&PyZ~E-dihug8-xLo&_NVcY4}2&defY81Sl*1CEk4K8ja9|d{jHto%;gSotWl;v z-rA8(h)%x&2$P=6fJI41diD;W_Pc7GD&+Yi@=Hg=nT$uK6koEn^^SO*QcO7*iWkwp zaO|8U+;$w!=cKm1V7L`$&YzAW8k4f9Wp#Zky7Qfw75@v}PIT24hg;k6f!}*qJpRZ> zVo~AfoF~*}^Ii2@YFq4TK>KHxIgbbUiF8~g@`e}aCH7>Fkak9wlM7F2jVXKN;Cp6R zZ9!?^LO-Nghs_*? zMBiDuJ_FaK4c2R`N!18Lrm1X=Ln~x`$d39-*^(bzRGmli$ba!X!H|CHF!-UFA#v2X zE3+Ux)i2s!pjUVpzdb=^F_>%mwU!UWP;R_=#koW3&;>&hGUHSZj z@qfN;o%9k{rJT8zENJ(tb9Z9Lb>!l5U<@B)Jo&d=1WP$|53eax?2Yp5@=|HeIcen) zFNho-VAMQ=hkGusba ziY&Tk&c^=Y(fGk1`>8l{@wRyDPyJFn^w4Aeq$l6C=h@V3x9c5?R#sNxm*4ib*xl?$ zXX%LA(oEWe#uYRP_)2^FHy*_*8AsbG$QXE25>2rMoa6Q;Y4G7ITBCZnTcy9dY#D_*>uh zpJLcK7wwbx$5wkW*40kgaSA$z_&(;p)!$)5_;yo>jdJM;U+e`)J2yy2l;3CWf07+* z6#q@MgpX&3x~=jwoXKe6mJ;}v(zC|1238#kX94I42dpUP(dvOz!>ZAtveq^pl1 zZsbjQ`c3LfS;G@Ni{4`6cz%2t`t#fIJlhkH)o_n?BJw`V3AD{2yjsivJh4dFM=i=rU9^5 zupr{eC!UChKlrEdgOa$URO96BtG=8zgMcl6t6#UJke2G$m(%?G zzwpNR&;Ry2;(z?ouf(Rm@x^4ga4EoNkl9!8&9_(wyQ0x65cguhTcX=aAN1Y%<*Fs| zrz+98C((Iup=$=rriTtp4R(K02Cu;Vx(2Sa$4fWX;^AkXi#s29Wqjq={?+I#o>Y1( z{`8{{$9vxWesBBFZUtO3ly7ENj}=9K2^dSbO#~xL3=|JpmH^!s@DjP4#2W2>HEbWO_WOsnV4X`1SKu0rao>us3w)fI7v{K&}+aU$*&7^I)tQ;6MX~ zMt)#K2dR=A_Q==&s0;a6uIN-gSMr7J3gj`E=>%ky16AlPX|78b3=V`JJI~1;%2A*E z1wk|Yh3J{$yNtS12OM=mlb{;_9lwfSS0#adL0nAoeoFFa`gPPnNAZK6K;r;55m=S@ zIn_;GmJwur{Sj`y8^^V2EqxXy=KQ)w-{!XrMC-x}K1YKMv7!j#+ zNBIH$ZR}fo*Cw1aAxE7Ph?C~ONsqTgn&^02?mEbzKUQX4>q5L^KA@pJ;=<`u>9z62 z-Px7`>OmY`>cn6Ci*JeT{$@P+*yC~b?CH4w!Iww3JuBb1C%?9t{#(B9=#izkc55xJ zU0I2r`=7rV>)X?D?)+Wyqb$l3O$yHFSIF-$gY=wHE7ut&L<^*cTZNTz(pQpZ3!Ss}Z@2y@hwl>$|hz58sn{UN``lfG=Q>;Gj zZL1ZCF4z_CJEENh09z|-{st{R!2PWHBOd1ON>|WoSEG87AHVJ4c5LtVyinuJ$ZaDSirp~cbe1$bi zTir+Q3R2ZSGkEm3N8Lu`n;Gbc$H$j%#s{8$K7Qrt>v2QZf90z_E57-wK06+M$FIkJ z?>WXEvCwHpd#0n}366qB~?*bt2a`O6jQ?vdwid4j8} z0kRBA{$?tAgPqtEZq%U|>Qc_B9si?*wrQtM3-2xT)^HTKyr5^cX&}61@ zsMiP6>qK4x7OQ&klh_f4>o*Yv_(~2Xa}yU(zjkdLyJAAsw^k9ig^N}@aq zuwEKQ{2zcyM+3E#gB#yB_+;mbytP@Svt<^yPSn&p_#ffJjXA=7LB#69ed`>;{_ZFsOhNIVs7{kj~nX))iM*ExdqSf^!l4|^~wwJ+SlG2 zCr?br<4;|Q4aIsqP-jwmf3O+*t7|b_yA^vYTk+N(`hob9KmLPwVBu^WSN}IBU)oYU zhfkfE!Y(o%gENg%!J*;}#RK^Msa-xaAlfAW7Q!yCuf@%c4NHHaJsHQk5f5HC z9Y+;^4tlHpZf=VSdFjAgvMQ$X_PO#u)P;OzC95g5TSV4UACBy7XS>Kxl2_bG%uDC% zosnb$m=+VE&5lT|oKOd995kUAEgy`b#%yPGz9Y4lKsqwW*bIGfPe+@LMr=xF`kFVb z(Lb({GJN)Nc1D_&PIpDGsPvICL&a=7^JBTr%-oV_osK?z@BUm26bG-X{OZiH_=$JC zFRu0WV?&tFEiJ|QBXe=vvE$Mi8`S6LmssS?E)l%M!>*BHRI*HatTskWrZZi_LjrBk z@@tZG&4u$Qo%Wne>OA8%MeS{Qa&AV7zopu_oUI{*Hg~kK(U?<2RQ(6S4g4)A7)w zAB|f#E-5bGj)xw5G9G#CiRf+hV||N76cVj;#GE2?%lhvJHGotuSu1{3UqH<2xecWg z7y6T#o~V;Ovz0Y(`FFZ86mQJi_7x9r4fZs?nUDE};~GzNV}4OID!yM=-7I#PY|(!A z)MjN78Y3)p=VD>59jA^h#qr}uV!O8!Pd@U|xOVB;n44{TK`Cv8hd`UX_1GS)#g4`r z#6#%73lS{qoL>Mu$02>%UZpDJ&F9V_eL`_>>L=$*+&q<=G@#mhZ3_naef4SNp*_=* zM}45`QaW-1cu?xO65>AB!!Pea3!k0i%JFZp3&*v(>BS;Ca_t9XvJc4MntqK>0(NKl z1~{Bc25MKdJ8Tqt#-#D9*c+m79Im?IC24?Y{J(C}N0)x3j;&G#(4=xY6$i%cbRC%& zpgz+s-+?6ftO_);NGJ0r?Fg6oTJE{L+PFV`q3{WMcv$HLF}(N(NUljIG$7k-SGGU; zKuDKOdj46ws2bCctcNfzFpu^%ysV+TIx;wN?;(MJpHJvPQw7qA0X?sYUE?FUCS8>x zN9fPDjD?4%WY|NsuTl>SX{>AcILbCZvEy<&_c)Ek>hMFV%ZPsRm3dY8@m!2}G7UP- zaXyHYU-s+m&4mM8w9EQXig^dN; z5ZT%XQx`@sl>Mr>dI@#jEt+*ZG@{BC3P2h33j2Rx3BHd4`v++^R9=hUP+c+t(`ti`)- zk5&21@B6R+Ek5J(J}G|XZGRB&{GE5j(bH%BsX#tM&BH|&04&UR<0s$xbMeCEtI=It 
zkRKHt*d_9`F0rBPSFj(?zgT9lt+pt6PBQPfx9#IxYc{qNcT7o7!`@1)JoBuGS~a6H z8e{G6Y&gCm&R7tmi)#0`AD@gnj?63G5Psd8lO9_t&wLPXo-?~P-x80=T%8JIcKmZo zzJlkU3U8Dz+Xr|7Xv7|{MfxFZ8G&;RFD&%pxf;HZ&na&0tT~-_#-fACJ$hB*&N5JbK9R7eI6$>TS203+sTrPJmniM{9(WTVK51(;QT;3IBm<~{M% zX%@zTWRz8SXh%sK3u(BvywEH1QvI$|M|8lkN!!79XCF;lCY`tOxmIS+b$QV$ym($2 z{Ar{0hg^0ff5$+D&-`pF{y=e^`ZX54%&HGWN6e#62^VdcPXjP7PadJp1pp$_3jj<6 zjtD`hAz@RE4>W<|UXFtsROD-ng#rRPD5v4DETj}%6@~{+ItMB<`wFumoCfGES_I6A zh@O) zUrXeIrwH2}G7Ul<36p*x!U(8Rr<@;{+YzzmNr~ApB+br$2rNSVoE^u4fQ_7cx*?f? z>zSvYjHe!cG`{%Dza%#MyYZ@*-4}D+BZ*(pa&ORc=ZRD0t+EU{ONF8bgTO#*S{@5s zA|O*GBUv}$2z6Jo>8Ub5bk)Q>PaVpM4te{Zw*=WbjkV5fuM!Fd)v$&W@w!iZb^P0J z|A+BEfAv>mu(=~+!y9B!h;tlh0LX+h-yAU=vI}ozK2R{gs#NMPEf3S84ECVvm5y#j zmk1`T9{O5NVYU@(8(Z!apSf~19(&>WxNz^w<&-uu1}T5fch zQyP3t?yh*{H#TPc0@pA6cBZl~79!O2{FPvxe0*~RxmZT}l?KDD1}d&n#*I}@?%K^8 zu`T_cIB_cOyz>q}_`kJ$C2rif>48t1y@-W5+n!LXUC~w?@XBG+0c1dvKsI;mDn|P= zt)jkY7J14&PiRv%WtBJq9GR|a{q`SR z>u6z?dhB#`O%Nlfv{XQb&d)grOMN3cOXCFZ0VriuHgVD+Ajiy4+DHa@?T(?=UwH>q zSHN}srmg5Y zd8Sp=HcrQ`YxjpmoYH^1nAMF+4!tCz9EhtTZ~6O`jD;{o9dG^5LD1I zD?h!mF^C&iZpBaj(%aO(b>htVJG=mZHbt<`n+g8J1U|rB1jIn2PK%y0NW-28WOQGI zIp(lj(jKbp5o>~_ocGmknT$jJ^riSx`UQY8>d8#6aSLp+@uxOO!*Z?5^%08FByr@my~=R2GI{Nx8)*kcb2%%~ImgS{;;ggCmm6vvJoi|LjI zgaY1TAE-ZuM;6oUFgZuSseYQbUT9x@FP1mpEo0#%FLL1;F>uYK3T-3rNv@ome^ufE zMqhTs8~1GuDs)fnhCwWoe&icUpU-Ws#>3Zc#Ls`^1@8dxwSW1I@ejWKFT|7Yc}K+d zWtAJ`x5nlcRB2m%`hIM!ZpDpDkH@}ZrMYfbK`#p%rsC%ES}ZU3VrN@1C2bfPLaW3s z6vED@AErIP!wN`aaR`3cd=ho&*tEkjAU)f)?2r+e%G(3+XG4cG7QHj@mJ0HK!Y;-QOmE#0~HyI|k{4+Bu6)({#_}`(f+8==Mwd*Q+&HW0a zYs*n|Ri1$QFt$RC3&+ciLnDuZLLSOTJZsXC?XSi+xj+(HOLDEh%A0YzPm29EP~^<9 z$hxt4$)@IIo&%^p75{$_Pvs+VGz%?7uW6CK6N+CJKGJI(i9YbtBNIC)mpcAXL$^+u z7qI+9bMji{Tgo-lhwy zF}km7IUviT=+wSNc>I7YdnwaLf4~9&Ffb=E-R{O#uOENo?|y51_1FBhc=reYG@g0x zO3cnL#9enk7=wv+3^jh4?yxJ)Z0rm+qNn)n$b2ha`|)?g3s=?@oAyoa=K5xIRL2Z2 zT`b>>-PQGY`1juxzxMNQi=EY5u{5(Aw;w+i-D$<~!!5NcK3OOltsZPsBM5W?`_t-^ zcyJ)$P3+Fg*g z|HN$mUa8WiIp>~gVPf77)+|fZB`IU$$dWiuG`YR7m=K8SNmTzFxnV>E%RZZAunm%ciW^+{Jn09GUc0=_QaR)uW#^aq}@2$6}~COy^2k@ zMeh#&f&UUo8$OAl+w;cVA5O*2o^;r1$F}so)?SJq{>}GAU-9gQGMt#3jgzzOxb5Ug zS(&P4=LFLRaiT%Avf$d|0QZCNqkGZ?^LmHDa-tnpXQI580r;>vWFXEGvR%83ewM}L zv~AXXgT}5c*#V1hQ$8x&QXHsQYjQ?5JR@zi=c2W+s2FlST1*}R0XB9(k$8Y}T zZ^kdZ?U!8#Zx7P0_%`zPU@N})fBlB|cmL*lWKWy2i?-*xh~KjB9dEzYcmDqaDo)PC zv*aNwbL+$-*|vPHagDBdxPz#x_vE?);_M3@50co5U!g7L7it2A;&Lou+cGjn{`y;? zUURH(2@WM=rZsUNY~5d_L7P-I*HHm|K?g^Gd*mY)f}WaI&uiY=?>FPpA{WMUX$Q(E zJGRZ}KW~v%ejKAH9`RT}_uc%+UXIiadd{h3R`QVAy;~VLcu6+vRJw9ZDKkgz;SKPX z0N3@eP&I&D_(?R)HEtWeAlvuLH6Zt)^32qbPzvdYoDjMGXI{$qo&a$ut8waBAI7jF zo9Cy+BxNaJ{H0*6pIUssKxs;Ta~XiXq7p;BPZ6tTysUn>q{39A7 zrQB!A#t3z8Uckqp@MUzwGZut=>p*pkJ)rbBX)oU-XIHzXe(Hfs3oGezTxCIVloZ6E zV^3b7qMR6VK6s|zh1*s^EP!Zw#T>U?!U?-hm`ap z1L!oN2J=pxEU?&}kWd*@&#Y?miW5A`E~L5Q&;0p{^WzggqSD&`D)W7IUG51B;No2_s8SUJsX|b1uuG} zzHQy(IPlIWs7%sS8dDf9OaG^z$IiOv&k3pSndw>O+looi%UFKx`c<*eiXxFP_fR0`s-HAsBUKR_%sqP#^6`G@=Yz(!3(oI48=D1l%hZDDf!JR zeR6+0CM83EBA_mqP! 
ze@q=Y9|OfJJ1oEy4}4mhv5`NaB4ioY;*g<12hr}fB5UB>Z9;$?ay&w$ARSCf2I9+~ zQbo@UXBGcA&cIhtXm&n2OGo4Ug^PY!!!M|cM;1>^O7{8U)KtzzsYJ?B{imE_N3q20 z#IkgDGwxbkiO;xqIvzM9c@M6{=GqGyE^J6YqNKFPY}=wy!g1TdZbheG1{RITogEPJ z85loaK!o4qk49iSD?2;znI_|upP>w$8S{-S2E!ln6cMQ7KaMOyL|n$>cE~yQAU@$u z_x!>n_CXvCvjXlz2jx`4$2f8WI8@BFJ?06<`zjAT$&jR@BO z*eOAX!(!7X@yl==+KSzjfeB55lPU0wUjsNQ@>>|J?rQD{UttKRUBAte_hpbJa4}s% zV21Y?j05S9j~Scb!gM9 z?is4%k)60|=(MAB?QRe?z#6@IoD&c!yUMRgFeIKd763<(a+T8Tg-g1Wi(&7`U@txY zLOlP}lkujvyjcU2{dm>O?uxnYlI6uh2)0wM3ji=+45-S`$09QivTzbbcUc$4f;=*> zw*a?XvwWpD^Cy^Ab&jrIcEYwAyb(ud$O@ZXMagtr^Ih4H__+VU`{FzQx4#{~{r0yb zC^tMN+kKP_-3e0`Bi4Y{j#R*qeMco6i1G|a8Koxe>jvgRKrvA zmR#hxrsv|)^;_}Lr=E|K=P$(H`PTnEZuEL_`O?k!$VWaBk3acjwA4}bH&$b@&45*S z6$E-!oTTXo)YPvdx<(FGIS)pW$wb;K6RXf?T3Es?Igh`w?VoV+&>orEVd;c+zs(y1 zGM}qAZ}{QZ($ZqwckcssTGy{%kLRC%-kn3IofGpJtc;GV-jV+j@1jNaVB4}YpIF+AhtfO<{GHMyVur?k*?tk$1?Bq?v=t3$nVU-|q!m_rk&HDA&PloRZ7{3v^) zEtg2ja|J!3H)YB1oWV=sS=W?wX9%4F>qI_;91{vS zoQ9pM+@v~VtaX)0fOS;a#JPvMiAP5&3~f zZQrsgawfIN&ftYUI@Aeo>rltcYOqXf7c~)GCl1?6u~8sxfcgtQf%8_pzRn-ll9pIc zdoo$@hRg`^IvKpoSjG2dgrK}izsRH}IQVtcEQvLBDSbxZ)?$&us zG3h#)nVE6uZ1QbOmMs@s~h-0vw0~@{tfAZw< zm|r|*+E^9LH_nC{Y!PSCH^IjqH1)RCE%&{WmWgf#6NLWBp4yYH={qeq(E$)Qcoc}g z@U{MUpdcP5c|*S?S_ojL6sNUx&BkxyQ3Jk9vY*FqUX8bY^io_=+E;)1r^i42<~PNa z_r5#!w>QQAFt*k-V8k~TxeYc~m3A$rL{_Igqk+i0?RR;37^|zB8f;?s*n|d*EMy?S zmpo7l*N&|u7kKV(NmgY7t?JRXfL_Z3+?tL~i5bC7TH?r96@P*avH)PSe1d?54&>tv zCkm<-U#HC=q^t{fo$-j;U%|~p*a@8%>EG{YjKDrR6&)pr@*Vf|wk31Q@n2Xt^#@*jNxx~Y1IdMjPWezk{+~8yA1jNs9E+TlPpJ@Ye*9fK*)D^B%UXI%onz;j zhW=`QQGerrF2SdgmPg}*TrLk&ys-kHV9J;F;W$IQX5R^vV}0OUubX&GMX&+V-M9p$ zPH1&))+79L4Kduzc59n6N_ayi&;jN8kzQ;u>m`ruS;!d|0Qmr;FlI0^LAkGE$|vz> zdOZi=m7~(=unJWFk@u2LA63jUpOl@JG=uzem08+vrK288eM+aCZPpZ(Zuh69*oMX zW|!y3G<(uf?5y(H9E%*-nLyBN$6Dzw?bSGdaovtX;h%nxW(Pg=(IdP*G~l*uf1h+W z_{;o3<)?{9#`P*lKc(cVzbeYw`fPZ+XfcD?jh;hKc{ksZNx*oj2+bwYO)bxZ?+|nfxc7 z%KlW+4&DgmrJLy%+w;z=?u_HvEr1DEV$F2>=t}+cZuaHag3YbU+qSBVQk?Zddl}}u z3~kJ9gE3OgD`0%;VNQA*}|M=PXnRkC6uB;7XZ8#IhI@57#b}sHbdo~tYt?2V< zD%%cYDcYUJC<;)C5qW{BwsG?!eRzSdZ5v-Fg2eaXHdq%7WnTpDyzd=R)6rxab%{>b zrTU?vJ;Ow?MiMH@+ke=rzgs*r6MHNUm|Kduqf0T(?rgHX1;uF#OGo4St()enlf$W0O`{e3m-G85)C_ul;wq76$#} z0WLV0Z{lC#(+FSrxl+60T7S?X=QU5>t@sl>UMPV6p`moiIX9x;xE+w2_5>rcey0J* zn`^XIA$3{e{{}|kLzp!`-qEmv2noPb;A{A-L1xNp@Jy&b*HMPVvlU{D z9d^x88V{zonMc#bR)0Ho2kgk1b5@Lb^TiI0-{p7dLxx^ZkhQu0rO$^r5xb{Krlv97vHvLj-xe` zWBF+f8}?P-ZhyypFpCy>AqZ{fjv={~I*S9qi`ekp(~5t*m;xb4xf6;7JP)Aw4MDSm z2zn>d@$M^1<44>8OlkbU?gtatk23O^0SVIc3z0)er%nM){zr2 zRGh@r0rtl;KXy96l-WsuXBgN9iw*c>InOnimN8v^UM1a6<+Gy+jh9)3;T@82)_aOu zusp?s1~EE?J&Dde<|1LV+l?cqPshnq=VO0%Ci;>$a-JMYjtY{H6Z5vxtT1If!Z?{1 zSZBrCTx_TxU5&esuE!hhpN=~g6g%!-kFB*!(pyh?2dyJCtr5_vj9N47&02 zL-Crix9+eIK)>;MdbdmBzw+BD^OhmK5&Kv-#)F2W7Zxw-KF=Z%19Mumt@_dqaUb`= zg?ZE$7D!``&@b>1cM%u6S(W=^bO5}lfxp|8yjWc1g)p`a<~WtN#Bt#D^Su^3(JJ0= zSAIpq4!gGIi*)Rwu0p|X0N$lSjMc>eEIc9R=J{c#)y}v6=R3hC%80R90ASmQiT6!B z`i`%AuQ-3bSB9DZaVf2j22@1c$vDJe7@3|7cw&Nb7!#@DmwX1}$Taf1yoe?*=lpU3 zK;|Q8P>LO&1t-DQd(>%xrBzG3C66b>XtZ3Dk3T3{`Bk1b#`!K3j&^STbey^H+BkOM zH7cz}MerkG@$h596ERs9s2%8G@Ebk?0u94Ht6%dJKb|toYh_YBdEs>7gg;nF8Kn$! 
zA;Yi)(8GAT@6K3SJQl+~ll9ZF#dq@M*uCA08cUU>2LWjd zmNUScy+GENP$$JKVmVIqfq~@%F&QcNB;n*G%XxxJa`MV)4J288Hk2WH0f6+yo9s-y z;&fmB%6sCU{Jn3CcfRwTHg*ioZ?TysVP*iR!7lu>nhrd4%&gY(Ho!_BmIVep7=(sh zbqs!+N_A5oTO_kx$Ih^2$G0P0^9|f+&BfL0EAi;FFT}!$bMXz|^i8p%PU)#9pN&8L z=tKSnupjnl&^yBp0RE{QGGTINmrek{-U{z1SWe2as0%-kwP^R^5|1beNYfC=H~z!% z(1_v7ox3_pMwl!B=x_I8VSXVlUcBfB54UdJlK!9ZAaz!acib`X14r4uL-L`)WcwRE zm?TZI=2(EdxlkaF3D`M5;Xe>nC5=GxPaeu;8p9Uopw5J`@~$^+s{(DxWPc1`kY1`Q zK<8MX;;nU$(^=9SKw?+eF!fgeseO{}nI?XPU;PErmsEKq-K>}cEGrXUa@#JsVr8mN zErZ%2jG*|%29lLv$~ozn zc0$rL!kPG3Sfh(Pr(DsWb!;@!Uty;-hmpSAp6dWiJo&oPSzAMqkNFqRli4BH1pv7! zm1+y}BCipsyeh8zc`L4x#Yyr3;yx1$S!-iD$_(zte04oCBXC0p=lWy&NqeecIy6$z zewp}h{C>4_l}UMkRQ!nlzH0@`>XP*d|`j%XnkF( zLK6jruOYc!1Ds>e)g}zm6E(n79X!^)5V9!45f|3!hcg5Wo?u?1){a9O3(7vDF`o!LP>MNdn_PO})zw&#!>cnvcmJ{luSu8;8o$}9q z4RGIqZ+3^3^0glEt7G{pK(7NK+P9QoIl8SN+g;TGV`+Z`%JehNs%xfeQtqYh=1kI* z5UpPlbw=&#rF<8tc2(af<-s^~Lf@X!CQ$-*Vf;YSPcR70Gi*hpeANf-4!11ixFXOz-6afUbl5 z$> zEx0V((mrZGB!EvQAu%65wPAe_Us|HNXWyy*j1{?4GonRuQvW;67H4XQLv{#|e4gIC z70+!h$4`Fn`FKIsU-_0#jqm!#FNn(@cu!0#5S~`-x3PRVRyX*(4uca`fos6Cwd`Gm z=GrX{M7qL15z8yXSiZ$KOch{D9FuC-ri(a3w4hsWL#yDP*bti;Y_V98N$TXuZC)}W zuUA;Z4(;6f<({sK7|4gs@BkXwGqTv+-c=irr;)26o^w8ng0?3d#b;DIfrd%R%JP(Q zU5)-Qalo4x+p5oROlUB&BhGge%U@aBh!>W7u`NG8%K`wUQ4Y7iy1waw#B6I;ZLY1s z{E?WO)4)-3o8+}8>XePRPf{VQ*fEo02NnUSwv?G@-enP@c;wq^#N^)YPVpHr%4)wC z8?sHd8{V55#O(ZBoIZCZj;OtV^sy)6#^#;|xzlm>o{RDNH@r~;^=`DgM`P*uu~)1Z9NjcuBK0q5XzeU#;%48i%D{;c4TojV_D z7G0(nSyZ`IUe(SWZ`mF|uYAfsq9y5t2LtI&s@G^mFg8AM*wJq|pa?#_iEQO;C^mkv0GT|MW>jK~&{F z=Kl=PFPxQcFT{0cSfJK7R<6=#$(Q!$c-yJ^OBuJ#JWsw4>V-3t+~`~B)nOijOeHg* zT3weh=%@B9ekZ3nhj#oTefzZHEcM2%ZX7*zE?)KePmRC+_1_RHihVC#S&6=4k<)kF z6Vvk@^>h2to?BG>vhE4Jx%sxo{&(MbO#P|)>($LzSzC_zmVCWp!fE={&9&Hk;n{fR zBM-$dzV#>L+VzLxfs?n#xuwO3{-!8V?8avzCCgSj-y|9;*tIOY7=Q=!ZOffB`t!Bk znqniqY0CrqnOJD&E&vbSc{YyBY%3;MS8Lhz0zuxM<8uHkuA5P9%)O)boJ1GV>bu$P ziMWNgw}`3G2kp--ga%9`yyMLYzNwBD&&r(A!X9@hW0X z3vHtG2P^<++K|BgvzZbN$ug-2@&GxGF1V`AJG}}7Tq$?-%DMi!3=c3n?zeE($$zUTkxW@tj-ZM&_AS3eOwm9n2 zAt6IX9tu>k8Yf5o>4JQ|E`CZ}i*6BMezK5v#$q9{>DCxS_poonSij@^MVGZ~Gp>?+ z5drfc9vANtFLFbA5LU|&;+5zaSf|iuIf2jI0_9ld{bAUpbq-HJ;>&aBupQywY;Q`G zuWDZ4s5m%sUE8tF6G4t6e4iuugnExRP#a!#4>?dKnHoXqGi&?{U8aZlka~;an+1B7 zan_f2vY%wfy#(pP^ws{a0QxJql5|mBX{J8WS9c3z9)Ws!xN7-;PV`VL;}m3945QGS zX$c-f@%(_Y(8&?Hg@|7*>vGdcQ-ulHk?mXkr{~3$mw1Lbaq^>cx9Qq%icjkUY74s_ zx9fs7i1SYQr<`a%TvLZ}6)EDMX|IVZ=T0cgr=Rij*fmh+snUjY?Xf<`Z0*KKTa*2m zPVUJLX!q1B2{ifxe4le1b`hyX#2i84%V~vDj+UwXGBU06r}qCY2wtVjQ)l^tw8+o8 zfS>ZK@`HYAGJB$v9q_1|w{`Jzv}x2@`yh&Py(=f;Hlgz3(q-uH;2HcKbu1owB6q0( zlAxHQvT(*9m9@1s*Np!F4dV}lR^YOEDH}96O}L9`yEUlG1wp3K>~v0-CpH{+Y^{gl z5_jde3Dd>GJ>ygVhcBz2upkYV@sWsXUo1zIsz>`?0;Y=0yz5`K`0qKqVYV|OnXyl_4#kF$iT31_@)kjZbkKTZaGO$otaLv0Dzr$+wG1Q0C<6c@~Xb~O+5PcuYT|T-thIj z9jU5BzdEF19S|)4Y9ZGEBsY8f92=K{`@@> zmKxM9f<<}C>-b2xR1UP@RHk%T$Uq8r$D~GRS}ktfr(yEC!UC_&pjKT^*LX_3-IyaJ$J=1CPZ;^s&%LV7z0K0#hY3{$|~_f z3)@Jid=h*PeK;pbv*Cy~bSWBa)Yes&J1__rI9NqrI%0%%2LKu>D^Ivy=xTVly&JE5 z^?mUj|LE_xM>rxi49fVMyCQjy)!(9V zcXx}Xr+!$D-K@wSWwU&1k1bgkC~WD*Pz`2veLEg|=BoPgZhY0({MFbLeY~0a*pp9q z)m&$`E$5})g+&E&rhLoR%5e{eZ6hb-0_VxI2WrZ~8yhJf9J}-`8%sH(SMrfxP1Zzw795L-j))gn!*~r%DI2=$y3b^&-nO)_l0oPsafPBW}{3%EBvS>*-OUJ;iq@yeVKvz!B zdayIghyjwoHtA#1o46BPuF<;>PBVT>MAy(%ijXjLQFQhcgdreDxaHFGm zro1E~oVuj*R&wdnlmot$ApWvlpf_F0ue)yHI|!v<3A{m?V~R03>$DCu423SBrdQlm zjlNsxrd4_+B-h9nz>W^tnn85uu?V!n;e17kDVxLhB&k9N;6Mahl&l+o{-Qge(Q{>> z;O7$hb*EgHi7x9i`NdMIU9cLpu70j*)p$xaapQ@Hka--MSU)S8l{$LuFJLo6iLR1NS%d)hz~Vs*wc%-t|a;m)ld&&1%!*r;f(SlV{C7 z6AF9+fNx~cceCh^bo%?vvH*Z!GIxMd|J>Mc(T4^AtnB8VFBQ}R_76oi_@IfYZArc5 
z`>YZN-?VIo2jc|qOcX!Ay%x`It;LW1@zZfd^#0{9`Skb~fBVgG<^Au8neBeesQ=#S zud3f?YkLhmROMic9RRM!p6;{#e@lZS2CFNpJF&dHDZMi(PLK^>lm66z7lMi|{PK+M zc|ucSVkNs*P0Mbu0ql%H2{EbzcjOH}v?cRAYAbaRgRbbWvF*O%IL4j1E7FYQTkNIS zLH!wz%5X2=NKZQhx2w{%(0KvCo<``CEn=am7%1;*Zx~PASo3#kTPy&e-@w<%jyDu@ zu!@_N+f3hgWkU-Ka~{ZfLK(Xkc4QA7Oyrv$$!SV9Tw_E0$<#D<#6*DXMRvqD;QDG@ zOym-oZt~C}SQ)+>^T&_JeGfhm%e_H-^vP#qWgvXrQ}J=Hdu`nIvIo6zV1DUXoIZWZ z15p+!!28*=r{moDv(njCT)A>7W<<-m(??@#b0g+ElkxlSczb-;xBrv4a{Y?zT7y#6 zi)8|e@9>5plV&5ZY@Y#I{r| zzQ|v$UB9nkqR^!4gHZFsEX3=6$=k$)V`)H67>XCA*EZagZ=lFp6z6?Y>lDV%&W~M~ zFKD8Cj+^tIpsBy^AeVZ0@qNkw2ai|>fIR?>T>e}rcm-J>Z3C$3qx!))vMcJVKV8bt zndXw;dU#35{0E@u+fd4Vtn!U*l=}@me+H!94(1!B$qxu+#S3I?#ZUKnAmTI8Eqf*# z#n?hJuS-AI=!Y@mSY1lz%592E6{fBlzj@rCd#)aQEFhyi_#|WNv79?UcykHbs|aP&w)C$)Ja2Fk^1e9p-tYGsjc()T+lZx$mYOTMg<{>H|Huq1v~VYv^ZIC2f!F z#4O<8Cpx8nUMBMHpcrA{nOKN7|5*e;TsKfK_VCqV%+Jrn{inLZ-BWxk&nlT=w>vw$ z>@pBP`JjefuW04l;%_jNETzkBG0|T*9zXWGe;hY^5o~OHdae@}mX5~hqsMJEJ6jv( z)9qWf;dWWGA5M`ISO8bZGb4`0@$!n)8L9s3It7%35 z!ZoFK4PUtxdBzmk&UMf=51M=~ER z#H-PQ&Xia5?@Fr7X>y;sOfz%2MW3W6Eg4tjHRFUG)6L=te2!m8BcHD8yiN9xspmT1 z#m*dtJ-m;oknAYOrz@dn{IgEhS9#Fh^p^&eEH7Y`W@y?iull^h+`~pF zkEnIcd;-Txv!v*w;|;YvKB>w0dU9&k{L%h$o?JkkI-&n?8RbVNJhTPi$LRv_C!|hj zdBj+jGV)P2a4~GL4BbcLQoWdjeWN%&^;hMFw;lm_Qq~1c(rY`e zZD=oA;-fXKR#e)ncYRGZL_gP=UTQY*uoU^LY1v)l?~GPy{M4jy=c-Rf^6STaPGEtw zN)z)W?yr@(wmWjzg%&(K{Kvc}RhL;1HFT#e=-3tFT^3?F9eLBodm=QMN9JvXQm^b6RY^p6=Wc@&mmiq47HRNvcM+wqH)>zkWi z0O03SJ35Nq;c)2rFrLu&S638UUDGKq!%WAv`YGPX=iw|bACQkUJ|)O5N$l#xd=H;M z;FCq*W|u>^3E35&cwsz_pGAn&g-q=~r-ery3UXk+jediNw!>}ufT0%v?5K`SK1)D6 zA(_uipNQVD8@@&v~P@sOu4*OL*@`5nI z1kZ&y5xsnNDZk;*4M|lB-QMlT#6mYtoIf8k?K#Ipt?5}mqvO{Rzq7osBcniOb+_?7 z@!6eGx~}IY*A%a=#Vbzt;|&kY#*yh2m0OPf`c(x*tFnb{$-^BjR9mC;qsUdB0P1uP z|D#xAlDcSjydkf?Pc~uss#I-9cuedUTeWWimyX_131d=T`$q9xU*2UWERMUSaZ#S< zoP3HS+l4ToOW8erG3BXuYGT$t0Da;E&?SD6c@py1zJV8HP(!n$hu_?1fkH0G7#6uP zo!IWj8l?l#6gVLmQC@!fAqrw0CxLCer>+pBY93w zPrh&B;kSRydlk&TJ{xBm5pv`lr%7XPTCC8c5H4%r$s6H73U&lJK_Hws34{-G4s@~} z2SdV-FjBzCAK|2u%-k4CzRgb=1mU_=zw=0_7%p_}sG+kRf>(yvAS5&yc)P2BcB&O~ z$L@@~U-Lz}x1@aPpuCDS@t~Y3E0;zjNRk?WXT#V6fFv-(pau#r%4CsQr;yu}SGfjM zpA?>3oaF-*`jdwrc{pxexf-ATnV&-~@rwKIj^js8c)NJEgo9oT!E(ey9|AW3>XhsO z&Mjkzuwg=zbV7hefTIr9My>pH0RUJk)bAcz-WVc|QU=T}1iTSwW0qleXC;fBLA>IX z_s2K?&A%Rh`0hW9Zf8bgUsm}g4HgB8^-4U#94deMMH)3^bKfd%U|5EIzv;WYCAC70Aeb(PE zUYJwRyE~9C+!p%28(dBn2;32-!hCd^=w_k@`FTPD8KwNOFXV!R@PF;b)z}>LVrhOM&db40x2N6dJoof7vC-dl!v)tkUe0UK;d<*Nsr2Tfqyu0B zd7V((W1Xac9hstXyS=z=HfDKvnK zM9T8uNEe6eO=qKP;~3Rn^YM1#2*ArdrHftDDLN8gLe*{aK(FwTTidFvTUf5{V3Qj|rP_!IL^ zuvYS`4(Rm#vJ$ZlCdVJtAB4mw$0FKvujtDGbjjMZPeG*>KuoMIuKv%wBcF0!Oeo6~ zedo6%NgM@4POo^&0szaKeh8f0Di5MLPr9U&QsmErFGa%j-yc~nI?f9n8`?UCnU-~6+<)aV9i8;k7!r5J1RKGch-u2~JyLKgZ z*7K7{^wB)jWanRK@N{eUyuzPCu5+}1!}x|cnnhN+khK7$|w zS8%L+;{S!=Mm)KCGk)TO&&6{p^H<*TsqvkE`z`Up``-~=#Ru#{w6(D!ny38`dq#Y1 zZQhDoSD*E&*;$R0752KXUR&9bJ&Jz$QC8PZPu2KIdY4=v5&pfsn0ylhB0hP54%2QD z+!rX#JM@TZ^Nbuxmmq8dN)roePnnlN9MeJlEn=sX4_mHMuIVd;4VUP}CDL!5n9KzL z$Wn4(6+~0=T3{+D_E2i}SielQa$% z<&}a>N7_gAcUhjA80(CI>gE1{df`j5%$-k?UNGbYQ|70RtPz@hS69Wy7eAjda?Q`R z%4h)WG!~v}5X~H2_94P0lI^FF1%NzcpW|Y5yaT{L^a?fikx^djLUqwT-2TX(ER5wZ zI0OxiC9cRF51>Eu_*D5FoRkkfd*R8{<~7fd;QKCQFiwWl`jf&MAjn z?Ds1|gUEw`_rx}}ZEx(1BgxmI~dL0AW4%J`ip5qVOkMiSJ3R9lb za9Q2f32hjRIW{Z&7GPYh^*~A*26FTF~--@LgMW>6xCIjb48zKK_$GHU7ar{g?6BrE7}ErlT+4Ine$4 z?tg{Ma3-#=+)}AY&zEd%_T#kryxVVEjI|BsTl$_anHp}zp8CcX6VYocv2|lD9{rPd z$FKk2KO47RxDxY|Tk+t9^NMyRa=Tvjzisuuvpm35oM?S89+NNi&OD0g^hajjj^TDc z*2{!8^AiiRlW}am6L;T!Dwbvks&`GQk^S-F1e4Z^Yk2(TXT~~b0f5^nI3)x67vdTG 
zc77TF*+`p;6OfY^oyq@Lw>CCxv&8f0%;(j=`TVB3zxDqqYYsV$}+@(6zJ zV;LUr6U$60P7-Rm2^O9{bY&3p^HXupaX!V+Q~aX);sJH<>ppRGTX<)sYv!FWO+K$Q z*lop*bjKoqmDXJR*l&L@Rz&8GV%k%4GjZ3^V{u|}NinHfnsjFwz-zH}^Z610&F=4t zJ=VVIFxY=6Z*3p-2uK`s`vS>c$Hb{Cp_(H8otpB5F!eAF=ez$yolj~!$9MnPIcj>L z8{N4hUI0KWeA^wj$FKj|Z^w`R+)s&i=x@ce#&t|cPj?r5{Of<~e~JJ0J>TgC0JN4Z z7KA9SnGx@7Fh#_?Ka}n9R=)VJ?MN!hqX?&e<{BO{E#n6I0zwA7CUb+e|MR?|^40k* z-OX_h_;pU(G`=!_2rmQqm>u01wQ8%{Vg)lH9iOVK?6_OoYxN;pyjZ= zs1Q)+F+^|nAL-8;S|Lhuu@5NxINjxQY|aADl!?+ry9z*o!91C&v`gBZ%X1EW0`mI7 zi{&KSus@KmkUg@HIB8Knl>tnq{9V>5dFi^@X@Lhfg32xn1Sk)k2HBQOQfXEGW!^wk zTSz^Nk=!WB?C z{4}6SDU-jVcb%8wK2OL5r@L>HHK?o?v@Oe9d^2A1SV6yP7l6;9SJ#a1lQu?_(4F(*F|MR&r94p<-njpz@Fx-OI^z%O#oJ^~O9&p}H!wca2wuz)E^=9Gb&NKCx4#4IiEejS5Z~*JIaoXm4+N0nWUn<5PAcl`d&p8B^+>fR<-E@ak$KlZv!W13dFnKG#?3 zWoHHKft>=0wXp&Bp{ftpG}u#IuVX%UP5)Y-19Qyn`ZGpW4wvOuU*Pf!)#6vJPGzxt zr@B96*Hh--yL0UHITQ1W>1L)C+fCyuhy^F#H}R3*`pWmpC|}RO-f}M!0#pb=2_P;G z7O4sBcCc<4Yj#ws@em*w8iW<%=2Qs;bVDGQVvK_W9G#`{$7X{D_fSuc@1s)(-ozR)IQ=A_xV7S>ZaBRt6 z{q}24przr`t=IrX2M^QPae;5TU;|zqE&A`i^UnC%ullmMcImPlvy52==Y}Y}(2S$z z8!fDq-`QPP*?jxb^V&258k~+8DSUZS!V4XgqQHy>1CLBkpP)4*HzyR#ixfT+}=c4XNo~5xKgdI2jF4C#BP1Ck{vQDy-bVhO@J2(*0fMI12y;~fbvQ-^Of{_ zKv}Yr5Be+QbviqoCU~JM=}8&5ZeNc8dxiFzo)|MYp~E-;^!Nhs*nhZACFA7BaN}=$ zZ-R$qnU;AAeID4P&ob-qRq>TUqUBNo7`JJ+|KtN_$fQN$tbo2pevW$#L|h;GRo0}X z&`|0(AH_NIAP1jlT)Iy?CXa&%2rU!EjgPUvQF`&Swf-!_dC^aK6WIZ0Lo;bfhk12U zsYmpV2h`(oKge@QQ*IS;UhF&hQm{Os&KugLA^Zn=%DTjZQPUp4k#eemum=D_*aXPr zvENM~4|1dII1p$6_-rp=;u<{S$s*$(^}r|kR2o$0#Si_kvliR}XquYrC_jB|FXrT@jw~IE zFZ#kSh+8+;;)Q3QjN>Ox#VcO^GQ7IK{l&K33kwS|Ki~BNfZzD_-;EDG@^nmhj>qDO zGjs?F{6uI8Xgo0~FirYFANEk&eK7-3RNBW&0XBh3%s{ruY*)#z1F;>|X947cTn4&1 zAtgCT*4PTZn>urVmcAUwYvuK|=wNUuTm*A~+XJ-eniS`+wgPZpY2=5(>YsS)UZjX( zRtn2UEU(4J&5ihruX!N8=j*;E)}DSUjwn#=sNZNWcDy2ZTk^g6>?H;HTaqJh#H(*s zdHM+Y8y+~%xS!02{d{}^6xp$? 
zf$HKD1sb{eLHf30hQ6+!TUm`;TT15bEe!xnqjbe~r&~LG)*!b5#xB_bfX_g%D*`L+ z{kEawn)E}S6vB}L#H)o?c3-IeetR%bJVP9|8=JDfR(CFX8o};~@TD`y$)FL zJbN{szqJ_wxvNU4-{s*ow(vmZIHn|4UHW`icz2M%|4{H}y;1z-NsM z3@Dm|e&jW`_9L|xpq!Hzd}EOP1NX_5#{~38Uimc1EVtw>_7QYC;Br!bVxaVbN<4`eGo#}iq90{0TXVL#u%V`3=b@bNH=(+Pa6 zdXsaowbqlisq-4|jM0~6q(8xU`Gb&sRm~4%zK;b5>8xc&@M3<@SH@SuOa4Rn zssErZ6AsfcP9NvB2HOvID!`gEt}VX5*sjas=gXU>-xChS8LAK6TZJH}g^VsJ(Nf$8+CUu2~421@RrwH&#}E={rqd>2ETR$|iA2 zkzBZ+w*}RXNah@Wt2*q@BroC3tn~YmMYMEHihkBRd0pYCPRjaT`Vi9xUlz%P`are< zRlmyT38haa_UUt4>W^7`$@b`cpSOGRbo{@*>g(bSU+_io%(b<+a9CHM$m%xiK1{rq z`m8gh^P z6;o~zv(7EWO~q0_{yQIxW#N@X<8-$j_ng-FMsd}w`10)GMtM1=3{#HW%Ok-3dM|&9vWepayiM!x>PqGw$1CpJQ_Q`iDyJ0x&#(aC$YM;( zPPz+=vcY-JtDZS?D*n^=|6siHPyRqPOSh9{K1tQK7LIt=rmy{*UmrjE@BdY7s2&!g ztb4~R$xgBMTwAuiO}r|;^`F?C2a?q%Sc}5PTrS{wL$N7~*HQJTzv900pi7MtJy$0l zh=D0)pRF`-GS0;=7za6xBX*(kJ0hC5MjeM|bqBz@z@7>%02lxsaN+Cscj@oZQ>HtP z#D8Gtx@KO@)fE&OPoOh&Be~<(0n?myCV-Rc)OFP>V{XnR312g|L#NDjgEQx&xtHbL ze$0CU@}hY;O}w+99enrzAZ1ItV5~CXF(UUcMecF!aj~x1Vw)|kEeFae?ytc3$o5M* z*F1=pknsg5v_8D`R?oCe6DRkUKCNhfkId3B%p1G6H(*SygU zeY7!f2sinm0sf6!X_W@m?#0t}tO#(=tyVI@AGtneu$8{cG8^L%LIDrPNGGmifjf1W zH~BN~Feu!}iZ__OZU0P;H=2CVT^?$a1|S2U<2Pkn>9;eDF*HEE=1Wy$k2$Z$Z{&k8 z%YB}=NI+0-UnHE&AJ8v=ow2AFxMO3w&9~jhuOVNON_@~<;xFiRJ?=ZWmGW@=6+ZyF zre9+O#heHE_b0ck3*rob`EjQ!kzo@^`FY_^-!UCt*odSGEyDdTI8-^8P< zQQ0aN>9MxQI{w0bs{E&=$I8F&x!I~d@rOlGw?!@ReevsaA*DPrEhPDx@95h_fL~cX zzoayV^d?Tohy|FJ<{1yffS>DW*$Q-n50OHnxbTsV*w@O;vk&I7pvQhv06pBZMK%1X zZRc}egR+Yz{ZT%|B^5!Y=~Q?j8R}f=0E-T3;zRW}Gs=j3$zvIizl@i`$c_Yq5H3Rk7Gktln4=9_m!t?Lo{*35%Ttr77A`j0%3X^5;b_{kl<79g!9yqUC(CA^?0N*EGJTIahavs5On~}W0KlBye7EJ@#5=9ITmT?^ zj+7?mrI3qSxK7XlK5orUr^zaU*7hOA610GMVAwOoP1AdKrN8J}YS;cvCOUd7Uy zKN+X*{wxKGa?U&m)qrq66qGY5qK0GVqN4?CXGk+E24f{cJ9X;5^K!rPY>DQnfV|lW za#3VZp*R_*>)a0kML#QY*`SIA0Lxde#%F%!XT?BS9=!KroIiJa+_=ih@pS%()p8V* zuB#XWz&VYFbV%@ZW8ev-fy3%?Fd!Pxfqobw@=Tqotm@(6S$1^LTNVKL!GZ>c%0pfn zE#K3>^RCn_*xpWT&kOv{iQ5Ra4YcYLfMIJUGl8XfqeXs z-R{oNI{@?*upXPAkCUfQ`N0c#o_p>EZwJVN0Ip}GZx4WUjzi^v6GJyYxMxzsa;&azcJ`GWQUTC0w8%A_gum~A2 zH5WQ4U+TBiQOad>h!^-WA9V1@%xd9&(#yS< z`i<+?`5S%axlAhaO75z~cc6o`WQh)uOi90P*ba3!^H27S(e|Vx{RsCd=k_efYkmT7 znI_41Odpi2G+`S}e(Hx7)tyJRr^*hHf>5@Cr&eP*rwnL=UVuZIXvurDPiHk$_16dT z+{=8b9~$dKe~|@IB)ExsAlK#HCMcsU9B2;}m@s8B8vsXTqIm;G!Y1yj(SUkALJ_;8|NFwst@MJJ+A3@_>_Tnqmbc{Mnu4UzRSgT_M9JG z6#;n%fTA1ZME+d8iCvcblvf%H09=pGDd7bHJFfa(eBFEQneKdhR;$%&NtR^GHfE9`0c>WQAvo|!fX5Ip2@k@M zFl2&27?M0faCne`2VqDagqH;4FeN0AI0PH8E!lXKY+19^l3G3A@yz%Bf8XD#I_GxV z$>;w*Z*`x$_pV*FYE89jRqfiftIb*#NR0m6ah=MP1pwropAbK^RflAxe#Kr!a(}kJ z8CUm5@vk0yES}NzpZ}u!<0roNtK#{0yfrR9^|azWY*PHqcN`zJRrh4H6@%?7a;GB& z(rpF59r3*29RRj8$mtI?pcM`C>^OtYia*e)w|3N(X@-B+9&*pB)P0&9B$_S%k={#N zXB9bYxoyxF$sf{JBSUZ=h`zppI&^{ubxd2z*2PT>VyeAjOH+Flkke_jLmiP3I{>hf zoQLRhO|>@-&c

JY;`yM>6a#dPf6)PgQos;G?hDhfhHe1M~2LMUliCY!&7=!{}xd zPoXQfiyC_~AtPR>8$kBRoIETlU{W32gE12~^U;^hOeD`U_udt)ZpYuad-$pI@&Wz0 z?^9kKpQbo*z&Cakj~`iGiS_k$+uB}lH#XK*YmdUaa%oaeNZez&J4N|`%SKF`bxW*+nL&f3; z(DfVyDn9j{!pphumAb0!>!uHLijgfpimL8;8m07y9k>h=T1u~em%o&(_>wg2BmEvz z7(lfb+6zZ$VF=?l%Q% z{)}1N_u9s(gSMU+@-5=Tm3RdRrt(m3fggVgy)&G$OUbt41IS>J}}34$^TSI z0{bHFPkBp79+Ll>-}Fq&a>2datwkqAXBj^jC~x{8O!!=W7GJtvzk@=(+!PppIX?uI zo*U!@OzF893VnxxG9|y|;FH8tnzzy`eZXNlZp2j=w9fdMfpu5$PkFnMr{pC8-ZEd! ztG^}tVLPahw5I$jp2PuQ6GeCBcr(naZgCkH3P zv+}bQa><_r7n!*KCBN>`CL7{k`D#|6(tH?l=Ya%Q1D~Z$ALala!37SgWy@q1k^%TV zN^_6uD9@roa4DlGpT&G0ABaZFvZSS$CG#S0A8uZNV;fR=>_GQzKlK*$B3tv9eO5?+ zoKK6xr^gaXGk*djDp{&dOaIFQw}yCRA&KX4WGB3k!owHhCE@_ykj5D|+ub;M=3IQw z-~G{Ot(=Vae&mU0be3XOG1uz)aaYA#`FjICm&Kdt`?0QA?BuC+#V`l4d3`r}yW40m z=EwbLiq_U>E}nht(Rlnr?~DKP8^0D$KJ&hK$*J!S>-;WH={%M29L zv?aqfajWhzxjbsL74Nj8H`kIrkH(Wbz4*0vKOTFsiIL)&TNayf=dq1g?JP=el8Iz* zyT-Pr_Lcnwyda;$a9BRhZLZ9#Qjp2Fk zNYUsnMQ3eIJS|64>5I$jiVIhL|HRQF@gsl#$Ku21pA1>KGaSc>^A=qUN{|2)uu4fD@8k|`sZ<3FeiTF@i)pf5jsmolJdezNHE8f(`+ zq_!t)e#*aOY?%}vLNeAQ?EVWr*9U%by5%f8i&%^KHqe! z*n;{3EGTOesO^Cpyo^CjpKK?6sC-MI8Q%%OZ!RWCx$uCVE){w1vpPxG!EO2JK4nd( zXhCnJ16*T2Ie(FLCgE8h`8=Og`Zi(Aqpm4yJSv;`^2oy^j&*#2-R7czEK~1Ke>_Jr z|0!3V5#@F2v*G~g3rIs>9{VVre%5W1ETk)5qYOpr{7T|6io7StIdf=8+|cKF=y{ES z#h>ftC$9!?syXYPLgLw%jXNLqCYY70c#{Wws%4DN5`Gr5yNKndlaih19$BAk=8(Lc zm7mV@fnA+fRT-tZ1lRaC$xa!4k3J3-lO_R%LfyiR%#i_(4BKH52%eMYW;|T9|B|C6 zFSh5ltg^N{-RGe`4?t7CP@w!}KCa5@Ky8$|OL_-1hVlI4PsODR7h&vP|_eP2-nd2Fc_#6Nf;qSrUS3D*khwX}f>sdd1 z3~iMK0NAR{tt>(^NT}{7khyO1BSt|l*sG{i8vPDC5J-MXPssZcw0*qWk6>K-KYzMJ zM@vfCn1As@dhAW~Vpo)RKP1+$d1_`(kM_h%=lKAictrO^7mxn!%XLnbw3P+n^2<2F zZ97%0=>-hJoVJ&Ial|KjUK~+kFd_^&beml-B3SISqSNXserG(VE26o^p^@J@RU@1*a1}k!2|WmTB4j+s z@WKxThHRZOkv%r%TXAt~H{Sh$55=c``k#q&XYYvje&7RM;k4HudbK(OMy!!HFsxIR zHQ(4jGC>!DOiP7wRIb!rY3Ob*I-*+#64oj7mG&WkUuE=l@-&kChVkN+i=ulJ8>=g^ zv9aL=0OYxF;i6>A0~R{hY=;2%+|Y%4K>@+UHjwtoVutK={N9`DVPbMNh~Svzxo32? 
z35C->_ey2uW1^?#H(tq}_FnA*&ygLp$|!hkcmgwl`_l*41^n>LKA;9Vl^0~Mdg_Yi zQZI758Ju1o%+~UBSftM{Uh+9hmcr?rH;o%OVTO z-`CJ2z!vMd%a_Jq%W^dR(CzywU)roY419|2+MbLiI}6JSdn$a9Klv&0(`~0I1G%~5 zqxh)0^MsV{r|y$)52A&KblMgx;15H6^BQooP+kuh%7bd`+#RXquX~Z-jGRs=N>7ze z(q(PxKez@L*BVfQSU9*xV}ftAPX6Q~>#cTHA@K^_0Fy_ytLB{Y2~}r_2b4A``X62J z#s_IerS$1JStwE_wxP~IS)EFP(uMS)qXT6j^#UdRcg!dIGClA^q+%JR{!F)mJFJiL z7L#zI5?1ZOKV_5ul3)3%?N}C;*$k%n7V&aq;5i==J*1Q9s>Me{lKQR{Yj){Z3rCwiE4@Q_)^pQC)0FsX&fk51865wmRd7 zqv_Aa;(YZUF!fvTW zN`A{RWoJLYwGW5r0gB21CM450(JmDdYJ@YtFpQSUv%UZ3^_}?Cb{L3JKt%aAfm zIU+;*6SX1lk~6m_mOYa30)T~3A1J|kK-xl{2l0G=Gq&XLe(r&f##0jE>t6ec_~$?P z#<=+Ix5Wz|eIopg+olEu&5r$B^mn7beInXq=|<-GVp*`bjJb_24q~IML^b}_^C_0 zai&TtomNOmpF=Qn}mxxD6bmLI%(8a^TRKEWqannaOt|r z4v)w+(-9sw&@U<74@D*K>{oP6PRi?yi3w`S{3=`8M&hsKOMclJ6JdOU!1~oaKWGA@ z^JjVJLEiXN<1f4cJ_;|?R=T06g6|6v?cMiv?zq*p6`0TXQ#tZ*Xa@k-m4tEU5RfPd z8h`Ep0A$}&@_tO2|F>|6M#GFRz;qQzp0fU0Z_<%`+zieq$a44u$*9PWax+;bWLn_| z-!GW*iE$Y?A~R%=Is(tMTp4HGNDo$6+~}YvIB6 zo%Z1|l78t+vj1ekn(f(T5_BnYXKo%v9)2vY?hRv!hy1tRCI6j^^jJ86FDEWz z(cI}%$6|GTA$GP0v9ps4JoiU?3S9=V)?SV67cazP4}T!u@z&pqxBlL5@`1}Zdt@Vy zE~{V1aMaH;Njn^{b$>oI<#U<+p5hQ01v@gfVYTqbLw+Q))|rdbN854NZ6~8E8yhR; zW6^=<77p@3Vng_(Ntc7JmYl5Bq#yVFKuJ*hF>WYXH;N96UhBA=evNW&bM!S<13JgE zql(#lgJG^N7&cZNvt8ynh@hwFn7Wm=?|RkGjMWy@Zdmy6(HF;Y{KQh+v96e5zaM+d zM@jE|*B6@{DkkZ)7IJZ@#zu_$+4_H=x_Z%!?vY#KLoZy8-~7M}F;Y;(x3y0$HRGNW z8?oG3OgSl5%g=j>E<0d&to}cJAGXH9DUw3OGUQ(NIUf%cE%+V} zCc)c~-{9d}Q~Kg9;6|HoanHq`>|sH1*y^$4-oc4Dw7awxD;q}@3$|iG^!&~5`~J8* z=*1G>kC*ZAIZ%8^du1aABI+Bz^;_bX{_zjRMYD4dFJ8DFPdt7;&YrnlV~~Z|+GPhW z=~On(0}Zu$m&az9$E95{E$-opezbRfTwxEiZGSUYsva|T6CcD+jvGjnzW^6t+Y(KT z$%#3TCw>BP`5X1@04n?N9GlA4xCk6BhmgTTKf`z*JsPL-QpdhzG?jk>=jq$i4pS#w zC+wS+92@K(X#68c8x?vZB0|i4nKs@^k0DI z@L8~gjgO0b%FnTqe#G;@Say$}PQ3Zx!g-a(_^?0NQZDg{-k68=q5vMO6!{4c*B;Y? 
zSKPQ=Gwv?3hWz>|80INVvvAsHSeLqok0lR_6O@mKeSSl7e^s`TFO{X7lkVa~?3+Db zEc)QyzNLR?#^r&4RgQ*`wD>4c1WFg%1x?kr9SeS2fQZe$U zeWbo!KeRcoGA-awu_^K3js#zajO9<&E&4auLK^3FEP{WaucI%>F^|6u@sw+V)+pOSd@U z`;^&voGv=&o8m|EtP2>R!*!PNw{=8iwG8LN&tQVx1Qr-nAwiaH2kF>|J@p-vTe7CeIwu z6z0M?*(C-+!6TmF0^8V+UDF8lx}zU2F^RS!1Ay)I_gS?d;JK-aOD6K4qJ`ZXk)O0K z`$*Z0N*vEi$_fQdLzOEuI6sY4*M5AgWRI)iUG-yq%ssJ{KK-@%0i}Q@#q)JYBXVc7 z4ZvPnEu2#iJzjEEN4VE^^}8@s%)n;`dU1SZF%|~>*yuE3wWHG5`}{b%%}%u2%)tn& z(6{G$G1=RWrG@LU+ErY;`(pGqpNtpJe^@c-bLxY3q^fOIHHu+R@vV4s94HDJim=Aa zJ<8eQ^J!5$XCw~enG!RGEJ6XixDie;Jc0EhTxt%sL*!)HV{c0I!o@juAlG$~9}6fr zGJlsxNt>g794#3M5BVXd{c*lPTEE$kt!3TWXH#G8tHO*cKv-yv(oP~;&t zpHoaF&t!i=8v$=avDX|%U+|#Nq$ArP0B!Z0GQ9PVoV>H=0xv;Q1?%(*y!{=7SVWR3A!o>&=jE6!~Sy5_RdIW_P@ zbfj^kGx$nFx8Xt)9li3nw9>u$%lVP83|NNBCjfX$(E3%jc3X`>-tX2kr{gp4|MXZ~ zYWe$fYEdBji*m4U>*-!Xhzi7-AqtHPE5>3kDw>Y60ok~89 zb~i3x-;B5Y@dNR)SAJr=@|CZO_r3oEaqZeQxODy021Wh`5OOKWRp+~ zqa4|SXm$@#-4zaSd(Y@wM7ND{e`r{m$~t_(fj; zL6m7gH;|z$Ro2cwPk=y>ob3y`byJ6|k9(P1 zud6@8dN~4wiSu(yK9|HYuL#dFMV`b)L3@JdlwcmcVt32c790 z8li*hgi0It=|I6#q1vNLP4iJ6y}Mp9QCQFVq^~TFj5(*D5M6#?p6QgcO&1FRV|@LR^Bn|9!pk-lGOg-o5Y7;Vabfz)H#-&9Y~c?eveZLLg*P1{Z8tK~?y z??r#$n6|$vr}P&Ue=RnHOaMpPN%}Xw!^9*^Z|`y(U0YOeu@JBOqSwc(UiC@wuYc)Z ziKY2?-Rr&}TAgleUb~u;j)S2WIc;ul#ee$U-;b@`aV&I>L__^G6P?5scIX1^8e6?V z28W2_lRdsxnZxN-=c(&idR0pDz(ZEGBXr3`gpPFpnemCvr}9j=0T)o(bJCFZo%XME zWDai0%zc@FZ>QxTIq|yvKugg*aZ}n9Z8W!OomUxu`)p_6x4-UNUX1VmbDtZpzVnWl zyS^Q44ZFOmAb2q!f?a+5spwz46cfprw=S4y8jNzMp2<)?hrUN`kx8@Z0)T~nO!`Al zbn?lWqw7cf7Ivq@7P0K8#2|^lm95GNrg?a+et&N#ec1kzc#{XRz2Y0k)xr%`Zioh; z|EV(Y(8mh^RZg-YMq>d0{zceqs>Jl?_pZhD@hJYyd!C4osN8Gre@Xnz54|xaPdywj zh-V_X1;tJ6)^ar4Y7hP0*xtGjS6?_Uo9o5WQdff#Ch8XA`sOgMUEPYI27e3mxAQrH zEJ~-eR#n4aE&#xX5rc9w@57H(FRE+YYCorZ>Ho4k*OrTSHWE*gRlm0{8Rh~#^fvYL zvIWeLwn&<&Gv0(!hkp6CIFmorr$M~(@iNaubj&x}0Y?MnxwJcoXE#SNRxHc{0L*V+ zLPgdrv!Q+UhHP1^VN`Qo1BQ+l04#SGqt#weSCoAsHB%4fnG^F$^Nb8}f!dF*d25JR zW~6$D8eHwkZdmXzSy+hUXU=#+`Nhrc_~_Fw#6Wg`*L^RK*L~SnhK7dTC7{#JiPU4q zjyX2k-r0(Cx1EWz=Wf+tX&{NJ4P3h%>+%%~jdAR1K>7XO^X>7@cf8kw%ANki-~N}n z{P4vS#*zo}MlX>sWt0m(Fg^kI=ha_fZ}qy;OO;dFOMYxi zWS5CTj`)BC_a&55y=14)0-%`kp0JX9rZ}+!?7rB(-#GR)eX`0D*HMWe4~eh(y9!g9 z!0RkjG^3e^pnkww;zaOMHx!y)e*)ofd1RgZvTvzrHhZg`OluApP|F4j=iZ0Pw=}IWH zmHKL%nAMx~xvb!D*_sAqIylmPZ-!a9%2zqc9=KH>RAl|Lx;_S%d3r0nYLx|uKe~ZlvA2@&cd!_F7uH`jPdy&Fw*asPyD6Y zD|}~MC_km^{rU0$N=&EDB@g}OoN@`-FISmmzCs`VLPDd?On0L5ilC%v3M!0nk+=BI z^7-I^@fAnt^w?Z|+E70@?snUbo7jGHs1EAf%RVu_=kNSL>@Tjz3ySNuxB9Vu;#T*4 z`;&%ZE@D!AJ#T5`#aqQ@-%8p{#A1Kd2@z<_8yHT{~7h|+_E%vTmi9h|L-;Y1| zkG~xko_}rh4{c)ZdDh;#sz1Wyht~LQXf^IHfnrwdfO@GnwTl3bFr^R{St>MOIZ8 z=@)qGEA@zP#F=Gc+@4~RIbQtFzEw0RWQ#@VqtjHhNxUlj#8o|oMSBWTmp5*W2cCK% z{_{s(RC+_zumGSP_ntly-37%MEXq|Z&iLB&mxeOOYw*aBa~l*gNm*swko<70ax|}^ zD-UcztE0ejgI9su))Wut(uqhsj1D8)(5Wo6!bh_pfw8&~kZ}v-d2Us3t0yV}n&GH+Sf5r!l zNr;(AgTLe}=bvj^-B(+J9_CYP?2iwm3@|^RZ@g2rc~<;s`}dd!Jn7q#8|bY0NfgqJd_gLClgsAT&v8NL{vC8aMOYqsZN#qvpQk&5o9wi%Ku%;DMa)Au8rUn&6cmt_v0P^XGTy zn6ZC}+zS(w-)|UmQuIV#^CrJTKJHZ+x}P|7G+1}G_US!oURUygx@|g`Y|#aHdBHIC zkFP>zDZ3kR3Nj5Dr9C^%GAw0^&MSESK=~`pl@{8)dw_yF7f68v(!r>_vhi#`l!Y$* ze?I4EDi*1@XgA2OE>`ic*r(f;uF(neVV_}xcky(9v^(Pf-zrwRiV{OWR zOvg~7oaYN_uEckMOF0ThmM{?O?BDv{IeHgczrK! 
zIkpii?N+?}+)|uUbw`wkR>%}rwnCAmAM{ZkYv$KF*in5*>bkCW70X_X3+LY-mo7dY zd%Kt9W49zj#dE;|Q}|+eB{B3+a+8iE3>E?KqFAdzTS89a$~J*+$|4TQKJ`fb*ok6a z5r*w41a@lLv!uuZfZCx6cmWw3;dz7oi*2x+bDy7ZS8>gaJFb)YvuZQ79n_t2sEhuH z1;X^9BgI(x*>sc65)Ll_ps!_TP?Uxr0ADTwknbQCQ>C7>5g!nc^|?aV=l)A{0`yz0 zvQUDZ1lR$9&!93k!4F{<;^V=&5C6e;J|KsFzY)=BN(T(OOmjy6ENID_b5GcEA7aRZ zA}~&d1WG55Cwoidhf$oV8(a|{&6)`~dNXxK-Zhaf3cUh|vz2#{+!a`rtL*&N&63b!Zrgqt@7KKjUGe%t@^zwq;% z`@TEwjI(FY#`DiTry_|ZX`JD&dayC;oHR~-PcXlaQ-;SZ0}ak222;<~*fC5Ut;-oc z%Y|IJ#8C4S03*K*ScATHCkJ~cPo9WB`{|z^M^@I$7O@%(59Ex|rvO}Dg@P^1DWG3> zbU1J6Y|OU&ZrDi+jb}I>co03^0!k4TjkJDi#^3)k!3)aEn`YPI?eBP3oVxXn`1Jcf zJwE!_<8kHcW%IYtl9N^=LR}s|>l(Q87VTntG1k|XY^%`6E)Q&7yt2G3nX<)|bRj2?9NFRwoXE`eSWA$d zj@*t)>8$hw&=o+s&uLcz<0!Jb0hBM(@<=(3=qBq+T(k#{qe0@cy0~9#rQ*%bSdyk= z64*&YyAW`1ZlTz5dGI%#C?Kuq!yvj7XTf7{Ay5EZ>*6>YVASB^@I958;vr3W>K0N( zukcl`oxeaWgU_<;%C&A34J4pH%2^i0FAx}DYvAzqKf34DYP!xhNP#Nv3|a`(!84OT z0TD8S2jNYmz7Gl=oDTp$=jc1{!5hCr_{v1Yd~}OnPG3Me$1I~%CfVdE{a_t^OlT+E z5I@q@!NHc0KnoRdl;2eS6fYziS0&kl2>}mYRYGl9w74DD_Co!o3~doF&9f&WA(tz3 zN>T7Po+vtTI=(G~I?GSz6;dwQ{&=YBG*xanQ3rXko6( ztz~u7a>#lsZb}#*3v}#b+l!$o7LD z!5`1Fg=(S4P?&a5Ui9f}^;7sEcT|FT^{Kr7yK+uD1D_{aNLH}xx6!Zf#?ICx?r1mS>p%6L`0CfZ zHV$@nqbt2Ny9&OKt{dmp<4?x!3kuTrdVY(H?|Sl3)?4|jzh=Ye2fXx)w**b2l2tM@p~#JwN(j`tm`q_}#hk099c5iD~#}fPy{S#OpnDWm~g^ zr90to^u)f-)xXjAE7Z-Exw5wd;tCDWFAjEMAYOjqj~|MMl=L~DdPn^9_kBZjcV3Lk zPe13a{#!E3(O7jT?&~nAIk*;=pMOd*%66CPbDm4xqh+vc8l?DvP>0f1PUc57P{ z5F<3~15}8%X`I!F=+8J8U2X@B=nokf4TV;|UiD3gS>>N6Ec{U4dSz!E&s^)pV7?s- z9sD+ZNXE+KXK2L^0JQx!y8x&T9#$+X?&!9gZqopgTM*OMs%&8z`zt-SXXBy2e6h-l zA`NaEu_u}J^jkS`JWk(nSL}`s;v-Ky6&J5<#`>vS;|sp*FU0XPcf>#gJLD_li`AuN z#f5DRg8GgR@3`}vVxW#<%x%T=t=QST5@*jGj|+-}KlJW*$9I4CxAG~vSdkdfPfPa8 zVhzN^H}e@VWnaOh3fp%2hR7v7-yE2i|INt?DF;sBlYGik&)qiFR`8cOu%|q>%nyB& zZ(@At#c8xn@xa!N*c5Ni0j%UE)|J+MJ->kci>&en0zHb;RcF41!VsM}7#-+_?8Os( z=m67`qevUmO{FohkhTcj*b9B9uCWsP^OOfXvzy1vzAC7k?&lm`$^gIbzO!WYedW_1 zr6V_2oqT{BoGQb4y~dJ~v&2t+Ig@F5#z$EmpGX7a9=)oVG*4-ZZX3D>O{Rx$@LC64 z)cVMq=Tzjl-t9`M9P73HzHBdVWc~``*N3TmvixD7_`w`+U{d0a9REE z%ZM)P*NOZg{h`4=hO#M7=&D#}I0Xub{9sY|<*7`E`$T1Cq7I&jldvP+koAPVf)geP zx#kO3OUF;fYd+`m;|*W;O>uqPiYK3b#t%>Ly6bNBM++YRtCSxM^!t0Uyt)#njvw{> z3jNmhw&Lm0KynyHW1_fOeK2p{KlIMG$AA5eH^+m2{0Fk}Uc6j=@A1Xu7^wdoD^@4| z-j&bqhRfLkH!CZVoObexy44DSZc@1 z&zy{o%J8Q7{*cd&rk%kja$-CUJ<#DcRF3X%mF&FL9*doW15VA0+TS$Y^38MZIi3)* zOjA9=NsPkWgTKKp8W&Uti!>LAyChnT6k8Z1RG4fwKM;>?E4I^E;n)7{TjFQ_#V^Hc zUh^8+)Kc^|H9iqNO$}bWb1QsE>BOVNO^km$>ma3jJwJlL?vyX*KjBNf;1dt=j8alw z%BOR5r2oRo{Dt@=-S&$efQ9&C*SIYwY^KEX87rk6^Q8Kc9QPX`*Tj!1Pc15yX~LEL zooYaz<+{dWp2$~L=055?od&^qR2hTrRiAGVp2Gl6>Quy=^iY?1S9{Yf@Ye;HQ~3a# zE1z4D&gaE1G%_lv03LqWPdz7&B@Q)y%FW4d$ZlFV6G7G?9A$Bhr0jI)n%)yk`)m0I z7LO38`+lLd_PgkabeAkNIA5^?&b`QLm=A0!e$z`HU0XzMZ?)Z%VO9_K=N9CJR?;{tgWh`Ra;2(a317FbYC?7nj zKc=0amkQvaZ?g}T!B8Ikf!BSn(%FI6w9-Zeu7Z5Fusk;)Q`#2*KJiW)qbqg(L-Xi2 zL#;1yB|d0{w(&6ipK-u155%1%>8w>w=Zlmt+hochSb(D=@-j>dh<<#4e!fLs^@#x1 zMJ48cC|@Sl^I@`ziSMK{$A{$M$5_taH81u16OYiqxUbpLP$+TJrdTXMSwABY9f+23 zvB#IT33QkJ2A_~)Cjj@gmVxL-^tuK|UBG~!_l^pw79B+we34K6_-v5_zih*x!roZi zpHS=@1J`-x(GQx$AL*QvZX1=H$Pe9o`osOO`g?fv=Lf`RtGVp_IcJ_eBiKbO&_59DqGb`%BY*UjwViO!KBXGh9@}ZsU$l?k<%fwsy(8lLRqd5@$ z^5^okEK>B?h+G8Pm{Nu4dxTd~lPr48`#i8s9zybhX0D*7j ztI-7ywiRS5kNjG(*OYb2KR+u9EyNSzhPJ9Un&0%7erRtvin&}sMV{1CZL`_FnI1qt z*mBA!+d_T*Gk4HUdyqbfsW=BLOYBNEMH%WQt{E%-W`VC41Atd`vxE6W*Hp*YL0q~l zV+O_#_<;q*{=9TUd!{bz6Ix|L*#_bHfIa}56Ma%RGT?KbEg3m+dRsoE)9lz5ygLaC z04}`e|9C+1yx+`AdtSlfb2SzOB;X#Z+pI)_jfMeq`2 z#qBM`byLh_{rdS9Wx3X0TPVguQchZt1*dZ$1*lpBLZf`Ay(kV@ZCxD+t4zb+p}KTs 
zFV5X_ubgJbD=*p$tr$xIO$G620HG$n6lz_)gK?=cp94%}OAD6O#{&nMx|On{XgQYa z>Dz+vO%g2X$2TUPdg_^Y_Ngc1OTY9>qpuF;6YqXmoH%(bE?&B1-qS#nSHMoM%12=N zl;0HKOEf#k6?j{vYUEfEChq)soCX7Q+`7gQRD3j|u?(5jDtrqVI-EyG(vKZG5`Xs3 z{+T$wx~@S)%k}K>78iO~KDL6UGw^D0H9$LB)kO!@U|V+KN4KnYMeaQ8cOYUOlXtFZ zENnYlJ3o2V=ybZ#m!dCSxf*})_CJcn<+XV2XM9eaf8j#B@WKlogfq!)-3kwm1)$Lx zihOWs)4⪚vu1=kq!M~rd-7y_MK=3#!1w`l=dP^i%=eTfWr7=)U*UdvU`aZCqTe7a_?=$; zg3I|N)5?QO)H=7 z%NBa@?~Z=V!(+Cb6U~Je+m^Q?H`j~xye zb*b&=gP|ONQZDnRylgq#hfUZ}`^&e0p@%*Yulkfvj5}^S=Qq%wd+}m?_~DPl=4JW4YugF|$FaP!E?nJcb{Ewj^WYU3 z<-ihOgZ%)=8yYGvrmM&-2Q1i_&V8L_YW{kyBEpw#&-Bx_h6G#Ley|JY1vdql=q|Fp z8NdZ3?s8FRtx)R$@_;-!DFz~W(+)iWh<^Y?FnQUcpR9Au^3ArLe(Y{f;*5gd&%Nzf z{LQcXYVXIjL{rlMXzkQ74IUb?^W-zJ!)JojfACiS0RubH!2$qJN|%SD?36U2ji~L7 zV^8B+-g3uB@5hSz%ag~CM|ZhvJ=64c zGJp>J8T!h8L}}{RynqWEQ(1+n>UFyckO|mjgDvcT5|{d0(QnVkzxv~k#6!aW>X)C5 zpZ@-@jkUq`xc=;O>eIJld9fKgTca3=-tL01O}66sCm-{+wQFl@4$h(V+SQ%7a-}C8 z4x-go4>^yoM9=DDH89OVH|O}zIomFuuiybQfirdmOZYy!JDRDkQ@3I&o2@{6O3wB* z41ATyXO2`lc6$@mKWpFF&pU{o_79fkBRZ%l3dt^!YgZvU{Vov=q-?xE5Df^tinjullr4k1u?~SH|;K zH)Z4EN3q`iXg-b|SD~oL=}eeHU+$d1z4S#fpIZo~z9m3FueW#WDxw8p6OTSa&3irR%qiavJSF{B$KvU6>XnM^(c&`2xzA48w zc;K}{;-(xeI>8hC1xTZPm{!|$(uZAg54j{hRe(r^5144&Cn<6^yDykm|HoP7-4k9q z=ZCHw+X}1?{YiW(QPTb9A5M|qA#mH~IHkREQ|n6o9n9?*XRW8^bDUOU#+j$ z<$f-}t@&nhDD9&HvNPOB|KSAyH{=(9cZ&N^`KhV|2aO%sCkdH|{Q>n`o)rg>_+_ca znS9R{5Z_e4iXT>?b$#eQakk@d#+js(-$t7GBt21D=vT7+!6AG&o@|dwRC_e|y2LoF z_OrcXqq>J*q~R640*PDa%4&NB4|!z*~7wjn#qMXtm>O2fw8o@aW@-=Y4Hg`xO$aLQKqV;~(? zP0=nfth6Tpv!Gl~?$gNV*zmdY{tOhbr#R`M7bkJ{>{8se#0wbx7z%e!F|i-ic$^ur zs2GYkiI+{7z#WPAo?^CsqZ@0d&&Kb)@5AxOPiZ`?bYy*EsT;4j?R2zVf1d%XI8Q;$ z7dhGgAwIWH-`Bhw{2kvG|MKtu-FRY)xNSau{$Kv<_?dtHui^_n@AG51v*g{#h^Gm5O{dfX{rE(v zP8l)lpewZ@Aji+iSLF>Y=N89l5B^DZrD?i6mP|p+LQUOLwAS%O*dGf3lw!GIN5$_0 zq|NHeBmUI#L;?t$9c%$~29CroREaawiauaKP}6+G9s$PJx~~0nefV#lGW(Pk=M#Qh zSZTOWqx{g8c~Uxu0KblW=@W{8ynsTZF7VZ2+^g-${FO2}zpqTPALyz54Bw!yeH(b} zL6xD>Y8owTX@hcV-@Ye$97mddofp1&@rrzIx7XX_yu=$S>6e6xaz>Cr=fB%EkR3GBXZtB!AN#t9Xv0N-0V@ z@}{i$C$abeCHPoz)qcw31j(rSF&c~Ot@ftz19|a}uD$SDbqGxPM-OnRa@3Jyo1xBA zR%rvGOW9m^p_yx>!aSvV0hs{u7Ea;>-86pLL&ozitpE7`nH|70dIB%zr#s?A+^M|n zw$RT#?4knK2`QI~hZlLw|NfH4%$!?h$lAq`^$b9h*D0$hT3!GYqbgbI%J`6H3V`|7 z4a>v)RgfegNA$RErA*}~1g3$b%BPK!gMLH45;^KVKibPgc*oK~eE^QG9UmHRQq6Ght-7Y1+Do4wVzEADOe5el9WSOz}$~#X&3&;e};CVs!)9T|?r|H$t zG7$c>BSAUBFS|_ww%24s^(ZD_EKVO<=u;D_^A%_h8~r|J?3{nZJh`&&p?>}K$hL+e8w@#*Ax%T z9T0C#V-E8J=EAeefE|F3R#W!l_F&|u%k`iKm9vYH-2I6V>ZT5SuF_ib?2z43oQN-0 zA=>gBJ6wx@`n|Dyo1b6m+HF;_lCm*xfgkiI19*W2eoPlHljS_ExX?K-l;FElCWm+4 zunqYq85Q34d0vXCD&75yDq|N5=438h*^Qy%&K2FeWn(?gom`Fk?_P`Bh1WDmCOzzy zod8|8>P7xWEKSG{w&w=2OYDm_Ky0i0^V=%B8wZ0c@yyfjjTfJLSS0PJ4Q$52sOPxJ zyUeHzzLL0!2jKq1gXP_^uBi`wRNFxbq@hDEBoqyPKA;lVBJrEG+7NzFsg2mr3s=s+ z2nDq29PolGGm~JQqEGez*bMT;j!C23)ZT5^+2)ZW^QWGF^}N|Z3>crVJFeSV#87!S zQjT`R+xzTpf!!={uGrE(P~#x_?*+AA%%6G}l;*Zke#|`~6Uul2fbz1Ch(!gk+-NL3 zIM@5=FF$ZFKe-<>$^bl-2LZZN8rXGG0thqD{V5d3FwLj(HpUv5N)S|7rRNTp@)}4O zjT$Hh>^eDQL$6Tlns!n=K;Z~J31(j%FcO*rN8<9t(nZ*R{A%VZ)l z8?wEe___7gTjR``({YT!ga)Dv{JP6aGUm1hvrB&Kvpa`#7cL>7!{s}LG!*2>0Ua+i zX|Q~&COZzBSfPW9ks1_b{CzS8G#Xg5)&JEiTk+=K{10-_OYvo2_GR(JQ_sYO=PtNh zXK7IlAy?}A!Jry*+9XwTgoa|zv;%hrDVGFvoDDqbO-MN+*R=6Ur{DH=e$u!%@izaP zTUUY=l)P=ZvBoNU4d$Eqzy=UmZrIeZyW8{Pg}wgRoh5bz6|#Egtw9P~S!V!H+g<(u zJ1nf2OW6<3fGv!Cmpr1CKm*5vb77U59jbERbSY1^Y76ct>4*h5DDX1z>`8Bk7u0r{ z6W=K>nt*8&5bKst+lWjs}cE!61#8w&ZNc zp7JyQ;bW;Azz&ip{e;I4@%&A}$&Jj?Zrs{Xm}b z#g3gzRN#iDu@RWL{ z_3CIkr!YH5_jOTBEuTJ~wqEs7{e$WUkNAi7eck|(Uf9Cf?M8oGo`zt4D3?3}=nWl_ zrv~!Acc7pTZPj+8b4-YD(0UNVKo@$1i?aS!m-Gkk)&FJSlmkMxq|5qBUC`v~Y$vo^ 
z!iol#atJmttLms>90KdarTngJxE5U?M{sc9ow*2X8@te8tZq%J!c}&CGswo<<;Y1^ z*yJY>7$3EO+xjKx4qq++gW=~cL&cTH^c45f(BVkf~#@dDKjW54l_IG zRUie41{zximWC#$5_r;BWkN#*Iy4++?-;1Y|!M%mgj7xqXcyxLlOaLhzj$-DA$KVnl z3*F>(e%*Tm0AOWf1Hf|3%*=}RW(*B<#2Guc#w8bD;6War0KiEQ36{Y;(jQx#_-G3+ z_|W#Cgk*)27K0452H{FWoHsEb{Lm<40DtJz*knOhfMc|@+!mks`>#b`e|wyL-rks- z=h#M-Du;la^@d*Iq5{)R zBB?JJN%)V^ws96u{OwnqE#>i`6T>Pz4A#&Cj*PNQO1A4{x($qYjv+UG)!AIqmA62> z6JG&zu`71K6eS~0((HUwmiR8caV4v4gj0_tUoft8$SwniNgY`7pz>J*c<^|VOKn@s z5piSzsGG7ljD-XMoSew8w8%Ytv5(+9R+9o2Lq$*&KO0W{$b)R#Ve~T*MbDIwtHaPV zC1`b+<=4@j6LOw}cDkf5Y0dhP7o1Gyi4DH;>?6Dcq%{u=DSwG0d7AP~nk=6%?s2fO zt)!zfs&oR}SDZd|LLItkE)Exf!&V&;f+z<$bQ?*&d=+40S#a>Z@|YeL$VEO5TGE3h z6o7||X@v%7DmFvyuYLi0)xv}0A{=#bOPEwx@)4faXVRYUIE-gD9bR_!TqUv zsEPx+Y0`9#lTM{{y|A~Wt1<^uJh)ew>nmJ{GM{H*AU`v`5Z}M~Rt=ojVsOhg@uAP@ zgk8K(yG&h?qm58)F2dt@!h{<-P{5Y|y1@YkFZt5JP8^&@z^N-5sm91qZUBfC4LYg6 zWiJ^B&nndR0AJ;A(p+fuHF*j>+}HX8`J8+r%xOgn4q~_bA5N}WZZ1UwdR0L0(2z>+ zj1!a7F*de6?mKcgZohkfTyoi^@kj4^XFT#TKk0JTrjNx619=v&8Zu<+gPRM}F~f1! zxA**={v;Wy1)cOoiDTNC_}%QeX;#}MYmffe~6 z%YO3#I}RU~W~Sok@d+>dV=MUXlwT{C&xM^r=RC*-o$_0TkT1y%{J<}+eg}4+q+W=G z-{OxKrdQNwB6r{r)zFA29Gjhu1tB~>HyZ~}PN+7`c=(TABD1s>pa07Bai`icHJhpH zHc9iEC$nV@)nBBPfYb6LfLbU`YAp&X*JSwWu)L%hc>o|{gmWdDU*Y5_Kp5<4f#eyH zQf(cFXaMw&q+^2s@lK=XPCrYI^%wp&Ww#JP%JrK#wvlXdVy82<8=f&(04p{p^|4cf zG8Pq;Jru=E9g;$nlqE7*xY_eHih+YlY`Ah{92Ll5GYX8&;AEhZ7 zI(X0i*mvF48jLr6I)dp}1#@S&>P=llEqUy#00@!*Uk0#URUEQLUfT)fWxPTQ^GqFk z_!fItx8x2vOZ(R)TVC}R#!{yHDe+ulC?G%FQnYQh{3r3&8&p(YPOfHK0hSBp-)Msp z^134b1z3MWO!iH>lbg4IdQH5%*|Bk4R40~ml0?t+*L9eczwPi=O|y_|O0R58E5~%?1GT%W_A# zrR?&Y18l{M7NpEc6h?8b*%}jQC66E%q9vc^)e^DHrI2qS| z@0!?u&uy_^XGi?GcU7oh{X&xxh=$VpZ}crvp;@c+_nE6nC@~JFTx*B7N}73 zM9;Xk$SM9K3v;~j!KN(qlg-}fGz&-2ym|9z3=Z_i)z@4fQ?hlx^r~OUN*eGH#2;0h zh3T%&)wuA&i{r~*{1Oga96o$soIH8lG%^vxmD353_g=mitMv5aOl0zwT3dT>OpG6k z>#n^ne*HIovy_mw{D(u)!B%^%x7&XYMXuE@-S{<|bWh4Jx^elo7V>BeZxv#C5~ zi+Zt`b9-WTK$)l2wZlB7=#F9VL zqA?Jg324XsPH(p957ZyslMehKlrZ_vE&s)^p5Hiq5?jg&KfvV_e~ci;0rqb?dSXE1 zM7~}8s$c$EA0R41+#ivb> zPaz!ysvnl$G}SYY1#iWNjNxD9Rplz{M%z=ta}tI27A>u7X)g>7cFjFAQ0Xk`ioG|G zSNwk<$og1!J(mQnFj9njJfJb!lIq4*jlqFY>80%1P+vUt>CcNNJ>%KY-P0R$a|>}| zd|Gi`{&q79J&W@*F{5y7N>et9AM;A~)zx0SDB|TdHf@Sm{atHu^6;S;KYCx>ar3^I zIv|_Rg5gSCAkw%!f`4$m#SP^n1uV!-0_`!N zG(*^M#dNkUN1yEQknC|LC+QpVU-3^DWqat@S#U;N`h=Y=W9#ycTNA*!#p0#yn>>L~oS%_FxTYw|ISM@!*tjf*MF@RN~mU?Ku?jgN=C93ppYWc-#cU5w(W6-`o>LLN29N|Kh8Ys zjClBuJWT!5nCDG<*%h(@4jq||=fCjh#GLNv8Sa+JJdD(eC!}f)w?8K)>&W zBA#h$hZv|kz=DQ(RG!J_`(iKZJSAa?wjv=9m%?LCuyvV*_M+d6rPHBG{Ki8YdA@st zjVdp&tuMgT@-x2rpRN2ZT=Av64!;0F8hircpeyr|Z+-mAh-E`ut#6%=*K_Ms+!hTR z%cQT$XWK5o;mHSN4BrONY@WV%Nq(Qkp)7=d_=@+%?9_DJbN6kYKf`8KeW(Aozb$?@ zWoy92{1QG4iyip2obHA1L4V$01Bm)9=1i=IO7BfSqOU{p&k1F0&Z+WD{TCPJWD95G z-UIi?hd=h=7?M52zU?{Vtk}JKOwWTp^8E$ReU8h#?Y7%uVU97YbWb!TZ`f#ntZAIZ zJemNzxcl_Iv2WkLc>eRAZTd+2BM*B-ocEx;@#LpHGoJXkCwgpxZDj)ho0PBl{`ccQ z{_Wr6@BaR?F*SAEek2=HIVEFxX+EC$GtZ09|J`Tf?*02?{KUBHS<%=ENv4u0WjyLQB;u~GXYQxns1?s*r-LodEmW3)(r}KYh=;OvgI>p#L_uAHXFWb2*)kNtx#8KGP{cKcLdRULIlo zOSvKwKCRPM+ZK&2I0cnnu9eP;hjyvqTw7_1y{XT<0L*=hcHvLhkcX#=!**;P>MTv= z59K?p>bx;8cG>uCw;h`CWdmG->Z^1vM^Ug1GwyiR$N9y_^?3n)HlePZ3OhsViF`%Joe{!cc<8y1*P(6i^j zR6jmp#m0Z)U0?B-Q!$yR%nbmtrK%JDuwN2FNA+7368LW##RG4MGVfuYY)@N{(khJg zUdtHm&N@%7y@3n+L>RWsw3MHi-nxfY>a9|p7dq)ju(&jh;|0z~d9rbon|PE}^cfV^ z-qywm0RMWWl3Z4gFiS_nZEUa%6~T2wvwCAdsPyO|3RlJZ2B(rI7*7CknHE6ME4Yjm z((4AnM@caBrBRJWNX)N75W@z*6V+CPMMs&b&<_rU*9kPjYh6T<8KL%)0FqqkSb$h< zs)73A`&WH8{_4XYjQR2a$^EmTr8BLFO@()jG4Vn+!_`*Q^AggVxfQ?tg4;C%nOTjIvM4#!!i zZHvCafqcVFv;g4t5X?qIxXmL0itYA=z9LMz&o^)hS3JDpwu187$P}q!lFV`fZaKX0 
zgwGSq&CFR4hvkTzv3q+wNA{32>{axx|smKpj9Xt)ZjwY4{8E8xH}6uG z?O*~*S)JB+;ct~cXw|?Eunc2L+BIMWFEXM>M1V5ujyKBXvj@~QPu;+o&Rx^&^hYPt z=q7#~{d~^BGev`gP62X}L+e^;_gY5H1Jp1tlnXc>&&If<_m3#iLI8C;_uEJ+ILPYe>^<*;BL3tVIBO`vpO#I+&rTO_rTz&oRu^@39-m*P9dj>3Hl#ivL zo(AM-1Eig)b*J8io;n~YY)Z#j;mS{Zrl~xHpK?G^GC5D20I+35aVv`w7tvsr&@Q?u zdAMg2N`)$G6p;;;bc!Nh*-7i-OCHjuEGbawO^<-|xH+9`dAy)C4&MTOv~$wwo%4f3 zqp|OXTcTS7=yT868!vzPOXC?Yc!_Z=&rDkPNNzv@^=mff!p9-ADPTdr#RV^@a5BKm z zSS8yDH=t={DNi(V-Mr8!yNM$$`n=Imc+o#L@F9da*#K$bSMJ2sy>M~D0B@e4AIqE# zD_9Wz$vM@nuREq2OL52DcZmijn>ykf-@i9F4BRY0Zp0vL8pjN>Kf%ZOi!R11SO|-V zh} zwip=b^P>FF&_Hb6wIjCf*dD_pn_^(oCLgJIa(an_+G4GHFqWhzlk-c_*`JGZ9bGJ> zs!&xJ)YuEYAv~z|8Dz7Y=O{ZjwO!T!J`FVcdnL;q-F{2FNBt<1VAxyU!eJoF&we(x zyt~&ttacwgXJBf%S3QtJeza$PcT(|6TXop<_|2GXsJa2mb`-Io&A2J{*7vG0P$wEz z+ckc2U!-_F+i&5oPbh{%FE5M_y*`w;&T|Ssj@1a$QcOT7wds5`bGb) z;km~&%jFXpYw`n_)SQ|aj~lM~Ui{e~z1n zgMSi#`JS!Kl$kRn~!}uZoA`Fw`KOH zoC?%1ziBHLV3CpbvT(rL%udJRvLc|>wn}uOt}Jj(O`en)Ux}&lWAVygdad&Tz1{U2 z?@WZ|k*Y)LCw}zdm&Rwm@DHN<&Nz8eHeR~7B$_)!E4pC&D>Di|SJxI@zKlh)x9dZ`RrXYp8CwE#NU19b8*cz-$!a|bki3MHr}b3p05w?D&R+F9i_7uJzxk_p-g90UH(a;RJYiBBJ`bxe zrjE!9JOsbrb(R~U|`U4D5y`lp@{{ezq0_|&|7F!>Vw1LC!8x8#e@0pKHa%5F2}W8%&y z6IER-<#U0)tRr>J1lSb&V9dXWC&J`3tvAF9y>1JIGk)OcP2L<`Q@;+M@EhNZn4`v#a^8XxnJ;h| z;HOMOKGR-|yMS7EfVu+i8(dd;ljK!;!r%#47LI{#=^M)2EnNi-eK9>QI0x1LxGdr9 zWnxfvcyW0?#)d~@$M&s~$G+e*T5Yn+^x15#YwH^j(%v{ZKIP5%jYUon=#_oo7;E*J z(%re)1nn`C&cDL-(2UZbd$n3q3!{Lpzc znHwQKpZTHdtLm+kR$Z=GD!w4N>i5NCxjbqDrI8(Hi&$q@vHzA++Dx>F*Yw{a%{B;X z@x8ntfG+8`++=bWnWZ*}ani-Y-?H>>R5SCGM8ZqUJra)4lRF`h@$zXPpy+ z((PsW+@0b*3xnWACRU_x$h$XqK@;Vw9Lr#el^B%cMwrXyGUUVl3v#49$ZT#p%Xutt zS)QR;?eF>t4dXF3GvWxcIM2K*8?E?M1V{bj&oAJ6_790?Ju$j{XKWfBi#>by#Dx#J z#Pg5HIwz!X`pRHGpNayK=l>n>sf51nw%C7kGOqpp_3^vE`&Rjjy>Z6rr%6s#H~DSo z4mOnW20*`&xRDvPiGJo0`bL8ZQk<%6sh+yh+G$&>tmUs&!Q^v@KVYy*_R#ASdmj=c5T@dTeptI```1YF*`FA zci%3*Ms(0O%P^ZR-uy+E$dB&@Zp)5}L5B#VpJ(&gEHIZ#H2V`AjztBm?>sj}-9iJR=dg|;rDG7BF?U07%W_;py zTmjcZdWWe~?Xnu82l~B8NqX9c0Lm8_m!7Ey>Fee=XH_Adxd2})e%&7GDgXLe`~ZJ> z<^{4=9Wu~yr-0%~>-4S=$?^&$-gV|uRi`gW$W!yzejdnt3HF)vSlg@BzX%Cmb3G&mY53D*Ds|MW>jK~(sWTY>U{cirbISM!s6NvHWpI^i{{AL##7B4nZ%0;d(K zR+?7jkSp38zg9TqXE3i4UCeniPotVCX=hK)+tf|djNiS1OtuE!p7CH$-)ao?(yz_L zVDC)yteuE+cQxapy@Rp?Cu33msEmT};P`POsAQG4gB<3+3Ja=_pQiaN(GxMg=O9%Gpeh7KjopFSwgGe{OFW* zw%`&^GHg9kqG|MkP=&AofW=qPRb8(_=42R- zqIf{PTcdSs0BKYlk+RrE6L?Y+=YoWy$N1ARt8b{Oh-9t?pk_c3*edAIQitdWup229_G8(gY-xFW_+!x}u8*Wfzw5emez>UK|Yo52~R0iMv=gpZE z8YZxF2b7x6hQ#J7pzBG`~mI_IJr&ayvqbreDC{r$zkt{(XkOLM%_`1KxG+4s48I}Z*?xr@lB*=j0|S2s@|N-)CkM|1Tkq#j z<5}_h*g-WTG7V3fyeTDvZ+yamomUUdPoBz;(<&LKhTN9zit4bsXh97`fb>mkR`meebZn~ihqebF<&zg#QGn}O+LO10p@&HxXpW``aoH)y zicDO@!Kp$M6DX^8NYW;|Jsl>VwBf{OU%`Z)HvmvJ@<98!%@Pm%ukBHF+xWyA^O3Yg zccGAbYzU_-5Wl(%EyCHE^+!FCJ;*Yx@MTr)t}ypMFq%%2Izyx76p@8C^;ZgK0^jk% z%Z@-CD4yMUdvdbStaidl$Zr;B`L4HqOuX`KJ<>6!#sI+mFLi+zS*+tzSAg~U*RBq{ z9X=AD{xj*Clntj!1PtOMB+G&#@Isr+Y8vi0`^7 z^~p&pcJQNS;Bgxw)2b3z-jsumCDAj#xExn~|JImWYLAia+r^{&1~T~&C3FjbUmF2h z5>gqejO0Xmws&@2MK%llnGarPekK9x27uwguGlgtT9@$ylpeN`j6M^)Y1!6=W-fL= zq1q4u10aApldjPBK*%>lh(qROtJETVgREnv&${Jf^!o-Z0_aq?_|nziAK$z7+USt| z-Mrtt){e9Wa;u9P1n(9M$?L9X}F1=KRsSS`5-ZFx> ztQ0BU#le5%28SB~!RCwien$9LWTU{VA9SwBC!=g8Tq%QzAsqgubjynfY-+Gf%64^k zMgLHLEGq8U(TVuyfB9`D+bOn_~s@zvt{~QjwOd4!?rX?MbIO& za?N)oQ>407HKZE;B66y>XiUCh6QO`HXl*CC4dxlV1+v*Hcf)V{HN`J@?7lupg4rT0 zZ^2`oRGjT@B5@fNh%3pRKHI+pD2`_*;-Z|bI6+-N^TG%Pxo{%X`gD>TAWfM5h^Mxf zre`B1q*<3|VygU+8FY0cEgOG%bFi&U2$7aPQMm!2=2?%!R*(l< z;K>BZ7vWuO+K`?Sw%A;ZnaPQG_Rl=e`8YMJfzPP=S>D_j+<>3{Q+UKb`hCXhmN(fS 
zd@+uVPyKI?GxweqzxJBf##5jA)ELqDYX6;g$Nl%+6My@eFU04+@Q)&Q+21tnk=LlBzzS9#uDmTkkN!DlCd6j8>K>bn-M3=5f zTG~S7u-pSb`4f+i|NXVE#!a`}5>4^y#Q3;)(<3_NyUKrHv9;Q(R)1e)Hw&uQ688g; z;g)la{}Pm5`e?#|2jU~H+`B$2*miUsD~nIr7spPVjJN#G??p$?K>X~>f6Mp62Dhi22p6-~IK~0MZPx4Cn@CrFYTu@zkd@i4GPEp~>U8OOya~Z!dM?1F1$*EQ8!nFIu#Rb(0^iQjq#($qcJ-jtFtqrR(_j2D!2^a z<*n$dencT`T*RvJc_oC3zG+Q1&$eIxXb-eemZyMZ7Cwk6a2CG8XH2m94PSJj_ofu5 z(NDcX?IBwLpM6yZZuyyq1a7zjRwF>y+Hdm%C$n!=ergy{%jU;D<+;stPj&H2=stFg z49A&UPYZd<@>3P7K9boQIc9G61?e`Q7=j4KQSbx55n`w-3tsqf1~V`xpVK#1*MX&%XCUq73(I%9BfAbNYcVrIR4{1*T%yi_Hgl`OLd~I`60Mgj@DyqE$Ug{p9 zlReF`uyB;JYKhebCXJAj{z;qs7sf=juTu*DWn)Q&q#+^8&1c$`7blQ?j?0J6>PLB@ zpcduNPUM(4+Xu6Xy4Lfy#Pg9Pm)S`219?sejwHc_hw0 z>x_8MAN`^G{yT5E(elSe9Q&T^rdAe?dMAJSGRfNx_0v90!~U1BGEXk~UTNl$&^4UT zowBjd8FF77LTK-iO_Gm^9Cvm0#h3r-tMPAN`zPmXfvV8gCqJO4KR)xB z&%~o1{n)tns%!0AK@)RDEs@YO{x6GAiyAvm93PL(o3_UjpYgO+k$_ z{K&&Dj+xm;9654Cv`u(^l+Ob5!VvvF{M#v=xkdiLYhM5Axas;E)Lv{%!lHM@?lbnp zm;UjK@>}{{CZE!Re|$rqK9&sy9N9mn@e?w7|MA1(M_YX6Z~kZe^(Q}(#6A#ypgrZU zi|<=^?1|5R_8;QB3(t>x@4n0RW?`R~$$0C(&H=GTz#=(s<)3-_wt^wzo`Vzeb-orK z_{$H*{@d<|nHk>9M;4p0b@$$Q%bVUDPkHK3nf62C$BOv&ORxUb*!SJ)n6gV0`Oa--?fX=+C2PU^s>cITd0mp8oWw z$EW_94FK22^vPMTDR3$qn?a~gm2>2V`2g%6z(OON{x@yj5}USejjw$5U*q+^{E9gH z5kC=q!q2#sIY-2LS@j`3`koz(YFXu8xNWBa#?PvLJwX~>vPw#bXsepu&uYw#{$J}J zc)&B(-twn#>xVaTlOO-D2q+*mRkC&cg@5=M<|T2(J^`YqmcWK^&$|gf_*!WMq*=E{ z^;x>1Uv7#aZQ364TX$H4Q%d6ad5+TMXZ^D-cmTN(n&-rv6p_~aQFz)p0@w5ANp`}q z)|+9XzeO25(k&+6z_arFpl>Mre8L5r2JLQF%A`I?hZ*PRa?i!w&<}er{$?E&r(f-d zT%JsAH9cD+n?3w7Pn@=j#eh7W2ZRf{QFTe}x zt?>&qh*xS?p8%k-c#rHobKuND0G;BbCDCgV=(?cc=PHoOk&H>4c>;hMptTdSC`3(1MT!HqZXh8! z4U^yCQ9Qd18*}si_R&W__?L12!FyxRX}hDhn;nU&jT+vDv5-k^^4K;F8!$e?2YxrG z%gF|%-8hzFjaf|dHw6$HxhjdTeHy^CQNU%uyONiQS{#qb`G&unEUzdgCTHWC>u!(Z zCntSE0i81reUk}u(P7~g-sGFI@F0g0F$K61XTxIysv9LFt%pvu;|6~F?}iNk)PaRg z%HeGsoHusij}CRmgU&r8&f2qEilTu^gEuSHaZ6#4yEZCR87L~?S+*GzsotP2yv+Lz zvIfUX+i}m`nrKCSnoV*W_d>p0>%hEP4V@C{qo? z<^eC9Vq+^Gi!9OMxl9GyS<^kH5qZFI!vtVZbKzZO3a1yCb?@Wr)q$ah`8G!C4E3|@ zC_gmAd&=TIX@soA=gx=@mUv`ORYSL}H*{baaOe@TtP^!tGuojP-#T(<8@i89m@fT; z>vXK(ZAv$IgN1=ZeaprRzKV<0T)3lSG7b;!B|{2Jyz6`gc)Jl2h@$$?}k8CW`f>J z2S!7itDK}We>flps#R4PAC2s~XMWM}8hOhjulNsBl7 z>E7~W6hemlf)wgSB;~*Z_{4%E6ME*O+ME0?yMPx^$^-)P$9FteSKG_PFwTyeTDoNV zkwMX9nPX#scS=&8Qu+$dvkeUJOuVDt0oXBKdFf}B>Dd z!W-H|ZGlWf%Q}z2t8{D^QC~qDnr_O0S*2Wn$JU!FS)!oE7d|SO6CSWD*b8jmlK4G4 zzZ_TZyDb*vIP=y?I~yeA#CU^@t4hnSUn@Vdtb9(IvrfE$k!ebLFSG;Ht<>9fOF&*c z_*!0x(ZR0RJP4t3BBc-T2S|LnFZ{O7z&AnVX{9U58yt3p9G*7B3bz3~pmIERMEvn= zuW+yQnI?2ok|aDGu_#^)ZQ2q`vZHt0dpOQL_w;zhue>&%_RMF;!t7iOj*b~8pN!!x zT*|7)n43P4%3E8MV>wp24p+lbl@1ZBVYFv18Sla?CU?D?siCX#hJ1~+g}GRqoj2a8 znb}yJ&o?2Ncw~ZRi`wtZOCJ^T^kdD7si`+vmg)v>@r`52bz?d>DiU4AzH7SW~_$?)dZU#x{F&okl7g|Wx!+M4;I0V_KZHtWtXnWmPF_K8-)r!C#^pp|H1TqO4 z)y4f6byt3Lp0t$Ht#*md71=HI?Q6W9m9mBJhOdeJpq)tD-`ndr`ccZTEQ((o&riE^ z1RCG9HeUIVy?sM|E1n4u7V7%?hoif%HwFeqVt8mIMmg20e;|fNhoX0IAo_;~y_iRv z8fKl>@M7D2dRW_$Iz$Bd9kEl|0j5azr`d$=eX$TxE+3Jcbp>7|pX>+*s@? 
z?NWUdCKjj@t0msJN1K97HaAb$f`>x94)2U(8wy_89ipH(>Jugd>%M=7umS<<FL=(g;+{hjv17-W?1s=W*`Qw+zLID|uHYYjE{nLFV1=K3@BN2VfaFec z*6N$3lY7rTH-73#kBb+-^kwqVJK~*x^!xGUfBb^!G1CQ5+~405eSKrGRlfU{EnDKJ zpY)7)$tzzT_Z_-dwsOHXfX~OV!2ls;BDo@1xrQ7aFPxw|pn|+TSKO%m1 z#eE0widVeymz@vboB7m%3ErkRXi>k_*t&JA@^;35{l|aBrmaJ9!*w^tLPLE7t5f2+ z^+Gp)!)#CY(F(7ADbH!&_?9D|3|Y|lwcp0c}y<6Qn-z&%N5_MT~i4B)I%v7f|Q;%^#({9TX{-ehP?Qh z?ewL3COx=}L-oRH2usF04bNF$@>LV!v?>z%9FM;h$jZ*94306^eMxB=b>SG7Ay zA2+Eh{$zpCmUsd9z(NW3`$`lP*ORgAV+)aKl)NyW#`gp0I{f(nGO7$V7n)wt0v)DT z8SMku{*R9q6+dM9tvl!wFHo>SlCk8X^o}t~xBC30+V0A;DqPK_+z<#&=n8MX z*RohZkrkFct3OwN$|vvUW$(VzHPZqAGUOU8%y%1nhcQGel>>QU+qzb#+lS<*%ysWPez{4i)W zOKH~UlwZU#I%pYi_tDp7!kL#+heaEW17v!y{voV+}PV-)-%AvH(0~g9F!`3seH= zbZhF~kg|aM*r!7mML&H!xtt!oGYRlMY58GbIYcmIzbFMiDxs92D`}igM0t*_+&dl` zxGftfvrM4SS^_E!PRkV>2u;afPtdg=r>hs`4M|5Tier=)8q)4LiuP#Hv~KI_l>$zA zSr1!O?A@CD3}Mo<`ZP79`Oad3!sn(ZeOla>k^Xr2WtYagfA4Lvu(S~O+;*$lcS-#} zd{5hq<*J|F%C>LX?&K_3^6dWmj>Ws){Z1bNKYnsrHbzdG>V*BM^0yAPt?Ts%0Ur&Y zt+~VrLJ`}y?}(n>{`g=2{LiuXoO9yZ@9*=xB_~8-&zRGI4$HCpgw7sSRz81Yb}9bl z%l{|-x2S{9=>P_U+r977I=Jn@2|Do_h|&_rCYNc-hNdtlVm~?jD~`xK(`p>1RDNe*Lw- z7GvAC_%sCiu2;VDmGP}_e$$&SdNjU#^PAonuYT1l+V$gt$key?Ql z=&}3b4X=A++#vTPlVy$pf0QHqps3V`4FL49 zi)xGdIWTt{6nC(}g zp_W@!7I1$2GjvNjV^6{Qowz+GYeg)XY%hen>W|7e1tHJ20j5gNMgQnm;ECxVKJ%MK zJrhp6?XKyE4$4+rs442W&l^9FDSaL9CEM1^x2-IuIqR)3 z(^vxA`Mk4e+fY9Is_j_w0QpQh;VyOJM{#xBPv6=CrExdJt>`0dP5>mG#NfZ1Z=UCY zAC6A)b?-a^O98Z0xC7S(k54 z1!6p%^{nz%>ghg3*I=HIc*(KwkuQ0a@AH0}U=t4R!+gp?7W?Bj&!4Q&waxMM303Ml&(Kc8G|3u zR}`Ne`#85W?D8_lvo*|@zWyHHGZ5?U@AG7X9V)ffl7yLWXK_T_O0zR&MZ?|q9*=K- z|Hf#nE~-=CDS_+pq&d4&5Pl1v7(zASfq6vvpyZE$iSf2rk`f5Ft%>3k;zeSvmB$5~ z51#1?|CmT;GJbw;#^ta9;NlDS#%?(o?GnI64IshOqq;IMwU{VBiYhL-J!soBT%~-; zfF05*KN;+MAdM4r>L>nUfOv~*RdwZ<4U}klb|%_;hvQTK@UO9Xv^#e0-fg@bX-|to zJ9VfyQJ8`@K}HpW>Z{Tb=Y>wQqUI@ax@F>?%AvRp<0nI0k8cI&Bymb{=q9FTVrp^8 z1I!LZjBVcRPXLS`KOy;=a2*Evx?{`MVNY_g1Bti7yHy5l%7mef4|SF@d+{4aiN?Cn z1ZPlu8$H#}4wC>rRGKh0ZJjx>#hZm3!^@%_ySFf69Q%Yy&Mz#-d}GnaE`u9mh=Zrv zX~3B6gkewFKp!br88?(g$Cdn1u=tN7lkFfrk(P-7CR3_GYg-a63Ma2|vM{T>%83KT z9p%{82ye<1F~VGBc{O>IvZZv=CHO*l%AH0T+$zRp(xJ&%VJ|B_WzTeKw>01?3we<> zW?`kjk6pb=CwXm_P6MzF6HX60F!JbRF~qLRT7H|+JGV2q1@6eeqw*l#{1JHbL$IKn zv=QzTAgCMhDvwPux!Vg(R$e**^hsgng*l*F&|&F@2{f+W*=RcppR*0JT*^VFvn|_r z<6LY25BMCWQY~gCBjwLK)#>01?X88FqStiPiBAq3oil?Zk5ZUe^SDAm~HQVJBfEUUKJwLbsfIc~S zmh@FHa;0TckJ4xmKhobBn+6oVx|ECJr5rXsfuCpV4&gkjP7d=g4(*WpQqIq_%_z3AwU?R$2`d*1hfUWB zJLNW40FycBhh^7xM-D6WD2}7>ZTHYS*$bovy%vGSo4|eYQB2lj`V?)Hv9u7)#RWO{ zlHb8$B+Yn$^r!eS-3b&N{sB-6)aS}C!Z(y@D7YjmjyHY86&^0k$L#c!-*`AVemw5F z^FUm8-SvKJ%3zSsjcLkdr`nG2HWG-+uH_%YDk9!uB+`B1bKW)zo0Wcq8q%>M*G_WBfBbxB+!D5T(SKGk#vbVI$VV)oqbX9!4vK>PGG6viCW^& zBg)1mVe<;Cw8=m6Kp?^VC7$xlFWr({evT*nh2J9=$p~~=u84*nY$!DPnPQ7vmTQg6 z=k

D|LcT^!JP#Igz2oZ^bp1vyVXs*htsgBP!H3azEp)n{J8cJ?#n6H8kY6Mn81L z`{FszeP&#@Z=duAA4Wdf+PurJWkP?_mb9BLm09lr(q${?entG39Ri>Mju{UrOn1sf z(wMy9jWhNt;jf;4)>!=E+y5Xw_qi{|*w!({%O`CzN!@Rs7H)E461p)Wo#Au~CT}0} zs2`7SUwKu0@k^hxzm4v4yeVa$wddS;+)q6=e(q&2w@?0p7d|JJ8!VIyM3ZV0!$W=k zHaC-uJ9li2)6duy@4wpRUS5IAKyHwScHUyQVoJ}{ zE*x<>G&~R^()ru&J{XI$Q}N7aJ~PWMz=A$+^DptXfcO9};mw(6o)vpe-xZ(z>_6~j zmbgvrx6F~ZsvG?*dPSKO?Kd^dQ{*vi9W+0jtn zM7uC{^+x2CoB(1xKsm@3S8ujaTM4trID`S{0sLlsf!*%V_=B?ewA1C!|C#v6M?W4n z+;np+$+puM;xjREjt#r_-g~_v`Uy{XqE*Q!jL{c}c7JY4_ms(|OY9HrWWUXtoBCis z1sRfnGqKaQ+HG2pk44Eilk?~9Jv)B!6)%mSx%{~vlOMSE9@(~}8C|E2;IyA7+vIwR zhU`Zw6#2H|KLDf;t^m^RzAgEZ_5gd5dJTZH;&vNhXVjM9z!rP_%U|*5(0%vEyWaht zxO@M7@ugs-61 zH61HzuL_KJ_D2}yALXfuO3&D}BgaMnW#K=8 ziyzI^0_x30`I6tsr<}k`zLVd21z$HnhL(O z`nvpCil)Yv9OFDczi2(CPjtVb`oj;mU1=wDf%z7IV(~%nuU!wNlj>WB>4TvQU0z~* zFIzP~Jtw)6?n)Qf;6?m`zd3y}4#=+Ecko~wz3)&=o*0j%+38qo%m}Y|<2M1R6<5k) z>{RQ$s=(@R<&rTVgummBC&n4-KZib!E72Djx#oGi7w_w)K<%^aAPxgnL>Qz`05qu z0-G!8<(80P6_76tz%ANFG?zuMp58&90>%kf_;L7t-0#@7IYx9pGCCYvwvENE?Ym>s z=282HTQ>Daua7+fQWpMOpc9pI$Gl>joP$L)jwz}3L3Iqm<|ynF?SAOURDA3cpNjXq|AVqqgR$p~vtv<}URJs_%5q!5U}0j+ z4MN7EW_n_`f93dW6zdoI=?zrG)rM00!vo=QTWfqzUq|D5zRuBU1Uq))^*6*Xzv?wE;|C$fjq6Zr zQm+pL^}9WNmM`oVCj_0d_l!8}tTW?dAN}jtbLQ!B`|Wqekt4_4-+>1of(?Ao$nczn z@oxEmd~WsaZ+b`k=hfeGy7&Cq74hq@|JAtd&Vw;^@`P>FG6q_J|2L_z=A!t*(e+#T zCceg*cieqXyy6#MuBNdD3rj~_@SqFh9q)KYJo+(@ifQ>x$BrCz8$eI5{0?};r^aNJ zeLVinH{Td9dftm-YG&GHv6+FlQ0Y(cBLPkqVACaQ4t>JGr-#oxb8nn;)`jtjPkb!q z)Gx3BV1e}m`6VNRL)KNS4Q108bK30oojcTiJ<<{R#uA6^J4WQM9*6@64#ZQQ@)YqD z`%O)nv18|+_|ZrGSiI+t|5Wy8)M@a&U;J|~ifgaFMmB&?P_4!#k9c@|6vn(MfDHg|c-1e)j!XZC=T6x4$XpJK=YCn!Ou$kJ zq$t!M*#%mDn*A|!tm+>&BW2O{0~t~<$)SOt9ctMX-_vPq>`1UNdc+q1A4ve?;kqWD z%qG9+r^-_m1*9!Fc~TtX2g<1cjqXRWZM*>v4Yn()ukB^h=qqWUr4k^()!@X63LDG9 zPGH?D)cFh3!87#3Bl}5NH)sbVNfXxT|3QF0r7s((=mA^)mTjl%V|)oe43Ft!0~c{= z_#DbJb@gUag%DadWB}`n;=({yqTJ~Sln%>=rA;}=_RBw96ay)uFa2G~B<)@J{eOiP z?~0+-P%FBSg-Qn-hXKD#CtOsYD)=-= zmDu9Ts_aXV3(pa&Ebalzi|EXB4pRPgd+j`4j}WgJTRN<~$RXYIqPzNH_+*>Qc!@Ou z^}oD$&3r>!@B$2*9EYDAC+&SZMAXHQTZggAs z&6V6RYu%Pj>!DEXDo6;b;9roR9IQzY>aVp+97D4qa; z?*%P|Edp&IKV^_#e_%|TI;y%uHUT`zFo5*rGd{ zSAX|A@t<)Iu*`qDEj&Mn0p6Cv&0(IaPp?^w??ni{OF#n-=gP26?h zUUh(*gYTK5X#A?PZUZHtu3oS)nfe1wy7+Ay-oVu}kaUZ$@I8Aa=d1DHp*_q$gj$0a zHURLp2E5xm(jO1G@Z8wGWmB}vuq-ajx|8XVuyYE4ozRqY^8f(7+f+EYmgaAj=?+cd z6|10`?*JAIv=cJKP8_3Mc$&rGn9vDa3 z-!fKsV(_57$Q}0%FOem^`BJXXhZ9X5T0dl8w0lyT&P}pnBOr%pVsbjBmYSU0EJM;6 zTjXf(*uEu>A3qv%Q-@?zIpGNX zHBF0lCn^Izg>ljZb@exWB|9wi<2X;xFZkH1hMX`ai+EFz$s1&#y^S|mQ{Si~b@jdI zVA2fW8G8d6E}I+kZJl4##Riu+DpdHn5q{WnE-Ys_vLlL2kDi2(HcZ`b$(5cfztVtK zd8D0{R$1U4afFeN-J@WJC%KrP`FLiKjvP@h!p#$vk9^qyAua9iy3!d#6NPZ)7AH09 zTHA$qW%Csp5}*y975NtsmplG8bOwsI-m8?vd+lTh{2^M-z+QlDb%jc?hq|6&Q zN!!)5T!D%k9uQ9b%@1XRjEpY)QG=Rp=O@7ihsGUkxu;0+!hA!nafnM?#RFa@3Mt>b zA~U>n<~8q(r(bcsM2|at-FL|-G8jZA*ip!*1FFMo+Y1tdo|6zE%`h|!F#*5h*3toW zzgcXPW1-RUB}`YcvB9PGg;d}B^o3ybDhu#hd$7XTQOLonaEKFp`Bo z?l~IQWWy`bWf|#EIVuDP-S%XuOE?s-fi!RBb@5I0f#EndIU8s0*%McM^dqrl`yS)< zw4)sUMfJJ7$=YqpLb5rTT%>)h;DE1)~jjh7> zuq{)2(;jUdi@FENdfR8arVOqWIcK2FggX;LHObV*AB&3s1^Or71yaihXW@-=GzU_x2a<-;$={7mmC!zATS&vYr6^<({AU zg`E+Vgjf35tnD@vHtVZ?j&JYFd-d7$Ep(WCTfIy5xa-#I+V4}#2aK#RbddY?Hp$~mD z78+e!)s~VrtCEMmsT3FX=KA&-_8$zH#Q53gK0hXA7nF~qtkIdYg;@`I zsFIHr*?;)U;%4ec(f}=NTwoHtvrYV5YKsXrku~#k3g<{&{EX`7`$$>xidIy%`m^+rti#z( zFj;LNd?D*qeHPoUEFT*N*pIW_tUrOCE&nU>e@ScJC>?EqOi+HYq2MFftfN8#>)=&u)BX#RG8CW^kfu`m}WkDrKRM~}qqx84{N$B)GFqU@DyLSu0* z^A*1oTr4W_xdxwZCpy`+M8C%OxZ62_L^4?+TL(Ug`gL|HfZB-X`d0zz&v7uv;o6_UWb~C&mDtdx 
zE=yyX8SzB@6dIT@XtgKjlpgM8VF^jF#zjewXX^2pl!f>PA$Lnys>32H%E%-D_SOlj^g@@#5!@7F=n+vfrW4PX7pdK~q92&)6eqL^gZ-`Xpc7^2c__ zUs;Q7V_V{qi!O+Fyz71P`+DQ18*Y}bay$lx`coa!qLyuPCs4xoOZrf1F){1PUpk7yZqVDGM~Cs2Tls(Gf<~1{l`A`kvQ|r zGyQ2MY}8L*{#1<(?v9=T@pq*owrtuGAN{Kj$LZU4E3D7;qMxB|%$-s$zdLWcHD2;_ zKWks32ftitR@WBe7k=><<9&bj-niZMcmYeYwaBIfwFhzo?>UhU{b#d1 zKKtnQ?XhWeOMKy*@9E}ty~%ko+y*2>9BURN*R z7a~;#__2AU<&#NVZKs5fd9*b)nbI(ZY+z&)`n$4bx0av47;f7ak9Ks(IJm{Wq#T_1poR& zAC4Qpzb|&EgBk4YiV->ZynS)v=rK70^PZeVuwgo7doq~%(9oWkME#gFsC8lZi~?q- z3{Wn&-5P|kn{8oXCfenUAe5CK)SrPm3$wjF19B=BG?*WbtFOB${{1^wdNHYQa6rO~ zGp#yV7-(qn=UWoynIU;@_DTp?z*wiRfJZkBI8>Kn)BpsP90)YuE9aRVn9FkX)Yr$( zv7z{phh7+iJrY(qw#~&wl_RC-ksxAtgo}1_XQsBuw*eF+(n?F2wtO3ta+9w)xd13d zOGb+9Na;0fb~|zc024Da3o;}Fqw%G$d@UwsSL4Eq&K9l8s&q{e%DOZIW@9Q2yoNtn z-vVoOA*qBRg6r*(_vVZAr1Fzzf_zng=__>rJQ!bIjmeprm~JfP1^}hmwq>iF*kSJ! z#o=V3e_*gDHjj?v2>>f{_(T9315oJP03dGY4@W@(^@y76tdloce#-rN-HME=#_qJ+ zm>|oEFTa5xhi-6W*yXSxU{MBYe0nzS*I<8kUdBq!Gm90ZM;?2+WkA)4d^Z|-pgnvf zyXswOpx!wNnFb7PnidKWVY?Ob&rF-$sFN;-h69-BmhRQ_}m5PvIgR6C3c5 z2W+REsDtB+T#&CrdZA%@)>UmpIwnPG{fw8N@Db$j(&5(n@Ck&395}If2Y!I^s5^1O zob^(<0HRF0@l7_M?ii#zPtuk4YuVi*KiD(sR*%|Ypa9)&JDy$FY@*l};Ad5;Ba(v<2%pc_yBG_*#Ua+wjq-4AmjYc$Fx0svDn7*g%(tg}#S5##3mt z<4Ss^$jQlUH*Yv;@dDzS(pQc*O2^1)^z+1wpe`uZc>0TB-e{9vuSu`@rdmC2-)$@0 zz86${$g6p(^say6CdhJD zaJ6)(l-0zRNnbXLCFuSQ*T5ff- z%oYDY6@ee{qS|&QWti;f=}=!d$lF@|^5yzHQG%}b%8$f}?-}TgUip_S@-sKgo3HAk zBoFHUBwCb*Sa4<6G^gLFxy_M=`dl1$>c(Ui91scSt0&V$2%8u9PBshfv^{;fKiwnW zG}~CAY)VjD^9CMy-T#T7e9DIQKwptN=s=#Cpv7NRFKgNG_x#;=Lw~ZLi9a+0S%EGl z11#ryUV=daWWarmXso9vz*oBd{(&yFhxn*&44lZAdj%E?L^H=1Q#K3#oZg_Jr07K! zIU-ke^9EeiK{#C>Xaa-E@hKQo6dH={pxo3&(aVo-&~r6Uln1|C0FBhcYzM?Qr-$+7 z8DQ7=Iw*1JpIdSo%d_oI=0_td z*zT0_XcoSq?agW4{7FBXpJ!lm6gB9U4Y}vW>*H;|_ZCm!HWz2&!|%T$p8c$+#|_tC z7mfKjPww-EADasjH;~sXL*$rex>0;h0YIU}L-e-BZ~{+4pzz8}KL-E7UGXEAv@ME>TJpNx0@{_n@gFeh!WcsvkWw~ob+{n(GiOMd>9ao&T@@M87d z`|pVZ_v|-~Ox*VN=cEVrgtxBm{U-K-&jaw?KWN*&ZHpJf_uX)FoIG|oe)(5_ z*>C>>YzSvU9bISg9e%*q?oQrZXYyesF1q02c--S36YqTIJ7Z>UJ`Rc=>I1KRBp^H# zZ&SVm@L4j$v-MHq$Zm}<>1UW==$FkJ8Qz+{iqi0w>6FF-hmYJB_uO+&+<7n3stgNK_F?D-)#T`rpLv@IkTj{e=yEJ|NQvTAALmp$shk=G*pj$-@m3Dp^MCQ ziLR8rC5?~jMp|^P#aGK;%G`ekZjTn6s_&|S@9<0TN$jJ!y|9m&51WX!XQNn0-%uP= zJN)D4zY?GRs}Cx+JD&d1UyIYvxgh4SrLz6>9UR9=9^OjkZJ8DITdT4ieC|ca6R)n& zmJ8xpCpKWv9(9o%TaEqTv>Iqrl<9*v)_I}wr!ABne%o$J7{FokB|Z0Xp`G|oSwDLa z!f(@B>}-oig^tSCHTfNESZr_069D+J_-Q+?a`CaK60(svDu3x60JR}}D0#DdGUV6? zkzxWE;GXfd7x>}7?XEa1+u5Q18{0`Zb+dkz_Rs;n*-TW>y#U!jCcoWSjXYGj{J9a2OPkn$cbOSkM zGc_BRG%WV#zm`=F8#!4>r;bcSM+YaZqeB;JK%q`4HHkV>}a43fQ z`jtoH*G7FSxleXF`#Uzq<;zaUuEu}j7W-6x|4%*=Z~e{J#JQI~E>`6i&r1;J%R)(SJy+w@t54n%AL|KR6+z7}GgHdSaw^ak)qlzWrSUUm z_>#}c>yuNxiKxVHz+Kz5Ri6qKANfgdSr%dh%ZmWY%spYM%Q`*aOL^qpFMbpswEDI@ z`;E{tmt$NmFUtTIpTEm&*dW=8CJR_fqGxXaVB9Rd?P|`)&h6XcXP*76_{}%I&WpNt z-FA!TW>t#)KAi2%XVcqrcc-uK4MH{GdHR^-dbhnH;? 
zMcx2KUkdPzh8Q%@be$^pm^!?pBK-0=?h|PbSpKo zU&m4P_)QJ<>3s5}N5Y5Co&7ea{mAZWEH*kk6gyAb75ncx5|4Y*<0R94@yzdDnrMpPh-zFTXs-$0yTXOaC7E@c$8?{Hu@0 zrY&PJH#-~ClT!|-Ui|n}6PpxoxaDS9v9);W)1IJoX$P5y|e!TP-U*@sVk;4aLQvNK9P~e|eJ5mQ;jO*#j7X(k=vj^4k zI2Rtw$e)`WKN$xP+#4@^(ep(C3wYv@d@D|y8`(4(fBBbx84rKt!(-ny`^@{F_^BU{ z6NgVmJM)*#PW59w@mTqt?|aYt>_cL^y5uj+&Q6;zxmGe1x8HJ0y!;n`K02lQ%oh*z z57~cv)T161U;gqJb=(kbmbEYQfob<-`J>9zFHhD#6Y+5Doy&f;7hz4%dx%Fb0#nLqy>aOfHr|` zMXj4|bK`FUw*L>KR9w_CX3TZ1`KBL!?ex0WLZG5gY;+xtJWL{r}%P+qO;FEe(rA@u%q*v@m z=A}ndJobbQ^sVyXm#mQdDuA(UuEACM2biu0Cm1w$wBd4UR%58<95FH_Oi} zYeLm0<@DBU+AQ1r2T$n_f%?hZRuZ+TKRG}e9*sN8M)#-}bzt+SK`LU2&~vMAgKpV2 zYzxmdFLm?$f}ZiM>r189X?}|DWpO(+23eYkxw+%9XGeE5<_^T^+q&b)k3BCIC-%z` zI;p;b4W&!b-QJshgYbcCMRLm8So+uCrT(cWrWN`GmM45CrD5EV^@bm$8B#btuWX9O zNxTS&-_Q7$&FpEjl|p~;R(XLAVBOBAYIdvM5}77`J90uPBE;!s2P+MPamo@t1gAJN z)Sx_{k^_Fzm~G{g5hO!B;-6(iI6AwKvobajUdGWbL-x-nTv|xLLA~TFmbqfWz3IdE z&{Ay%_p4X6E!_35SBl)rEW{Wa8%c~KOa;hTZVUJk#EMA!B!p@VzFWn_5@FTQctA(x zoGkOu0HUf8Q9%_b1{y$dOq0Ua2XX6QQYFmA-Wt}OCFNK8EH@kMl>DU}Vpj=Q5)Yg- zASKUgqY*uWgE4#Jczot#e-jfYj@n4<+P)*&Z zhk`+=)lczA$h+h-d2y(4bZ}ajq#}+{HVTa3Ez3AjzwX{aJN+!wPRuREbvND^cOASx z(`rD;QTTR55JCoYMImy~N4+=jl1`B~0Si<*mB9I0J63611*I~iT=)nHAi5s`H? zmo2ziKer9!5a#Ruy)EBghJgArFmMOZ#jZ`I)yR_iP&dVRvWPMArJ|+>9tqHi zdcO`CmUYbfaa1MLf}`rWua%BkFNhZqObnmF5EkQtALyqLWrBm9S)wbB{m z=)f1kiHDzQ*VAd@nkHbKe=IN|Hk9YLg&eJPvP-0~zN%mW$*&Z7bYVkvZi-IV?;|ZWbX7lf^^Ld1(TRoFwqsX|Pt?2C z4ag^f=rEXe>Q%}p^5_i^rm3VWP&WX8ulgdc6ShXwAXmeKU9q`eJffd+{`5H#A5eS{ z?vMwd*oRh{@&<3Vjevof;x_ss^_XD7y-@*ORCc&@FR#TDe*PkMn;(-mUQmIr@BpA5Q<&N}eMHrn@(7bV!{jWw4M&14$$qS;|AbC-1iL{vC75>o z*!jyOj{U77gV4bO8WT0Ni`$6mD-EBEV!;!6u>6Y#e*R_}iDbVSB-+-sWK!uAhHm({ zFzINu0}PNoh+ksEUe=|(#S`(T^wTQLS0*@o6&i0^&<)ACg>Go@W&qV`g$XW2G?t(% zZRDEdnT0AQmwm-oQywP9Js{JSO}Hvi_Cy1ym08om8!q_V%L)T0dg8@-RfF=;1&dp$ z*Qy8nY%oZkQpeXJPF809elVIU5 z(Dx96>^Mye5Xe)3@~l_JYb>TO@kkbxztTm1^waGuz!q@?H?QGZPNla#J&I)u<+FuW zH2zShaC0p6i9>hB2d}syZqpcpV{||G-am=wzu;%$h8wQW4FF2NCVAFhmkFLV>tE8E zbd)yNZ9cDqtE;x$2<8EavEM~rKv?q@u?<0~*)ea^p>L&DA%=74Lr^mu@)n~Bn|TwI)29sA5bTrEDQ zz$6Zz#Y2{w^7HB@Cvd`d>c>Ph@--)3OwLSu^9Xu7F*O-iU43obc-_7@bl?6sc6404 zZblD2k#KUf{5#+I`|(S!er;TL-E}cJanh5?lmmV?0Wcn64EK(Ayd@rV{>5?8MHl%K zeY5im9*067{T}@)GQn5^x`qda-5%7N&p|MmzW3}iV`O9~wrtxS?|<2Qcm4dQ2> z)N=M&=fxu)@v!*dUtAG$iw)B}udz1tkcK{{bqWji5djipp9yaXC_GlZGW-XD{Ohou zhf#3_`n^^sWDq`OL(NFJqW+tS{O+N_IC|t{{L4T6@A$}v{@5wE?EaD1dd`E@#Y^5a zc9R5JzL6V#coEW!XhMhZ5N4XNB_)k4vh%}7tc5r7D@~oiW)cjjb|_D|!Up9tdMZm-=^?|Q=1Mt^b%aibiJ?hM${+EN?U{f#A!j?s~E*m2)0i? zocdDodfcaoWwl?Eg%Wsfn#E)4X?)qH?muNm*=z^vRu+{8lEn*XWsZkhv02Dtca^im zCZM^wn3PHK~9qCb}xXLelxZ^y|=Gb;^B}z?1y^qkZw46=rD@ ze*kz^=U%vHArRkdWb;@Ij10%{CXVIbW_!42_gQhy`RB)JXY7qlqhm2VJYqegE$3v* z<{I)>WRDpuw=GNd8q0psf(6U$Bh`)~(%;b*J3-mcibc_=mw4Jo|MpY)+Eaqo}~78c2e&bgfx#;2b6(v5iNfdstLKie?re{*^^+M9E6+Rkn9!sk9Ge(g2C;+K=| zzWpYdLAAN%Msgzg@J1`qM|tieB)_N?FVHQ^w;3KCjl+kJ#FxMHFY%Wj`Jm*vE4GY| zSXag;PpVAjhSP3#b@gQ*tNhp&+!k=oFV4iEWbW9BN%hB5F*r1AJF;!tw)prbKNJ@~ z_(C;fcZ{E$mannoMVa9tm3!nq_W|CRBHKAPx8MbG<{Wv;a%^nOi|f2lcl_vy_?y4} zt9atmo)J%e;uGS;(fj3(%}WQ&G5KE7KjC7bXmGGUcI?_72kto}yEq>&dhv_ovmNp# zIqb}Z7d%Au{d_FSRyAgq%7#kuU+lMCUXd@?-x)^^AC6i1NiTZw&#QgJvo1abJnT

NqUWQ&m} z^pGnj0)*`2^vtAB$HF!PON%RU?BvN99oZ}&btZo9B|ocB=FhU9@n=T{2I7By>QnK^ zM_v~DuDi*0kPQGQkB_U3bMr=bmwYq%zx|tf<8{CMrg+|SFOS)UdB2F)u5rfn)U4_; z7@z*s|BO$4`fp@|l}|d)qW$pjKs@;33*+;j|A)Bgrdwj-y*AV`!PV;5 z3f}ts!17yp^78)<55NZ)XXOVv&iDcr-_UmU3nGt@2VfDo+oZ>}zODKz2DxtYd>ef@8!+*+{f317{s1Nv=TgdTlhIit(%7(W1D!gn0;I01onG-SCzZzSHS7Q9|E%B3& zIWI0bS9baMZP62R@@Z%r@l5o2tg76au8^&|Ay=O7APoOOHUphz-p4+m zXu`)@TN`k@t;l|Iaud@Lo(E8QyhH1qrN1g(1%(i?R zcpYHAa&9oFfLY>a8l@u%tXwbI3dP&dqRX` z&8Q#Drj9f|gAy9G3a7Z?IPZ2LJyNSICK&U8>D||>Nv(O@CXv15p z{8JosrxhYhbQePaSg@#xcuFUXqAkqL%XyfWXs*ScJv-uI4?bTw;iCq22&?YB0f5zR zjE>rxok*q$MNm#4WkaBL@I?ZQOm76!UE~5Dm(zARnI8fG&8vr=ago(SIeAjKDUk>iEXe>4)Q|)q?dSlDx&H084N{)<(b|$|u zJT~IumD^dkJ$R?tM2AX}=wt_lvYRgP4gT>=CxNbEI66Az9HAqE96)qR=5Vw+z5OP0CgA51Ej#yBsF+T&}`!?72@6{c`?SC8Id^oju(? zUX=l5PT-1W<*%KFbHG>He8UfP=DSJ&-~*Er0C}6mP)D-&?wQoo zFf@6o{2YkaVhu5^Li=~rMZ$TY#(tQ+ISJscV10Nl=-x3uHcH!~{B^_Au z4ja|<;9QSsZhZu(fI27>M_DpS=V$8`bdgRp_?qS9U zwekue(_~X1^)2*Prl7f{DhbJWv%~F1Ka;Y7psmxD07}B@1YP<^qTgCVfMkm}gcJo8 zKGUl{!=UuGg-Xsh2tnpWf5Gaq@JQ+0y2^uIcF9pU4J|s9*1B>^B{qQcLrE%~h`p~_ zgmXa}en;9pII8q4oGByrnYMyP^4dn~85#G4yY!M>+YG2Zxm_mZ-M;XmEY$O3A|HM< zmDUcu+5lNn3sP|Ky=q>xg&E{!GMIeS-ETe#Z`MQY&|U{)yv1MmP&ic%5(n+pHC-9|;5*Q_<(u%jS03ym3qzENPUknK8F1x>C{B5S&Y7th`D84@ zuX+MWC0id@2%VW7_dSb;`5njH0Kg{=kXw1^?*AG~Gqz1xH*6dWf3!$GXMZpHRRQ0( zC-sL8S3ajydZ~;3%f4Rv z6v^i7RQ%KD|3O0{`S&WDZ^6Fwr7wx=Z}`4eA7q}s2k_@hRFGQQa=}|iP3atMiyf%-=X9&P zZn-Uf>kV&?@r7APlD~tUz&9mLXY4-h^mzP}o)EvOG@N$DcUZ?pm<(mJCNkXg_gdLx z(2(!Rr{QMird&?1`ga!GkstSqB9FJwka3Q#wFjs&JLP9Gk&FM$rwT?k4SKQc*pd6< zWiS85n3>L_fro}hJf36Y+|>AFY#JT%+x80!bLIhK3NJ!;w#7MTof8j#$a`M{uX_1$w#@ai&H`osdb>Ou?G?F z{-gKBo8R)AqDO7Lwipk-;34s_OCJ&+`p^gTyb{-5d3DUqEqd(3d;^>D@Uh*0cBVeH zpxB5CsqaA1zYSoyZjCpkhYz4R?F0IOJrjVN{!iK5g7nFX@9OD|p8nxDeDp-@yNXjg zUa#2p=p8*fcAR^Oc(%!l6Zl|^zp&TXXHHpJZn9xR*i;7L^kezfhzhanATPwxM@goY zl*K#p_$?G1a5cn|Tv)_LI>`go%ET4S}V8 z*b~JVKR;eR@TN)7pmWMeNz~K!Sh&~ctwf7?E#2tC&Sky2CCh8*24}N(_C?>&VDxG1 z&l}vF4#aWrLjwxy>9f5-_mDG>F?eGip5PY<2KtzLO4(p+Xo|$gI)25RQ4bO5Gvx%ci@*#4BwzEt9f*1#D=-aLDl9Swkx>`;wKxV0rFfz_Abwj48FEq12 z+#{T%5ffu!p5IDLo*a+k#}CWCP0Eg)h(m|&i^=ih(O8`Kr%!lmbb4wsRu>u)hXq|( zFjrqgJ=kp4lpRp{992&1@C|9^Lp;9GST{Ggl+6&dDHRc|-l)nmGA%xMqaxf;JR3le zb0FoAaPd<`LO*S49;uD8JvsTIC&qT47MpkNjC0SwApXZsKFJGqXYM&oV?um0c(&xX zc;{7@g~nn`FE7SIlew}sJugR>bf;Iex-V0i_;qZ^!{3wiT4$HlPJF%t9l_^Vm3-qr zDcE))k1WOq7C!ugCG2H*!GE*Hm;=3?v1QXH-Sc^Zwdn6fk7c`67kr^*@fSNlW!u%Z z?Vb5{bx&XVj<`~8Ljm{99nj~`&CLk!U~J+O1sedi!+SPodIP9<*DfEtUv%7X+r9C| zH~m)p*VWfWQ(>D=J0n&kCwv+XUx0C++ffBuHw$FHoKWLZu6Y(w1>i}J_43}JwA^Cn z(P91NkHDeIU!FFUmykAKV_ASxm9!40eB`Ry6qWDP#F4J#tz%l+s+MK^$s!0BgLPwE zx))yThtt{blf2eSkqMwI%o$@!1v%aUM8VIJ#)goxl7>voI3)RnspoVr7IXYYzXZ5# zX*PE6+!n9=#h1s6U+{t$A3qj%-FlM*V8wO#Po4_W$p3C*Zq2Y z@vyZavVRFn*fG~`@LyU#HxO>I`}lI zAB*k4ul?HV<3GRojht_ko?djph4G~?e_p&q9?%;W{FvJp+f`rBpQ^uPbL0)z-576s z`|qorIO(WMbsCP-c5aQc_nsTi|GA%yXI}p77?Ax3CMRZurQuBrv|&@Wzl%D^>FaK&>`p`?_B|p#S zrkFQW{p6qWiIN_*&Bs3a(RkeB9vk1^ccW}fTm0Cg9w9qD?|!RW{_gzDVl1mj-96o|3JL=Pu~;Ic-E8R&N~joyy$21 z=*&F+Epx2%mgROi5cGp}0|3z0(G&BH`PjN^S8Ut9D?atNpNqG>;WaUQ(Ia_ZIGT$0 z272TjB~#wDGX81l5I~#(-PGeoy>7)7s0vG*pNs!ibrYWkC_Zo1e=D1zlx@4ED=*Mh zTmb$Hu)(gSy(xZ+9TELnbg5q}_Qc@2@%#X~ftttVQ^)dLX`>Frk)M9h04~KV_!K%T zJdi(tc?(XZC*IFIiobbhzz1o?JrGE%ytdiZZk*!nxJ6 z5V=AOcx))rAtenv0_}Xrka_q7B~;q^oM;`2acB(~TL6<`7_dQu3l!pY1B;8Htu$Nd zpieYV@duU(l>GQtGg+TBk4B%R7 zs^e3N<>gecp00j55x5&H)K0~BuiY2-A2}{|bw=;tQ1tc>N=P(75REB-R%!E`oksuy z{R0~SpsBQVecPBO1eoM)39arK5Hb*;^G62HK4W)WeBn8<+LQxHnNkiW33=<)yOh-) z)QJTllovT+!rOq%&|Z|UNYYk1tK*sij`Aq4=!Q4E0R(SZ;O>^v(c$wwrs(dEX*oon 
z{n~e9@Agfxb^A6uJZ>ws6AQ#%;9HRa5Ms+M^|m2ZOVCJwbl&$k=Ku~iGOM(xKyCm? zIIly$eafLHKm;tO<#&TcrLs<~1nzxtG2)p7pF}c?T(N4(|3g@!Vy|IIA5N7V{fUjrj#%XXmDa?{_tp zA}%i zCSCmPVNE!p%jD@9K5N$Hnx z2vm#YCGC!Esvu?Yv%kA2v49Sijwla%3JXM7DH$Iimz{B`Eluw-w8)is@{J^^o$p(uy&xr?GL~M56LdD|OTrdGKTm+9Dk0x9%$sd9psEODYq5o|r886R#RrZSN!4 z6b2v~Zf)q39-D7M>pbl}t_L0sivmnm5Rb02!2`QW9Bp76%zObfp6nTus+7C7EE!X~ zzy}=BY#-D{xb?rv9Cnp~E=Q_SK7(*3JQxhqJ~&qMEMl#w(?(vD@7(kDNVofCm{k~RD(@+~`-eKUYOJ3+zEGxme_5+3Z8 z;%i-jjHCYK+mOEij}FowVG>oy6S$IRrklR4=q)y%W0Khwb>RNvam)V0UX1DPXHiAT zi_Am5cVpXT0Z9)u0PZumlBrywvrhZ~)pmoUZUCtH+Qlyxv*^D@`+H(juk3HS>U@A8py=w)M)I3~N!1R;6R!Kubv3=M3F zCp`HlY2N&cLn6!12Fm?S3hxA`7A8aO_4W)a8&sfQe-Ci$FqZVUt~)1neCJjDUaOi>L7E| z`9a&Vsh0bs3q061_{^sy($Q6U;wf*tL4$Rf`YL_e6?j-|mrH>E;Fqk3Crx#Sq*KZ) zk3QS|LGrEc^!3}t#GCiZPkZQ@lbL+fn)x8wW&ctSD-Q9i$RaudD+qTQ<5NA*CWS!* zWtmdZ>~Ppk8wv+>vH{Pst>RexHJ{`Ia%HD6JVC}#P4lhtmm}?XGT-@$9H&wGBqh^{ zjw)MSDw}?nc4C(@J_$8}eg<6V6n!`Grdv%&-?Bh|E?dc`6ecESVvY@vQ&VwF{_3%# zGhVEn({D*O7q;OqEHBN9ZpnH_kI2R*cjksc^~dz(o!x!5ul|Or%JT+1M20sikSpYZ zfD{+e$iPVtqBR3mcj_Yk5uCCoz#pPrY;OhFF<+^vpvbgk4_rn5eb1F5D(?htSJA_> zO^5Ot6ATs@MY-2gxdKY3NBgG4TkgS+d}q2W6aR=Sa;CIwh&ECR%QVKLI&*J*E_RM* zTCgtM&_}T-U_V?w{P@wsaqyO#qHlygay5ScEx#Q<|I(k0ef##wy2ze4Io=sx6dJ`( z>0pbtEM8FSmatye1bi0LU)a47oU|+w!v5MuSr6Jl<*F{o!{Wk{uhfG@7p|05WtTSa z;8*?q9e3UquYdJx;`ro@$@4~4^-Ig@2k_s{I_s=>+E4#<{MxHu@BZWDi4*1#a?)5{ zwCp$Jw_?A$x&}-SLr3(H1#zBvgPuuF`-%7+Y8SuVrgG4c75Sx}H1I~0)fn3`8Y}b7 z_@!69I%`>qwQJ2eiJ9bnwq z(-wQr*c%VK>|vfzU1+SuefJ);{$oqn#K5;?(Fyy63d>Wsaw91Xz+^e{iZ9-bB_Gks z!W{ho{tUSMZE!Jy2^s2EPtCIri7d!Zlw}YvXruIz_%7@~-=L4eCH>d_<=^A?-uAmn zg$0T@@0Vf!S6{sR7hdeQ`8=jlfBMi%9u|*y= zV8>akq;Ftj2FL0iJ$5o~*mp~71HiW34~@q@`I#{^yxIB2r)R{o#pvl4U(qM=v!h+( zVB=0Y{8=vOmChlT&_Vx)DXsND=eW0SNni3Ap1_Z!werL|$TRnuT}`3(QTf8Ec={|w zh@X__6V!ySvXnf;Qy%wpRMbz8?kbOTVU_l$>9jbG97IR`q}c!NyjG4L$VtNFTeVj4Vb zsDckI{=;*HCC!D`GP7JguLHf}8S;(X(B~6pT=JhCrVLH&8ppFby|hz$!RJf>w~xvp z++!gFKGM9-oMb6USqQ z&vhtGn@>_&k$uv=%H!=V##f9tsVnw}_Tip|FY1yD-KvP&8D6W}=)775+R+*K!n_{O zy(}P!KiCtF(Oy-1@wts2jakOF?~F%3{&8{XqaG8ZJGUrJ#KiGM%hA--bTs5Q@^%}2 zDc|B=Qs27BizgC7_`xTm+T_dRMg(Ps2JwDX4iLT?@`?^ZDt@2sxKcIOG*76YZo^4+5F4f#rukq6;4KLK7Qso6KFv?jO4ESp4h1{8zl{b+3^C z^v2GIToNky76gT?q(n z%*#sFH|1UbB#QcD$szv4d;{5(y<1z1`wkw7kAL*z)=m7N<0p>B%*>Q^4W9!U92p#n z&6`GI)7U22zMeS$ybEG*a5%Q?*dBlWXYY#}Z@bg-hJ5z+@EIQTV!cg(om)oY=YQel z@k_6Kb$LC?#uZ0ooG{hRTi2VWF7-Eec< zci-XIv16NLUA6%eAtv(aUp55zQv;1=y!9<_jeq;b*Asz&Pd~lm?Qe@a@4VA55aP4U zFU-dD%#6}EWYcD1dg8doJIkhtQ>DhXZ;xF&_r${={)m{KpN_GSvG|F{KPt9v*@=kT zeof8H+D42F4TfK0Q$I01A^nvfz-M;n*kvVdU|;BUX$O2C7XP7X;>5}L%%}c7KKuE9 zRJwN0%VV3)+u2nZ0=KDH#j=L8ppcI8dA-QKp<=ts;;Bs)=BPj{3qh_ z>Ty3;EaCyfALCWyRk)3;?vY92PbpjX=r_b#0W!Fi9vmBNkAd_Xar^)fN4X_U z)dlKS=COck!#3K!3ONp24{xP~u40&fSpIBFeyS(7=>fQrKM~@4rx76Mv|+9XwHQ#Y z6@Lu}QfAUGOd2x%hBPjlxPoVm@doil-pq4>{q)T1^x&k5bzMl|$SCbb-$9?Ogko*- z2RzVF_jdsKrg!p(QjAtK?eDW5AJnNNZBrS7~bPh@6{@ZCi$7diH2^uT94bp8BwO_$6n>?D+m@%-$Ov&FSE6`yu(lJ|#&$94|Rr zW|1G(nMIRjNwoSUI?7XzhK*>g8z9q+J2c*@vcM(;HrZj<8J9AKU1eh_Ni}|0Wj;!H z*<49K0nOe(t$KhHezO4_-;6Pk`J{L@`!hafJi{htf5K_Cerg>&qS1FsuY9z@vS?t@ zzk;gZm%TXM4LZ}eX3;-SBVb(veTD9g8>QSwyF%%o)%Y+y>9BN>Vh;C9b%kAXikpDeFqN4`~LWk zVrXa}p8mwA%8_1;`|mpvb2E#{Xi-2}9yl@S<8Naz$s!ezlTdeY_*4LP>vri`Mxwb) z9h9d#2bv6o5L_=T3oDDhEUYk~m{AA6)IgbeqrO*-?{~;!X08!;?!PClyZQE*Txgoc zuD*Ve-y>Qy$h5%FX1rmD(3I*SBpT4s9+h8dY~j70sgL3iRG_s1KmbnCa&%B*p5w$K z&Oi5zIDhY%iqzmtb@c8S(cRbE9elsOE;=Zu@%USP?4UxH#3$%scns79*J||O9XzHE z^*M!@osDeZ5KlA^W&(;y;^ta+EG(|Z=fC#t*s_VYMs`I*H2Liu;ib)OT*W_dxx+;m zF!aKQVW~0)Z7$F9PY0jMmGaG0LE_|@bo|_eFjhDnrncr7P>js992`yn;Pcai{T;DW 
zgL@3dkt6rZp;7w{bjPR$ZEQI4AY6QF7eb7x4XAXEEBIN!0ovMgv9Z2!flFTBAgmMo zRI~DrytKi}3X4$S6K*->9D(Q^<|^ZuoU`UiN8Ep8BHA^8{NG>ux7cy^gJNZV+{b$% zC&(bUT(0WR!ZIC6ZR>T&L3XtXN}ioUS*SON?);2KPD0=~dm>RX#625Ysv%rhV!~i4 z7Bs+MvXEacHq6K3e4e7gabSGEi}Z939JOcpMP4MM=!C4i+tD)0-#+3CKiwVO+LwD)qfVM zpqo0uC*Gcy4>^-Ma}+sFLE;ZYs1^~u*<#3T=CT^@pF8OXZpLBA5Gyy!Gnv|m!~a*)5Y&B<4H zrb=IJ4q@;CsC3~_fSXBrj!x`Wd@g7bh9i=?4K6md=)q+euk?d9VrMwOA}irc?9x7- ztP&0;$^DG2lC084 zg9qY~mpvl>{wx3K^gv^A&V3r~1uv8R`~jErL#W&2&;s79w?PJ>r`+TPnxM1tvFK*v z)=ddsm#>VY-)hyNmTx=?bD0bPhUt@n~uGLZl$wd#Xu-Ou_Cn2TYY6}4nI8!t}C0R)T&Oy z(Pr-VC?6gRL|JRH3VirJbOUYeG9cWW8`$dP4Y+EVOiHh7IULD0=MV`Nf|#@SyuFH| zjLr^OA7qIvr62Glq420?Dx)p>J7nV&-7Ti$IyKb~BM0>oOEB5t|s zjyQPuNE|wFBukJFR!nd}*0Fvzte<@e#RX@T^gYL8GlQw{U3%URAApP-#EI(xgye>D&n>U`g zAWfF(dToFzcd4s@@Q7cl(qH;Fc*x=n8~%9nvP(E7?z=bcz2o-i9pde$)p*-memh?B z;upmAH|!Jpa)CDGNxU{*I9uw6P#0L2`O+S=yCB=I)(cqY4S0!Hs+_>XHe3G1^UR)n zV4_dYN?p@Tn%`mh|n?r?dC&ji30*C&XLc z{1zXD%y<2#XQw2O^kdkPrv160fjnuAiQUDcr%7r@BiQj;?tl0lI_JaO~K&HNN$o0^{BesDoV0+P${t#QF_CS`|h-LhY@fAlqu*o0ajQh>oMq~Wg zc-(o%!Fb&pUaOSYIE|H88*#~l9~w`2(&OUo@BDp@x#!}VE58>r)AKPhqI$4UCfUXZ zv@MbTSGh|la$n>>{f3Qloif=1HXLygjk)P4u!|{u>l}r#~D8o&v?f3qjzM~ZN=M-ykU(Vu<4~+Wp}j8e`is$=ueJu6X=u7 z)0Tb$vK#0xwhcbCaon)(MI0sodj)Og4K(T*ytG?_Ivv+dbwIJDN31 zpOu#@y>)T~8;%XoGv%qDbr;FA@~&XN6~Tr-0Gj9y%e*mSQF3>+Ol20PY!;wx(x1Qk$7DWRfdmaden|y&>K%tNoT#&iqm{dW*i~ zhMacm(8nk$bCmk9b=!xA`WTK;N ztg!E{dfU$s`Hqvz!nlZGB!}qX2hnf)tt0VMBBJ=@qc;;u54~wzI2f-m&(+(@LN9M| zF{ajdRcYvN`NufefS#&<>X5Gs9n1wWbl@AXogKLW)h~Pyi2Nvk6DRmI3Y*fhZ^Em{ z@_CVf4OlailQA`QBxa{4qA}Nqi3!Q~)T}oNOpVW|K26mvPl5pta|Ej_9xcgdke8xS zoa%rst8FAUoU+m(UjzN%(-NGpfXyBq8H?@Px5w74+vDO(9ujAqcV3KaYL_30Y=vxu ze09}<#m+_P8T8MwfJME`1NvI=oi^y{@J1~3ZDD#rWAs_Er&}VGJ{W?;$^u}l(n+}w;Gzo9h^>(hw z{asdFC6|(4HMS^7IkrEnCq5Cu{;7VcxlE@k?4hW2NO;s9Y%1tgnQX#BzVQ!x)US~O zS%d%Zn8nl+lWTF=V=s-ETZ+?u^ieSM zFC(ENy&4cSW~Orkz@F{#+FyB9JnN}Xjw$KX-M8Kl%e=s&e7yLUr_*v&J9DqSYV$tn z&Oq@u6s5A47Md}&I3HWL?1-zbx-wq>>%ZypTkt^sW>x=Rp_*eh%sddz-n%V ze5)PtU*G(CTz%zLDyt`cTwyQ&g;&V_Y_dP5E+dYQPefn8Zlov6^Fkpx-Pk-ir z#*?4;q`2kQ+a)6nZvdE?ne#dWe&DXtPm3Ra#E-@Y{^BG5ZTr3NJ@1O&`@=tqdHI2! 
zH^~OchYxxV(sPp@kE>r${qR??k3826`K)hnvenqGU9n^P zY4PEYe=6SfJ8y}h3m@uFi>Nb{400L*z9IHkx~cva9G>UEXGQ-dr?wdus#1a4OemT9 zLS@Wnpy;Ea_!-u9pp3(EBLGNhzJd^3hJvGx|DcEdp&DrP)Y*lXV0_V;$6i9C=z3qF+7j~&f9@1We62jH7dU+T06d14 z{Q_jCXe;3+Z6&wbV#k-V0jF^Da8W5!lnktXYv{S2UIk#EVuXxom@ z-&1+|0r+{?QJZMVkUdh_K=0rb6=bu6Rz(rhJbll#`d_h^G%g7bp#y@%?B3 z(zWK%5hc-x-)9~nD;>Fh%SIc9FTHF$RUGp**dIs$jX-k0#~ zx<4adNMp#xcpN>vKQ6swcf9R4e+_?Cpz$L)Q*fv@MW3zARYI#*NS{vpAaB^z*qV)^Z`GM z1$BUVSLUsZ%RG_(!3PRZ`+$>;3IP0~Yyf$I86Hc(JN3KJNg_m#cENu)!;lZUXULAgEH7!WAi+Q(?!4#Txbt2q4Z9+R zvrEpH#%ije#8nDudUTI5LiPY0D0sAPh~ZK4u?~viGm{hCQ%43??J-qEqC)?N8n?{2UR~hx!iolz4IZ&qkD3ZyhA`d9G($#hZNLMFD zvL#8epteD#c=H;*piszYZ!D-?ui1A)Ois+j=AGNjgF4UxgJnaCXs8`Iv2@aktm0c0 z%Ycm7@TO2Vj$rH;^B7f}dnA_uDX^vrxrNlyACYeW6rv2AqN?ZKoMgYcnY z4cJBoW4NCqR3IRobtWC@5O6}NIkfs4h?LES5%{3=zKVYg>PYF)HuMO$Cv-4a@X_^v z-{eEi=rm*$JGz8f@~%Pj@$u=HoN2_!woUPsFaC4%i>{k)xXy3zfkUR<@=N8_#h78P zx8)ytxnB|Oy_OqKpebx+SuNG2K|T}y8vy<>NmnQ8kX4Le56+yZYD$JMvi#sF6X=fD z&jXL*_~s%k#xWPf3FqxiJJBjXX$D0uk~4uKWUi0-#{@cja$BnXkO={C@SREE4&h%v z@&~D6@GA#G&rEFE(G*yI(Xh z$N6P1=ul7x>>fMjszYENAn))ITA+`*>xbBL;FVjqxY{Z+BT2_6l&J=#@Tj`pP9uD|&0^pZ(74$Mx?#eIkFjnDqw=i+@={7Ia1 z`u2Fpd1pm$&q~b8hSGknFYN{n`a9&Kl#l)^TKq^lD9>f_Mh;Nqvu*&uMp4VG15oQx z>sPL208#1A$yg<8wIOtY&%r+cnI@s00FZR+-ZBbJ3KpaJE-`QKot&JDoA1~ktF%>r zui|nsjdoK}F7F2+>0PI*rju>d^F}&=bsQO92!6Y(?=AZ^O%1WT=3TcsCfjm6iWH>3Yiu=KWAX_TlZ=UwA{fJKjjwt<;?`A0D1s2 z(O2q?-K_u_VwadzksUP7#wXhw z_(>hW)t%R>T_Rnobn0<=D-ROUk?{^SAOJ0G*O3dg&7uo~)CBA@xZ#?yvjT(YJX@Sm zM??0NfqdSsLyrz09f)1~4#c5D2jYmX4;?uggTtdSC4#U}tw|0pIN`gtx2PRokbg~C zyh$Ya#{b3UyIn#4#ed8`vd^SFJ2vHFA^h!GVanFGvLB!-9kh3pe}_{339tfzG!O^-MfI zx^GY1b^EPR*9f2I;(hOZSG@7hUlTXqa-;efli~xmNnk3NwM76GY7VL|g9i7mch<#z zC^++5t8aOpX-o^9SuD&ppoH9l9NUZKXtLjzvXXhY##dl)5FU9TROuPS#TPtyU~l}% zOa8<=PEbsf>W>}7KV@L@gw31d@lSYieBgcWj~U4n3v;o#(955Y5Z<-#>`MPU0VZ33 z56X^BEI`F);f;TA0>AA0sW9n-?h)Iz$72^V^9_Rd7BLVZ{pQa*Gu+K&kK#oi~aP%QaP}|$$lU_#kWS5a)$-@ zK|Fm_7Pm1_GB!5m&v&p@?cl)Sc*7gtY&=Af=<4i`2R-OP@w6vDCEoqP_v-mb+^qhE z>C&x!k?2ld4$6gg(34N*Kwn-Uz?S33rhKOyG&ZUKH#w>P__QY{=sU4slgTl@FS~ke zU+menH~#qfe;lKNjog9{I|Q6{<^$q6&wYBl;~nqt8-!bLx=Hdr@BGjN8{o6bV*;qi zT?TA}f^EeAJz&2wWcnJcW%;>E*+KqAbL_O_$Tp95gg%XYS+?b6LBm_#@@le0|EkmD zSJDT;Q`gOS+1VuKWn9SKMX9p)ud zLa;0Vz+Thju_b@$t1}UVjO4xaOuuP2{8azgbCu`&)D!pPNuV;&4G_#e8*L!Cr=4N^ zfNX}Za;X#f@s~);wHGd{Z0v$X)Qb_s7 z9%s9nap1rrx_lEh<7k%>rgcU9gO#*l^xv59Wg^vWEPe|06y5oTG&`=rXQedUDR$8t zpAhb8e;9!5?eC8bn>NSBO((=A9UVQnl{Rmx(_{h{-KiI#oD>pXipxPw_5Xy6F@>HD z=(WxaazhXNOZUr?E-X4BU~_ox9hFE&0*9o7@l##UI|eR!Q;x;ZWt`7r0mzvArYc9$ zMsj7p7T)PMuPfM;2Mu+Ltpy(zD6pWH`qI`*`xOrsTnVOZ1rwXd0}BVRD>GB$9(z+y zRzPz+f$@okrgndtkTHrn*^_n_cuO{0SR|}!OMK+RAZy_l4+cg?9 zO&=J02#s5KyIiWSo|`}7S&Qrs__5%`&4>VECy^7kyW#^|ut$AQ zJn4@f8+YDtW2}9|6JoNhBW8dHx5S&8%|H*_KH-m7b+`S?KpcT_)^bg~{8 zsHI1@IM-m&urCkRgk@m~h;iToz~G4uEaR2_RdlW2RoOA+@tsZuhY~!_NPD$Z1Qp7Y z@k?BmPkLpUOM$)$&lV97g6%cQQg)0(sk3pB{P;%n)h&LHN0W6w&zNMxLQGGN#oWYD zoO#L#@ptckPdxfD4~so_?}+Vp-5Rr#Q}XjA!xenmU)p*mtFbH8zq7kb^hkZ^TeaH# z1pNIm<$LxspAnCK-uqeG<+1DQ{*Q~ZZSlQJT4|u=>;`MKObItZl&E^>W-hI2HSw7c$-}~OU z`q~=}3(83ROpBE(d*tU%$5+1k)p*7;pBFb=zs2L6J-uD(W7G-GoP3AT={|6Y_mv4fpM>vM16_^17~q$ugTYt5(LJyy{i) zm`6P(CMM@&$35HQzy9+Jv17+Q{>jOROim1Y7XV)V*~n*LUB zNlz&87-IB_iDYo7l`HtRjU1dPL(j|jO9|+|jZFog`c+vnu7XFL1;4TfwUCL6ophR7 zD1b@;=d?FVq0|rihhMN9zHEl+TSG{EU5>xvRbKWBt9#NIZ}KkTQt-pxsD*G_V>?-l zDfFz_XaeW7$;Fmt`jn*%f+_l0&bY*pH4cPK$6^%fQ0w#7`tQ_(^6;sZrfL_^N7B+} z0+jI}NZ@fHaF#Tu$Q3;GtSZuY;3Jui+4f-9Yjw9w&)60_rsBKPDh-EWtB#Dl6I{M* ze5RpHbc=ZlU5b{BNi_&B#`r;rzH$1^vM0(1Kx~SAHMP%m&FLRgKgwd)B*xiUIO!KJ z)CRN&25{Tu2T+&!>7i(tAB~kgO|tuk<)a-{8H4fux4tRPIP26nykl#O4IYRl+V+Ks 
z=|7LtFNOaB&xMZkx%k%E{-`c0FF#F>|Ceud2VYEzG5DP%lC8&>lO3 z&M=qDd=hgt_@)(IMJs$gEH^ueEP6p*mT8|wWz5aY+uvcHu2Jm{I|73v_%T+Hyr}#W zd>lcm@r%0X+@O9aZG`856bC$BfW>cP^sWv%2p`4}yl@uT6K>Rx#ko1(BfAf-z{Y-3 zo&vRHyt=-z@!Gn@ksGcNpohD1N|?x`aB=PA##C1l3>?73Tmg>;14bQuU!B3F$`8p& z?|UJ*JU>F?Em*3sghK}B0qYuGf(F-@2UVoH$P>!qGtA(M=XzE-MmC59j02$7{X63}p zim^;svTA#7QBDnb&gb@*^$f~L*w_Z}j-5MWV0gq%84Cp3y1Io&4$P#3kk$YPbiSg0 zDpAthJdl*yXkk!vnybEzN?rzg89b(AJvA;tlhF0|bjK+tZIA=jtuphwY@}re1Ugs< zCUC1_tZ1eH6IeDxNe^##51(0NAi6k@*odfkJ1pUA{9G#hWpcUMTVamN*jwi8?r=iagk_^6yf9#Nn1@FuVnmJ$i#Yq`;C8q2jV9qbIx2aqkM6GA{+~|) zcw%1!py6SF149Z8Sui35grO816!rZ0gYrqwHAWLU(s<=Ff{dr|^!A5J>unL$ps}?Q ztE=`O8i~8M@0Dz|#A)|GHNNye7sT}9WZZejEphC~K@Y$!%7D5&XeJsk2<(QF!hl+p zWrM8CMl}$o8?+(4oK1`r@-ioy@Qltq6Tl2ivc(n-GjaKrKi>>v3&b2qMSq#R2Nyci z48GC2ikzhxmg14rtK z&eSckdY?f+cz`|%u;n>rwjLf;G@u-=YuoVax-1y*a~62a$tEB=3Vge(MfDu@cl!92 z7XuY^8H5oXa8;ibY?Nc&79eo)#FXM%R0qnWJaER5raiF}U@~BjD+fk6d#f59Z0*Vz zA*=TC&DPWh!lL--&t<59bZVGDkn9V7t)r=P4j!r^9xzIJ*y)~ET0J8@+wt0Q0$=E) z1A8OpDjs`{&OsgEQa;25Rz)B`pQBo*rI0Gl&)Ud7vhBV-SOy$ zo~6$2q6emc<@OMek~BF3rjUl>K#PGku^Qgk1}_h_HZAE<{J0YDdX+eW>u?;5THTkW zBb3|1C+pU6p@7mO>R|Xaz9j(KuMAbZg^VDY-iz*Ob28mB{A>BKL3u?q>?S!p&3NRK z2ialg&1X6h^VJSgWw#PpCGkz#Q=!A#__XVDN^klL56VRzUB2ug6BNvmLs!aF3g?v- z)vpfhiHv&Y7w&>hTkfjNkLgBQ;sJNy39j}X%;w1yL`56)>v`jh6J^>5{R|cw0^U`s28r^3$=QF>-U7?%_X0y_BEPw0Q zX#^Me%hNkldb6}32AcpJH|V;r@>I?P+~nS60akve@nOd`o{MHqqx9g4jLpfPLQYI) zHNk_&j>eT&UJ-)>2i43^$2;Es_IS^`-WE6Bc)fgl`X2(M{2mNJZUoQhRk#vBja$yf zul7vkLu={Z{}xPBrLWP}G)Xx|0P)di&(i)%>*)9kCfl__L;OB;7kOeOJn5!q>*DZ% z{qcrBe?uJOTleB$i|j0YCBGpm3Y@U%#CXzE9`CpDCujJU{=OQ|^lQ11&!<7nxRv&Sx%XD-1AUfme+yjkl0*F53BR$Le4Ce_b>z?y z$?!rv;~7r_4dqMOrF>|~!i96sJ1;)|(T~`Nzw6FBJ;)Beo}3r{lnDE*>idzJ1v$CEcHVEyn(pjuBgY<}oQWg0xn**C z%e7a>2S4;-(}U2`l;1Z$_fe0FuYUEvqrIm;?%sBL?A^6HI(s{^y%b(-TaR3VH}v9+ z%pwoo0#S6QT<}nJqFKD=er#mayT0-H0dQ$l|Dk{PI)ljaPzG9M$1-3W z$Y0toU?SfcQr6IMWPflTVA&PrqOE_VxRX)vm5i*+=@gsyF-+Jtk(*^`tkSl=GXz)Ym*qyXu_;pk>vr zc-vI*i#t9+$+#4`(!L2!k)5jENn6!PIJ>^~C3LUHo?K@EJG2CU=)$2;7C_hF%8syD zu|fR9J~3bmpP>y4jo8kbH z@AeNJN_q>NLgMPW7x@!jyzN!wkcFq}b_++v$)?D(MXM^;sd}(;WWSD1MYbwWv+CE{ z&?=g!PXW!8jf2ha7_X!VKBZ?pM8X(@5Y5qGXwRZ3c4|;ZCK|;v^FuZlT-rK>C(t0C z_%$PXv8x>u$<#M@Qk+%$)8YxyHre8rzVc=9-EUqLy$^U)ME}Z|P(H>2SoG{q9ioi3 zLy|SdWK|w@gg#!;g5OZ=Y4L-r;Ish@@3=V*?Ak8g*2m=dn9!F( z3q|L#;oA&KX^DsCPzG3Z$D7!*o6@~z5G=#k3)xtYZNJOP?EM;a`)-a{#!ivg)fe+ zH{B59!$WQ#+3}R|+p)RCA$Y@;2ZwpKy>wV$4Mt{jC=0c9m7Y5 z%meHc-^|C3u3Wt;)~wqQXP$XxJnZ2Qj&mOR@L0cLZ43>L$hP1Yu~=(9Zn^!o_|cDl z9GCw1M{3t|>-@sU2cCWZ_~}o79{sCU#vOOx7V}g2=IViiN5vy1#%FAY+S>4Ir~S4* zcDG4}4!zpDe~;kb8h6~XU4GE_V|Zvl_JlS`_N+PkdOh7MY_~4D=%RS)A3Q#;zv0H1 zn;DBoJ^DQJVa2MhXl?9>cfaH9@#epJhb+=u+;ZJb@y0j5A+EmqI-A25;nJ#lH#5G| z*&c^>m3k-$MjbEe(8kENTz2`D@woFI9lyEa${1zGE#bqCQ{K5s?H_7_IJ3wKem2NI zoK=6cNAz5~W_|qYzyDW!`lBC?jx)}QS;4JqX>*%H8;{U?cUH#DaUyN(1+3D=(q2`{ z;m02T)ica=p?lc@wQ^#JsLsa)8XDk|mSDZDD&SDTtHvXif}U58d|d+VlFQV&!ZGh@ z6S&v4ZH3C=dChKQ$el`vuj*+ym9LZ)%;yTrZ3b;nE~+Z|sxTN_t@Xi69h`{rh)-NGw#1uW_ndgxqaPY`Lq}rozI)VP84*5nUNmK06#cvaU3lPcG46^FY&!=Y zY7bmb`E7pDN;WL#+_+!rF(>fTLGW0ie0$&~JdqQ6D+_)izBvo=>92Z`tLRI+!>)Yb z0p9fGnJ>#vV6qT2Wln|oIdv@Kla%Xvvtxj8VZMiUVNS}@(9m>k-NNt|F90C%?+3++ z+2xt)tNSEG1|g1M5K+s|xt2h;Vu)^qlYW%m3b!nfzl55>OrgsFtOA>`47P$hQ?)=I zJ+G9yhPQ50@zdZzkR{^+2T@!-qxYinvqP1S-4RiIeG`+NvtnvX9ND`&KKk*0iYu?U zK6dTc8|@t^jvRmvHF68no^)N&-{rwovEMq>+R!FuOUxEq)hHvt41_jG==fGrs~zkd zAk9@D2#6hy(P8=BQhqt_yups+4?rulLfvAU-`s_@;*1%4TOAvhR78Ta|oIa1Y%)94s_)J@qCH5dahq?-QlsM>sojrToypVn4M6^K~DMV)%|hu2^*uQ zz1`o9L`WH=#kpeOq&TR&QEA#@L_~A8`b*=bIudOJq_9rJpdv;E{v_W@hrO(L<-2ET 
z02X4>c_NzIa`Jb0d@@?wd*h<-UMV~xHlMOey4oefp*rA@ARit;r%b^U?cC|A?xCG2 zP#N$H2$Rw{PS0!dtMnpKE&xy+6>mK0_^WPASg|ELf>IU$$R5U;o*uvHKQ5Y$kBmwN z8s$i`?Od1Tqh5TrSSlxz&N#kaSRy(SU*|X2pijzrF>Yy-@*2^Hei@eZf*PWjbV973 z!W{#F46<@^P@Ue}@7Sd_wju8Sz%%3XpZ$Ey)lbEa?c3D}7_o6)n1zls3WDnv7$auz zg01o~I+Ry+TvP-7Dqd%rp!vBel}Be?oliQb@j?Uf1jn0BC97Xa&q5Bi ztM#CsXv|^(LP8!y#g zA|SgX$a+Y+Y4HY}JP;sS!cXAfkRX3<&p7(XiiVtnPS=lq_>=go^l0Pi&N%O3 z4^U?>S8gM#EI#wj0<=LDy$ImCG=P}+lz<~to@c0@!y}wO>l*h-bK5KE2L3#Ue*y~( zKdzMvwab;_U0Jju?qNC!JJ87cMo7#?f;d1cEKzUbvWTnLRcsXeT$*sr^2(cDHg3Mp@?aC`OFc@xz=La^<6_W(@dAK$l{Zx_ri1@V zlX)_24jfjrbG@_fq;a}>>2Hl@SuV1ROu|#@ow(%0Ch;@UU*u_Cx_9`@>u0lU*sOS57+iboEPd>f@$Bs|7v8rFEd;}=Kx;wrVN#GL$(AVFiHBD3Ih%a`c=v>Xu z6@G4Gp^5mzP_^G0MIUHE;05WZ3ukCn(ZbhCZ+lei1|2$`8-iVyAW+iV*G^vvkxs;s z5*mR)rV%oNpF15@X~+lvCFd+)aQRgz*YqtcA_8pOC5P_l8x}CKF3a;0mIj`WKZ}Bp z1bo2D`k+wB8^Q2IN8}uTGT~<%#h_T)>6AHOP*1u&M~@uvwx9z8hXrps-u1S>ig&*A zZLxLhR{2_d0zhz-1zbz}SVT`yeCD3=l*;m1@P<#+yG)=i7umAy@$#@llYB;>#Vwok z!g?z^P5-D&^k56iTdGV(H!x_RtZa3?Z}+bFi$8mP3^QpXd2eoPv(28E8h754PC6+b z{m4hfKY!w1Sd$Wacki|j2%T8Co%P9j&v~VN`Y#0JAAG#Cg08_4)-Yvee@5*X@dbuI z<3_#2F9s`HmQCsdhe>PjX0YMtz<_++rg+Lzo}_w9pIA_?_7dEt<@=m?!U^&C$3H$k z_R){1zuFwz?z$@`C&&FM0)pwHwCwl<6JeJ(e}c(1($3B-##v{b6(4xtyW=@8cz*2KwJYwt<2KXCyGKd4 zvyO=n{(H)0p;vXTXj%*Ok$f)xk}xGr)h{A=z^w{}7uY)Ujr(QKQ5f`@1pwgP)!h?A zBg3(4`<{5spZ}?otn5EA{_v0f$deW6OXpw~0i3CaKP%$~LU>E3(gqb>0}sxX&WfgG z$1~zdlUwLw2a@*INr6gRnRPI%0$+U!!^h64?P0=^dy=OO(6zF8;9YR-&l^ewrwT3` zI2Hd0`Mj!&+i(EMt_v3ABiP=WM@j$OLYlN`H)}5>07L1Jcb8QesD=8p_wN6TrLr!5 zUT}2zl?)J1e)CQ#jh@aWkcS1G1jEqb&kTu{hz5OK_=V4ie&Jj27QN+9w4@(M9qAuZ zCwR;Rng=UoZ+M%%tE(qAo_Io>AltBV^_u8k+2`>l0y{@8*74>$c9XuH+T^Bc;FBE* z6zs>!W+10oAJvZX;hBPZ3WqNEB;Xs zWkIK^MJ#E}fK%9SyzwrUPL zB3k(y^A*q7{VZ(~^iaLQv$7Z1ZCL%kEdcnvfKEGNVCOF<(JRDI3@csKNU0Mqw&DgPmXVW`OA{Kg*dQpM+_d?7ws+C zujO0t$TFX#rEGR2Vv*mh^qmF3!$afI*4gIC?mpQZ79cTxxuUnjzA-ppi5Y)xYG(YA zaUAIm?K;n;OW2<7?#{U3`s?ER-~CSf+vmP$f-*!%bvJC-%|8qrMkQNZO8ar)3cuW2XXcFH^k!1Xq@|m z$Fmm5`5PPBVaSTnc^4Q0&4|NL`eTEO`kP4z&Ogd`|t%pbQ19YrZ^GN?TwQd~5bV>0KAy zBLt;hC_VVF@S8dmKNL6!F5@r8m!zr7zaM~A$}M={cPZZV$gaI}n4Syg*)rL@Bg`07r9bQ5+ zW!NwZeRKs*hK(I#;+`>NmFLB9It(D&-JBc5mxIpP{xWt}`TL{ zsk*%2vEa`o0iS>^f zC+B$4!mt=~6F#)5^nKep+G_oF7CG?gVCKhQLz8?C<51bbRDqOleA7H9Jvi7Q&jwoo z`J(UTIlj93Ya9Ody?_2%W!T`S%R)sjWkBhQvgjJcU~~w9QOUm7RXJ3)5m<@?F{QU6 zEsrnHK)8BVoyl*3td5M+$$^U{qm>4z7y+fPj9&^Y_sUyKU*Q4A6$_D&cMnF<;lSC{ z6Y5T6r;Je_Svp<=l+N_{6BctIEL6#Q2orbD_YLlwoFeCMNZasy3@}q#g(e z6VG!Jx_JrN9DA545ML(L287SvG{RxKsRx_0AeYCw#<7F>5|xzv5VJD~Gk`M9FL zSB}3N0&IxVkO-Q4D-mTh?V!J|Yha0oze}* z^k($~pLrX(UPh5^ljuM*p*}FMF9wF?u(TtuIBB^;8zjZhIPOj5r6|&Ik zoRi&DLU`+nADK%!!L9R{9Mb<>$#W{HE+GHHPX zNG5n&az^s9_rO6#)yLY6>*CqZe5MT4d<+~uEL>-eM?)QN>8M=u8(?&;RR($|UhxL2 zJMk3Zbc#bJbj*8zHVqSXpVML4laA8aX7IrLr>h_U$Ow7;2@N~pxhTd)P-W6E&}pUa z45lz?jU0G8Qgt$U3k*4*nwpIn!Ee$%Z`8vTcXIJa!Bpt6-HJc$Ao|Z$hL_18>dj#5 zjLv-1e9~LYODD)AelduGU2(%%)f?IOr2JeCUQA3&ZZ*ArK3UL@?8gd+22!1U?A|kU^GR56SI}pd({L&sTNX~wux(8?uJ8wXFVGpc!pgp=UJNR`ArxJmU$o@21vGRlR^UxE51J_p zi(#Pqr0T|`68Qv1aFY*gNyf7oHSX9p8v1F`jPsQAgbp3sxYF69vq?N7L(#3N zweG?089OPgxMsV$Y01Z|>cZP{bnqB&zh~!Oe+#ci9kF$5Rw6RWBj;QpWj%xKA$&wv zAQ}=TKI=~L$d(SvfcRR;i<3DhUe|(PI6TkK@c=8!s6grhK61i7ECquk-f$e_Ul`>$ zcu5;s%g=SDahbJ%JmuN(`e(~j+Seg?H#HxvZ7m+O%!OjqO=(GpO(8#nKGRrO8$MJ?JCV)VF0Z9}shS-yK+3j*plJ9YL+KiIP5 zhZDFQ&KzKjL(;q+-6;>eL>vH7%9lrS%kDja?AkOwsYC6L&W{wvq! 
zlS7pjgHxhPNJRCcLww`fsjH~0Y)1IxP(7?gRQJmc)4mjzm0h}A7XAANwN!4&E&~e; z=c0!ULP(ngMSsG{ZH(nza;JN5|7)OC8tEfE)vaM-#w;5g*hkB;&a$1zg8$UsvAun1 zYrHU%Kiy)}Ef(m24hBH!NHAd5=m|02m{oU8JuBU+eWEDR3Im-$4!Hxg?Un2=gS<2g z&B8^kP_)PhwW|Zzs(5d~J5%THy*JFGJEDr(+}UZhos+XMaBM6N>^~MaZoM;pd*uyr z5OgCLL>QZIh44qJUf= z#asNUidTk1TSbYE=omB+F2d1^9VIJJw^`3f&-R@RXj9c@Rf=y3v#U*0Ba^)O%&>i? z-oz=tZW!#tpE95ILp*sKgexS3e~lcvrgNL=lus8tXJQvq%stQW(>T|QuEgiP^20yk zbkNQ*z+OwEGP&k0M&i8y(D&r$DA%OVym^3^?p?m_DMAS;i_a(E!SE&*XXxg|i3o#j zWI0&03oc4W8C*+l@Zq2_J2JRFx^Gkhe2Q7Mm3w#Hqc(O(HgZ1BeavIyapyh8J3W9C zc82yEIke1DHuZO)4(M5lL1Pq9AeCV|kDd^(bjrFEz(ey6l5<-xyXGI&IO!cTwl^O#VZEsfFhoW!LRc6jRP>5Vn3*Tl1){VZ?6 zIx)#$F?x!2g0JvX{pna^Bk((Oa!J;tY6tK&oA5bl(}a)xCKQvp;}43SJjaT;zM?sP zfcu>)XI4HHd2=F(dM?_ZK_0&Hm9Kz?%1c=3mn}wrU!Ny?pZW*qizkg~K+uQ{kFofW@0 zq@_TY?(mDw=7D}v9hbV@4b_8Fj@^q=^U7FnjvA&ts6ZFGjs zyz!IYi3ca(D7^q;f78Bk&OEk>&X3I|kDl{T^(;pKrmeBl7?W<(JiAGnr^ABBVGx48u;siZkRNG5SBM71)x?n3I zzXF|w%Jd14H{RrC;Sc)5AS#2LBw-;wSyHxT`IO#7pz{2y&Rm;MFjOJ?yoo2fbh`FR z$Ko9(AXyy5kO1w1eJX*Yzsq@XrKqU0SFvdIRThp&+vBjJ?GN&^; zD|kmo`C@!pMXjt*k^?3@>TT?(uj-=qhEIh|>CEm5ypO=P;lQ4izB-?!n>*uV z9c80Y9ClJfhM<)B56+?m@`GIw`lbnbmlNBzScd<93pTqbw3G6Kz9CPUH}$a!_W>{a zREqZ_eyaP7xL0tDXE;Mk?sUI++ej&-y%Ldo7lOslUB5=^A$STUa}Rr|eS zh1*YxV9bjbO6bosaes8+Xl&oUEd~#(U7wha`=5S#^sAkpo|yISww7lpAG@sOVyD^g zCv+Km5FPedoJe4r5UCyEbmv z6sMhbTAY0HsnONji@JLP|IS-)iy!^u7k*EN>>Kui}H5=Z?C;7W+o=&2j%Alx`k(ZmvpG3J(`sM zA?G|S9`o2o#@zHmTz2^{JhnD1TeWk?ws`WB9tf#?-p)ryBnyJkBp5T(Q}M^D&lzW&68G#s5DSup zFaFP$RbO6s?2s-t#{KXAfH?mtPZCXsqg(c9QTFnLlTV6=Km3t#?s@0NOJ4focM?X6K=#=?- z*(GGBtwVNC@^;m=*T!{M{3cr0ZIpY2{gzHK_HE%5ysG^$^6s~y7sM^oWlsDEt^+4u z`hh}$QQg}%m1j%AQNYlDLZ>z29^8rKOkPq~K{WWiK*Qo_ewH5u&U)@c*YJ&i9jT?M z%5##cOy5)|Y$7Ki{J-1LDm|f=Bp97M!_~`i#m0=%J4;;<>hp$PWai*hC%KT2`G(2>ET`mY7NwIxp^4}qg&L88! zrD&!Kd7ecyCO>D3h@QC|96wit6VE6o>j3BTnE?wmVFRsf%M)g@OlQXsC*4q-$5|;8 z7&>^y9f5W+7r9!pq!*-57|vhaLjx9AEgj^^bofKijf%4v){A7S1Aj#tDEi{xSMcYuplbrIBbgOl_KE_AKJ&%PSL0i&TuYPh%#L%JrQ8zcNe)YcC)LR#C ze$BJuiBEoH#PqQkI(8`L#)oBda*Q5-2%n_T9ekk!^Bmx8Usv?OCvA|e_dJKr(9)kL z0_XB4TES~#+y{cLvH|SY?Y37m;V>W2x%^j9ym)CIz*Et~FB3@apgsLg%Yb|p%E1?> zJuK~dnvoo5Vyuu@RZe-8PPAlP0gn&3%uDi|egZZ;^EApYm(3HN-bq$WC6)at<@aaS zRW19s@KxnHA8-*MY^4s%p!`i<_-W+RE8>-6_BVXsJ!Js^HRs4s2$Dc`_?icW5LH?W zPX?zw4$l93sM4BPA|S+4SxGeFc`mZj*)==Bp_G?-Z2&6VT)wZDr7?`^n)G_MWH87t z;)p;Dy3~|X3=kJ22h>#rS3{GlerXq*)p$=wvl^XeKl53!as8^e^|r0iEa!D)Plx;v z-o#iN>({M}uC9hy)7u^=Zs?2kYgfuy>WaQzHCCPN{(eqRZ+EZ*4vlq7Te~}SooZBj zySml5ws}C6w-%6!<`%XJmy)TYN~3OjV+WbZZYDL=K|r9zU>a9o+M#a~qeVIfpKydY ziGgTVx3FrwOmIoibj^er0|qFFC-ansNk5!31eeaDJH?_e+Y9xoE^We5k3_dh zpmhyPpsRUf#@dwVQrF0m;&VVjhcbX`iW{&M&6Da(aEG!21t+5QaEGN9famKY}0(x4B+hrWP+uEW<24hN&7Br=U#1>AxMJG0Upd;lJIzm-B z=&C1pSdKx-PhGfaVthEbC$AFm6gx`1;hDIZCj+!2k6=sC>GblA>U(zYle~7v2`8Nx z&v@F??RYY%fb8&X7q;I=9IMfp^uuYUPSoAIWGD`q zVBn=$3Q{JyF|2+wNau2o^UXjYfkB))IhQz==m(1dc;2Q?F^0BT_qk1Az4e1tzfHhW zJv?BmI-~1NtxaA)!z$SZb%s6YqWpA>sT(V4+gox=(N=YI+oZdE7l&t55iWy22ezdI z@BxPo=`;>L9fq#PhsJ{oERR1=?F-!I121AZM z{FjE?d1n^Y#E2)UQFEd+6s+ zIVd56)||LEWGQd}>#Ic!K=3b;}m@bj!~6MSuSa(YVL*!S~2afAO$Wohsf& z%>fTqiRX4Zh9?>`m`WaOC-Ucb9ZusxW%%CiG+yZ^et}!N_{1}42HkUz3L41jscz`p z;X}vb$Cv*mI-2WZ^M*BOn{_GeA*_Lx%YyDLOYp;V&gV&SQ{6X8VVW*I&8rfy7Cxh| zy31nF(Mmpzbi$;plACmi6NjH>UL|rmwGBKneyX4KTyX828V*hpbr<@2M*D)(2+hXF zr(?nk(j+O0qZ~=RS71^W@`=;tl9k_fi#zO;%F28GbSom0hDUbRxF#b{X{U7J@Z^?d zaM2TpEW!dYJB8GP9QskxJUb(Bmh_lly`r$n@yfSZscn^9*U2|=h-ZqmeW0zL;rq?4 zF+4UNM~BAZp$~glyzF&~Xn z3HJ{8Tg$-QXQ1-S!ty(=m+oC?b#SWqq5M?aJo0&o2fjAHcxEe+hwqmT{pe6h!q@a& zRw}e`TjhMBlOPb~R7wV!xGtfP1o_0*%ZY1ptNj)^ur~~r;oC}Wbrudv*(kXLFelo7 
[GIT binary patch payload omitted]
zr$mNb=x#+#e!fctZQ-R{n1rWtaV~LAMQUqH0t6+Jwxd#|9Y^h%)|*!@DZbCN1#4`v|7X6qgWVcbgD$hm}3&d7PD;zA$ZGRfo zMn8h9Andr|^b)8;uUzayh@qL51QLE+!SdBN|Nl2kg$X+T8;JL3u_Msc<7rQOTKx3Sy*OU=2fr6rUU`KEw#m5Q(E0IOzxvDZHTSu%;nD@E&CK*n ze9cw&RdWifX@!Um(v71fxSV$51_8hA=D{Ok*WzyHx6-ZvfB#E(QMXN{W9;Z`AXh{K zmCKDNd;^v@>>ADO*mq#BC^)OQY14ps47ATaqI?RYXLo^(A&D#`S=hG}XO~ZCuCg7M zUwKK0O>H(e2eGMs^_Q>tvp9D2v_DCJVM4({hv;!)PJ>`K`#+0D^*gtem-32n#@Tk5 z+V89DPK*q%h?)1Rn>MfXM<(fshLg#DUVw=2`)pFEeDLwO0W$!1=!Y&@R_&)FBluY1 zf++DKCoJQ|My0b0oH1Ln%}VN*Qa?2GV_p9Kf{rd%#)s_!5(QyC_nzF25ay(qWR zQW-#>1qEEk2@h|A>6tvXJdax$((a^t8pCxtS=M-&UZARFJ8Wcl_VuHJN&WZ?%4V1E zRZqs+sS|N<{~_~6S4uf_p-!Hli#C=S&{r1StuLi|&|QtK{)S*}s7UdX%N2dqcP3Q; zpPiUene-^e8*!cdOpKHFXrqSO#&(LUrAU+MC#16biZ9;u+TL_N@?#sK`;=U4L-lR) z%-E%8ER$1zbCvgnIcNq>+ma9PfGWbHOxfAyLER{a%T@^SXp>{IYF zL^tKxVd87@Du%M_WGVCIq$;1) z4`r(^=rGAy`jGRJp2?-!c^EGou#6S`B7;V8C66rSj>;iE;g0+LO-#Wd4X*u+Gg#&I zL{<6*Z(8?!@>{O>54iY=qwh0G8>YqW0ato03wZ?&`n#I>W~SB+^= ztUDEkI&Debu#mz*Ci&B39cTiP$qV4XLLPCQe>FUlmIa)FV9w5R6=5eXzTmug%wxaV zoBtfPKz9@*<5l}476ZO`N0xBfJZ-&|U}JqXR#x&BJMf1`IbaI?StNP>v%gpP58}4l zZj06RHOc7hSXLmr?D9;$>Mnsd0FJL{82yX@myt;;+;l(!9n7lc$7Un zTx3gM6>vJ`!L4#|C^M1UvXns|td@DSn0xz8r80&9&x?A(-{X%gH13Q3Z1UzDg0e(!#nEKcHFZAtKyd|@Z*-Jp zjN#S|>d6~b9NHYo2eq!?(I;4hv^=&R#EW@G*pno8S5AdDr_aNOf+2b6j;Vnmd%OO&(VvxMW*NiSn(_KLJqe1 zB_-7vLa@%q0U}yc*sK1~eqeoA30&ruuaf8GCCOIlDi>UM$eRk+-|$7HiI`oO_2yn( z!*cspH#wV1D6HtE)?167ZQg)y(oOj+D_$(DrF`Tq)@GbXt6#2UZNeJE!|}4k?=a+d z4L>J-SPtVwb-}%wnx)q39*q*YF98<|j4?QOXWaiOmon8p!!pLoAr4#2kSzM))1Tqb zG$=b);m9C&eduqd?LN{~J-0P>Zi@EXt!8XB(+<>rJ&m1w!lB>LxFg#`7gC!KxIu|S zX>73a*0prA?a5YJ<=bY#hA=j2W!Z>rNT-=<&&9;_tkSn7k22jhOJ!o^ z$Ii&#UZQ-tDhvM%&x=dZQW?`+(Lb;H@JXeGd4&t7rP*jL&Iqr0#kHfk(3Y#P1@e$y zxMJ03n)5Su+lz}TYa!-#?~bLtdxgU;*~OSscxz5+W|e<-M*VC)<^*?Maf`dbMddEe zISn@tHrs8b5nj-~!R@2u6MnLMM|w(Wc&TVcc+aa&yad#mn~RxQl?@%{Ri4Ue3vc3p z#mh&tqUkg*@k|SsmT;GCwMA#qysi4T=Y)rFY$(m-^t|caQrk9K3t|h&2GQLI8#K3_ z63;c2j?cyN0!>r6w}nrmt+o)n7G;T!)1udm+MdNgX7PNZTZ)bQNcLgYZ*vXTYgBXr z_hBr3Z%jY49!y`vjJxMr4E?KiD*9Qq#W;?>S^OA}d&NIA;4x<0DV5LZ3e4J<-Jc0R z7e+Cb3&kzton?y$`C@AwDm=;>cdMx~<{nHQlq|4hHKfRWSV+;3TJBe~50i!1uD z9_6u12C~^->09DQ_>}P>lNVEQ!DPA`pYC`(Cd;qmWNnumhq1#TkxkD+Jex1$g@6-) z-lTK^DC_-g;pw$E#YrQP?faPnRg$Qsjtth6IcCMZb`=jy+RWvU9t(YFcl?b1zJW|u zV>IJ4I)U>lrs`^*OKU8j+~!UHb?z)z(JM;38qKZM*zTT;*7llafNQa_ay)j=O~ut0 zABf-kjh`-?|J;suQ*_Rpj2Y!^3eHwf^JD4j=pB72&F)idnoGt@psRfL)aYlqPvW_% z^gPek71nO3-%m;oP0T8f#sa7Wn&@ca)h+bJEw10E&hpb{@NlufWbVfN6u!Y&&#`XC zJRx;$aI#cik_G0S4{M7gUpm076-YUsJ zQsI%s!ACI4QdNXtdJ9h(dNN);XGnojqBAZ-Q~4g5>_D|M%aqPE$_(p`!hCYq&uI+~ zVePuBLV8Xt5{Imgev&pa#9=WCcYG;djg!wcz8IC6#qr|=;=dB6XRb{%eaH_{)v4XN zg_aphOv(w2`ewY^I~hTY2(BzV(OLUF2?wQv5&1Sas&22-iSGJ_@ZXM$ul|}?(%|}) zFW(TGo%J|#@}w8{4xGO?mUhj=q6SDhWJ?VWV{Aws+?84Z$R7?`Wa!k}rSdX$dJWO!Pgf%am7naxf%FsbLH5cVRqs>=e3^m;4iIhaxx z{V5?dKw0OO0qO_qWHTua$u1g@$5)R*W9L?W3op{LAdOAGue1dQG*{S^0Q$NOcezqF zKb6GfHtZJxRjc8{15Hg5Uir#Du+B*u+KN5|E{}ffW8z6q{`NR} z?9SL+T^A(55?u$Pn>RK@6WVW@2^NDxuO~%%Hs1Gv55#q!zkUdhx8nJR{VRX>cjH%n z>$l^f4|z}=KXqr^dB>5Mky3rdWf#VafBJ>-|NP_+#@Bw`*T#(Gz@CNu@s@vha~!+n zm`d4Ag;^E);(75IS50qRv5x3lWtKT+T-&l_gK3AZL zqOb4o^vu(@-g-;C>)r2hKY>8|_w9>^JoLeF`0!!Xi@{KRQnZAw$TAiJIca(P=$ZJ{ zU-^ajr+<8JeDO;+#@AeZ-?;Q%m&fT-r$t)^Sr)=1AO)0xabW^to7-?nbw;)O| z&-#UVpl6z6i?{~6C%2m*}ST0HDN zBi^<=6=34BT;V5~NxN;U57+OI6NhZ{5|8Bx@s`>Eb$Nst6YPM~}po#(yMRn=xDIk!923LpJ$(-42uy94YpIRhf06 zOj!V)Z2>;Qkw4?1bd1^FJXc$S&7{WTh9(e<%TsDQ)L0G-s5eTv1TGuc91uu3%7(V4 z=gJFDu-HeqEYB#ba!Esvv0tt?oXM*;W6aEPTUP0Dne_M>J~OYe@vgj*8+pJW@o8Xz zBbd4Q4?ggXo>eE~>$t?hHz*kAl5SFU!w=2X_K*v|bss&@&mC^w0ay6}T$>mD!s7$9 
z7a)H_kM#1pQA;=;<;Q}N3!-niYXUo|BLWySHFl2D`P|LTdh{Rti9QZg`|t?{#)(|u zpnYM%!l%Ha&(Q~!$BXsEsV`dZL~d#9aUYe-ArYy8vKo(Axq=CHvt{S zHW-R6B|H`gT%Rv)PslR67T1MH*x%| zG~V!$KV3G(Nuy`dP!GAl@Wv$76%22_=$R|q`wH~tyKrMcpZMJ9O5CYKkBMFl!<7Yh z*SWx_9}bjmSWoBxF4R*lI(}}BiXPAvKVjekeyaDD+OMWF9X%n0{)MhilXW5-eKnsm zttci-dzv)_2bZ=727CfPB)=H0%BAh0c~5?DQv$sIjdDU-^i|xwM+p~cWlsy zR@5LX2;O|NpESsS=Vv}ie)JI1C@J#E^8zJY+BNT63vR#tHl<u4=Aa8Kx@9z|}IeKN*Es4n4@Z+PmWz&YW|ma&CHSnwr&;HFG0Me~x~yBA}2b|Egi;<7k)=4|}d@BDHcJ90D@51;Q%VQwkoI(K9;2O0R* zJY+xuvbF=Qa}6tTaj~o&f)gYs9Dfod>0vLlDb^YU2L4c_`GiCn&#c0aB`p0IV9S6I zG`uGRQj8cM)vvWZ5#l%!|XUqheWfu8AM z4l6E}f~wzExZ70}3l-!iJ-8AsKJr2b2^?3;EO3a&9hOmDviL`qJaRB~ zjB?zVTN98Uzv0S@Yfsu-E3bb2R!cUeP1#)IWeYlKcy>ouRcNq`AfCQ&(qT&smf!04 z*kGED;)i2~c)gAtF4ts|OIi|=ReEf{( zlvZD~Vf_k7talo|UBxLqGLuIUIPORy`*A;&sWvnQ6@%=NmDvgfoq9>|2_O-R+4uxHJt4bv_T*A*oCYUeCvk-zgrWQRz`cOyoS7OzN~M>e zWqpSYcF(yqezPw{ck|0Z;3UNezG3VEVdF=c%A;W-*P!IjIN>Y=(8r+!>v^~p=iECM zw?H$c_b?8}i6+pxCtSRi#n{7~L^u+L9y^76R|dEhI_LZiJ)iWf*Kgl0C?d25B~Dwg zoa!Z2HY0_|aKj#EGWBKDh(jw57%tfQTWSKz6?(o#Z&SqMtuGYpRI=+vs+ok2i||$1 zwp6N?XG4u)aNw)rvgA-9#i}vcF_5Cd6%$M_Xh}A7>Y^ECQlQJ0_25W)O|t5nX+Et) zW4r!MrLVSSOZdb<)(t?pz*W5khf_Vig2h&4LJ!wL46`L>ww)2d+p=nmJ_}9YjB=~S zozOrjruPTDg}%Q*p}`sd#>$$nGvojO-`I@CzIfQ<9}`c0@{{A((GziM<*b;44S|_h zT-+D?_wS8Gb?7N|@cv+3%0owkbthK0Hsg-7EAi=@kH-f;e{;O)A3qiE`M|%#XRrTq ztc!8x78lHHEVQ&V@U~~$v9NnV1GENk4eX7E26#?Fg0BYe2o3g=8qgba;*<>M8hF1w z0`t;o=yRO$8X<5I~V2r##Ud9t33rGV_#?@UUBu5e5mPV(j^0(O$OnYyOWgxr`Ajbuls>_7ti;l+lU zp(!iZZKCAhX~6;w;BVl9XND&&)vn$G;<`WjABzd}b!del9sPs>Atw-&opeMt&|eaE zV@h?F@W18Oqsj(uf+^Zj*XgF{08J-bU@n)_o6s=@s@p_u={GJ^KhvVT5y`}lQ^GN+ z_EG;CsNXfF<}^vzj+xn|n48_BG9<9Y-)vCliM3QJb5Kgdpf%(l`TN_@+{~=lLCnuD z8TRtYWyV^n&c6jc->6|R=IieFwLlV$gs1X0HQ;VBYKza{N6XI14Y`6n)v-(aHMkx~ zsIcY?3S3LWqUgZ`dtAr^9&k;(;};G>o9tgYs3)IE1S<~5#-;bV$UN0qU-dW12ciL!nf5fjd`b#H zz7@#8<*opUu<~>I?bscsqTQU2nf9XiN^*@8ezHp{Yj%FO6p-EV;SYZ#{_Qi@$G?2| zWAT@N`R8%!>``A?I-xe?EhFek+rciI9Tspkd1x$%f3sg>X|qAU5oeBzcIqE9^9!+i z*8$hZ@=5erSzV7^`wqs@qbG-mPBg@iYD0lny;K((NcG_W3GIzM<}@n@dEi0dxuT4H z!$k`C8va#&@qx!b@d5orQ(Vy3ix=@TpKW`nERoMKWQ4nHGK)mDOG< zS2RtTi)={wkn&9Z2@B!W4plz5-?K>t4+B%>8fVc+Po9*)L)%=bX?adLDa-9gc^3YD z3s~vEBg2(Pak(HSAgWX97wJM<@J@Y$K14p09QE}g)3!9uP7daTr^eo`w#v{rE>O97 zCfEHr^Lt*!0+#$J)5yySwc$i;sBf+dE_}vGSIGhM3}}fb*!ZP9(+V7l9$`9@h;~SR zvCOAT8`7x0p0Q5_FYEPttLoRPU#!Q+Kk*SSoLzSDB~~V(u|djCi&xu{VUrqb`)h(L zbfzU^CV7iW_1^AE$=gudh(yzRZmQ?DC11(cP{q}AM5VrT4&I8@I5DxVIPTkE6^$FJ`*gD-95$7IQ|XmoZP8RcfaMK;$%hFkn-dz-CnOiJlNy`&%moXJSWVY$ zr`2Dk#WT|?w=MWHjefKy1z&P%N@K@_Xp3!#$C&`Zbxo16CElEtJS2!BasSU}Dj`X5+$)63?L$)a^y{IX9%Cazr5T+!frqF5B7m)#cE1Ea^rv%S8 zAP5F_0)9XyH>7Vg(FfGj^iRerwg04$#G>>g$Fg%~+gHo8`-Wt?D(uOEt1SA2)eFYxwS;R-*&ayFFcz7&gqz~5D~tBLb;Wg$X7;{#P~)2y(WURf zBU_T!1F?0VxA1%LjdJ$HYgqXQN+UHo`Wh>HlEb!Y<9y3}GrFCQisWosZmQ@FWEJ12@T6ZF{!h%7*1eXT`Ro@e9k_@!gHS()6RVp|ssqS>e-L zQ@Z8oY_9p4&((A~ij!;G6^xA)<(FQr{))S4OIcgSWz%pOyQIIb_hVgs6t}asC0N2| zy&LNqUpZ7q-LcS%dUvU--?UZzc#C~QeR)&uu%U6tNnuZB0BxoJ%0@kT4O9BfXCF2Ykd_SN2NE9=64)Ai%57vB~6=Q*YOPRe_hGLVA|R(beL=@?76 z2?XAj+`%fbg5l%LZ+%TNl<{^;1Q}^Qn$Jzm#{~Xll_eirXnaifQ`i0Wz z$GCAl9%EdyA)TW+tM;67KlqmqT`T^bi221u0YXnuI`?PhkBk?xzQz$8>Gw%2#v$N& z%#(hPjco~g^bCbD?zxYNJS-wiNlr8GMy}*|%Xq>qSjY{;zsjR*$&7=Cb}Nr4rZFRR z78W7MD?N>yBg8|{({fEZBQgm+FUO`rQ=VN1#&6-Sw&T0M1C0fyBj4Cn-8Q;ws$bTP zo60`%(T^Cmb)?dU99~8>VLrN~x#OcA^C%(AK?~)T@Z##njqbAOC^_9$J(0rF(^ZJe zK*!-*hRn?Y~*hQnRJhA*g-}mCP^$3j-z}ytCZ%AHjNN#P2S2~L077;d6 zHZ`_vNM7*iuMUT0#7}m`thj7?bd-$5Drbr%BJF`Y&Vdp#0$oT#-I-4hHxC>CEl`?Jj0TI zV?%bmWosClz*GIzuRF+VyDD3>on`6;?E2C7_SDxY55 
zWSmqU;C0C_Tpf-JtvoJ>#_A*;@|%viL4^#?@r7}Rv4sO|MP4JHHJ+zc*%B7K*hu?v zTa@t^8+0_Dvmxb8DfQpY!Ma?HWwPCEzPq{-o7=RJ$`BlH%1M@SYeQFZ6U*GLE52mD znujx_2kdqVf!y6MAw+fDfep=>9X_&VImomgSsvmuXt7sWB zxcSEAdJZD%bwK2`%4A+6UiSE)F^svn<&>V8?=aT%rK5Q6!~BH0s4T`h{G|07jQZy! z{gQD)G*Hi$JfKe_BP1V$x33(R?C_XqL;t4F*M496I_o#?|Hua=S3jmbJoc*JTTak2 zDv$L|7T!JXBID5&9mYBkwBT)jg^5oPedvgJ5JPR)k$I{O!*X3we#TRD z3&tbH67iOKV?s2Q{^CQ~qPaiCW_<{H^10eM*%{k`Y+qy5K;d39RBJK28;A~A&%K#z zlMk!<@k!BvS`tjbxYWR78E-3Yd92MhKq(hF;k8573E^S4z38X5-`0F}3+7Zi@o25ReE4;DYma%FX2 zc1tv3&2CE*H!Sk8D;YFUdks_%VNh(gyfi4&r%xGjnYo%)!huh`vAzMWJ<+JIcIt_4 zxLaGIw`^}9cQCIsbH$35q@~tse{UJ6y}GJ5E_Fb*cYMnwoAs7o2vf?V~5yzQ>29kF*)meo05e+(WY9LE1GRe9*Y8|le%~e8qz1kvy3B~>I*&9 zvn$&Z>;dbw+~Od+-M}wikZ!oG`Sq6O%De#LwrGf7W<;ZABRY~}J+VTsDVnyVAI^(s zXH~ZRg!PnWQ~F{@eBPH%w>>kb?xXq@9^o%aO@$0`l5hcX>` z0;kZQ1()?;+$tX=OR{f+W6>{V-PhIDLsw(S5C<$}xDMbdQ{|Q|Wy?i&)^UaW(k0Xw z$|Cq%$PHwMto_JOS>c4f>%+sc6RcZv~8cI|An9;gC<6YwJHAUfGU5A<0 z%movFf5KyEiKnik`g&cE@lSQbYSu$G$2lIFIh`DIf6?1xE4Wktu{LHL;ieqqkmsxF zPuym+Eq%;-yL8J2IlR}xJq`$ZUmf*#;DrM|)ovzoCf zocV~Tp=ab7SK=czd}F7^j3#!9xAs}rV$DnC#fI`@JA$QgSpC@Jw{%lCtMr6v%{AsH zHLkMmv#}bx)Renb*OumC3+e~6-PM@uuf@XPZ0wsl8xOzVq4?gXJThMMTfY#0@RC=> zqaX3q!UHFzbl(zN%g17J&uY)z79^#k$KP zz=~*hTAZ;KO(Do8xnH;hj-WZ#1cwyRwWTAkt#@K=RpXTA6C4_7^C{?Q zwV =;(Vzq-7tG%L~Xt}9HuI@OZyHm~*@nu0|&bhvYk7|R_)ZZ);D*xpT+yEAoi z|9@u2P#rf42AY|u+VV^X^s~^#av}(k;6g7?&&u%cgzx+cLSO9ui7-H$`L%vSo4o$K%ueFQX6GklV^pu z@}-`C>B~37snf^f)|+pPKX~=4;`z`2!MNnYYvSv!dPqF_p^u2~eA+YPY0vym!c8Na z%$YOG@seMCdA$2wdD8+oV0s%GK-A|qR@b5-eEFWumfC|;i0}rBLuz9Cd0U0+`a zq~>bCVc_PI5nRXGoK}qLLHaYN&c+?L-5M)rHsWKS{IJvSUfL5EUwFO;D;9RVK_NOK z)68e8JG85OfYN~Bq}_CL+HaFBublRx@Trq$E z+y5A72aj=juOs?%Vw}k#hllDZJ318nKsw%1@}iT{fEV%xd5xi7)DzddM?05(gf*-& zgG{XH1aQdbZhzU-b(5Num+q(^md-nT7I&B~L%EXC@}DCMWPKH>+-mC8q<#xuWtVQ5 z`gMT~yE{yQE(56{Wy70@;6PdxNSVk`!>Oa1p5cE{!r^?T!JS?y{EXMIM|p=Zppg{h ziqr6W(52o~W880Cc?PI(#$OHck3^Zi_T!zF#Y#36`)coWb6lvjBaJ5~7;u!#st3#b zEY}khnRJLNoxj-noVT-JGe0)VAsB@+q9gwL)aR} zc0vmBr10Wf@xDGEc*+Otma5hk+_q#wOZlhujBQCSw?r#WAy*vgf?iYbSyu2B{EBZ0 z|F#m%%FZe8jPlNjXVNcN%GWl|@(WIzXZhR0oBSM3^OR6DUzg`1qCf;9PO9`6_;OJjXJw z^b4#SHcYQ@{A|oavrI1?*Rr$w~euxM)fK5ss|N} zquVX5(<@!(D|8Y(cn6r{mT+iNpU3mCsRwK}@skeXJ;DlZ4s*EtENW=H=j{Pxxj+9T zSqDCrFYX~*{)`bDI$RO!@yvSuNT%iY7ZqL%VdE~Q_P1?Z{-nm8mNnFnZ$D)kY#{R zIp9-e7JhgdSZ7Y3jdeERN$cO#h0y)??Asl)nvY&_@#S&N10JA0vMF5MlF^X9*j5}m zQ_4ShUp&nQq3Qq(c#pEC#ZP>iOJ1ji*0j&c+G^~ZQG1De2M!#P4u2?iE$xf1z4E?s z<-M?`FVns|Us{%D_)LaFDO!Qp@L{ms?eB#bVE$KB}UGjo}o&uq%&^8$ornvuI6 zoi+LOT;ZlgE>&5Z^3YcD#^XbggEb9dymil8IBEL|PySB)({8TATSX3XsG-2GVXn7a z_chs?uKM9iTl~Yakn1W-<0Lq67@z}A@_UZT2Dsb`!>;(OQ{}K|&xZdnjxgDE_4QQ_ zg-Hgl3-*TKt_kkC;?`tW@r!@4tF(jax-Pq+e6Fk79=uE&rG*x_8+zuDge`vAb$&TW=ow2%WzxhJ649O17Y5?(E1MTQ|4C?ox~@6|Gaq5mvLJ%i0T<3=x0Iu+x7L7&NfBB;Pn z8`ZV9VVJDDvP?~jAuQim92vj7;xsablh27yv4oX4{bqe6_leVwU4^78P!)z3)=hx= zoOTG;>C;x(Kyr^Y2$nG!j~S*BF66C-IyIzdGuX&{haa~ru{1EGGR4l>g^ zJoJ`d!$Ph@$Z|NC`G^nsQE#PT-cv;=hlukHO2v~#>GShjIY;-nB|wR{!Uaclg3QWRkv=%{>9>kCievMJ@yCTU&=&E4jL| zF{;0m;&QUAY^!lYdz;U2^_=nO2HAerMHR@Jnb%K655t2N#zC(9hF{zoPucJ?YalfY zw{{Qb!PRq>M@Y2(#7MS#zM?qmpRQENER$s>LUUiR>$Wm#;*|4a>7E zU@iMdFY7X8d#bN*W5CaPh2yEU=l~5w8^Y>1MLOUB8`p4<+q8qqNF6EbR%N@#0MF~N zjB^^d1|wK?KIrul$}Tu&U693CiFe&a28x; zrC?AMA#_oA8UKdOg;3VvEes2=WDSj@P;8sLl@ zKPT@Djc~Cw%mR1(+3^K7e$2V$U`Q)N-ONw<;Bfl0`=!cu7akh*9{QXDU1yo<+OW1E zlWAmGIB`5OnHW7QO~&&)GKoXtY4~!Bx+;I-b*?2GT=n354pS8FaNLEF+%MG*j0JvX zqgQcio3!W~q?g6-KBRCzkJ2c>)@io8cC(y(_HCv3#eeu&-%Z_tG@_~Cq^!*G1sTZf z5jyjtNYYhSFb2|@CnOVCE1Q(%1q?A~zUV58e_LZ{eI_1vN_^*M>2i}jjfZyEV^VV? 
z?1Xf}ZRSwY`v%gX2Wu;~{neG|t*+YkH7Dw?pH=v3Y_oQx_^q?DnhSN#oQ_yu4GqvS zskp=E?~m_%^5f#U-}99C!RLH?yz*tg6mS2V{~m9B(;MS=fA9C>`QQJaP%Up) zyL=&fr#}P)A}2Dtv=UexYG`s63+aL`+IBbIs%* z#T=zX?Xq^tVNde-P`t>CLFOP^kX;=3G7kxk=T*i77%DTJ zlnIs#i37JoqXTfbpF_GnR4hGs-D^97?OezVjc;Or3F$?^hEJ&5jxq#K+-81u`9iyv zGsF>?$U@K05?0kiJT_Yn$rTv#LOoud=2k4~DkKy>rDugH96a*H6UjizkVOWYUMkl( zkw$%lmtK&y=I6` z|80I{E3VC0mQi@cUrb_`=K|lSqOee&9MEIj{@)s4j$ug0DAwQPlCWV|;~6&MjT)3_ zP89QISv@J%Zzy)EA4>-g!~?(S8)9+KUNJiYBh5a ziSI=;(Ck%bJx{jVnwU<+vGs_Xj%~yzufH=s{LvfY{U7>NeCR`;iR-SvDQ>*wwm5U* zjFkfx#%BFpq1pT%&s@tJx8D2{GqK^GZ_j22q|P}hk*n@R=cA6POv+BE7;^`Cx7GzG$74uED#E3P8*Si^pr~H zP(4@x^+|JJ!|SqX3SX#Aq&gWlmrMJpzIC!h9hh{Ytk5B?%*o|CCbntZ?B@5H7dT^< z(`lR*Eag>7V3^oYALlk~V_`+sxRkz0*$x+oqz&OoBg)TqDn@ZU6Nf=%Od2ATn3(0D z?0E3y)SdCGaMGI2*cwuvE4>{m{1|kp=jPhFVXmHC)!@P#nK?02l+vkeIg`Slt8LgE zv|@mu743KfjPEKlfpZ%cOWr&or54?qj9K-Irg&hw-Hi53GcLIBJo{b#=DP4xOZGJg zOAbIuw{0RIo5plg*-Sp!RD$;q-YCpWl#ZP^T2f}6F(RM&=fXo5c%r+pwvqTjYq`E& zOhs^mKCpyV@5*0rLe|yZoqGJB>2_;kKA@VKJea8#+`Sbk)V$_qdtA) zcqfjZ+7#kSm`xg|mRI9lZ+}M|Id-R?2?kTRuvGrYd_=kEVgrP*}gq{a#A58 zcygsOm{FvB5N$O0d1HdUpwgf#hY>b5*G&8M{%YKG(@k;e?6TwAGZQhhFcZ&t{&%|{ z*2)9ReS%c_8Hi_??67EV{uIr9_(0|OL^;Z#Z~}ki1#Qpd#O)^ez+1x{c;Gaud5tzx zxGjEXltpoYKUD_!8fF>vZAcrOHdH!=<>yE=HmMxqJhjG~>|{80#T%rVf9IE#D&su#6Vr~#gZe|_ zmNY(spdf6?Xro?6#GK-U) zuAloe%D=*x5D0GN>Aa39>qpjlBZ1{1^e^Rm+<=EfPg{ixWcFe52@MWOkj8%3qohN@ zbx%vQ?@er)YQ4*=kLm=n4?nSBL0ih7{gFun zZw98a?r$bLja=R+)C3LN?QZCKbGW(9n}BT2dLyKSvDnHtw*7s0HnvK5SL2EoZ}79x zPCT2_xNJ^KwpKU*oj_v0Jxwa}ZTq@`o#hywH|jI2JIJ@h?PnvO#Tf7b9%;+w2Y3!) zJ6_>zr~p?JpSn>~(^nVzWL$2@unFNe^GjO4(a*#Mm$xSgb9$9yxRjNfi2mliaZx{( zeuz8RR2fXVIFKPfhXMRXJaq+M!^;B|hNX$0VQZ{POCG_L-<#$g!II^fw0(`e2{#W{ zBuq`hHIerL4VCB3xv~g05Dcsr0TpikklXE)dO#2APF-XN)Pb@^Yqy`hCi^Y@7nv?sxNxHZx()T*mds~KXL^#ifHca8CP-u2c71h4dK$M1rP zJu53`{T45V16CMd7f+UFFBS0MpeM?f->#k+4|qnGkwte#z9OSkOX)#unCI9lvZ-#! 
zGSyM?iq8Z9gSSyQ>3{JB7rVTU`nu^LeUbS}L-WPO*b$FY;A zf=_$F+Z>K$6B<1+$Jyio=TV#RJ?I+d`G%0^z)(kW5t8|eU#Wq73-^Na4|~4Zm}te^ z++2L~V;?DgT#hqmPN^DvreIC(mT*zMMCU9Ix?`!d9L5RNU4@nU7sEI$81QeV#DJHR z+SX^>O8iVZ+^%4Ir%8(S@znsr6AHeeRb2&R?Oi%PN}bIc=AF$ z>l(a?>NHBOZsdh`7JvAp$2zxvNDfpPnlhG+bh(l-8{Cw_A{a79mIWgg>xbz{CtKr@ zdE~2h6h5-vc$VB?BbwU)EXRSTvip`t;3K&vUEhbxc*ZUIr#CyCMtNj?5CAxmpKOBL z&-fwhTLC1KjU%@cP`0f76+iiXbAg`oQwF2*`p9ePh~QOmvtQ$gZ&4`EZ0d7cf;TnP zzo1dwOuTo4B>c z8~?dM;zI>0D{o{-yCGOCV4~M@7=*EvG1fE}PMfNqVF*@j`*p$gb=GpRl}=d(bya;_ zU&ZOULpsJ$`TZT#Vxe1?Hnd-Lm*v@Gp!y;5sP=y>>lWVJ5j~Mntm{;|``KeTV^e7Z z*$TU^vm`9~wqy(AM0sY-Df=ef#vL{d{ZyvPo9a)K@=5;%4&kK}(a!{Ks3+EN%gPEP zHXRaP(!^HjJ;yBhj7N1zTk&DsDdQSuoTj(~wYv`$sI7TMW*{r_ zm0_&>!kc+2c=5KOY2|T^G2g3GE`wVhd?SVW3iS5chiN7&7~pT6PqG?XuC1(zx7IWc zP0H=Wv~c3E+J`>y!T7h&d=hk{cjkm}+l;vx<|e{yU$=XC|%&r$u;p!->0D1Dmx*L>6)4)HA@3L~60Q_8gD6^{5NU2HRr;@qDY zFOe~dfrpVhqAT6S%_-UJw94;%3e4y1Q|@ymuEN76ElQq3ml~Fa_H*yDRVxV!>l6up zXcfa>T|-C%jD%C+;nuv6*fK=*Vr{m@du>ROsjH;FTKczN264Thk<#HOtl*B{^8(Sw zS4bmUXwq7)+lRJE3*LlNH&`s~C{5N~d@CI#+faF=sBkO7K$i7!uJtDl!wV#y2Ph=h z$@L7Dw3Bf#K;vZDL0^=iEcZ8snGWVNc;C^6?i-248@T2^yP{ozvC}xm$X0nk8;MSK zvBE8F2?s%2rG*&aW!y-HU*X#+XKXQsIqrwxf>o!desFg!%g3Wq0@g@O90fQTHgu6ehsrS#ieLT9=o;()4Q}~ZX z_teqYJb5(ZPn=fRY59vidpi1OPsP^RGci~@8{3-q82;JiSk*l3?2R|X+U>V$TwRU1 ziB3G^s*B@gzwpcP^FQ)mzl9*gDjxf1c8`;+>z%LiVm zm!6!?GLovhj%X~%Qv!1HpI~kZ>F=z|15dq4bzsiJTnL?>xdJ*f__01b z<#hr+P0c4+=r~jV^~KNsL6vY=cBjcUNMnEL;F$3^Av!cPPz|Sb$RdlAhY9$v3Dm%T zCKfYtcZQR9ET1Wasn040A{Sw2KlWTRjz0!rzaw4srwd!|FvI+{occUg|Nqb=yegD2 zW`0}RJQuTQL~q0>8Q(Aj4<Vr2R%A8Xc>3Hsj+?uNNt8&CK8W=qp6Mgt)LW?rQ!WM6GefWbPj?aAV3$OrW zjQQCa;WQObf7*A%g%@2IU%KH-!ml4UT>s^G-v>Wr`a^lQxp-(=eVT9G?%TU7_U>Jb zi!Zq}E;x8WY;<|^gu@8Q3%eH>;`oU(l1nSGEV`aJF8Xe+#`4Lt@h7kOqqy}5-;Ujm zT}z9?Yc_uLM}O3^;MmclqFFEBXx1|e#@s5gy0Yd?y}y3r-$?GDe84{(dg{f=lgERb zDgNTm{yL5yKNfQf?YQ@qSHzF~$dAS&9`nudk^l8C@%DGU%lIK)a@YXwR?N?|CU{C9EtvD2|Jjo*)D&>zz=;kqn zEXYDJgDpV8gEZ!6!c;d_Rw$fjSzx%#QZM+9$yrs(%#0-sc~AgJm#IV8omJNHTwKOfo&MFU>_VtDb`6Z+ePeVkyZfT1b;H$taH}!aXO> zq$eKyWT-ZNk3Sj4GYf~w5m*`Z5pMuezT#>7;W&&7n<S*8K*TX;fiT=0_2#%$%2e1?=o_c0zc7|kB9@M4PqxQ>d;XViNkFvN!fRHo8EqZ})N2QH++Qm^VKp8To-vQcU4 zDR|_z50ZkTT*C9sVzMP3rls;C_uvt2K--#LAV?^zozj*T5WHtp>RFm(Z6t(Kg+;RnBrJ~BAqEjQsRMM;IiKf;YQaE*`ZOWmNApZF!3@|3&aoDi!Y8t_}_WgPS`9N}kR3su8K zN8p;_Pkr`tv8H+~NdI8t?pwd@+v4)8u8f;*xm9E5rfG-l@&Pu}N?@QLcqVvx6-S-y zf{*ezj`T$aI8>JLzo6`iFS3#_63|M7Oa6FruKouf_1QEM&b7U>ZgLCE2B(DtwBAfM zE1gHZLHG{hiYu>59rNJcxa_ja6^{3$OXZ}<^&*Vq5!~nqIYLkH;ZKk+ZHvy0 z-neJ)KF59h6CaJG+1;^gmvr5I`(tr&N%&qY{dRAB{}22?v}dN{jyvu&yczUH;pK_H zbeqJ1g)gqjXO39LP?tj-i)_#r($z`U9x0g9CECt-Y^06R(;im%qKAPf7%eYp}B!? zaL~9s&v$6~EZx$4JoA~~6Hj{bx5oE8>wDq}Pk3V7|C$F#=e#g(z3sMG=lk~RTP%2F z8-u?}Q@infMcOEBwo$EbmHE}!58saUN5amvBa{7!vXa^4D(@%>9!9ZkH>PbLi&s9>$!?(y0~j5PTYAk{>R(j5ltn7yoV3&iu2Fc9P8{E zAA}Y`gr?d+ecOEr6a)|1=s6;Nm@<%E_^AxviYBeAk!iCUst-Y=bKlQ87>2BMHv2QL zWjfCUseiJG%my#IG_u>@v+wH}eF#4r?(z@NUp(G2W)N3oY8gM=*BOf)FPPq_P!Atk zPkMRQF@1WX%6xv&FFs$E$P$hUoP~cOvRZdq@f&ztKrG3AjK(MbpdwH(U8d{59I?7 zIN+-1q+OP4yu_d2&6rMF`@u-OnPF-Nmq$9_;7>e!sDSpsEphpFEM-DiXs2@323c3- zrF_xRa83K{|4`bxi1-&h!8(T0t3xO}apJM|{h`;bsmz?05KkM+p{+~&IBaBzeTq_zT`%Kb-0B~`8Ew2(XVE)xG9bs%cGS(q>4*N7 zdP~Mr6ygkd=?xgh4!OQnOO_jV7+abeTN$&tT@TfUZ;5lb<(hA}CQhC@89)4j7rM;g zYTkBJz$askgRC1XG0@z5tGB7f?b;n|a-#s(b#F?4-i)m-hYvU~0pHM%#lX}J=3}E$ zZobi}xr=Jzah3|{8Tpjdf;Oz@p}hALt}#OT3ZDgX{e-i{mi(Uk2##7^X=y1L(ZXwx z3S-?yqINjn@w`;IEJJ0PKiO{>qx>Fo9nL6E1ka-5Sh}?OEwG1H>8MBg^<%flUp*U! 
z&}D7UarS53^Yeg|QLLV`fK23rAY>zmXFPt!<(YCwS1k2|jS3#Lfx|=>A3Sh+gCvsv&KQamfAH&cOnFwW0=>AT5GVbh(blYSY>msS&%dc?D5M%>?&e#QHD&$n45j*=NWH>p;TWbx_z8Z9tsBC2i93?RR!`pc7Z! z`!JTm@`g%A#e|2>ir?S62ZpS7jfA_`pr8qU>a0FaKhLPv`q)za&oCcfs8?bP*m?V64IUVV8y=8@l^wpojfl`lna z{YZ39-w_*UPDXe2jPTW5ZDS+4!l$>!HS>(SqK`nq8;xo4AZ_jWA&pw}hPsJQ`>S0^ z%qKSGli&YJr(fa#rWQ&*htI?*4$Fk3O@j_1UPY%vPxIK1s}dFLxl759VPD2`Nwsh4 zmck9IXVQ31t~{v&60Q)6PT#dz|nif38iOj(JuepQZy^3-%J?pyR`KHvM_HNOxo zf4_vcqN!r?GyTGEh~SqL_M4Op+EaqW_f+9qDebq-kG1{$}xc!dX;w^7} zYrOK0UQ-gMx%|@0V(+f`_`nA~5Y1LIKKap)dvSDW*Mi@$U{GQJWl;$lvN?>qqQQ9o z-d*wX-+e{A?yuiaIU~;7zt@A(U;oWt#DgCCkofpVKB_iq#%uoMFXJ_T_UB3`ge%g@ zv~O`v6ZEEN(u@|kPA|nnAN&C4`^cw1D@c^K6&D=ZA6H*}b-eC1uTi>%xZ~DaV`Ehc z0ewicZF>QkZ;)-r!k)#r;kwVqQ=a}^S&o1#s{cRgF^`U)eDRB7`Q&PxJiDxZHW}?^ z+iwND`+ffufA{7$W_p!(@r4(~;R_DNTi^N?zpe85>ptr@GiPSoUSy!%8m+eIdPgis z{ypYfo>20-IBGdE3fQ$c>jNM^^Rqu0FL=RE#FuWoK3@0AKZ%$8{wtiMCE2Ozz6V5k zV)vf;c;OFzUwrp>J|kvlmVAH%#rEXMlj;^t^#HYrc!f5GU;WmP_=3Tcg&G#Ez#{vN zs1A?QALs+6U!w3c2-6(7;5-!zi@Rb;JpMh;`=0pdr@s(a?1SeQW4bkACD`x;pZvoG zq@wIpgUreLiyP(f;Q+NY;gU2v((W*TD{(bV_C#)pfWs=671!geAQ}y&Q@^69JoQiA zv;L%aeeYt(2&_Vn0@wM#+X|E{1Z!iIn zXL=}~q^a@1GQEgL&Pui>UNQie+3L?wG(P2F!j|%r?+K>JPW)5BQZ}X~#v$9qadO#^ zN2Z`0GBz<;VsV;h4gpXH{0J8mRleDmZ$5C~C*QQ5QU5=7$8x;tw|_al^Cx~Ou6@HR zRoL^&I+?w1iXl z%#SI2kVKEINxo|)IxquPN#c_DRUB*SofW6zj z(8i{;gCO}4ll3nT=NQV&vUgUHu-^#3YPj!(l$&ioxZxX*9gW0Z~c@^ zz@c3Xr!;`u4^;FFd27Dff4NDh!o=$qcmC|pm|Qg)MDnXV z@N?VqYXJ35p++eT#gl*8#(D$t_qF&!HLW zhsXeM$TZe&wztIzWjnNMkDq@}$oad^+wpdS{$CekKlU{&(oS!*S8Y7sQoUUKvYE3%;e{)1Um5bvPep6b!yO z>2@C=vvZRjJg6_>Sl*z-WfR^TrOHp)3kyqe+s${xU;fQs#%;IV5x0EhCdvK@A42DI zPFLOMs`&Z;@=I|*I_hn=-KwVP#j!h&D=&u#W<+1rlsT1X<2Eh)3NFCsb9c-&AP;gE zoWO5kVZm*{CrUWr%jc(Pb9DOE&T9PIr$1#}=jZ3uW@kj_X3Q^W4l&8Cbnt*^ufn~- zW|}AMVJJ4288cJIf=9ByVN46BOXGegGYZSLw*G~o&B@FBt?(wsFmYw=ioPDxyy*as z!~O8V$dKPqfg#)~PFZ%s#hCAgI167ndk-we7ytdb`0nREH)hDLum|1$ig>`+-9Oeh zmV<9?!-vRPWE1!rZsAkv<+|5;6_aszk}qn1f}bl6<1`!hWAKxw%3A6LZx6QCT@M9h zLh*m*O`f-mK(ConKhR&johoI9;7GTkE|>}%Tjf3VaXQtHd?qPyC0z6F`cS?yp??rZ8T8kk?U2o8m}6!r+s6#gweD3eqR7{g`H{oQSHpOATPsV=H_R{G6F1=~yckt^PX5X?X%a8)-I52f?|Db% zDd)$PTreFgo?~rj9w@Scjc9Iv;9#$i*x6r&sN0DyN`6n{6NcNJVra|K|0Hllt71!i z*nV*uq2j|>qNTsztFe+>`Dhak>@yePPzSdXaZ?Mp9K7IgpcdczfnOIt^%F0S+iuSr z2YC@_dTxf#6)TyLTi?ifv3Rb0dNA&R-mUqQ5YxXbe&T^K#>eJ|Ddu(bL2%=6oBFR5 z2If~uFB!o_KOmoV1!Xf&dNX4-+uCvkUd(o5{4#$H4g4+omG)wz#keCHpi>zi=)b11 zGCRJ&BVVRdKGy}m;yK(PJlyxHQ68GJ-dy4QHQv0D26!>a%|T9FLd#e(mabOB>1i0! z*M9FgF>#q@+_-NP97d+}H^ag+EyPx45!E~8vGQH9$ zZ`OYlmquZ=U7(ThoKX8=hGm!ba9Wv4gDg|=rXMt@unQlNZ;Wp$&LbLD7=(@0iE#s1 zm4BpT);x@Z#&L)PuIAUKO=)+&wY>w!on}N+>XkUFH36(S27MJ=WlO)2P;wgcX`w;# zpz^w{)K@alzm^v}cvR2)7HM6`4q0sLVfFcMAAvvRKCNy%uJtUA-gM`gp=r3MIpxdD5voH z&h|)Lljl_iykePzAAVIW6qWdsR)8H)$}jvu{jEPpez*<=Z==LGX_PD769?=B2e$;v z;cjC28JA1H9?H*>9yiS>JY5w4O$%Ne=E98RXK@SN%^#RVj11{w1=ClrV~QNZpfk8a zekl2ell0%`@$7(istGlUo=*CueBewb*(Q0nRRaadU-~F9Wh@bhHpHc4Papv|LG@m*he5P|@ zZrU%i<)*Rp0il*{r1FJMOJgwySQ-s@6h1-UkPA+I4(g`d;d}P1X+c*GRzU~1sVw_v zbQ=sX3}2?|t=#g#T11k9IdC4f_2Es;Kbi0GnIY?OYDbqXxWL)k-b{T!x(PQbqUYo~ zKm3J!ExMq}NSMhlo?mYc{Kh=)m|t>uoLh~2yQARqiL<>E1Z3(S+wB5Lh^gc0PsSuq8_jwCifdy$5maaj)=pH zk<3{!;KS!eIY{{|34Rm8)WqxU2v}Wg2?~Q9G)^Gjb z$JB`*ly8*Wg8``_KNjYbF+&Uo)`EHM2(Dl;FmLTR=jJDTC{Qsae-#$zm*V|r{JIns$7`fOu-u)%^CYA z)DgXrG&3K!-*ij7?)QJ!S8Me<%~)F68?&=*r{lV=<+CfXCdQajV*`r?6(+R)wwsFg zm3KX8FPS1{^>?`GbYjjF0=emhAKXxyK^1)Yh8IEtMg?tdTAZ7kilv2_*t2^n4(wZ! zpUuWrOsfNd6EHXduVF>?%#8<_-J75iPz(eNJfz#+Vgo0YF~yiZDZ~ri5U}bLFf$t! 
zo`9ER&<}=JZo>dy1SB|MJg7NNRy;J^gi>cq!JQp5J0&;?q-b92V0hrE2oKoc#RNod z?FaSbj%t{&OFnP{PZlzA(whR+S5nKRYzww6O@>_89Pou$fkOwc6;-H!xn5g*%bs-d z(=R-6$$CIP@)&R7=W>`B8MY>J7LZAM4Dpi_tyo^$j8A^ zdaVWEgeGiI83QkXCOm?`%en{_10>H34hTS(i>vyp9YC49Z2rwJE=kC?Vy-=Bp;iwU zaHU?Q-ND?{dJ$$pwo|q#W~}YnFT`tJ^P2dB z*Z#RxXHHI@fAD~DdBb1*b=>DZ_lZw@@>(y5{`zmeEZ*@y|5@|qtn1#rdt+{XIu7qW z6!*T*74e9NJv^@Yx(CJO_qx=y;+paA|B)Y#o4$NY+lQz)yHrB(v9(?CqFfht_UwE$|*B{?t%qXo22u|O@(t({pDVlUlo^K zeo6e%AH7P0YA=o)xkYlN?}c9Qr#+BeTsgbYo{h&m@rj1e6#eVM9SR9=>xjkT%1bYf z`(AZr{KcF9E?T12`#<>4@wtDyE`I!{e#ZG`+sVhgnZjo5j0SYdeag2#Ca$^a8{%o- z@f7p#sT0S=Z*2UroF!f=gR1#aeTr`z@@$1cw17tHUFH{cT*pyQ_YL}|@T32+FoB}G z#LX7-3-O%qeRlloC$EdE4lKmN%v{V!PVzr`H^$xN;emQ_WP71@&do$DyvLwFTpFmkx`r#J7TiFL%D)16ut=~>yof&uZ(kJ z317a;h}YrB2k>G1HN6E-X`O}Wf-BMtk-{T9GL-3)!XLCM-cqnF`zSyraRgrCK^%A? zhb_woQjAPO)^n*wYpjCT?vXm&Y@h(dV71&_sUX6Ne1L zRGg>=d5M9%N;!-JQR-3oytdC>xl&rc)$vn zOvw)(RVMN5Fw*%3sf@>EqUtsxpUUGISpqK|J99Rc*SF$L?|4_7QIZ4F6X>Te{^_5K zpZe*ahF(0kISf$C^^Hy-G*%|CaC`#>YSg5Jt_BTP!& zksSl+&Po65iz)YMzdRU_?N&E`vM)s}&dtYqryDold|Q0-+KVz|F0qNv z?N6trc6a`Cz5svzVb`T^B@tpj{jM%t~MmF#Ye57_C&!6!8Xafxh=W`s) zOW2^>vv)pj_{?YHyPo&lSklCS^O0Zwbyvmx?|(n(B3-|Im2>+9RO_9%z^_>^_?72j z9fVKDS6p`Ha~hd(f2E)KO1Rbew&&?G-q??h`2%`6%%49)W@~JxfAFOImHrh!#uDIH zUO`?_438Cubh8fR0}f&`!-1pF3b%|atS|YnspKF{gp6Tvk~FSQ>5tidDr`IZ2L>sE z0nP|7S(Qzu#3SRY9~iB$QoJmDSDHSrTX5n4~Lh zR&fV5vZk6^)_A$qZ=-e_*U_k>^>3#s^|MSXj113Ui;hE?v_o3(K-Zxj%BMP*t|+PJ z81O}JHEh?r)=~9Q|MT-X8)v{`4o6uj&l9fKBZ?nd*0J5Rl|dIUzZOHZ>(9Xy`Dq;& zr?8sW^)EE8`d6KARD;)0VL+9OoF`xAB|&lz`7u?lr0$|J$>X|}dMcCAlu}dafiW!C zOJUTh@CTuoZC)qWPUe zLdNUqHq{*&!zdf@!#-K?8RwyDfKWF+hoZmH-nJ@k?BoE`kj zyl4dPo=nO*Ox2~5P8pW#jf5x9)HWHGk9KH;3ZBN2Jj(AnWjoRDoL|8;KQ{5hrrZRV zlz$_bZig{Lo>AU3uJ0JFr!@cFmQD@~(oQlC!9T-#6~3wMh9YPWa4DPg4#;7ln2Jl) zMMp;YsIKyQUOcA1*f1ZlNqj2qWWjiTjXY6)a(F$2c7!G+ebv`_uEw&Y7>6qjC=N8< zyGs?mvVKIF<++;L_Cq{z3*Cq5q2Z7pNBM`%QqnswI1*tx3Kz9Y;-5HYeXPoHqhI8g`Gz$s;$$pugg^ zbzTkP3VclB$2=X`mir)#m)WMO4}36@>m3|2aN*lIf09QQ4TS+6zvP7Q1qOU&5QV-x z6YeTBl4s#F^MrWCYpBZW&mgjwqxm^HM_+Sx%j?nyv4bt<57{>oZh=3P7fMb+YL~<# z`Gw8(p6cVz%c=gb=u~ss>!#p2tp?!$!0b#*;hH}Sbv{eSypcUBKBva~7MSQXGp%+s zHCLXQn3h^0`jvHJZpE@IdQcAzGhp1W)EAh45(_@e)9HuCN9D8M;Po9DbQEq#>ud-I zJu_!!ZJFDysGs9{tika4rEaWnLxt!C&&19=^gixNH2DcgmNw0;0m|pP~d`@5D zhRJA@l>C9qD#pfpld{vv^wx(yIB}STRvH?Z@eEU{BMJ9!zH06Yd+epYq zM<5z>HwB!pZ=dOI$I;bp+;n^;ZoK7qeCdm~#Vxnr9IIkh@ntOS+2uv6rKMdlx3K7r z#*GHktyz`Fg40y+t#BsY`E3_rtIi&FhQ-S)5m@r`EE>>m)D;{EqrHriAGde|*MkN|aFh?4)9x;>@J$Uv!2utxGAH?V@J0s# z2_f?kFsBUcS`BcFEBwr$;W%)CJ~+^s^7|6NE79XrRks^!D=UiIl6y8*&o0N> z+F5_=wzIJw8|y2vvAPx;YwNzAo^KK(i2LIIm9>>vXK{9YQ}8yTv&L06;=K(GZe2}~ z5WGDJRSv*#MUMpe*|R62BbZ(o6kdLt)O}HZl1orpWE8%_AO4`Osw&ePls(e1;KHPH zL;dmhU;gq~D+4Lt3S#MYsWl%55A2EW{;uziRpGsSW?6isNih7ce0*ElWg!PtmcMZa zJk^0~$v^q&Psg>N_>6DRP;u%*voSl@j^{k*K_+6Y<73z9CL8cQgR; zE%8v9hho>RrTFdN_^o*1i+?8Wci*dG-=2k7K6Og{W;;5f$$hW7Z@l*Pe`#Kr(?G{y z_JqehKF&Y?P@Fk?TJ&HsKzyO1kt;MC2ZLJb#~so7gYW-9oLXL0vqA~QA@|+R)wrQ$ zE3cShDnDZ1?mcmzdtDKaddwr^^vP3dhjmY0_$EKRgMUi0XiecL8gF{b---|Uq=Ci? 
z%Sb`fvwMyFmtAr}{NVGRA6MV|syKb>bliUA7S;Da{M8%&#tFFA2btE8tmDd0zWGMI zzwm{x#0Nk8zv9HHOaolC}xOPeDnZVPRF!W&tc&=IHTcADwptka&gK*%- z)S@AI7M~DKe?sme652Cu55jMI>s#X1J5Kl$0CUrlfs%j8Yvhv)4CuwP>6$nQn&BCK zkxQfUiYZCmrxorf!kBmh$GpaXqBd2U?d^{F+Qu26zg+Swh*lepMVJDgeqqI`1kfY= zue2q5hB^$tloOXNLMU6JEkEO$(xF4RZlyd*PkBThp0XVJ2XJwjRBnav$Eb`{{EF+m zz)%2@{DKGzKtAA+i&|5YQi2(G7$XJK*9S9ps6qGykNtjDx$2*}SR`yzhb#x)lAHj@-tPN@nXZ!W2iPl+e_tWSeOAOhOkkgBM%zUO)0 z73vsq=Hzj|QMu9G@Qp09GY#KDGCMmh?=X2D`$d<5UPke+aRBo2C;8kwHqKMhlmJ^Vy3=xQ+f2uTwEeU# zeV_D#tJGAp7`M7@0p&-h@iRYgG4bH5N9s=^CV*tk40=|XW;o)(Q882loJciA0Dj=O z1jQ!|KPk>@m7I*nh;K}D&mSlVdE*$FOi!?{ij=2bTSwVBU!nxT!YezZ8SNzle^(fU z2>bHPs0VyPlgDx-RQU?>La6(Ug0Kp&f0;)iq|*<5cpABeA}u(1Gl?F>ukaRcny+BTa@!AblDsYX(bZGNBwx3u+osF*R#znAD!rz!!=GQ6i+%g{N4qsASepDR483fo zJu4c?Dy-F}JmzI=So6&F{w-gZ%*Ok4i#PrAE&RHvjh}IdXLJqg#NJo zyD6QXZ}c->LbtrtKy9OTcbU`h1o|%eQR=CJ$s^B|Kf?%wsH2 zyDw&#H+t;KLRiK@Kg+8y3Z^!WSKa9nl*>UJ&ktF!BZbSfqq+$P1$mT&r%jK<34Zrj zE4&$>Xnziaps#OfOj{6do;-Rq-t^YDGBb47zSkue#YKlNkUqWUfDO&3_{OCfL1jP> z#x0LsjCl%mA5+>KOY>O-P<4u&aGR8{Fr% zH_h*8f5sTKC2ftj!la#?PbT4E$e?E$)(s7pkO%yTR!Wl-+|W1l;56t7JQo_kW%96i z36CR}8NQ(})uS(nLbCb;RmpMa(HH*y&b9gipKGK~v3TfjVGF81Ctxesgvk#Me5Qc# zq#?J@0#8=3NZa4uP}%vyiuEUj)lUy34Ecqi@p7E3{Qg`4A{Eccvd-X7uBCSD9ZIVEynGNMenh2D7>QnXPD0Gq;?I*ILfb|ge^SK z1HsXCl5SCCgK<(+{s2hNz<@upVgZ z{Aeq1?)AHpCEU)~kHy^!D!CCCTy!{=_U?{f_{CqL&}g)N>1?t#-OW4&{)A( z;Q5u1qf1~c|G-^zPr}NoZAO+lDU5g;(^h^KKKh$VFBjQHcwckF-iEJoo{-)48qiv7 zZLlV!XW>4%B^^LKP9EYjFYt(y?4|+4lM@Q7G9I}g57Q~hb7Z<^HF6trnXlLt-Azk9 zlV?ix#-Hr~48^nFB;HQA=!G;?>W5x?p?w9*t4YYr&kq%E#m zJJ;{hNctORk`m3@OO7*X5{|-4Jt}O^)$Auf&%#;wvo^*vSALVwVM^z6hG_)=6y(Ck ziI>vf#i|75Ppe0TK??XcGZU783=__wE`;dWUhqnRe;J^@g6&wv;5v0v9V96!S;hBybc(DlsT0rxp zkMn|uTQ(NA7ka`mOQU>$a=$G z^3|3;h&|U|pU13wH`(GhmIICtmArftPzNbM@!znZoSoo9rf|pQd z{+@5E=W~v|(udF~88cPC$%(dpv!3V_^c5vndZ#6v`Q=a! zK+wMoOpnqX(JhH`BLAqBwz&Z(+ZWd|L-jCRNG?C~QTkZoMP2l)Fza}N!EosP3;AIy zus~KYM{YG*ZUe%YGv+4%w!NNX`X#tB4`+W8y)@UHRjTX590%Q8P^ABn7IBq2BD7H)|;1^}V>Adt?%(J+-Ie@BLA3{)L6w4#g~-brW-%y{0rGfM0V zloKTG6qw?QC_qT&xI2^K+351qx|p%_<2jYgCJz8$L|C#+YEo=d>Q(AGXP^ioWJ{uLxunb$_X|H)X^2crAdB!X(^81erJ62 zpWhpQ{5!9R+wWM83(mh#oqoy-Q78+iPc8cdNRPT$V-fSJzw#aLo_KOuahu&OFHC^~ z6EJYFqF>%f1V^7nlrZIX02s`tS5yy9=A`05#}LDc0l9vI({X%TkLq<*jzxsvv$nc8 zGZFju?TJGNcgNxL_QkHHg=h=sjrFrCQv!tvh0N3>bw&6z%HMqr1WG>VOW4LsME&0{m9z{(AxrAfy^ZsjF>E@5gdg9i~K&A;dTOk^=F$^p3F%p zw15XvP*&rG4sej`aM8;P{K<5N;R*7D227dCmt1j+@dV9qN4%7$Ifa`h7pjKvRM?=c zFqK{UkGen5T@LY>#JS=onC_?z^FmCC*N`aN`6gIH0`JW7W?cL4H^;)_v^TSU{6#N{ zpZ&#OjE`RX361Ge91<-b=0anZytuQ&bfJkNn7x1EP{@x!~2*T?o*F&&^Z)+Yc;5HDfF`y`(-0tBnsKkoFO5e&{NeH2Fa7N} zwY(BXZn{|m^1zG3zD`Dc3d1*Fd9#U8Ygw-~Q~{p;g*|KPQ;dnP~Sv3K{L zm|JYe+yDL_Vt#QUKJt-|#p&b6p*0Y`+x88cQ7aBTU zDV1s%(?ofJ%~WK4`E0!66|anU|I>S;E52jRRn6tL5Op~4ue#9>MN@K!F5IlZiME!M zk&7?5G=A)bKNOGtmdC{Llc!?s%(7`(H{PHz<=l4c_E?%}#dkmZIdNvCqXCjWBmNRR z%4D(g!2Uz=;A_4%{_L;+Ccb>#=i_Z_=_tX2c9Sh^i#*v zFX0tw5Xt{lu5*(`OMEdA^YaV7Y2o?Ldv1L26QA=Z0CvwVSb=DAm5L3p#C}#{;rmor z(h?$9OqRn`&66fiX3Ldj;sF$+f=~5e(=quu>*a+d$sdaz04kmZN>7gIcfbIEqKrp+ z6rA;QzhMK;>4BdZWfA5Yaiw!qR>%e%EkEt24B=Gs%Mef5KIJ)? 
z3rQ4Vc+w|;UHdU%#xrw@5rqPHGLDItVlZ2x5+BAQFOqKMNpu!SX-C?R@daEMk9hmA z+e6vP@q#gwCIh8hELI)WuAy;_jV0uci(&jxILSHirj+KhjwWj;qHJUw*uO9S^0j{y z7hHH=-2UY+#GBvvS8?pgM zD~=mcYCY*230{75D}tc0ae^M;!E?obR3_;%Eb&ixdR9AQa?!7pBf~FnsPniP!c_!* zKi4!BKk9CO!6iY;?_cM4|15RMG82f~xWF)G)Dr;U>3Yl7JPNOUx8m*03im>`3~?WM$(20|Cs95ban8T1W!-E^r4kf9o-tMztV0hFkz{Guo2213OUyt1B* zvuvt*T!()omyk(ZE!@`_d3t$0PONs~JsoCFZeKctaH}_e5r;0%VFtI0((ufo6maLjIy_C^n3+rb&>bA9?aWWKcnt-%wrvkzJ&a9xXP&Y7&nGr+Rt@&eo7wcT(w>N zOlK>m#$%C{zy~k-Th--wcG<>}{NR!_0Ir7~c)c!WK(|68@>LgIHcic&az1rcr7OG+ z-&a1f5zUYv_6qJ$mK5G39o~8)3h7;{aI!p79sQ^@^lQoie`LkbAb(UhpZB+(_L%@xub_*Xnrj0vwt?;A&PF;;bSHz3f-r zhHpfVlE{chq(7V030D#?mg9vFL^p5j4NcPBg}Ev)!2$&+tgHi8@^Z5$<6voDw>xxk zXVSCBNEr@ri9WTxtbYnVxI-7uL%_>i-c`(EJrCY(&_?o}UiHawt*2ji$+CX5OXFxlOzmzD`%cXP*usp(0 zhomPmD%(!!M4!qJlu<}HG^B^;(_h-97$g(PC#F_2ptjU-JlhGZ){mUk7@rb8q*KXP z+tzpzhwdxm1Lwt%cQ|h<_>JFiarb6aewtA_%1M|zapW1fMGdR`a#-O#`k1)A&|9=N z-(|n3#`0X1s(gwue^z|m0II8^U1l9G#5d$q#_|k(vrkp{#rZnhT(+-iDcM!rNq9z9Ss7|VX( zk&ZB2;VRdMiDVMy-I&^+$|opnoWFz*O}1Lv|8oQ7Dh5gIk%6rF8RBvCe_jr4J~TV3 zI}SVjyluVHXq+p)Gn$%}JdZsRg&xW6@RAW`XB6ZDM`P8YNo6Gzm=gqObf_Ca9^uYQwt%)A7py8r7~jk%;!oz_6Pmvh?l;R$Wrv$()`=E0F-$uV`zUf_aIX@a#2n@*eW-_ znUKUmtda47TU>B)KTAAoo&KOIok066@|+y4u5^VU~*QIs11x|Okz^=1737y=p0O`UaA9Y4%8uW&5A%r_M;k;Ew5=zG;<79 zZo5FUtRbIZnSsru;wQKe=MP$VjXYArl!PH|9IHQn2~IV$5w{S^rQH5O%1OuOfX8TW>O!Ag5c*T zzl}lNSUmO{6s?xVuc_#XiPXaJ{`dcLoH(`|d-m=YgXSU?8;L!>&B1`8Kk^!F<&QOoPBs+!g~Woj{eMkLC%xNeLA(Orz;du;#QZZ=uZ3#vI=V7oL-{ z{uUUpR3>W^U6G`RD+nusb#!?nzI^+s_|y$Y;-mk5W8C_cFZsY7`FSgc$pCMg@TMbm zq262r&l@~VHg<%6gGEKPxo)nWRmwP9d9%0Lhom zZM zd=p$MiUy+z@%HCG_xbqnwI7#=l8{%YT3pnCr2h8ACq6*~_e9)z`yGCR?|tukUz}Q5 zk6mrmOLaMX;emMCQ=S?Z9=Pj~$JLnVA@f#_OwV(VlC^nUkmD6aW5s zdW6>r_AD*Nw|?7GVtM(jaMFON!Hzb7wtm}GGG}v*n+6(j`qY{D(8sPdFDk3sfPMo3 zdn_o64%}iud+|mY*R--=%wT!xB^So`e*X(%W#y~}Fv09^sy;bc?s2V-+)WK?yiMJj zosPf$v%m7^2Fxdd+hhSj^5nvc&eNc_JAUmKf5G3@|MHh_j1zYr6Wrz4xA#CCK6HM( z<30Z~W+Wdr*Vm)LIH7v1i@%}!zCDZ1|MO zdDl6uu^KKlgdz<_%)Yj!c7Ga~*4Z8P|d4^8F+uzc>CJ zp??Aq-r&K5Tz~sX))4S}K>%5$=SkIvL4k1UMjr73Z-Dyd3(56*0|s=e3KcGK98BU( zc}}}B4&gE$G+T_5dY(?1-<;03E*ovesC;{o@uMY|ae~`7NY`jJA+^RV)myyF_{sRi z_nuLRJL1F6W?uEQw$YWP|8B<0+D5FY?{94I%}eoUH}j!Z`mHe0l2-R{Bc5(>wG-o$ z@U1vwJk}@KBvS`|O9oj+i|pCGE3Ur$vbgeV?i))7_QjX3zaeh?(w9WTNy(8l)00~~ zAR+W&B0NyrxSbW(qrY;9N-lB_nZuiN!cel2%`M5G9^;Bax}{B2e{ZTN-UmRK%qjt4 z+_K}hpoAxIIce$}Rph5FN$wxP*pi~mp#T;Xk>h@ohq7e{12&>4Lw*zk`6nmK8!Vs) z2yk#MppE?0L40GyT?8Wx8JzVa9g`mVb50Ii&r%m}q5w+{b>WRSTqZ|uBbNisfG;2M z)(3gUQlBg@`#5G4mlm8TGSeh(f}!#=9pThBaq;*edV6DsI&5*Zndszt$%=NK{Ag_9 z`>sqp2(NfhPCbF0r-y+7y`}3>Zox0plfU2rUbLT{O(%s}cNT2R5vpdKXatA>DCCl2 z3V(Q9rN8Kz^p-nRYxoRImUt9S-U5TXq{}i=Ua^>}P{j+T(|&4c5z8umiiM>G%|Z6Yp8b1c@BUq}cenhKTYR&9?>_nW$iH`g?Aw1Z_8vSCdk-8+ zcOUkk4;Jh{cqsNAJQ({AVb6;LhYmPBZ`zZ0@BV$UTX1#>=C0j)VsY0}EUEmt`FYPB zxY~biVJ_x0VV_}MvM}p64(GTvX}%pZb2HIu&uDDm79QRd*PKK8$jrR-5{(HQhMtnX zi$2?GYuuQTyw-TcSTf@q16rPANTU0ogc{6a2)6_na-Q^IbP+n=Lf_~?)x?{ z#vaBh#y-tU7}MCaclpv2IQZ-v15_TH_uN##7)JWM!60uusj?s4g7Jg4_ju`zeCcG( z$=J2DWEixqZ!e(Uf?}O%yP34H`UAuX3}qNen#Q)i)PIk-r%2C+st_(i+LsoVOyd# z?Fs!FjN{N;^s98M?SP$A-uZ?6#0|JDEH0ThD7(#D4oXX#FD@>`{QP3fiZ|w!hlBEb zQeckqeXGJuw3Wx<%7?y+ot2#v{*=Sb3-C=F{M9D#6*oFHxkW+YbF|egb7MU-KSuw6 zkHjO?!@7v-jqU;cIq1%;va4}{A&WNkST0&;`=WcMuHbfN{;RM&oPmxtDSY5t+FX#7 zhD9C@0-QQ}JpT4=Z;b`F+(cY>{=vB5yz{(J#5XM_?bS#DTiM+f}Kxo;jupx2u_ZF{$vw!Te=rxlAq-wlX+X-dVtDf zErW30{P6;+aRQf;4}6KMFx7Vr$8#K|5e}@`)N;DOr3}gczVr(|t&(whpvpQ5apGGJ z^=P)I64IYH3MMUp<=X&APYUYhc&y5&ZqNWdQGVj<4F#^7=$-kqeO)hkp$~rfWR)Kn znMM>lY3Nd#B^JotcKrJ0us@NIOYGtQ|GWPPZ&f#kEwCKX)c17r}0dKiBrPmKHtr|N|u 
z?5mm8Uz~?$vG(d$U-&iAHd>99Nt))mppvR zQ@<iuH@C6Y_PU0cRk;(l{-*ezav-a2i4u|s2Jv6Dnop)PUATex2P6&qoibQ17L5cZXw9}1Q=qN&_OfGaVMNNaWj>|xoX+0jd zTJnQ)rJMO-XBz{D1>4VHuy#lpsGi?Jx>e zW>RLhN=@oTdT6eaEO1C#>zHBH8Vo<<^mDrOyZLwqYtrk%zxG!=2_-|uXQspleK7GO zF5TJ>lb#B(Kiz6ruOXf`G(H64!kfUL0ibJv<@zXz*LuKB^KG{a`05eftrWuYV}Esx zcUsfkWzx2)3w=m1@v{L59Q@{gic{GCFF>eHXsQa~ir*GiCo$p31g z8SRa!{%VUc!HFBU#N%f4xfb&ug;%%w?`qC1ue3gm@l{M=?KF3rd>n4A>o|A8lgM2- zwVLF^Tj@6W+oiyQ@LKt%~+-Y^A^{OHennSZpz2uYRaHJ`>f;DHB-hLfn**} zJoWnj5cenWwry8kCq7TN&pzGlUZ1?YbcBS206~H!qGDNqfQVQEg%+Z;QBY8z=oG~& zwaP!%-}+ane}O^_6cT9+5PC=eN%MFRAfyA*zUIFEcBkKapL6Q>{f#l!T4$eo--}hB z`qa30pS9L(#vI)ovt4sh3i_jLHVk+^Lt|aS1EgDx!y%JCD0W6!%;B=Zo$}+fp-9V^ z8QZBo9nZ`oLaR@G5HGA#9UUBAUf6vwPXI^(4j~#6#aKXbAHo!Pp+G=e}VS$sDK zT>B|p&=Eej!q>)F5nipn#Pyma*IN(0Orgr}hR>e4+M6ur-uKGSvbimUta@IvE6 zt(0Li=wa6wN}Q@gH(~*ei4R@n1=`gopKjMKKiaEsW|(FVKwdi>tIi^$j5Ip;{8Y(A3j;YSn5;z&j0Ri05vh2GF_>G30UHa! z9?WZ?jI?JMoxyO}7wsKLZ~ zyLkSb+B1xww@zX~ZgwAVw8Tg&}7uGqlq| z<{NfYO5PUm_vh81%0IX~>xBWi20Dbf@*yYTJ7y;oWr&s-I4YcCDP(ZvmLaoo0@E>3^|LeAcS?eaq}mcIac058%g)nen=HS!0K2$!_+4Hz*`a z3>p^v#-wncs66a$aon!$9k$IaQUHj(0|LyI~Z+$?6ICVtCGj;=x zljBh+F!`m7C=YsQfZ&mE=m>jpxG3j3JK8tB-1D=UD1YqiXW}y^tRg<-$K)U|la6cy zN=s$-r0X$nw84|osvdxwiP#y(6VX{^AT3$V=&TVZxjUa}gYIZxX!bnd7pN^YzMw1g zgw1V?H`~20d_jBDFa2_R&1+uM?t9(~JW&XZOnyG+o_pKh`0B4{uX@$Tw?{5N-mYI) zCr>s;eHk%%EZb$rJrgT4b?TPJVaw!S-`Hp$`tS$Z_kQnpw^x4bCrZwwIniLcwXarL zyVq~Dhd=b8cJF;JZtr^cyW0=F?uXmzNPc3x+&0c#XrKQ%pWR;ck{7k>SD$KA4M=hP zOtNs)KgYSQ-*H>}@z?)E`<{RE-R-&0y{GN%?6*Jpr#`j)^Y8lqwkMu?T=p|-H?Cec zuS|G*z%O~yurZ1G?sxt{`#XQ<>tw|XZriF4?Xmd-?j%~=lB}opdMoV#ufxMrMbQF59a>Z(tZQB?Vvuys_0zMxcqqY=eG@q-5i4-=H221mC^JnO1b zD?<7@G8UsjZY*#W9o@!u$;Dx!hlGAJdH|G59i#5CsKSI^(3EtE0Ck1Epv8uiwh-+% z9VfpLC;#HfQ~|hZf|Pse75*K+ArJ1c($SIHRnfwPCbmJG(=v4`zb-7151f*jWg`0U zDfasaL%fy`!;f%pnTkK|>+OR0h2KP8OjvR<1<#8k)vtqny6|o5+!NQNQN;L+)1*Qa*lD~x#Mvfa{7I-Gma#gjNy0E3(; zKkOt=V9ItuMOq#)ArUxe*8%gO_6oUj3WMdguo01hNDuwWK%bQ|@{JtJTlN7@<~z&l zc~VCZ7U{rx7)6M{MK_TBl0ux4T?^D4B^Hm!-6X56j1B%6yKu-tg6`a;%h9yuQw$RlJ>w|D#$Q0#s0zhRJ3#rzKPv2be z=F1Q~8DOf5_r>Xn54{min&3+%WL~b&Eh_a0U%o=8(o#os;~evRBQcj7KKc3a~K+C*IW za?;JNd2oWSCN$s%8z}x_Bd+X17q=G*hcA<=dTag^XI``3m>1>cXB$&HQE!fiKZ`hg zV4ZA0U(!Y=nb0RJx`0J*z?E;Hoe&<655TQxcJKpX-^wcmvl)r!SYXlBKQ$zoZt+;- zw}tdM5gf2w^&)6R4$#PtaN0q5jfo9(9=-#>25FyW($h?dd@6ycq0c~{59_2|^CkrG zK!1Z*IW{*gIL&nbfFXrXlEVN@cao8g3{1?i2+Bf?nx;|eXa!-?BiHlpty z9!L(8wxKZ^_D27epEoFsBEM}=g<9;O7mxJk*jPY65uW^s9noO?;tjiT+iH7AeF{G^ zVsgfE5{<|Oe*rBEi)_T(H_z^$DIE~@k388p5gRaps?U@WlQltG=sn6P98n(fK_6`T zly7Wo_)$2r!RL!m)2!H7fMs$?>A)wyqT)in1;|DD0qj#!aC-FVc2+o)$nlDBogqDV zv16k`uA&Qmd1fp=J>2t#TKbY7{=@gTFa4Uo(atI9it>KSCwy%CM2$hNJpH6`9I+`H zpx?r{&+-TwO*_xR1ML8!gy$$X_;MQE^DF*>kAP}3L#7~#`fb@(JBgCje{(;IGVqzQ z5$=h%R8@GrDOmo4HY1+!%G7`2Wjjh={RYxt+zerbjm*NB!_zK!m&k_Jv z$Iw9)xK1j~&>rB|>12!4YcJ}h;kn(wj+CD(+0nP(=oG|7Hj^T-t%ag}t>bI?6-`yA zNMnTLQ9$$^h}MI`ql6yJnxON~N@KgBza+V!M+108x3yK71TLGn0;M7U5&j}XYA_wn z7jh}E4a;6EEB&x}#yhT0Zabr_E1*4Pt=B}i+ZAclHDqsaPF9I=Vx(&{+%bM z)m9KKA8kGd?Gp2h%md8iv)9-7eCI~HaAB*x{Nq2aUH;Ic?XUiguXI{I86*sCVXQt{ zTWJTovX!G*Ti#e>g{>XQZ}WzbWzNQ1bUfg*<3+ztmejG7v1PGzC|M{R(Gu)L@FSs; zU-v=>3WeoWp(`3L=zggabkfph@+3Asf6;T5Hz!sgqpu+>81L_WPOQdyD!WqbX zUH-rs`2$Y@b-BKKA7%4XI!F@n^b~nviMiY^FG4T~Kl$}YoNd!Ms$6`h;^iFh57h1Q zg`{)Y4J|AN{=ab`bYK&0;&}dG-tj}f2IGDTjm!YQac*Cr0i^)j!4J{ zbY;4Qdw6vmmy}0W4#EgQ1%Bk6`T%^FxC>9!m3T%+`kUp{iGuxe$tUR^Ks@$g@tk~< z2Y<=DoA7xPH5+S{9vfpLTj3ACApy}~wLQy-d2Yad-Z$AEbn=~-_p?Ez|Gy6*jVgCo zK7!1?V2UNF@1#`EH-iu}IC_~D%!&S{;QaGY_53C{hcJ)-BPcH6wES(qz80k1v4Nz; z^{$)1tj1b)r+-T2D-eH6Z|Rqmm#giQDC1JN8hy$Qdu2-s{a0*R;nX#y$A3^Yyy0Bo 
z?gtT19>sYPc_nCrRyGOqLKS)GaxJy}w!e3+z2b$pwSW09zP-Kt6Ygu%gGbuIji=g- zjVmh8V?N4&R&fyi3Cb!gPoB1Sc7|?LO>bz z2QyW_$Q#G(=LXf|sn_3Fd!RpHdeX6Zj6b`o@EnKR7ec?}q}%Wt;h*BC@}Nb*+*2HL z`#vRZ#<-mqtPY)LNjO=zYI}PJJ`Ers<`WrXdOWWF66+)G7l>zY znNE$FxdPTJZM<+X|4TUIGB$JbW33k+pwDeSyy=Hp_}mhk2|V@_FMiu!^v@KAE!&oq z)@u}ka!1IYwJCotZgouWf$|?1Yyc<-MRpBK0S^0AP*tQ1bYnqjgG3||kzw!=)B)kX zijX2QlLnwDJ!HB}ZKx!K5gmq_kR8dh5Qfng5S6QQfC3w8hijhThzCxUcUmZd$~Q~{ z6is3LC13ucFw?`%4OMCk!n@-Hhcn)S-B2g{TwBt0rVjdzr>?Xk8R55n!{2GY`U?-V z7rpR#HWViHHa6nzksCYPKBj_6R^IGjpvqf<%S+?7y}zgULoc2)Aw#3Z@wk*rn+AWT zQH>nC#sR9XcX3k*CEufyi5D|j>}MinB*Ui?@D>G~5EkNin^xS~I1fY%--zUmLq)qB zSfol&RCP_NWG5ypaytaX`!=;*2Z_;R8Pv zpB-0p4qQpmy>KBv(s+=Tc&YH-O>MqI-&{{xycJn9+vPOTr-vYPJVYlIHY#)IgFKY8 z+}7>*j6>*EuwpRVt$Txib*R^l_ zj(^a8{ny{(PXH{+p1h+7n>&huqARZf{M^G^IQXz76-h(k36tO-=XJzRYXLNo4xBhc zFEr->XeJ#n0Y`I#6Xs19@WL{U_WbAGC)-_a=T_I+j_T0v z-u3qQV~_gyZzl3?yW>vP`Sbq9CnNfa>NY#g9((Mu_Q<0TS(XdeiL@Yhp4pmS0d6WB4 z@_b76anPQ4^69oG`d8QZ42dm?ewys48tIrwwms&C0E`2B!aAy&QTJT8ML)JfK4iul zXm{RmseQ#)d|CVDx4pf+^!BZGUc7S@BgZA%j+Ms)QhX9u0koK&FhQFpevL4<%L3mE zj@S{Y1Tg@PIQs$?7?elo!?e@xmzIN0o4#jKL<90s0B(w1iXPNbrs^KWa2#s!Mn{vm zO!7g$Hx23;=>eX^V{%W(pda5)I$?=;c`)#2H5K49qSJ%VL+++mEZ`6E@upWlZ=5-b zj+i&FDUUlu_-x#tp0T5-?~#WH?n7RpnIlx;51hj7g&pNZ@1$hn#rdoY@nVH?O&O%z zyMdq5V240hg>aN*AW#6kNEp#^qoXN*3eT$hH#fvfG-^@o`tN+=XmnVIJ+>(oGUsBS&&A5j-)HwyB?%h z=c8QeZ_*5q1^xloqofJ@bwvQPXzzM5qPuJZT(sckO@70Z+yd$+8@nX7lqYnEzm!jx z-w?EMt+WmmUCu{Rqj#`Z3fm26hbiyr3z(OwiDW zW+IV6F8J^fL1X0S&5(|HimF!rLR_(Qj5O zEyw&HsO{d7KYHrMVf&Q_-rBCJo5tJE`zrIC+SI@OO<&(W`XwKuHeUUI>e-C?E*?bG z(rWP0)p_Wr9_u=@Z)N0F0hl1Yn*n0-Nc4s`qK1Id~_e} zfCk3MGsu*Maz3=~bffZmV=?P2gq-%4_)JksfBe!;JFfZG$RIsCWr ziC%}x9!U?q(5R-_X;~DmNrvdh9l6ixx9vp*-;AeS76K4U4#+|@ubjlj0O=IjQSYIX zjg-_cVQkYQjr~+7m(_moIRxa7ETOGzZD}R#nMqXDHTv4?;|;e*Ozr^7>LV_V*nk(d z%f{Ghuc~j%_%7rR?aS`tBPVJOIG2`VyhXdo#?jHrdbBr^9sD5=@^TW77qwNG}LvmYOI$vzNySv^Pzk7YRJ^jcN?MHt2-?#7kksoXql$;O5 zeX{xxpYU<7Y&UM)aQ(m^dT|sVKrysW1OLh`xJ`@hWupBv*L-%`7ULao7@w~2h}}gR z>b>}fL05?Af8e(WJ#PEal;As*o^j|=ZlXmS0Offy54%GDA{zXbsh*WOOGDqKB-Jez2Gv*6&j>L;E zEFhvs={NFW3qoU=h)%Wt(RWa8JtGtF0ggx8D$DoIKPW%b7(^V!R^P3Fep>0T@EQj) zq1deN0S<5oX`H|(+GIGH1dJi8KY<6p_CsG7E$M?+cp`5+XI|wXjJ`Wpn?OPKqlf9C zsY|1H?vb6}oKhNp%Eu|M`EpkQ}>|F{I&%U4?*dw46LZi0NghYlj(32bRse!6Y8Akkv!np<*DCg zyposSbQ3zO)%6s5l$Er)a`gVm@t#li;q(<;f%Sz?RC=ElqkEr7C|!8Nmi!#!btaPe zacbbk##X!M-siPP9{h0oNB{JnwcmdCZ=1g5m9c1MQ*xXjK~#KA03O(0SU^1Yks};w#Q{l>q3(xy3vLyrXKWb- z(kEe~0PTwI?f2+Uiy7uGdkA}lN7F?et;(VMs5d=?ZJnmA>@DW$27o@CmLGUCA2y|5 z~$vXu_yB6~W5f}9g+oW!TgLqNl_@KjefI#UXHZ@8A z#d_lIWT`YgpL4Vadr`h`rGMSiW`zs{;1H_FmUyCKbzwXgt%}X7{-RUJNO^MvJLyb^ z<0BpDy8E~|AK+(~2h=t2xewv=`Ak_N);+7 z1?El9<$I-Z>0v-V>Cri+O&WB{v-(-~SNhfGm*8pmY`b8?lAbBYJ|GOk@lkE5%i&v0 z7n^$RTMU#m`!mZWh3{Yv zdE(DbhacobHpa2bZAEEVXIipgbT8j&f1|SYC;L&Rp6O%OZ;h9+vVV9azeE^Rpbt)7 z=E_D%PBQH$S)&L{5=^Gmx9v6b6^ns4iJ144ga^{5&>-6{aIX1;I`zrK3-iSq(>}1U^Wk6p4axk~6y{D>ntiFY1{7U`)y33v z5Fd6yn4a|zSbFI2<+X>3nTN9}&j3OaBd0nxa^N?Eu40ki5QB?bZ2*Xd<}_`?_#T4) z9_Hm#Q2a#tZlI@MbVQ{T77tN06H$N+bTXT$gTB_*H!t}FfD;+ml_##WB^kg!__qJ9 z{lZVYu|4OWyPTXw_x1Hrbf}g0>h|@v&jP*hE-u7Q-lf%*wzoTJH}(!xF*HCJrID#& zq@vmGVY4*B`eE?M>%b9 zBhS*Q(Toz_AjCM7iGGA7 zE!FOD!v2i2mC3_jef*&)GCvx^2v7S97W1o9ZbSX>}_X% zCl)zGaBd9nf+K`e$7Cx9YD}mPyiK)2-IeX}t^R{4pF?0$blmp#_N+(jdcwC= zg=d)3zyK?oakLKn3t*e`B z_C0K<+1c6lrvX^Jhi`u;R{S6*8pe(FbDmJXbo*`o#_Y=Kq9+3Ob`JdwST-Heh`}Ng z{H`0KhofFOeieA~$*0;=Pd_fccH)h(Tp(xCnvT|Jg=24HV-41`w8*irBQHRxCAEw= z;`Y(YkK13MujAQ)^X#cXr2(b>v9RE`^w}9H-H+E}vXyVkPNj43@C_X{46w0)-E}@r zRpHnQ;zMra!47I7EO?5^Tsks5qub~lVYlHQY*T8R@P!gV99dPO7d^w{J8@TUN@RC?-v*?ug_5*TrdE8+#f 
zr#nLQQ%|us(d@-|L?~S5QTc1a%ylbBM8)}>_=N``{zP@fC##5F@BRc=@plp%OG<{+ z^|*#ync(y|Ok|=5u1+i5PM2?RQl85fpG6x$-ogh|x$K~|F4gWSq?3K+vuKqLDBciR zg@KQ-dhYe0EApcZg;m=D6mII0Clz!@IeaPuI_Su;!HN3x=p$F!SAFFdw7>T)-`sX@ zTvuDiw|jTn2jBPm?H#}QmiE}=54SDV+b!8H6Z*5mJ;_3KR`QD7=s}M+8)+~QrYW0M znU;m@C;Ve9_5hYEbi*t6QhbGBZ=~nB@_Unx2GCBg{Jy$fpfEz9A?PsA;E_x$hm;o! z05&|HDciEqEp=bvq@`VQUVN{9JX5~uu>B~18X`2B2cdKQbeZHG=nA1zhg1-6>_$~k zMtF33>_|2fVhMT$#_M{NGBKg}ioSf>nTU(}MBX^6QguMZF2rsb0d+wa@e>Qt3E93B zbUcU{%R)(mj-bh?2l@NBi0>h2D*W7I3oNJnxg`U6q$5g9ugY(D{3H$`G1i66X{E&Z4w(1%qdeye0dI^O)o zCJ6M24CK{65A2;j;`y^V4ET3>u9r%``9B^62&`Y?f4jl$0|Ducbn=EW;MbG57 zuT%EeEEqn{<;!S%8j;v^J)qAny*Q8Xv4LVn-4n_5fB1A0n;lrB4;f1h8gn3P7J8gt z@#MuO8S}YcBmRn3+JAIMzmzcgo-8W0ptJ?>{6Bv?@QEqD*X|dCf5@FaAsYth$4!utv_y5W z95gUQt;TKN5Pbq!gW|y|IVXp}NW&F6>!|i+jjQQ9$0m}{vF%WF z@Foa0N;v&H_wj5ZN+VjA*wmo@IE!m6N?A_%*&^)48w6IRFDYuIv~0G3F6tX)^Qk** zgNY&D?tka+zP)|moxk56IoQ#t@VMPJT520A^N)SfC$>8-Tx!>LuGsd_g?*BJ%%ENh zxW}5gLN~IjaJNBX0I4~h;%Z}X@Gj<@Hhd}k0KV5hY03LEzx`$0amsS+Hs!AxM1ezE59yX`P}V5UKJ zl#NF|w$*l$IFe6Uu5s~9djMS>atHCaeiH)BXQJ^ zXHGD3nVAMqRkTe}jML&@NCQa6U_}gsaZfsIz*o|Ve*-eW9{5z3^HWCR5yYe^GDUAi zA4sC!7M#8pk188`M;654Hz~Wqu@J5z+gIvf;ourS(T1ZGK9!nr$eG;K$_N9hJu$85 zFQY7L$`uCUOJ|}`euStu|5!M>w(BV6=FQw6S@amcry{5PVgDe>&YsR!NkK6fwFP%+Sc>A?&$zwk;p$(peIOIcApLfE$~lANW3CV^k4Qv zn5%|H=KT&cGdZHB)1~uzXGFmuV#w$1}@#J!hh#Q&(;AeP`$0%3r zb#%?duVih>E4^ueo~plzq#G}i(lib1*8pHZpTB%0*k=x&PeF#OJq*4EAms5$hJczE zH=M$UEFg^fk5Vd*^n*NSp!8DLp8lJ|&GZOY-DqCsA7wiu8_Q; zAL5&!KWMo*+=@P%+dPDFg-d!EVw~4g!dVsHOkTh<>ExM#B8t?$xGJaoT*aK#r|Ud@ z4=1VggbJ%Gg%#oWsDlFQBz#L3HU!#d=Zl{$LONl?9rb{IR?L6NkI@gwel|aE8h77B zw5UF_Il%LHdR|`Q^jsF3)v_HOw8Mj^+ACjrPkY0Ue}7xs+G~fq54Y+5HKm<-k$Xx0 zoARo@mhS>wA60Lo4?&-ZuyA0ekujS7SuCVA)HVE4{QzH8uidt*Uc#IEc3CEUBEs1K zz-s@1w{*hzS!2ZspGX}^ zU7=_4vbGccF7ja)p3l|znim3DUxYUF#ClXUd20Q5`y2I=4+Kl2&T>z9sc)2y=yBi+g}pLw2oUIx1S znYi($$9$~8P%iBv&l-2J0pQnuLzrHjqEe~VVdA-mP!a+ZMJc=p$QEq~IDLsfggggn z3>{P}Q=q&Cj2oCwKM$%jrkCD0S1G7_1`v`_U=D+;_^FVa!|8I%h|lnp3z*fx?;J$; z91sHq^7|ruG4Qhy$g07=q7F4XJ!649jCRHw!7HP7@%Fph;`*i#EVmn1ugSuW+CTY+ z-`RfRb^p;D05A{+DZC}mcO{twWyk;S4xHdAawX1BP6GPUH|yZ$JtB*h%j=%X#@~2O4pmE>w2a&&pF!C`gCCXXEt1l~z8WLZbM72(`m^}Wn{@}yGnoqi#)i_; zn1+q6j@CpUN78R9&&77QvuEC+@y4~QDvm~&iJg!MS7_sntMTfl=*Fk4w%c#Ny{*aD z&=B!WRS1P9zgav|{Sr;cmI(yxNF4>#T$hnQN5cvy{?cVQjlv~3NyE=9vf<-5&!2C1 z+qGL9 z1qN>U>KN$aUxHrB;Rk@9pWhAfnukJYBwYQlzhT__3A;p?e=l|!vFg7+TZ=!ztJvUyeQ1F`N?+Mx$;zd*YEvi zd(XRmyIpzwx;l=Ywr&S|JGyV~_<15&s@WXlTh^qh==jwFBDTq{QR*o&9?1vMh9fu2 zffU{V5Hth$aWA^y7oGq`C=D{U zyhOLl^e2TBkBp>mCG<1ABg>e0KnB!#$%p~0>YFgT+yd-N8C`bxGU4!owPcfnWZM(I z$r*b62RPJF@^Kt_ydeqN=;yH^0Xq->ggm_n@xw=ij>UUU7(*X%_634UDb>)lpfsM^sw$j=*6vsUS z_R@_ga+t@X4nYojRG6U7$}eI|IazU3xD z#=qnZTM*@lQ}W>(`Sf>PU!@Q5PzTgIi~OQk*^E2*z=wsO11}3e*dJj`Du5^GWTSP- zHrj*<8$U%y;EnujPUYG1qpya0fPcv=c#0z*DFgaxyyz2gA7R)4zAkWjf-LK;`AoV? 
zepB^1t9)CW>@MV~{%QJgo^caaAxU5x-{^mg4HO4$q9NW4CV}iqDrRhJJ69VSb{j}s zCjJr^pmx~2&2v|o3rSD z6{xq{_^8jC`mpPw`Gxo0*Z$}$KcT(%-S6@yfdln(Ss)uzkLe@8Bz$CX1Sfv zGjgyb#T$G_Iz9RgBIQq8kyMpOX@xuT1Rm~b$IHf%12*P<_5i?D`Ft-K6di;1}=@^h0K}>!R1{jAR0I z5cEqkx)=63(XQh^WLKY7EGqvS^zWV7D9*Z+nzn(8&_*hRA?^`~>}Lz-23&d4LuplOAQa^#&6l4EA-^ zk3eRz(I<6=q>>JB(Z9%~il<=tCj~^`EYg?{Im#2i}-|{`{8L z({5<&JRYyf2IWWiWZhkNdQJhofRlN#^S57U&$;IXZTtF-_QCi6etW|key08EZ@k&% zhNRgnw$irFU62nPdyLN5eQjf_@oB!@>(@2**ftGJSmQf=A_roi{eoBIWw8YPWi;jg zDF@X;lbVMJVJs&8WI-Mik*{PxcJk`Z_oy0;SU;dF|64$r%G=!oe#Q=lX?xp$W(Yex z3&U`5nX0tjXM$Vdn}=tiFInshEWGph=keq-{jX<))A`WHX&BTAL)dN1s}~K@tI8qV zK3LJxtYYcA$#$x3R=OoNbf@lJ_jLs`d9ZxI<5lt#v8;4zOI;t8$Uew()YwkY3$C&m z+Y{l*mN(!~h9X6VBIqq4zb*=ovaug?@ z6hZUpc#iwi5_uLF(xdx@9B*`Z2R2e-y*U&;^HB74MVtca#4|#tr+NHm=>u*}W95&l zYM`$}`l6&;lp{EedYFzh^<46ccC)85E`WI!G6x}V{D*AS^=(*b1fD0(F!!U2elhCw zDL{)!?Kmfnu@2)pp!D9rt~%vzsr(n=i>vDQo{ZXpe3knh6ru5t`_k&i`lO$QvC`7M zD^hiw7dKbcmL48nZ-4AnFK%!6sqa^TSK4IvL+xmPPwX7FRgFPTrolhH#_oha$bdRe zheSX>kbZ^R(G26d?D^O5kL9m`sL$cEqVIz&92NQH+v)eYPcNSFQS~CWdfV>HaKbp{ zvXPz5pSTnHN{m^TS+|iKW@`93`Hxcr{h5F^@u%uwfw^J(>Kq^P+U5#}acO!=-uN%-;qJ8nakMdCtbeeDY zQMlI#fo!K3NwPl5Scpvo^pOasZ16^YWmi6Er(=K4(CmFQFV@RYd49^z?`E1-|YoA}i!(P!r8IfC2sHX9(`7 z2$J5;p>)sSQvPfZLmc3f8Od;!&O%~s0J!b0=c-4p&a&b;>JNkX z-@fagwI6-$54XGTx|nn_1T()ITK zLEAmpvHHB!EgGVOqKcUvE;||bv{eZ-La6Usx&o7t$qzv%C*FFNQ(%(KJM|^!a2(O0 zvm>Db$Y8xg|44>!N21E7K?92-40?DIi#J)wOXcMnauXI;UYv-^6un$ktwc4tfs>H) znr_aoFSPR)#_f(v=iAoSMq3kpWWylBM12(xJ=9{%7L$7~S2 z9yp(b!+;(;B|92bK*>LxVoV@#4QGJdumR*HzD}3~=RWKT9%FJj?tz+YMrL&06kzMN zzTjKBEN5yt5Uwy^3&)`5glT6-T66*n;@g9I$>r*e!}iYiJSI7f+P)m>-}}~YX2^XD^OQ4m5V8C(OykBgeroY0Cy7 zj_NlZD~oCvpgHK`Xl6HdG@#;@i5-B>36ogUgQ?>=F4o_2#>OQd{ENReE8yF#!ABgQ z3k{R0c$XjH(<9I}RsFK*r~QixY3D=@D<^XVeoh>~N7!y9UrvAk4>CZ;1S2c7!sIi+ z4oOZ#p?tsP?fO~rpsr&J)DwIOO2_}uX~3S~6B`K|vwYG&0#R4PewjFn{Q6o{VNx$a!$O{jFQ+bQOt|F3OaSC@@$%gKKH5j(5 zl&|w5I~khafR=CK(<|g#9r2kT`H$tXeZdn~o@$@+UwlIQ+OPcT_MGQjZ0EK(b%P_S z#P`llyK?oZ_Q(g`-yXgEV7q$dO1p9ON_*(?qpF|#va1bu@;NDBtg?`^HxVH}PJWS1 z^VTo*(vt?#&qT5useU0($YkT7`AL9w=a=mObeNB1L&)1c+Wt>+Q@!xyJuZ>@grcmY zln2t0yy)yZz4fm&QWXn!;A4WH`s$-{$)ohtW6GhfdU7<#5R?sbm+5^8o8i{+BF5&F z&(xE!t$2>~Do64NA3=Iyl)eaOz!UaZJjFo6rNW179Wh|%Wg|-AwBW#x;!iQjD4!n$ zhCJd;MVLw=1@MTxR6AUrLFt(gVgpVV6JUH1V0w>r2)_{5H<+^)lLSl&>DTakSpzXcb{oO7DI=;9%t9wn_009aw1!`#KkE8`?!_+~0GyvZx_Z;C@?u9W z6Iuo|bU)>#ui!qmc%lCLO+3kVSN>pEeF^3UCh8wOzI~$|!1>0O29uY(Ie;+<6`T3P zCHZ(ZL4p(81wXRDVnbH>j6PIO2YO}$tovsfgg$Vo!+e0-ngJgGeZ=WO*ee^G@Cnr( zyZ$L1Klfv-pRm1kg#3aD6@}P%E3r^~( z7nFsAlc!i@q>r?y^rl1jiw|tp_mVja(BOgwHgUqY7e*yh(lE|}Q~Gz5gH8#TNvRL& zw+w9W&=L7%kFwFK!<<6LMhg0aQCGs|d7}e)IKOCP49n>f&`&w^r!60q1#W=NZ1lr< z=0`r$WRQ$!hwdLTDFSWu-}y9H@Ev%MW}!zkxhS7^^>t_jC9c+@e}Y0t^kbi%_&pF8*kZRCN+}z*DJGUQL96(JE@Yx`)1xrgHR+?nt6RoU>ucxR zXuO(ss{FQ(*vL)ZT3}`jk56H|!O1ts(+e?5Pg*8%J>H<+a}|@|tb4B2X7Y^e2`fKm zX9f^1kyKce-PebDR<7Vj{=;)A3W>MQQZDpYB@~x!d4&F}zaXp8ed-H3R2J&6J(!j# zZwQeV8i)`1$RP3>X_=RS7UWa;eS%W*hwljc5dBtk$d~vyu}t^)7SSI^>hi0yU8>{= z0{s{_Wd4OE?v+pe47)NO1AJ7wp(o+M|B*K62zfcZ^1oM*=Ys#76n zo()ocKNKT&R&Z_GJB$S6A8(1Ga}*$ezf@5xOp4QIyTljZduD--u>5>s{O! 
z%OCEw3+J}lZFgR1*RF4S@pO0psI84R+jE}xf;L)T4}YQdj+2 z2QRnBo_Hk67w~z}t&NM4i|7+A$c>X&sPgdO4QNWo=B$&WIQfG&7eglDHy97`Z2pm) z<5D+3+F$pqeBmDyq98U9k5(5&$EN)vw#K{&Z^Z2#9=MJnQzqg`k8Cn~fXfT$?}kw2 z7C_ra0`vI_D!hp3H}u>bBE9{`Ehsz-gWKoi&G=NVQI;M=cj__vF4Q9cUkXS*zR@!g`ZFk25cKsyJd>e9 zP8qAB^<3trw!a|gkGc`%{^voJX8ZG9l{;5{AwR_f(9<#md@e9d{!H|xI(z7hc^-e# zm9jrA)0;u$k6s0E3;u3Z&KbBlzeE2Mkp0(6p24wpzyoclc%*GL^Sy0y^{>mrbMm2h zx5n-xByzvY4aJ{|i70+#N__^1%!|g}f-u4GHUY{tv`l%FkRO5;4M_2v8KMediVZ|R{e$7VK ztAyb_`Q5D%ke@jJ>?83HjRZe_mgU3) zje9koJDBYWr=*&FaPW}EdBHDi#Cd$JXHjgPX49aRJ_%R92&DcY`55aR%_iN#Ie3{f zJy752U|;=3_3P;qMn5TZ2XA)Y%Fl*h@X!__U+D2?;3z-!%cspp(k*KR%8dKYHVzOTOv>Q~LwH#-QusRPD~pXbkCXydI7Z}J@HI0gB!1}ywH z*#NLq9fH)Ht|6!}+8XdxL+b{J0*SXH4#8F>a5_+t&3FVzr=ZKPXa`Fmm6ytrjS)fI z)wP3~5G{Juz&T$|-{0#8B?c>9Jc>B;B5!7uts zt}y^(BHc$&hz7r{ss@Y>E4zZ&&4O{SDE!hLcc}-zfrYlc8+$UWmG;{2`JVQJ-}A59 zU3c86JQIJxw)T#v}+8nMp1$QZgveGYhN7 zGv0U-YpXF~v^r)$gYlD36-5IFj+x|Iau0h{yvR72Ql)Wc!sRQ2+bH*DFjm%e`Mfp# zE?!(~ciuj385wU5npZRo3ddo0HUUpj-|7e7*eZC5Lj-Y6S)dpFWpNEcpl=ER z0)Hz-N$o3`*`42D9sbg;ai*CTjADbSPB}1a4>Q5;sLe^p$`38!J zHs91?-thK9tnQ&d28`SXKY(hFKXJjr8MLy%LK++#i+S+x2{p=8dgOz{##)q8h(wrl z)X7KrmZVo24mUK|E`w6al|2?uHA%}Gu-Gkw&O;51uiv<0$Gf_sLAP|rgd>yVy#4Qq zeAPRS%12flkGHH2GYjVM%tE&}Jy4ctqcPZ2-QSTMJA2z60I#oYwiWTjQN8Hjw#5YO3KN21YwU=l0a8&ehY4^7%lHAx$9GVV z$Eq*LY^sKn&k~TA%`?t}eW+n$g8^^+^Qi$oJFvIC({Aj=H~-mu0gaSrUCRETdu4I8 z9SYx+w-d2bb&zwzrdtW_ePiiJ^ru5FB?4J4?0DYPuFTo>b;+v2hBN`%qE_6L>C*x}b9GQ$-eGt~U^+Voa06 zO^)NMPgXMaKvIBS0OM0$N++g{<97YW=w&OO5I4eN4DUr*rzfV&g8L9PVD?{KWe*C+ z{=nhB0qK#A!jVY_VgO%ey6UIF9arm3dDyHLanxLuAf2HRXa=ZP#;q`9fbE>ZXIw-_ zxls?x_ds{VbBsG}S&|ucp>~sE~9#}I51HH=PjeGiz0DVO3g-vZqJL$oV|6$>% zHa-|U0g2qxcce}B_7%KCC;cQA@agv*sE^kzq6^{(=g=qqiC~URrX8ewexzShpAP!y zr{G`JWpo=0U2zKA%zZvi006Kn7JsIaz1ulH0Yt?rpg(|ZiGJ!F<=14&-d^;h;>Ft++q9DxVK--caK@J(;-N7!!m{ zY&Y`F-Z(G&aNW}QfjWi0=`Zsg##lg)@dTR}sJqw@Z@(kA`pzQXQ>6bH5O#FTe1Om* zCzqExcfCWdi?KlCQw#)*UkaQbThY(D7nDEf@QG5u{zM(OBi>k4dUSx~X*Wp6H!bZ0 z5*Js(-5*9@p0J|*&^T1`;Z%kN$(G8@$zoTYdQx(qw(}ZeY|*dPJ!2^*s@QOWOnJdT zPQc}OTqKehvxct;`kx8R7vni#;6Dw6hGHlWw@U``M9_@-5^14h2wot!>`ahxAWs9c zmx6jxBT*=k{Id`THx7skG8VA0)a5DzbRDa447`#j@tNFhYnEN|u|DkM`)9O98ux8o zR?J->VpvYnr6XQ$2tTCQ81uPLrF$i2z62Yl{xF_p-XR*~=x>A%2*lp_A~ZZ6tNkpZ zi{6s8=@gFIc!cf^F&}p)owk^El(?DtN)o>K4?gJs*0_Q<&;noD^GS@w z87C7@*i8O(B3am9gG#h8w>V<1S81Me_g(GIyU)vSw)XrNzNp>UJ@P4fE2E9J%p8T< zj+N17d-Gf0(%$;!xB1kcGq5o7hT{tt<22H}-F>%f%oou1@F^3wn;Ki=U{>Ul#6e8c zx(U`@2aK=*$%Rjz-F@fn?fmANedV@jxxRg^ zUAc0l?Fj9%B!Ikak17=i$qMqOdrm#}0dw$d@g0QD&j6is8%sGb5ooCATS4IqyL@)I z6@k%?oyC{+x`U#pu7$4zFc0(P-JBm5Fayvfd5Rvz2{o$kz-I10`jL<8&GFt+-iFMh z`r6T1XewLI3r1rxD5Hw+py;o_ycX(t@eKJfRQ_j8m@q6HV&?T1B55y(cfF6ezWqr5o$aOlG->G1B@G5>NFaaDe_!tcAO0cBA7mfgCWO5En3CO` z3?auscwg|M4e5e1=8(o?@h^USm$Vqk5cf0K-_76*1E=Xnk8#(RF+@e&@HtJP2P+Q< z`p-h(4GlgM&^p`<=J@RRFCe@V_TXA@nl8pU!BO&0yCbli6=ZYeT3pk=G)Rx?KMYU0 za3A7W@hXD2(jfL2>A@m|_B-@FX@{6qR~Yu2aqdfylk^&c)4p@>W}dwK)Yf?}@`O)l zGVc~IVjNA>FA?60=P#*ianPnaSK24M;<@d|e)b32_M?B$cCP(l8*9wBu|8@?d+L82 zvEdRHW1Ld`ANR9WPFP>a6FQ0(r-28JKZf9X4h-rU;5tR$le)=I<-k4TO}~7pJ^*Dd zGJmKm_w-$S5&-gZKGKL9^@X8{aUOFBjCZ`5d8TpgNxT@iG-4dnkH@@DrhW;yJa4Hh zed_Mz%L#oY##8YEo~0mqIbDFXY#uM)A_+Lp!NH;WS@@mUB+vQ>IN_D`3Qnln-`i83 z;5+(hLd!-4^@T`VZ{@E_pNz}szwlh^Jkfvi$RNi38skx>yp#T6Xxtw;j+Z$Ob~MIS zUi5Nt^HRI*!X=H@*W2>is>Z$RUMpEyU6)*Cr?cbO03h1(6957_(AIe<(1u_L;<-AW znxTv*3>YG*qQp5Jw6p=j%Msral>ao$rO$@o)_^epJ^cZJH%S@bDFc=x$zb`{SJ)gSjIxSwXJh)^}=oPKdWtT zd#|mI&$S=;p6_kn^UwcTyY142wzGd|*>hyZrAwD&*nH1>r|ll>d*X#fVT_$OOb=y5 zGFf-{rwkMr2uYv3l{}jUz4XaVpNtYcqHez_#G*mdq#R(nIME_2Cm_R 
zvXNq6Ap7^*uCnWF5AU(*j=36PY=#ohaSyT__6G^rq0*Tbb-4+p`35hHk7%JoZ9B>j ze((d=lR{mVCujr23{clb^Om~Bh7yoh=$q{+yVo<~Ej?g89li87#jAZbK!2_LcR^Ck zx*iMhV3~yMaa8I$=$Zqb+tMw+>BpBP*zjuOC_`O<2==4yNECFw((8CSg5l8D#0ix-iF{S zY&Y1EJ;`3hol^;#8vd#fpT9km8$X_u^6GL?PBlj73x4M6+TY|^Xu;)zi&H|TiA}(Z zFgd_aVJCnOQnFWQAaw<#U(I<5e?10W7d5tv-tGeLMG^spU3;MR>pa2+0c5dl3R)fk zum4R}8_&9?1#d!s#@!dCm9AroE31>$a^Qoo8IW3`tE>|b-5ga(??bJ3h4GdTq1xdq z#EY!Q^@+#)k2I9Q%!k*MIdz)p7Ft;oAlvFFMM^M*jwWFpPw3aW>-}^$s(>8B9Cwdv zL@_D#?s$%A3n2rtrF`TCepB#+$<_uNTG>Ynsk0bAwbiDfldVFpSCuPHc^>$neyhJ2 zH{zlLO^r8cx2yeIUZ+#`+TQa|D4MJRJ&m4TK-dxX<343u)S+0O-SsG2ax+@C=eRay zwoCJ)HsYDy{BT;4uute!q`da)9yLe&hLg*qE#-mm6Y-R#Ph!X$@ z*p8G=zv>23Pw4}qKQL%iyGnaxzmA-5R9!#>y`T zdpb~CvQuXO`@IBwVV2kCt00|acJ}u+=ww#duxf}oRm+ujpYVb$2+fCZ1!`H~ZCL^s zA7KpBYPor)H}K8Sjibl1@ZR(PYmK~Ab8TUa^$1`G>ry9JLj3`= z&PrVvPL$2LRF&DQ<-s!RSN}tm{a%sX9oGq)T=UoK1t7!Jbv)>gJnsp)sZLnwKok2+ zgb(inYWwg@*h5@Lu_upoAkfhwE-|_XiA*5!*6h=5EuRyaSp()`Zx>tQk3BZ~cJ4cH z4TmzgR%DOKwb?pEdqM5DeW*#?{TgbY{tKU7LGakL3#c-7_0VIb!8lb&pAPuzVA35t zK`(X13mUuod;SSnfcv5Be|>$a=IEMh-jt=~?dw4H^A`oI??!LRso`II=={IJ7!yxs z=Y2J(O`YFvs|7gYflR*4{xG z>>ieF;b51q1O#p?e_M42yzx8WGu?`--8KgSc6qQWx&!Xr#l5E-C-e@iSZOtm-0QS^ zo# z0a5AXwX?0N(_j^Uu~lyB^T0fq$UQdlw|Jg9;DI$?0$}g5?R&)jDGTijRLSjf{LBaQ zO`(4RzWP4o0v*pbtl8{;6!^fc>^t{6YA*hn+oB6HWmVBS5=u15t2A%2EVOPfXsdao zc$pDO9u6KXhpIPH!s(R{KN=9v$><|O7m^@Oon4i= z{O)wDP1@7`iS08Tz} zLWIvmXmC*2vM5hW4xf?WU;rnrIeO|7=Qwpakm+PmCX={{dBX$L!$)QRou}pD`yZG4 zAAefzzxPQwdh%)6)j$sRpUHtg5RF4|!wC`$hyj5|@xsJVjFTv&YH zr;Z1+Y-9kOxjS|3oAVFsTw0P^FTl}x{bKnb%0HX z9$O&5nl(#z^l=nY;&lgnC z4oy{iA>2~^+79UCw6X#jFIu7Jc=%6WSq`WTyG-+2vC_WzNK^C z`tfFscB)&)avfqb-Iq}}+J)V;m>X-L(^4aQ$|tJEwrymG?lR4767tr|6V^9G0d0T$ z4LzG9#?NEO(J9fYwB1h~x47A-Q2EFmClZrS)zkl2R8>iIwFE0Kd~@9R;|5@~i5qXK zX!5kLyk)-hC}lc7KD(WI&Gt4I)AiU^eu9MhQ=J@}R881u0E@iEf=^8*q(%=xQ*_`J zJ_M$h`ZS+Ghq}JF5@M75Q*9$*q8?D$bOuDuYsXpwa>IMcWgJ=Ae5Z!wHQ=D}S zNm&bcBa=7Z(DAoF{+99?Lir`NU+@J3=@=e^hn~lfvC4(cMaw69>%OQ1_93CUiUJfC zN^79ft8#VVc%-A>Uhi!j%y~!cHa~Q${!pEUIo`hEFeP!x$C_^AUf7@eH{}KBbTE|e z{5xUdUVk{~!l9Ijzq-W5xB75@n$PN3y}3{CEwF3#f&S1vw(9Yyr&lvgn4izeLjgG# zIJrzbE?VLS9vHUFoWO=C!#=C7TKxrgD(`KLkbVKrQ?*r%pa*SHN}idkTnPDMsLosV zP|O)hlcwJgb+p=~zki2vCagD#=z^!?!vT85O#f5ET#aC25o!5t|{b=nt?QM#-k?xt>@rQjqOm@pzlQ*XatM)T{H%{v-Q_ z7kr8H!_MI&=^`KKF(E=35B5~k<>*U%<(`wDU~pP8TXti#y{*qk_jVC=(G~l@l!tPs zsgq@{8$1LZ`asU$w?i`EQRsD}wxP^mKdSz{wuz>nbZY$ok->5a7(dmu-P17 z2%$ki^qIUNJJ(UzBm9TY1k{K5s!2hNoBz~tbyN^zgblhsfo0LPRs1fZb6nqSb^7k* zPOu#-^q!Y7rLK62H|WELO~*7s5A5p`%vX(jzaV|Yfq2GVNc*$}>#)+1tf-GTv(k1g zJ!gMD#pq%sx`)bj{T5gTDL!;o+Y94*J;so9_QhBdH)`x)C5Kd0!&j3>S<<2z zI>+>Cy_jEpo7N_*)7mc4Z-pCUFYEH`r$3<)Ym7 zLvbTZ#BbXG^5xh8F&^lZ5Lpu&-Bx=NF(Z3{$P*}S{>?xj@#sfmCim|0rmU;SzMXDs z8nAwwF$0p_m~(x(=PQ6cpxZir6uXM^qmt(*ev3VLhLv(&Z+;bM8x{c5g6uzJrSDbW z2(+?rNCni6E>tYu6jPX}Jh99g=ARNu{qv@hta2v0R~ozv>Fq-M&@;uqn9j=Q)2YIF z`EtG}FT~_AbyRmOViW1rlA@E;*BhZ92cRFfge(udlN?H6zme@4wUSeZLm>=FKHHNJ zKe)l$aPft!xG~P&HT<5xdZm3cKYzuZu|<4nABT3(B%ed@95i3}uq^jOD6ex(3h{o% zdw22E85Ct2C&n|&Rv`33K65F2KlW91W;&sRxOb^O#mjIcc{P&oNLxNL&VgIVqnF$- z1MU;zD?T8AGOLwA_4P>f9fV+rv57Uo*XZ5a5Ilk2&(}Ix<`A8ThWpP@0R2uE_Gc02 z0K?(ozV*wOv*LxQ&<)q~c_UmDq=TT@VF$$9^cx^1M~@xAKll~U_We!z|4Inm8q8nt zWgnaQZ-v^vlP~33%kRn)vfeKX&}`|i^iUv)7uS`6wU+;4 zGyTCk&6SJ_;tgHx*u-PlZrB%X5^>gqd7lf2y_Rf~bwn6#;VZ+Cx!Y%T`cQp|7hjIy z{wbg1=cUDYSzhO1qPUmDed_2RU!yb&pynv|PW>f4KP#Yr$I^hb`NU9{UP~tId$bJF7(SF;854jd_8%zW8C1=;KD5)^a=Yf0mNYeetw#~ z&F#IiA$oi+-8?O~%FbwCaTo|NJ19B93#0m!R6UYzG2yH7zbUkRXa5SV+nzS8V-GyE zzX7!Nk#m|n zru>C>B))$x3Kd3A&3b`0yqi+)$aRzG8Dds?>C$9%!3 z9j-J@Zpw!ryjNz^aXC9V7L(V$Vb85}dXiFZ0 
zog!nWae)`nNS;iC5{)0@l0beB(64=|cF+o5GbD;A@;#~{=cL3)`VpmQSM}kIQ>8_J zm=w-zhu=*Wj4v+5H}eHG!RtoZlcVMHE1!P)9oONc^-T7_Lk}K)_(2u4bO8z28k|h*+9kyeqe-o;(nqYyn?(9%mF^_ z7t7AhUit97XL6KBMr?Bu+1rLto0JJF8T0n(NYJ_6P-JB z;z^-=Lg;dFkz?5DQ$`i>Iwlwyb;xl2$nVe_hq%C@52^uepU%HEem$Lp)FH?mOiX<- zCqUjB=;D-1C;qa6e5L(jW0PzbnFj+lJhhGK3E*X+HsYMl#pANfuEz57Q^~#59FjjC zX_4m*Uh(OgR@0*@df7;CxnC=;vB zul}xotFh0w_!`GlMgD#%YaTS7KiB?3FrP2Vcs!Qx(nd|X5nT@CD zN}raEK000YV7pTvU7fB<>UtHZ2Oz^OkSwikyE$3&gV)+;CEz^9D*dtcCxjj|)%BVr zoXoU;(LcA5uDlLwC=l|J+=75ClE@bCyKw0-3 z7t*I~nm$w~W%7K{ekIl$_K5J<+m?ZGPCYn}wISr}>IgD!{RlHoK6L$60q83qbiB5~ zXGpo4UU5*3jdadyprz@)`=S*{+IL|9wiCKcNV*BDsk9l*6BT&=swrbmE|j!GEEmc< zp4^nP>7ty^m*rCWzFhDV0lFa2*!nko)fU-&*g|`C`|pN+?#Zy%mZ$YN*sUjs;jt%P zLjh&2{s4+Y6Vab<#MOSgaa2fmcOehLL9f%;gXD5Z+iD{T$k3)+NEsYgl@7^o<2B7( zc3uCCFv?Z9k`^jXee~Ss87pmU%_!ZuuIl7|%-1jq?Kf3Md!Sv@`Z9E-IC;{E^+LTG z&rcH?p_xeE8V4U_G1k7p+CKT}`vg@c^!%_dXD;_`+zx&;?upqpz7WN=ghC#KC{2FS z@Wrp%aQ_Ubv0wqsOj3^HurCBwnE1{Z2Jy^a(v9eMLR$`ynCYQt6Us zT3N0_<_NO!(}YbIrH^b$RFmbRFSc;4+bN zSkszxwSJ{BQ`aY;u^Ug{%4l{Jv*Y}cpNw$kj<##&x&I@6s_m6v+X`cN<`4xa(k33~!u zZ>2o?41K6prJePWnY>=^+LtgP+C9J>n@!FqPQJ%! z)lO({dRp^{`+ynz$$tU10otgFzd}JBBmL;F+S+GUJ(zc)%)ow){Uk4k=+dT4G!J>p zpZ&s2c~ez7o6XCKu2oL8zc|r#*opRzueHZ`t^M=qa#1cti_iDR8~q_)0=vmKL2&a+ z0($X=h`nBqeHxwDs>-&iB&QT3Y{#jG7~^Vdk|sk`$xUfrE&BQ>WP=9#$=F|Vy;N_4 zEA&?axM^Dt!{$DeIQ4?RK=+yb>AH}wCSnNWw~+PeE&J_|T$hJkkq2N?3(+LR*Cq}8 zYtkp(#;qgOHz@A%+74VD+1uj`tySJDFV%nM9FBS(6ook+)a~Vp{(5KgJp!87nN6Om~L2y^`ksRxQ`8%7I z`Q)-p&(Hn!fbrRBxjH#6=O?er#qlfQi*j}RvW!n6e#vu@)8p>z0do!)@CAX{W#Mbb z*M2oPFkaeONbf#MX5F(wc;?#HljEJJnaEsP{JR0`@)ta}ZwA1>_5?+!F;TTZhz%u&q$~ozJlvg@z+-%=kyScXp(3ZMKNB2Y|?p4~X6|%B1K~6fS4d3tud!9izmdotme)=tEd)M|&US5zYAe zhUzzKJj`R>@j_|bPu|j;;DsaGzW258MK|J@c05Nize4mzzBq}W+hG680UH{~5O0*X zpnzE?JP-YJ{AkITmJf%emfvX?#S zZddeR5w)dsSk$4gB@+1B!e4eEXmpos$ ztf2;f71#Ss_LHGEDL`T?&tDsegB}1yl;dlL!|gcuF&IkU1MAm^2GnlrfA&B8@BH^P z4DVT=QlKtD%Tfm6Q^N}VR-LaeGuRPW840le1YwUCib%KVbwUbre(=PYdJWqf(8Medc- zugi3LCLejJ33;x~__=g?B^_Og-?4btq}t?DRpN1Lsf{fIF21FI^Qu7^)l(p6XXj-( zo0b3l|L*@_-kv;uBDOZl+3|@)6_aYmW`7_9;Nde5I<6+17}TWHq~Z;{!GPb$!Z~tU zOH*5+e_xz&*SI3nloLoA$00Z9wff)4qmUoY4n;`6TU_$7?bd{~zQnEH#zk-9MS9X7 zwM7n`zf`EEVK2&~Uk)O0ibms#UVY*mJEI=sN!%9*^ea1HOt1@)M>oEKLf>*kxK#Q8 zU4goi$EUkDv7xZl_nLh=t1@;yG8eI=(KFaCy>w=&H%@6hNU~zlN{*8f?!K-OK;e?T2ZWj zIBCx-x04g`22gy@Hp}GlraX9XRQC6_%Rl_D{d?s*fBARI< zLfB`Bq_6nh8i-3CPuAeueW`9Nm?joryHT1kgGht66=bP(;q7r_ z9^*$DebJEsyKr2z#4~h4r}SIpaGmHAPJ3CYlKsL5NILtkeHj@)LKSO%s1rqirXS{6 zJqd{#6Qfbg!%$nM;ekGamQNPCZ>1C6AnF?}OXc5K)64}z{D#vprV(eXL+|mE1hj*$ zC|YiGhOI^1JYYDKS$`m7hyF4SKMMR$q1wjAjeKY^MnTgXv|2<6hisN^MAu&#fR@)D z=mS+2<>NaUPyfUL%H$2*S}Qhw;qUN~zNteWq&uW8yac$Vbs!m_6b%IE1byLagZI0U zGVGwKgHUNNqR`tC2yrm#hdoq%P}jPZyyipbCgMx_h&;-rF`SW-apN*{U=$zz?P8U| z6B{AV6DyidYF%wab?K+L{Ipov@Wvkbx3w0^7Ug%jSlc<+*Tv^m`Jex<{x?PL*$14^ zA!~!@sOd^W;&Y*W1Q(H9$Zct(Ox%0Y`u2fs6=ZC&LF-)k>_1Qh@<}hYyQ)uk_3wo- zp4J)u1s~7a#8`(+p+8d4R!n{1!_$BI_x7PDIsI@=YxPX)47?0?*e7hpK}6q_$5LWl z=K}3eYt*m*#&4Ft_SgT0O0*fJ-a;~aer%P`e*RhcXaCRtm*iKU%whFG|E$I628LoS zK@Xa9=z_J&K38La-(`%Itu2SrAq$5v!za*QO|&MnSD-z6uO&Z&v@z4ZSajV)bv5=@ z1-fF+nHKtqle6KoL`QnTzxW}9?Nd!>Wr!D+;y5dcl%iCMp`3s7&pJ=W6p!+fV_{ZkT-G1&m| zstYWBS9`)ul5M=9WaR|0^f+WcDVyM{J+YU8K4U7I#jY%O*jn_9oWXDSKKf?;1=Kka zVfafS`#|Z^@A{!rbjMh$hOa5?YuPs|G}9rE^{6s?M*0z76k`$QN@=k3m&|KIP`Uh! z7Iw0!z3C>lNGqj%vw?1*kNw&vuoEP!{Tf+wt55M9#YXY9!8`Mb{$hU#N7Sc1GQf)h z0sO_fY_1j70vd z;UqsYEBKh`*Zp@u8)(Jc!;dnTLk2MFde(UT0~oLBzgQPC#E+q8g>>t+STza! 
z8|XD|x=xEv>Fats{3kaOtQX3O5EnfkSf>y5OZ%`#rR5LI7yEDTgYj|66B-Z`(pT3f zV?$jL#@@)L9(?gHZeG;!BXkAHs{VcLN&jRy3|`D5GAPG-DQljSDjj` z){QxW%uD!UP0<=-`=<-nR7^fI9@0kt-V-R~zu9x2=#6s$csrx|MO4`qJKq$V4&*8I>Z`| z9nf!;F?kTW4t+9D>8F;*+OIrFl1~-u4Ee|u-*x_w<~rQ0n`io^En^sAT>HsJ#JeBj z+IOHES`)@t#XMl`!_RMP&7+LE%Enf#N0>oA_*YcLTGcEI{hLphhu%Ve$|834nnQdaVTYCpF9N;~)C9a7!$2Y;Qd5h^z}bBr_fD_#1hiW)QCVXMdk?=fcF z)3H3ZO%PcqhYodO{LoMO%-$za4sXZzEv#IMoXgLgI4u%YjCH*PR zeou;L{lg}?*@qw7TXrF6CduuTTGyX3W)u ze%y1|kH5N!9+igyY;NsHhONTSZO+BRWWH6#Vt1nPypo;Hw4RPdZmjj-O#2Dm^dGBV zu5xE0g0-txa}W@{1!M>ye~um4w^)U2PdQlAjDb-}u&XDu-4a(|nQQ2rh(`vI&_C7v zhM#m2_k_4!(CBK+_)T5jh>tN0n@Asn{qe?@zvL!8`ge1^whtB0y*qP+zS#3)x6ltO z-B2IUv{{J>)0chVvjXcr+O|hp_QAe|dBx2b=AnF_ZALuMx9eac4z&`(n$}X^jOX`( zp`-q{*-vQ14rHUBy!TG|{->Xmy~Dk7e0pKpKl;f}%m3~F@lQolnx}v569>bcGS|5C z@L+2jec@|D-+L&bMsNRq+#%))Qw_(fmn7Iqk4@d97wjW`3C{Npd+oBUpe@I#8BFxvwO(R3Wb zVI#o$G%3LYHJSD1L#?#*-v*i|Ko>6s-^aG|G$V8tR&vJmU z=6B`I7*bB)Kj>29lE9Ll_V%5k!5*~4Pum9FwoM&7;vLE^tiRMF^_5RGuZ8fM zw8ExV$1TDQ+bd~+`q&wNp%0y;A2P-Mg|a$s9v86oZ-79rGk;AW*UbdU<1aixS9^Yk z_;Zh;=7Q@6P4l5XXV<$iJ-{Xf5 z6D9jZ5IR&0U&D2!-IaLa-dB5V*?8L+S)rq1-v^P8z^-HusKx_LI-b|Kfl7 zzm+y0=aEc%Efd%WnLB^1=Lj>jARlbVK20MT{8H9%dbrT<=g zv5TmecHy6yRe9{EAT9gCuF<(kK#vF^eL}3#cNz4lYo^>j`jkz29&;_o{9}G8=6yfq z(XS8Mz>j%WOte{xc&@W8^Z=${hc zr{j0k{h{g|$&4OoOLwnqkM5V@-aBQu`%c+8cv5!nKP|iaitjxx!-EH9xc{hZj*jGO zj>`7nKpqG=dBI(wY@Dwx@b>=#-(Uu3{XiZ|?oy4Zxr*dv6V zC?O_$#ZII<`(XLhIfpjXmY%?1{n8Oi2PqL>y94~2MFN}@qm-2JAx4t0Y84nNs z{gyw2tO{|eMCBkl$MdO6E++ij5yO#x^JGhd>QBqdF#V|$Hp3bS-uU0yzhCwrJt+^K zd{iEP$Zz$3w>(9#g z_+?q}$(z}^*79i?YC+l^ZI=DvX4%`_D7!m~?{1g9oh_xw+blaO8_G8ghBsxz7&Ct~ z@N9-PeZ-&S*TTXSD_STY9Lh*FnPy|oU6*q2P{yc6j2WL^$;BDuq6IxjF~z?J&Ivgj zJ$l11P>dkPJAi48V}g$;y$$5&2`K=l$)wJ_925Egpl=5Vnm?8EWXDf;0%DUPz=^&M zDz7O+Vq}JFhROprO@BDG+SC9)eY*%rN8Oq){*rf2T#LyR7xrQrH$2TE1XN*zK zeddE)i{=TAg!T(D%gudF`Eqc`Z=r0KU;mBYP#fjfHti9f zV|-R@LADk8PmGQ-o#hFUuZZZjJghu?u@0iFOE=}tU*W%ptRoLy?J;S}mbx}xbsb|J za)^(|CF9V=bJt%sK@ae(`D0XBLfeNQ4iKk3Wl=>yN3}Ejjj+>Eh4lLg$z@Bwqg~?x zNxQ{>>?6@P9fciON#S<@PBVHlDB2Wh1_Y|yZ)>V8`P$-`w|v7b;~meD9rctEW!&cO z{Z3qfF3Q2ZgYrN6C;z~61mMdHlu^$Qx$W0N=bap)=RT?)SHSbVm)-tvO}7atXU16r zfVw0&FHwK`G-D7|3|*ZDtlB0Ey>@<2^90^_And&WYmYC60F8&A)HBzj-c))TYV$~Y z4!Lonlmm}54mzp~L}x9ceE=G<1LFpU0v@2#mVwl{%`Gvm-#cz#+*002| zj^6;TvsMmm^0vgDr<-V`#lk0RL;}kdWlTS%_~QmSKIw17twZ2$ZqS8nNPk_EO0F0> zx(&%uv`e6$>?=rw{me}mSyfxeetsf7pHFYfxlH)g)pa?ZEX&z!TF!K#aG|wjD)g!@ zCGy51*J#=-hR&=M5fPFOTd(<)lUK(GghS?a{!Tccoq+kU1p33=b3JTVZ8L8Ll^=(S zX-ECQH_ai+%!lc5B@l5SeM$m)0sOWB3EOw%y&d3s71nKP9olm16FW(x5p@2!%IY{F z%~DoSZQUwELDq|Q%6BM4?%FQtWW8*k`G&d|TLG0N@4<7QPRLePA$nIB4*-ooa=&+b zAg_hZH|%==^O$<@+E#tNIL`6SzNkZ-a_6f(_LAj5#<2zBt`8lBo$M88pX*Ua%c&;6 zHc37*-l6i)QTn77m>V8{O`GJ!`DO;FevJ99Jl^6>8Y11*W1N9(pOF1${5Dkj(9DXJ zKG4eHG}W3k;JD;i$*<$-bjR;nJFxF6XcK0^Ak$>^amgJtv@mb@D``Va%wT}Zln(K;t zuT=MzBG%<`?FXb>_z_^axYk-9e#ksmSrxIq=!0?Qt0e4q2|2jAV9&*zr4QD7;Y>uP zs?Vz~W7Xj|`}w&bzSucD*el$9yU?b1ELJYGhqx56D)XtA*yu9&cssN-6Z#QQxpf=$K#!Sl>HERj&8iF3G}_%MS65eVySK*;{-|fUrGq#$ z!NHoK_cmzPl@9LMJKe>5EAyM;|5by#^z=4Z*IQzw6Ib)!1aUnZzCO}P=j*k98~mbe z%|e}z2F2U8B;zfRpHs?qS?>j5jd@*V+a%v<>cw0MjK=wfa|>A~mwOIy)(sr^n^;418U# zPWe*6mx{kC7s?x-9GA)I>oQT<}Z-W|NCD=Qpq?mt{V_D2v&pUu@v>>HMC$ zUtW}hBhHNeYc z!2SPTlL6$n`hP2keBB=W3Ls?o#Ua`uDspjXV*lWvy#MZd<%3T?R7uzdmdSIG)E|4D zH$mD-3)W{vr?KsB0x0~3?BAv4ypOR?(my{_$5#S44_*_9nfDsDkm>b&AjjLN{aNTJ zu2p+|MSP{M(DT=Iu(8^IMXQDD67A(iBOYo8kiTHRDf$cU*UjdB8I|9OpHDB#RN-u* zxi^~#BY&|ZyehN#WtohbSLbEwg9K+~dUaanns4*Tspg#WR5qW+TwBi7R(QR*kU*2N z#Q}&L?I;(Q;(c1SeLxXe7UGKuCcRqD2Dim9x8j5wL#@{wAn=!y21;uVZF8?!?R*nY 
zb=iwc7rma8C(_=}OTS_v&c*tslyD;s`2=n&ZzNQ>913@&tOJGGJ1nHbn zZ;kjFuTczc7g3YuIvbS@x#5i2^@KWR8Y4wzFqABy@nbE9=Y%?IqL+$hkSGWYie zRNI840T6RK^Gve=oHzWc4|uk5L(ewH!+vZ_rIMRC&YqKaZfYJwCuluCu?M(M77X+$ zJsU-lpM3PPU9541jwRWAZe+}P@<#(Ukz7|}N9$Oe6y7y1i1V4=i%c>l0`_??e5 z)>pC@S%7pg^l#z8eCXW$bbKXLyOFHXPtTvhb%XCHtNO2WJeCT5V8vq@;2{#wCLc=F zocQZa0~(aA=3n3p)6`KwmPfx{CEZ_OC#OQ|>@85M zb`axN=_a%?34=a;1-Kva-G_`-J-}4QE#yLG>AaCYki3we^`(WL|6Y!G11ZOf_7xS! zuiXW38~lOkCyDoRrT!BASNUKMlHbYIemNB$Z|sn6>kBLc`4yO_b*_Tx)8N5Uibw0Y z?Y9}n{G?slL(U!Sy0aDAi{xwPVNIt`?AoniLf&)?-oZLf@_~M+uejHdI>rED@&K%d zv`0LD3S7DiB(B*5U6nHrO)nx;|UFP zE538hXm2tYWbdIv?VYG6jKcz8v~jg2M7^frapO$1_~UCE;uRC8Pxd<1ybY>N_@LxkFcwj7lBRs}0JA;&_e%0Hf5m*e?$ zc{N-3U;rbW2LbrWhimP9m$G>u1m?uFd=d2Qi=+?68Q_o%cHwancIzEuZM%=X40E24 z{SN9%J9U|;`crfYJH-CkbJTh%Ee|pBL2n?3dBS#VC(xG8yN4^{1+-l5tEXv{0c1Sp zV2#%v(~c-}xCUXeyJ7&-{sl zQ87;dnk0R|Jg9EA@A^?iFvgSXGjtbVzG~?9g7{5TrKO`>-(rocarqhR8s4D=>%l_E z4)6BSf&QGGd1URrd5#qVtxDMBOW*r_@`!;{XE4Fe&N7mpl^OC3VF~I`}@2sc& zdd8TErt?Xod&woA&5~h6A1dnX9+Z9~Q!>&9dd{!NWD-&@Fw4{g#t0FGJ}Ykq@oIap zFj9v4z?ZGdk$4NR?TW~i`+k6Q>>CiOa)AyKfwUn)^_OdT_MHxe(8U*+F6p)|{7@!L`MU9G#u1EbzraY>wv`TPIHC{t<%tyi!>akMJR548 z7DBE%Ryls8;X!#}OFgu*n|`&aj&2QC`VqZY+oNv8StC}*FXu9_p7!)WHfIKkJGfR| zeWx(KN?4J)plS8v$y{SjqdV)5bx-{SQa7-p<8f688|1UsQ>Vydnv9wD zEsU%0*U+EhsFWev8ftH&#L{25;;lK=Q#)xXl7 zVWQHp+VR@WL^>TSoJy3**0Su}yH}RlJMmf$5`oFimz0Nf{+h|0shi6aY{t^M}x;+jPOukaz>$T1I+(deXP)cW+}eVA1kwRy|fyigY%icfUGbqn#i_P0}2{Ql{+j`%TuBbje2kjGN zgm?7+l^`0cFNwfLORL9R{{Z&Y2In=AL{@qkke+s|ww5FM{hzJ_R0C)DL~I4)Q>9d{t&wm&WPkMHye78m}%+%J|~U z;pFVJO!(UJ$w`@?9hdp}Nm*Q-mBrK_yp`ZRQfUl+c4d+SNO!v9Pg16~UL;rD+~%J88qjxS}#3pe<6);OLu zvOntjsC!PY9TGb389@0h;JJDiSmQO!c$>Zr(bsKy$i>%WSSxWE+y~-nG{!P)1KP;$ zaf1-`WwgkeJp-LQ@NM4Lk*3h1Jzc1Q*4fStgo)TDun*rK(R{F8O+4g_y++VsJjmnT z*X};gS_S5nR=lVazY9N`^RwbStD36Id08&BBfOrJ+4NFt=hz2mrd|BZIGLU)@3c&3 zugYZnLi6usnO?lsoP1g4m#@lV@~SMSCuKP~EjQy+Z6?oU92^3;5=;Cn4_|Z9yi=T) zy>nmWgMH%Ba|<8AToThf^xu$O`5plWXS6T69;i$qKkLc>p?7}H$~uw~mV(w~%&(wW4YTO$hmdnFnXE4`@RhH!iLpbYnp z%5e97*}M0w+<)hT^61%z<9vCn;m+IFnX&Jn2MVlf@fe-%yltcraTHzR2J=72lL` z8+lOA0-(iW$*C6M?Ce#!di8lZ`{F0%^vj=?^A|rWr!Rk6PF{Z|yMA4knjrix2A_4` z8*Y?+-s<1kQn+3AjGJXwi`kCKhcZ;|xpU%eTZSubTUPkP&e(inwtRJfhpxVLsS)%k zvkfhfSgr;RA)78K8o4--Yqk9rsi-!-71l;DSrfpy78fP2*q%G2b8{^?WxItn=GKtMvrY=-L9&K4_~z`pkX|yqPS zPkmE+$Vrv(VZ`$WI7SP8?cZzY@8~XI~?_ zl&xbxv_tPcGzao9S?v=-zpKc+pwF~r(}c$=V%rbc%I@DfRFXa@Bc$J4e*vk%0RL(b zG=xBRV^csr(BJsXvO2GbD!l70!mkg=m?(1o8*zX%C`~$@0BdyHx~74~@bK_nxqtLP z7aOB8m$+kX3Qx6_d^Mhz*Oz1ertgI=2CleR(3Qemgz`YZLb~w50BMGBTYE|#7WsOr zqHl)NH&S-%P(+f-S8Apq=el!UHV;p$wfw#=;oaD;jl{v;Uv#{P6i8eiCGw=lH`Tc--M#^z<-uP~H*qlQ(;xBN zUnO0aWj!@+%S?l?B9_>uSCBvS?RfT2JkmYQs0XkHF`yR(yVq=^SaAHlcCsIf5T3VF3( z_d{LRZ!z-Y!IStfoy0l@SypM?h3=ZLX#-e)%soQ*O5RbJ(yj}l%c{@FtNv37=TTn^ zTO}KLnZE{^m-JG$w!aRjLz+27S)~tj{6M41>4QaelvIwlY4{t@XQ*Hq=)=W7ZV{*~W2Yb^5KiLVpccbWrPc^t=9}AHauAtA3-O z+81SOZMyV2QrGosTjkL=uu`wm=<*4ZF6guhGM~9544+7TjeE?a9(mEemP5BouLA98 zPd)ZfOhcopf<9EHcD|8T=^%%FCFpctm(YDF5`8xPGLI~fLTs9TSJpCi8*%T0`&+Ds z_%eJCH!-bm(d!>3;x;AHRztlo~ zp?&T7d|J*X^Kv?!l{4Y_WLZwH7Ug(6D_7HLxtxs4WIids`+I+-{EgrLYt|HR{2MyH zSEB#FgB*yvAbGA~^4mQGw8NI(iPt>F4oIEn-Y&)`_Kg&|ysj_IGZ^ulQ)kY@QXEnz zt|bVmm+O)l!P3)lKW;)&iz^O0By{pC_nUcqL-GP?Z#>#W-V;Rwu ziG46($gz;!_%JU|a|EygcU!*k9dt|+WaFpOyxs%5xJt}Dyb-PeEn9$YM1pUi-&emG zkPS%L`3mhoI$}t6hJ2NFv+PLRd-o1iOBW@YPsqqTvp#Ca(caMjjnSLo&1J9;-^tH9 zw$P_zJpOEeJ_6TySm)<25~RIl{8tHYqD%T$tuyzrW@~v6%>7j!e7~;7SK%$TC|<#@ z0`uS-5n(4`k3#a8zcox~r=<@rAwos`S@sxZ(GJ%KW_{DuddD?5UlrWm(!3qi7Xp+A 
z;7hniK>2{{PJAA|&wXj-mH*k+TDZw?_fKYJb~P4W)q?@&Wqf{CE-r-Ur{(#0TxS%1p~D&r>-(aHaM2qRi)~Wj;MFlgUY$PEX2w##cGMEVJ_$!q0_Yl*QGH zvYft_Tqosv&H(|w3~(hajm4qn+Cr$k>dlRCF*cS3-=tz*Nzx_HL)rOCN8;UwYdqHy z(SdlEnl~k*e*r+U@zYD(-;)QjE@GT=J_wf*+6RFXmFBDVAYTUHr^zewu>QL2X>Tqa z`zHtZ8G(KAS;INH=L-S{lH;Dbc_jPiLH|2tWB=WUj;AXrFl zW=lV;oodnWr(<4zUM^pLR!%?vNqP0tKPfMM^e5%@XFn|$$6u)YSc}ZLx*cnSd0j@D zINRG&y^w<{TQW)>-t$@h3~%^-8uiO(FUSM^d;YpGcvio-jnY;I%|)ED>x zGyT#$^VFwcEzC~3tQ@WQ!p+DuhZ$bAK zJGNHAI*AxvhRkvvF>4sJxCsIHVoIBZ1$MZ#qs3&S{K4P)dvb;w0wd!sIC z;)IL|Kso{4f>kcJiFUR%+GhI}yZpYrdB?-lS-3?%1tW& zwZE_>;>d~JLO*OKwhKKbPxR-L@CsXAdfsZ>^nzv!Suag1i~Saa5qij)>Aroo4M2_t zv;cd&sys0^RlX`Wv$}6by_lo6bJqi)FGR0ktpkntogwN)+jz(tK8`A`R@<`)@x}vC z81bMPZ$*k5)gcW1u0r=4youE&fzW{ZmyChLSv$=@Frt29?gXkZ(BluRdHA%4==D|0MaDzF!0%|Rc)Lnz9tws}qq1aF8G7%= z#k@9rTF>x9hBb1sIe8QU69p3H~F1&oa=`vEQtNTYjnEE$DqKeeT4 zBYl8!J-!N*4}HqVpqHB!9W|$GV(?<_I_+k(E&(2=c)qn0gXh~fE?crrFY(dBA>$-973?| zs(;XXA;%e0$;Dbl=rp}ln{RX%bHP<>TPoJ^AoSSMN7$xqkiJl)k@9G-fe*i2ZEqFm zpdT5HfYJGezO2s<+HZqwZShQ_$2#=2Om&q;Jno}xbHkWW>-F}|X1Sa!%3RH+Gv4~! z)TP!&IaheHxGAq@^YVH!E63wWIoBZbAc0Q;qo4RPfG#>THsC@uc(}x20Dk_9eT?@L z(iQs|{4gU0hW_9#betGISG=tCqkZ~1)CiW3@{gybpXkO}6 zGdK-m{~0I0!vLBFXbj=4`b)eBx~QUQ|%)l<%h4(o35C}y7spqJlK~cYx)BMHBNq(Ib{97PC<{I{2Bg{ zwK#pH{bi+N2ps^u?;=3HD6<~Dp4T+(;hQm2yt_WLegG+*S8rCsmhTKtL`>*j9a@z^>{pnqjcG|~X;UF1_zKP-<5=tT zVi(L!kbbLe+BWc^8gG9&i@mls>D=rM)J{mO#a^_6?9A;FRpdh?F$o;{r*p7%u?-*e zZ5y_%(JuIKld@bWkDHw6!_zzGb5wwI8&7bKx$&rJbL^pf99Do!it)Y zDNDx}ytXL%=)>uG;z8CizkIL?RPM2}CF%4|wBDIF z%ndN+P?z78Csum031e3t?Z1xx3{@|-f6I}0xBg;&F~(uvUdywu+8=zVvdR@SWy$v{ zWPgck{nq^0gA!kdpuJu~ZbCj3H6>4GP-9oo_d*-U`c_xg7r2j{C%3TrHZL;5U|UTk zcj_KpHMpUA+*fc#_6s2M7nCpA;KC~Dsm!`x#vBV83b{AVix%>Ue*1XI3O5%g@vJ2M zC1vO{)z5t!eh1ypAp~-<=5d=R9jv?fYWo-YwW-!Kt$~Z_!at47@1kFg`vU?Kz8Y|T zS}xB{%GJdy<-bFZ~&Icyln7==r9=Ri|cXuc*!M+ zB{$EZ;!D|n$hw8SFa5mc-RJi#7l6CNzLL(}fV^J^D?qBnTaX1P_AAETKrXYzDA9QOnYbUW4vM4cy7G8Iai&Fvc1L6 zkWa-Q2LXh{7ZdSvAr@6{abA}5wU;VtC zpU5C5@mBv(#)Z$4`?JrM3_LktcTAJvm<{LVga;&00#`qQ&A3{KSK z6wOv+!~g-;r)`zFs)01OA~eIy)AsJDy#L_`o1yp&~p^Q3AYaakugO*l6@aC zX-0W(9_Z+@+HXyM=X>8TPoEx?*;0AZYrgep(KHO!gEmkpPcJ)2o@S7O%x~j;h%>{f zMcjGR6Xpe9%{FxTxjDnQ8k%4{bd;sZ!7SCOzY3yF1_M-1iEAb4w zc#~df^UX0K;$qf7JTSzKy4@RKHMGyvVn;mFMuetS&2RBd(AG(R?)sHhj$0dM9B86y zlKr$aYt|aL4?#;W+eBG3w3l>o(@_JAXVO|3w+7dtE#u)x%P$CsR3{Hvq}w`RJjjiF z;_%Xn>vYwl!8^_#x*J7QF^YfkEB!=w0;GPcAAK8Gud0q6bu{bvW_P*bSpTlLhFy)o z7$?Qo>iHrR|5?+l{EpXl`Syj|vle;%Q?!bM)LGkyH7ToUqtem00DV@SA`|l9TSHgX z|0wNzLElbmA>}HY^YcFNr#-@7{>y*G#U#1#WX}N&+7ZGlK0ErV7Z=o5gXpsgVGy^B zq1%%m5|{_k!9E$v98Lggq>~nya}d-Y)ls~Y5$x;34k2yw0eiMja6NdaYdFZ^uG|Y4 zSi6S0pw=G8zE+#ah1NHIQq8CnI^by~+dn zbN@B&y;H@!aLGEZ$6U(zI;1U$G;OjyUv$Vg>p69TW|du#{E!Nr1Y%%eqhYJ9y~ehz z=Oe~7Aai3)kE$54Qx!}T4y7A8iXzwzYetby4 zbVPJp7eU$|q94{KwmF{rq{TV<$TG{JxaRl6le{`lqewMg8vgb_r=y`2d!t5&jY68rk(kkgq+sywWn? 
z%FDJuYBwKXP%r11)2d)MQh~Wf9a{Pmj3SB*9jH9OQdIaYa6F~8v5JQAgt|x zOt+|l00;ImQ7IGx9PAQ<+rpmB#$~2(GNVhKUc6lyI%5_SnHZ*^-+-y z%I3H0?+&0GG*nuG6*=k0v4E})`CVIp_A#dJzbSLR*zi2c@k3r{m|m5wVbyI2;a~OJ zdW3=0TTQc{CNIO(MLyUBFf+>5RnyiX3Ptm+x<(h*ajKAU*ROSZ$o&BA`)gcrpd!&> z@5|bzfO@RU>C-yj=DtH$zE!2X;7j{0t)YCvpRaEEQ~pZ(*@~L-qmUD=mAu)1t`NMO zU6=F8tX#~Ne#`$#3#6{fy{3C@R=Z9PrGe0Dfb*3B+xoAq-iGg2{OHF&^>6y#g6fl7 zeSIxxJN>hQJA4i1X%oG*@9M$#yeq7Zb?!qUl50~H++!HX7&?^hea+x|l_7A`LY3m$ zAu#No7Dn1C9qnSz!`_^|K5_Qwb6s_xC#>CUY4<8jKF67a#M#SJm-$fhdQ@BdiD^mC3J7`L*+G_3U(k(5TwojX z`kh1U@%De*Q}=TghkWg0R0jTfz`#FgyffVP7Xvz!Z{SNHqk%$QpY9UMf%)FNXl|;{ zP5Bny-1pBsPbX!5H7V1Jt1>>nC|CYjfir(K;PULeT%4Sgv(wk*?D*JU6SzF*rw1;| zRPp5%Z!FKs=4>9%o345BQ~Kd60rnf$@>7y*EAIXC!e$HnHhcIyf%MYb=bsM80FDPhjb?DDieBZ}(+fTt_dDZaRe6f^fjVG*~Xn)|OD z=(FQbrq}A7b$@jnWHYSc*lx4dMV($Z3FODYCQ{h-YMn6uu#I(~$k)>1FV^06uh~QN z!OKIjrbBBX8>9^fmmb`ICn^HsCBE>^+~TcqEL8mQ$$RZLLGL}a=ZvpUaW9Vapll4b zeLt|pEzP@4?)!P3a!663`iN(Xe7!?)ZLYMdb;yg)AkSCJa#F6Rd}ZUL%&uOS>G-5f zFBS500_U%~FR(9^x44om&9Ai28VfJP=cMq3fDOJ7u$Y*h`;Z7*a(eW^b6}+7U_h?h zewh^C#QvP~7tOeM&&JPd`%pi6RvY`Xih{_Njbf+LuO_Yf2Bqs3`R#t51@gOF;)8q8 zJ<6t(#pk+wI7Sw5Xy;6AC|KM5Kd-zc~diGu6_sYHJ-!Jz* z_@Tn@mc0ibtJ>2tI(S?*ckiqDUb)`hE6c52b-_0pgra-nm$3$7NByaLY+#e|yZ_F! z^5Dt4EM|j3fX6|MW>jK~!i*8|X*K?^o~^ z{k8_kzeCRHE1zo?^1BtB^g4Z2E>FHJufF)x^75zuw48kQ$K~|PAD1g0_K(jc&r}jE z%23A6Z?+EyTVq6XV?gBYDc)XjkeLhNV* z10an|3x~E;r1pmtO+j-&~;IB_d=EChzX*-U< zT1!Y>Fyb9ZPmE{$t_pEgHnj4AXxdaln@5l+b*L}VfMr2!j8s>P zGjBi!FW6tmZA!lNMPwt=_@n=U>i*oZ|S;0-&BXvWgQC98BsOq$VMtmMc zwG_`A^w9#@lqa!gT2yxSMn3iT-A_L*HxHltKn_p@tt)XVl#Og z8JFxYWr8+k=u2#Qy^cP-M%TU~9&1?ek#AC3NBB>L_)%y|gZhWQK;)=>Nv5UNIrphB zUnY@#X4JvQ`bz?$&s_C|I|Hu-^Q@fQq4gGcWBU#pr4Q)QWr%0%49kPocHKU%8xDW>E8j(aw zpmqORzO&A+=tJv{WuPbOD=zzB@8Am(^sm6SrwcXOpj+N-zIXqjuT$9=PqlgDGn>fC zGRBwzX-CTiV^sO7`S?|F5y;7R1z~&05%L?VbBjQF6hqhv8Gb9*wy&{Ftm6Y}OKsWU zAs}p`w`oG`w2$LE30+%tIR%`10OT@XCKq->f2==j;`uqKt>Lb>G+bzpc6Q2qu_zbk z+Kg+H8~WDXg-{z;E-Z5G;=;fW(nWu>>WaS5Z=&mC3*KK1w7yjIZa&&4`i*>)mim)O zmfh*d4;8mI8ga`)f7W33&5ZzV1d^xRx(@rG2eu}=3EN`qf(HP4*hK6{ps&C5n~1Ja zSjTJdy9&@sd>gEl*S_4JLFFU}Vu%&qXsoo4(SCM!M`L!lU;Zcm)Bmu1{NBf|FR5G) zuI7?u?g#pToX_V0!kK8j&|LXqwkV&EXXV9YT3%dUl#|K0T5+sa=bm}icwG@ z)`MKzW7h$CI1=&4pm;a3htCJKU^nNaAEkSYIm9}{oA>0!oQnMT>Pw%dmner$kDl9V z>-ooNb>+FfkPsRCa|v!4K-_CwmECW>s-eGVBz^lu8`-ndsuQQYDGy9Ns8iF+K0<$z zCuw5eviH=V2{}#s3ao$dPXF+WKeaB%N3PHd=(N|;cv#^y7roFypNd#6D*;*HFL(m| z_Ie_91VKCEK#ZAms(hsO+DDxxzYm3}166mo@Fc(vJzCgfrYWbQA?ZaQDsdRGiqwa1 z;!1=(PUsK$y==bNSLpSl^XFhdH{RsogEEZ_o&Nhe9((46CY967L$_6$IPEL0Lg=Ln z(VO=y@?X{``e{*MpBT`OQOwB>oBoXOZ4BDf33}0Yhx8};y1tsek=yI4T75bdoM@@D8F-OpA`Y79tLf4Nn#=&*4C3Mvya#TGK_W^5{dFRog ze}Vv7q}@&dby>?9|CFag=@f)^l`~|eUBokOdbM4D-CX-iKVThJUBd&-$U{^EV+3aV zW$= zS;z8$_H)Rp{Msk+*l(=?O*H?-Ng#coH`?N4r>~Y!S#NG$LM>X~k))BIJ=+#a> zc|+?q5B(Qfljgc6pQ`*y*JHfhf2oU;b6szrYHxZrFN&A?8~(9wr-oDG+O?<;MJ!9? z5t7B4;UfN^Cj}xco}~j_%kYJt$$VBGKjQ7_mg;Kss{wsBfJ$5Zg(1eLM-j1Jg`P)M z&y~2NRI6auZIA%A)v$wj(LowxpMoEitw4`!lCYaP#@IOnEW-2yz1RnaQf&Y1y}h63 zo(TKHT30m;(4|idLH)A_Uur%!e;JL>)N$lI>EFRU? 
zA=nKRy6Np2y5WLCPROt?%rd~Lt9xbudRVk|sQJhBWY-S_5=g@jx*#7L7u^^Lh44Th z<~iFbzgy>Pe|2riG!Xv9G3~*%div94e9a?tHCKCnFZZ(-FU#ranQQj*_a?4=1hSIo z+iR&6;kZLU@=^abu0!QdIy72qDZ_2FJ+9C(=W0?h)3^FfOj@cQ#0+ixt&sh6{r!TF za{B&Vvu1sz{0S*XLM^|eeaKYHJ6vy%46QtgS+AnUQ^AU!u*0NNp!bBnr-YO*F=!;-1#wR`%c`8d(;;Mm6y$o$oXDx~jC*F02agd~ zxWKy+3e^luz!&88d)^<)emPK(_XFr_OY5#5T*9k}#Ch}?N?okWbi(ZU9Z+G*~7u!pp3L%`FH>B-&W3n z4`ShGx3)L^!U*F9P4+}Qfb4j;Z)a_c{zC5C{jb6r|8J}scG@92ZsslhX5*MU8B$-R z61v;7sh(|Az@F33gk-Dm5s$c-JF-dHDeI&6eJe^^QJ^|X8509m+w^Jkf?XKn8D89T z#;0%(ed+rLJoxA5O2e*2S>dC19x4_(6PKd3jOTs>>cxS$EoH=4g}4vLS3h|G&3(Lq zh@_8)ZMDz)CBnjC7Pl1*)?(%q2L-@29avvg|DNZ|iQK`oY)XzT@xO6BEjJtpxH>Jf z%hzRm`l5`_KQC9uKPguypOxv!&&%}mr)6>R^KyOuSy_%>%M8yX(}nO#ev1PESJKH? z9cYbbu5I&TlXRysvVWp)547UiWAk-U(spa!_u@}v1Nfq9lL3`)<@u{#BG_GL+bobX zWmN`E+0R1{A0_m=>6{B%=sLq+8W>4GyFNg{b5i!5*NTH2!nocZiB0Wo`2BDW1MD0L z@0GIuNcgO59K2sP?|rL`9(_`Fo_x3LKmAVGf9G4};PFRg_rd#RbojLF-hWoM_a2tb z(Sdj$NKU@qxl!igVX3uyajo$liQwjd&lK#b(ny@wg&|a-e;6GXilhm(5%uRFWQ3q5 z`cj)xK6MfcAR1BK>cD%H0m>k@P`pA61Ut=;-)jQ$w5M-7aj`3d=lAcI^NC3C7XMVc zJRv7YaI9ByzLV23zdR|^i{oZ)Z^M%W?6B|H00%?2b495DUZB5QBy<77y{H$>l)}jTbj>IBPu%++Sq-6W17$jW3XVrmo|4b4Ce1tRQ#YL z`oKnTgybPT>7_Oh^XWtzjzPK6h5f(v@B9NkmRqjI({g-#-09Q{iWo@U8l==V-O4~1 zn;_*mUCUjnIL?P=3cW!S+IDLD7P#14%%L4(;pCX<-I!8IybX6pWu$qsf4Ecr)!+TT z^nI-j(Uso{g~!lY8vb>`fKHDd8v-f$xX(aL7WA%)@6uQLjhrj*0&-aRADV& zhm_N@d4ZN)GZ_T%2f7Y^K-SPdh!1_Lw4)K?M;Zto9Ce;g;8n;z1ACf5xA&{0;VB>d z_iYlx59R}z7`veDJ~Rh_Ouq>|XNm@ca0z4SXJ^<>3URLP> zGMus**5O6hmzH<*xm_$qL_8h;bCs?L3bZ%S#5 zLdQ`?8vm4EzroJKUn#0eoB%(2bu4#zQ~uI-A7SNXBAFJ|9vAc8-aGxXv8?fK-otYv z->N@&vowA4YQM>Nf@F&_p?M?_Z`-l2?)xoe#sQXr_P0B`O^C^tD`{UzjfDQ^?n9R$o@uZwi#^p@=8$K_5DLa}=$MbDn;E3+^ zP!}A-VYwdAhw>>?AAEK>4tsD@UHxIdvcnh$b<#lQt(2+0FVNQZ4hhg)v8ft<6?m+G z%1g!p($v!v-3$KUz1Mz$@%~nnn2nOA+()GJua*(WIJx)xnDwL7ty30 z5AwerBz@XN8a_1!O$^z2^;Wp%GbGX8FvgWOGpKD(*3F`fH*+`%L}{D}0MU zkIGL&s&Fe^+pe~YI^8}?K_!0&tQbi^ZMP1PHGRU0nQuL2yETW<@)+sbu)+$@#2L>< z&g4DwvTp9>v;8VYpN6{}lzCl~J@eime+RXkady0-x(#gWv~tuCu#DX7YVSi3ueKi$ zjHeiPLoZxB+y^t=hjHLYebBG+z|8YO2XSGC7Fo6G{I+MjBVdMf0T{uAn}Js1!E0eUj)`j|yTLC99?u@)p+0>_|DKYv;q(X0 zQSuyWEL=xr)(7P%$(b5RjT>)_lIO#p#Mw^@(Muw4tmaK|(os(UkU($0-BGT6nnT8? 
z+El*|YUQmygS~9-k%6!!rL{lJJ+2P}$X>E;V5SMVpI7(c_1 zY;A0p?TtbCFaO{FpYr7wFUupe2M}LDk-HP`s=I~> z+#~4^0#uMcjIoQ_?Jy!jrX#MpN?SC17xfu4P1FdP%FsjpXbUE+(9;-m?>)x6#y#%Y zo=&1Zs2SeHK|}Sg;V2T(Y53yQZLjM-KkhB)8d*)^KtT8g4$b?E0=jzyEv6pOi5NCghh{d(btq#61f6 z-R27b9L(s)nz4HusNWj=4wGhT9_8)D@^u@ zT-$-H)4Sr{R=Li1SotEA_b<_(_YXA=wue*6haZ0DeLk1HDb0(g3B2{q%gh^_LfPQP z_KtYr)AjspPGH#_NjBjPPpaaf@r?}EbG&S8d$8>X*wAF!uF*-$eBpo)&L5 zv@gS+c#f`pmY=7vkK=RzUkefPRT6wVhdJX~9?!8h*_sYiJ zqcS{rUWWJH^%n#79)4W*AARIQ0ecTWD5InI%ijL;vVZ?s+1-0mMuYpfe_esphyJ)l zJ`>;*u!{3(PMAPZd`4+L8Ox!~%0d?ib6uox5*6U26@ujFPitmO3L(b5=?DCHHw-sU zh{OboH-|aR!X@8^97Wj{9Tct*-|pWklH&NZ%5rg87UQ!rmobb_KPxAn|D?S5$sd<5 ze*90%tDpa~a`F1-<@)kObuVR5v$8k1DZ4VP137g*0i&puYo5&x59DL>3m=xj1;~L6 zvEfcxf@hV1)X~7p#X&Hzwu`o@56zHUAo--@SX7BK!O#(UQCeuqkX9b(Jo;BHQTO;@ zNEj7{^X%z6h4b3{-p#AyV=rV}u%Kfm9*`FZ{<#DxV=x%mx$%2)?l$Ekk)X14=1gAV z3tupZHw@c4r~n;yc z4+412IsuGdI^wSb)C+xe7w9dd9mC!lSVy$!7FqG4s1F*MR^XVoG&~7&cL<2R~2^mx8#|C2s=!@}`otOr?ATaHyP2BA(UyhSUl@$QIv={(x0t|?c)1hlkoL)ag~l=9O)#~HYF(Ig1}mGt5DHh2sxCa|6bAdr|;Z~i^w zrtVz`tft{oyf(Zo+d0sI^j%HzHmH7%vy(pA zzsgU0q1)>_fJeJ(9SODLqLAIrD8t89JBev zpLpZuTJD)V&TI1ndOWb^Q+Rp7P1xQvr7dfVX(a#EhZvd9;ye6hToh$nx37H?a$$!& zZc@wXE50%y9oY_oSIp5pS6zapQ{$lvd`4fUW^aZpe53#Q=HMx;Fl9=p>MEn5_Pg49 z{LR1iJASzTAN|MwX%q;gZrg_>?{F#Mske}fJgoQUQzb4BUYHMj834Rc`n3$_i}`i= zQr7`rUR;z{!q>X+ztGro7+@kB<)H>An)yv(O(eOyfoN@*HWdEOm&i(58{b*Yj}AVdT1N=5yg-E>=BxO|vhE{#w9hJUf+d9XI8f_I^qH`@+wA zP|?Jlw@~f1gS1IihQ0y&^*SDPvwrkZv7zBPAlW^Gk&QV(T6IjfR`sC)(vdU}2gB#a zev{AChc6;fo2uj9FQdDvKSRXvySc7*x)#u93#`-GCf3xfn|ZoNOF`y!Xzw75 zDqVgH{sh0Z{>nD|1O{&;C*UGm;Bn7+r_;PAnUh%z6q8ZJ5aSN>Te@-@_9Gp z7znV<+E?%w@!l5hhOUieshJgmj~EB19md#oVBRYa6_N)a*27gydQ^$NNe6O+kiC@s zkmjv@sEQH+%U46LWqpGX*wS8QOEv^%nt7W#?&L-HvVWY8sojHmZ<0FAye1Cv7CUXwb=c!7cum%Lc&#J! zejnTBo+AFc($XBsc!KFmE+14Nzxs86oB8y|zMC@JhWv3WFZ)PTLcb|j&L5-ajcXb+ zKe7~OzJb-~TrU0zdrkz5gWG{oB3H5kc@5ATaMgp*^c2oSd#d==&Aj~Q|Ji?0{_p?A ze_8hTx5}QbRT$)albDAi9LS-sn5%1@bwFP!<2uj=9~(SdHpjm%q`X1r7l&*Yy1xxc ze~qU)9?5sgNU~-BRyy^53qp3_&FkH5NPW>J+u8P*TX*>T+dv(#j>8-IdlS4}e>smb zyami9kag5+X%F@@YWFr&Ey+!7>PA1mko~-+|8=%kz7qAP&a*r z{fpwuA@X{o!E-#Mo0B^pSmc?XTzZjr!qV#l8_<7(BJ_%m8yR`lA{r4gP z|MIOooCQ!}@QUmJVaD$Q>h`>UeMq|Co7>+qmkK=2<9-4DbLZf&eE;KbmG6D;dn!4R zE|z}rk^4Ztha|ysZS8*opdYIkzmPGzyUo$M4l3{K*BlZGNRNuTD#l1e+cFvV<-iSBWbooqHatp=GK1(41!uf&N)qo@X-OcrMG= z$FF(GTx+%R{iOiS9X|a(8y}a;(=UaemoZ-mIRC87&VE*|FTO10@{3X?FH2FKvN%)Q za~bQUIN<95v2XId1?`LSo-WVZ@*ZS-l{>}(Y_Dg`vh$hF@&%&GPk(bSlvGc5y-KS+^|CzGiErY}N%IMw)W%%&pviIN<;iu)^lkb*IpKRgo zubAuxM_um6ibDaSwm?8#pE{G9rRv}E$aRmYW0wdp8-A;&?;DQ5s3FiFVgb8sx? 
zrXwL*9d)QS4x_o(a&8P|UE%=cQ9|L8v` zXX9mg`8pny(5DY-D72kO&*=I_3#Z3GHk)=Ss=<%#P7LE{)Nj~C<;{7)cRNOQ!7CfA zUPlRCUA2>m_&|~uw@p6(U&_(petG}Nlk(t`Z>jd!c0_&a&26La4lqoiW}N4oc}6}1 z^P}&d2g-r;+4BOv3Eo(Stzt9K7q;}=zj>&-P`te{8rMrp?@ zou+=!l`pI`voGgYdVuFfrRQ?zRr>r<9)?FeG5haETH<)XJQY&aLIaI znU=KZw!^%@(GnJUl~%}19pYgVp})QyJ$adQRpp6IXeDiK_Jv(YZ)wZQ>&pX!hoA+- z4Ftt$OIhV5`=xFC^h%)Xc1V*5vpq6Io1jA+$Z^ANk|)<`#$mm#>kDlEO$gkYGj9Oe zfb0GfcHazAR5Amg&n_M7Q$o`Jv&QUeJ#5PXxy-Qd3if$Kq<}mj>0})Xx375~!mKZ# zE(lHZmwmC8Y3Je--T)iWZ+-kpcU%wOH_=7h9A_w_%e{~qS6-F2kG>7<7y&68`B}yk zCGL44Y|HL~Ebpo?7n;0WZxc*kYm>Zm%?Ys0v7r?BXOFb;=Yqim>wJ*64n4_lFSG|Z zzr57`?ApJlcX)VM9zT9mc6WEnXm=-WZnLmTP2O9um(W^HIGgCAWVU)59Ei0|7X=(F zWkb)QQmu!yiLxG=Ww+B~2eN7Nc?agWdjqdcI(?_knX!ivb>hPoQDsl3{6)a~n&W-eA1oS$)kblzOR#@cqK)zk%&XCTYF zy3P$Cjl3x{koA2111cWvS3BwIDL=5K5qSxYS0B{Zvy>HEVD70seKy#ECumQrErYTj zEMy$)>P210k&ChrEb^32Al-%uuGf^^jV8I4cN?*V^fO=!nZ8|@eRbk`yI!C1a5Iqo zNA96rOF@m+c&+C&R|7GYp4*fY)-v?B+P1~Ik8_2tmCaN$1c&<;ej}E`r0^^WytB-~c_)dQnV;4MozgrF6QL(^qwSraI11zRDZ0o=!j}5QsfOU(dX*0XnrDHEpPQE%30FIm|xDX?~tj zJhIkt+!DOf-soI=9lzy2yDnpisGv;QKS6U#sP&NBa!4-{eldafVoXzf_gMENtDLRW z;hs*s(YWoyoAAV5hcehlMGkA*1#9(m>b>vN<^TY4rET2?S|^2s%xm)B)cltL>;!Za zVXDnDrqX>Ls$ZWAkw@4tg-wynJ%`$ae%TQRCfFWiMBNyyVGCVdXf$=$WBSJY8fLyj zwaUGyzhb8JD*e!J=5=K`H)LDFo5lTtGVV2wmUy8DpHh zuo2F<(-q8zWF*Az;)i)-nfo31XZWE{S3?(?TRbD<_s=&s;`tY=h!1S>+vnGOmVB#x z>!S~%13}kk^@r|r%@_W;013-gI;?U)0$^?8w}XD?FaLHaI}aty+&bFX*%1yq+0ga; zYUTp~tw~sag7_MM#wC(>!rRKe30Th+a2-4cU9pKM5!O9`_e{~ItHat>_l~x!YDYPi zx3JnC7}q!fk;fjJ{NRT?`XtWv8r8{L2j9Pe6~!rMjhjucJr2*c{v~W}4a?5nUfJfi z<#!Jh9+ctUQ5g)6%I4rua_o!9zS`9eYH>5_XGLcR=;Khq%@uCLi6t2!?*pbx(}!R%67ge4_feki}8^;~<_5ntdK zYmH`F6A5 zS>s$?h&G!e?3IfI>Ct-i7Xr4G&7mgEy@l|4N2a(ddix@OSO&X~%3$wl8Q%Lq;k#us z7dSps7Mnx{kWFd~0H224kl_$wKwF!f9?{@y;C<><_2ll z58%!9RE1I|id3Ds24+6tcOOp5?EFQ!JpNfZ{rpGeC7<#C@jowL{^*~T*FXE?a`xi$ zGC4mkH?y(Y&a}ys5o{02-e|Y%4R*`UmiWThcyPMp$)yydLBI*~8Ho+$gPRPD<8sD| z^E<#Xpn=L{d$K4XlRv?5p&^yQTI(NFybIRL_9YNYG9f;ggPf= zl>9cW!bIpFjuR7P_Du?hVSn_Itlq&M(I0;&NK9rt`A1s|l?QF`qHc zK4Y9W%3PtAGT9)8!vkOV&?2V=3jVRx?29xTbvweUB6#vGBn%#%pc`5+5d>`9)1gw_ z9Rg4~{QHNCX_-$il;)x%`1m*f&A%`D%koN9wnnmqEwxuYzH+g= zo~RNV2B93oK)etJ)V@^@`o56CDfmqooTxgl81pjPc4+xTi#iQhMSN{TKo9x044)dI z9XzvP8N!EFnP2;j-zsIG1%aP8kk6RUG;1^ma9FAPPVaJNAUZV)u({27@ieX0Y27lBPfRS@R6^9nRFB4tIIb`T8ioLEFUo zn>;IZG@ik0jSGEx3_HPRYFs2j1<^n!s_S_LEzPU$;@Dwe+yw7^S76ZZ<%BQAA!Qxc zeCP|>(~POdgd7tPvx&I)b{zhr4&3-69X=}^>KLgb?DAAMf726t4BCu;)Q5%-OTede z$ZXo3Y#QmyYD$e)~7P3jpRz z*`lhlH(Pj~2TfXY0SIq7rae-pj%lu!)U$E-U^>HrD+7Q|*7oeogZQdNXbYkIQT^v+0pH z8jZ@%a3?N=wKi~Ij$abu++2JjuinIzrdV1SH`A3qI6)5$Y+thwvR7X@PmE z>C7{*rb5f5J$-mtska8fze4tYHmC~9*YLV6m~;3xs-8%=ziM;tADN6MCEnIhZIQbl zL*_mRKa3xFQMZL`4ZU{5w&@lPwS7E%<=@#EmQA$4;+VKePiS5pl= zQK(~tw~O%!PWadBlX&F$&9I92hW0@?To5LOT-S1bxqtW3%BkAqt5Uljl6M=BDKPzx z-=cAQ#cR9NJ9!CC(|6_0SY@YO=SLK`?21z>b(M7a+-Kf5$}w^~%2lyL`c!{zpSo8h zcnp1ed}Rmz?4{dnMZD6b9ckWNvXysbl^5v&%(-=2w~4h+zCOTOgfT!-^}P?M`T=Yo z@Tt(Nocyi#MvU#2u2EQf_^c@Q!1W7zp%I^1m0yxw<6-A&&l~;cLO$hxrt4>Z)BlRw zP+^kNjl%7rLT#j2o7LQ0sem_|d8;3m*oRRU%%A3DtuP9_&WR}3cz;LxGc|%Hx)pJLlN8xsy*e(V0dG$Su;*n5^>(PjbjbAyVFg%BT!F0r zCO{J{CXuy#?3Zz}amX&vsX??!TWK{DMb>%D*n7>__&eV|TjZ1nqEp}Kr(D4On%GW= zu5_JyM3q#ePTV`m>lo)%x}<$QqvpPm>~!R3S;oOt=xL}2gOWQx<&^ zs;-7|Yg^-u{6hN2cW@v#!LnnEHI9v8Z(L^qy-y9WzN|y^8ln5P+~SA!o@+|MCUM^{ z6hfbndIZ&;6Fhc69RvHNo2h7P|F^+B)HTG-Rhel|czk+dIr#eKfe!$L{EP>7yp5W~ zwjVy^JxH*ES*d#dY7js0B(#p+0=LSU{4IWC2)&7kHz2gzeiB=tUd#nmY7qNK8RI@o z-(TisdDv3lR(~T9WL{s^(d!VJK-cK)(8y!k|4QiVm@oAgV^U>lAm`IMUE6pSD%PIr zSKa_wKi9fg@9vh*^BEtTatWO_-(Z!s*6Dc5`nPmkF~_GJHzW5r7%%1rb>9x_e6#L> 
zw6~m^)WK6?4>VmDyWJmHHitl!Q+jRLY7c7>r}67lw=N{N z*KK8Ic{I?UprL}0MdjVNX)fR+0pFLZ{+$ugrlefYSH~K8c(IkA0_g9#mGa>JBd?5r z?@x&q*!gRZWebc|i_iks^M6sei^eLQRLz|rez_m?cmCiHH8#_7v*3G0l1Vy2{!I>f zUeBy+bbfVpAziT}*Lo|tuRS0d2g)7)1tHat_%)%MV}@SdSW5%DGz&%EVS&wIXJkRre zInj9b^gUIiB-Xd>!JZcV9cAr`*MqXT%|rZsZ9DIk!RUS&jvkclod=4)QwBSa6h2n^ zvGxg1%GTha@Il#7oz1Ny>E%!|a43M^vft5ucTarmsB9#g=Aghxx*@K8r#8id&7tCT zufj+Fw|I7}wDuR_$mC*efp~#yhN)kevV^Her$uUnvO61 z69HGJFUr~LpOmwge_XDP|5=%y{%Kj9|7lrX{IrzG7c$3}QW#$d;L8Bmt>&m~IMy)v zHH9~PA=G0TK2CzFj^=wE4#p{b0KDT5n9H@j?HSbgU^;4K_odATGV@bWwkxiiRDr|4 zOX-2Hgx!cHp)m&A_hi8h`7~b_(vA9$>s!UelkLFtgwpTL=16(F>R;o$rSaiF$JU`L z3I~T`$%2T0jDtbhkfHctzXvM@kB9yqP;L@?B1j}0q}nkcOWx+^&GH2g{Uzj*Uj~`0 zjs|1N@5Nr0`Rv>e`^Q(W%J`h$arms9zWix9`SK^___H7Tt^U_v{6u-5m$5c)%L%{T zFe^LSpdRv@{kx-bC_LI5mV=$`vabu7-JM|>NO4T-dLG4PpnobF%$>wlw6r<>*Uluz@@s6~ed8D6+l z*AqvE=nD*mT%^J`vhpd9#Ud_lLlz;6pIZB2Gg<1B&=~o{5ZmZ@&??n^yE;zTJrkn3ilzQS6eRR?2F9--xPl0LeW6lmru)^+t_*mhq|I4cU|P*dSnk z!pN(2z6x0!lcxUo6TTXoP&fJr1Ux34K9DC8AydY~_as9kGv4sRX54mxOg0WywUu%0}n z94qtEdX6~t31j5n4sOFZL9*%#u=%b5Rg8^1paFlI{3=XL`^h#fbadA8e0%J`y>%GC%YE^p*Gp|(IZj6(xXi=Gb?D2(wC z$Yt&A@A{_b;f^*)6u=ly4+n^|)#u z!DqH_*SsYgQtR4T&SOuY#u6+xv+)H0Kxu89Y=c5(^?b4@i+Nn=4M)3WXJ^;(eSZ6G zFZ8x|urD3%D4()jKd@z8Lx-&SJnZLp;pVy+<^2bg_YK{3kA? zKePu(>zywI*agjm{A9qXOzmX4C@(K2<%{zR9||~~%#9}#9)$24$=Io=sXz9(3y8^f z7V3e+$ukb)V8g1Ddm)ce4ROrAP*byj;6|&QLw~&~=YZ-_3TC;N)gTjC%kv`L_Epnu z88$?H^Crypw$C>}+9Tl`ZL^lOwLPSCv`4q=^;hWtwiz2t zfYTKsUX2TQgI*XpNI!FNY1dwE-q?AxS{$ zrUk|pznAn@?XN`_cR+2>pVk+~0TD3e3-nJ4)qXj|)uCGV{i?1)`%vpL(k@f`(4hi! z(&)u&4|*X_K0U8}(}zU}EocJO@3fG#RfSS`pjp;5A9WvLei#d{ckE@jNf&mu4GmarTAdtJo`&ZGMHZ&+K{bz+y>LOMh^5P@X({r@Z(4QTg4!_3xFNseJ9pE6c;r zL81fVz_#!fy#J+KnEMGZ=+V|y zO|PW`kanJw5%mN5xNJ?);7{u-&*Qiciv7adnpSDX^#xz+3`n{fIj^5XjpaxL_vHfj z6+cpu&=2;7+eg|vJSba(N7_F;?%o_9>^&=^{ikJk_`Hk`o|e)5cZJWL?}Gvy4jA6o zR2Ge`1L=gX101N!J>ibl6AD651AD8LnXR_0m!WYKn;)QrUE;oEF;O5-8d3{ng78$=(oUaF5%6_l3 zc|JF8@z>X6myeS6Jt}&+wq;v-6S`BSbzJ(s2 zsP_QTLdIvur-{z#gutuU;9#E+DR@$);b@SbnA zhl7$*Z>B~**}wx!&S59|(oK`cr*$^O@8(Pwten`G8acQS6PR-9$K>iRHo0)%BBGm6 zEL`X>PKS6?QNy?*VOOfWYw@~r+LQ}P4Zixoc}OtjiDq@OK=iC!^%Zq%2Eb<}7_Yjh zEEbFMyMOgx@u}XiPnL7RC%uS2Zt75XE}iK*&OAAP!D0t5jCmLdpYP}lQ_bkxs5}jT zLyQlIH=qN{mQXg}iv^E|5Vg2oO{-aA6rdX%0FxY@!B=f0pbdIjfmDit$}{KmStDHIR{TT>7xse z#r#6^;j-+EL|=COkN?RZmA!*SnJy+}Ea!2(IrW%fkA&FPwc;B-O(d-_x5Oj1oicWy zG4v^B$r`rB_@S>}Ntlyc(AJAiZd?@h)s$4ZgyZJq4Zlw#eJqx8x1(LCmk*!4TYl#c z|Bif^WS@#h+k1>xIB3PC3oOcmAcvI>oiVgilS3ws2;4|XS!`#Z@~|<>ps<@iT-iaV zN&Bk<)M+t+`Pm|?fS>#!Fnuzn3VpIG`~YabV61v(&NX3Pyc-D9N7VL_*Px)g_!SV( zn|+$Hk`uXfQDEO7JraT%pRa|RoP2Lkel*@F=(oz2S>4#zvnC zSJHsu_#EFv!8mIjql&$Gx77$4Sces}9t;_{-kJo{bEIS}2&SVladZ=-RL?4(zKhqEh4 zgSARk@9WuCEAtfJ1`@aLWNt*cZ8zo!2mUulXvsKdmxZ zK4j7+lC|*u{rlwz_pi#auBd@(Z4dD6vbSnWfO%aTzq@tWgXe$cJ?hr6W6We9UEKZG zI{0Oap+DwFkfC{mFVfZSEBXio^ILm3Qr4mfS;Y$3JF#|bXuTWxcQS#Pv-tpiTbnW- z6u&q=QTuDHVQk9du;JeRFi;BH*@4D(8SQMB!EjT!t+k9B$=lkf#|=x54f964EoW?i z7iD>^{g>vRPupTMp3}@l)kSXP2e`2*o?|?F@cPZVt91(ff&3O%>L5Cm>YT7{RXfIo_uNe^bc^fOx4dd7} zwzxIUvjlEBeD8Z7=;CcqPPG3Bz(xXz`(RjYl~8_JeN&ir;j|I(jz7W2?~444Nk%3a6%H}M8f)n1{QV`OadSCH|uPfh<5 z{CsVw?cA1;TIpvUxF!hM6WA+sYaa8UB65X}V;nlDQ`b9!`Q|; zlGm9xx(4mmO4h^Vtwk9aqvACLB#-&0YlV(ZTPE!4Q%{2zZ&T2J(7U;g`E5Na>}3@> z+W~-1B-Nd$xwwMNg?>KtSo0yr{1ZAVyGiV>i#=CV)3y@)Zpuo#DEr}pB5I(I?5ISZ&wM-M+H z8GH~TbQqBQ_TvChhk57UlWfpGBe(`h56o*~beZz>_J`bwqL04SzLSDHVQ*jY1(g=V zAFM*x=Nck`=l&YR7}ijjJh&aaFzk)=jCjn^=v*Zucou|YCJEe^QGVXpU9ro zgK2==BbYBZivTkhR|RGvM5uYCN$$K|~b-Y6@_Z8V8*3gUj$_QXHyuj4gZRc*r z$P4|4bnG3xXW*+1Ui+w-brjPdupbY_>-a=GY?Zgy(>C)u*!wQ&(R*L*XX1fBNSb{` 
zr2$fpx!>(`+BW!G`leq{BxOH*sv2IG%lP`%VmXnWW0Ua(0P0bneF%pz&69%At@!p_ zfkv#GE+@}&&|yW#nPZeeC;bOLsmcvK&x8Q_xeM%56(Y0cliK-;7yG|W?Ijj7S-M!c zoZyd3Ilnko<(XP2v&x^Q06)6KQ69Ww%V5Z{3bnyq;5`?7Lx{P79N^c5>~|f%9)u^k zqrbY|b!ck^iLaFj(Pz}FVBgswbI6e6*``xwFzaT2iNJaunDl5{HUoRChcEqMK{Hj=o)Th*U+R*`f^=PeNKLH{&pHU&{sz_H!}5PENP7WWg(ZFk zc>U4tUma?8Ig9bLsc>rh#y~?ZV`fKjqoAY+Q;(P&MAly38 z29!7WN5WloCFJ+zjl}tc{5?(m2f93asBMmL@JM8ys_0qS+hz~Vi zcmbXLY3zTvZ^^!fd$Y9L)HST<+cNe%2K47?i_g3%L+zz_;eK2C-5nr4? zJN-$yIQelIpZ&;R30R*0s9X!n#ZR<>_;J}7|G3;-{j_XMzbMyNpUHGzl~(Eow=Q|Prxk0V=BvuN)@DD?mu@%Nt8!wQy(($m z(qhtbVZnoTKgi$GgpeiUG&gyWSwGibbWy|aEnF`V^vb_)z8GJY<<(i4Umcg}Wgx9@>=!zeT7LGi1zMqyBv;o%Dsc#aDoivBVRA*)98c0HLXc4 zPMt?zeBSNh(?{jQj~|v-+V`RxE*`jvpKsT1@kIyyLq6-3cDx0qep-JmS+EnzqX3fXhU41o3WeCgoc{_-^^f z|NVbbri*d;@4Y>ez~d(L#j+%{Ow5g_<5Kl!TuO%OiJ`}G!zfRbYD3gp>5F`~g>U71p;EsU;=uv=*&xd>D78_(+rX=! z*L{?C*tE}0T+jju@R=&h_Ejb)T^88@n>2v=cNfrA*WDdJS={WceSZz;yI*h9rsWr? z*v=EhJC#)EWVwUMoa^{0RWQa4qKe9L)l;)afH1={=K?pnTWUgmtQNOiY|+DvI~9NarB_wUCIbbKGz zWx#@qJG_}PqaXFlrmO0ZJVWwF@(!EH`U$30(dn9JzTW_*4Pdj`Z>`V8ZqQ?6*wZ&O zaj)~@g$uOtc&t4oKLHTnH9`xQ-{OZ|DR(};Dwh|Na&d7{j$ghiUwrXJIXO8gmzU>m zKc0>KmNxoH8CeGh94@GzoZZ{o^H*dDcMtfDxSjY)Kxc=mx*^S%aMHx#y?c<)7z$e3 zqF%Idk>lP!f=c^Xz=imp$v$W5e5P@miPlor4A;`ZjpU-A+~4HBWGO=i zX-j^Lp=9YgI|e6#B2Jl z)~PG^UFqLthbgsX6qBtM@&GA$eu!Jcq>*LW#`U42ISV}=c8C1z%T-P*;lagL+JGr`H;sC#Z^W=?ZC!1Xlro&N~f)#@!F=#S0wcIP2}wR2huLku@PQs zx&`x6%UY5s zeSAI0_0b@jT$h+1=(~>?!w5TN)H38Jk2y-*S+WV}kgY$rFeW+2eTI>8zY+4Lg}#m-9s_ zm+EpX8a#gVeAE2%I>;rAE}auuoA(Y5{9EPhldeSX=;)w){PD--;Qk>R(7ynFdlzI4 zzQY7MeWZOXuBU#JePfX|mYnK$gy_-bVODt@-9+p|bs_I`X7ih3=upGa^YlE=6L z*cLqJzdw7(KEh_gMi}?b9t`9QQF_K@Tj6_@R6h0Xe?YRrIT&uqAzSp^hCVi&?P^8B z__{>ox8Q5pCZC|qquY$fb#LVn=;Q(5fw7ZON$r-G#>x-5qAvIU(hsp;G-}|u(H!^u z@_tHOPXT1phA)ExUi(oSb#u-0Sc{gM*1TZeQQU3x8to$^t}}ZePkm%w*H1v|!ZKAE zNG)_t#(k)Xu04jl^*7ip5AW}ngT4Lo{L!OwpgHyQ>GSgFA-{t>^xRt18|%nS`*wU2 zGAsIQ`MeFt;d^rx6QFSyt^@KUz6o0T>*#XYXI|24>by0-qDRqdVdsj5?T>a@FG-Cq0q|NdjncfEU^b1H@UIL?`S_G6AQ=9ptP?|k2N zt#H+I{B+{UZnn?cal+{+){M5*Ghx`S@}H|SXQ*xuH(L! 
z*F3~8(k$b(?W(-mFqPr+rGs*%j@pBrHjc(=uEWpyPCmSrI54k0U+&}6HR~>D6?mP3 zE|V^^?|hu>A?11Ql@ArMp#_K18XH@D+BCgwomCq41c?%^bk#8J_yI5DYsA^e$Pe#m zYh`UY?zrs^?-Fv$imM@#1}7ta4Bs}eV0zi7jhHY>Hp)P^y78F<4g2S0a?klI=c3!`?Sn) z@C1-%-yKGOS1wZja-8P4$+1)LX&vQfBj|_gWO)F&@iG#+HIn}GQ-l0mH`iSJbv6|> z4xx5kbVv1|%@Iy^>WO0S0Yx9#QfUHSXe)0*I7Fiy<=yhBiEIiU%x@_WX!Fdh*&-r z(K`~8-g-=y*0fGu(VBZnYvrDBE=%{c77f1YlWsiWA-K}`qJY{vQ2so=U*1^KI9`f5 ztq1n0&&39Q0*mL$@)_ex%lwRKCpt@xr${-o6w&TbE<&%7xh6xELFk zFT|BA&&HKYXJc#Qnb_Ty+c+0{8_!1n%J~>2E40u%CVSQ#Nc``CHTR9J!ofKiUEFSMO>1rI0hZ%qih|3 z(*c}uIifgU9!LSvo^-)JfS=(M7Lb#14pPOCgAUP2GD0qBT)#|m{yaz) zu|f4@%K_2U**|=+ZC`QYh9(WhL_xeG_i{f7A>|Z=u_1xLH3X2pw3VU_mln1^>tU^tH!Q>dN%(#1>(~jV$fx3)sLXi~7YWrY4kXJ)CjA>4$Es&4tolv3+*dRnm%^ z0otqJu*zf_^tNcAt_NzSr(7h{7CQiWt`2YTN&`?hhEFO4?!iBDZ_@>CH-s%COc0Dj6k%VrVtQ=o>kU)p_;d3h0G+9pJI3#ZP&AQ6n0vYtj_VzLS4Fr~Di} zs@tR`9!bLa%nweVkb+EV8L>e>qE@okoh25Di!tgCs#Nh77cc48<*oR=ijzo;d0EB@;Jw*>DL0!ogUBWrkB8qD>%@*Y)?to#fPrvoFp#n zQo=xns|`v6hr_THMK{#)*8=1*#ffdm-$kC*Nzy9)cuA|*ql_u@lzEn+vYQKigj>#O zIP(o}w1bZZ!K22c=)vV0k zZP*m5F1Pt^^Otm9)0Orb*8t&me2P2!7N1ycXWACDSk88Yi&Jh5SAZ>|*;3-E@pSAH zaLFTYsHd+ucDsSnZsQX7tjls$_Y#_XlSO`O9YB7I-dV^sL@8V4=dqRZ5kU3CEg#wl zm+|K>BcS*2?Ud=bnewN&ilaNVemd2lY^AfLr7b96VXBUT)p>m2_Q6lPB`#O{C)-Y$ z(_mZv#l3Rof?W8`|-~NGS^Q zkevFG@0bJAPDgXqynn-gsGMExdpGvRacOrLm-jRujmB|V`}a-FSGzKqeC4K(6Yin$ zx110&2huJ+FGTRQLDnvO4e9d6M(pej;>3v)ar(xa;)Qp|_ zktKSHnqCJ)kX@~RHJ;}M6-@&o9#>Hv%2!8}@d31T&@w1{A@8lE>&kW9(vSen6L!+B z%%Re?Iu6jsw9^GQc~uWvdI_*bnwBkPaOj}=V1IuoC!JH6v4=7xruF=s&Thwj7oo<$6L* zmT)_bhdAb^IMQ%6Zr)o^^Kr&wX6V1#KKvm6PPWjyNxDetoK6*|jB ze6;1qLnwa@PW^_PD~E1XhSUWg%~UZq`q;RHx?VHADtj$=7-r-Nla*0H!^eyd*nSjf z@^kPgjxLBVt);!DHC~k`otw@DDA&kEkZUaQQygO~(}Wj?gxsI-pKamOe7dRZuLqgm zmL0AFdDpAoO6QEq4qoKUKr)eM$8y+4wS}KHGi@ip1LS;&EPf=jYTJS7Z|lo1X^yLi zoytzXs4{hY9Yxxdarx9ihQ^I9Tc6QAWQ4uUcCoF@=l*T;Vgw&Ktb)`pe05ho^G_@)+{v{N{hIVuU+y{DzrHwHe6i-78Fz-+ zIAh|PpgyHnPyvcJbezw`o0N#tkmkKh*c)Ol+z6q~k zs2*LXy0l?3r=&IDvBO@Num1VD0XU0&D)W`o@U?C|Sk&6Jqq(=Us_;rgZ#|-WB4X)O z#OmodSUVl_M{kJ5qc=w9*e%gHeoOR@-V)s-XQFrFhFDs^F_wf)vAOY7 zY;if@qWtrZ#>Tma(TJ_bVzPTy^4^Gr zgI&pbTj5Q)ZOLy>>u*u0!G@dTt5l+x_27c^7Mqs3Ne899vd+mr;3XIJrZ1L|FTJ(S ziC1jqm}mIqGG~Q_w|(>1e@#sKx5Q)+bK{{(YT)kg#ldKxO^L#zy%_Fp#dzm(40bO? 
zfBRw#w=cz50^uP)`P5yMz`9b(j)=$u-UYj!l#vJi^B7e*nOo_HHX-4)Kg#>$<|`xf*2tlc`QcVHttMMS~YmGNe{Wk4zBM0f4}&@&X`r15;oSQdi)& z$3P=IsJ}n+Q)w{$@|hU2P;q#Q(>dS(mD0HZ)fAEQ^r8iDE?2;x4qmYx4%6xKC7D(^ znngJ{*j5+;V2rdNICAV{Jo@NU@!sG5qqynBx)#ShbvnjJ1A&LOQh)`?Kjo{gRvNVU zw^x+_jqu|eC2E7svES^1XIiEX{l(%LS+GbVQQ!rKjEjvKMn=h&0NJK7eWf@^P5Pzw zwfRt5(vj^|+*5$>l1Ckksr4e|Uk}LKGI5wACVXDdm25YqJm^e)T8IiKzV2^* zy#`Y^h8xcawdA!37Zoq<({pU3(ZKSMod<^=l-$-4UdaynMjBJJ#iC*`*wxtJ_fIBq z?DUzq>+ZYb*M8|2VsWV(JKH1Eu)D{mjNdm|jv>DpB>FgbDWyI-w zwdo58cJu0^TpCf1s&-<5bo!||1owhN>0~l?D6a*}KED;B7;g&RbmQr`?WR-lcfRp& z$%sd6UV26bawOeDu>oqO!~76p<1A7|pQx3O^w0|?EIwJvjKz&K76{Nm0fwqsLVoT~ zFj|9D4zB4d6;eK3hVgUQWE8#dxS&RjPaV+2jTf6lxeVJEu{EALHFZ#+Ogk@wjMLn@ zP7;zfhJ^$NHbn@^D#p6dOuE~W9kM-g`<2ebQ`^;_RvdkkuQp6GI2~7A7AIOvyNzZd zR-C$&$L`W5_-L1O^`Y^oFOqjvF&#<+C;teOj?PmSN1A;8=0D{@i|lch&332d6OZ2- z1uuoDU-=;&oXCxTBHv$Qu}?i_4%#U7pqqmb^n!MaL*_UxFJYe&FN=KDLk6@_T)x2y zvBcp!`vK~WkESd~dY(KADXGZb3Jaeqiw&A?#_@#%#dey)B@G8Q>f;}H<)7d(LdkRj zdML?Y;jy5@F?1+yd#41(43OCIm1ZL+!j#}CDqAEvbt*#|;>kSvSibe6RjxWOLJCc; ztAFlK<|fMEUn6c`_2?7iCzpm>=_W4eK%~i|{vbb>fsboA+QrULiKLR2izMnN?-osi ziv_vbh7M)$6Gwbnd_n7!?d@IvZq$GL-QSLnKk`hhX#L1i0Q=AHpE8f~N&D2LoVS>t z+aQZ+K8;_%CL$r{gX)|j4_j?=Op8;CFTnN-KWsQnfBe9}{17hb$WCK{c~jIXjW#p~ zXu*fPJ6#^~3l}zDwC8Q}#KC-U;`p)nQ*ZgAxcvn$Fz$)wDeWMvo7|55#A6S~Z~gjv z;?`SkjT_F~90Sd1W=mzq99~L=nftii&74smx|&Y4&s*lU{bl_D-IS*Y^92Pxo}EDz z2}E_EeAvOT&|}yQJ~WBPo9!k=cocj8R9LLJhbT{xnb{vwnq&Lq2*#NR8Kp zJ!;4NjnA6;6OW#a-~HX+i3dOacs%y#qge*ND5pBuS7op!4=n%8MV8ryRE+2 z9PGzte-zvOLG-n8>vQo$eaM@fTrA+NP3{QuR_8?c_eCde%vwacl!2E)g1lU5MGe!w0!}49FlrhM*76GB_K$Zb0 z2VN2~(RrqmF!r9DLZ9@`I?g}xDARJRU#M^T>4SA&yr__%dkOlP`YnQQp!T(Pzmq>zKXLJa@_682PN?WjR>*5qA#UX z+eIJ9pYm@YyWnz~`r@ z^QVz7iOMf|6=<8p%;paS6IVUqrtPd-9P1n6)tQg1laBUk_*!mCzhNetWh%7m=`a1G zwy2V>&<&B~$@4zS{?!ioc^oV3feVh}(+0AydIf-=wSxCefIaaKJ^C-{WIlD&FJ!x@ z@t5M1Qhtso(V+ceEl05gMQTOl-uP|E`ktT6hodD-gaBhJGelMXC5~PwMh>5F2Y0S_ z08Ok4MLfg|CWP3FBoBo>$?`Y24)tX@-q|8f*#B-Z)(UwJ_xOpp3*=IW7 zAbfRYC5|3F68siFZz=OTt;;LxPCKIbz6d>c?wNS-p$B7kciTVZ1B3iLQJqpMv_(sv z>2N>I`*+TJ{A81;>}Y+%dWy9VZ%wk+p*(w~deM@vLGc^L{QPErccAs&&2jpM6Y+b$ z|DR)T(0VzjgbYNkO1PF^p$ew@m7$sR*=o-PGbp^zMkj;>d8Cdwpfbs8>bXMne-R@3 zhr%MqW?@>K7UTrcH-o>f~ikGRH_Un*2JYF>c1iZQffrd z#FN3|pdD8H*NNsuQS2|)sp&&EocvU%!KS{vnxFD)#Vs24zr?YV%2&M_PpE#@ZG0tX zeZ~oC1&8~B%)|wNjycnoRsHU`4J&%!KQ8Ay>Y#kJY{6Sm<~hYQZoO#MK$KRB;>l~4 zO^c&=p{J%BNTWG|N~3gbzs4bv7|npUEpKuTLpu);hO6pK!Dm5kPbFPuim%3J>l2>F zvpjQgmZLvN6;5Pe{PNr{rp7X^@w5}*k{+l(%|B(bw~UuKlM_{_oAM|)tpvmXD}=zg z#m&NO`fQmgpNVgl@9M)8Ve#uCMC9@EQURCCfa5^cMNWjp8%#@1aBPt@Wf4Y^p`dtX z7nMQQPOIzR!iSSA2Bjf7k6lvL500lkAXZDbJj|W&Kv?ClpSc#3-Q=X^|Noy_X-!o^QiJyJvFKI0`#m{p9=2L&wsI>jbC_W68@CwwH1DE@uV1o&#;j#|8 z=d(k#U&m3NUnh>#H96Kt_~0dvE-%f}*KR`2dcj`b^;1V(B~fSH0?3B*Q?JJ)2wP#7 zDf72 zeye=X&usa*^|^z-rf7bSfeS1=yV9D#rz{9azUd|`u>Ol5>$>z894VX8X=7e%)H&(1-_~D}TZ!l{#T-uIx$bH#EFZC3)H=DdycXTn zBhg(u7M-Od(OEsJHOiXu*IaKwZk~$=i@f~I+2>-07rLUl;|s6zi|S{|kISxpafq=Q zUCVS%c?+V0pXK63Xnvl9{EmE{oic}^yX@VR<6o)tvv09784u!MFpvl(Ypvb)HZR3N ze<#M<+e+GsNxvVX-K`jIZ^U44GxqyeVz_%*>+LJ*o^4@MpYRNxpEVv0WiQlse5ID> z^L!zIuL7uD`j9V+A4Fett3SAC13l6i&#@YZ{+f$0F-N0IQXj>k9paS{b7vRc_Dx^^ zH3!4dEpA{m5J8(U+~afp=SAfC*x5O!@Ut=OpO49CQ;hA%M)xI{K`g2P-r(;DF~~tI zIfzJw-LRU2ShYr{dtgW#aw&n&;jz+C1lJ(!NR}mIj+@JrPsK~*8O|H*Wzg|fZ%wBx zLLpr6d7&d?Z)Hg8ild!&T+Wk{>Y>HnV3Q8*QVtJQTrCSt0>3c>&|bP645a7aG5ey2 z$FdAAhq?V`8iX7ACVZ-u8#bgX?%Q#+E6<}UdBf4M6XjuqCR%j!aJjp_94lI||MZXl zbS$l?+`=f9wGrfZd3gXm)<#Y`?ptU+v>y+xJSg{A%7#heMl!#T1HmlN>92`=Zg_D! 
z4W80)l1A^Qd~y*Z%GB(k2Osb+25jJ<-$o1Cx%?r+Q*e^!-;uHOjF%AgTNARS75Wrg8H=Gpk1pv~B6IK~6 z4RENqPfyb4T5P~IeE_f7Ju2VQXAoC(nl^;H|R?Mx###`ymOg?zkcdK!*C$IWtT-+==7P@sak zOnx25t88uK;T@mWEMMWB*L-4KNWE}-q?;D%r{e)W;PEU1_&t4>;Zr^=4m5ra_DMrp z)S(#vq~WGB`!DH{gAeE}v{R10OWyEPAED8cxNNOAv;Gatl)VI%{U&^jY4dKp{7F8r zOW1D%wvKADkvVSx(5rfQ&`olaC%|^f=;SSKwSXO`oM^FZFqSy@&_5+6&_2oVq4uB! zIXLdTs+Soz$yw2Vs{(s2k+WHopZ!CY#vT2&fKMOFDeUpWTntYX6TJwK@trPdHtkBM zAFaQhKU0shZlQ^bKNwx)Y+i-b{4%FNl4y{O9mluJwla;d`hdwyZTO))ZGhbU4sFDZ zSaq;XIMw&q8OK;Qg#{gRGoQTg_xH4z>&3$lJ{JG!eIJRQ>hF)$FJm5fiVry!kF=AQ zy#NH5&f`?=s!{nGqg6i$i+=Nk@4m@-$1FRP{N>|L=u>{BnSMr*;F~ew5^B%(N@2+W zj*VY3Meni4`CFc{hEJmW#8ZOr{cC3+(n_-AMB`4WK*34oRZo27~xcQbFG-vHZf3T;L+IWauHc!yPc;WXLu@Tt9$-dG>uNTox z|DZ>3%)Bq>GU}_TVf8xc!(JeJ*?r5;GDco7uQbM#va($;8nu?2I>0-uTi_s_Xg5#a*9&Q>c6* zFT$#G2ns(=uX#01&WNd(I_?J^Z4?w!HpO`3fcW`ymzszF03UYZ5+ZZ=yf#4lL-7VG zP{Wfd!CTsIA-7!dH71$M*z9zc;>3yL{_0e}KZr4x6=_?w)#Isyhzu?zBf3w^$}Hk` zR@p9iR5gU-)6p zmzRfH(8xLo+ROaM?{PAIHSdxKC+XC_@cWa2rH72q>@&{PmQgZ~5yq}3UDyWG1 z#ICD=4-i=ss8V8eC6~=U7yfDojiMlj9`_S{rm%kpzJyIPdHBeUibW=NglC*(hO1?+ zQzvLc zejSI_!^E5GJ=&%mV-lR`uXJ*vy_SEQ7PqAAl&$9v!2#(~WxYFok0zd}onn;Mqyy7IkW zqNM(c{ss0g*g%ghjYQ!jFZG&2**BJ@d|$XPa)p<~B-)S#vXR{L3r}+F+f{B}wVfVeoHhb zJMG1}?8zH?yfWoi+IS;qA%9Lr1GcaA$j)#swuY0~9xJT)p73$h=67*a%@^s5gDOvM z^tTesr^Y8LhN@ZJR48=->LOOxR^p~Jr{Ysz`Vvh^ofvXpsD8P!aVeg9{E2w%$*1DV z`HQi$v+bXzVlAg>OmGk*iI{mBbRO0%fD8@*S*oBHD=WQNT3(2?<14YU)QQcFJ)a$P z7WbpKq@AlTR?f$Wp9mNZqT4x$<+YCTHODG#X{{T}D)V37_eWR_IHMIg5r9rPjLh3W z864>~uco!@Z-HPPd7o$F8UNu~4tZCUBI`$FT)Hb>3!pgPas5G?DH zI^NJv&FvPhv{J&hj`P16&*(X4AIf*Rq@SQ2Wzj*$p-BXKtPp3cq))jz4jALuk-9H~ z7w9oAb!^>_*LpbErj2~)53T4nbyQBc7z6Xnot#P02KGh#vW6II&9|~Rj(hGt6L;Qv zvtQb}cx6j#;ZeN*k3SZVpWpD1S?Fk7^JbYE8?)Hgc<|Stv`_MIK471ionYGmPsEgG z8mp~U#Q&60=O5&VuIAa9%9wBH7mf6X#uIy${K^UIRq%jV+Bo4U(!R#Ua=hpTcgLNd za##F|AO4ZJ;l^9zj=NqIFZh&u;@(etb-d!f&x!qu&qRM~BX)PSzbjudNPC9vNvr)u zNQ6ora#9pMP`mU;_6dpdy{=GwXrO$xXUNwMeWD*ToO2A$vzyYflLDVV*-Mg-VXN|Y z|5F7VqJ>3PncEFM$Dz;QLA+Kz(Kp9SyOuX|82o~tbt?JQXUwE5OpEHcdypdmuLBA# z?kn&TCrrLMw+}rj(@GEVbk+(B5?bUl??T(0=26R7;f2%{09>pwO*b^lekL!Hx0>|1 zGi|VbU=59Zu~TL1jswDj{)%4f1L@FPee_fwp~^;)TcxjY=7)w;R)_RQF6|Iw8iXKg z;i2D*ADsB;kJdkqV2_2%x&d89KCUmQ?U%0*xu1~9KK7orC;MnFBthT2f* zvR@r>95mqyPnMs;mc5+&AK6K7$WmSRm*mgdgckuwW6RO;9CHxIj`2m6z4+YEer0^! 
zH~mx9Js+d(4XszMXkMAb-uB3Ld2wM~^e@Fg8}+gJ>Fkr|;$6S+%dtJ6ZDF-7X8p?< zN{)q33YDGe%umZzy@0p2YhWrP^viRl_(kXZJL$^tGYYvzXZcSM9U#jIDy#BdCnFpc zp!?}N>zdb|YKOjSoHS0fWgYYcy;mQ*y5wB?A;&hjp_3Oe2@9vvvVOLQ>U9>^Bs zFqEA8YO^0bt>b!~VSL7CzBJzQC0`W7ttVq~|FYWL5bm9rXK$nxtxaF>`}?dPq|4G# zKW7nLxi&%)1LdM;ebv`mTl2kI6M1NCu}iALUI#uDQW7|zy#TbcF41`Mb3MdYWy&I2 z37lcNPb`D%8y?0g!y8_ddMh4AvI~x-s8f(-M9u=9iXYVQD$KODoY?S&a@q zFUnV4d-4^ZUszTfvMuu~;#>Hnm);v_?&nx&(HCkMgV;}Lk!NR?KfFmcr~&g2 zV}YN?emB+YU zM6Pn)2c^vQ|Lnrw{NKOwYeu^pw+wf$#GZ&4>~aBMM}(-c15H9&MEG|=g`AB65Bxdg zVnJs?xWgO}Qi+YpT9|Y2Fr-DW{-PvK$`Paz)zA8_4pIDd!Nig2q$$84B2Dd=qV6>C z=R*V>9YwZqlrlilNvJAj%`{QhfRazP$;pB3pb#Nn)6~>&HP!}raaPHmV=-H+dR&(CW^x#I`>t842r$eq}g zLi%&OQU=p6!^KZKm^XFi{&1vTVZrStq*&@}rLbviq~^Xs%qSc5GYMAO+n_uT&GyhG^A6uCmlLycEGvv6)i``lnhsNucyUQ9X=(xSfs3fokpRwl zUjUfb!2YZM*Z-lkZVWfi#r}9GuD-U^`4tQLce@E?;cOc_|9YXh5MC649`hxmOm*cHaCjOPRUaTMK#_5ws z;v4?P--x3pj>sUXkF*=aU?LPB9wB+;K-h+eu}0tJVT<1;L@vbn^bBLsGH235HZt;( z4ORwR?tKxQV_OrXHtrnCIn9eY>cCqqgUdA6^+HeO@XZtXWP?Xh_YY3(u|O0aG@VI| zaX>di$Ak%sa+C+H+bIszly^OvQ5au;l76LEoX*Rh!DVGU{BlU9u zMxDf`K2$yI89Z`ei19~;@S{PSzUwF-xhkHE2{~n%COP6>fYA?RkZ-)ox6Df&>bng6 zUD|Ov`H6=*O&q=o{phr5rH|$30tb2&r*F{><0OCTd{`&PK>!oKZ{Cq6JLXSpY3#59 z;?^E*aCqoXBl6}yb!dzHQqOozi=1%e7Xnh|)?4s$uwi;s$7Mt}Hb`ajcfk~<-;~a+ zK;qtDz%MkbO&)f-KePa}jFAcXs^F{ z3yQPuB%e9J_(AS8JoKgQD5DH)6y=bO%hiol(o+#AD}xRbgBE<1aVBLIa9AbGCqe&_ zr}7Mll8;LsaL}WF*iX~WKgL~-J->zRvedygfuqph_{o=b9Qk_OQ6E`lI9Z{E~bd`VN z7xMW@Evm3@Vu-TY@laJB55SWO=5y6kLDo9;(3^Q4CRkr^P|J87^EqLS6V?lzUN>&P z^|sjU@5G?bt>vW5BwDr!)&a%A1CXzz!*X#&9Disw`M|HqCF@Wa5IZgddT&)l>&{?b zZWMd{LF|o3F=X>9#`8CxS>t5mqp|qJR;eG+VcS+A^udSX)&uDeHX6Y(H@G3$f9t&D zOnS0bY>NJ?N~Smf7T_fp*y7{>SD!Vjo-KxaVy6&v2Ga$nk*_ zKeT7#l4+g;^uM;{11Ykcm%jfKwmeD?3#aWvsqe!!ao{|5(uUW02OKC;mw79l*UwDA(8+|SFc&hY%|BKl zT&UmC`eM63iS5xSHV5O_RodRZ_TECCkHz#))jO(-8nx{ms=rtL|#SyTtyp0_0ner9QDsUz3pMWDF#_FLliyKaj|A9})cP;Wev z97g*w(mXj-oWA0$i1QRaA=_P9@|OuvosT~FNH$`CC$+^o+td^St_$>O8)oC5to{tl zRxbek&a~W#OBPN?CIk=``SpQk=6TS#424LPCX!4V(M&vqEA4}35GjH8ZsZo((+uO$ zwS?PwiNxtv2kSY2{%Q?Yqv&%90%@|1!F`r!sUyjgr?7gPL_wV^D6KM5N`hXmppz=k zn&10vt%vws;ib7@y!g&zar-T2VzIXn7cXzdK!rZ=#}C947qUG3n~-asEr z(j-mDvF^xzQy(WQ;vZTmmh@8b+Jp%r8 zl=hiUt><2gJJMA~}XoIlIGJjGJ9XB1M<8u7AodNk28I^^B zfN@JWae7V7AScE3_(Q!i&ndC)6TIG3HaV_hT1aD`ZBA03M2~$DPkHyN%OOAc*5kzG zCP5@{JQEK2X2Up$xBXJ6)Hkw*R{xbFG1I1YzWCTDd=?T8=nwT%JpN;JAafkK6HuHq z#;yMO#VtXGsbj$?2A@cUu>QIK6svNx&Zv&5JTL*@iX7Z^VnE!(thet{5L zs6sySud;`9>!fUt_+`z?8X|wPt`}Q7yK({|7 zz2RKMQ3C{)erSM9(rdlKMD_6;OeDFU&a(VNy6}soq_2=N{46dW#EFwfyLrH zL06Ef#xFYLJe4i&vR=)q%9FlG_@$nT7{B^KvZ{E|JCgV*3HOucMZb{UTa5LkuI9%1 z_@XzxAwK)HuhqKp>Db%892=LPjY}7wiBZ2Fi2t$Xl?>=#TdgEeWs-K=vJ{EJ;K2cu6+g6KUX4{{Q%=n} zps?o4R&pkzwrdR1H=bW)JuX6I`?=od8N&QR&pNocxDuW2lE&DYWV536^ODl!x*CrQ zyzRf_vRpc_@8=3!j^=EhXIA*k)$o;TAfNR|PkFK-oS*sZUS&IqD+dY}w8k3m?Zt3! 
zL*a?U#+S2m|6o)a%M0|I=;kY=ypTfrg4PKK+Lw$}x4*L~cUkdWt=)#Pa4?L?Xixn$ z@SO-`GpBKvUx46jjxPq}+=7JZA717{r~I?-UdP#;UHHbY{fe(S*zeym@n;5j;IGC- zJ(I4xQG|J7WuQvH7z!IWZdHRGcMS(0+lDBPJLAS{rf%~7s$cvc?g%mDdtlx=!|yBtbi z@}CnZ1nyd{#O0D^Fc+M)3_fj<#zK{R77j{h!ly>IFGV|$(G^YZCn;`sWi2kAzYxzp zbv8cqk%znqJbLV8^tI6&?Tvh!1;k?>4xl8g@`aC2{qreYb1GxxR-l3MCX0HcDO%+w zS>_FB;-r(7;iQjVr(bJ%0B%51e238k2C&MgtPg$2(O_^N$z>l>y7ZsxffECjm@1@h zB&`mDU1gHKq=P}hLmjB#Fc`f)dFrT)L&RVCIxYar$6)hmUxeY)w+z~92*GJos%;q& zcT8<+L1BZKqub>OvH$`2T@en^G5Eaod-UWT@fn}~%J_w!``Oshpx)YKlX4K}H+N%4 zGv!X7H}Ykm)&Jn-(%)`>7<){#!p*}R|JL$o5+n5?yo{BHxJE2aL>u@x*5s`}KA8*O z!@kN5Bl<%bSl)V+>yHkCmj-DA+E1KVj^(9;xbGD&kH7G>eNr|g0j zEt(XR#)DffGvp^%$mdpOwgvs*fqthMFNU3R^8FB-JmF#@Q61XP&7k5+o9DOeN*WKn zy}@xg`N~s4O<)~OTq?(JW@|y|vZ~|BX;JC+^PxFmf98gETX~+`6jnvYEfeT;TZENH zzGTktEc3>wX};moMga6OFnL)tGGX-LtA{7hu`(ii8krcq%4~KMh;(t1EnG@MPX20%!uen{7jTrRZ%#WdTyT<(e*iLs zJ2~;r;aDzV;Dd4*2UvJ|gM@=V=tr&~f`0uoAHGqIFNcj0Hu?yAF3RvX0iMU28%V3asgK-ch?GvBml*w=jGsS%nb zQ@(b?r4_H0BrDc+))D&O^A>s{{jko^FG;)m2^Qp|`^0?#!1h7(DDKUZ3VB^o`py4T z9>~qN;&OyDjt^c-$?~b%F!d?1xwQY{YLC=-=JEg__0LK0BF6;-=m&~d zfI?X_fj)dLpmN)lMh>|I^bzoyJeMjdepRt}~WYHaq~?##^%;ej7Is>5N<<$>bAgtvY!F^0#k%KW`YV(raG5X zc?XbtR^)F%;dZl);)u(!bg?hTR|5D==56WNrq(tagK=zV?XPK*I32nBgpJE*WtC)VJ=pW-ta{Aub4j9(%7DO)GVBcaG!b?RY8R#Jx> zh4?|(=F!Cx+&(>yJw%4kMH=#=9I~JecoHvcK3EtpDI5U(C9Qc~`koK6S`~R6O&OZ#mi|-Eqmow58{KctUR3L}uVSO!*pd zLh(PDqn^Der0{x@^sYue*tV@{w_MsFqqc5FGnEZ;9 zKiU5{;^@AEZll(nQb(?bn$mc~Im;*?93(rSIJ)VAgh>`pLgn%0a^%rB8TWWm0R8hi z%)+TiDa)AQ@+rN_m?xe58>ispnZmDp7qe7}ctY1;yg zC$A5f$TThqa|r@^kq+I-H_Q6`&mv>n*2aZ*NO+)`sE%KExFNxG3lg1Hu5mN7vWm zmG|5ex7>bby#J5>uxT41dP*B8K$2xo<+5HiFwmIqfN~~(E96NqI{>ESXt8wdHce%Ph zsk_bFjw9*yC^}+f(AW6UzGQ!&HEZfH?f4>S9lJDd9+8ur{%gnLd=}Js5s3#cDxvv+ zWT3A9{1DB&790gPSf=xsa8TbHZ}_550m_L-(h5CU9{DOyLbe<&^JyN3fXp|n50F`E zsB)=;jtMefsT8mWE`pR49nPbq{xoz7gj2l*QY!JsX$hsDtyq9N&ye<6z2$zBQ#{Yo zlqR`*%vo=7=q+PT4&VC_HOUzcW%#0`%an4;%W83rQ%rm`A_rssUC-lJg6kqCD<|PhaOn`h>9dGJM?iu7gnh3Uar2b4x%xL>HBWxW665|6^1Z}G*zLYSTB z8S|SqJ?ji)aONm`M7lPa%^mF#e0AM{jS4TSaNJso2gV}gALHX=~ZoTD9eEK~vi2wZ?zDX%d+GwAX9xAWb zjYW-f&TRcclZ2dWeNmus{lw!>#yfxhSA!1@0v>BxKQnIlDeAOGe^P+n%3lkvDIcKn zM_(2FVJAgEy9jN{8KuhM8I)jgQi(W!($TNXe@Hg-5emy(%v z)AHkTZ3@flO41p>%X`C^d zl}Q6v$Ml&dUbI`t=M#SZ(OHo$b1{Ht3Cl6xSq*+}Ah-;`1%bt7m042zJd@xj1vpdh z3O_o-nqIo?`ITovYKLbFejZV5hWNuw&+qTX&Xs3l^YZ0bQ96*i1&+h>j%yfk1x09|omlU?rnHa0(@#i?CjR zOGVtLa#ocx`2o#DW_r*CfV2u3ws^py@+>wgV{{2Yx2u5!)P(_cyuosRtFGy!t=dlY z6PIt`HL=5x^-4K2k!g@`wRD~${1+c5Pacic^uI<6&QelS3X1AxIJt zZ%SmNGD4D#^^pG@iX5U>VfEKUvZrl&33@xdr5KI*ZJrC%WZ2Wo$A=p#{UNOg>u;cswiahoR0VjMnC6mxUZ@W99Mi zjZZI;k33wCddLN(XyG^zEgbl9fdWgruXJS3!I#jeOg=!_m(KakVA8}B7tf4$pESx7 zSD3z3Eha*Tm6kY4IkiFi*fysN3H18+#lea1cA>k}fga`2E@cRF@CLnAE|wSK6u&;y zq+d#&`$YOu<&M1Bkn%P^^C0nj;7>kr%zmyYU_Q?kav`A$3 zElcCh@QA)2b5V>wMc=r!d!WVq*gj^4H;MdvU*g5i{Za9x?6HmJx!^W!^aXl`Tr2&` zb6*u%6V85O{1o0PN1Abz`iz%?Z=$|LrgoI09ypO>ksaxIF-G=6G|zW?@$56t#ykG) zFZ%3YAh*PY6m4+$q@g&uD(=#Q6)CVsm>py0YPn8`@y(vp&H0<$^*3 zJ~GM!6O9SvA^8)AZ)~vVN*Xc`u#r5BwSdL>0W~tTY>d(1Qdv~EkX7~xSc&I@fs#%e zlmm|-<>3@xOu!Pc{`JFr(T&{(4?viZ^{g%=3V05&9BZE@aM0re^6c6cRQeOA`|nQz z##eC&ytbBKlY|ZuO^ec4*Do!1{ky|#RCx2yJcvgwwjg_Cfoxg(=SC8q;0M|Yemwa& z_m32gx_f)OG1TB+S?@g-W} z%eY<55X^?#4zYn%rYZ&+>s)x;9gJeTKaf9&p=7ep{HieyVob6xen~z)w3gmdm#@8e zp5vd&qsMs|1^rppV_JOJX3?!7gG!U2HxWb@qc&250t}K9V%7~iUz7VI)FXh~Hwq`QOFJ;MuPUa8W2juTIREX-u zRce8Z^M^7Zs&$-UsB%yG1TI?o14X5%Q7Fw}G6+*pp^}z!TY^it90l~BiOA(!4shKJ z8l0YE*%>v@=`~GSUF)^;4>{7V@4%Mf*?QGY%eEc7CcniQik?VT^o8v>|B8(LwKEs9<{GDGANnBO4m>$;$P-`_E6!RTH0VF!Y&Y!~Usuz+272C_-bDR 
z@#I9~j?*;`Vwc`5=g$(YiD&gGG`_slmUuIb=BPjWcYmh+U;fOWXrKC(XB<1d^6G8j zUTO#X>R(H9ibW(3v5figsUP~zqmMtGmO@K;;geYxGed855@{f77p zIxbzg(wnRPw-Y2XN-mxKsB zKdANvoS#T0E32&I9kr+5_Kx=FKlz`T7llFXKr!&#{E9cidak1S-sE?<&uFLf18)HE zX1CL$wq*WQe!^Om=CP77>td+r=$N;tL<~Sa8++l8C-jchN0$~?{aK0qgPpd&zu&g+ z?Rf*ht?T;Ty47|L4!xj$uphWB3j65L1#2#jzs}+fmb4kXrvKnu{gQVr_zxiYsD0M; z=nM1R{)7vCoU$G&x#ByhhGALqO0;NIzB&|s&f{eGCi^F+sbI!0rcG^;WgDB zK{|62<-aSQ%=M3o^}rQAKK!BEA>R-BfSjKo;Dhw<3BqklU_l^lHo+feQ1vE*yI0(Yc9M<+__XsSL;ew2@L=xf-35Z8o`wO)c<4gJ69OR(%s1O(}rJPNA);<2E&i#c>( zqcYgQnAf7e>n-|Qm;DSxo<>~LL_(E|_T!Tp^k>$^BF&-_ejX5`{Hgp;Y(@dXZt1&` zANEW<-?^kL@*^7_oRRITpM;$Ay5ipKe}$jB>A7L2=Akt|?EQl@ z@I$YUK+y_x$`Y!~8-?os3Fqv#+NySD@R$9T{m?1LQ0XbhDSRoXn7bwrr_rx3W7_0pWHI;RL<-UvL#W^A+uJFcv&1>R~eG!Yy zvjJND2LHU6^Ppwy4I85_6`_PArLk`3HzjJK=YFj5mxl%X;Erh7Qf&O_qYt(x9(!|p z=R2QjPd@oryL$DidEurt@#NxydN=C{j&XT|qhzLe0H2s+Pp@Nzv!l3ADLX*_Yy$A+ zVA~tAQ!GQABKE9)9>Y9<{13+~;pq#G~ zQ&0kT&NS9c>o;V22bet;WT2G}3gHE0hq-TkaWp>Oum1g@<1LuJjs+osjLYof=mj>O zG1P!i14PGD^xOw}$1fP;MQ(+!_^a|&E@j{O$nv@_2i!qAVSg~RVX!uI^yCh%D&nc0 ztm0c8#;Fd2b5VElBDieO-^hFXiMMGod9%Iz;`R2czxvDV*FW`ayYb4a?bTQAv^)2< z)kzi{xbGiq)37H)HeAt<0g}c-dC;MtU$oFPPkJB2w&-H_4BrF^QAgkqI|5^Hb7xh3 zj2Z{!d|ojsvSrJzCyw+(HiYuj=appe8534Iy-17ts>(Qtgb=Uh;nKI{C&Fq$e_nwm z^h1Xor|QtbENWddF~SjeI+pZMkM;q4Ic-1}&LAc)s)$a&$F<5+*OSJxB|8;v(DSwe z^ua4zykkgo*zw4D`E{{6C*Yz!c$mbT@=YD}KYzPX$MI$XFT7I+If@>3Qr9h*vf`6YMI9U|0p`$jwc|lAqXDawrrbXt~Lcj`#^sK zg6Y$b34wXUwg$dpp^~z)U-Lj83;sR+8LE8B3_D=}?rqNC6tH2&Mn4M$tYxyas$0+?Hej|4EcpW@`?4ud&iOs6OD=3hVhsbOyuWloMAlSx3skA7=ygkwRrLvobi->_A&K8 zWZNfEX{SsuFl%H)Ii}Ni6Z#KoK)jZ%r?-Et?eE-mxs}yR?V0CZXwSZMqkY*|eWTOXg^CHM zCuXXDX=62iDQkw@D1{DE(8|JpZywMmyNOCp$WJ48eDw zCz=p4aj`uRr%1kjdl0rE+Ts`d%r(h6`%T%p^L1+7F@9^D=lgdo8ZWPEGP%B?a&Z%m zZ_h4hVtJ+sqx%_8BH&MDYuxay_@aUD=*28=i0T) zmmJ49?o#`cLnf#u(=-9xXo5C);^zspA@o(_X89rb@pac<^4%BC>c)HEC*JKQAt?U? 
zgnVdRCE$gE8wg)`mXbCf4!;GT>-8@P|%R2LI>r0J@2DvmScU00hsR(?0DH z_e2@e=xaH?s9la8zd4uuEfUAuN=o@uT-5EsOhIj3N4Y-F@rDWL)lWy}ElwsOAyEX4 z@Zqx1jZErVr?i=i|4W7W^Y#-_VACHhbulCqge@d!Q|DUQ#+S zJ2FrPBJXkzKMD9~eb9cSb-kHZfc6AkkHIh3w)64)??R}0I-|Mdd4RGnXwYBfdo0(l z7P6@qgEjJzycB!;DU;Kq_R7mIw4?n!+q1vrP8(_y-sndJ`S=7j+5vurBmIlM<5w<* zy~OyZHbq*=2Q#dV-T>$0G>*0h)rq($fcmywXr+vRSC`^vu(wRmk2z7iVmh9#1E*#b zLg+R~XADi9^|_V&3c#D=JSOnOwDbi}erid+{*0$@yz5#&R7U&s9qdSDSvcY~{ACva zV-zug8V~vmoFmk_o&nE41iifM3428x966~YU_A;SaTu#@oTpw5)mko^lY9{0lE{@co9#HrzJ=&H={P+G)^`l32QZ9(nju`@28$Q{w4b+kN5J z+6&Kr#v23CDLQ%ZkvFxMU%IZgTl@I$iVXnEj@wQwB4Df)k3WX8=V^UXBF^)=DvwO$ zVkkTX)7QgB@vuif<<0`Onn8B9MfYX(O_j2Lqs{E!^`<}DvH*Uo3pfp1Cy{GqIQEnO z<1K~6s`#}&o=~Q2K5fi6d`^raUC`$Q_!NANU+oI09__H65@SA6R(%$2x*$ovRSm>3i<-#$c9GBlCY=();rxltZzVknNHDR zxk(QmXNPN-?-0D#H|WtB?e};>5c3%I0jhU<3V?=bd;$0m%ZKxk3{A6sF@NS7uN;<;n)f`H>!Yx}J?oko;9#H^4OC)~D&n_7YL-t;Z=e@D z=o=L}4Dtz@!!Hx}gh{F{_EYWJmITmgD15X--%@=Qb(@q8#*~H>1U`2KfljzKoiVo5 zx|U@iDB8(K+u*SdY1cG`U2)BEHg1N!GBKd&a9fnmpFwH<1TWXA2j1WSgvU~{4xIUI zHnf><%s5kQj9y6&ocReX;x3gVCUY}^Mo@H?y(FOyDn|s9KECW1g{UJ;NPke{Y4_U2 zzwZD5|MW>jK~xW1!#R56JnMPGL|$80Wj^eYJjI;2^rrMRowOsd#@tC-U&4SDcc@sI~KjZ7h)_QyA+uzaN`j&UL zx4-@E?eWK+Y*#LA_;Y9*nm?_s5M%7SuZ11R)A&AEtON4a0o%OCS6LMS1i*MEdU#_q zbg+?@*sT1ahGg@Mzl^wAd?6>+Yt;|9&j-(w{TQ#kF#?rhOEOICo^ic!;U6jESWNlE z9ngc>a35ny=}XJJjaqq%4OvGb&SGN#wvTM?-P>t9`}e%D@y^X#?bFXa+pfQIqusr8 zr|sU`ZufTLEds}0cO)J<6kf$kNAxNBM2rU;8|$6}T;AMj*Dh_f_4ReXeT?7c)~erV z#)DdjYI{$1A_k83o5sgOop^2PyR zAl!7u>C&m_FbBs+b~G3h6Vi4}JKR*39FEzwa^%JNo!xuw^f)Fir0r~np>YG7$-}|^ zktPyy2s+X}ui8D9Wpa*_*gM+Qgo?p1Z}zG~Lyv=2IT|J^4#q_<4uJ_YP5}Fig(Uko zIj)j}hcgV$~5_(?rvfLustQI81_B4bi2fA3c(flcCerK+Lz&JkSZ*1O>_da1h5w;#H;R{OS?mW*3rVz^lr9E%}D( zB4FbQId&@Hi0sroIadME!ngT3r+vy+utget@d}gW6$L^J4C)`Df4&tHlUmD5z4{9jW3e{ZF`X`j$(YrwfTE zcrd6>7Ej7_8_0_dCFnbXZXYcr^y{Xb=*UQK9CbG2c-*~Vys7BIdRkLkYyzi@Jd(Ymhk z_P3SbtD}md|8#2b{ia~6YQ-7w8koSpR`f1t@ z+YfxE&2-0kf&1*}y71SL1t_58!qK?8eWhKDCWrx9pAC%k<+Q_~D}KICyrR^{;AhA$ zyxh#Ln+?%s@JYF#W$5RYN7kQTkUcF@W}qFQoW~6v$91ARBa^VV@q!}dV?5y~U(QVi zNf4kN`xn|%pCr)d``)KVO4FFiw>^LCzxgZe!6%<+lii!X0sO)XFSO&`?e;Cs2Yfr!H>Gre#%g^~~Z(ch#IYGy?FJCQR#}VI#*zlSx#5;AIQ!CCl^fyrG4`ZEZ4;Eyr9uGq= z&=$wB9>J7!3G9btXNBEp`}mmj*a1+&xdT4Rd8}2sPW}2&id4I0KXt7^A>=!>ANWbD zI!lCrQ1$LtUob^Ka@48q z$#g28YaDF>vLMc(Z;My_x8pgbJ8onwV4*;gL+)w6hzJ9V?3c~<|s`Ra@9@L;bkD!%o&B4U_B(&zDk=)_Ml)QvXvKR?Z(G~-}i zW}QSoO_Ldc3L|5oM;v68Zr{%|Z5dJ8}YxEc8!k%cGIj3zz>CE{!`U!K% zgPSo*R7vJ2r`{w0KIVVaWe($`5JWq<0V3lq#tiWB>UF^LS<0TS#0_0=t4)r~MSO%l z`$6dl+jRe+a^?x*jTxM$T+$yJ(4+f{c+EO~oGRd9BDELZh`?P z>VIC)7qPWDje2uudvZ03jY+c&s2XvMY&+;lx+GW9Purp5swL@`g#*bWZ@?kLiN?gJ zB3zO^b6(a+{B{Si;klsgE^L}jm^`gtwA+T#PU(j+Cy(_IZulQ6Mmahnp3qphyxtbr zs3?Ay);8Mu1CI*tQv1=r_#^E<{j)!y!L51i<;IO$!oJuJCwtAGHd;MxZ+rZ)_Fdoc zjqQ8?kN>vUa-R9sf9f|K*nTBEKIO8yxz%31aYu5PYoGr+zQ**eh?Zk7(2HX66Vegg z{#&+z{z)l2KsP|@y^hnBUjt?harPQ{4*iae5`NQA%G)wmojEoJ(uZY-VR~T$_$5eT zd=Hz*K3H~#O$bNW06s=`VBJtI`jSekiJ-yr9=8*GGJi#`$b%z#4BW{F>jJ`0mGGBg zcTop^Vtt4^C1JlBZ%i6~iT)aPN!#|jvZ3PJuUu9cKe^^t&Yt+wUMv{zZSS@Bz3+YP zZ~WJPwf)AYo@s}B2kp^EAN9j7J3D)Ad-t#%pYXccn72&M4s}Fb5!;@g$QET^ylFyB zDNlZ0^O%xVw7=+etg>n9)qI|H#Jz)98-y3u3n%J76OFs@etdM&_7C^k-rk;X{@=cH z)2}YRvwf@W?CyH4k@%d~9)p_)eW=fjC!X|kGm;f25}5m3>eBL4l9OqV{*{SopEbV- z-oTOi!jFuu!w&@hNKd^xZs07+p&z%czDPN)t!v6z*Py_LM5Ad}S)G&}dc3k@3;b-R z_67kzDN1R4ag?Q9?8zVsQOth`Kx{uvIb%Kqb#4}0rH2n+I$i9 zbq>Ao)FJEF8%Hf5LAEzQ)*S)uhiyy)>9Kw|26R;YU#PNP*<=ihrrwE zPC$RrM)bRJ{uIrf&*ZIm?D1E%Jj7eW%MdAf?+&>1ev$S%2I?^0%F)J-MH#s>yP{jke34r}+X;wj=&QjSq`9_>Dn$ ziVycK)irJ%@fUUft85hbELA_+DLV*4dCQV{6~P{VRd!4sB?5{r 
zXh6flt)f|(1DWYXjk-!R{DdZE-YD0W*P;G&FFM;?0aP%>m@Jr$^HDp1H}VN@g({K z4_<9=fATHuEpL6Yz3H)Q?b-uZ+nQq8k~&I zd*s0S0CHYXj76N!C$=YSh)^6uJ<}!{xGzb0u6Y1=X(ieq?NG7H{vL78p*K0+-nrMV zzxc9Z_8YMQ^42ZCDS)+BWDsu-i#U-@7T_dagATsyzrGQl4`3s}l`H(Nw5yl(yS8PX zs0;nOyW8zZv6S$}1^_93^a+4~vC&57!XnaG$1pVjDZiJQEt`hj%U5TJc6#~GvVMPc z%NvE6@~~;c;Kr)~e&aX|N$2cDwmXd1fhot3U|I@aP zkTwGh6l15VMUjLXW#p7}9qDXlQ2m2lJ4tRtPt-|yq7oUx z4jUt=hYWcl(YSTAV-adXl#mfS>l3U(scH>u<3XcaZ0Y~G*c7>zAo>SN6Ti%<2 zB_EVP^G>H-xSZ-J;Dq;-Cv|eZ)4QtNdZmip(NE21Lscd~@6YCOv>Q!4Dp%Q^u|$WL{vM zTIbW_V~#Sm={w5C15 zkL4Q^$NmJ&Ye3XbzTja5!K;#!aQLO{z(_&(;W7Un(`o@1od~=r;QE3L2%~(^UwNc~ z50urGB#}cfyd!_`M}SE^O7$J(x+ZO5Np@}|s%ZOAy=>QQv3ZZam*PX?q?3=I;YO^- z>g1aZDdrP@By@}c*c+b@bA91L{0lN}7SIm2*~Po(4FEs!tj zpKjZ?UTWX^?cdq{=|6v^ebEQr-ky5ryV@`Q;xBpQz|zW^{4)BK|34NjjG5WjVo1S= zIKj`#sd=**<6w+A9-HP@`?y0rj}^#}aY*NUhkt)>uU*D1LY#hH@d{gx-&)9-mp z``dr#rzp@)ZamXY_IAuazk8aLJ^a|4+lRmKBbpQ(wD&&s-u9lS-_<_xiBGhz`rTjM z7Pg)g1IU-B{dZeCxa&_s5c3=#?U`RTJj4wYwW*>udEvGVCgqqwrN>Ed#(@PeNz>PU zs#sBTqf)ho}4aik8r z9i{N?f2tht>i^Gt%HK-c+S+nGKOIL5r*n@d7@BQZ2b8IH!lwdbU8LS>oN^0e2p*^A z59;sS2(>YrxEi;}!@hK$erbThUr_ieT}OPzsGd>Me)dJ6Z{%VJ2z(|_zCnJjO(1A0 zC=vLk3J}5-NIvA@%f@o+a!Grw>nu}r^RwHH05Kje+6ycYENAIFYcNBP4J%HxKj`R?rnFlNViDf!4g zETiK|+qrX7er;QQpBpiZaiU8+*mmvfSvtWn@TLT%u>nWE5t-sEiQ_3V3znJa;8*97 zKKON@(%~h_C2z@N_2IbDLW=5}PsXdDhjV1l@gg8S{F%-Xi}*Av`2ABBAa!2iVk{P* zPwFwAA&Zy?kipmyV})%<^27{!+oBEJW1Uvp$lnWcdCjqIC{frqF(p|L`Y0L120UfU z4lB;EZL@Jz4x5;<%NCtmR#DFL9r}fExGivCGZ`0>#trt^bfh%PJmhIQyPbiSH}dE@ z`yBNF=;xa88#zQLKvv=b|Bjr%2XC$yQ1W8l?Q_;c1YsY{-gMNsP9F5JkpPXN*8L%ppqwp0|2zN0)5=(|kkOY@7$je2XW1C|+}0-h{*E%86n#_X%V- z@;{|RziC4B5PyI_;%(v4dDsNj8t2GWR0{XH_3ZhOemoiWz?+_EZH>3`U^`dZ-mMqg zfAAmud+o3M=zrTLdl4(`i-(P+g?1!A_TKlrwf)aO^%L##rImK?=FN8F<>&l*R_6SN z2PeKkyn6X^d+GWO(YMe(@ugqe_;}HhpOTVHg;0MXO8OHSJ}baKL+Qw`xC8)!Kz_ez zKpv?GOjmvl7;I@8ke$%zCo8p%YBN8y4s?bs&C z`nLKn%t)t^p5>*(fS{W!+nhIyLrch~=26(KkK~CoJ`KR4GX9KuXZTOyU;{wRQ6lcP zZ*kqQDSlkrwBbd~5NBgs_E+-REE;p)`l+Yh**^K_{!IIg-+ZQRZEUvpy!Yw0pg5kz zXX>+g<$zCw$;M6HEm_^xlPdm*E^5x^U@_737rV0Z7{jyI1ghP(CZ&35O`J>?y=-Ti|$5j}20 zy@uBagV%jmf@8czuztu#_@RT>7_(joccrdc_j4W79)1N{Z38)8B7f-d;=j&$6M%Ik zy+wZ15B-ytezD)+iN3@quYek-fVgSro{wcE;5XySb|f31xX(>|;&*!jwG-_JufxHOf zsr;m?)8o3-%k>nMt9s@=`K%l6=*r@d9Gss9;0@sdlaw?Eec4}ijB9K|$GKdGO^7-{ zOYjE%K}X}f>JQ`kQAT(uRyuOss#nLLE9eP2!F3+rG@vIiuvTMQeM7Ts1)w)5Ih*O? 
zt$t8#F~0gF7Zyekv0~)gM~!rdEW$*hf%6*xYCeQFaVP?>L>b*>VB+ zgYdtDJ}5u19sYNC0E(kzU3O*2&eCD_dJN)$dmK6=K7|L~D9b#_@uBRTjltgNsW^kz z!}Emsnc8PuXG0&G33&jDu|47z(ZZ4bC%sxOqJM}BB(JE+_8FwNZe>TfD z7jlmq@}e*8#2bi(r>?oF&ovP@YmICGu;yhC9y?_}jKN1TQ=V*Rd_UHhN!sS>N_+Sb zJ{jlp0j?OW~k%^U5dm#(+lcklQO?8G|}H*kMWk`^xb^yUNZM;;w#?6uf7HdopM zlI5chKisZeyV@Rl=;3yW*j~1?vcBBbS1w(&0U$dUhJdvQ;vD0&YNz3x1Ftf(Wz)!d z`M{0Q1$?gct3v`&rUt1CVU~w<#wfsfbZDcI(F;wAK#({IV0>D5#tvhEa}-oN0yxl{$xB)M==-7G8fhQIUvwe3mIaH3KgJlJJpqf$ra4ZGY5$9cA zjz{f53%r2MyI^Tcd_;Fc1FdkbZIm2$0QX29Sb)+`%?`ItLB~8(S$<7 z9&LlRlh7|406aiT?~#{urT)G6g&dKal*^8=f|4ai1}dlV6I~1_)HBeYSgD&y9w=F0 zQu3Jtx1bG(z%Ki!Al;6d4KM}7Ky<-2|N-;g0>!jpYP`W41-%Cln*-g64XELTB5&~!N zKU>B@1t=v``!D3_LeL5zANps=m8O7LKmj5h+YkQ-4^hZ;B@_)EvK^b6eq~d^Z^+U1 zqNJcFfZiO}!8bQ@{dAG)#kKH_nwh=!Zc7?X1xYP#gKJm}84c|Ppsfh7W}%mrKA^W> zqbx(nXE;K?bYadVTM=hIoga0|oS~a0hwFhZ7D#9-`oG#{;{i5;Sip!ne$~r`Q0nD__s|I`_BRkN@qz-X3|&+uF$<--4ZQ&wb|8ZTrqE?c2Ze zyW2ndr5D>5yzi~;sdv7+z4+pb?cVLXYInJvDv+h!dV135qTRNo@ZK^bL4y^>y04Vu>S-iHv&y z?5t{{!{Yh&?tb+7GJ(V~4V6jX4+svb@NA`uKZksL3s|0d`e9+`Ub(c#WXXI8OdQAmw zRvop;>eBK2GcN|cb-4;VuPp}NI6f>Bco2(V(?cR@j~ANSg(L^f~nf> znCYK7u`N)R@l+@nNt~SvqM;A9fi4Z||J314Owh_k5+UM-wB=&*jW~`4sj8wYc#U`y z=r@#J!#={_L^bo1eNN#sah}GG7HrZpsrx zOrpLk6qNNZ(U(7Fyr*0}c;at{>a^rToxl^6@uV(At#XKVny&SSeCB7SkFkq6u)uxI zMWt)X37O?9U>+Xriw|8dufzt4`DLDbUsSAdrERQTY73ViZ-;kYZ9nj*|BLp+f92qYG_+_jY^wJ@0D&pa1pmxBa^>x1C$}g!4%8+^x30zG?c8)X3J=Ywe}$w`|Y9 z?RR`_+gAI;Fg)R8Hj*ArgYyFHi_CGqqC{30R|KXjzXqftrhqi&e?p_5tWhCa#BTI4 z$7R_7{*8h@8aT#=0n$SUk|T;Uq+Z0YK}rNu*pSY^r5|r_u>V)G7w}bzdNr22d4W(m!P%l&v+Yx$M8-CLU!2b=e=JQx=?IgQ}}CJh?_+w>)HL zJS_(Px4iis?N5IHA8-HT-~AbBZLz)co$qXKdE&`-d1GDF2)}Ug)`vsIj6B`9D;wHB z+HK1k-+1#$Y?MiUL^E;p@xfu++uw0pNS_7&rDc_|cCg=&G41miCguc$%8TO9@(^FK zW`sQP0mRoDT~6W+l6wa{4R<0NJ#2UHZfoq>_FG7|@9nzX-JN}JRG@v{w#gXe&wz@8 ziDG%$uABV+B#Xvlh6U_`v=jQF{fH9n*YI>$v*rzC>D>N1J4N%*H67G zjw$+WKgHhExGEsNKt@Gm_T_2bX)F0D@=`z3S8#Y;h40gAyk{fe;o*VPcmvk)Nj%-o`XPK> zyLPQ@UD|Bdu3T#mJ@jCE@WE^C(v_|Dz?I8wb8AEGowT*}xaR;J-1O!i32`wv`7{Ce zmX>%s7PQ5NfYUSDly~QTkLcol8hwZLUufipIrhxD*rGov8MNER?Ez#5$W(ra`73Qx zmivF4GgkW=$*LZ90S;*5zTL`d$p85GKOCkAERYjGvsX28Y(;w|W9U6N0( z^Cl$T67bUXSK8g%ciKxYztVPiihpH%0U!sA0q#@(J`N;ko26kI z!YBodHrY`JC_D+ZW6bHasiWJc6rWT4jgQJ= zK&bjG98d?|BBAp;=%^s;q%#_>DPIE&on7e)41Kd(JaNrGCO_^30(S_r8v%3C$0ry9 z&j6;kWTRuhHb4V3t1%p44s?AXL-r9Jcfc9fQNQrSjlXfbNoUHXc2&8q`7K~ji99-N zzkvXqD%N745E`$7uv_PCRQnS-OYoQD(EQAHK)FNSEFk{; zjY184yo#C06+H3He$IK79G`SxmlL>-@>PPonworG6NgCA7xaH(@RQl{l;yGKvqv;nuHWbz3p;u<2s zrTUU*^rh$nmFJ_MxQcvHEE5)S0{T}h7U0wapJ`E=N*k92!2}FE0?XV_iN!TXx-#XV zt4nD3x*NBeZN?m|@B5%|#xH(Un^&@}wI(AG4 zKLBqKFioA8C@=0L+tAI}ZtBL3@e%LvM4R;+`Qe@dN3<_JBkPnG0VKRo27c-$ZA|W> zy`l{$9azcc4_cI_hN7*IK}-l?(8%+evmk;teJ%QZtzX$OWjz!ny#~^SH)H~D;g81P zcC)=c<{GoWwls?)+93_T!{_AAU+7bSD`7?eOL^qM#=Lm|3}7Q8^r?ni2Ij#$e}g5uk*w8_m=z#H1QmJCc6={i5QVs7Q*C>9Z?+%(uYaVy>%H&y1^~w87hZU-9o)U$zWF=8t^L9;zSO?-qffUl`1mK< zjhlDc&b{4s>(-riZ|AP;Teuj%X-{n+CAv&a;`E0UvZYEE@*>YR<^G5&$iyC_Z&~J= zQ0(m=w#mVMeD_Knj>SUzb_M<2<^_EF-F~AKj$=Z@PT~FIqqZd3 z@(F-BO-v8fc6`YhhtZz+}ohkRdcxy{3r`uW{Si*;RcdR>vLF zJCK@CmNc7H{1sF@P3e?#%KF{!dRzPO=e@K2^&kI99;++Wxk_@UyG%Kx6&hj`)$i2$@E0>48_QQCyQG86^v`Ji!Dzd{fN~>%#I@zER3^&9|=}LE&!EW z=ZWz|2m0Z)wbeY0(DthUWdTgr=9!#UPSI;jVqu4_?kp6mydhM!@x z1mzN-6YwH&Lg0%s-dNi8u3xvK!Vz=_fMa|O%v^W-srF`%0>zXrS9WK>u7jT`fSxSp ze4X00B!06#erpTqCLzNpFt^T+#@8^B}KsGTCob?4&^ zLM+@-q~;+Zn2LrXfv%_Imu0KmR6Y5L zY%1E7?vzYFV1dnVYH)gh&#QVIbe;Lw4SBxKLcCGH1-Zr#S1tIYoxBv_>-YTRIp3nRm!G;W{_R(LKAfn&@bQKL$BXKp z+(Yq(L&gWj1(jtuX2EsgY(;g02lG46raW{WV+0Gp)~V$bN7e~yy}|Y%jKr=y^uRdg 
zYMBmj?zWZWxjy*05U!AM9HW0nzw17M6cs^EA=4N)OFt}>Myw&amUyM;N!#7t@m!so zTw7a@w8!86w$@f2(Y0hIod4_>{@3g9@<)1ToK-tcI)n*`t)-9onQXd?b%yL8uz)03~@mZwDZ@euwoeGHURX7 zY<{bOrri{>7P||6B2L#QtSf~7De{mXudE%@7Ejry?gjWH>qbB7q72Jl*Z4!y@n6QJ zvVM!n7-V;@7cIKn6fpjUQTx)m#?LCK!KTTB7Q0YD@v%SUB^#s;a>ri*%%QMV)2E-; zv&4Mx67el@1@oT9)9}ZVhuHU~PW7j^%1%D{$v@ft!Vms%yY=ebwz9O`E?wPdANt_u zw@-Zdv*|KmA z5Hl?+26mqiKP>hi9ml2tZvNYsK(G2Jn+D)5<_IBYK3xC}e8zxJ6C6+CA(p*e#qEmc zSev4)J9qAQW5DkIe%syM_2M}6@$?{lHgpp*|s-+-()TR+E!;ku4hzVH;G z_yUfFH+k)J2e5A3Wv&W+JXic#JQ<1};iIn%CNPb!pqdFBgZ<0`Snb1-FYDRoaqV$< zWN)=$a2DzT5Z~CAp4qU#k+x&AN#KBv;%kcUPF~<=`~(=$UB2Ss8Mzm}exIQ0BadVK zK@awo?E@Y^X`4=m-C)P2SNxbJ{Yjf2%W#$lrNp7ymHrG}Z=vUsU)6V;!jJp_`eU3d zc21GsnCqw;?b3Y7b_{+XRE9G6ia5ptM*|-nx|}jE?b$dk;2RjzSA25w*&&O01xNA2 z{0Jyq#D~OoY+zH&#s&;sWB;TB(N>%f62QNu|JPNQ|47c{)tjXvK8iA9{SEq18`gn- z_EB*pUM?P7CLd@61=oR>^3cV(4#MMj%@-LjR5_?6uRyd>Hu3`EJksJ*Ld4jrCmah4 z^-6!}Al^Fly%qY05;Q&z@`=rhYx*h8Isk-6r*1=SJ9ZFW{dXE`Ezy3;QMfHr-ozlX z=^oA>>4q4`-G#1&tm!BGB!?5p5PEq_kdMh1Q$d7C8hRuq5hWUn1Lu-r%)_Joc5ryN zakHLr9TD*9LdJD~hcfugz{=Wcd*Ip?zoqek2QK@Q0X*4$X=77!?nOVq;SC0+KjJwy z8nQ0Ojdj+1SkL2L5uYMulj8AYU-80DJ35LR`9Y^>hf38qjl?UQi+A%TIk0IE?Q%^# z0T1X+bochFZTs%sHaR?&p3(W7%1;#6%{xY7&2(*L zxh<}(wY8=7wz08szYPEYhRuKyOboy^A{ZY5&Vj^{26PVO2&Ul;*2hQON* z1MM-0up`C`tO_iQk35{OXeQmokz2__^!psUqKvY-7@(!f_4Fvi(GJYx)kro8oo>|+ zdZ`DJ)Qf=h+Obx&k{@-4a$FaVYR3x$y5?1aKgnExnxO&oOmXbP%A#+W>-p>eN$>Onh+C-0Yt)xxx-qj|mu0 zO~N}jws&`Roz9%sv*CnJa745r2e*58>Nf_!0|N=*T~A84{1&*$pf+&e%C1i|Ko4{= zQA~0Lu`r1<JQQXw*^oeM3b+8aEp#SMAxhMOWb_J=!K;>3Ia4PhTYfr`;JaZBm+bV+cJ0 z-3f@u*Hs(71145$Ht zhe49^vvL7Hd4LxX(Yvk*hJ!xpGf3mhbY44#2>`PU_@F894>BYV{~LN!j{8jTSirgY zWAT3^zu*xleFPBbq4Q)r+m~K--e0dOuzZ#1II(<(@xwpzT{LIvscN5md^>zZSs?gj zK@>bn7ooN`^|Mr~fi^Hux7m3k@jMj&u`z{H%uC26Ehme<_=dOUDPhzFEVf=&LXNX8RBS!~dXt*;jmZJK29#e)w3y z<+E+~<}2;nzU#Z%Kl!zr?GvB(Wc&C>zfb|^OI~E-=KR5A&w+8jk*>fSK8Tu;C&4Sg zf}lSGpb48OFg^HsZeRe=7X1tvBri*gZFld$3yPZxT1jU!4dwV*KG?J&?CxwK+V6e} zTE-wdp>FAdC+P^r{Y_DglWYKBA(RONuj8UGLwA%#&rHhHhs280B-=?YE;5EfQ*8i1 zRj8Wp%QI$f@9qj&=)(YVIyEpurV3%{hUu&<-=X4ZN>h?{^;0wNf6u$yN8kTc`;ou# zx0G|#_FwqtvK5WTvKzm7lvlJWM(_r{g+)zx7QLIAo8d>t(yP$HJ3KLN9eY>tQd?F% zCP(I(_GF869h)^aX&MK;c=IWG&b|yj&KZy8pj0MqvhZf0w#39slc9}`rauH5_N9Eo z8G7uoR1e$ns4&kPgjnd$IIPB3IaKrvUTndWty9^c4*U0RwOiL;Y+DJ#*ddr*O<33+{*KK5d)l5rBVu z;~#L}t#Xo+PSH>Jn~JJwnDMEsQyr7_AXH|1U^&=mJDqPEwNwupNg~2 z0dY^UOzn~W0`g=^K1ru&;*A6W;lq71((;P(vu(=qm2WBqq6uBn7nI{_WPx6~<%u7I zGT_xW=#jtj>jqam5k@5{b8XU;FT0Tpy8Jx$66J(^slieVW58*S`GBlTqT@WrciAd= z#0}t@KV5O#?$b)2o7b48^!^43$C0#2zraq2(a7N(ytK>#o`H~4*QdaA>bldOv_-yX z%5*Q#^K)Su5w4Ww7@~frnQngSgR+^^(>)E5+)x!?n4-pKp5R|x-B2vE)E<2FamP>_ z*B)p76hJqh(F zw^@t#Y>wd^cD!NW@X(tF=(OJ0Y};*$JvD#f28VbDNHe|Sm9$tWr=KE|;4$<;Kj96N z(g!|EX~gi{^xxjwZTowNelx)Cz1_C6zvEZxLIZqYYeDk@F6$mJ)R$cUq^mG z&_g*O&XGOxc6sp*SbzFC-+EQ)s)i1&WB5wls~*Pzw~9`Z53Z(TZmfJWhV z9UX07NpF@7-xc6=>=fHpIcMp5OhefyX~Elox3|xcflBFIy06uZb_~Sy1egY27VQ>* zelzi)@_UR@?FjlCwJOhJE%kK-a@_NvU_d5g$Z~xg6wTnDjYliK>V9JqHG92?moysq zC@K3mK-9CZPk(O6`hiROuR;6V{6V0yH3#l?kj2L^zTpl`>2D@aDe*V^kz*s#7BZk- zHc)hm#$nBhqLjlIG7d_1Y44>!s#W{sb6&@3D4f&nbvd1re?Z}*J)45*=S209QFby` z|9~mGB*7h#D}I7%7e1U$U(BQ|0t647W#+iPGS1k*K~|1Ap?CrIFCcG>Z@`ll#G56O z_E}k(-z-m-eZH^8FkNSgzKfvCHS4AR9A}Q&kG3xW;f=h&Uw#{Y0OsEzvudY~SufiQ z`yX`dxKjBf2OZ&$Jf;dt|24KFRMv$32?5#@DoT!R-Ze#UsRW=Mz-5luQNWFQ*6PtKT)Hx zw8kOs*VnmkTraNqydD6ej|TpMp0VBgW`3r*98aq293Rg$8+biVrTmexZ(dW^BuhS0 z0n#@R$lM!e0a-m4Y+byzbn_;<;z#w=BDQ4!p|%I8PjqcY%| z13Kg$4F*R2&*OVsjzlMku#?Ha4V>yR! 
z77|Yd0H169UT&7I>2w4ZP)7mA>u7}bQeSQ>A@ZtR@}K#$asYpqMFvmEad9cmOMXD$i-ia~ z2kF-f5lSy7qJ%a%lEczOrgouI-ZWW{I*4Z$@{f)Or$!#H7hz#PoC2>b&Zpn<<_$Q! zaGvGE+2h!RS2VN0cfu1+;&FLp(G#l5_he1y?5y(V3(z!jK8|n5@iqeRaWj8=``D8; zSUQzG;-vWo;l9hogp5rFhoaw>scU4%0txMK?VAsxmjHk=#-$(pkj{>7>zH~HO)Ur* zqj;Tfn=%*Gs~ZUP2=Jw>!6{6-E;|T+#kFO%z?&5$H>0J^$VZmOqw~UNK%Tk=tV?d( zrs}JV@#$AQ_&WO|Q2iYl93MqH48lWx(kh^hxDLMSNbtj1&41j_4QkWNhq3=2Cg}$e2}bqTZ^2L|>#A zy+zB=4)`jVf%NcElC{!9pCdYiHqJv%m44cGqy8!V7QCq^DGA?DZM%%yt^Sa_jXo7p z=>X^Wz@FCS9ra5Ov2jf>LoZPE{Y$dw>s5xy#^U0t>LyRpoAbJ@#pEJjgg^5q^1+jn zyHSR~-oA^)H2IG7Lsg$+(Lo%*Cq2$4yMEn0o3noXm;bl+z5mW1Y2Wb8-`NiDz9`!` zZZE(5Vte`7XWMsv$M0{y`r>YT-{aTXqffl4z3|)%?ciY2Hn%Rfwat2UIy>3rrQ}BK z*XS4G$1;%ULboE_PfTn4(dCf>aux|gFQuoT6JP^CY$`kS;v_r}uTNC3{MxWFNUhtB zbwn>Ce>n^G>uMi-^K&stU|c#DUKTD_)Gp!++E`LJaXXBCuts@s2fY!9^Q&vae=9)RvZ49hBDuNCjb8$C(A75y>HisicHNY={NufGCM=LiOi8{Z#w1FaB8jvp@7# zWS+Zi|HV(W$^Jb*sf%6?r7L6x?d-DVRk6GYf!772Gh#FNF=&zmUjR>UEzT`l#&Ps9i3hhGuYNle4N^#3T3Kz&>lG@Wk1lf|r2ZI8(r#ZHGC#+G0SZ4j1km9DD(MiN7enDGdId4C4KP(V%Nf58 zSv1@td5Mp^0h(9UO+x2`XBIbjn2C3L$hIohfX|ES6;%CEY;rNkf{bFIKWHXVI-I}9 zg>)Ts^fv%xvw=NqxZ@DOoy+h9hSHgfT)O&TTiUv0ns|$qpB@;s6_AJxO3Mw1{RtlU zG7N293@Ps^s5%$qb-y$8zls+O-L5hYmh9_BMD<hvRzt zJN&1xBQMIRefwPK?YO4v1`LaDD0}$}#p=EJ=HH9_>|ssitT@K%A4N@?fM-$Vqq}?8QyXHI5aJgNr)V))V+wsG~U2JMGkZ|Bu#+kf(>zQ29L*MD_;=M#^q z-`;9_iU}-B#S;EhpvrA*Y-nyC>jGc*jo;S(zn^-=8%^WpHp^lA>EiVAS@AE#?{@j20{p~;hGoNgCZ{2N|E?rU?wM{w2 zdOV!*$Rm#^j(oa(3uHY1_B;ySMMn0DEjK7Cnc%Jb^8NOWxon{-sTL;YeG6@#@;TK>m^^*Q~!l z2Re+hK_5pTc&R$*A#Fm(Q|W@uI+flx1L!;?B3-8q1x}7!gFA2z>(?dstchO*SJta` zfgxRGtC}ram+3dlUaPGAa$bYUYsKU_?oM1&ko84BW!dOua3h?kZjwZGlcO~El@XYK zXv+RK_+{#h%F+hDH{;F`V^a|F<0OxLJjwF+-9Qk~$2ZlVZokf_>p`>2^>I7o(((9{ zYv-Zz$NDzS)sD`MJHRvO#Cc zyW%iGr0)g3g2D@ZlI@UJM>o>J{UT*$0H+6ltjkew%8hO*p!65~mTwo$BDzEk^m3xd zeYfjXKwlZ<1Lmy|D8Kt4@^y`mwh5hS&OPd*eghS#QjWw&rYYNWJ0)YcB?}tW@A`u# z`mcBiQlrlOd?!D~2xU{s=|J@x@I?8nKbqqMz5jXsp!%tF*3STY!<7A)2I!3QqABA7 z5gqB^9odv=0AI+Z)=5KW`l&3E4*jYAyyR=rQn#`<(wSE?Hr56>^1LBa@?$M4{IS}q z`yrGu&y*7{2ukvY7xU{MbR~WA2Og_b1=`BKJjMzi;~~7&HGKG*PcCtdH%Wbaos3vC z)eN2Z80>c#52M|MTEAD`3F}M38Dl(cVs|I;fD<;9<0I=in#Kb_|AtV~?R-`B;!(=nL55oKxdSX5K-`cWAov&t*l;EQ#$Qg4B+iS@ zf(Ty|;3FMdLHe7_3ouR%BDMq9mkg3Oci?Ozc+PX0HZJ~zqca|P0P;u+dO4?TGz<Y)PPw#;1BtV`7N&^fh&<7qsF`fWm3VsmWN3bs>)m^l%#7(D0(Z@=o{` ziUtNA;wb>n>2bEBHu=_x>!icsiF-epp=&k(*g1+mZqkF3vb-V?TD_2<@}3ZSa+?bl za)j^$?YjqZh01SW+7f!bx4xjJ=Z2&re{cjguEv zC12@267(00X#*;NvD|g6{?C-$*O^8;oL4iJo68En&bV6-?0$=C1>V-b=N3$>`o07BWuYkOK5Ilr@ODCklt4^{^ z@Jjo%Lz~>lgoeONe~UnpJe+9&b-J$;aE*JBbo56*$%6idDVN_ds7|yQ0Vv@1r9X>8 zM1t1&Tyr5@MW?8b{sB+QcU}KNP6I#j1IEYTh4VNfKd0vn*rKa`0Q3)OSS{eJL3J`({JSg_5|1vH@-cHZW9>6PwpsjBZl`~JZ}JKjHN zYpWa1$7g#ya7*x2b}r9l5^}7_arq*!+*Am<*Z@FT#xC9fuqcz`HCeRHxBQ`-4FDm( z&;lPKpfIF`V-16FLTUyKS{i|*2QJygXeAp9U+_LjH z%4XPgB7jCVuDIVrk1C((cX+t(vI{4N?YU1s(^eMG+UCZV<-m8-ut#|7ptQ0NFJ6*1 z8utywZ;SE6Wv%dlv0u3uQhyk0_wDxq%5;6}$c^s~b)Iea@uQCSgTi;ea^#Qm@*MnC z6>zfJrq2So0ifj5c{|4^cyx_iZ4Wx93iSo zeCsOp+*OGm)_Ld=9N-OHu*oL?(Dm}lTD$b%Lv3zlUA7aOS3xnn8d7cfK>sWd&iZ7< zh`xY2BFf@<48`A2?n3Bwv(@e|;a6$Zp7a%*MPDM`Al7%^HT^)8h{YeJu^~tG@7}xF zwr}6{A`2S;>@UQdV>9=kXtVIx{|YbV0^dL*Mfzoe)B3zUj`%m%>+|=_pOGI$I{71@ zF7ZvodzGky7>5-#4Ke?tV97n97OY3@B3Ha!LGd@pQEA}OwKo7*M+4sJ=Ce%}`baN7 zBp`a3=vklJ^j57e9v4ff`TnuIB;K4q`clLvjYseF*B@mOTeAjbY#B#2a=cjF7<1{ki zX{m#zA+9auBJtwtN@H*S=H&<4qT&zUY^2$<&V}soxb5%VYumS8(b#wB#ZiV=k)(L; zw5_deC|;SgW#Rsg-~Wf(Kln$#MvIcGC>QUZSL?j!?lv1D5|qAxfwzBQcnuNbPFO&# zheF`RvNOnj73+iwL#NgQak9#EvIV3&mJm~(yGXCKF@(6s>jtX8qI2AE4_{||Wg`?Q zNZX}c_`Eo`xnO6s&w3aHi!RDAzpOc~Z*&kF6Z0z`rEbv(FSJcv;$s$-%fAwD!!zKG 
zNoo(fTU%Rm{QmhL{!shRe&A2GKmA{PvK=2zypcpM-Ek4P{C(M7#jKLfwFfS@4}9PQ z?cGnky*>KyL!FO_;&$5O+VP=i;l_IKTCp+w5@RCEjI~t;T5ZEBzylMkRQYpxf|4uv zmn3Jz?>DaLNIIVZ@TUz_9{Q^Pn|JC9uhi7|R3&sAim|=jgSNMQ&~|q2d1C+z|GYYP z?_j?j=|@{9JRzs~02?G&v}b*)XoMGl_H9G>fRmW_;fNwVjx|PdUdPB+QOa9y87Jr( zzZ`Wa7v%wD2Sk~$XWp(|lrk zt#ysirEthsm>`Tb6>I6tI9+N^x zj3a>i*cf_!;WFMDPfD;+zh9pN_zHeveCVlha-VwYo!MTrANtmHZUCt9B+76tKc-V; z=e4G!+kZo_AvYyV%ORl4b0k|}@Jr|EjIzm4ms3Hei%)QPek8ooex?J$J)(6Q3h(sz zUGf;@pW`>3_d+P%)6Ru2NFLQkhBG8!)|B&`B#?E{D0-<`c1JQg==#9stdpvv_C~&a zuI;%D03OEz27EZzfde_&&-Z2nr~C0p_hA#siwc*%s8@O6 zU#MQu5s>*K8py-{MY$ON_yxWYEb=r+KI9=Ye#T?FNP7HcOr6i?`n~FlTrpPmseJ25 zICUT|NK4(IBj`X!I=UUu!993Xx9DVCtLs`%@J7-qCx2@DP`>TiG?OXdP1qv6#=X$@ z0CV%J`U@{H&f=ThPCBNdL1)qm@(TW@bW488-=culpw|yaB(Kmk*qf5{TbO49d|p$Z za>_X6{*u}N5BHn^?6V&DvfmZ&pu@&BgU)t>F7vOSH*ucwhDz!{Kkd?l*6-16Gi=K5+|Tv%1vun|n3*o1M^n--MD z_|IF4(A$Y(q=;u2(`ww-kBye%CB|QL!KO@cn47z$Y113`bFedbB92x)7@{3jm?UmE z<)KtRK%jhoYE0*>g_TXSQG+7@3iL!g%BBQ-1AN@PaoZaJUU}t}_WbkDx0heO-uCzR z+TQ+7J2~EOYwMTT0PsJ00{{eu(IH%6?7%fpX()zsew8Ocd4?crD(^L6WQc~(wtms| zTIDGNZzG`N+Stbn2;eAub$mUjc3-c2mCIwv%d#9nIVjHptIimz{A{}yl}(UwQqGH_ zl=rm^vkXqgMg76~3c5a3L&+Ew23I=-`wp==br>bhYl6>_YGs&XU_b{)Eilnupqk-7 z9pE{A3EaUeFi8676b02T$NFAx{enj_>2zkuY4b};mprO&m8*IHNp%jmjM~Qu(9x%K z=ycW11&`jY>^gAfr9szoJVDOn2-!ks&}}!63*G>-i=aW|s!?R;9Z2f^2kd0_VqECd zH&Wz?$X~)asZ5_ZP4?xV_-c|!*%veS3 zX$JDV5?@q{sqibjT95j4cIvS}6m{VR4XoSQ2B0%#T2~=w&dslAqJH>D^vdg0MDOWc z=M#8@?o#Ko{aO(1XP{x(GJs|On<-COx66?PDcAKq3Q#_V6+b~N@PN-x<*Hs}m>`vn zj#2{`?E@3>Maygki1eVN`gx?i76cFTwQ=pSgM4zVs^b{S?TR>X%C)IF!ZnHxoO9*Ajvhh-pJkzXwGal|#FRC`S~y$k`mZ#=HliQc(LvCse3e~X zSe8g?13<_cc}5x92%3CN+gZ036FpfuThUoudn}`zW1^vM@RQ?;hzBpChX3W2+@RIv z7=EfIGC|iMSCb^4xX3Qh-Mjui{qf;;d-j=MYv1)<-`2kA+rLK?-jV-3XfHqiTszsl z*S_j^{~PU>eq+0R@#j6!-t^cL?d2Oc+v!B@sI9|;9XA~dcgcs^_a|M_*RTWGG>Zo| zm9fzaAMJox@{UbO__8IBDS+j;xZo#@_^mIm+m>+2i~$@=HU+_B%89TWIV3nhCp)3? z80VmivFS){qvMUW71IVSydIx%ICLLlO0=K$DqZ>7JV@5c4}93ai}wuxEHwHXx`1f~ zMmL^;f#m9G$zu)>U>bhS`fG^dJzD9!VM1n`( zj>%*s+f28m z2az}@+>^t7(Jy;rL(8+z`kUnsT)pHr0s5l(mi}te42~(FEp&@Q`wci6Q@NZ!41Nrp zoQGbD^Rd!nylIApWM?XK zHXr_bb#0?MBES$SG|Cc z?RhxUxM@gvVGok1&jBO?)L7AVmhum(18g5EH?7wJS3%^fCbrlYC^^RmGTw2`AJrY( z(iBvBP|K4=vDmJ$D`gX7DRd(juAQ3KY0t{QUZ4s8yr4LXSMV2jkvY!dud)h1fF0Jg zWh{PtgG0ZN`Sds)c1v0$&QCO6yS=cRIrTLvcARRR_C0?hZo+SZ1~*cHW68prRv_*{GC#TVQ9R(zs} zry$)llU_*)3o z&IO?GLM<|%k{rbYybu=@gl+>Wj^>-VltIQpFL5<`iSa{b?;{&^;L{r)!7EstCx(gs zON>$cT30C5{WIFPZuN7_%QbpMeE2|Oo6L_nhzjT!0Zsh8QNb`0K<4v`SK=~o6UP-U z^NY{041`1RGwrgugYoJ8ANWZ7kH7zqwg20X{7^gEJM{+4$-!A$(VSrW?k&f^@XF2l z{e34Y33Z|16UGO_nNy4r)Ua>qkK8%re!FJfM#LoB-`H)=)+wA$Ft|*Q^h41Hx-a z$|PQgr(?-vZ+oxZzJ1eA%kAv!==`=f1RN+# z8PkNp&_@+}`o@lMkZxLZUb>(R*Q}TMCX2xF3O~+OADp$ZI>r<7qMh50>*PM@H3~TY~YS^eH^Cm zf+x!W^anKRk~#iiCo~?M=vsxM9~GV)rvarKpOQgIKQ8{U2eV~N0YNIZs6*9FngTec z9(sD#r`-}BV1`H6scX`r{XNU3?5AYq8>6aA8f87#u@LzM6g@5zXPq8s4?Y7J50%kt z_qv?>L~T&delV^hXM{vhYZrj++0qZ*gN{K?CBJ~QU-W066lH)YpSF>$K=GK!W;Z0WkpJ7zB{e#^p<%G4G>t%^Sn-a05AX7j?S)3!R$6 zazi%2&^AC{A|?D@;PCimI}Tgn8a!U>q)jW92JmO0H}@MQxox#oG;tmHs6$KB?W1SP zc3Trr9{jNhtLg`Bp08Oq)#rmz(_pCaFW#Owq^o6UV*ca-E}`r$<;EE9+|-9@O9-A) z1_?g5ttwBaV_vH+c3KK%Vij9>g_mshVX)JgRwUq(z%S zOUh1FG~)O=_(Dc1W8a?qS9$zV(gChH38cE5X%4X%ERO~3Q3a5j9mk-@lC9gdzElXj zPOFD)YU3hhlnqa?jz1*@85c632z1kOL*qiAWH_*x(R~P2u!6MVHANt`9+Ti1D*~pJ|M1XpvgR6AoPbkEx z@z6eRaW8Ot!ppiepI)_`1$TG%6yv<=_~zx8uD55Od#2rb^#-3q8a4o!AVXPLA)dV! 
z1!-8}6f(aYs7VLv`t@MCqkZ=EXX(5MC2SdNt0N4q8)6@}qOw;g!h9Z=_Gk67c%Q!OjV z<2a8E0KBdYngDi3(+J=AKT{huv0}o4<8e`?1Ef_RG=Y0^CVF`NHuBUp_C|S^Q4{r) z=Y)x&&>#9Q*o{%96NLPQQE*lU;sn}U{Sy~um zEuBiQT-RVXwE;l&8K|J|v~4@z1D9mze93VfC>uRMA3(ppj`rLJbvd(2kqJKj30F6%V`rDlI|J19`FQ2Ss>*kd;^Xs zKR%~iTxXj45ZCyhx~{$h(9apas%{*C;IY~}?frjf?>v-Tly5yr7UgH;|Cv12!lf5R zeO7)*X(P1N+(2s3g?&y-EWkJ|i@1&y$Ut|T#Do^w&0^9wExR!d*>IIndE02$R~<J#`BPyDSL@#9zSI|y3JZ@8*nwBryp0w;8+PH$P;n4q0{!kK6|EV2^-|6vEA zH2kV?1-~k*3gl5Qcq2V_bn?nDjsRusO@nL-i(i%HI`tJ3JJOXVB2HP*MlPeae4hOI z+-=A8nS$mqZP+itul+Badr|8!nR|NPS)qxa`0ld1ou>ot(LGRAJ+wI=1>+QR~ z^E=wtf6F(wqupD=G2fnf<~Q5H_MP^XU;VZ1GuK=Dq7S^eJ^A<(?fDmY9qe*^TIQ6S z+R~ShqicLPt+Jsg^a1-pYkf-@e@>S9k=~#i*&#>E5c%-S4FE^`2j0cYxTS2j!IApt zq1p)u9m3x@c`UOkZ<(CN1^{p}j-DydUtd`jEpr|-Stu->!3kg<6JHdS4}H<*sRL5T6neTxc?5#$dnd&!KBCvd;Z@yz&M z6K3hhH%SL`@dRYhBpC4r1ie1J5Io;?fvOi^Hzd0I7+Us)-YK8*s`h$qU5{^ol>7|i zj$q|aw@aH9d!2{GXTAZ|Wo$}Y8q*i>+IRq0Wj-+Scj&0D>buJ|ZbwsrH#T^Ii{H4d z!mp$16AMktfIdX|Q8`~%+8CFbrM{E0Jn=F-LMLQ(e^XG`c@)uo%5(wn2BSC`k99!l zD%vd`K$Vn)-K72jmpP}u@Dna)y2Eb4f_#y7(MNrXQ9L5|0Mh-WP;VJ@5$B$s9mE`2 zd~)-bO?J%Hm*$svR!^gv=DKQw7?Ze#PyTIfURLaP)b8B8sj>6WH~FE9`Te}&6GkL9 z$n75~7Fu3y|Nj5!kGCKF$)C05od{>V0f0FKgoa%}vtgv_e<@JyTmUR<_=Yg_h@kQ_ z1;nQ{Z?ya+)4DM*#4E~CI_J1%ZvsI6*sb_>yehohv~~QC&qHtaatT|bq-@1*6 zaeZySOM5DlGNX(l>h1@E4i=F^?>rUk`Gv|El1}i3Zg~nU?3grU;G4<<@Tj!P$SxEE zLGA}W_~G_P|JWaHKlsBx(hiT$90#7Avgz}zt*$M#{r&y6BKh#jT-JH^c6QpmyW2u` z)*gKD!S>+~f4F@>F(!-ZtIKQtlw@suQx)SoJUYnF1B4t;WN*P!yahn)I*&I1EG(Hq zzhQwV?<{x4n&e|(mb3d)@-Ohi9pVi6FD&tIHhgIiq=wv43`xl$Y33vKRPk)g zA$6RvVFf>_ISqW7H+YQt$%@Wv9vx6IXuZL($`w@Eq5&A!H*U)gb&5u4AV2-tspv`5 zaZ=fL(KX;?3?JjKJ`i}ofo}rJ&sF`B0TB2ThsuxJ@TM^xiC6Gr^aAkv4VI*U#kes7@yZJp%>iQw(P;}xYOw;JgzVORr^ssXtGS)=LhZQ=m)6w9H))uTaI&d z?d(j(4&fDuHbS1of3z1+^hR3B-29Jqj}N09^_AxJS+0YAAleUHRle8fhP)kU$zOhx zd~k?Nij_PJtmCFnE_!>@)evU{bz#RCtPfeU+4dwgog;#{aV08Hzv->lE;%By}{ zc|f!uzR0YIzUXIB7GH`X(|@RETwH_4@)&RxjQz#9edwGjFTHckcx8OTTWwUiIHtXb z|A4{~c&m=(PkVX8yKn%eNkZ#mB5BxzuJK<{K6Kl4V!VMT@`m=x&l`l-=F`gze9AI( zo^rPm^-DW1|JmhL{N>mj_K*KT56Dq^iK;Hcx_q>wJPzO&_*@_=d|q!zjp&L~?2Y?`R9pbH?a?24m-jVyiuNjT7-+OD;WkeAv{dX%DKG&J}hAnYvU1Hh^CvGJ+{ z(3I)robmEbhKgU5@R+Nms9YK zS~(Ac@NxVLh<4ckfH9`|n@vw_M)J#&{yW$U9SdD#xn6$I&(M}qZGT)3X)ypqCm3MJ zkMiR@Ak);I`C)o|VY#S3;7+`$$EYLf+0ATuf--4TnX1c#hVv>vv?EvLq{F~0kCt66 zK#rpZxZ-^pkJ?6gsyCS&Ns2fE*`DY@wm5WlZ66*UYZ8Prib*1`I>Wg#+2^Up{aqIP z!)7L$$m}1oll$0C)VrW?U}|&!Xx}@aO%paEuLUW`If(%Lgum1qj((7nX%<3`v;p0m zTefU;6|J6RIq1f(1TEy_U#18K0YZR!7(|8))P(0oGu(3>y?nG>^k8z~kdgc#aQ3=Rurtn37Bro{A7&PGMFZB5- zn=s^PC*=o3|5iECjo5s%*>X`GC9qiK8_0sF3!bDip=3}{eZ&Kx>x7)f3#aanqiaL= zFLe}NkV$`I{MLnGkjsz;Apfy$27ZgjfN>q5+DN)YANGTP@MUjowq~PNwKJ`Qu0jR(M zq|&Hm{_zi_sjX;jWV6sCyxz#AKFl|ag1?Ca9Kbv}4gq(_nf8n?`yqY@+LoCAi9`oJ zNuwtixHQoR-}aNF>j+k%u^(Ya|5s6ETaNph<4@dc!jH3mtR|4QNLyZgTUB6o+p=i zMdhB#43ov$xyk}AzQxy)4LIGt_@F$S9?njW+wPqm+tS{_tL+QE;0xNH_!ECzMDY69 zh4$%B{d&9k;*0HTzwTSwb9Y+%=u?j?NPf8OZSS^)rImJYz@{=b0k9EEbR!kv@qI!%c;>}R)2H&-f_SYB0IREOv6x32;KJApRel<V}6I~aAQXUsTk$axIg0dTgfo@ep?Yn>K@&oUCcl&q$(08_P`2*i0T(JRQa z^|rFH+2&SOl^GLRY!3a5IjH4aMI92#fw3LbFPf$Ko4Xb05b}Bu_S+dW>v~?b6k2ZS{eNgkq^p z6hC9b_!Vr^_Byg#$6{(HnxYRy`T#-S8-v-pLp(AKg)hrPkNCmT#X=Q}I#)=^clZ<+!Bx&zSN0Gg9(rgRG|Y*6Sx;#{B}u|0vHdWqRRy zilEaK@x2$iV3z1pZ=cshWY~9Bt6?EkJ!|-x-BPj%HO1m<3LB}i~bV^;e61;l@S87Ez!6M!lH7Qal&ZQ{Rq>Yzx^Q=_7rS&d)wiQ0e8zfGPuoU&4

G6d*$U9yfI*Vd&m5bw=T_}g>mX$$lR9gAPjKan8U78(%kL|X%d z9~1Ppa@^^6Q?qU72NCvX{3_3B^vTq%+DZ`#(5u?+{+zg4wxB*3>l>kKe-qndaL^ep zINO9o=?@p{^$u`zPPQQ0MBi{tKJ?KhbMwmAi7Q4Wl2Tj-{CJgxKCfwI`=t3n9f`zo zmHC&Q29dfJE_umL^ zbN@(v%=g3^u zK9&15mVE*8i?b?kF#pJ&3<#5aeNMkYCgM-mE}uMvyrLb`nh+5jeuM(52jSFrd%~B_ zI>vU@b@}xBfcTqV2z%{v2W$Eqm!qEXCBIeI?OL!sZO8)#Swwo+*I=gsfnyBlF?b3a zDEBdV83ZwQNHSNMD{ugKX%5rVqF1}G30f5 z34C5QCmrG&Sp%S)`>lYovDqy7S3j^^gunc5ItTe4U3WPJznD0!&CSOfRKOFl0CYwD zii=#{b`fKBTuVOkMIq;b4sRScL(?)yBNTvJzl9TxfwCp}18^2!K+vCh5mcMDD`k1E zK|ioBBPt7?{Z^FHhuVvKBw>4+B zO7JXYRB=FNbcJ*49?ry0X=-U3;)S{`lkV;fEh= zSFT*~SkJl{8w?ri2`8|>2~Yp4EqH|dSa+2zs%@_Wsw}VepI;I`vS)wx3qK>9{MP8R^jhj~sqf%Z{yMcdT@^(E@JacQF@LxpZLYoOD$0Nk2{wy(y0lOOQJB zw}7~gfsfAbx^kQ~(bzxSGp@TgZ+XIhZ)d;Vx^+VnAh{X^INZQFJ6qA@Y~r0t@H&~q z#*m2|C(feuPy3}KI~JKa0V%JL2l(88-JT|v3vw>J1{ITAl&1`OascySA)IQDoA3+B zMQODtDAW$1IYI|*#b*KJxTvRcCn*=}M*N$`w8s-a(JAM6sP>g~%+5Q}&5ftvMeV5? z&K5X5TT{B)R$;d#zM+$ItBW$6iyqUjBj*kngpV5l99>5P8e?LPQPvN177k?KBVp2yuH4_lax}* z>wp6`PCA~dCrN`+47=TY1^3WrXUb+estSlcQJr;MkF@f$=2h457qU=26^)4&WeQ;l zmpUi`xhv0ojrNlk1)Ru>Jm{v~$O}InbSdAEZDfD#J|nMVObpwx4!Kl73V!$Vgx+@I zbNfjz?Uyc1OE@msCq;B_`PZ1BBuGc!$OXAeCcHk=6MEs7>J8-c^R71@Jyp`cDb zU-Kl5{5QcgK_Gmw0fB2(m(;{R6R7--lwS`kKWdu9)8XzxTUe3*mOuZ{M?T!X@B6;5 zZC<_P_Md;|Q|4gvb{!C~J3&^LCaI-mjwnGJw`Jn?%CfF9yC(<~Zz zf(ck%UhxFRZ@Celj8macY)MHT&$9nX-{WigPrLy@6-5$Mac_HNdCk0t%>dW6R^$2t zAQzbCtN=Tk0dO)Nssg9)fB(DN|M~m=?e?$#o^KMa+yJ1MU@bQQ!2Wn48$JqbJ86rS zliD+n(%Fy}Hf6rMUPI>T9-|H6VbbRsEO;{ETUu=^8?rO$75~U*tTYhv+3@QH{LglFP*f*ru-;CkncFmF%5;m-p zJkFNCpFK+duv_irad_f?7Veim3m4P-x!1ox{lgL5vPonzH@rd@82hJ+jk^9vvgXMl zmy|7GmzN%VxGi6PPzmy9*m9yd<0d)%h#cz6rX1tQzuA5$bIyK(zBdAXgOly2AnX_z z;us*$I}dT>i8;pNS(wH-OCKQm>68<4&fufo5cC_` z3F(K6Vsh9(#Fc#vS9wcbJcTK!9m_Ui?x>?Y=7Z?Qc?nJrx=K~v zjFI&Ka1>ad=rL`CK03q=0_9ctifQUs5Oxr>nK8viv4LJ~R#~?nYc<%ya_S$xJ5T8! 
z)FUhhp19;y|H$XJf64D`-}7(%zV?GZ{FkMFHaPPcs8iut^k!IY(z7;1-&~R1(9Y`W ziuVPwVsZDbVym04D*dRfZEm#pzW3?&!S{Wzz5VTPZ`Nsl=Xw1pH*mx7SpDGU&718r&pp>(eCheNx4qN$*!V17{5Hyy&=I_#S9pX6 zU(=H0O#8K#RkU+HH;MHcQk;%!t2c>B_VqRe+Y*rW6h2euN~B1>ofn;z#x-*saEd0%<9o=rKTi4tu4g9_5O^a$=s~A(^mSjqiZ;Wi z#nHGt#|KZ=KOJtMJ73`bRpm!*6_^5WDytsQsQ%@fzJicVra`~*JN}fVLG^8G66DzL zJFbCrKiR8dr8DEBzS_hcv7sl^!C5zuf$8?5{r=`Oo%A`@V5b9)~E4Jk%qs zYx@sfc|#HOnl1_pKfY4&gvBWY&KB|ZFI2ORq7V9$9m$K=t-)Z$NFKgYKMGMg-=E}E zUvqh-n?B$y-Z0V&8&tOFt83HDdC(7#UojYZG>@i9_*CEeFn{_{F7Y|Pt|5jp)KB`6 z7i9+dgD=jJ0mXczZf*deEni#y)!)b$RX9$-*GZotwaQQ)JT#*}oP!IV$ZNTdkDly@eZNpgQC$%2 zh@P~g&cDkA*tCfYay}5U)(^kr@)7^#kg2+qrIK)^c7Qzyt=kdq zDi?AfzKK3(+s22fEb$^h{zPLo<2@qd?R9HwYi(0;&E>0C+9QuV)E;^G;r7r&541~H zwiE+PKf+abJ#MkiYP%F}$7{kbx}gCZn-Kp{M@*m}?29p;^3dVW3khN@3wfaf?A-jO zKLfBi@@WShmplsCz)3zll?;eYi2qp=N0y6o3v2-RnV(nnkLedR?a+WAW&n&E0zgdJ zLAe)!i4#;uFUJA|b8$UO_yB?bb)hDN?vOyzlh;GLiLV@A4|oKe&azwb0KirCW+4ZF z0e=nfgR%k=U#ByVl*dwlQZUz)8z~%sl@zBS>Jva&)*Wx^uU`u=cG2JKdH|13(O-SW zfSm*T7)p;i%i|2xfzaFy;%G)yr%?awiS`Ih|!= zN`Lx+PvvWXuR6g8l{s=s#|pYDKg!GL*aowM>a>41QuzJqATOH6?i?mvetojZuDXHG zZAvHDaI~MG!URK1h7KWM-3Pkz4bZ11<- zgFQ|Bm)qLpp@SbbLtBb1JwfdY@Mla6 z@WCj9M%hsoZTqQ03hSJ2)9Ip3tUpcnPk1VE>hNvixH&hG6J@}%6FtJ2LTJbZ zUSSft7&N>COL;h3ZmzIVf;^a*ms9p@QN^RXwvu^*8Qy6VeIkFq$)IpyLi7_A4!zU{ z1t!o*Cc3XH@6yn>0;By_d;~Vp8l!XxYf$~k+ zFE~=B+K&lJ&|w{_o>IiF&V|#j4N@LAMq{kdpildnIC}>nH>`w!G|Nfg_SLrQ>zpw< zCRfo9rs*U-Q#3+sEB z4i=>|-oR1q8KPq+4CgF7#?u1DGkzRes88F`7Dth*HbQpb2swaDa$rX}xS$DHt8HHk zZ~3q;H`5tu@T1}~uB=~lsvYc$AN$s|a7Vu<&BdHAOEA6oblbWLb4{7} z9+R%+7l7~=^ImYGJ@+Nq12^!;uXQ4r0W2fq4;wl>Jn|-{D3jk@5)JiM4fAPjQaS9O zGE1_@h2?y`{6Q?B_(rwL^NM!*yVat0YO&3WJ>qv^vFsv!mihs5wk~Khck~wRJh4OX zg%oaV&#j0KfM|;SH za-1(OuL^N2IyvA|-Nj`c*@Y^7k=6jw@>>0sGUb=?e_qHuj{Ze6nD4Tb4PR_bGccTsvj_SAV07+n_Te z2aOMwUrs`hW7(5(T-H8Rwgmrt;s_f;|K=0r3s1e0nF%iA81_v$?zQvn#d+y_k?*Xn zU)JC!-K$^HKU_z4&8~5Own%L;E|XIeNnlo&w1aPiCgg=KPGo=R;%MhiyZ!Qus(<7s z|C!LC0|Z4r=Q;~pk{ubf;)$>!@!?5qwHf}e%N<@_Zic2QfB$-b%;ggpYdVkkLP1>@ zx{9-Vg6g{YEZ^a?Xl~opXG2`eu(={v1^P>%sl#eS`HsC;G|L(Xn;L1qpo| z6vSpebQF3XaB!*^M)hn13XUIm?D00YzNON-*0=_4my_DOI6|8qIF&|Xw(0%@o{TpJ zS_Yb46E2wlmcJUuFthq|GfMNDga)=cjBT4SV^$wQC*$F1JKo=GH(sQl$(EJOx5*(V z{AA*?thtap%kPcneIxn}BRv>q=vRR}o`;}4cpvL_7ASYx=c9?Y&tHfB9gy^d;}pc0 z9rM2#8{%B$rt6;vTy5OY<4PC!8}z4Yib9k*mic-5@L>_Vajo|J_w^urn$L_f_>WIt z2@?P8rjUr3MdgTp=AhU+Xu7sCf8afGhQ<3C==>yx@^h&dZYE$Zb>oINELpy0T4|#V zAY*y^?T&(?i#%dEz)u_stiN2)R|VT;Eucp%o#jKo9Vj1ThOn3a>GYR9Royr$L%)$t zX`T+uu|Ykd%6el8Wqp-Ey^c!&4Ze=J4{nL?AnRB$F>k&jCI%uti8ln)H8+c6ejRUo zVa)X>op?H0V>Yiu$39v7{J;PB|D%1+zyAZ?0Kf}#i6`Nm>5h5Q-w|bklKLBfVzMI$ zy$#2L!a0k3D8~(XKXvB$4sk%}GUis(6oww)BlTYdpg-(D^N-Rya>h>so-?a_%xh_z zYkr{zpzBeGN-IC_^YqKkWBx8dI);&4GyKYWy4E2!X{x^cgW8lXSu>&k^9^43B<>_W zR#IHM4fRf9F4ck;Y=E2GYa5%2bzA$2ulUOL4Zr7G+F$u=e?uf3#bP}DK)AVi&-@fz zwU|tM3p!#~%c6&;n_{?+9wih+tJ^Spl zZEttm{mOGY(G3rjqYe64@D?%)Iz+GKDq3Zp$48+fXfyrt$H<-eFS-v}!{3bQhDWZM zYeWwW**Ojuba{Zp5QbIw9G|HF_cuyrxn91HJ#lo*0w8=Jb!`Gi@fUTf9Q0JvhK!$a zQ0|}cqrUlrcCO2RVt>@{q^)kVc!)Y1H-pxR@@TFEBTLqBmol&2Ao z&q6vY%b+*wkJjI> zMQh)np8BUw@EthlV*s?Ohe*x2{#EkOG5TR%r#z?&4s>o@Zcu53N1T<6`4Qt#q|U=W zg^MxRc)+jpy#K&D2mZqAv6QuKM>^nj3exPKDHuGlZVw*H2HjDQeqrB0X{8T-KtIs{ z1U}g#8WtMIwG}s;|Bx5;%J0Bw=rnv!t=9lFS8tSMZmZFr(w1QG8&L5JzGYtHmAjR| z{fTu!>mbTRzaRSo+6lf`yIfj~&oS1|6fTNZY|Gs9K9^55z9?$GEmwF` zG0IwgizQe6Ql3l1z=|j2xfP#CZoYiLoI^*&2wYeCU<(nqRi0^7OW;^s zi5OnlQKw`>vE-5aG8{?w#;mm8?B?e;0N8KHH>Xo0VL01}9X8~MY)6!pTOkyl&? 
zuD-+ZzI@KWh%e)r<^mI-~<*lIr-IJ`{(U5pZT=E zQ@Xge)YexwT=qzl6`aq)(z2W#0jivy=##rplR42)`;=iq!h$}1n``7i-dtOCIcTUG z`Aia-pt0cM8^yv%9d?!DXkm#PXS^EaAl)jIzp87%xI_zkp%JdMH{-3o zZ29UhF2N^7M{LM2eaKsB^Vp8$8W1|bEH<_f^gUcgWLxsttB*TCCEi<)Xf6xntj`(fRbP;628e__pSW{NUNVg2UxG1&FcX z3i-p+#_Ec1RL(Cgw5^Rbe>@4B9hcS>gRE=Pw=6rwhT4&6DgQ1RG9KX+g%3K4PxmRMMY;Gn-5DVRKASN) zJn+WDgT38$=jE4VEC=n%<*l|X8%O@&v#xzE`VY~rAWvk!fXRA$vXpC|? z9j7CQHw>Wzv(WoQ#?8o}z$mi~6txFkFx`e=zmSPAH8p| z{^vwLc{<{YMiBChyf_cAen<7W_9Bc5ok)L+@=e#CcypUu-Z08DVqA`tQyGsxG?hBW zqfpTHgAY(&A;;8kZQ6bH`p_5fir?^6Tu;xbivCP1elA+T6O?TaFnwL|AM=9A(SE!6 z(o6Em>U(OFF>=xK8uel08F;~~j5%WivEc&xr{J9Z&C)juZ%lswA{zk0F2YyxoAv{5 zpnV%z_20su{7=1^5ZcbuXYd?{lhW#(7 zz!>E$s-8B2>c6x6kLI<8)W2WLYlaJZ=ucf}3VfqTi9;WteA1Tv&~t{8f48?fj^P~D z9pVO%8b4Qm;G1)A?lt6*ej=XuHf-r*AXO*uJLZ1|$Z>JSULmLOC!9|-wm}mMmc)Sv z#|QqVy62#(gsd3z!Y^rDo@Y`2RAVM@D2ff@`x9;mFSbipF1Nq;Gyik@ra$x_HHE9~ zNOe`qZN(EyTm?M?TqMK$fSC|<1T%8#Fe~4Jp`14W=qKW&m)zhkIY4X482QAt?1-c6 zxB5?%3H-Jl$|5U{iea#&dQ&2VlNM;{6GK0!HUe~%jLQ}|79fAxrTp-VJTKFSo0h-mg@kl2%#~e+@uN3acn5GD(daU{;h*@;*EKHV3mJoSwA?ga1kS{Vex%d7 z;YeP|qj>hb#j>Gd$eVLtE4{id*VJ@<;jOWw_gQ3lN*5w)? z%aq{`egYE-nxdfdWpvSAq#36fR@xq2Q?4Kws+&vh`2!!eh%^I zz!R!t!=z4JR(Y(as_p38q$5l00slan`W@|MoXhwX&Cgc_4$DqgP%dEGBy+(PrLElJP9HVA?fUjK-*$HpP>Af!`{*JCr z1e7r;;#xUAp7<>|;fp|FJ&FZ<0~=ZkN-i^Mg&-e@lRW-|TJm6;VA<mCBKr){OPfkSk8W2DMW6kqWG;H9mr?a{{`X^%eqNPFTJeiFC{?in{JJgWvEcv#Q7-frNBFU>^pj?s53pVi{_-oXXZe~%*GH}) zJGBMhlvO%@Pr3Pc(Z3hs(7FPkuw3+NN1`#}x#9TIKKT@Lp_*<8D1|xdYITXL=!0l0{@TSowT^$Nf;qG{6%8_4&3Q!I@ zIaP9qqHs|<1EP*U`R{xK;GpADOmwQOOuz=jH|VTm(o)wpq-?X>MThBh0z>Oud^k<~ zM}71@Dhqz$WMDDqn*4I$7eTcVc#^-)r&8=?2PY9QTW;tu0oX**g&fM>Lw}~x*HNbH zebg@5owmM!lQ6Y#Q{|{TV?W^4M2G;ga)+|nhB|9~Pnw_UDmqjT$H6P4a2$N!&yGz> zMlLI>YrdhgzPT>@h?{UrE30j8MFGQ-j(9yB@7bYLCmy;S(;n|5*LaG8a~+wCRX_lg ztQ?>e6n8qNO_>J*KX`KR!>>SPNTVIM#l$ZLL4G`$4^-OmWUn2=zmgAmTfp(6u91l* zn8!!$$Tx8&?YLe!cbea=mhO4Y;qDGQ&5_@cuh|jK?(?H#;q?=sv>!*FSY{Bgp2Q<> zCzvpS6Mx|5``3&!+!(8Xfgf~|4xpdV9$uB?fh$0akQQ2|cA0YEy% zYQbkd#1D3G>^4LTJ6^Gqyz$5lNZbiGv8;t|$C9OFa5{-^$2cH1UC3PRS(hrC15|uR ziT*l(^&$SnlKlwR&?(QN<3zMYAFtPd1NG{?k_$JsjX&FF0Cqm~NXOnmDOzX)P|=i+ zuS8c2aNz{uLG8;dy`jNHs%#+W8)UkCqdjnd*5iomPn75!w8EKfyB$HvHF!9(UTTa& zukeGJavpq<7j%HXc&%eV$e{S?ui6p>ZQ*C(-}QogT@*h+@j3!zI-l+A zPv;7#ZI3E!9hSY~C)Fl(+`ojKo9FZoHH-N4KYZeXe4TZtvc3tfKrQHtCp&^J`N&1S znTbs2+wvOUeP56@Ec+Fi*eN%GU51^O%PJ>6R(ZOS-K4@n9^VyTT3wNUT~^??+Loj@ z-#}klb0D~`|^EA*{Nh6A7NR{gN-Bw+d)TLri#g&4q# zY`WlhEWGRJQv&e@0DPqCM=TjXmlq!Q-Lw4W7jlJH#e;m`6Z-5D5VKUYvcdwY#+-ws zyQ>&|22BCQ7nB27`bFU0C!hfxqObun(CJ4$_*DDLfAP$cBS9CfrsDsf{^ec;)x2-4;dr* zw8Qe!dRyAqX!C2A9XFg1FG_dw_!$VGec{!A^;y~xDg>wT%yI;v*Zv^%X*;1krP21$ z{+@iog2wc_?dA*5iMGSGzQWrrE?Ix*$-d@T@ql#8lWmK0t3nH(;1x)Jq34wPjQ>II zq1W&mJ?`x=J{MH|3jw=|NnnqiDxU1u5bOryGV!y4C{D6`!MXNct9?P$@AVQd_Av#i zgico;XXkG{zhL%y2L46se^&G(yMd3;8~FK+<%EuPowV4e=o&RSPxVjL)ymixH)`-TT)RINF5xk5L6{iy%YJokyMhkuTi|joB*?T``>OFjah5-v zm21U4*mm$6_`D$1A>=TW8AG)n02Pvg`nY1}gM)VW)tCG!02UT_s)CU;`U`E+PQ@PB z{D8{~Sim{@>$;9vuG>dlC;tV#UcrpKMgUFn_*4864+gE(uQPV&bmTGpL&c8+Ot()Q z5_m2E$-zF3(<+m8%eqC`gAbkWY)_fg@r@Zf{H)LU7^+_3cbh6B`Vm#>pHIgHRC%86 z8N)39aXdeNpUxMeJQmE-SM654VSNDbxGE?)Rrz>;0si9&_737VWXU{;>lyh9m{cX>Me?ppGh|xhk7yp?7UHHZpGS>?j;OZmSv#83Q>d-A}e@p6r{d zYLoApv#?3LzsKhT)i*C)x!Qi=AO55E)!+W_TGMDpEklx|1;ygu=VM4+y4=9ug%IqG z5R}!`akfC=u4CDcN)k7uo-B(9Y^ zt+>ST(30wPn;W3pYXb8u+XLrGi|LXFs=Cz{a3Y-6uO3>T;)G6Q12Bh+%{ZYm{2#deW=j1#WF1G>m3|66*uN60Jhk;=Pw^^%KjaPVmf1AD!9RF~ z-@YK*Fim(Z9aDdU{6l}SMg$-SaAsZUZ~D68MB3vRa^MEELzs-`P@@6Au7f#--r9IT z7T1MW;PkZVcx$jpO-T7x{E{@(*n&@t{uI}J^JTULKvQ2YHnD=@UwEQ^rVFC~WxOXm 
zv`PP?|2v86)DN_qeO)_F%XFpWQM{;LzyFjlk3Feon7#1ocgE-M3~>#Q)bV(VzjE_o zui-m5r~g53wMQKt@E^VknuquhIyojSZbK#MF9NTJQU;shnk@Wvu3NEsX&h{NQTyrz zN<@a>XEjs)*aDCBRnj<~di^r5AxvPE=?}d0Pydo+UZ6X4!dUDF63J9Eq}Kf`uVZr^ z2SNGh;2YU7HqE1N_XWrncl2%aPWkRvbfqNABijZ{|Ge5$`QEn@f7m7Z5I(iT7#wy& zT9jcO_vkS0FTfh*98*iKkruXTJ8_+%?cfm|Nq#CHb%_%~UR)bgrrJ{h{i(8{$&zo) zV`@Vo8wTKydl+$FC)R%)Q|X6)m?&1@^Q)Kv@yqJ^O1paXa(m?AhuT}-_U87~yWY{R zDSlpAS#rFuBf^^fLC!3xZ%V1Ow>LPBe4!_*@_Nk)EvDQ&)TPXSAFjL34*6M zcehBIyBtz4T=OHAnL9mwBcA}MV+ye=IFBlC!%s&uluM{`_k++ z1L^<{AkOie?et^eh5w=cfJ<`BPBI1Pvg-n>B4C-a3m@;Nzk&@hY|j zJ_HRUeKi3vcpwOVEK?CkeebGLyRrt;QoP0!xPwCq-qX(XgHC?Pobi?XxQ35GJ_N>; zp$=t22GKvj9Fuq!EOf4kFay@YS=?YIP$`YMUg=L0EpW19)p&UVnfm&PLDh>I`(&V zawvNb+yLLi1AHw~kZaU0z9^$>={kZ1m9rfPC-f_zcKayP5b`hl@hJlIB)=g#cyc+) z#~|G8q{Fx_`{7kqZLwHD4}#xryGrvn;8ngBT)}B?Eqa9qVRY6p-aFvA^ET%W}`&N=uP>&!33ZP^%x(WoplAs-Le;W$1paa3X_vP z+aUetc(PAsw&m8K!2Jh03&$A?wyMuO;vKHTo!-4?>! zV5I;aIr!U}B7N*X(4px`aQ|XlfHBKh+3qv)ODtq??YJODCy!t9du+^FT3qqs8*Jcr zs2LFboVG$nadg{)5W_%2oe$;1HzJ!OIVUy%uqgw3@&rrz2%i`I0Wco|)PQc3yq4ooVv1tTLzK6?)uXh9*^6`or#XLzgVWW$?qai3~hBAzk&) z(ogXw8UTB4;?%j(B#!Ye!yfQG-&f%MKo2!6?Rvkm)7+eYXV=J*Sg zZ}$cSjWcIVBxNUDog5yu?K`*H(f&Pu+JYz37{eHEjy2$&ut`w3z4=x2Gv2yy=+ve< znm-u2ZgSk9pdsk&H!wR;W&~CL0tkE>uhORnRQ#OR9e?2o{Qv(A012;2ztZ}+8b|Dz zve=gEWdC(J^e`$@w3Pi7&|k0}e>zL;Ev#;~%a1-O)h#ui%44@by(auY*qQqe_N_L& zfIxGig1Fu2I}Ggv%wCVc_!;-OP+t`uMQF|%%hmkN>W#MPpH==G(BA=&PZ4NvlfpZ< zsQz?#+_rDM(sp<5No=PY*NLs7ooItT>>42hAZ6V*w59&)hV(e@iMmPmEFbE>(MEv# z@)Q&vV44rA^$p-1Z}NRkUa5y}XW~gd)~}!*L(eZ5w;#|=5-|VNDf~tMOnaT4@((e7 zQGU$-d8m2`BV&u4YHybB8NZVBRGbK1v>zKV$2`u^eUASbTD$+MdJzW<_3M~)_3}A> zU$6myvdAR#8hjOvwWx_0ivB4W`~R_`7-tQn(MH;(=!tfqh3v8a89|q;${Md|q%WvE z^$WXkCymips@^n^V;(hs#UEKOl3A=F5c{4uZf30GMl3$a@ssPuco>Voi}A)Hjp^j` zdP!HZ(hFAe)$)FxqaC;{Gt8{fW`SGg=p|cd?5afa|{DF)8`jJvN|9ren;{$ zRDr@*$H7kL8Pid>M-5_U_Fs&h<3<|xqv9px#N22D8>%8srFr6`M#;l{0tt_zC#|o z5ddBw3Z07Ag=p*4pX`!NsGo0aZb^Uh?f3rPZ)p5ZyMUv4GR^TMKVvG_@gzB7Qa|)~8~TE8 zuGyH+&{XmK`~ov4rRh)TdisVon<7}pFm3@dh!Y*{17rSw?aCDd?>7Ma#&0~+e&L_| z<97YVt12?*P2aJZf={KLXpW(nRZVTKZAzA_UZZ0J0C1vs!t!S$Ja5<%z#H_NCeQyu z_vW7mDntk8qgsq-4s-28|}=Nk#-B}-_@b^+(-ClpIA!}wL>NI=bH@Kux?Ckc z=O;~3X8=eq`V1cb(Piqd&llntbNr;O14t5Y`VVjpWhE0;x0qaxlftK@Y62c|JjNeD zE8}s{M;$Wa*zuRYR$Zgcanh(rBQg7JKsA<7^#FXjEFGAYlK~vQAv?-beU~kN5B)Zn zpwk;FEg$_D8&V$7F-w0O^IG6+l?R<4c;Qk4tM&b$5gUrK9M9yh0&n6gesf$xuTd{( z;K+6O3tgWo{z}>{n~YPKAI3PxHM$`+aVI#54UA5PPN;x?4l%O2ex_Qka~{3k7z zPujb{d|G!D)gSmu-br(SH*81xr*3&@{xYI>nBuc5|uBynAHwEfwB$}WPQ6A=`?BcN_H z6DWkg@QR(w$Cyt(zDY^6Ng4ctd_Qza3(-u1QF9|$^-Z_R^H5gJXX3sVa*Xnw?>u(e z<|2L$9V4lrp~^+PEK0L{m8&?K&np7(5*t0zUPt|+>Hr*B%k`&B(U$C&wYG@2u-~$` zVm9dj=Lo>#GI_rTs5<6@@me&RqXp4q+0Y{L7f$8b9%PqJ$L_FA^apRs1E1q$(Z)li ztZDfsy<(BjA>;3Ai?;%V9kcn-_e_+}8vuA{=}m8Xw7vbU?`Us(`&-&o#nWqwgSgQT zF2*9rfZVgAoC|Ov2+CB(b`a?v^NA0fH-PnlJqW;oy+9-PDZD{avPFia3w|P$4FEs& z^Q!YP*SrYmxKl85Ek&2&0wRB`uux2OkTMJrAYcI&DCM1pFaW?P(}8rz0dNe^Q!vm~ z5W?tHY5+(BKhJN#(8g?j@fCD+wz)CaA8 z_RyUvgJ*)XTu*80*qoC_*Ds)bCh1HL%0BDHAd?0hD>b+dM<$2n9Zqs2(1C0g;t6;R z0>|Q4|EMAC%7Z>&TMIq1-~c}Oa{G3us+$Wz#X;qJaE1@svW$X0fW?Qi6HVaARzIXU z*8x%%-tpwLfp6*?hveo3RHx-Y*z<$$UKW=qJOrw79gZ~W2?IatA=(`v+Nu5)0}kZ^ zVj>i6xm>LTz-!4b^=l|R-CnXk)y~SigIhc5g4V2Ki1r4^_S}Z@N`75lMR(One4UQ8 z@1m6jP4k;HBbQK6Ugr(ZYOCzI>UV#b?PP#&z|U5|X#GNC)l2AfCX^o{nK~5(6`wQx zKl1W(2zuwMRe}c}N%F{mdg5SDS8TDZU=e7soDIEc% zu|SaHJmO{ZfkARS%Qr#*bCQc+@tRKyzj9$wuR`p~)Crva{2-n3_1eLNe< z+xW6P^3ZkpYx@f4E3bfSevsm__&xj|0T+u^-cI78JW-+qwVNAb)Sr2x1pA0d=Daum zoSfd3ABsBQRDo>YNjkO-xqmu0-eY{b@FGjAmONuI9;F%&_3isar~eA?+`^LR zil0EYY(>Pe)HM(>hmwQult5?YM;4r);{|5qwVoGKiO{IxNZJe 
zVB`t#S|Eci-v~yO8Q_;)u~6wU_V)y`jASaK35e;ZOv+)Bi-C+^Jdwr%84J^tJ2C!2 zN7ye%T^cG7hgYD+^YROfm&l4>ANBACkIeGUTy7HcW(V3sr!3Hr4n&(#7+M^V*Nt!G z#eyYik_S&>Xbe>o23X`X0PrIA0!$*jt5hDwkYsfY8fFvmyJg#ecw1(%PRKs|!Kd5b z{Odp5-u%wDwYGb^-MRU4Tb5krWq-UPHg1A5IfB-*E8+4nh#}t7=%$kxpH3O4ZC7<& z6F~jYuWt% zpzS^1M07uhys={9e6YXteXM z%pCs1X12gJzeIq&S$==-?{b^(hoR36>e@Y zoyw;jY~N~kZr@O~v$nh>JVFEC_LD4-NG8amcsoH z_LIj>Z!Yw8ddoiV1G9Jv?$dumra<5kBn9S?{f$S<}A?nXRc1hZo56;q!5SOu#<;mjl1 zDFgfpzDR6(Bdaf!Nh}LkOwt#j5>KEyHqg z6$km|zKv|ecXVdc7eDYTNiqd5Z4&SL71-IXW7X`p*0btSj#!nYT#r49tI8ilyd0nM zT3TAk_>&vRv2g&r@a(d}=(h(dpErE5DE@8V^#|J3Yj0{l_j5ld@hJ`y&&2Y?-AuzK zj>B0uA(|XwV2^FWZTS$FN#@vB$VD`3Uckc%#JbccM%&-J*A5hO?(FOs&y!C)+`j%_ z|JwE?U-%^&Lxo*-dUyMt@$B#OW`MBCeZ_X@)_M>f_8T%}cmaC_9RFAO7F%t4Y$Ki*(vR4)lG~ zBM26C&e#h7Brqpa8SK#;WR*vG+dlPju7Iq3o$F4jZabT2UPdvRz~*Ta$FUIWf_}oA zYow?`#4qkE`mt8s{g?WR%Y%b+I6=7BGB(cJ2fdjAf#%pi`Gn|yT!-$+k891hvpj3p z(XY6!@{zBE0UzzLPGS0q+mw%tG)H4SH*83Bk*~5LCwQ!O$p>h!&Q*l=7h?WX^~*;9 zag97k3;O-4e16D5$$kA%^#=%^BQ11mnhXQ}0QEN|;TPOqF9$u!#h#?Yv;L$tn}85S zwn4vlef@M~MrAQro|K2@kcTgIO?mq#XhYs5zbGJE1y|8gyhIugytqDXaC07gC;_{t zy6tVT+Kw`Wn!dTr{7B__h)vclJhU18)Os>f&gl!(jlK=I4f|eQIL5?jhYn>Lrl29OuHqm1J*mxmY8qcY}Kcn##C zKOI94F&C6N+`ck0|7J@J^Z0RgO|&aIr*G}U-Xjd;?B?h;WM>C zT!c^Z73FlK8gZ;t%V9&%fe$1o z!PdRHq>5qxK_|EvbF(eA<1x(;@|)w06@sD<`MJ6PeAFv2-=pimIh3(q5RE~5DMYlT zs+_O9z@H6fyox>m&%aZqK$XWom@oHH@&{gmF8#y=ec9v52nXT~T1WJYt{SsC{optKc(ckvz>>pf;d@_(};P9>s`Lj1Cn_G?bYT4Z!DDR&hmj-KR^mU*?(C#JN9Rw z*%UcZ+`Z@H{{Ers(<*P|df>rF+fz@yy}k3T?`}^&^^SJ=%4*;d@K)6$^>6AKwP-#$ zP(qTcXjiV&ps!?sKD@XqAPu|mQT6BN=FV>ba3={#1VpbJ!OXP` z!gcaSbma#DBRn-xO#?zfz;xg|bp8Pbg#?JHp?B)C2A~0;r(mF~fX*j+t4eo_SqFaU z0plady+YAiP<#cQVq0ZLG96hEM)Yz~mL zAsC+uevr*&ZYp*Uxk>s0=s*hWvV^ zR^dHk$DJEY$0)o5Z37H6OnUf+v~hByo}t2a zFB<2W*5g#dxjM}`FwN87l0gJ&n0CITd2A^Sh5O}}u*1P}g$ zCg>=KTpJ*&zj<&Fc!Gv@G{JU0GW8MMgKR1dx|~k5$Dqk(2<3x=^tc&FR-~gdf(oGb z)wD+Jj1Os$2!9_mi+4iTlOD?)rO&!x8sW37l!0?VS6)jrbeg+x@w3eO9ScJZT z%RtGKCIqM5g z1t7jwy8EGN(ZPSQKlmXbPk) zi#X_K{`GU6)E`qCuDQv@;zXCP^;Yx>((l{N;)>9p`v`48kNP)xgP7vl_M`F&Fq|di zh^PE#ysz3;LbfrT+rKlN+3YlnEs>+cNM1THHL1dbEvf zg04s}IS+CJOrP`-euH*(ZF_*O-~l^`R^EXd5DYh6c&9@dORBiF|hpnp1i8NTq0O#*1<4jxm~_R!C@7!FqQV|CzYHiKS6ss z_v^LVp8W(QTW*wPp;D&=-{2d+#irz3W#z%$u4=_)%E`LppnUXHI_wi`(QbFegxXo$ zBtG_&O0@yNvXpF({g#u^`(O*9|JZR&+w!MgfDJo~Cw@Y%kc2t2KsdDlzq+5IN~ zF2-kMo3@X94H@SXF9N)A2Yzb<2NvxomQ)8%!|Tyk);HS1`j$}enG50bMiItGnn0Ft zz9^d?WDxdB+gwMQfe9LXreA;_US)iGq6roPJv;R04Q{{uVq0BXZtLso6f$qLb&{VA zfmW+aU>fLy#s3UM`iKU@>~+P%voKq(Anap+s9*8C&hl8-g(vF7lmALf`yS%)ysq>t z-|?mY>GbLv{>L})%C-XvPaQ{4w&CM!`{i$DAjg?;`-V~buMJtg^O10_{tUfc9%BgE z*bYP&c2PXl*d2h3f^Pb??MS*qPMl-!bMxzM^YYcUwE3X>x@+TDa9}AT$A)t3Wl#2t znzPv!C+b(USw_it4ip{5Z^cPnj?k)u{MR%To>>_46=3~W|E_v{o&z0f!*ZuC{YK}G z?{qD_^X8`cMfIRV^})lPcH{a>_T9Vzz_A6j#3TK543=nBW5Eji9;VOHTkRCg;+clusto|MfIh-L5@zuv)cT6` zcTQ$w7~?A+D4T-*nevWbNE1FU5R>Ne)Gt0WodxJD3q&lKxU;>u#?ON{`1QI8GRS^) zN(@GlkJRI01fRA4nyG(IuR-?Me%x&O&+0!N@Keh7d{=yNLnQbc`P-6X7?!RXV@vL1 zdH4|hP`=7D0?B4sF_t>C-?+p^CSK3Jv3a>Y^Xt#F@A!lNX8Y&Q-*%|MbelEBY{hV% zyZgYn63{EAJo~Ws1i^X$8+fT3c2tq$9mWT;z)L;Kh95#UK_73zfqLGm#ao_MR^c^b=AG?5ajY>z zvEatmdi!mk_>%VF55BKmy0YbUj1!?|1L)!Yf%!N%K8PFss*nB-4)&rA`m_2qIN=$7 zL#EP2`X#>s5xRskQpx;SWD0d^*R^kg|{))>&5zeaHI2B!l{+ zqlxBF$8Bw6)iLPS#-`{!YQOX=zubQAAN@jm`Niw)vicOuqpOOKCx=J>Klc7K*0wFX z@5AOj{cT@cRjiJ^M2ce1Dv9c>xD%8(fx9SV+)ilsYn)ScGa-@Dzcha@7?!qd)k}-@BbfTuC@0$=iXP8Y=7}M zXYSdJevUcUT+LqF`tzp8!y|rDVc+KnL;-p?@ZbP)uoL{q#!t~Z!2TA!@)iKE7x+za z#0?5!Zk|8cw%C<@Kchu4>rs45o;`Cv03v~k`^2r!U9^7r`XKY8<|68tw{rY$GBsxZ zQ0pCY1|w_!SqH_`v_<^d#yM`TH-9&YtgdUK!@8e&RC76Vk?WpxXFidT@2g;aLl}n% 
z78>Sn;wgasqIKv}q6z?R1=W zT^iw2&A(jdvtp)M&T}QN^i|lX%=xOS;vXF&UmV1bJ%QSNJp!H)6dmw`a*wwRRnA-g zZtO1CuP1)_!}YS?b^D;k$3CU7#1A3mw=+1tLIY0NNB$1;&||H#`NjVhRremyhifLt z&)Zq6-};G9xyJEV<7fG2!c}==JZc>QXj^=xPxEB{EcUsZx@g^RdyAiKTGlGVoDYnj zpu|C&_(eCBmB-JTzr}|+)8Alsya4!H-F#^ol_VIxHO+UUDXvKF9v`&s6%J4^y zz&$0m`(vMuryL8)2gpyo_~|%n|JwJAMcHY8h`-Ng18eyJ^~h3E9us2+G2pRfoY6(~ z`2axa7tH8eUBW5c=3Db5$8Z;hTgdD47TgQBl>#lI~4eX}r z^`geB@DgX}vbmpZy2WqoU$C8#lTO0NmbK3S*X5sFPgz6B6*#u|$pvDDvT3W|L4v)h zYbNQllR)EY{MxTJ9b zLwUVFUy|bO0f6^1$2=&Y>9p3jZD@#Q zN^AWcSaJ}unt0M(m7~jwek0M2E>x8pZB^ff8PL_o0)P(2=L1M?-6>kPDz|@3LqaBP zSI9CzMC$&oi^gPZ02K6A3x1uGN>UIzpwY~TC`ChfLf3r@^kCL}5COF8&T zSNPvJ(yj5gQ$S^p^JV%T^DM}egVPrs8W?-C1D`VNLaH_vAY`b)<3LBt!?)B6nFVR> zzYIY5ZYTAFn|KC~#Z2YN4;d`*I-rBc0gU#V_|}ID{Ls*+HV%p47!NM8^MVZ@i-dNs z3>Ayl`UImF`d+j%-z_HXoPcQ)?;E`=iq;qUWubh=jRH;_gi)fE2bUW|Y*zXydSqBO zvh?o*u7!!lxf;ub(Tl{5G|A!S0(Hu)E9@xUuEa)anfx!GwgT;_Ow}}^vu6S8* z`Z{OFXgu{5K4q_A*sC_m?g!Vt1rJ|T?v#G(>4^s(H#5|yvVX_%S)QKvH&mY;JLmO$ zoQUz-eA=w5_z=H*lhNO1gvT>p^RIE?LNh0*w0k@_87EhpH$w=Qb)X5T(1 z4jCWOzymz}(eEcFmE#x70?%|hC=()#;q0e9H24+$gD-wJ_>DhwQJt8YiI|}udg{bP z%U7LwZ2t0xaM=L(wO*7ZTc=E(Rmewwd_vhz>Jbm(v7J2r;7iXOn2{M@po=$woLreF z{#2X$sjFMu2+Zyfv58P_0mO06;BqI7PvW-U{UX-pY!+8uKhGMoft z{!u20PP;wVp;ID!yU%6yLF>iU7azE|qXzd$1If5~>*hGPb8kF+^X>8Q?RUnX|F{0_ zasTPF@xGVujia0V<|W_I;Wx0jaFyc)^Oq-B&#}|&6yaPYmYf{JloQ_^lho%E8jX#8 z$OS49dEFO{f6J42mLY6_`CU;?(5RyyXx3lJn|F>Q`fIR!^Xq&muaCc8gGp8Atpf;}Ebb}xEfs?+L1*l{kUkCt0V)ys+5Q`rU0L(6SR7Gh4TAm9wMOa&i z{B#DaKJabt8UNg${HGOILOgz~P-L!1XU{#E$?tdgB9;=oygw13IDz85Avux6c+s1B zKulD;kPTcA9lHnPj969qkZ=Cqx;<_j-Vqw(38K93ab&#Z7v;PB1dn)fk^;N<55Ll? zMip=P;k5WMKe3{i72eqibDDXn}>y*GCFnHgyaJ#poW zQTR}-=d}fa*K`K~ywOABSGYEieq^|etdePiK$DMYrY`V4!ledxrnmvIwH%XVpRvD3 zkLVtGD6P-d%VSk>53W0f%~;w-_9JKG;X4oHkMrC}wBanJ_=6Y|o@BOYyonoV3oQAi z@v9UakiSmQtIc_;QrT_bq3@b@i(Y+pbLq2`RZ}@JtXy$P-EA(gj{-k2RTM>9h!)L<%1!`vpz7QH zo6t1dx-V(o5JQ&0dbJ%ZJkm3*Ni0+B>M}3XKJg1=Q$T!+Cu>`-XPB54J`A{q{j2xP zYTwiTgd51`+PfZcA(Efs+T9yZpFSO*{N!iHCw~149(ks{=dQVI1O8d*^sWJ#rX%0x zzU~urw~r?_L?*LNXgr1DbNPa}LLM_(u9r{`2783~)G>?eNaa85aZ$d_aZVLEA#0TU zQ`cWwug38hIkteN1&4U2jr}ETX`{dN6WGW4WH@`g5#a}Yt)npL^LRI{&VaO6<_0f& z9>3X1xIW*9pKQ#&^L!i~-O}g&_{c|ow@%M@#@D{~b>WB)+!Peti(-q&4ZiG;ppzba z-BMTz8RsNa^YKyzUk?$g&j&zJ?6Jq>C*XYS6mR(LX@7cnxI0dsJRV>D>YL-2e(4kA zSAXqS$D_x`>?JKoGMV4; zt;bPqtj#_LiK^B!x@s=TNccYUHB0;jKD_W}%2_9}J^*0y#TEcsF6jH>zsmgV59j<` zPutzouJ+)F5i}l3egF6W_VL3%@FU~9zxTI~ul&YujIVy`zxa6H={j?f z)q{T4w#er?fZY=jgP$-+od(f51K-^?(`vc~|BNNLwEOy0`Hwb#I+MPdTU;IUc$fW6 zw}9f%$3e9VmoCl=eNC7av2O0B6|VQtV!|K2CRdx|gqB^dJ)j2|>gVR2m;_&dzBia< z>ZM$C$Tyblp&flRKGt7}x|Jsf1>3*@Ul{NaZu008KMD_f(8q%b1q_s|OVQKhLVXw| z_PQ*7euD%>jiHx>R=ng)|!R%jJ#yYoN;4~h6 zwcf%9P|@;3qjr_)^KoZ4^20N_0n-;8bMyxu$@1*W<_T_Sv!@A8Cm(-xtW6ny90$8z z_&WKv91Si!cD0{vdTiO%UId-F#$+9kCH>T0_$p&0L818IK>+iI-gJY2aaoPFuRPdu{+_}phcJAU@#KR15%XMcWt>eoL#?!W!cxOH%I zy!PtLepv9{?K|W4om=CM_6Yk&hZ46l4)%F)V~oc+xX46B5~HUpe+C$i9vHi!F7tM=Mr8YY*u}1 z{TWQv?GO-H1lGXJ?^U262ZiT>778zVO}E9fp%(DPKOPySt|#w>*-(pTV9S5k`g*!Uf z6ksawX}(o0Z>7p_fnk{1+`Skpgk7}kXdbMSd z2Vb;4AgXUiCyw?_B+MF}Vt}Ke~`~*rCB=uq4$9 zD`;13YPC(y)#fr4ZvvqqhNQp7$2UlgPpT6DXh`%|JM%TX&eXtQ(8h&a^dhHUd9`zc zGj(m8eFZ=?IuZ58Pq`mt&`Bv>Y)*N-w;zb`l?1i((-OY9R%>ypVukE-AB&GdZ3z~A(B zog2iWvpn(93I2uT^2$QKiOS7hE(Xx{jBg*Do% zar2&BGCkk%x%)(CIfNhMf~?qO`8Bpn0v{c@3CMVaCo4+glxbJGS(j7Su?;!UXo(p6 zS_F7Xm)De$_C(13e2)#G){?Gwf6C(R_N5Iu-KQpR%9wlPY_S*KS$|k(DFbTtCr|PB z&2l*U__NMyMIvVv@=7EfQ|5wC>foR|Yav=VU)4>64r~TRr;41#^VQ%$DjKP|JHc>$+PjH_q{Z3-@2uZE3wH`(Jb*0a^qiduK7=V+60d)&XA35 
zlbg8krr47ngqnZrQhn}{PN0$ltmnR%sN{{c_0yQ6d*MiQfUy`-8nUWssaA2@scha^$Df*Gx~(3GG(D#pIEAD3{^S7usuu%pgSfkynuDjm0*Mw z7QR4^{i99&hrVt65C7SJQEVTMi>Hsr(BaVne!OH`XKyV=*T<) ztR^aO0g7WZELj`1{_Sc_d+Y08)p$J{HxG~e+iP4PWghsqis4_r_53j-ni;R+dkugS z{LsT;DFe?T-hht706g2(B-Px$@Vkbu7f1@l%ika{11gmSX1<2z7k^Uso}K+?frq%0 z$m4J3#k!+9OHRH4LkR-?Gd7JvdnX!cZ~0wEKKWH?#$3S7A~%1)HdeAcJ{neK7qK&K z{cRc7EThJz?Cc(nTXz-foZzb`@uZUCxyRJ!0Th`B_C@h? zBYo6OXdCiiKro8HvV+h1PQB@v3o3&~ZX6{JDOdT=sd*`(8+h>Y{yuiwIXW%`hdN~d z8LFZ#{rKE#+Sz#c;2j-aFSHc29$Mu^>;2Oz_5+!F4JoGq+-3d7|5<}CL&h<5+o{L< z-6mkW-mq?8^9ACM2^CwxTaAZrY?Ei*%|jRDtS<{%_+CzHUl~--x@00jBP>4gTFba9 zS)Q9U@{SwTVM@1HczmpzD(wrgO&EeKu_7dud%+VJv{rak9_ne$5Wk)-hca@adxcp zBB5As`G|cm=eC(Q#fw}@hhOAcZ+N>Z&AhJ!zmq=2*l&K~PfeW5vM;-N^Uiqr?knTY zt$TwvihcUCpB?|%$38Z`{>GbrW&SR9h(G&g^yOL&w($@!KQBd{3*o*Iz$@j2XQ$#G z36dt3&pzH4`fD#qAF-Yy>xJ}Mx?FeJf8?-a{l+#BHD7qBP=DvAoR`Z_@;l$oLl@2+ z_xEpVF5;UT${Y7sCqCq+KSQq-BZGI|S4m_!=9sJo(t^$5d zv)lY;HTQ`p0~eO7{-o$Z=j!-s|B0Vz;8Ol6TQlZe2ZWI>^T@SE_`ySSTJc9aY@znU z4nXtAHqpdQAQ5kgGw575C4CpBhb}kyv5j^kT1EsJJ2=Qt8R&ZO6W=KpsGsT54=XSk zo@xCthH#m)Hq|oKt@E7VBAd1(NCT#&sl?%aYA49+fgCp zCn#OF&(*9C@2QQVe#ceCPU&WVu9W06?1OKNI{wDm>gU5^%v}E^-$mi8U^Jt0g(YJa z)wt#onq1F{&w&1n-*YB}WM;+!!RkC(saDO_3AF$VEIc7LWeWOGA4O!@*UgMp`4 zD8G|DOVL@lpmY8ce)148K^#%jTAs1OM#e%Oa2*IQWa8J%tN0D&gGWL_l^G+CuV{Nc z^tizndDSL}JW6~-SFfj@f9dm_w44UygCBi`LF2?*C-h*r1Ht9k+k1oLr08XR?PMfq zo=6Y&J#L$NAiMBgPZ?bU@nHifUZd;-%H0>*);0I1#0OOO!e3tTd>}!WHs{hW*KbwE z*Ia)=F84*$?uQ6i2ZT?VFa7LkZvroV8i?}2!GY7!;mz^Vy*uO1otMU|FWu9=?2dmj z;IrBX{o=3u^7z@0e|&uM*FQbpeEV%3=Y@akXq=v$jYA#tA3T0EPR}^93w9-!hjr18 zV?Mse$NEFA?Wu8gt8(suXpgY@0KoGh%n}HeP^sHH0m@z6ZU>+VWWa2~70~T1qks#9 zUW_YMKSt&vK_*z}(;issO`}`^eg)nJIPl;v9-T}BKFbalqA9~m`b=HHRaW@xz@1PF zKp`qguc@SR7pu-lS_bvXw{-FiWV*J*Tn2cMw*0^WnB45>2WYR{Wu?gb;U zxWa2*RnJX4CK5E#nTG|kT+(gWoWM(aVA0zbk15+Xp}CB|@N+H&z4&kN!GEh0Xn?Hz z3*F+9v;pB)3nw>FRQ5tB-oX$5h&MyyIEM!+RG&}cMKl+rN)6lXme>0GpuobKMcIq0 z|0Emzvl1v-nCg7?;t#Oc>VWHWP;^XO_v@PTS}EcKUx+uqKT|So4><7f;8&{lX>X<6 zbVAS4mshvgmIay(nW6P0w{Gx+KXvqs->GA-eMw&$C``aBt;-aEkNguAwfTf!231LQ zb{Z5()f}r5^KPq+P})~gJXWiGCXEid&9UOdSn}1x!vrwxZ=xr$9a^Qnp<3TYwohB1 zNu#oO;S+z0zS{HJOO)nDDeaQOZ~FH`1J&+2>#WLtMOxk11>k4((L=WJt!lq+j~lga zi;f8@UP)8*tdr=X0~DJd-_#YKcpo_;S4olUKXU9%T{&ZeeANK!OC4Uald^qBzw4qY z`>{8^qK=-?F*wqThd(s#_1EzypBy|{`P{@_Ie|I`F1Pv)nI?+Vg>xfSN*wpFa4$W z?hU?)9(}Nh2Tt(!4r8J=`@`cSUJ|lv8^wyT7hlGOF@g?T!N*3IcO*lysGC1HYV$)c z(t|pBcV8#?LvR0@pOS)R2Y)h$Jtw3me)5fZwbN!NAXl`=u&$xcjV1hmZRmkLJjfs) zGKP#RK9bCo$>r+{d@cPQAkbuN{U!q1i4SbipZ9#&pbXv3N6E2Zd&r3!_%Z+RPi(RN z77=>uOj6zP4nKozbYI5OE!;(CvnoK(#DX|D?<8&{kXV;kKQa-aqE-FwP44+3p5U}b z=qh|X>b(VPhc=JkGc0+yYIp2wV|sM=mGR*IgK_rsbo{yh`=1|gJsINzuf3!V>r3PK z_-vfE=)~m(qy<}T&a{j^OlRx5E+Mt8Fy;dsTv7f=P)#{m@0@D|w!4iY-n$CC0#}8Z;Dh+&DH7 z1`j#{*0sxBLYDK42R86pu+}Zme40A2a0c4WRkG6-8i$Zr@e1zUz4P(i-}PPNkN=53 zIi5Xz)33)rJA3Mf8xZ4oRynUw&T&360F`~Bo|uWBNm>aUD56%*1zM!<9v+B~?9&O( z!QFdf@8+E`bi#tod^^e~eDIcTKIZHAkBs~bk@#Z1K0JQ%Kl=mx$4N_cMB&|WaYm~m zl!+?5Gv0)A{Aj%S^{;A3&d04=H~l+n9xLJb!7$-v+-n}oUx-bdGLI`RUIgV2>+8JI z%Ky&=(R+Vb$+Y;AwF|vC2}UP(QHDrBZjm^T_HF>k%ZEX#esn{xZtu1(lc^tc;m!S< za`%!ed7Wr^ZNCxVl?0B*b|ygi)T8JpG)}d7O}_@3)Jb?Y3H&7H+mvyM&x+WY^e6T` zNoZg#A;*5~V6CrMQ1V{WV;1|om)=!e)}A`;2mWcTL!H30c0?a|N*Al6GHa^(GInH+ zLW%IFWbg)z@~w05Xr~t$`C9-yfl<{jykhLkMN${A0~-4H@GB&%&58L>KlrPhuBIiq zorF*HCmhuT%rTCm=lShs?T7aFWuw}lge3bH%|q*&^^;>SIoOM>3^Weo^Di89c6AUZY_4<$eRGpxT|N7d1s=4&8;ZYJ`OGC#}2w}T|K9)a zufb4z2px+zC&UVnb5-9;?{#AAC{C~;Db8}&fpr-WU(?G@)+AUek3XuNOqI=8Mjuv> zYA;}~LZ97Qyv#^r)z@1{UdxcDu>vP_@Ih$v%e9W|W7WeRNj&o4jIb6ZRd1Sr>%#L` zfx~%=+Wd21R*o?w_OzSu_(ewcs^|g_mAN3@x=+aOI;X)dzKv%xCF_76SUb2`4;vni 
z+D~szC#m4_OZ9*Hn(T1DR~hR89_+b1KWnewf33CC{0ht-H~Zoz01BLcTBhSvm2IPh z^b0JTFxs~c^zm3<1nPPQazghw$n$p$8&J(zYp9A15$2vM$xCYV>OBbO{5HaGfQh!( z1Jteh*tm{Y#AUyb?BF3CK8XkXeV8#1kGa2WqvcQF{2j`=4&)dV90QW)2MRI`>iU7a z+kFPSjxc?tlRo*O3o&KB$|Bx-zVgO2sOuWz`rPIY`nyV+XnENu!Zj(kpDzVr$%*lS zjNl&+XiZ}Zbl8LjVS|aaEB(^mY!AP|EE3OYM)_ZLGH#sa0&MCQe-nBBMNzuW$EX+W z8gp!Y0io8tlb&G*b3Wza)5xi|pmF%g2cKso>PuzFFFMu-p}UawGLL&bCg!Z~eC;qF z^0~O+C!{8Q;-^2vQ_3@i`{AIolXmplmlVE|;WE*8zpO*~vGz7*K-I1IsQuGW@$*LO zrydzE9Zxc$qig{4{x``@M)D1D9Y#4o~e%13|Z{VETH-q1zN#~co(cutT^94x~KzOCOO_$d#7Awp0Gj#Z4WBd2)a6Q1hwQ2s)>R@0r^2^@k7Qp?m4_HSc<7 z```yZ)V}`f-)D}rEjPjaB`+<~8IyQ;0Pv6wdeI+!exd$$b;&wgTJ?5N8Gcc}X50vy z1dk~o?N%J&*S+2RqK~p-42<;*pZupBeSm!|*Rf|TW*&+8OZ@PqAFAZVfq3wtbfGQa z&&~eQ|5Km%RQt$J{apL}E1zroyVvdJ!C^aktn>IB>skNq?(m={{Xue&B3_JDu~udl z$@PlXpJgK^QsQGb))W_IzJ2Ag^i7Vt9n3HCpY5tpPEdEklpqf60|C}4a*jUEEcS2o z0RZ%KLm*FE>0j~O?J0hx%rXtgtA1N3{Y1IWjwLLQIw`;QrO$wBi$b)+G?oRX{{Ku^ zw%-%vuSKN5?ci6k`0XH1(&lIaKz<2LPy7dazBpgt&=si7%SiP4t~s@xu5K4`^d= z&3_}$v0h=&&uv%piTKG^9s`WchL*k$S~@3FNQ$U&U9+T_4*@ z*ZEV$bH8s@^5OoPfB>4VlsNe92;Ip%>#v;LX#HsE7Bzwcl8oqy!&`r@4sQ;{|Jr6zAK%5jm7 z-`Vu1Kb4PJT*&tYJM9$`T0k(Bs!W}Nw+A)+x=h)wdjYNKsjKzA{!k{?+T%RQk~}8) zODzJfbmDMHp$36#)#Fphs%BiNu0L0tI*|4oi&DUoy7^+b@|EW_w62Y=@Pn^-xE#8$ z5j|LF@rIS{y`+&@NJU4y(!wlcMuVfT(;8^Zr+8`MRTrxJCJ*7K$gd6gxzwq=Sdfz-=slx)Vtc>V+I8hTvPEto(;wF;Rqm(>6|! z!LuL3k1CvuxGHHDL@HNR;#p_Z`fH-a2DFEzlbO#%P{qQQe?L=1HQ*q&Lvl(LNX&vk z1V53WAeq_NDJg=eQ%N?bD_?)GbBl{?l+F=tLM)|Tf<>|j=b$$Gu!K@i+-y%~tObR& z6^3KCEnd}Le({A_czRR0`Rf4D7)3lJzqmNdF&Oqf)E_?qD(WjOWNd%7hiwU-burt2 zyqQ1+sz-a1Cmo_4bZkKL3@CoJbv$Ez16|BW@=JlPh!NSu{*-T8k~8`9_Io;@oy!8z zZ)3j-@*<81)1Y%wp#PCrZeZB^kUy*aJdk$e33`C`A&=waML(O_Ul@-;j9I~JC@X&2 zN53y1Uj<|hK8P3#y5Oa>PI9BR#xIp|qm;J}IM(T~PRLb%XRqCU?uGW;+uqUkpL?lo z@7)p~Vmp2_ds`>I{+&I>cBR97E?&e%b7FH_C&;m|6K{!jU-kt%^#SF%jC@H=4OJXG z^he{p_#-p7t&20s;%1}jY212r|MhluBz;a!+CTWQpJ@N@zx@Af?|9G4?GOEdKiJ;= z^4Dk%I&Y62@tcIa<-66+kI&oL=~-M*y5^>c#Gs?~L`U^4<`dQv`iGYw!Z-A5o2W96 z33hR{kI9@K5n~s5Y!DC+;fr}boA~xTHhc}LJRzTitU!dvkdbw#lal#1^~cgp+|4kK zM_?Omy^2v@fD@QeugzG<8OBz5TCAfy-I{jbGwgZlC*&SKH~Se4#o=$Ffgj>aJn{nY;~-As4J$ z)PC5{G1%MXO?UDl1}qGFWP{gzsdEAUg>UCr(#^2Rhm~)BV!%UW_$KEn=iy8C|9=Ag zaz}o;)S8z*%LT4|`Q(C!fjkaGJ8>;dtYtfq5q4D~ae#d4%5_hu1 z`C5as=7N@dcxmr7FymL|$9N`TjAK3~pLU5dx2gD@o}X%e;d2Fkn z>m<}=&wpq$0jN(Lx<2z;=vOgdM|uq%dHy7=ndSIpnoLXQiN7{%U~gY22%MevTfQo+#|FWu#L;!@EYd&Dx{QH~BCQ$Xlr)#+o5kNW2}R#A+>;Y;UAuIZ@> zQ|xdWK2*qkCBq2AI+%9c;&1#U3pNIS$7mj#PwvGq^3W?nHTN<0cqCCVJ%~Q-n+)oV z+PPZur&XvHM)~yHe4|j!SJEC%Z^XM`Dw5_csdRRU7ol-!Tktcv3%On!40; z5S7gl69JXE?`o^GH=w=SMMwSuj1D5m#ZKdhIwtE;pW9NV81;!2F?X>e||RPYGD zK?aT289Ilc*&HMIMdfk*EOfY*Z^^@SUjySIQ1q~T>W6GZ*cWlI1{NBJ-kXDfB~@1C zU_5>r-c0=^{To5)4$9_n8=!bFHT@uqXr4;M9HX{#IXAD>7+Q1RxOKBVr}Ip_bE0#! 
z_>FYv)rK7U7#`6#qbzs^)K8U+IWl$VdPjwrW8y)PsG5xU>pYHYFOCZ6cpIOwh&1C6 zv~1Om$W+8-or#ZA@wcbz7@8>#syy_YqnEjJ1<3MTtG&+08ZSV8*&zK7^H>33ui$>OfBc$J!*)bjtJ96CJ~s zb0HFbkxV=;lxEGvLjn9G*q(g$weNdR`|Th2kl*hA#IJp_J$m%m;u&B3;DUV58tv|# zTONy$3lG*MK7Ua;wEn3vDbd@?`e3_Nm_xg#8*kTfxZ=GInntYD2dv1HAM^s%C#Ni5(r_0&vXh9nG;d_pY zMntB=XGE6v)&T?qXnWi&n35)|+K+vZOm)^_RX%@eD~!j1QJ0R?m%id-&Hq8OejBfA zcx=t{ht77pUBuW%3{EL1lSfFnQ$r{FCy`4?QR^nAp09{vl`7eU9ME7XAYzW3&It zg;5-IbJ}@&9WM~~OhT@Gnfg<|c#IpeXMB7Zw3#qyhZ$YRKx3ZMKKePtsYmpXeUq>`x0!`oHw-{ z2tL#C#B1XJ1blAmKm8B>)JNr)4{)YpW*U!NkyXAN<^HTRZ_-EOAwWhGpg7Pn!il3m z$jr}qI5lWkYp}S$kR3Tv!$dgG!3$0D7NP4SPU+6ui7;?E75_HH9ySKT)%VJDCu-_NZiYCs4Q!0{7C!KjA_I4;vASRAh7Vn2a+At4Ng#@ z3A+LHqm3+En=b}L0EB;~4^lzs!0}uz`E!*0!MFIW(uMpnKaS~;1FZ+-3E^W9bfJ@= zPU~umCzz)F5@kp3hk*m&Ci7zep{zgI6m*jxlP=q{1ApZq@I@B-Ajo)J59~l^`Oh?S zy+YP%<%k{f=k%NH--IXl3mcKKv~>)$vA*d-AL}hl?Ok-ax6Q&ue!?Z+i0p5~6uRt% zy|Ul_Dn2XNl)uE%e}!kQzlm-=-_?m_N%yRB3vW#RDwg%<{cl3i<-Yvx{npApU?;NxnH}19D zFTT{a4sWP#JTc16Eq=F;-^gU>@++o8t%$49TV|K`8`FWPHgy5Ih_-}i^wH-5{v zweypsc7E~LpA)>$Ni@xHd3B~?MJMMx!F+k?C->Dtp@w*~)df$_3DQ00bT-YgO7rs1 zye*`oq%JlyQA}&_3+J0Yc5WBR2?boqC)hWHt zFVEbL^jrH-nLKHXuW%a=3bm z{{^)l{m({vl9ixMl~3MSW`1$xzpO>b6G`&pW@?O0)T;|3v|+sk%tNvG$NGuZa>UOM zO4)}YH^$^icZE|2X_EE1Y}ZjddX=1Y%v*WB;jX=ZBUOs@-!X+qY54X!mZMGdQ6Cx5 z2S37^u&l>+etxM;DTXK4w)(z(#x52AmdhuHazwt$E4El$o1HL(w zzy@@OmGzX)_@>gF8~EW0?9sf34!&9Iy8MlsuE00@L|175jnh{eV|2jzpLuFty%{Buu2VSZE$RGdF{u4d?E4}3BIxdemjdq*XA*2Z{ue;UZpgFv;-)`Qx?N84# z4|4I|58m(zM$wC->lP0&bWW7fR?%K@30-@9sWNz@7xbWf2%jonk9-hhfwHsfF`)Zk zWLffDXFa-(wjua>4ClcuO7$^p7F_StmqXPyVQQ0sevkJhOiWI3Cl0adVWD{u*Qj zmi5*lc>o7u@)(Ze2Kstl^uxY3+Cbwk0Mv2r!74TJDF4&{%#%$z6ki6rv3)Y_{RY|& z(D1&-4+njF7W+xj26x#?Oi;>rT94-!OiLj~C~Q9VHJQ<1rZY z^TSW25B5jCqTKu5uN3puW}%11FUN7(k!8L8mEH3M;?-$7gyT2tsq^?gPQY{4!2`X1 z!fb3u+Mn%H>uHZIP7kr>&d7X7LUJ!rk_SVF-@(;-65ZmZ5N#^ATyMzWhMv|i2kLj{ zr|tAHzafnNYB$>u{@4uSVcc(`e;S`c@mb%SR{GAkYn*>>skX?Tfr4H3~U&T+mU z*4^f_=R0jz>&Ta0etWxp>$!G%ew7bn${rjqFn4(^F2qH9_)a>(n>eVLiLLaiIEwjc z`T}ay8~p}L@?vNumf#WVPu920zj4Do=~7-`I-c?-y25M@H{DbQDu3i%=huO>BWXa; zg+HpS%A+h`dXHOi9A%ncnNN~_6DW%HtTa&DK<{=@+Pvf^&Vl&aka9Y&8wY#-i5-3> zixtM{>2W(cI`UT@E_6Qsp7*?`egAL&f%d+y`x>uPkipjiY$tqmp3c|4sK?Fp9ljuj z3F@yrxF6RULI=kh?S#MjbyeABRl;sUigl3J?zTI87XAqP%qw{U_7#u(q!Qyu<+&bT zkdu!1gL9qpi?g=BcOad09-8aq8#fQzP30ZlyxIQQCqL1C;wL`ZKKr@Pv_qYj?nw{7 zn5Xf^ZK90_3*c92@6~6BP0NHSdCCa=$q0Q%xSrrTKAz$|5Pn%j)C1L(0^-B?o&AyV zp!c;IGS71j$o=vQnv3&?j*Qbuk4jfv(i|fPnxzht{_8OMB~W}Q^KQkbuG8&VK>AKT z4vfbjWXJ_qnFXu-18o4USztuh&oBaz$DYs%_>6dIj`cxqH z{k(AnbKe1UT-7bo|helf|_K5cYA%?J4793J{A^KNQSAy}lI(wDMHr;4NCWjW7) zu9grw);t@$%YL>xF1Q?<6Jwe6OWkJq9l<+z23^Trk|&$YU&P=t&I^n9*@%^XvRqFU zLaE*!bsc>;f47~zZOf+gUmu?%9oO!vtpeevDLrClvk=aE$Wv71WI%^n1+I`?Q+w& zLz*BKJv!;&1ZkHAgO1~;0O)X19Y*C0m=^`0+;Hx@`^|cm3>$g}ym=OEJPBwIsCrxKR2%&D#23^H+ zw51k-)YUN?>$pPj79ahuPA*N7PbeDoSoAt6lzezXmy1K?LtAB4UgAdiXS)Xh*jP^=fa|5x4Whz^o}i2 zwvBK@Fc3kvlsVRw&1z*2tU z8TIHFu;xJC)X$!Px`6h?{wFNkk9wvNJ^SKQ`*}1A84o=^ivJsLztJu(llD+7ozb;) z6X@SHMj16GGaoQ-D9!J9`ztN{2ChPo-|@b6=T5ux+*{i9t$S38I z^zzMFolrA1Y_}W!%pR6mXFpLZIld{7x~3U^_)RukYdwK~eG#B+P2gj=^^1-Juy>`b z;;PziEvB;7n2xe(ehZrFZ%^$5CBN`QJrb(p`ObDgzu+Ob|K(UYn;VyyT|j4_IEJBbQgzfe7jKJV$pNB&znA6 zq`H)!j(yAnL*A<&#}}V!Jyh$vuq8*z+^#Kn{+o*I8Z_h8 zd#tF;v}6B+aV9NOX=vsc=+QBgs2!iTj+1>>(&GaFfYNZQ>}Tgc^uOVTwAUl_8u{MU z!mj#Fed~i>O7OPVReu17o+DcE3!O8)fc=C%I7H=j0e?i)$Bt~1@`39JX^+!=T^wMJ z$R@PD)R#u)#aT^>4u8RF$2ah6UCw%#GGtOWZs-R?=t1#!xnkR&^f$tXv-(b#EsFar z*WaYahe9f*7G0DVeIP_dhUae2i<+01)BF4xv1NJmKOG<3yy4H#^5&f{tSb(E!FqN9 zK|Jd>z14o!Ip<%Xr2fS3@uDemuIzte6^cUXk$NiWnGZw403Q?k=GBcHV!>gOe2Ht6 
zTL9}#I79FVQBj7B8fVFOALc;{cYy3Kd?5yp+FudFJPg2ZeV;sdU^;$M22fTT3g#(s z?^~tAq}wtL$RjO(qlfgfZkMc|g71tTKc_5nqmDzr29%P)dA;i6Tl=(7ykw=yX9+cZgjH@I>AgI#-d@2M(?3*-FGSt zRGoRR5MzMS)?c7K;BT6I5O{{r$9rT6)-BtE^ORnLx_m;G2aK_gG}luYALv)Lun>Bk zyk5i4gKqQnu?vm=I-g*@!dIrIc8gudQq5&t{HCG&wZxP38hpG$mt`3{W7CdKi8iK9 z`GhoITw!+BdF0M+yZiie?fI8plyCXzq%%Kg&3Ppcl)8@C_}o={f3jCGLAzQP^*`o4 zwi**+D@9UYrJdg&0Eql?Ju)mE1;$P2iD&HaY@rG~AcDMlyO9piIt-m>rZ@jL)M z>(sir9Bba8j%=Ut&GCZth-n=?0eXC2bzQFYJA2DW-sXvq+2@=R*UULaA8pPHJ<#=1 z?M54w-h3&{>+QWgzX{KEi<6_{c69Ps=Th9{KWoRwkNr7S3Wr{`kB-wblOw zpPHi37tgX#e*Ryv6*%dau)R*@-v?e1X~Kb}FLFYs4d%Fm4FB1zaGjzh4cmZ^{v#~| zjLm^)Eb}l2m)Nv#H`MeeWVk(ybYHdMR*+_p6#oS`L(X(IMo~qlrpI5qjW|TOc5wJK zs)qexwBdxCz3)2pJx#T)u$*JI z_-kAEm|s1uN2RrXO`$U}_4~IE`wqf==qf?CIrU~f=01Gmw>?op1z$Rd=}GHm-CnF7 z0<)%#?3!LIx_!sI`||0u{fRwVVJ7lw$Hzl#%$xsXN{i<+(xYLteeq=kGV1FJb9Q~g^r`Lnx2#jp<*AzqxX%~n6u z;|Mm0^;I~9i9d36WHN*f#al*)^$hy8-90bd+V0wzI(RYlB)TubkCJ++E#lOy^9_o( z&E>8iUjo+mpNR9H^ZdvzP}^$594~xXbU`=xuJ^WWmjVCM4}+R^DQPE!_=tOJ;l>-YdHnX%=zmxhVysP zeHwbAyu?RQPoZB7!v1S&9+FaHUZgKZT+Ktj3#)Rs%;Y2v7HoPtuum;>3dXxy@ymnU zP35mlYWUz3ns_^4O4V85{j0`*%faGPrX8(5ix)kNcv0)=cR_lNzaO=M*k1Q<5JFD6 zIiRnd!yq#nb*j9(-)!K3LU%wo?#D^|Rg(jv6WG}Khj;A+fjwL%8~qJorEr{8S{3e~ ze$PdRV3_OIA6^g>%Bf`}jbFZpPbY)2Sg^z?a=`@_{MgS#S)`# zUY>z$YQ1vg(P4n;PqXT}VuYl;ooBI8f>!vYR3D+id)5q*LjoXe;}gcs^bmaJ9sHhd zh3c;}_^-x+Qok8U^Q6s|_duz)Ze|Ft;jk#18+upp0PN5A(E6tm;(LLkQ+Jd;h^5bf zj;xWk4Wu$xiI_PW#g954ch-~r!>wf3zLrs7{b~rq8VcmR!Y?}MRu0Grr^+8byy2O9 zr@r^c>BV0laEG7G0Ly`Z(VMTkl-K(?Ty4LBV`q=yWa*|1#}Id#z;nloiMYlPGKu(a z#v7t^z$q4lFw`ns97YLat@?SfMoEjuzPNb%7+k~%@%7KX>Ywfl^X8D+OTp)x@)T+# zBbeP{lO3|WaS1%XFAK`^YfvtKrpfv!qEkxuE)&2+^GmyW6ZY&{Kc<|_b%_a4_^ey8 zF?S-=XvR}gJPwRXun{d;6ngO`^W${*%k<^jA##lq3it3^1DEj!Dcf3#%f8d%#{*TT zL_p}%HKhbqDRQVjy=R#TIZ~*@Q&DS6R}$6Yloo!r(4 z6RW!(6Y`eV(2N_87wANQgJ<1GA<;p(t+>sE>WBe4;{iI@E5@QAS|dsmu?F5hAc!=*~|AEsy2=@Rdb zm^ucp=Q{{oZ?1lfbVCkOE&|*j4)?KfQcmEDXPl$MdoxqLR`@xrGlf`kXD_pd#~gRD z<=8l&f9B+KGb4sSR{XSG zIqn4Y(eamnAWTg?L~G>x?)Q^dQW(C<#6jhzkWAQ|bx;BJPDsGfLB6m5*k~Af*;8w_ zZapTZ9)$m>8yg`Oeo8GbQsJ1bhSJSV7{ysdd0tQx*Rj}rW~SouA2+d0%NaS-&)$BGp8mcT3nXtU#fqdk_Lm7P`~)u28#xlEv5`d4!9_>?*HN^ zlqf_{aew^Ju9&SRc2}(nJZoQ5Hs>EF8spvYQ_0+%8c%az@n#A3jhwl91Y#S(@a?j3 zw}KG6$15Qx-1Da!k*=H&p3nF5*=LBD{qxR&x#kv^kJJL)AK^fL|W4*=22Y zR}+Hu9;cRes(HRez8g7b9sSHyRp9jtnRa0o>AxRs4@tow=f5F2)HFclqqT$6z!x1! zH3fzDatpDSZ}94z(DDfW^1CJf!XS=SY-3Q%1-)02{bI}E@$5H*E-5VFxJ1k&KKc|7 zL7onAmIlp;f{rP;-$o6I7>9gU=KWXICKFlvFI!8@@ontvG^F(MU861b=l$BP z7B@dU?K^E7l#lLLcyURzaW03C{_%D6)DC6AtuxSVl)Dk{*J&7-MjmmWh>`W%I1fVx z!6y{vV*va+G0&*;Yw`AkSzmlYWC5uN&k*hOMIY^m;F(2uCHZ!8+R+?2d26A~b~^gM zN!NzI=?B9BdzYT7B$W=QY>jo?d#j#Q&>^+4ia>ZOKPo+B*`RBQGFQlCj3% zl8{Z^TdEF&QFp|pncUk_Ml_`OVQ=vEvu;oSp5>GpVc_3#+vzQc85=7bmJIi>58)?P z^I)wR+HUB>`{LEiGM+$GI%+_DVv8bwSf}nhfND|I!p;81trI%2qMOyBP*h~Yc6_z& zI2d)wp#7P+>Y+ceX5<>R-eZi+E0VLt>5`Kro_?LZ-cK~iw0GQ3d_LeaVqQHTMFU`y zE_HCgLBTT)vsDihLzX_-cRsjrd0-5b8il{246-aOr&UfIr`C0B#T_N132&eV&jc0h8gjx z-jR=pgPiN*_^-}MN3tC{v~En62F&V(tT&uICtk_03o4Huyu@ku5KeDq8gd~@i|lGv zLlA~(Yt^*W>|$W{k_rGb&-10B~ur);0_nI!1@Op;1s=%KO)?73N~Nn2+)K9S=z<=OJ|h?C}${3lL? zo6#+|+Haly1iJFRjAf$?t1PkebMgX1y<40U`R&puVv`fup8E#|c7HiqX!44jSDeFv z%%B+dYu+lZJ@dU1Z?7lVV&Bw74P0pwr*YVz&lvU+Z?86pLp>?@l4CA*>ke%0;wN>WH_6(2LhMWv~>}-T|@BGnWoFspCcw$l!b z>CRIQyYPc2%DYx!7j&C`?gS18XviBI+i`-OFwu!bAYqeKPZ)ub!JX)RGzAvz%ABwu z;3;5M5y_&7P#tsIxe5dEhZqg)(dQ=m`p_}#i_8A{pQ+hR#~gI0{qJFqV|WmZy9dFH zzhKe8hP|0EYir>>_H_s4!SA++Rs(DDEpbcRE6SwvT#*LOpRzV1<-9X> z9xif0pV`_&F*BuWQ>6Uv0#la0vjY<5nu32~D_aKqjg`@{uA-MWo+th539e;xT3%@! 
z9j#rb@+!;Uc*PI)JS9Lidhi5r9t#@)tD0Dpk#9V4O8{_w^(euTrYK80%x%&vo~%u9 zWo#P9D4@0r-eAVw8kv5R?GNQy7Q&1~c)~LvE70nLX8|XPAEl-;e>A&h4QtTi<*WLa znQj%yFA-tGVqn+`N2SrXfqqzikPFkB^WRV$h2?OZeTf~dF}i`T_q_s}6ScQr$K==f zB87i@AASvN>NlgUY z06ni9@8Dzve~q}V*-hqpzCe8&J_TvlFjhYu!)Y5d{&+B4xbI8ct9x8)>k+Zs9mWu0 zxQA^j;Jv=BIN42(7^(dC(WRZf57co%#!&E+lhpP`JC%Fv2Z5A`Q2d<8ucS?=yrjt-4na%3Akx=&#AU)WQfkpYOf% zJj#N)%7sEF{OBSC#pH=C8o{z$zFi?*5pA*@vk|)EXK&eToh$bRrrgBgmH8;)>#frU z4$+^?VlVS!FWz!R3;5t?mOFowKix5_SJ1JH2g{75x1S%~fVs03CTH+{NPV>t!2S=A zDiAS4#J#LVd9d1ngm*^(C2lC~Bvj|3Bgao$fTMAA=^`h_S~>TFTC=0c0ckbJusq!p z>9@~jvFY=hxW8sBpqwol2*DbiZSm;{5|#=fYo!>I#VPOlltIJ;;nS+zjk3Rugyf7RRJ)0bKj#Mt($JE>yvMopcTKu8Q*na z?2xl-@i&8T@{g^d6t3sqMz4Rxl%<;rZe=9fPf;n{V#I6NeD$9=F=NS9gBq%4(Ch*= zOa+TG%#+!68{=`8c8Ze!8*gb9#Hcb8iyA+n=Jr9*8%HEcxvk-qaz2X`bhy0y z>%ZA{=rdL7^%v3DACC~0B`7w}p>W6~Frjg+M)f|CPLoo~!)}#K&5_e`6o_F|LF0{6S!p&i7#_A}^nqBq2A(?`PVr+?9voSvg8~Tb8pP z6g`|sNn3j5GT%l4^?NU(?fUE@E-wgpPLYgD>g9i%&6-o9I|pmN7fER^|RBP(EDmg z#mx@a^~vQF9h9;5O}wzpFxA%u*q46dZj~;zVKJ@y#3#9FYu>oz z*|WpdoLk=OE6g^kPvOXi%|~9d^?l*IG7dGIlE||x-!7JT6K=oXuPNvn;@X&5fa02gjUjH z_@*iEvPb7YW{9*w$^s|K*W?In_kp#Gg3p<6&crY~y&gS5ya-;WtR zVa?Ze^%2K+z}MJ4NuI#w?vBewHBDD=8v-4Ix^orORzcJ8rquziY!maK(LRaCBKLT# z7bFZGx_{mKQ7Q1x2OOozBH?eIr|_!pKfn9|AZN5FM*WD#2YKau>9hj?b6%XJfRDMn zAS2?l)yOJ~Dv#h%%};2`f8g*Bs!2qyR7zWn*^fBoE7)ERfFW_kv&dFBUi9H&PPu=` znDO5S_Nye1rOvm_i2UtAPQy!nbAv|o29)O7hnK{K#&*X&Kz)-Dew)d1-A8Ku+t9SYJ5!E;TPsRh|<^y+|`}hIj4Gkk#{|#GtPJ*cogDXCeVzhK zP}@6llb4*b-Ym(V^9~}k-F=FnL})v9|K}1Tt&4(0aV3q#USG9$k^U>Zew2slg{|q< zyW}6=rG{vxgNY5H+{Th&5vpI%xivP}H_N(Z6YpTR6q@g*yuc>&BPLVaHOLoBst74G zC{}a4^Qc2z3kfyCXp6_R)7M56qFMVa5)p?#u1EVaQqX&F5SAYJFItT{Y(FHWd}zPQ z?3-z)oZKmJZ0eJaojU~b-x%DsJSTH+tU~DR(D(Oi#Qc}iT~mVmb+?cFcNaR!2!k3@ zfsaP)bOLVrpVu7ye4#SYy5U08jfK_o0T1F58%~QWM^zM=)t#wgqt!JMhRAfNt?!qzz(0W zmMPsB=A=85;gyP!1?cWdd~b z-;xH?-U&z?Ms;I+V-}@3`gEMI{?z^_+nr(jIWvaZ@f*}{n06~?IohM;6$0&lz3YPUIe<6`b!Doofb)@qOMu7r+tOQE*<)&H_f$hG}GY~fEf ztBy^~HoFJLPPvIg{)q-drbA&vn(})QZiQHfDxa2wrIAZ+zZr2pYvZ4ur+IuPOP^%) zY&Cy#Tzx^c)*SXgc3U9Z?}7Hx##!WeX+4QVkxQOsN$Io9T+&nb3QMQTblOB=d7Aqq z^{bNWUtO2Q5lvN{lh$<3wkyv7^>DB8rumOA%ld3Nali0f511T>=w_JEJOqf6-SKFMC zE}=y%;gyCk*tC!709BCrH;lF2P{8lcW7UmiPivFgsXC|*sR*m)>=T^{MGenzR~ALf zOy58We8M+_Is=3@E2%XuR9by^itpYmFvX#Lz$kV{Q{BcS@@D**(By_OFt@vx*A zG)WjZ?(a?%yJvV(Vxwd|qcx2W-K1TJyq@z)tcss&s@-uyJC3vL#;7rk?ERhM;wCrEqJ^~u1?`(7wL)n%lc_(oVLxZvV`3v|^&dIerc_~(D& z0w@fa{%5>VMJ=*>33CF9drzW&MIYn&{2qURDlI048V6Hz5pvW9!N??$Bb~rozd@S) z2-VGII<+EVB(tcM(k>0Xg<`?zNP-jyY1hCSJ*t!@$Z_JD)S9EbV66(8GE#u=t5=SH z?ZkQXW_a#e=d-GdSC<&W{&4JU=U`R*!K+R7V)z3EYSPBsTuk8ZC#Q4ya&%EBDgpLq z!S||oXh0X!7!t$y# z6wcM4YJNHD*nU6F?-%PmL0)mCB=BR`h9pgh%ykLadLBFRpy99eM>z%fKD!d%T zL)r1qc>Ewin?j?5mS(4XmS{4!PGNqtgs=SdS{`sNT(@08t4|vpO0CR(@z~TTSbk*d z(>WlhfYXYUXUjQ|L2!EiQ8s;vz=9&`GY@uOWxmspN3YML=^#dJ^(f`OS@)Q~yK(f7 zP2SqKk3`#BXW~75ITp$X$Ngo;1ePdzUeBD?Y#FxSsiGwK(%CuUq_b zPZv`t!J$0etAGoiI7v^n()bX))e?gfV(cOOJkr$XSK+Z5h@f*S|MdgE4yI^E3sW`& z*QUVxyc307_FvMaKZoW#_!T505F<|g0k|oO^wtYYix1tU@@_$iA&rLs;FMMW2OLpx zm$z$|_h%b7Ft;lCg0_us$|g#q|BrneK14))F)&&dy|&6zqptw<%zRp<)_CKxEAy@c z`J8r>Rfe5&$mr=l1sfWd<^Z_1p7Pb~;R|c(5k$jRtOY0W#`6(`Z9|nR^oK{y6g%a3 z->%Ja^#Y1&FCQemxrgB_zEmX#A+b_*=o7W2aJX9m^M|{9USEKu?Q+G`tN=10=h&Cs zukT^0(Zz4SiCEE6Gjst{)NA{M4*}rBBV6Xuzuk>tfI*^~n4`t&iXf%U>9PsFN6_Hg z2XL9e4&d8&iG2H6mbtusmIdygRdl>plHK!|g|{I7mji``6CtyVCj#}ynOU8j={K=W znuj|xL!A~jGuP&?t^O3;knIb^i8v|dC-m6%7Y92S|XS_FIoN4dG z3tG%O>=2GlST4L_il-mJg^zZ$#g0HwF=q+>H_=8FVoJ21$KUa6T#2+dtURF+FT7vtxj?od)}AdjpOP*) zScm}Kgq0;RLue`|+i1D-o{KtO7S8Y0afoIwF;gEuje%DjxrXDo=?s4kAELyMXO`Z_ 
zZc@l5f(R>Kx>fg65cqQGHepiI&;Qm8l4B~jN$LhFCU3?0Su=kI^zX=9 z4p*}sNlBY;3Yr~AM)tb?AYXbC>i{5jtYWDK~h9;8k-?ZIao4VR0S9c-S50RV|F|PC3e;HGN^mH)M)ije#N-fznYR^3>*+*MBB#%NgXi^w~5SI|n1Njh=2{*My1|?xzv}G!Xdgl)~)MWJ!Rwlpk}myT*?> z;SOj(#Z`@)mFrV)CwfCa<#?JZj-$xt{x6x6@+TAF!JSqa#BN@Uzq!W@Iuy=dT2+*C zEFR|J0Md%40A}ot#nbb_tdu&H!i}k@rhU)i?O@~w%c++!nm#U~?OAJD*H70xc7iKf zn>HRSgB8~Rs_8|J@j1cuDxkiF} z04npj#M!>78z_7+#vKA(T=0AwT*z|QGAIE?>l0FK3NJui{WH@2EU!nu=U+`dl~Xd= zeKb~A+)eSFe?fF$>k0@xmfea3cb{{aFSoYaI(Pg>m)NS0>3PM74Ww=XCQE#LdYx^@ z0#+yOpgmkIJa0Nt1rQ?z#nr=PRqTJ&(?6ZwJPMo)}KJJ+a_Bkm|4ZR=EW1v`Kch(3a8FmdR0 zMM*!3@elbWUa$RFx2INyno-gDAR-6fur=Jt3)@*Us|-nOH1pw zTYA{na-5~C7Kt$@v3JC}KbFez_Nn83NLdR^jVBxu+r=LkI}DNty#;4{Z|0p)%HN$Nw}8)90L? zhdg|j^Sb|1SUJE3qqz(~RbGIX{*_yC8;?90Wc z)OyP$rW=E&U#g517cu(k&=%uL{Pgx^)NUZ5bn2?;dk}T_$lruOs+b6= z;BKYm!glewUi24O+ruHRd3~48x1$P&Y>g~0c!p)TI&R|igi^`?cR1_6ae`q&;(-p1 zpKi+|C~W-cS(wN$<)eQF;Xjx9Wu0yTopDHp>AM0a2yHLkgvJu& zS>BNBnJ?kn^^!8IQvvV~I~FN(Ibb|Bbo-PALtrO}WCdh3OEKqGgbN+LmvL^w=q_bhL6=cwY2y&KT_SU4 zT7lmhb$;oD9^bbsQWvd=>ao|p+;cu;XFu4v0`b2!( zV}>T6aA%t%|CL9I$BDxdiu1L1%+pAO8H0DL+L?2?stPcBW`pDXJG{zCzaLr78)&^T zmK{f0)4;7;P8z{mr%KjkpIL)Mo3mU>P>Vn}_AZaLh%QG9(H_24G2_J?@>*CGOTI>* zxi4Poh?gI%s&2++vJ9!S0FLS|_$w$0eo)a!Fhd*?X6@)%xCCC!|G=BFb&;XFV<#GF zQZM|PxgHI5sC3WcavO1DAE~wA(Iy$FE#~mAMnp=jya30xC-#Z)v%L{}yUC$z0B?TF zAfzyb6Km;R>ZiAZ)xNo+mTU=22Qm3ZDF#bc4g{Ggln2D~yNUuK*H+yYa_DD{+UD6^ ziD(+^{?(8@w9ldU$f9#&UExnhGQTkE#eM2^pGopl9|^~~n!37+IWd^XjS*>76opFO za3@jHNEn7RtD6$YI>1Yy_hVRtl;9m!u=~!^`v%rrB2GAE^5KvK`GKEH^!;?CyMwjU z>Xv|)3|knW>SFzoZH?~f7l>2e7|Q93Z(kC*JQZ-!>d+lh=4wEL3>5GgZ+I`d*%R41 zHi(kWeoy>iqOe*W(bOVoR6TUW`7FbgpY0KEk-q|E3YZjnk8VVpk~c)upwUY@^kNDY_B#`ufc%zFIis`q~}W| zhtSbL;y9tjkss%H1d0p^3?poAIw2N3G2*QGL>P>T$1)-aK-6*tVTZMt*f~NRU~C-~ z@GP%tc<|wO3a>`8U=!57xPi#3P=fxr+)Od`?jz|g8-zF|2oQuZHe3fFl+o4zR_ z^*C8y85!3ggqnSfL0zXOH^m6sE{6Q&+Br?n!Oq`&;Ss*87+iyP5psoO5po6!E(%S%c~RN0z&gIQY7e-A?-oPQz^i7&&jKPm=EJ z%23Wl6@yGc0efFob`L^dEG6IxTJQb?JrPh@cqqFTc9%odkEz5n*4Om_s-}%JyJ{}F zq8^Oc;8Z|_VpbQD;oxV#DwgWca5}XL^;D7!kbx}&qU@zl$d66AepGm0kWn@jOwTp| zu3N9Lv+Z4hy)tu;9rj=SoO1Q3v!Dd4wyiyA{=|JvqZ}I?hb*30ux-C54dO|{L0n@M zmcAVaF=l?g%i7bq_4o|W3RruEt1UWw*Y`x|z{mF~z(rn}@2KzK+a(7^%m)^BT6w+&1c|!-(rh#nI>B&$d>( zPYe7*T;m;$tXjro`bVz%d}plxyvYzThCKi7TO$5|?-g4$oh_omB;QEoC3>IsYRqi< zqw2M9w!crTXL%kkey)ENt^7FxVgGVEgVc+9e8$rHpHm}Mi?unBV68;L_NwGTwwxvu zfQq2r;*BPhj**t@lJRtIjS9S(X>wB&B{A_Uh6HTTrq&E?lhtoq(C;VknGhe6a!Fle zwvUQIXnF~q->w_q$bN2)!f#P7#Jj}(mgM<@0NN{i=#4BZn+97=r;Bw@Zfo`9lUZzu z3*I68rE9~;5yS(gyb5nDIo)`?>Z>Zc^)0P2dQ{YI?VVjTl6(TPogPadn{_r%+vLfpQY|z z5^}XAtQ;Q$!U+7wO)y0ieTPys2iwhN8@rE;Pjh4k0*PY-7VJsSP~=*Pwz@xF!px%QwIcTin1VaIc?aO~7~aG5O=v= zSr;q6JT1x$0qFQ|RAiSRY#MUV-e+P*o?g{Or+TB}-4HZUr}CPH)`2Le#ct8GJS;dH#aYI1dm$Wv}U) z#$XF`F4^$sSTuQ_D-?GrESugoqtK1oPdmDALER%xp*aGV`_aMgQi?-vB#rBxfbVd! 
z@j7n^g+KH4e@@)^b&oOJGUPDua>Vh}4Rf=aj``S=hoJZG#IJ#ZkM@3p zR+H4UZJHcIUzI?Pfi%MbO(En?n-8e@iTrzsB=_-i;G=Uax4AlRi`%%LEk88PLMOg$ zTU@$=j6BKP7)|H-`+d9u5gba(10@Zk8gB8drD7pW#&X7M^@kFb9vh-Go zFzZVUm3rSqYL#8r$AR}2f^VCTD6*4db>!Ky#nhHf);c&(%CwzuYFyE~3jC8n(mR*l z3SDK#9h~7Pciby5v%9XYK3>AvN_cM{_bM$k(Nm`Q*zoa(**cDS_T71olH@agkMyn| zwuQy0|BzzuFcAA%M#}t@m&=noGjspmJB8^R=NOf8pY#5>mUozS#c+(}th15FKXe*Y z_YfTsLygQ{tGh4kmSNsOiJQqdRe9SwM&|LvK3mh0cs6fQzB*w)9$T z&sV{GH%i9O?y+pnr1nM6ot}xyR-bdh z_TLLt0O3SS^>7TX&K+&}ZhKYZKbL{1M=-=yYZ1B1749U-pXo z3GM#XVhH*pq84>dwC8mb>snActG#169KWmu&Iq>ln>u>>`nOUF*RsVVhg}@9hat#a9naY;I$@qEd%9%Nw-^G| zzPQtLG)I}L--%Q}lN_65$O#H)X9TGiF7Zz!O7dgwPx`UCN8CG&xl_}ldMn7J#l1gD z3jLMe8^0D0Q=cBwlU?%)2CS?-<27g%-+$~>rBgzbf|%4IyxgVk-oC@Fe}aGT*mo6O z&H>qfSx-s5r|rkqxZZ$Ed&BrtGue@*!D4wq&d}YL(67C%iKN?_L--PXc;-nf!IL25 z$CQI2*G}+cR@nzYf}h;AtQ52YT>USd;{qY|f)08Yd=?fn?Q~hSDcnF}SNFgD!UzT> z!W(FN9;@9`M`3IZ14pK~VS~*g@7D{sV}3`vQZkm)A3FVGBQl=6@ZJY{J&&iniM$)b z+YGoE9_);a>=&^Rdx$uXui;;>{MPqA1Jhh_qYkgwuLCE|vA=mvoN$4-oE6#spg>OY z2-v%BaB?D&_ExmC82vbwH`jan06jg6`!G515WQe+@r}VJe6!1Rk(h-bycJ;+RJ6Bp z)i@&g($$)oY$#O0?Sn*1H)9R|k5iEX!W&%>N=w^JYN_TuCz?e^1mn3tmF9Daf%bTJ zecllvBA2F@27`Ag`XVU#2{me_4S4>uird+bXA;>H%gnrFxM2%JaAELZ1;I&^CmSOX z1D>OOTImXG>T>d52e93+k=dLPAB9<+onY}UADO~lWW<-}#mvP1?2D{7EWg2wER^36 zV7-f3FK?#lJzEZy5F_ve@O~7Zm^hGaXO0G~2-k719y_a#+C`eC3cWDwUtG{B!%fq)pU01mO zhw`V`#Bs_}(1HF5CzACo^fhmUd^~FrH=RlGh9@%)qn&SG5Ul(n+W0EW8uL!eZo!Sc z#@)<(t48n}qhhBGOh+1N$`_!z;#q-@T4B9@Z!&gnqLgb7Yzv<@4R=2{5UW;iuN2AA zSJoHRsO^l5kDo|X)$sTllK)yek@Ojwrj`@BSS9KKCdg*x z@M$YK;o? z%0sC#!dl09R2Lf8zk2Ne7Ic9Rq`{v>kiP)yLcH9cx1Ih=R(okm&T+G~-LS|Az@L$a zEtF6y_b~pCRnij@3Jk*dJG6JdNhqm&Q}S3i^LqFWwPXbiLPHvL49FdBpGh+dI&5%6UJ1 z;=V@4^CAKB;!?g7(i?ZDZqL^IqX@Lx?4#mqUB_9Oj{c7RiB;^hF?7{$*F!hnXuC`D zMcSw~C;VR0m{^ zk1qc{66n7^Bq=IhBi~2q;bf~62mM>b8wF{V@L)tH?*yWjdKKC~7y6d6BH2FC$BP_u zO07`S#S4&@q}QW&@S?DzHC6y)0#5HHjVho=_q)*t=ZgzIjmwrmQ|W*`F^SC=4!3!B z*PeCVQSdVd#$}1Z!q1DpTV>8^lmvKGZ>!Fn-Un_HWdhnhh+;NZy2IKoJ9c+;W15^E z*KS0b)p#})H>n*dgE)4RlupR~D?GlvzaiS1^70E9XX~(R+$jM+s&7Nf7FHEM-s_MP zGHO6Fp1xT_2haMSZ!|+r@m;ENvTLRy!9*z@1LG!A zYCshGA$*RFjjjq2{R%lmf0^A=oA~rRMiu+pyO9Z~DaqwmnYgny?8tuY^2UFlx0Z)C zogw}w9U80{DCG}WMZ1hMTyUKTZMhHY2M8`W=2Hld+Efa&&3ei%<|rrFij|qde6)Bt z)OmAJ4?8@mKKQ5Q6yJAxOd!rNh;*N7otV6(IPavxrJ9zayH{B!D){G?u^*cFzCeQBk|W5) zXO2VxQ$9Eu(gq}E?vL<$rEZtnQTD0M|Af(eJd0Mzz0w#>3Y@izWmSEk-&gpfJ#_YL z+#d(qPhXI2SrxB^TKgAPG{Z43_+K&ON*41na(;I+Lnf^5_lolC@mMEnR!v z=H}O(&<_OKVT^!c_XKuhk;|hcyq^%G11PdPq%OT;t&f1McSDV$>?em?pn`m#KNq)v zD>Llkr7-MO!kE7qw*z1Q)THaG{HRE!MLnXIo|k}FAAL-XF0%V$EQD}UUsEv?vFRuq zHmJKHzZ%=&IJHs*fr8#;G}KO|*8&6iR{AD?wgWQ<{_H`KS}l9)0Kc`b~Y==K-Sl(hXt z%QJMhy6NdMEhNluJ8LzYY2f<93GAKz&VcB(7y5QVY=&&;x^mG?dCuX*k3OmMvkFGr zQD%5QPY8lQu$>`BNw*3xmw&01-IURu2l97c(+-3zuKBTPr&TSRk&T}1n_yH8e@{ny zpLxYU$c&rR|4_(lJA8GJEz51UknQS-jhX-3IBQ`oV0!C$jVjw{ETct|*? 
z3NIKO+_YMUXC1n-O@S`I;`9S<${+rO_XXDvs>><-kD{}VYU+REI3b`S%)%)({Gy^F zOb{8l5h^MY79ceg5Tr&bv5gW?Vu(m6F%Sh&Lb^AQ7#$L$WWea$V8EImzklyN=brn= z=iKw$=XpP`m#c`UIWy_?Qw4m7)W40eW9H-E3Rg~dON2l6pq}iL;^e`{r>@GQDfqZI zZbfAa6_>HMH`Z&b0BhvG_|V#2fUB}cbjhQfOmq9eH@3_+h-*}iR}qptvHppZR)W*} zs-?N`f~60?JnO`NopT)bvB|_*QXgKKuuTL$6aLf3F<-Ry|B8vZ-tRM4wXMFT^Jkdx z9bui=SO1*Hx_7w_NlxKh2M&T29|`p=Gd?p1^4$Zdvkz!LVVdT3{_ylyhaVzN^#T6` zQQ7m=^|QlY{%GRG^PKwH#u%;Droms${|s^rN`GS!-F#ftWLmXa&_k3AcQy?-+V#7s z1fS6tE`|%qMdroAi@ynRlD$a4g*oA1{rI%}*$B*>`ELE+JHDfR>JX|r{zsZ9q2-oq ziQzzqUhL`f+vhvm+oXf-YuMcf8-5o`+YRXf>9Qk9PW6|{)^AA8L7E57`6(C%%uQTr zSC-QF_-2OpTf9=PE89$OJg+e<@uzs6C7P^T%;X9l(59efr0FmIk)tKL4#lR1sAGkR zxyyocJ2iGp{icve{(Z~eSK=EzW7 zh+hx;6?*i$YWCQZInyEwMr5tHbyNp^lrGh^v?4_O{fq^ys?%NXR(~lutmb%^fT!pQ zdF&>l6y+CLkOu*En|f6urr z4S{YUe**D*FrES=x+W<0Uu|Qj?LE2FuWB~y1{h3=cd@3VsV*({UVM_&_s)or!Fjp! ze0=GR_8Fm%25Dfv3CX7-qw7#3?9uL+(-;DlAygjEGpe_iXE$Yw{oj0?Oi_hk{XoF!LSAZk0%eoFRKnmaY8C>sH~LGw(!8 z6x0N6Z+hL z=k-nl|`?_ zS6DH3wBfWJICebKrUnHiD>!S0gZ@G4Czdq{k3Cp9d5aqCr#Q=Hgw8y`{k`U@h<^&+ z3BA7Xj1|Rhb835vDMLHBi&kIFg9^>wh&wk;$yFko=G`g$Zd^VvJ+mxlk{OHOR34E2 zmf*}3LRv9SBNG~&-OyV@WYl*gNjCRx$2{JydcUtOI2y*JpVE166ZN*CTSxF%mb4nrC5q zPa2L$lgD!(ea+hAm+wx)2>jx9FE<#I*1;8kllvnGfdva9k$wcWcWxb$j{k3JPyILH zHps;+LpaB+9Hb7953|LVZ=5=lo$+7ndT&W9!U7m9WJliW#!}qDYb%G#Gn=r0L!L0B zAMMTzl(B69TJi6+!u_+VBr(f-9`&VFsW+nXNE_ilx@>)2M<)du+P-l4o=jBH@K59Y z9(X~MkamzmX@n3X?}3}wXd%Q6K6uweIBl3mYkG{;mz-gp4E891S@m85yJDCA0z5CI z{8&Yb%GB-H?wC4?is4&qk;D5QWq1y;es>!YF?#y`o#3ouo0~=lKhryMbG@Z|tm%v3 z|56xIQn4p`4dKof_LG&BctrTHv5x=2bha5GFN*bzt66Pc;B{QH5-HcZXesOlmnmue za@g0{<_!Ormxg?c4NE;QZsc!Mu9JU#ZZu&RNxPb#xcox0D*b7ryMO0VoRMJJ5EJxZCG zdQqk5_>q)TUuDZFZ%<~5^dz<(@sMqr>^+pzcHq5fQFD@X2?ex^Z_By%e-aX(-rzl> zwTkmQN*plgXo&_(JRn-VZyA9_H2yRS5f2^xB=?`DdW_RKow^-?+x=?vGgHt-v4?9f zmMK2lAvn9IQN|TKsG+uApzDPYlV)agx0~b}_wP787a80VAuLhYu%|eHS-pF;86Ch+ zA&O)`x@mSIE%}P9m9Wfh-NuL#9lw*a7i<0%7pN$$kdF?%hb{Az`==yuRzpkc?nqG& zqg2H%HvV4ZM>=Tkp$6FwaPryRovXDd=I#At{3nWSGjt?wBlD~B$bVyA0Lq0&qjck;$sh{hT;mo$erPM3GK1gjDu6-*Jr1tT{% zIjEm_$wj9=LQEbQneeVAHBJ0oSJnPSgV`XYO0^>HQTVTr>2Hz~z%LTNM<3hWu;~8- z3eqxj@hFjh4a{x%bzc8l!l9=4_`%l_$Llf#X5LC&iL|}25N4%x333%-ea#By{QPi% zFq-_-ScRrH0=)Z{aDvC3l-y6eHF2H8w^j6C!(G5+c|(t`S=sx*G?O$T&e!Zkf95?! zohvEeWv97ETSKSK~QO896n0yeanF>GslnVF0MmQ9?ZESguIMbEbJny=XMk8nY#=Leph7Cb?xpY|~k3 zN_jvaSG8xkY>+xPb^~Ur!&qax)NgOF$kd=d!g7ocz9K~UG1wT3DI4@*EC!~p999I5 zQvQ|Y^%3R+m`7E$1O7wjnfUgOr>Wfq6`t~c7wW^Z5|)n-n$N4imWs|n^mwJOSKAdT zX!e8ZflgZOjS~XWF6lF3rGza_*opr{PD{WgwUXUx{-}M7x;)Y&ZKZB0)bUuqZ6>)G zol}W+ms35FHlpxP^Ysg?&r!+cHC4TD8ZT0YT8y`h4Mr@bVB3ZDr$Wf@&X{zWMFbS; z1Pzy|&eyL(c(RE(5vIs1CR}ALgJGwBtbddc_|ciOaG!fihQVVqTiwl_n!^E7@fq_s zRtnrgi4Q%ZRGALf2wA)C&YVlfTD)x?tl2V)HvhJaK)1YOP^L5yY~`SYWlUiybM;}| z5uSn*B4|a?TL^~*!7nj-1v@1i`2daGQIgU#Vo@x$=nf954NSvRz9{tXS~*=DpqW>k z2g1FmthlNvNjX}AxO`DPLIhoK)$!W-t-v*h_Gpo?zdqrU*SBwje7&Fp{DzO)fQ8gX zYJ_9)-Z%4kHKpqokr?w*eS2jlKj*mM@yx$Q+~4kMzkS&xp;M*c(8mnYleaVJ7-Q~v_|=W?IU-i9{D_@EQNTu-?F*=kso zz#1A5gw`33HpiE=>4&%xKYLZU_@F7rK7GrpoVt5gJG+$qG0KA;=G`~Aq+gCyL?!ho zo`cJ2Jb6}Q_zXmYI3a^Q&A97~B&u-zl3J7cG1`fO(R0@y>kbsszP4ssziow4-odI! 
zMdeS%OtqS>NRNsw$@b7QVrFI}8X8mJ-FUXhk8W{w*D%WHxcA@H{C+;vv1-K8{_ppm z<{$M~8Z>)7#BcM^U}yA$gI5tvz?0WmaBqe^xR# z3-eRo*zAMf>FIcp4)6;Qd-lrci>phw{tndInGF8Gc*z``qKG4ZjZv`cI#%#oakDLP z$evm?;=^|BHXu)MX#5GL8jyD0#QnnSTYbjo;|a;FM?hIpCDf3jVsi%5fECZxsp}kp zt;rIgckwN;qj|L{Yx{4((9r~Qwyq@oq8hdY{ zcBI;Pm3>Lbk!V;@`=#&J%J1*>txuZnSbaGTb|`{;Z64^Ls+mo6P~~yDN6@gL<%WDu z1$gzk6KEK)4%@8*&O6QBFEL6P#WE{>$b%oeX@X)s5|S z!RnI38lyp~t+AZ2nWJOrchLitQ`9%DIn1MMbG$m1Wj$=0$oFE1%AesnBqdZ7$y3_v zDijhpwL+if>a!w*aAVs@S`OmbZo+{n8pV7}HrVe1f#A%F;!y}&g$IydW}8sC;PnU5 zcf_+0be)JxaWyEXb^!Sjfx9*9gk5B;{Re3MZ~|LVYV#1ovElV|;N3-v3mOhy+dVY` z67;xWh7oEnwrTk4G(5I>w&q~Z0z0vGU$dMdJ2mY1ll1S;Ul~yQLZqMMb97yjLquJd8?wA-}4lW3JO!KxXt32}+v|0`jPgH|Q_T!g2}_q>fZPmA1d zfaa^+L=gZkD|SOA#`~fLfao>4$-DJ~LQh0uyj{SI!=LZ`uW<3dy_bq@LcNdK=b2B< z^d0_A)w<#MExD>(CmTh*L3g6=gzBz>##TgXw0;-DS^H&?*l6TRpH95&fJ zSKU*T8Jx=n_vM&B^_PnN7v#VDi{-k;M^JXvI`6^b@&$y|)@(t}^1i}sSfL{jgRj}n zLS_@b+=BrurH@OQaN&MSdMFmtd#!yq!9OEA-8(8R~P|NAf%!J8j6+tIyH${*Uc3+P z2-Vpl*nu{wb;6W=Xuo+2Aa%S-@}jc$$%*opE8vF4?WBSc*XBB@jsCJ7%e4t@RTT~t zi>jaC`I5BRX^(|BLv3~EoxqLUIwVh{ga*7{z@st`yWaXXA}}0DgimFt-3^%%|U%$y37PSG@1UMP7|XGNmbJiSByiSGy|f=&Jf=g3#9Um{)+ z8VcVL3SjncrUuPfsm3A8;=Lp_77A9w$<*l?S8_!aR0AX3a4GSo@evZ~cZ9jS3a@L~ zZqMty#+V!4$1>Whw6Vvzm2qm2l-*4Y+xhgG>wJ=;z8acm`>?OV(S9ok1(1!)jr|I^ zRPZvG%Q&FLAz!i=NV_)VC~j-8Up*s^q*jz!ubGKIdk{s780xm>ev~lXg2QixfDIba z4zUR&WLehA>*sjnsWs{OvDxn;fxKF%QTA@hcX-de(B1i{t~ZBU0K#TPm3M1WcLCv6 z6>pG18#s}7)Xv~KyYD`%8pxrWn*mb%?ThOy4Z-9?HwsYX@abGkLU7%FW%V1;sdCaz zG#tnKmoe`1iuv5tAY;PO&D-dJFs$K>JF+62He75cER28uP9*2(2Ah_|J+ zBKn7ds~ItpzkXHRJ7R!QNW!%3=|u=8$*bl1fJ9(2X^A8GMBIG5bhCzZi%9`<_NM+x z@}ss(w+o5y*$b-5H!)cF{e3Tz+JEoy>m|tEYTnH>MnL;4Z8jY)Nv)0i{%`Q0gkvn< zSQG_P6~SP_*z&yp?%J~d?rcO`#2}SU9n=cRDhb6hc>tVVV{lks-}TGr(M5>Val_UP z>#$x$hr~7VJi$f$Sg=9V9!FSMIU2u!ol|2sTS3LyMioz2Cw+AChbeg0vxaIWGAh&y7$th6oswJ7vL z_o2Z`A;j*P@Q{cV_9zS}^TaMw9wpNL(96j~Wn)_%YTBcCs2$mLZ|y}vmF-q|Hd1Bc zA#dFz_E!=6_&ZFa$W$15yO9>!__|B5D?y8R{-pYo{Dy%EZB6u{7NVaaj6YX(5GWjH zM=nDcUPe?xPA@J@n6j;^TC#VY!f2bLnq3<6%~b?j^27_m-r|=TrsF>cIMNyPuBng?(S;%r|9G+J?)7OQzr_E?7qHBs ztHz^k*<-p6R_f2K4T3X*IABfsN5X7cf$O8p#nx*RoVIn&Vd^7(qRq8y>hgM3y(r-= zXJEtn87QY$DY;kMScen>&$C)eIUNm#%e>zo_h_^CFYsF$W?$&5ua%T^dyq+_|9WXz z*>I0?CCQb45Fk)mXup9zuS{vSa)dX2le*OX@LXTMYvpuhXMZXQWBMvb<^stTld&sc zCUq>0|5dtl7-C5t-Nl~OZI0M|brSYxbiP{X?GaA*xc4M+KlfoPBC z)8&N!;?L%F2#u^eQ1ZK^pZM$Kjk~Aht7Lw1ovwmT^PGh_{T!cT$enxL5<{|_axX_c z2_7SOzHpI_KyAntMI22=%$uifi}n z{+8?w4e^CdX`>=$`D?(!B|3uPRvTUj2h%9Q2 z;AG9XiU<@zKBNfbKsVmLhi~oZCcA#yzQYefHdg099*GYd>GE%tS3zDTKwNw|FCi)# zOU|TMH(KJ{rXIDOJs6w6W}dcLq|f0Hl(M3L*KOf}=42N4^QOf9+9oHDa1O!WuZ7Cx zoPp3bGbAym3ONB5^x3T%ACZ6Wnw3_0=8lTl{Jt3YqdWEHKVSn3vbD!+NqA8LP_VT8P2Ehhu5s=@Bp(RE#(Vp+T5= zu;}C_!a{3bGpa`1|W^CdMKVL=g9Pd<> zx&A!*VNzAxjc%WpvH-}Q0?jc{}BnL%Nn$eeYbv4^e?ZsyxC-X7~|{g`#3K;XaCar?0U0>=}kMe~)B z_v$4XMhY3kUr5WSk$;8)70u_=#2IF3y7#>Nk*Yho{3j+V`|nF@2u~a3QT2It1t7ml zEga70gfC~^+vt!Pb3(P`;GfMYBY&TY>_SA(@ddb*h$srf{e`X#^#fy^Pi5J|8jtUP z(i)#I8P)x(X*|H+xN~jJpQFNNC7o?ZxE|N&UU{{)sd?uQ^)^3EQN1_Hg`6%7P2c7P zd`{1`_Q%;tE-~Aen7by(1m4Tu*ET+w(^u;0{vn=aoi|7M)>$IE^fTomI^9NTu1Z;3 z&VM~qn$ef98}kHihex5@BkSxxAQ~$QDc|80#|(|esQeTsj7WoyFK5VcVA;$W4T>b1 z`u{3yG|X)01p0rfe`OFl3%nfkk|jMQ;{5sG?r$)Du1Ags@n_>t-8RD9wygp4xV5UY z?%;Yh?MFJdmP`<6;8>j9`1*_aRlPuqxEuGek6#|OLPYm@t(L|9>Pwnpn4RT*2J-n= zB&pirt}AF+ILOvFNGHT$@g=z$xQOnv+eESntcfZW0|o!Y1ktlZ#D>b93( zrU;01;f!!cp+FAD_}$kcu%qp#6I8(t#(qF%c?x%%H}fWxlay_u$Mv^hJ&p0yEhdTlloter;V? 
zwV=?ugb-j`PW?*IO+Cy+lSOnfZyYRM3_=w_dlsC+JyCamwHc$IPafTNgY%pO#A!wu#6OYc zCWX{@@PF9}O>@iq+Z%v}%BWb63LD3tGUvWFjDI*uS=1vj=D&juWp#%?$m{b%zgCro z1+NJS_}}0~NKOx97rzF-{<#(4uO0kay|-nxW*jb^8C!NkN=yx>{^S6L9eXJhyh8+2lArYf7tt_1R{#?6ZJIRh(DLsH-NUeeu0H^t1dtP$~_h zL3su6c##xpwEo&_MJ8pn)Xc0dpq(%j#Jz%Qo_PDy9D_rCtDW1N1E;5K-;v)6Ezh%{ zVV229wadF61{|fVjEw5&zFX3JrkoSwpF8ZWkVSw|R_k|`v~A}4gKyN>7GUOElaS;Ys$ttf>f#;kmOEqN zCqc^xqm7bP3DWS%a;Q`1(=Il^&~y=uG3ZmN*9cX?{wSvHfwjymVwoyt-*Bu85;Xz- zYlTE+yGM}%rCHwDijKwGSD_n&OG6O;88C9cT+qEIX2&Fb@x_8Yq`F==1p;@l=`fPa zc)Xb0mS~%Q_u2&JWB8`1t5mF1PfGrGpCpaqisw(bm7A04O0s@i;dBkN3hXl$2-}{8 zV|u0DUDVJ*+67-I~d-Y_eYl(z9t1PX%4DVZyUto{!NFim>51FZx(=3 zR`{4ghs^ZIC74*anVvyk10Pe^(YLhc8Av17R#>+fKOR$S?5;>m;EvH#8~f%RHWr*u zhZm!cE}R`l?ocJ(T$tb(+Xuxz(|-Ay)GXcBItJ8E&PbJJ)JzFxvUNdzK_ea#=mohK zizSw1_=r#GQ)j9!$j6=tsGC+i;hk|^^)me76p{R*OLP>F!0(HnIrh8Dua_zCkJsTN z-vF38tsk^UoQhAi)eX7s z)Unm%dds)eO?UMlD2wn6`g=7hI>09_SWoYD353VEFEf5G6IYD-GeBP=Oo<22#&f7t z8$74VA4FxhM&NaL;4mFeUl&+%MWv9;9QAQXsyenFhq3i=%E+n|tFmuQaCy+~Cn@!d z6aogka`$rK^gQ-+M~MD?$olYFU1h0t>SIIa{6&OLopdpdx#CZIhy7bE7X+7(`{?J7 zLa^sZ^n7YcFs&E^MlH+RT2XJ9wwwyU?VZPO&nNtn-+A7urkZte7ZH$zD@-s)orUGd zB0%l>6O83nK*Sv?^Qc|@nZdyAKT-NhG9Yu){g}R5u4sL+#PrkG!0D*9=kE>onnQw5 z)j8+h(vXXi5}VV%7{SVAef9pV^fF@;dN_LVrIUWjsj71@W+lyb@499``>X@>kk;PL z+57N|CFTrm?Rt;vk8;#A0y)SF6}x8Y6=@~9GmkuB(G zg4R{3H=I*T(DWKD!kk^3IDYJ>r^mQtCiT@qa(?g?e@}{v!9r*>d}4^_Wy*v>`YoaL zUr)bvqQ8Y0eO&cR62G>rxL>oM|BT*t>Z}P?ajN#{fOs`RD&wxi=^PC?wr2x})-lGw=fJ6tt+$uE``mh( zoSMGw{!!oJ=V|YHi#;M8@CTSZ4{3+FnFitr3(d`s!5%l1lC{6?HJ?`Ey@=cpkzh~r z$9+jRvwZTbvb`)n+}8c4s}ysX(MC?XJ#w47lEx9(ZMtgiUhhHl(ia;1KIOa3i&;wc z;q?-OZq+cPs(G0P8MR%0ByPrPlaSkn_HEfO<;R zcKMbab8nTlNYih&@qG&BX6Ts_cME7b+ZOIeq?vC-FBMnt7N$``ATMX{!7#AD2=!vu zZo#&9i9zCVDz>(Frv^I0scmT>3Ay5iD=Au()WAZc|Lle9$LjagnoFwb6To#1JH?F% za==&5py%S6-nZmD8VB=gbedbyg86I};~sl)gWs`eg;!IJr8D&!no@mJ!D}J~k>gZ$ z#({`}E4y#9pZO2L+(jI0y280Bgev4Jp@+4`*(>*gf%5$H744PrS9Q@jBk%qo#&vs#BXqGvvRjQed5n=_QQTa;|!@$5ueu2oWwvLKRK>>Kbbx_ z!nI^QCO)?HI!Fz?E^sFrfV=UmG!V`6F;mLd`EGBGnVsh=LXFUA)LUp?kqy+~O=uo>NHjdVVj0mF+P~iBY9sfR z56bHn)nsN>)t`-}EH9RZ9&Fmq52Nf&papsB(ZWf$`^F6oRwK3I{MHfZOHHsQ8C&Zi zzEaUM;Nlw>e?ePBBuA1C8bEefR|UoJ08JjqEXU_iilyun83H3r8^Yk66*6^>QAX`$ znGovYvGINAnHX{moEz<|NC+~?#OO~Q^0XeM@&I^vYG%N;y$`8duA^e7QkwC8UE_cnaYF!BzWCIQ?KMQF zhWyhf$d4qE*7yrbn>{l3f46_S@+|qD4JG#&FZT;%@94+P0r}uzYwivF*L7L;QfyR(VWjigeP}05V5T+!I&i=Ji@CG|@E7&<+?L35 z&&%OLrXCaC{1@zbr(q$Vr!<+E)zvS-n?wDkh z#rr(FIB@|VctobQf9_5ZO35A9%gzuIq0TK!VG&8iy}duwabgYK**H`a;6?*d_GPA}`NM<2@UA5KmxX^+47bCv0l zx#mS_5lG%IS?$8nMT{e`tq|g$TbAKEb{OinGdXnCA_d$fh_!4`K%Z9+h}^ zPD#7+rx!lx6YW4Bn zvszTcJG;HwcEkrCoLn^7XKl|8;9S!A=_h zOzxm|^X6HLK(ORo!Tn`0%EyPkv49PznFoI-MYH-ZwA7p6Y9z&)MS0&^{4{gBZY{=e zUiiwYkXn`=NCG$iZaatef|Fc3s*^dmfc_9YCjD8%+>iW*OBclbv^Lpr%@{#NH*mh? 
zUE?;`UMzC%_Qx+127Y*uI^ao4LCLozrfhNrGk%}1e>BE{Kfv}_CUC~k8J7zVKA*R4xig-DAIR8B4tg@s^B|9-Pvoax4XBKThpn#qtRBT* z`8C2+Otc@rQK2~VZ0no6qsQ!aq{`Yl)_~q_FeO=xXLeQxZw^zt@a!(K+&{0nzgLWwIMjnZVf3TXtqXNKv-Ttk3AEOcb`C0e_t2OsVUHHI z57*|AIYqas#zWt6W){6(^X@{I(H--Gzk@c)D2b+nA+$R0~} zjvF}b_e8Qz(BixJ7x+w}wb|#vxZ&aq*2PKt=g&8{_xz>uqVfXH`!v%=wTj_yP*Qv3 zYZiz@UrmNaTe~a|)btP5LeM-jl|nq?dDI@SihsXb5mG{dHiGMt%nLyz^b#V8q?a4q z+Q-(f1FVNb+@(NlX0{zk!rpsL0Fu98Anp~+a)cEtM#VCf?d z)GoV3XQJc}#)xmWo+&*wtzu)?#D0C#PRdpqxia0yt%I>E&$wYH)Ac?sfu(XZ-am3+ z_Cdl7WqeKmYmYss{VufL7Mgd-;>_D)*M_L~IBvrE(|k%LP{T{< z>U~coug=u0QiyDW>8*X_WbG_?YAJ1{S9ZB8EW|`}1eCKi5lLBozt?G!CmkD?5}(JqN&D((0=_cpu&slSEt_k>-w`FMwJ&L4WF)m@S2%EjhcJv6D|5MWUEqLN@t%ovGFqCjYuzV$$-U;za?N1SrvG-85 z)bc~EA^r)#NYk(Y-{AVKr5-A!r>f1L^7GcaJ~7&lv-mX{Z<|;fU%ZgDmUEhw`HX7; zwO^ z)>L(1pelN15&e=q6wd|b?ib?8RhWG)mTun0F;^+l4$h5R-=@-*2dQj^kCE8tI*^wC z^^MhZx&E4&rH5G4cHF z4r!flS6Z}1{FrP$=ZN0pu5GETUhddCTN!KL5{C4GJ)imr-B@&~8&UJ#q)bg4{qO*K zbp7klZaYN3OKQJ^$mkL&kcDHAtF5Z1l+Vp~$@nJ)nhSjHzGP@7Ru^4i{@nz;++QZbCUEHrt;~ zLu#R3Ylv?CGY21!_lbM6bp@Z;Yb8j3nQEVNwlwPS^~(Gnvcy-gq<5Rg=8<=C$3|(G zQ))uL@$gWLk(Q1dzFLe26v!5UD)l5NhX6(w9_I1wYjZ6D8Un6L?5{qHaXydj8-AW} zXRL1sgdCC?WhGM*qV~*t;w~F8}xgX1PCjDYGn|*H6DoV?!sYM9xX#oqKSi zmEKppLlbehv>7_$5N6Acy~QqT7i%$>*emx*P_BPrKk*2bU9RIiT*)6aZ@j_J7@j>y z2wcNiXHn(8;hzRh9vX9RSbpOu_aSXd!@PlTYOd3y+kji>NWThD-W;E$E*LrGuI({}=Y-;k-47h@;av?XA=IGs_VR5NG0ylblxT(K>t>V^LGr!7Eo#>;sw+eSA zH15%807gXVwxL?A*gsJ>i1i_jzIvH&pTlcH$k^vi3Z+-rv&9R92ZZC@{707(xfdIj zC$rgh?aQe9jvicC(7rYmrikx=EXW<2`?O!gk%?PTfN5CZzBqLr$A`I3 z-M1&}b*VG!p6t?)fMDjIF4w|_&MHlr=SF41%bGglX_a(yNe{g%aE`bqo+ff#X6ur> zL=gY(iqrl?p^JtaIK6nKI!Mtg3anH%hBuxB4n!BIWghTUS2#TjuWM+G;U*ud3f*mXiiO#B+df z;C}=q=m`CpuTT9=fb*Nh5;6Mh-K{E)=P6aI`@}HrR_yu@>Q6^2k~VrKk>N9MZ*>M1 z1=aA$W~Al`e3D%t8t?{$<3rG+(Iu)%MM$$p*WtYN|Dm?6c6;KQe|GKkY9cL|Uvueq zbFqh+Fxu6e9S)PXy^PuvStX~f~ zI}_J{dDyAI)k5kQj2xjDQw7_(K?bwi;nHwA zyLhaP2%pb3-4S9$SE)2G$8!o!TwJ^*@wk_vm&Gv!(PgD<62kK<%@;w3?@9317`n;h zSD7)Zu;bCXJK@iubMB(9MQdc#+48A6Mr5?AD__KhVb}tkOUlFgpL&!@mE}f?2d8BO zxa-*ZEA+}-pw#~G4prUq5E%Jr`}nGHd4cV9bkij{TER?N?y;U|*HmxswVjRXBKmrZ z9slp?E{prdn}jG1{|D8UcE#SkSs-X_#o-v^Posa%Zmu`(XZq;jDvHNsBRZd>xy8P& zwC0g;-p*IR62|nBK~p5oU$l?2?-H~uLowFPJ#V~yxt}dgsJ7=TzvcT4dK|e=lQH!Q z$))o5e8+U3piuceuWmh~*>I)CL8 zHUpI-8E6QH$SQ0*R!}C0f%AbB*ToDr8AB3$iofp57GQCoWlz*fd!-||!Ub9(V=xY+ zUu~#~z@7OKI5D7(_W%DZwiU~mshZcu^V;1$bi|?8>h7mk57ifjbHn~D&hq(ZK;3z^ z)fNw3)zv?T5?vKjyPTUvR68a0@A-!WZ;K@8zsPGzGZ{V#4rr`$d$apFhUpv(}Can;%#lxdA zb3})bm@4wSSN}e@9y{soT1L)L(7y)kC%{{MOtIjtqxqqZe)8@a2Cc#Ym8O}&z%x!a1^!ytFa>as8dKWZF5S! 
zQ)U`_8sY73m9#E#6M{{Ge63^sctSv-_OdCLo)@B!ynykmB)<11RDviuN$OfiO+aA& zj?mi5ur@jg-}vJ%sxB0d3TaL>0>LekU=w@LM?#AC+jE;M`Bjx$ z$Mg6H;&l)3PRrlu6&_OH&L!N)b6JE1zc@%ut&=%`fcv19pRAI_fE0cw?`X0?n(t&r z;1iRYEs4>YP$OyhfXjJb=qH>XPPkB0MdcCzQQvfTy`6k&f`1~tO1Qy8$%OW$ba_wm zPrOYRM#{zHGqX8lnRafg0jK<;;cC24~`q2^gKs?&Fem4OU6BQ^4 zFtKr(R67e${VebpEHxluqm^t5XB<&$pbJ*p+w9(rx9w^Sp4Vn5?oj1DW$pgRiniN`w)GxuwZq9#-879vz671hj>>j92-^j%Vmqk9pmnjI5X=B^exqo{OT&ln4W{Z93 zGP21UsRfW`}14k*O-EOHa>Xz;zJ-NIJ`vhf@$AXy7OY>Up~hk&q`nbf zY$#AEM7o``yRpjn%+u68{sOkA0E6NY*!c}rdM}vI@$fY4sNTRRL#4*Zj-ylF|V`(n4STj8n9=rW( zkz5zd+Kb)X=MT1%1YzTE;O@#;LkR@-7u*(}c!O_O zxww9JrTPt0S$ZOLan&IV%?*Ikz3pbTq^t3u>vFgF^eLCHSJE_y#FBKRM zQdU*6I6tqx(ZfxomJR=tvWKLS=*h^ro>e7Q0D-MwCw40+C_8&GGMTKEeH3;7jk7o+ zY)%dENuuNp3|+@fL;uKexn*eR`bqYG9G&|klkfk>hgg!PR4Rv6iV&-mIc}s&~K=>Ko>wz2j z*82df5RJ1>`Mq_u0cODch>W_Y+({{& z;oEoSuUw!hixr<7pQdNnMG%jc)4}!j?`r;|Trd~w6FThFd@UAozVAbUc!||i$lx*YjO&K;vOQX7A`7PO9EDGXW%pwHgD*DQaIaPzNX8)WF)w8YIf z@fIM!r#xd<<#qL|ujVS(3^e#@Wl@3_L38FZ6c|BoNcmm3X%zIE1?IHR(>zl!UEP(cKZAjdGrK&6lM{B z8nX@O^|c2W5%(LdyQn^f1eykH&qi{*J<#iEyX_Ub#2z{G2ej4@gJt=@Nl;p&IwtcW zI|sUGV8)}>>pYB>eEc$Vs^vS+uf&!fiPT9(z?iyT%a_=1X4Ko_JN4z|I z0sA$nu_Ly7Mgi+`lyAD-*x91??XA`?B}!J*f46aZVCHTvFuOGu1l41=&sQA;P2pk? zv^7!d392UP1e`%rkkt0M4=iEd3j81=Bks6wvy*~0wK`5U3v_>@HO;^KLas`9N9?cf z1{RN-Ev>7~+a$!sNFIbQ|GeL~E7El77D4pXz1h0jWUxRVsj7#9ZyYL*eQCs8cP1>gxDd#R`k|wyL{=Y{r zp`#JnVnL#meJ|@j+gY_*}|O6wdMq&63&*qZd*_-jPTfi2(W+WQRz z^VxCx8(;1f?5b#6CnEcvqv|JWDwKLNC7KWOX62Tj-NB#*WPB9@{U{efw+w;ouU|$* z%t5=-dGE4YyT&)K$b104l$uSQfN1jlCS3#jp1#`X5RhNx#a7yyJDBEr$(U&VbfU0) zByChr`T^>F-VUJu%5;HK?9H1=R`}~^-Bp^$VSP}H^Yby7c%Z{Ij8Ia!>$z{buH)_3 z-}|uG<9(NiYH0q^RS%XI9SY8T(q}c~`0{z}Pd^RL803-)=S!uXRRKJ3MG~*)7&c;) z!Ta;yK)d730mfLlhZCZ*PoU|}wyEq{plC9$wmopLm9-$^x)sAMSM4`&JrN-6 zUi=jt+#h};ytlPb96!_UkMoLz{kaC#i7k{lJ%$}9xzNBFLz$bn$TaeL8{TSdgnGJe zrVK7aLM7{GJVIPxp4Gs1y(9IP_wHt>7PEzH$wADw$32cjU0}=$3Ti5{2w^)nz#P?0 z7e2ywy_83p!g-w6#9{#A`3UMCGp_gp5mA!RM{%?~oC#I;F2J%5(GvbSHs*?m<PeL}ztUg7 z@l%qyY{=cp=c&2cbN(V1JeS2ScH>pPUHc4XUBSdFL>;&htpAYtD-a@)g^V ziPq4;)B&U4ykz1`-pPg!e0k1TGj-RVP>=q~H8F!Oz9ZVObEJ;+KPgG$yB)?bn<%f) zI9Jce*tI+l9vFXZ#kAV58O}%`UIs4Q2yOq*&U7~t+oh(v=$z6Ox~}@nOCH3|L!f#SIMo~`fnAM)DMlgHg(a$<2VRD|S;kg{jg+wZa&%O7 zb_K%L1YT$j`GrJAYATW@!#$L*)j`R-(7z`>Qo{EnxxpotPfE7K9cWm5;Lb zXS3w1+NKyRz|X11onu9$2u}s+1|`CGL5=U9-W)DJieiC*^6$nc5V=Yzo5pT-MSnW3 z=>6U#>o0|;&%;-xuHO3ll|U~$`YQ?g(f{xy21&}&PgY+UItA~VO$YLD!+h|*>q`TTyAJUN%h;+PIj}HDs zP|d*%^N9{K2`j6@E<|P7t0g~SK+U#OGpPEzdpYTt7P(^C;MumdFn{ZY zN-i4)n8wz|SOB^A4LG;CIV<%TUM4Y>q3hjLTs4!;E{VhMjqq3uLpV#8q*LU$@r5r& zfnc!r=HC8Z8>>0c1+u8A!53xQ-PgWeo;7z-cRjf#%MVaARU%3sw2<$nLK^noHi{*gD}&zvU~{9Y}otPg6u-YkJx8p zbD-|8j+0H&nwVriYA#1%TxcqV$kF9m6n3iNip8VF9w~bgC*H}aFE;nwAXfI^$ zeO@bFLR!{ht_L1yQ$aZg4z&0sNZY^6mR&vbW3*M5xONVcYcZ=ULHvw&p@VCG8e1rG zCnPKNh|DSGUn*ps??4K++IRXSW?sHHg%kGj)wi3`Q{{XPD=A{!lxjjzuk`Y2lm>Q1 zS_xf5t$sAQb;yYYEcnS-9UpV8X&1d~>bx?l=BeXqE_v+3Rg_t>Ul?7~|Bu#2dzX!k(XSe({+#F1p1@Nzu(Nx8GIM& zo@?nbzcSl8bLl?4=PZ8tUB0ll=Q48U>cBO(fRl)`=V$K$4?|#08Z^Q0x1o@gHmu}d zrVeKL$9^g84A&xTo@FEVwCE*gfe-gYxm5J&C6p{tvlRhU8|tTvvSuWE7>(tQa-Y=% zN)2{LmCXifa86fe9@UE(6aomayEW)5{4l z2VuVsR=xS?aQbgj%Ot)tO6uRXzVKeWB&;v1u&aq-2DdqD^{f0cGlOcc^;k0J!|@{; zc}G@#@4`uPWu7ukpCBy{1&wN7n134Iv-D8$zm0GhG`UZV+!dl+U_DhWF1$&2CiZ0G z+s^}10webz9J9dUmii||JX7vZkngZyh~-^ar8G@7E(gN?V6i(g>|Bw@9qFB9ZhL1@GP_f$1g9~*AI-1xdavB`|dFI^;e7Os2%+%AHvkqQKLW*vK+=(>S6$2oPt~=Tjao@_1zY z10oF@W8a6Z-CKS}9?T=Hf|*LBJPa$301f1jNLVfxk3sdVbHBIWb)F>pX{W!fU@`L5 zW)#71&e{%kQdek4p+CQ_)xL>TbQ*6O$VC5o4Zh}v2;*zNh|2V!TJEetf{vB~w3mOz zl1skf*^*z|C#=)hE;pT5o{Fvf0Uira*(jQb+4D$%xhCzgw-nfO-LcaMNT9%04Rn3s 
zdD_9opUKdI-OR0*vmvWhN6FAzHK+IrlKM5|5Bh9^ZO3x)RB3#r24eC@@|ytZSPwi%t97K zUPJF77sSP99iJEZU;5azlpk9HuPi$fdRxWRGGd*a2&1VtLUEibpZ2`zy)8xqo-yW@ zf0aDoh>Xe0JEPTkBj!I8n|Jf4%59$~CF{E}_hMQmX}ckJ&zSt^6YDx-=$VwX*M8K7 z7J6V%80(g{{e^LpI^uqa=wI+@c5HD}mX}@e*g1_%dB4NZ*M5^BdLdET+vXE=5><5Y z^pTRAqyEhjt{*ZvU9AGEHE!p$$Am%1;@Xb9?SMhLhtQ-oz_gFQGE;yP&=h^+ z%01Hvzqw~NUy3(^!4@vex3bV%wpNMQHQDw!OngfSvf}~lv|q266)!W+a5kYwPC_ZP zOItCWA*}6krxvlK9jNr#g>=ck*G;Y7Nc-h?kGb){j5i+9O}MLhS-S}*Hg)5m-=Jl| z7T-vB1j;`cld8}cpS~rVx|b6__E26u+ve-sPwkecAu)l2HK;+~Ey_prOkNccAlCHV z1EG6KC)=p=Sz6#Pu+$}*W@>%@V_w+H_QV*FW+KGrtlx;UhxYmYchSK9v)8?X88uvr zh1h%wH6qV1GIzxNbAlX4ft9op@(Zrf))U@w13Z{5i9RS=asq~N)5L8gmUQgNBJTR2 z=xtuZa2ZVJ!(gQA&609I`D`UITn=J4>YW8D5%V+7K@pRjPH$Da8{20i6j$igBrkm&+HZ9YbPO(Y z3#j-d1$-t8T7?^)ub`ZPWo8e~K-dR$;d4tVRU$@CHz97D$vO0kdCzK3bOf*}*qM~g zhvf`AgW@1}qU-K8(|f|}(a2E3wuE9=94)&k*r|rhrbAe(zid}A{RXQYUFzl|O{jAk z^_NnP!|L z{CgV@dCLQW`|H$R-`hcWK6*sSo~%L92K2T$?;kfgDf+4CTH1XWR|k~kN}tY2pSv3% z^CKf}@Q6QUs$w>?Cs~4oN@?HN1K80}EcN5<-oLThHQ`6DD2{YFh>IRNcw0R>T(KtMz-%EX z+Ves0g_B&Nt8sN#GGy|Ewebb3@!N4;2lgFU92;&8(pBJe$wc}?V9CP)p*-dyPYO@AKQXkoG34&vyDkN|?K-@mcZdJ56=Nl?lt(j%z-FsG|=~Vb2 z;d>}mGOOXGtV}SYf;%tiq8Wx`4C0@B@%J@s`<$muHQi~|1c1Iv5^y5mJACMqF<3p> z*p)=n*2zH@Epdc+3d_#vi{;Ig?6>Gv_Q!*hS_|ji%B03mjT7fv05RxsiDoM07S+Bi zH(h7ktLUQE;_&vo%cNGxp+2s~q7RNQgC=*q8`(nFc0WH3m;Bni6T=Dofx18YXh!qU zwS^Fc-YLt2nBRkSBT`<^_N1)jr)Sg!*UB;NMNAXFI&}2{+exDP#y{oK!zRIh*ux&} zxhv-2j1QJAQAYWqfy7fT4!l87!jT(CwtO!f3|ztPiM18?6H75H4~f_H!Za;Y!^Crb zLtAfR(4%*Ff3!c>R_eqc2wW-_N;}hrs5inCq`EEq;!HHYX!N@1jVqp+d{P%nY4`oR z_jT(JcEKJlf@ep~p5z6KJQ|jr#h1XTCX2{{UG0ZmkEWHG4cNHqoT(RdVp0XY(+%!3 z7_QbKRnu&NI4}-^uD-qPO{ytsMUJnt0(r_@q{L_ay#VOS{e$YmQ9b?Zq#_ug=GFBn zCm$TS(uRO(X$ z^|am_@RoBkJ+5x^U2=u?foZak+>I%JLPZAAVxBBk%3UW~e6gCL2ghl&)Xet>*Is$I zPD|YWODJgr1@8lmTz1^_1{Sln!}K;*srB*X(6n+HX~@x3&^GC&lrIIV?mDjuR2I@6@YN@aX8FDpC;!zbZw}TniZ+HcJ@{&3QKlz2W1sx$ zb!FMv#p;^!r})t#;RXFG%vGLwQN zwZc~UZ=yF5;Mq-avDyE)LjImuf!t6t+(ahfj&z8LcMW&*YfM`#_(6kU^`;LnFOxOU zj;Fk?vu?LtHzl4O9hYF6)Ee^|tmF1n9JF{gWFieN`Le1CamFPRD`7^XhIg!s{5C(F z4DwZS8NuiTrEdd5Il5Rg(uf=ML12gcKc70%K56k2#$(_St~Cxl0c!q%aP_#?G-|6& zwEnHuEf6PPTNm+Y9KSbizHh*0=U$beOY+9AN96Wq)8N-rj4_L=(;g-ztuQ>U<0Xz)H1oH^(zB|6!fg5c`6WH`56my=k*Tnp+ z5kmhL+U^iDnDij%?<{5h?pPM{WC$H%>+O7(#XoUCZCoa5-ou1|s|!k|7YSzkxeQoE z00KmND!SPHD7Dd(J*7qJ&!$?Q_sCx);zI>{hw`kAaD*orROA2#xb~NK#D5{dk~gJC z+;C~W*#Mk>-*5&N-;{qCj>{*~CxibY8vy3$mA#?Wzu{jdwhUYxg0(HODz{Nx79eyx zpm#ejeknZjV#(1Huu0B)Ic_)_lJ<|MQ~Y0_LplHR;Vzc0yyCweQF$ugqfEp$mcC3^ zdqrJIE%2T8+wnG%#8hEGDpN9E0YMW5^OQk0{MHkBK-U9>1g z#UPUh+@#Gc;21Q)4pM$f9>5wLBmbWHmLZXsL>j?u^)e!`EJ;!lnkhn}r4OTb*Wzkm ztcRq1!Gc?-U}ZuAoiipv{?(!`ZmzOrnQrn(Ce^{gxchk$@`;(&v0v99#VUlH1J1cw z#6&P)W|D||#!h^>Ousuc+9AM)JEy`7>eun6i=u`-JA!b#vUl6%+a;utiWzBjAtE2v zR2ntz`@4NShMs(;?k;mE?pk@GX*AX~nlsuy9v?2G?d6rq35NxLx>%uZpMBSwuYGZH-V&wE9V{`h18$H?7_FYw zb%9_j)0J$e3iOV6y#!pE1X(^K0kaGJbF+1Yp@AKh3Sf#_@nmiw!!mg-hV`2#<00r- zk^Al0^!S>i^dYfpI@5|Kv;@BvV~$-G=8h0}pwA9F^5CSjrR8gRLohN@!-yMpW>roVsV7{uz^wdlDXkZBz{09G<+@MCCfDw*vfuer+S>5XNa_N4vLVCVXKpkyK zV8MV3n)#@isKgsa^P*2u6Z<^Br>Z~y>AAN3iSkXqWqVt7uq_^y`ObEZ?APmk9Yl7c zUw%$9WNVdcRaY8;e-w9rwKk`U0A)g&Uum}>_Pf#b&sQmL{R+-ua*{~K9Qw!Ae_+Sn z?dz1nz>v>@P4}Jq+whNnAJGR(YP&p^my8IU_~R>ZR>^)Lh5G#FKlSrS#$=`7PLdf* zUbm%`XF8bQ<6-`Lf*gt6u9RUIQ1?P6gSvg?>*k-#Zcq068~3WqO;a2sn}cx-ad~Nl zT@G=_N%O$d*oT%)ftf$t2B!(RNS>Xmpd#`5o*+;s%O}Y8+O<5_MjoSy4uey(dx$OcSw@==x0)`1VXz6= zCTp|5_gPEEBrD5CS``KT_=}e$lCKswvp6vt(B@CYZ{Zox(jswN_|AB*>7~2VmGDNP ztKh0q+$R)1#?-5=3!6pGoa;^MIF2A#Rb--CC6ARJ9dYZ`&!nNKJ^Mse;s;)ARV?e9 z5pRXD@0+BOMDRPW+;C_9SKX~`tgQx_8o>OwbkXWN`hZ=dwf%WQI-8F&3bx$P#d&U_ 
z9;SYm2x&XuBvg=YVyYEhfLmaTmT-(og6SghU?rjpvVMY3BFS7pYXrWnz$4Ck7{_(% z6&cME`|;;IyE|8?M9h_I!;<-ST?Xtr|N zx)Qq}+F{B4PhjPJ;P`(A0RoRMTNe}J1))9g6Z&2o`>{6nK-QOV4rn-$4fCfN=d zpo)fl<~60a`sDB`^D;=Zu4AJLnU9-CWQm3Vckmi$3$Cx~-I4 zWwi>5**%XWx-kFkt1p3VAbb&PMM{ge#=p_Vnu@70i&bWGD=(^95t?R0d`n6nBtcGy9oAh95Ll7daC7fK|`x!D4EQ5W5uN?k(aIv8S%g*g~@jzt#5(bvl~p zg%GsEO15Tv`5`bZUeM@qmaLIpmg)-XxCV1zr*OEO0{~nto)ZMLh}ix z4t>rr18Mh&C3Bi3w4nL)RH;Ndy=YY;Q0+L~dGS8WU@%uE*@YNfZ(Y_|AV`jN;?s}= zZ;9@|YaAX|0N}+5&)5w;8@x#sph(BvGvZNwbrB5lGwam%T62r3)KtM-n9fn9^j`RH z(74u?QFl%WB;#qzIzu)J`NN9&s}5;(_4PBjxV=vj8zd70C9)Xm=q;9dzg)Mogx_`= zLO+~w`}e%EEyBd?3D((bY1hF%st#b`8MYJt#$=QdVhI29RW^>2t?~Y0VSC5mq7mUA z)5BQ|8&jydO~_l1!*WZ|PMpa+*s44Yzw5(Gr-$z`z~XydZhm{7+UWH_;&IaUg#0l9 zqjVU-C^Z{_2}!UKBgPBtg%z_JAe>oU{aEul>j*@~3RQ5(sbp`AZo6`9t4C9vfEZYn zyN(3`Ay=q;K~}DPCe8Vc6@VfRYTs;0ZY%lYLxI~Y?4B`bk>0X!9V=Tu>3^wz*Bd_h z!bfC_zm~yvSJ_M3IFySHj7IE$0kwv_A_$VD75j;_tskS{c^4yj-o0a4Uu%X4muxu7hhS3q>gT$T3iS&84LLU z_xgTZopKa)VO|?Y^6K^zR4$n(OOnI2E3Nnb!nl_rB%Zi?t8rdH)k>CB^9ZUyzUeu%)iqouc7;mkbRgz_0E{ zmR_YI=*qPM81ZiCx6w;OoNyK0#qYZ|GA5cb15?f&ztvDr>c+wDF0DOBDIN@g`&xSi zuubrui1UgKZ$`@PV5Tr)#~C(0EtPFcUM|N($BBmzMW`**N||>!GA`M6R8XVCO_oi1 zA1DAoE>fs96BsoWv-L+{H{f;i3eo0}@&Qe8bLyR9zC#wCi4VTngR>Q1|AXBt%36dF zFTF^MCT7ZGJ2gI?U6?ok0Pwc%-O_sos1u8_-(inFYHHs8WV^ucSq^$r0GnhrW4FO| z{oI^IF!QRq<}3L9E8X80%v}dY(9fDSrX_539`PS-)NvL5(bUrU)97CnJPW;fP{&$^ z@qHx{o~Y#|pi_3dn^_T8@N40VF;tG_ue+(gzSJssb7I4;(Cbpl?bdAxJEVUEr249g z%Fd(YN1ude&W2QX@cSfWYDzCc5qzx4A(OJBM~+tVG^=y^plVea@<59eqLmF|nj{dM>}w>uoU5x$8n$^B0~DjNsz?!TvX)Q5ouf zMy{AgLH1yt%d-ApKTT+7iTHY5Zg|g^+~S*tf!)D37Z*3d*MMT@5tftVxo7Vkj6 z1Zv7RFIi!gRAzTP{8 zv@!utc5~^xS-L=%27H85ELB@Slv?b!%AmAS&lNw{*3pvOT?falvvKryOq

?NNMj3aJY)@*2ySzpj`Zzp zVadWN+%PXI)7d)@^qK@l9+OPjhTJ&YE}zJJb#8S>9&FQ!7AXCMgrIf7%uol@2-?T) zZdD>T{}yvM!&cv0GN*}0Czc@pRtXLBeE~}RcGdQ+Z=mQg5J0&kbGxg%d-^wx4;sI_ zU4#9?F&JSK6uhXt7|RX{3^yir5HSG2X}Te*Y^=___spI5_*jl1YCShkr%doAZzy}7 zxDC)nNI2wMdr3Lh^aUX>9RBBfL3=e=BQ3%bUdwu+bL#Oi(I%LXOZJ<|9lhNmY2AHq zJkJq+g(egP=fX*%08)aV^vT0>{i~8^l^?VWWy%-S9aRihMILp+1p{CS;v$Vs@1vj8 z=f{Sqou6Ufj2BxFA3dZ?Q4*v5=+PZC2mfPV3QV)xN%lNmfF}!qW@!}%rdO715Bv5j z+!EKQjF3ioxpycP^-E9Bi~zFF)E@d=gTP5Z8NyJIcHvP4*hL-Mr5{)j239_@(pb0{}Bd-5d_3>2H*N-Jo9$oqiTPi>KOcc zozt47`>J;R*&R3si#oXSS6Fksz-Dg0y!D)?_j^nu;9PhhK*VIW*P59H`pyTG z!>e=cI{-w1Q`o0YIR<$R6nX5sd%)vG8PE(N5-p-9_~G>Gw$rJ9;Kk67lnQ1tY*_8` zW3HnCMxRO~=Qg~-XD&&~S?+r>Kx?1zF%rR`FYwJQ4vc^Y^%HI5{_KRz?}lBleY&K& zU3(8)4~kSRMNEe}arhXSj!fGPg*s;gX8zy=CkJBGUf(8gDQjEQlJvB%RtV0Lf~Ne) zgBK(s2ONw$l~zuXzNN1b1DsJ?k}%k#XjiPsN%PHLgQf#qgED|FOj;(8w4R;Tva)((hnsWPCP~{qnj5k$#5j(`uJ}fPsw>LE(rdF^fZbR5mVIS; zT?9j`-!bh|YK^>wnVULl56SfvFtZ>fdGoS<>9y`OTODWkPVu zhK(1A4=_GkRgz~NdsL(}s5^3>JY}|zzpQh8{gobtx&zo#1Fw7%d}{J4zmj>!^KaTU zUz~ag>P7x|=*p?cSk@1@_@-isqAf$%^}Z94ah&$#Dx@#pJyGt_R!Pa%`o7RBi<=MM zSQmyx92&I_n|(OdsBYJM$Igpm2fgMq_aiGXJ-POv|8St@Afvs0^C?My_}1n@f4@U= zw(MA58HwzB1--3=I1S$u=lx@l`?$;MI*Y$%b=0FbMXnC`dV9Jf44>g{d?s=a^g>L5 z*M{Br9xrxm_7z`Ruj@Fg^HCV`@;2Ny^-l^Rwwpjz(g*_{YF?^2?n*z}QTXa(rbp(r z+)Z7DTH2rb6Sz~;%)@QrCW@Pf9%Vr07vxX>$nMtE*rWJ)E_~xW)FE!YiPKuFd?X8T zki}C(rlq>%20vZPDo0j&H*P3ZU36To=3^-L1>XH^F8%a2fpL3`D~Mx6)?I{=jC08r z>P`<^L+-nhOYZBIXmw=YEAeYZ3qep5(9w?Q!F?!a6sGYiLEukhkw>DRAYRwtwDy4c zk@H@YYLn!DYh9s>ikp`-i$=^P^ml>jg@u>-v>->{0!6*fd=b+4hk@ zq)QX8uIcR0Hh&tb+%PhiCyq|J1oc}s@oq!LLJSvKwRXLZfA2J#kC!}1u}xVmFdBYF zdw3diJ(e24eL|Jnv8g@pcI=Y2$K{t!mw55(3w@#~;)`x1B_(k;PKF9Ml!vZ=52(MBy8Cg-b1s_LI|5ZH3 z?g$l2y>~YO(~@D0N)o0X5$}Ktfg#x|*CzaRgEsf>$O{05hJm+wppDK49uJT4I?wIC z{YaVAkYr24e80dZY1WT^@qjW(m23KN^1G4Oe|tBxe8mnC&iMPb^s|3g-^N@=SHeEG zykV-8Y;D0gHg`t9CnW7!m^{C^EE$;yf5#}9nBbG!wA!cWZ-~&hQsh3!c1Eps2>@XIdVHNBGCXlc!L5@f?*}5_7A>zdROvs}7W_~T#&AO#8Z*HD7J0vZ{yNr=!!9pV z%j91ZjGDtr#}pYF<(xcl1Ed^1jSR<~93^w7o)sTFyQzKG>+ z9(4?180)I88p~DGDbjZ+b7>H}4DM#$1H=1Yuuh=+;L(Y_yt6EaZ#u+3*mk?=nFE;n zF6`fpd{&(`6p{!n+Hfw>H)ot>O!<1_@?wOj+EWvEMQp$-bklk~frH%)A>D1Xzu9Ey@PM3ozS|22Cq9!9xRn}n{C^(=kRSzH8Fo_uk&z^rY%~= z$hi%5P62k)2DZQoqMksy_=_7Yo36XB)rb{vS+wqwl0|#|mIj~83 zY*Yr#R!QZ-3-^_=48DE>NVm@J9T-r``9yX*?!bagt6;G!+f}%BbIR1k$lV@>YM4^Z zKa6VP)OVuJAAu@;#!5AGC23cWdxtdUZCk-Tt%+aC*--5FA7)&qBbTvtvfETq zixJ{!{0!k_@O-5mprER8VExZJRGF;#Vdf-n@YjAF*Dfjj+IACYbf0pBBj~mFt@*qe zjUpc7G5#Z+YR{L#=VI&oirp@aa{ByU2hW)%dvdypn;6c7+_z1CTExML$Bl*VxPCVI zR*{nw3uPqkq4>uALkj(9?I8ju@C$<$0Cr&K{^5cnVMVuPeJR)j z7u+CdkJQh+n^~M~|9V=Uj5#j~VP>U2X33)djD_x-kPqqdlLIB;2Q>+vFHZ#GfZ}<* zlDEoq05hhvaM375t50`(y4oofLi)+?Frl*Zmk<}+1BpQ=%R-{>UAZZIrT>_P?;Ejv zK@{?p3S=(lFG>iQ1!@|qN zY?Jf+7rT->09lX^qpj9JfnE={A3(YB;Uh&kH^=`i$O~6x+VLI?|m$jI4~#F}O>7I0DCv;;|wO@VPlFDAJ> z8Ud{}6B`qsBg}>R`kfOnihwV)YuCOC`C2cH>Jl*V>URt7H3G{TG%`B@gXck$zBr1z z)0MzkK6GdmqfAFmepAb+@;AQrn!ua6b>!PR7#ab#esk8!^)UG5xw;#{R{`BIDb&C# z&DkCj2b;wIydsoAw@wh9b)(U3zaH-$T>TZY2e@u!{LocRT|;fT8TK)utq-;FM)f+`k8SyPSELAq281c zmH3Ia7RPsgyCjZBd}LhMwM}8i+X72ycJ8_0hik*91Mlau40y1VJF-_65{A|=XXGEn zR(yNIW0P{17xUT?FCUj!uo0ldYmMAW?^c)qRNivc7vr{*1i0D#! 
z(gtf|o zuC|D$&Xfh05#>r@Z*jk-I#nHh{xm#C)zn?2Ssjn$raqrK2E7@NuAJ*a&xQ1j{Zzy_ zg~OWG=FHgy!fyK#`JXzjFZ6c+X)nZt5U{RuIt~m!Swo_SLNKoy*=r3X6U??Fi5X=a z({h@?P&@2?Ixjy2vc0e1m7|)&U!A4ZwaemOJ;c{8?C*)gY6rhuJr-E|q$Bz1zGb7| z7gRDCZ^Km(2{oK*oC(tTdG&-n`gM>b7k&5s*`j&itncB-SndpEJBJmwdBfI2t#QVQ z`_AId5u0w-eTJg?aOJ&R>WosLLN@%PtB#ul(NRXG={1)h&tLT)Z1LBWuDkSN&Jm5O zVB%K7gKVd(aalQW?Z62^rLKy;$Ew~f`*f( zfy8hCtutBZeKJBZE*p4#|8Vl0k4Kimy9sEJ#b)AAYU!E4YWLU>5t$9yol%I%ZfXE9 zeDdpD%daGo7JgGJC)d43Mv_47N+MhP`ewe@qsfVBH6nnPZvgE=Pj ziic|xXYWf?nMp`Dy*?lAVEDsW#RFJ!Jbd>W`k2YNI{=#kn3?CT;7YUSa(? z;w#aHHO{mxO zSzionZtTgSBhx9zy;f5T8k;Kh*e^fO)IvYttH(Un(^KuwBrO= z_5Ly6w72O0@cnkGhU>ZO<+XPCL9_G0O5~bC;K1!wx+qte`#S5)Brgq;h@|EIM9r3n z4TY5)YCfmpB_j~rHs~yuFB$)8#QT|~%L|o@xZnTXxkIen70r0ao7daSEGCB@IgdIY z_hXc51pm-{cB*+$f`9Y!SZ<9G^eYH3R6D|z({w%7bZwY#0}OmWfoNcKBZYMTt#!`heGfFi2Fbp+;fy3lZ0Ho6!`jX#SHF7h2lQTXLAaZN^!GPQ$T9NL zUG)kX4H{BYHeO^qR8%2UE?PKmEjG5HGI|H9LBB zu$SRnz!}3}s{nOx83Kt%>)BTH(bKK$M?68KLCQmowBJg3jomT@0)?NZVOTAUM9dA0 zdG5gTFJSY){ESVRfROMbll|Lu>?}Z&x~cs@f{%+K-tkI|=Q6bTjzKU*Ai|tB4$Qnd z1K(WqmpgIBa5X#~IuGaElk{#L*<#g_6hM~CD3!E2nZlipW9mKv#T5@E-Wwdd(p%W> zW^Ad`e`S-nmvro}vUUPw%}BGFQ)%;t3a;A>u5I_>=OY{SsnQ(r`@#DrP-wVnIbv2` zBOxVPKa}cz@Pj$E67(UbbHgy*Ljd2d^>(PriI;)qcy^nql zSn8W-)jo88Hz6Rd99I&4j3K1-?JE0xpzw5c&zYgJxhEuE<41F@+DK;Rz)`q{Rogvn zu2{%$%iG*Z_B#M|KWB~kDK$cH*SL?_`hHj%;Ys4Nh4^I*hVKAD>j8bO0>_M|{j|{w z$h{(Z&0Y#Vr~0Z4O;CGQS|)#e_src_@DCaEW=(fXy$)E?A$=Io-t8Mj}k7ZWDU?J3ruq#anlu+*^JKKg6?SR%Fw=ekSj4ACX%5PF?uW>PAf zaU2==86sN)d&wu0x;NeLe#We~cs5W!#_Q=*Rp0p+8!96K>SiGtvH!Rkv4%xJXrM6` z*!fJU9+y6d5;;fMAbJ{azOnw9KN#2rD3;hh-7IRclF2)@!>MtLuw=eGU;P5!(8>#( zDnxdC0<~eH5J6N0X`_`@>f1V%_F6IjIb`qhZpvyiE|ETX$_{?pL!U&vFOd)^}79|6>c%iwr$?I z6K&}~Cjqb7()_|%nZ1dh08?w{<7`J?EkI_CM_7FAM}K;ZI@4@GfV1*}Ra;;EpU9hT z0e8SbwGVk|q|YP$?uX2JWx6MSa2VD~>tPGo3HdKqrmW|NG-k?)09qcespk2P5#W2i zeD@{b7vDm&2WQibe&nE@^V!Y4oYI|suQEQWt16aWIA;wxa)Z%rB72Vb=im3mP?PKK zW3{KUvKm?gy5(xRca-PnRwt2GEP+Z+qnacbE3xW{;o@Biv{|99Cq{ zoMIiL%ryj(PVY&reF0yBB_Mb4ihUwyd^(V@g~Glz$k&bacMB!}UcPK80SWeY4X!TC zfQ(KS;9cbdrF~Rp4(4qcrK-Q-D33H#MW{7Qh%c0^dKyd%o(aUDOp`Ms`3%$_Hbp{;`@5V zn4KQR*Av;evp^tZ$Dvw%nP-W2Vb=P71GZszv|ae`NyLS$QOivN$Dv}~W~i!zj-}F8 zsJEGKw91Sk@2*FmV2ZxvB(W$x>cg3B58CB^(r=&ZFNR!K`HRg=lnBM_%zv8=Q&wxQ>{YsF8J+ReoX50<(+0#@?sN^rf51APR1S zSM=q3^Ra>E9tTMQ%1MHw7n;@DGcy`Hz%^YOUfR|aoEhOmp0WEUcvZ#y!^nnGV78kMpu1ncz(57>*) zc3SgA$tH?M{}O9hmzWcyG;BID&Zf(kxFcy8UtW_L&Rvy1?!>uKBtyyNgBfQX52F1M zJHzH*ZjPdtCNz0ClPKgNf>VDWHSNJt0cKi*STki9RGXTi>%K0D4~Ydhr+c13yep7B zsQ*q_{CJc5x?BV0uAw*l05#y>QrviM;ol1NzKHZ6(AWuc|9%go-cF20Nn;%%c<}oAC^i}9rcXv> zJQfE1*F4ChR+oYsw3)MX!h7fZ>8&k02cBbZHc2PF8E(q?lj|;AvSE{MyVa@JI}`!r zbgCaHsTw#7k)L8g*30P;zXk>e8#|eiGS{Dbd6>Jiu@i^aZ-a6jb^vr^yzJ#Q9wl*N z(D#Yn=>EGJsQ74+{<^S7fzDnFJSxYxflWDfpWZ2I>leuc5e)Z5>Ir~zv*Db?_qD_Bi2$lvjLQ0n|Ab;;Y9jC zB+SxM>F^zyz{1nKU)79DzVx}NOfCbpcd2Z~zjP*ILtdHhe4)nSWhmVH8 z`Xql@n)dOQNAQ3G#I5W)k77ekR+hcnIdLAh* z6d?ZT`6C8ol@9&=BkxHc(IDu#3G)G1h0FW`mzCF_$NzHWn_)%6@bwh9yQ063@|+*O zYW}k?6rzGSWun*KW;1+dJ|17DhJf)SN?CLPZXps+`iGJ6VG@3cM4oTa)Fn2`=Mbx9 z8FUMeqH+PyGy>8mDgXA7Q(MZ?8>QeC*F&{mWagi-70#ENmKS|j#D`CM&$oJ|E#s}1 zT12E(SlMO*W=E+4Uos7exzqjvVMFrpgG)}*(>+mj^n~rDFxSlXi0+c)1VTB!FJtC_ z5r4i_9L<>T`aMZ``R|m>7(1@;`{2F_$x68CN5+w!O;`kpU>k3}I1%uWo&0xIxl^?D zJamJQcZ|v3@RpHpccuK*=s5&jsonF1=70P4^88oNA|%CmHh}upI?i%M#5-%B2QV+L z?2On5W$nAZ_#gXTiRo)c_DWr|-~r$yFweV}|CiUjMhB#TZ1-q#9fR%C)#9^u~O?s=m=W}I{&f|Zir+7QY|m&R~zg9$60 zFQH)0$upnnB)$SP!c1Q1 z8>xYH?gyXRCd+9kljNWCTM@MV6@xy|@QRLg<|(QM{=vJ=9ph;9y;An87;~V+wMrM> zIu|~8*Tl>#&|cr6LxDCrLXQ}E6Z-4blJPbg9Nb8elFQgHzWPs6G4RMHJp&bc z%nx~l(+`>u?gXE%(z{4UIFdA|NBDHP6ISy&imNc6 
[GIT binary patch data omitted]
zdT#1ac68Lt{wuMzY^znN`_D)pcJrp2fKUN)t&Cj$RSZ`N9kD?948QxO?04$f6ggXW zK$nCLuMuoePJ+jsl#CxUhU*%rRV3ct9C$e!tBeCdrh$H9YV zA=c>%8tIXKyu=)?4SIW!)t%3!UcR74-wUab;(KXg`|Z|h^-?CU^> zl6@a>MR);nETi~*1C}HPuaojCa~nIEizQ!2B-v-xrOu#xT43(1YRnAya$xI0r z)P`agfF}t|#gucsW9=GV!A=cvR`A>voTRJ^^Z|RlA(bS{E>H$tM2Z z<1@#+77%;pB68YW(c5+<|H~)*24#ADmhaGWk3{90gFM`8dg26xhXak9O(Aw6X}|x> znEEyss@=Y|t&!UgR~lw4mG>A3?A*li$JTcmDhK2EFy!XOG4Q zAG|+4|K#)W@{2F~f%bv+mwX#O-`2_4KYnW;5% z#175h-nW3NzM*Z_Yw>A5`$d!KbX>0=FkIwa4yAa>Slgy5zMC zysW?jEk6YGIF`<%=%t+xGa>)r!GrPYmw#sb(l7jiCH#wj@#o{i4?Y?f7u;*U8TTJN zbPVo4JQ2^aKiqVzP2=hLnaq~q8VeV~@e=~j@o?-&{Y0et!6I^b7|cCVOyPxJS!!GH zxiXI(Uy-Fz|3b1>5ijCM?7hEJn`q;;YNoGPNKkZkE3i9{{S*~IzWdn{B}YdaQaI03 z5f~2xW$aNxkNJ=-uwDw(5rAN%8Yx+veaJy^p2iL^Wa6c2fUn0K`)7PcX7qR_FuWKW z=wuvt{6LrS8DYZfyyX{%CDVGvorj#UEjl1oJt)*@MU-I;A_n+qt=SszgJ57F|PE2%dhDn##{5%`|1W)En_iOtwUtowC zPTc?G=6PKa3@}|E5%x^QwySIs19gvPJ5dkBs@y`ryphf2Ip6 zG0_1#MQl6p6Cr}Bs{mUZPvXp<1BgN;;SP?$m0m9|Di9;$n3~sJ>C6iN;(7=D0hBh` z2D5`=%?4FQ>d^*a3Vj)*S#>c*ihB!5%;Mun3sG+Q@B_fHAMZ zM7wQQ+fF>2ZV$HD`)D&$UTqN+dt~kczBb=%Z*`p`&uxqw2!r*f^_5etTX#;_Z#O;6 z6?^p``$pP8`ynS2ks7{v+-vbr5jhTso3aSp&{8goIEITG%KvAEC z6n@aem)P2smPwJt-SH`1=&+qlBR(WBrd4oFc;zJav}}5Z>+ayO|#`bL0T8pMcxkF#vv~!mnFBT@=-aluwE_REX+|9 zAN>h+1(J5g5uZ-u>uZjVPsTBuWnDxZKYVe#{OX(IrPp2`2M=G+P}W%1$*$l2yE*5@ z0d1Tw;*a|hJG3#eUTB1yb~rBRqd|4>l?TX0Q@nsjZjF0xPWVO|NY+@|M-vpXgvGi zh4Gayzde5Iul=?0^{;(>Jp1g!aeDe>JbChHoS&bMv$HeXz^31(%bY{LBB$pq8%lgt z8{OT|vB@S5kY6=UGd_~}rl!WbV~Y^t==Btu&;e?7zJ2nhp=g@ClZ`FtIlsbe-x2l& z@HDzTxp#qC7yeb$oKHgHbKAbyPngMyfx18J3(`}Q0_-nOxt9WvGNH*?D!#&>`l{;gWGhmEn~0k>~gOt zn;B2jqwTPpx;;5@YVf!&P;_P;J>I{R9fAFP4|WHC8L0y@7BMDc97vZhEK6s{euHY8 zouaW3(&-W4waoph{wthe>Q%&|l!_Bo(^_=N7i=W~{wGegA5{YS zy1uFc6xMomCp2N_PorC4%7Z6yBSzft=B7Dsx_6dJIXs5;{dA6gLIkN06hdSb6c* z>wgw~hal}0Z|lPcXpjw_JoWCM4Q|&_0iDl}#n&?C^^PFdpvudK=_i%N<_X5x;G`gx zuo2?m9NLz{an)J5iF`e%W^SrYIpnN$^sz^~Ma;Ag83XMgfX;y;(tJzEnjyL6R*KsQ zF7O;pe?$)DYDbsb#kBb4`cA&K1w7_@%vY>8`jos664c3A=&>(HoS}kYNnSlQQ*Qi1sz#L^AK4Su&cQ)j z@luVrMndFvp-pBQLXBK>1Cy=*TlP=}^HAVmrpU0o_D$gRpoDhWGDTv$Rd@ea8|wrg z7EmcsoN?+y*R2@#G7@}odCY2!6iFU92rAC|kg=9#izxbtN1l+?=y*}pcxl^NKh0O* zPxfO1?FH>WWL{ff<2-92k*ifZzqKB%fCcR5c<&GMK6DCNX9vGflam@ zPf@V;WtVTu4$$uVjC-(acVH2_1N{JmQ4xL3o4Cw)OaY1ueOT2#Z|0#7ylJ4=c93tW z??1KdmK(_3&|TPcBdg=z@>_2DtL@+~gC<}ZB*u~vB#ns!_&{v54ak8n=hp_}g)FtP z9$M$Y;1jF-B*4+pvF|VO;Tt~ve{p^`KKta8@%<0JJDz#){CN1_fi>kmu4M2jct6PO z@fLaPMJfA1SgpJ8@DQrw=7(zz2R0S2?6Q2#3%Zu+;rXuII0}GskUW6wGWc4LeFRt= zAa6{^zw63rvt9}Ca7xVb@|Y-yIl85X`_(-7<691Ru+OJh{Vhq@Kpgm1DD4wofBT*B zi@*4-@!Bh|jrTwLP;tyRcJdPdyfARCxDuoDvoqf>l_u*sxqshrbo?apQhdHoJ``vD zQ6g@KC!*Kb<=(3M*>=lT$3<<+IE$}~ze=-RnOW$YU<~d{J1;Y6bBx_$JW>pbcS2hj zRYIHjh#Xqn3+`q*6++6Q^H<1M-85=P2G!DIc_^_x&_F65g@)2PXktFN;D0!aHf5Tq z^cZS-#wa-Ng=;=JJmC17ChB-l`(@fp6nhzi?3;||4=sOb2blt721h0J zxi3ae(R9DjC;lY3I=+9>L*D3*xeOg;gjs?ZqMur91Lp?6v`jH{nYTdC8Mmou{nS9X zPvWx?)J+$k8Ia~yjQgBn8ctryXq$TvJFkpTD)-s~P1}jgP%8c%K!H##!}B!=soD6s z-VCee5q>HWw?1zt#)z%4BS!dp?5gCK1}=4tXEymo8#v}aKgA1zG>ceafL94Ti)x~K{Q$SrXu-oSORRkIQ1aZRvKTQUq?up#CPj{2fk z5P6|vlxIRvM`7OVUtvzF331*q^$;W`7Y~N8ZCB@QxSN06y8s&$oo`t4PAiCbqVfh% z28d)LU&YvhUCP$P0ku}B+Uz%h2e@z?I`sg`NLOOwZUla-UbzP@s3?gLB})X)gohQp z2P1Yc$e|~hSDRUilNJ&D_ zjg%9%j5^EW#G=^nsek{TOM{raK&v?cksie$CuO*Wqokr^Yr4q9zwRE^3F;ZHwHOf} z%SgZ2cWA&MtMGQ%I*M-Y0wj0jE4ZQvBZ9RM9o%Mq0Q;5UF9~4%f~oWcI;XGD8y`1( zAhr>ov_;pp#-+V;#tO9bjbwTeM=p4H=*!SEPCORGN5A^3agIczh5g7ksJ}d3e&?&> z`1uzkL;R9udgZvsjTA<=-$c|2TLOd>PVJ7I)o>D!B5ZS9l{sbwUS}N{mm9q4gI<2H zalgK&4ZFs)+dQ@@_$YZh0xQSQkl5dQ=|8{ejLm9iv(9%Go}Hbl`Ap;STzc+}Pe1%{ 
z{L?@Bqw(MUt^aoX^MC%&#+P4zWBl?j{nGd)T`0Wz+Ut(bg*F;&EG};@$JK>=P%KE7 z^%gO|`R@xX(Ry4^5EN10-YeO}ujO9x{Ez>k!Yi4s+DoE= zG11^IFI%hsB3d@vrpN?0-+aCW>9?$zB>hu9SkBd+8x+)+h(T_}H>(2p@a90${Hr0P zRt`E!myJ;!(@YOw19H$)NXEbMEktFSIyGbiAyDqVB3C~t9`0H6xei^=ThVcDu~YOR zbm+kbaDoz5wSZ-jQ$!AYg5tp6%CH=bxtxJJ zekA%p`wxH00-1;7xffm<&%OGZU$W#Y|Da}_FeD; zHP%ndF(zylVdf;tN*xB`H~WJ*ZvM&(X6*WUtJ>CU@T_r(N8i*F!ZbE{>g3S04JJtO zi(uJ124p)Kc3oen%yrTE_~C~?5K=hlF`iJ(EbI$SFgQ7E91e^p=GIwaDX&DZHAy-1 znEjjO6o>R>e^$_J*ysxOB+t5mQ4k1f?*uMwTP(J58-CUu`&j#z+=!H2#Kipw9sB@w z(bz8N;6oQO7;C+zgNF}T^{ojR06dW!JjC6$TaH=_6Plbzu#bUHYY>^hK1&w#`6~6$ z?xxnIR=PkXI+n2oc7^6Jjo2I(30L&wCVi~Du79IjCCMUcG`F{N7>W6p#yQ7q4dWGjrvmyHXB<@OntxGN2@s9#OiDkm?J6l4(L!ax zm;a9G%)=Y99xL&|*AvRQp0_mH9C@~(^n%WMtAYekL|(?EufoD*{)}9gs?L=s7-kJ| z^1P)(J>TS|67#y#Iv$dzPCW8W1*tKO@=|(JAyRTF$R}QYqG_%fi@%&NueE=n#o@qW z(3nNl$E4}63Q6)=b@nf|rQ{>VvT3g%IELh=vstbKvc&j)5CO#2B7sG9e{&poeE0av zbvuSxt65YoF_yWGp&dXnICA;|rj56{SkQP<)7gZjeOGqV*yiqg*NC{ z-%iovb*FuDr_}v3H=-{UbhFRcPifz5cK;D43!yjgGxg-LmwFyTsJ5eLvp;R&^#dBy zxBZ!S+y1*#$@ibiB?D+#8;Ay(ieY482lGfRj_WQoFUQOYC0|o1`C$?FnxyrRPnxq1 z$vS_)=3w}1r!|1t!|*=iqt8#r2k(D49)I?DJoE6G@$i{v#sR-^f1+{7Ct5bRN z&;Q&v$1~479PfSi`{TopJ{+f~kK}t^6g|7R@JkgZCnx@P?W2=p-vdOyA26a=Yl9!G ziJo=!=A${qxA6^g>Hz;Ke=GsOh- z1movMmK)t9V&Hoavbi+oNZqvIXAPrY{KAocTb@4B{5$3=`SY?9>tA>R_S5|Iqx3j% zs%gCY8-4ld6vc~g;0QoT{F+vlYVP^8USf&){y9Nr*bm3l{S`@$mH)D{VvQ!ZZ=&_L z22eiCxAAzWRcvh+{ld6GH@SILQQ1nZg(pxZu5fWT` zM!m|FJm6claSU|jHe!`U0^R%#&D;(^cfF00+YM?83?qq~6Hge7*s>$8#G( zuw952L%#s<6!}kwEC?Auf@uK$QJ%vErE0Er&j?_pFX$q9rv}H}g~|&g*O(dCm8q$7 z!tp`{bnct+8Nm5UFcrL!00QF1>J?eEc-P_PdP`;Cqy@m>|a|CQibl^H;c=0vax;cvz0Wd0Hyl zYA%pl5)&RW^a{r!Pjll{%})??U1jV-7IjU0^w_q^TRFUg8>9es;RF1!nS>*jh2Per zKP{-P^s-A>-%q&zn|j5+!4PnvNdf$i=Cq*kiBfmqF2 za%`t`pm_Ehl+^vUes!2%O7Xa*pU{P?n;hG7lyS67u{zy6ow4}S0eIsWV)|A{W$FUQY+{p;hmfBUz`&wb+;{FPv*k5B!H|MQDeZ~Xb0 z0Bq#M&&G_e4C4)AzHPvLP;WB&W}#2&iIFW+-ae%N@r?^E$?!>U?eGA|v-+LsR*a$7b68s~a4gZ&NIo|JstU^kw-V{e7{-BLE##t%2u+76`d zN@>15et&{o^a&UU*Kg4kc$`C5&He@q7g}4j&wPX3ddj`o-(Zc0WYQ7zh4%^NALksw zj=b8(SuaOeNkUr(Wi|A=4d%R51`Ef|?PDtG78fcbS9_27*-W8m8WQV5Hle_=p5v<; z=A3PRI9ECgaL%$_Mb86Z=ipMz9!uk=_|jKB?#fYY27kcM^U&f}=dqW5QRan63?(~l zKr7?x0m18F+n4_1xMRGf&r*Ok>_tW}f%4xt%2k&=-DQRuht{#51I0U&Y@G83!0PN- z4ET)PDHEVfYzjBqcE=O@QN(O0 z3ZG}!^HQbsTi#rEAXHnMH6eIl-zK}TcmA_7^Xv-p1FKpifVqw=dhO3*K=dg|cCK4V z&=%g%Ik1*Xj%cUZ-Piw40FRPcIVf)Ixu|Kg$IY0Vi|g_64?h@J=i2+y7u>Az9)XdQ zIOjaGjXk@mF@!{4Z>iL=AWYtmSWVi@3v>D4SV80SpH?^HbA7}-BFDDP`G8}JZ3st( z#uF^r(*gVW9>8lpw2G?wD-YD9v+V1KbiT$zC-%EVN3*DvHg2AGOVO5u94)sgpdoi}^!w2KomNPH#ig{{esGfIxGC9E7ge&|#1ba- zNw12u1Mu|2U0_~knM=9Gk6UP8=01xRcz1&ieyBuQG#L}FiYGp{jA;e(N|)QzkmV=1 zX`nd*Z6nZF=hi8y$g|JNHGa5L(AV#dkL`~P>R$}ez=JRui|Evs{iMf5#iVVsZkMaR zq}JYf&dXFnk!tLk^H#b|Z$|l~I5R|D71C7ruVi+rN;AePcu3P)G7Xf~K6waV<$=iL zz8hSYi9Td1O|pU+e;WrKNNkOs2Ft4X%0Tq~%AmT6){iI+hP31ZG>K^%EdvFXA0J3S z+cyoYHJoZ_5+}x*x2%j8=4si;k#fgY8pK>WcJR9ZyOYi%H&+ z>LCxIPa>Bl(Q;4ek_XU2@$mUb^X};Q(7$7UbbN1o{^{rA{SV$7=TFYYGtWIIaZkq8 zh0bBf)KYeQbYF7}oi#4_aF_N;d{e7+>cFSlc}ddu&v^deJ*fCA^mC1%E>U@TK@~C! 
zQK8`ATK(t3P3?gI2+gQAKO#iBC#5kaQyDT!(8+gLEE>IK1NunV%kP={r3E#*F8lo( z@Z!BVJ0I74O8DwRF_cdi+{@IpmCh6F2VZ*iOXKT5`?KSuXqqZYr$%m}42V0BA=UV)cllGrF zf91d7TK!TrCmrnb{8Jqnvc>UfoMqFXv7#d(Pye8ua_EXVG38ptaa9}l&4uR!7?G#4 zTar9ZVKshD@(a962qg(W84KuSFTnc8RukAjH**Y5xooo4ww3iLZHj0J#mC>&Bc%OQ zB@!ABUvdo!XML>@JK>`qB98v8c*|m(D%U&$OyyWfbKN%yC<@3Hj{fGCD13h$f0g$b zumok4kp+L&lF0Fk2Fh>h{+Yj-1Ik`gAgy(Rx=mTYvYaXo5i6kIIxd1P;DaDl_)_wI z&bbnD79W^z2(~{;=dh^ehEK%b*1DvR`xKj)xxt4>ef}sYP$?6u{B-?Uwg+UH3-Ans5ZwTGL3*%K-?`~ zE;667o45#L8~w99fOz0#9PmwZ_JKca7S3w{wu{E=331k6K7dirTU19*p5~)?X0ED_l|=#9P$kS zzxtcs5y?9}aojnTcNwq|g0fL^p=_AZA5@FW5fGOAb8+(2+Ip z3^bdh3+-p<>;%a)3osc|sJ!{4M=|w=9hJltn|PCtSZ|u$w#rQibb!~RaG|&TDzkuV ztUC2dU z77sD%pSed&ROiJHSiF8Hku~6~X_N>SwrfH4I5i&@a7_H;uj;iHg^{*U1(L&K^7#S| z{-%wui?OfRh#%Gx`R!A9IbcWmqU7JUF&(;@=aK97l9Tb+_1u_@jOeed>S;qbI>C8J z3Gd$%()d8%B@gZ$Q!Vg{5u0A#j=NBtUU>19@zz&=Vch@Hn_@fmVt&OYSvKW8a~C*#wP-XH(ukN#l%{lE9O$HyQ3U_Afq z!||7Yq64NG6Xd(wLa^D@OvQf(cs|-!xB{y- z6q56zbTI!imx8zMC2crpT=OvxgOth`ul1CQr?d#)C7mlwP;zGf*9KZe+I=Bp8IiNX zCX3dlfj;=OH_b-d(6$9K{ zp~@;2m3cO?84Cu}(b#^~{PQPxv!99Rq6L{T02mghq+D?&`F4Hk~JmwLsoIscjCaU0~%Y62w`-pZW z{R!jxt@-0rxT>31DBdEUG|(&S2j7vWD`WA%J2auPNqZ82h!{t0s8{>#r{o5)pOr~ z@)D~Fsaq5@=(nt*!9f3pK6U_G`&3zd8hplr<&?f_USK;pmULOAz12&vzcEgp%Qq`U zugQ?8u)2_agc=hn#ud;CJ6|EV*sy`S>5dlwytnpzldY@=_EY-7sn}xjX{-1#zX^b2 z(P2O3d9L7SW*Tk0{>3+S1DhQaO8@^B*tZ$EiKFb|ri6mXCqh2^_``8_`b1$rv^m_Q zAjaH`;en+hQNvC1->zaGy~3J=Txv8=MOF%*``evmWaYho>FbBBea48R%gMVOC>n=p ziB$pWnrG$fGWlkkl5qBecG^m$sTpfc8;pQ)j1OG@Zu%YCloB^JRk#COTez$P*KrV3 zEE~GyF<$m<0+b6suPM2qUrnZSK?yQ4XuZEr+oV~?-yJaZ7Wfmq0{o9(@t+4+rY-rF zxsw;5YLRDz?M9CgfhyOsvAKNH@Mu5V~#xp7Hj*8$EqaBMe_=~v+^PAy{}7!KegL)NgDFkK9!*nB(Vcn@|kVM>|U zkST>P_+Cdie{f@%xXF|7Dftt$YTsXnEP-69!Y^})M3+d1!gNFdwop=mmBwx;CI>jS@dsB(V;&|*wK+Z))r($&JO9Fpm_M275TD*`1r3RGbWJB z^^=RG-xHtz`0xI5{8i)LV(*7DDPk96h&IR#6sj&8JQ`fps1#rHAX2g^LI>D59mpA* zYB=Ci>x;AKOS&E>Cl8RQ^6*1u1fk>f{9=6g!H46AAAT^Nd-kC}-F|#>B07!c2Pfm| zLg%z2(ZYCnnR{W{D>>E&Jj7A|Ni*k{SU^`{o`?Rq;|#R{4Brej&aBL z?~h~Q4A^edWTDzG6<2qO`cI4^FS`|!^lPnK_(~i0!p(Gpv^EIa2S_RJriRpGhw*{MCaD(%bX8U3AXG&l*f5!?3l%e29u{hAx>G~{YQ=QtVMZ&Cp6HO4CSD=`M>2+ zumWp<+RTp};-`AWKBlM+-;|@GlNu2TPqlge&=2Zw+;HZd>pI}Bn=7N&; zv{9o2a#Ue1@{`d1ih0%(@Yth2{IdZfmt^O5&*L<9wtUOwVExt33slrq&iGbU1IuQc zOj5l>;~TO9wwJokp~|b#pYT`NbAxN4#@k|=ut_DGj55Xn^g4>i?Zn79&w8prpF7Y8!Owh#Y_Q*H3tc6i0(xGA*`fAZbe9oh>V9gQQN zfiTy(j+ik;Ui`uy#})f63mMXXDS^61&$24|Qb68gmbnZ)yv_yr))Y$p+|GiPmF|*t z&-TcQl4W`5hsu4)a~ytVnOGnrx-E;N+RT3}J+i|O>m@(uA^bh`3rS^+X?Ie^KRLSf zk4x?|#+uC$Ek41cvEo=*8VBLwm;B-tl@9co&LRF8?Bi{f(o46fp0c9Ya zr*hDO;J19E4{sArh6$_84HCS_BN+Sq>6n0W(6#6qcmO-at#T=MA_&E6ppXGJKfyEY zcp5A=n7``t213J1w+ma0N`~s{hD`b?lU#Mr+^P~cnwCTcWLXdty)z};-Juu4qAE8M zA_kP0ypi;vrA~YGyX%zz+Pa?=RxTr4R_aYgPAn9msj}(JJd?>dk$GHTL%f-Z__PhO z6}fqUkFCnFzuv$C;F(aBIpzqd%Ow*6<$QuQW1LBiOWe>%jAnw|i%F&O*b zalqfqGd=)W~`QLi+E%Q_QJNN1hs>iR^phs@@D_6J)f& z{4BcgXtU*q|0)Y@{QBT?n_HY9WCJ!*g&5EU4<(Nmox#|^V{vHJz+y-s0U>hpM@8E! zS*nt@PfDo&oQF=UlC$=S^(%Fbna7B4xw~&LhlgBLF6D3sl%MkxtP{!(WE5}bw}CMe z1G5oqLUQ>_A4(jFumIh6ZuehKrhJl|lkTxjiYE^rnD+4Dv*XP#e|0?n+PmYPHp2YI z|M}^eKj|*bqNq)c@QDwiiH{d|B{mRHW^5DFj8oCcAk*4^aGaDafkq0-MVU@ghCY{e z^1NLvUqqKaiyjQ}jXHGjwP^Cs8xuAk!qI)X`FJ)i&(8d5|0kb)uHk$!&MwZ!)#;P* zgManEj6eDPzdyeFul`q?@zd|VGk*1#e|dcU8($v}6`LoIPyHtF>BXt^UHOE{TZX{? 
z#vtRf6L!+Sv=bYArY{0+H(!k-w-=po9{cbz9u=!{o2IZCGJc4gH+q}&jB(;qv}Cll zt+&99DLN38JNHIrsXMSL7`G-4K;@GLU&veTlG-tBb*3Xmn->BB>u(hSk8krWNI-V^ zkG$lX!t410*x|Vy8fV07Pn;nFT%7b&(SZ!x9emGIZX`SIMLr8XQ!?1qF_^!_RLkrBp!mfdmJ%GlN(gYxZcj6wEIS# zE6D)p+LnFbe7xJ%<-6(kq5B29Zkvq^wR6E*=X*=FBg{v5DTlFd`ZU-*H!)W!S8kM# z3ZLS)usH@U6moCuHCvi>@K-2qePx`?^2>!(n^zU;tG}@1g|qab!2@eiI3YNAvcD=h z{S}atr3;m>>yuzVMsxhJF51FwuN>y);iA~Om3;0eS52yL!TUe`TXWqYiA(`VioUQ0 z(ibznw0Uk6Jpg{K0}g<47FEpg_y>Vps_@nHi~;6{eCPW`Jynnk zG5aT~%rEVNHP+}>`X<+Nksnyg4U8)?&fn0Nod$GNaqWA`ox0w#n;y7D19Z+n0IF^y z8nB^Z#TVT2YXRCEfF;=v=qZx`<%*ZF?0dC0B!Bej$K&ynC+=h4ph5&?v(Wd-E3sE4 z=HNiNQ>BmCqc$$uAZZR@_xG()kian-Kt zjBYcqo&(tBB8>!7`+rxlEqL3?`Oo!T=on34enF|V*m+6W?!PpBa?D?SK%V2VFL&ub zLpArf`|;57Hd?Q56W`GF%DLfaTZ&KR#3OT%GA*5K3qGhA3RQ(87Ck;_<9f?CKuHJz zX`rB))8j?CDRkR%NkSY;D?ByH$++KwAKfuvqkSnmQDpR!_QJ69nWul3&PWU=-=u$evx3a>FE$Y{11@J-{tzxprGa&VVi%QuYg~{x4hssKc4Q-F0?-z zJ!=>1r?I7a`!uN%*Y@O|A1Z@a%7+>}SX70Ex^jGS8D1nKOLUN$4jM*~660KDuz(+| zSS_(CTIAp>c+Ee?b*~-1Q7gKR1^ZI&>s`ubK7?{4c|4Sc_44v^eE<9ZW?U(M=9%Zl z$$iPX;2Xij$%a&r`+|JaK=z^B(?d4*v)q1j<~i?ijZjKuTGeDfv4N&=lp)3+?LXaa zJTU^Q&Yx&ln;kPN2#saHCwsv4X#lqo;|J&R-w$9}-z32y=?8m+pL0sx|2_6MbV|LHn}koc16&|}VW=r?RIAOmc#eH>|9la7oB%AOb0Nwi^$ zgP^3Ng`|#H>Sb<0KHLV4>Y2v!Lv`;dG(UWA_ENGS<2>S-$jiSxT+Uo!{yD;u zVOOA|;fj5w3PMuN!{dH%n{j025Hq)xh?wPSprQ-Dgz7-kSG3cA(Lz^#cmqTX854{v z>EtKm`0yQeajnRQ_VBHro%H=q)@kS%(-NVoAoi4T z?YXA>5cx~}fDb_7CJ7F7tKTrm--66@44xYpA1jDv%s@BS5Bei!v@20|{AIU62AMgNBi=4d3 zkPja6Y{V~9NN0Jp+0R%ZKn2L-XS$*Ba~?h%rNm!sp+$h5$k5_E*K3$nC(~Sb@9?ku z6}|z0p8(jDWfM(32{}IS?@H53kVcwOlU_#|v6fSR8lVrutK)Ls6G*5gKx6~RAPCSW zPrG|A;0eP-tGXbI8UduDa`_Q?$VxGOkuV_0H4fA7$qy8>5XfP`$Hqyp1C9e7f(R1! z>VJz5d=&uqkcpz(e6$x(B*AKYCHtr%6F`5H#m%j!a^wTO@rAt^ChZ2(PRlBic9*i7 zErfz>s9dm>kON86X`-1IYgOnr$t5#ZZPTs=wzu!LaRRLuH2NH2olGD(ItG{V7Jy!p zZ$4y5e=?r54ydkiDZD2bf3(>?9aQG)_OE=BYq^@(+}Pz!Da{W*xHqTCwP-L#BZu+s zH;H`0P^0BXUp9G0>PR~} zZF7&K?B>MnO?WWfl66rhWs79drcbiieIY;-3#ix(a$=D+16apZ`Zb%ew5JUk>QsSAg+#I+uvhCgQcX-W{*~)HlY_b1zFi{;7|3wmFg* zzEw<-U*9lf7zyFijpquD;`7Y}Yz}G%ED9-r zMq|0lCmzthw()X-%O$9KYwzn4WKso^6^6X```VG@&En@ zzc>Er|K|_KNAG{2i=$`8*S`AI@y);V&GE)tZ&)7}YG-HXXJUaK$SBiq z@g7_3xPX}ulehM?1Tx=Ls9@!UPMH{HGs>U8CB%&QNr4T3GU)ZU$pk4KWL3*y1*p|0 z5UmM9jA>!UC$CxLq2*@VfGt!C;B~!nk4=h=2Pi1jSWPsloy6RCol3`!I)x^Z$E|qB z0D(_lTJ zG2pvW$8q}6Fyjks^UiWQ!A}ma9R#oq`jnj*TW&6yoeVIms(SiDNBQ83Z|1QFz5I_7 z73G&`oWlst!r@JwczI}50zwm|vp)ryC&CqRkqanwYDjtV?8)hRL^Um8;)aw8Zl({@ z=Aerpq#C1&p+EJCzfPg^+!ycWL&iXtz1gRTj*gG(w2?Fa=o4b8yxIcFQRDq5W$@|m zO{WOlE}P<!)g zWeBQR#^5vyb5pe5zm|TNiKA_SYtbW!zM2Px^La<{b{y~*9&qqk1aUBFLCXdSt%=I5 z&3;n$Jk@8$ZMpH)Zx(0&;K3)vf(+1|wgtnLyc31C#)CiUp<1r(EKK}cJQ95+GK2W- z2Ya3H?ttt9owWtI$V(G-c(U$`^xl12ad>cly#DS_k8$!aRBFsJk08cM)Z1_5xly~Q z+##IF4`~rdl}u$q)2Y98B4d~MVh@&(ipk{y3BZ=Dh27rJ_2`~_@JP1hPL`HgIO}A+ z#aI&cPJ%YP1UQre+hMt+T^i4U2v3zxH=hpz)nQ>9=<#zjF0}`|I)5}i{q$4Eg0Gac z?K)btLHkAGO23sM-seKad_j#<`vFS*UgjW&vWj(pwuK*Re1c@JEc1N6lw^2}j^4W= z__m2-VZyV=>%L|`L6eBtr{gIVsC-qRGPcdNt?afxluV9%>`M;LOMUf+{&%1sO8eof z`Gqv2B?jR{$7H_w+Zf!c1igbnm3EoM#j)4VHW2EHzwCo0j5LYBwuyJH?TG`-y~cPC zuX4EKo3A0NZXaoPyvuj50~A~_&Aj1N$pF5}@Xu6o(b4VA0^|XEcvc8)6Q+%y0JL4M zBUjR$RI?DH#TI|Xkulm0l4-IM+cvf)bV6wm{tABVLcabEdOu=nq^qWLv~R4cKi#v{ zzg!pclZQa)mHa@Eu~POpp)-Fu_cKOmiLWcn{j~aszkDKI{9Y@&rzFYMmY+|f;RgE^ z@)h>uZb(2K54wTmFhq?YJX?Q;w#yCiN6DFJ*jKGZ(EWiQ%-+Py{ks5j zv?Js_v%tNMKOz0eEXbM=n*i&mq=Vz&L3LRW#n|LmS=+8fS*s?NaP_P96V{ z5uAC{9~ocd*an_G3Npcq12lX}>;=HLyu1+XilAf~qQmM)=5K!z2iW3U(cGD!ji~lT z*qOj(E`kqG?zBSuAx8?GnfB238ofc&1B9MdNNkgPIuCwQS3|8C7E76e89>lbi817{ z5&UMC^13Hz`Ig&l$Wob{aTh}*3ObXh%6h5w6tO)vyAcpSAE2?n9?|4iT4%KmOD?V4 
z(*w`LEUV;Uw)*(wmmVq<)sCLKQiEfmJx^9Atvp?sV z|AY76AD{f_6MyQKhnx3L9yljfJkUEj!j@ZHa8p8Q}N{ zc=ll);%A#^r_Q{KP2LmuPq|3^fE#}qOTP93Ld!ox)6W61+5OM_VNPl`tFPSe%-y2( zeyG+@;@~)AIQ@=T&YEh>GaAmI@zW2Vl`z+90erI$+g2JAlZ++g0YX@d{`gQPrkf9Y zv~E;?Bv!E_wy_RCLt@mc0G~GjtlhE8-yrQ56Brldl0hcs^dmI*qu59Xa~(R$ftFQ# z@Y60Eur1fV@FmXX#U@I~#5VfJ33|m}y8Y0f`HsUjOV=BsvH$$pUO}&0I7|ElS!2~C zem~=rUi)CIbUF_Cc4WUiRNs<}?N+CLRGcJVZppu#;1}_0*p@JBxEPu&)VDEqs z1N)_T9BW+hvI00*eGs4G&$rvL-rE<^TNZz+XAFskXNU5avkeg2^Fdu)_KN}(?28Dn z!^ZJaSz^btJ>wjM>v1U=nX3XZ5r6j+whI!s>*WOi8x0gwK~Ge?%D}EFbvYc9B~WCs zt(9)Pn~dEu0KFF~PmTn1@~ON5G_AiKf!3>OK;+xQwr-~Z=)!KU@o3VGPO>LdW7!ti z%e8PhyVZUh>>s&0!*yz#VnLVF9w;B|&PrQA@2~jkU`47z)NOaq)4%|goQw|;N^VPgMG^^8iA)B#Gn2TU z!LsoKZASCf;!a{pFsZV{Ln0a$1Wa=`GY4vclAmy}UXm!&&PjrL#x!LuGb&>WA0pyy zF~1GucH~DE5}=7Kx+fM*pIcX7F3Ezm2Wow7fX9ee%t zU7QpxfJK|}BOXHdw}g@%xCcman)~?)IK9?T2x2gm(qAA*X0!>Mq?j zebrYbazX^8JYlcy1xYyUX1peB3=qc&c6(Oo=OFC;0dwi!}f! z_l=<2up#YlrMBN@w5tCfX@A0QS#q6cVmt4YLrp-wttPE2Y;Y{ zo%`uFC7CisQKn>(W{V<4wOS+#K%syt)R38XOn>k9trZdb+?$1xd)*s5B37(9h8=r2 z=cv3B0iE=Lvkv}LN!*Ioh-wF4=Rn(6bnbIoP=S z$S>OAQ{h!`*}j1t@2xR#I@Fl9ljOXKJe9!kJL)eNEO0>>K)VOvkXP0Y&tnT*-jrk*uMVOdz*h( zNx)vyT?#$@AX<3G+EyZM(g3wj@rG{qI^41Zgdp}J2%^>KDU_u$96P597Da~QtMhVv zb?G^I^`8Ox;7Q2oG8z4q@({U$M(!JZqi5~2F?RFPGY_13X?VLl-#-4;FOB0S9XI(b ziFliy1IK(b12X%Di9H^j&^bIDd{iD_-f4c6MYoq z;Qt(&uX6zpYlpk|Q(L84>6f^q^^ zw3@fzeGfs<`UiW#@YRONDzUSIkjKZ4lkh$SKllU`$0z7v(ZTUBU9EFPpK*=4oX2e~ zbmRd0d~yroU$%ICVQ$r)3NkXalaZ`FO@=Z@Vi;q@5F13mqN>K-zLz{omAl|_V$!{Y zEZWHnA)0he^ARiRK+$H>rM-<3xXojK^13W~-_IKT7@j!>ro8YSLgTBg9?DALPqtE9rC>7(SHW5hf9BI48oCsX_KVNX`H-EG=)V5eo7?xl z|Ni#17hl?b@vC2GUvRSV?gd}vjkv3;3)v_G6jNTVcyM;&a}?j8wt`((oNWhz#jzl( z59xE|u+gSOU;4QKL#YNxU*C+ffHC6u0hXIF0VB5)5e~b{(&rC2IDpiv%}vNDoaLBW zI3L9){P<}W5zr3KeY5<5b3caP9VJ_-13eF1uC_QXuuVdNjxjR7kb**G+yaeQXtEw~ z{--(PAmawxGEdOsm+ZWs&bab}aQ-|F>!l%o0-6HyXj9QPOMp`1Zee9-60(4dy(D)e zz86g(IWK!0OB7=W8=3Rod!ZT~s9}VlnyNa}Ix@sVL;EFeR=EL-L5`BaBk;=)tlxMdvMKehME1_1TMuPO``(T|tO!0V8* zW+J>UYp}~KFAm1>VcIJ{C-8%B)&=3sUvg!+DoU#7!VW91pjt%Cj?U;6Z%hJ(P4%W_i{}ZUy1Va|Zs=l{UxqNaH}cq?*`LWHa_- zN^}b=oWH%>cnk@xpW#WDFc4cW{pO#421Lh@rQO#$f*ea1rIJbF8KihVIQ~cfhyUwO z6DI86&j8lh}vPLTCSa>@+|2(TKvx&1N#=n$gd~$-cmrz_andnij@CB5C1vfjm)Y zQRABjRu>z+NEspd+lksli=JyXQ!Gg83;BwTB)>((nDw93`Wpn&etnaT4c`RduhS78 zeO7nP+fc#TD$%p4%ndZ=jwrQ(%86d>Uch+NFMF<#4PJ2&v-Qbe%fc@@#clFQ8`A#C z0ql>TXe6bKzxIo|1-494Rh#{l%|Kqvpiz;e-#sQtK*_WW?5H{5%?Z0Fa)7?1i9-@R z>llt~C2aM>6cO0CS{Gc;i8mLS;2NOIS7^mog!4u@a%7)wrN8EiF;-om zF6S#~uD2In`P%l)-}=G!=VEcFf=3i}BPoHcrzx>kn4}SZ1x9|Vx z{q4;+-`wuszrS5vTN_Hp}006u&tCpIc!KM7rk zcS&`x*WkrZvfbdS9w|GBm=Xd&j-BWndOF3sTW~0Yxo<%<0QfC5_Tv+#M6vM!)u|f4 z%=ux1WE9FtU~>Ch37}1SyiTd?_*U2**Knz>h&kRFb3>+L#rO+dkc_)P1Vt4BCBtOU zl>q0hr*OMCh+p*SFVR^Ru<+Xk%^e1#%KWx&`mkvxWSt2>VdJX*))XH?iKkw9=+2f<+6{PrZimF<-)-0cZONxBJ0mAJuiHo3YaO3yKk;mP@ujbAFYy8ZZ$`>W_hE0;WDolX z%*x(fvT%_(XIrU|@WJFM<8y(#MoFBm=i8e^&$EemfWbG!-7nPB>YRA$ymY0#;IVA`@WT&%^M{|o;##I} zNZB6NSLMhU`xmVh$j-jD((obY0`ER)67Wqp0Ctk<#hJ$;=UZpB=1yDl5t zKgnqi3G$>%a=@hvQ|{bl%~Ndgz7-Cx<0y(DwkP?MOx|1orN?Eyf|hy6I(XO@6G}|V z$qL}3p|OgGWPASEK5$jGjmq4nf^uHNh$Ttc3j>M=c9M!bhX6bgCwFsz$*##*yx1Qu{(5JKM;ZAJT(AO?Qko% zNK?i;tzGHc!q@saZiHhCCfe?qkcnR^$PRKB1*J#O{y}Sg`jBy;vb%%0fKi$G>owZ^ z#89~!kTLwL{P}<$a+vyB+YRoguL6u2VvOHdOXTsNXz_5LwIX(g+ArcLkm&J#kr(Z% z4{u<^CUZ}KH!Vpp4;(K)*@X08&U?ah-c}iXaQQ_8)+1%XLql16z4F!aAfW8^fN~P1 zx!;se?Rbo}@T)9T_P_xBMxMH_eJHzKJgUOA_Fv*h&KOl2enyC;cs*BcJh_DM0uM4B z=Q!jv)|dioorlVh!vj7DnaA*mmrUc_4}}Vc!H&`*(=hf-l|-4mUm`9ZH342uUIh7b7lNH36By6g0P>GJFQ<=GAG7Lbz{%u#rqmF6Mq%WCNJ0u>j-O~y;kZNk$ZTt_pX zk=^#I%|4f}u?ckcpRvkd?c#92ZYMV 
zXWxzIx_|{P=Kvd9i)+>(92o``f?shhM(_ zwU@SyPrCcVL-)2zwfobr_={c%^u0cfqnz*R9AJB&10JtyJL5%$@$WHHLF1V2^ISqU zWa!D3(XOk{Om@3sBkf=BL#o)g+4$SbBmw& z}eP)Hl-Hc9)% ztc&D2I8RftcxpTLZk0`m3G@y%Sbs|~rRcL?PRwJB!oI~$`kxr%2Ql+QW8$smvNLgI zj-bfiibf-uxvTS#uOn-%R?Ue+K8de2Nb*?^f@i!{pbbcq2#tO8^g3pp=r_zv9tm&i`~*?rwM zL^c76OxddSDj#M9op|`V6*@vwa^TPN1Z0rZLfl)Aa7e{o+X5xF=6iyc7JGMq zzvMKni5TIh>!M&Y;}8ikjTdxT8o9U_c4)Cd_HXp-d0-==3s!7!CLQ#NYXU8--Wc1r z@X$xxKyZ~N%fN10XFtYN+ zXq8~YN38S)Ryp&V(9_0Z;!nt%hD{pwl52`r7kP~{FT!X>gBBRni_YhaYqbi&qQqE7 zwgXF>l3xI*GELY99S?HyHA=DC?QFK;Eq#7qrFbb*SqSNPrbPld=yZ4KPZdUqyT6#E@ADY1B~#KVj;FP4x)B%H@VBwH^uy*PT_lGmoZw3Pkuf`Ny{5&3C{jh3!g zgsBKwIiPaPFty4}7yT<_+yJ!W7$6%6znbiZPt>8qCKhV8sGOweTfb=~JTG4y9dogI z%C>Ji)duwD3t6$%~`OWWbFTeFI&5;KZdb3@fUv3xY=Q`QsCaB{oW&V9D7I9)F z`;9N(`jf}p5I0K6Z+wk7P!>J3lIhr~>@i7}2HOqfZr%$)5C!Yz&Cg*bd{Nsig zp_VUKY*u?>8$>TRy!lX$OYtM}@i#dzarBq5I#gE(@L9YbTd9YYF~MJGlZ#)4?hV^R zg9R2A<{@0U?M~enB(l+KxKX9E#R-Cs{r)5OLx!1E0pGFndeALDH(NZoxjbllFfmQ?@Uf;+IQz3v0485_>n z!P+~CNN2O`+fP8`nnc;)dN8`v7Pl`S;2w4AWk}7hRGXS~)D!6Soc#;9k~ct}GS>@ot~sO;fRb%( zJ9*O;>Hu!b2v1c2_$v84-EGFPFA=K7oi7&szY17bdc9GPi7peSPV++x&qQ z|2!I?#=WE(#`rzt>H0z5ehoO}6C66>y=LRcNzK>xEOpZsy~^DWtj}DJ@H0@XD<6II z^X*!DME1GN>)byG{a81|*W-%Y8CuoTnld(Wu2VECXpy>E6*9i@jyN?=6J(Pk$y7;fy zM8x2*Dc-URmlp>P*UwzBbyN{BTB_U$;QGNn(tjwH|B=-c4Z;)W_CaMrMgN7q;*Ai? z14XRF0~+~4r86>q&0l)%+P~V~s#>e+_u>u<{YVgWT=fSDuaV{KH;XEHy zhK7V9U-Ro6#*ux4*OXdVA}@VF{A`m1P|v-Vlo@HOzen^VH0Y|JhR#GnX1t)rvfQS! z>XFkhc}M?2yG!~<4tBw;|ArnXCc`8|=zt#~<-oDle?hN}v~uk~brTHE)@ef~xl^4l zaviNU$B%?86BZ7v=uvd~F{&Z_^MRd8n!~{o&5%9vF1Qj(+6< zsB{XI7w};c9$NKfh6MB=l*8-mLXCIZDt*Md`xdH%_Dst20c2)<=yvM%gL{l>r~gdu z`?j)6uJ%|V=D6-eT&s@Dj%Pm_$O?6pyvnq z!ri-4elB?u_4=D{Z9n?)?{06u{?_)1;`rq0xqFy(;q3Hm`{Ikwd~cnfz{x#T^4RHq z#Ag`jR|i=Ywa@XLy04S4S7qbrf5(F|e&Cl$QpZonQ<=-~YYb5?dj55Q51DYRJ<>@3xhFC z#Dry(O(uRYk*kV3QDw#mL(BfE4wsr4FN_cCFCZS_jXwM1wG-ozB^`d5+;+VC z>_?;iWw%N;r4#-a-oOc16}lJjV!hfM2|B{ZNgEt`Dkdy!#(@&e@LS{%c!6knK({N` z81X0KAkjcf-!Ng32^};_$gzx~sd#a8C#-dBgG}JKV-No-BGngS!e!GIJ`<4F?#L$~ z2E6-$ww`2|eCj+mo04L?kdRj$-LwK(xYW!f<_&J*$fAZ(_D^j}Om5TF7|GXKMhD}> zF9#4WbTcWTmd$RXj5zt#pXujQ{@~(gwMlpSC4FcYmFEE{$)cyt-pV_`!A-d&Z9r0)7M1 zo8r<&|D3Q$Ba#@1nz4F(dVhQG+uz&1{_XEql zh&_HP1`L0|vh730gF`{Q=yRb{SF$YOO`{_nB|hqw2@iVl+`(t^eJ{5gh3{|1+x?#G9ht2_79^K4@BacHxe*HW`L$UOl3pyG4LEC<;2pyK~v0R z)cpVksYWP)BU2eWsVLr{0CRVRob#1+(Z;-r-<|qspjdrpZ23!=M1(eXXzw7SL1&7Zin)h(Rnoy*2?}LtPKfW7IzI z$td|&Z8jU55aYRB{?+_*iXsjH{3%|OuQpUZOWoG}L7l!sXSXf6{$pC^pT_}tk3(w~ zP`5wDYns+c-uhM?Wb|#Jc_cu&?T06P(Iqgxa+BBiF%*sRsHZ;p!L>iR{s{hg{ESY=*ES;uq~SLO;5C zj3GT{>KqBv&>QHzoIeSo@`ww^;R~aHKk9XZptO$u8?ayhT$aCiNY9>Jb?ps!5&4zZ z-q;?!^12Z8iR6n8TYEfZKjbsJWl6JgULpXp?mOo8Pk!5!@V1|MAeod%1wR3LGT|dA z_2)vzq~zS;f7)6usdb{`fWF&Xw*ugEOxe8W2f9oHKs<`qu(lUYsFZY+{F|!_m7Z;% ze){X}$iTaa$99!P>u&(jk$Kp_28B6p@aKu>xbJEhk;!nRR_itzm?>M`^EkK9Z zl;Oky98FN-uiC}SH=yyY0l16nsBpRGfq@MqJc%=O$n1qc*Mpg%6=Ig&|6GC>@{7IAq6<6e_=m)9pQ-D17vX5mwsc=fB ze9|9P`U{!9UWb(NVY{$_m#mSaI=WCRMc5`1$Bj6dR&+GSzRGPp5F~Pa1sh|9q5r~% z(U22af69nD*I7~OKxnDBIR@>=809W;&l5ze|Kl_hd#u&#$ zL%*;4WrOAw1-CuSV8f7uD_!vJLsP-FMB|GI6 z%3i1Pq3H7ow8S9sMgehayBb!RDQ8SEeuynHkRAPqj}GcsE*wmVr7)7NH?22`V(O+I z;97(xNw=-${svX+Qj`0HlzD|-(>s2$f$K(SKx2&hu62^rf5>z29fH`;d5%w)(O(`j zYR>f$b?C5}WBZPEZV)gQXjk%L#5jOR|LVW)i}*l-Hf2!ZRZW!?Qx6ZHpkqAA5~11q zcn<*1WHf-66^JZwZ%AIoImOtUc`NID|AH3!7Qr+lP&_Cy0zaS={YY~xHh7%a81mGc zCVBz&hT@rr5H&{K0Zpge53i8}UYeP&a?0}87z4HiUgQVoYBd9{OO|!19Qx*K!s0ir z+VmGaEpM(9?gYn(HssP5?cZ~N^N)2U<(^>5_ABBH*yX*N6^p`+T7gh&6t;)X?^Ux7 zIS#>5;E#b~Ugn-RaApnAA09}=q*M6q6MbZ2AvxAquzcgvH#x7NUm5sn=g;iY&$n?J 
zV+GRd*p+O_HHX?bFXaK1T0O3nM9V#?ljDb;o4okQH@jY6pKlKzJ`mz!`}MCs*?#fi zFZ{uoS6_Wa{E~V9fqlQnSL3U1kNgd6d<&#al3Xk*AJC5-7(%`J*7h|hhTI;swe3UG z4SIp`iM6|pdAjjC)45m7dCVf1ckbfwCa#Md&)3lUJ{_Nah6Wy`d>S8sFVKC%huYGt zgJ}7A0KQF>F93idaXD34F*!NgwzISC!E4{xzVp2wZ0~&imF>e1KXN=j{p?f4?b-I= z(Tm#y#nbl+y+Eo@jx#^FUFb2k#E;yb*c@wASY#78bnEXKzx@voD%qIEfXAKUlAIVY z7Gx`HstF@A>#?L!K%uhO050`_Xr;(@p&vZu!a>aadED^%L2t{1DWF_3RL7M<5Qay}5L9UOaB$AuhSNXKS; zLYAwN#XMvWle!2c8y}QYM-R!qE1?g63cTMyu25bVI{c9HX?4+N%><`8z_?U9$uqU7oy6ZJ zthhq}E$9olhT)I!ep|(Qir)iLIOXXNWu9oF@nD3An72M5JdlvF+9@a*@we%bQ|&8l z$3$|nVNwwt)O`W~GJ^O@#<1ZBfDZDE0dJVmQKDlP5V@AJ_5o@r+tx~w11@+L81jna z;3Y>%=ncIPOQj#mize|@J2;V;68UbUUVoDBhw9=-Q?wxqJY^n)i%u(nzZPF~BtB*2 zi!M>*O&lhv%E)I@(x$qIPw%AzW ztOTEJ;0D*WU9pLV%ytOpbmTU)s;YTRVqB?WebMO$so(@^Ro7@tnhs`cC8;i9=IQl7 z^<3ydm7J1~&{J$5EV<}V?m3}mj|FVUZ{?x`N4`SF97ZEok}75m^AirKC!Yk|C`P~I zsB$hyXz%qyc*)J!AnD|R6&kyoxX&JFgQ>q`ZAjTHoIH56z5e#Q+dJR>?soFhtCDlE z-CSI3PoG}+0XDxy%R-H>(%_TNv2eo=f&D1pWe#^LalsMy2eiJmqaiPwy^$z>Xw(7? z-E?DJs`~^I_M}AKT^wr5L#}VAiI2$G6heN)3m{@-`m3E25pU-5hVSL$C)>r7$Nq}5 zXWFoz|N58PpZ?Lm-2UuO|9E@y>(95>UU_Bv@ehBvz5k;hZg0Hx#&)Wc8eeyIu9#e1 zU2RwN9Z~c^lzi#n#-=pN2YO%ra}t(C+z|2Cq#d7Zd@ZKeMobhhCtO}`fe$B%$FVk% zzM(5J>P4Pa7$4P?R1J77r}3MVxRTTaPWHH+s&eC3sJb*+0C9GgfI&ZNYPTDpBljVB zgjXKEQI9Y|)koeE7rBP=tNGoTK-Iy!+IAG5+b9sD;4&_2oEbqfQF8r{wgIKD$VT2! z_2DDXI3MXa5RFXkS!;CR5A7#++qzEx)-rHdpFqo852_zJo`bLPp_{78u0db*wf}bc zl-GIUt+7s4=>A3t&H(|>Zs9~wb^@kRK3_m+U`#?<0!x2%CGWC1hn_&4vX#qUH_6WF zv>|aw;sz|u%rDO|+0P8X1{x+OZPM-aAHk}#4vq0zza zu+lep|6F{~c%M*v1n}_`y41aAaGi4pEt19hPk*{S@~Y83I$=AEs4iN^kJS9$BPgEG zQ5VF23a*Ma>pMtLo}I84Hu_Vu&7x@5qBsE2+a@IwE>v0=)>!pIF-hq^+OY|&lI>P* znV}qgXMcqPOWo~XzJB(3GY@5{%Ob0&^Yt1hL(zL)(M~*Qq(88Oy!vD?rJXs-dK{om zt^V^IM^(+MAu;-`;%t8{6rl7uCQT8v5vDtY6!Q7$1IAagzhmsTcpT zTmOv$RPh$gv4UZY^aD7=-c2>3`?r)iJ}azt-I4E%1Pbr9O(1)7&8NC;UlP3T4$9yF zVnrGLq8kZ0?RV>C9$tH&`}FCP?Xyom-cC=n`{F!6Y@yQ_qW{>#<>ghUyT?>DYVL#lgIxXBR><9iW0bhIU@%6U8&MNQ&ceUqyCK=b* zBuk9T*!P|sV&#bwxgsCjlZf`XE;HZ(KK6dNZ#>IDWe7`649V`zH<#0~$_Qvu6Unz|OL zj6dVk?)?Dkjn_V8;2(AMh4&YD%yg#zRMz?1{bbYNopgxpctuB#JLEB5L`PqnnM9lD z?dJ-}$~cMdYVY`hlRkgALv2=LSwPWNeT;c@o6x;iJlG6IZH$?Uap+V{Hel96hf9I> zH#*}R5S{54h?^m6Ysd;`KZZaAcGxSr(tarXoEuS`eul!k3=KZ8|u=yuO8xQd$ z+wrM(VZA>$U-@$_JK@28w?Pb_`p=gj;dkcJXt3Dbj~%L|ICYg zVu8IJi8K2UbhPa%8z8_K)%^kyvcuCPjtk|%ThQkifAEWcK7a;I9;Ea0N7U~>JlW1p zPB*^I@{?cva{KvDf4aT$@~eEArDgJm&DT7@(tPprJ5*B>GynPL58scg`43ERm;p@N$4-&h?hc#&N$E%PEUivhvbQ zlG{L&qsr%y7={TiosOslD*oz&=_&7 zH(b|$ts@9DpYf$j4OG)-il+d(GZq=gYB<41RrxC(URox zXVL+yJmbQj-?F68?XfQ>RiOQEeJ$_6p0a=TQTyL*CAZqg1AOP;pph=($Am;a7ZtXP zVS4KJ)R857iZ;L-!O_FKWxfWkqF=O8UH**m5xXb;5K7R}e~Is9)g@Z>HN3#R`i-8( zkMYRh)5Lmqbm<8j-nK!2?G+kq&}RyWI3|i0BV{na+xRBdwhtVe$H0vicHYc7L$Uds7$>J^+t*%wV|(MB_qL6j%*fOR z_44A0{lFj358m98uY8rhxXcTT&A_^TvB=9VZXn5W(F(@;lz0)VK&=~^=ctjOoa7~M z^re8Cs$&bqiG`Rst~nVViyl6&1K8@{V2hvNd05{U2mH4Gy^D+O{OVkCj<<`?Ki_`# zcYnM6t3Ui_+u!}||F*sH@+;drZ@sd8?|VPkUVr=b?f%(A>$?yS`6X{f*F5AQzx8uM zQ=KV=oi{h>|J;bT!#S@IAHg^u)%;?;2G=iW+}qSw3m_|Un6fHYJR65S0~nLce+3$C zZmRXP>Bl@Mdx*FD(!9i}?W=a>*g(61wbd}~cYCiFJs!q*+|wV*oxd&Bd5w!Z03Qy| zcU^D)GB5U!{=i0{^#l(2r+(R|@$9c>7WBAp|9dPF-}c{n#`!-Go70D(+v`89=QpfH z_Z8x2=y+GX`Tf$D=UUTMe$anCK1O}%zxe-r{q|qu~&Rop<-2FEj*g*Vwo0{Ff|^N6Q`Su1F-OAurUWp7m+A%@( zht6JQXZjWA$$W9@@mGF5JBvupxzgwPZ!RS#-4Sp=<`mGU$2+a(eo)U z&7y;T(>mfjI5=;l$K6xwhd+&~Hh+VOG;K<11EmvS*(Pa2o@q@w2`eR6e`J?d&$spbW+*xH{zYjR@}*Zr zOwAmw^I45QUVH%Z0v~gu<}n8cMpjF}y(3<*Wu9t<#gUpn;SqJs=hPGDynGWJ_JEI% z0?o@A8$t0m4as~0>yaO>a}!W}=rmoeOa2XZKzQibzeIlG5!=fj>c<(2UC+;mOq*}P zh$ph>*VIQJmmbBnE&u@k^hrcPRQ)Y)avd{#?Cyf&GrcXIJ0KVJWu5N38yE^F_(%?0~ 
za+@zQ`hiPsB&UB}?>I}I>H0h~=6h_K<9F(fZBx$*jN!Bg<^!L~-UBw1d->xeBv6& z*lXRfnQ=m`=N-OK_d(^h52B^-YhCC5OJwfl=6+F+bsp^S0SVf*mdelgf}C1sO@|MW z(Ru3xLO<)RALs|f$VQ(23tx6IH;kO?sqv#)^yQ@Ky~o8TlkuPnXkfW9l$GpD=a9T3-Vjhnip4m zLoNnL!&JVWlzW=@L^z^w@cyQ@&C3(W6IZ;r{ON@qMos z-0QX;na7fr`-#}D>mB@7e0r{O>=#+u={b+puDuLDVL;!lVEN)hzHbLp@YteJzDckqM~DvldtFc5{7(T|F;R=ol;Kh^^Qq z9OD4}?vIYSfAe2}3#>|>W4R%%;4`e07*QJt6Kf=q$%g~>gpjTMyEj~3cjsV9) z{Xx4KWS3t`Hze-tuR!^OJ?vo_$N0ieYNMz69l)=Cj$j`hBaK(od5nm@{C4aW_xvM}E0yt)UE96!ILAT0W=C~;%#&_?w5xnun9`{m3NKG=D5ukI%S;`i8)o$z1_FIX`K zm`n5>F?7EY1A+BckP;v4m-Koj#d*U1xR2FtyP`v^|K|9g{Qf`viA1~`uL86)$&t%N zf_)P~E*R8S$_>N%gIxH~(do$p8Q2;EWmGRn-F}!y-nij7=v!apb$ysN>^hX!^i?mq zolgt&n2A#8m_9p;OrdqQO0cBtO zNgdjjn=AmXxs@{jnuitDMGmy8KQIA%wb;09pzf?7c6))gtkQd6|HL0c3o75R$?|7B z>{pWwIKbh*b0x=GLTrEGnwVIT32#!5Rgsp1rc$v_)U+qbL+WbGB+lfcOiECoM+WrF z1EeXZp~6wxl|*BN%E(b>-|Yt}GLbjl22@De7QQ8bKi*v8HhkDx+osuS*e%noSv^Dhj@x~Z? zB%5gvIULG9`z4h0MZaQ!JmDJVIHx^j-?V9SOnEWs%WT+`Z=YR!~7Pl=7qmi zsm)@ZDA{*NIft8tIPl^z{hJc$)2qihM8`iJ}~xLLDG>I&$Mo z`%dU0=kE+Z%KtC#WNOo*l|E-2fX|`$|km06` zvlP91FfUyIuW!`LMbl^xiQeo3mBQuY(8YnW{PneknIVwuYEQpIrh@t}``WVSj{ZYr z##ZDOdx3l^-3|x+$BmyvLG)xg)Qw*F!zCIww!W6y<4JWDb2dcI)EgILFZ!YveF9xi zqGGIVZCEv@O)<>!p#Rnn6g?%`h9y}G0^?O0nA(cnEWfua8(2QQ`RKX7?);<;sz9NK9EFS^yg z&L@UBrObm;&}7vgutrnb{p}jmS8)*#Jp{y!J$?fAZOL!LzP;oBT=Jq{=RSeFz zprXMG5_YNLOo?3cE_vuBq~c}b9fWW41EB>bF73DF0!Eb_bPQQ*{~6cx!4JBjr}BA1 zRT;=3XCr7oQ#)Z~Jo`kh+?iyx-nN^y!*bP6K1a;{2Vc}x zpL*tXO~tFLo=*@c`DG3okWU+Q!quF2xZHoTOJ)-Xu8Yn3jQf~de(qBE8`@RRL!)Q@ zW`O%=r`yNB`sMbEU;KPKJ$pDb_sRHLYvP*^ zaNWx{jY^>(sM{aK0Q+Pw`~%=2m;c&%Wgj4VWI47NiDMyk#K`F*$B8(beqT>NC>rAR zIZaiExcK_~quS(kJ@@}m>G46v))VWjLONZc2eNbau+@{OW-fQWfsV#_}B zfW%H8+J5=({h9zAaa7UwEY&ww0|ElrNfhx*xxoPjr=+0yeuwn=6V9b4a|D34>A^_{ zN%O|>!yX!eQD|D1l9kWY+Y-}YKYJ@|!UxNzNL{m|UdF(YNWeh>6>n&`){0a|!bIfg zpdzdWF><4W`3GJ(vLu@^?%kux)?!v{Dp4Ga1c;F`+YYtlP#?e?;o3uVYR@x@j4QVy zo@8DMnNMj)CjS|`m={^X-~&l!`hhXLrqTEnp=FSSLKNo(-{1E0Bbd}+79OCZ4_+mj zXh$b}Tu-SE4G3hTD}C*faBA!IY^-ZFcif)3nE00+9E_1nDFq)R{a~N5k!MFG8-QK? 
z%d-MAdYtl-G;vj(X{IoFBv0jiQNO4L0%MNfTz+XUglw2!1_hs3| z5i8ASUC|R6YA2?w8-nJr3IKYrwuemqi#}sT$u@|>^s&!0O2?X;e!&avFp22cbp--G z(J(HolyWJ}Pcy(H8A|zPW9hIiHDSO0;v)zp6{z`}kgYLx=-*|Nxlf;caQu(|(Leo( z&Rg$#97LuaAl~G5hBAMY$zA9K69h}3j7y&2(oB$4_N+7}M;1Ng(AG_5`&{T?I zedrrQxA`~oB*$+ns_fsUK|buNbArM@Z?KRlIu={RUR_-~Q9scF&rM|f&R5@P6Zz=T z_SW0q++Kb6-R<<1S20TRc@y_aG`x(8Zp0%G&$L`~f>J`) z2CsWX<;2y}&>@9>sSBCb!6)6YgF5zS^N_^JGxO6Y+#1Mito_wIil_aS7;3hMmm8jZ zA+=YcyV{;Sd9wYNzxk`}5C7nQ-2T&F{fF&XB7XeC?`;3z_kMSK=bg88A#&sx(*LB( zoBHao@|#cSWRq(%9G6~qO-G*!Z(7>RMu0KnevD<7uih1Hl>;eba%^=~IrFatC?9%(&VerP^^ZxqK*u3D zdPz;+^(6XW7qmFH<=~j#j)96zv)Jz!occsBI!4uhv;KqryWJP)BQ?J3N&ZBlcxHbE znuBr9U-SFI!u8Ap=ED%J-1|sOG&60L%%kG)-!n+Z%s;wuH0>a~)3l5ypPS7<#b}U9 zDsYamJjt#<84j~x#?Sup2`I}FO2*qBHzR;T;_JPT#~Si2G0AtHRC1HbfqC>P| zmVQwhH$Y7&8yG*>0*z4zfSkxe?rtwRlEC}76b~VCn^qMEv(oqkM(f94+6oXWowQ*i zZNy@Z2{T#$@X<_ahu#AMT*n<~`K-mllMKbDIl+UVtg8i-{ZIUih!pQ%N**zgD3ccb zgwSNXr=Be5x42ezl-~H`f(~sX3Myy*YW*@RC_uE$=i}%vvZhQDrNpQl^4s7(MLsmq zXI9InKw18H9Vxw#pc6Zot7GCU^oGVuuKn)%0azAoZ6sx-K@Pt9V&uW*L1ncw|7_o} z?X&M3k81uIN=j9s!2UVr1=WbAypYaVQJ#N=Of62LQ+sjQZIkJcZ$VL+y!JS!_g~yz zfBPNr=zPkBI2#qw`nr|4$C{{D5goCs<5}SNhStQLgmWltk$m!gmndZa61wK4T(|+0 z)xN_4^xYk>lht!vF@s$)=8Fqflp?u{jcYv6<)gObzJR^~Z5bd^&+D8Ec$rHUyYgVkgisE7v9WE>oOMxX`;X7wVb_v@XE@6=1OS5$N!9*$Vr{O7;Q!iie8X*PHQywx+q%O+)jRyX=N4_ z$Z5b`@C`D>_qYJ$(nxzWKt5aLm3Bwh+8?RTkfi56`{MKM=Rf=D_W7ruZLh!j(sp(#o|7~496fkw`g}0$ z*>-is{l;ruBJe5p`nd(g{P^_P55&24;4Bru7A5+lio8gO8#(Vz|Gflx=;NcxHg-bu zdm6E3s*{34wlu0zI{_!_Qf~A3t9Um|`K?d6WIcPvw}r0pBS{D2${4k&ofjZZ#G@^e zfR{~W)1yb*$xC0~-uw2qw{O4w*7m^%A8a3g@|n6+{Mw)6o?kRQKH9D>#eZ_@#m> zI|`22zvwSkKvcOwEviy;AJbO%OE8d!#`li+1hu$9MT0={>1P-{0Kvo#-xIPN;XK%) z(|^i2D|k=9w_{Mh(kKuOeOshzs|VJaLw?9g?1lHw5O7}{mB>`WH$KePV}s#w#5Xm| zK9|vPbLr;~{wnu$jQypr_^SMT6+L4N`$WSa!6wzk5+2HMs$3F+M=pQh#b;Y{fnt6h z$|FB7{)CrU`I_xUZ8G$p>^;&t>z}?Nj`%Dm^Fnj%@x68F;%9f68wo*4!*6`nALG?O zU5(*}+<$SbOm0rmsTW$7aQ^89}!4c#(|oq@+PSjF9vxZR}-CgBZ2hOUfwBut?oF(2OT^i4R%K1OA2ci##DqzgjIg z`a#0nnt9Onfr-rvsND$XqOY<;j$I~JY>w~c1pxKsy9ly0vSM%!2+ySOCp$#U9A=Pv zpyyBtn0Y$2^Fk991A#&?UZ#F`-Viyxh)z9T(N#T+_UAXjPM9Gkh~JB`rd>&eEU3cU zPo%5RCV^MMKhdE-+GL6AENzy-A2RKS%If31rKP%X(6k+V)k?k{Ky|6{r?4fLHWq6V z_DGx;KA)_#F|{2Eo+z#K#R1 zHiV1~zbGMEZYDo^@#XD}x8L4gfAgE${a0TTQa1B-H|LkqiH)LHoseOh-(XeZ1TgGY zgKx@6^$|QW(lbkW9*iiNw%3n9)*6@WwiC8-F#}6o)Bq8$6D#9L)0&fQeCK4V;J~MT z_NIonXX}Lv)U%P`xB9oUqwVtILi>Q5?a60fsNWxNR~J|6+w1Mqk3Zi2{7?S%_80&5 zPq)XPf3dy&#;e=!{m$=f@4x^3?d6wW*&aWBs<8uY`qJZ0ox+BJ@xj+{`J_l(>~pzX z;C|NF0GOSC>4+D*tAfN4I%v4ziA>HM^f?=f@B+SBrrfbMqT8^c)@}8-MXH>!1C(K^ zW7i_Gpb@iweB7;vcjXQ3c+;0HY}_1_sux>d6d9LN<$L|}{Ik-6kG|Xd6IyS>%HIIa zH&4g7+yngwO{o*4zgK;G|C+G#+j8%+W6Hm~z2$XN$8SP6G;ihIe?xxoa~@I#R|I9h z*DsT1|1#T?EdmR=Vc&mqJnSJcAHoD{d4fmso$-hTB<%OsEmPrKTM+yA{;%^i^RDTO zV71qsk_~1r*XxOjdP~X6-8*#xcvW8>Hwqwy_ z3py;a`fu>zUlR)uX*v$1*?%5l%lvQG4#DSa!QJiAC2yb0zXuUPSg9X~JM!r;Q;iq) z49KoF>@`XI82V(EQUEPAa(rCn1@>K7l@?%qE2P0Ud|BU{aNK}JPvwTr8(JQF2-;eL z&dn~g{-(QI|E~X>v-cv?k^iL=vabaRrOK;^VwnH*dk%JoOPtzMApuDEivx_wXo4&y9!moC?-t@BD zO$U6|A=ufxLd$Btsx$vw?DhuyQ8wDze#tjZxkV|y+BLw#fX28x_8)LE60*J`GrX3o--I1dLfODYS`6`QXk@Cv#zqGJa~mZ<3K>%uvnCLJR1 z9gO*j1>!6kdh90$srdpO30hw_r~;_&hbTsghkjvBxvl&rMu)*&09spEezfQTw-Aut zMtU4z4}WT&G?p&t`eEKsHjd7vKdG+$2Ld;X*90k2=y8b%$G1dzsjnE^zQ0vG*lux= zGw)w;?VU27(C#wyjE54FbA(`#pK;C_sH$Z0>8yD}H0{`C4*6+1`x}~=-0`E*yegR( zn&_*n>P?4F)nHtgC*OoAZ`Z#ye%tTaRylUNi%QJ_Gp# z{3JsUH@tyjxHbPOzN3E&hTSUmqGYK3=ZIb93%#M_#ZK8b_7~B=hP!F@<=B<}s@RQs zpkvT+Jd7vx=;6@vy8qlsF6CHpK)!TM+3J7!AK9eln{a#Vz;~N&1+K@CiP*@Vu{{zLV&EzoiUZCmpEbOnuvHXqhoz|?O)hyE{zX;<6R@(%StJ8`qT@~n_J&9*9! 
zcon|J3DwZMDr)Xus8Zd1XwHzmTW5oJ?w11uErYp1NOp5XK);z(@eTqZO4aYSgK8AEdvxYIWuMDhsFg{wt}pXd)Z9@p|Y z-nnOr?VaSb%u-IAW&iR0Gk;iVdvdO}XWPjO54Yd?!TZ}c-+5>I;_>I(=TB=7s@Som zvCoRj`O|aXCv}XtM~qDA&-hVXud%IS#28dR*#_cUcF~qTh7W$puldnb!+%6{PXE|P zm3^Qg55#?)0u4EJ#fRpR6qa?odECp6b^l^<0Q_YBa{&V#C)yfpGVJ);_9~BUnOAZ? zdd;s?`tp2>L>qHOWy8$f#prDL`tS2ipI`k`8lY@f@v6z^3E}Jk|LC?p>9-tWucGXv zKYV`+XW*619y7wbACb>F9X|R%w047AL5wHH7Ju9)HxWh%ydT#evKT`EgFt-0{LwBj z33B*pCMviCtL*YvQ5jnE!a`2#zVou^j8-LQDoERaa3jnS+P6+|JjFdnnh^YGnXm=n}rkvT~Djiw_gBz^m zO=puyI$<@6x+<{|q`Rx|l`C1E+evi-785S*b#%Gw0JZ`xH?`jgnhfppE`4PI(00z` z!#8A>Ttjl-yhpAx4kS9hs#eLNlfV-q#XG?_YFb|<$-Q^8zLD4hy8l*O!KhXo`2>~? zQ^U@WBFrpQHTqnpdE}+vGnqP*j0VJE2XdB#4j1Wd=)fsEgj)1GQIX+=i18{r>cs3a zWW9iJXmK@}4QVz{$~3T0FumzZgh-_(xSJ{tHr)uN(%%y}?&ARqV3pO5M7lEOV`F5X zCyQDZw~QgtYF@!h{PI8_hT4q4sx}_*v*`1!|NiY$;jVO|@Pqv$7DYBLCKeyReQzt2 zM9)b@GSO){;M9O!HtqVExHKsy4^NeT|eUf@PA&2FPvyv6f%h|0G^E>pW^3@S{C+a zN0-`5-fRyaKG@!P=bPKx?|y5$|JqyIz55Tvz$e_#z2Neu=)LO;|Moxe%qHbbiTBz# z38)<%4ko&St7Y6Cw5JAq(E91Ob2HeS5z7F0aG}q!I!oXiiZ|TA+tbINZGZKbf4=?EKmVV%zy7QLWqWvb zvVHfr-rIircYn0K^VVD2$;p{w!kfF-+m)1^=;WG{qvD}?=ilDvul%wFj#Y9DbN{)& z+0bV*frs9VnFh$omh)A{1u{tVvo>0+Vb$jYstmQ&Q32=mtNScAIQ4@=gL3Pcb@X)I zHqggqYZ5OckOPvJ7+n*9lM8Di3b{?4syO$`V7CEFO&2ONVc54a=#Bz*j`z8m1*`dY?* z%ebTe+TYB>jNeH|nIfy4Dw=;7!LS@LB*6+;MEfim-g}bNfS&amgnHlUbqt*9oJemD`R%1Fq+3$r~Vf>cPbId4T?$v!XvK zpBu?s&V1=OI0=b!g_&slouX)l~6QmbAeGzYazncC%Eb?)s=Ok>hug^aWzr>e3H|7!xr5?wJ zvK3GBh=%?Sw}5YISYzjYtYmtK#N+tLM;O6*Jd&x6s`N%9$4*}X#<-cudtc@sIt3nk z7DqiX?ms`YVlUyiD`!7!yX*XCQy^HtwgYLg|{FlDAt8FzIb?{AJG$R4x zSF@rdl=_F8(i#`Ug1z)-pM1QXKYpy%lkJ#ugCOe~F@sMBL|8@3oS^PCeJb!n4-#h5 z!8kUJa?xdslng|pSLM|46S^c~$V(_Zz``celapBwO#D|{s^)PSF~&%e8>E{YOy!uy zcnzKAiFw6{PM>2LQ``V`G{wjDS8{+)%>X$kPuEM(=&nDN|024e@G6hmK!c%edO^8%f-4pot|bLf>k|L_64 zn`V&vi)d&6$cKB$+rm~mZYxmUA z5IT9ioImC~PeP7u84qxw>o`$YVlIwYHxy0B&gT=#ErTQYlwEgwLzPGWF$X1+YRPa| zTyC1+1FQwqA?h?CZMOk`W6#P*O!oA5@u!~>zgzsQYi8k)K1v(VI#u#Lx8Mx;&U+W4xQRWZ(5Eb@4m}JPQ{sWoqw$xk;mGXcNk!zS!K&6 z);5pEn)iNjyl4$M$3~cmi{B;7A?MzxZ5d+>dW}Jy*STlq{bZOnepegt8p7|~V@r)? 
z?wj$cX+|v%51(8--hT1nhug0{{&0Kr=)v~VOE0QjGQ`J+m5%r-dxcYj*84zwTfwyS+ac7x3x3CO^y8d=yWB+%mE2zyBk!@} z$20$Q0(i(`O z`}mVzZVWg{uMHi5mez%8x2bj85T z1CqQ|Th4KoH)JUnsmD5fWQ# z_527MVJA~U6`xvn;JrQ&XXT2G+gv9e_y7a{8XMqM<#`1$_SkYWc7RiEzlC%^h*1n2 z{mFRfRZhD=et=`#vj&=1$-YCQxlF4+iQlnT=CRG6^pO}>=#jy0BeofDu@gJsxBVr@ zV?^!X+~$|1xaSjF+>eTpKP0O(#-cr-Eiv=U04nj}PYG9wjV9yz!FC0QTM{0eeMPPs zNVb=U|5R(-Fn&O|M(iC8{V9&{I2fYwTw*O$Rr328m6`R=b1L8aCl=)RBU&KwK{t4= zchZ0PdF~oZWrSk}6tBTJeAj9(Q;vBl!Y1LH+87to*E*BRe&lwYK2n}a7@gU?O!5BJh-@e&#Q-`hSo{{4UQPk$oRyS7Du zKL83+RClP+>#q=y`9unigkz$)VKH|dGtmw=DZJCXSu*$};KIxgNDKoPs|NVw?!t5O z1Hg9KOh=R7scV5j_dI#ONx*=WeeU@y%;0J^@o?Ipjp5k{Gf$Mz-a#f<&>g?k*+d z!e@jQ9Udn+ysATz2lq)}YyqZ_>&Pd$u-gxc(T!{k2}-rjR+;`g+O81}mC8NdMl7k( zG|Ituto$i5*=H;Igi`*X-zQap$9R=9$4UP8`i2;*00BS0#UkH10kQaIzHowNqm~O* zZb0)JEIjxI$3tupd4azF;L-NRTkme)c<(#gqu1WnuJ8d(+tt&{?diETkM!I5#ddXm zl@pcZ^N^db_t|c=nN)qg)PPTBcgp4=z)j;kv~bOKv3?pu|L!1k9lMs%aRIz`dt4AZ zA-WGGH#gpc_vu&#HW0jp$H|=Wz;FFO`Qi)z-oH5fEfODp^uhLT|JA?T{`6n}@pexe z=Wo9E*7n=)|6u#!4}R#2lk2Nz+qq)Giv!o{48CIN{F0Xe*x*R6HVBvAXh;oxQugBx z<#jx`Uwn20x~<4b?tNv-oNgnue{Zr22IGM6mSmSIskT+r9&^*mQZ9R}Tpxc|znPAec{%Vj`-Y|9S z>833P75DaZJL^}E8=zzz#AV8w@w2SzKhL9-4)Oz7=3n6d|M3%F4lR8SJXc;C+P@zA zon~NeYia#~q8nt**N_I}Bh#!}M|eolH>aIdVy;a?u&L%BjGX&T08a6v2vf_U#y&zc zk;58EnZB}+B+iq*F(I_iPc$nRUG_6GdB0?NCYouvff$GlZA-?h+EtJ1q0J}kX@jn5 zSD+o*MV@&U+M#3Rr@W>^XdJQ^`NM~Op1Gm2;{<1IFqo2K&8A0GooF+LbK*db9F zKj`G5UV=o_G0VevF?l~CV#=Kw?1SiR#6g>&^^g7fEhzfFG1*Y#Y^M*D?dUT9MJF)T z;_n`whtK-uIM~Mc4}FdT*}`zhwv2t26-Tq|F+MdAm6txpN3kV$x$p&k0bS_)EDxZ$ zOaDk=zJb8V>Kp#mQx>5_$b{CTtSZ&y|#=7k6 zv9Lnh0?^cK*sNNLj0H7jeLxQCX-n!o?f|0~T!L9J3OgEx@KjmXU%tUnV^^{3ad!)B z*8)ACTGwb_bQ`FX=%*a3F320m0Sdeo2-Qe+7o!PR& zH+j2HcH$ZicvVr7oi*kHk=bh}v@RpJc)d25Mii>=Z4WfAeK32`hri>K_7x6!ZTGMb zKFZyH!N(rxn=Sw@ZTPrCu3x79!+a0Z-pGWm%q)9nKTccb*acFY_O)%{owO^@Z4kZs zD>N!AHjxwinl*A~u9h5?W!=g-e1+RQfxGPCJPX|ycCXPbn|8nGVi?;HHpr4vZoUbWra!7O>G}EuA)!5^l+~)jiUE;C}j}g@i;`k z&u{1@rEHwX9-#TxW(nr~oV0fga(=MEt^;1zm2AJ{v`xb{m28&mC&ACl0X)ou=j`-k zd%)L_OZXR0o@_t++23xDKYhG?{na-t!M`bu6hGwWJdf_k>W78cz>5M=1ohOt&%>8k zs7BFE(;tJ+Pj^5CxZ^P)*AJz9=B8hv`6R#exfhlkezVPh|Sw75QS`Ug0Nfg;&Q z#-S@gGLlU1Iy3=`G#YySyR5^qZW*Y`=AB-#^!(icF|7WcO>KPJnFha%>OQ6a5GcG^ zD=zj+_CpU@^Z6Laix&EqUgE_=SYl`$l4*YWLSs}UmSI)QU1-X$*c8dWFRXsbPi({& zz}JAv{h`2MlBXhI9z-8zF`xWr8!}fY_e;$F_FUDObH1lvKKNm>X(HI`liK@wAMO4l zZ01*)cBO!FHf-_}n(ofXNZI{Z_P9&2q0*RtF;h@X$Rb`~Y-__HG=IN{V>>tsX9@Tr zWy}fN0?x`KYuK;xlUPwvTo@14U8W?yAZ~azrMR*`6-}Q$ADpmTqQNf473etUMz0;R!{ZAoi-J}K~9O9gZ%O&coPefL!0}P0OLnT z&NWS^J|;G@-aZtY=pd9_P@;AJgG0am#`#dmrUO+F=?BNZ|NH;FUjXp=Oc(@oGH?C) z1WRY%yRLTCebeWRR>{Xef*@+z!8r-s1`Z=jj~i6v4WKbnm)t>8(XxV1Fgi7De}WxY z@*VAx$js}4$ufYS57FB+bdoI6fCz3*h!wiyrtIXr`5J%K?hdbZ$nT1SQgo@=?dV(W zNja0K@fJ1`eAz4m%8|owH-ft1LFQe2K$UBN^}}ZRGw#td@@GKH;K-PW-G<^xIH0M7 zOk8q1JfOi-FJDz2Fv^AZ2{2Ws1}$DT$NB&ZRvcuL9DmlIsPaWzni1z1GT8`D#ndXh zzq`*E`!yD@pZND8Vbj1-aNN^Bq`X;{`CvQT38eBBeY64czcl#M`u0!y6M{>%U294{ zjDB?Dk{94S$Y)XUC-<+BiEWZ0!w{7>s0b4q80$ARA({j#xTltujUX zryAC0j;EablI};D=;U{|<11p7&;up%Y@l&e>8G00D)j~k^4u_k5Se6s(Qt3OmO+e( z$De&Jvors%F3-3B@Mr&S`xpPy|Ga(hxBq2(@xk%-J3sp2_Ji+zXM5wVx3`D)A8r?- z^-oG@1Acv@7+qdSJ7XxZU`%FyB(<-GQH(F{Bgyk8v+PeIWIt?YohXe{a!ku7r|6(5 zyup%yzJZH2z%@5A{>b5hIqRp#*WiwpDRCzExf^{@x!&wFAR~DoeVBTDPLq)Q(|E%R zT#?l>oe$qsEuL7_h|p$i+znqA|4z9enRGjz6$+iXyQc2X_7$4sd%AAtpj0%~b%yRo z06W?i@c5aW7kVbEMV50r}i*&Y4o7oA*W zyeL!RG!pj;(E?oF17k7lO(Re-@!9tu8%hBC9iP#X)Zln&?q-v^Qgj_q?x0q6M<=xP zmrsrn@D=V9gbivnv3?8&E1WhBG z(XLTZfxe*67)$ic5jbQi*JubY?czB(J=?za#@pNd7hka??)drUN6Fp+t4Bp1CDR!hR(E1tsi^pUwy=m7ZP`f3G@7}TIpEk?v*Vw;bUOw4A{rFdE)ZW=Y 
z$Dn?*=(=7-hU$p}2rHz5K&Ot8=4RuR<% z%tG-ydDr6;<6dP#aqm3s#q9kYOk7Wt9;%Tu3Syy*+`PbSQ0{LyknzOEp!2&`qQg!| zDNRADZ;My3&hrLv!CSo8wSPm5aCu2ty3}ZqN_}$`Xj!=Felk_^G$8|f=u`M;gD(e< z3vfjnX>N4g!z-#(D?3#_^Z`Il_R5S$V@hA$f3eM2K~d+D5)1{{+zX*1y!lJVJswQV z7=a}kmP2fVFj%5&IIjCRj7QGDd=0d3bc?|803>YngHPXK19m08lr`??ik_8glVgXD zzIm(MB$TD?=02^W6LRA#GnmbdC%^VXAl9~4?BebO-kx7v7DqA@f(yB6?xt2Cj? z*zYi*Q%Alc9iO4dWtO6%-S z{V7klZEs+dnR8?QkTPay|1#cDE&h&S_g_GG{o#M;qoaL|%%m>oYo90h9I3XJ=uiBL zbYe=j*AA++e?OK%H0T`d4;u-|*EWZSoV7~&ytcwmqET`flgwYoo)Mt6+9zOqUHz*# z=+Nu4uU{+-2*3CA9thFzVs)bVO%qv8u0qsu%0}b~uhA*flndzxl^UyjZU5!P`F3;l zM166yooRjMzS<|h`fU5@-~a9Q;QoW{l~-R*{E#Ca-yc=-&zq_!E!yg8{{}_b;eJtJ zo;!#c1||+J%1&~B9e0M$1caFJ00y31(wK)1&i&xc(tK;)sR90}gXbxURpgE`J~;67 zp_>Ny+Q6TNRx3=%T_wDUh+L!*~!;iMd ziuu*ki|yn--+p_uJ$T`vAM|pLJUct{2gH2e+IiMX$BR$?@*yhe;3pZbFR%Smxw57W zx2g02LjipHgO>7dxyGI6Ug|Vz9Mes-o18pWK!fwL@wi?zVDEFI>v7~-70G24^iO2mCLTXX$3oid3;$|^jR}a4)wi^f zXv;Y*_V9DMzF)rVq3AQlKh(GS&-~f*?Q_&|@11@jcJYC+&Nm`^jm0|4OR^ryejzFH z&Exh{&jyH zfU*Q=sXI>MSA*AmjXjS?_CTCFpp)LnLpFvYDZETdues`b_g9C(kOF8#i@~h6T}WI_ z98?ggWtc-Ksdp^I*nREiR;AwYL539a+@<}uIP~xh0RQM8{hxj!iT(xv#s-WH9@J&5 z1zIzuPCBj+kVPCCCQaH8!B}J#_#swBkDhw7T%B1t)8PIqPn`uwepf;0;snbA zj4~}6mV++k*Vr#0Ck{U0vB*Hg|GiVu3Rga|vFjHGtXp6!ez(n;?0WIH)|u)X=lH@0`a^_}g7ufMS!J$PUPZZ0lFvu#(tfy}4NtYU$+HKkbKjh=T)U=saMN?%MEMocQkX z6`}->ID72yi7W7`(|$?-G)HxEa9>_rZWoWAD1W-0KYhHNKRMq%|Ky|X-~RC*ZGZXa z|K7jf`pz4#ZU5kRe!RW=?z`Kg7ap1K$&<(1#T73`TyIyG+KgOXrt`gp>BNWL> z8vg2g5s)+2B5x8>?=(QgeY{j+sPb2IyPSW^=)962U_sP=C1;$O@3~NI)qj27WjqBY zZ;!7omj)m2>c323F8m{fmTsN6mS55oi=n5}3Pynoea13%b3GTbd8eRc`nIH@^+$fr zSer8eqXb}a!k0>JLNE*XFLoHG3ZUB z1AHdo(2NBPSlgiF_V%;lL{>a=-d3BT^>$m!=O!3(yS-qkhc^6*8)kf2^; zt%cmuYhi68^^^mXHaA-K5cP~Xc>4T8*+OZQ`N^px!?9g~_DZcb*C&(C@~yvcmLKTr zAiIE!=bZm${yb>e-G4n++K$c_V9d3CA5_-n?b(Iqu*#l~()-H~f2!-6>-^R&dsAXY zyIRfX^N_IaYs$o8rp5)}rQH6cP6{{YALZ5)VB7%VYsfhA5q_2PFac)Ti)V_kXLFGS>bTZhnkb;V1bWS*eOMT1TM!U@`eE)@5z)3zyC=j3wG4XG zY5o7s5j(NMXnv^k^mk_)jvKAn^-L)4Sw)=pu`*$qjnilAw<~>;WL8EehI~24owD z%|!#m-aRca?T3aG+nd%ufw6*h!-xC{w`F&o7r`jE{>Ja=SN@$W-!BB`OfYRtp2A_S zQstos?$Xh|1K@mv-#Pf|Q^Vv1R#`w-56}YQ39=f-IG$qyIFvWro`=891B_uJFs<*? zWqbiVr~!Wm>wulT>OWwNKiMDtWnFMR<>-Q@=UH99sF-@&pB&!i-=}tD`U^zHK2LI0 zh&_Gg@q|g=eoWq-90Bc9fAKwMJoJ4RAP@cFQyucr=fbF-=^J@|SY}xj2OfZU44On} z;@1>PJ?%VCrRP94v{qo-mH8{k;cdF)@pbYB!V?*+c?hg)ROW-pq3HyI{4VG=rP7rT ztl(d+FZqxf<1r60xsNEk)?C&=IDI}-iTh~xAKc&WpPX$c+SiJD`@0YRcKiI}kGGd! zctu;RqwV79g-*{mPK4y>nEST=zE`P4nf765XZ&Kvh@0v*Rn+idH|NLP7*!j#vH!-B zN+M})AluU!VMCAq<@rP4Fn=CI+Pr@z4%{#s1#FZ~b-Kr%%QTf03=|bvzxkFaKhP61 zW9Evl=jWdImFhgSB=(~H#y8&E-u=eg+qq)-^pc1BSKGrEUU1B;Qo8tn828@kBPsRQ zeft{Ym#7&dR&f95t0ND~U9r4G1ApBot zj^bg@YZ)L5EXq^{hs*6DvNU&n(Xf7cKgFmk(SYNdO4bQ@mBR~8ZMIR@4{+K~3-Bbh zI>tABmumn4b+vo#6UuTmZhc?3bs@xph%NN!NER=$9$9|3pXs zY=C*oz0Msz9e9rQ?3x&(lClb-;pHYmB8ycQ&h(wi$NEcS0Zaq%S24h6k(kGQ6M24? 
zrG85unFCZLR(a_)GJU>AocBEf*FEQG7dYlR|C&j%6Set>r2jr7!QCMDDfj8tuoXxqpWk_VLG62G0oXKzrXW_+op;aiqcF>&>XaN>trvu4% zqzB*t6>IDzeF=kMFW&=m@%1b7lQ~vZzXj>A#9&W%xBm=+>I=)JxY9L5&TJwERJakg z*ivF?a=V(UYu$7U6oH#W%K4bVd;8ozl5uKPXh{$v`P4e9WS!M5iluR`J?G#R)AC@ElACq$CU+SX5{g`e3*lksxKBFkHXkx%i;5(B&rZ;W1_>3LE7L;bCd!iH zMHN-Oz=oGM=e%%R1SeJe5P8x1K`nB@;V+FX6DS=3C$OWV`{N|Q$&P-yzPYfzv$Ip5 zsE<#MwZ-4K{+ly~~b2~nJq~Uwxufo65iR|c9wlEeHzat*Z%e8*d z0%tNuWCvqG#neyLlFdONwqdjVvF)NLnS0*`_{MK-M;tz?`*HX`kL6axn`rQc<|}|E5%9DrJMdkW^Be6BG9%0w%+!kxeq}2 zur(Jt z%>Fao9s0>j!wx1crQyG-V4iF5K-U^zohtM8OgL%01+*AoX+?&N{|WKS>jkhj39@Dh zDK@Nz%I8|nKj4{{5EeV<&KRXq;04q6woygmeNz<+F1o(s{S zXJI8?fb&nC$6`Olm3BFwjWGeqp*-4@@ZWur_Sr6rMPd?eX z+nke=YAiec@dL<(kJ`r^EBdm3adpxzz&HgC-Mh!vAg(yc!fw;ufp3Vq=*}7>O!vL% zz(uF`9nf`u3C5XatY67oHNRxhY+H(82k4dGies7?`|wn9pMfuETgq|+D9&MS!yX`d zSHNz^)cA$J^a74QK2|K^-vSt;0pCFz#JW=Y6oeO_U;4v)nhWm zxdv%Q6$h+CxAfY^TL$p1g@e? zG~R<)2EZx^4=Fqzey$^rO!R41JL?KDP+LLQ$XhJ)6rAM{6ANgWqNG?5KLhlQRvJm1sSy3y1U;!o_ykF;Skn7jaBwqvhq2B@sZ zHk%?e4Js4YXMB?gb;}Bl&>Bk0nD!NoeMV03qq%UC zege46x!3>@ni-$x^W8?%=Rv~XeGIgmt^??WuIJ5cmrV@b0&wG6ec|5&_}HBCgy->0 zbopQXmoWu&4E8b(<)sQOX|YlLG3P>XD{p*nzopatU5$cS-uK@sBW2_e%lNU<-UZ#a zJw|)}Wc&chk+H_TZ5=wsxbE@OQ2G~n%@6G3VGJezizwyZ(ZA2j~ttbfP+f1{?Q+W1SG(1LUB$RT`!s zF~8ZTnE!qI&)g42SPSet@$EY0zRwGg@e(jELh$gPp2R=y*K)s?e&OCZdmy{dxmo!8 zFTS*Wcg^=@c=>0O)S+=RRIPoa}bsLUbS-g{a4MNR}`27%5hY>eQj(8a3uN*o8+7v z%!5ezxXgX`x7jEs_Z&qYeJZ~Ew68~Opj4pVIXnd`4b9oY6Zhyu3Fg{&>`}efWlk5o zlyR)_o%NOds_a$rx018BR%~I-+L7EZV07K`Wvj0vB0u{H^wf(4rLP~pGml9A)VFy3 zbVa)SCmO~JQS@F|$q&r+=Lg}EZ5{>?v64LY+(-AS-omC|}Njp)X$4(5^qbwW8>YN~~+qP;T%!*Rr*)Ht>z zb|69B$$<`V@(yn2D>PkTw(aFLOu2^Ylyo|>XSOD#}5GEURPBh3rY@vcEba3<`GJNEsr2Q6T;gDVWgVb|E ziNz%hp)YdP5KaXr#fM*1-3|^(A;tnP0xTA7x4^A2OqIc`BMZoBzT^OerjkFM53A{U zP^*)a77Qn0ScCZ5NiXd1$eu_Oz1k=zmA#xqRKp(A=|G#mwq9|#@dGsUs*PB&!J#NS z_|^bUKsW9~PL>ezvH^9;VR0oG`p3GCi^)}}YT-ESu<{WbdE*ts)xyFCjhj^`uh=*) z$`eNVFLe0QC;5yW#@g}r=+TQhIcp)+hT~E`y#CtT+xOo8o$ci}-`nnKQ%4{#o<7;0 zK0V*Au5uIR`dnV{>3^M^I5`7s(CG_*1&mJ|^cp9dveOr!6^|fXww((wEWAlmvt1ym z?BCvXUq$vffeiU23w{-PTzGydXWW7!v0=S;rW3bL-i#XrusObb`gA-0{Il)i{IUA; zayx(gbo=n9|8@J*fAuf6|Mjo`YI`W2w_bg9`>k)kx4rw#Z*8x<^on>Hhg^u{_qOO^ zHgWKy3mhkTV&a>e{FQwqIBaW`u^Apvu1S6XdNTZQq4ucCmAuISOSw`ZDo;ivfPT>; zHZQsVQbNYifA9t~Vxyz&g*I)#&IUvK(a>!x%^sRR@)PefwAlWKG|shatoIAVS&R3{oN0-X)UjL_*ln*$#V$76aV4n zt^*o^3(eB5@?G{jeh^nY!&9{Q*80lN#5KGr9L6vBA@{k^{Wa-?(Lw8>blvke1Q;zW z1+{tnsWfyqeX7Yz)P>MAt|($ZKI(;vgGsdS^I{EFTS*W{q?uC0m=&i zd^MwQGKk_wNBr|Fj?QjnykSd^yW(|HyhPjMGz85D4|_@#X=BiV6%R6O14-qqUHFr=!Ogxv z62PmJu469F^~3ybRy)@+^3inWZT{qpZ{Qw3A!Lvp@ONb8U*shP*lUO1>x7E@B$;*B zI^*2*;-j5LY8F99nAqT~)?3$2lyY+X63(cqXM)8qL-R&2^S)ebHHC|R_M2;0?F%f? 
z@@WefG%W|b`^PQe@4Uzp+U^?x{l-IjP?oO*mpS3L4CtVNNY{K3yMv}Twe1MqJ&k8s zZ$9ESzJU?DK({=al`~3*TKQV)12jT!dHT=bR5ziw2#dk}h)({|TXfneiA3?TFhFz% zkntIOVi-sqCCjnoASvrum5#9KFvduY34_Z1X&-wTlwsA8Tvw8113K(@^t&CI!;UQ$)W$MW1K2%mU9TS@eLe_FXc%u4(n~`_&0f2d^to$(>oi``ljBhGunzTZJ9_Esdvn}-pLO}K-M@wlIsQhOeGNhq?NyE zSA1Z~XrQfSGz|JhzuUEcX*$y|H_R)~e7|NdXN9IG@8p#V`~fts^OnoQpcPORSsmxS zthR+bZByQw1BbkZ#s6gKzs%do)A^KF_@)0{S2;8tr>4JcN9O^`qJhk4#%|jEz|4gj z&&n;D^~(Po5}Rk-|Ld_O@Y#d8DmL2&a?xwv(!+7wj};sCyU)FTCN3*X9jgQwT`B+5 za+r4 z-#+}=-*0cc_V#vqqVwAo-%ffZUnN8KuxIx-VhQ?FuYEq1RM&^^2ruZhre7$_ai$g8 zCm^!A-uqAbki27oO)3+s=Ot^&I2hNpV6BF^>sskcZkaxhs!?_BZzIMp1k?8?-2dbg z`g}|2jq(fsq$}aLxm3(xS6t5?J=(tco$qY#z4q4j%a1?WK6~=ie5abvyeL5QIREnk zD|f{C`uc0@O3bju53>1y9%vNE4%aAD6bzBn@|FYyXCinxqjtkX7O5S$z9}h-N}Cf1(fMKlIK z=_uz#2-Y?sX~Q=%T|dOmVo;uvj{47DYl3}N8u8}?5WR;)j4i--=3i)cU>rJv=LOV$ zHe&y3+(d?CG7qSlk9*xz;=BDR@%;ebJTk=B2Q{|=bo$y?^_K6p7%fQNep!}s^@MC- zY-Sv=PS8Mo#J~`LX^a}x{TDE0&3)S~pFy?^Z~*>VH?B5_#?R|27S0QMLIIM^cQ0a+C+#NHV?SGzry;k+fY z$N&EyHn z3_95>oaqjeVbTUqt~h%WCWOi}7GDi|VZx7;89C~yT}&enyWEE)qjc~DaHW7Ua)?uS zbQ~my{n>KxJ*4shnPZ#)l+_+#hK>_l(L>J7ess@%Z{8JXo1(hVbbJ5l5ptT`UuCb` z%qm)+6pfj-_<~OKuvk`T0obs?4`fZCRIAUU3i^1*KyA%mEyPh1y?WeK?9f$+?C#vk!x@sb@@ zgKi~nOe5pEWST&_C`(i55QlV{Eg$jtAUf;;N1N@?2J2cIE%|R+L@H;bilT)@h9(O4dDVU^r&!?u9@>|juH z1|P|&J&0|cdtRp8brc$}-1829+GoJbJATtRHkorL+O*;beQP@6r`^f*({uUz*wgCr z{9-$Q`uX-B{_Ib;fBygYzi*#?{EO}7M`zo+@4UHvO=L2 z1}X8F>n>&WkIogx{;e$!7(v@tCPN{W}nlAOP3JB=b5Wm` zWpmJfob_>0+;lEhd4Z0b z`-n0*2-Lepy0N4BE3HXmjJclNC7Vr21;2Ph6LUcHiYMn#d{U1?Ej+U1dy5H!5=JO@ zDt`nWF7`Tb zU|i#Y{kkQq#Y$Z?cddP+t@4o;7;Rmi5Ug+#%>K;RB{!GSd9;20$*;F3Up(2)PPw_y zyrREEs`-L9?r%x(*V($87`u!+$|c#Ng?GUeZgamEjq8eq_tIQUnM%6!C$0LMvGhMf z-0LTN$sJ$I3}h_U_+Z~D9LeLa_MshTw>2xD+)&0(a=96s8IQ~%Qv%AVUV~T88cP7I zlR7j4)sfrtCISKBA$65Q2*w)1HH_uaK4H$C$Qk2@+~-qp=+mKfVIR%9_TiMDxC62o zd7_2Xd*(X-dkvP+{pYzN8b?uOX17mA#1S>h*~4UiPih?^$1mu)_M+WJD4|Pjm=_zX zz1IZ#L9*dRC%BeMj@kxEgT(e(b=~|82W4eupq>7O1Nw!=xjWM-bixrI%H@a2NW{FY zP?6Xj(Pw^eLp;PS8ei{+N_DNll{}t^Gp!?LTmz;*TJ{*DZV#+wz_z+C(WQQrSjtK9 zH9Q&v;Osv-X{r7%eIr=dZru<$!lsWok)1XG+LqbAv>$3kXO#K+v#&v|w{$XpLZ zW^4dTujz`|Iqfjvd}Mh{TTq0xYzSt#fd170e?Wl07=(Ns!<{!oUVyf)$43g$J0b0{ zXV2Sk*al>+P25Sjv3?K(lEt(?J`^gm_m*EX-VH%z{^ zCD&rB57eih=xlj>0yDo`?D#Q;2i@)10Jm|{i?5QIZ_@}g9XScxPO=Z^@?A0PG3OT>Iq%_<*YJv;6LF(YsHxt*kM zOeTn(_x{DUW21PxU-)Y5W8ppbIQQS^qHTje=X>O^hGGmjp0|{Ik0t%FHnApDTmsX! z#2g*2C{O?wB+DsPsZ88W!i zF%5&3e-pQVv%>=|bCum~f>8`VO>8biO|266LHHG+<+a|u{Xp_=;%;#0FON3YO`lt`5Iny)HU{}q_{0Q1WID31Y_IU5x%LAKPrzxRm-o7eHnT#t{5Qyv^D& zp}6J^V}C`FI-iqR#`v?mXkZia1}wI<{kVWb#>rIp+>aR5t#QP;{c=!Jc0S|;LQ{S( z5ScDj-q3M7n_;|93^T~l9;(Sj-O02Z^534hDp!);vX76D%d#on^# zINr-0>+%Rs+q0)z>cduZO=-VF>%P(&~YmH3*9Q?Hv4bz2YU0z|HJtMzvp6A=eS$=$(pY#q{93o%!uQ|nf109+*`2q@gu#$B@Q!nsdh5wb0M4QNcAwGgJX$Gx@ zwcVX`JfI701O6n(fvgiNl%2OAbbX`DZ|-~Swd_>~F!8ZLCGrk~n>jp>FZ9&I$2c&S zL$y)(GW!=BwP+u@ET87KS7atPm+dvz# z+dkQ#Vy^@3Kl|p;iNbTE=tsO@TKNPu&H!JuMpVHa{Dz|M7=Zo~|J~BQsunu~=J0$1 zM?3LtIa6&})69$moamEwR^x2IE0#&jKsrse9U(jucp2 zQF&Hu68xg~9#%Pf3rv5eJ;+NheQkT`_19IwXC*eQk{fzTIv~!5l$;cUw;cua+hWLE z7n7SWB_-+L548nT)FSiCVCZW4m2WMFZnZ6t^%Gi}GS6j@S*Oe~|8W2e*-lo|9zx-p z)*aY<6<_IdoQ!|AeewAx+ZUgGx}Bb#Z2pQ%`pU-12Igf9u_<#LD5D8|syCj#qS++w z-zvjzorPc?D&=8Da2e+TUt_@Iaj0B1TDc_yANsHdnZ7A5pO7?UD2K*wsOm75zqSz? 
zgJJ>g4hKLB3r-@0F-o5KxB5vr*NJMtmK{jGO!hd`ArCE-7g?Dk3ve(m^Sr>wJd_lMW>i?KVaWp5T|8J=%8OA zeN11A#?85pf&3mHi9n(R_#uI#|M8EvH(z~Y``~9k({;tkc6OpZ;Cf2(&!0X} zb?4B67htlJ;wGFuz(9!I?do^-O70MIBo^FXb&WcFs;N1eD3dq6TVYDP;CJ2g!!dYy|VYVk4j*)3=8;}kFALS&x=d{`_GQ$foJ^jl=)Mhvu}@Av_J z*eS3s?^CETr=(K@8+k`3lFd3>{$e4~_k&m-MjI75Zm^)R92`h=VNpt z0(&|xZJYEUuj1T*z4oNeJwVg>Tqav2T}a8!hB-9)!|#4b4s<_|BhNOgj2!Q)mpSS6 zS#|3%P3XE0>3{l-=Loi^`$K-VkH$#`{l+Kut{&-ozc;uEI0eby11or6Px`=NH>hlNG zpp0+**zVZyTrT?BR+V`H;Q#m&IrHu`iwy~eB#$5^{%bJp@UAO)B30QS88n+_0*dKl z8a;<4^G3pCyRACIcskWY zqGi%#aL13PApyz#?Mt~SRuq-+E_4`0&2@f&ckwoJPzb^{Wt-hExHV|uZ#4zWb0QY-JJ|<3` zC$&uHkUMTh30RbxPIE^q)nWH*q*QI@vBUO5iuC7_-`}U0hsl zmpX|e;hlHh+ur}(f4Dt-{cVN&-uC3`VmsI7@Qf3#;zJkk%>|avi3PB20Ls*B-WXvE z2^>brkF>gOey$#nL9_fA>llFHD&v+&Z-M*TZ%V5VDN8PIl+r&O(OQzE1G~YXOIQ|R z1?N;E>Fx%P7Y2x<)O4)_d23341)|^fb$=Zu#Tp)Slnv;ku2#1N zVte;n&-KxK0LbVBc*9gWi7Nd|?hQiIJWm+F`a$0hn)nzV%H-W9pGAwx7>J!zj8Fo? zj~|Iw(=BphmH>H*t$8Yy{|nK8yZz|jf=VBS7nr&$45q0PmCDt4kQ_&fi@uKK3bbd` zF1B~M?7!RQ1sH#K*q}q&1z#qEb7pkmr^Y-NI3mxSfUgriD5?Jt@AbklW1gffpfTwB z>_5|KJk3QY@|<(-FW>ilp0SSbJTOtw2Cxn>)?tC(XQGHQQtKvb0ASh3TJ#-@-|K4W zVlW;l23kYBwspB=h{}Hh>|7G+dj8!?X$%NpK~R5XTSWa)yy#PU&(&8EAzR) zY}F(5pJTUA`d$D4R$}TsSL7cjFw>y8yXB+HI%&N8+3t zov&lJj22EI6{GyeZug;NA&v(BXKY6v?VW&`XRaydE0m?PW5BFS89(-5xhd#?&CoDT z$s@C&<%3TfQHyVx<1Xo6#?xntqlhFe@qiY7%kTxRsp|kxfL^xJCu~Bb(;-E-TEsl2 zpDk56>rn7H4-q`pw3>&|fiI07GVmC%+dEU`vXPrR67sb-Uf&+Q{I%kPUUF}UKzYkIioEP6 z)jl*bO)x>Sa~-6j17FbVe~rHYvD2}2I1|Ei&#l1;pmB($^O^S4>?JQQ&bMFx@>kpO zN$ImZ$~C^1xZY#_(NEns%A-Sh(WMWoY%(kK#d&8@XN+Kl+#XmK%em$9A$#XAWl` zql0-RKwb}OLF^G7GL?F~vR-ggZOnh@qyv4TGKxL=-N62<^b<=7%}WK0hw_QNC@2(` zuM_s?56%(9hzA4MhIND9WN1+xxx_&H@dLe3!l45EZh%)YYaP|M0a2PRwD@nCTvOQ> z=rrQsu?}&xE{@{i=Rz!2f418;0Uf_2_k-FH$vEn@y zL#{*)ANyzU=o8-YQf?Wh2{) z@}b3t$wTs7fvjW4IQAG4nkLhe0NRm}8jyJjLVCQ8#D|$^pO9Ebz_bPE_#dXw?`gx^ zYl3t75BufMw28)wA$~HxET?64S*iF-qySpv(JyLeK6I=sCp0rOiLm3&MVn1?u$PR7vlrOXmJ;;WZL^--lSJ3)z<70asA#h(VdYPm z*!3E?_GM_$>xTq_yX1NQ37Qxi;(zvl@{m1u$3&%w;GnYboXdSq6F=wP^h5YlE?P5^ z+P3(bZ>*sX?WBjUeQmpuzX$O0O>=PmWEX2by4P_Ld8)`>XjE5~*s$N^T#Fp9MQXA> z_lpINJmoHkhXuRshTbtlR%E9wesuo>X)=_C2f7t2vBm`9coJm)AsB)fu7Yb~ENN3>70M}Ba9a&q7I*cd;=2oJH5`<(yj zKmOJB`4^vV-~85h{Z;qe7bQPFd$8pL#_Y^RkKefAc)+?#~P=VitH_(bDdz8#&O zibnQ{j#0u(0Q`nOH&DGF6CL>rZ@jbp;Je@1uFfyFzx(?Swog9(wRB_;@!;VL(!qzI z&bEgS9&L{wKUGz}t3B~0Nmc@?@{+)j+damBGSlS#=S;*K8c{LdOb-ajJg1#<1M3Gw z9)HMH&OU+uE84aHMjaOSG2?Bf%vorn!+u%bgl+@8e%9CyFNr>dO&W-;@?-Ude_BDZ zc%X?uWb+d`-VxGY1ZD9Vxp_!VjG~uW$G2jnUiOQRHp0VFoUiBSxd>w*Ht{V1{#jq> zuoVTGgE#jsBLdk>K=sL7=b);D!f;uhZS;g;+|X|tXOfd^)7ZJ2bCe;MY|F=Yp?Q*X zukMNEJdt>@?}{H`WpU^a)47hb=<#HS4*$6RQqgtUB>Nm{d@i5brtX{LztcA#vW5OKzTT$RE~b+lY)< z;pcNI+ZPW!ar^nwvHIa!>x&Y7ri5P^Taw{;#CnXvt5p^>r2bh2@P3JfvW-JVM)2{` zQGElzJ-z|JA_Z~S0a(E+4$%1l+(Ng!+u(6eGLKAQpjBtFSb9OIN!hTLJBZ*2PM7zv zr*9r$PaFP8PtcHi=up)MYqxh^4fX?AbpX%fKZtXK>7FM`n9(jd)`=Wsw!8x#RpWQ- zC8th9?)HrEHo>-faoghArHXa4ZvN z4{!5C&a_oXlPITM@(@WYJff}&jY;053Foy0Ul8C0PGeq)Mapzl=NlpI2@J?E4@DY~ zrNjbdr)Vc3f68cEc(W>VJbq1I`UajB%92YCX%@Z2*NfM%0r>*-_~Ze;l68&udi|~KM?d}t+iP!sLlTa+t0zyk$4?$_Pqmrb zE-$?S;6(#$Kjtg6M8%@rI=uk_iyeGTCnpza!A~#ZHX0n@ifD)j`~{K)yPA*_MLD4*7k$N-a4 zCO$Xc1nBc=0)Tc(Q?G}B4r}*xdEyS_(oiV(ugbgrRhdwG4`_77feLLVL1&VMIalWO zsB+c~<;`RE@*Bv0h?W||YMOCz#>kHjUA*5&-fM5Xx!r&9Wf2ks)<3AALADN7j*QCP z0G=394}BnU#JWT{x;vk72e37LH=*gjBG@h$DxdwhhrJz>?+}{rfFF9tRmELXSCB_p zIRO-{W5?PHzW758PQbjOZ^(}oulf`VKQ;A>pZ$D0*3tyMZ}>S()D*u62}mvxZOIU( zRO6VE@=ooy_lH_1jMBm5#x3E`7+Rsnt5!}mx!)b@&{-;OGd{r~kMV(U_>?2E$1Y(> zNW|o}@U(vHap)RAR{XsC;QZ&YQ#xpt9)CSPIw;#-)zu7bkqNMyHX%?CN{#Jbt3|Ho 
z=J~tHvDwEY>ZQvzRk8Z7*8}f=M{JbD!ZE(PBV!y~#!#+vsf?c5PZ*dZDiYll!d|i+ zJBo=P^vLx5n+EQ{xc&mI>ELmT#Z;&ZRG`jvv_4;hnE5cEb=6!|{JamK$oS_RN_B@3 zGMX3@6p+IlwT|5MV(re}17gW`;11k&a>Cm-pt;=6p1#=r^w0n2?dafSyZ_p$qJFd; z9gFw%d)v#)OMj`0alqTAcrzP)z$@yl581?8YeMWu@^9AH{3q@~V+Z3nIbhQyyZsJU z#PnB>5q$6R4oo?_`I^79Mr?@Q0wi!;Zy`NMm- z9Ut|ZA2@#yfqdgUPbUSjOHAfd8TIi^P(#tb#jkbqTH}r3Y7w3nBH^6dB9LK21QEbB zCX-tSn(0IkqL7UY84W$jDx{$llnl$qpBm(eW7;CC^m=|r4w-R~nKbyU39O;6WsD*V zS2SPb;&z88VHkI^$#0B#$wL)NmaX_<4%g|$%TF2S)&JVD^00t%9+2l+nd6JU92+&i zxxbV*Mk$FM`_cz0i^kt-SHd{@%->d*VNFNw2Xa*AoAwMM`)GfY;|lG4jR6{LVr}=0 zZ{Kqens~~HvFGp_96IjvfJ0{HB{GF^J2%C#$y)W1_S`o?4z$=3{s1)8MJGQS-|~7r z1+PqgYn=IE3;C@$f`gy^hV4^qpeMDQE(2>hGT@6I>d_M(%KNa&>m1&ztE+Dw{`^<- z27tngqMW%Qh)FF_fFdLeJP~4AHRdE$?QTgv5=M%wq%5G|h`=X|R7*c#`cBl1Q*cdU zYGoRjuBDDCzPQ@$2Fhkqan&y>O$hp2mIoyP9s}6hK@np3#U=rQKB(=;`z&brNv>I7 z>5x;t@V9~uX;{mFwl8D|E#Z9PMeGP}}#)aM$7pvYjm!5`k zU=zeVbf=a~bsC7Di@WV!94QzlXn8CID#|mn`dJK7EPT-KSboRIXQ&uppQ}{t+hHi3RN}p!`M> z-}>jbP)|;dw&N2m0vC6-ciw$>`-i{u_qSjC#b4SUz4^v=sSVMyXHR^?8v-j@g-o$Vcc1# zty4~)6aOUEZwD6&WjtI0u-`Uj0Sxv8g}bP(r%KZ}dW_kxG_9By{sR6Wp%fs{GLno#%MadgF^Rl^4{ylABzq zV+Y6o#0wdFJPRP-{kG7culw`!;5L$7K9%iX<2823z)>sG3bX5>EBmWGEsA%~^@5-wha>@7gGoMWw`(D|9l4toMjvqCC7-zD> zB7~<5C$@D8?8^(y^yenTo`#!rL$s+f{vdNMT=LS>{L<&Of@FG5PFNXrvahb-q>gX@ zv!tG!+}+;#;7i-%Fa9_U3bkbFWC(iebS(OW91WC^Nj#ttk99;pSl^I^6^1dN(+>C~ zQuO&fcUtmk8SGU`Bx|bI5AP?5d1ORNYNmHijd4buev2#v@{9>*OGtcfs|%#N*W1Vw zR6EAGWzeG+eYEjU>#OU*-PVqtefu#!@yktbsQ2jwjvWH2D|a0{#k!zdb>mc_3bekC z4{=Q2Rvo$cTJeZYvO;y+LT3r`fC*$RNx9oHeu_*Wc@p6zTe5ROiWA&x?Im7ppMLbi z?UNsV&wDFhpt79oQ3BoPtN!+<7B6UuJU=_p-cBGy0COkhFdzYo3=5yvfA>LjWSvZV z?*#(I-STVw^0-f_a>fni*goS^K9qsJSWctkI+O?~E8URW^}ro2+;NQ8JYr1K-)PMI zQ%&)w4>+1Xc=&L8>uv4RPVW{sajc(Tu)aXAKf4h9yZaQf?vn}dclvzzQD}fy(SavU z`}W>$|BRnJ#us0}PJKUjsjDsThQIW)06P5L?Vbf%P!ljNq1U*hs(!|Wj3cd$Js-g7 z@9N^k_Uy@%?ehHG^C)|I(P}@!+yvmjg!GX{0ppv0Bd1~1Gmn|m%H>k}KYX`B%d~lu z4(nZ$F6&XE9-tnnd^Uc6Y}kC;*|gvi=g<`|eAbOVXJVuOG``_y-vJVyY6pPi-!2K{mTg&IcE>}Ssp#oE_ZMjXoy1uypZnuPAyfm8sKdFX;y{Y?19-0h|(l5R~pKOSol#0IOcpO;YF3fMi18HCT1?YV-?c}}AMHdObi9h>Wx43=g z0OPs&IA0-8>iHC2p!8Q`2K}zQwtvKZna{BSf8r;(V+TlH z%Qs$ecboeiS#8&(Ut#Ps+P}^h{lKtSu$CLZt@ze=8*M|{ZkH86#;(0L>#etWlF#$G z)G?>&TE1-@@ug*PbDggZ@i=nV$d1+ljBz^S2aNdfFex~iVP#$ysqLVwW*zy06sKdeT#wIO zkbUI7xPq0oPJFBrO2ZCo6O=rUztA?A6cx%%OPLSOe@9@5k+4l8n0=PAV_nX4+^tSF z<=P;4XQH?Bl$6EkTrYI9(DE1B_Jj34v0xxK6Nviq;y z^F#8;;?DKS(ecJl?Vg^T_$>m5zE(q@XFTP#?OeM@gT@v8BR;R$B5;Vgi46sHXSrYC zJ_F}&OJ8N(Xup+YsA-hO@fZIXSn><`2hyZf6BN|ZNei)QThj6s1 zR5p*cuT$TXS$6xpxgz+D7WuE12 zy8&!QBwq~W8VCL1hvy^|UHN%WWNzwzetso(Ad4?o9-QUatw(=3RtN9#EIjvMurjv6TlA1PXDnn7(7Gc(z|6`U)_BJ06NtnjE%)tL&SI2 zn~>0^szz~nFnQgVf~H9daKH%0)JnMlLR$4x@QM1Mi+%=+Pjr#v#KIhjutk>23@CGT zQb*IM4o5D3mgrZu&f#pktzRG+W(7L;1z;*lW~&C{1l`Nl$R_g&q(1rH61e9gfyK`mLWhdMb)vN zZ(!Lkkoq7;2V|zHL<8|``9ooz;B>7P<@6h%+$>!es2plFST=Db#}*~qY^H=qxfeQE z$O(DND_4e%J4rO=>BQmyzWT9KEi{gg{ztxmFEn_gDr-C@l(3W7-_as~QEjvSp`d{6 z9Y>EP$FF6m1i!z*_A&=Y$*>p>jEd!z7B*=3-Qg20Hr)8;KJ>?0+z5J9c{OfrZ|3g~~T+#7?Eb8VDQ4ZY+KJ@h98A`kjBd{lUNgJ$1$P_Rd>xZ};xLsSRe{ z3cwAg)4QjB#WXQE;E3umEtTYwi(ahtK~lhP7^y{xKb3{{@6y{Z#oK)jp%Q2gu#(RP z*M9R2a{N}2ry1?Pd{lGQX&_FCTVy3>PF}w`2x6a-4wUHXr!9*^fG*YeW+nrJF{xHx zKzBFeo8qfx+W7rz&%3r?-~%~*ieILOKW(_ueb+d^PfKFEAm5gNpUorkwb@fU2hqfW z{=)#|k!Q1lyuv!RcjN;PDqu=vqENnDaoMFm+a!JN6J+q=K9CIg#@hjyTkZ>_3|~td zbnp=%$w|y9iQO?mVkCsdk7a@|u8QWThCf{|0?VO{ zkBV6d%0qKjH9)zO^x*4erYXEJ>Y^ z6EWpFezSfeQjHKNm)`VervUvKfQ~VMU*s7l3#lM9Nr+Tq`D>F*iw}_r&aIuJ&7IXc zfvMl(*uTg($A0Jzck-mh^a*H)Npt<|{&RiD_ik67K)E>&6-0JR?Ytpl7A*6Sk%9dg zm+>VWp$XhR7HC&m`q96K>T%IFQKp7*;MNrIxBSsnXdX%npJM3wmvPE^{^mRHZts8j 
zr}$l)s6OP0bd3wvIHe={+~439pH-+#(d!84YAYCh1P#drt)KYhb|VGMA!9=4ivblo z_n+#HAM(|Iv=Lk6;|g`wH%=bxFA07DJ|e<8^as7ek-yn4Ku*=Y=Vh+bX^$x3{kDg_+|O3+6kcJM;$3DQ+2Xb<;W#}Rr{K^eh84soK>H?TsX37 zOS^LW3xBnnMjswW(hn)|sJ7`dpE}mI4FdLFkPc#gn_7OhO~l3zNlA#`_M`F0#fz(p z%kAvh)9vH$e|P)jlOJ03=@Gv_FM5SZmLgODPl5U_$8TY*6vK#dmEl_E9RMjqYb4>I zrsU(1j`zKGI`#VB{u{utM*xM6(I7yt0F+bqpN!YU3uyXUJ7~8dd`ueOq`&6)M~zR` z6}6W>UL)zwppCNfJ$D8C9=!fN9`O$itf#a|uYwB;7vx@l{f+I>TW_hNG3yH}(yINF zt&-r(Px$!QPX!|u=2FwHLA~~^>!HDM#)rMM$Nx%@fB4n?<~}rU^`Akxi+t>-0#_HE zWB!qd7lpLv*a)ojjVrnP=g*zHSTv;SziJRj4q_!Qd(F(^_(ua55U~fGcB$u=+l!}9 zH-3KX{QQO5z_V7hT*g?+GxX8twN<5{p@Es+hDSC0rpX^^MM(Bt;fjRF3Zd*Pzl%+U z936qIw?e^g7VWV)4s_q4#d#mUV@nbH-w(Gt09nVZ+cZGdr8_=O_=7id zA+Zsi)_@$lMXiRH^#+yD_k-xrW-f9L0q88!_OZTsIWj#6#!vKno$MHhCVO4kBU-CB zFpjH_jGW^a96or>VN4hwJE3)t!3Wmzkl!%rcjq2Bfn|%yI+H>tYaHXyfwLXP(H{N8 zC3WbNr`$4$b7-CXM;m;!Ggh%T?eJPZZ!l@m`bhkzezNFpXrIBMBjYC*8k;U{PGh_V z(zihQ;Qb3_Xl%sP-+n5eF`fE7CQCo#!u95b&9MpHgeqHR#^ws~djj>~=&zQwrndPe zUqi|ni_w|5dVG%KpDFneo)YE#1XPaw$lQsaK9xlW_2_YN2S8Nn>vx7VuQ}3md|J>P zx7J(wZL^xAi%RlU|Lj8$m83tssW(mcT~tl_?#rP^Y4Si1P3l=^uz`H{_&3iV!Z(0< zPMtX=Io%eSl>2y`ZU_|?uOESqIbgqBkDt5q4}9q>Sb2A$=X&O0C*Hth9#!2hrYX6@Zc^(b zPb@62_b&z^94i$)j?m$XQuJm|W5y))NBTy6C`OKxP?zu}t$?xM$g}_DFY%Ox+~m}l zqKn;zt>`x*G-^;$G~PF3f&R#~U!`n1pGQ!`XU1>FRsTUlh-_W=vnwe&PbPfy5I@Fv z;+R<4Y%X#Y572WONS~6}JIG(IClYtYM$fBOXqe?`Kjip>TE3?)(5K7?hsUa` zpN@}x-JBcMd~uqa)Q&dSO3LQG;Y)qJe&)Q2tv`Ns3hczT@*X>{1Z+1o`I0`5Zy&to zh^Nvu4EWdTaae3m^I?iJv`xzP;dvG4~#{_Ih6=8ECw`;*AZ=DfN+L0qCMV4||{!Eb({d z_Fg}++rNFUhJ%9>xgr9@)EM?Ss=7JA;~>o%G!zlc7sjagTCIxiYmY(nW!$9y(TPCH z$kv##-;ANe5jse10t2&xYP|mPMNgbc|Mhu9_Fwpn{v115$I#t-8{6hr(%TMKJKfQoUVaax>+x5 zgOGW%95jjm@;o>=Kbq%MIa2FaG9D04pER zH2dx28vc~Y0|3ABTS9+9>~6$3fcn0`0M1Xqv9vdhl$ouJP{yF1tqOwkN*<&bHG^Z2 z5c6QX>~Ps2heR?`ZwOTiO?`SMtsohQZn*{44<28uoy>GD>;W4D39t;e-$qn`?Nuy} zkS{D=G%4`0FKQ?51xy+SE-`U?P3&fF`=Q%|gcpC|uT&Rw+Nb1_;EMVhgzJ${KQRKR zDH8rnE$t(YE>?kwj6hfCNCWx_YWEj@jhN;02H;mUB`g2?z$TrMug`_9{d1!UWC!$n z2gI46Q*N6eFT8=gQsb7~#ht#ZHUNCldXc@O1>F6ag@8q3nV_Q8f7w8PJMqUt=Jr}N zA%}-WPz^+t!veoEdvbE>ba1Ri3~(hB0095=NklgEVzw$*l77u@$o(rlr4ei*=fl09xF5}Tw=~Lq( zMpYhs`#$QojNi|Ox&9C%t1RgFtmV`r6C3?RoC+ik7=5s?Uml8gwihqXw8=Q%&M%ZJ z2IT+afBcWzzxeHcxqbA}_qWGyzP`Qj@bUKG!9#6U9{3yoyjg*R9I^8Q5=8n!>FUCZ zF!75$*{Cx1d|>etco;^!(60m6sFO2amNla1D@8)b_z?~L3JzL^3(k%)FHuOK-HnJH z3v*tfZBRW2NR?4zRo$pIOS1;0@bf^!I6eY(kU@rHPg_CyO9)P|#gGxKvJF5M?ItyJ z|K-3n8?v!ZnKoigywsJ3$deilew-jTh$fpR^wc;|yiE-cg*y4CV*PZ3sw!n@S~u;= zH6LEy$!}XxceBP+AO@xly=Aa$rOe!Cqju;G9CV4>*aV53cIYBlb{W3})R5yJYV2?6 zO+UuR&SOj2hgjoUUisT*R?d^r#Stj~36aCJ+iGZ?yRcsmtYoooLff56vVCAIx?ie2 zubihF^|@z5eAyFJ>t_q7ZV>Fz2}~zt(@xcOJmox~qvUVySzg6`5fi!~rf>A$>{n2f zpo?ALqLVsLu}4$nWf4%uJTQ6F8a<$Go+qU$7ySyXMU82?z~pO)%-FRLLr3YIuvcgO zN6|a76*uk;Z2*sse3wNO7*~8c0*?#ONerDTFA8yx|Tx2`)hP&4% z`>sHH{5awT3?|zV`va8Mx}fHb_b=F^`i`|@UI|c^U<=T{$TJ<~=B~WSfhKR9^Oj%s z?zgXfLUFan(Cw{vp*Q@f^*(+VcrvGJ^F-Pv&OUGf0h6RR z>jg2@dH^rY?CXT(LJvcIOQM5Zh1tzSIkX16Z%RENK4(GfqqUfGKMS z7XHtJmcQEdS^MW(?Qs9VkN<-G@$WW(eAQDh=nKaB-MVht5m@Dx4C!L$Du+xS3U{Uw z^DoyHaQ9OCXda-sI6qV3;z{4QDL=%+T+3KcYIOyUh2z!)q6MXDYGJ{$+}utbKF2#K zg-=?jDlxy%>3x~nNPulN0SbcizE0K5A?Z=F9Q9vFNq?O})o(GHi9#V6r`cewPLw`mhj!$Zy*8Z|iJ*A!&S|=^NbUb;+Dy zTnZ$Io_#sK=3*r0f8Zmxub&a&RoB|@jgRi-_+!Y$)~PS;tDV5)S!GTAjqkkm1A|t1 zUw|Dt`uHn++tKu`bI_)?Q-`GKfLE2Riue)A!be8S-N(NgLf5iu{gnOiO_gn5BU<)~ zqs%;U$)8j8?6-zh{_ua%5A=DUN|mEym9s08aYg&8xAT4df$pDTsQ#OzEokZwgrLVo z+du6M1RZ;J;wiozYp~h-j~+J##6flcNmc7nsS=Pp%T|p)OGmHbp+#rPgccg&LAB`{ zH2$GC4GLrX9RF5gJM5F!NucHTz9crqDER3Y+FG8PRB(US+85une_~w~ZtX{}r)5Iu 
zs$qekL)7x2Yu{53%y@J@=|2y`JNZ`F@Bdf9w288d0%C#viLGtfn+Ndi>)-V&bj|M@ z>JcgEJs~i88jSHzXjU7vKB;ykpNlSlesLEI?Y4RVVV*;((Fb=Gg=$(p;z#7cOPkj; z)$KM(l~8H?bfut8fp(A!iQN3`RS^K_V*g+Kx!xCa_TE_sTtCn;q>t^il%kG(>Ma`~ zRVL^9AMzBpITRu%d1%>A*&?LO%-roWG7wk0_Mx#!-J&!cUPEeR%MJ zw%CgP*wQ+?4z#W0B=D9ifOhDqI?%kXdpwb*Pu(WIZytX2=l{`fscpUipmWm|Go9tZ z96`D0M7w|+NNR#2bs5WGHK`m$ky2=2QfJBFgbp!$gD2d(o37+JST&E52Ou_3R?|y< zgBkf{fyvvrXh>>^yr#)-h?cAhq}xZnm&_N7;lY*_MvXw1JU+Qd;6i4~OzQ;vOkf~H zw`Cj1^X;~PWS9im$_b}8SX3(Yw>sLMP4NS#N$G)9WJ$t1p%Yqo@i+%@qwXkv_QRE# z+r|<^u_n7^IfhgO2YG@r@dD9wq@rDu{+*gec9`$br^ppCO#_ z%(tpF7WVn~>X~d+Y2e9Ui9Eb>&o+4B5fQWwos?!`SHDo|+#gv84(c2D9Dumd!G$0$ zj&ZY|jf{W8TzW1pxlwt&{nS^!vi+0a_|5HSe)gxg`}gl{XU{LUC(oa1anS+wK_Lf*Tzq3}Wd4EcMK@+9e!bv$^G8apa)1l4RRm92@rVpQ zk|vD_grOghpT%^#|_1ejm0J3G&*~+otz$TmlxOD_q0j;m%shn z+yDEI{s#@=>+Ru#``cTO-Vv`P9BV^za$k7K(LZlt0Q|ZcWa5W5SeJ|?#mi5nK`Li_ zaws$xU>qan8Nhg?!5IW;_nL~|^1~O897`^CtM0LiF7&I6D3K&)(1TSBNor#ag~cNL z003hS1Z~ikE}BiF55gu~HWF2Kc5MGmEtJjo8dntHl(M#0KOs-ajOm{uh;GsAlh|eBhV9aaPxcqPDh3IM+U1AVUU=5yA3po3P2sL&!}v)D(~@%i$l1^XY&&La2$T( zu81|=ZP4FL(OYbDlzc8A<);2G5V^aNYJu4|A??lpR}|k>{Cw zrIR28X9=y4||D~VUjvu`Zp^kx!ah?cHtYcTD>c7aO?}Y|WbJOgk0{a7J z^`DzmN1xkCq1j1kfTE!MH!Qh}eC(%!A@4_p<|W39ywQJ)?9YViKlnU>PgQT;C3lgJ z{Zuewf}Hk)yl70`G)vl-^EZeeLu!o4mdA-|4?F44G~k1Fty>ybea_MPorO?v0tU2z zwwiY)f=ny4_EcBr=i3jy^PTOJAAY1mI%D=wBT~nwLtbr|<4Qn$4sq(%izto#B>gMJ z2HN-LlM{BZCYXSK+krp+i5?PN$KKKBKeB^5HdrvRB3H?C5(%+q$fszu*3WLD%{X!u zjK{traGO8$r09F5Unf9`7c|OfX){vyFMS9ca5hKf&UR;_6K>AV{v_2b^_IJ z9y`!1dOEppNTt{Fzi2r|+dS$3xe|I{Uu8B87tql+Vg;=EiU-7J-NKoVJqdFxeVz7_ z?VQoUe)@X5cyYG9IDg>_hnGsepke*YpHRkkl8Ufb@&h3P;s>6$OMtQoxL0(HOTaFN z#ZV(&bP5pj)q@wgb$nbwLft3G@BXk}#%1kaq#)Nubo@acDBDEIj4Fe7i;_{oNe<1T zW1O@9;}bqps-2Rh=%cUIFjm-WnVDApncH;(n~qOiMbiinz&@IJ#C!o~f7p*I_nz*; zWN2iMXEbZ3%k(Qu0DfZ|(FziK(toZBz*89>sLGhO%4&;TbOQLDwwsW)G^QMX{d=dZ z6PBIx2XwU_WG9yuF5@rj9Fgw9lH=cH1Qe1@>~Q*U2OYyp+n(7DHC5C!x2>%J|T zDG0e~w>lSTD<0ENnpc5kdi>~EfF8!J`g48(A@L-ArmUWS)t7g;ekOSt3*z;00evPe zS*yIn0h&P0A56!%82FX}VoePt*WaKeUVs@!9y?YWNG+#*K2Kdu5mSCphg@xQ?2rP( z{yP6#{Y+bYT2r9qto0Qd-vDoN3ch}!93AFlI<@-(+pe5>VojVIV?K+t*C_n4DAT>v z@tblV>)oa?(%;(4jZY{6>DT7Wb67ZhIyhoHK%;*t@*2ekj=nT#eP@b658i8tHQQF# zky}1d_B2r4R9shu1Y2l#n|QH{objWE#CQ5ec}>?VqF|V?h|pB2J+|Z&E|c zY}}OFIvTp(vaP%Por!)4?C`7qT0@u9ocN{>ROb9)5OyJX=7q}4_3jVnq@u0skE0GS zRvdS9fGA!AbV@DH`(I_Qi-(N<#aE8;U2Z;q>W`SK$~S2B>G5i^E8-}<@{zh~$=wK$ zlcWf|d~=(x2S$cC(w4zu`o8XE5RM2MiC)YsUce^Xz3?aQY4jIP*f+8DCy7v_rRa1# z7V+ATwoN+n8`e4I*+OU#&-xEd@i07m&&I}tH+u})vyY##R?hK9Jb+nfB7Rh;gV@O% zR5xwD0o?k0eOe_SXSr5Mh>mh|jq6bCwYXt=c$DjYpFH_w`^I1Wm0w~0=*>5`<734~ zvbf31TibY2;Lx&ko<`izX*<}v@YA?NPpQ@)oMG*83~W!#3=muBg7I2oKU43u%C6ji zMz)?x?vEb@+KTkwNX$R!e90Hlkmq#?*`4vhxnxSVwfYa>`ipa!)#u3`1IU(b{Ol7t z9ebM^|1w_=w2v1?eRgzmyd56rZE!DjZhZ9G-R;4fZ*5=t(vNTVUwcjcbFrPDow=WQ z0GJ0kxSoD_d6uao*}fr8Ur9GZg1UXM;`r?$XC(SXr2Ug0b`{F&O|>xx_;Q=yAnS*| z#a+Dq(xzg@gA0CZgj-R=*j7Q}R05;Km_fH1@R|NK2SyVm=2S`OAUl3#yDHKWnPkit zXo#R3Jq%=5QB1108r1rq_(3*ahcXv*;Hi3^d zb05H(9R1WA`k@2t@LK0{7{(2HBcGgeOn+fWe&-$l9LSLm3INwXah3A|bn=h^kZax7 zdEAfquDAE!|6u#4zwsN}SHJf6{M$$8 z=V#kTAN^pvzSKe@{TCN!+u4gV(Vj~@i(Xz&a(r@_#Z7)?(P%qZyol4%e}?44)&+vv z$FKd@9gB{52lylqRn6*6R2E_&zyI_(u-J#)$U9M-j!*cuKCk@0+@5GN_~PujXb!fs z^XJ?D@JIiz?RS3XUpZE9J$|&k|L*(S8?V2i4dop_AmArU{b(W^^h@!aOK#%Ag(dY$ zyj9#ByFMWMn5lz^YA^37-r|QAL^$x@)j*lf!HRarokn8Bg)H-{s1Hi471TMl`S*wn z$ixPa2$X*44j6Zc!ya)%uYVup;1a3%?31O#s!t6x+Pr?!=9OQxk^+;tccI5bW#UCw zXxp#u8))ojLHBj@cALXmn;gJs{L+}QKKuo>*foX%Ap297KU7EmK`%b!+nA8_anKuD z=zRQPv!QrNP=51Dyq+`QMCW#GGSnoF@L8bhoMc*c@a-SD-x5FzCtk+k6TX&jhV=p_ 
z)M~@dAO~IA0*By~648158}SisZ?Ixh^z}Lfbbqv_rh&eBb|LNXbsKSC=Q*&?JL&_q z+s-T8sd-5-w7ib+E=>6)qxKI|fov3GV@S8jY4TivRkhjCtMYsk0NH3HeYC`gi11@g?Jz>d+Tz7jMjhZA-B5hD+1vIta09?} z1buWOG)*_@QNjFcSV+=;+vU>Hfnvw|Bqzh>DwEZuYz1G#Q!#`?W_9d!ryZL@;8RIXU{GcixevVJlM03pP)%$H~&%X zzoGvXcD@pR^!V+J#|ADE8g}*On5VT${mQrrE^EU8#$t^hH+WoWJ@(wmI_Qf;I#IiL z@qGKicfY+o{p1tvr??T%o`91?PRMIc0p9DBk{VlOkv|!wMQPx*;$Cm64uri<>b=j6 zJw4BC7k{+%1yE>2PtL!Sg|c3guK-Q0POt307BB?Go5^XTmC#rFK^Gp*$peyApU2QHL}*JH(>L#28w zRXj!LCgg{jO6s!LH`k#9P3-N5nystl^ao>G@@S(v;JECDIy6j6_Qrj_=qaq`JkZng zv88bSN0}oS@zR(UK|fT455vEePnqYN;+ONfKpq%CH-16e%tqU9^HS$ieFTs9;HW0h z78gB}ZS>GOx^D&B&&Be>9W{R)`-;-+yncp9d>&U5@5WZ0<+b~LOqx;*#q zyX&XNFn~WP8K7C^OnAldcjV#4?p^+r8`W($=>pTQQ(tn1I->@?wgAAWBRkAG7 zoFIf z{QwSQzEQ&-XMr&%)S82%O2i92ef?vx&$?6}bMY3CjKq^QU39%(iY;Ra-|{#7vu^#X zZdQLpzsEtzq!*!4PnN7Q1^$+%BH;GgFto>CYFq?wsa052J5#y5FV!&Z)1DLx(bGVkhF$kA##w zF+e}0vBN+t$c%UK7U+gOf{NK^L9T^Hhids8wrKoOX8-I4qm_2&^(+Ng58Up4Ar3X( zg&(}lz;=KRx_3hoa-`!q&?XF|W zTXxyk@&!k3^t%Hk7a3fay?V({1n_W%>Rbb=u?Y>jNd8v3ofVG0tGF;$+|RCOEPHI> zD(yTxs)8`}P>f{L2X?9WOw2d{8M!7&QCJ>{p0iY{0hb zEB4Q^GYXW*ASwOywe5@nC0}y>dW448i8}k;#8Sp+u8|>&wu}d2 z3$@39bn$S23W#@%LoKNDcXY&$LSvut?OOm`QtH=Nq3b<)f60w6z$TyZA@P<_9`YHn zEm?^V2tH~*&mVc&5E$N?e(AT=)Aa@bKy2MnGwwU(00<@1 zNf|sClt$yEZStwS3)GW1bsaac!~m9)N)Ntbvp6OUGJ_d3jq!=HNhk!jLI*5RG}L#T z>+*`b1;&K(3Y2m~b=0oUU9Q}lAeFfRZ~ZH6z!=-d;0l?)aSAyLw)wPy@}>%6Ace$n z4z2;M#*0=Zv!DYdZdEYF+hst3c9nqJbmiV;K{u2ORIZKEple!i4e&G{x@dHJ6RpGr z?qEUF!Dky(#V)5R6B7WQZqxs$3qldyzKG#?6yFVir<35b`cLw4c?FY`SG@Zhi@5;8 z;iRF*=p3tBe#D@}1O}n;l?6|A7AT3l)IsyuPZnvyYe=4Jp*cUl*k0Gc^S}Dd-`sxX zzxYSnqu1Y*h?m=wCr|vEd%rSSi`2yxH?Fl1V?-^4SuCjQ!~S$aeHO5`bM+n6Tri=n zh61ygzSHi48hOwuSJ2@1Ny82g5c}*eeF!;ZWv>wplG8U%p(S=a#YZ~4w{2%?n;nPCVc{fTzKQN zTJ)KIM4{!1FB=X4;$;^#KX_tWNHi$-`X4DKazS!DKPXS1j3~dc8}{%((rI2-BMWd0E4i*@OH^NVGQLO5g=-&W{IGoBjG+u|#&+;i-wGKkGv^z4 zvR*0QU@O)kf9h#SIewF;|FoSMYipo+R^AEIE_g+w__Z3^C^IB$QaRZlHGpoZ!uZq4 zebSI}x@9*Gc`&l;Q@>>l$XH`$Gsm+TC%(I~!lML^@gAhA#D})kCh(IV`6-fna=C0w zoOiwHlc&6&-{D)tDiROV70(J&AJBF&hZt9E#tT((Bus&>v+gAa<6McgpC_V^PL8$< z&99@Ad)s>-{KWR=2Onzqb74?=k56)|La)ko0eQmhcE?ATc5{wVVp-dXsUli+Nu*xw8B2lq z+w<9(>OGmmbCZ6rz862rpQZmMl>Z+0K;n{{as|~F-6bLI^|fz+s*!ebRHIJ>{8l03 z@9C#M*narE?`-GnyN;2qq?2Z!&^sXn5&7#d*k9W4L4W-Fb%Qle`3W>X{6o2fU^?rE z%gnKgi*wo6v507N6EhI{7q9&!ryel_dY)j733=ebx|)!jn}VXCUBo$NnaOP_fCE}# zVCchtk%n@&-vjU{jSE$2_iR2Cqx5nSfTJ$?z5B0ikKcNGJ9+Ij$;M|M++Y)w3qkl5 z31!P-(+#lSl{`*}Mvj}WhHmRTe2E|MYTkA1q9XlBaL~VhoKHUdxB+{5_U9kD`S<8< zK+!;Jdz!xW?&+tx?&C{u0336%ZJ0)AHGQ9+^_uG{9*=s{A*0IG6C4{VG3?W#u;&idb^i`XOYuWL@!;0_ zh|A(v8ba50n8x*vL(@(hgjr}R7WxC4FFY%Bo&6(p?Puu8SDu^vw3D+7rXW7WqN!T% zw7Xl9Pj_+wx#O`y@JYyztj^Ux8o=DRo3dh4)9(X(GvRO3XFV`zoY3C3bPlZe3D{Rf zK_5ptp0~*Xs0M|dlZNro<%WH|?L#2_7db5#+SS)?n=(RA8@SM0zha@O;C#}TjS;TU zi`L_obkom-lNg9oDRSK`Xq$3wZTmIm+s>w2<2RCHQ?k5fl8~cpKoM z?bOlFnzIvxTOctc4qZJPufn6mF!ml=qW2NUiV!v=G6!> zNyej&ofujWN|NF#`yxVh+fAQGhEvJ#bv=pVm=0LJWl9$`qSdT4FD}|1#T2a1-=P<8 zj;%8x)0?BRDS9I>I6l>Xz7`X_O)wE{=s~v1%KKP2uI;MqYiUCBO>v}}*e%I3=HYRc zaO)$n!ye{;ZmK0@I?qR?=)KSJJYkHL{VMZ`4C2N0LquO+TyM{wU2I?f`k!y#|NeKj z_dfVwyZ_+cww>yHfw_8eyj}29f|AWX;N>OjwDvBl@C7vcN^lLay@6VpFOWWu6>Jx_ z_LA#(za3ga=kecf5CM>*SaqBla&1?XLw?7Uz7XyudpFtCyfMwNFKZWwXXIvFbc$W^ zD?05bAY6tM?a&_j+tBQRI(GCHbC?*j|MZx?dbu5)p5~_k-hXd<^U<5z<@MF}@zW>U z(*aRuE^i{-{LO9O02Lo^3+SH_%h)!lbsQh_&GGB;W@P49Od}>rLeZyL_i4jE zY_%G%?Rof#F{Uvk9TnLQ8U=y~Hkhn@UH_6PKnWx*$ zIevc6Gsc{Ay?gJxDKqw-@0@eY(a&bh)x7JC&wK-3^sdu&tLE}0&$h3o5| zc>ptgkhp_ufDRP4(ckf%&#a01_neeO@yBN3#KRTH6^J+I0E$3$zX8$BUOzrW_M6M> zahU^ttA=QZOcYCvA66g$kqWf7;*qhprf$!d42esupaSz~CxpPk)zpU{+cu9?ohluRA~ 
z`15%KzyJkQ#J*Vl4Zs0qU<~pYKt9$SL1ZS$0-gUS)Z7SgyHe_WLf3Xs34QR+3~u*A z7D!v^_kfyU8=A$rBpJDae&@}p>KXtEhRU&{2Xgxg*k9YRq!B-r=|lo#mW9T20!-iG)c1~|sOta( zZ*CN7z%sGhj&zQ}#iI8Uu(qB4&ZYpE@{%kE8uD(l?Q;SRy--mGO16376NIkt)XB{E zbwF$AlS`K6)CD}67hMfKwz`>u7eAxv*IH-=^bg_8xGWjW6IFQ~(K=3hQQ%2O)||(W zpLoGKdvK-&=5o7#dVl++U-*UX=YR1ReFK}d@P}XgaC`akmA}yryuLX1>sg2?2Q>ed zi|Df8vPiI4@U&1Zy7D&%64_Ly^a;Au*5D0Lr*K>8U3M2sSB zqtL+QysVeRuj;u&?Q#hgpoRWQ9g`It@lhZN@Lj5Xu%o|hsL+a= z%ZAACtMEnZ@eYk}ozELs^Wf_?E2Y9bWxT*!x#g-2y;pSsXvBOTXuZDDX8S-?tQ^jw zVS{P4dBvElEjwncAO+X<0ruO9lT+FsbA>Ctj623$bdnh3xpB(aNLeVD5_R-_vB|P0h*nAJTsXWBEy%GNI`n6}qf=)fw$TqZV5Rm(OIQr{OR_+}!FS~*ksfM^*`CQf zZHEB4&~g6aK82HZV&i$AH3Kl(&)_jQ<5jza?LN16sy}{XQAH7V(Ciz5^TP&sG%8qW>m)8>Y_6#b=yI z34uApH+j<+9k=G=8}QH^rkf!A$xDMW)=$bx-tRTAGrKU7D5>}>$YxNM-! zbj$w5b>e3}WVDX2ff4WO>t*~j@1YFqsl9E#0RVlblRVlP=gAYZhL&X+hVmoQOdA?_ zLl>6tP5YF4u4(;}4a|eFZg^!3@6fZ1D}RnQ&VyrP=40gh*dQb^X`4Hb4UyBR%o8f{ z$L9#$D2R@ODtY*)JkWXCn{Lfh4&CeWmAImGlz?-g4>Bq9lo%tA@#wLyap7Y$6cRM%gTQ6b(8#Xr4}@b4lz+b7t#gHw25=+5@y z#mnvA{o8-*Yv>Q}-^beRtcGzkGHdy{*zA$A{-iHiZ@^&%$0e(mt zY!Ffe(n%?HrEeLFd02s@aVutDgUJNzJ7RAuMA-UGiF;5&@6-Dnzl9vyFAF4pUszGbNtb)I2gnq8I3y3}Q#B+r| z(X`E>i;p>GW4CsqcRAlCE+SI@BbT{F0-$fdJd_!4;|l~Oe2Gu&4lqXMI1#u%kQY0c zzlcRnd}bh;vUo%*87+E zH>0m_{x__$q7PkFkt1}@HRy@ZX$NMk6zD;6Bf#HngN7U%7|?3R1Upj+AjcOE&dR+Y zF0`%)?6`$E(L%dzs`&8wV>kFIJ~~;Twmh*zkq=^R*{G^T zmZ#fJv{-O+{!|+kEmhkqP5z7Xi|x<;)Sub@>RY$W*{AwM<3}0Y{ zS3Q=!kPaH68xOnyWiJ3FeNW~xjkpSkU(wJXdez3*t{BEf+7jF7u2><%yx^cix2Vx8 z@J1OPjk}ys(=J-Cp`}X#ax*|UHlZT%=GW?3JEI?<#HZzRV5&L88aEC~l5NZAckJSu z9q9JIBoH4cWLWoqe8~-fY`g_Mj=8Z2vp{5)82|7=88VQJXxl54I)oeR{w-RSTfgTa zeyYd@Ef02BSn}F5VLAVKHI#Ds8r$M0^~7!ui12tthxGb&J;vjw8jB}=NLe`7l~@~0 zLwssJP?Z4YA>$L^%>@Qk!63zw;CC0y`5!%PyJR4rej+~rZ$bpv5`Bt-HiMAfVZx}H z$b}P)$@z1=#{wAoQ-0V*aYQzp;ikVFk0r-h?;A{ljOaktO)z69?%_|#a%x!16X2PA zk=MFOvn@JSn6`-yJ6uEN^s{MbJhXGw3(%f~uh2$+=H4E9?6u6!qj$-DEJauR${bVK zjm=XzdiwZhoKiQ{jku_>AbnbJ>IQOrk1k7L@Ot#`ZS|DYJoDP5aY~>2H{E|nfX zeQ*2TkN@%Q?!zZymprZ479$;&gC89Cy*5OjRodXBbUUsU@FfOI2KLV}JE5d6 zwAZ2W(Y+_zE5B8a=e_X5*c*%nU=@vHA)3fi*(YG&O17U!PQS%BGgfnzQ>i@tH&ek) z!MhO>zr9>wE#Cwy{eQfE`&(-nko`np7xrFn3mix3$~W;7OUGFAg3-k96<)r2zJ315 z?`a?Qh1#xs{9r7e9OvPeSjw1Yt)+}^HaxU+GKCWJyY#BgPY48b6vJna2gZ7i1^E-s za@p|#`NU59X+F^UNRt2rvOq*f-sqW+rAN$4bGKDe;k0D@H-qZ8iHwlDde zcHX3MxxIMFH-2=k^MVHewAa@kXB(Ki@3N{O1HUjIT_ry?koXR-bPA0;%=0(HN@iHo zo(kp2x#9+-o3R;`tig836c_g{$0KF-)f3!8kP`X&@Nu?M>mJ4#s=VX zANC#uuXH#@u?S4D)OXXVrX}wOjE(CbC;lU z+&VR^l{cQRK-ASxc<^HOx`SWOcHDMB5L3`;$vV5 z!X__5$i_y#^_RI|n=FemwuE+f9mw{_=8hS#?=zQ#OWkrbZY;l!dn-NV*0F<{uhnydYZBH&)=tL|95%196w+&E;F|r0 z!c~8VADI1~gb%V6+18VKbf-P*y7p9%40)X*kK9Y6i&}52PHkKa1AOdAaqWZf3WRbD zu~Wh6V_+#&S;_qeir5yPV8|$N6Eeg;uJs5kvdXr>aW=8*!9fpUjSKdi+V_HS*@f0o zIV{NxL$>3J|0-8s`ZeD)p^ra~gJ$Am1rcky*gVP7E(xYILG0tFQYm)koRZ}70HKPb zZ>f09m{JXl(XLWgu8Q|;uH&=*9BUG`Ld5t_YwYqGYT&-kA{QDSbvq7U@t!9C^bBi0puKl|e1YP)#-T6JvVX5x__9RAJU_^s`S zpMSR9zyDx+@bI3*X+|ICn0<6~D$$b7H*9$*VCcY>D_^fp-?1J6c+dP*s+e${4}VgZ zPmXEl__KiIx8IHffG?Uii6g!eLlxa&OWfp>S-^v8ya77oqIXS;ewAHqmTs%qE)7Dd zOq`k>TH8~Ax`Ml$qpB@mtjRjkK zSFg8=tE=sW@>efjZ^!Bz@7Kg%L8-n0Vz>K8iE>n@ZVKu`cAd8MlXFLWp$iEwz03IZ zo0P>Hds*wG)BOgKxZMYlVx86I`!FK$n~5n1jq0dJpzH)sO^iOzW!~$f_qka^6i0N7 z4v60k8KVex&1KuIGSjxN6T*=_uc+h;I1&qrV2*HG&7zZkK$`Ub|=fj2+LX% za^NAJ@|ZbRHg}#I;i8ay=49)&pN_H4XMDpw^!*S5H`o0|PVuxX{Tmu5X5;xuY(HFL zlhCJLVNDj`vmbO2UPH5F;g@)s&oYi{4y-R2=ATo5QM&p~2BLCobM6wI*voZ#f9b1k z`a{p#1Be6qO($XffHL?IPs?a~7<=>&I?_h+!x)MY4+;oAKKiLY{h#^`05v8Zzm$U* zLj5`0JNeb=;43*|C{xW(pC^KmS>+Xc!9jSZ!U2P{^zUFN2VKZ%y0ozn?Wm{^lC=1f 
zk#qp@ImB)MfYlziE$s{dZ~O$1(Jov>g^mkPDHUIDd?B$sHbXJ>^0DCoyc792Jlr}+F113!W}I7qvi%W8hp^2bf#E% z4K7c*U81vZz^@y^02d-@$~Ye2!D0&q)nZ(am*BklZxox&~awd&iGAy z)6YQajN{nhH_Oy`Ko0DXGJeqEgSBMmn@z~kSf^i09{TL5kDC*`!7p2Eq2}R?CruF$ zd$NH-R%n3G0p85mDgZq5tzsZFV+U+xAP=(mXF9Dj5ZG4Ltw(L+Gev?hHFaFk_Z}13ghvEu-&qy)GCI?dPVA2KTUg@+qq>OagIsCPV5ZaQ##{i_VGJ5|=F%MGj<&h5hj zs_yCA&p9Ee%LOOzik(qksk2U-xt?CCIwtj9u~%vE(7@2X-xbB810lHDFKa}P&C)@hqznl4)ot7 zCs6oX?43}097oE{*ZtS>*Rip{HThk?KK~3q(w-j$2l{V?Tj>6P`iH*^D2j}sf?X&M zilu2YNd4kbsN& zu+3UOvwx*nLEK)M!0NQ_G#0Ogb(W^hu$H%AJlV?x2M^B$_cTdkmB)PI9tHOWm z_(jG7xA2XS4Ep5j@h^~*eEU`qKj-l;(D~l}^attY@(w&`(yz3*m5^KX0j)te4${>m ziTWU^vh?K&zW8N%__?>iW6f0&`25_0EYDvk5U=-)M&)6S^XIR&tILb+<%^eopqH5V z_wJ!_>|1`uE&GXUZdZKme#JX)04M^$jf@fSd{!LMgBsPnzpV9hunOmP_B;4ga}r+f zeU*EkgG?Or!!kW@T=a2@KLC0i#d?@qE=_$ww7=bzu|Yn|03}P(M~{W;Af#hJyWGtfQ#prydLl!zU@7b>9UAGA(Ao|4cV z{cS_#HQBD8Kg>RFKn?)D<*smZeUAg0cTb=4p$sV-cC2mJ=!Rd1Z8ytZeM@=LB~b=5 z=i@4)%kU;1xh~am*M0lS+iGVHzxs!tfU}&;Q~DK|(6x@xa}K=PRJQO1ilPBP9nReq z_BM$|t83#d-}=)ItZjg4s@-GM@uN)%d2Sb;p^Pl87b9mJYi(>C2Lg|8j&qiC9gxhq zy5|}4i;Uqr^{)^bV{H-=0+g@cEQr4?CD&~OYaFZzI_#)=vK-f4G=1o*t0F&P}z2iOOS zAyHEV$Jac-XkgD`2_~|eY2!1tHzo&9mHohKWbOP?(GRRRM$lrcWJGkqB2Uo#&1?|zFMO{}zK)9+*~k26PC(~t>35F(WiQUnPESs@FTVJ4 z`>o&l4V}Zi*q*)jp85d^*;l-|)Un5gB3tIQeQNhLHrpYyl!F7}W1Ulp-g8^cMt?0d z!9Jho59mWTIDHWB9i(juoPct#yOBumJ7WBWxEiz>Gp22fe(CgKOVtsXWvGD~ge-TpW0;~}y_wR4dKKO9^)_d=74<6j}_5Ca1 zc{tFp4eyAJZEu2W31x? zg})5uAZ09MzBA71rZjWH0>YDN6}jjxt!}p*5V9o?6$twL9eShmQ6bDa7zrb;fCpdn z@dZ+#eaBiH@lXLpa}Zg{Ty46b9HrY zJl7*RC*$0w{V*Llc-w`YeLt=J39WSyt1HC$DnHfwGBBVrQI_%HFnJ$^L-CU+9ogfoI^X@YV}R z^yNky%bswpV*#sZa9@e%?xlE97Tqc&VJ|XDr){dbNgCetN`xPXkhGOtLPOczg`8JayA!+s230H4Ri zethxjxekWszUj~6!Yk`D>CqQEkg?t26*}Oh%XZL?9Z-PB{#r0-cV7iu*4Xies>V%j zs0ycy(se-Ql+k*}I7T=1qVbrdE`f_`1NuFFiEQyC&w?0T0I|HdX@aXJF+=I(wD?5 zdV36J*ka$zUDgK&H*Xf*>3mbQ)=&D_7X)$Cj4pbEOBHgqn@H`WQf?06 zJ9+HroBA?Ldp)Bp(hjcHzI<7hq5aCE6;q#x5RjqiJ9WquadPk>pleZ3okM3 z9eSJuK@3%Hwx|_I2k{hbe!4@nYH_y>pKZ*N0Ojr%#o8>;pey}1Y^Og>p14Jxv@Efo zP;n#4fQ>>SUKWgr^EslwrO7EfmA%~c^&Y>Is zLw}Rcds)?gr4RnJ;}5t>#woWv5&%Bq%Uy30`iA*!@0=J*0qT`YUQClfRXpMJ7^{@Ewn<;A(zb@Xy_-DQryoUa&PHx#}8)Y|N0rCn0)ton?~o&1DTZk_@< zp3AQ6!vy3^G|8Tjvge;CE(PT}mRqFc^?4g@Go0y{9Fq}Q=U$#$;pESIU;UHFCQxf( znll_Dr8n~)pU68UHi5{Vyn(cHyq7F=j=fWI)*$v-rziKeXYW1pT5(r3CttC!owB@gBDMh$2~!(NeH0RJ|%%v_IyJ+5qS_n(OsWyZToZ+SBc7h^P+q4R!F zZG96T@Saigm7QPaZpOZ93L|lqa`yDD!$b-1^;xny9vp{|XE_LG??{VDgcF{1pzGw= zqRif1(b)tZm3) zK0+rvuV)pT?V!EJ40U9)hB(R|DCC@*=asHWXbeohg1mb--xPu-Yj#retZ|^6z4Z$C zn;f6AmXfB@_;VeCM;HD`uf`Aa&AFO7=C|<&^6jf>2jeXI+xG0=BIhv8F)P}#9rTdb z>jllm1N3S7wfm3R639C0K4O5XSU+Ok9c9=b_HgGT5e2khz|>x>ol z3d(F^`mX8hr)^y76uHK6r19tZkB?c?Aj`E>LEAyDlxuCG^z{)XbbH@PJF!fAL%1^r zY#TC+6BrS_Ga*x%jZj&0c;OJBBPC?Aw#CNM8zuTvTrrDvk~YK+F;qGioo(($IvqA- zdjL3D*WvuLZOD+0u`gh3sP1u9v?SKXiY0i*idgz5JZMwhe=?e5F8zS~!4G5PSq(DS zC)f4k#J&2@HcdW#j=FC+VJr5ae_19Iv$ zz#}nS!_Cbue2X2v;iMcAUvCpHZ*nC}?3obd8%Ta5qiEfi_Qo=W5^uiD4$TfhDN?f%)r z?eUXG{_=t4FlH3{H;N;1hM8L5^z%HxHO3e|vRS3?7<<;kF*p8FbG!|U?M`JsS<%>i z(@{z0ZoIcOT|b-<`tcLc0>6&lBC-Z~uPa*Y@U;)M`It^zmbk3| zuGi)qCq*cG+kfZy03E&yHr85#PWMG$ z?{%{u$=V5S%1d0i(eJv~SMeS3t$J+9dO!e(i~B|_owyHkMeHOaILa_YFF;A!x3`!0 zA>GIF6`Xv()EeYHpKVRw@Bq?<&V|8yjh8*XrzaV-`)vpM8|Ms;k)mYV?f=I|KlNw+ zT;2dcRnR8Q3rd{_e3M@CJkAVKq4Js>2@Bct0*LJdc=Cy^#Z8V{ASzA#A>7u|5-0S4 z*uiupq6W$B8duPcc6~Z$k~u!b$3_d*P8#se%2qnnh*Xu+!8d@J0+43% zw@!TyKT8mHf>LnZ;sFV%+G`PSSlb6cFnqotc|+7ZLfjN*C&rI_rhS)ckd1*tsy2WF ziWfu5$xOpX)7%8(K-AFju6`?FV?>tv#E`sG8n8R#v*&>(9kC+*JBU0cU^oS$D=miL zN2~Sv=)Wg3QSY3lu2}N&qrwNWFksjG$#S)OvaY+}bd} 
zqQ->*KstR;ya}L*E!c!v*ph{sSGipJL5TQqXS+CmvwflX{K~KVjqTU|+5fOTdU&>d zq}V-q{O)#odL|y}5-*E2qlnzz$CYd${@YNVt=eNajeLOfymp_c>=>rZ#@L7WjY*?@?x;vF` zp)&Z5J>=HkG*;&RH$BRsu`HUT&m^K@%z6X`^6*sChMzpp_1f5Br>a~~2J4ICP*olu zCb!%%gx7^20F}$k0k%)6p&5Y9io zR-ea#Qd`re0Fe^&MKOuK*cJlH$nE2xXOVHli!j;+$!9GOPbZOB9%T`sJB%QUqM>Tu zODK0=gf3&0x{_m9Rpk_Y@u`l5=sUZ2e|zWIyW72o54XEqF!G$uvGw+GiokCIJ%CEi zH<*9++5S+?c$w`j?@<4t{M+D=?`yy|U{9*9V-Njyn;yoNn~I4F#f3bsE#nGcJU7eo z8Gpl!9ljpm{?nT6@rOOMYemqy{z~UHyaC{~<`qA$;hEt5iX~J6Dwj}y3m+K+)dl@~ zjFOX#aNRd`#pCteb20w#>EpIgUNUU2D%ik!Qs*t*y>^?6jn&W8IRg58SI}<=z-PQ* zKY~17k7`)j8yqL)nQQ4Q|JEs_`d}{w0kU&$!UZg|4j3`8HL?vEqOk~gt37-xb9{5F zZI-=YO`p^};3CE!RgNrZy4{Hjo<-NtHZYF^O-~)af>Usm6JO36KgNgfr2hchu0Cq{xYT8@IUcvJ7VIj9W{qqBc^Uy~$az(v1>icZ^^{X@ zpxkTPO26XbZ^M+80i?{ayKeIe$a9~Pg`)wIhGQi=w;j_j~|oPnE)J#cJ>hP(cZDj z&7FoGtDQp&c+EP1)sHoPEB-pf#y#&oZVfwG_E~%^=v4Jb3(e@xVGAcItin{TdRF`z z|L_l`ty@*(f8zRW*E4p3G*XAnHG0}cS!3}e1FZPD#~2HL+) z`edE6q7o*wyjDGNZaXu+ToeKEoMV2De*vnytZd)`_?N2dCervbB49t^)3LKjtla!q z2ahU)sSDth;kRQ6jmB0=mIq{Q7fl_T`}fTOQ24aRkBtBueT3vj;qtg7Uu zRJxS!;>SD!*{S9c$1Pv?V@?{QPak*1=#7SQs#PSnNPWakM}|T^u@{+DA5eW8AXnXP zHJ>vBbU6;8#M<@*bhwoJ;Y{m7ihSmfP%TOZbb^e;ZQbFpWf{wK% z@eWsm_p*(3o37FRp=@{Z&1|7WgYGf*!Icfz z2&BeXqEtz7C2SZ*ewTfu*Eh))!s{J$_^GjW#RCBH#dy;(c$Fa+dd8EW_qEuYu&~#l zjE%US>_7p#xSK&;mxO*w!?@`3>Hym)`<; z>fDTimJ2I<ItLiVnU~jlXelGqfJXP#x-xjhicult)Y}p%mC};+y`O=7aBwa{FFY3MORnj2z+c(w__ay+1T(~ zMf{!-4-W8~I5_}Gi{xLtxyZwa_*!NGzVN0$|2Cn<>zlVa$nq^85pfXq!K0EF6CQXN zJ~?4@rFcTi@CqK7Op&ocgAbFTx!iyX?hXDGR5f=GakZy`xWf3&Wf3 z^6ES^Qsh??DQDy2jiq$k05(pwLFd5RHkq%+NXCz1sho8}RN;e|*iiLHM-ZFbQy%+S zFNrcuvV}gAJ@AisN?xr*;+7ufyf$!n=DKBy2U3hjmv|jhiF+#_tbqdyBF%#i7$874 zvSh#Q1|^M3_=+9<_-9`4~smLeB<2J+yelrp5q#|{z*Xa(yD%t4-UYBlDu zO~YP89m#>S5uu2!> zj0*EYxt|(jT*$c?8MiR5Id-j2|j5qq+)&w#hbn+rTtr(HdJb;Rr zTL-#`MSSykD#!yLqP9qFcIru8-(WIbHbAl03`)vvRCELOLEw2{@&3?f%?VXui4AUw zpXz^~0Ji~w z$F7$m$wFsjARfQsn~%9j_1-1@HzSN#Uqg`_I zx;Vy$R4Dg1{7WxKSgUJ)dyIk;Z@MJ$M{=fWRxT z4?x#BH^3qw@=Z?Jv`yc7%HLhR%S-MeAINSwq(j=T0mqcsF-BDG4#ckR*WnN3@gtH; zmmffZ-bevBvKq&$`30#qvI%5|{@(J93X8@r=9ffQ|8T+Y{PkiX zPPD=5nA!#8MWoHbO}nJO91~0&P`T%F>iDB}#s%7sIF3-olJ*~3##-#+-#7n7$a$r9 zJf;G2A(cH1x>dGpK9x%^Z}7@|%09+c#0lG-H+aPcci|QR*dw6A1c7!lI6D#*f$}p5MRnyr|6{%gKt|Z5ohA) zd4l)S?ERNY=&1gH7TJ{dfDh0@Lpw?JyZA-9`qOv_`NYrCfv-`df7wuuvG3#pb?jEm zwGW4mviF%5htBSsB-5)o7ySa@`l0&R867r{_@R^7wQK>gi*9VX9=Fh_Ob0ps%9egB z5&<$%6lmUo8~W>b;TQtYktXj7Er;s^w%JRn@))5^mWalpbzZaD01O{|{HG8froP0_ zGC42uoKc&bn+Beajr5wP#vFfQS zfU#{*)qMjF9%SDD9{0$GRtbLWZ;=ZGpVZ*DQ`pDsNx(R!Du#~Kx>YVy#+bV1|9-rf z=Kxk4xDI15FKx}+dHDvY&HW_snk&}Ke`G+Twpz5Z29z)QU%B_Rq>{9Wy`2m|8E>A6Y+qTS1kndgD#Qv?FoQf1k=i=%wB?Pe4Yx-mz3 z+h4l{`>6BLvJ2Et-fN>8BFTWml|*c>9kr zPLM<%K<>5H&AfqAK!xlLH%Y;nNwnZaQT{_0O7tL)mTV~%6Fc)a%G7UMx>L}F@-5cpV$gt9*m zU|=tOXequqOrlRQIS+E@)$?3`=Xm$o?|-`e&hLD@9jlL?z5hi0m^c1hU0!a-Cp=(` z8Wba2IfA@?$#QoN&E4Mp9G@IRL)Yb>w?PKX3&L4_g$c$|gWv;!-e(zmMrl>vU zR3++{7o1;!r(JrmhyE!XdIGlAw)sn1G)U}SpC3t*czCNaZ_o%&Z0vQMdsx`f_qM=M z&)af&5ClHtrp>;gQuV7#`q)AvLv`NHW?MX`DbMj6IN1{nTYJAEcgJYFrJ$^E#uoD{UZ4c0U zf=)oUHw|_CrtflIh@87aKWXLRyk*Ff2>1Z(7kqs5C;!aP{IX8)zRQ4il5r+sY^lIS zTkttRSG#E$xX@<90bugD;XWX(X0Id*jRTnMYh9Jf8xHG-kU$~Usq}XWRv;ZWO0q#K zsB`scGpqjf5dBk+1G_-*9?Y ziw^26UZ*E#+vCSid_({Elm`&sY~T6Tx3|Cei@&t}^iTiHcK`l;|Gxk4fAWLv_3M}0 z#TBpfzf(7`(_iRe({icBi^biGoBjYVByy%>F%}JQEE;Tt$Ym#sBR-n6>5wO{t%f$` zM1Wx4hNhlG%f4gD;?l3b)e@U@KOhU zen05TFTULV=HL0-+rRwR|MT|F)5qI;?|ryEdG^ls;KBXvQt>}NIkk^I0GBMZ^R@+B zp$~lS-yP*Oq%8bd*g_lI7|$|7vGvKh`&dci$D0^l6)a+J=Fve`;P?}JNmFzkKQHo@ zo_3@H&7TWUCC5XzVj(%EXCA;~5mgpI>eq+Tf8;(Oz!$yv<=pEteJlZxV4LJ?RJi~A 
zGZi$5*h?G~4i>Zx=#?$R2iw5VQNY`i&?5u6O14LK_@=z{+YZYRz)vi#1D!Ss@yRr_ z6Auv5$?gWZsw11tip9xpPT1eP<;hUAOTPLOY25tBK3wlOTd%;jAfH_ASJ>hPe|pSC zHz?(%*LX#irNa&qf3SyUZGfcD2Wqi+4S*!f))kbIg3?9O2PcyLttohk7Z)Ez8o%JF zzLXI6f&M}pDadBT*?(1Rx92uQ&}A$E-G8-#5XgVp#*eU*~Ptgy0hg%EY8d7ux7w zdDn6-KuuhZ4_%J_74~g{5x>T#Jlgl{6IlCctntgN$jo5GsLa%ZE(1r2IDv_tGyKp!3XmppKtVA)|X%{6rQq#u5hcx;Os@oai1te7t@5y&vDs z-u*}>XkAuf(>+dLrh%@L`bMAiVKdP2^C!@yiCMw+xR7#>AHZ$;xKTps0ZfNahKUc{ z$o)0Y`b*Bxb67u=tK2cDd90WQD(Co1^GNz2Z@E=(e@Ci*=W~a@Ezz%Q{ZQUWx8T1< zf9wuE9|z#rR0Cz#iHBmqf^FuC4(Db-*m!WU!t18e<;82KAM*PDPyfSq zaq&u0`8(EH&$kfp2;hb_KBU&)M#(}5!Ur@JL?Jwsj9>grJ$t##Q=s|}8OY4Ppw=~9 z6DiH!=OJH6)I}llSc&-uUCrmTwXCwicCl_#7S=XMhbp-NiJfLJJNQTm(nM(6r5!U#mx(V}yMqnyTI7mUt2WVEj4r+vIjGa15&d2jA^roerK7nfY5@ z@z;4v$EmOdMelha@La(MXGNd-ngZxyUB+G#{YJaSsdA12o(EvGhoFrMtrxE^Hy*CJ zP~y#A+ucha6XHhbXU_teC!S--uh;`EAHH1AAWyP=4kcxQc#I_%3p(858))F6QLaLC zFpsLChln5-`&{xT7eL|RVpE^f^o8mbTAuZwyX6rniS*t>b?@5nw|r~8P`2;3tH5i5 zC%@#_cgco#DzMzpDY=$<0K1gAAEC|uWwv)7Xx*Ln17y7q8hQoM*=2u57p1EEwd)2D z_!fqy0|!LD+Qx-_qXpd`v?n$SrPe0S149cCqg=2TAIWr%y7#i{8lSv3wG8 zuu*yZo^}|7hmAP;{Mh4Hb<8jyw3Ul?@+$WLm_TR0dI=t!c-hZzGtU0R{xPngRB@^n zt)cs0V4oMDD&KOvM~^bDiMTEG@Q@-Oi0{|}O(C&QxYWuvDmm~5hJP)KkD?WEh6o?g zXF8Wj4JJ}%;VBobK|p+^)CVp@fpbZRpcWlam)G%9XZ6UQ(ecJxQ18l-4A^_{C>k4>Mp>`k$UgoR4*m2}$^}%_1pj4Gq1< z1!Zv1Gylw2vMa9X7nfYexQ=xIdDV)p+J72E)})a`rR4-Sz-tu@$)QcWl+&GYL)G+c zb3)@GFImQst6uCG zA7rlmK)*A^9opJouwEhu(Fo_uWXQ67lvV#_KBw&UbKu$8kz0UyIS;KG${|}ob+7B; zF`wi)h9%p7@M#{9V<}bdfe1f3DlXh_a7k_a@h_<*S2-y8YWo#h_aDb`+0XpCcam!m zJQVOdzy0y{dmsPS_C)6vybS=-W5y-xl=d~*TO(uDBZNZ1`Coh>E&vVKkbW~AF=c)0 z^}tYNpLe7f=*lW@$)ij_F5GS(YRghpUS|6oZU>$(c0_jZ5uE+A9CRa>v8K#UQ%}3e z3ViONoVyW;7k%oH;e7&Qo$J56x!7NTlxXVoE$cex1aJ7({}JbS^y%B};+%UNwCgNT z+)%TdnxEPqTZ=aHGXnpjJp#v?Hy zC`(Y`@3#~eX3ABbR8bw6Y;SQsPm0dD-o6h&N`GXFLjPs`l4gTEMpZ} zu}u}x6Z~9H^o@MQKP=dwq-OVlXz>|{4Dx=fMf~jt0T`2k=;q!UFyf^+WdCXt2DH6v zTr>XVw`@uL69e=%J&6Y;R({V1%`Njeu5UiR^MCul|3ChTWd8YxE^h>E%zUu$1%No{ zAXhygdLQtCGSGt+m9z`Wl`4w+^?#n z3|*DU1Ci71)4qbTi-~6_yM(E{?P|Yk!yq}{`>DfQG%8nHfXKxL2AYDdyfx4D`t+(p>L$&Gk#iw5|DOG^&o$%#ZncQ z-PZAF`pDn&Qzj38AabYwH^7j&Ld!20w0QV}WZN?~eCi9@Z-}4Aqj)kV z)81or&4D+5QV5zXB>AQYdNp1}GCYu8_qXQ{eff4f<#$6gAGn#^6O_g3{)02&kGA7e zzTM8vjf?HUgL~W0|7X9j{n#J=BilojUthf1o`3mbJAW+=#7Uibb-{$c@>5KVPxq6W z{7?=jvN8y{SeREL69s+)O0djpbY}j?|Jm1~ukn(NB(&>#0A33zzAtkCV4=*onRuSo zyL;!vLoO#mY3s#&j8Tu{+$i#I&eKLx%VCL5zuI2B{AxQ<{l$w{+v}IF zw*TpW{FmE5{{R1fx092j?W1=;+Rh$4bgX%Rf*a_0+X1ikKhlPl*s&3ypShqcz4j&Z zk2sMNXY%7ar^6m^U>rNyTL+vTZl;Dl68ge2|A9COe9rhgQz^Z+bWq@;ks?N5AI>#;8@b82J z9peKTVa3exV219#7^AJ#y(+#*YrA39P^&aYBBL75RPVIOD-% z+eA4qOMv4|5xMR!#q91;;^a45pvQ3~##j}w-7rcydUz5QTgWxCJ>I+qwEY5Z#K6O| z*1S0)F_uBv0N5vQJjL(dT#{_YJbLIq0s9%tgU61Pg_}O<&Cxg)j&@A5C9!xg3SXll5n8-5{Kxq%zEGgT6De91{KOx7kKLU zhgyYwfzSZqNsPgF?l$_FTzv?>=NrGd1jJU3r`TRLntu%|NHTM2#fCk@>f{K129kaTVK|5 zVqnmGiCviwWBlF<2kp0wi>6b%<1)r==^b|0`0M(Z54XWL^MBBd_RgnWfuisFu>YI! 
zb5wEvx~|mxOvVFnU(|7H!nEHH0BF2>{ArvrHs4;IZx=e*yLkEKcJb<~?c;y<8{4P< z;rBEqFSj!;9x+q+7K7|yoboVBLmyKbJSTcw2=65I!N(@Q+sNgTqjn_peyVBxO)+H4 zU)Qycc%CT!rjZj(yRPGN9n8GGGfrgNj;udkk5#`mUPixFKKeWB&fB}S{}f+5>h-zf zT(*$Q#y2I8`REG;TF0)gGzamT&57juL|DRzG3&y+?|*Ci_K$sMyZ`9ncK1a4HLVl0 zUSao}Q|lY!{m|Y+e-6ff=3kEYH$%%Q81e&S|J`(7qd)PT`W>f=C;bg=Xw+PJUw|Gk z{3z)DKppG!VY~IKaoBYbr$JA?@^@ZzH}HGh)V^Y#GqCO^mOT!bJILjm|6hIi<@VyU z&$PGyYWwWdPy7(T>G)X(VpDs$I?qIQjt9JHiMlM%2mQs~p8bBu3W!c1bKyF~Zh-bY z%nhG2LCG8b7W6u!^(5y4v6cAKM}48rF;SYjB^Vvo;*CzmZ}_;tCF7JdTm* zJvOJkq3vs+O}1%2#rpAv!6H_9?lqb>%dk`UxF6H1E?Fk;xc{%;-OyhlxjI8Hh{8Fm|OJNGv#(_RXf~Nvwd3IqcFr zk}RLA*$<9M;@A0G=!C-`?H{AN_~0L7AYl9$C(@pbg)&-`8xQr1A50yGPEviT>rCe85e{Py4VK7 zfwyeqYrFuaVcaqfL>H|3+=b#7)^zOy$1hZ2zfgDRSlg5Mpf9=;^EPA7xzbnK*nc}O z!th{k^Uv`Hc-$^`H9#!I$tqu>7|`|?}yXZ(doME2qa#dKe%0J>n|}yE~vN;NPOGA5zju( zs@=z7^-1hfTnGU1ul;=nN$m6Qo?C~K`?&NSp+}^O>-2}QA${}@av}HeZx}lOmB1az z;aWk%+c)PP*Vyvv)hq2sUvFn;XWO6rQ-5~*32xO-lvg`aZ(>f;!=@Dm3guLO>b`hfMUW7#)A^X6W~ z)8i!9-R_)fUAnl~UcG$2ee(Oiv;E%h{QK?m&%W5c`s^zyd%N8`yYKV&i`P01NCth* zeH6^Fs#6u@`rA$WMXtdJN4Ic2b_MkNIla`A^O`gh^JD@*;gWqFXHmRgkxC!5`WkOlrJN0 zWtZqMoBb2|@Z;j*YU3xVkqeGDd-Eld*e73W{NS&1A1{#|9kh2`up4Nd)GbrI{T2Z8 zSa;Ip)zz;Z{p6qibHA*heOHpr*uX|?0ukvVbe({vs|9-ycDT+hQSs2sgyN~qlX8bFVeE6slZ#F_rFFBT*u9BU3( zN@xHk6M1bAhW+C}L4Lq*RPm1kM%SC~0Q%r@oBadaD}t6cb4qs2^^!B}6nL^Ue}JwW z@iR-wvmg*mK6p6LXi;EOVj4|WiPrKVkCa4e>Xs#5$Ivp8t}b|mr8Z(Z@DNVE@qho} zgYCW!MyL06Fw=s4e0IG3*+2c$+t2^vU)a9&;kULEP5RG2{bYOb;^lUJ$-@VtlQe!G zlQI@qkFZSm9v&X2wzJ}beJbei5t>5y;+y|FPQrP7gH)<@p)Xi<{|)IiM>)n&7l(mw zVCTRSjBR6cS|`#L?HF-jv?L-LbyC4bk0K|?j0;Hkc>@+^4%n|>U2M-^e6^jwdg(d$ z>gCJr`@jAD?Qi|fzrFp&Z~Xf9X4<0?S-6tn~0LI~aBr(iYU zY_I7QJBX~OH9_pvHmbZ4lPn8^m-h5sAk4A7EZ{>V`IbHhS>z69L!at|HNr6rbalj^oMzV9jV4+tcx9~5hGFfh%b3Mkiv_ZEhmY7u^>L2D8c6; zDwMIBGuX%hGJLW?gndHdiwCe(XqYS3M;Qb$Q$51O53_hOr~Fa2BU|ziq}=O+@Sd`o zUpbERp$6ELd+31@y*Gm5<-`NDHv_F&3HzD<$cY{i`GhHY^x;0TXaJm`FVtl1K&E(! zow0!#7hxSwAY+CNCr_hdM846PF}6?@f2WRfQjZHP2~^Hnv66>}`1-BV|u;*I`As-wiFV zYPaMCecW67&lxfV665%((g{`epB|lR2QbDm7vmF$iC#mH4olRPFc;fjf2v($O=ZUd zj0#K<^9n~|j{789e|JyMws)R=xPA29Z*O<+Ki=NH7S@Z2Muykm{W$eQ1>_p%j#u^f z=vVAvGTM1K;051&6`%SHA&~y#gw-^b*)#%d0@vl3Q_Dr#0&_e7JwE0<0hv5}*f+?P zTj2Fn`cz4Mz*-jcxR?S(PPyxCcndtNcRao!48Dc~{zE;BU4a?@WNm+e_|1tNki(k0c-*gM2219lzRaI;N9~}lTheW4!O!c z`K7-_N-VJuF4L%RM{5Shwa4!~-JZVtOf5Rm<~5$YLJ(fW)CB|w+z@>;mmtsOs-lv= zK}J*xmU%v5v=;2^+4CYV@pqIhypG2$(0YNYkNC#!wkNy-+j7mAg|DIfHd;K_+J$3& z)P=i2-+m1G&cP#xeg%qdwHea4!Zs+H({0*#E&!4N#MGX^( zAH6k3h90%}c%XeJs{UpCYmY5D`m=pzYd$^qJQHZ;z|S?ejreeAn%5{}~_JAJn{_y7_1!0{lpW z>9Id`Vg+C9X@AgTS)*%bot$y8g2%}&2FcMCJrQi(&fD&?tNd#|l|~-^Hl*vZf9O2m zr|+%1;b!$B)P|jpf5P zd2Fx!&_C;f{ZXBm4PVMW=fUhDNY7>aR{lv==*1UAK7~wjbppDp&F9<1lz+#;1WNH? 
z2kN)Tn(+)ED>1JjZP}&;NBr8iS}kW6x8Mw-xM;8C=azle(DP^Bq``E0iAP zu?Krq9%S*cq5W_3*kjCCi`&iJ$UI;?NoJ8i8sEWzOk$--H>BRj3m-qgv3KGC1|ACW zI`^*peV zb34H2c-E^U-|_Kw|NaACd;j?3kGC&A`*eHw;K_D&b|OC6C7BL2H(sTi$N}N!MyoRW zL&`YffdzQof5yuN6Y>=#{Hp$;pY3d9V&h@%ao2H_pwLH80N#?(srh}JpiWxhu-*fs z0r|*kJ;+;Wx?JN8Kj|aP(1))j3-5U|&If^%)5rL(Ty?H6%hih-J6#>@|N|c~3nTBF$*z_CP1oyD^h&7m0o$XoNKP9c(BoIeN+~O# zZh1x?i8#bgfi%`IXpx&wR$ZLA!wH^SPN*n+@xtd?##gAO|PL>A_`l7}DHyV6nv236m zz2PTAAjRhGyyXGvI8er`dQB$ivZH9DC(xa4PzSqNdOfg)EDJ6=C7-~6Z<^vSeIAb@ zm}5+F^gwPpfWhB-x{Z{HOmWy%_yFbj(}S1|v5Uy_3Dy!v(4ZuXzkwgVCHFVpm5&3x zp~~aH)G*@*AQd0%vx>e@q~rwv)I^0@zwU~ z^(#e8GLHBLqZXnI#z7V>V(3LeUL>CK!{*|ZXoRDWeBxdGflb}+%?I{6zVg>8biTGm z0ox71cmH(_fxanH{pOQnqf?LGB=7%Ja&R=Jz~(@rR^do~b&3)6M=Y{$IEJrUfv?W5 zc$!I_k%jxK&wsf6cmMbwZ~yqe`^R3Lo<7y4OdB;GtYGZ&+do-6xmeHBWoTCHh!6Iw zqupbSMQ=h}Gx!^=#K-Z)thuNf-+qcG5C5p{WC|sfamRr#TKsbkUFsg=)FBtD)Cn|3 zoQKD;@PGInB^b=F1hvP8+GxM>yA*7jc|FG>FL5A=-eVnB>XbPMrOgJZj!x@?#$t#K zdQ_&45YL@r(Z^k{mBD1_)rCXu@zLN`X<#SeI>u%u+cdXmop&0Yn2(tW+4kYj2szVD(lm z{frAXn7;9@{^5yv$UI*(50OY8alACQ=O(ccJ-!eif`C6*MSqm-2I=C@Knw_u>WLLa zY)~>uaE_7a9J1=>o{!LAfBF~%_KFW13R*kuR;NnyqC3!3nQu9E7S9j`WAmmB9Qt|# z92)FFQ2?pU6nXSJRVTGyt{R%Aa17K=-R5#~KN(y7XRJxp!;UlZi5X=Yq01b^tTxU25oj!C;Ufn% zp!H6F1B#iNJd4@Q>p$u;ukev?Ok2lxx;=RG-uB%;{KvM_`wxW^Ui%jx^W4$1Zw%OA z!;&0#+t=fwEmR&v%Ed2A=YcLqhu>}ijP*<@feKJ0)%r8qu&0yoZ9}R>L#{f#KEVDb zH+X?2M;7@(8qkR7iR>jUFSCHIv2Ezo`0hb zRQV6WKicF6MGiiVD)6yie$H_sFpj$Vj1WZh9xHh?Ks(NV1e^?l8-3Hqv(R`v zVXI>SEgEE#{)UenbUkJRS<^VS^f<8sC$B%XeulsK4s)Q(2k|E>e=W=F1M{=#fzsnP z*`rUpVB3gJuQ%8s3hnLi8=Fo}PPd2eyt6%d@{ae+oJf1m!JY_7jt_NZlvSMZtGq}I zBMxbs=ahnG+X2W$0J7KQP8UV%wLMFszxgvQQ2D4&{kw76&2cD2L)SYo4wX#ekIL5s zw4Tljzq6}p1uic2YjT>Ov59|57NsMK$}TzRMiR1dOIxJUZh_H1 z?DFTXOdeY(lpNbl-WR{xAN&@N=WvqcvsQVK5r5&-?~xyR?>{X;XuVeg$ZQ@BJ($ zd0nmf7krY>-&qGm5j%vB3#w=TiHyWyuan}qsbhZV(q?cUBn6-2Ut|PFYywm6q?v8j zZxTKb78ytl61(0{LstdUR?#xB5kDGVaz>qAgI}M7(~eEr063t=sX$VM zZhr(t-yDfm`e4ZE>t!VV>kS6+al+iUQf^zQM8894@r!;#2$?U8kDTAbkFdz`@l&=X zmXs486^x56a>fR-qC*(f#piv4iBzxg@sbg@U`qI2Encvs+py9v#bY(!w zS$9h=^|}sWpa|MJxz>?BiteP0IY3y(uBw)0nL=ZXQ|j<4B54COUQ%qm?FS-)%NQ0_ z_W?2KdBXe~Hb75Ym4kxcA~+_LJ*6!T(A9cVRrIfP^xcTL65=yv@d7I7NU}#d-m*5) zu13#GgK`)3VI2TCkQFnxscg5v&phjIX)Bqtzmt2+82WewP3O)GdEf^*h2VJY0*L7G zu^V4iMh3;uRGsr%(dVZfiMLwdu~5YiVljEXT1;tByRV;#z@#LOYi=*SmV-<+>c6kA zIrjRuB;kyv*rCaX@SN8OMBjNe*`CCWmO{D=4fe_$hQpvwjwF1-Ec*lwImpIe5MvAi zC6i>kmCQz&!ffm7(gw)*qEFRxt=35UiI{w`2~_OhJdQ+_z39cOSKH~?iGQZ|(;s}c z{r302zww5tr_bKmPL7W@e(sIy8eGG;sP~x&p%^e`d{5 zoBlB=y-S7mjQ>FA&EIv_8qtk?lrnDkvL8G?&P{)HroW`bc*1=(pWT1Bz4QJ@+qb^+ z(ROz4ROjdy+l9u$`Kxn(bDVnwi~-7i=;O_$bHyZekZuff`_(H6+J$8&>NNGW8CN^|BO<0*a4c#7Fr zr4}0pO$)0vr1Bnp(Z@PXp6hMYp`q-bQV69#JxA2;pGfvN?zaysE&WHmZRz7R;2b{5 zK_qSXrqIML`-P3viB))rUnIl_^La1cI6!mJ_u1f=%zB`KegojaCcnXkz9jaJKM#uJ z?HR~pEMOkF}cAGpVk!fxaQOkWK(V#_|k8FVO@6I>RbO_cc2kDwqBfP9D;Ow z0I|X90-ve4z5T&I>;l&h07Q=AV`fC9FNZ8XL1Af@K-)a%~-uJ$<{o>F6!uFFt`KPzX4DOKIJ2>3z zW)h>1O$(DfPdNeDg|BvQ=*F&CEjl|Vy#1k_3>q?QfJC`)YK5piX>a=0m+=_ciW4#@ z(_Cf1x#Q!i!N2iS2Sfnbj2XwF@-f~dkJq};AwEc|eyO-$zP{XEzI@@|@PGa4<@WFX z_kX+noxlJ0wvT`Jceh6m@B3T-51%|%d`_&3*S@k~^PB0ck?Z)AN|~4Wfaf0G#DR`S zFEpaZo*qwJO!gC<#Mx3Ba;VE3=;%;aZokkV*~Jy<${q7bj~v=$RjP8_)^+@i#@3qA zHd-900dF+8NF5hf0g?0Sa^v}QL-{ET2SEg-KNzzmg}7DnS_KgBwCAziAz=n#G#|L_->QhD4 zU1BJzjL#rA35k9F7C~!8hf5Ru6gYnx^Ul!$PK3+@*O8zsCnaeMJ}~^VL4foSi$bG) zw3NIC_f3!20@HxEJJ1q~m2bu_5b0xo(e?7Xp>JScgA%p+WT^f_Rcvxq<#KF5H`I7l zJ+dNinwq#YenRs%Vdv4)c9D15fylu*=1W`G8AHHqYWYo5{y;#JXlnjIHr z{LP>=g!>p1Wg0)8+reifNFVYGmoe4x?7SKF{pX8brS1-!VVx*(JjK4V`;WKxzx}=K 
z>H8l*h+vIh1`x7hyH(jP#RMA%AQWhQ8DFxr>dGx3wNw^xKqyRp1IG{AcDf3%5xA|* z`U{Ye!ptdlvVA1Q4{>82?BjPkwA?++^`hGk^MavIaduPH59akB9E!i?dt6aZbwVQ! z;y>)Dd;oHkh^ynK+~wlyx*seP6x#EFa?K0m&Uqj}R2Q6Otg{DWb>*pWZS=O6UwyfK z_Jbd6U;OZM@rU-9i@!48^O125+8?NKMgNR{t?5QLjxszphvJNrwE{v)M)zip#V(E9 znTr%pSll1WgZgJQqD{K52_i^oghZp=~FNI=R~KaIpni8k(N6)`Y_4 zm?Hn}tNd|rMWGX2%NF6;*}d)2)2G{m$B(z;BYu7-ywSr)WV^xgO|aw*Zk&1bDfWCb zIFwhC#=2ao={Se*U%}9q-0Q31o9e#>`qj^t*K4zY!2xdQku5^yjR`a-)`xZt82EctFJ9^#fVZGQA9?0wOe=>CZ4uk^c+Te@ zPk~SYHFN}_3(T^Iyoktk_7zvU8p! zTQrV*oD>=!=)w27ck@IJA4+Nqp1G6bZ1}3#^UDnS>~ie2U0x%X^C#O;iV7NseccxO zB6r1wuI!ecl}=cHj{To)Cta6i{=o-Cu1n@g0w4=*%ZVNFH*LNsXc@>2Ztf35mo~;I zHTt6Dm{z%uGt(w;f%dbi!%t^!t5TI~y=H6~9r|fc$rXO`0$q;2s8=pq(*^?Bj0M_y zEHq!|C4U9)#i~5$I#+4fh)t9gOXA`2llU2iPx3V-^8}_`qoDNxj0>m8nC*c@uA$`Q zpeMid_PYgqD-F0vPrbv@X!rVWcrib3QkJ+v@tTzvPrU_Ej#XL87=} zmwlA!u9}ZLV*z=~MQ+vAqHg#0V9}qv?N`h!0&NJ-II)e&^MHP}86pF|&S{_PT>MHI zNV(@zxA_Jhb@qMG>YvE5w$dtO$`*s|J$|tx^qAuHs`soamy5$(_LnB*!>ksltOp2VoQq#YF_xf zR=8lQo^TxtNPrI%%H~-Qfs^W{2DusK?E38=q?UtEMGvgJI1%=PJcHK~Pwl96= zxRiVY@x!LZsg8w^^%o*saXq;WnH3dlZ-c*PNux z^fAXv5*%4m2@mwMVV1`=oD zRdNJr|Br#{|FUVqVZHUN;|iLlTj{&32ozhpPQZ8m_IgM^FQ(AyFOxC<85b(^FaUk! zPJ>=;yixM$yB}>IeDMDE;Oxvl0dT3l=DzT;`iA=|+&HEG^UC$y=4AMta|LRv+P z1W%%OaDX+&E#Oq(n8xwR*9^FRWZ-UqAR`Ul%mleT|dkl%%~}r2LNnJ*#%fXNY&$i*MabcK6cIZCH)s)!s}zcSbcv; z@kfeps*9cWmK!-a*5^I~H@A@|*{UBaAuBePU8Oc({uDj|aU0-%6~Ao}Ef4Ux|DsY5 z`y|QuIqoAn$7*y+isBUg$X8p*1R@WllI)0I(ci!WM!A-)ai;a6{p~oBy!Vait^-*T zX^*nL@!x#IuD3h@;5Pu|0e}@+(FBB(5QgANI(Q^tY8$~24Mhg0T1m~%?UBB@$YRHa zm;;chT$bVDO+6x0ujIOD{HKn#K|H{PJRZ@7cTCU-`>_dHd-<`?K4J@4jn!uV26P0!NoJp)dHIfBKH! zF1f>^z=47T6q+%1M%NYNVpuLfm63uxeM}skfzf|>J%=2d)M;bc%ZAFv2nQo6>`m!4 zLHrCyfKl#ErtQesqwc5YkcIIk!cPj3*-xA0;#!^5DSpR(eLt}|*FNdkzxV&?CqLN! z;XnFE+due+|8RSIalSoy^mu!4?~#Wsu}8HRcXH|A!3{5l@v0oifztESNo|-zn_9-V zv1!7O2&b_&Zs6h9r#mEV@Mio1%Kgn%h%rWeSN&r}?&xsPw}c~WD;q+A^{7>(j+Au? 
z2cO0016&%F$!j%H<@a;3S%;O#S5m~4%NsTcl97W3Wqmq9CHagwO5U)dPl+4}vd1wo zD@^(q0=;PFo9jHiY&}QOlW#>RrX0Yaf**R?q$+C`RUx$-^NE`2E|nMujAZO(%$U*g z!$zG#@HB&We_KiU!`M@X^D9zXPjPgdqSL=1c}!Z8;{bw6bZEq@YotiM{8 zMWfYLiMstW9tq)h5?}hj71h*dE)nbGS+mP8mC)Jo3Q;IyXO*kJvSv;nh##!h$Oc#B z{ls`g!r$@)j-N2j@RwtVp!yL=f52Zh0Zb%#J7q+QhBDEDh7W9!ej$R5EkOE#I*3YU zoUm4XQ>~c&swW0k`Q6EV=y(H)k$C7;AneeP2M0|6O(bGS0v~?z)kdQz8P~Mo&w3XC z->K^jc~zAX{fHm!3&$nNF)b)5Lnd|d{dx*b^m+c4oY*gE>O*KcHN^g`z5WgGlI!E2 zaVnd~)JoCeuEn;}KLIwgleQ*2dyWiWk(}!+< z%kLyQ4qE33k7I!e7yeolf|O{Y4EfT3fiBb7>6<{yIY?t(Dqg|_s4FwRQh{wXe2sqmhHRJQRL66d z3J-LSO!G8N*Q?D?h(rFh|15{LZs&vG3hWoC%#{JHuR(Nn89n~}R`^L;`*`~oZSptW zr#%Ya@=)#hXP<4KfBO4={r`#fInW$yyz*@{R`85FlJxu7#vI6)Y@4$#*=SWY?})4F zo=?=(cez+pG~%PJ=bPn1YMkioTiQ5=yk)*GzWRjr2JqaLtzIj!z0w+sfO0!9>+wMr zluu=vfC6dd2SOJ!p;vp)OYAcXcBI)m7@Wj5bc0I}y?)%kuYKT?r`v;vk9@I)t~OP-LKaLmy$W?V6SB5&?Inj5{(^p|n*&zLRJ@%A~c-!ZX zvSYPv0c_gYWgaE=0FH&y5)<3wI=tlQZGQ6d2mgHC#TBK!k$b{C2TJ%{I|}7rS!J%`Jv5_w@x|OY%=JsqVR$@$0a+$&zQ0 zwgGwYMhCI#ZxcqeKb)_0`BNR1e7L<;SpN#NLuX%AS4?`odmJmz7i7|Jv~hjG-z|L@#>N2ssxmeyPdjY{bmfmw z@4uk8&1zotu3&WfY)#GYJ`|=G>b`afV_c_9Olgk}e^Rx65TOV1woGbG?;Z-24l{wQ z*fM`_wcj8<^YuF?r-Hf2IQC-8TQLM zWR>}8ho^KPS4G$OV80XK~%fziZ|vNB!e-i zc8_tj`FG+^bk3)fiYs0{uV(hDe7XC}ubywe`5XUkd#m%or|Sb?fAii z?a8xuwvWE^(RQLfef|2CR=lh2yuM_}jc9%%fa9O{%CaLj!+FC)`YB%yWL(rZ@Qv5Z zpP4f6JnrNx{o*e`tHtfI6Z7K%GU;Cx9hdkU|DZz`V>{I5*Vy#W;%XlBc~f5pa~Wr$ z6fW!AKK?B`APUQ8&-b<-T7Y-jg&b%&Mx)O+tT|SKQ*8Wzi|uTg=|9In2~}-BIrD(H z9oNKTUcJ4NK9gYcklNfkD z5--bT#^Eo=9FAi;^01W<4l+NaAl~SaT*U!e+Vk@g(S;oB$hlTsJ3^m?a9r?w_Pq|- z!{2f47f?5)Y>AC2)6Z1P>;afeU!)v=nh%IiSoikhqd&m|0B^VP0Kg$JK)soEnhdFr zCmCIZpN=3uNJEZl3XCs=d1H`vE+`W#g-jx3i^`J&payM2$^FI&Sn6y{46S9&7drbR zU?O%3qVk5O@Ad=0=PK?Y*baRL&<&vYto+b--wcxkv%eZ9&7Q9HG_rCLrQcnt1l%n5 zCjOh@P|j_1hj6I3z+=aSTk<_*1C-WqYJyPKN9SYAt#Q@IdPB(o9eCkbFvufKauCkd zM)}t<=H??$k@4gpPX^w9c;@~2gGUdy|I2^!bK8IZ7k*)T@5wvjc(Z-+)tB3sUw-Lt zxxYR?_qYGExM@ykQDG6uJZ0fUdh7^_zQeZo5+syRE#pd!eJ*ON{SlenFz26SJ5PQ0 zunt6(Br!w+qpIwcvH`=0nP1J86VUObDl%xVMH0G>FYO#;&tJY0-P`R#ad`RVSKGh- zH~-7_cmMw1+kW?Ve|LNO@X_|c2OnoNvg^fV~-I|piGuFMI8!Q3~2DYN!4@)} z4%C12KhiB)7>h1F{&XDONX5D%H>d_&tHx?Wl0aqX*<=ixN(ODMFIF&45EI_$sYJ{a zU(a#3+3ILP7BMGwVHK#(CvA?B8bp(Em;_DQRafS|Fi-gSqh~Tt5U(+m@@l$0ah=mPUo;CQ0Ac!SIw!&cYT)B*~E z$4-IfUa7GhzIBQ3YycA%Z1bnfWW70y?Jc{l>U#BuKFD#;Elp2+cF=s0y8^YwksbPz zw%~^NYx(3!Ie~_^`GfCzd;{fpL0kYn8^IM;itEt&Z`u#I>*+t!wA?*?^8+p`eyuT~ zjQtsZdr0@SY!NH-EMT4}@n@o<>AvzqCMBDgtTqgPyB|gCwNxZ`w2}6!+Q3OJPrVW@ zULp4A$+PYK?|g4NdGL69d#+=KA6AhJE@2^{7PR(b;* z(tR_qI@vho3IP&Z=-c7%pq3C`DR4?8Dg|BgeJ-INl*E{;rV;2PLuL`o$$291% zyAJ}*o+0lPv=AQnQ-1g3fy(S3B%2E=zJP=Z?K>_m&$kPo6TRNfUq81=0CX111u)5^ zi;;|2DWW$K)-@j(N_n6ZSl43Ebu(U z24`w(J)zmr8fFX*0h@1YYddLQ69a_2w?EU*DQ6dpMNVM$pT)_3`gqR+g#HlURd^#O z_1hu#wVc5}x~K8C#bV-2*#d-*?d0HQ9UwlGk-N&C-$-t`MUM=`)Y#kEVw~idCjF2H z%;W#iS+=DPy^+Q>#Ga}M!m5wbdky#!hw0IseDJgBv^`Z9T#sYQJ`WOY*2*~sgmgQ_ z>wfUb{+0qA2A*70UhuKWydNMW3^%=81bkTWIpBQ^z6JKhcBh zl(t$M-DcEiYw#BlYP@IN62J8Jd}?{Q`QJVOUG6+|kfe{~7nLAvbtvbY3e>m}fof|chnA|5 zvK4akLmSzSYxQ5q_;kr&Ie<4ADK;S2`*GVJoGkI&af1#tj?EvCW@RUY> z5Cg!Z?Q3G(;@0aCJGt~@@gd9o>bByi(ejmtM0orbo#i)@I&u~dhJJmhxqTgLuJF;( zK&)6lB}4J!dIi^%Pfz(Rd9KsF*?#-`A8-HRli%I$9pB#`J$<6JwH8mkhP5U^PT$9@uFzzN`VPA z{D*S7UVOLmH_$OFd>wt_=-?=ZT#vitTh%)Loa-QW&O;lT2)yS1jqsOmF1O?RkG7}p zzQ4Wy?t9yV2dCTX*B9Hx)rI!Qchsl&$~;13j$vMdNB=MerAlL=`(w2)_0W3ERJ;4k ziE{dcKVZK#^Vx2DU?VZcG@X2|(rpEET`XMjcfDo%+JZc2AQ zD2P~Y&hssMj;kUup7F=EgUidy$b-##^2TXMco3xRezc}=PQaSec2mL$43LACC8KcESn=_%O-Vns;%}a~U3GuWEBMGF z2O9TviOGw;vbz&c0IguXK<5o5M?8L=Gbq!Rtp~~WIwjaFyfo3WuDAYLL(`Ch^F(#y 
zNaJS`z~`qxNI8Xu_FCtKn6n7GSx#y<>ahE7PuN58|bEM+&(`{0d&UB zIUs?XZ3lusv_Jg^fAJX|dHFel#I8pe1bGd5XgH5U=je}u7E_`s{w^tX447zH6qv;M zP3t@nsM!aB;jL(L1$__}Z`XAq1L9X!1 zxwzMxrks?4pP9)V6CDUk7O?i;j52_3+9K=jn|za;8|uWc#{kk;@L5p3H}Z!TWF3S_ zI^1n{coI+Tzx`Xkx&58L_xHB{@n8Il?L@vlef(s*_u!F#4nRx0#N_ESKT#%`7w4Cf zaqbxSw|BJg^8f&^gJ~G?(SOQdsZ?G1vD6D`T?7E{ZyyQICW8y>lnHvTcD{LCapWRq z1P7Nfjr@)?6%`j6a*J@1MD!`kL56;oPS6|_BnO>77Rzr9IJZScZ(>4k{WL+WLeCU! zPZfsf&t^w_m_hu-^M*B0gm<@B9_z^Sx<-G&r8dVH0FXd$zYc7$65(*2{yjojAqBhs z#|p1N^OR5O6o($uK4l?-M!2g?5cfr7L0SD`ZDU%J8aa-2lAw zRrrO)1y1uh@dn=Lql#xfW8NfTr5)sd-yk*(8(6GL339fyD-qkituavIz`5j`$rmi2Oi?1|6{nGXAm2 zH8FIMXro+tcVyFeY;6OzGOCF8X7Ut2kp{uSLcw;gS7dOiAP$KQI{~^ps|dbC{fiVMFZ_&^{KA=)6w& zPtsW%4CoMEv`5Dpy^J*;D%noA(+Bsr_doicPR!m@$DT=sPOde#_#J2-4nVi1yU$dW zIzJq+#Esl>^jL=)dx7*{fcDA_+3q8+)e(tB~AT-wR{~o9wy|9w{wmklsi=&+Cf9!eES3;Z=9I4Ja}|7 zKjf@YzfcJw&%z}4~AvqGTI`N?ga^zrI{pP=!8;)}1g&p-KO z`|8Wj)pqF%L)gS`|NHmwHlAEZH98Q2k<*T-MwiAl?KE|>`;T15T;I%qSM@RpP77Dv ze#%Ii;{qQoNCZ_}*P2^Fv|M~0d%=;q4kX87G7d4a2o|aP}ud9fjm%3bdf>e!k=&K zpbLO6^b@*H%wlgR&V`%aaWh~O$92y8)ZXcCj(?F6OPp7Vt!s+h6;@q+(XKC+wJp8R zq7Qu$5LZbaJ85&+qyjnMI4?|FY>WPeE>C;n1M`b|-u863ggl9-w9I6YWayAeS0EjAhD$<6zG}=v4%W6UhKCF(f~r6J)GoQ^wN@)Q$7Hl0HCI$RdyA zpC2Vxl~80(Qkk^zgGwb6h9U9mc9n{@*8=K@3p78+fBKS!+|kzfhh2Pt z?G(E_b*Ta|%DQ|yJ{l(EJl!RKrq7U%Vy;yvb4zFwg0y8#nEm6m2RXRlF(@c{#}0&I zVZ6XM*pLo;?+Hh$IEW9QZF=kJ*n@AJr7Dej=CfiEZ)%kj!yI`=0721dt8ZNx$K$X4{Z?j~8SgC6k^;aW5_ z5mwz<^060uH**~h`IZAsMU-pICXk&+*h;1SNSTZ}x((1!r{zS+8 zTvt9iRv*|Iwb)O-V0PiYIOcl&-K*`(&%f9{{`mXbi!Xk-ee~^bo9{&Dg!K9G>FIWP z$?u74C&s-2ol6n}u5-Ij85j8Z_OewMuuf%tbQlcS$@pknQ{N{yPGDVGVXV`K&`fQ_ zngDpz7mk~3_kMyoLY%=RIlh^s=7J}0gC(YoQ|=-k#9yCo1AoZB;19qvLd@pXx?wr$ z6UH6==bN^7&wUSoW5u!ZzF~at(ZlWOd+%@G`p&nv$B!TSZDX%A-}o{n{dIPFPcm|z zi98^~`FXD)gv?mtmBBv$CC}e1M+V+kk zVmu~^J(+nq?$HPziE$QL5W!EqUp?<@*vhd5RDKd6zM4mJpkZwS=WDId3Fimxxi;c; zK=g_cHj2qI0oE_upQ-`7ZHLj3bRSBB#-MT=;gM}oudoMr7dezzLcnDaAKry zd2l0dI6-b=i|O9qD`!6DCjrv;_~NlaS%jH`8cpVXqyEpDorDqGYa&ilhBP_HCUDfz zNsNdkUqS_1Pt$q~DIpF!+z%cH@ZuRhgdPCbHj+}OPh)%Yz|R=BFI0tB{SWd0Knc2* zH(*eL>^6GsMwoOmFJ+68!9F=^nEc+b&{I3;`fYj+q3I9g+{y2^UvEF4Tft4OSC#yG zFu6b)P;|sYLBGk3gsK~oGci|aKChp2EFG)?GWmWKl-!yK16uItuYF$gWeg-QSeB(h zr_2{{jcZ_F)4t*gd{R+zh8>fR3yhevrceW2Nw$zpVRcD6lu@IVKj94P48KmJGm$o5zM z%3s<3$RGZr-ut||Jl}r!`R863E-x>AVBuT;@DnOOfvhr%gcmdYqX)}?zDU^%!|c!C zNbO%@m&Kkusp%z0?YmJzTV9foILcBj&-&m#@6&@~z|Z%Qre0pKt%<*ZzOo-~Ic)x_$Y@^X=WIPqt^z zo^1~wKeg??=p#PLJlV%9LoY9QPy?NOGmHl@j&orildwvKVIN@q6B5H5+)Sr&;Rl#h zvDji`RPitX7WklOT-I>fI0<*yRZc5$O#cKg9c1-sFt3t2py#8sNVj6fPnxJ`*~pVI zxp=M`c!s1o&{gfqtxE|~4njWA=%AAm2gVq3pov|?jtzr5R3*m}N!md9#()Ut~FfA_3qm)fm4nNOkj|ANo5CWOBSun3NXS1Y(uy9CY_|T8I z?2Sq1wAn|AV$+0NOA7cRBI^NYfiLi2i{h3GYidvyM=+KppZQrR zR34q4Y!4nk*`B`pY&$#C{@e6gaqF0cEj}9W>^aQW(jl}jHvxD%G@<%$&(XFwwsMad zVne-YZi2nE+aYw-6$KpQo4*>y-fNY+_)+6B0}nW`(frWteMikqibEam8B?|aoJH&m zG|*4pYk8jOj6vs0mt2UuINx5q`k^>7rqipMa274!S`+W)BroDThziIPDO#x*mpZ~=^^VPWGLKXOiRp#2(a=X24=yKPy zzwW(7Aat=~>~}TiLLZt#xJl0nG}HIU-P4r)^W%tnYKL-FvQAUaO|rM z#lNuJ0%p7~^gSmbv z0>TZ!RZPDjEAojoc$KPt9oATiA3OXYdl+ZiTE>pZ+sQR=i`xR^a7`o#+iDr?b>XXVUV44K*s7pY!P8#;a#IO3>sUn4 z8O~cyV8#Wo3x*V|U(h7BwJPL}ZG;-v{pYGtEpP~){FTb6QSlzN$-T#$;QnCGU47TB z=`KEM{(%4#SVS$$A4067uf2O~D4c7mL_c+5==+kD@l>=|c}T+uK=wN}^@SbxgKekp zX+w$H+@JUWQFs#%$wY36bH4$_(hb%@q3Nh~*)%YSX3&GCtYkaPUAPH3x1iMPxAuPo z#`elC<+cN;-1fSvPp%WzU#Spa$EXClDtv6I@hspP{l(=AmlGrUCi^MIf8Gdice{7W zgC2Lb&p!Wj`(OXp|9QKA_Gr8RkQ@K}CVl3K&mpz1$?L>950hoW-=SUNk$J2SRFC^D zK4ywWb@UlECT8sH_%+yed>p~qx4S;tR%p5#=ne3MkK>iJyKG%R+dA00ka5NXMSVwPq1d=|9a&}FS}eqVQRrS)gb_3P<- 
zug(`>;iJubi(j-4y-w9n`s1D&GVmb~8(Q|}2Z))A(L+ovPetF$kRQbVjor+1{6QZ5w9>ek zs_P``Ez9)eksx_#09Yt&>z zl!>vXg|PSYqR*g8rTzYc`)?WEI4fsi-t|?NrK>g^of~>Vr#+*o>Hx8r?bl%N0<@E+ z{g`o>CNyv5*pEE*pMT3VSdS{$e7pe``*4F=F!GDP@_%fairiTV;d7cEqkNEKQP4#4 zgtqI*<0%{2q`p1XX5jclc4~p(w`2&{yYIZS{g;3BuWmp4vp=^z&;jN3>&xw{moI%_ zIX*=ePZ(+8kxVaO1G4z&!^Rx>n~y3Y6xi(;)qs7rhjQh$c!h*^D8WIJwplijlW!`r zfVk~QaRirmZdw3^8N`f*P*J7FYUVsat9~<~G8uzC z*q5^Gln>cJgcQQ`5jMyN(?>^6{^E8zOgb1cVN>ppz>pW6Y&^}Tu@reKE9cwPZA;c2 zPA15O;~T7yf>+|C!{6Sm{>}zOiF`PKEY?0~kU=MCe0%dFI=)E)nZHREHpUQ<<07#A z?x)@vc=gnulWfQvGN_@a`<1p(A)PasWT=XPv6c zHXn3FXPebNBLKZxk)e3;J7BRd6qcp|LyY*Ai#H(}tmy}QM{gv0V?}=yJ6NyTtzSIY zhkRn}{#0`8>J2!=!?7zmgprxo^#aOD?hE1|+s&g_-Ew7318hc|rW#xZVG%{SGsZh-`d{$@Wbuw z@jGf{LyLWy5o*tv4_`)zfTwXWk|zIwb4!555_kE4#ts=_NGvEs-fHt6=u^>n4tX{d zeh(2(Tg7G=gWW3Ew_%BeX~Btn)oVPYlr4(_AB8li0It_tph$OXuhr6ye`Wxo1c~~= zb)aP23bd1&zm@Ib+8Y4;M_n-kCp(B;OJi?ft43Z{E_vi37e2Uq7EOMr6VKZKDka%* zm-QJw;&cei5d^uPL5NnlPt?&WKL30Xdp@0HUR}J}KKsG%Z=Zku1HY!+$1O2H%SjjC zfVG9lliau4E2&VO8xhtQyVL)yL8$OY^xFnOjZYqaNqaXexnyL#VDl`~^>;PzJm;{X z*x@8uvXGQnC*Y#RDs*D&TJPs-9L@1se3b+H6aT_0gNFUWSZggBY}y+Lk8u?n1NNzm z4`_njC)Bj70rO7poo@FZKi!@@dAgnW;i0r~eFp;1{k!bT6CcKEuO|_7JD?AmcIS{j zwiV8EGR5*lbs1qH=v#TKY%)HI{u)3>YW_W)AIvRW>}a`_n=inFtfAB=!8W8{ujwDC zE+DUH5sCbVEF`)T*2p67K%`t&2{LM$k7;VlD6S|LSk}%q*;>r4v_7u_4ex|)?I#;Rq*>ANT z0KKyRF8kiaE^H7$*YajtaM=fA2l&=GX$&3%P1CZdyQ(sJDaHQ4j914K{+4l=1N|GP z*fHCk?I7Qz64d<5g+gfIn=<8{ZKZSw(Cl@H*KrK3`D*?aS;C=tDC!iV zClSmzik=x0^D6NQ*6_pYWXv&l?|i}54zot*X}1ygN~hh^&1v86RKRvOb06VbR2JmwJ?J-Pm% z^FP{9qGnNt1{RZ-azI7=bJ6IjC*x2Ug^7Fz9s^Ytpc{nk^Bi8lb=nVws4hFrYk!)q z^4ibZMr?9%#y$2@sJ!{yUn=RN?Fu9ZXFCNWw_IbCRvwiQt7^+^Sjz^-m6Wj+^JRn0 z673M#@$l94^dDoV$EkDp=zDnhm&}Ol{v-a#F8`&M{SWgd*V8D$uTL@N zO{zk3gA(3)@YoN~sHme(=yi?~8?D89$lHDF@Qt^og$p}qXa5{VLmui-gM2X7WF<&^ z)VNmIo&i4pmJIw}V{TWmdn9Zi2c4Jz$m(|TVMD`k#N577*K3{i<=-|#8ygZ&pw|%w z%GD*u@mI$A+>W*A=-A(WK0Z12P5tdMc5*`zkzB+6jbHzF+vlHrx_$7`x3+r^&NRI= z`_xZ7q;Yh7#+F#(RTrnP)uB&q@k92yL)&NREPQN>onZ>#L;7(A=)_6?+wWY5FnDYq zLbZ*&b;Z94)MYzs1{-HliP~+z=m}!7zTf}9ni1-y1n|ZA3$}? z*gI(b^mPdFAMv1u&M$m(RDF1)Im4X7r{mL;?dh{;+lL>0xSgHd+b%CIwy$2i++J}b zStO_2cMx1%$|j%Fs-om?!fQ;>i&siMk{xrE4SRxf|2Tz*2zqWYU?dw+%ul!D3v)x` z!|Sq+{h;iT`rq?L$$l;|!KTE}#LlV9Jf)P^d1D_OD{$eRG}Ms+4e^mQWtJ!1nG1N_ zGB8<@tjOW{D*7R=UUY6DakDvGwdldtf z0oG6M^GVvk&A7%6pmEI~{YedLThmUPTUXvN0&xFJW;dP(0REGD13(02F?7lG0pRR( zEAJejT*2K^i(GwFoT$#v|Dqh9cu^Hgm9%!DMq3LcV&;mvLT2FK_xDD3?EAF<1JQ!?Q2i+c! 
z!=gbMy6*2oxwpZOwm&fKk1fTg{##v`E%ga~C4g9DTvhTP)S(AA-W(uTs2e(OJ9TjP zWRora1|qb|k90D^!T$lTqQ7^%-Pgh5T#M2NAH2W)(l7kN_MiR2&u89)r)kN- zPFc``L5eTb(f0@RK5$8PsK7HuQkGY$r-g$o8^B{N>a^Q7{FGn(_Rh z$3n$cLaf!cE5i*EW#o})@}?|3mU#2efvEUQDO%!J7&Y3C&`GFuiQb{&Efw@C4llH>$;dz2$wF)1}nOKgvP^b+XJo%{*3;@_{ zijO!c&qhLZHfC%Wi_W;x<2r5bP-KSBeb#FNd}`-}-Z<7@ zG{{`z280f|eHj7m<*VgQFisHL7`L&npOoeJXVDeUrh~0><7iC9WpGW4ojD11bMcvD zmIoi>1lo2e`Sc{va>!d3X@$PY7#(E`c+;g!-uTWBA@t2ZWprBkx;V#QZ0i2&`kEig z*n!Y)YH{0UkrOdc*e&v+h|FjK1r2{fp3q!vwwXXrjm&SoN)H&uINhibg?$O!y z=<$2oyB~bCJ$&b>#PLmkeBxNqy~g}NxopdTR$cPSn6`kNxn*dx0m@!K3+(S4+20IW z%PsT+-TIrp^6>x;9~yU~Ll&@|DP9BmV{Pv;XjCY5XabXPKW+-dWD6A6TgT20n*U~6 zY)GncB6=uiJ>zPmGIr+9cKI$Qm05v)zr!{qtF%4AzjkWhm`;_O~ z4?q3E_Qe;U`=SnqmAtZ!oN>+r0G8lSUZHH&$YWHi>=yfhZ;Zzv*kYmB%yo#C6-=qmo=3Ms##i(k%Ff_f zgBN^VDDZ^|+Q8TP1U0nW5MbZ#bwvB-d-opXCjg#4(LS07N}*SObJ0)sBO~8HMwpxG zBl2AA)I`_dHv9s1M;|6FdhE8`R0jwdze(Q&nr<&BQJHYJ&a)@|l%F|M{pB*S=f53# z90)AF`~92qL&=_qt!dBSHpBfutQ;4$E5Eut_xZrp#pQN>`K3<9auW*zFW+{JxfdbC zIWdL*x?SwcGXDZ0opl3jqM75m?6jc^zmF%vxlUW~QJXQnrnL7QZC{gsnL86j$Gd6a zjSfS{&!{QmFy&zv5sl+t*;L=e5{c6)`VeS`FHmw_U2){t3O#$j*xfX~fh9n#Ws=wt&rts_jMZowxU6F=BJRw5TY+P-xKuz2=i|PWO7< zI%@6>=y4f}6=JUv4VvtsOWbzU=x`sb5}@hCr;fYyyX~u32qq74>9GcWA185StW|2c z05X~{ae%t{CEs&9!0{i}_>wryh!OmymjiLjDgM6h zw#I@R#EB1bjS^HFb@xT)UjQWS$t`E`6&UsjQcgcb=gmBU@OcUfvd)BV$aEeV{bhh8 zH@Yo((HmM<;@>Ius1;dO+BYsyBmRoZ?s(^zLr+7-W%HB6-*P9!p;dRF`romb_8#&N za?#K>`2$VOe%W%=rvt|y*RRdzR<)^Bb?b5ehSmqynp5_Rb;G3M>Y)u-&jCmd*Wrie z<>wpQ+-Wzll6=n13T%tVZ1_`GGAg-0gu^F<8YM8TAat}*;UhkRZW>?~Tf&64aa=bQ zFt1ns$R_J`DbVH6DqvKVh)KuN0G^oE`0{>61#SqTEAdG}*PFNego>~=#!%z_sk-%5 ztk98l3}uWpXyc2oW2r8yJ@mw#ac@>=n~oIQT=z9t69sP@i4i;~onTukC(+6SnF$91 zXjECezVQw{bUIJ6BxHiKuD-@igHYts*VnInzHYzaF>1&sIWu$YZQCq-%}d_^%AG7k z|I$M@Wz7ar{-N73AVrVcN6wAKt8oJnQhaB8`Bnai5+B^03-YD5duL~M>#G;fx9|Vv zuWy&HUTn{vz2`NDE~3lcoO4X20HKcuJ5{J%lgjj|8r9OhpS?K0im;CNLZc0v#1%@1k7!}tSt%!8onXX3Ac z#?Q6#^UI8ctNPOB1>YW5ft$8R$M?2p@4dIZ`|dlwsrvHOYqy=yugKT={oNCtYe^+~ zcvAp;TvM}N&kGy5UA(zc@8t_w`Hufs?)ODJZjvW<03`burrtTJ9{|8e*S*fz@|sT557v}%{kQmopKC4rInH7u z*CN$F@~PJg33NL-!1_s@Ip`bR@^A2)FQnkO{z6Vl3}OQb{ova++WkBJ`U`F1iNWZx z;fSU|j^Z0-iLeaZX_>&AOV)bHXDI2f_4h*pmLnjKOynU0dhCk4ZqG4Ck}w5w;b?DJ z*cdz0mIoyn4>R6cYd=2v6MyPw^8f%(!j@1ZWDfep|dV8Xp$BuojbWq6L@fvr-S?(Hb6D`Imn-$o_H~0ap!42UN`?2 z{_~&Te({(7()O(nKiJ+}yxm@%zuaEFeCcn9@syH*E)hQ$voeX9%(20VB1Ez|6FB_@ zo##yhJhri8wufDxP=;N3zX<^yjsp!WpvZ~1f^29~)!cuRgIzEbI8neZ6$z#6oGMGE z?BWSW{5w58RXYnaJQ?%E2GiegqM=Uvz2Etr?Qj0AzqS3VfAz1nQ^of21izIk|gP`X;0}F6GdmTFjVwF<-F5Z#K_l_q? 
zgnL=orcWEHGM}bZf%&N1U7dYt4|rN-f7TK))evO7Qz!0LtM)$N7(~i^!oIvtICz@- zH9f?R-zM#YR%}X~=xcb9O`8to;>GyL9Y9RnpTR|abY;9}9MDdiFP0$pE)Rq`O12r> z;~YfLfY+xIF#+tG=ZX-$IEg0`i3bSuRLp2L%rRoDs(vDwf?33)Smdn-83%y#;zM8T zfd{78YFqqdZRCYhW!NOIfeUT=6h79vf2=2QfoBInT3$-kE=0^z{|FU(lPrrdzyKAv zU5PaSg*&>IZ(JP(Mdw`gF^&ZG(FsVIrNAds9+&!W67w~0a>u#iMS^F>n3VFW6WG^8 z#~6`;MQ*VE1zHxcmRofD@BX0KB=lWu3#ebs+ce!4-eM|P%c7E^Rc}Cb3_zTUzUb=p zT(NhX;%GTEDhp}aEK8}!jmw~`kJh;;*z1nHI@0U-hqOE#!)BOq!p*z;_n&MZeCK=H z<9FWK?mW1sUgG!DC0k?R4SR(0hvOD6TFcv%gp*$DQfyiyrX*B+1dOA=2aO|^kfnS* zUV(iq*PJx_J#rg^h|JUQ>zyoXfYoO)*2>!M^%U#CI^7)r}`xE1tdF2yu$#?9t&iHsRZh%RSh3lB7^~+j@Dbyvy z-&RbS<1?z?GGEyjaGbaOA!nbCNDgb9ESeU59urylAPgS+dK_qO$uCT>Dxp2EpcT?o z;0T={Xw3!GW>%a)~U#?e1NlDVu~-a8&NrY^MNnCMdWxpyLaC= z{~tVj0B{=F!@RH$xrU z#Ty$-hH_uPA%T}q_FnrV=GtqeE1fHyzvAs_^r7~&62SfmiO8}%^O5c2HUZq;#+cl= z^v~pt;LKO{DC~E@bw2{+omQRYOW_eY=Uvlf{1P|zF{F-R)(e=F_;;P7XvaP>Vk>g8 zhoatl@Zm?vv}$OXPfGdLYxC3A@?ulNkP`u_rwmEjiifq6@!`E6GAB*MDJRD6tQqIf zMRw?$C-|HjryXdW?Q{C2`64fXu#_Ep^_%tUGbDV(j@6ci*57S?ql9WuS=)1A4?5yL zQ^e0WF!$1rNv(@))wQPed@AR%O>dMSbFXOt$}M{b9=9%(uI~Rp#<}o29P+vnU4hsF zWKRd}qLVR304n|fzDF-L$Xg;6N5qffyDNSC4W^8#zyY6i#V6u4blQ#v+2(Z} zKMFP4+s@MMI7TMmc>BK@C-k)`w+jsqM#|9P8#gJU&qRj9GWvnG zFE=(KD{ZZ)bK-_?tuNp=QP}p(>9L+W#!8j96bVsMJHCn@k1e~>=x<_aUZDEKHUtx` zseYlGQrIpqr|(c`lF-!;`$L?LqyyysqW+PJm^-ZzVVEdZO? zkHA-{?V{B2)5d?h*qya^z<|D3Fst#kN%??T19~V{c^A#&tEp#i^e`F9M!TYA1Y~hvYZ!RylkAM5$Z@>GyA8$_|Jl&o=d#I)p(;nYF_Fh*)++X<2H>fQK zeVD|zzddkeJ7&_xC;rRvTE`BX|4B8&-$9iHB8O@U*a zy>=o)wHo0y-g1k#nF3BuFzCfr5|)qHrz);1jGYuEJ#086X4e zi?&r^bK!DzvZ3%dL*rMwfx~*!9pt`Avche8ZUY6j>`!X{kVD6|JnT`qGQyk-YqtQh zYCr;X;0m+tK!P`b6E`f#7*m@P@g=!JJ#B%#9-uEKk`FL%e7*@56AW}8drfTH`NaZ5 zK2R}~D1vIV2fBGfuTm_n{-fLp;9}6q4|Kh4upQ;R>7+FMhYi7FH*K6q0F;kp1K(71 zpNRDA>|{H;ch5JoPtT4uxvu=m(VzL5KfC>4?|l0^+u8Ba_QNl}*j~K)%D-vP z1Z>TCWLf%WdiX|%8+*}8yOZ=eR%-X+tO;I%3@&<-FUSTC^6)lTiQ>S997lsoAv^i`}vTG8%KIGS!0UYNcwRq&He`n?DVmm+2Q?T6lKR+in$l_c7FSq~UpZ)Xg z@BZrF-@f?a7u&O^kGBs#_;7o0|KWD;-hK0+rx)~mJDxaELq-;6$4NS42PkN2L9Ow( z2ODS}Ao5n+%FzK5wjBm+O3{NKv2Wha!_y-!3$*OhCmRN0B`M|)k8(GLCi=mr5Bq@F z$r}yYB@1Bu!00{@0b>hfX;iT-0U!G@9~)qCpS#4t2})kM!}#oB=MS!n*fjdsa~2l! 
z&@NfF2b&SXn#BeTdTWVn({@gOfu|h{stsA@X%7NEv2iX+6;gK7fVR8K#IC|rw2YzH z&XbOrgHuR&kRc)Zr(7+TL%mJPSD)Dlh=C(ATS^|S%qE(Z4zEe;i;!O3CrW^uWB1i4_FoSyM; zHUtkqYoC?Cw?5aEqHod%LdUTsZHyDi!Tr){IWa>dQ4A`1j+d<)NS?ZRzXm*R7n3H> zTN%Lu|nc7&FQ$z{@cM)ZG8>1p8g>u5hW(pDP@OgT62(hRY0*5a_D1^ zS&~qeRX*4T^$!<{`GPcKiod%$F+RKhXnXel2ivpvKG=@D&a*ZkOdDj@D*Ku>-~H!> zC*XaCMJ`VU&!$-8H&{oc)?;;o6Vy&I7mi;UUO-#?I30 zeU~(VYxHElSx!Gdc?h0oHLn7PL`#r&&~yhmgW^WG4SxuS`WtTH-`Nh#x=V~F{}rN} zl@s&c?=tWGO#u>PQYZa*~{QmaY4?pqi$$ha}W@x8ldao5V8QdSX@lrHQz>;6* zSsvr9+dy4o3KrujkoDw^E|&r2sB(XyNx4Nr=XTZ&%0*wmiF@n_KKlXq*sp;re94OJ z3n@ zC67a}w;$6K3W-(9_%_SA5wBuF{BHy_f*KT4rZm>(HF9g7EP`n7eOlXuysk%2`~(j9 zn`gzrzo)4}19q0JfcNE=TL{m;DI4f^@}Ot#&nlyrHTKY+HIEEv+AnyBLB(sat;@*E z#lC$kA}6}wPktEBX-o63?XV*QKG)UX4c&i0+q?HI(6W-Z-U(s3&UXmmFP&o@IOOYk z>p95#A>-27zINIH@zvN?i~ZsuAt?4_GoJHY4{hL35_=NE(81s3302noT-*6ufwKzV z1f^8tB63QP#sQH2g1h=H=t1A9u#%}c?Tbf6cq3%3&bXU0l8_s_R)~(^C}Riq=Rp*# zu65G(#~+~Wo#VZ7A1}JGz=F~bD_HJ|r|j5ai$C=*JMjC^A8rhqM)WF(%CTMT^7Wfx zU!Zk)y!aS^&&g@(`4<{KISx?Yk44%-mwq043wrKFHoAzZF~+wJAo3a?zSh&J^#d8l z$aRem1MNPqF?rh?ypJn1Dd%_^BHDzMUB;OI@L>gbspr~P^X+pbG7FjOj{&_4R4B*1 zjgGIGc*u_~mkJXcro&;vdpm4urN@ z$>YHd)Dut2ojcB>`=QTGxg=~$-=c}Njla{gQ`>cNaGebs!EzvVkuTxtpyKCv;VuYAs4eci`YYCL3o@kV{2 zKG&?%`G!Spr}yt|-}>;|+q0+7M1QrtdVL{0u|ATxJL>N<^`rK@>O0O!QI(sybX2@i$605PN8soX-3OrsVpPZZC zUJETc`n!$P^wHL3a@k&Eo=^WAufR98g^`E@IqAFcU_iC|=NC#2P~rul9d++t@{MIpxyzRoLp6ezvPm3pFWV8m;_>bF_cZw6X-W0*_O*IKiGwQzHn(e`Az?~3~;suBFW47jO2N^d5S4ZcY%(Rtg?kg4Og%`6e+u3bqEPHwm6`06bMJ$Tz+( zopXZj--MM;CI;V5#{sl>6BIKBq#ch4fC7VT-+2=Kkz~LCA4#<-0Gu-83G$lUpwzU7 zQ1MU(Mc#e@-XDS*oIWTr7=oSh@0ip8+-o$p9vf1-X{k12#e}$xUK^ z6&nnc97NsOc!gB;nG=0bao0eF#caiXnC3S~Dj8@v0F-6m?7%+(!Kkz_ATqp&!qTa5 zDpGP^`)3rG7>qNI0b-H(#qVzb{s{*Qoeg`y%!Ne|pcq zVSlNyd#*{&sQaTo{>Qez{@4E6_7i{nC$VZnU$>f8s}vwIKxx|Co4^X@ z$u09w|MEsH{4)B<3(Las!g3PsDT5X-Z76qe>#SEXE(>44EPZ#Yf(*5NUpWyNLzZsEl~C z7BT*@Pu5999e~0m9`bOqk2G{_c=|FB8_CmFdO!(?uhpr|;?QGJd1HWmqt8oj^tfU$E%qy)Sb_W>yh_ZY zT*OCNz;?GG0_*phM;jXh(IV7+kqt}M5NP3J?Ou_ymAFa$AAZHoF=X8XPaNnU zR}5o4q3i-B;shOI6@!`{`za?yPW2=Ds*YaIQRvMJAF}X4a+RXBbS@Bk(*GW->>n7} z!s}D`83%umnxz~2( z;N7;$n{CAAjhy#|B=R@%Rn2&Y2sk@$SjUnWEJq(z)f#L8tn9)e6C2Uxx~Y-~g6Gfd zznNBz0pCFLaejf@{BOHDMR5T*WyWXxT~7ykZiRjS9rCX?{XA7GcJ4>j6B{tdVdgN} zDQP?ze;`lhdTuHXjK@u5Hoxa~wY_}t<@V_(zq5Vu<)@01#--L=j&B^-b76t;g_OjG zo9l1*4PHwQFuobr^kW+MP_o@hHix`%j2C*k=4;XQ9wE+WzA$gyu9S>^hMx*_0CB($ z=Gq!taIBZq^vOI^o>%!gD9Vc--&C^9A$Jc@(v%C>(1Rgfv4!00llsYo=rw_!Peeh_%;<1W-vr>^q8q z2=HD+_9Vt-*PtO+veAmakHz#&rL{lMaSNnHewn`QAc0D5eBmaj4zp>#ZvK%U0{*P{ zmOoSWGq>9fF#kfsw;@2l3m{LRKF&Ifec+OC_3Gkcd;R*Aju$$odc{Kk7rvOn_=l`x zo(CW*{@5jP4tascQXT(718=QoHLg6b)XYUY>gEw7gC~7QT*kM+l;cy=GFI4sjWN9f z>koXxf8{dL^Frv}mtYApNv0tOul{ZPuAR?eYjW?s*4(P#ryc)72Vm=s_8vf90V-Nt zFpuUIJbpDbwj-6jP1>{1VI9GCY{Ot=WbDI`H4uAfcQ&B+v5oH=i=6+(ui1a}on=}c z44Rd7L6Ufy&h)LHGIZ3VpySYR7)#C{eQ{3JoPYEhmVWCL-&C|b@neVMp7rnUQT&bG z=xAvD=e3BK+_;xgkiXu6qrnjU2+n3!5@V1eTd{_b1oS9CzqFk7zA=f#X1v>_B(h(*4LA zbfPPD=F{-qP(D}rTCQ>K*uz(A)!Vn66Yllh1TjInJXYvKWT2Y_<8|h?W!Uf73v4&- zu{~s+1JFaDZ1f-Yfyfwe>_;3!Gkq6Y3Vc%pABiX6^|R^d`>f~br^#!%DX0;GwDVi& z{*pi+$4rp8A@k5*m6VtV-7h8AcmR9AtDWfxh9B!j9&S~eOXBAh-j098z12h?`?i)J zT-yVfM*jC6W}XXn|234*kq6*khnmG%hQM)694t@q&N}KD7IQJpc+t^k?zcIfhpp~2 zHR*$k{}wsyoBfx$41Xw`x}Qd$0hJra`j`en1>t_ zll%C?uqZyu;gjRH<-`xa{m)~m^_b9Lzl|5ad?2n|FCcuZdHCbC(|Xj(4K9$X6M`_4 znb5ND9w~=jvFA&6ebaH0QkP5-qt7?!u&X?v&FgRXpTIW2*2b5t0q&EMt@v9-kz#22 zSb8KU_~1&Yd4XJ)Jf#cVKMcRjPhy$1gB;%MPt@0xkB@T`k8!5)c2eW<NL zvfaP`Xgjuk4l&5M z_c?^;La!m=AzVi%yp{4pp~rSvB{#(@KRDmRv-F3WS;wFi;HT$5 zc+us?Iv|?0{}eN?p)M;ES>{M!jq|`S{pXwZ+Sd{fe1O7gYrb)_`cNL@)o)iYpZdVZ 
zKh_n#ou4(-^TfL4p=ip}9M>H?nxtE?b9@ATNC7o6%FTek(g*)KANza^vCJC&9Z%K# z&3^`0jSD(IdsMX{M|mtz#CZFn#|w4Z(%;x{z-}6Bb4!rB1RUnKLHltC zC97;0J`Q=+0jlD|r0=*Ec!LMr?Y+eTT-R@X*C2yn&>!j<5Z;`SQEvNhp!cBZ82$(t z{x{srb&YHQ{Y<<1O*CA1m0_b!Igp%-I>ffk-pL)b(22jww}ORe!ZD#m>^a==o>4{E9uf=npLJ9}s0^t?5(0tbSPk{Y388@cza2kiGP;0efmgEg8tl+|9h_M#RzS+4kVk)9u}7 zA8eBmr59t2Gr7F#!CV-_V60nuHzp-(#nB&HhMVQ^yuEeN>eX?52}VXWLb!ty21(__`%Nd0l`! z$IbW-^gK$f>+^)N&`3kT$a@?Gaw4o~E+;jiJAffDdK%{X78q)$0ccj8WbW|m-w)}^ zeugA_^M{Y0ZjY3XwXPf;aiIlfXp}R5iq~TToaf=xa}#i00`0JQ#H;F~|2jSeOI#b$ zzNhOoCJ!>H&32Z32N3S+zahW+qZgY2E#NR;X;!#Fq2V_AL%5A!EgBmhi?V$lZ(45h z`H*SvQRF8$jrXg|yaC|q;`MfMPP*_z!`_xDUS6MxT_u2QjbR_J=YBzD&%;{JR3FE3 z;K06Nr)5sTk-P9MU>i#2wCk4jQ+eCyoLH&cY3iKwuVp7sJ7p3V#MZjy8WV`j36UY` zj{P2*HZ=5$@`__$fJ1$W2q^fgj_lD>U-6L{8GCwvx=cRWy8nj!>bHXlqfzAcW}9$8 z2dh4yTlF^t6@2te|Isn_T?b>J!xI0(+~XuzgXHx+=nxyNC|<0 zJX(qL;wUy^KiQA=lmCO{FHDfv1OXDm5#YQAPy)oTA(E0zB{Vsl+qviaeP31g+WX9q zOj75p-M6}Rb#-;G%U)Y_)XaWSXdZ%+mpG=z(x=bM8m3)a?}Ch3@LFEzcKgwQ9_t1M zu;e&Fj_oUY_~Hu2seaDk4PE=^cH9=f;52l;aShdVmwXI4n z_IG&~-0FT+8-79+d4FvD1fELlB0X+3KHf(w^_OO--jmf2JsYs!WU!0dS=*xg%2ep3 zM(XX$s=|dri;d`C@Kme6l7$@6cpj=`e0Zojn9_e)u7r0M$U5#f1}AtcbvV(C^@dckdJYzyoEhD}LPECtb2ym!zU{Tk~rfZKKu)aDDOF)+cspOf4t+=CKTP zg5UZH1`ZxlXg$65DyR|HJeF=hlHSp|@lY4eyxG+3)#ir)-0wQ4m42;b=S^gE#TfCT zU$d|5vG~KraxOm*M8Fn59-7YdD&BNd%GS0uuKaNLV`&l(}vyD7DItcEA0qK zUGR!O1+!eKW$o3G z`T+tr=9oBL*X1o#zkoU6Qf6<)W{DwMJUZcH%a1ZEt#UE2j_)IX3)TO|oBA|RX55gG zqx+crKE5jDEU?A?E`J;B(>MPddm$0*$!Wb@8Cm_S&{T{ z0~sG>Fo-g@e1pJU5RamR8w+?QOU8wKzBMaiRb5?Njk9~FJ9=xna`r=kEnTK%-=3kW&cS9 zYm$jfKAVf%TYf_p;yUPD@k-zQ@qXY0+$haMFz1$PR5ceD!`7xaNjyEqgQ_~DN(Z!_ zUj7h{>R9-&$$ZM^7g?uU6wkvj1B@S-l==#P!Xp_;yM<_B)NQwO5wVWT<$przvXKB-V=Xr#1*%uEPq;WiVSVzAD>!)kg$NH)NJ)Hi78=Ufw&<01 zo}=wm!c>>J9sQ|Q92E)=hIE}TcG{l8qg|i*wzzn!-Y0?N8Jn;by6UIQEre$$hzK5i zpc4Jm)4lNvAIdBKIW>)dybd&UF-Q}@GK&1ebHlvSCxa7u_HKG0Iz=AiXS*uRiHOG0 z?3ypoHV~C)`SXol*R@9JE3B!Te(?_Z06u9s_EA7IV%2jh0bB^>!|4v=MIuDWN5!nS zDpO}wKce`>p)8E!?!?DS>0_LS!9nVU?kEi>Y)Ew3v|Gz$D`@%(>Coo_!F~4V*F$Z;3cU3 zxBs~vqe0tcrF`qkjQf~$AaHbf7Z%CFkJ;(Hd7E( zM+LId;CPuhTDq|&f@(aa(8A9&iQDk7tsn*kQq{56c+cxKR=u+fQlo`uGODBZ=%`T@ zf5!~gEIML*@t}@ErTvY=br8Fezg;8Lf6;)iuL4I^n8%D2z}J}aKfH9 z0jbCuYJS>ZD|S41_+&hO`gEKSN4OYAvuONtI~c&|va>CtKHI)3xy$5jMDuRw((H?M zcc#+l(&c`mM|6d!Yx&3+?9Ko9GFfmoWg#c#QXg>rp8rx~nB^KDvoixK3#fNz*Rj(M z?3Z`J<2uJ9^md6!c9$M63t;))zcG)|CHk11`32~9&W+R~ZHg`~&d2rT#khQX?FRt- z5O(olZV>i8e;f$m6aUWmgs=Ux&F2idvSzBi;-X1huNLjQ$%C6=Kq=yF)6!P;^H{Pv zoxTIiK;8Of{x!;6n_N)pY?VpNUL9vua7$z`7q;bhK##w1_33@>hQHulv|UF$vECc~ zT|e@Tz^(qdCI>uJ4QGEfi+k%^RzsUt(+~|{jz@Kr;SO6sS!^N5>VV&9?y{8?NRk;r!I;{2p z`YgTS6TOPof0V0D@uSDQDhq<|QwUYN9UpTLt+Ebyp+@`^KJlm4i2KpU6 z%$t<)2Iw@*xUQ;y8$mRT7vt(8=j=k^g9?qK-wa`&iagU~kgHrRY(_08(!!IY?Ie9{ z7dJ0@$|gmBrR9V3rzP-l&(Rz%lh7|ztm_>ffAC`*sU9EkwJ7?A`Dt@Mp-_=^i#+wL zkI)zROCNe`$~n=3U3U`RcD-9#I8w42=PGA`2N2xP5h{7@Eo z&~mewR_XvrtA_$CkG2|e7ntW(7(6Fw!EVmDXlRw9U&@RpWp0k;?K+ubAh@sezZm12 z-3G+xZEzp_p{7;0%_v8=-p|czU}2&9q7^UHvP|ol@y{Agx!ZJ50T~G8GgLUW0%=Ma z-Ah;tB~=^O_IYceebGLQE#Zaj@)u#UihQVF{qAbTB;I`J--_o(^3@n8C&$JC_sjqF z)%g1_{(gM$^rP|M!K1-jhV9?(e|+FrvG_)H+)GnbhN+IkkEw^(b*;}+6*AoqpzNtW zF@NWSTHV(>{SOx>t_PpA59u?W(?cDT@KIem!TnzZ)`yQNu5?c z->j|}DZk}>%OZOaYE1hrUdZwF4^^-0)zG9)gyIvO+56JQn*gy#9x$-kUC;XEZ{7>{ z==g9PAM=HQ^uKu{KDx>_=%x{4-^*J7xUSB4Uf=NM0_k-veq3h|L$ABooq`Uo#T#Gs zv2*JU9{AC}AA$ga+jdWkAU=vyxR$LjmDh;EZCaqu0s1^ZuzdqS1Tg>x1cxqwlNzNf zGYG+_p$~u*JgMj#M62u+UgVf$l7pzmn4eTQRmYU~1(^-ZG7Cg9m3HU4+-bq&vrpf! 
zhkvIGY;|dSrOp?W$nW!o%|>=M+Nx^09wgN_7Laf4FqyWsTiEMA0ec5p|2P1}MWl|pw*&`a!+`6O>(W&zFXJdth$H>7`fBfWu!lH6VBhg3 zX&=C@`n3I3&4((O4%m&w1bPT zd<#rJs44RLTnh^#F+EaBW$Vrp+3^imghND24*Zo$M?bmkg{bTUjpTEfKsf^_&gTYc zK<%os4N@w-^)J5Zi$3(vFd3)p6aO`Wb>sp<_@FnRj0gCc7qNr*cFfRvMjmiKrBceh z$$=K!^rPiE4O(m{C8u;Xb3=Pnb)O@yV73~{{Q62v# zWn7Z-6C~hkDp4UBq8F}xObnL)=Gbf=0%qt;_=q+&-3Q6mhq5k{x|LM4H#PsD0S|jP zKD!nWEwYshzv!_5o1+&SS#;%r40CfU`lsGS@o499E|2fZ%(&7;Fje zZ36IsrtyGHb_W_KCtc8#zo9@{V3<^})f+ImbY#w^+oySIkZmgOn5lej z9vY*3Xfuw6j5Fyj*X~EboKFq=h@I zk4(rMf2$nXP2WuSQVZNdw>RvU_u&77KHsipEy8P-zUEkbZB?=zd?_lPT#J^I`b+sI zbK}j6ugBM4{%(Br<^L9q=Cb0DA8^sh0NE-gV{xuE#09G8zdf9+%mI%_s;^eo` zWs<%DvM;A$j}!tv>JZ%4g)y#uvFDR|XNo0f+BZIY^2B?{(|h-DOqr@q^pVAw>+l8kj$E0}fLg<^WdUChi9oS24-_ku*-5$t323w_k2e`l}W36`2r0OrlG_a@ex?=uI zCy$wOJQV7B5%n7aeC{XuR69g{Z=t`Hrx@ zQQ`KUf35=}550B){ET^@qXo+T1-<^Z&K6Oy`mq7^Cu5toH4hp$ydoQByLz_%&3j)& z?;AMo?A;7|b=XU|kp(?tyXnL}*gu>p+$xnKl&PmkUBB_J5 z+4xzY<87c>ue#{bGPkk=@1_T+U*}8#%3I!R`~d88m0bI=?ZvTVfM;AKoL3mH4p^`2 zeI!`}fY#l(1lT!t{&e06_59b7_Tb)4YyA_XU#&;+n|Okfw+r;2dh86{>Wld{&8Gk1 z-Tmg`dD^yVi#LldG+^`gyXxygQKm(L3`Ln8#Anw%S2c#H!k;qX3l9mY7+{Ey9_Gj>n*doq=?^*>MI@2|@5ut0D(Ihn zrOu30wR`=niHz>p6aL{0Z0v7TXmoh5rh1{9mg}Et&07abKG>6g>WsT-?{cmIRh?Kk z4l19Ud+1VrCVpKGR66~j?Tc*f5)XNO`F5|^bjn#?bOE=IG^TG{x8gp4UnehJh*hVj zXO4&GZ{Ci-{ky*z7grbK(S!Tqgl!Q_%*=Hy-W=dB^5Bg}5BN;m;9h9P>pM5&DanO8JQg{EVN7{_w+($FpZoOwT9Gj><3&ws#K40+p}nug`nwclv@~aaP5Ra@)pqZ72G` zd}uqFxgq^){;RM1(EbI01}-$IrxU?RUGiOSm^^Oz0va)(isf%yk8T|dn}$eGnOAx6 zx)yF!9v_`*5{t*l;duS}<#_z~@%ZV_esBEzpZ~#lbaH>3Utf*aZ(fee%gb?jlW*kT z-13co@l}97K0XmoCb8uDw_eZ%dn!{GKpVOrJ%M{17Np!8yn}<9e-p3L=|=^<(5au{ zt@*8l-)`apxcwGi!GNT}uKUyI-honw8p+$98b21B9uvUbBE`lMYg*Ot+Rn@C3)gdz z_Qh9UkN^2U{%-u$FMct;_~MIk@7}%fN*Mv72T3Ni@+Y4}NJZInsfJn6j#QMKV9-z`Qdb zZ-AL^@=wYvYKYKy`k?~J_0w`ki8nmRK)?7o4!YKhn)@lP8WKQ;e+qGj`BW05fk zhc#a0lLGOL;56_ChWQ zydrVQg9m!IoTS7>>Up9w{IW5`-z-PtMY-I|_|~sBlK7c#x_X!uV9Q2i)HD8D&tlP~0tDCpu6>>^)MWr$+-&yKmf!30dtuQVV6ugx&o})scJ%9g7Ml8o+M(8YSjk__q50UK(KTgm zTn99t7|!m$a011*%bWh^#@*5nd6TSRPk%pv=1ljq+dFLrl}>wg{d>l*fS-2)+;@Ze zTK?}jqxdoBN~#U>0#<=zf9*4`uC(T}?bJ!;w-(FXC_kSH9kQKh+(|ivP&h!bdl>H46 zLHjMR?e~)3IIA4kJAdi4#BpZ+v{Qch?as6%g3*y3-DKS{oU z#2)@;+FfLcI32#zf;mst_!#!)UynndcTaz4zO1*Fn=>BM+e4J8GJVOP`{n8vXLCSq zpeV%_5WQpn0)FcPz57{poL>Rm-t--3W_>~H0Zg^|COLY`hFsT!CVg1rviK~2WlVN& z7%)d`{ZhX|v-Jn~1%It;(YJN+8|*B<<<|K1c#E&ooJ~q!F**mEo2(EaT3Dze*JFMa-rJo*EUW6{!K$}jrX;ZA$ogZQAjrFTR0==wYRn(~K+ z#@mkjmM7=nix+A9GX6=Qy1#0Z?%V)Hp1bM~^wk)NzsaxT-zzSZ-KL$}cgx>>y#QYE z7p`9c zwcctgKao^bsSiD`YrSm#2a2=k7X9|-mLPnS7iaToIe_&SE;t^K(#ryz*1CW{ffLEw1YWrDN;gU$~;%U5Ad{7uq-u4|xD)&nF7HpS$&mzp&w& z2Hyds%-aB%ete_*O6QXuPhY%xA^$rV&pv!Se)iMfA3yx@4}I+ze@$!Oz@47T5crySn116VdTd>#XB&ImmC`p^qJlrFr0P zk{rI8-*#ceHSoNiYK-s{_Jn;O)_UqZw)qm{c{2gJI^J`xtf9zxKM;Q4k`hB)D}2kT zHrrnDfwyq0?Y|!U`Ct62|D-|XimM@;`ZBPT$;7WlLlkUS4*9&rX(S~4(O#jUjLDMxbKT)28d zRk_W;3<27cwD6Mi0{~7_9(y-{nrpMcV!NgrPMnjAb?cXoO@PEIwMEBfHV|S|-Ar>QYeo|BP#1nqQgeTrEU!D6Q3L@A` z+3Y|6 z^O+me2I7kB#YIeq2MGoR|KVv|YJfwXFax(NYWmZuZL4T77RrmnH(-qg20XxzZhSM* zAxZHAefmKQzRhTHJQ*iy_~WZWm(8KZuQglb@4$`xA3r&fTD}Nx!^x-g$pbVjI*b)O zpg)o>h=R8t7GT>1tuI8-R~^Eo4{%};jL!M3vm-CS0{l~qGQY5EtiUV;pM{lnaaNn^ zYo23Ncv6qad7wd-PFq2bCE6KDkU7hoVC+)I(|6{h)Hk+0R?&|q{qTK4l=--CO0-@& zb?-FR-l&U*7^fQCEz|=H$W&8*gNn_f?=BrE{(G(Ah+a|_t!hrjKOF)ESOe5~VIIG( z8~;7W{4F}?z^hC_Z=SueF{9|S-H4z?p;?v(yme!KSsB?&JZ^m>~G-ma5_-Sw};F8%e#_{JxwEj=1MW_TmxmoW=KBaj?iXZBD1wYl|SJ^9T$cQW8U z&9w~M;E$x>La2P(*?ts%*asObUQQ1-7qXJm<Ik=Mx3d_y!uvNq5Jpa{Fuj-c`#1f(edEnqw%AE z^yBgT`A6gSL?_fI55~3jbv&5Ywn+c=AwS`vy3|pbd`pRc;#-UhHLR)1(0B7lJm|h1 
z^a*6m>ASGCfxsQxz+u^C18hm>dD0#nX#7fjJW`)k_};LuXVYuOYu_=;o+rYUy-ZQ_ zi+B#?8@FV1A|SHX7ySU*JykVUPKz%WtvnSvFly>c&M7SSlYrI~YygoWp$~6Ca0DH>8-sGppNF|0JJDr1_MGe7e5{2OTHM zPR+;ktR=*LvOS+%NEVm@r43)@&HU@N;JW5mVD-gNabUvaTX?g*)0VdxM+GohPF1X$ zz5cQf3=jM?=X$~^_UscIsEUQC(>mWcJF4=aX~6TYQtWPetrIjGHd)V1E=pMf8 z`}@4g{>Jry?KlfCbq@neCFeQWvkK5>>KD7xFZ>_4t_wfN2l!@4%L|{H|2-axpO_UM z=s~*m|Gr_&9^Qa|Pa8lNd3X9S`6P~ZI~NS|K(1OO*gt{u9kr@ITzlHKaJL{sA_<%Gkj5 zlAryL>Z|{;Pk{5oP_qZ8HZ6Vr$tXG19Blu2A4HF~LjXSqrjDlH;;_cGHbDBC+pMM3=n}!~bPMhD76QGQI z?WcD|8q7;}*pBZme}PpKKlfuJ*ZNg?$5txN^)klF4J_eo?PU8mzozdrbR|Rwb`h@U zf>LyuHUna}#-4zF)@^H#$i$DAM=pB6UEQi$dQHFT&8>KIzQGNtoFAv!aa6LsAA(PR zF-+vyBSnWa0@}ZEElF}Bn+xvX=9qpV*q)ciw*s6maSe&~JOI!-!T|ZK4b4Y+$ATrl z{nGy2`=^b4m@mwatl`qv`0^|FKWz(EJ|TzrxorP_hz)%HWp%{I3~KzUPsoaV*kY=8EAQ+dw2{pq zL8)%Q59hK|W7~2U^n9v%?K?B&dOb^v<^)jU4>^54aRJJD-Ls|h$i|L7ZNRH<|umg@&YZ_(}j=k)RdI!TF)ZL zfYsPvPY|;A4x1`7nC12hRe11~`uaxcsXcA{mARyp_+qC(xKjE9I{Zv8pq?^lcR=!b zFXb_<>o>Ni>gaU&uI)hGg!0vr$N1!di*EtcA{CfCg0zJ0B^ExGLP<%qeqEONfEB_Pfw4oP?H zF9o-2p7#RMhBp(SXm0X@3a{#u`hfrXRrQKXsv)F=kLHEf%=U$O-L;L!zEz3lU*2^!n0&7H=%~Ru<%o4X2vIoP-^yU5v{bla1wGR^3i1>uNg)>Xf>b~me uuteZFG-y2mO`r(tZsEX(|-u@qkfjFsBCkb=_000047;B*tDIB1>7*t)kL{ z*OsLsC1uGHSrYkqdVYVsf4%2A*SW6yy1w6Y&L8*r&{y3USx@*Ap)3W)24`O{e{V+z zTYGn^pP!>2>kLa_B^jLdR9_Z@^E}m)#o(g(`rDslIi6uLclDpb$9in{eKrTf*FSyam8oGxuQ4YSrKuY*?M?6II_&R8EdJY_AXw& zezsJ1_kRowVNZV#?;sU>FJDI$4}W)>E7jMR8pLAo+4?%#dwH%*^0%k45?2=bBV5_W z)8Su9BFl^r$=b_d$BJSdVJWOkCpBSx`YL>OESv>|K-kH{wBD$504J#_^4NTxLP3>Ubq4D#>&SLUc_L~vQbNH+U#5_na0{vpJ7%1v{%7JU)@6%q`aTRsul%c%FBD;$@qj0YMq)eDILIVt)f!ve{6@x zV~@owzkI`%Cx?d|J}Sa75~*uK_8mT3@is$5a6fciSChzE(5;*@yz7n zo3*=OzMU?@Yx5G>Evc%MSBQc2cGWieWIAqqHD72|paq4Ormg+kp5r&`-jr+k=z+Cb z$|CJuGd7P%G}Ti^V8`|3)VI0MSf%``aK4om{I2nDH8ttPvHJx#haT7qLXtg0sA>xD zYpNCAdu0a@wo?Z@x$QB3yqjwEzDcy@>QoOqF8~p}m~?v!PO!{KlN}w%K_1Tu<)ZK8 zVC+cM-x_0OsDDbq{9Cdne6vvguHV0){!ve>t0lp>NQC3@Lgo-E+^)Fy^sZEtoBSxov{4nJ z^z8WB_FY8h4CC*ZD^F*_t_fMepQfynzq@o2WLKE|E9*u zb(ht^GNg>dDdRH=F$xzN_-+htEe;-{-0iqIKK^nv%?M8Y{lxn1$7wYA;`{WuVlxo- zNQ)uJf5t|Z`nm=;^dLeuMRP}#4!D>`{3m=?ABMV#StrVraBcKDlh|<`y(QYS*{+ ziVn1U@|uV@E5ZkPi~M?fyG`;D0HII zvpt5P$J5C22y2m&rGZxSmvQ<~;kU1QD)kf~;Xu#+F zD1kQ$0UEB^y=hX8LQO*7_eHM!2Y#!uXXSb6=s_YsQ~sO^n7nDL)LN)O7h(owrX_a) zzbk`Yqh60HI4kQt2Ml1e)h9BHRE|xPwr=;+SA=47y^E`YyHT_L@$HAbN$|zI(r;qN z4k*+RKU$xr4o_>+n_P3d@q53u53FjofncA!@aEsUA=}#4F5glc?yE`DoKrNwGP%m$ zzg-6u?k@h)=F|ew0KHfVXFW*zP(u10LPznL86}-A2Y|IgA{Sv7(SM-kBKF@%lld1? zMcOYp7|QC1(8`@K{O}ek-KsHjQz!-Rk)>!G#|QyWQG4&kz)Se9d9m)*URk()O@}0q zc@LNDj^=-AF9zw-6BiZp+%T|`GW9REq8~bauBCNQZ7J{$$VAdMu(3!QK;n1)9OwLwv;YU>GMDDz@pES>HyN zZF@te%ZQ-RIdNI8gp3G$5oENc4AHu0mu-nmd`e$+Qp`yf_?yUNiNm+iEWw(;d9OH} zx6;TR6^z6L9?y@|!fqtDwA}aH^#X2eUzH_TqzdEg)G?9)6Z1b=Nbt@WK*ifSgvDrV z+`>JSzqD!?*%)`PvRMBRnN9Ye&=it@;al`AB?77VSmCV?HiK#)kg3cZ)2>8eHv?V5 z8>K<}wS0iYz5sNOFjbOo*ldcB7J8j*e;QnbS*HbI+queYA;DJ z3x^pt^e7mS;Qbv8UFc5$VMW#DpBr4osxs z>)MN({u42U#)Y~)1~a6v-R$&k_3I00cTb?qQY#aMdt5VUy*!WZpU|??ZDZrlJ>_JD z5g{lzS@LL5r0Z0Q(CG>uty=bAn&#l(*61P664|! 
zs%6+PRTUC&qr)AC>Qr9nyydO_&L|hLE;g9A1rovgGJ{#B9gS(5jx3fqD*R*LA7Gw4xlnC=E?e z))0j>$5Y0K>nm~X;j_p5j0`~K@MBxY4+Z$slg3oUr47V)tor2TQhc$XY0dE%T_|RM z(3XFagr}vs&-S#b|8J1mM$=qWn72jr5s9D7MMuywec(HUQvnX=?16W~?!r0>t$ zG<>JqXT(g01L8B^f1OE-#C&So@N~vFYRl&k8oQc`Zz{2SUa~lWpz>|u=h6Yx`%JO8i^KPAf!%`fs^7!giupkOT!fyGNZee&SCOPW6 zt`z((yKniSCkWp=7I`S_FKOWFN^WMD9>t8OFCE3sO(Nqxk}ndMkKwalan5!$gZky_ zm+IWlU~rF_E)Q4@^m%=;ojW&Tx=xc^gV% zii~#O=AY={vjh3$w}CkM%fb)BT``C#J=rM2n}VXzC(SEfd@ygbv(_2y#|6s7=N>Ww zz0#?GFlpOOlqh*gzzvpK|J2usF5d;BD&Xe(H369Ub#(zuO>ENSljosdL50$t}kd z5C257ZS)3jY6L!#@Lt2_z$%d88&jZ+GVqIcBjHpJE{JxmBz;hJ#&vx~yR%|{pky8a zm3Qq_JoT)~nRn+VP(E(cI<+nqU(Q<-HX-00y!&1E47Ov@$OsXjoi zM2)6V?aQ$kty)4C=EwvqN+{lE#p8z#ZG_M@FJCV<-@tlZ6$d`_Zh*k=UCnh4Z2V&_ zJC%4(5W*?fOo%V@ey!hL{=H@x^-bmpcwWR4V(t0J!B!ojm<)}hi+ oZ9g9CY@CUIWX-Q928+XcYjyQsgcRJnyqs2dSMCp_9Q{@P4-_#H7XSbN literal 0 HcmV?d00001 diff --git a/data/splits/bench2drive_base_train_val_split.json b/data/splits/bench2drive_base_train_val_split.json new file mode 100644 index 0000000..855d8ed --- /dev/null +++ b/data/splits/bench2drive_base_train_val_split.json @@ -0,0 +1 @@ +{"train": ["v1/PedestrianCrossing_Town13_Route638_Weather14", "v1/BlockedIntersection_Town13_Route616_Weather18", "v1/LaneChange_Town13_Route725_Weather23", "v1/VanillaSignalizedTurnEncounterRedLight_Town13_Route603_Weather15", "v1/VanillaSignalizedTurnEncounterGreenLight_Town15_Route507_Weather13", "v1/ParkedObstacleTwoWays_Town12_Route1166_Weather22", "v1/StaticCutIn_Town15_Route429_Weather13", "v1/ParkingCutIn_Town12_Route1301_Weather14", "v1/StaticCutIn_Town03_Route158_Weather2", "v1/YieldToEmergencyVehicle_Town04_Route207_Weather25", "v1/Accident_Town12_Route956_Weather20", "v1/ParkingCutIn_Town12_Route1313_Weather3", "v1/HighwayExit_Town12_Route1324_Weather0", "v1/CrossingBicycleFlow_Town12_Route1050_Weather10", "v1/ParkingCutIn_Town12_Route762_Weather8", "v1/ParkedObstacle_Town15_Route415_Weather25", "v1/BlockedIntersection_Town15_Route486_Weather18", "v1/TJunction_Town13_Route655_Weather5", "v1/ParkedObstacleTwoWays_Town12_Route1167_Weather23", "v1/ParkingCutIn_Town13_Route1349_Weather10", "v1/SignalizedJunctionLeftTurnEnterFlow_Town12_Route885_Weather14", "v1/OppositeVehicleRunningRedLight_Town12_Route809_Weather3", "v1/ParkedObstacle_Town05_Route262_Weather1", "v1/VehicleTurningRoutePedestrian_Town13_Route680_Weather10", "v1/HazardAtSideLaneTwoWays_Town12_Route1128_Weather10", "v1/NonSignalizedJunctionLeftTurn_Town03_Route153_Weather26", "v1/SignalizedJunctionLeftTurn_Town10HD_Route380_Weather21", "v1/HighwayExit_Town06_Route292_Weather14", "v1/VanillaSignalizedTurnEncounterGreenLight_Town04_Route196_Weather14", "v1/HazardAtSideLane_Town12_Route1512_Weather7", "v1/ParkingCrossingPedestrian_Town12_Route758_Weather3", "v1/HazardAtSideLaneTwoWays_Town12_Route1140_Weather22", "v1/SignalizedJunctionLeftTurnEnterFlow_Town15_Route511_Weather9", "v1/HazardAtSideLaneTwoWays_Town12_Route1129_Weather11", "v1/VehicleTurningRoute_Town13_Route679_Weather3", "v1/InvadingTurn_Town13_Route575_Weather3", "v1/OppositeVehicleRunningRedLight_Town15_Route440_Weather23", "v1/NonSignalizedJunctionRightTurn_Town12_Route1024_Weather10", "v1/HazardAtSideLaneTwoWays_Town12_Route1133_Weather15", "v1/MergerIntoSlowTrafficV2_Town12_Route941_Weather5", "v1/VehicleTurningRoutePedestrian_Town12_Route826_Weather20", "v1/ConstructionObstacle_Town03_Route61_Weather9", "v1/ConstructionObstacleTwoWays_Town12_Route1098_Weather6", "v1/MergerIntoSlowTrafficV2_Town12_Route858_Weather0", "v1/HardBreakRoute_Town02_Route34_Weather8", 
"v1/LaneChange_Town13_Route743_Weather3", "v1/NonSignalizedJunctionLeftTurn_Town12_Route812_Weather12", "v1/VanillaSignalizedTurnEncounterRedLight_Town12_Route874_Weather8", "v1/YieldToEmergencyVehicle_Town13_Route560_Weather14", "v1/ParkingExit_Town13_Route567_Weather21", "v1/HazardAtSideLane_Town15_Route420_Weather3", "v1/ParkedObstacleTwoWays_Town13_Route1334_Weather26", "v1/HighwayExit_Town12_Route841_Weather9", "v1/TJunction_Town12_Route926_Weather8", "v1/HighwayExit_Town12_Route1000_Weather12", "v1/VanillaSignalizedTurnEncounterRedLight_Town10HD_Route388_Weather23", "v1/HighwayExit_Town13_Route705_Weather3", "v1/StaticCutIn_Town15_Route427_Weather11", "v1/StaticCutIn_Town13_Route563_Weather9", "v1/VanillaSignalizedTurnEncounterRedLight_Town13_Route646_Weather22", "v1/AccidentTwoWays_Town12_Route1114_Weather22", "v1/OppositeVehicleRunningRedLight_Town05_Route268_Weather8", "v1/SignalizedJunctionLeftTurnEnterFlow_Town12_Route887_Weather3", "v1/HazardAtSideLane_Town12_Route1535_Weather10", "v1/ParkingCutIn_Town12_Route1304_Weather9", "v1/HighwayExit_Town12_Route937_Weather1", "v1/AccidentTwoWays_Town12_Route1110_Weather18", "v1/InterurbanAdvancedActorFlow_Town13_Route686_Weather10", "v1/HazardAtSideLane_Town03_Route105_Weather22", "v1/ParkingCrossingPedestrian_Town12_Route760_Weather6", "v1/OppositeVehicleTakingPriority_Town12_Route995_Weather7", "v1/NonSignalizedJunctionLeftTurnEnterFlow_Town13_Route661_Weather11", "v1/HardBreakRoute_Town06_Route46_Weather20", "v1/OppositeVehicleTakingPriority_Town04_Route189_Weather7", "v1/BlockedIntersection_Town07_Route352_Weather14", "v1/ConstructionObstacleTwoWays_Town12_Route1404_Weather26", "v1/AccidentTwoWays_Town12_Route1446_Weather2", "v1/ParkedObstacle_Town03_Route147_Weather0", "v1/HazardAtSideLaneTwoWays_Town12_Route1146_Weather2", "v1/VanillaSignalizedTurnEncounterGreenLight_Town04_Route197_Weather15", "v1/AccidentTwoWays_Town12_Route1456_Weather15", "v1/ParkingCutIn_Town12_Route955_Weather19", "v1/ParkedObstacle_Town13_Route553_Weather11", "v1/VanillaSignalizedTurnEncounterRedLight_Town03_Route141_Weather11", "v1/NonSignalizedJunctionRightTurn_Town12_Route817_Weather11", "v1/OppositeVehicleTakingPriority_Town03_Route128_Weather23", "v1/HighwayExit_Town13_Route749_Weather21", "v1/VanillaSignalizedTurnEncounterRedLight_Town03_Route142_Weather12", "v1/TJunction_Town07_Route364_Weather0", "v1/HardBreakRoute_Town07_Route47_Weather21", "v1/CrossingBicycleFlow_Town12_Route1062_Weather22", "v1/HazardAtSideLane_Town12_Route1527_Weather25", "v1/LaneChange_Town12_Route756_Weather2", "v1/VehicleTurningRoutePedestrian_Town13_Route703_Weather1", "v1/OppositeVehicleRunningRedLight_Town04_Route178_Weather22", "v1/OppositeVehicleTakingPriority_Town12_Route820_Weather14", "v1/Accident_Town12_Route769_Weather15", "v1/AccidentTwoWays_Town12_Route1469_Weather3", "v1/MergerIntoSlowTrafficV2_Town12_Route1010_Weather22", "v1/NonSignalizedJunctionLeftTurn_Town12_Route966_Weather3", "v1/TJunction_Town12_Route883_Weather25", "v1/OppositeVehicleRunningRedLight_Town12_Route807_Weather1", "v1/OppositeVehicleTakingPriority_Town12_Route994_Weather6", "v1/CrossingBicycleFlow_Town12_Route1077_Weather11", "v1/InvadingTurn_Town02_Route99_Weather21", "v1/YieldToEmergencyVehicle_Town12_Route917_Weather7", "v1/PedestrianCrossing_Town13_Route718_Weather8", "v1/VanillaSignalizedTurnEncounterGreenLight_Town13_Route640_Weather8", "v1/SignalizedJunctionLeftTurnEnterFlow_Town15_Route536_Weather8", "v1/HazardAtSideLane_Town15_Route421_Weather5", 
"v1/TJunction_Town12_Route1018_Weather3", "v1/ConstructionObstacle_Town05_Route69_Weather9", "v1/HardBreakRoute_Town13_Route1341_Weather26", "v1/NonSignalizedJunctionRightTurn_Town12_Route816_Weather15", "v1/LaneChange_Town12_Route984_Weather22", "v1/YieldToEmergencyVehicle_Town12_Route779_Weather25", "v1/Accident_Town03_Route102_Weather20", "v1/VehicleTurningRoute_Town15_Route480_Weather18", "v1/OppositeVehicleTakingPriority_Town04_Route188_Weather6", "v1/ParkingCutIn_Town13_Route1347_Weather6", "v1/SignalizedJunctionLeftTurnEnterFlow_Town12_Route1019_Weather5", "v1/HardBreakRoute_Town15_Route59_Weather7", "v1/LaneChange_Town06_Route326_Weather3", "v1/EnterActorFlow_Town13_Route612_Weather14", "v1/DynamicObjectCrossing_Town12_Route21_Weather21", "v1/HardBreakRoute_Town10HD_Route49_Weather23", "v1/StaticCutIn_Town06_Route287_Weather1", "v1/HighwayExit_Town13_Route619_Weather21", "v1/InterurbanAdvancedActorFlow_Town12_Route1048_Weather8", "v1/SignalizedJunctionRightTurn_Town04_Route176_Weather20", "v1/EnterActorFlow_Town07_Route349_Weather11", "v1/CrossingBicycleFlow_Town12_Route860_Weather2", "v1/ConstructionObstacleTwoWays_Town12_Route1419_Weather26", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town03_Route143_Weather13", "v1/ParkedObstacle_Town03_Route103_Weather25", "v1/ParkingExit_Town13_Route569_Weather23", "v1/AccidentTwoWays_Town12_Route1454_Weather13", "v1/NonSignalizedJunctionRightTurn_Town13_Route595_Weather11", "v1/SignalizedJunctionRightTurn_Town07_Route339_Weather1", "v1/HazardAtSideLane_Town12_Route1506_Weather0", "v1/VanillaSignalizedTurnEncounterGreenLight_Town15_Route490_Weather22", "v1/ConstructionObstacle_Town13_Route82_Weather3", "v1/DynamicObjectCrossing_Town02_Route12_Weather12", "v1/HighwayExit_Town12_Route1051_Weather11", "v1/MergerIntoSlowTraffic_Town13_Route627_Weather3", "v1/YieldToEmergencyVehicle_Town05_Route225_Weather9", "v1/ControlLoss_Town15_Route430_Weather14", "v1/ParkingCutIn_Town13_Route546_Weather0", "v1/VanillaSignalizedTurnEncounterRedLight_Town15_Route452_Weather10", "v1/StaticCutIn_Town15_Route428_Weather12", "v1/ParkedObstacleTwoWays_Town12_Route1183_Weather7", "v1/PedestrianCrossing_Town15_Route448_Weather6", "v1/BlockedIntersection_Town04_Route193_Weather11", "v1/InterurbanAdvancedActorFlow_Town06_Route325_Weather13", "v1/ParkedObstacle_Town12_Route771_Weather9", "v1/AccidentTwoWays_Town12_Route1104_Weather12", "v1/VanillaSignalizedTurnEncounterRedLight_Town03_Route140_Weather10", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town15_Route493_Weather25", "v1/TJunction_Town12_Route1201_Weather5", "v1/ParkedObstacle_Town04_Route162_Weather6", "v1/ParkingCutIn_Town13_Route670_Weather20", "v1/VehicleTurningRoute_Town13_Route605_Weather9", "v1/HazardAtSideLane_Town12_Route960_Weather20", "v1/StaticCutIn_Town04_Route168_Weather12", "v1/VanillaSignalizedTurnEncounterRedLight_Town10HD_Route393_Weather3", "v1/ParkingCutIn_Town12_Route764_Weather10", "v1/InterurbanAdvancedActorFlow_Town12_Route854_Weather22", "v1/HighwayExit_Town12_Route1028_Weather14", "v1/Accident_Town15_Route414_Weather23", "v1/ConstructionObstacleTwoWays_Town12_Route1425_Weather26", "v1/SignalizedJunctionLeftTurnEnterFlow_Town13_Route737_Weather23", "v1/ParkedObstacleTwoWays_Town12_Route1182_Weather12", "v1/NonSignalizedJunctionLeftTurn_Town12_Route1356_Weather7", "v1/NonSignalizedJunctionLeftTurn_Town05_Route238_Weather26", "v1/HighwayCutIn_Town12_Route850_Weather18", "v1/HighwayCutIn_Town06_Route320_Weather8", "v1/HazardAtSideLaneTwoWays_Town12_Route1145_Weather1", 
"v1/ParkedObstacleTwoWays_Town12_Route1164_Weather20", "v1/NonSignalizedJunctionLeftTurn_Town12_Route813_Weather26", "v1/HardBreakRoute_Town02_Route33_Weather7", "v1/ParkingExit_Town12_Route1305_Weather18", "v1/HighwayCutIn_Town12_Route974_Weather12", "v1/NonSignalizedJunctionLeftTurn_Town07_Route344_Weather6", "v1/ConstructionObstacleTwoWays_Town12_Route1084_Weather5", "v1/ParkingCutIn_Town13_Route671_Weather21", "v1/ConstructionObstacle_Town03_Route63_Weather11", "v1/EnterActorFlow_Town05_Route245_Weather9", "v1/VanillaSignalizedTurnEncounterRedLight_Town10HD_Route389_Weather25", "v1/SignalizedJunctionLeftTurn_Town15_Route471_Weather26", "v1/ConstructionObstacleTwoWays_Town12_Route1083_Weather9", "v1/HighwayCutIn_Town13_Route685_Weather9", "v1/HazardAtSideLaneTwoWays_Town12_Route1139_Weather21", "v1/CrossingBicycleFlow_Town12_Route1075_Weather9", "v1/HighwayCutIn_Town12_Route1006_Weather18", "v1/PedestrianCrossing_Town13_Route687_Weather11", "v1/ParkingExit_Town13_Route697_Weather21", "v1/ParkingExit_Town12_Route1309_Weather22", "v1/InterurbanAdvancedActorFlow_Town13_Route715_Weather13", "v1/ParkingCrossingPedestrian_Town12_Route896_Weather12", "v1/HazardAtSideLane_Town05_Route263_Weather3", "v1/TJunction_Town12_Route980_Weather18", "v1/OppositeVehicleTakingPriority_Town12_Route1025_Weather11", "v1/EnterActorFlow_Town13_Route614_Weather8", "v1/ParkingCrossingPedestrian_Town13_Route668_Weather18", "v1/ParkingExit_Town13_Route731_Weather3", "v1/ParkingCrossingPedestrian_Town13_Route669_Weather19", "v1/NonSignalizedJunctionLeftTurnEnterFlow_Town13_Route723_Weather21", "v1/HazardAtSideLane_Town12_Route1519_Weather8", "v1/HighwayCutIn_Town13_Route631_Weather7", "v1/SignalizedJunctionLeftTurnEnterFlow_Town13_Route719_Weather18", "v1/ConstructionObstacleTwoWays_Town12_Route1421_Weather26", "v1/VehicleTurningRoutePedestrian_Town12_Route999_Weather11", "v1/NonSignalizedJunctionLeftTurnEnterFlow_Town12_Route1022_Weather8", "v1/ControlLoss_Town15_Route432_Weather8", "v1/CrossingBicycleFlow_Town12_Route1063_Weather23", "v1/ParkingExit_Town12_Route1308_Weather21", "v1/Accident_Town13_Route552_Weather6", "v1/EnterActorFlow_Town13_Route613_Weather15", "v1/LaneChange_Town13_Route726_Weather21", "v1/SignalizedJunctionLeftTurnEnterFlow_Town13_Route1633_Weather12", "v1/SignalizedJunctionLeftTurnEnterFlow_Town13_Route1629_Weather6", "v1/NonSignalizedJunctionRightTurn_Town12_Route967_Weather25", "v1/CrossingBicycleFlow_Town12_Route1078_Weather12", "v1/SignalizedJunctionLeftTurn_Town13_Route581_Weather26", "v1/VehicleTurningRoute_Town12_Route1026_Weather12", "v1/ParkedObstacleTwoWays_Town12_Route1181_Weather11", "v1/VehicleTurningRoute_Town15_Route1370_Weather7", "v1/ParkingCrossingPedestrian_Town12_Route953_Weather9", "v1/VehicleTurningRoutePedestrian_Town13_Route702_Weather14", "v1/ParkingCutIn_Town13_Route547_Weather1", "v1/HardBreakRoute_Town07_Route48_Weather22", "v1/ConstructionObstacle_Town13_Route81_Weather3", "v1/YieldToEmergencyVehicle_Town12_Route778_Weather14", "v1/DynamicObjectCrossing_Town01_Route2_Weather2", "v1/HazardAtSideLaneTwoWays_Town12_Route1141_Weather23", "v1/HighwayCutIn_Town13_Route745_Weather9", "v1/SignalizedJunctionLeftTurnEnterFlow_Town13_Route658_Weather8", "v1/ConstructionObstacleTwoWays_Town12_Route1085_Weather6", "v1/DynamicObjectCrossing_Town15_Route29_Weather3", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town04_Route202_Weather20", "v1/ParkingCutIn_Town12_Route1312_Weather2", "v1/ParkedObstacle_Town05_Route221_Weather13", "v1/ParkingExit_Town12_Route788_Weather8", 
"v1/HazardAtSideLane_Town12_Route1526_Weather23", "v1/AccidentTwoWays_Town12_Route1106_Weather14", "v1/BlockedIntersection_Town12_Route936_Weather0", "v1/AccidentTwoWays_Town12_Route1459_Weather18", "v1/ParkingCutIn_Town13_Route549_Weather3", "v1/HighwayCutIn_Town12_Route940_Weather3", "v1/HardBreakRoute_Town05_Route43_Weather9", "v1/InvadingTurn_Town13_Route578_Weather6", "v1/VehicleOpensDoorTwoWays_Town12_Route1197_Weather1", "v1/VehicleTurningRoutePedestrian_Town13_Route610_Weather12", "v1/HighwayCutIn_Town13_Route628_Weather3", "v1/ParkingExit_Town12_Route920_Weather10", "v1/SignalizedJunctionLeftTurnEnterFlow_Town13_Route1627_Weather3", "v1/ParkingCrossingPedestrian_Town12_Route759_Weather5", "v1/HardBreakRoute_Town03_Route38_Weather12", "v1/SignalizedJunctionLeftTurn_Town15_Route437_Weather26", "v1/HazardAtSideLaneTwoWays_Town12_Route1152_Weather8", "v1/ConstructionObstacleTwoWays_Town12_Route1424_Weather26", "v1/ConstructionObstacleTwoWays_Town12_Route1095_Weather3", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town03_Route145_Weather15", "v1/VehicleTurningRoute_Town15_Route1380_Weather19", "v1/VanillaSignalizedTurnEncounterGreenLight_Town07_Route355_Weather9", "v1/ControlLoss_Town15_Route433_Weather9", "v1/NonSignalizedJunctionLeftTurn_Town13_Route594_Weather26", "v1/Accident_Town12_Route766_Weather12", "v1/SignalizedJunctionLeftTurn_Town13_Route580_Weather26", "v1/HighwayCutIn_Town06_Route300_Weather14", "v1/HazardAtSideLane_Town12_Route1515_Weather12", "v1/StaticCutIn_Town03_Route109_Weather1", "v1/SignalizedJunctionLeftTurnEnterFlow_Town12_Route1034_Weather20", "v1/HardBreakRoute_Town04_Route39_Weather13", "v1/SignalizedJunctionLeftTurnEnterFlow_Town13_Route1636_Weather15", "v1/OppositeVehicleRunningRedLight_Town03_Route119_Weather12", "v1/OppositeVehicleRunningRedLight_Town12_Route991_Weather3", "v1/VehicleTurningRoute_Town13_Route700_Weather23", "v1/HazardAtSideLaneTwoWays_Town12_Route1135_Weather9", "v1/StaticCutIn_Town12_Route783_Weather3", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town12_Route946_Weather10", "v1/AccidentTwoWays_Town12_Route1445_Weather1", "v1/BlockedIntersection_Town04_Route195_Weather13", "v1/ParkingCutIn_Town12_Route903_Weather20", "v1/HardBreakRoute_Town05_Route44_Weather18", "v1/VehicleTurningRoute_Town12_Route825_Weather21", "v1/HazardAtSideLane_Town12_Route775_Weather12", "v1/ParkingCutIn_Town13_Route548_Weather2", "v1/CrossingBicycleFlow_Town12_Route863_Weather5", "v1/HazardAtSideLane_Town06_Route283_Weather23", "v1/BlockedIntersection_Town05_Route248_Weather14", "v1/BlockedIntersection_Town07_Route351_Weather13", "v1/AccidentTwoWays_Town12_Route1444_Weather0", "v1/NonSignalizedJunctionLeftTurn_Town05_Route240_Weather26", "v1/YieldToEmergencyVehicle_Town12_Route781_Weather8", "v1/HazardAtSideLane_Town13_Route558_Weather12", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town12_Route877_Weather19", "v1/NonSignalizedJunctionRightTurn_Town12_Route931_Weather21", "v1/ParkedObstacle_Town12_Route773_Weather19", "v1/HighwayCutIn_Town12_Route1042_Weather2", "v1/SignalizedJunctionLeftTurnEnterFlow_Town12_Route1020_Weather6", "v1/SignalizedJunctionRightTurn_Town12_Route804_Weather5", "v1/CrossingBicycleFlow_Town12_Route1066_Weather0", "v1/VanillaSignalizedTurnEncounterGreenLight_Town15_Route451_Weather9", "v1/ParkedObstacleTwoWays_Town12_Route1172_Weather2", "v1/StaticCutIn_Town05_Route227_Weather2", "v1/YieldToEmergencyVehicle_Town13_Route675_Weather25", "v1/DynamicObjectCrossing_Town12_Route22_Weather22", 
"v1/NonSignalizedJunctionRightTurn_Town13_Route596_Weather23", "v1/ParkedObstacle_Town10HD_Route371_Weather7", "v1/HazardAtSideLaneTwoWays_Town12_Route1155_Weather11", "v1/VehicleTurningRoute_Town15_Route519_Weather25", "v1/Accident_Town06_Route280_Weather11", "v1/AccidentTwoWays_Town12_Route1468_Weather2", "v1/InterurbanAdvancedActorFlow_Town06_Route330_Weather18", "v1/PedestrianCrossing_Town12_Route865_Weather7", "v1/ParkingCutIn_Town13_Route1348_Weather7", "v1/TJunction_Town13_Route654_Weather3", "v1/VehicleTurningRoutePedestrian_Town12_Route1040_Weather0", "v1/InterurbanAdvancedActorFlow_Town13_Route753_Weather25", "v1/HazardAtSideLane_Town12_Route1507_Weather1", "v1/OppositeVehicleRunningRedLight_Town03_Route120_Weather8", "v1/StaticCutIn_Town03_Route149_Weather19", "v1/OppositeVehicleRunningRedLight_Town13_Route587_Weather15", "v1/OppositeVehicleTakingPriority_Town12_Route932_Weather22", "v1/VanillaSignalizedTurnEncounterGreenLight_Town07_Route356_Weather18", "v1/PedestrianCrossing_Town13_Route637_Weather13", "v1/ParkedObstacle_Town13_Route555_Weather9", "v1/StaticCutIn_Town04_Route208_Weather0", "v1/LaneChange_Town06_Route277_Weather9", "v1/YieldToEmergencyVehicle_Town04_Route165_Weather7", "v1/EnterActorFlow_Town05_Route271_Weather11", "v1/ParkingCrossingPedestrian_Town13_Route728_Weather0", "v1/InvadingTurn_Town15_Route436_Weather20", "v1/AccidentTwoWays_Town12_Route1458_Weather9", "v1/ParkingExit_Town12_Route786_Weather6", "v1/CrossingBicycleFlow_Town12_Route1065_Weather25", "v1/ParkedObstacleTwoWays_Town12_Route1163_Weather19", "v1/OppositeVehicleRunningRedLight_Town15_Route475_Weather7", "v1/HighwayCutIn_Town06_Route321_Weather9", "v1/ParkingCrossingPedestrian_Town13_Route727_Weather25", "v1/TJunction_Town12_Route1017_Weather3", "v1/ParkingCutIn_Town12_Route1300_Weather13", "v1/HighwayExit_Town13_Route622_Weather23", "v1/Accident_Town12_Route1108_Weather8", "v1/NonSignalizedJunctionLeftTurn_Town13_Route592_Weather26", "v1/DynamicObjectCrossing_Town01_Route8_Weather3", "v1/AccidentTwoWays_Town12_Route1119_Weather1", "v1/HardBreakRoute_Town12_Route53_Weather1", "v1/SignalizedJunctionLeftTurnEnterFlow_Town15_Route529_Weather9", "v1/VehicleTurningRoute_Town15_Route1376_Weather15", "v1/HazardAtSideLane_Town12_Route1537_Weather12", "v1/HazardAtSideLane_Town12_Route915_Weather7", "v1/ParkingExit_Town13_Route568_Weather22", "v1/DynamicObjectCrossing_Town15_Route28_Weather2", "v1/TJunction_Town02_Route97_Weather19", "v1/MergerIntoSlowTrafficV2_Town12_Route1009_Weather21", "v1/TJunction_Town01_Route90_Weather12", "v1/VanillaSignalizedTurnEncounterRedLight_Town04_Route201_Weather19", "v1/ParkingCrossingPedestrian_Town12_Route952_Weather8", "v1/NonSignalizedJunctionLeftTurn_Town12_Route1358_Weather11", "v1/SignalizedJunctionLeftTurnEnterFlow_Town13_Route1631_Weather10", "v1/AccidentTwoWays_Town12_Route1121_Weather3", "v1/HardBreakRoute_Town05_Route42_Weather8", "v1/SignalizedJunctionLeftTurn_Town07_Route336_Weather23", "v1/VanillaSignalizedTurnEncounterRedLight_Town05_Route254_Weather20", "v1/ParkedObstacle_Town11_Route395_Weather5", "v1/ParkedObstacleTwoWays_Town12_Route1175_Weather5", "v1/InvadingTurn_Town12_Route796_Weather8", "v1/HazardAtSideLane_Town05_Route222_Weather0", "v1/MergerIntoSlowTrafficV2_Town12_Route1043_Weather3", "v1/EnterActorFlow_Town12_Route832_Weather0", "v1/AccidentTwoWays_Town12_Route1126_Weather8", "v1/SignalizedJunctionLeftTurnEnterFlow_Town15_Route531_Weather11", "v1/TJunction_Town13_Route691_Weather15", "v1/TJunction_Town01_Route91_Weather13", 
"v1/ParkedObstacleTwoWays_Town12_Route1161_Weather9", "v1/ParkedObstacle_Town06_Route328_Weather8", "v1/Accident_Town06_Route327_Weather15", "v1/ControlLoss_Town13_Route574_Weather19", "v1/HardBreakRoute_Town15_Route58_Weather6", "v1/HighwayExit_Town13_Route621_Weather23", "v1/NonSignalizedJunctionLeftTurn_Town05_Route239_Weather26", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town04_Route203_Weather21", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town15_Route530_Weather10", "v1/MergerIntoSlowTrafficV2_Town12_Route1055_Weather15", "v1/ConstructionObstacle_Town12_Route79_Weather1", "v1/BlockedIntersection_Town03_Route136_Weather6", "v1/ParkingExit_Town13_Route677_Weather1", "v1/VanillaSignalizedTurnEncounterGreenLight_Town04_Route198_Weather8", "v1/TJunction_Town15_Route457_Weather15", "v1/OppositeVehicleTakingPriority_Town15_Route477_Weather9", "v1/VehicleTurningRoute_Town15_Route479_Weather11", "v1/ParkingCutIn_Town13_Route1346_Weather5", "v1/SignalizedJunctionLeftTurn_Town05_Route234_Weather7", "v1/AccidentTwoWays_Town12_Route1109_Weather9", "v1/ConstructionObstacleTwoWays_Town12_Route1094_Weather2", "v1/CrossingBicycleFlow_Town12_Route1072_Weather6", "v1/VanillaSignalizedTurnEncounterRedLight_Town15_Route492_Weather23", "v1/ConstructionObstacleTwoWays_Town12_Route1100_Weather14", "v1/NonSignalizedJunctionLeftTurn_Town04_Route182_Weather26", "v1/ParkingCrossingPedestrian_Town12_Route898_Weather14", "v1/ParkingExit_Town12_Route1316_Weather0", "v1/StaticCutIn_Town13_Route565_Weather8", "v1/SignalizedJunctionRightTurn_Town12_Route803_Weather23", "v1/HazardAtSideLane_Town12_Route1538_Weather13", "v1/MergerIntoSlowTraffic_Town13_Route626_Weather2", "v1/StaticCutIn_Town12_Route785_Weather5", "v1/HazardAtSideLaneTwoWays_Town12_Route1138_Weather20", "v1/HazardAtSideLane_Town12_Route1536_Weather11", "v1/OppositeVehicleTakingPriority_Town13_Route600_Weather2", "v1/HazardAtSideLane_Town12_Route1530_Weather2", "v1/OppositeVehicleTakingPriority_Town13_Route601_Weather3", "v1/HardBreakRoute_Town13_Route1337_Weather26", "v1/LaneChange_Town12_Route894_Weather10", "v1/HazardAtSideLane_Town06_Route329_Weather9", "v1/ParkingExit_Town13_Route732_Weather3", "v1/VehicleTurningRoute_Town15_Route1367_Weather3", "v1/HighwayExit_Town13_Route748_Weather20", "v1/Accident_Town03_Route101_Weather23", "v1/VanillaSignalizedTurnEncounterRedLight_Town12_Route1054_Weather14", "v1/ParkingCutIn_Town12_Route1314_Weather5", "v1/HighwayCutIn_Town06_Route322_Weather10", "v1/StaticCutIn_Town13_Route566_Weather20", "v1/AccidentTwoWays_Town12_Route1463_Weather22", "v1/CrossingBicycleFlow_Town12_Route1011_Weather23", "v1/HazardAtSideLaneTwoWays_Town12_Route1153_Weather9", "v1/ControlLoss_Town07_Route333_Weather21", "v1/TJunction_Town12_Route880_Weather22", "v1/Accident_Town12_Route957_Weather21", "v1/VanillaSignalizedTurnEncounterGreenLight_Town10HD_Route386_Weather22", "v1/AccidentTwoWays_Town12_Route1120_Weather2", "v1/InvadingTurn_Town12_Route925_Weather15", "v1/VehicleTurningRoute_Town12_Route822_Weather18", "v1/VehicleTurningRoutePedestrian_Town13_Route607_Weather19", "v1/HighwayExit_Town06_Route313_Weather1", "v1/Accident_Town04_Route205_Weather23", "v1/HazardAtSideLane_Town12_Route1508_Weather2", "v1/NonSignalizedJunctionLeftTurnEnterFlow_Town12_Route1035_Weather21", "v1/HazardAtSideLane_Town12_Route1533_Weather6", "v1/NonSignalizedJunctionLeftTurn_Town03_Route122_Weather26", "v1/PedestrianCrossing_Town15_Route526_Weather6", "v1/VanillaSignalizedTurnEncounterRedLight_Town04_Route199_Weather9", 
"v1/MergerIntoSlowTraffic_Town12_Route1003_Weather8", "v1/VehicleTurningRoute_Town15_Route1374_Weather13", "v1/VehicleTurningRoutePedestrian_Town12_Route970_Weather8", "v1/NonSignalizedJunctionLeftTurnEnterFlow_Town12_Route888_Weather3", "v1/MergerIntoSlowTrafficV2_Town12_Route1053_Weather13", "v1/EnterActorFlow_Town12_Route830_Weather23", "v1/YieldToEmergencyVehicle_Town13_Route562_Weather15", "v1/AccidentTwoWays_Town12_Route1453_Weather12", "v1/OppositeVehicleRunningRedLight_Town07_Route368_Weather3", "v1/HardBreakRoute_Town06_Route45_Weather19", "v1/HighwayCutIn_Town13_Route630_Weather6", "v1/ParkingExit_Town12_Route923_Weather13", "v1/LaneChange_Town12_Route983_Weather5", "v1/ParkingExit_Town13_Route676_Weather0", "v1/StaticCutIn_Town05_Route275_Weather15", "v1/InvadingTurn_Town04_Route217_Weather9", "v1/AccidentTwoWays_Town12_Route1455_Weather14", "v1/HighwayExit_Town12_Route1327_Weather3", "v1/NonSignalizedJunctionLeftTurnEnterFlow_Town13_Route660_Weather10", "v1/TJunction_Town07_Route365_Weather1", "v1/SignalizedJunctionRightTurn_Town12_Route988_Weather0", "v1/AccidentTwoWays_Town12_Route1124_Weather18", "v1/HighwayCutIn_Town13_Route734_Weather6", "v1/VehicleTurningRoute_Town13_Route698_Weather22", "v1/OppositeVehicleTakingPriority_Town05_Route270_Weather6", "v1/TJunction_Town15_Route496_Weather2", "v1/EnterActorFlow_Town07_Route350_Weather12", "v1/ParkingCutIn_Town13_Route1345_Weather3", "v1/HazardAtSideLane_Town12_Route1532_Weather5", "v1/PedestrianCrossing_Town12_Route867_Weather9", "v1/ConstructionObstacle_Town06_Route73_Weather21", "v1/SignalizedJunctionLeftTurnEnterFlow_Town13_Route721_Weather19", "v1/TJunction_Town15_Route495_Weather1", "v1/HighwayExit_Town12_Route1041_Weather1", "v1/ParkedObstacleTwoWays_Town12_Route1178_Weather8", "v1/ParkedObstacleTwoWays_Town12_Route1159_Weather23", "v1/NonSignalizedJunctionLeftTurn_Town03_Route124_Weather26", "v1/HardBreakRoute_Town13_Route1338_Weather26", "v1/PedestrianCrossing_Town12_Route1045_Weather5", "v1/VanillaSignalizedTurnEncounterRedLight_Town12_Route875_Weather9", "v1/HardBreakRoute_Town13_Route1339_Weather26", "v1/CrossingBicycleFlow_Town12_Route1071_Weather0", "v1/ParkingCrossingPedestrian_Town12_Route897_Weather13", "v1/PedestrianCrossing_Town12_Route943_Weather7", "v1/VehicleTurningRoutePedestrian_Town15_Route482_Weather20", "v1/AccidentTwoWays_Town12_Route1461_Weather20", "v1/HazardAtSideLaneTwoWays_Town12_Route1130_Weather12", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town15_Route535_Weather15", "v1/ConstructionObstacle_Town05_Route68_Weather8", "v1/StaticCutIn_Town04_Route216_Weather8", "v1/HazardAtSideLaneTwoWays_Town12_Route1154_Weather10", "v1/ParkingExit_Town12_Route1321_Weather6", "v1/HardBreakRoute_Town13_Route54_Weather2", "v1/NonSignalizedJunctionLeftTurnEnterFlow_Town15_Route512_Weather18", "v1/SignalizedJunctionLeftTurn_Town12_Route1470_Weather5", "v1/SignalizedJunctionRightTurn_Town15_Route473_Weather5", "v1/ConstructionObstacleTwoWays_Town12_Route1414_Weather26", "v1/ParkingExit_Town13_Route678_Weather2", "v1/VehicleTurningRoute_Town15_Route1368_Weather5", "v1/NonSignalizedJunctionRightTurn_Town04_Route185_Weather9", "v1/HighwayCutIn_Town12_Route1005_Weather9", "v1/HardBreakRoute_Town13_Route1340_Weather26", "v1/ParkingCrossingPedestrian_Town12_Route899_Weather15", "v1/InvadingTurn_Town15_Route434_Weather18", "v1/VehicleTurningRoute_Town15_Route443_Weather1", "v1/DynamicObjectCrossing_Town15_Route27_Weather1", "v1/VanillaSignalizedTurnEncounterGreenLight_Town10HD_Route392_Weather2", 
"v1/HardBreakRoute_Town15_Route57_Weather5", "v1/ParkingExit_Town12_Route789_Weather9", "v1/BlockedIntersection_Town13_Route617_Weather19", "v1/TJunction_Town05_Route259_Weather0", "v1/AccidentTwoWays_Town12_Route1112_Weather20", "v1/SignalizedJunctionRightTurn_Town04_Route211_Weather3", "v1/ParkedObstacleTwoWays_Town12_Route1171_Weather1", "v1/NonSignalizedJunctionLeftTurnEnterFlow_Town13_Route722_Weather20", "v1/VanillaSignalizedTurnEncounterGreenLight_Town13_Route643_Weather19", "v1/HazardAtSideLane_Town12_Route1516_Weather13", "v1/YieldToEmergencyVehicle_Town12_Route918_Weather8", "v1/HazardAtSideLaneTwoWays_Town12_Route1131_Weather13", "v1/VanillaSignalizedTurnEncounterRedLight_Town07_Route358_Weather20", "v1/InvadingTurn_Town05_Route230_Weather22", "v1/ParkingCutIn_Town12_Route765_Weather11", "v1/Accident_Town05_Route219_Weather11", "v1/ParkingCutIn_Town12_Route954_Weather18", "v1/DynamicObjectCrossing_Town01_Route7_Weather7", "v1/OppositeVehicleTakingPriority_Town12_Route1038_Weather23", "v1/PedestrianCrossing_Town12_Route1013_Weather25", "v1/CrossingBicycleFlow_Town12_Route1061_Weather21", "v1/OppositeVehicleRunningRedLight_Town03_Route121_Weather13", "v1/VanillaSignalizedTurnEncounterGreenLight_Town03_Route139_Weather9", "v1/HardBreakRoute_Town04_Route41_Weather15", "v1/HardBreakRoute_Town04_Route40_Weather14", "v1/ParkingCrossingPedestrian_Town15_Route462_Weather20", "v1/ConstructionObstacleTwoWays_Town12_Route1089_Weather23", "v1/TJunction_Town06_Route304_Weather18", "v1/MergerIntoSlowTrafficV2_Town12_Route1060_Weather20", "v1/StaticCutIn_Town15_Route426_Weather10", "v1/HazardAtSideLaneTwoWays_Town12_Route1144_Weather0", "v1/TJunction_Town12_Route881_Weather23", "v1/ParkingCutIn_Town12_Route1303_Weather8", "v1/SignalizedJunctionLeftTurn_Town03_Route114_Weather6", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town13_Route690_Weather14", "v1/MergerIntoSlowTraffic_Town12_Route844_Weather12", "v1/OppositeVehicleRunningRedLight_Town13_Route588_Weather8", "v1/HazardAtSideLane_Town12_Route774_Weather20", "v1/BlockedIntersection_Town15_Route485_Weather9", "v1/SignalizedJunctionLeftTurn_Town03_Route150_Weather26", "v1/ConstructionObstacle_Town12_Route76_Weather23", "v1/StaticCutIn_Town06_Route288_Weather2", "v1/MergerIntoSlowTraffic_Town12_Route1004_Weather8", "v1/StaticCutIn_Town05_Route265_Weather5", "v1/HardBreakRoute_Town03_Route37_Weather11", "v1/DynamicObjectCrossing_Town01_Route6_Weather6", "v1/CrossingBicycleFlow_Town12_Route1032_Weather18", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town15_Route455_Weather13", "v1/ConstructionObstacleTwoWays_Town12_Route1410_Weather26", "v1/ParkedObstacle_Town13_Route554_Weather12", "v1/SignalizedJunctionLeftTurn_Town07_Route366_Weather2", "v1/VanillaSignalizedTurnEncounterRedLight_Town13_Route689_Weather13", "v1/NonSignalizedJunctionLeftTurnEnterFlow_Town13_Route724_Weather22", "v1/ParkingCutIn_Town12_Route1311_Weather1", "v1/OppositeVehicleRunningRedLight_Town05_Route235_Weather1", "v1/VanillaSignalizedTurnEncounterGreenLight_Town05_Route252_Weather21", "v1/LaneChange_Town12_Route757_Weather10", "v1/VehicleTurningRoute_Town15_Route1377_Weather8", "v1/ParkingCrossingPedestrian_Town15_Route514_Weather0", "v1/NonSignalizedJunctionLeftTurn_Town03_Route123_Weather26", "v1/CrossingBicycleFlow_Town12_Route1076_Weather10", "v1/ParkingCrossingPedestrian_Town15_Route513_Weather19", "v1/ParkedObstacle_Town12_Route772_Weather11", "v1/OppositeVehicleTakingPriority_Town04_Route187_Weather5", 
"v1/VanillaNonSignalizedTurnEncounterStopsign_Town13_Route651_Weather1", "v1/SignalizedJunctionRightTurn_Town07_Route338_Weather10", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town07_Route362_Weather23", "v1/Accident_Town04_Route159_Weather3", "v1/NonSignalizedJunctionLeftTurn_Town12_Route810_Weather10", "v1/MergerIntoSlowTraffic_Town12_Route845_Weather13", "v1/Accident_Town06_Route279_Weather19", "v1/ParkingCutIn_Town13_Route1344_Weather2", "v1/VanillaSignalizedTurnEncounterRedLight_Town13_Route644_Weather20", "v1/OppositeVehicleTakingPriority_Town05_Route242_Weather15", "v1/DynamicObjectCrossing_Town01_Route5_Weather2", "v1/NonSignalizedJunctionLeftTurn_Town12_Route1351_Weather1", "v1/HazardAtSideLane_Town12_Route1521_Weather18", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town03_Route144_Weather14", "v1/SignalizedJunctionLeftTurn_Town05_Route267_Weather3", "v1/VehicleOpensDoorTwoWays_Town12_Route1196_Weather0", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town15_Route494_Weather0", "v1/ConstructionObstacle_Town13_Route80_Weather2", "v1/ConstructionObstacle_Town03_Route62_Weather10", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town07_Route361_Weather23", "v1/OppositeVehicleRunningRedLight_Town13_Route590_Weather18", "v1/NonSignalizedJunctionLeftTurnEnterFlow_Town15_Route533_Weather13", "v1/PedestrianCrossing_Town12_Route1033_Weather20", "v1/Accident_Town13_Route550_Weather3", "v1/SignalizedJunctionLeftTurnEnterFlow_Town12_Route948_Weather9", "v1/LaneChange_Town12_Route892_Weather8", "v1/HardBreakRoute_Town13_Route56_Weather3", "v1/DynamicObjectCrossing_Town13_Route24_Weather23", "v1/DynamicObjectCrossing_Town01_Route1_Weather1", "v1/MergerIntoSlowTrafficV2_Town12_Route859_Weather1", "v1/VanillaSignalizedTurnEncounterRedLight_Town13_Route645_Weather21", "v1/TJunction_Town12_Route947_Weather11", "v1/NonSignalizedJunctionLeftTurn_Town04_Route181_Weather15", "v1/ConstructionObstacle_Town05_Route70_Weather18", "v1/HazardAtSideLaneTwoWays_Town12_Route1134_Weather8", "v1/SignalizedJunctionLeftTurn_Town04_Route172_Weather8", "v1/ConstructionObstacleTwoWays_Town12_Route1086_Weather20", "v1/ParkedObstacleTwoWays_Town13_Route1336_Weather26", "v1/HardBreakRoute_Town01_Route30_Weather3", "v1/InterurbanAdvancedActorFlow_Town06_Route301_Weather15", "v1/HazardAtSideLane_Town12_Route1524_Weather21", "v1/HighwayExit_Town13_Route683_Weather7", "v1/ParkedObstacle_Town12_Route958_Weather22", "v1/HazardAtSideLaneTwoWays_Town12_Route1136_Weather18", "v1/BlockedIntersection_Town05_Route272_Weather12", "v1/VehicleTurningRoutePedestrian_Town15_Route1387_Weather1", "v1/OppositeVehicleTakingPriority_Town05_Route243_Weather9", "v1/HazardAtSideLane_Town12_Route1523_Weather20", "v1/NonSignalizedJunctionRightTurn_Town13_Route598_Weather0", "v1/HighwayCutIn_Town13_Route750_Weather22", "v1/HardBreakRoute_Town11_Route50_Weather23", "v1/ControlLoss_Town07_Route332_Weather20", "v1/NonSignalizedJunctionLeftTurn_Town12_Route1352_Weather2", "v1/VanillaSignalizedTurnEncounterRedLight_Town04_Route200_Weather18", "v1/VanillaSignalizedTurnEncounterRedLight_Town12_Route872_Weather14", "v1/HighwayCutIn_Town06_Route299_Weather13", "v1/Accident_Town12_Route768_Weather14", "v1/InvadingTurn_Town05_Route231_Weather23", "v1/DynamicObjectCrossing_Town10HD_Route18_Weather18", "v1/NonSignalizedJunctionLeftTurnEnterFlow_Town15_Route537_Weather9", "v1/ConstructionObstacleTwoWays_Town12_Route1415_Weather26", "v1/SignalizedJunctionRightTurn_Town13_Route583_Weather11", "v1/BlockedIntersection_Town07_Route353_Weather15", 
"v1/ParkedObstacle_Town15_Route417_Weather1", "v1/ParkingExit_Town12_Route1318_Weather2", "v1/InvadingTurn_Town05_Route266_Weather6", "v1/SignalizedJunctionLeftTurnEnterFlow_Town13_Route1630_Weather7", "v1/ConstructionObstacleTwoWays_Town12_Route1091_Weather12", "v1/HazardAtSideLaneTwoWays_Town12_Route1132_Weather14", "v1/BlockedIntersection_Town12_Route834_Weather2", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town05_Route257_Weather23", "v1/VehicleTurningRoute_Town15_Route1379_Weather18", "v1/HazardAtSideLane_Town12_Route1520_Weather9", "v1/DynamicObjectCrossing_Town12_Route23_Weather23", "v1/NonSignalizedJunctionLeftTurnEnterFlow_Town15_Route460_Weather18", "v1/PedestrianCrossing_Town15_Route506_Weather12", "v1/VanillaSignalizedTurnEncounterRedLight_Town12_Route969_Weather7", "v1/StaticCutIn_Town03_Route110_Weather6", "v1/ParkingExit_Town12_Route787_Weather7", "v1/VanillaSignalizedTurnEncounterRedLight_Town12_Route945_Weather9", "v1/NonSignalizedJunctionLeftTurnEnterFlow_Town15_Route499_Weather5", "v1/ParkedObstacle_Town06_Route281_Weather21", "v1/VanillaSignalizedTurnEncounterRedLight_Town05_Route255_Weather21", "v1/TJunction_Town12_Route927_Weather9", "v1/VanillaSignalizedTurnEncounterGreenLight_Town13_Route641_Weather9", "v1/TJunction_Town06_Route305_Weather19", "v1/AccidentTwoWays_Town12_Route1117_Weather25", "v1/InterurbanAdvancedActorFlow_Town06_Route302_Weather21", "v1/ControlLoss_Town11_Route402_Weather12", "v1/EnterActorFlow_Town13_Route681_Weather5", "v1/HazardAtSideLaneTwoWays_Town12_Route1143_Weather25", "v1/ConstructionObstacleTwoWays_Town12_Route1406_Weather26", "v1/ParkedObstacle_Town12_Route959_Weather23", "v1/VehicleTurningRoute_Town15_Route1369_Weather6", "v1/NonSignalizedJunctionRightTurn_Town07_Route345_Weather14", "v1/VanillaSignalizedTurnEncounterGreenLight_Town12_Route868_Weather10", "v1/NonSignalizedJunctionLeftTurn_Town07_Route369_Weather18", "v1/TJunction_Town15_Route510_Weather8", "v1/VehicleTurningRoute_Town13_Route606_Weather18", "v1/DynamicObjectCrossing_Town02_Route14_Weather14", "v1/DynamicObjectCrossing_Town10HD_Route19_Weather19", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town12_Route879_Weather21", "v1/SignalizedJunctionRightTurn_Town04_Route177_Weather13", "v1/OppositeVehicleTakingPriority_Town12_Route818_Weather12", "v1/EnterActorFlow_Town12_Route831_Weather25", "v1/AccidentTwoWays_Town12_Route1105_Weather13", "v1/HazardAtSideLane_Town03_Route106_Weather23", "v1/HighwayExit_Town12_Route1326_Weather2", "v1/CrossingBicycleFlow_Town12_Route1067_Weather1", "v1/SignalizedJunctionRightTurn_Town12_Route964_Weather2", "v1/SignalizedJunctionRightTurn_Town12_Route805_Weather25", "v1/PedestrianCrossing_Town13_Route736_Weather8", "v1/AccidentTwoWays_Town12_Route1113_Weather21", "v1/ConstructionObstacleTwoWays_Town12_Route1418_Weather26", "v1/HighwayExit_Town13_Route620_Weather22", "v1/Accident_Town15_Route411_Weather21", "v1/HazardAtSideLane_Town12_Route961_Weather25", "v1/LaneChange_Town13_Route664_Weather14", "v1/HazardAtSideLaneTwoWays_Town12_Route1137_Weather19", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town13_Route650_Weather0", "v1/HighwayCutIn_Town12_Route1052_Weather12", "v1/HighwayCutIn_Town13_Route751_Weather23", "v1/HighwayCutIn_Town12_Route851_Weather19", "v1/HazardAtSideLaneTwoWays_Town12_Route1147_Weather3", "v1/BlockedIntersection_Town05_Route247_Weather19", "v1/VanillaSignalizedTurnEncounterGreenLight_Town12_Route944_Weather8", "v1/SignalizedJunctionRightTurn_Town03_Route151_Weather2", "v1/HardBreakRoute_Town12_Route52_Weather0", 
"v1/HardBreakRoute_Town03_Route36_Weather10", "v1/NonSignalizedJunctionLeftTurn_Town13_Route593_Weather26", "v1/NonSignalizedJunctionLeftTurn_Town12_Route1350_Weather0", "v1/HazardAtSideLane_Town12_Route1518_Weather15", "v1/SignalizedJunctionLeftTurnEnterFlow_Town13_Route659_Weather3", "v1/OppositeVehicleRunningRedLight_Town05_Route236_Weather10", "v1/EnterActorFlow_Town07_Route348_Weather10", "v1/MergerIntoSlowTrafficV2_Town12_Route1031_Weather9", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town15_Route454_Weather12", "v1/ConstructionObstacle_Town04_Route67_Weather15", "v1/ParkedObstacle_Town03_Route157_Weather1", "v1/CrossingBicycleFlow_Town12_Route1074_Weather8", "v1/NonSignalizedJunctionLeftTurn_Town12_Route1353_Weather3", "v1/HazardAtSideLane_Town04_Route164_Weather8", "v1/HighwayExit_Town13_Route706_Weather3", "v1/ParkedObstacle_Town12_Route770_Weather8", "v1/VehicleTurningRoute_Town12_Route933_Weather23", "v1/YieldToEmergencyVehicle_Town15_Route423_Weather7", "v1/Accident_Town13_Route551_Weather5", "v1/MergerIntoSlowTrafficV2_Town12_Route1057_Weather9", "v1/InterurbanAdvancedActorFlow_Town13_Route735_Weather7", "v1/OppositeVehicleRunningRedLight_Town12_Route929_Weather19", "v1/MergerIntoSlowTraffic_Town12_Route973_Weather11", "v1/ParkingExit_Town13_Route570_Weather23", "v1/NonSignalizedJunctionLeftTurnEnterFlow_Town13_Route738_Weather10", "v1/SignalizedJunctionLeftTurn_Town10HD_Route381_Weather22", "v1/YieldToEmergencyVehicle_Town13_Route561_Weather14", "v1/ConstructionObstacleTwoWays_Town12_Route1099_Weather7", "v1/DynamicObjectCrossing_Town02_Route13_Weather6", "v1/NonSignalizedJunctionLeftTurnEnterFlow_Town12_Route982_Weather20", "v1/LaneChange_Town13_Route739_Weather25", "v1/ConstructionObstacle_Town12_Route75_Weather23", "v1/AccidentTwoWays_Town12_Route1102_Weather10", "v1/ParkingCrossingPedestrian_Town15_Route403_Weather13", "v1/ControlLoss_Town04_Route169_Weather13", "v1/HazardAtSideLaneTwoWays_Town12_Route1142_Weather23", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town13_Route648_Weather23", "v1/HardBreakRoute_Town02_Route35_Weather9", "v1/HazardAtSideLaneTwoWays_Town12_Route1157_Weather13", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town12_Route878_Weather20", "v1/CrossingBicycleFlow_Town12_Route1068_Weather2", "v1/ConstructionObstacleTwoWays_Town12_Route1080_Weather14", "v1/AccidentTwoWays_Town12_Route1127_Weather9", "v1/HighwayExit_Town12_Route1046_Weather6", "v1/ConstructionObstacleTwoWays_Town12_Route1413_Weather26", "v1/ParkedObstacleTwoWays_Town12_Route1168_Weather23", "v1/NonSignalizedJunctionLeftTurnEnterFlow_Town12_Route891_Weather7", "v1/HighwayCutIn_Town12_Route849_Weather9", "v1/NonSignalizedJunctionLeftTurn_Town12_Route1361_Weather14", "v1/HighwayExit_Town06_Route291_Weather5", "v1/DynamicObjectCrossing_Town13_Route25_Weather25", "v1/InterurbanAdvancedActorFlow_Town06_Route331_Weather19", "v1/TJunction_Town15_Route456_Weather14", "v1/VehicleTurningRoutePedestrian_Town12_Route1027_Weather13", "v1/VehicleTurningRoutePedestrian_Town12_Route827_Weather22", "v1/VanillaSignalizedTurnEncounterGreenLight_Town12_Route870_Weather12", "v1/NonSignalizedJunctionLeftTurnEnterFlow_Town15_Route532_Weather12", "v1/VanillaSignalizedTurnEncounterGreenLight_Town05_Route250_Weather8", "v1/VehicleTurningRoute_Town15_Route1378_Weather9", "v1/SignalizedJunctionLeftTurn_Town03_Route113_Weather26", "v1/AccidentTwoWays_Town12_Route1111_Weather19", "v1/MergerIntoSlowTrafficV2_Town12_Route1058_Weather18", "v1/ConstructionObstacle_Town04_Route66_Weather14", 
"v1/TJunction_Town12_Route882_Weather23", "v1/ParkedObstacleTwoWays_Town12_Route1158_Weather14", "v1/OppositeVehicleRunningRedLight_Town03_Route152_Weather22", "v1/HighwayCutIn_Town06_Route298_Weather20", "v1/NonSignalizedJunctionLeftTurn_Town12_Route1359_Weather12", "v1/SignalizedJunctionLeftTurnEnterFlow_Town12_Route981_Weather19", "v1/Accident_Town03_Route156_Weather0", "v1/NonSignalizedJunctionRightTurn_Town04_Route184_Weather2", "v1/ParkedObstacle_Town06_Route309_Weather23", "v1/MergerIntoSlowTrafficV2_Town12_Route1056_Weather8", "v1/PedestrianCrossing_Town12_Route1014_Weather0", "v1/ConstructionObstacleTwoWays_Town12_Route1097_Weather5", "v1/CrossingBicycleFlow_Town12_Route1073_Weather7", "v1/CrossingBicycleFlow_Town12_Route1064_Weather23", "v1/InterurbanAdvancedActorFlow_Town06_Route303_Weather22", "v1/OppositeVehicleRunningRedLight_Town04_Route179_Weather14", "v1/ParkingExit_Town12_Route1319_Weather3", "v1/ParkingCutIn_Town12_Route901_Weather9", "v1/VehicleTurningRoute_Town13_Route699_Weather23", "v1/AccidentTwoWays_Town12_Route1448_Weather5", "v1/HardBreakRoute_Town13_Route55_Weather3", "v1/HazardAtSideLaneTwoWays_Town12_Route1156_Weather12", "v1/SignalizedJunctionLeftTurn_Town05_Route233_Weather6", "v1/ConstructionObstacleTwoWays_Town12_Route1422_Weather26", "v1/CrossingBicycleFlow_Town12_Route1044_Weather3", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town15_Route509_Weather15", "v1/ParkingExit_Town12_Route1320_Weather5", "v1/VehicleTurningRoute_Town12_Route997_Weather9", "v1/ConstructionObstacle_Town15_Route85_Weather7", "v1/DynamicObjectCrossing_Town13_Route26_Weather0", "v1/NonSignalizedJunctionLeftTurn_Town12_Route1357_Weather10", "v1/VehicleTurningRoutePedestrian_Town12_Route829_Weather25", "v1/HardBreakRoute_Town01_Route31_Weather5", "v1/AccidentTwoWays_Town12_Route1116_Weather23", "v1/VanillaSignalizedTurnEncounterRedLight_Town13_Route647_Weather23", "v1/BlockedIntersection_Town04_Route194_Weather12", "v1/VanillaSignalizedTurnEncounterGreenLight_Town12_Route871_Weather13", "v1/HazardAtSideLane_Town12_Route1528_Weather0", "v1/HazardAtSideLane_Town12_Route777_Weather23", "v1/TJunction_Town07_Route363_Weather25", "v1/DynamicObjectCrossing_Town11_Route20_Weather20", "v1/NonSignalizedJunctionLeftTurn_Town12_Route1355_Weather6", "v1/VanillaSignalizedTurnEncounterGreenLight_Town05_Route251_Weather9", "v1/SignalizedJunctionLeftTurnEnterFlow_Town13_Route692_Weather8", "v1/ParkedObstacleTwoWays_Town12_Route1169_Weather25", "v1/EnterActorFlow_Town04_Route192_Weather10", "v1/ParkingCutIn_Town13_Route1342_Weather0", "v1/DynamicObjectCrossing_Town01_Route4_Weather3", "v1/SignalizedJunctionLeftTurn_Town07_Route334_Weather26", "v1/BlockedIntersection_Town13_Route615_Weather9", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town07_Route360_Weather22", "v1/HazardAtSideLane_Town12_Route1525_Weather22", "v1/ControlLoss_Town15_Route431_Weather15", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town05_Route258_Weather25", "v1/BlockedIntersection_Town03_Route134_Weather3", "v1/NonSignalizedJunctionLeftTurn_Town04_Route212_Weather26", "v1/SignalizedJunctionLeftTurnEnterFlow_Town12_Route884_Weather0", "v1/ParkingCutIn_Town12_Route1315_Weather6", "v1/ParkingCutIn_Town13_Route672_Weather22", "v1/VanillaSignalizedTurnEncounterRedLight_Town07_Route357_Weather19", "v1/ParkedObstacle_Town05_Route273_Weather7", "v1/YieldToEmergencyVehicle_Town15_Route425_Weather9", "v1/MergerIntoSlowTrafficV2_Town12_Route856_Weather23", "v1/ParkedObstacleTwoWays_Town12_Route1173_Weather3", 
"v1/SignalizedJunctionLeftTurn_Town04_Route174_Weather18", "v1/ConstructionObstacle_Town15_Route84_Weather6", "v1/InterurbanActorFlow_Town12_Route1296_Weather7", "v1/HighwayExit_Town06_Route311_Weather25", "v1/LaneChange_Town13_Route740_Weather0", "v1/HazardAtSideLane_Town12_Route1534_Weather7", "v1/ParkingCutIn_Town12_Route763_Weather9", "v1/ParkingCutIn_Town12_Route1310_Weather0", "v1/ParkingCutIn_Town13_Route729_Weather1", "v1/VanillaSignalizedTurnEncounterGreenLight_Town13_Route642_Weather18", "v1/PedestrianCrossing_Town13_Route716_Weather14", "v1/PedestrianCrossing_Town12_Route864_Weather6", "v1/ConstructionObstacle_Town03_Route60_Weather8", "v1/HazardAtSideLane_Town05_Route223_Weather15", "v1/Accident_Town15_Route412_Weather22", "v1/ConstructionObstacle_Town04_Route65_Weather13", "v1/ParkedObstacleTwoWays_Town13_Route1335_Weather26", "v1/PedestrianCrossing_Town12_Route866_Weather8", "v1/NonSignalizedJunctionLeftTurn_Town12_Route930_Weather20", "v1/VanillaSignalizedTurnEncounterGreenLight_Town03_Route137_Weather7", "v1/HighwayCutIn_Town12_Route1047_Weather7", "v1/ParkingCutIn_Town13_Route696_Weather20", "v1/ParkedObstacleTwoWays_Town12_Route1165_Weather21", "v1/HighwayCutIn_Town13_Route629_Weather5", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town12_Route1015_Weather1", "v1/HardBreakRoute_Town12_Route51_Weather25", "v1/YieldToEmergencyVehicle_Town12_Route919_Weather11", "v1/InvadingTurn_Town12_Route924_Weather14", "v1/ConstructionObstacle_Town13_Route83_Weather5", "v1/HighwayExit_Town13_Route744_Weather8", "v1/Accident_Town12_Route767_Weather13", "v1/ParkedObstacleTwoWays_Town12_Route1174_Weather3", "v1/Accident_Town03_Route146_Weather8", "v1/NonSignalizedJunctionLeftTurn_Town12_Route1363_Weather8", "v1/DynamicObjectCrossing_Town02_Route15_Weather15", "v1/NonSignalizedJunctionLeftTurn_Town04_Route183_Weather1", "v1/SignalizedJunctionLeftTurn_Town05_Route232_Weather23", "v1/ParkingCrossingPedestrian_Town15_Route405_Weather15", "v1/VehicleTurningRoutePedestrian_Town13_Route609_Weather21", "v1/HighwayExit_Town13_Route704_Weather2", "v1/BlockedIntersection_Town10HD_Route391_Weather1", "v1/HighwayExit_Town12_Route1331_Weather10", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town12_Route876_Weather18", "v1/YieldToEmergencyVehicle_Town13_Route673_Weather23", "v1/OppositeVehicleRunningRedLight_Town12_Route990_Weather2", "v1/TJunction_Town05_Route261_Weather1", "v1/StaticCutIn_Town12_Route782_Weather2", "v1/HighwayExit_Town12_Route838_Weather6", "v1/VanillaSignalizedTurnEncounterRedLight_Town05_Route253_Weather19", "v1/VanillaSignalizedTurnEncounterGreenLight_Town10HD_Route387_Weather23", "v1/ConstructionObstacle_Town15_Route87_Weather9", "v1/TJunction_Town13_Route652_Weather2", "v1/ParkingCutIn_Town12_Route1302_Weather15", "v1/SignalizedJunctionLeftTurnEnterFlow_Town15_Route498_Weather23", "v1/YieldToEmergencyVehicle_Town15_Route424_Weather8", "v1/CrossingBicycleFlow_Town12_Route1070_Weather3", "v1/ConstructionObstacle_Town04_Route64_Weather12", "v1/ParkingCutIn_Town12_Route902_Weather18", "v1/MergerIntoSlowTrafficV2_Town12_Route1059_Weather19", "v1/MergerIntoSlowTrafficV2_Town12_Route976_Weather14", "v1/SignalizedJunctionLeftTurn_Town12_Route799_Weather0", "v1/InterurbanAdvancedActorFlow_Town12_Route1030_Weather8", "v1/YieldToEmergencyVehicle_Town03_Route148_Weather18", "v1/ConstructionObstacle_Town06_Route72_Weather20", "v1/PedestrianCrossing_Town13_Route636_Weather12", "v1/SignalizedJunctionRightTurn_Town15_Route438_Weather7", "v1/Accident_Town04_Route160_Weather3", 
"v1/Accident_Town12_Route1122_Weather3", "v1/Accident_Town06_Route308_Weather22", "v1/NonSignalizedJunctionLeftTurn_Town12_Route1362_Weather15", "v1/CrossingBicycleFlow_Town12_Route1012_Weather23", "v1/OppositeVehicleTakingPriority_Town05_Route241_Weather7", "v1/InterurbanAdvancedActorFlow_Town13_Route634_Weather10", "v1/DynamicObjectCrossing_Town02_Route9_Weather9", "v1/Accident_Town15_Route413_Weather23", "v1/ParkedObstacle_Town13_Route556_Weather10", "v1/OppositeVehicleTakingPriority_Town12_Route968_Weather6", "v1/SignalizedJunctionLeftTurnEnterFlow_Town13_Route720_Weather19", "v1/NonSignalizedJunctionRightTurn_Town07_Route346_Weather15", "v1/BlockedIntersection_Town13_Route618_Weather20", "v1/HighwayCutIn_Town13_Route713_Weather11", "v1/AccidentTwoWays_Town12_Route1103_Weather11", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town12_Route1016_Weather2", "v1/ParkedObstacle_Town15_Route418_Weather2", "v1/ParkedObstacle_Town05_Route220_Weather12", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town05_Route256_Weather22", "v1/VehicleTurningRoutePedestrian_Town15_Route523_Weather2", "v1/ParkingCrossingPedestrian_Town12_Route761_Weather7", "v1/EnterActorFlow_Town13_Route611_Weather13", "v1/MergerIntoSlowTrafficV2_Town15_Route525_Weather5", "v1/HazardAtSideLane_Town12_Route1517_Weather14", "v1/ParkingExit_Town12_Route1322_Weather7", "v1/ConstructionObstacle_Town12_Route77_Weather25", "v1/ConstructionObstacle_Town05_Route71_Weather19", "v1/StaticCutIn_Town13_Route564_Weather18", "v1/ConstructionObstacleTwoWays_Town12_Route1096_Weather3", "v1/BlockedIntersection_Town12_Route835_Weather3", "v1/HighwayExit_Town06_Route293_Weather15", "v1/NonSignalizedJunctionRightTurn_Town12_Route815_Weather9", "v1/VanillaSignalizedTurnEncounterGreenLight_Town15_Route489_Weather21", "v1/BlockedIntersection_Town12_Route836_Weather3", "v1/ParkingCutIn_Town12_Route900_Weather19", "v1/CrossingBicycleFlow_Town12_Route862_Weather3", "v1/OppositeVehicleTakingPriority_Town03_Route155_Weather25", "v1/VanillaSignalizedTurnEncounterGreenLight_Town12_Route869_Weather11", "v1/ParkingExit_Town12_Route1307_Weather20", "v1/ControlLoss_Town11_Route401_Weather11", "v1/AccidentTwoWays_Town12_Route1107_Weather15", "v1/InterurbanActorFlow_Town13_Route708_Weather6", "v1/VehicleTurningRoute_Town12_Route824_Weather20", "v1/ControlLoss_Town10HD_Route377_Weather13"], "val": ["v1/ParkingCrossingPedestrian_Town13_Route545_Weather25", "v1/OppositeVehicleTakingPriority_Town04_Route214_Weather6", "v1/DynamicObjectCrossing_Town02_Route11_Weather11", "v1/AccidentTwoWays_Town12_Route1115_Weather23", "v1/VehicleTurningRoute_Town15_Route504_Weather10", "v1/ParkingExit_Town12_Route922_Weather12", "v1/SignalizedJunctionLeftTurn_Town04_Route173_Weather26", "v1/EnterActorFlow_Town03_Route132_Weather2", "v1/HighwayExit_Town06_Route312_Weather0", "v1/VanillaSignalizedTurnEncounterRedLight_Town15_Route491_Weather23", "v1/CrossingBicycleFlow_Town12_Route977_Weather15", "v1/OppositeVehicleRunningRedLight_Town04_Route180_Weather23", "v1/VanillaSignalizedTurnEncounterRedLight_Town07_Route359_Weather21", "v1/ParkingCutIn_Town13_Route1343_Weather1", "v1/ParkedObstacle_Town06_Route282_Weather22", "v1/TJunction_Town06_Route306_Weather20", "v1/PedestrianCrossing_Town13_Route747_Weather19", "v1/VehicleTurningRoutePedestrian_Town15_Route445_Weather11", "v1/ConstructionObstacle_Town12_Route78_Weather0", "v1/HazardAtSideLaneTwoWays_Town12_Route1151_Weather7", "v1/ControlLoss_Town04_Route170_Weather14", "v1/MergerIntoSlowTrafficV2_Town12_Route857_Weather25", 
"v1/DynamicObjectCrossing_Town01_Route3_Weather3", "v1/SignalizedJunctionRightTurn_Town03_Route118_Weather14", "v1/BlockedIntersection_Town03_Route135_Weather5", "v1/MergerIntoSlowTraffic_Town06_Route317_Weather5", "v1/NonSignalizedJunctionRightTurn_Town03_Route126_Weather18", "v1/ParkedObstacleTwoWays_Town13_Route1333_Weather26", "v1/ConstructionObstacleTwoWays_Town12_Route1093_Weather1", "v1/TJunction_Town05_Route260_Weather0", "v1/NonSignalizedJunctionLeftTurn_Town07_Route342_Weather3", "v1/HighwayCutIn_Town12_Route1029_Weather15", "v1/HazardAtSideLane_Town10HD_Route373_Weather9", "v1/YieldToEmergencyVehicle_Town04_Route166_Weather10", "v1/HardBreakRoute_Town01_Route32_Weather6", "v1/SignalizedJunctionLeftTurnEnterFlow_Town13_Route657_Weather2", "v1/ConstructionObstacle_Town10HD_Route74_Weather22", "v1/ControlLoss_Town10HD_Route378_Weather14", "v1/Accident_Town05_Route218_Weather10", "v1/InterurbanActorFlow_Town12_Route1291_Weather1", "v1/LaneChange_Town06_Route307_Weather21", "v1/InvadingTurn_Town02_Route95_Weather9", "v1/VanillaNonSignalizedTurnEncounterStopsign_Town12_Route979_Weather9", "v1/StaticCutIn_Town05_Route226_Weather18", "v1/VehicleOpensDoorTwoWays_Town12_Route1203_Weather7", "v1/VehicleTurningRoutePedestrian_Town15_Route481_Weather19", "v1/VanillaSignalizedTurnEncounterGreenLight_Town07_Route354_Weather8", "v1/NonSignalizedJunctionLeftTurnEnterFlow_Town12_Route949_Weather13", "v1/InterurbanAdvancedActorFlow_Town06_Route324_Weather2", "v1/ParkedObstacle_Town10HD_Route372_Weather8"]} \ No newline at end of file diff --git a/docs/CONVERT_GUIDE.md b/docs/CONVERT_GUIDE.md new file mode 100644 index 0000000..5b4c988 --- /dev/null +++ b/docs/CONVERT_GUIDE.md @@ -0,0 +1,29 @@ +# Code Convert Guide + +This document outlines important considerations for migrating code based on nuscenes or other datasets to bench2drive. + +## Models + +We integrated several MMCV dependencies into the `mmcv` directory and no longer install the original libraries. You can refer to our existing methods to utilize these modules and place your own models and utils in `mmcv` directory and register them. Please make sure the mmcv directory contains all the modules you need; if not, you will need to add them. + +## Scripts and configs + +You can place the configs and scripts for each method in the `adzoo` . Utils of each methods can also be placed here for easier management. + +## Details of configs + +To create a config for the bench2drive dataset, note the following: + +- We have included the bench2drive name-to-class mapping and evaluation settings directly in the config. You can use our settings or modify them as needed. +- Unlike the 10 classes in nuscenes, we use 9 classes in bench2drive . +- Methods like UniAD and VAD use 3 commands on nuscenes, while bench2drive uses 6 commands obtained from Carla. + +## Dataset + +- The reference frame of the Bench2Drive data differs significantly from the coordinate system used by Nuscenes.([here](https://github.com/Thinklab-SJTU/Bench2Drive/blob/main/docs/anno.md) for details). In `mmcv/datasets/prepare_B2D.py`, we convert the world coordinate system, ego coordinate system, and sensor coordinate system to match the Nuscenes reference frame, including the vehicle coordinates, bounding box coordinates, and sensor extrinsics. You can refer to our code for data alignment. +- In Nuscenes, keyframes are at 2Hz, while Bench2Drive runs at 10Hz with annotations for each frame. 
+ +## Team agent + +To perform closed-loop evaluation in Carla, set up sensors to gather data from Carla. Use this data to compute all necessary model inputs, then convert the model outputs into a `carla.VehicleControl` object. \ No newline at end of file diff --git a/docs/DATA_PREP.md b/docs/DATA_PREP.md new file mode 100644 index 0000000..066338d --- /dev/null +++ b/docs/DATA_PREP.md @@ -0,0 +1,81 @@ +# Prepare Bench2Drive Dataset + +## Download Bench2Drive + +Download our dataset from (LINK) and make sure the structure of the data is as follows: + +``` + Bench2DriveZoo + ├── ... + ├── data/ + | ├── bench2drive/ + | | ├── v1/ # Bench2Drive base + | | | ├── Accident_Town03_Route101_Weather23/ + | | | ├── Accident_Town03_Route102_Weather20/ + | | | └── ... + | | └── maps/ # maps of Towns + | | ├── Town01_HD_map.npz + | | ├── Town02_HD_map.npz + | | └── ... + | ├── others + | | └── b2d_motion_anchor_infos_mode6.pkl # motion anchors for UniAD + | └── splits + | └── bench2drive_base_train_val_split.json # trainval_split of Bench2Drive base + +``` + +## Prepare Bench2Drive data info + +Run the following command: + +``` +cd mmcv/datasets +python prepare_B2D.py --workers 16 # workers used to prepare data +``` + +The command will generate `b2d_infos_train.pkl`, `b2d_infos_val.pkl` and `b2d_map_infos.pkl` under `data/infos`. +*Note: It takes about 1 hour to generate all the data with 16 workers.* + + +## Structure of code + + +After installation and data preparation, the structure of our code will be as follows: + +``` + Bench2DriveZoo + ├── adzoo/ + | ├── bevformer/ + | ├── uniad/ + | └── vad/ + ├── ckpts/ + | ├── r101_dcn_fcos3d_pretrain.pth # pretrain weights for bevformer + | ├── resnet50-19c8e357.pth # image backbone pretrain weights for vad + | ├── bevformer_base_b2d.pth # download weights you need + | ├── uniad_base_b2d.pth # download weights you need + | └── ... + ├── data/ + | ├── bench2drive/ + | | ├── v1/ # Bench2Drive base + | | | ├── Accident_Town03_Route101_Weather23/ + | | | ├── Accident_Town03_Route102_Weather20/ + | | | └── ... + | | └── maps/ # maps of Towns + | | ├── Town01_HD_map.npz + | | ├── Town02_HD_map.npz + | | └── ... + │ ├── infos/ + │ │ ├── b2d_infos_train.pkl + │ │ ├── b2d_infos_val.pkl + | | └── b2d_map_infos.pkl + | ├── others + | | └── b2d_motion_anchor_infos_mode6.pkl # motion anchors for UniAD + | └── splits + | └── bench2drive_base_train_val_split.json # trainval_split of Bench2Drive base + ├── docs/ + ├── mmcv/ + ├── team_code/ # for Closed-loop Evaluation in Carla +``` + + + diff --git a/docs/EVAL_IN_CARLA.md b/docs/EVAL_IN_CARLA.md new file mode 100644 index 0000000..0d06c57 --- /dev/null +++ b/docs/EVAL_IN_CARLA.md @@ -0,0 +1,26 @@ +# Closed Loop Evaluation + +Please follow these steps to evaluate UniAD and VAD in Carla: + +## Preparations + +- Install this repo as described in the [doc](docs/INSTALL.md). +- Install Bench2Drive from [here](https://github.com/Thinklab-SJTU/Bench2Drive).
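To make the linking step below concrete, here is a rough, hypothetical sketch of what a team agent looks like. `AutonomousAgent` is the base class used by the Carla leaderboard and `carla.VehicleControl` is the required output type; `model_forward` and `outputs_to_control` are stand-ins for your own code, and the sensor entry is only an example.

```python
import carla
from leaderboard.autoagents.autonomous_agent import AutonomousAgent

class MyTeamAgent(AutonomousAgent):
    def sensors(self):
        # Declare whatever sensors your model needs; one RGB camera shown here.
        return [{'type': 'sensor.camera.rgb', 'id': 'CAM_FRONT',
                 'x': 0.8, 'y': 0.0, 'z': 1.6,
                 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
                 'width': 1600, 'height': 900, 'fov': 70}]

    def run_step(self, input_data, timestamp):
        # 1) build model inputs from the raw sensor dict,
        # 2) run the network, 3) map its plan to low-level control.
        outputs = self.model_forward(input_data)                    # hypothetical helper
        throttle, steer, brake = self.outputs_to_control(outputs)   # hypothetical helper
        return carla.VehicleControl(throttle=throttle, steer=steer, brake=brake)
```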
+ + +## Link this repo to Bench2Drive + +```bash +# Add your agent code +cd Bench2Drive/leaderboard +mkdir team_code +cd Bench2Drive/leaderboard/team_code +ln -s YOUR_TEAM_AGENT ./ # link your agent code +cd Bench2Drive/ +ln -s Bench2DriveZoo/team_code/* ./ # link the team_code of this repo into Bench2Drive +``` + +## Run evaluation + +Follow [this](https://github.com/Thinklab-SJTU/Bench2Drive?tab=readme-ov-file#eval-tools) to use the evaluation tools of Bench2Drive. + diff --git a/docs/INSTALL.md b/docs/INSTALL.md new file mode 100644 index 0000000..2e69655 --- /dev/null +++ b/docs/INSTALL.md @@ -0,0 +1,52 @@ +## Follow these steps to install the environment +- **STEP 1: Create environment** + ``` + conda create -n uniad python=3.8 + conda activate uniad + ``` +- **STEP 2: Install cudatoolkit** + ``` + conda install -c "nvidia/label/cuda-11.8.0" cuda-toolkit + ``` +- **STEP 3: Install torch** + ``` + pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 + ``` +- **STEP 4: Set environment variables** + ``` + export PATH=YOUR_GCC_PATH/bin:$PATH + export CUDA_HOME=YOUR_CUDA_PATH/ + ``` +- **STEP 5: Install ninja and packaging** + ``` + pip install ninja packaging + ``` +- **STEP 6: Install our repo** + ``` + pip install -v -e . + ``` + +- **STEP 7: Prepare pretrained weights.** + Create the directory `ckpts`: + + ``` + mkdir ckpts + ``` + Download `resnet50-19c8e357.pth` from [Hugging Face](https://huggingface.co/rethinklab/Bench2DriveZoo/blob/main/resnet50-19c8e357.pth) or [Baidu Cloud](https://pan.baidu.com/s/1LlSrbYvghnv3lOlX1uLU5g?pwd=1234) + Download `r101_dcn_fcos3d_pretrain.pth` from [Hugging Face](https://huggingface.co/rethinklab/Bench2DriveZoo/blob/main/r101_dcn_fcos3d_pretrain.pth) or [Baidu Cloud](https://pan.baidu.com/s/1o7owaQ5G66xqq2S0TldwXQ?pwd=1234) + + +- **STEP 8: Install Carla for closed-loop evaluation.** + + + ``` + mkdir carla + cd carla + wget https://carla-releases.s3.us-east-005.backblazeb2.com/Linux/CARLA_0.9.15.tar.gz + tar -xvf CARLA_0.9.15.tar.gz + cd Import && wget https://carla-releases.s3.us-east-005.backblazeb2.com/Linux/AdditionalMaps_0.9.15.tar.gz + cd ..
&& bash ImportAssets.sh + export CARLA_ROOT=YOUR_CARLA_PATH + echo "$CARLA_ROOT/PythonAPI/carla/dist/carla-0.9.15-py3.7-linux-x86_64.egg" >> YOUR_CONDA_PATH/envs/YOUR_CONDA_ENV_NAME/lib/python3.7/site-packages/carla.pth # python 3.8 also works well, please set YOUR_CONDA_PATH and YOUR_CONDA_ENV_NAME + + ``` \ No newline at end of file diff --git a/docs/TRAIN_EVAL.md b/docs/TRAIN_EVAL.md new file mode 100644 index 0000000..ce66ff0 --- /dev/null +++ b/docs/TRAIN_EVAL.md @@ -0,0 +1,68 @@ +# Train/Eval models + +You can use following commands to train and validate [BEVFormer](https://github.com/fundamentalvision/BEVFormer), [UniAD](https://github.com/OpenDriveLab/UniAD) and [VAD](https://github.com/hustvl/VAD) + +## BEVFormer + +### Train + +```bash +#train BEVFormer base +./adzoo/bevformer/dist_train.sh ./adzoo/bevformer/configs/bevformer/bevformer_base_b2d.py 4 #N_GPUS +#train BEVFormer tiny +./adzoo/bevformer/dist_train.sh ./adzoo/bevformer/configs/bevformer/bevformer_tiny_b2d.py 4 #N_GPUS +``` +### Open loop eval + +```bash +#eval BEVFormer base +./adzoo/bevformer/dist_test.sh ./adzoo/bevformer/configs/bevformer/bevformer_base_b2d.py ./ckpts/bevformer_base_b2d.pth 1 +#test BEVFormerr tiny +./adzoo/bevformer/dist_test.sh ./adzoo/bevformer/configs/bevformer/bevformer_tiny_b2d.py ./ckpts/bevformer_tiny_b2d.pth 1 +``` + + +## UniAD + +### Train stage1 +```bash +#train UniAD base +./adzoo/uniad/uniad_dist_train.sh ./adzoo/uniad/configs/stage1_track_map/base_track_map_b2d.py 4 +#train UniAD tiny +./adzoo/uniad/uniad_dist_train.sh ./adzoo/uniad/configs/stage1_track_map/tiny_track_map_b2d.py 4 +``` + +### Train stage2 +```bash +#train UniAD base +./adzoo/uniad/uniad_dist_train.sh ./adzoo/uniad/configs/stage2_e2e/base_e2e_b2d.py 1 +#train UniAD tiny +./adzoo/uniad/uniad_dist_train.sh ./adzoo/uniad/configs/stage2_e2e/tiny_e2e_b2d.py 1 +``` + + +### Open loop eval + +```bash +#eval UniAD base +./adzoo/uniad/uniad_dist_eval.sh ./adzoo/uniad/configs/stage2_e2e/base_e2e_b2d.py ./ckpts/uniad_base_b2d.pth 1 +#eval UniAD tiny +./adzoo/uniad/uniad_dist_eval.sh ./adzoo/uniad/configs/stage2_e2e/tiny_e2e_b2d.py ./ckpts/uniad_tiny_b2d.pth 1 +``` + + +## VAD + +### Train + +```bash +./adzoo/vad/dist_test.sh ./adzoo/vad/configs/VAD/VAD_base_e2e_b2d.py ./ckpts/vad_b2d_base.pth 1 +``` + +### Open loop eval + +```bash +./adzoo/vad/dist_test.sh ./adzoo/vad/configs/VAD/VAD_base_e2e_b2d.py ./ckpts/vad_b2d_base.pth 1 +``` + +**NOTE**: UniAD and VAD use different definitions to calculate Planning L2. UniAD calculates L2 at each time step(0.5s,1.0s,1.5s,...), while VAD calculates the average over each time period(0s-0.5s,0s-1.0s,0s-1.5s,...). We retain the original calculation logic in the code, but report UniAD's Planning L2 converted to VAD's definition. \ No newline at end of file diff --git a/mmcv/__init__.py b/mmcv/__init__.py new file mode 100644 index 0000000..29f79b1 --- /dev/null +++ b/mmcv/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
\ No newline at end of file diff --git a/mmcv/__init__.py b/mmcv/__init__.py new file mode 100644 index 0000000..29f79b1 --- /dev/null +++ b/mmcv/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# flake8: noqa +__version__ = '0.0.1' + +from .fileio import * +from .image import * +from .utils import * +from .core.bbox.coder.nms_free_coder import NMSFreeCoder +from .core.bbox.match_costs import BBox3DL1Cost, DiceCost +from .core.evaluation.eval_hooks import CustomDistEvalHook +from .models.utils import * +from .models.opt.adamw import AdamW2 +from .losses import * +from .structures import Instances, BoxMode, Boxes +from .layers import cat, Conv2d, batched_nms, get_norm \ No newline at end of file diff --git a/mmcv/core/__init__.py b/mmcv/core/__init__.py new file mode 100644 index 0000000..a401238 --- /dev/null +++ b/mmcv/core/__init__.py @@ -0,0 +1,10 @@ +from .anchor import * # noqa: F401, F403 +from .bbox import * # noqa: F401, F403 +from .evaluation import * # noqa: F401, F403 +from .points import * # noqa: F401, F403 +from .mask import * # noqa: F401, F403 +from .post_processing import * # noqa: F401, F403 +from .utils import * # noqa: F401, F403 +# from .seg import * # noqa: F401, F403 +from .visualizer import * # noqa: F401, F403 +from .voxel import * # noqa: F401, F403 diff --git a/mmcv/core/anchor/__init__.py b/mmcv/core/anchor/__init__.py new file mode 100644 index 0000000..e3262a7 --- /dev/null +++ b/mmcv/core/anchor/__init__.py @@ -0,0 +1,18 @@ +from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator, + YOLOAnchorGenerator) +from .builder import (ANCHOR_GENERATORS, PRIOR_GENERATORS, + build_anchor_generator, build_prior_generator) +from .point_generator import MlvlPointGenerator, PointGenerator +from .utils import anchor_inside_flags, calc_region, images_to_levels +from .anchor_3d_generator import (AlignedAnchor3DRangeGenerator, + AlignedAnchor3DRangeGeneratorPerCls, + Anchor3DRangeGenerator) + +__all__ = [ + 'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags', + 'PointGenerator', 'images_to_levels', 'calc_region', + 'build_anchor_generator', 'ANCHOR_GENERATORS', 'YOLOAnchorGenerator', + 'build_prior_generator', 'PRIOR_GENERATORS', 'MlvlPointGenerator', + 'AlignedAnchor3DRangeGenerator', 'Anchor3DRangeGenerator', + 'AlignedAnchor3DRangeGeneratorPerCls' +] diff --git a/mmcv/core/anchor/anchor_3d_generator.py b/mmcv/core/anchor/anchor_3d_generator.py new file mode 100644 index 0000000..118f6ea --- /dev/null +++ b/mmcv/core/anchor/anchor_3d_generator.py @@ -0,0 +1,404 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmcv.core.anchor import ANCHOR_GENERATORS +from mmcv.utils import is_list_of + + +@ANCHOR_GENERATORS.register_module() +class Anchor3DRangeGenerator(object): + """3D Anchor Generator by range. + + This anchor generator generates anchors by the given range in different + feature levels. + Due to the convention in 3D detection, different anchor sizes are related + to different ranges for different categories. However, we find this setting + does not affect the performance much in some datasets, e.g., nuScenes. + + Args: + ranges (list[list[float]]): Ranges of different anchors. + The ranges are the same across different feature levels. But may + vary for different anchor sizes if size_per_range is True. + sizes (list[list[float]]): 3D sizes of anchors. + scales (list[int]): Scales of anchors in different feature levels. + rotations (list[float]): Rotations of anchors in a feature grid. + custom_values (tuple[float]): Customized values appended to each + anchor. For example, in nuScenes the anchors have velocities. + reshape_out (bool): Whether to reshape the output into (N, C), where + C is the number of anchor attributes (7 plus any custom values).
+ size_per_range (bool): Whether to use separate ranges for different + sizes. If size_per_range is True, the ranges should have the same + length as the sizes; if not, they will be duplicated. + """ + + def __init__(self, + ranges, + sizes=[[1.6, 3.9, 1.56]], + scales=[1], + rotations=[0, 1.5707963], + custom_values=(), + reshape_out=True, + size_per_range=True): + assert is_list_of(ranges, list) + if size_per_range: + if len(sizes) != len(ranges): + assert len(ranges) == 1 + ranges = ranges * len(sizes) + assert len(ranges) == len(sizes) + else: + assert len(ranges) == 1 + assert is_list_of(sizes, list) + assert isinstance(scales, list) + + self.sizes = sizes + self.scales = scales + self.ranges = ranges + self.rotations = rotations + self.custom_values = custom_values + self.cached_anchors = None + self.reshape_out = reshape_out + self.size_per_range = size_per_range + + def __repr__(self): + s = self.__class__.__name__ + '(' + s += f'anchor_range={self.ranges},\n' + s += f'scales={self.scales},\n' + s += f'sizes={self.sizes},\n' + s += f'rotations={self.rotations},\n' + s += f'reshape_out={self.reshape_out},\n' + s += f'size_per_range={self.size_per_range})' + return s + + @property + def num_base_anchors(self): + """int: Total number of base anchors in a feature grid.""" + num_rot = len(self.rotations) + num_size = torch.tensor(self.sizes).reshape(-1, 3).size(0) + return num_rot * num_size + + @property + def num_levels(self): + """int: Number of feature levels that the generator is applied to.""" + return len(self.scales) + + def grid_anchors(self, featmap_sizes, device='cuda'): + """Generate grid anchors in multiple feature levels. + + Args: + featmap_sizes (list[tuple]): List of feature map sizes in + multiple feature levels. + device (str): Device where the anchors will be put on. + + Returns: + list[torch.Tensor]: Anchors in multiple feature levels. \ + The sizes of each tensor should be [N, C], where \ + N = width * height * num_base_anchors, width and height \ + are the sizes of the corresponding feature level, and \ + C is the number of anchor attributes. + """ + assert self.num_levels == len(featmap_sizes) + multi_level_anchors = [] + for i in range(self.num_levels): + anchors = self.single_level_grid_anchors( + featmap_sizes[i], self.scales[i], device=device) + if self.reshape_out: + anchors = anchors.reshape(-1, anchors.size(-1)) + multi_level_anchors.append(anchors) + return multi_level_anchors + + def single_level_grid_anchors(self, featmap_size, scale, device='cuda'): + """Generate grid anchors of a single level feature map. + + This function is usually called by method ``self.grid_anchors``. + + Args: + featmap_size (tuple[int]): Size of the feature map. + scale (float): Scale factor of the anchors in the current level. + device (str, optional): Device the tensor will be put on. + Defaults to 'cuda'. + + Returns: + torch.Tensor: Anchors in the overall feature map.
+ """ + # We reimplement the anchor generator using torch in cuda + # torch: 0.6975 s for 1000 times + # numpy: 4.3345 s for 1000 times + # which is ~5 times faster than the numpy implementation + if not self.size_per_range: + return self.anchors_single_range( + featmap_size, + self.ranges[0], + scale, + self.sizes, + self.rotations, + device=device) + + mr_anchors = [] + for anchor_range, anchor_size in zip(self.ranges, self.sizes): + mr_anchors.append( + self.anchors_single_range( + featmap_size, + anchor_range, + scale, + anchor_size, + self.rotations, + device=device)) + mr_anchors = torch.cat(mr_anchors, dim=-3) + return mr_anchors + + def anchors_single_range(self, + feature_size, + anchor_range, + scale=1, + sizes=[[1.6, 3.9, 1.56]], + rotations=[0, 1.5707963], + device='cuda'): + """Generate anchors in a single range. + + Args: + feature_size (list[float] | tuple[float]): Feature map size. It is + either a list of a tuple of [D, H, W](in order of z, y, and x). + anchor_range (torch.Tensor | list[float]): Range of anchors with + shape [6]. The order is consistent with that of anchors, i.e., + (x_min, y_min, z_min, x_max, y_max, z_max). + scale (float | int, optional): The scale factor of anchors. + sizes (list[list] | np.ndarray | torch.Tensor): Anchor size with + shape [N, 3], in order of x, y, z. + rotations (list[float] | np.ndarray | torch.Tensor): Rotations of + anchors in a single feature grid. + device (str): Devices that the anchors will be put on. + + Returns: + torch.Tensor: Anchors with shape \ + [*feature_size, num_sizes, num_rots, 7]. + """ + if len(feature_size) == 2: + feature_size = [1, feature_size[0], feature_size[1]] + anchor_range = torch.tensor(anchor_range, device=device) + z_centers = torch.linspace( + anchor_range[2], anchor_range[5], feature_size[0], device=device) + y_centers = torch.linspace( + anchor_range[1], anchor_range[4], feature_size[1], device=device) + x_centers = torch.linspace( + anchor_range[0], anchor_range[3], feature_size[2], device=device) + sizes = torch.tensor(sizes, device=device).reshape(-1, 3) * scale + rotations = torch.tensor(rotations, device=device) + + # torch.meshgrid default behavior is 'id', np's default is 'xy' + rets = torch.meshgrid(x_centers, y_centers, z_centers, rotations) + # torch.meshgrid returns a tuple rather than list + rets = list(rets) + tile_shape = [1] * 5 + tile_shape[-2] = int(sizes.shape[0]) + for i in range(len(rets)): + rets[i] = rets[i].unsqueeze(-2).repeat(tile_shape).unsqueeze(-1) + + sizes = sizes.reshape([1, 1, 1, -1, 1, 3]) + tile_size_shape = list(rets[0].shape) + tile_size_shape[3] = 1 + sizes = sizes.repeat(tile_size_shape) + rets.insert(3, sizes) + + ret = torch.cat(rets, dim=-1).permute([2, 1, 0, 3, 4, 5]) + # [1, 200, 176, N, 2, 7] for kitti after permute + + if len(self.custom_values) > 0: + custom_ndim = len(self.custom_values) + custom = ret.new_zeros([*ret.shape[:-1], custom_ndim]) + # custom[:] = self.custom_values + ret = torch.cat([ret, custom], dim=-1) + # [1, 200, 176, N, 2, 9] for nus dataset after permute + return ret + + +@ANCHOR_GENERATORS.register_module() +class AlignedAnchor3DRangeGenerator(Anchor3DRangeGenerator): + """Aligned 3D Anchor Generator by range. + + This anchor generator uses a different manner to generate the positions + of anchors' centers from :class:`Anchor3DRangeGenerator`. + + Note: + The `align` means that the anchor's center is aligned with the voxel + grid, which is also the feature grid. 
The previous implementation of + :class:`Anchor3DRangeGenerator` does not generate the anchors' center + according to the voxel grid. Rather, it generates the center by + uniformly distributing the anchors inside the minimum and maximum + anchor ranges according to the feature map sizes. + However, this means the anchor centers do not match the feature grid. + The :class:`AlignedAnchor3DRangeGenerator` adds 1 when using the + feature map sizes to obtain the corners of the voxel grid. Then it + shifts the coordinates to the center of the voxel grid and uses the + top-left corner to distribute anchors. + + Args: + align_corner (bool): Whether to align with the corner of the voxel + grid. By default it is False and the anchor's center will be + the same as the corresponding voxel's center, which is also the + center of the corresponding feature grid. + """ + + def __init__(self, align_corner=False, **kwargs): + super(AlignedAnchor3DRangeGenerator, self).__init__(**kwargs) + self.align_corner = align_corner + + def anchors_single_range(self, + feature_size, + anchor_range, + scale, + sizes=[[1.6, 3.9, 1.56]], + rotations=[0, 1.5707963], + device='cuda'): + """Generate anchors in a single range. + + Args: + feature_size (list[float] | tuple[float]): Feature map size. It is + either a list or a tuple of [D, H, W] (in order of z, y, and x). + anchor_range (torch.Tensor | list[float]): Range of anchors with + shape [6]. The order is consistent with that of anchors, i.e., + (x_min, y_min, z_min, x_max, y_max, z_max). + scale (float | int, optional): The scale factor of anchors. + sizes (list[list] | np.ndarray | torch.Tensor): Anchor size with + shape [N, 3], in order of x, y, z. + rotations (list[float] | np.ndarray | torch.Tensor): Rotations of + anchors in a single feature grid. + device (str): Device that the anchors will be put on. + + Returns: + torch.Tensor: Anchors with shape \ + [*feature_size, num_sizes, num_rots, 7].
+ """ + if len(feature_size) == 2: + feature_size = [1, feature_size[0], feature_size[1]] + anchor_range = torch.tensor(anchor_range, device=device) + z_centers = torch.linspace( + anchor_range[2], + anchor_range[5], + feature_size[0] + 1, + device=device) + y_centers = torch.linspace( + anchor_range[1], + anchor_range[4], + feature_size[1] + 1, + device=device) + x_centers = torch.linspace( + anchor_range[0], + anchor_range[3], + feature_size[2] + 1, + device=device) + sizes = torch.tensor(sizes, device=device).reshape(-1, 3) * scale + rotations = torch.tensor(rotations, device=device) + + # shift the anchor center + if not self.align_corner: + z_shift = (z_centers[1] - z_centers[0]) / 2 + y_shift = (y_centers[1] - y_centers[0]) / 2 + x_shift = (x_centers[1] - x_centers[0]) / 2 + z_centers += z_shift + y_centers += y_shift + x_centers += x_shift + + # torch.meshgrid default behavior is 'id', np's default is 'xy' + rets = torch.meshgrid(x_centers[:feature_size[2]], + y_centers[:feature_size[1]], + z_centers[:feature_size[0]], rotations) + + # torch.meshgrid returns a tuple rather than list + rets = list(rets) + tile_shape = [1] * 5 + tile_shape[-2] = int(sizes.shape[0]) + for i in range(len(rets)): + rets[i] = rets[i].unsqueeze(-2).repeat(tile_shape).unsqueeze(-1) + + sizes = sizes.reshape([1, 1, 1, -1, 1, 3]) + tile_size_shape = list(rets[0].shape) + tile_size_shape[3] = 1 + sizes = sizes.repeat(tile_size_shape) + rets.insert(3, sizes) + + ret = torch.cat(rets, dim=-1).permute([2, 1, 0, 3, 4, 5]) + + if len(self.custom_values) > 0: + custom_ndim = len(self.custom_values) + custom = ret.new_zeros([*ret.shape[:-1], custom_ndim]) + # TODO: check the support of custom values + # custom[:] = self.custom_values + ret = torch.cat([ret, custom], dim=-1) + return ret + + +@ANCHOR_GENERATORS.register_module() +class AlignedAnchor3DRangeGeneratorPerCls(AlignedAnchor3DRangeGenerator): + """3D Anchor Generator by range for per class. + + This anchor generator generates anchors by the given range for per class. + Note that feature maps of different classes may be different. + + Args: + kwargs (dict): Arguments are the same as those in \ + :class:`AlignedAnchor3DRangeGenerator`. + """ + + def __init__(self, **kwargs): + super(AlignedAnchor3DRangeGeneratorPerCls, self).__init__(**kwargs) + assert len(self.scales) == 1, 'Multi-scale feature map levels are' + \ + ' not supported currently in this kind of anchor generator.' + + def grid_anchors(self, featmap_sizes, device='cuda'): + """Generate grid anchors in multiple feature levels. + + Args: + featmap_sizes (list[tuple]): List of feature map sizes for \ + different classes in a single feature level. + device (str): Device where the anchors will be put on. + + Returns: + list[list[torch.Tensor]]: Anchors in multiple feature levels. \ + Note that in this anchor generator, we currently only \ + support single feature level. The sizes of each tensor \ + should be [num_sizes/ranges*num_rots*featmap_size, \ + box_code_size]. + """ + multi_level_anchors = [] + anchors = self.multi_cls_grid_anchors( + featmap_sizes, self.scales[0], device=device) + multi_level_anchors.append(anchors) + return multi_level_anchors + + def multi_cls_grid_anchors(self, featmap_sizes, scale, device='cuda'): + """Generate grid anchors of a single level feature map for multi-class + with different feature map sizes. + + This function is usually called by method ``self.grid_anchors``. 
+ + Args: + featmap_sizes (list[tuple]): List of feature map sizes for \ + different classes in a single feature level. + scale (float): Scale factor of the anchors in the current level. + device (str, optional): Device the tensor will be put on. + Defaults to 'cuda'. + + Returns: + torch.Tensor: Anchors in the overall feature map. + """ + assert len(featmap_sizes) == len(self.sizes) == len(self.ranges), \ + 'The number of feature map sizes, anchor sizes and ' + \ + 'ranges should be the same.' + + multi_cls_anchors = [] + for i in range(len(featmap_sizes)): + anchors = self.anchors_single_range( + featmap_sizes[i], + self.ranges[i], + scale, + self.sizes[i], + self.rotations, + device=device) + # [*featmap_size, num_sizes/ranges, num_rots, box_code_size] + ndim = len(featmap_sizes[i]) + anchors = anchors.view(*featmap_sizes[i], -1, anchors.size(-1)) + # [*featmap_size, num_sizes/ranges*num_rots, box_code_size] + anchors = anchors.permute(ndim, *range(0, ndim), ndim + 1) + # [num_sizes/ranges*num_rots, *featmap_size, box_code_size] + multi_cls_anchors.append(anchors.reshape(-1, anchors.size(-1))) + # [num_sizes/ranges*num_rots*featmap_size, box_code_size] + return multi_cls_anchors diff --git a/mmcv/core/anchor/anchor_generator.py b/mmcv/core/anchor/anchor_generator.py new file mode 100644 index 0000000..2b8c7d8 --- /dev/null +++ b/mmcv/core/anchor/anchor_generator.py @@ -0,0 +1,838 @@ +import warnings + +from mmcv.utils import is_tuple_of +import numpy as np +import torch +from torch.nn.modules.utils import _pair + +from .builder import PRIOR_GENERATORS + + +@PRIOR_GENERATORS.register_module() +class AnchorGenerator: + """Standard anchor generator for 2D anchor-based detectors. + + Args: + strides (list[int] | list[tuple[int, int]]): Strides of anchors + in multiple feature levels in order (w, h). + ratios (list[float]): The list of ratios between the height and width + of anchors in a single level. + scales (list[int] | None): Anchor scales for anchors in a single level. + It cannot be set at the same time as `octave_base_scale` and + `scales_per_octave`. + base_sizes (list[int] | None): The basic sizes + of anchors in multiple levels. + If None is given, strides will be used as base_sizes. + (If strides are non-square, the shortest stride is taken.) + scale_major (bool): Whether to multiply scales first when generating + base anchors. If true, the anchors in the same row will have the + same scales. By default it is True in V2.0 + octave_base_scale (int): The base scale of octave. + scales_per_octave (int): Number of scales for each octave. + `octave_base_scale` and `scales_per_octave` are usually used in + retinanet and the `scales` should be None when they are set. + centers (list[tuple[float, float]] | None): The centers of the anchor + relative to the feature grid center in multiple feature levels. + By default it is set to be None and not used. If a list of tuple of + float is given, they will be used to shift the centers of anchors. + center_offset (float): The offset of center in proportion to anchors' + width and height. By default it is 0 in V2.0.
+ + Examples: + >>> from mmcv.core import AnchorGenerator + >>> self = AnchorGenerator([16], [1.], [1.], [9]) + >>> all_anchors = self.grid_anchors([(2, 2)], device='cpu') + >>> print(all_anchors) + [tensor([[-4.5000, -4.5000, 4.5000, 4.5000], + [11.5000, -4.5000, 20.5000, 4.5000], + [-4.5000, 11.5000, 4.5000, 20.5000], + [11.5000, 11.5000, 20.5000, 20.5000]])] + >>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18]) + >>> all_anchors = self.grid_anchors([(2, 2), (1, 1)], device='cpu') + >>> print(all_anchors) + [tensor([[-4.5000, -4.5000, 4.5000, 4.5000], + [11.5000, -4.5000, 20.5000, 4.5000], + [-4.5000, 11.5000, 4.5000, 20.5000], + [11.5000, 11.5000, 20.5000, 20.5000]]), \ + tensor([[-9., -9., 9., 9.]])] + """ + + def __init__(self, + strides, + ratios, + scales=None, + base_sizes=None, + scale_major=True, + octave_base_scale=None, + scales_per_octave=None, + centers=None, + center_offset=0.): + # check center and center_offset + if center_offset != 0: + assert centers is None, 'center cannot be set when center_offset' \ + f'!=0, {centers} is given.' + if not (0 <= center_offset <= 1): + raise ValueError('center_offset should be in range [0, 1], ' + f'{center_offset} is given.') + if centers is not None: + assert len(centers) == len(strides), \ + 'The number of strides should be the same as centers, got ' \ + f'{strides} and {centers}' + + # calculate base sizes of anchors + self.strides = [_pair(stride) for stride in strides] + self.base_sizes = [min(stride) for stride in self.strides + ] if base_sizes is None else base_sizes + assert len(self.base_sizes) == len(self.strides), \ + 'The number of strides should be the same as base sizes, got ' \ + f'{self.strides} and {self.base_sizes}' + + # calculate scales of anchors + assert ((octave_base_scale is not None + and scales_per_octave is not None) ^ (scales is not None)), \ + 'scales and octave_base_scale with scales_per_octave cannot' \ + ' be set at the same time' + if scales is not None: + self.scales = torch.Tensor(scales) + elif octave_base_scale is not None and scales_per_octave is not None: + octave_scales = np.array( + [2**(i / scales_per_octave) for i in range(scales_per_octave)]) + scales = octave_scales * octave_base_scale + self.scales = torch.Tensor(scales) + else: + raise ValueError('Either scales or octave_base_scale with ' + 'scales_per_octave should be set') + + self.octave_base_scale = octave_base_scale + self.scales_per_octave = scales_per_octave + self.ratios = torch.Tensor(ratios) + self.scale_major = scale_major + self.centers = centers + self.center_offset = center_offset + self.base_anchors = self.gen_base_anchors() + + @property + def num_base_anchors(self): + """list[int]: total number of base anchors in a feature grid""" + return self.num_base_priors + + @property + def num_base_priors(self): + """list[int]: The number of priors (anchors) at a point + on the feature grid""" + return [base_anchors.size(0) for base_anchors in self.base_anchors] + + @property + def num_levels(self): + """int: number of feature levels that the generator will be applied""" + return len(self.strides) + + def gen_base_anchors(self): + """Generate base anchors. + + Returns: + list(torch.Tensor): Base anchors of a feature grid in multiple \ + feature levels. 
+ """ + multi_level_base_anchors = [] + for i, base_size in enumerate(self.base_sizes): + center = None + if self.centers is not None: + center = self.centers[i] + multi_level_base_anchors.append( + self.gen_single_level_base_anchors( + base_size, + scales=self.scales, + ratios=self.ratios, + center=center)) + return multi_level_base_anchors + + def gen_single_level_base_anchors(self, + base_size, + scales, + ratios, + center=None): + """Generate base anchors of a single level. + + Args: + base_size (int | float): Basic size of an anchor. + scales (torch.Tensor): Scales of the anchor. + ratios (torch.Tensor): The ratio between between the height + and width of anchors in a single level. + center (tuple[float], optional): The center of the base anchor + related to a single feature grid. Defaults to None. + + Returns: + torch.Tensor: Anchors in a single-level feature maps. + """ + w = base_size + h = base_size + if center is None: + x_center = self.center_offset * w + y_center = self.center_offset * h + else: + x_center, y_center = center + + h_ratios = torch.sqrt(ratios) + w_ratios = 1 / h_ratios + if self.scale_major: + ws = (w * w_ratios[:, None] * scales[None, :]).view(-1) + hs = (h * h_ratios[:, None] * scales[None, :]).view(-1) + else: + ws = (w * scales[:, None] * w_ratios[None, :]).view(-1) + hs = (h * scales[:, None] * h_ratios[None, :]).view(-1) + + # use float anchor and the anchor's center is aligned with the + # pixel center + base_anchors = [ + x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws, + y_center + 0.5 * hs + ] + base_anchors = torch.stack(base_anchors, dim=-1) + + return base_anchors + + def _meshgrid(self, x, y, row_major=True): + """Generate mesh grid of x and y. + + Args: + x (torch.Tensor): Grids of x dimension. + y (torch.Tensor): Grids of y dimension. + row_major (bool, optional): Whether to return y grids first. + Defaults to True. + + Returns: + tuple[torch.Tensor]: The mesh grids of x and y. + """ + # use shape instead of len to keep tracing while exporting to onnx + xx = x.repeat(y.shape[0]) + yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1) + if row_major: + return xx, yy + else: + return yy, xx + + def grid_priors(self, featmap_sizes, device='cuda'): + """Generate grid anchors in multiple feature levels. + + Args: + featmap_sizes (list[tuple]): List of feature map sizes in + multiple feature levels. + device (str): The device where the anchors will be put on. + + Return: + list[torch.Tensor]: Anchors in multiple feature levels. \ + The sizes of each tensor should be [N, 4], where \ + N = width * height * num_base_anchors, width and height \ + are the sizes of the corresponding feature level, \ + num_base_anchors is the number of anchors for that level. + """ + assert self.num_levels == len(featmap_sizes) + multi_level_anchors = [] + for i in range(self.num_levels): + anchors = self.single_level_grid_priors( + featmap_sizes[i], level_idx=i, device=device) + multi_level_anchors.append(anchors) + return multi_level_anchors + + def single_level_grid_priors(self, featmap_size, level_idx, device='cuda'): + """Generate grid anchors of a single level. + + Note: + This function is usually called by method ``self.grid_priors``. + + Args: + featmap_size (tuple[int]): Size of the feature maps. + level_idx (int): The index of corresponding feature map level. + device (str, optional): The device the tensor will be put on. + Defaults to 'cuda'. + + Returns: + torch.Tensor: Anchors in the overall feature maps. 
+ """ + + base_anchors = self.base_anchors[level_idx].to(device) + feat_h, feat_w = featmap_size + stride_w, stride_h = self.strides[level_idx] + shift_x = torch.arange(0, feat_w, device=device) * stride_w + shift_y = torch.arange(0, feat_h, device=device) * stride_h + + shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) + shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1) + shifts = shifts.type_as(base_anchors) + # first feat_w elements correspond to the first row of shifts + # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get + # shifted anchors (K, A, 4), reshape to (K*A, 4) + + all_anchors = base_anchors[None, :, :] + shifts[:, None, :] + all_anchors = all_anchors.view(-1, 4) + # first A rows correspond to A anchors of (0, 0) in feature map, + # then (0, 1), (0, 2), ... + return all_anchors + + def sparse_priors(self, + prior_idxs, + featmap_size, + level_idx, + dtype=torch.float32, + device='cuda'): + """Generate sparse anchors according to the ``prior_idxs``. + + Args: + prior_idxs (Tensor): The index of corresponding anchors + in the feature map. + featmap_size (tuple[int]): feature map size arrange as (h, w). + level_idx (int): The level index of corresponding feature + map. + dtype (obj:`torch.dtype`): Date type of points.Defaults to + ``torch.float32``. + device (obj:`torch.device`): The device where the points is + located. + Returns: + Tensor: Anchor with shape (N, 4), N should be equal to + the length of ``prior_idxs``. + """ + + height, width = featmap_size + num_base_anchors = self.num_base_anchors[level_idx] + base_anchor_id = prior_idxs % num_base_anchors + x = (prior_idxs // + num_base_anchors) % width * self.strides[level_idx][0] + y = (prior_idxs // width // + num_base_anchors) % height * self.strides[level_idx][1] + priors = torch.stack([x, y, x, y], 1).to(dtype).to(device) + \ + self.base_anchors[level_idx][base_anchor_id, :].to(device) + + return priors + + def grid_anchors(self, featmap_sizes, device='cuda'): + """Generate grid anchors in multiple feature levels. + + Args: + featmap_sizes (list[tuple]): List of feature map sizes in + multiple feature levels. + device (str): Device where the anchors will be put on. + + Return: + list[torch.Tensor]: Anchors in multiple feature levels. \ + The sizes of each tensor should be [N, 4], where \ + N = width * height * num_base_anchors, width and height \ + are the sizes of the corresponding feature level, \ + num_base_anchors is the number of anchors for that level. + """ + warnings.warn('``grid_anchors`` would be deprecated soon. ' + 'Please use ``grid_priors`` ') + + assert self.num_levels == len(featmap_sizes) + multi_level_anchors = [] + for i in range(self.num_levels): + anchors = self.single_level_grid_anchors( + self.base_anchors[i].to(device), + featmap_sizes[i], + self.strides[i], + device=device) + multi_level_anchors.append(anchors) + return multi_level_anchors + + def single_level_grid_anchors(self, + base_anchors, + featmap_size, + stride=(16, 16), + device='cuda'): + """Generate grid anchors of a single level. + + Note: + This function is usually called by method ``self.grid_anchors``. + + Args: + base_anchors (torch.Tensor): The base anchors of a feature grid. + featmap_size (tuple[int]): Size of the feature maps. + stride (tuple[int], optional): Stride of the feature map in order + (w, h). Defaults to (16, 16). + device (str, optional): Device the tensor will be put on. + Defaults to 'cuda'. + + Returns: + torch.Tensor: Anchors in the overall feature maps. 
+ """ + + warnings.warn( + '``single_level_grid_anchors`` would be deprecated soon. ' + 'Please use ``single_level_grid_priors`` ') + + # keep featmap_size as Tensor instead of int, so that we + # can covert to ONNX correctly + feat_h, feat_w = featmap_size + shift_x = torch.arange(0, feat_w, device=device) * stride[0] + shift_y = torch.arange(0, feat_h, device=device) * stride[1] + + shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) + shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1) + shifts = shifts.type_as(base_anchors) + # first feat_w elements correspond to the first row of shifts + # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get + # shifted anchors (K, A, 4), reshape to (K*A, 4) + + all_anchors = base_anchors[None, :, :] + shifts[:, None, :] + all_anchors = all_anchors.view(-1, 4) + # first A rows correspond to A anchors of (0, 0) in feature map, + # then (0, 1), (0, 2), ... + return all_anchors + + def valid_flags(self, featmap_sizes, pad_shape, device='cuda'): + """Generate valid flags of anchors in multiple feature levels. + + Args: + featmap_sizes (list(tuple)): List of feature map sizes in + multiple feature levels. + pad_shape (tuple): The padded shape of the image. + device (str): Device where the anchors will be put on. + + Return: + list(torch.Tensor): Valid flags of anchors in multiple levels. + """ + assert self.num_levels == len(featmap_sizes) + multi_level_flags = [] + for i in range(self.num_levels): + anchor_stride = self.strides[i] + feat_h, feat_w = featmap_sizes[i] + h, w = pad_shape[:2] + valid_feat_h = min(int(np.ceil(h / anchor_stride[1])), feat_h) + valid_feat_w = min(int(np.ceil(w / anchor_stride[0])), feat_w) + flags = self.single_level_valid_flags((feat_h, feat_w), + (valid_feat_h, valid_feat_w), + self.num_base_anchors[i], + device=device) + multi_level_flags.append(flags) + return multi_level_flags + + def single_level_valid_flags(self, + featmap_size, + valid_size, + num_base_anchors, + device='cuda'): + """Generate the valid flags of anchor in a single feature map. + + Args: + featmap_size (tuple[int]): The size of feature maps, arrange + as (h, w). + valid_size (tuple[int]): The valid size of the feature maps. + num_base_anchors (int): The number of base anchors. + device (str, optional): Device where the flags will be put on. + Defaults to 'cuda'. + + Returns: + torch.Tensor: The valid flags of each anchor in a single level \ + feature map. 
+ """ + feat_h, feat_w = featmap_size + valid_h, valid_w = valid_size + assert valid_h <= feat_h and valid_w <= feat_w + valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) + valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) + valid_x[:valid_w] = 1 + valid_y[:valid_h] = 1 + valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) + valid = valid_xx & valid_yy + valid = valid[:, None].expand(valid.size(0), + num_base_anchors).contiguous().view(-1) + return valid + + def __repr__(self): + """str: a string that describes the module""" + indent_str = ' ' + repr_str = self.__class__.__name__ + '(\n' + repr_str += f'{indent_str}strides={self.strides},\n' + repr_str += f'{indent_str}ratios={self.ratios},\n' + repr_str += f'{indent_str}scales={self.scales},\n' + repr_str += f'{indent_str}base_sizes={self.base_sizes},\n' + repr_str += f'{indent_str}scale_major={self.scale_major},\n' + repr_str += f'{indent_str}octave_base_scale=' + repr_str += f'{self.octave_base_scale},\n' + repr_str += f'{indent_str}scales_per_octave=' + repr_str += f'{self.scales_per_octave},\n' + repr_str += f'{indent_str}num_levels={self.num_levels}\n' + repr_str += f'{indent_str}centers={self.centers},\n' + repr_str += f'{indent_str}center_offset={self.center_offset})' + return repr_str + + +@PRIOR_GENERATORS.register_module() +class SSDAnchorGenerator(AnchorGenerator): + """Anchor generator for SSD. + + Args: + strides (list[int] | list[tuple[int, int]]): Strides of anchors + in multiple feature levels. + ratios (list[float]): The list of ratios between the height and width + of anchors in a single level. + basesize_ratio_range (tuple(float)): Ratio range of anchors. + input_size (int): Size of feature map, 300 for SSD300, + 512 for SSD512. + scale_major (bool): Whether to multiply scales first when generating + base anchors. If true, the anchors in the same row will have the + same scales. It is always set to be False in SSD. + """ + + def __init__(self, + strides, + ratios, + basesize_ratio_range, + input_size=300, + scale_major=True): + assert len(strides) == len(ratios) + assert is_tuple_of(basesize_ratio_range, float) + + self.strides = [_pair(stride) for stride in strides] + self.input_size = input_size + self.centers = [(stride[0] / 2., stride[1] / 2.) 
+ for stride in self.strides] + self.basesize_ratio_range = basesize_ratio_range + + # calculate anchor ratios and sizes + min_ratio, max_ratio = basesize_ratio_range + min_ratio = int(min_ratio * 100) + max_ratio = int(max_ratio * 100) + step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2)) + min_sizes = [] + max_sizes = [] + for ratio in range(int(min_ratio), int(max_ratio) + 1, step): + min_sizes.append(int(self.input_size * ratio / 100)) + max_sizes.append(int(self.input_size * (ratio + step) / 100)) + if self.input_size == 300: + if basesize_ratio_range[0] == 0.15: # SSD300 COCO + min_sizes.insert(0, int(self.input_size * 7 / 100)) + max_sizes.insert(0, int(self.input_size * 15 / 100)) + elif basesize_ratio_range[0] == 0.2: # SSD300 VOC + min_sizes.insert(0, int(self.input_size * 10 / 100)) + max_sizes.insert(0, int(self.input_size * 20 / 100)) + else: + raise ValueError( + 'basesize_ratio_range[0] should be either 0.15 ' + 'or 0.2 when input_size is 300, got ' + f'{basesize_ratio_range[0]}.') + elif self.input_size == 512: + if basesize_ratio_range[0] == 0.1: # SSD512 COCO + min_sizes.insert(0, int(self.input_size * 4 / 100)) + max_sizes.insert(0, int(self.input_size * 10 / 100)) + elif basesize_ratio_range[0] == 0.15: # SSD512 VOC + min_sizes.insert(0, int(self.input_size * 7 / 100)) + max_sizes.insert(0, int(self.input_size * 15 / 100)) + else: + raise ValueError('basesize_ratio_range[0] should be either 0.1 ' + 'or 0.15 when input_size is 512, got' + f' {basesize_ratio_range[0]}.') + else: + raise ValueError('Only support 300 or 512 in SSDAnchorGenerator' + f', got {self.input_size}.') + + anchor_ratios = [] + anchor_scales = [] + for k in range(len(self.strides)): + scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])] + anchor_ratio = [1.] + for r in ratios[k]: + anchor_ratio += [1 / r, r] # 4 or 6 ratios + anchor_ratios.append(torch.Tensor(anchor_ratio)) + anchor_scales.append(torch.Tensor(scales)) + + self.base_sizes = min_sizes + self.scales = anchor_scales + self.ratios = anchor_ratios + self.scale_major = scale_major + self.center_offset = 0 + self.base_anchors = self.gen_base_anchors() + + def gen_base_anchors(self): + """Generate base anchors. + + Returns: + list(torch.Tensor): Base anchors of a feature grid in multiple \ + feature levels.
+ """ + multi_level_base_anchors = [] + for i, base_size in enumerate(self.base_sizes): + base_anchors = self.gen_single_level_base_anchors( + base_size, + scales=self.scales[i], + ratios=self.ratios[i], + center=self.centers[i]) + indices = list(range(len(self.ratios[i]))) + indices.insert(1, len(indices)) + base_anchors = torch.index_select(base_anchors, 0, + torch.LongTensor(indices)) + multi_level_base_anchors.append(base_anchors) + return multi_level_base_anchors + + def __repr__(self): + """str: a string that describes the module""" + indent_str = ' ' + repr_str = self.__class__.__name__ + '(\n' + repr_str += f'{indent_str}strides={self.strides},\n' + repr_str += f'{indent_str}scales={self.scales},\n' + repr_str += f'{indent_str}scale_major={self.scale_major},\n' + repr_str += f'{indent_str}input_size={self.input_size},\n' + repr_str += f'{indent_str}scales={self.scales},\n' + repr_str += f'{indent_str}ratios={self.ratios},\n' + repr_str += f'{indent_str}num_levels={self.num_levels},\n' + repr_str += f'{indent_str}base_sizes={self.base_sizes},\n' + repr_str += f'{indent_str}basesize_ratio_range=' + repr_str += f'{self.basesize_ratio_range})' + return repr_str + + +@PRIOR_GENERATORS.register_module() +class LegacyAnchorGenerator(AnchorGenerator): + """Legacy anchor generator used in MMDetection V1.x. + + Note: + Difference to the V2.0 anchor generator: + + 1. The center offset of V1.x anchors are set to be 0.5 rather than 0. + 2. The width/height are minused by 1 when calculating the anchors' \ + centers and corners to meet the V1.x coordinate system. + 3. The anchors' corners are quantized. + + Args: + strides (list[int] | list[tuple[int]]): Strides of anchors + in multiple feature levels. + ratios (list[float]): The list of ratios between the height and width + of anchors in a single level. + scales (list[int] | None): Anchor scales for anchors in a single level. + It cannot be set at the same time if `octave_base_scale` and + `scales_per_octave` are set. + base_sizes (list[int]): The basic sizes of anchors in multiple levels. + If None is given, strides will be used to generate base_sizes. + scale_major (bool): Whether to multiply scales first when generating + base anchors. If true, the anchors in the same row will have the + same scales. By default it is True in V2.0 + octave_base_scale (int): The base scale of octave. + scales_per_octave (int): Number of scales for each octave. + `octave_base_scale` and `scales_per_octave` are usually used in + retinanet and the `scales` should be None when they are set. + centers (list[tuple[float, float]] | None): The centers of the anchor + relative to the feature grid center in multiple feature levels. + By default it is set to be None and not used. It a list of float + is given, this list will be used to shift the centers of anchors. + center_offset (float): The offset of center in propotion to anchors' + width and height. By default it is 0.5 in V2.0 but it should be 0.5 + in v1.x models. + + Examples: + >>> from mmcv.core import LegacyAnchorGenerator + >>> self = LegacyAnchorGenerator( + >>> [16], [1.], [1.], [9], center_offset=0.5) + >>> all_anchors = self.grid_anchors(((2, 2),), device='cpu') + >>> print(all_anchors) + [tensor([[ 0., 0., 8., 8.], + [16., 0., 24., 8.], + [ 0., 16., 8., 24.], + [16., 16., 24., 24.]])] + """ + + def gen_single_level_base_anchors(self, + base_size, + scales, + ratios, + center=None): + """Generate base anchors of a single level. 
+ + Note: + The width/height of anchors are decreased by 1 when calculating \ + the centers and corners to meet the V1.x coordinate system. + + Args: + base_size (int | float): Basic size of an anchor. + scales (torch.Tensor): Scales of the anchor. + ratios (torch.Tensor): The ratio between the height + and width of anchors in a single level. + center (tuple[float], optional): The center of the base anchor + related to a single feature grid. Defaults to None. + + Returns: + torch.Tensor: Anchors in a single-level feature map. + """ + w = base_size + h = base_size + if center is None: + x_center = self.center_offset * (w - 1) + y_center = self.center_offset * (h - 1) + else: + x_center, y_center = center + + h_ratios = torch.sqrt(ratios) + w_ratios = 1 / h_ratios + if self.scale_major: + ws = (w * w_ratios[:, None] * scales[None, :]).view(-1) + hs = (h * h_ratios[:, None] * scales[None, :]).view(-1) + else: + ws = (w * scales[:, None] * w_ratios[None, :]).view(-1) + hs = (h * scales[:, None] * h_ratios[None, :]).view(-1) + + # use float anchor and the anchor's center is aligned with the + # pixel center + base_anchors = [ + x_center - 0.5 * (ws - 1), y_center - 0.5 * (hs - 1), + x_center + 0.5 * (ws - 1), y_center + 0.5 * (hs - 1) + ] + base_anchors = torch.stack(base_anchors, dim=-1).round() + + return base_anchors + + +@PRIOR_GENERATORS.register_module() +class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator): + """Legacy anchor generator used in MMDetection V1.x. + + The difference between `LegacySSDAnchorGenerator` and `SSDAnchorGenerator` + can be found in `LegacyAnchorGenerator`. + """ + + def __init__(self, + strides, + ratios, + basesize_ratio_range, + input_size=300, + scale_major=True): + super(LegacySSDAnchorGenerator, + self).__init__(strides, ratios, basesize_ratio_range, input_size, + scale_major) + self.centers = [((stride - 1) / 2., (stride - 1) / 2.) + for stride in strides] + self.base_anchors = self.gen_base_anchors() + + +@PRIOR_GENERATORS.register_module() +class YOLOAnchorGenerator(AnchorGenerator): + """Anchor generator for YOLO. + + Args: + strides (list[int] | list[tuple[int, int]]): Strides of anchors + in multiple feature levels. + base_sizes (list[list[tuple[int, int]]]): The basic sizes + of anchors in multiple levels. + """ + + def __init__(self, strides, base_sizes): + self.strides = [_pair(stride) for stride in strides] + self.centers = [(stride[0] / 2., stride[1] / 2.) + for stride in self.strides] + self.base_sizes = [] + num_anchor_per_level = len(base_sizes[0]) + for base_sizes_per_level in base_sizes: + assert num_anchor_per_level == len(base_sizes_per_level) + self.base_sizes.append( + [_pair(base_size) for base_size in base_sizes_per_level]) + self.base_anchors = self.gen_base_anchors() + + @property + def num_levels(self): + """int: number of feature levels that the generator will be applied""" + return len(self.base_sizes) + + def gen_base_anchors(self): + """Generate base anchors. + + Returns: + list(torch.Tensor): Base anchors of a feature grid in multiple \ + feature levels. + """ + multi_level_base_anchors = [] + for i, base_sizes_per_level in enumerate(self.base_sizes): + center = None + if self.centers is not None: + center = self.centers[i] + multi_level_base_anchors.append( + self.gen_single_level_base_anchors(base_sizes_per_level, + center)) + return multi_level_base_anchors + + def gen_single_level_base_anchors(self, base_sizes_per_level, center=None): + """Generate base anchors of a single level.
+ + Args: + base_sizes_per_level (list[tuple[int, int]]): Basic sizes of + anchors. + center (tuple[float], optional): The center of the base anchor + related to a single feature grid. Defaults to None. + + Returns: + torch.Tensor: Anchors in a single-level feature maps. + """ + x_center, y_center = center + base_anchors = [] + for base_size in base_sizes_per_level: + w, h = base_size + + # use float anchor and the anchor's center is aligned with the + # pixel center + base_anchor = torch.Tensor([ + x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w, + y_center + 0.5 * h + ]) + base_anchors.append(base_anchor) + base_anchors = torch.stack(base_anchors, dim=0) + + return base_anchors + + def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'): + """Generate responsible anchor flags of grid cells in multiple scales. + + Args: + featmap_sizes (list(tuple)): List of feature map sizes in multiple + feature levels. + gt_bboxes (Tensor): Ground truth boxes, shape (n, 4). + device (str): Device where the anchors will be put on. + + Return: + list(torch.Tensor): responsible flags of anchors in multiple level + """ + assert self.num_levels == len(featmap_sizes) + multi_level_responsible_flags = [] + for i in range(self.num_levels): + anchor_stride = self.strides[i] + flags = self.single_level_responsible_flags( + featmap_sizes[i], + gt_bboxes, + anchor_stride, + self.num_base_anchors[i], + device=device) + multi_level_responsible_flags.append(flags) + return multi_level_responsible_flags + + def single_level_responsible_flags(self, + featmap_size, + gt_bboxes, + stride, + num_base_anchors, + device='cuda'): + """Generate the responsible flags of anchor in a single feature map. + + Args: + featmap_size (tuple[int]): The size of feature maps. + gt_bboxes (Tensor): Ground truth boxes, shape (n, 4). + stride (tuple(int)): stride of current level + num_base_anchors (int): The number of base anchors. + device (str, optional): Device where the flags will be put on. + Defaults to 'cuda'. + + Returns: + torch.Tensor: The valid flags of each anchor in a single level \ + feature map. 
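+
+        Example:
+            >>> # Illustrative sketch only; the stride and anchor sizes below
+            >>> # are arbitrary values, not taken from any config.
+            >>> self = YOLOAnchorGenerator(
+            >>>     [32], [[(116, 90), (156, 198), (373, 326)]])
+            >>> gt_bboxes = torch.Tensor([[0., 0., 40., 40.]])
+            >>> flags = self.single_level_responsible_flags(
+            >>>     (4, 4), gt_bboxes, (32, 32), 3, device='cpu')
+            >>> # the gt center (20, 20) falls into grid cell 0, so only the
+            >>> # 3 base anchors of that cell are flagged responsible
+            >>> flags.shape
+            torch.Size([48])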
+ """ + feat_h, feat_w = featmap_size + gt_bboxes_cx = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device) + gt_bboxes_cy = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device) + gt_bboxes_grid_x = torch.floor(gt_bboxes_cx / stride[0]).long() + gt_bboxes_grid_y = torch.floor(gt_bboxes_cy / stride[1]).long() + + # row major indexing + gt_bboxes_grid_idx = gt_bboxes_grid_y * feat_w + gt_bboxes_grid_x + + responsible_grid = torch.zeros( + feat_h * feat_w, dtype=torch.uint8, device=device) + responsible_grid[gt_bboxes_grid_idx] = 1 + + responsible_grid = responsible_grid[:, None].expand( + responsible_grid.size(0), num_base_anchors).contiguous().view(-1) + return responsible_grid diff --git a/mmcv/core/anchor/builder.py b/mmcv/core/anchor/builder.py new file mode 100644 index 0000000..d53a624 --- /dev/null +++ b/mmcv/core/anchor/builder.py @@ -0,0 +1,18 @@ +import warnings + +from mmcv.utils import Registry, build_from_cfg + +PRIOR_GENERATORS = Registry('Generator for anchors and points') + +ANCHOR_GENERATORS = PRIOR_GENERATORS + + +def build_prior_generator(cfg, default_args=None): + return build_from_cfg(cfg, PRIOR_GENERATORS, default_args) + + +def build_anchor_generator(cfg, default_args=None): + warnings.warn( + '``build_anchor_generator`` would be deprecated soon, please use ' + '``build_prior_generator`` ') + return build_prior_generator(cfg, default_args=default_args) diff --git a/mmcv/core/anchor/point_generator.py b/mmcv/core/anchor/point_generator.py new file mode 100644 index 0000000..7b11a85 --- /dev/null +++ b/mmcv/core/anchor/point_generator.py @@ -0,0 +1,241 @@ +import numpy as np +import torch +from torch.nn.modules.utils import _pair + +from .builder import PRIOR_GENERATORS + + +@PRIOR_GENERATORS.register_module() +class PointGenerator: + + def _meshgrid(self, x, y, row_major=True): + xx = x.repeat(len(y)) + yy = y.view(-1, 1).repeat(1, len(x)).view(-1) + if row_major: + return xx, yy + else: + return yy, xx + + def grid_points(self, featmap_size, stride=16, device='cuda'): + feat_h, feat_w = featmap_size + shift_x = torch.arange(0., feat_w, device=device) * stride + shift_y = torch.arange(0., feat_h, device=device) * stride + shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) + stride = shift_x.new_full((shift_xx.shape[0], ), stride) + shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1) + all_points = shifts.to(device) + return all_points + + def valid_flags(self, featmap_size, valid_size, device='cuda'): + feat_h, feat_w = featmap_size + valid_h, valid_w = valid_size + assert valid_h <= feat_h and valid_w <= feat_w + valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) + valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) + valid_x[:valid_w] = 1 + valid_y[:valid_h] = 1 + valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) + valid = valid_xx & valid_yy + return valid + + +@PRIOR_GENERATORS.register_module() +class MlvlPointGenerator: + """Standard points generator for multi-level (Mlvl) feature maps in 2D + points-based detectors. + + Args: + strides (list[int] | list[tuple[int, int]]): Strides of anchors + in multiple feature levels in order (w, h). + offset (float): The offset of points, the value is normalized with + corresponding stride. Defaults to 0.5. 
+ """ + + def __init__(self, strides, offset=0.5): + self.strides = [_pair(stride) for stride in strides] + self.offset = offset + + @property + def num_levels(self): + """int: number of feature levels that the generator will be applied""" + return len(self.strides) + + @property + def num_base_priors(self): + """list[int]: The number of priors (points) at a point + on the feature grid""" + return [1 for _ in range(len(self.strides))] + + def _meshgrid(self, x, y, row_major=True): + xx = x.repeat(len(y)) + yy = y.view(-1, 1).repeat(1, len(x)).view(-1) + if row_major: + return xx, yy + else: + return yy, xx + + def grid_priors(self, featmap_sizes, device='cuda', with_stride=False): + """Generate grid points of multiple feature levels. + + Args: + featmap_sizes (list[tuple]): List of feature map sizes in + multiple feature levels, each size arrange as + as (h, w). + device (str): The device where the anchors will be put on. + with_stride (bool): Whether to concatenate the stride to + the last dimension of points. + + Return: + list[torch.Tensor]: Points of multiple feature levels. + The sizes of each tensor should be (N, 2) when with stride is + ``False``, where N = width * height, width and height + are the sizes of the corresponding feature level, + and the last dimension 2 represent (coord_x, coord_y), + otherwise the shape should be (N, 4), + and the last dimension 4 represent + (coord_x, coord_y, stride_w, stride_h). + """ + assert self.num_levels == len(featmap_sizes) + multi_level_priors = [] + for i in range(self.num_levels): + priors = self.single_level_grid_priors( + featmap_sizes[i], + level_idx=i, + device=device, + with_stride=with_stride) + multi_level_priors.append(priors) + return multi_level_priors + + def single_level_grid_priors(self, + featmap_size, + level_idx, + device='cuda', + with_stride=False): + """Generate grid Points of a single level. + + Note: + This function is usually called by method ``self.grid_priors``. + + Args: + featmap_size (tuple[int]): Size of the feature maps, arrange as + (h, w). + level_idx (int): The index of corresponding feature map level. + device (str, optional): The device the tensor will be put on. + Defaults to 'cuda'. + with_stride (bool): Concatenate the stride to the last dimension + of points. + + Return: + Tensor: Points of single feature levels. + The shape of tensor should be (N, 2) when with stride is + ``False``, where N = width * height, width and height + are the sizes of the corresponding feature level, + and the last dimension 2 represent (coord_x, coord_y), + otherwise the shape should be (N, 4), + and the last dimension 4 represent + (coord_x, coord_y, stride_w, stride_h). + """ + feat_h, feat_w = featmap_size + stride_w, stride_h = self.strides[level_idx] + shift_x = (torch.arange(0., feat_w, device=device) + + self.offset) * stride_w + shift_y = (torch.arange(0., feat_h, device=device) + + self.offset) * stride_h + shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) + if not with_stride: + shifts = torch.stack([shift_xx, shift_yy], dim=-1) + else: + stride_w = shift_xx.new_full((len(shift_xx), ), stride_w) + stride_h = shift_xx.new_full((len(shift_yy), ), stride_h) + shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], + dim=-1) + all_points = shifts.to(device) + return all_points + + def valid_flags(self, featmap_sizes, pad_shape, device='cuda'): + """Generate valid flags of points of multiple feature levels. 
+
+        Args:
+            featmap_sizes (list(tuple)): List of feature map sizes in
+                multiple feature levels, each size arranged as (h, w).
+            pad_shape (tuple(int)): The padded shape of the image,
+                arranged as (h, w).
+            device (str): The device where the anchors will be put on.
+
+        Returns:
+            list(torch.Tensor): Valid flags of points of multiple levels.
+        """
+        assert self.num_levels == len(featmap_sizes)
+        multi_level_flags = []
+        for i in range(self.num_levels):
+            point_stride = self.strides[i]
+            feat_h, feat_w = featmap_sizes[i]
+            h, w = pad_shape[:2]
+            valid_feat_h = min(int(np.ceil(h / point_stride[1])), feat_h)
+            valid_feat_w = min(int(np.ceil(w / point_stride[0])), feat_w)
+            flags = self.single_level_valid_flags((feat_h, feat_w),
+                                                  (valid_feat_h, valid_feat_w),
+                                                  device=device)
+            multi_level_flags.append(flags)
+        return multi_level_flags
+
+    def single_level_valid_flags(self,
+                                 featmap_size,
+                                 valid_size,
+                                 device='cuda'):
+        """Generate the valid flags of points of a single feature map.
+
+        Args:
+            featmap_size (tuple[int]): The size of feature maps, arranged
+                as (h, w).
+            valid_size (tuple[int]): The valid size of the feature maps.
+                The size arranged as (h, w).
+            device (str, optional): The device where the flags will be put on.
+                Defaults to 'cuda'.
+
+        Returns:
+            torch.Tensor: The valid flags of each points in a single level \
+                feature map.
+        """
+        feat_h, feat_w = featmap_size
+        valid_h, valid_w = valid_size
+        assert valid_h <= feat_h and valid_w <= feat_w
+        valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
+        valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
+        valid_x[:valid_w] = 1
+        valid_y[:valid_h] = 1
+        valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
+        valid = valid_xx & valid_yy
+        return valid
+
+    def sparse_priors(self,
+                      prior_idxs,
+                      featmap_size,
+                      level_idx,
+                      dtype=torch.float32,
+                      device='cuda'):
+        """Generate sparse points according to the ``prior_idxs``.
+
+        Args:
+            prior_idxs (Tensor): The index of corresponding anchors
+                in the feature map.
+            featmap_size (tuple[int]): feature map size arranged as (h, w).
+            level_idx (int): The level index of corresponding feature
+                map.
+            dtype (obj:`torch.dtype`): Data type of points. Defaults to
+                ``torch.float32``.
+            device (obj:`torch.device`): The device where the points are
+                located.
+        Returns:
+            Tensor: Anchor with shape (N, 2), N should be equal to
+                the length of ``prior_idxs``. And last dimension
+                2 represent (coord_x, coord_y).
+        """
+        height, width = featmap_size
+        x = (prior_idxs % width + self.offset) * self.strides[level_idx][0]
+        y = ((prior_idxs // width) % height +
+             self.offset) * self.strides[level_idx][1]
+        priors = torch.stack([x, y], 1).to(dtype)
+        priors = priors.to(device)
+        return priors
diff --git a/mmcv/core/anchor/utils.py b/mmcv/core/anchor/utils.py
new file mode 100644
index 0000000..ab9b53f
--- /dev/null
+++ b/mmcv/core/anchor/utils.py
@@ -0,0 +1,71 @@
+import torch
+
+
+def images_to_levels(target, num_levels):
+    """Convert targets by image to targets by feature level.
+
+    [target_img0, target_img1] -> [target_level0, target_level1, ...]
+    """
+    target = torch.stack(target, 0)
+    level_targets = []
+    start = 0
+    for n in num_levels:
+        end = start + n
+        # level_targets.append(target[:, start:end].squeeze(0))
+        level_targets.append(target[:, start:end])
+        start = end
+    return level_targets
+
+
+def anchor_inside_flags(flat_anchors,
+                        valid_flags,
+                        img_shape,
+                        allowed_border=0):
+    """Check whether the anchors are inside the border.
+ + Args: + flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4). + valid_flags (torch.Tensor): An existing valid flags of anchors. + img_shape (tuple(int)): Shape of current image. + allowed_border (int, optional): The border to allow the valid anchor. + Defaults to 0. + + Returns: + torch.Tensor: Flags indicating whether the anchors are inside a \ + valid range. + """ + img_h, img_w = img_shape[:2] + if allowed_border >= 0: + inside_flags = valid_flags & \ + (flat_anchors[:, 0] >= -allowed_border) & \ + (flat_anchors[:, 1] >= -allowed_border) & \ + (flat_anchors[:, 2] < img_w + allowed_border) & \ + (flat_anchors[:, 3] < img_h + allowed_border) + else: + inside_flags = valid_flags + return inside_flags + + +def calc_region(bbox, ratio, featmap_size=None): + """Calculate a proportional bbox region. + + The bbox center are fixed and the new h' and w' is h * ratio and w * ratio. + + Args: + bbox (Tensor): Bboxes to calculate regions, shape (n, 4). + ratio (float): Ratio of the output region. + featmap_size (tuple): Feature map size used for clipping the boundary. + + Returns: + tuple: x1, y1, x2, y2 + """ + x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long() + y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long() + x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long() + y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long() + if featmap_size is not None: + x1 = x1.clamp(min=0, max=featmap_size[1]) + y1 = y1.clamp(min=0, max=featmap_size[0]) + x2 = x2.clamp(min=0, max=featmap_size[1]) + y2 = y2.clamp(min=0, max=featmap_size[0]) + return (x1, y1, x2, y2) diff --git a/mmcv/core/bbox/__init__.py b/mmcv/core/bbox/__init__.py new file mode 100644 index 0000000..3399260 --- /dev/null +++ b/mmcv/core/bbox/__init__.py @@ -0,0 +1,13 @@ +from .builder import build_assigner, build_bbox_coder, build_sampler +from .samplers import (PseudoSampler) +from .structures import (get_box_type, limit_period, + mono_cam_box2vis, points_cam2img, xywhr2xyxyr) +from .transforms import (bbox2distance, bbox2result, bbox2roi, + bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping, + bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh, + distance2bbox, roi2bbox, + bbox3d2result, bbox3d2roi, bbox3d_mapping_back) +from .iou_calculators import (BboxOverlaps2D, bbox_overlaps, AxisAlignedBboxOverlaps3D, + BboxOverlaps3D, BboxOverlapsNearest3D, + axis_aligned_bbox_overlaps_3d, bbox_overlaps_3d, + bbox_overlaps_nearest_3d) \ No newline at end of file diff --git a/mmcv/core/bbox/assigners/__init__.py b/mmcv/core/bbox/assigners/__init__.py new file mode 100644 index 0000000..9c6d438 --- /dev/null +++ b/mmcv/core/bbox/assigners/__init__.py @@ -0,0 +1,10 @@ +from .hungarian_assigner import HungarianAssigner +from .hungarian_assigner_3d import HungarianAssigner3D +from .hungarian_assigner_3d_track import HungarianAssigner3DTrack +from .base_assigner import BaseAssigner +from .map_hungarian_assigner_3d import MapHungarianAssigner3D + +# __all__ = [ +# 'HungarianAssigner', + +# ] diff --git a/mmcv/core/bbox/assigners/assign_result.py b/mmcv/core/bbox/assigners/assign_result.py new file mode 100644 index 0000000..f3b9543 --- /dev/null +++ b/mmcv/core/bbox/assigners/assign_result.py @@ -0,0 +1,204 @@ +import torch + +from mmcv.utils import util_mixins + + +class AssignResult(util_mixins.NiceRepr): + """Stores assignments between predicted and truth boxes. 
+
+    Attributes:
+        num_gts (int): the number of truth boxes considered when computing this
+            assignment
+
+        gt_inds (LongTensor): for each predicted box indicates the 1-based
+            index of the assigned truth box. 0 means unassigned and -1 means
+            ignore.
+
+        max_overlaps (FloatTensor): the iou between the predicted box and its
+            assigned truth box.
+
+        labels (None | LongTensor): If specified, for each predicted box
+            indicates the category label of the assigned truth box.
+
+    Example:
+        >>> # An assign result between 4 predicted boxes and 9 true boxes
+        >>> # where only two boxes were assigned.
+        >>> num_gts = 9
+        >>> max_overlaps = torch.FloatTensor([0, .5, .9, 0])
+        >>> gt_inds = torch.LongTensor([-1, 1, 2, 0])
+        >>> labels = torch.LongTensor([0, 3, 4, 0])
+        >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)
+        >>> print(str(self))  # xdoctest: +IGNORE_WANT
+        <AssignResult(num_gts=9, gt_inds.shape=(4,),
+                      max_overlaps.shape=(4,), labels.shape=(4,))>
+        >>> # Force addition of gt labels (when adding gt as proposals)
+        >>> new_labels = torch.LongTensor([3, 4, 5])
+        >>> self.add_gt_(new_labels)
+        >>> print(str(self))  # xdoctest: +IGNORE_WANT
+        <AssignResult(num_gts=9, gt_inds.shape=(7,),
+                      max_overlaps.shape=(7,), labels.shape=(7,))>
+    """
+
+    def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
+        self.num_gts = num_gts
+        self.gt_inds = gt_inds
+        self.max_overlaps = max_overlaps
+        self.labels = labels
+        # Interface for possible user-defined properties
+        self._extra_properties = {}
+
+    @property
+    def num_preds(self):
+        """int: the number of predictions in this assignment"""
+        return len(self.gt_inds)
+
+    def set_extra_property(self, key, value):
+        """Set user-defined new property."""
+        assert key not in self.info
+        self._extra_properties[key] = value
+
+    def get_extra_property(self, key):
+        """Get user-defined property."""
+        return self._extra_properties.get(key, None)
+
+    @property
+    def info(self):
+        """dict: a dictionary of info about the object"""
+        basic_info = {
+            'num_gts': self.num_gts,
+            'num_preds': self.num_preds,
+            'gt_inds': self.gt_inds,
+            'max_overlaps': self.max_overlaps,
+            'labels': self.labels,
+        }
+        basic_info.update(self._extra_properties)
+        return basic_info
+
+    def __nice__(self):
+        """str: a "nice" summary string describing this assign result"""
+        parts = []
+        parts.append(f'num_gts={self.num_gts!r}')
+        if self.gt_inds is None:
+            parts.append(f'gt_inds={self.gt_inds!r}')
+        else:
+            parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}')
+        if self.max_overlaps is None:
+            parts.append(f'max_overlaps={self.max_overlaps!r}')
+        else:
+            parts.append('max_overlaps.shape='
+                         f'{tuple(self.max_overlaps.shape)!r}')
+        if self.labels is None:
+            parts.append(f'labels={self.labels!r}')
+        else:
+            parts.append(f'labels.shape={tuple(self.labels.shape)!r}')
+        return ', '.join(parts)
+
+    @classmethod
+    def random(cls, **kwargs):
+        """Create random AssignResult for tests or debugging.
+
+        Args:
+            num_preds: number of predicted boxes
+            num_gts: number of true boxes
+            p_ignore (float): probability of a predicted box assigned to an
+                ignored truth
+            p_assigned (float): probability of a predicted box being
+                assigned
+            p_use_label (float | bool): with labels or not
+            rng (None | int | numpy.random.RandomState): seed or state
+
+        Returns:
+            :obj:`AssignResult`: Randomly generated assign results.
+
+        Example:
+            >>> from mmcv.core.bbox.assigners.assign_result import *  # NOQA
+            >>> self = AssignResult.random()
+            >>> print(self.info)
+        """
+        from mmcv.core.bbox import demodata
+        rng = demodata.ensure_rng(kwargs.get('rng', None))
+
+        num_gts = kwargs.get('num_gts', None)
+        num_preds = kwargs.get('num_preds', None)
+        p_ignore = kwargs.get('p_ignore', 0.3)
+        p_assigned = kwargs.get('p_assigned', 0.7)
+        p_use_label = kwargs.get('p_use_label', 0.5)
+        num_classes = kwargs.get('num_classes', 3)
+
+        if num_gts is None:
+            num_gts = rng.randint(0, 8)
+        if num_preds is None:
+            num_preds = rng.randint(0, 16)
+
+        if num_gts == 0:
+            max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
+            gt_inds = torch.zeros(num_preds, dtype=torch.int64)
+            if p_use_label is True or p_use_label < rng.rand():
+                labels = torch.zeros(num_preds, dtype=torch.int64)
+            else:
+                labels = None
+        else:
+            import numpy as np
+
+            # Create an overlap for each predicted box
+            max_overlaps = torch.from_numpy(rng.rand(num_preds))
+
+            # Construct gt_inds for each predicted box
+            is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)
+            # maximum number of assignments constraints
+            n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
+
+            assigned_idxs = np.where(is_assigned)[0]
+            rng.shuffle(assigned_idxs)
+            assigned_idxs = assigned_idxs[0:n_assigned]
+            assigned_idxs.sort()
+
+            is_assigned[:] = 0
+            is_assigned[assigned_idxs] = True
+
+            is_ignore = torch.from_numpy(
+                rng.rand(num_preds) < p_ignore) & is_assigned
+
+            gt_inds = torch.zeros(num_preds, dtype=torch.int64)
+
+            true_idxs = np.arange(num_gts)
+            rng.shuffle(true_idxs)
+            true_idxs = torch.from_numpy(true_idxs)
+            gt_inds[is_assigned] = true_idxs[:n_assigned]
+
+            gt_inds = torch.from_numpy(
+                rng.randint(1, num_gts + 1, size=num_preds))
+            gt_inds[is_ignore] = -1
+            gt_inds[~is_assigned] = 0
+            max_overlaps[~is_assigned] = 0
+
+            if p_use_label is True or p_use_label < rng.rand():
+                if num_classes == 0:
+                    labels = torch.zeros(num_preds, dtype=torch.int64)
+                else:
+                    labels = torch.from_numpy(
+                        # remind that we set FG labels to [0, num_class-1]
+                        # since mmcv v2.0
+                        # BG cat_id: num_class
+                        rng.randint(0, num_classes, size=num_preds))
+                    labels[~is_assigned] = 0
+            else:
+                labels = None
+
+        self = cls(num_gts, gt_inds, max_overlaps, labels)
+        return self
+
+    def add_gt_(self, gt_labels):
+        """Add ground truth as assigned results.
+
+        Args:
+            gt_labels (torch.Tensor): Labels of gt boxes
+        """
+        self_inds = torch.arange(
+            1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device)
+        self.gt_inds = torch.cat([self_inds, self.gt_inds])
+
+        self.max_overlaps = torch.cat(
+            [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])
+
+        if self.labels is not None:
+            self.labels = torch.cat([gt_labels, self.labels])
diff --git a/mmcv/core/bbox/assigners/base_assigner.py b/mmcv/core/bbox/assigners/base_assigner.py
new file mode 100644
index 0000000..1ff0160
--- /dev/null
+++ b/mmcv/core/bbox/assigners/base_assigner.py
@@ -0,0 +1,9 @@
+from abc import ABCMeta, abstractmethod
+
+
+class BaseAssigner(metaclass=ABCMeta):
+    """Base assigner that assigns boxes to ground truth boxes."""
+
+    @abstractmethod
+    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
+        """Assign boxes to either a ground truth box or a negative box."""
diff --git a/mmcv/core/bbox/assigners/hungarian_assigner.py b/mmcv/core/bbox/assigners/hungarian_assigner.py
new file mode 100644
index 0000000..e10cc14
--- /dev/null
+++ b/mmcv/core/bbox/assigners/hungarian_assigner.py
@@ -0,0 +1,145 @@
+import torch
+
+from ..builder import BBOX_ASSIGNERS
+from ..match_costs import build_match_cost
+from ..transforms import bbox_cxcywh_to_xyxy
+from .assign_result import AssignResult
+from .base_assigner import BaseAssigner
+
+try:
+    from scipy.optimize import linear_sum_assignment
+except ImportError:
+    linear_sum_assignment = None
+
+
+@BBOX_ASSIGNERS.register_module()
+class HungarianAssigner(BaseAssigner):
+    """Computes one-to-one matching between predictions and ground truth.
+
+    This class computes an assignment between the targets and the predictions
+    based on the costs. The costs are a weighted sum of three components:
+    classification cost, regression L1 cost and regression iou cost. The
+    targets don't include the no_object, so generally there are more
+    predictions than targets. After the one-to-one matching, the un-matched
+    are treated as backgrounds. Thus each query prediction will be assigned
+    with `0` or a positive integer indicating the ground truth index:
+
+    - 0: negative sample, no assigned gt
+    - positive integer: positive sample, index (1-based) of assigned gt
+
+    Args:
+        cls_cost (dict, optional): Config of the classification match cost.
+            Default dict(type='ClassificationCost', weight=1.).
+        reg_cost (dict, optional): Config of the regression L1 match cost.
+            Default dict(type='BBoxL1Cost', weight=1.0).
+        iou_cost (dict, optional): Config of the regression iou match cost;
+            "giou" (generalized intersection over union) mode is used by
+            default. Default dict(type='IoUCost', iou_mode='giou',
+            weight=1.0).
+    """
+
+    def __init__(self,
+                 cls_cost=dict(type='ClassificationCost', weight=1.),
+                 reg_cost=dict(type='BBoxL1Cost', weight=1.0),
+                 iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)):
+        self.cls_cost = build_match_cost(cls_cost)
+        self.reg_cost = build_match_cost(reg_cost)
+        self.iou_cost = build_match_cost(iou_cost)
+
+    def assign(self,
+               bbox_pred,
+               cls_pred,
+               gt_bboxes,
+               gt_labels,
+               img_meta,
+               gt_bboxes_ignore=None,
+               eps=1e-7):
+        """Computes one-to-one matching based on the weighted costs.
+
+        This method assigns each query prediction to a ground truth or
+        background. In `assigned_gt_inds`, -1 means don't care,
+        0 means negative sample, and a positive number is the index (1-based)
+        of the assigned gt.
+        The assignment is done in the following steps; the order matters.
+
+        1. assign every prediction to -1
+        2. compute the weighted costs
+        3. do Hungarian matching on CPU based on the costs
+        4. assign all to 0 (background) first, then for each matched pair
+           between predictions and gts, treat this prediction as foreground
+           and assign the corresponding gt index (plus 1) to it.
+
+        Args:
+            bbox_pred (Tensor): Predicted boxes with normalized coordinates
+                (cx, cy, w, h), which are all in range [0, 1]. Shape
+                [num_query, 4].
+            cls_pred (Tensor): Predicted classification logits, shape
+                [num_query, num_class].
+            gt_bboxes (Tensor): Ground truth boxes with unnormalized
+                coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
+            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
+            img_meta (dict): Meta information for current image.
+            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
+                labelled as `ignored`. Default None.
+            eps (int | float, optional): A value added to the denominator for
+                numerical stability. Default 1e-7.
+
+        Returns:
+            :obj:`AssignResult`: The assigned result.
+        """
+        assert gt_bboxes_ignore is None, \
+            'Only case when gt_bboxes_ignore is None is supported.'
+        num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)
+
+        # 1. assign -1 by default
+        assigned_gt_inds = bbox_pred.new_full((num_bboxes, ),
+                                              -1,
+                                              dtype=torch.long)
+        assigned_labels = bbox_pred.new_full((num_bboxes, ),
+                                             -1,
+                                             dtype=torch.long)
+        if num_gts == 0 or num_bboxes == 0:
+            # No ground truth or boxes, return empty assignment
+            if num_gts == 0:
+                # No ground truth, assign all to background
+                assigned_gt_inds[:] = 0
+            return AssignResult(
+                num_gts, assigned_gt_inds, None, labels=assigned_labels)
+        img_h, img_w, _ = img_meta['img_shape']
+        factor = gt_bboxes.new_tensor([img_w, img_h, img_w,
+                                       img_h]).unsqueeze(0)
+
+        # 2. compute the weighted costs
+        # classification and bbox cost
+        cls_cost = self.cls_cost(cls_pred, gt_labels)
+        # regression L1 cost
+        normalize_gt_bboxes = gt_bboxes / factor
+        reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes)
+        # regression iou cost; giou is used by default in official DETR
+        bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor
+        iou_cost = self.iou_cost(bboxes, gt_bboxes)
+        # weighted sum of the above three costs
+        cost = cls_cost + reg_cost + iou_cost
+
+        # 3. do Hungarian matching on CPU using linear_sum_assignment
+        cost = cost.detach().cpu()
+        if linear_sum_assignment is None:
+            raise ImportError('Please run "pip install scipy" '
+                              'to install scipy first.')
+        matched_row_inds, matched_col_inds = linear_sum_assignment(cost)
+        matched_row_inds = torch.from_numpy(matched_row_inds).to(
+            bbox_pred.device)
+        matched_col_inds = torch.from_numpy(matched_col_inds).to(
+            bbox_pred.device)
+
+        # 4. 
assign backgrounds and foregrounds + # assign all indices to backgrounds first + assigned_gt_inds[:] = 0 + # assign foregrounds based on matching results + assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 + assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] + return AssignResult( + num_gts, assigned_gt_inds, None, labels=assigned_labels) diff --git a/mmcv/core/bbox/assigners/hungarian_assigner_3d.py b/mmcv/core/bbox/assigners/hungarian_assigner_3d.py new file mode 100755 index 0000000..86d6cf2 --- /dev/null +++ b/mmcv/core/bbox/assigners/hungarian_assigner_3d.py @@ -0,0 +1,136 @@ +import torch + +from mmcv.core.bbox.builder import BBOX_ASSIGNERS +from .assign_result import AssignResult +from .base_assigner import BaseAssigner +from mmcv.core.bbox.match_costs import build_match_cost +from mmcv.models.utils.transformer import inverse_sigmoid +from mmcv.core.bbox.util import normalize_bbox + +try: + from scipy.optimize import linear_sum_assignment +except ImportError: + linear_sum_assignment = None + + +@BBOX_ASSIGNERS.register_module() +class HungarianAssigner3D(BaseAssigner): + """Computes one-to-one matching between predictions and ground truth. + This class computes an assignment between the targets and the predictions + based on the costs. The costs are weighted sum of three components: + classification cost, regression L1 cost and regression iou cost. The + targets don't include the no_object, so generally there are more + predictions than targets. After the one-to-one matching, the un-matched + are treated as backgrounds. Thus each query prediction will be assigned + with `0` or a positive integer indicating the ground truth index: + - 0: negative sample, no assigned gt + - positive integer: positive sample, index (1-based) of assigned gt + Args: + cls_weight (int | float, optional): The scale factor for classification + cost. Default 1.0. + bbox_weight (int | float, optional): The scale factor for regression + L1 cost. Default 1.0. + iou_weight (int | float, optional): The scale factor for regression + iou cost. Default 1.0. + iou_calculator (dict | optional): The config for the iou calculation. + Default type `BboxOverlaps2D`. + iou_mode (str | optional): "iou" (intersection over union), "iof" + (intersection over foreground), or "giou" (generalized + intersection over union). Default "giou". + """ + + def __init__(self, + cls_cost=dict(type='ClassificationCost', weight=1.), + reg_cost=dict(type='BBoxL1Cost', weight=1.0), + iou_cost=dict(type='IoUCost', weight=0.0), + pc_range=None): + self.cls_cost = build_match_cost(cls_cost) + self.reg_cost = build_match_cost(reg_cost) + self.iou_cost = build_match_cost(iou_cost) + self.pc_range = pc_range + + def assign(self, + bbox_pred, + cls_pred, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + eps=1e-7): + """Computes one-to-one matching based on the weighted costs. + This method assign each query prediction to a ground truth or + background. The `assigned_gt_inds` with -1 means don't care, + 0 means negative sample, and positive number is the index (1-based) + of assigned gt. + The assignment is done in the following steps, the order matters. + 1. assign every prediction to -1 + 2. compute the weighted costs + 3. do Hungarian matching on CPU based on the costs + 4. assign all to 0 (background) first, then for each matched pair + between predictions and gts, treat this prediction as foreground + and assign the corresponding gt index (plus 1) to it. 
+ Args: + bbox_pred (Tensor): Predicted boxes with normalized coordinates + (cx, cy, w, h), which are all in range [0, 1]. Shape + [num_query, 4]. + cls_pred (Tensor): Predicted classification logits, shape + [num_query, num_class]. + gt_bboxes (Tensor): Ground truth boxes with unnormalized + coordinates (x1, y1, x2, y2). Shape [num_gt, 4]. + gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). + gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are + labelled as `ignored`. Default None. + eps (int | float, optional): A value added to the denominator for + numerical stability. Default 1e-7. + Returns: + :obj:`AssignResult`: The assigned result. + """ + assert gt_bboxes_ignore is None, \ + 'Only case when gt_bboxes_ignore is None is supported.' + num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) + + # 1. assign -1 by default + assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), + -1, + dtype=torch.long) + assigned_labels = bbox_pred.new_full((num_bboxes, ), + -1, + dtype=torch.long) + if num_gts == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + if num_gts == 0: + # No ground truth, assign all to background + assigned_gt_inds[:] = 0 + return AssignResult( + num_gts, assigned_gt_inds, None, labels=assigned_labels) + + # 2. compute the weighted costs + # classification and bboxcost. + cls_cost = self.cls_cost(cls_pred, gt_labels) + # regression L1 cost + + normalized_gt_bboxes = normalize_bbox(gt_bboxes, self.pc_range) + + reg_cost = self.reg_cost(bbox_pred[:, :8], normalized_gt_bboxes[:, :8]) + + # weighted sum of above two costs + cost = cls_cost + reg_cost + + # 3. do Hungarian matching on CPU using linear_sum_assignment + cost = cost.detach().cpu() + if linear_sum_assignment is None: + raise ImportError('Please run "pip install scipy" ' + 'to install scipy first.') + matched_row_inds, matched_col_inds = linear_sum_assignment(cost) + matched_row_inds = torch.from_numpy(matched_row_inds).to( + bbox_pred.device) + matched_col_inds = torch.from_numpy(matched_col_inds).to( + bbox_pred.device) + + # 4. assign backgrounds and foregrounds + # assign all indices to backgrounds first + assigned_gt_inds[:] = 0 + # assign foregrounds based on matching results + assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 + assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] + return AssignResult( + num_gts, assigned_gt_inds, None, labels=assigned_labels) \ No newline at end of file diff --git a/mmcv/core/bbox/assigners/hungarian_assigner_3d_track.py b/mmcv/core/bbox/assigners/hungarian_assigner_3d_track.py new file mode 100644 index 0000000..792d0f9 --- /dev/null +++ b/mmcv/core/bbox/assigners/hungarian_assigner_3d_track.py @@ -0,0 +1,122 @@ +import numpy as np +import torch + +from mmcv.core.bbox.builder import BBOX_ASSIGNERS +from mmcv.core.bbox.assigners.base_assigner import BaseAssigner +from mmcv.core.bbox.match_costs import build_match_cost +try: + from scipy.optimize import linear_sum_assignment +except ImportError: + linear_sum_assignment = None + + +@BBOX_ASSIGNERS.register_module() +class HungarianAssigner3DTrack(BaseAssigner): + """Computes one-to-one matching between predictions and ground truth. + This class computes an assignment between the targets and the predictions + based on the costs. The costs are weighted sum of three components: + classification cost, regression L1 cost and regression iou cost. The + targets don't include the no_object, so generally there are more + predictions than targets. 
After the one-to-one matching, the un-matched
+    are treated as backgrounds. Thus each query prediction will be assigned
+    with `0` or a positive integer indicating the ground truth index:
+    - 0: negative sample, no assigned gt
+    - positive integer: positive sample, index (1-based) of assigned gt
+
+    Args:
+        cls_cost (dict, optional): Config of the classification match cost.
+            Default dict(type='ClassificationCost', weight=1.).
+        reg_cost (dict, optional): Config of the regression L1 match cost.
+            Default dict(type='BBoxL1Cost', weight=1.0).
+    """
+
+    def __init__(self,
+                 cls_cost=dict(type='ClassificationCost', weight=1.),
+                 reg_cost=dict(type='BBoxL1Cost', weight=1.0),
+                 pc_range=None):
+        self.cls_cost = build_match_cost(cls_cost)
+        self.reg_cost = build_match_cost(reg_cost)
+        self.pc_range = pc_range
+
+    def assign(self,
+               bbox_pred,
+               cls_pred,
+               gt_bboxes,
+               gt_labels,
+               gt_bboxes_ignore=None,
+               eps=1e-7):
+        """Computes one-to-one matching based on the weighted costs.
+        This method assigns each query prediction to a ground truth or
+        background. In `assigned_gt_inds`, -1 means don't care,
+        0 means negative sample, and a positive number is the index (1-based)
+        of the assigned gt.
+        The assignment is done in the following steps; the order matters.
+        1. assign every prediction to -1
+        2. compute the weighted costs
+        3. do Hungarian matching on CPU based on the costs
+        4. assign all to 0 (background) first, then for each matched pair
+           between predictions and gts, treat this prediction as foreground
+           and assign the corresponding gt index (plus 1) to it.
+        Args:
+            bbox_pred (Tensor): Predicted boxes with normalized coordinates
+                (cx, cy, w, h), which are all in range [0, 1]. Shape
+                [num_query, 4].
+            cls_pred (Tensor): Predicted classification logits, shape
+                [num_query, num_class].
+            gt_bboxes (Tensor): Ground truth boxes with unnormalized
+                coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
+            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
+            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
+                labelled as `ignored`. Default None.
+            eps (int | float, optional): A value added to the denominator for
+                numerical stability. Default 1e-7.
+        Returns:
+            tuple[Tensor | None, Tensor | None]: Indices of the matched
+                predictions and of the matched ground truths, or
+                ``(None, None)`` when there are no gts or no predictions.
+        """
+        assert gt_bboxes_ignore is None, \
+            'Only case when gt_bboxes_ignore is None is supported.'
+        num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)
+
+        # 1. assign -1 by default
+        assigned_gt_inds = bbox_pred.new_full((num_bboxes, ),
+                                              -1,
+                                              dtype=torch.long)
+        assigned_labels = bbox_pred.new_full((num_bboxes, ),
+                                             -1,
+                                             dtype=torch.long)
+        if num_gts == 0 or num_bboxes == 0:
+            # No ground truth or boxes, return empty assignment
+            if num_gts == 0:
+                # No ground truth, assign all to background
+                assigned_gt_inds[:] = 0
+            return (None, None)
+        # 2. compute the weighted costs
+        # classification and bbox cost
+        cls_cost = self.cls_cost(cls_pred, gt_labels)
+        # regression L1 cost
+        reg_cost = self.reg_cost(bbox_pred[:, :8], gt_bboxes[:, :8])
+        # weighted sum of the above two costs
+        cost = cls_cost + reg_cost
+
+        cost = torch.nan_to_num(cost)
+
+        # 3. do Hungarian matching on CPU using linear_sum_assignment
+        cost = cost.detach().cpu()
+        if linear_sum_assignment is None:
+            raise ImportError('Please run "pip install scipy" '
+                              'to install scipy first.')
+        cost = np.nan_to_num(cost)
+        matched_row_inds, matched_col_inds = linear_sum_assignment(cost)
+        matched_row_inds = torch.from_numpy(matched_row_inds).to(
+            bbox_pred.device)
+        matched_col_inds = torch.from_numpy(matched_col_inds).to(
+            bbox_pred.device)
+
+        # 4. 
assign backgrounds and foregrounds + # assign all indices to backgrounds first + assigned_gt_inds[:] = 0 + # assign foregrounds based on matching results + assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 + assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] + + return (matched_row_inds, matched_col_inds) + diff --git a/mmcv/core/bbox/assigners/map_hungarian_assigner_3d.py b/mmcv/core/bbox/assigners/map_hungarian_assigner_3d.py new file mode 100644 index 0000000..2bfc278 --- /dev/null +++ b/mmcv/core/bbox/assigners/map_hungarian_assigner_3d.py @@ -0,0 +1,162 @@ +import torch +import torch.nn.functional as F + +from mmcv.core.bbox.builder import BBOX_ASSIGNERS +from mmcv.core.bbox.assigners.assign_result import AssignResult +from mmcv.core.bbox.assigners.base_assigner import BaseAssigner +from mmcv.core.bbox.match_costs import build_match_cost +from mmcv.models.utils.transformer import inverse_sigmoid +from mmcv.core.bbox.util import normalize_bbox +from mmcv.models.vad_utils.map_utils import ( + normalize_2d_bbox, normalize_2d_pts, denormalize_2d_bbox +) + +try: + from scipy.optimize import linear_sum_assignment +except ImportError: + linear_sum_assignment = None + +@BBOX_ASSIGNERS.register_module() +class MapHungarianAssigner3D(BaseAssigner): + """Computes one-to-one matching between predictions and ground truth. + This class computes an assignment between the targets and the predictions + based on the costs. The costs are weighted sum of three components: + classification cost, regression L1 cost and regression iou cost. The + targets don't include the no_object, so generally there are more + predictions than targets. After the one-to-one matching, the un-matched + are treated as backgrounds. Thus each query prediction will be assigned + with `0` or a positive integer indicating the ground truth index: + - 0: negative sample, no assigned gt + - positive integer: positive sample, index (1-based) of assigned gt + Args: + cls_weight (int | float, optional): The scale factor for classification + cost. Default 1.0. + bbox_weight (int | float, optional): The scale factor for regression + L1 cost. Default 1.0. + iou_weight (int | float, optional): The scale factor for regression + iou cost. Default 1.0. + iou_calculator (dict | optional): The config for the iou calculation. + Default type `BboxOverlaps2D`. + iou_mode (str | optional): "iou" (intersection over union), "iof" + (intersection over foreground), or "giou" (generalized + intersection over union). Default "giou". + """ + + def __init__(self, + cls_cost=dict(type='ClassificationCost', weight=1.), + reg_cost=dict(type='BBoxL1Cost', weight=1.0), + iou_cost=dict(type='IoUCost', weight=0.0), + pts_cost=dict(type='ChamferDistance',loss_src_weight=1.0,loss_dst_weight=1.0), + pc_range=None): + self.cls_cost = build_match_cost(cls_cost) + self.reg_cost = build_match_cost(reg_cost) + self.iou_cost = build_match_cost(iou_cost) + self.pts_cost = build_match_cost(pts_cost) + self.pc_range = pc_range + + def assign(self, + bbox_pred, + cls_pred, + pts_pred, + gt_bboxes, + gt_labels, + gt_pts, + gt_bboxes_ignore=None, + eps=1e-7): + """Computes one-to-one matching based on the weighted costs. + This method assign each query prediction to a ground truth or + background. The `assigned_gt_inds` with -1 means don't care, + 0 means negative sample, and positive number is the index (1-based) + of assigned gt. + The assignment is done in the following steps, the order matters. + 1. assign every prediction to -1 + 2. 
compute the weighted costs + 3. do Hungarian matching on CPU based on the costs + 4. assign all to 0 (background) first, then for each matched pair + between predictions and gts, treat this prediction as foreground + and assign the corresponding gt index (plus 1) to it. + Args: + bbox_pred (Tensor): Predicted boxes with normalized coordinates + (cx, cy, w, h), which are all in range [0, 1]. Shape + [num_query, 4]. + cls_pred (Tensor): Predicted classification logits, shape + [num_query, num_class]. + gt_bboxes (Tensor): Ground truth boxes with unnormalized + coordinates (x1, y1, x2, y2). Shape [num_gt, 4]. + gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). + gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are + labelled as `ignored`. Default None. + eps (int | float, optional): A value added to the denominator for + numerical stability. Default 1e-7. + Returns: + :obj:`AssignResult`: The assigned result. + """ + assert gt_bboxes_ignore is None, \ + 'Only case when gt_bboxes_ignore is None is supported.' + assert bbox_pred.shape[-1] == 4, \ + 'Only support bbox pred shape is 4 dims' + num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) + + # 1. assign -1 by default + assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), + -1, + dtype=torch.long) + assigned_labels = bbox_pred.new_full((num_bboxes, ), + -1, + dtype=torch.long) + if num_gts == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + if num_gts == 0: + # No ground truth, assign all to background + assigned_gt_inds[:] = 0 + return AssignResult( + num_gts, assigned_gt_inds, None, labels=assigned_labels), None + + # 2. compute the weighted costs + # classification and bboxcost. + cls_cost = self.cls_cost(cls_pred, gt_labels) + # regression L1 cost + + normalized_gt_bboxes = normalize_2d_bbox(gt_bboxes, self.pc_range) + # normalized_gt_bboxes = gt_bboxes + # import pdb;pdb.set_trace() + reg_cost = self.reg_cost(bbox_pred[:, :4], normalized_gt_bboxes[:, :4]) + + _, num_orders, num_pts_per_gtline, num_coords = gt_pts.shape + normalized_gt_pts = normalize_2d_pts(gt_pts, self.pc_range) + num_pts_per_predline = pts_pred.size(1) + if num_pts_per_predline != num_pts_per_gtline: + pts_pred_interpolated = F.interpolate(pts_pred.permute(0,2,1),size=(num_pts_per_gtline), + mode='linear', align_corners=True) + pts_pred_interpolated = pts_pred_interpolated.permute(0,2,1).contiguous() + else: + pts_pred_interpolated = pts_pred + # num_q, num_pts, 2 <-> num_gt, num_pts, 2 + pts_cost_ordered = self.pts_cost(pts_pred_interpolated, normalized_gt_pts) + pts_cost_ordered = pts_cost_ordered.view(num_bboxes, num_gts, num_orders) + pts_cost, order_index = torch.min(pts_cost_ordered, 2) + + bboxes = denormalize_2d_bbox(bbox_pred, self.pc_range) + iou_cost = self.iou_cost(bboxes, gt_bboxes) + # weighted sum of above three costs + cost = cls_cost + reg_cost + iou_cost + pts_cost + + # 3. do Hungarian matching on CPU using linear_sum_assignment + cost = cost.detach().cpu() + if linear_sum_assignment is None: + raise ImportError('Please run "pip install scipy" ' + 'to install scipy first.') + matched_row_inds, matched_col_inds = linear_sum_assignment(cost) + matched_row_inds = torch.from_numpy(matched_row_inds).to( + bbox_pred.device) + matched_col_inds = torch.from_numpy(matched_col_inds).to( + bbox_pred.device) + + # 4. 
assign backgrounds and foregrounds
+        # assign all indices to backgrounds first
+        assigned_gt_inds[:] = 0
+        # assign foregrounds based on matching results
+        assigned_gt_inds[matched_row_inds] = matched_col_inds + 1
+        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
+        return AssignResult(
+            num_gts, assigned_gt_inds, None, labels=assigned_labels), order_index
\ No newline at end of file
diff --git a/mmcv/core/bbox/box_np_ops.py b/mmcv/core/bbox/box_np_ops.py
new file mode 100644
index 0000000..6740e4e
--- /dev/null
+++ b/mmcv/core/bbox/box_np_ops.py
@@ -0,0 +1,896 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# TODO: clean the functions in this file and move the APIs into box structures
+# in the future
+
+import numba
+import numpy as np
+
+
+def camera_to_lidar(points, r_rect, velo2cam):
+    """Convert points in camera coordinate to lidar coordinate.
+
+    Args:
+        points (np.ndarray, shape=[N, 3]): Points in camera coordinate.
+        r_rect (np.ndarray, shape=[4, 4]): Matrix to project points in
+            specific camera coordinate (e.g. CAM2) to CAM0.
+        velo2cam (np.ndarray, shape=[4, 4]): Matrix to project points in
+            camera coordinate to lidar coordinate.
+
+    Returns:
+        np.ndarray, shape=[N, 3]: Points in lidar coordinate.
+    """
+    points_shape = list(points.shape[0:-1])
+    if points.shape[-1] == 3:
+        points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1)
+    lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T)
+    return lidar_points[..., :3]
+
+
+def box_camera_to_lidar(data, r_rect, velo2cam):
+    """Convert boxes in camera coordinate to lidar coordinate.
+
+    Args:
+        data (np.ndarray, shape=[N, 7]): Boxes in camera coordinate.
+        r_rect (np.ndarray, shape=[4, 4]): Matrix to project points in
+            specific camera coordinate (e.g. CAM2) to CAM0.
+        velo2cam (np.ndarray, shape=[4, 4]): Matrix to project points in
+            camera coordinate to lidar coordinate.
+
+    Returns:
+        np.ndarray, shape=[N, 7]: Boxes in lidar coordinate.
+    """
+    xyz = data[:, 0:3]
+    l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6]
+    r = data[:, 6:7]
+    xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam)
+    return np.concatenate([xyz_lidar, w, l, h, r], axis=1)
+
+
+def corners_nd(dims, origin=0.5):
+    """Generate relative box corners based on length per dim and origin point.
+
+    Args:
+        dims (np.ndarray, shape=[N, ndim]): Array of length per dim
+        origin (list or array or float, optional): origin point relative to
+            the smallest point. Defaults to 0.5
+
+    Returns:
+        np.ndarray, shape=[N, 2 ** ndim, ndim]: Returned corners.
+        point layout example: (2d) x0y0, x0y1, x1y0, x1y1;
+            (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
+            where x0 < x1, y0 < y1, z0 < z1.
+    """
+    ndim = int(dims.shape[1])
+    corners_norm = np.stack(
+        np.unravel_index(np.arange(2**ndim), [2] * ndim),
+        axis=1).astype(dims.dtype)
+    # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1
+    # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
+    # so we need to convert it to a format that is convenient for further
+    # computation: for 2d boxes the format is clockwise, starting from the
+    # minimum point; for 3d boxes, please draw the lines by hand.
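+    # (illustrative trace) with dims = np.array([[1., 1.]]) and origin = 0.5,
+    # the reordered 2d corners below come out as
+    # [[-0.5, -0.5], [-0.5, 0.5], [0.5, 0.5], [0.5, -0.5]]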
+ if ndim == 2: + # generate clockwise box corners + corners_norm = corners_norm[[0, 1, 3, 2]] + elif ndim == 3: + corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] + corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) + corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape( + [1, 2**ndim, ndim]) + return corners + + +def rotation_2d(points, angles): + """Rotation 2d points based on origin point clockwise when angle positive. + + Args: + points (np.ndarray): Points to be rotated with shape \ + (N, point_size, 2). + angles (np.ndarray): Rotation angle with shape (N). + + Returns: + np.ndarray: Same shape as points. + """ + rot_sin = np.sin(angles) + rot_cos = np.cos(angles) + rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]]) + return np.einsum('aij,jka->aik', points, rot_mat_T) + + +def center_to_corner_box2d(centers, dims, angles=None, origin=0.5): + """Convert kitti locations, dimensions and angles to corners. + format: center(xy), dims(xy), angles(clockwise when positive) + + Args: + centers (np.ndarray): Locations in kitti label file with shape (N, 2). + dims (np.ndarray): Dimensions in kitti label file with shape (N, 2). + angles (np.ndarray, optional): Rotation_y in kitti label file with + shape (N). Defaults to None. + origin (list or array or float, optional): origin point relate to + smallest point. Defaults to 0.5. + + Returns: + np.ndarray: Corners with the shape of (N, 4, 2). + """ + # 'length' in kitti format is in x axis. + # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) + # center in kitti format is [0.5, 1.0, 0.5] in xyz. + corners = corners_nd(dims, origin=origin) + # corners: [N, 4, 2] + if angles is not None: + corners = rotation_2d(corners, angles) + corners += centers.reshape([-1, 1, 2]) + return corners + + +@numba.jit(nopython=True) +def depth_to_points(depth, trunc_pixel): + """Convert depth map to points. + + Args: + depth (np.array, shape=[H, W]): Depth map which + the row of [0~`trunc_pixel`] are truncated. + trunc_pixel (int): The number of truncated row. + + Returns: + np.ndarray: Points in camera coordinates. + """ + num_pts = np.sum(depth[trunc_pixel:, ] > 0.1) + points = np.zeros((num_pts, 3), dtype=depth.dtype) + x = np.array([0, 0, 1], dtype=depth.dtype) + k = 0 + for i in range(trunc_pixel, depth.shape[0]): + for j in range(depth.shape[1]): + if depth[i, j] > 0.1: + x = np.array([j, i, 1], dtype=depth.dtype) + points[k] = x * depth[i, j] + k += 1 + return points + + +def depth_to_lidar_points(depth, trunc_pixel, P2, r_rect, velo2cam): + """Convert depth map to points in lidar coordinate. + + Args: + depth (np.array, shape=[H, W]): Depth map which + the row of [0~`trunc_pixel`] are truncated. + trunc_pixel (int): The number of truncated row. + P2 (p.array, shape=[4, 4]): Intrinsics of Camera2. + r_rect (np.ndarray, shape=[4, 4]): Matrix to project points in + specific camera coordinate (e.g. CAM2) to CAM0. + velo2cam (np.ndarray, shape=[4, 4]): Matrix to project points in + camera coordinate to lidar coordinate. + + Returns: + np.ndarray: Points in lidar coordinates. + """ + pts = depth_to_points(depth, trunc_pixel) + points_shape = list(pts.shape[0:-1]) + points = np.concatenate([pts, np.ones(points_shape + [1])], axis=-1) + points = points @ np.linalg.inv(P2.T) + lidar_points = camera_to_lidar(points, r_rect, velo2cam) + return lidar_points + + +def rotation_3d_in_axis(points, angles, axis=0): + """Rotate points in specific axis. 
+ + Args: + points (np.ndarray, shape=[N, point_size, 3]]): + angles (np.ndarray, shape=[N]]): + axis (int, optional): Axis to rotate at. Defaults to 0. + + Returns: + np.ndarray: Rotated points. + """ + # points: [N, point_size, 3] + rot_sin = np.sin(angles) + rot_cos = np.cos(angles) + ones = np.ones_like(rot_cos) + zeros = np.zeros_like(rot_cos) + if axis == 1: + rot_mat_T = np.stack([[rot_cos, zeros, -rot_sin], [zeros, ones, zeros], + [rot_sin, zeros, rot_cos]]) + elif axis == 2 or axis == -1: + rot_mat_T = np.stack([[rot_cos, -rot_sin, zeros], + [rot_sin, rot_cos, zeros], [zeros, zeros, ones]]) + elif axis == 0: + rot_mat_T = np.stack([[zeros, rot_cos, -rot_sin], + [zeros, rot_sin, rot_cos], [ones, zeros, zeros]]) + else: + raise ValueError('axis should in range') + + return np.einsum('aij,jka->aik', points, rot_mat_T) + + +def center_to_corner_box3d(centers, + dims, + angles=None, + origin=(0.5, 1.0, 0.5), + axis=1): + """Convert kitti locations, dimensions and angles to corners. + + Args: + centers (np.ndarray): Locations in kitti label file with shape (N, 3). + dims (np.ndarray): Dimensions in kitti label file with shape (N, 3). + angles (np.ndarray, optional): Rotation_y in kitti label file with + shape (N). Defaults to None. + origin (list or array or float, optional): Origin point relate to + smallest point. Use (0.5, 1.0, 0.5) in camera and (0.5, 0.5, 0) + in lidar. Defaults to (0.5, 1.0, 0.5). + axis (int, optional): Rotation axis. 1 for camera and 2 for lidar. + Defaults to 1. + + Returns: + np.ndarray: Corners with the shape of (N, 8, 3). + """ + # 'length' in kitti format is in x axis. + # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) + # center in kitti format is [0.5, 1.0, 0.5] in xyz. + corners = corners_nd(dims, origin=origin) + # corners: [N, 8, 3] + if angles is not None: + corners = rotation_3d_in_axis(corners, angles, axis=axis) + corners += centers.reshape([-1, 1, 3]) + return corners + + +@numba.jit(nopython=True) +def box2d_to_corner_jit(boxes): + """Convert box2d to corner. + + Args: + boxes (np.ndarray, shape=[N, 5]): Boxes2d with rotation. + + Returns: + box_corners (np.ndarray, shape=[N, 4, 2]): Box corners. + """ + num_box = boxes.shape[0] + corners_norm = np.zeros((4, 2), dtype=boxes.dtype) + corners_norm[1, 1] = 1.0 + corners_norm[2] = 1.0 + corners_norm[3, 0] = 1.0 + corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype) + corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape( + 1, 4, 2) + rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) + box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype) + for i in range(num_box): + rot_sin = np.sin(boxes[i, -1]) + rot_cos = np.cos(boxes[i, -1]) + rot_mat_T[0, 0] = rot_cos + rot_mat_T[0, 1] = -rot_sin + rot_mat_T[1, 0] = rot_sin + rot_mat_T[1, 1] = rot_cos + box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2] + return box_corners + + +@numba.njit +def corner_to_standup_nd_jit(boxes_corner): + """Convert boxes_corner to aligned (min-max) boxes. + + Args: + boxes_corner (np.ndarray, shape=[N, 2**dim, dim]): Boxes corners. + + Returns: + np.ndarray, shape=[N, dim*2]: Aligned (min-max) boxes. 
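+
+    Example:
+        >>> # Illustrative: a square rotated by 45 degrees collapses to its
+        >>> # axis-aligned bounding box (values are arbitrary).
+        >>> corners = np.array([[[0., 1.], [1., 0.], [2., 1.], [1., 2.]]])
+        >>> corner_to_standup_nd_jit(corners)
+        array([[0., 0., 2., 2.]])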
+ """ + num_boxes = boxes_corner.shape[0] + ndim = boxes_corner.shape[-1] + result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype) + for i in range(num_boxes): + for j in range(ndim): + result[i, j] = np.min(boxes_corner[i, :, j]) + for j in range(ndim): + result[i, j + ndim] = np.max(boxes_corner[i, :, j]) + return result + + +@numba.jit(nopython=True) +def corner_to_surfaces_3d_jit(corners): + """Convert 3d box corners from corner function above to surfaces that + normal vectors all direct to internal. + + Args: + corners (np.ndarray): 3d box corners with the shape of (N, 8, 3). + + Returns: + np.ndarray: Surfaces with the shape of (N, 6, 4, 3). + """ + # box_corners: [N, 8, 3], must from corner functions in this module + num_boxes = corners.shape[0] + surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype) + corner_idxes = np.array([ + 0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7 + ]).reshape(6, 4) + for i in range(num_boxes): + for j in range(6): + for k in range(4): + surfaces[i, j, k] = corners[i, corner_idxes[j, k]] + return surfaces + + +def rotation_points_single_angle(points, angle, axis=0): + """Rotate points with a single angle. + + Args: + points (np.ndarray, shape=[N, 3]]): + angle (np.ndarray, shape=[1]]): + axis (int, optional): Axis to rotate at. Defaults to 0. + + Returns: + np.ndarray: Rotated points. + """ + # points: [N, 3] + rot_sin = np.sin(angle) + rot_cos = np.cos(angle) + if axis == 1: + rot_mat_T = np.array( + [[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]], + dtype=points.dtype) + elif axis == 2 or axis == -1: + rot_mat_T = np.array( + [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]], + dtype=points.dtype) + elif axis == 0: + rot_mat_T = np.array( + [[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]], + dtype=points.dtype) + else: + raise ValueError('axis should in range') + + return points @ rot_mat_T, rot_mat_T + + +def points_cam2img(points_3d, proj_mat, with_depth=False): + """Project points in camera coordinates to image coordinates. + + Args: + points_3d (np.ndarray): Points in shape (N, 3) + proj_mat (np.ndarray): Transformation matrix between coordinates. + with_depth (bool, optional): Whether to keep depth in the output. + Defaults to False. + + Returns: + np.ndarray: Points in image coordinates with shape [N, 2]. + """ + points_shape = list(points_3d.shape) + points_shape[-1] = 1 + + assert len(proj_mat.shape) == 2, 'The dimension of the projection'\ + f' matrix should be 2 instead of {len(proj_mat.shape)}.' + d1, d2 = proj_mat.shape[:2] + assert (d1 == 3 and d2 == 3) or (d1 == 3 and d2 == 4) or ( + d1 == 4 and d2 == 4), 'The shape of the projection matrix'\ + f' ({d1}*{d2}) is not supported.' + if d1 == 3: + proj_mat_expanded = np.eye(4, dtype=proj_mat.dtype) + proj_mat_expanded[:d1, :d2] = proj_mat + proj_mat = proj_mat_expanded + + points_4 = np.concatenate([points_3d, np.ones(points_shape)], axis=-1) + point_2d = points_4 @ proj_mat.T + point_2d_res = point_2d[..., :2] / point_2d[..., 2:3] + + if with_depth: + points_2d_depth = np.concatenate([point_2d_res, point_2d[..., 2:3]], + axis=-1) + return points_2d_depth + + return point_2d_res + + +def box3d_to_bbox(box3d, P2): + """Convert box3d in camera coordinates to bbox in image coordinates. + + Args: + box3d (np.ndarray, shape=[N, 7]): Boxes in camera coordinate. + P2 (np.array, shape=[4, 4]): Intrinsics of Camera2. + + Returns: + np.ndarray, shape=[N, 4]: Boxes 2d in image coordinates. 
+ """ + box_corners = center_to_corner_box3d( + box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1) + box_corners_in_image = points_cam2img(box_corners, P2) + # box_corners_in_image: [N, 8, 2] + minxy = np.min(box_corners_in_image, axis=1) + maxxy = np.max(box_corners_in_image, axis=1) + bbox = np.concatenate([minxy, maxxy], axis=1) + return bbox + + +def corner_to_surfaces_3d(corners): + """convert 3d box corners from corner function above to surfaces that + normal vectors all direct to internal. + + Args: + corners (np.ndarray): 3D box corners with shape of (N, 8, 3). + + Returns: + np.ndarray: Surfaces with the shape of (N, 6, 4, 3). + """ + # box_corners: [N, 8, 3], must from corner functions in this module + surfaces = np.array([ + [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]], + [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]], + [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]], + [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]], + [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]], + [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]], + ]).transpose([2, 0, 1, 3]) + return surfaces + + +def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0)): + """Check points in rotated bbox and return indicces. + + Args: + points (np.ndarray, shape=[N, 3+dim]): Points to query. + rbbox (np.ndarray, shape=[M, 7]): Boxes3d with rotation. + z_axis (int, optional): Indicate which axis is height. + Defaults to 2. + origin (tuple[int], optional): Indicate the position of + box center. Defaults to (0.5, 0.5, 0). + + Returns: + np.ndarray, shape=[N, M]: Indices of points in each box. + """ + # TODO: this function is different from PointCloud3D, be careful + # when start to use nuscene, check the input + rbbox_corners = center_to_corner_box3d( + rbbox[:, :3], rbbox[:, 3:6], rbbox[:, 6], origin=origin, axis=z_axis) + surfaces = corner_to_surfaces_3d(rbbox_corners) + indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces) + return indices + + +def minmax_to_corner_2d(minmax_box): + """Convert minmax box to corners2d. + + Args: + minmax_box (np.ndarray, shape=[N, dims]): minmax boxes. + + Returns: + np.ndarray: 2d corners of boxes + """ + ndim = minmax_box.shape[-1] // 2 + center = minmax_box[..., :ndim] + dims = minmax_box[..., ndim:] - center + return center_to_corner_box2d(center, dims, origin=0.0) + + +def limit_period(val, offset=0.5, period=np.pi): + """Limit the value into a period for periodic function. + + Args: + val (np.ndarray): The value to be converted. + offset (float, optional): Offset to set the value range. \ + Defaults to 0.5. + period (float, optional): Period of the value. Defaults to np.pi. + + Returns: + torch.Tensor: Value in the range of \ + [-offset * period, (1-offset) * period] + """ + return val - np.floor(val / period + offset) * period + + +def create_anchors_3d_range(feature_size, + anchor_range, + sizes=((1.6, 3.9, 1.56), ), + rotations=(0, np.pi / 2), + dtype=np.float32): + """Create anchors 3d by range. + + Args: + feature_size (list[float] | tuple[float]): Feature map size. It is + either a list of a tuple of [D, H, W](in order of z, y, and x). + anchor_range (torch.Tensor | list[float]): Range of anchors with + shape [6]. The order is consistent with that of anchors, i.e., + (x_min, y_min, z_min, x_max, y_max, z_max). + sizes (list[list] | np.ndarray | torch.Tensor, optional): + Anchor size with shape [N, 3], in order of x, y, z. + Defaults to ((1.6, 3.9, 1.56), ). 
+        rotations (list[float] | np.ndarray | torch.Tensor, optional):
+            Rotations of anchors in a single feature grid.
+            Defaults to (0, np.pi / 2).
+        dtype (type, optional): Data type. Defaults to np.float32.
+
+    Returns:
+        np.ndarray: Range based anchors with shape of \
+            (*feature_size, num_sizes, num_rots, 7).
+    """
+    anchor_range = np.array(anchor_range, dtype)
+    z_centers = np.linspace(
+        anchor_range[2], anchor_range[5], feature_size[0], dtype=dtype)
+    y_centers = np.linspace(
+        anchor_range[1], anchor_range[4], feature_size[1], dtype=dtype)
+    x_centers = np.linspace(
+        anchor_range[0], anchor_range[3], feature_size[2], dtype=dtype)
+    sizes = np.reshape(np.array(sizes, dtype=dtype), [-1, 3])
+    rotations = np.array(rotations, dtype=dtype)
+    rets = np.meshgrid(
+        x_centers, y_centers, z_centers, rotations, indexing='ij')
+    tile_shape = [1] * 5
+    tile_shape[-2] = int(sizes.shape[0])
+    for i in range(len(rets)):
+        rets[i] = np.tile(rets[i][..., np.newaxis, :], tile_shape)
+        rets[i] = rets[i][..., np.newaxis]  # for concat
+    sizes = np.reshape(sizes, [1, 1, 1, -1, 1, 3])
+    tile_size_shape = list(rets[0].shape)
+    tile_size_shape[3] = 1
+    sizes = np.tile(sizes, tile_size_shape)
+    rets.insert(3, sizes)
+    ret = np.concatenate(rets, axis=-1)
+    return np.transpose(ret, [2, 1, 0, 3, 4, 5])
+
+
+def center_to_minmax_2d(centers, dims, origin=0.5):
+    """Center to minmax.
+
+    Args:
+        centers (np.ndarray): Center points.
+        dims (np.ndarray): Dimensions.
+        origin (list or array or float, optional): Origin point relative
+            to the smallest point. Defaults to 0.5.
+
+    Returns:
+        np.ndarray: Minmax points.
+    """
+    if origin == 0.5:
+        return np.concatenate([centers - dims / 2, centers + dims / 2],
+                              axis=-1)
+    corners = center_to_corner_box2d(centers, dims, origin=origin)
+    return corners[:, [0, 2]].reshape([-1, 4])
+
+
+def rbbox2d_to_near_bbox(rbboxes):
+    """Convert rotated bboxes to the nearest 'standing' or 'lying' bboxes.
+
+    Args:
+        rbboxes (np.ndarray): Rotated bboxes with shape of \
+            (N, 5(x, y, xdim, ydim, rad)).
+
+    Returns:
+        np.ndarray: Bounding boxes with the shape of
+            (N, 4(xmin, ymin, xmax, ymax)).
+    """
+    rots = rbboxes[..., -1]
+    rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi))
+    cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis]
+    bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4])
+    bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:])
+    return bboxes
+
+
+@numba.jit(nopython=True)
+def iou_jit(boxes, query_boxes, mode='iou', eps=0.0):
+    """Calculate box iou. Note that the jit version runs ~10x faster than
+    the box_overlaps function in mmdet3d.core.evaluation.
+
+    Args:
+        boxes (np.ndarray): Input bounding boxes with shape of (N, 4).
+        query_boxes (np.ndarray): Query boxes with shape of (K, 4).
+        mode (str, optional): IoU mode. Defaults to 'iou'.
+        eps (float, optional): Value added to denominator. Defaults to 0.
+
+    Returns:
+        np.ndarray: Overlap between boxes and query_boxes
+            with the shape of [N, K].
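+
+    Example (a minimal sketch; values chosen for illustration, eps left
+    at its default of 0):
+        >>> boxes = np.array([[0., 0., 2., 2.]])
+        >>> query_boxes = np.array([[1., 1., 3., 3.]])
+        >>> float(iou_jit(boxes, query_boxes)[0, 0])  # 1 / (4 + 4 - 1)
+        0.14285714285714285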
+ """ + N = boxes.shape[0] + K = query_boxes.shape[0] + overlaps = np.zeros((N, K), dtype=boxes.dtype) + for k in range(K): + box_area = ((query_boxes[k, 2] - query_boxes[k, 0] + eps) * + (query_boxes[k, 3] - query_boxes[k, 1] + eps)) + for n in range(N): + iw = ( + min(boxes[n, 2], query_boxes[k, 2]) - + max(boxes[n, 0], query_boxes[k, 0]) + eps) + if iw > 0: + ih = ( + min(boxes[n, 3], query_boxes[k, 3]) - + max(boxes[n, 1], query_boxes[k, 1]) + eps) + if ih > 0: + if mode == 'iou': + ua = ((boxes[n, 2] - boxes[n, 0] + eps) * + (boxes[n, 3] - boxes[n, 1] + eps) + box_area - + iw * ih) + else: + ua = ((boxes[n, 2] - boxes[n, 0] + eps) * + (boxes[n, 3] - boxes[n, 1] + eps)) + overlaps[n, k] = iw * ih / ua + return overlaps + + +def projection_matrix_to_CRT_kitti(proj): + """Split projection matrix of kitti. + + P = C @ [R|T] + C is upper triangular matrix, so we need to inverse CR and use QR + stable for all kitti camera projection matrix. + + Args: + proj (p.array, shape=[4, 4]): Intrinsics of camera. + + Returns: + tuple[np.ndarray]: Splited matrix of C, R and T. + """ + + CR = proj[0:3, 0:3] + CT = proj[0:3, 3] + RinvCinv = np.linalg.inv(CR) + Rinv, Cinv = np.linalg.qr(RinvCinv) + C = np.linalg.inv(Cinv) + R = np.linalg.inv(Rinv) + T = Cinv @ CT + return C, R, T + + +def remove_outside_points(points, rect, Trv2c, P2, image_shape): + """Remove points which are outside of image. + + Args: + points (np.ndarray, shape=[N, 3+dims]): Total points. + rect (np.ndarray, shape=[4, 4]): Matrix to project points in + specific camera coordinate (e.g. CAM2) to CAM0. + Trv2c (np.ndarray, shape=[4, 4]): Matrix to project points in + camera coordinate to lidar coordinate. + P2 (p.array, shape=[4, 4]): Intrinsics of Camera2. + image_shape (list[int]): Shape of image. + + Returns: + np.ndarray, shape=[N, 3+dims]: Filtered points. + """ + # 5x faster than remove_outside_points_v1(2ms vs 10ms) + C, R, T = projection_matrix_to_CRT_kitti(P2) + image_bbox = [0, 0, image_shape[1], image_shape[0]] + frustum = get_frustum(image_bbox, C) + frustum -= T + frustum = np.linalg.inv(R) @ frustum.T + frustum = camera_to_lidar(frustum.T, rect, Trv2c) + frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...]) + indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces) + points = points[indices.reshape([-1])] + return points + + +def get_frustum(bbox_image, C, near_clip=0.001, far_clip=100): + """Get frustum corners in camera coordinates. + + Args: + bbox_image (list[int]): box in image coordinates. + C (np.ndarray): Intrinsics. + near_clip (float, optional): Nearest distance of frustum. + Defaults to 0.001. + far_clip (float, optional): Farthest distance of frustum. + Defaults to 100. + + Returns: + np.ndarray, shape=[8, 3]: coordinates of frustum corners. 
+ """ + fku = C[0, 0] + fkv = -C[1, 1] + u0v0 = C[0:2, 2] + z_points = np.array( + [near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis] + b = bbox_image + box_corners = np.array( + [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]], + dtype=C.dtype) + near_box_corners = (box_corners - u0v0) / np.array( + [fku / near_clip, -fkv / near_clip], dtype=C.dtype) + far_box_corners = (box_corners - u0v0) / np.array( + [fku / far_clip, -fkv / far_clip], dtype=C.dtype) + ret_xy = np.concatenate([near_box_corners, far_box_corners], + axis=0) # [8, 2] + ret_xyz = np.concatenate([ret_xy, z_points], axis=1) + return ret_xyz + + +def surface_equ_3d(polygon_surfaces): + """ + + Args: + polygon_surfaces (np.ndarray): Polygon surfaces with shape of + [num_polygon, max_num_surfaces, max_num_points_of_surface, 3]. + All surfaces' normal vector must direct to internal. + Max_num_points_of_surface must at least 3. + + Returns: + tuple: normal vector and its direction. + """ + # return [a, b, c], d in ax+by+cz+d=0 + # polygon_surfaces: [num_polygon, num_surfaces, num_points_of_polygon, 3] + surface_vec = polygon_surfaces[:, :, :2, :] - \ + polygon_surfaces[:, :, 1:3, :] + # normal_vec: [..., 3] + normal_vec = np.cross(surface_vec[:, :, 0, :], surface_vec[:, :, 1, :]) + # print(normal_vec.shape, points[..., 0, :].shape) + # d = -np.inner(normal_vec, points[..., 0, :]) + d = np.einsum('aij, aij->ai', normal_vec, polygon_surfaces[:, :, 0, :]) + return normal_vec, -d + + +@numba.njit +def _points_in_convex_polygon_3d_jit(points, polygon_surfaces, normal_vec, d, + num_surfaces): + """ + Args: + points (np.ndarray): Input points with shape of (num_points, 3). + polygon_surfaces (np.ndarray): Polygon surfaces with shape of + (num_polygon, max_num_surfaces, max_num_points_of_surface, 3). + All surfaces' normal vector must direct to internal. + Max_num_points_of_surface must at least 3. + normal_vec (np.ndarray): Normal vector of polygon_surfaces. + d (int): Directions of normal vector. + num_surfaces (np.ndarray): Number of surfaces a polygon contains + shape of (num_polygon). + + Returns: + np.ndarray: Result matrix with the shape of [num_points, num_polygon]. + """ + max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3] + num_points = points.shape[0] + num_polygons = polygon_surfaces.shape[0] + ret = np.ones((num_points, num_polygons), dtype=np.bool_) + sign = 0.0 + for i in range(num_points): + for j in range(num_polygons): + for k in range(max_num_surfaces): + if k > num_surfaces[j]: + break + sign = ( + points[i, 0] * normal_vec[j, k, 0] + + points[i, 1] * normal_vec[j, k, 1] + + points[i, 2] * normal_vec[j, k, 2] + d[j, k]) + if sign >= 0: + ret[i, j] = False + break + return ret + + +def points_in_convex_polygon_3d_jit(points, + polygon_surfaces, + num_surfaces=None): + """Check points is in 3d convex polygons. + + Args: + points (np.ndarray): Input points with shape of (num_points, 3). + polygon_surfaces (np.ndarray): Polygon surfaces with shape of + (num_polygon, max_num_surfaces, max_num_points_of_surface, 3). + All surfaces' normal vector must direct to internal. + Max_num_points_of_surface must at least 3. + num_surfaces (np.ndarray, optional): Number of surfaces a polygon + contains shape of (num_polygon). Defaults to None. + + Returns: + np.ndarray: Result matrix with the shape of [num_points, num_polygon]. 
+ """ + max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3] + # num_points = points.shape[0] + num_polygons = polygon_surfaces.shape[0] + if num_surfaces is None: + num_surfaces = np.full((num_polygons, ), 9999999, dtype=np.int64) + normal_vec, d = surface_equ_3d(polygon_surfaces[:, :, :3, :]) + # normal_vec: [num_polygon, max_num_surfaces, 3] + # d: [num_polygon, max_num_surfaces] + return _points_in_convex_polygon_3d_jit(points, polygon_surfaces, + normal_vec, d, num_surfaces) + + +@numba.jit +def points_in_convex_polygon_jit(points, polygon, clockwise=True): + """Check points is in 2d convex polygons. True when point in polygon. + + Args: + points (np.ndarray): Input points with the shape of [num_points, 2]. + polygon (np.ndarray): Input polygon with the shape of + [num_polygon, num_points_of_polygon, 2]. + clockwise (bool, optional): Indicate polygon is clockwise. Defaults + to True. + + Returns: + np.ndarray: Result matrix with the shape of [num_points, num_polygon]. + """ + # first convert polygon to directed lines + num_points_of_polygon = polygon.shape[1] + num_points = points.shape[0] + num_polygons = polygon.shape[0] + # if clockwise: + # vec1 = polygon - polygon[:, [num_points_of_polygon - 1] + + # list(range(num_points_of_polygon - 1)), :] + # else: + # vec1 = polygon[:, [num_points_of_polygon - 1] + + # list(range(num_points_of_polygon - 1)), :] - polygon + # vec1: [num_polygon, num_points_of_polygon, 2] + vec1 = np.zeros((2), dtype=polygon.dtype) + ret = np.zeros((num_points, num_polygons), dtype=np.bool_) + success = True + cross = 0.0 + for i in range(num_points): + for j in range(num_polygons): + success = True + for k in range(num_points_of_polygon): + if clockwise: + vec1 = polygon[j, k] - polygon[j, k - 1] + else: + vec1 = polygon[j, k - 1] - polygon[j, k] + cross = vec1[1] * (polygon[j, k, 0] - points[i, 0]) + cross -= vec1[0] * (polygon[j, k, 1] - points[i, 1]) + if cross >= 0: + success = False + break + ret[i, j] = success + return ret + + +def boxes3d_to_corners3d_lidar(boxes3d, bottom_center=True): + """Convert kitti center boxes to corners. + + 7 -------- 4 + /| /| + 6 -------- 5 . + | | | | + . 3 -------- 0 + |/ |/ + 2 -------- 1 + + Args: + boxes3d (np.ndarray): Boxes with shape of (N, 7) + [x, y, z, w, l, h, ry] in LiDAR coords, see the definition of ry + in KITTI dataset. + bottom_center (bool, optional): Whether z is on the bottom center + of object. Defaults to True. + + Returns: + np.ndarray: Box corners with the shape of [N, 8, 3]. + """ + boxes_num = boxes3d.shape[0] + w, l, h = boxes3d[:, 3], boxes3d[:, 4], boxes3d[:, 5] + x_corners = np.array( + [w / 2., -w / 2., -w / 2., w / 2., w / 2., -w / 2., -w / 2., w / 2.], + dtype=np.float32).T + y_corners = np.array( + [-l / 2., -l / 2., l / 2., l / 2., -l / 2., -l / 2., l / 2., l / 2.], + dtype=np.float32).T + if bottom_center: + z_corners = np.zeros((boxes_num, 8), dtype=np.float32) + z_corners[:, 4:8] = h.reshape(boxes_num, 1).repeat(4, axis=1) # (N, 8) + else: + z_corners = np.array([ + -h / 2., -h / 2., -h / 2., -h / 2., h / 2., h / 2., h / 2., h / 2. 
+ ], + dtype=np.float32).T + + ry = boxes3d[:, 6] + zeros, ones = np.zeros( + ry.size, dtype=np.float32), np.ones( + ry.size, dtype=np.float32) + rot_list = np.array([[np.cos(ry), -np.sin(ry), zeros], + [np.sin(ry), np.cos(ry), zeros], [zeros, zeros, + ones]]) # (3, 3, N) + R_list = np.transpose(rot_list, (2, 0, 1)) # (N, 3, 3) + + temp_corners = np.concatenate((x_corners.reshape( + -1, 8, 1), y_corners.reshape(-1, 8, 1), z_corners.reshape(-1, 8, 1)), + axis=2) # (N, 8, 3) + rotated_corners = np.matmul(temp_corners, R_list) # (N, 8, 3) + x_corners = rotated_corners[:, :, 0] + y_corners = rotated_corners[:, :, 1] + z_corners = rotated_corners[:, :, 2] + + x_loc, y_loc, z_loc = boxes3d[:, 0], boxes3d[:, 1], boxes3d[:, 2] + + x = x_loc.reshape(-1, 1) + x_corners.reshape(-1, 8) + y = y_loc.reshape(-1, 1) + y_corners.reshape(-1, 8) + z = z_loc.reshape(-1, 1) + z_corners.reshape(-1, 8) + + corners = np.concatenate( + (x.reshape(-1, 8, 1), y.reshape(-1, 8, 1), z.reshape(-1, 8, 1)), + axis=2) + + return corners.astype(np.float32) diff --git a/mmcv/core/bbox/builder.py b/mmcv/core/bbox/builder.py new file mode 100644 index 0000000..682683b --- /dev/null +++ b/mmcv/core/bbox/builder.py @@ -0,0 +1,20 @@ +from mmcv.utils import Registry, build_from_cfg + +BBOX_ASSIGNERS = Registry('bbox_assigner') +BBOX_SAMPLERS = Registry('bbox_sampler') +BBOX_CODERS = Registry('bbox_coder') + + +def build_assigner(cfg, **default_args): + """Builder of box assigner.""" + return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args) + + +def build_sampler(cfg, **default_args): + """Builder of box sampler.""" + return build_from_cfg(cfg, BBOX_SAMPLERS, default_args) + + +def build_bbox_coder(cfg, **default_args): + """Builder of box coder.""" + return build_from_cfg(cfg, BBOX_CODERS, default_args) diff --git a/mmcv/core/bbox/coder/__init__.py b/mmcv/core/bbox/coder/__init__.py new file mode 100644 index 0000000..ab2e6be --- /dev/null +++ b/mmcv/core/bbox/coder/__init__.py @@ -0,0 +1,11 @@ +from .nms_free_coder import NMSFreeCoder +from .detr3d_track_coder import DETRTrack3DCoder +from mmcv.core.bbox import build_bbox_coder +from .fut_nms_free_coder import CustomNMSFreeCoder +from .map_nms_free_coder import MapNMSFreeCoder + +__all__ = [ + 'build_bbox_coder', + 'NMSFreeCoder', 'DETRTrack3DCoder', + 'CustomNMSFreeCoder','MapNMSFreeCoder' +] diff --git a/mmcv/core/bbox/coder/base_bbox_coder.py b/mmcv/core/bbox/coder/base_bbox_coder.py new file mode 100644 index 0000000..cf0b34c --- /dev/null +++ b/mmcv/core/bbox/coder/base_bbox_coder.py @@ -0,0 +1,17 @@ +from abc import ABCMeta, abstractmethod + + +class BaseBBoxCoder(metaclass=ABCMeta): + """Base bounding box coder.""" + + def __init__(self, **kwargs): + pass + + @abstractmethod + def encode(self, bboxes, gt_bboxes): + """Encode deltas between bboxes and ground truth boxes.""" + + @abstractmethod + def decode(self, bboxes, bboxes_pred): + """Decode the predicted bboxes according to prediction and base + boxes.""" diff --git a/mmcv/core/bbox/coder/detr3d_track_coder.py b/mmcv/core/bbox/coder/detr3d_track_coder.py new file mode 100755 index 0000000..1c0e017 --- /dev/null +++ b/mmcv/core/bbox/coder/detr3d_track_coder.py @@ -0,0 +1,156 @@ +import torch + +from mmcv.core.bbox.coder.base_bbox_coder import BaseBBoxCoder +from mmcv.core.bbox.builder import BBOX_CODERS +from ..util import normalize_bbox, denormalize_bbox +from ..structures.utils import xywhr2xyxyr +from mmcv.ops.iou3d import nms_bev + +@BBOX_CODERS.register_module() +class DETRTrack3DCoder(BaseBBoxCoder): + """Bbox 
coder for DETR3D.
+
+    Args:
+        pc_range (list[float]): Range of point cloud.
+        post_center_range (list[float]): Limit of the center.
+            Default: None.
+        max_num (int): Max number to be kept. Default: 100.
+        score_threshold (float): Threshold to filter boxes based on score.
+            Default: 0.2.
+        num_classes (int): Number of classes. Default: 7.
+        with_nms (bool): Whether to apply BEV NMS when decoding.
+            Default: False.
+        iou_thres (float): IoU threshold used by the BEV NMS. Default: 0.3.
+    """
+
+    def __init__(self,
+                 pc_range,
+                 post_center_range=None,
+                 max_num=100,
+                 score_threshold=0.2,
+                 num_classes=7,
+                 with_nms=False,
+                 iou_thres=0.3):
+
+        self.pc_range = pc_range
+        self.post_center_range = post_center_range
+        self.max_num = max_num
+        self.score_threshold = score_threshold
+        self.num_classes = num_classes
+        self.with_nms = with_nms
+        self.nms_iou_thres = iou_thres
+
+    def encode(self):
+        pass
+
+    def decode_single(self, cls_scores, bbox_preds,
+                      track_scores, obj_idxes, with_mask=True, img_metas=None):
+        """Decode bboxes.
+        Args:
+            cls_scores (Tensor): Outputs from the classification head, \
+                shape [num_query, cls_out_channels]. Note \
+                cls_out_channels should include background.
+            bbox_preds (Tensor): Outputs from the regression \
+                head with normalized coordinate format (cx, cy, w, l, cz, h, rot_sine, rot_cosine, vx, vy). \
+                Shape [num_query, 9].
+            track_scores (Tensor): Track scores with shape [num_query].
+            obj_idxes (Tensor): Object indices with shape [num_query].
+            with_mask (bool): Whether to apply the post-center-range mask.
+            img_metas (list[dict], optional): Image meta info, required
+                when ``with_nms`` is True.
+
+        Returns:
+            list[dict]: Decoded boxes.
+        """
+        max_num = min(cls_scores.size(0), self.max_num)
+
+        cls_scores = cls_scores.sigmoid()
+        _, indexs = cls_scores.max(dim=-1)
+        labels = indexs % self.num_classes
+
+        _, bbox_index = track_scores.topk(max_num)
+
+        labels = labels[bbox_index]
+        bbox_preds = bbox_preds[bbox_index]
+        track_scores = track_scores[bbox_index]
+        obj_idxes = obj_idxes[bbox_index]
+
+        scores = track_scores
+
+        final_box_preds = denormalize_bbox(bbox_preds, self.pc_range)
+        final_scores = track_scores
+        final_preds = labels
+
+        # use score threshold
+        if self.score_threshold is not None:
+            thresh_mask = final_scores > self.score_threshold
+
+        if self.with_nms:
+            boxes_for_nms = xywhr2xyxyr(img_metas[0]['box_type_3d'](final_box_preds[:, :], 9).bev)
+            nms_mask = boxes_for_nms.new_zeros(boxes_for_nms.shape[0]) > 0
+            try:
+                selected = nms_bev(
+                    boxes_for_nms,
+                    final_scores,
+                    thresh=self.nms_iou_thres)
+                nms_mask[selected] = True
+            except Exception:
+                print('Error', boxes_for_nms, final_scores)
+                nms_mask = boxes_for_nms.new_ones(boxes_for_nms.shape[0]) > 0
+        if self.post_center_range is not None:
+            self.post_center_range = torch.tensor(
+                self.post_center_range, device=scores.device)
+            mask = (final_box_preds[..., :3] >=
+                    self.post_center_range[:3]).all(1)
+            mask &= (final_box_preds[..., :3] <=
+                     self.post_center_range[3:]).all(1)
+
+            if self.score_threshold:
+                mask &= thresh_mask
+            if not with_mask:
+                mask = torch.ones_like(mask) > 0
+            if self.with_nms:
+                mask &= nms_mask
+
+            boxes3d = final_box_preds[mask]
+            scores = final_scores[mask]
+            labels = final_preds[mask]
+            track_scores = track_scores[mask]
+            obj_idxes = obj_idxes[mask]
+            predictions_dict = {
+                'bboxes': boxes3d,
+                'scores': scores,
+                'labels': labels,
+                'track_scores': track_scores,
+                'obj_idxes': obj_idxes,
+                'bbox_index': bbox_index,
+                'mask': mask
+            }
+
+        else:
+            raise NotImplementedError(
+                'Need to reorganize output as a batch, only '
+                'support post_center_range is not None for now!')
+        return predictions_dict
+
+    def decode(self, preds_dicts, with_mask=True, img_metas=None):
+        """Decode bboxes.
+        Args:
+            cls_scores (Tensor): Outputs from the classification head, \
+                shape [nb_dec, bs, num_query, cls_out_channels]. Note \
+                cls_out_channels should include background.
+                Note: before sigmoid!
+            bbox_preds (Tensor): Sigmoid outputs from the regression \
+                head with normalized coordinate format (cx, cy, w, l, cz, h, rot_sine, rot_cosine, vx, vy). \
+                Shape [nb_dec, bs, num_query, 9].
+
+        Returns:
+            list[dict]: Decoded boxes.
+        """
+        all_cls_scores = preds_dicts['cls_scores']
+        all_bbox_preds = preds_dicts['bbox_preds']
+        track_scores = preds_dicts['track_scores']
+        obj_idxes = preds_dicts['obj_idxes']
+
+        predictions_list = []
+        # the batch size is assumed to be 1
+        predictions_list.append(self.decode_single(
+            all_cls_scores, all_bbox_preds,
+            track_scores, obj_idxes, with_mask, img_metas))
+        return predictions_list
diff --git a/mmcv/core/bbox/coder/fut_nms_free_coder.py b/mmcv/core/bbox/coder/fut_nms_free_coder.py
new file mode 100644
index 0000000..b8a8a95
--- /dev/null
+++ b/mmcv/core/bbox/coder/fut_nms_free_coder.py
@@ -0,0 +1,127 @@
+import torch
+
+from mmcv.core.bbox.coder.base_bbox_coder import BaseBBoxCoder
+from mmcv.core.bbox.builder import BBOX_CODERS
+from mmcv.core.bbox.util import denormalize_bbox
+import numpy as np
+
+
+@BBOX_CODERS.register_module()
+class CustomNMSFreeCoder(BaseBBoxCoder):
+    """Bbox coder for NMS-free detector.
+    Args:
+        pc_range (list[float]): Range of point cloud.
+        post_center_range (list[float]): Limit of the center.
+            Default: None.
+        max_num (int): Max number to be kept. Default: 100.
+        score_threshold (float): Threshold to filter boxes based on score.
+            Default: None.
+        code_size (int): Code size of bboxes. Default: 9
+    """
+
+    def __init__(self,
+                 pc_range,
+                 voxel_size=None,
+                 post_center_range=None,
+                 max_num=100,
+                 score_threshold=None,
+                 num_classes=10):
+        self.pc_range = pc_range
+        self.voxel_size = voxel_size
+        self.post_center_range = post_center_range
+        self.max_num = max_num
+        self.score_threshold = score_threshold
+        self.num_classes = num_classes
+
+    def encode(self):
+        pass
+
+    def decode_single(self, cls_scores, bbox_preds, traj_preds):
+        """Decode bboxes.
+        Args:
+            cls_scores (Tensor): Outputs from the classification head, \
+                shape [num_query, cls_out_channels]. Note \
+                cls_out_channels should include background.
+            bbox_preds (Tensor): Outputs from the regression \
+                head with normalized coordinate format (cx, cy, w, l, cz, h, rot_sine, rot_cosine, vx, vy). \
+                Shape [num_query, 9].
+        Returns:
+            list[dict]: Decoded boxes.
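+
+        Note: if no box passes ``score_threshold``, the threshold is
+        relaxed by a factor of 0.9 per step; once it drops below 0.01,
+        all boxes are kept (see the loop below).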
+ """ + max_num = self.max_num + + cls_scores = cls_scores.sigmoid() + scores, indexs = cls_scores.view(-1).topk(max_num) + labels = indexs % self.num_classes + bbox_index = indexs // self.num_classes + bbox_preds = bbox_preds[bbox_index] + traj_preds = traj_preds[bbox_index] + + final_box_preds = denormalize_bbox(bbox_preds, self.pc_range) + final_scores = scores + final_preds = labels + final_traj_preds = traj_preds + + # use score threshold + if self.score_threshold is not None: + thresh_mask = final_scores > self.score_threshold + tmp_score = self.score_threshold + while thresh_mask.sum() == 0: + tmp_score *= 0.9 + if tmp_score < 0.01: + thresh_mask = final_scores > -1 + break + thresh_mask = final_scores >= tmp_score + + if self.post_center_range is not None: + self.post_center_range = torch.tensor( + self.post_center_range, device=scores.device) + mask = (final_box_preds[..., :3] >= + self.post_center_range[:3]).all(1) + mask &= (final_box_preds[..., :3] <= + self.post_center_range[3:]).all(1) + + if self.score_threshold: + mask &= thresh_mask + + boxes3d = final_box_preds[mask] + scores = final_scores[mask] + labels = final_preds[mask] + trajs = final_traj_preds[mask] + + predictions_dict = { + 'bboxes': boxes3d, + 'scores': scores, + 'labels': labels, + 'trajs': trajs + } + + else: + raise NotImplementedError( + 'Need to reorganize output as a batch, only ' + 'support post_center_range is not None for now!') + return predictions_dict + + def decode(self, preds_dicts): + """Decode bboxes. + Args: + all_cls_scores (Tensor): Outputs from the classification head, \ + shape [nb_dec, bs, num_query, cls_out_channels]. Note \ + cls_out_channels should includes background. + all_bbox_preds (Tensor): Sigmoid outputs from the regression \ + head with normalized coordinate format (cx, cy, w, l, cz, h, rot_sine, rot_cosine, vx, vy). \ + Shape [nb_dec, bs, num_query, 9]. + Returns: + list[dict]: Decoded boxes. + """ + all_cls_scores = preds_dicts['all_cls_scores'][-1] + all_bbox_preds = preds_dicts['all_bbox_preds'][-1] + all_traj_preds = preds_dicts['all_traj_preds'][-1] + + batch_size = all_cls_scores.size()[0] + predictions_list = [] + for i in range(batch_size): + predictions_list.append(self.decode_single(all_cls_scores[i], all_bbox_preds[i], all_traj_preds[i])) + return predictions_list + diff --git a/mmcv/core/bbox/coder/map_nms_free_coder.py b/mmcv/core/bbox/coder/map_nms_free_coder.py new file mode 100644 index 0000000..f20d300 --- /dev/null +++ b/mmcv/core/bbox/coder/map_nms_free_coder.py @@ -0,0 +1,126 @@ +import torch + +from mmcv.core.bbox.coder.base_bbox_coder import BaseBBoxCoder +from mmcv.core.bbox.builder import BBOX_CODERS +from mmcv.models.vad_utils.map_utils import ( + denormalize_2d_pts, denormalize_2d_bbox +) + + +@BBOX_CODERS.register_module() +class MapNMSFreeCoder(BaseBBoxCoder): + """Bbox coder for NMS-free detector. + Args: + pc_range (list[float]): Range of point cloud. + post_center_range (list[float]): Limit of the center. + Default: None. + max_num (int): Max number to be kept. Default: 100. + score_threshold (float): Threshold to filter boxes based on score. + Default: None. + code_size (int): Code size of bboxes. 
+    """
+
+    def __init__(self,
+                 pc_range,
+                 voxel_size=None,
+                 post_center_range=None,
+                 max_num=100,
+                 score_threshold=None,
+                 num_classes=10):
+        self.pc_range = pc_range
+        self.voxel_size = voxel_size
+        self.post_center_range = post_center_range
+        self.max_num = max_num
+        self.score_threshold = score_threshold
+        self.num_classes = num_classes
+
+    def encode(self):
+        pass
+
+    def decode_single(self, cls_scores, bbox_preds, pts_preds):
+        """Decode bboxes.
+        Args:
+            cls_scores (Tensor): Outputs from the classification head, \
+                shape [num_query, cls_out_channels]. Note \
+                cls_out_channels should include background.
+            bbox_preds (Tensor): Outputs from the regression \
+                head with normalized coordinate format (cx, cy, w, l, cz, h, rot_sine, rot_cosine, vx, vy). \
+                Shape [num_query, 9].
+            pts_preds (Tensor):
+                Shape [num_query, fixed_num_pts, 2]
+        Returns:
+            list[dict]: Decoded boxes.
+        """
+        max_num = self.max_num
+
+        cls_scores = cls_scores.sigmoid()
+        scores, indexs = cls_scores.view(-1).topk(max_num)
+        labels = indexs % self.num_classes
+        bbox_index = indexs // self.num_classes
+        bbox_preds = bbox_preds[bbox_index]
+        pts_preds = pts_preds[bbox_index]
+
+        final_box_preds = denormalize_2d_bbox(bbox_preds, self.pc_range)
+        final_pts_preds = denormalize_2d_pts(pts_preds, self.pc_range)  # (num_query, num_pts, 2)
+        final_scores = scores
+        final_preds = labels
+
+        # use score threshold
+        if self.score_threshold is not None:
+            thresh_mask = final_scores > self.score_threshold
+            tmp_score = self.score_threshold
+            while thresh_mask.sum() == 0:
+                tmp_score *= 0.9
+                if tmp_score < 0.01:
+                    thresh_mask = final_scores > -1
+                    break
+                thresh_mask = final_scores >= tmp_score
+
+        if self.post_center_range is not None:
+            self.post_center_range = torch.tensor(
+                self.post_center_range, device=scores.device)
+            mask = (final_box_preds[..., :4] >= self.post_center_range[:4]).all(1)
+            mask &= (final_box_preds[..., :4] <= self.post_center_range[4:]).all(1)
+
+            if self.score_threshold:
+                mask &= thresh_mask
+
+            boxes3d = final_box_preds[mask]
+            scores = final_scores[mask]
+            pts = final_pts_preds[mask]
+            labels = final_preds[mask]
+            predictions_dict = {
+                'map_bboxes': boxes3d,
+                'map_scores': scores,
+                'map_labels': labels,
+                'map_pts': pts,
+            }
+
+        else:
+            raise NotImplementedError(
+                'Need to reorganize output as a batch, only '
+                'support post_center_range is not None for now!')
+        return predictions_dict
+
+    def decode(self, preds_dicts):
+        """Decode bboxes.
+        Args:
+            all_cls_scores (Tensor): Outputs from the classification head, \
+                shape [nb_dec, bs, num_query, cls_out_channels]. Note \
+                cls_out_channels should include background.
+            all_bbox_preds (Tensor): Sigmoid outputs from the regression \
+                head with normalized coordinate format (cx, cy, w, l, cz, h, rot_sine, rot_cosine, vx, vy). \
+                Shape [nb_dec, bs, num_query, 9].
+        Returns:
+            list[dict]: Decoded boxes.
+ """ + all_cls_scores = preds_dicts['map_all_cls_scores'][-1] + all_bbox_preds = preds_dicts['map_all_bbox_preds'][-1] + all_pts_preds = preds_dicts['map_all_pts_preds'][-1] + batch_size = all_cls_scores.size()[0] + predictions_list = [] + for i in range(batch_size): + predictions_list.append(self.decode_single(all_cls_scores[i], all_bbox_preds[i],all_pts_preds[i])) + return predictions_list \ No newline at end of file diff --git a/mmcv/core/bbox/coder/nms_free_coder.py b/mmcv/core/bbox/coder/nms_free_coder.py new file mode 100755 index 0000000..95430bc --- /dev/null +++ b/mmcv/core/bbox/coder/nms_free_coder.py @@ -0,0 +1,124 @@ +import torch + +from mmcv.core.bbox.coder.base_bbox_coder import BaseBBoxCoder +from mmcv.core.bbox.builder import BBOX_CODERS +from mmcv.core.bbox.util import denormalize_bbox +import numpy as np + + +@BBOX_CODERS.register_module() +class NMSFreeCoder(BaseBBoxCoder): + """Bbox coder for NMS-free detector. + Args: + pc_range (list[float]): Range of point cloud. + post_center_range (list[float]): Limit of the center. + Default: None. + max_num (int): Max number to be kept. Default: 100. + score_threshold (float): Threshold to filter boxes based on score. + Default: None. + code_size (int): Code size of bboxes. Default: 9 + """ + + def __init__(self, + pc_range, + voxel_size=None, + post_center_range=None, + max_num=100, + score_threshold=None, + num_classes=10): + self.pc_range = pc_range + self.voxel_size = voxel_size + self.post_center_range = post_center_range + self.max_num = max_num + self.score_threshold = score_threshold + self.num_classes = num_classes + + def encode(self): + + pass + + def decode_single(self, cls_scores, bbox_preds): + """Decode bboxes. + Args: + cls_scores (Tensor): Outputs from the classification head, \ + shape [num_query, cls_out_channels]. Note \ + cls_out_channels should includes background. + bbox_preds (Tensor): Outputs from the regression \ + head with normalized coordinate format (cx, cy, w, l, cz, h, rot_sine, rot_cosine, vx, vy). \ + Shape [num_query, 9]. + Returns: + list[dict]: Decoded boxes. 
+ """ + max_num = min(self.max_num, cls_scores.shape[0]) + + cls_scores = cls_scores.sigmoid() + scores, indexs = cls_scores.view(-1).topk(max_num) + labels = indexs % self.num_classes + bbox_index = indexs // self.num_classes + bbox_preds = bbox_preds[bbox_index] + + final_box_preds = denormalize_bbox(bbox_preds, self.pc_range) + final_scores = scores + final_preds = labels + + # use score threshold + if self.score_threshold is not None: + thresh_mask = final_scores > self.score_threshold + tmp_score = self.score_threshold + while thresh_mask.sum() == 0: + tmp_score *= 0.9 + if tmp_score < 0.01: + thresh_mask = final_scores > -1 + break + thresh_mask = final_scores >= tmp_score + + if self.post_center_range is not None: + self.post_center_range = torch.tensor( + self.post_center_range, device=scores.device) + mask = (final_box_preds[..., :3] >= + self.post_center_range[:3]).all(1) + mask &= (final_box_preds[..., :3] <= + self.post_center_range[3:]).all(1) + + if self.score_threshold: + mask &= thresh_mask + + boxes3d = final_box_preds[mask] + scores = final_scores[mask] + + labels = final_preds[mask] + predictions_dict = { + 'bboxes': boxes3d, + 'scores': scores, + 'labels': labels, + 'mask': mask, + 'bbox_index': bbox_index + } + + else: + raise NotImplementedError( + 'Need to reorganize output as a batch, only ' + 'support post_center_range is not None for now!') + return predictions_dict + + def decode(self, preds_dicts): + """Decode bboxes. + Args: + all_cls_scores (Tensor): Outputs from the classification head, \ + shape [nb_dec, bs, num_query, cls_out_channels]. Note \ + cls_out_channels should includes background. + all_bbox_preds (Tensor): Sigmoid outputs from the regression \ + head with normalized coordinate format (cx, cy, w, l, cz, h, rot_sine, rot_cosine, vx, vy). \ + Shape [nb_dec, bs, num_query, 9]. + Returns: + list[dict]: Decoded boxes. 
+ """ + all_cls_scores = preds_dicts['all_cls_scores'][-1] + all_bbox_preds = preds_dicts['all_bbox_preds'][-1] + + batch_size = all_cls_scores.size()[0] + predictions_list = [] + for i in range(batch_size): + predictions_list.append(self.decode_single(all_cls_scores[i], all_bbox_preds[i])) + return predictions_list + diff --git a/mmcv/core/bbox/iou_calculators/__init__.py b/mmcv/core/bbox/iou_calculators/__init__.py new file mode 100644 index 0000000..3c13f41 --- /dev/null +++ b/mmcv/core/bbox/iou_calculators/__init__.py @@ -0,0 +1,11 @@ +from .builder import build_iou_calculator +from .iou2d_calculator import BboxOverlaps2D, bbox_overlaps +from .iou3d_calculator import (AxisAlignedBboxOverlaps3D, BboxOverlaps3D, + BboxOverlapsNearest3D, + axis_aligned_bbox_overlaps_3d, bbox_overlaps_3d, + bbox_overlaps_nearest_3d) + +__all__ = ['build_iou_calculator', 'BboxOverlaps2D', 'bbox_overlaps', + 'BboxOverlapsNearest3D', 'BboxOverlaps3D', 'bbox_overlaps_nearest_3d', + 'bbox_overlaps_3d', 'AxisAlignedBboxOverlaps3D', + 'axis_aligned_bbox_overlaps_3d'] diff --git a/mmcv/core/bbox/iou_calculators/builder.py b/mmcv/core/bbox/iou_calculators/builder.py new file mode 100644 index 0000000..09094d7 --- /dev/null +++ b/mmcv/core/bbox/iou_calculators/builder.py @@ -0,0 +1,8 @@ +from mmcv.utils import Registry, build_from_cfg + +IOU_CALCULATORS = Registry('IoU calculator') + + +def build_iou_calculator(cfg, default_args=None): + """Builder of IoU calculator.""" + return build_from_cfg(cfg, IOU_CALCULATORS, default_args) diff --git a/mmcv/core/bbox/iou_calculators/iou2d_calculator.py b/mmcv/core/bbox/iou_calculators/iou2d_calculator.py new file mode 100644 index 0000000..25f2b46 --- /dev/null +++ b/mmcv/core/bbox/iou_calculators/iou2d_calculator.py @@ -0,0 +1,260 @@ +import torch + +from .builder import IOU_CALCULATORS + + +def cast_tensor_type(x, scale=1., dtype=None): + if dtype == 'fp16': + # scale is for preventing overflows + x = (x / scale).half() + return x + + +def fp16_clamp(x, min=None, max=None): + if not x.is_cuda and x.dtype == torch.float16: + # clamp for cpu float16, tensor fp16 has no clamp implementation + return x.float().clamp(min, max).half() + + return x.clamp(min, max) + + +@IOU_CALCULATORS.register_module() +class BboxOverlaps2D: + """2D Overlaps (e.g. IoUs, GIoUs) Calculator.""" + + def __init__(self, scale=1., dtype=None): + self.scale = scale + self.dtype = dtype + + def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False): + """Calculate IoU between 2D bboxes. + + Args: + bboxes1 (Tensor): bboxes have shape (m, 4) in + format, or shape (m, 5) in format. + bboxes2 (Tensor): bboxes have shape (m, 4) in + format, shape (m, 5) in format, or be + empty. If ``is_aligned `` is ``True``, then m and n must be + equal. + mode (str): "iou" (intersection over union), "iof" (intersection + over foreground), or "giou" (generalized intersection over + union). + is_aligned (bool, optional): If True, then m and n must be equal. + Default False. 
+
+        Returns:
+            Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
+        """
+        assert bboxes1.size(-1) in [0, 4, 5]
+        assert bboxes2.size(-1) in [0, 4, 5]
+        if bboxes2.size(-1) == 5:
+            bboxes2 = bboxes2[..., :4]
+        if bboxes1.size(-1) == 5:
+            bboxes1 = bboxes1[..., :4]
+
+        if self.dtype == 'fp16':
+            # change tensor type to save cpu and cuda memory and keep speed
+            bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype)
+            bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype)
+            overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
+            if not overlaps.is_cuda and overlaps.dtype == torch.float16:
+                # resume cpu float32
+                overlaps = overlaps.float()
+            return overlaps
+
+        return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
+
+    def __repr__(self):
+        """str: a string describing the module"""
+        repr_str = self.__class__.__name__ + f'(' \
+            f'scale={self.scale}, dtype={self.dtype})'
+        return repr_str
+
+
+def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
+    """Calculate overlap between two sets of bboxes.
+
+    FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889
+    Note:
+        Assume bboxes1 is M x 4, bboxes2 is N x 4. When mode is 'iou',
+        some new variables are generated when calculating IoU with
+        the bbox_overlaps function:
+
+        1) is_aligned is False
+            area1: M x 1
+            area2: N x 1
+            lt: M x N x 2
+            rb: M x N x 2
+            wh: M x N x 2
+            overlap: M x N x 1
+            union: M x N x 1
+            ious: M x N x 1
+
+            Total memory:
+                S = (9 x N x M + N + M) * 4 Byte,
+
+            When using FP16, we can reduce:
+                R = (9 x N x M + N + M) * 4 / 2 Byte
+            R is larger than (N + M) * 4 * 2 whenever N and M >= 1, since
+            N + M <= N * M < 3 * N * M when N >= 2 and M >= 2, and
+            N + 1 < 3 * N when N or M is 1.
+
+            Given M = 40 (ground truth), N = 400000 (three anchor boxes
+            in per grid, FPN, R-CNNs),
+                R = 275 MB (one pass)
+
+            A special case (dense detection), M = 512 (ground truth),
+                R = 3516 MB = 3.43 GB
+
+            When the batch size is B, reduce:
+                B x R
+
+            Therefore, CUDA memory runs out frequently.
+
+            Experiments on GeForce RTX 2080Ti (11019 MiB):
+
+            | dtype | M | N | Use | Real | Ideal |
+            |:----:|:----:|:----:|:----:|:----:|:----:|
+            | FP32 | 512 | 400000 | 8020 MiB | -- | -- |
+            | FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB |
+            | FP32 | 40 | 400000 | 1540 MiB | -- | -- |
+            | FP16 | 40 | 400000 | 1264 MiB | 276 MiB | 275 MiB |
+
+        2) is_aligned is True
+            area1: N x 1
+            area2: N x 1
+            lt: N x 2
+            rb: N x 2
+            wh: N x 2
+            overlap: N x 1
+            union: N x 1
+            ious: N x 1
+
+            Total memory:
+                S = 11 x N * 4 Byte
+
+            When using FP16, we can reduce:
+                R = 11 x N * 4 / 2 Byte
+
+        The same holds for 'giou' (which uses more memory than 'iou').
+
+        Time-wise, FP16 is generally faster than FP32.
+
+        When gpu_assign_thr is not -1, computation runs on the CPU, which
+        takes more time but does not reduce memory. With FP16 we can still
+        halve the memory while keeping the speed.
+
+    If ``is_aligned`` is ``False``, then calculate the overlaps between each
+    bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
+    pair of bboxes1 and bboxes2.
+
+    Args:
+        bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
+        bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
+            B indicates the batch dim, in shape (B1, B2, ..., Bn).
+            If ``is_aligned`` is ``True``, then m and n must be equal.
+        mode (str): "iou" (intersection over union), "iof" (intersection over
+            foreground) or "giou" (generalized intersection over union).
+            Default "iou".
+        is_aligned (bool, optional): If True, then m and n must be equal.
+            Default False.
+ eps (float, optional): A value added to the denominator for numerical + stability. Default 1e-6. + + Returns: + Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,) + + Example: + >>> bboxes1 = torch.FloatTensor([ + >>> [0, 0, 10, 10], + >>> [10, 10, 20, 20], + >>> [32, 32, 38, 42], + >>> ]) + >>> bboxes2 = torch.FloatTensor([ + >>> [0, 0, 10, 20], + >>> [0, 10, 10, 19], + >>> [10, 10, 20, 20], + >>> ]) + >>> overlaps = bbox_overlaps(bboxes1, bboxes2) + >>> assert overlaps.shape == (3, 3) + >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True) + >>> assert overlaps.shape == (3, ) + + Example: + >>> empty = torch.empty(0, 4) + >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]]) + >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1) + >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0) + >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0) + """ + + assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}' + # Either the boxes are empty or the length of boxes' last dimension is 4 + assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0) + assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0) + + # Batch dim must be the same + # Batch dim: (B1, B2, ... Bn) + assert bboxes1.shape[:-2] == bboxes2.shape[:-2] + batch_shape = bboxes1.shape[:-2] + + rows = bboxes1.size(-2) + cols = bboxes2.size(-2) + if is_aligned: + assert rows == cols + + if rows * cols == 0: + if is_aligned: + return bboxes1.new(batch_shape + (rows, )) + else: + return bboxes1.new(batch_shape + (rows, cols)) + + area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * ( + bboxes1[..., 3] - bboxes1[..., 1]) + area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * ( + bboxes2[..., 3] - bboxes2[..., 1]) + + if is_aligned: + lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2] + rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2] + + wh = fp16_clamp(rb - lt, min=0) + overlap = wh[..., 0] * wh[..., 1] + + if mode in ['iou', 'giou']: + union = area1 + area2 - overlap + else: + union = area1 + if mode == 'giou': + enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2]) + enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:]) + else: + lt = torch.max(bboxes1[..., :, None, :2], + bboxes2[..., None, :, :2]) # [B, rows, cols, 2] + rb = torch.min(bboxes1[..., :, None, 2:], + bboxes2[..., None, :, 2:]) # [B, rows, cols, 2] + + wh = fp16_clamp(rb - lt, min=0) + overlap = wh[..., 0] * wh[..., 1] + + if mode in ['iou', 'giou']: + union = area1[..., None] + area2[..., None, :] - overlap + else: + union = area1[..., None] + if mode == 'giou': + enclosed_lt = torch.min(bboxes1[..., :, None, :2], + bboxes2[..., None, :, :2]) + enclosed_rb = torch.max(bboxes1[..., :, None, 2:], + bboxes2[..., None, :, 2:]) + + eps = union.new_tensor([eps]) + union = torch.max(union, eps) + ious = overlap / union + if mode in ['iou', 'iof']: + return ious + # calculate gious + enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0) + enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1] + enclose_area = torch.max(enclose_area, eps) + gious = ious - (enclose_area - union) / enclose_area + return gious diff --git a/mmcv/core/bbox/iou_calculators/iou3d_calculator.py b/mmcv/core/bbox/iou_calculators/iou3d_calculator.py new file mode 100644 index 0000000..5bc00b4 --- /dev/null +++ b/mmcv/core/bbox/iou_calculators/iou3d_calculator.py @@ -0,0 +1,321 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
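+# A minimal usage sketch (illustrative only; the exact box layout, e.g.
+# (x, y, z, dims, yaw), is documented per function below):
+#
+#   from mmcv.core.bbox.iou_calculators import BboxOverlapsNearest3D
+#   iou_calc = BboxOverlapsNearest3D(coordinate='lidar')
+#   ious = iou_calc(bboxes1, bboxes2)  # pairwise IoUs in the nearest BEV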
+import torch
+
+from .iou2d_calculator import bbox_overlaps
+from .builder import IOU_CALCULATORS
+from ..structures.utils import get_box_type
+
+
+@IOU_CALCULATORS.register_module()
+class BboxOverlapsNearest3D(object):
+    """Nearest 3D IoU Calculator.
+
+    Note:
+        This IoU calculator first finds the nearest 2D boxes in bird eye view
+        (BEV), and then calculates the 2D IoU using :meth:`bbox_overlaps`.
+
+    Args:
+        coordinate (str): 'camera', 'lidar', or 'depth' coordinate system.
+    """
+
+    def __init__(self, coordinate='lidar'):
+        assert coordinate in ['camera', 'lidar', 'depth']
+        self.coordinate = coordinate
+
+    def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
+        """Calculate nearest 3D IoU.
+
+        Note:
+            If ``is_aligned`` is ``False``, then it calculates the ious between
+            each bbox of bboxes1 and bboxes2, otherwise it calculates the ious
+            between each aligned pair of bboxes1 and bboxes2.
+
+        Args:
+            bboxes1 (torch.Tensor): shape (N, 7+C) [x, y, z, h, w, l, ry, v].
+            bboxes2 (torch.Tensor): shape (M, 7+C) [x, y, z, h, w, l, ry, v].
+            mode (str): "iou" (intersection over union) or iof
+                (intersection over foreground).
+            is_aligned (bool): Whether the calculation is aligned.
+
+        Return:
+            torch.Tensor: If ``is_aligned`` is ``False``, return ious between \
+                bboxes1 and bboxes2 with shape (M, N). If ``is_aligned`` is \
+                ``True``, the returned shape is (M,).
+        """
+        return bbox_overlaps_nearest_3d(bboxes1, bboxes2, mode, is_aligned,
+                                        self.coordinate)
+
+    def __repr__(self):
+        """str: Return a string that describes the module."""
+        repr_str = self.__class__.__name__
+        repr_str += f'(coordinate={self.coordinate})'
+        return repr_str
+
+
+@IOU_CALCULATORS.register_module()
+class BboxOverlaps3D(object):
+    """3D IoU Calculator.
+
+    Args:
+        coordinate (str): The coordinate system, valid options are
+            'camera', 'lidar', and 'depth'.
+    """
+
+    def __init__(self, coordinate):
+        assert coordinate in ['camera', 'lidar', 'depth']
+        self.coordinate = coordinate
+
+    def __call__(self, bboxes1, bboxes2, mode='iou'):
+        """Calculate 3D IoU using cuda implementation.
+
+        Note:
+            This function calculates the IoU of 3D boxes based on their
+            volumes. IoU calculator ``:class:BboxOverlaps3D`` uses this
+            function to calculate the actual 3D IoUs of boxes.
+
+        Args:
+            bboxes1 (torch.Tensor): shape (N, 7+C) [x, y, z, h, w, l, ry].
+            bboxes2 (torch.Tensor): shape (M, 7+C) [x, y, z, h, w, l, ry].
+            mode (str): "iou" (intersection over union) or
+                iof (intersection over foreground).
+
+        Return:
+            torch.Tensor: Bbox overlaps results of bboxes1 and bboxes2 \
+                with shape (M, N) (aligned mode is not supported currently).
+        """
+        return bbox_overlaps_3d(bboxes1, bboxes2, mode, self.coordinate)
+
+    def __repr__(self):
+        """str: return a string that describes the module"""
+        repr_str = self.__class__.__name__
+        repr_str += f'(coordinate={self.coordinate})'
+        return repr_str
+
+
+def bbox_overlaps_nearest_3d(bboxes1,
+                             bboxes2,
+                             mode='iou',
+                             is_aligned=False,
+                             coordinate='lidar'):
+    """Calculate nearest 3D IoU.
+
+    Note:
+        This function first finds the nearest 2D boxes in bird eye view
+        (BEV), and then calculates the 2D IoU using :meth:`bbox_overlaps`.
+        The IoU calculator :class:`BboxOverlapsNearest3D` uses this
+        function to calculate IoUs of boxes.
+
+        If ``is_aligned`` is ``False``, then it calculates the ious between
+        each bbox of bboxes1 and bboxes2, otherwise the ious between each
+        aligned pair of bboxes1 and bboxes2.
+
+    Args:
+        bboxes1 (torch.Tensor): shape (N, 7+C) [x, y, z, h, w, l, ry, v].
+        bboxes2 (torch.Tensor): shape (M, 7+C) [x, y, z, h, w, l, ry, v].
+        mode (str): "iou" (intersection over union) or iof
+            (intersection over foreground).
+        is_aligned (bool): Whether the calculation is aligned.
+
+    Return:
+        torch.Tensor: If ``is_aligned`` is ``False``, return ious between \
+            bboxes1 and bboxes2 with shape (M, N). If ``is_aligned`` is \
+            ``True``, the returned shape is (M,).
+    """
+    assert bboxes1.size(-1) == bboxes2.size(-1) >= 7
+
+    box_type, _ = get_box_type(coordinate)
+
+    bboxes1 = box_type(bboxes1, box_dim=bboxes1.shape[-1])
+    bboxes2 = box_type(bboxes2, box_dim=bboxes2.shape[-1])
+
+    # Change the bboxes to bev
+    # box conversion and iou calculation in torch version on CUDA
+    # is 10x faster than that in numpy version
+    bboxes1_bev = bboxes1.nearest_bev
+    bboxes2_bev = bboxes2.nearest_bev
+
+    ret = bbox_overlaps(
+        bboxes1_bev, bboxes2_bev, mode=mode, is_aligned=is_aligned)
+    return ret
+
+
+def bbox_overlaps_3d(bboxes1, bboxes2, mode='iou', coordinate='camera'):
+    """Calculate 3D IoU using cuda implementation.
+
+    Note:
+        This function calculates the IoU of 3D boxes based on their volumes.
+        IoU calculator :class:`BboxOverlaps3D` uses this function to
+        calculate the actual IoUs of boxes.
+
+    Args:
+        bboxes1 (torch.Tensor): shape (N, 7+C) [x, y, z, h, w, l, ry].
+        bboxes2 (torch.Tensor): shape (M, 7+C) [x, y, z, h, w, l, ry].
+        mode (str): "iou" (intersection over union) or
+            iof (intersection over foreground).
+        coordinate (str): 'camera' or 'lidar' coordinate system.
+
+    Return:
+        torch.Tensor: Bbox overlaps results of bboxes1 and bboxes2 \
+            with shape (M, N) (aligned mode is not supported currently).
+    """
+    assert bboxes1.size(-1) == bboxes2.size(-1) >= 7
+
+    box_type, _ = get_box_type(coordinate)
+
+    bboxes1 = box_type(bboxes1, box_dim=bboxes1.shape[-1])
+    bboxes2 = box_type(bboxes2, box_dim=bboxes2.shape[-1])
+
+    return bboxes1.overlaps(bboxes1, bboxes2, mode=mode)
+
+
+@IOU_CALCULATORS.register_module()
+class AxisAlignedBboxOverlaps3D(object):
+    """Axis-aligned 3D Overlaps (IoU) Calculator."""
+
+    def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
+        """Calculate IoU between axis-aligned 3D bboxes.
+
+        Args:
+            bboxes1 (Tensor): shape (B, m, 6) in
+                <x1, y1, z1, x2, y2, z2> format or empty.
+            bboxes2 (Tensor): shape (B, n, 6) in
+                <x1, y1, z1, x2, y2, z2> format or empty.
+                B indicates the batch dim, in shape (B1, B2, ..., Bn).
+                If ``is_aligned`` is ``True``, then m and n must be equal.
+            mode (str): "iou" (intersection over union) or "giou" (generalized
+                intersection over union).
+            is_aligned (bool, optional): If True, then m and n must be equal.
+                Default False.
+        Returns:
+            Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
+        """
+        assert bboxes1.size(-1) == bboxes2.size(-1) == 6
+        return axis_aligned_bbox_overlaps_3d(bboxes1, bboxes2, mode,
+                                             is_aligned)
+
+    def __repr__(self):
+        """str: a string describing the module"""
+        repr_str = self.__class__.__name__ + '()'
+        return repr_str
+
+
+def axis_aligned_bbox_overlaps_3d(bboxes1,
+                                  bboxes2,
+                                  mode='iou',
+                                  is_aligned=False,
+                                  eps=1e-6):
+    """Calculate overlap between two sets of axis-aligned 3D bboxes. If
+    ``is_aligned`` is ``False``, then calculate the overlaps between each bbox
+    of bboxes1 and bboxes2, otherwise the overlaps between each aligned pair of
+    bboxes1 and bboxes2.
+
+    Args:
+        bboxes1 (Tensor): shape (B, m, 6) in
+            <x1, y1, z1, x2, y2, z2> format or empty.
+        bboxes2 (Tensor): shape (B, n, 6) in
+            <x1, y1, z1, x2, y2, z2> format or empty.
+            B indicates the batch dim, in shape (B1, B2, ..., Bn).
+            If ``is_aligned`` is ``True``, then m and n must be equal.
+        mode (str): "iou" (intersection over union) or "giou" (generalized
+            intersection over union).
+        is_aligned (bool, optional): If True, then m and n must be equal.
+            Default False.
+        eps (float, optional): A value added to the denominator for numerical
+            stability. Default 1e-6.
+
+    Returns:
+        Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
+
+    Example:
+        >>> bboxes1 = torch.FloatTensor([
+        >>>     [0, 0, 0, 10, 10, 10],
+        >>>     [10, 10, 10, 20, 20, 20],
+        >>>     [32, 32, 32, 38, 40, 42],
+        >>> ])
+        >>> bboxes2 = torch.FloatTensor([
+        >>>     [0, 0, 0, 10, 20, 20],
+        >>>     [0, 10, 10, 10, 19, 20],
+        >>>     [10, 10, 10, 20, 20, 20],
+        >>> ])
+        >>> overlaps = axis_aligned_bbox_overlaps_3d(bboxes1, bboxes2)
+        >>> assert overlaps.shape == (3, 3)
+        >>> overlaps = axis_aligned_bbox_overlaps_3d(bboxes1, bboxes2, is_aligned=True)
+        >>> assert overlaps.shape == (3, )
+    Example:
+        >>> empty = torch.empty(0, 6)
+        >>> nonempty = torch.FloatTensor([[0, 0, 0, 10, 9, 10]])
+        >>> assert tuple(axis_aligned_bbox_overlaps_3d(empty, nonempty).shape) == (0, 1)
+        >>> assert tuple(axis_aligned_bbox_overlaps_3d(nonempty, empty).shape) == (1, 0)
+        >>> assert tuple(axis_aligned_bbox_overlaps_3d(empty, empty).shape) == (0, 0)
+    """
+
+    assert mode in ['iou', 'giou'], f'Unsupported mode {mode}'
+    # Either the boxes are empty or the length of boxes' last dimension is 6
+    assert (bboxes1.size(-1) == 6 or bboxes1.size(0) == 0)
+    assert (bboxes2.size(-1) == 6 or bboxes2.size(0) == 0)
+
+    # Batch dim must be the same
+    # Batch dim: (B1, B2, ... Bn)
+    assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
+    batch_shape = bboxes1.shape[:-2]
+
+    rows = bboxes1.size(-2)
+    cols = bboxes2.size(-2)
+    if is_aligned:
+        assert rows == cols
+
+    if rows * cols == 0:
+        if is_aligned:
+            return bboxes1.new(batch_shape + (rows, ))
+        else:
+            return bboxes1.new(batch_shape + (rows, cols))
+
+    area1 = (bboxes1[..., 3] -
+             bboxes1[..., 0]) * (bboxes1[..., 4] - bboxes1[..., 1]) * (
+                 bboxes1[..., 5] - bboxes1[..., 2])
+    area2 = (bboxes2[..., 3] -
+             bboxes2[..., 0]) * (bboxes2[..., 4] - bboxes2[..., 1]) * (
+                 bboxes2[..., 5] - bboxes2[..., 2])
+
+    if is_aligned:
+        lt = torch.max(bboxes1[..., :3], bboxes2[..., :3])  # [B, rows, 3]
+        rb = torch.min(bboxes1[..., 3:], bboxes2[..., 3:])  # [B, rows, 3]
+
+        wh = (rb - lt).clamp(min=0)  # [B, rows, 3]
+        overlap = wh[..., 0] * wh[..., 1] * wh[..., 2]
+
+        if mode in ['iou', 'giou']:
+            union = area1 + area2 - overlap
+        else:
+            union = area1
+        if mode == 'giou':
+            enclosed_lt = torch.min(bboxes1[..., :3], bboxes2[..., :3])
+            enclosed_rb = torch.max(bboxes1[..., 3:], bboxes2[..., 3:])
+    else:
+        lt = torch.max(bboxes1[..., :, None, :3],
+                       bboxes2[..., None, :, :3])  # [B, rows, cols, 3]
+        rb = torch.min(bboxes1[..., :, None, 3:],
+                       bboxes2[..., None, :, 3:])  # [B, rows, cols, 3]
+
+        wh = (rb - lt).clamp(min=0)  # [B, rows, cols, 3]
+        overlap = wh[..., 0] * wh[..., 1] * wh[..., 2]
+
+        if mode in ['iou', 'giou']:
+            union = area1[..., None] + area2[..., None, :] - overlap
+        if mode == 'giou':
+            enclosed_lt = torch.min(bboxes1[..., :, None, :3],
+                                    bboxes2[..., None, :, :3])
+            enclosed_rb = torch.max(bboxes1[..., :, None, 3:],
+                                    bboxes2[..., None, :, 3:])
+
+    eps = union.new_tensor([eps])
+    union = torch.max(union, eps)
+    ious = overlap / union
+    if mode in ['iou']:
+        return ious
+    # calculate gious
+    enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0)
+    enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1] * enclose_wh[..., 2]
+    enclose_area = torch.max(enclose_area, eps)
+    gious = ious - (enclose_area - union) / enclose_area
+    return gious
diff --git a/mmcv/core/bbox/match_costs/__init__.py b/mmcv/core/bbox/match_costs/__init__.py
new file mode 100644
index 0000000..8fdb6d2
--- /dev/null
+++ b/mmcv/core/bbox/match_costs/__init__.py
@@ -0,0 +1,7 @@
+from .builder import build_match_cost
+from .match_cost import BBoxL1Cost, ClassificationCost, FocalLossCost, IoUCost, BBox3DL1Cost, DiceCost
+
+__all__ = [
+    'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost',
+    'FocalLossCost', 'BBox3DL1Cost', 'DiceCost'
+]
diff --git a/mmcv/core/bbox/match_costs/builder.py b/mmcv/core/bbox/match_costs/builder.py
new file mode 100644
index 0000000..6894017
--- /dev/null
+++ b/mmcv/core/bbox/match_costs/builder.py
@@ -0,0 +1,8 @@
+from mmcv.utils import Registry, build_from_cfg
+
+MATCH_COST = Registry('Match Cost')
+
+
+def build_match_cost(cfg, default_args=None):
+    """Builder of match cost."""
+    return build_from_cfg(cfg, MATCH_COST, default_args)
diff --git a/mmcv/core/bbox/match_costs/match_cost.py b/mmcv/core/bbox/match_costs/match_cost.py
new file mode 100644
index 0000000..b5a6a68
--- /dev/null
+++ b/mmcv/core/bbox/match_costs/match_cost.py
@@ -0,0 +1,324 @@
+import torch
+import torch.nn.functional as F
+from mmcv.core.bbox.iou_calculators import bbox_overlaps
+from mmcv.core.bbox.transforms import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh
+from .builder import MATCH_COST
+
+
+@MATCH_COST.register_module()
+class BBoxL1Cost:
+    """BBoxL1Cost.
+
+    Args:
+        weight (int | float, optional): loss_weight
+        box_format (str, optional): 'xyxy' for DETR, 'xywh' for Sparse_RCNN
+
+    Examples:
+        >>> from mmcv.core.bbox.match_costs.match_cost import BBoxL1Cost
+        >>> import torch
+        >>> self = BBoxL1Cost()
+        >>> bbox_pred = torch.rand(1, 4)
+        >>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]])
+        >>> self(bbox_pred, gt_bboxes)
+        tensor([[1.6172, 1.6422]])
+    """
+
+    def __init__(self, weight=1., box_format='xyxy'):
+        self.weight = weight
+        assert box_format in ['xyxy', 'xywh']
+        self.box_format = box_format
+
+    def __call__(self, bbox_pred, gt_bboxes):
+        """
+        Args:
+            bbox_pred (Tensor): Predicted boxes with normalized coordinates
+                (cx, cy, w, h), which are all in range [0, 1]. Shape
+                [num_query, 4].
+            gt_bboxes (Tensor): Ground truth boxes with normalized
+                coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
+
+        Returns:
+            torch.Tensor: bbox_cost value with weight
+        """
+        if self.box_format == 'xywh':
+            gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes)
+        elif self.box_format == 'xyxy':
+            bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred)
+        bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1)
+        return bbox_cost * self.weight
+
+
+@MATCH_COST.register_module()
+class FocalLossCost:
+    """FocalLossCost.
+ + Args: + weight (int | float, optional): loss_weight + alpha (int | float, optional): focal_loss alpha + gamma (int | float, optional): focal_loss gamma + eps (float, optional): default 1e-12 + + Examples: + >>> from mmcv.core.bbox.match_costs.match_cost import FocalLossCost + >>> import torch + >>> self = FocalLossCost() + >>> cls_pred = torch.rand(4, 3) + >>> gt_labels = torch.tensor([0, 1, 2]) + >>> factor = torch.tensor([10, 8, 10, 8]) + >>> self(cls_pred, gt_labels) + tensor([[-0.3236, -0.3364, -0.2699], + [-0.3439, -0.3209, -0.4807], + [-0.4099, -0.3795, -0.2929], + [-0.1950, -0.1207, -0.2626]]) + """ + + def __init__(self, weight=1., alpha=0.25, gamma=2, eps=1e-12): + self.weight = weight + self.alpha = alpha + self.gamma = gamma + self.eps = eps + + def __call__(self, cls_pred, gt_labels): + """ + Args: + cls_pred (Tensor): Predicted classification logits, shape + [num_query, num_class]. + gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). + + Returns: + torch.Tensor: cls_cost value with weight + """ + cls_pred = cls_pred.sigmoid() + neg_cost = -(1 - cls_pred + self.eps).log() * ( + 1 - self.alpha) * cls_pred.pow(self.gamma) + pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( + 1 - cls_pred).pow(self.gamma) + cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels] + return cls_cost * self.weight + + +@MATCH_COST.register_module() +class ClassificationCost: + """ClsSoftmaxCost. + + Args: + weight (int | float, optional): loss_weight + + Examples: + >>> from mmcv.core.bbox.match_costs.match_cost import \ + ... ClassificationCost + >>> import torch + >>> self = ClassificationCost() + >>> cls_pred = torch.rand(4, 3) + >>> gt_labels = torch.tensor([0, 1, 2]) + >>> factor = torch.tensor([10, 8, 10, 8]) + >>> self(cls_pred, gt_labels) + tensor([[-0.3430, -0.3525, -0.3045], + [-0.3077, -0.2931, -0.3992], + [-0.3664, -0.3455, -0.2881], + [-0.3343, -0.2701, -0.3956]]) + """ + + def __init__(self, weight=1.): + self.weight = weight + + def __call__(self, cls_pred, gt_labels): + """ + Args: + cls_pred (Tensor): Predicted classification logits, shape + [num_query, num_class]. + gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). + + Returns: + torch.Tensor: cls_cost value with weight + """ + # Following the official DETR repo, contrary to the loss that + # NLL is used, we approximate it in 1 - cls_score[gt_label]. + # The 1 is a constant that doesn't change the matching, + # so it can be omitted. + cls_score = cls_pred.softmax(-1) + cls_cost = -cls_score[:, gt_labels] + return cls_cost * self.weight + + +@MATCH_COST.register_module() +class IoUCost: + """IoUCost. + + Args: + iou_mode (str, optional): iou mode such as 'iou' | 'giou' + weight (int | float, optional): loss weight + + Examples: + >>> from mmcv.core.bbox.match_costs.match_cost import IoUCost + >>> import torch + >>> self = IoUCost() + >>> bboxes = torch.FloatTensor([[1,1, 2, 2], [2, 2, 3, 4]]) + >>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]]) + >>> self(bboxes, gt_bboxes) + tensor([[-0.1250, 0.1667], + [ 0.1667, -0.5000]]) + """ + + def __init__(self, iou_mode='giou', weight=1.): + self.weight = weight + self.iou_mode = iou_mode + + def __call__(self, bboxes, gt_bboxes): + """ + Args: + bboxes (Tensor): Predicted boxes with unnormalized coordinates + (x1, y1, x2, y2). Shape [num_query, 4]. + gt_bboxes (Tensor): Ground truth boxes with unnormalized + coordinates (x1, y1, x2, y2). Shape [num_gt, 4]. 
+
+        Returns:
+            torch.Tensor: iou_cost value with weight
+        """
+        # overlaps: [num_bboxes, num_gt]
+        overlaps = bbox_overlaps(
+            bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False)
+        # The 1 is a constant that doesn't change the matching, so omitted.
+        iou_cost = -overlaps
+        return iou_cost * self.weight
+
+
+@MATCH_COST.register_module()
+class BBox3DL1Cost(object):
+    """BBox3DL1Cost.
+
+    Args:
+        weight (int | float, optional): loss_weight
+    """
+
+    def __init__(self, weight=1.):
+        self.weight = weight
+
+    def __call__(self, bbox_pred, gt_bboxes):
+        """
+        Args:
+            bbox_pred (Tensor): Predicted 3D boxes with normalized
+                coordinates. Shape [num_query, box_dim].
+            gt_bboxes (Tensor): Ground truth 3D boxes with normalized
+                coordinates. Shape [num_gt, box_dim].
+
+        Returns:
+            torch.Tensor: bbox_cost value with weight
+        """
+        bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1)
+        return bbox_cost * self.weight
+
+
+# @weighted_loss
+def smooth_l1_loss(pred, target, beta=1.0):
+    """Smooth L1 loss.
+
+    Args:
+        pred (torch.Tensor): The prediction.
+        target (torch.Tensor): The learning target of the prediction.
+        beta (float, optional): The threshold in the piecewise function.
+            Defaults to 1.0.
+
+    Returns:
+        torch.Tensor: Calculated loss
+    """
+    assert beta > 0
+    if target.numel() == 0:
+        return pred.sum() * 0
+
+    # assert pred.size() == target.size()
+    diff = torch.abs(pred - target)
+    loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
+                       diff - 0.5 * beta)
+    # note: this variant reduces over the last (channel) dimension
+    return loss.sum(-1)
+
+
+@MATCH_COST.register_module()
+class SmoothL1Cost(object):
+    """SmoothL1Cost.
+
+    Pairwise smooth-L1 matching cost between predicted and ground truth
+    vectors.
+
+    Args:
+        weight (int | float, optional): loss weight
+    """
+
+    def __init__(self, weight=1.):
+        self.weight = weight
+
+    def __call__(self, input, target):
+        """
+        Args:
+            input (Tensor): Predictions, shape [num_query, C].
+            target (Tensor): Ground truth, shape [num_gt, C].
+
+        Returns:
+            torch.Tensor: smooth_l1 cost value with weight,
+                shape [num_query, num_gt]
+        """
+        N1, C = input.shape
+        N2, C = target.shape
+        # broadcast to [num_query, num_gt, C], then reduce over C
+        input = input.contiguous().view(N1, C)[:, None, :]
+        target = target.contiguous().view(N2, C)[None, :, :]
+        cost = smooth_l1_loss(input, target)
+
+        return cost * self.weight
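+
+
+# Editor's note: a sketch (not in the original patch) of how these costs are
+# combined in DETR-style assignment. Each cost maps predictions and targets
+# to a [num_query, num_gt] matrix; an assigner sums the weighted terms and
+# solves a Hungarian matching (the weights below are illustrative):
+#
+#     >>> from scipy.optimize import linear_sum_assignment
+#     >>> cls_pred = torch.rand(5, 3)                      # logits
+#     >>> bbox_pred = torch.rand(5, 4)                     # (cx, cy, w, h)
+#     >>> gt_labels = torch.tensor([0, 2])
+#     >>> gt_bboxes = torch.tensor([[0.1, 0.1, 0.3, 0.3],  # (x1, y1, x2, y2)
+#     >>>                           [0.5, 0.5, 0.9, 0.9]])
+#     >>> cost = (FocalLossCost(weight=2.0)(cls_pred, gt_labels)
+#     >>>         + BBoxL1Cost(weight=5.0)(bbox_pred, gt_bboxes)
+#     >>>         + IoUCost(iou_mode='giou', weight=2.0)(
+#     >>>               bbox_cxcywh_to_xyxy(bbox_pred), gt_bboxes))
+#     >>> rows, cols = linear_sum_assignment(cost.numpy())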
+
+
+@MATCH_COST.register_module()
+class DiceCost(object):
+    """DiceCost.
+
+    Pairwise Dice cost between predicted and ground truth masks.
+
+    Args:
+        weight (int | float, optional): loss weight
+    """
+
+    def __init__(self, weight=1.):
+        self.weight = weight
+        self.count = 0
+
+    def __call__(self, input, target):
+        """
+        Args:
+            input (Tensor): Predicted masks, shape [num_query, H, W].
+            target (Tensor): Ground truth masks, shape [num_gt, H', W'].
+
+        Returns:
+            torch.Tensor: dice cost value with weight,
+                shape [num_query, num_gt]
+        """
+        N1, H1, W1 = input.shape
+        N2, H2, W2 = target.shape
+
+        # resize the targets to the prediction resolution if they differ
+        if H1 != H2 or W1 != W2:
+            target = F.interpolate(
+                target.unsqueeze(0), size=(H1, W1),
+                mode='bilinear').squeeze(0)
+
+        input = input.contiguous().view(N1, -1)[:, None, :]
+        target = target.contiguous().view(N2, -1)[None, :, :]
+
+        # soft dice: 2 * sum(a*b) / (sum(a*a) + sum(b*b)), 0.001 for stability
+        a = torch.sum(input * target, -1)
+        b = torch.sum(input * input, -1) + 0.001
+        c = torch.sum(target * target, -1) + 0.001
+        d = (2 * a) / (b + c)
+        return (1 - d) * self.weight
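+
+
+# Editor's note: a small numeric illustration of the soft dice cost above
+# (an editorial sketch, not part of the original patch):
+if __name__ == '__main__':
+    masks = (torch.rand(2, 8, 8) > 0.5).float()
+    # dice(a, b) = 2*sum(a*b) / (sum(a*a) + sum(b*b)); cost = 1 - dice, so
+    # identical masks give ~0 on the diagonal, disjoint masks give ~1.
+    print(DiceCost(weight=1.0)(masks, masks))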
diff --git a/mmcv/core/bbox/samplers/__init__.py b/mmcv/core/bbox/samplers/__init__.py
new file mode 100644
index 0000000..3a743dc
--- /dev/null
+++ b/mmcv/core/bbox/samplers/__init__.py
@@ -0,0 +1,6 @@
+from .pseudo_sampler import PseudoSampler
+
+
+__all__ = [
+    'PseudoSampler'
+]
diff --git a/mmcv/core/bbox/samplers/base_sampler.py b/mmcv/core/bbox/samplers/base_sampler.py
new file mode 100644
index 0000000..1534082
--- /dev/null
+++ b/mmcv/core/bbox/samplers/base_sampler.py
@@ -0,0 +1,101 @@
+from abc import ABCMeta, abstractmethod
+
+import torch
+
+from .sampling_result import SamplingResult
+
+
+class BaseSampler(metaclass=ABCMeta):
+    """Base class of samplers."""
+
+    def __init__(self,
+                 num,
+                 pos_fraction,
+                 neg_pos_ub=-1,
+                 add_gt_as_proposals=True,
+                 **kwargs):
+        self.num = num
+        self.pos_fraction = pos_fraction
+        self.neg_pos_ub = neg_pos_ub
+        self.add_gt_as_proposals = add_gt_as_proposals
+        self.pos_sampler = self
+        self.neg_sampler = self
+
+    @abstractmethod
+    def _sample_pos(self, assign_result, num_expected, **kwargs):
+        """Sample positive samples."""
+        pass
+
+    @abstractmethod
+    def _sample_neg(self, assign_result, num_expected, **kwargs):
+        """Sample negative samples."""
+        pass
+
+    def sample(self,
+               assign_result,
+               bboxes,
+               gt_bboxes,
+               gt_labels=None,
+               **kwargs):
+        """Sample positive and negative bboxes.
+
+        This is a simple implementation of bbox sampling given candidates,
+        assigning results and ground truth bboxes.
+
+        Args:
+            assign_result (:obj:`AssignResult`): Bbox assigning results.
+            bboxes (Tensor): Boxes to be sampled from.
+            gt_bboxes (Tensor): Ground truth bboxes.
+            gt_labels (Tensor, optional): Class labels of ground truth bboxes.
+
+        Returns:
+            :obj:`SamplingResult`: Sampling result.
+
+        Example:
+            >>> from mmcv.core.bbox import RandomSampler
+            >>> from mmcv.core.bbox import AssignResult
+            >>> from mmcv.core.bbox.demodata import ensure_rng, random_boxes
+            >>> rng = ensure_rng(None)
+            >>> assign_result = AssignResult.random(rng=rng)
+            >>> bboxes = random_boxes(assign_result.num_preds, rng=rng)
+            >>> gt_bboxes = random_boxes(assign_result.num_gts, rng=rng)
+            >>> gt_labels = None
+            >>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1,
+            >>>                      add_gt_as_proposals=False)
+            >>> self = self.sample(assign_result, bboxes, gt_bboxes, gt_labels)
+        """
+        if len(bboxes.shape) < 2:
+            bboxes = bboxes[None, :]
+
+        bboxes = bboxes[:, :4]
+
+        gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)
+        if self.add_gt_as_proposals and len(gt_bboxes) > 0:
+            if gt_labels is None:
+                raise ValueError(
+                    'gt_labels must be given when add_gt_as_proposals is True')
+            bboxes = torch.cat([gt_bboxes, bboxes], dim=0)
+            assign_result.add_gt_(gt_labels)
+            gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
+            gt_flags = torch.cat([gt_ones, gt_flags])
+
+        num_expected_pos = int(self.num * self.pos_fraction)
+        pos_inds = self.pos_sampler._sample_pos(
+            assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
+        # We found that sampled indices have duplicated items occasionally.
+        # (may be a bug of PyTorch)
+        pos_inds = pos_inds.unique()
+        num_sampled_pos = pos_inds.numel()
+        num_expected_neg = self.num - num_sampled_pos
+        if self.neg_pos_ub >= 0:
+            _pos = max(1, num_sampled_pos)
+            neg_upper_bound = int(self.neg_pos_ub * _pos)
+            if num_expected_neg > neg_upper_bound:
+                num_expected_neg = neg_upper_bound
+        neg_inds = self.neg_sampler._sample_neg(
+            assign_result, num_expected_neg, bboxes=bboxes, **kwargs)
+        neg_inds = neg_inds.unique()
+
+        sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
+                                         assign_result, gt_flags)
+        return sampling_result
diff --git a/mmcv/core/bbox/samplers/pseudo_sampler.py b/mmcv/core/bbox/samplers/pseudo_sampler.py
new file mode 100644
index 0000000..2bd81ab
--- /dev/null
+++ b/mmcv/core/bbox/samplers/pseudo_sampler.py
@@ -0,0 +1,41 @@
+import torch
+
+from ..builder import BBOX_SAMPLERS
+from .base_sampler import BaseSampler
+from .sampling_result import SamplingResult
+
+
+@BBOX_SAMPLERS.register_module()
+class PseudoSampler(BaseSampler):
+    """A pseudo sampler that does not do sampling actually."""
+
+    def __init__(self, **kwargs):
+        pass
+
+    def _sample_pos(self, **kwargs):
+        """Sample positive samples."""
+        raise NotImplementedError
+
+    def _sample_neg(self, **kwargs):
+        """Sample negative samples."""
+        raise NotImplementedError
+
+    def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
+        """Directly returns the positive and negative indices of samples.
+ + Args: + assign_result (:obj:`AssignResult`): Assigned results + bboxes (torch.Tensor): Bounding boxes + gt_bboxes (torch.Tensor): Ground truth boxes + + Returns: + :obj:`SamplingResult`: sampler results + """ + pos_inds = torch.nonzero( + assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique() + neg_inds = torch.nonzero( + assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique() + gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8) + sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, + assign_result, gt_flags) + return sampling_result diff --git a/mmcv/core/bbox/samplers/sampling_result.py b/mmcv/core/bbox/samplers/sampling_result.py new file mode 100644 index 0000000..06eff7e --- /dev/null +++ b/mmcv/core/bbox/samplers/sampling_result.py @@ -0,0 +1,152 @@ +import torch + +from mmcv.utils import util_mixins + + +class SamplingResult(util_mixins.NiceRepr): + """Bbox sampling result. + + Example: + >>> # xdoctest: +IGNORE_WANT + >>> from mmcv.core.bbox.samplers.sampling_result import * # NOQA + >>> self = SamplingResult.random(rng=10) + >>> print(f'self = {self}') + self = + """ + + def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, + gt_flags): + self.pos_inds = pos_inds + self.neg_inds = neg_inds + self.pos_bboxes = bboxes[pos_inds] + self.neg_bboxes = bboxes[neg_inds] + self.pos_is_gt = gt_flags[pos_inds] + + self.num_gts = gt_bboxes.shape[0] + self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 + + if gt_bboxes.numel() == 0: + # hack for index error case + assert self.pos_assigned_gt_inds.numel() == 0 + self.pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4) + else: + if len(gt_bboxes.shape) < 2: + gt_bboxes = gt_bboxes.view(-1, 4) + + self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :] + + if assign_result.labels is not None: + self.pos_gt_labels = assign_result.labels[pos_inds] + else: + self.pos_gt_labels = None + + @property + def bboxes(self): + """torch.Tensor: concatenated positive and negative boxes""" + return torch.cat([self.pos_bboxes, self.neg_bboxes]) + + def to(self, device): + """Change the device of the data inplace. + + Example: + >>> self = SamplingResult.random() + >>> print(f'self = {self.to(None)}') + >>> # xdoctest: +REQUIRES(--gpu) + >>> print(f'self = {self.to(0)}') + """ + _dict = self.__dict__ + for key, value in _dict.items(): + if isinstance(value, torch.Tensor): + _dict[key] = value.to(device) + return self + + def __nice__(self): + data = self.info.copy() + data['pos_bboxes'] = data.pop('pos_bboxes').shape + data['neg_bboxes'] = data.pop('neg_bboxes').shape + parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())] + body = ' ' + ',\n '.join(parts) + return '{\n' + body + '\n}' + + @property + def info(self): + """Returns a dictionary of info about the object.""" + return { + 'pos_inds': self.pos_inds, + 'neg_inds': self.neg_inds, + 'pos_bboxes': self.pos_bboxes, + 'neg_bboxes': self.neg_bboxes, + 'pos_is_gt': self.pos_is_gt, + 'num_gts': self.num_gts, + 'pos_assigned_gt_inds': self.pos_assigned_gt_inds, + } + + @classmethod + def random(cls, rng=None, **kwargs): + """ + Args: + rng (None | int | numpy.random.RandomState): seed or state. + kwargs (keyword arguments): + - num_preds: number of predicted boxes + - num_gts: number of true boxes + - p_ignore (float): probability of a predicted box assigned to \ + an ignored truth. + - p_assigned (float): probability of a predicted box not being \ + assigned. + - p_use_label (float | bool): with labels or not. 
+
+        Returns:
+            :obj:`SamplingResult`: Randomly generated sampling result.
+
+        Example:
+            >>> from mmcv.core.bbox.samplers.sampling_result import *  # NOQA
+            >>> self = SamplingResult.random()
+            >>> print(self.__dict__)
+        """
+        from mmcv.core.bbox.samplers.random_sampler import RandomSampler
+        from mmcv.core.bbox.assigners.assign_result import AssignResult
+        from mmcv.core.bbox import demodata
+        rng = demodata.ensure_rng(rng)
+
+        # make probabilistic?
+        num = 32
+        pos_fraction = 0.5
+        neg_pos_ub = -1
+
+        assign_result = AssignResult.random(rng=rng, **kwargs)
+
+        # Note we could just compute an assignment
+        bboxes = demodata.random_boxes(assign_result.num_preds, rng=rng)
+        gt_bboxes = demodata.random_boxes(assign_result.num_gts, rng=rng)
+
+        if rng.rand() > 0.2:
+            # sometimes algorithms squeeze their data, be robust to that
+            gt_bboxes = gt_bboxes.squeeze()
+            bboxes = bboxes.squeeze()
+
+        if assign_result.labels is None:
+            gt_labels = None
+        else:
+            gt_labels = None  # todo
+
+        if gt_labels is None:
+            add_gt_as_proposals = False
+        else:
+            add_gt_as_proposals = True  # make probabilistic?
+
+        sampler = RandomSampler(
+            num,
+            pos_fraction,
+            neg_pos_ub=neg_pos_ub,
+            add_gt_as_proposals=add_gt_as_proposals,
+            rng=rng)
+        self = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
+        return self
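+
+
+# Editor's note: a minimal end-to-end sketch of the sampling flow (not part
+# of the original patch; assumes the usual AssignResult(num_gts, gt_inds,
+# max_overlaps, labels) constructor):
+if __name__ == '__main__':
+    from mmcv.core.bbox.samplers import PseudoSampler
+    from mmcv.core.bbox.assigners.assign_result import AssignResult
+    bboxes = torch.rand(4, 4)
+    gt_bboxes = torch.rand(2, 4)
+    # gt_inds: 0 marks a negative, i > 0 means assigned to gt i - 1
+    assign = AssignResult(2, torch.tensor([1, 0, 2, 0]), torch.rand(4),
+                          torch.tensor([0, -1, 1, -1]))
+    res = PseudoSampler().sample(assign, bboxes, gt_bboxes)
+    # res.pos_inds -> tensor([0, 2]), res.neg_inds -> tensor([1, 3])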
+ """ + + def __init__(self, tensor, box_dim=7, with_yaw=True, origin=(0.5, 0.5, 0)): + if isinstance(tensor, torch.Tensor): + device = tensor.device + else: + device = torch.device('cpu') + tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) + if tensor.numel() == 0: + # Use reshape, so we don't end up creating a new tensor that + # does not depend on the inputs (and consequently confuses jit) + tensor = tensor.reshape((0, box_dim)).to( + dtype=torch.float32, device=device) + assert tensor.dim() == 2 and tensor.size(-1) == box_dim, tensor.size() + + if tensor.shape[-1] == 6: + # If the dimension of boxes is 6, we expand box_dim by padding + # 0 as a fake yaw and set with_yaw to False. + assert box_dim == 6 + fake_rot = tensor.new_zeros(tensor.shape[0], 1) + tensor = torch.cat((tensor, fake_rot), dim=-1) + self.box_dim = box_dim + 1 + self.with_yaw = False + else: + self.box_dim = box_dim + self.with_yaw = with_yaw + self.tensor = tensor.clone() + + if origin != (0.5, 0.5, 0): + dst = self.tensor.new_tensor((0.5, 0.5, 0)) + src = self.tensor.new_tensor(origin) + self.tensor[:, :3] += self.tensor[:, 3:6] * (dst - src) + + @property + def volume(self): + """torch.Tensor: A vector with volume of each box.""" + return self.tensor[:, 3] * self.tensor[:, 4] * self.tensor[:, 5] + + @property + def dims(self): + """torch.Tensor: Corners of each box with size (N, 8, 3).""" + return self.tensor[:, 3:6] + + @property + def yaw(self): + """torch.Tensor: A vector with yaw of each box.""" + return self.tensor[:, 6] + + @property + def height(self): + """torch.Tensor: A vector with height of each box.""" + return self.tensor[:, 5] + + @property + def top_height(self): + """torch.Tensor: A vector with the top height of each box.""" + return self.bottom_height + self.height + + @property + def bottom_height(self): + """torch.Tensor: A vector with bottom's height of each box.""" + return self.tensor[:, 2] + + @property + def center(self): + """Calculate the center of all the boxes. + + Note: + In the MMDetection3D's convention, the bottom center is + usually taken as the default center. + + The relative position of the centers in different kinds of + boxes are different, e.g., the relative center of a boxes is + (0.5, 1.0, 0.5) in camera and (0.5, 0.5, 0) in lidar. + It is recommended to use ``bottom_center`` or ``gravity_center`` + for more clear usage. + + Returns: + torch.Tensor: A tensor with center of each box. + """ + return self.bottom_center + + @property + def bottom_center(self): + """torch.Tensor: A tensor with center of each box.""" + return self.tensor[:, :3] + + @property + def gravity_center(self): + """torch.Tensor: A tensor with center of each box.""" + pass + + @property + def corners(self): + """torch.Tensor: a tensor with 8 corners of each box.""" + pass + + @abstractmethod + def rotate(self, angle, points=None): + """Rotate boxes with points (optional) with the given angle or \ + rotation matrix. + + Args: + angle (float | torch.Tensor | np.ndarray): + Rotation angle or rotation matrix. + points (torch.Tensor, numpy.ndarray, :obj:`BasePoints`, optional): + Points to rotate. Defaults to None. + """ + pass + + @abstractmethod + def flip(self, bev_direction='horizontal'): + """Flip the boxes in BEV along given BEV direction.""" + pass + + def translate(self, trans_vector): + """Translate boxes with the given translation vector. + + Args: + trans_vector (torch.Tensor): Translation vector of size 1x3. 
+ """ + if not isinstance(trans_vector, torch.Tensor): + trans_vector = self.tensor.new_tensor(trans_vector) + self.tensor[:, :3] += trans_vector + + def in_range_3d(self, box_range): + """Check whether the boxes are in the given range. + + Args: + box_range (list | torch.Tensor): The range of box + (x_min, y_min, z_min, x_max, y_max, z_max) + + Note: + In the original implementation of SECOND, checking whether + a box in the range checks whether the points are in a convex + polygon, we try to reduce the burden for simpler cases. + + Returns: + torch.Tensor: A binary vector indicating whether each box is \ + inside the reference range. + """ + in_range_flags = ((self.tensor[:, 0] > box_range[0]) + & (self.tensor[:, 1] > box_range[1]) + & (self.tensor[:, 2] > box_range[2]) + & (self.tensor[:, 0] < box_range[3]) + & (self.tensor[:, 1] < box_range[4]) + & (self.tensor[:, 2] < box_range[5])) + return in_range_flags + + @abstractmethod + def in_range_bev(self, box_range): + """Check whether the boxes are in the given range. + + Args: + box_range (list | torch.Tensor): The range of box + in order of (x_min, y_min, x_max, y_max). + + Returns: + torch.Tensor: Indicating whether each box is inside \ + the reference range. + """ + pass + + @abstractmethod + def convert_to(self, dst, rt_mat=None): + """Convert self to ``dst`` mode. + + Args: + dst (:obj:`Box3DMode`): The target Box mode. + rt_mat (np.ndarray | torch.Tensor): The rotation and translation + matrix between different coordinates. Defaults to None. + The conversion from `src` coordinates to `dst` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + + Returns: + :obj:`BaseInstance3DBoxes`: The converted box of the same type \ + in the `dst` mode. + """ + pass + + def scale(self, scale_factor): + """Scale the box with horizontal and vertical scaling factors. + + Args: + scale_factors (float): Scale factors to scale the boxes. + """ + self.tensor[:, :6] *= scale_factor + self.tensor[:, 7:] *= scale_factor + + def limit_yaw(self, offset=0.5, period=np.pi): + """Limit the yaw to a given period and offset. + + Args: + offset (float): The offset of the yaw. + period (float): The expected period. + """ + self.tensor[:, 6] = limit_period(self.tensor[:, 6], offset, period) + + def nonempty(self, threshold: float = 0.0): + """Find boxes that are non-empty. + + A box is considered empty, + if either of its side is no larger than threshold. + + Args: + threshold (float): The threshold of minimal sizes. + + Returns: + torch.Tensor: A binary vector which represents whether each \ + box is empty (False) or non-empty (True). + """ + box = self.tensor + size_x = box[..., 3] + size_y = box[..., 4] + size_z = box[..., 5] + keep = ((size_x > threshold) + & (size_y > threshold) & (size_z > threshold)) + return keep + + def __getitem__(self, item): + """ + Note: + The following usage are allowed: + 1. `new_boxes = boxes[3]`: + return a `Boxes` that contains only one box. + 2. `new_boxes = boxes[2:10]`: + return a slice of boxes. + 3. `new_boxes = boxes[vector]`: + where vector is a torch.BoolTensor with `length = len(boxes)`. + Nonzero elements in the vector will be selected. + Note that the returned Boxes might share storage with this Boxes, + subject to Pytorch's indexing semantics. + + Returns: + :obj:`BaseInstance3DBoxes`: A new object of \ + :class:`BaseInstances3DBoxes` after indexing. 
+ """ + original_type = type(self) + if isinstance(item, int): + return original_type( + self.tensor[item].view(1, -1), + box_dim=self.box_dim, + with_yaw=self.with_yaw) + b = self.tensor[item] + assert b.dim() == 2, \ + f'Indexing on Boxes with {item} failed to return a matrix!' + return original_type(b, box_dim=self.box_dim, with_yaw=self.with_yaw) + + def __len__(self): + """int: Number of boxes in the current object.""" + return self.tensor.shape[0] + + def __repr__(self): + """str: Return a strings that describes the object.""" + return self.__class__.__name__ + '(\n ' + str(self.tensor) + ')' + + @classmethod + def cat(cls, boxes_list): + """Concatenate a list of Boxes into a single Boxes. + + Args: + boxes_list (list[:obj:`BaseInstance3DBoxes`]): List of boxes. + + Returns: + :obj:`BaseInstance3DBoxes`: The concatenated Boxes. + """ + assert isinstance(boxes_list, (list, tuple)) + if len(boxes_list) == 0: + return cls(torch.empty(0)) + assert all(isinstance(box, cls) for box in boxes_list) + + # use torch.cat (v.s. layers.cat) + # so the returned boxes never share storage with input + cat_boxes = cls( + torch.cat([b.tensor for b in boxes_list], dim=0), + box_dim=boxes_list[0].tensor.shape[1], + with_yaw=boxes_list[0].with_yaw) + return cat_boxes + + def to(self, device): + """Convert current boxes to a specific device. + + Args: + device (str | :obj:`torch.device`): The name of the device. + + Returns: + :obj:`BaseInstance3DBoxes`: A new boxes object on the \ + specific device. + """ + original_type = type(self) + return original_type( + self.tensor.to(device), + box_dim=self.box_dim, + with_yaw=self.with_yaw) + + def clone(self): + """Clone the Boxes. + + Returns: + :obj:`BaseInstance3DBoxes`: Box object with the same properties \ + as self. + """ + original_type = type(self) + return original_type( + self.tensor.clone(), box_dim=self.box_dim, with_yaw=self.with_yaw) + + @property + def device(self): + """str: The device of the boxes are on.""" + return self.tensor.device + + def __iter__(self): + """Yield a box as a Tensor of shape (4,) at a time. + + Returns: + torch.Tensor: A box of shape (4,). + """ + yield from self.tensor + + @classmethod + def height_overlaps(cls, boxes1, boxes2, mode='iou'): + """Calculate height overlaps of two boxes. + + Note: + This function calculates the height overlaps between boxes1 and + boxes2, boxes1 and boxes2 should be in the same type. + + Args: + boxes1 (:obj:`BaseInstance3DBoxes`): Boxes 1 contain N boxes. + boxes2 (:obj:`BaseInstance3DBoxes`): Boxes 2 contain M boxes. + mode (str, optional): Mode of iou calculation. Defaults to 'iou'. + + Returns: + torch.Tensor: Calculated iou of boxes. + """ + assert isinstance(boxes1, BaseInstance3DBoxes) + assert isinstance(boxes2, BaseInstance3DBoxes) + assert type(boxes1) == type(boxes2), '"boxes1" and "boxes2" should' \ + f'be in the same type, got {type(boxes1)} and {type(boxes2)}.' + + boxes1_top_height = boxes1.top_height.view(-1, 1) + boxes1_bottom_height = boxes1.bottom_height.view(-1, 1) + boxes2_top_height = boxes2.top_height.view(1, -1) + boxes2_bottom_height = boxes2.bottom_height.view(1, -1) + + heighest_of_bottom = torch.max(boxes1_bottom_height, + boxes2_bottom_height) + lowest_of_top = torch.min(boxes1_top_height, boxes2_top_height) + overlaps_h = torch.clamp(lowest_of_top - heighest_of_bottom, min=0) + return overlaps_h + + @classmethod + def overlaps(cls, boxes1, boxes2, mode='iou'): + """Calculate 3D overlaps of two boxes. 
+    @classmethod
+    def overlaps(cls, boxes1, boxes2, mode='iou'):
+        """Calculate 3D overlaps of two boxes.
+
+        Note:
+            This function calculates the overlaps between ``boxes1`` and
+            ``boxes2``; ``boxes1`` and ``boxes2`` should be in the same type.
+
+        Args:
+            boxes1 (:obj:`BaseInstance3DBoxes`): Boxes 1 contain N boxes.
+            boxes2 (:obj:`BaseInstance3DBoxes`): Boxes 2 contain M boxes.
+            mode (str, optional): Mode of iou calculation. Defaults to 'iou'.
+
+        Returns:
+            torch.Tensor: Calculated 3D IoU of boxes.
+        """
+        assert isinstance(boxes1, BaseInstance3DBoxes)
+        assert isinstance(boxes2, BaseInstance3DBoxes)
+        assert type(boxes1) == type(boxes2), \
+            '"boxes1" and "boxes2" should be in the same type, ' \
+            f'got {type(boxes1)} and {type(boxes2)}.'
+
+        assert mode in ['iou', 'iof']
+
+        rows = len(boxes1)
+        cols = len(boxes2)
+        if rows * cols == 0:
+            return boxes1.tensor.new(rows, cols)
+
+        # height overlap
+        overlaps_h = cls.height_overlaps(boxes1, boxes2)
+
+        # obtain BEV boxes in XYXYR format
+        boxes1_bev = xywhr2xyxyr(boxes1.bev)
+        boxes2_bev = xywhr2xyxyr(boxes2.bev)
+
+        # bev overlap
+        overlaps_bev = boxes1_bev.new_zeros(
+            (boxes1_bev.shape[0], boxes2_bev.shape[0])).cuda()  # (N, M)
+        iou3d_cuda.boxes_overlap_bev_gpu(boxes1_bev.contiguous().cuda(),
+                                         boxes2_bev.contiguous().cuda(),
+                                         overlaps_bev)
+
+        # 3d overlaps
+        overlaps_3d = overlaps_bev.to(boxes1.device) * overlaps_h
+
+        volume1 = boxes1.volume.view(-1, 1)
+        volume2 = boxes2.volume.view(1, -1)
+
+        if mode == 'iou':
+            # the clamp func is used to avoid division by 0
+            iou3d = overlaps_3d / torch.clamp(
+                volume1 + volume2 - overlaps_3d, min=1e-8)
+        else:
+            iou3d = overlaps_3d / torch.clamp(volume1, min=1e-8)
+
+        return iou3d
+
+    def new_box(self, data):
+        """Create a new box object with data.
+
+        The new box and its tensor have similar properties \
+        to self and self.tensor, respectively.
+
+        Args:
+            data (torch.Tensor | numpy.array | list): Data to be copied.
+
+        Returns:
+            :obj:`BaseInstance3DBoxes`: A new bbox object with ``data``, \
+                the object's other properties are similar to ``self``.
+        """
+        new_tensor = self.tensor.new_tensor(data) \
+            if not isinstance(data, torch.Tensor) else data.to(self.device)
+        original_type = type(self)
+        return original_type(
+            new_tensor, box_dim=self.box_dim, with_yaw=self.with_yaw)
diff --git a/mmcv/core/bbox/structures/box_3d_mode.py b/mmcv/core/bbox/structures/box_3d_mode.py
new file mode 100644
index 0000000..6e2db4f
--- /dev/null
+++ b/mmcv/core/bbox/structures/box_3d_mode.py
@@ -0,0 +1,166 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import numpy as np
+import torch
+from enum import IntEnum, unique
+
+from .base_box3d import BaseInstance3DBoxes
+from .cam_box3d import CameraInstance3DBoxes
+from .depth_box3d import DepthInstance3DBoxes
+from .lidar_box3d import LiDARInstance3DBoxes
+
+
+@unique
+class Box3DMode(IntEnum):
+    r"""Enum of different ways to represent a box.
+
+    Coordinates in LiDAR:
+
+    .. code-block:: none
+
+                    up z
+                       ^   x front
+                       |  /
+                       | /
+        left y <------ 0
+
+    The relative coordinate of bottom center in a LiDAR box is (0.5, 0.5, 0),
+    and the yaw is around the z axis, thus the rotation axis=2.
+
+    Coordinates in camera:
+
+    .. code-block:: none
+
+                z front
+               /
+              /
+             0 ------> x right
+             |
+             |
+             v
+        down y
+
+    The relative coordinate of bottom center in a CAM box is [0.5, 1.0, 0.5],
+    and the yaw is around the y axis, thus the rotation axis=1.
+
+    Coordinates in Depth mode:
+
+    ..
code-block:: none + + up z + ^ y front + | / + | / + 0 ------> x right + + The relative coordinate of bottom center in a DEPTH box is (0.5, 0.5, 0), + and the yaw is around the z axis, thus the rotation axis=2. + """ + + LIDAR = 0 + CAM = 1 + DEPTH = 2 + + @staticmethod + def convert(box, src, dst, rt_mat=None): + """Convert boxes from `src` mode to `dst` mode. + + Args: + box (tuple | list | np.ndarray | + torch.Tensor | BaseInstance3DBoxes): + Can be a k-tuple, k-list or an Nxk array/tensor, where k = 7. + src (:obj:`Box3DMode`): The src Box mode. + dst (:obj:`Box3DMode`): The target Box mode. + rt_mat (np.ndarray | torch.Tensor): The rotation and translation + matrix between different coordinates. Defaults to None. + The conversion from `src` coordinates to `dst` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + + Returns: + (tuple | list | np.ndarray | torch.Tensor | BaseInstance3DBoxes): \ + The converted box of the same type. + """ + if src == dst: + return box + + is_numpy = isinstance(box, np.ndarray) + is_Instance3DBoxes = isinstance(box, BaseInstance3DBoxes) + single_box = isinstance(box, (list, tuple)) + if single_box: + assert len(box) >= 7, ( + 'Box3DMode.convert takes either a k-tuple/list or ' + 'an Nxk array/tensor, where k >= 7') + arr = torch.tensor(box)[None, :] + else: + # avoid modifying the input box + if is_numpy: + arr = torch.from_numpy(np.asarray(box)).clone() + elif is_Instance3DBoxes: + arr = box.tensor.clone() + else: + arr = box.clone() + + # convert box from `src` mode to `dst` mode. + x_size, y_size, z_size = arr[..., 3:4], arr[..., 4:5], arr[..., 5:6] + if src == Box3DMode.LIDAR and dst == Box3DMode.CAM: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, -1, 0], [0, 0, -1], [1, 0, 0]]) + xyz_size = torch.cat([y_size, z_size, x_size], dim=-1) + elif src == Box3DMode.CAM and dst == Box3DMode.LIDAR: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, 0, 1], [-1, 0, 0], [0, -1, 0]]) + xyz_size = torch.cat([z_size, x_size, y_size], dim=-1) + elif src == Box3DMode.DEPTH and dst == Box3DMode.CAM: + if rt_mat is None: + rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, 1], [0, -1, 0]]) + xyz_size = torch.cat([x_size, z_size, y_size], dim=-1) + elif src == Box3DMode.CAM and dst == Box3DMode.DEPTH: + if rt_mat is None: + rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, -1], [0, 1, 0]]) + xyz_size = torch.cat([x_size, z_size, y_size], dim=-1) + elif src == Box3DMode.LIDAR and dst == Box3DMode.DEPTH: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, -1, 0], [1, 0, 0], [0, 0, 1]]) + xyz_size = torch.cat([y_size, x_size, z_size], dim=-1) + elif src == Box3DMode.DEPTH and dst == Box3DMode.LIDAR: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, 1, 0], [-1, 0, 0], [0, 0, 1]]) + xyz_size = torch.cat([y_size, x_size, z_size], dim=-1) + else: + raise NotImplementedError( + f'Conversion from Box3DMode {src} to {dst} ' + 'is not supported yet') + + if not isinstance(rt_mat, torch.Tensor): + rt_mat = arr.new_tensor(rt_mat) + if rt_mat.size(1) == 4: + extended_xyz = torch.cat( + [arr[:, :3], arr.new_ones(arr.size(0), 1)], dim=-1) + xyz = extended_xyz @ rt_mat.t() + else: + xyz = arr[:, :3] @ rt_mat.t() + + remains = arr[..., 6:] + arr = torch.cat([xyz[:, :3], xyz_size, remains], dim=-1) + + # convert arr to the original type + original_type = type(box) + if single_box: + return original_type(arr.flatten().tolist()) + if is_numpy: + return arr.numpy() + elif is_Instance3DBoxes: + if dst == Box3DMode.CAM: + 
target_type = CameraInstance3DBoxes + elif dst == Box3DMode.LIDAR: + target_type = LiDARInstance3DBoxes + elif dst == Box3DMode.DEPTH: + target_type = DepthInstance3DBoxes + else: + raise NotImplementedError( + f'Conversion to {dst} through {original_type}' + ' is not supported yet') + return target_type( + arr, box_dim=arr.size(-1), with_yaw=box.with_yaw) + else: + return arr diff --git a/mmcv/core/bbox/structures/cam_box3d.py b/mmcv/core/bbox/structures/cam_box3d.py new file mode 100644 index 0000000..2f0a74b --- /dev/null +++ b/mmcv/core/bbox/structures/cam_box3d.py @@ -0,0 +1,324 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + +from mmcv.core.points import BasePoints +from .base_box3d import BaseInstance3DBoxes +from .utils import limit_period, rotation_3d_in_axis + + +class CameraInstance3DBoxes(BaseInstance3DBoxes): + """3D boxes of instances in CAM coordinates. + + Coordinates in camera: + + .. code-block:: none + + z front (yaw=-0.5*pi) + / + / + 0 ------> x right (yaw=0) + | + | + v + down y + + The relative coordinate of bottom center in a CAM box is (0.5, 1.0, 0.5), + and the yaw is around the y axis, thus the rotation axis=1. + The yaw is 0 at the positive direction of x axis, and decreases from + the positive direction of x to the positive direction of z. + + A refactor is ongoing to make the three coordinate systems + easier to understand and convert between each other. + + Attributes: + tensor (torch.Tensor): Float matrix of N x box_dim. + box_dim (int): Integer indicates the dimension of a box + Each row is (x, y, z, x_size, y_size, z_size, yaw, ...). + with_yaw (bool): If True, the value of yaw will be set to 0 as minmax + boxes. + """ + + def __init__(self, + tensor, + box_dim=7, + with_yaw=True, + origin=(0.5, 1.0, 0.5)): + if isinstance(tensor, torch.Tensor): + device = tensor.device + else: + device = torch.device('cpu') + tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) + if tensor.numel() == 0: + # Use reshape, so we don't end up creating a new tensor that + # does not depend on the inputs (and consequently confuses jit) + tensor = tensor.reshape((0, box_dim)).to( + dtype=torch.float32, device=device) + assert tensor.dim() == 2 and tensor.size(-1) == box_dim, tensor.size() + + if tensor.shape[-1] == 6: + # If the dimension of boxes is 6, we expand box_dim by padding + # 0 as a fake yaw and set with_yaw to False. 
+ assert box_dim == 6 + fake_rot = tensor.new_zeros(tensor.shape[0], 1) + tensor = torch.cat((tensor, fake_rot), dim=-1) + self.box_dim = box_dim + 1 + self.with_yaw = False + else: + self.box_dim = box_dim + self.with_yaw = with_yaw + self.tensor = tensor.clone() + + if origin != (0.5, 1.0, 0.5): + dst = self.tensor.new_tensor((0.5, 1.0, 0.5)) + src = self.tensor.new_tensor(origin) + self.tensor[:, :3] += self.tensor[:, 3:6] * (dst - src) + + @property + def height(self): + """torch.Tensor: A vector with height of each box.""" + return self.tensor[:, 4] + + @property + def top_height(self): + """torch.Tensor: A vector with the top height of each box.""" + # the positive direction is down rather than up + return self.bottom_height - self.height + + @property + def bottom_height(self): + """torch.Tensor: A vector with bottom's height of each box.""" + return self.tensor[:, 1] + + @property + def gravity_center(self): + """torch.Tensor: A tensor with center of each box.""" + bottom_center = self.bottom_center + gravity_center = torch.zeros_like(bottom_center) + gravity_center[:, [0, 2]] = bottom_center[:, [0, 2]] + gravity_center[:, 1] = bottom_center[:, 1] - self.tensor[:, 4] * 0.5 + return gravity_center + + @property + def corners(self): + """torch.Tensor: Coordinates of corners of all the boxes in + shape (N, 8, 3). + + Convert the boxes to in clockwise order, in the form of + (x0y0z0, x0y0z1, x0y1z1, x0y1z0, x1y0z0, x1y0z1, x1y1z1, x1y1z0) + + .. code-block:: none + + front z + / + / + (x0, y0, z1) + ----------- + (x1, y0, z1) + /| / | + / | / | + (x0, y0, z0) + ----------- + + (x1, y1, z1) + | / . | / + | / origin | / + (x0, y1, z0) + ----------- + -------> x right + | (x1, y1, z0) + | + v + down y + """ + # TODO: rotation_3d_in_axis function do not support + # empty tensor currently. + assert len(self.tensor) != 0 + dims = self.dims + corners_norm = torch.from_numpy( + np.stack(np.unravel_index(np.arange(8), [2] * 3), axis=1)).to( + device=dims.device, dtype=dims.dtype) + + corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] + # use relative origin [0.5, 1, 0.5] + corners_norm = corners_norm - dims.new_tensor([0.5, 1, 0.5]) + corners = dims.view([-1, 1, 3]) * corners_norm.reshape([1, 8, 3]) + + # rotate around y axis + corners = rotation_3d_in_axis(corners, self.tensor[:, 6], axis=1) + corners += self.tensor[:, :3].view(-1, 1, 3) + return corners + + @property + def bev(self): + """torch.Tensor: A n x 5 tensor of 2D BEV box of each box + with rotation in XYWHR format.""" + return self.tensor[:, [0, 2, 3, 5, 6]] + + @property + def nearest_bev(self): + """torch.Tensor: A tensor of 2D BEV box of each box + without rotation.""" + # Obtain BEV boxes with rotation in XZWHR format + bev_rotated_boxes = self.bev + # convert the rotation to a valid range + rotations = bev_rotated_boxes[:, -1] + normed_rotations = torch.abs(limit_period(rotations, 0.5, np.pi)) + + # find the center of boxes + conditions = (normed_rotations > np.pi / 4)[..., None] + bboxes_xywh = torch.where(conditions, bev_rotated_boxes[:, + [0, 1, 3, 2]], + bev_rotated_boxes[:, :4]) + + centers = bboxes_xywh[:, :2] + dims = bboxes_xywh[:, 2:] + bev_boxes = torch.cat([centers - dims / 2, centers + dims / 2], dim=-1) + return bev_boxes + + def rotate(self, angle, points=None): + """Rotate boxes with points (optional) with the given angle or \ + rotation matrix. + + Args: + angle (float | torch.Tensor | np.ndarray): + Rotation angle or rotation matrix. 
+ points (torch.Tensor, numpy.ndarray, :obj:`BasePoints`, optional): + Points to rotate. Defaults to None. + + Returns: + tuple or None: When ``points`` is None, the function returns \ + None, otherwise it returns the rotated points and the \ + rotation matrix ``rot_mat_T``. + """ + if not isinstance(angle, torch.Tensor): + angle = self.tensor.new_tensor(angle) + assert angle.shape == torch.Size([3, 3]) or angle.numel() == 1, \ + f'invalid rotation angle shape {angle.shape}' + + if angle.numel() == 1: + rot_sin = torch.sin(angle) + rot_cos = torch.cos(angle) + rot_mat_T = self.tensor.new_tensor([[rot_cos, 0, -rot_sin], + [0, 1, 0], + [rot_sin, 0, rot_cos]]) + else: + rot_mat_T = angle + rot_sin = rot_mat_T[2, 0] + rot_cos = rot_mat_T[0, 0] + angle = np.arctan2(rot_sin, rot_cos) + + self.tensor[:, :3] = self.tensor[:, :3] @ rot_mat_T + self.tensor[:, 6] += angle + + if points is not None: + if isinstance(points, torch.Tensor): + points[:, :3] = points[:, :3] @ rot_mat_T + elif isinstance(points, np.ndarray): + rot_mat_T = rot_mat_T.numpy() + points[:, :3] = np.dot(points[:, :3], rot_mat_T) + elif isinstance(points, BasePoints): + # clockwise + points.rotate(-angle) + else: + raise ValueError + return points, rot_mat_T + + def flip(self, bev_direction='horizontal', points=None): + """Flip the boxes in BEV along given BEV direction. + + In CAM coordinates, it flips the x (horizontal) or z (vertical) axis. + + Args: + bev_direction (str): Flip direction (horizontal or vertical). + points (torch.Tensor, numpy.ndarray, :obj:`BasePoints`, None): + Points to flip. Defaults to None. + + Returns: + torch.Tensor, numpy.ndarray or None: Flipped points. + """ + assert bev_direction in ('horizontal', 'vertical') + if bev_direction == 'horizontal': + self.tensor[:, 0::7] = -self.tensor[:, 0::7] + if self.with_yaw: + self.tensor[:, 6] = -self.tensor[:, 6] + np.pi + elif bev_direction == 'vertical': + self.tensor[:, 2::7] = -self.tensor[:, 2::7] + if self.with_yaw: + self.tensor[:, 6] = -self.tensor[:, 6] + + if points is not None: + assert isinstance(points, (torch.Tensor, np.ndarray, BasePoints)) + if isinstance(points, (torch.Tensor, np.ndarray)): + if bev_direction == 'horizontal': + points[:, 0] = -points[:, 0] + elif bev_direction == 'vertical': + points[:, 2] = -points[:, 2] + elif isinstance(points, BasePoints): + points.flip(bev_direction) + return points + + def in_range_bev(self, box_range): + """Check whether the boxes are in the given range. + + Args: + box_range (list | torch.Tensor): The range of box + (x_min, z_min, x_max, z_max). + + Note: + The original implementation of SECOND checks whether boxes in + a range by checking whether the points are in a convex + polygon, we reduce the burden for simpler cases. + + Returns: + torch.Tensor: Indicating whether each box is inside \ + the reference range. + """ + in_range_flags = ((self.tensor[:, 0] > box_range[0]) + & (self.tensor[:, 2] > box_range[1]) + & (self.tensor[:, 0] < box_range[2]) + & (self.tensor[:, 2] < box_range[3])) + return in_range_flags + + @classmethod + def height_overlaps(cls, boxes1, boxes2, mode='iou'): + """Calculate height overlaps of two boxes. + + This function calculates the height overlaps between ``boxes1`` and + ``boxes2``, where ``boxes1`` and ``boxes2`` should be in the same type. + + Args: + boxes1 (:obj:`CameraInstance3DBoxes`): Boxes 1 contain N boxes. + boxes2 (:obj:`CameraInstance3DBoxes`): Boxes 2 contain M boxes. + mode (str, optional): Mode of iou calculation. Defaults to 'iou'. 
+ + Returns: + torch.Tensor: Calculated iou of boxes' heights. + """ + assert isinstance(boxes1, CameraInstance3DBoxes) + assert isinstance(boxes2, CameraInstance3DBoxes) + + boxes1_top_height = boxes1.top_height.view(-1, 1) + boxes1_bottom_height = boxes1.bottom_height.view(-1, 1) + boxes2_top_height = boxes2.top_height.view(1, -1) + boxes2_bottom_height = boxes2.bottom_height.view(1, -1) + + # In camera coordinate system + # from up to down is the positive direction + heighest_of_bottom = torch.min(boxes1_bottom_height, + boxes2_bottom_height) + lowest_of_top = torch.max(boxes1_top_height, boxes2_top_height) + overlaps_h = torch.clamp(heighest_of_bottom - lowest_of_top, min=0) + return overlaps_h + + def convert_to(self, dst, rt_mat=None): + """Convert self to ``dst`` mode. + + Args: + dst (:obj:`Box3DMode`): The target Box mode. + rt_mat (np.ndarray | torch.Tensor): The rotation and translation + matrix between different coordinates. Defaults to None. + The conversion from ``src`` coordinates to ``dst`` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + + Returns: + :obj:`BaseInstance3DBoxes`: \ + The converted box of the same type in the ``dst`` mode. + """ + from .box_3d_mode import Box3DMode + return Box3DMode.convert( + box=self, src=Box3DMode.CAM, dst=dst, rt_mat=rt_mat) diff --git a/mmcv/core/bbox/structures/coord_3d_mode.py b/mmcv/core/bbox/structures/coord_3d_mode.py new file mode 100644 index 0000000..2d0de8d --- /dev/null +++ b/mmcv/core/bbox/structures/coord_3d_mode.py @@ -0,0 +1,281 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +from enum import IntEnum, unique + +from mmcv.core.points import (BasePoints, CameraPoints, DepthPoints, + LiDARPoints) +from .base_box3d import BaseInstance3DBoxes +from .cam_box3d import CameraInstance3DBoxes +from .depth_box3d import DepthInstance3DBoxes +from .lidar_box3d import LiDARInstance3DBoxes + + +@unique +class Coord3DMode(IntEnum): + r"""Enum of different ways to represent a box + and point cloud. + + Coordinates in LiDAR: + + .. code-block:: none + + up z + ^ x front + | / + | / + left y <------ 0 + + The relative coordinate of bottom center in a LiDAR box is (0.5, 0.5, 0), + and the yaw is around the z axis, thus the rotation axis=2. + + Coordinates in camera: + + .. code-block:: none + + z front + / + / + 0 ------> x right + | + | + v + down y + + The relative coordinate of bottom center in a CAM box is [0.5, 1.0, 0.5], + and the yaw is around the y axis, thus the rotation axis=1. + + Coordinates in Depth mode: + + .. code-block:: none + + up z + ^ y front + | / + | / + 0 ------> x right + + The relative coordinate of bottom center in a DEPTH box is (0.5, 0.5, 0), + and the yaw is around the z axis, thus the rotation axis=2. + """ + + LIDAR = 0 + CAM = 1 + DEPTH = 2 + + @staticmethod + def convert(input, src, dst, rt_mat=None): + """Convert boxes or points from `src` mode to `dst` mode.""" + if isinstance(input, BaseInstance3DBoxes): + return Coord3DMode.convert_box(input, src, dst, rt_mat=rt_mat) + elif isinstance(input, BasePoints): + return Coord3DMode.convert_point(input, src, dst, rt_mat=rt_mat) + else: + raise NotImplementedError + + @staticmethod + def convert_box(box, src, dst, rt_mat=None): + """Convert boxes from `src` mode to `dst` mode. + + Args: + box (tuple | list | np.ndarray | + torch.Tensor | BaseInstance3DBoxes): + Can be a k-tuple, k-list or an Nxk array/tensor, where k = 7. 
+ src (:obj:`CoordMode`): The src Box mode. + dst (:obj:`CoordMode`): The target Box mode. + rt_mat (np.ndarray | torch.Tensor): The rotation and translation + matrix between different coordinates. Defaults to None. + The conversion from `src` coordinates to `dst` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + + Returns: + (tuple | list | np.ndarray | torch.Tensor | BaseInstance3DBoxes): \ + The converted box of the same type. + """ + if src == dst: + return box + + is_numpy = isinstance(box, np.ndarray) + is_Instance3DBoxes = isinstance(box, BaseInstance3DBoxes) + single_box = isinstance(box, (list, tuple)) + if single_box: + assert len(box) >= 7, ( + 'CoordMode.convert takes either a k-tuple/list or ' + 'an Nxk array/tensor, where k >= 7') + arr = torch.tensor(box)[None, :] + else: + # avoid modifying the input box + if is_numpy: + arr = torch.from_numpy(np.asarray(box)).clone() + elif is_Instance3DBoxes: + arr = box.tensor.clone() + else: + arr = box.clone() + + # convert box from `src` mode to `dst` mode. + x_size, y_size, z_size = arr[..., 3:4], arr[..., 4:5], arr[..., 5:6] + if src == Coord3DMode.LIDAR and dst == Coord3DMode.CAM: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, -1, 0], [0, 0, -1], [1, 0, 0]]) + xyz_size = torch.cat([y_size, z_size, x_size], dim=-1) + elif src == Coord3DMode.CAM and dst == Coord3DMode.LIDAR: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, 0, 1], [-1, 0, 0], [0, -1, 0]]) + xyz_size = torch.cat([z_size, x_size, y_size], dim=-1) + elif src == Coord3DMode.DEPTH and dst == Coord3DMode.CAM: + if rt_mat is None: + rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, 1], [0, -1, 0]]) + xyz_size = torch.cat([x_size, z_size, y_size], dim=-1) + elif src == Coord3DMode.CAM and dst == Coord3DMode.DEPTH: + if rt_mat is None: + rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, -1], [0, 1, 0]]) + xyz_size = torch.cat([x_size, z_size, y_size], dim=-1) + elif src == Coord3DMode.LIDAR and dst == Coord3DMode.DEPTH: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, -1, 0], [1, 0, 0], [0, 0, 1]]) + xyz_size = torch.cat([y_size, x_size, z_size], dim=-1) + elif src == Coord3DMode.DEPTH and dst == Coord3DMode.LIDAR: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, 1, 0], [-1, 0, 0], [0, 0, 1]]) + xyz_size = torch.cat([y_size, x_size, z_size], dim=-1) + else: + raise NotImplementedError( + f'Conversion from Coord3DMode {src} to {dst} ' + 'is not supported yet') + + if not isinstance(rt_mat, torch.Tensor): + rt_mat = arr.new_tensor(rt_mat) + if rt_mat.size(1) == 4: + extended_xyz = torch.cat( + [arr[:, :3], arr.new_ones(arr.size(0), 1)], dim=-1) + xyz = extended_xyz @ rt_mat.t() + else: + xyz = arr[:, :3] @ rt_mat.t() + + remains = arr[..., 6:] + arr = torch.cat([xyz[:, :3], xyz_size, remains], dim=-1) + + # convert arr to the original type + original_type = type(box) + if single_box: + return original_type(arr.flatten().tolist()) + if is_numpy: + return arr.numpy() + elif is_Instance3DBoxes: + if dst == Coord3DMode.CAM: + target_type = CameraInstance3DBoxes + elif dst == Coord3DMode.LIDAR: + target_type = LiDARInstance3DBoxes + elif dst == Coord3DMode.DEPTH: + target_type = DepthInstance3DBoxes + else: + raise NotImplementedError( + f'Conversion to {dst} through {original_type}' + ' is not supported yet') + return target_type( + arr, box_dim=arr.size(-1), with_yaw=box.with_yaw) + else: + return arr + + @staticmethod + def convert_point(point, src, dst, rt_mat=None): + """Convert points from `src` mode 
to `dst` mode. + + Args: + point (tuple | list | np.ndarray | + torch.Tensor | BasePoints): + Can be a k-tuple, k-list or an Nxk array/tensor. + src (:obj:`CoordMode`): The src Point mode. + dst (:obj:`CoordMode`): The target Point mode. + rt_mat (np.ndarray | torch.Tensor): The rotation and translation + matrix between different coordinates. Defaults to None. + The conversion from `src` coordinates to `dst` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + + Returns: + (tuple | list | np.ndarray | torch.Tensor | BasePoints): \ + The converted point of the same type. + """ + if src == dst: + return point + + is_numpy = isinstance(point, np.ndarray) + is_InstancePoints = isinstance(point, BasePoints) + single_point = isinstance(point, (list, tuple)) + if single_point: + assert len(point) >= 3, ( + 'CoordMode.convert takes either a k-tuple/list or ' + 'an Nxk array/tensor, where k >= 3') + arr = torch.tensor(point)[None, :] + else: + # avoid modifying the input point + if is_numpy: + arr = torch.from_numpy(np.asarray(point)).clone() + elif is_InstancePoints: + arr = point.tensor.clone() + else: + arr = point.clone() + + # convert point from `src` mode to `dst` mode. + # TODO: LIDAR + # only implemented provided Rt matrix in cam-depth conversion + if src == Coord3DMode.LIDAR and dst == Coord3DMode.CAM: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, -1, 0], [0, 0, -1], [1, 0, 0]]) + elif src == Coord3DMode.CAM and dst == Coord3DMode.LIDAR: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, 0, 1], [-1, 0, 0], [0, -1, 0]]) + elif src == Coord3DMode.DEPTH and dst == Coord3DMode.CAM: + if rt_mat is None: + rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, -1], [0, 1, 0]]) + elif src == Coord3DMode.CAM and dst == Coord3DMode.DEPTH: + if rt_mat is None: + rt_mat = arr.new_tensor([[1, 0, 0], [0, 0, 1], [0, -1, 0]]) + elif src == Coord3DMode.LIDAR and dst == Coord3DMode.DEPTH: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, -1, 0], [1, 0, 0], [0, 0, 1]]) + elif src == Coord3DMode.DEPTH and dst == Coord3DMode.LIDAR: + if rt_mat is None: + rt_mat = arr.new_tensor([[0, 1, 0], [-1, 0, 0], [0, 0, 1]]) + else: + raise NotImplementedError( + f'Conversion from Coord3DMode {src} to {dst} ' + 'is not supported yet') + + if not isinstance(rt_mat, torch.Tensor): + rt_mat = arr.new_tensor(rt_mat) + if rt_mat.size(1) == 4: + extended_xyz = torch.cat( + [arr[:, :3], arr.new_ones(arr.size(0), 1)], dim=-1) + xyz = extended_xyz @ rt_mat.t() + else: + xyz = arr[:, :3] @ rt_mat.t() + + remains = arr[:, 3:] + arr = torch.cat([xyz[:, :3], remains], dim=-1) + + # convert arr to the original type + original_type = type(point) + if single_point: + return original_type(arr.flatten().tolist()) + if is_numpy: + return arr.numpy() + elif is_InstancePoints: + if dst == Coord3DMode.CAM: + target_type = CameraPoints + elif dst == Coord3DMode.LIDAR: + target_type = LiDARPoints + elif dst == Coord3DMode.DEPTH: + target_type = DepthPoints + else: + raise NotImplementedError( + f'Conversion to {dst} through {original_type}' + ' is not supported yet') + return target_type( + arr, + points_dim=arr.size(-1), + attribute_dims=point.attribute_dims) + else: + return arr diff --git a/mmcv/core/bbox/structures/depth_box3d.py b/mmcv/core/bbox/structures/depth_box3d.py new file mode 100644 index 0000000..058e975 --- /dev/null +++ b/mmcv/core/bbox/structures/depth_box3d.py @@ -0,0 +1,343 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import numpy as np +import torch + +from mmcv.core.points import BasePoints +from mmcv.ops.roiaware_pool3d import points_in_boxes_batch +from .base_box3d import BaseInstance3DBoxes +from .utils import limit_period, rotation_3d_in_axis + + +class DepthInstance3DBoxes(BaseInstance3DBoxes): + """3D boxes of instances in Depth coordinates. + + Coordinates in Depth: + + .. code-block:: none + + up z y front (yaw=-0.5*pi) + ^ ^ + | / + | / + 0 ------> x right (yaw=0) + + The relative coordinate of bottom center in a Depth box is (0.5, 0.5, 0), + and the yaw is around the z axis, thus the rotation axis=2. + The yaw is 0 at the positive direction of x axis, and decreases from + the positive direction of x to the positive direction of y. + Also note that rotation of DepthInstance3DBoxes is counterclockwise, + which is reverse to the definition of the yaw angle (clockwise). + + A refactor is ongoing to make the three coordinate systems + easier to understand and convert between each other. + + Attributes: + tensor (torch.Tensor): Float matrix of N x box_dim. + box_dim (int): Integer indicates the dimension of a box + Each row is (x, y, z, x_size, y_size, z_size, yaw, ...). + with_yaw (bool): If True, the value of yaw will be set to 0 as minmax + boxes. + """ + + @property + def gravity_center(self): + """torch.Tensor: A tensor with center of each box.""" + bottom_center = self.bottom_center + gravity_center = torch.zeros_like(bottom_center) + gravity_center[:, :2] = bottom_center[:, :2] + gravity_center[:, 2] = bottom_center[:, 2] + self.tensor[:, 5] * 0.5 + return gravity_center + + @property + def corners(self): + """torch.Tensor: Coordinates of corners of all the boxes + in shape (N, 8, 3). + + Convert the boxes to corners in clockwise order, in form of + ``(x0y0z0, x0y0z1, x0y1z1, x0y1z0, x1y0z0, x1y0z1, x1y1z1, x1y1z0)`` + + .. code-block:: none + + up z + front y ^ + / | + / | + (x0, y1, z1) + ----------- + (x1, y1, z1) + /| / | + / | / | + (x0, y0, z1) + ----------- + + (x1, y1, z0) + | / . | / + | / origin | / + (x0, y0, z0) + ----------- + --------> right x + (x1, y0, z0) + """ + # TODO: rotation_3d_in_axis function do not support + # empty tensor currently. 
+ assert len(self.tensor) != 0 + dims = self.dims + corners_norm = torch.from_numpy( + np.stack(np.unravel_index(np.arange(8), [2] * 3), axis=1)).to( + device=dims.device, dtype=dims.dtype) + + corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] + # use relative origin (0.5, 0.5, 0) + corners_norm = corners_norm - dims.new_tensor([0.5, 0.5, 0]) + corners = dims.view([-1, 1, 3]) * corners_norm.reshape([1, 8, 3]) + + # rotate around z axis + corners = rotation_3d_in_axis(corners, self.tensor[:, 6], axis=2) + corners += self.tensor[:, :3].view(-1, 1, 3) + return corners + + @property + def bev(self): + """torch.Tensor: A n x 5 tensor of 2D BEV box of each box + in XYWHR format.""" + return self.tensor[:, [0, 1, 3, 4, 6]] + + @property + def nearest_bev(self): + """torch.Tensor: A tensor of 2D BEV box of each box + without rotation.""" + # Obtain BEV boxes with rotation in XYWHR format + bev_rotated_boxes = self.bev + # convert the rotation to a valid range + rotations = bev_rotated_boxes[:, -1] + normed_rotations = torch.abs(limit_period(rotations, 0.5, np.pi)) + + # find the center of boxes + conditions = (normed_rotations > np.pi / 4)[..., None] + bboxes_xywh = torch.where(conditions, bev_rotated_boxes[:, + [0, 1, 3, 2]], + bev_rotated_boxes[:, :4]) + + centers = bboxes_xywh[:, :2] + dims = bboxes_xywh[:, 2:] + bev_boxes = torch.cat([centers - dims / 2, centers + dims / 2], dim=-1) + return bev_boxes + + def rotate(self, angle, points=None): + """Rotate boxes with points (optional) with the given angle or \ + rotation matrix. + + Args: + angle (float | torch.Tensor | np.ndarray): + Rotation angle or rotation matrix. + points (torch.Tensor, numpy.ndarray, :obj:`BasePoints`, optional): + Points to rotate. Defaults to None. + + Returns: + tuple or None: When ``points`` is None, the function returns \ + None, otherwise it returns the rotated points and the \ + rotation matrix ``rot_mat_T``. + """ + if not isinstance(angle, torch.Tensor): + angle = self.tensor.new_tensor(angle) + assert angle.shape == torch.Size([3, 3]) or angle.numel() == 1, \ + f'invalid rotation angle shape {angle.shape}' + + if angle.numel() == 1: + rot_sin = torch.sin(angle) + rot_cos = torch.cos(angle) + rot_mat_T = self.tensor.new_tensor([[rot_cos, -rot_sin, 0], + [rot_sin, rot_cos, 0], + [0, 0, 1]]).T + else: + rot_mat_T = angle.T + rot_sin = rot_mat_T[0, 1] + rot_cos = rot_mat_T[0, 0] + angle = np.arctan2(rot_sin, rot_cos) + + self.tensor[:, 0:3] = self.tensor[:, 0:3] @ rot_mat_T + if self.with_yaw: + self.tensor[:, 6] -= angle + else: + corners_rot = self.corners @ rot_mat_T + new_x_size = corners_rot[..., 0].max( + dim=1, keepdim=True)[0] - corners_rot[..., 0].min( + dim=1, keepdim=True)[0] + new_y_size = corners_rot[..., 1].max( + dim=1, keepdim=True)[0] - corners_rot[..., 1].min( + dim=1, keepdim=True)[0] + self.tensor[:, 3:5] = torch.cat((new_x_size, new_y_size), dim=-1) + + if points is not None: + if isinstance(points, torch.Tensor): + points[:, :3] = points[:, :3] @ rot_mat_T + elif isinstance(points, np.ndarray): + rot_mat_T = rot_mat_T.numpy() + points[:, :3] = np.dot(points[:, :3], rot_mat_T) + elif isinstance(points, BasePoints): + # anti-clockwise + points.rotate(angle) + else: + raise ValueError + return points, rot_mat_T + + def flip(self, bev_direction='horizontal', points=None): + """Flip the boxes in BEV along given BEV direction. + + In Depth coordinates, it flips x (horizontal) or y (vertical) axis. + + Args: + bev_direction (str): Flip direction (horizontal or vertical). 
+            points (torch.Tensor, numpy.ndarray, :obj:`BasePoints`, None):
+                Points to flip. Defaults to None.
+
+        Returns:
+            torch.Tensor, numpy.ndarray or None: Flipped points.
+        """
+        assert bev_direction in ('horizontal', 'vertical')
+        if bev_direction == 'horizontal':
+            self.tensor[:, 0::7] = -self.tensor[:, 0::7]
+            if self.with_yaw:
+                self.tensor[:, 6] = -self.tensor[:, 6] + np.pi
+        elif bev_direction == 'vertical':
+            self.tensor[:, 1::7] = -self.tensor[:, 1::7]
+            if self.with_yaw:
+                self.tensor[:, 6] = -self.tensor[:, 6]
+
+        if points is not None:
+            assert isinstance(points, (torch.Tensor, np.ndarray, BasePoints))
+            if isinstance(points, (torch.Tensor, np.ndarray)):
+                if bev_direction == 'horizontal':
+                    points[:, 0] = -points[:, 0]
+                elif bev_direction == 'vertical':
+                    points[:, 1] = -points[:, 1]
+            elif isinstance(points, BasePoints):
+                points.flip(bev_direction)
+        return points
+
+    def in_range_bev(self, box_range):
+        """Check whether the boxes are in the given range.
+
+        Args:
+            box_range (list | torch.Tensor): The range of box
+                (x_min, y_min, x_max, y_max).
+
+        Note:
+            In the original implementation of SECOND, checking whether a box
+            is in the range is done by testing whether its points lie in a
+            convex polygon; here we reduce the burden for simpler cases.
+
+        Returns:
+            torch.Tensor: Indicating whether each box is inside \
+                the reference range.
+        """
+        in_range_flags = ((self.tensor[:, 0] > box_range[0])
+                          & (self.tensor[:, 1] > box_range[1])
+                          & (self.tensor[:, 0] < box_range[2])
+                          & (self.tensor[:, 1] < box_range[3]))
+        return in_range_flags
+
+    def convert_to(self, dst, rt_mat=None):
+        """Convert self to ``dst`` mode.
+
+        Args:
+            dst (:obj:`Box3DMode`): The target Box mode.
+            rt_mat (np.ndarray | torch.Tensor): The rotation and translation
+                matrix between different coordinates. Defaults to None.
+                The conversion from ``src`` coordinates to ``dst`` coordinates
+                usually comes along the change of sensors, e.g., from camera
+                to LiDAR. This requires a transformation matrix.
+
+        Returns:
+            :obj:`DepthInstance3DBoxes`: \
+                The converted box of the same type in the ``dst`` mode.
+        """
+        from .box_3d_mode import Box3DMode
+        return Box3DMode.convert(
+            box=self, src=Box3DMode.DEPTH, dst=dst, rt_mat=rt_mat)
+
+    def points_in_boxes(self, points):
+        """Find points that are in boxes (CUDA).
+
+        Args:
+            points (torch.Tensor): Points in shape [1, M, 3] or [M, 3], \
+                3 dimensions are [x, y, z] in LiDAR coordinate.
+
+        Returns:
+            torch.Tensor: The index of boxes each point lies in with shape \
+                of (B, M, T).
+        """
+        from .box_3d_mode import Box3DMode
+
+        # to lidar
+        points_lidar = points.clone()
+        points_lidar = points_lidar[..., [1, 0, 2]]
+        points_lidar[..., 1] *= -1
+        if points.dim() == 2:
+            points_lidar = points_lidar.unsqueeze(0)
+        else:
+            assert points.dim() == 3 and points_lidar.shape[0] == 1
+
+        boxes_lidar = self.convert_to(Box3DMode.LIDAR).tensor
+        boxes_lidar = boxes_lidar.to(points.device).unsqueeze(0)
+        box_idxs_of_pts = points_in_boxes_batch(points_lidar, boxes_lidar)
+
+        return box_idxs_of_pts.squeeze(0)
+
+    def enlarged_box(self, extra_width):
+        """Enlarge the length, width and height of boxes.
+
+        Args:
+            extra_width (float | torch.Tensor): Extra width to enlarge the box.
+
+        Returns:
+            :obj:`DepthInstance3DBoxes`: Enlarged boxes.
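+
+        Example (editor's sketch, not in the original patch):
+            >>> import torch
+            >>> boxes = DepthInstance3DBoxes(torch.tensor(
+            ...     [[0., 0., 0., 1., 1., 1., 0.]]))
+            >>> boxes.enlarged_box(0.1).dims  # each size grows by 2 * 0.1
+            tensor([[1.2000, 1.2000, 1.2000]])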
+ """ + enlarged_boxes = self.tensor.clone() + enlarged_boxes[:, 3:6] += extra_width * 2 + # bottom center z minus extra_width + enlarged_boxes[:, 2] -= extra_width + return self.new_box(enlarged_boxes) + + def get_surface_line_center(self): + """Compute surface and line center of bounding boxes. + + Returns: + torch.Tensor: Surface and line center of bounding boxes. + """ + obj_size = self.dims + center = self.gravity_center.view(-1, 1, 3) + batch_size = center.shape[0] + + rot_sin = torch.sin(-self.yaw) + rot_cos = torch.cos(-self.yaw) + rot_mat_T = self.yaw.new_zeros(tuple(list(self.yaw.shape) + [3, 3])) + rot_mat_T[..., 0, 0] = rot_cos + rot_mat_T[..., 0, 1] = -rot_sin + rot_mat_T[..., 1, 0] = rot_sin + rot_mat_T[..., 1, 1] = rot_cos + rot_mat_T[..., 2, 2] = 1 + + # Get the object surface center + offset = obj_size.new_tensor([[0, 0, 1], [0, 0, -1], [0, 1, 0], + [0, -1, 0], [1, 0, 0], [-1, 0, 0]]) + offset = offset.view(1, 6, 3) / 2 + surface_3d = (offset * + obj_size.view(batch_size, 1, 3).repeat(1, 6, 1)).reshape( + -1, 3) + + # Get the object line center + offset = obj_size.new_tensor([[1, 0, 1], [-1, 0, 1], [0, 1, 1], + [0, -1, 1], [1, 0, -1], [-1, 0, -1], + [0, 1, -1], [0, -1, -1], [1, 1, 0], + [1, -1, 0], [-1, 1, 0], [-1, -1, 0]]) + offset = offset.view(1, 12, 3) / 2 + + line_3d = (offset * + obj_size.view(batch_size, 1, 3).repeat(1, 12, 1)).reshape( + -1, 3) + + surface_rot = rot_mat_T.repeat(6, 1, 1) + surface_3d = torch.matmul( + surface_3d.unsqueeze(-2), surface_rot.transpose(2, 1)).squeeze(-2) + surface_center = center.repeat(1, 6, 1).reshape(-1, 3) + surface_3d + + line_rot = rot_mat_T.repeat(12, 1, 1) + line_3d = torch.matmul( + line_3d.unsqueeze(-2), line_rot.transpose(2, 1)).squeeze(-2) + line_center = center.repeat(1, 12, 1).reshape(-1, 3) + line_3d + + return surface_center, line_center diff --git a/mmcv/core/bbox/structures/lidar_box3d.py b/mmcv/core/bbox/structures/lidar_box3d.py new file mode 100644 index 0000000..f7e7ec2 --- /dev/null +++ b/mmcv/core/bbox/structures/lidar_box3d.py @@ -0,0 +1,270 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch + +from mmcv.core.points import BasePoints +from mmcv.ops.roiaware_pool3d import points_in_boxes_gpu +from .base_box3d import BaseInstance3DBoxes +from .utils import limit_period, rotation_3d_in_axis + + +class LiDARInstance3DBoxes(BaseInstance3DBoxes): + """3D boxes of instances in LIDAR coordinates. + + Coordinates in LiDAR: + + .. code-block:: none + + up z x front (yaw=-0.5*pi) + ^ ^ + | / + | / + (yaw=-pi) left y <------ 0 -------- (yaw=0) + + The relative coordinate of bottom center in a LiDAR box is (0.5, 0.5, 0), + and the yaw is around the z axis, thus the rotation axis=2. + The yaw is 0 at the negative direction of y axis, and decreases from + the negative direction of y to the positive direction of x. + + A refactor is ongoing to make the three coordinate systems + easier to understand and convert between each other. + + Attributes: + tensor (torch.Tensor): Float matrix of N x box_dim. + box_dim (int): Integer indicating the dimension of a box. + Each row is (x, y, z, x_size, y_size, z_size, yaw, ...). + with_yaw (bool): If True, the value of yaw will be set to 0 as minmax + boxes. 
+ """ + + @property + def gravity_center(self): + """torch.Tensor: A tensor with center of each box.""" + bottom_center = self.bottom_center + gravity_center = torch.zeros_like(bottom_center) + gravity_center[:, :2] = bottom_center[:, :2] + gravity_center[:, 2] = bottom_center[:, 2] + self.tensor[:, 5] * 0.5 + return gravity_center + + @property + def corners(self): + """torch.Tensor: Coordinates of corners of all the boxes + in shape (N, 8, 3). + + Convert the boxes to corners in clockwise order, in form of + ``(x0y0z0, x0y0z1, x0y1z1, x0y1z0, x1y0z0, x1y0z1, x1y1z1, x1y1z0)`` + + .. code-block:: none + + up z + front x ^ + / | + / | + (x1, y0, z1) + ----------- + (x1, y1, z1) + /| / | + / | / | + (x0, y0, z1) + ----------- + + (x1, y1, z0) + | / . | / + | / origin | / + left y<-------- + ----------- + (x0, y1, z0) + (x0, y0, z0) + """ + # TODO: rotation_3d_in_axis function do not support + # empty tensor currently. + assert len(self.tensor) != 0 + dims = self.dims + corners_norm = torch.from_numpy( + np.stack(np.unravel_index(np.arange(8), [2] * 3), axis=1)).to( + device=dims.device, dtype=dims.dtype) + + corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] + # use relative origin [0.5, 0.5, 0] + corners_norm = corners_norm - dims.new_tensor([0.5, 0.5, 0]) + corners = dims.view([-1, 1, 3]) * corners_norm.reshape([1, 8, 3]) + + # rotate around z axis + corners = rotation_3d_in_axis(corners, self.tensor[:, 6], axis=2) + corners += self.tensor[:, :3].view(-1, 1, 3) + return corners + + @property + def bev(self): + """torch.Tensor: 2D BEV box of each box with rotation + in XYWHR format.""" + return self.tensor[:, [0, 1, 3, 4, 6]] + + @property + def nearest_bev(self): + """torch.Tensor: A tensor of 2D BEV box of each box + without rotation.""" + # Obtain BEV boxes with rotation in XYWHR format + bev_rotated_boxes = self.bev + # convert the rotation to a valid range + rotations = bev_rotated_boxes[:, -1] + normed_rotations = torch.abs(limit_period(rotations, 0.5, np.pi)) + + # find the center of boxes + conditions = (normed_rotations > np.pi / 4)[..., None] + bboxes_xywh = torch.where(conditions, bev_rotated_boxes[:, + [0, 1, 3, 2]], + bev_rotated_boxes[:, :4]) + + centers = bboxes_xywh[:, :2] + dims = bboxes_xywh[:, 2:] + bev_boxes = torch.cat([centers - dims / 2, centers + dims / 2], dim=-1) + return bev_boxes + + def rotate(self, angle, points=None): + """Rotate boxes with points (optional) with the given angle or \ + rotation matrix. + + Args: + angles (float | torch.Tensor | np.ndarray): + Rotation angle or rotation matrix. + points (torch.Tensor, numpy.ndarray, :obj:`BasePoints`, optional): + Points to rotate. Defaults to None. + + Returns: + tuple or None: When ``points`` is None, the function returns \ + None, otherwise it returns the rotated points and the \ + rotation matrix ``rot_mat_T``. 
+ """ + if not isinstance(angle, torch.Tensor): + angle = self.tensor.new_tensor(angle) + assert angle.shape == torch.Size([3, 3]) or angle.numel() == 1, \ + f'invalid rotation angle shape {angle.shape}' + + if angle.numel() == 1: + rot_sin = torch.sin(angle) + rot_cos = torch.cos(angle) + rot_mat_T = self.tensor.new_tensor([[rot_cos, -rot_sin, 0], + [rot_sin, rot_cos, 0], + [0, 0, 1]]) + else: + rot_mat_T = angle + rot_sin = rot_mat_T[1, 0] + rot_cos = rot_mat_T[0, 0] + angle = np.arctan2(rot_sin, rot_cos) + + self.tensor[:, :3] = self.tensor[:, :3] @ rot_mat_T + self.tensor[:, 6] += angle + + if self.tensor.shape[1] == 9: + # rotate velo vector + self.tensor[:, 7:9] = self.tensor[:, 7:9] @ rot_mat_T[:2, :2] + + if points is not None: + if isinstance(points, torch.Tensor): + points[:, :3] = points[:, :3] @ rot_mat_T + elif isinstance(points, np.ndarray): + rot_mat_T = rot_mat_T.numpy() + points[:, :3] = np.dot(points[:, :3], rot_mat_T) + elif isinstance(points, BasePoints): + # clockwise + points.rotate(-angle) + else: + raise ValueError + return points, rot_mat_T + + def flip(self, bev_direction='horizontal', points=None): + """Flip the boxes in BEV along given BEV direction. + + In LIDAR coordinates, it flips the y (horizontal) or x (vertical) axis. + + Args: + bev_direction (str): Flip direction (horizontal or vertical). + points (torch.Tensor, numpy.ndarray, :obj:`BasePoints`, None): + Points to flip. Defaults to None. + + Returns: + torch.Tensor, numpy.ndarray or None: Flipped points. + """ + assert bev_direction in ('horizontal', 'vertical') + if bev_direction == 'horizontal': + self.tensor[:, 1::7] = -self.tensor[:, 1::7] + if self.with_yaw: + self.tensor[:, 6] = -self.tensor[:, 6] + np.pi + elif bev_direction == 'vertical': + self.tensor[:, 0::7] = -self.tensor[:, 0::7] + if self.with_yaw: + self.tensor[:, 6] = -self.tensor[:, 6] + + if points is not None: + assert isinstance(points, (torch.Tensor, np.ndarray, BasePoints)) + if isinstance(points, (torch.Tensor, np.ndarray)): + if bev_direction == 'horizontal': + points[:, 1] = -points[:, 1] + elif bev_direction == 'vertical': + points[:, 0] = -points[:, 0] + elif isinstance(points, BasePoints): + points.flip(bev_direction) + return points + + def in_range_bev(self, box_range): + """Check whether the boxes are in the given range. + + Args: + box_range (list | torch.Tensor): the range of box + (x_min, y_min, x_max, y_max) + + Note: + The original implementation of SECOND checks whether boxes in + a range by checking whether the points are in a convex + polygon, we reduce the burden for simpler cases. + + Returns: + torch.Tensor: Whether each box is inside the reference range. + """ + in_range_flags = ((self.tensor[:, 0] > box_range[0]) + & (self.tensor[:, 1] > box_range[1]) + & (self.tensor[:, 0] < box_range[2]) + & (self.tensor[:, 1] < box_range[3])) + return in_range_flags + + def convert_to(self, dst, rt_mat=None): + """Convert self to ``dst`` mode. + + Args: + dst (:obj:`Box3DMode`): the target Box mode + rt_mat (np.ndarray | torch.Tensor): The rotation and translation + matrix between different coordinates. Defaults to None. + The conversion from ``src`` coordinates to ``dst`` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + + Returns: + :obj:`BaseInstance3DBoxes`: \ + The converted box of the same type in the ``dst`` mode. 
+ """ + from .box_3d_mode import Box3DMode + return Box3DMode.convert( + box=self, src=Box3DMode.LIDAR, dst=dst, rt_mat=rt_mat) + + def enlarged_box(self, extra_width): + """Enlarge the length, width and height boxes. + + Args: + extra_width (float | torch.Tensor): Extra width to enlarge the box. + + Returns: + :obj:`LiDARInstance3DBoxes`: Enlarged boxes. + """ + enlarged_boxes = self.tensor.clone() + enlarged_boxes[:, 3:6] += extra_width * 2 + # bottom center z minus extra_width + enlarged_boxes[:, 2] -= extra_width + return self.new_box(enlarged_boxes) + + def points_in_boxes(self, points): + """Find the box which the points are in. + + Args: + points (torch.Tensor): Points in shape (N, 3). + + Returns: + torch.Tensor: The index of box where each point are in. + """ + box_idx = points_in_boxes_gpu( + points.unsqueeze(0), + self.tensor.unsqueeze(0).to(points.device)).squeeze(0) + return box_idx diff --git a/mmcv/core/bbox/structures/nuscenes_box.py b/mmcv/core/bbox/structures/nuscenes_box.py new file mode 100644 index 0000000..05200a0 --- /dev/null +++ b/mmcv/core/bbox/structures/nuscenes_box.py @@ -0,0 +1,458 @@ +# nuScenes dev-kit. +# Code written by Oscar Beijbom, 2018. + +import copy +from typing import Tuple, List + +import cv2 +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.axes import Axes +from matplotlib.collections import LineCollection +from pyquaternion import Quaternion +from nuscenes.utils.geometry_utils import view_points +from nuscenes.eval.common.data_classes import EvalBox +from nuscenes.eval.detection.constants import DETECTION_NAMES, ATTRIBUTE_NAMES + + +def color_map(data, cmap): + """数值映射为颜色""" + + dmin, dmax = np.nanmin(data), np.nanmax(data) + cmo = plt.cm.get_cmap(cmap) + cs, k = list(), 256/cmo.N + + for i in range(cmo.N): + c = cmo(i) + for j in range(int(i*k), int((i+1)*k)): + cs.append(c) + cs = np.array(cs) + data = np.uint8(255*(data-dmin)/(dmax-dmin)) + + return cs[data] + +class CustomNuscenesBox: + """ Simple data class representing a 3d box including, label, score and velocity. """ + + def __init__(self, + center: List[float], + size: List[float], + orientation: Quaternion, + fut_trajs: List[float], + label: int = np.nan, + score: float = np.nan, + velocity: Tuple = (np.nan, np.nan, np.nan), + name: str = None, + token: str = None): + """ + :param center: Center of box given as x, y, z. + :param size: Size of box in width, length, height. + :param orientation: Box orientation. + :param label: Integer label, optional. + :param score: Classification score, optional. + :param velocity: Box velocity in x, y, z direction. + :param name: Box name, optional. Can be used e.g. for denote category name. + :param token: Unique string identifier from DB. 
+ """ + assert not np.any(np.isnan(center)) + assert not np.any(np.isnan(size)) + assert len(center) == 3 + assert len(size) == 3 + assert type(orientation) == Quaternion + + self.center = np.array(center) + self.wlh = np.array(size) + self.orientation = orientation + self.label = int(label) if not np.isnan(label) else label + self.score = float(score) if not np.isnan(score) else score + self.velocity = np.array(velocity) + self.name = name + self.token = token + self.fut_trajs = np.array(fut_trajs) + + def __eq__(self, other): + center = np.allclose(self.center, other.center) + wlh = np.allclose(self.wlh, other.wlh) + orientation = np.allclose(self.orientation.elements, other.orientation.elements) + label = (self.label == other.label) or (np.isnan(self.label) and np.isnan(other.label)) + score = (self.score == other.score) or (np.isnan(self.score) and np.isnan(other.score)) + vel = (np.allclose(self.velocity, other.velocity) or + (np.all(np.isnan(self.velocity)) and np.all(np.isnan(other.velocity)))) + + return center and wlh and orientation and label and score and vel + + def __repr__(self): + repr_str = 'label: {}, score: {:.2f}, xyz: [{:.2f}, {:.2f}, {:.2f}], wlh: [{:.2f}, {:.2f}, {:.2f}], ' \ + 'rot axis: [{:.2f}, {:.2f}, {:.2f}], ang(degrees): {:.2f}, ang(rad): {:.2f}, ' \ + 'vel: {:.2f}, {:.2f}, {:.2f}, name: {}, token: {}' + + return repr_str.format(self.label, self.score, self.center[0], self.center[1], self.center[2], self.wlh[0], + self.wlh[1], self.wlh[2], self.orientation.axis[0], self.orientation.axis[1], + self.orientation.axis[2], self.orientation.degrees, self.orientation.radians, + self.velocity[0], self.velocity[1], self.velocity[2], self.name, self.token) + + @property + def rotation_matrix(self) -> np.ndarray: + """ + Return a rotation matrix. + :return: . The box's rotation matrix. + """ + return self.orientation.rotation_matrix + + def translate(self, x: np.ndarray) -> None: + """ + Applies a translation. + :param x: . Translation in x, y, z direction. + """ + self.center += x + + def rotate(self, quaternion: Quaternion) -> None: + """ + Rotates box. + :param quaternion: Rotation to apply. + """ + self.center = np.dot(quaternion.rotation_matrix, self.center) + self.orientation = quaternion * self.orientation + self.velocity = np.dot(quaternion.rotation_matrix, self.velocity) + + def corners(self, wlh_factor: float = 1.0) -> np.ndarray: + """ + Returns the bounding box corners. + :param wlh_factor: Multiply w, l, h by a factor to scale the box. + :return: . First four corners are the ones facing forward. + The last four are the ones facing backwards. + """ + w, l, h = self.wlh * wlh_factor + + # 3D bounding box corners. (Convention: x points forward, y to the left, z up.) + x_corners = l / 2 * np.array([1, 1, 1, 1, -1, -1, -1, -1]) + y_corners = w / 2 * np.array([1, -1, -1, 1, 1, -1, -1, 1]) + z_corners = h / 2 * np.array([1, 1, -1, -1, 1, 1, -1, -1]) + corners = np.vstack((x_corners, y_corners, z_corners)) + + # Rotate + corners = np.dot(self.orientation.rotation_matrix, corners) + + # Translate + x, y, z = self.center + corners[0, :] = corners[0, :] + x + corners[1, :] = corners[1, :] + y + corners[2, :] = corners[2, :] + z + + return corners + + def bottom_corners(self) -> np.ndarray: + """ + Returns the four bottom corners. + :return: . Bottom corners. First two face forward, last two face backwards. 
+ """ + return self.corners()[:, [2, 3, 7, 6]] + + def render(self, + axis: Axes, + view: np.ndarray = np.eye(3), + normalize: bool = False, + colors: Tuple = ('b', 'r', 'k'), + linewidth: float = 2, + box_idx=None, + alpha=0.5) -> None: + """ + Renders the box in the provided Matplotlib axis. + :param axis: Axis onto which the box should be drawn. + :param view: . Define a projection in needed (e.g. for drawing projection in an image). + :param normalize: Whether to normalize the remaining coordinate. + :param colors: (: 3). Valid Matplotlib colors ( or normalized RGB tuple) for front, + back and sides. + :param linewidth: Width in pixel of the box sides. + """ + corners = view_points(self.corners(), view, normalize=normalize)[:2, :] + + def draw_rect(selected_corners, color, alpha): + prev = selected_corners[-1] + for corner in selected_corners: + axis.plot([prev[0], corner[0]], [prev[1], corner[1]], color=color, linewidth=linewidth, alpha=alpha) + prev = corner + + # Draw the sides + for i in range(4): + axis.plot([corners.T[i][0], corners.T[i + 4][0]], + [corners.T[i][1], corners.T[i + 4][1]], + color=colors[2], linewidth=linewidth, alpha=alpha) + + # Draw front (first 4 corners) and rear (last 4 corners) rectangles(3d)/lines(2d) + draw_rect(corners.T[:4], colors[0], alpha) + draw_rect(corners.T[4:], colors[1], alpha) + + # Draw line indicating the front + center_bottom_forward = np.mean(corners.T[2:4], axis=0) + center_bottom = np.mean(corners.T[[2, 3, 7, 6]], axis=0) + axis.plot([center_bottom[0], center_bottom_forward[0]], + [center_bottom[1], center_bottom_forward[1]], + color=colors[0], linewidth=linewidth, alpha=alpha) + if box_idx is not None and center_bottom[0] > -35 and center_bottom[1] > -35 \ + and center_bottom[0] < 35 and center_bottom[1] < 35: + text = f'{box_idx}' + axis.text(center_bottom[0], center_bottom[1], text, ha='left', fontsize=5) + + def render_fut_trajs(self, + axis: Axes, + color: str = 'b', + linewidth: float = 1, + fut_ts: int = 6, + mode_idx=None) -> None: + """ + Renders the box in the provided Matplotlib axis. + :param axis: Axis onto which the box should be drawn. + :param view: . Define a projection in needed (e.g. for drawing projection in an image). + :param normalize: Whether to normalize the remaining coordinate. + :param colors: (: 3). Valid Matplotlib colors ( or normalized RGB tuple) for front, + back and sides. + :param linewidth: Width in pixel of the box sides. 
+ """ + + fut_coords = self.fut_trajs.reshape((-1, fut_ts, 2)) + if mode_idx is not None: + fut_coords = fut_coords[[mode_idx]] + alpha = 0.8 + for i in range(fut_coords.shape[0]): + fut_coord = fut_coords[i] + fut_coord = fut_coord.cumsum(axis=-2) + fut_coord = fut_coord + self.center[:2] + if np.abs(fut_coord[-1] - self.center[:2]).max() >= 10: + if color == 'g': + axis.scatter(fut_coord[-1, 0], fut_coord[-1, 1], c=color, marker='*', s=70, alpha=alpha) + elif color == 'b': + axis.scatter(fut_coord[-1, 0], fut_coord[-1, 1], c=color, marker='o', s=20, alpha=alpha) + if mode_idx is None and fut_coord[-1, 0] > -35 and fut_coord[-1, 1] > -35 \ + and fut_coord[-1, 0] < 35 and fut_coord[-1, 1] < 35: + text = f'{i}' + axis.text(fut_coord[-1, 0], fut_coord[-1, 1], text, ha='left', fontsize=5) + axis.plot( + [self.center[0], fut_coord[0, 0]], + [self.center[1], fut_coord[0, 1]], + color=color, linewidth=linewidth, alpha=alpha + ) + for i in range(fut_coord.shape[0]-1): + axis.plot( + [fut_coord[i, 0], fut_coord[i+1, 0]], + [fut_coord[i, 1], fut_coord[i+1, 1]], + color=color, linewidth=linewidth, alpha=alpha + ) + + def render_fut_trajs_grad_color(self, + axis: Axes, + linewidth: float = 1, + linestyles='solid', + cmap='viridis', + fut_ts: int = 6, + alpha: int = 0.8, + mode_idx=None) -> None: + """ + Renders the box in the provided Matplotlib axis. + :param axis: Axis onto which the box should be drawn. + :param view: . Define a projection in needed (e.g. for drawing projection in an image). + :param normalize: Whether to normalize the remaining coordinate. + :param colors: (: 3). Valid Matplotlib colors ( or normalized RGB tuple) for front, + back and sides. + :param linewidth: Width in pixel of the box sides. + """ + + fut_coords = self.fut_trajs.reshape((-1, fut_ts, 2)) + if mode_idx is not None: + fut_coords = fut_coords[[mode_idx]] + + for i in range(fut_coords.shape[0]): + fut_coord = fut_coords[i] + fut_coord = fut_coord.cumsum(axis=-2) + fut_coord = fut_coord + self.center[:2] + fut_coord = np.concatenate((self.center[np.newaxis, :2], fut_coord), axis=0) + fut_coord_segments = np.stack((fut_coord[:-1], fut_coord[1:]), axis=1) + + fut_vecs = None + for j in range(fut_coord_segments.shape[0]): + fut_vec_j = fut_coord_segments[j] + x_linspace = np.linspace(fut_vec_j[0, 0], fut_vec_j[1, 0], 51) + y_linspace = np.linspace(fut_vec_j[0, 1], fut_vec_j[1, 1], 51) + xy = np.stack((x_linspace, y_linspace), axis=1) + xy = np.stack((xy[:-1], xy[1:]), axis=1) + if fut_vecs is None: + fut_vecs = xy + else: + fut_vecs = np.concatenate((fut_vecs, xy), axis=0) + + y = np.sin(np.linspace(3/2*np.pi, 5/2*np.pi, 301)) + colors = color_map(y[:-1], cmap) + line_segments = LineCollection(fut_vecs, colors=colors, linewidths=linewidth, linestyles=linestyles, cmap=cmap) + + # if mode_idx is None and abs(fut_coord[-1, 0]) < 35 and abs(fut_coord[-1, 1]) < 35: + # text = f'{i}' + # axis.text(fut_coord[-1, 0], fut_coord[-1, 1], text, ha='left', fontsize=5) + + axis.add_collection(line_segments) + + def render_fut_trajs_coords(self, + axis: Axes, + color: str = 'b', + linewidth: float = 1, + fut_ts: int = 12) -> None: + """ + Renders the box in the provided Matplotlib axis. + :param axis: Axis onto which the box should be drawn. + :param view: . Define a projection in needed (e.g. for drawing projection in an image). + :param normalize: Whether to normalize the remaining coordinate. + :param colors: (: 3). Valid Matplotlib colors ( or normalized RGB tuple) for front, + back and sides. 
+        :param linewidth: Width in pixel of the trajectory lines.
+        :param fut_ts: Number of future timesteps per trajectory mode.
+        """
+
+        fut_coords = self.fut_trajs.reshape((-1, fut_ts, 2))
+        alpha = 0.2 if color == 'b' else 1
+        for i in range(fut_coords.shape[0]):
+            fut_coord = fut_coords[i]
+            fut_coord = fut_coord + self.center[:2]
+            if np.abs(fut_coord[-1] - self.center[:2]).max() >= 10:
+                if color == 'g':
+                    axis.scatter(fut_coord[-1, 0], fut_coord[-1, 1], c=color, marker='*', s=70, alpha=alpha)
+                elif color == 'b':
+                    axis.scatter(fut_coord[-1, 0], fut_coord[-1, 1], c=color, marker='o', s=20, alpha=alpha)
+            axis.plot(
+                [self.center[0], fut_coord[0, 0]],
+                [self.center[1], fut_coord[0, 1]],
+                color=color, linewidth=linewidth, alpha=alpha
+            )
+            for i in range(fut_coord.shape[0]-1):
+                axis.plot(
+                    [fut_coord[i, 0], fut_coord[i+1, 0]],
+                    [fut_coord[i, 1], fut_coord[i+1, 1]],
+                    color=color, linewidth=linewidth, alpha=alpha
+                )
+
+    def render_cv2(self,
+                   im: np.ndarray,
+                   view: np.ndarray = np.eye(3),
+                   normalize: bool = False,
+                   colors: Tuple = ((0, 0, 255), (255, 0, 0), (155, 155, 155)),
+                   linewidth: int = 2) -> None:
+        """
+        Renders box using OpenCV2.
+        :param im: Image array of shape (h, w, 3). Channels are in BGR order.
+        :param view: Projection matrix of shape (3, 3); define a projection
+            if needed (e.g. for drawing projection in an image).
+        :param normalize: Whether to normalize the remaining coordinate.
+        :param colors: ((R, G, B), (R, G, B), (R, G, B)). Colors for front, side & rear.
+        :param linewidth: Linewidth for plot.
+        """
+        corners = view_points(self.corners(), view, normalize=normalize)[:2, :]
+
+        def draw_rect(selected_corners, color):
+            prev = selected_corners[-1]
+            for corner in selected_corners:
+                cv2.line(im,
+                         (int(prev[0]), int(prev[1])),
+                         (int(corner[0]), int(corner[1])),
+                         color, linewidth)
+                prev = corner
+
+        # Draw the sides
+        for i in range(4):
+            cv2.line(im,
+                     (int(corners.T[i][0]), int(corners.T[i][1])),
+                     (int(corners.T[i + 4][0]), int(corners.T[i + 4][1])),
+                     colors[2][::-1], linewidth)
+
+        # Draw front (first 4 corners) and rear (last 4 corners) rectangles(3d)/lines(2d)
+        draw_rect(corners.T[:4], colors[0][::-1])
+        draw_rect(corners.T[4:], colors[1][::-1])
+
+        # Draw line indicating the front
+        center_bottom_forward = np.mean(corners.T[2:4], axis=0)
+        center_bottom = np.mean(corners.T[[2, 3, 7, 6]], axis=0)
+        cv2.line(im,
+                 (int(center_bottom[0]), int(center_bottom[1])),
+                 (int(center_bottom_forward[0]), int(center_bottom_forward[1])),
+                 colors[0][::-1], linewidth)
+
+    def copy(self) -> 'CustomNuscenesBox':
+        """
+        Create a copy of self.
+        :return: A copy.
+        """
+        return copy.deepcopy(self)
+
+
+class CustomDetectionBox(EvalBox):
+    """ Data class used during detection evaluation. Can be a prediction or ground truth."""
+
+    def __init__(self,
+                 sample_token: str = "",
+                 translation: Tuple[float, float, float] = (0, 0, 0),
+                 size: Tuple[float, float, float] = (0, 0, 0),
+                 rotation: Tuple[float, float, float, float] = (0, 0, 0, 0),
+                 velocity: Tuple[float, float] = (0, 0),
+                 ego_translation: Tuple[float, float, float] = (0, 0, 0),  # Translation to ego vehicle in meters.
+                 num_pts: int = -1,  # Number of LIDAR or RADAR points inside the box. Only for gt boxes.
+                 detection_name: str = 'car',  # The class name used in the detection challenge.
+                 detection_score: float = -1.0,  # GT samples do not have a score.
+                 attribute_name: str = '',  # Box attribute. Each box can have at most 1 attribute.
+                 fut_trajs=None):  # Future trajectories of a pred box, shape=[fut_ts*2].
+
+        super().__init__(sample_token, translation, size, rotation, velocity, ego_translation, num_pts)
+
+        assert detection_name is not None, 'Error: detection_name cannot be empty!'
+        assert detection_name in DETECTION_NAMES, 'Error: Unknown detection_name %s' % detection_name
+
+        assert attribute_name in ATTRIBUTE_NAMES or attribute_name == '', \
+            'Error: Unknown attribute_name %s' % attribute_name
+
+        assert type(detection_score) == float, 'Error: detection_score must be a float!'
+        assert not np.any(np.isnan(detection_score)), 'Error: detection_score may not be NaN!'
+
+        # Assign.
+        self.detection_name = detection_name
+        self.detection_score = detection_score
+        self.attribute_name = attribute_name
+        self.fut_trajs = fut_trajs
+
+    def __eq__(self, other):
+        return (self.sample_token == other.sample_token and
+                self.translation == other.translation and
+                self.size == other.size and
+                self.rotation == other.rotation and
+                self.velocity == other.velocity and
+                self.ego_translation == other.ego_translation and
+                self.num_pts == other.num_pts and
+                self.detection_name == other.detection_name and
+                self.detection_score == other.detection_score and
+                self.attribute_name == other.attribute_name and
+                self.fut_trajs == other.fut_trajs)
+
+    def serialize(self) -> dict:
+        """ Serialize instance into json-friendly format. """
+        return {
+            'sample_token': self.sample_token,
+            'translation': self.translation,
+            'size': self.size,
+            'rotation': self.rotation,
+            'velocity': self.velocity,
+            'ego_translation': self.ego_translation,
+            'num_pts': self.num_pts,
+            'detection_name': self.detection_name,
+            'detection_score': self.detection_score,
+            'attribute_name': self.attribute_name,
+            'fut_trajs': self.fut_trajs
+        }
+
+    @classmethod
+    def deserialize(cls, content: dict):
+        """ Initialize from serialized content. """
+        return cls(sample_token=content['sample_token'],
+                   translation=tuple(content['translation']),
+                   size=tuple(content['size']),
+                   rotation=tuple(content['rotation']),
+                   velocity=tuple(content['velocity']),
+                   fut_trajs=tuple(content['fut_trajs']),
+                   ego_translation=(0.0, 0.0, 0.0) if 'ego_translation' not in content
+                   else tuple(content['ego_translation']),
+                   num_pts=-1 if 'num_pts' not in content else int(content['num_pts']),
+                   detection_name=content['detection_name'],
+                   detection_score=-1.0 if 'detection_score' not in content else float(content['detection_score']),
+                   attribute_name=content['attribute_name'])
diff --git a/mmcv/core/bbox/structures/utils.py b/mmcv/core/bbox/structures/utils.py
new file mode 100644
index 0000000..842131f
--- /dev/null
+++ b/mmcv/core/bbox/structures/utils.py
@@ -0,0 +1,214 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import numpy as np
+import torch
+from logging import warning
+
+
+def limit_period(val, offset=0.5, period=np.pi):
+    """Limit the value into a period for periodic function.
+
+    Args:
+        val (torch.Tensor): The value to be converted.
+        offset (float, optional): Offset to set the value range. \
+            Defaults to 0.5.
+        period (float, optional): Period of the value. Defaults to np.pi.
+
+    Returns:
+        torch.Tensor: Value in the range of \
+            [-offset * period, (1-offset) * period]
+    """
+    return val - torch.floor(val / period + offset) * period
+
+
+def rotation_3d_in_axis(points, angles, axis=0):
+    """Rotate points by angles according to axis.
+
+    Args:
+        points (torch.Tensor): Points of shape (N, M, 3).
+        angles (torch.Tensor): Vector of angles in shape (N,).
+        axis (int, optional): The axis to be rotated. Defaults to 0.
+
+    Raises:
+        ValueError: when the axis is not in range [0, 1, 2], it will \
+            raise value error.
+
+    Returns:
+        torch.Tensor: Rotated points in shape (N, M, 3)
+    """
+    rot_sin = torch.sin(angles)
+    rot_cos = torch.cos(angles)
+    ones = torch.ones_like(rot_cos)
+    zeros = torch.zeros_like(rot_cos)
+    if axis == 1:
+        rot_mat_T = torch.stack([
+            torch.stack([rot_cos, zeros, -rot_sin]),
+            torch.stack([zeros, ones, zeros]),
+            torch.stack([rot_sin, zeros, rot_cos])
+        ])
+    elif axis == 2 or axis == -1:
+        rot_mat_T = torch.stack([
+            torch.stack([rot_cos, -rot_sin, zeros]),
+            torch.stack([rot_sin, rot_cos, zeros]),
+            torch.stack([zeros, zeros, ones])
+        ])
+    elif axis == 0:
+        rot_mat_T = torch.stack([
+            torch.stack([zeros, rot_cos, -rot_sin]),
+            torch.stack([zeros, rot_sin, rot_cos]),
+            torch.stack([ones, zeros, zeros])
+        ])
+    else:
+        raise ValueError(f'axis should be in range [0, 1, 2], got {axis}')
+
+    return torch.einsum('aij,jka->aik', (points, rot_mat_T))
+
+
+def xywhr2xyxyr(boxes_xywhr):
+    """Convert rotated boxes in XYWHR format to XYXYR format.
+
+    Args:
+        boxes_xywhr (torch.Tensor): Rotated boxes in XYWHR format.
+
+    Returns:
+        torch.Tensor: Converted boxes in XYXYR format.
+    """
+    boxes = torch.zeros_like(boxes_xywhr)
+    half_w = boxes_xywhr[:, 2] / 2
+    half_h = boxes_xywhr[:, 3] / 2
+
+    boxes[:, 0] = boxes_xywhr[:, 0] - half_w
+    boxes[:, 1] = boxes_xywhr[:, 1] - half_h
+    boxes[:, 2] = boxes_xywhr[:, 0] + half_w
+    boxes[:, 3] = boxes_xywhr[:, 1] + half_h
+    boxes[:, 4] = boxes_xywhr[:, 4]
+    return boxes
+
+
+def get_box_type(box_type):
+    """Get the type and mode of box structure.
+
+    Args:
+        box_type (str): The type of box structure.
+            The valid values are "LiDAR", "Camera", or "Depth".
+
+    Returns:
+        tuple: Box type and box mode.
+    """
+    from .box_3d_mode import (Box3DMode, CameraInstance3DBoxes,
+                              DepthInstance3DBoxes, LiDARInstance3DBoxes)
+    box_type_lower = box_type.lower()
+    if box_type_lower == 'lidar':
+        box_type_3d = LiDARInstance3DBoxes
+        box_mode_3d = Box3DMode.LIDAR
+    elif box_type_lower == 'camera':
+        box_type_3d = CameraInstance3DBoxes
+        box_mode_3d = Box3DMode.CAM
+    elif box_type_lower == 'depth':
+        box_type_3d = DepthInstance3DBoxes
+        box_mode_3d = Box3DMode.DEPTH
+    else:
+        raise ValueError('Only "box_type" of "camera", "lidar", "depth"'
+                         f' are supported, got {box_type}')
+
+    return box_type_3d, box_mode_3d
+
+
+def points_cam2img(points_3d, proj_mat, with_depth=False):
+    """Project points from camera coordinates to image coordinates.
+
+    Args:
+        points_3d (torch.Tensor): Points in shape (N, 3).
+        proj_mat (torch.Tensor): Transformation matrix between coordinates.
+        with_depth (bool, optional): Whether to keep depth in the output.
+            Defaults to False.
+
+    Returns:
+        torch.Tensor: Points in image coordinates with shape [N, 2].
+    """
+    points_num = list(points_3d.shape)[:-1]
+
+    points_shape = np.concatenate([points_num, [1]], axis=0).tolist()
+    assert len(proj_mat.shape) == 2, 'The dimension of the projection'\
+        f' matrix should be 2 instead of {len(proj_mat.shape)}.'
+    d1, d2 = proj_mat.shape[:2]
+    assert (d1 == 3 and d2 == 3) or (d1 == 3 and d2 == 4) or (
+        d1 == 4 and d2 == 4), 'The shape of the projection matrix'\
+        f' ({d1}*{d2}) is not supported.'
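+    # (editor's note) A 3x3 intrinsics-only or a 3x4 projection matrix is
+    # padded to a 4x4 matrix below, so a single homogeneous multiply
+    # handles every supported shape.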
+    if d1 == 3:
+        proj_mat_expanded = torch.eye(
+            4, device=proj_mat.device, dtype=proj_mat.dtype)
+        proj_mat_expanded[:d1, :d2] = proj_mat
+        proj_mat = proj_mat_expanded
+
+    # the previous implementation used new_zeros; new_ones yields better results
+    points_4 = torch.cat(
+        [points_3d, points_3d.new_ones(*points_shape)], dim=-1)
+    point_2d = torch.matmul(points_4, proj_mat.t())
+    point_2d_res = point_2d[..., :2] / point_2d[..., 2:3]
+
+    if with_depth:
+        return torch.cat([point_2d_res, point_2d[..., 2:3]], dim=-1)
+    return point_2d_res
+
+
+def mono_cam_box2vis(cam_box):
+    """This is a post-processing function on the bboxes from Mono-3D task. If
+    we want to perform projection visualization, we need to:
+
+    1. rotate the box along x-axis for np.pi / 2 (roll)
+    2. change orientation from local yaw to global yaw
+    3. convert yaw by (np.pi / 2 - yaw)
+
+    After applying this function, we can project and draw it on 2D images.
+
+    Args:
+        cam_box (:obj:`CameraInstance3DBoxes`): 3D bbox in camera coordinate \
+            system before conversion. Could be gt bbox loaded from dataset or \
+            network prediction output.
+
+    Returns:
+        :obj:`CameraInstance3DBoxes`: Box after conversion.
+    """
+    warning('DeprecationWarning: The hack of yaw and dimension in the '
+            'monocular 3D detection on nuScenes has been removed. The '
+            'function mono_cam_box2vis will be deprecated.')
+    from . import CameraInstance3DBoxes
+    assert isinstance(cam_box, CameraInstance3DBoxes), \
+        'input bbox should be CameraInstance3DBoxes!'
+
+    loc = cam_box.gravity_center
+    dim = cam_box.dims
+    yaw = cam_box.yaw
+    feats = cam_box.tensor[:, 7:]
+    # rotate along x-axis for np.pi / 2
+    # see also here: https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/nuscenes_mono_dataset.py#L557  # noqa
+    dim[:, [1, 2]] = dim[:, [2, 1]]
+    # change local yaw to global yaw for visualization
+    # refer to https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/nuscenes_mono_dataset.py#L164-L166  # noqa
+    yaw += torch.atan2(loc[:, 0], loc[:, 2])
+    # convert yaw by (-yaw - np.pi / 2)
+    # this is because mono 3D box class such as `NuScenesBox` has different
+    # definition of rotation with our `CameraInstance3DBoxes`
+    yaw = -yaw - np.pi / 2
+    cam_box = torch.cat([loc, dim, yaw[:, None], feats], dim=1)
+    cam_box = CameraInstance3DBoxes(
+        cam_box, box_dim=cam_box.shape[-1], origin=(0.5, 0.5, 0.5))
+
+    return cam_box
+
+
+def get_proj_mat_by_coord_type(img_meta, coord_type):
+    """Obtain the projection matrix according to the coordinate type.
+
+    Args:
+        img_meta (dict): Meta info.
+        coord_type (str): 'DEPTH' or 'CAMERA' or 'LIDAR'.
+            Can be case-insensitive.
+
+    Returns:
+        torch.Tensor: Transformation matrix.
+    """
+    coord_type = coord_type.upper()
+    mapping = {'LIDAR': 'lidar2img', 'DEPTH': 'depth2img', 'CAMERA': 'cam2img'}
+    assert coord_type in mapping.keys()
+    return img_meta[mapping[coord_type]]
diff --git a/mmcv/core/bbox/transforms.py b/mmcv/core/bbox/transforms.py
new file mode 100644
index 0000000..2dcd769
--- /dev/null
+++ b/mmcv/core/bbox/transforms.py
@@ -0,0 +1,320 @@
+import numpy as np
+import torch
+
+
+def bbox_flip(bboxes, img_shape, direction='horizontal'):
+    """Flip bboxes horizontally or vertically.
+
+    Args:
+        bboxes (Tensor): Shape (..., 4*k)
+        img_shape (tuple): Image shape.
+        direction (str): Flip direction, options are "horizontal", "vertical",
+            "diagonal". Default: "horizontal"
+
+    Returns:
+        Tensor: Flipped bboxes.
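+
+    Example (editor's sketch, not in the original patch):
+        >>> import torch
+        >>> # one box in an image of (h, w) = (100, 200), flipped horizontally
+        >>> bbox_flip(torch.tensor([[10., 20., 30., 40.]]), (100, 200))
+        tensor([[170.,  20., 190.,  40.]])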
+ """ + assert bboxes.shape[-1] % 4 == 0 + assert direction in ['horizontal', 'vertical', 'diagonal'] + flipped = bboxes.clone() + if direction == 'horizontal': + flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] + flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] + elif direction == 'vertical': + flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] + flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] + else: + flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4] + flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4] + flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4] + flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4] + return flipped + + +def bbox_mapping(bboxes, + img_shape, + scale_factor, + flip, + flip_direction='horizontal'): + """Map bboxes from the original image scale to testing scale.""" + new_bboxes = bboxes * bboxes.new_tensor(scale_factor) + if flip: + new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction) + return new_bboxes + + +def bbox_mapping_back(bboxes, + img_shape, + scale_factor, + flip, + flip_direction='horizontal'): + """Map bboxes from testing scale to original image scale.""" + new_bboxes = bbox_flip(bboxes, img_shape, + flip_direction) if flip else bboxes + new_bboxes = new_bboxes.view(-1, 4) / new_bboxes.new_tensor(scale_factor) + return new_bboxes.view(bboxes.shape) + + +def bbox2roi(bbox_list): + """Convert a list of bboxes to roi format. + + Args: + bbox_list (list[Tensor]): a list of bboxes corresponding to a batch + of images. + + Returns: + Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2] + """ + rois_list = [] + for img_id, bboxes in enumerate(bbox_list): + if bboxes.size(0) > 0: + img_inds = bboxes.new_full((bboxes.size(0), 1), img_id) + rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1) + else: + rois = bboxes.new_zeros((0, 5)) + rois_list.append(rois) + rois = torch.cat(rois_list, 0) + return rois + + +def roi2bbox(rois): + """Convert rois to bounding box format. + + Args: + rois (torch.Tensor): RoIs with the shape (n, 5) where the first + column indicates batch id of each RoI. + + Returns: + list[torch.Tensor]: Converted boxes of corresponding rois. + """ + bbox_list = [] + img_ids = torch.unique(rois[:, 0].cpu(), sorted=True) + for img_id in img_ids: + inds = (rois[:, 0] == img_id.item()) + bbox = rois[inds, 1:] + bbox_list.append(bbox) + return bbox_list + + +def bbox2result(bboxes, labels, num_classes): + """Convert detection results to a list of numpy arrays. + + Args: + bboxes (torch.Tensor | np.ndarray): shape (n, 5) + labels (torch.Tensor | np.ndarray): shape (n, ) + num_classes (int): class number, including background class + + Returns: + list(ndarray): bbox results of each class + """ + if bboxes.shape[0] == 0: + return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)] + else: + if isinstance(bboxes, torch.Tensor): + bboxes = bboxes.detach().cpu().numpy() + labels = labels.detach().cpu().numpy() + return [bboxes[labels == i, :] for i in range(num_classes)] + + +def distance2bbox(points, distance, max_shape=None): + """Decode distance prediction to bounding box. + + Args: + points (Tensor): Shape (B, N, 2) or (N, 2). + distance (Tensor): Distance from the given point to 4 + boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4) + max_shape (Sequence[int] or torch.Tensor or Sequence[ + Sequence[int]],optional): Maximum bounds for boxes, specifies + (H, W, C) or (H, W). 
If priors shape is (B, N, 4), then
+            the max_shape should be a Sequence[Sequence[int]]
+            and the length of max_shape should also be B.
+
+    Returns:
+        Tensor: Boxes with shape (N, 4) or (B, N, 4)
+    """
+    x1 = points[..., 0] - distance[..., 0]
+    y1 = points[..., 1] - distance[..., 1]
+    x2 = points[..., 0] + distance[..., 2]
+    y2 = points[..., 1] + distance[..., 3]
+
+    bboxes = torch.stack([x1, y1, x2, y2], -1)
+
+    if max_shape is not None:
+        # clip bboxes with dynamic `min` and `max` for onnx
+        if torch.onnx.is_in_onnx_export():
+            from mmcv.core.export import dynamic_clip_for_onnx
+            x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape)
+            bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
+            return bboxes
+        if not isinstance(max_shape, torch.Tensor):
+            max_shape = x1.new_tensor(max_shape)
+        max_shape = max_shape[..., :2].type_as(x1)
+        if max_shape.ndim == 2:
+            assert bboxes.ndim == 3
+            assert max_shape.size(0) == bboxes.size(0)
+
+        min_xy = x1.new_tensor(0)
+        max_xy = torch.cat([max_shape, max_shape],
+                           dim=-1).flip(-1).unsqueeze(-2)
+        bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
+        bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
+
+    return bboxes
+
+
+def bbox2distance(points, bbox, max_dis=None, eps=0.1):
+    """Encode a bounding box as distances from the given points.
+
+    Args:
+        points (Tensor): Shape (n, 2), [x, y].
+        bbox (Tensor): Shape (n, 4), "xyxy" format
+        max_dis (float): Upper bound of the distance.
+        eps (float): a small value to ensure target < max_dis
+            instead of <= max_dis.
+
+    Returns:
+        Tensor: Encoded distances.
+    """
+    left = points[:, 0] - bbox[:, 0]
+    top = points[:, 1] - bbox[:, 1]
+    right = bbox[:, 2] - points[:, 0]
+    bottom = bbox[:, 3] - points[:, 1]
+    if max_dis is not None:
+        left = left.clamp(min=0, max=max_dis - eps)
+        top = top.clamp(min=0, max=max_dis - eps)
+        right = right.clamp(min=0, max=max_dis - eps)
+        bottom = bottom.clamp(min=0, max=max_dis - eps)
+    return torch.stack([left, top, right, bottom], -1)
+
+
+def bbox_rescale(bboxes, scale_factor=1.0):
+    """Rescale bounding box w.r.t. scale_factor.
+
+    Args:
+        bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois
+        scale_factor (float): rescale factor
+
+    Returns:
+        Tensor: Rescaled bboxes.
+    """
+    if bboxes.size(1) == 5:
+        bboxes_ = bboxes[:, 1:]
+        inds_ = bboxes[:, 0]
+    else:
+        bboxes_ = bboxes
+    cx = (bboxes_[:, 0] + bboxes_[:, 2]) * 0.5
+    cy = (bboxes_[:, 1] + bboxes_[:, 3]) * 0.5
+    w = bboxes_[:, 2] - bboxes_[:, 0]
+    h = bboxes_[:, 3] - bboxes_[:, 1]
+    w = w * scale_factor
+    h = h * scale_factor
+    x1 = cx - 0.5 * w
+    x2 = cx + 0.5 * w
+    y1 = cy - 0.5 * h
+    y2 = cy + 0.5 * h
+    if bboxes.size(1) == 5:
+        rescaled_bboxes = torch.stack([inds_, x1, y1, x2, y2], dim=-1)
+    else:
+        rescaled_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
+    return rescaled_bboxes
+
+
+def bbox_cxcywh_to_xyxy(bbox):
+    """Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2).
+
+    Args:
+        bbox (Tensor): Shape (n, 4) for bboxes.
+
+    Returns:
+        Tensor: Converted bboxes.
+    """
+    cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1)
+    bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)]
+    return torch.cat(bbox_new, dim=-1)
+
+
+def bbox_xyxy_to_cxcywh(bbox):
+    """Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h).
+
+    Args:
+        bbox (Tensor): Shape (n, 4) for bboxes.
+
+    Returns:
+        Tensor: Converted bboxes.
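+
+    Example (editor's sketch, not in the original patch):
+        >>> import torch
+        >>> bbox_xyxy_to_cxcywh(torch.tensor([[0., 0., 4., 2.]]))
+        tensor([[2., 1., 4., 2.]])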
+ """ + x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1) + bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)] + return torch.cat(bbox_new, dim=-1) + +def bbox3d_mapping_back(bboxes, scale_factor, flip_horizontal, flip_vertical): + """Map bboxes from testing scale to original image scale. + + Args: + bboxes (:obj:`BaseInstance3DBoxes`): Boxes to be mapped back. + scale_factor (float): Scale factor. + flip_horizontal (bool): Whether to flip horizontally. + flip_vertical (bool): Whether to flip vertically. + + Returns: + :obj:`BaseInstance3DBoxes`: Boxes mapped back. + """ + new_bboxes = bboxes.clone() + if flip_horizontal: + new_bboxes.flip('horizontal') + if flip_vertical: + new_bboxes.flip('vertical') + new_bboxes.scale(1 / scale_factor) + + return new_bboxes + + +def bbox3d2roi(bbox_list): + """Convert a list of bounding boxes to roi format. + + Args: + bbox_list (list[torch.Tensor]): A list of bounding boxes + corresponding to a batch of images. + + Returns: + torch.Tensor: Region of interests in shape (n, c), where \ + the channels are in order of [batch_ind, x, y ...]. + """ + rois_list = [] + for img_id, bboxes in enumerate(bbox_list): + if bboxes.size(0) > 0: + img_inds = bboxes.new_full((bboxes.size(0), 1), img_id) + rois = torch.cat([img_inds, bboxes], dim=-1) + else: + rois = torch.zeros_like(bboxes) + rois_list.append(rois) + rois = torch.cat(rois_list, 0) + return rois + + +def bbox3d2result(bboxes, scores, labels, attrs=None): + """Convert detection results to a list of numpy arrays. + + Args: + bboxes (torch.Tensor): Bounding boxes with shape of (n, 5). + labels (torch.Tensor): Labels with shape of (n, ). + scores (torch.Tensor): Scores with shape of (n, ). + attrs (torch.Tensor, optional): Attributes with shape of (n, ). \ + Defaults to None. + + Returns: + dict[str, torch.Tensor]: Bounding box results in cpu mode. + + - boxes_3d (torch.Tensor): 3D boxes. + - scores (torch.Tensor): Prediction scores. + - labels_3d (torch.Tensor): Box labels. + - attrs_3d (torch.Tensor, optional): Box attributes. 
+ """ + result_dict = dict( + boxes_3d=bboxes.to('cpu'), + scores_3d=scores.cpu(), + labels_3d=labels.cpu()) + + if attrs is not None: + result_dict['attrs_3d'] = attrs.cpu() + + return result_dict + diff --git a/mmcv/core/bbox/util.py b/mmcv/core/bbox/util.py new file mode 100755 index 0000000..c54bd75 --- /dev/null +++ b/mmcv/core/bbox/util.py @@ -0,0 +1,53 @@ +import torch + + +def normalize_bbox(bboxes, pc_range): + + cx = bboxes[..., 0:1] + cy = bboxes[..., 1:2] + cz = bboxes[..., 2:3] + w = bboxes[..., 3:4].log() + l = bboxes[..., 4:5].log() + h = bboxes[..., 5:6].log() + + rot = bboxes[..., 6:7] + if bboxes.size(-1) > 7: + vx = bboxes[..., 7:8] + vy = bboxes[..., 8:9] + normalized_bboxes = torch.cat( + (cx, cy, w, l, cz, h, rot.sin(), rot.cos(), vx, vy), dim=-1 + ) + else: + normalized_bboxes = torch.cat( + (cx, cy, w, l, cz, h, rot.sin(), rot.cos()), dim=-1 + ) + return normalized_bboxes + +def denormalize_bbox(normalized_bboxes, pc_range): + # rotation + rot_sine = normalized_bboxes[..., 6:7] + + rot_cosine = normalized_bboxes[..., 7:8] + rot = torch.atan2(rot_sine, rot_cosine) + + # center in the bev + cx = normalized_bboxes[..., 0:1] + cy = normalized_bboxes[..., 1:2] + cz = normalized_bboxes[..., 4:5] + + # size + w = normalized_bboxes[..., 2:3] + l = normalized_bboxes[..., 3:4] + h = normalized_bboxes[..., 5:6] + + w = w.exp() + l = l.exp() + h = h.exp() + if normalized_bboxes.size(-1) > 8: + # velocity + vx = normalized_bboxes[:, 8:9] + vy = normalized_bboxes[:, 9:10] + denormalized_bboxes = torch.cat([cx, cy, cz, w, l, h, rot, vx, vy], dim=-1) + else: + denormalized_bboxes = torch.cat([cx, cy, cz, w, l, h, rot], dim=-1) + return denormalized_bboxes \ No newline at end of file diff --git a/mmcv/core/evaluation/__init__.py b/mmcv/core/evaluation/__init__.py new file mode 100644 index 0000000..b93b087 --- /dev/null +++ b/mmcv/core/evaluation/__init__.py @@ -0,0 +1,13 @@ +from .indoor_eval import indoor_eval +from .kitti_utils import kitti_eval, kitti_eval_coco_style +from .lyft_eval import lyft_eval +from .seg_eval import seg_eval +from .class_names import (cityscapes_classes, coco_classes, dataset_aliases, + get_classes, get_palette, imagenet_det_classes, + imagenet_vid_classes, voc_classes) +from .eval_hooks import DistEvalHook, EvalHook, CustomDistEvalHook +from .mean_ap import average_precision, eval_map, print_map_summary +from .recall import (eval_recalls, plot_iou_recall, plot_num_recall, + print_recall_summary) +from .metrics import eval_metrics, mean_dice, mean_fscore, mean_iou +from .metric_motion import get_ade,get_best_preds,get_fde \ No newline at end of file diff --git a/mmcv/core/evaluation/bbox_overlaps.py b/mmcv/core/evaluation/bbox_overlaps.py new file mode 100644 index 0000000..93559ea --- /dev/null +++ b/mmcv/core/evaluation/bbox_overlaps.py @@ -0,0 +1,48 @@ +import numpy as np + + +def bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6): + """Calculate the ious between each bbox of bboxes1 and bboxes2. 
+ + Args: + bboxes1(ndarray): shape (n, 4) + bboxes2(ndarray): shape (k, 4) + mode(str): iou (intersection over union) or iof (intersection + over foreground) + + Returns: + ious(ndarray): shape (n, k) + """ + + assert mode in ['iou', 'iof'] + + bboxes1 = bboxes1.astype(np.float32) + bboxes2 = bboxes2.astype(np.float32) + rows = bboxes1.shape[0] + cols = bboxes2.shape[0] + ious = np.zeros((rows, cols), dtype=np.float32) + if rows * cols == 0: + return ious + exchange = False + if bboxes1.shape[0] > bboxes2.shape[0]: + bboxes1, bboxes2 = bboxes2, bboxes1 + ious = np.zeros((cols, rows), dtype=np.float32) + exchange = True + area1 = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1]) + area2 = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1]) + for i in range(bboxes1.shape[0]): + x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0]) + y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1]) + x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2]) + y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3]) + overlap = np.maximum(x_end - x_start, 0) * np.maximum( + y_end - y_start, 0) + if mode == 'iou': + union = area1[i] + area2 - overlap + else: + union = area1[i] if not exchange else area2 + union = np.maximum(union, eps) + ious[i, :] = overlap / union + if exchange: + ious = ious.T + return ious diff --git a/mmcv/core/evaluation/class_names.py b/mmcv/core/evaluation/class_names.py new file mode 100644 index 0000000..0e0e4f2 --- /dev/null +++ b/mmcv/core/evaluation/class_names.py @@ -0,0 +1,219 @@ +from mmcv.utils import is_str + +def ade_classes(): + """ADE20K class names for external use.""" + return [ + 'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', + 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', + 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', + 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', + 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', + 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', + 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', + 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', + 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', + 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', + 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', + 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', + 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', + 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver', + 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', + 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', + 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', + 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', + 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank', + 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', + 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce', + 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen', + 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', + 'clock', 'flag' + ] + +def ade_palette(): + """ADE20K palette for external use.""" + return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], + [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], + [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], + [150, 5, 61], [120, 120, 70], 
[8, 255, 51], [255, 6, 82], + [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], + [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], + [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], + [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], + [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], + [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], + [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], + [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], + [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], + [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], + [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], + [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], + [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], + [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], + [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], + [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], + [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], + [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], + [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], + [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], + [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], + [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], + [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], + [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], + [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], + [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], + [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], + [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], + [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], + [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], + [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], + [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], + [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], + [102, 255, 0], [92, 0, 255]] + +def wider_face_classes(): + return ['face'] + + +def voc_classes(): + return [ + 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', + 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', + 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' + ] + +def voc_palette(): + """Pascal VOC palette for external use.""" + return [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], + [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], + [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128], + [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0], + [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]] + + +def imagenet_det_classes(): + return [ + 'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo', + 'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam', + 'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap', + 'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder', + 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito', + 'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle', + 'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker', + 'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew', + 'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper', + 'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly', + 'drum', 'dumbbell', 'electric_fan', 'elephant', 
'face_powder', 'fig', + 'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog', + 'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart', + 'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger', + 'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim', + 'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse', + 'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle', + 'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard', + 'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can', + 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace', + 'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume', + 'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza', + 'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine', + 'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse', + 'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator', + 'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler', + 'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver', + 'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile', + 'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula', + 'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer', + 'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine', + 'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie', + 'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet', + 'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin', + 'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft', + 'whale', 'wine_bottle', 'zebra' + ] + + +def imagenet_vid_classes(): + return [ + 'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car', + 'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda', + 'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit', + 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle', + 'watercraft', 'whale', 'zebra' + ] + + +def coco_classes(): + return [ + 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', + 'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign', + 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', + 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', + 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', + 'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard', + 'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork', + 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', + 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair', + 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', + 'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave', + 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', + 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush' + ] + + +def cityscapes_classes(): + return [ + 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', + 'bicycle' + ] + +def cityscapes_palette(): + """Cityscapes palette for external use.""" + return [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], + [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], + [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], + [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], + [0, 0, 230], [119, 11, 32]] + + +dataset_aliases = { + 'voc': ['voc', 'pascal_voc', 
'voc07', 'voc12'],
+    'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'],
+    'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'],
+    'coco': ['coco', 'mscoco', 'ms_coco'],
+    'wider_face': ['WIDERFaceDataset', 'wider_face', 'WIDERFace'],
+    'cityscapes': ['cityscapes'],
+    'ade': ['ade', 'ade20k']
+}
+
+
+def get_classes(dataset):
+    """Get class names of a dataset."""
+    alias2name = {}
+    for name, aliases in dataset_aliases.items():
+        for alias in aliases:
+            alias2name[alias] = name
+
+    if is_str(dataset):
+        if dataset in alias2name:
+            labels = eval(alias2name[dataset] + '_classes()')
+        else:
+            raise ValueError(f'Unrecognized dataset: {dataset}')
+    else:
+        raise TypeError(f'dataset must be a str, but got {type(dataset)}')
+    return labels
+
+
+def get_palette(dataset):
+    """Get class palette (RGB) of a dataset."""
+    alias2name = {}
+    for name, aliases in dataset_aliases.items():
+        for alias in aliases:
+            alias2name[alias] = name
+
+    if is_str(dataset):
+        if dataset in alias2name:
+            labels = eval(alias2name[dataset] + '_palette()')
+        else:
+            raise ValueError(f'Unrecognized dataset: {dataset}')
+    else:
+        raise TypeError(f'dataset must be a str, but got {type(dataset)}')
+    return labels
diff --git a/mmcv/core/evaluation/eval_hooks.py b/mmcv/core/evaluation/eval_hooks.py
new file mode 100644
index 0000000..dcaf73d
--- /dev/null
+++ b/mmcv/core/evaluation/eval_hooks.py
@@ -0,0 +1,133 @@
+import bisect
+import os.path as osp
+
+import torch.distributed as dist
+from mmcv.runner import DistEvalHook as BaseDistEvalHook
+from mmcv.runner import EvalHook as BaseEvalHook
+from mmcv.utils import is_list_of
+from torch.nn.modules.batchnorm import _BatchNorm
+
+
+class EvalHook(BaseEvalHook):
+
+    def _do_evaluate(self, runner):
+        """Perform evaluation and save the checkpoint."""
+        if not self._should_evaluate(runner):
+            return
+
+        results = self.test_fn(runner.model, self.dataloader, show=False)
+        runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
+        key_score = self.evaluate(runner, results)
+        if self.save_best:
+            self._save_ckpt(runner, key_score)
+
+
+class DistEvalHook(BaseDistEvalHook):
+
+    def _do_evaluate(self, runner):
+        """Perform evaluation and save the checkpoint."""
+        # Synchronization of BatchNorm's buffer (running_mean
+        # and running_var) is not supported in the DDP of pytorch,
+        # which may cause the inconsistent performance of models in
+        # different ranks, so we broadcast BatchNorm's buffers
+        # of rank 0 to other ranks to avoid this.
+ if self.broadcast_bn_buffer: + model = runner.model + for name, module in model.named_modules(): + if isinstance(module, + _BatchNorm) and module.track_running_stats: + dist.broadcast(module.running_var, 0) + dist.broadcast(module.running_mean, 0) + + if not self._should_evaluate(runner): + return + + tmpdir = self.tmpdir + if tmpdir is None: + tmpdir = osp.join(runner.work_dir, '.eval_hook') + + results = self.test_fn( + runner.model, + self.dataloader, + tmpdir=tmpdir, + gpu_collect=self.gpu_collect) + if runner.rank == 0: + print('\n') + runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) + key_score = self.evaluate(runner, results) + + if self.save_best: + self._save_ckpt(runner, key_score) + +def _calc_dynamic_intervals(start_interval, dynamic_interval_list): + assert is_list_of(dynamic_interval_list, tuple) + + dynamic_milestones = [0] + dynamic_milestones.extend( + [dynamic_interval[0] for dynamic_interval in dynamic_interval_list]) + dynamic_intervals = [start_interval] + dynamic_intervals.extend( + [dynamic_interval[1] for dynamic_interval in dynamic_interval_list]) + return dynamic_milestones, dynamic_intervals + + +class CustomDistEvalHook(BaseDistEvalHook): + + def __init__(self, *args, dynamic_intervals=None, **kwargs): + super(CustomDistEvalHook, self).__init__(*args, **kwargs) + self.use_dynamic_intervals = dynamic_intervals is not None + if self.use_dynamic_intervals: + self.dynamic_milestones, self.dynamic_intervals = \ + _calc_dynamic_intervals(self.interval, dynamic_intervals) + + def _decide_interval(self, runner): + if self.use_dynamic_intervals: + progress = runner.epoch if self.by_epoch else runner.iter + step = bisect.bisect(self.dynamic_milestones, (progress + 1)) + # Dynamically modify the evaluation interval + self.interval = self.dynamic_intervals[step - 1] + + def before_train_epoch(self, runner): + """Evaluate the model only at the start of training by epoch.""" + self._decide_interval(runner) + super().before_train_epoch(runner) + + def before_train_iter(self, runner): + self._decide_interval(runner) + super().before_train_iter(runner) + + def _do_evaluate(self, runner): + """perform evaluation and save ckpt.""" + # Synchronization of BatchNorm's buffer (running_mean + # and running_var) is not supported in the DDP of pytorch, + # which may cause the inconsistent performance of models in + # different ranks, so we broadcast BatchNorm's buffers + # of rank 0 to other ranks to avoid this. + if self.broadcast_bn_buffer: + model = runner.model + for name, module in model.named_modules(): + if isinstance(module, + _BatchNorm) and module.track_running_stats: + dist.broadcast(module.running_var, 0) + dist.broadcast(module.running_mean, 0) + + if not self._should_evaluate(runner): + return + + tmpdir = self.tmpdir + if tmpdir is None: + tmpdir = osp.join(runner.work_dir, '.eval_hook') + + results = self.test_fn( + runner.model, + self.dataloader, + tmpdir=tmpdir, + gpu_collect=self.gpu_collect) + if runner.rank == 0: + print('\n') + runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) + + key_score = self.evaluate(runner, results) + + if self.save_best: + self._save_ckpt(runner, key_score) diff --git a/mmcv/core/evaluation/indoor_eval.py b/mmcv/core/evaluation/indoor_eval.py new file mode 100644 index 0000000..ff0dac1 --- /dev/null +++ b/mmcv/core/evaluation/indoor_eval.py @@ -0,0 +1,310 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
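Note on the dynamic-interval logic above: `_calc_dynamic_intervals` turns `(milestone, interval)` pairs into parallel lists, and `_decide_interval` picks the interval for the current epoch/iter with `bisect`. A minimal standalone sketch follows (illustrative only, not part of the patch; the schedule values are made up):

    import bisect

    def resolve_interval(progress, start_interval, dynamic_interval_list):
        # Mirrors _calc_dynamic_intervals + _decide_interval in eval_hooks.py.
        milestones = [0] + [m for m, _ in dynamic_interval_list]
        intervals = [start_interval] + [i for _, i in dynamic_interval_list]
        step = bisect.bisect(milestones, progress + 1)
        return intervals[step - 1]

    # interval=5 until epoch 20, every 2 epochs until 35, then every epoch.
    assert resolve_interval(10, 5, [(20, 2), (35, 1)]) == 5
    assert resolve_interval(25, 5, [(20, 2), (35, 1)]) == 2
    assert resolve_interval(40, 5, [(20, 2), (35, 1)]) == 1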
+import numpy as np +import torch +from mmcv.utils import print_log +from terminaltables import AsciiTable + + +def average_precision(recalls, precisions, mode='area'): + """Calculate average precision (for single or multiple scales). + + Args: + recalls (np.ndarray): Recalls with shape of (num_scales, num_dets) \ + or (num_dets, ). + precisions (np.ndarray): Precisions with shape of \ + (num_scales, num_dets) or (num_dets, ). + mode (str): 'area' or '11points', 'area' means calculating the area + under precision-recall curve, '11points' means calculating + the average precision of recalls at [0, 0.1, ..., 1] + + Returns: + float or np.ndarray: Calculated average precision. + """ + if recalls.ndim == 1: + recalls = recalls[np.newaxis, :] + precisions = precisions[np.newaxis, :] + + assert recalls.shape == precisions.shape + assert recalls.ndim == 2 + + num_scales = recalls.shape[0] + ap = np.zeros(num_scales, dtype=np.float32) + if mode == 'area': + zeros = np.zeros((num_scales, 1), dtype=recalls.dtype) + ones = np.ones((num_scales, 1), dtype=recalls.dtype) + mrec = np.hstack((zeros, recalls, ones)) + mpre = np.hstack((zeros, precisions, zeros)) + for i in range(mpre.shape[1] - 1, 0, -1): + mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i]) + for i in range(num_scales): + ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0] + ap[i] = np.sum( + (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1]) + elif mode == '11points': + for i in range(num_scales): + for thr in np.arange(0, 1 + 1e-3, 0.1): + precs = precisions[i, recalls[i, :] >= thr] + prec = precs.max() if precs.size > 0 else 0 + ap[i] += prec + ap /= 11 + else: + raise ValueError( + 'Unrecognized mode, only "area" and "11points" are supported') + return ap + + +def eval_det_cls(pred, gt, iou_thr=None): + """Generic functions to compute precision/recall for object detection for a + single class. + + Args: + pred (dict): Predictions mapping from image id to bounding boxes \ + and scores. + gt (dict): Ground truths mapping from image id to bounding boxes. + iou_thr (list[float]): A list of iou thresholds. + + Return: + tuple (np.ndarray, np.ndarray, float): Recalls, precisions and \ + average precision. 
+ """ + + # {img_id: {'bbox': box structure, 'det': matched list}} + class_recs = {} + npos = 0 + for img_id in gt.keys(): + cur_gt_num = len(gt[img_id]) + if cur_gt_num != 0: + gt_cur = torch.zeros([cur_gt_num, 7], dtype=torch.float32) + for i in range(cur_gt_num): + gt_cur[i] = gt[img_id][i].tensor + bbox = gt[img_id][0].new_box(gt_cur) + else: + bbox = gt[img_id] + det = [[False] * len(bbox) for i in iou_thr] + npos += len(bbox) + class_recs[img_id] = {'bbox': bbox, 'det': det} + + # construct dets + image_ids = [] + confidence = [] + ious = [] + for img_id in pred.keys(): + cur_num = len(pred[img_id]) + if cur_num == 0: + continue + pred_cur = torch.zeros((cur_num, 7), dtype=torch.float32) + box_idx = 0 + for box, score in pred[img_id]: + image_ids.append(img_id) + confidence.append(score) + pred_cur[box_idx] = box.tensor + box_idx += 1 + pred_cur = box.new_box(pred_cur) + gt_cur = class_recs[img_id]['bbox'] + if len(gt_cur) > 0: + # calculate iou in each image + iou_cur = pred_cur.overlaps(pred_cur, gt_cur) + for i in range(cur_num): + ious.append(iou_cur[i]) + else: + for i in range(cur_num): + ious.append(np.zeros(1)) + + confidence = np.array(confidence) + + # sort by confidence + sorted_ind = np.argsort(-confidence) + image_ids = [image_ids[x] for x in sorted_ind] + ious = [ious[x] for x in sorted_ind] + + # go down dets and mark TPs and FPs + nd = len(image_ids) + tp_thr = [np.zeros(nd) for i in iou_thr] + fp_thr = [np.zeros(nd) for i in iou_thr] + for d in range(nd): + R = class_recs[image_ids[d]] + iou_max = -np.inf + BBGT = R['bbox'] + cur_iou = ious[d] + + if len(BBGT) > 0: + # compute overlaps + for j in range(len(BBGT)): + # iou = get_iou_main(get_iou_func, (bb, BBGT[j,...])) + iou = cur_iou[j] + if iou > iou_max: + iou_max = iou + jmax = j + + for iou_idx, thresh in enumerate(iou_thr): + if iou_max > thresh: + if not R['det'][iou_idx][jmax]: + tp_thr[iou_idx][d] = 1. + R['det'][iou_idx][jmax] = 1 + else: + fp_thr[iou_idx][d] = 1. + else: + fp_thr[iou_idx][d] = 1. + + ret = [] + for iou_idx, thresh in enumerate(iou_thr): + # compute precision recall + fp = np.cumsum(fp_thr[iou_idx]) + tp = np.cumsum(tp_thr[iou_idx]) + recall = tp / float(npos) + # avoid divide by zero in case the first detection matches a difficult + # ground truth + precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) + ap = average_precision(recall, precision) + ret.append((recall, precision, ap)) + + return ret + + +def eval_map_recall(pred, gt, ovthresh=None): + """Evaluate mAP and recall. + + Generic functions to compute precision/recall for object detection + for multiple classes. + + Args: + pred (dict): Information of detection results, + which maps class_id and predictions. + gt (dict): Information of ground truths, which maps class_id and \ + ground truths. + ovthresh (list[float]): iou threshold. + Default: None. + + Return: + tuple[dict]: dict results of recall, AP, and precision for all classes. 
+ """ + + ret_values = {} + for classname in gt.keys(): + if classname in pred: + ret_values[classname] = eval_det_cls(pred[classname], + gt[classname], ovthresh) + recall = [{} for i in ovthresh] + precision = [{} for i in ovthresh] + ap = [{} for i in ovthresh] + + for label in gt.keys(): + for iou_idx, thresh in enumerate(ovthresh): + if label in pred: + recall[iou_idx][label], precision[iou_idx][label], ap[iou_idx][ + label] = ret_values[label][iou_idx] + else: + recall[iou_idx][label] = np.zeros(1) + precision[iou_idx][label] = np.zeros(1) + ap[iou_idx][label] = np.zeros(1) + + return recall, precision, ap + + +def indoor_eval(gt_annos, + dt_annos, + metric, + label2cat, + logger=None, + box_type_3d=None, + box_mode_3d=None): + """Indoor Evaluation. + + Evaluate the result of the detection. + + Args: + gt_annos (list[dict]): Ground truth annotations. + dt_annos (list[dict]): Detection annotations. the dict + includes the following keys + + - labels_3d (torch.Tensor): Labels of boxes. + - boxes_3d (:obj:`BaseInstance3DBoxes`): \ + 3D bounding boxes in Depth coordinate. + - scores_3d (torch.Tensor): Scores of boxes. + metric (list[float]): IoU thresholds for computing average precisions. + label2cat (dict): Map from label to category. + logger (logging.Logger | str | None): The way to print the mAP + summary. See `mmcv.utils.print_log()` for details. Default: None. + + Return: + dict[str, float]: Dict of results. + """ + assert len(dt_annos) == len(gt_annos) + pred = {} # map {class_id: pred} + gt = {} # map {class_id: gt} + for img_id in range(len(dt_annos)): + # parse detected annotations + det_anno = dt_annos[img_id] + for i in range(len(det_anno['labels_3d'])): + label = det_anno['labels_3d'].numpy()[i] + bbox = det_anno['boxes_3d'].convert_to(box_mode_3d)[i] + score = det_anno['scores_3d'].numpy()[i] + if label not in pred: + pred[int(label)] = {} + if img_id not in pred[label]: + pred[int(label)][img_id] = [] + if label not in gt: + gt[int(label)] = {} + if img_id not in gt[label]: + gt[int(label)][img_id] = [] + pred[int(label)][img_id].append((bbox, score)) + + # parse gt annotations + gt_anno = gt_annos[img_id] + if gt_anno['gt_num'] != 0: + gt_boxes = box_type_3d( + gt_anno['gt_boxes_upright_depth'], + box_dim=gt_anno['gt_boxes_upright_depth'].shape[-1], + origin=(0.5, 0.5, 0.5)).convert_to(box_mode_3d) + labels_3d = gt_anno['class'] + else: + gt_boxes = box_type_3d(np.array([], dtype=np.float32)) + labels_3d = np.array([], dtype=np.int64) + + for i in range(len(labels_3d)): + label = labels_3d[i] + bbox = gt_boxes[i] + if label not in gt: + gt[label] = {} + if img_id not in gt[label]: + gt[label][img_id] = [] + gt[label][img_id].append(bbox) + + rec, prec, ap = eval_map_recall(pred, gt, metric) + ret_dict = dict() + header = ['classes'] + table_columns = [[label2cat[label] + for label in ap[0].keys()] + ['Overall']] + + for i, iou_thresh in enumerate(metric): + header.append(f'AP_{iou_thresh:.2f}') + header.append(f'AR_{iou_thresh:.2f}') + rec_list = [] + for label in ap[i].keys(): + ret_dict[f'{label2cat[label]}_AP_{iou_thresh:.2f}'] = float( + ap[i][label][0]) + ret_dict[f'mAP_{iou_thresh:.2f}'] = float( + np.mean(list(ap[i].values()))) + + table_columns.append(list(map(float, list(ap[i].values())))) + table_columns[-1] += [ret_dict[f'mAP_{iou_thresh:.2f}']] + table_columns[-1] = [f'{x:.4f}' for x in table_columns[-1]] + + for label in rec[i].keys(): + ret_dict[f'{label2cat[label]}_rec_{iou_thresh:.2f}'] = float( + rec[i][label][-1]) + rec_list.append(rec[i][label][-1]) + 
ret_dict[f'mAR_{iou_thresh:.2f}'] = float(np.mean(rec_list)) + + table_columns.append(list(map(float, rec_list))) + table_columns[-1] += [ret_dict[f'mAR_{iou_thresh:.2f}']] + table_columns[-1] = [f'{x:.4f}' for x in table_columns[-1]] + + table_data = [header] + table_rows = list(zip(*table_columns)) + table_data += table_rows + table = AsciiTable(table_data) + table.inner_footing_row_border = True + print_log('\n' + table.table, logger=logger) + + return ret_dict diff --git a/mmcv/core/evaluation/kitti_utils/__init__.py b/mmcv/core/evaluation/kitti_utils/__init__.py new file mode 100644 index 0000000..23c1cdf --- /dev/null +++ b/mmcv/core/evaluation/kitti_utils/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .eval import kitti_eval, kitti_eval_coco_style + +__all__ = ['kitti_eval', 'kitti_eval_coco_style'] diff --git a/mmcv/core/evaluation/kitti_utils/eval.py b/mmcv/core/evaluation/kitti_utils/eval.py new file mode 100644 index 0000000..93492c4 --- /dev/null +++ b/mmcv/core/evaluation/kitti_utils/eval.py @@ -0,0 +1,847 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import gc +import io as sysio +import numba +import numpy as np + + +@numba.jit +def get_thresholds(scores: np.ndarray, num_gt, num_sample_pts=41): + scores.sort() + scores = scores[::-1] + current_recall = 0 + thresholds = [] + for i, score in enumerate(scores): + l_recall = (i + 1) / num_gt + if i < (len(scores) - 1): + r_recall = (i + 2) / num_gt + else: + r_recall = l_recall + if (((r_recall - current_recall) < (current_recall - l_recall)) + and (i < (len(scores) - 1))): + continue + # recall = l_recall + thresholds.append(score) + current_recall += 1 / (num_sample_pts - 1.0) + return thresholds + + +def clean_data(gt_anno, dt_anno, current_class, difficulty): + CLASS_NAMES = ['car', 'pedestrian', 'cyclist'] + MIN_HEIGHT = [40, 25, 25] + MAX_OCCLUSION = [0, 1, 2] + MAX_TRUNCATION = [0.15, 0.3, 0.5] + dc_bboxes, ignored_gt, ignored_dt = [], [], [] + current_cls_name = CLASS_NAMES[current_class].lower() + num_gt = len(gt_anno['name']) + num_dt = len(dt_anno['name']) + num_valid_gt = 0 + for i in range(num_gt): + bbox = gt_anno['bbox'][i] + gt_name = gt_anno['name'][i].lower() + height = bbox[3] - bbox[1] + valid_class = -1 + if (gt_name == current_cls_name): + valid_class = 1 + elif (current_cls_name == 'Pedestrian'.lower() + and 'Person_sitting'.lower() == gt_name): + valid_class = 0 + elif (current_cls_name == 'Car'.lower() and 'Van'.lower() == gt_name): + valid_class = 0 + else: + valid_class = -1 + ignore = False + if ((gt_anno['occluded'][i] > MAX_OCCLUSION[difficulty]) + or (gt_anno['truncated'][i] > MAX_TRUNCATION[difficulty]) + or (height <= MIN_HEIGHT[difficulty])): + ignore = True + if valid_class == 1 and not ignore: + ignored_gt.append(0) + num_valid_gt += 1 + elif (valid_class == 0 or (ignore and (valid_class == 1))): + ignored_gt.append(1) + else: + ignored_gt.append(-1) + # for i in range(num_gt): + if gt_anno['name'][i] == 'DontCare': + dc_bboxes.append(gt_anno['bbox'][i]) + for i in range(num_dt): + if (dt_anno['name'][i].lower() == current_cls_name): + valid_class = 1 + else: + valid_class = -1 + height = abs(dt_anno['bbox'][i, 3] - dt_anno['bbox'][i, 1]) + if height < MIN_HEIGHT[difficulty]: + ignored_dt.append(1) + elif valid_class == 1: + ignored_dt.append(0) + else: + ignored_dt.append(-1) + + return num_valid_gt, ignored_gt, ignored_dt, dc_bboxes + + +@numba.jit(nopython=True) +def image_box_overlap(boxes, query_boxes, criterion=-1): + N = boxes.shape[0] + K = 
query_boxes.shape[0] + overlaps = np.zeros((N, K), dtype=boxes.dtype) + for k in range(K): + qbox_area = ((query_boxes[k, 2] - query_boxes[k, 0]) * + (query_boxes[k, 3] - query_boxes[k, 1])) + for n in range(N): + iw = ( + min(boxes[n, 2], query_boxes[k, 2]) - + max(boxes[n, 0], query_boxes[k, 0])) + if iw > 0: + ih = ( + min(boxes[n, 3], query_boxes[k, 3]) - + max(boxes[n, 1], query_boxes[k, 1])) + if ih > 0: + if criterion == -1: + ua = ((boxes[n, 2] - boxes[n, 0]) * + (boxes[n, 3] - boxes[n, 1]) + qbox_area - + iw * ih) + elif criterion == 0: + ua = ((boxes[n, 2] - boxes[n, 0]) * + (boxes[n, 3] - boxes[n, 1])) + elif criterion == 1: + ua = qbox_area + else: + ua = 1.0 + overlaps[n, k] = iw * ih / ua + return overlaps + + +def bev_box_overlap(boxes, qboxes, criterion=-1): + from .rotate_iou import rotate_iou_gpu_eval + riou = rotate_iou_gpu_eval(boxes, qboxes, criterion) + return riou + + +@numba.jit(nopython=True, parallel=True) +def d3_box_overlap_kernel(boxes, qboxes, rinc, criterion=-1): + # ONLY support overlap in CAMERA, not lidar. + # TODO: change to use prange for parallel mode, should check the difference + N, K = boxes.shape[0], qboxes.shape[0] + for i in numba.prange(N): + for j in numba.prange(K): + if rinc[i, j] > 0: + # iw = (min(boxes[i, 1] + boxes[i, 4], qboxes[j, 1] + + # qboxes[j, 4]) - max(boxes[i, 1], qboxes[j, 1])) + iw = ( + min(boxes[i, 1], qboxes[j, 1]) - + max(boxes[i, 1] - boxes[i, 4], + qboxes[j, 1] - qboxes[j, 4])) + + if iw > 0: + area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5] + area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5] + inc = iw * rinc[i, j] + if criterion == -1: + ua = (area1 + area2 - inc) + elif criterion == 0: + ua = area1 + elif criterion == 1: + ua = area2 + else: + ua = inc + rinc[i, j] = inc / ua + else: + rinc[i, j] = 0.0 + + +def d3_box_overlap(boxes, qboxes, criterion=-1): + from .rotate_iou import rotate_iou_gpu_eval + rinc = rotate_iou_gpu_eval(boxes[:, [0, 2, 3, 5, 6]], + qboxes[:, [0, 2, 3, 5, 6]], 2) + d3_box_overlap_kernel(boxes, qboxes, rinc, criterion) + return rinc + + +@numba.jit(nopython=True) +def compute_statistics_jit(overlaps, + gt_datas, + dt_datas, + ignored_gt, + ignored_det, + dc_bboxes, + metric, + min_overlap, + thresh=0, + compute_fp=False, + compute_aos=False): + + det_size = dt_datas.shape[0] + gt_size = gt_datas.shape[0] + dt_scores = dt_datas[:, -1] + dt_alphas = dt_datas[:, 4] + gt_alphas = gt_datas[:, 4] + dt_bboxes = dt_datas[:, :4] + # gt_bboxes = gt_datas[:, :4] + + assigned_detection = [False] * det_size + ignored_threshold = [False] * det_size + if compute_fp: + for i in range(det_size): + if (dt_scores[i] < thresh): + ignored_threshold[i] = True + NO_DETECTION = -10000000 + tp, fp, fn, similarity = 0, 0, 0, 0 + # thresholds = [0.0] + # delta = [0.0] + thresholds = np.zeros((gt_size, )) + thresh_idx = 0 + delta = np.zeros((gt_size, )) + delta_idx = 0 + for i in range(gt_size): + if ignored_gt[i] == -1: + continue + det_idx = -1 + valid_detection = NO_DETECTION + max_overlap = 0 + assigned_ignored_det = False + + for j in range(det_size): + if (ignored_det[j] == -1): + continue + if (assigned_detection[j]): + continue + if (ignored_threshold[j]): + continue + overlap = overlaps[j, i] + dt_score = dt_scores[j] + if (not compute_fp and (overlap > min_overlap) + and dt_score > valid_detection): + det_idx = j + valid_detection = dt_score + elif (compute_fp and (overlap > min_overlap) + and (overlap > max_overlap or assigned_ignored_det) + and ignored_det[j] == 0): + max_overlap = overlap + det_idx = j + 
valid_detection = 1 + assigned_ignored_det = False + elif (compute_fp and (overlap > min_overlap) + and (valid_detection == NO_DETECTION) + and ignored_det[j] == 1): + det_idx = j + valid_detection = 1 + assigned_ignored_det = True + + if (valid_detection == NO_DETECTION) and ignored_gt[i] == 0: + fn += 1 + elif ((valid_detection != NO_DETECTION) + and (ignored_gt[i] == 1 or ignored_det[det_idx] == 1)): + assigned_detection[det_idx] = True + elif valid_detection != NO_DETECTION: + tp += 1 + # thresholds.append(dt_scores[det_idx]) + thresholds[thresh_idx] = dt_scores[det_idx] + thresh_idx += 1 + if compute_aos: + # delta.append(gt_alphas[i] - dt_alphas[det_idx]) + delta[delta_idx] = gt_alphas[i] - dt_alphas[det_idx] + delta_idx += 1 + + assigned_detection[det_idx] = True + if compute_fp: + for i in range(det_size): + if (not (assigned_detection[i] or ignored_det[i] == -1 + or ignored_det[i] == 1 or ignored_threshold[i])): + fp += 1 + nstuff = 0 + if metric == 0: + overlaps_dt_dc = image_box_overlap(dt_bboxes, dc_bboxes, 0) + for i in range(dc_bboxes.shape[0]): + for j in range(det_size): + if (assigned_detection[j]): + continue + if (ignored_det[j] == -1 or ignored_det[j] == 1): + continue + if (ignored_threshold[j]): + continue + if overlaps_dt_dc[j, i] > min_overlap: + assigned_detection[j] = True + nstuff += 1 + fp -= nstuff + if compute_aos: + tmp = np.zeros((fp + delta_idx, )) + # tmp = [0] * fp + for i in range(delta_idx): + tmp[i + fp] = (1.0 + np.cos(delta[i])) / 2.0 + # tmp.append((1.0 + np.cos(delta[i])) / 2.0) + # assert len(tmp) == fp + tp + # assert len(delta) == tp + if tp > 0 or fp > 0: + similarity = np.sum(tmp) + else: + similarity = -1 + return tp, fp, fn, similarity, thresholds[:thresh_idx] + + +def get_split_parts(num, num_part): + same_part = num // num_part + remain_num = num % num_part + if remain_num == 0: + return [same_part] * num_part + else: + return [same_part] * num_part + [remain_num] + + +@numba.jit(nopython=True) +def fused_compute_statistics(overlaps, + pr, + gt_nums, + dt_nums, + dc_nums, + gt_datas, + dt_datas, + dontcares, + ignored_gts, + ignored_dets, + metric, + min_overlap, + thresholds, + compute_aos=False): + gt_num = 0 + dt_num = 0 + dc_num = 0 + for i in range(gt_nums.shape[0]): + for t, thresh in enumerate(thresholds): + overlap = overlaps[dt_num:dt_num + dt_nums[i], + gt_num:gt_num + gt_nums[i]] + + gt_data = gt_datas[gt_num:gt_num + gt_nums[i]] + dt_data = dt_datas[dt_num:dt_num + dt_nums[i]] + ignored_gt = ignored_gts[gt_num:gt_num + gt_nums[i]] + ignored_det = ignored_dets[dt_num:dt_num + dt_nums[i]] + dontcare = dontcares[dc_num:dc_num + dc_nums[i]] + tp, fp, fn, similarity, _ = compute_statistics_jit( + overlap, + gt_data, + dt_data, + ignored_gt, + ignored_det, + dontcare, + metric, + min_overlap=min_overlap, + thresh=thresh, + compute_fp=True, + compute_aos=compute_aos) + pr[t, 0] += tp + pr[t, 1] += fp + pr[t, 2] += fn + if similarity != -1: + pr[t, 3] += similarity + gt_num += gt_nums[i] + dt_num += dt_nums[i] + dc_num += dc_nums[i] + + +def calculate_iou_partly(gt_annos, dt_annos, metric, num_parts=50): + """Fast iou algorithm. this function can be used independently to do result + analysis. Must be used in CAMERA coordinate system. + + Args: + gt_annos (dict): Must from get_label_annos() in kitti_common.py. + dt_annos (dict): Must from get_label_annos() in kitti_common.py. + metric (int): Eval type. 0: bbox, 1: bev, 2: 3d. + num_parts (int): A parameter for fast calculate algorithm. 
+ """ + assert len(gt_annos) == len(dt_annos) + total_dt_num = np.stack([len(a['name']) for a in dt_annos], 0) + total_gt_num = np.stack([len(a['name']) for a in gt_annos], 0) + num_examples = len(gt_annos) + split_parts = get_split_parts(num_examples, num_parts) + parted_overlaps = [] + example_idx = 0 + + for num_part in split_parts: + gt_annos_part = gt_annos[example_idx:example_idx + num_part] + dt_annos_part = dt_annos[example_idx:example_idx + num_part] + if metric == 0: + gt_boxes = np.concatenate([a['bbox'] for a in gt_annos_part], 0) + dt_boxes = np.concatenate([a['bbox'] for a in dt_annos_part], 0) + overlap_part = image_box_overlap(gt_boxes, dt_boxes) + elif metric == 1: + loc = np.concatenate( + [a['location'][:, [0, 2]] for a in gt_annos_part], 0) + dims = np.concatenate( + [a['dimensions'][:, [0, 2]] for a in gt_annos_part], 0) + rots = np.concatenate([a['rotation_y'] for a in gt_annos_part], 0) + gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], + axis=1) + loc = np.concatenate( + [a['location'][:, [0, 2]] for a in dt_annos_part], 0) + dims = np.concatenate( + [a['dimensions'][:, [0, 2]] for a in dt_annos_part], 0) + rots = np.concatenate([a['rotation_y'] for a in dt_annos_part], 0) + dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], + axis=1) + overlap_part = bev_box_overlap(gt_boxes, + dt_boxes).astype(np.float64) + elif metric == 2: + loc = np.concatenate([a['location'] for a in gt_annos_part], 0) + dims = np.concatenate([a['dimensions'] for a in gt_annos_part], 0) + rots = np.concatenate([a['rotation_y'] for a in gt_annos_part], 0) + gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], + axis=1) + loc = np.concatenate([a['location'] for a in dt_annos_part], 0) + dims = np.concatenate([a['dimensions'] for a in dt_annos_part], 0) + rots = np.concatenate([a['rotation_y'] for a in dt_annos_part], 0) + dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]], + axis=1) + overlap_part = d3_box_overlap(gt_boxes, + dt_boxes).astype(np.float64) + else: + raise ValueError('unknown metric') + parted_overlaps.append(overlap_part) + example_idx += num_part + overlaps = [] + example_idx = 0 + for j, num_part in enumerate(split_parts): + gt_annos_part = gt_annos[example_idx:example_idx + num_part] + dt_annos_part = dt_annos[example_idx:example_idx + num_part] + gt_num_idx, dt_num_idx = 0, 0 + for i in range(num_part): + gt_box_num = total_gt_num[example_idx + i] + dt_box_num = total_dt_num[example_idx + i] + overlaps.append( + parted_overlaps[j][gt_num_idx:gt_num_idx + gt_box_num, + dt_num_idx:dt_num_idx + dt_box_num]) + gt_num_idx += gt_box_num + dt_num_idx += dt_box_num + example_idx += num_part + + return overlaps, parted_overlaps, total_gt_num, total_dt_num + + +def _prepare_data(gt_annos, dt_annos, current_class, difficulty): + gt_datas_list = [] + dt_datas_list = [] + total_dc_num = [] + ignored_gts, ignored_dets, dontcares = [], [], [] + total_num_valid_gt = 0 + for i in range(len(gt_annos)): + rets = clean_data(gt_annos[i], dt_annos[i], current_class, difficulty) + num_valid_gt, ignored_gt, ignored_det, dc_bboxes = rets + ignored_gts.append(np.array(ignored_gt, dtype=np.int64)) + ignored_dets.append(np.array(ignored_det, dtype=np.int64)) + if len(dc_bboxes) == 0: + dc_bboxes = np.zeros((0, 4)).astype(np.float64) + else: + dc_bboxes = np.stack(dc_bboxes, 0).astype(np.float64) + total_dc_num.append(dc_bboxes.shape[0]) + dontcares.append(dc_bboxes) + total_num_valid_gt += num_valid_gt + gt_datas = np.concatenate( + [gt_annos[i]['bbox'], 
gt_annos[i]['alpha'][..., np.newaxis]], 1)
+        dt_datas = np.concatenate([
+            dt_annos[i]['bbox'], dt_annos[i]['alpha'][..., np.newaxis],
+            dt_annos[i]['score'][..., np.newaxis]
+        ], 1)
+        gt_datas_list.append(gt_datas)
+        dt_datas_list.append(dt_datas)
+    total_dc_num = np.stack(total_dc_num, axis=0)
+    return (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares,
+            total_dc_num, total_num_valid_gt)
+
+
+def eval_class(gt_annos,
+               dt_annos,
+               current_classes,
+               difficultys,
+               metric,
+               min_overlaps,
+               compute_aos=False,
+               num_parts=200):
+    """KITTI evaluation. Supports 2D/BEV/3D/AOS eval and 0.5:0.05:0.95 COCO AP.
+
+    Args:
+        gt_annos (dict): Must be from get_label_annos() in kitti_common.py.
+        dt_annos (dict): Must be from get_label_annos() in kitti_common.py.
+        current_classes (list[int]): 0: car, 1: pedestrian, 2: cyclist.
+        difficultys (list[int]): Eval difficulty, 0: easy, 1: normal, 2: hard.
+        metric (int): Eval type. 0: bbox, 1: bev, 2: 3d.
+        min_overlaps (float): Min overlap. Format:
+            [num_overlap, metric, class].
+        num_parts (int): A parameter for the fast calculation algorithm.
+
+    Returns:
+        dict[str, np.ndarray]: recall, precision and aos.
+    """
+    assert len(gt_annos) == len(dt_annos)
+    num_examples = len(gt_annos)
+    if num_examples < num_parts:
+        num_parts = num_examples
+    split_parts = get_split_parts(num_examples, num_parts)
+
+    rets = calculate_iou_partly(dt_annos, gt_annos, metric, num_parts)
+    overlaps, parted_overlaps, total_dt_num, total_gt_num = rets
+    N_SAMPLE_PTS = 41
+    num_minoverlap = len(min_overlaps)
+    num_class = len(current_classes)
+    num_difficulty = len(difficultys)
+    precision = np.zeros(
+        [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
+    recall = np.zeros(
+        [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
+    aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
+    for m, current_class in enumerate(current_classes):
+        for idx_l, difficulty in enumerate(difficultys):
+            rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)
+            (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets,
+             dontcares, total_dc_num, total_num_valid_gt) = rets
+            for k, min_overlap in enumerate(min_overlaps[:, metric, m]):
+                thresholdss = []
+                for i in range(len(gt_annos)):
+                    rets = compute_statistics_jit(
+                        overlaps[i],
+                        gt_datas_list[i],
+                        dt_datas_list[i],
+                        ignored_gts[i],
+                        ignored_dets[i],
+                        dontcares[i],
+                        metric,
+                        min_overlap=min_overlap,
+                        thresh=0.0,
+                        compute_fp=False)
+                    tp, fp, fn, similarity, thresholds = rets
+                    thresholdss += thresholds.tolist()
+                thresholdss = np.array(thresholdss)
+                thresholds = get_thresholds(thresholdss, total_num_valid_gt)
+                thresholds = np.array(thresholds)
+                pr = np.zeros([len(thresholds), 4])
+                idx = 0
+                for j, num_part in enumerate(split_parts):
+                    gt_datas_part = np.concatenate(
+                        gt_datas_list[idx:idx + num_part], 0)
+                    dt_datas_part = np.concatenate(
+                        dt_datas_list[idx:idx + num_part], 0)
+                    dc_datas_part = np.concatenate(
+                        dontcares[idx:idx + num_part], 0)
+                    ignored_dets_part = np.concatenate(
+                        ignored_dets[idx:idx + num_part], 0)
+                    ignored_gts_part = np.concatenate(
+                        ignored_gts[idx:idx + num_part], 0)
+                    fused_compute_statistics(
+                        parted_overlaps[j],
+                        pr,
+                        total_gt_num[idx:idx + num_part],
+                        total_dt_num[idx:idx + num_part],
+                        total_dc_num[idx:idx + num_part],
+                        gt_datas_part,
+                        dt_datas_part,
+                        dc_datas_part,
+                        ignored_gts_part,
+                        ignored_dets_part,
+                        metric,
+                        min_overlap=min_overlap,
+                        thresholds=thresholds,
+                        compute_aos=compute_aos)
+                    idx += num_part
+                for i in range(len(thresholds)):
+                    recall[m, idx_l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])
+                    precision[m, idx_l, k, i] = pr[i, 0] / (
+                        pr[i, 0] + pr[i, 1])
+                    if compute_aos:
+                        aos[m, idx_l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
+                for i in range(len(thresholds)):
+                    precision[m, idx_l, k, i] = np.max(
+                        precision[m, idx_l, k, i:], axis=-1)
+                    recall[m, idx_l, k, i] = np.max(
+                        recall[m, idx_l, k, i:], axis=-1)
+                    if compute_aos:
+                        aos[m, idx_l, k, i] = np.max(
+                            aos[m, idx_l, k, i:], axis=-1)
+    ret_dict = {
+        'recall': recall,
+        'precision': precision,
+        'orientation': aos,
+    }
+
+    # clean temp variables
+    del overlaps
+    del parted_overlaps
+
+    gc.collect()
+    return ret_dict
+
+
+def get_mAP(prec):
+    sums = 0
+    for i in range(0, prec.shape[-1], 4):
+        sums = sums + prec[..., i]
+    return sums / 11 * 100
+
+
+def print_str(value, *arg, sstream=None):
+    if sstream is None:
+        sstream = sysio.StringIO()
+    sstream.truncate(0)
+    sstream.seek(0)
+    print(value, *arg, file=sstream)
+    return sstream.getvalue()
+
+
+def do_eval(gt_annos,
+            dt_annos,
+            current_classes,
+            min_overlaps,
+            eval_types=['bbox', 'bev', '3d']):
+    # min_overlaps: [num_minoverlap, metric, num_class]
+    difficultys = [0, 1, 2]
+    mAP_bbox = None
+    mAP_aos = None
+    if 'bbox' in eval_types:
+        ret = eval_class(
+            gt_annos,
+            dt_annos,
+            current_classes,
+            difficultys,
+            0,
+            min_overlaps,
+            compute_aos=('aos' in eval_types))
+        # ret: [num_class, num_diff, num_minoverlap, num_sample_points]
+        mAP_bbox = get_mAP(ret['precision'])
+        if 'aos' in eval_types:
+            mAP_aos = get_mAP(ret['orientation'])
+
+    mAP_bev = None
+    if 'bev' in eval_types:
+        ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 1,
+                         min_overlaps)
+        mAP_bev = get_mAP(ret['precision'])
+
+    mAP_3d = None
+    if '3d' in eval_types:
+        ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 2,
+                         min_overlaps)
+        mAP_3d = get_mAP(ret['precision'])
+    return mAP_bbox, mAP_bev, mAP_3d, mAP_aos
+
+
+def do_coco_style_eval(gt_annos, dt_annos, current_classes, overlap_ranges,
+                       compute_aos):
+    # overlap_ranges: [range, metric, num_class]
+    min_overlaps = np.zeros([10, *overlap_ranges.shape[1:]])
+    for i in range(overlap_ranges.shape[1]):
+        for j in range(overlap_ranges.shape[2]):
+            min_overlaps[:, i, j] = np.linspace(*overlap_ranges[:, i, j])
+    mAP_bbox, mAP_bev, mAP_3d, mAP_aos = do_eval(gt_annos, dt_annos,
+                                                 current_classes, min_overlaps,
+                                                 compute_aos)
+    # ret: [num_class, num_diff, num_minoverlap]
+    mAP_bbox = mAP_bbox.mean(-1)
+    mAP_bev = mAP_bev.mean(-1)
+    mAP_3d = mAP_3d.mean(-1)
+    if mAP_aos is not None:
+        mAP_aos = mAP_aos.mean(-1)
+    return mAP_bbox, mAP_bev, mAP_3d, mAP_aos
+
+
+def kitti_eval(gt_annos,
+               dt_annos,
+               current_classes,
+               eval_types=['bbox', 'bev', '3d']):
+    """KITTI evaluation.
+
+    Args:
+        gt_annos (list[dict]): Contains GT information of each sample.
+        dt_annos (list[dict]): Contains detection information of each sample.
+        current_classes (list[str]): Classes to evaluate.
+        eval_types (list[str], optional): Types to eval.
+            Defaults to ['bbox', 'bev', '3d'].
+
+    Returns:
+        tuple: String and dict of evaluation results.
+ """ + assert len(eval_types) > 0, 'must contain at least one evaluation type' + if 'aos' in eval_types: + assert 'bbox' in eval_types, 'must evaluate bbox when evaluating aos' + overlap_0_7 = np.array([[0.7, 0.5, 0.5, 0.7, + 0.5], [0.7, 0.5, 0.5, 0.7, 0.5], + [0.7, 0.5, 0.5, 0.7, 0.5]]) + overlap_0_5 = np.array([[0.7, 0.5, 0.5, 0.7, 0.5], + [0.5, 0.25, 0.25, 0.5, 0.25], + [0.5, 0.25, 0.25, 0.5, 0.25]]) + min_overlaps = np.stack([overlap_0_7, overlap_0_5], axis=0) # [2, 3, 5] + class_to_name = { + 0: 'Car', + 1: 'Pedestrian', + 2: 'Cyclist', + 3: 'Van', + 4: 'Person_sitting', + } + name_to_class = {v: n for n, v in class_to_name.items()} + if not isinstance(current_classes, (list, tuple)): + current_classes = [current_classes] + current_classes_int = [] + for curcls in current_classes: + if isinstance(curcls, str): + current_classes_int.append(name_to_class[curcls]) + else: + current_classes_int.append(curcls) + current_classes = current_classes_int + min_overlaps = min_overlaps[:, :, current_classes] + result = '' + # check whether alpha is valid + compute_aos = False + pred_alpha = False + valid_alpha_gt = False + for anno in dt_annos: + mask = (anno['alpha'] != -10) + if anno['alpha'][mask].shape[0] != 0: + pred_alpha = True + break + for anno in gt_annos: + if anno['alpha'][0] != -10: + valid_alpha_gt = True + break + compute_aos = (pred_alpha and valid_alpha_gt) + if compute_aos: + eval_types.append('aos') + + mAPbbox, mAPbev, mAP3d, mAPaos = do_eval(gt_annos, dt_annos, + current_classes, min_overlaps, + eval_types) + + ret_dict = {} + difficulty = ['easy', 'moderate', 'hard'] + for j, curcls in enumerate(current_classes): + # mAP threshold array: [num_minoverlap, metric, class] + # mAP result: [num_class, num_diff, num_minoverlap] + curcls_name = class_to_name[curcls] + for i in range(min_overlaps.shape[0]): + # prepare results for print + result += ('{} AP@{:.2f}, {:.2f}, {:.2f}:\n'.format( + curcls_name, *min_overlaps[i, :, j])) + if mAPbbox is not None: + result += 'bbox AP:{:.4f}, {:.4f}, {:.4f}\n'.format( + *mAPbbox[j, :, i]) + if mAPbev is not None: + result += 'bev AP:{:.4f}, {:.4f}, {:.4f}\n'.format( + *mAPbev[j, :, i]) + if mAP3d is not None: + result += '3d AP:{:.4f}, {:.4f}, {:.4f}\n'.format( + *mAP3d[j, :, i]) + + if compute_aos: + result += 'aos AP:{:.2f}, {:.2f}, {:.2f}\n'.format( + *mAPaos[j, :, i]) + + # prepare results for logger + for idx in range(3): + if i == 0: + postfix = f'{difficulty[idx]}_strict' + else: + postfix = f'{difficulty[idx]}_loose' + prefix = f'KITTI/{curcls_name}' + if mAP3d is not None: + ret_dict[f'{prefix}_3D_{postfix}'] = mAP3d[j, idx, i] + if mAPbev is not None: + ret_dict[f'{prefix}_BEV_{postfix}'] = mAPbev[j, idx, i] + if mAPbbox is not None: + ret_dict[f'{prefix}_2D_{postfix}'] = mAPbbox[j, idx, i] + + # calculate mAP over all classes if there are multiple classes + if len(current_classes) > 1: + # prepare results for print + result += ('\nOverall AP@{}, {}, {}:\n'.format(*difficulty)) + if mAPbbox is not None: + mAPbbox = mAPbbox.mean(axis=0) + result += 'bbox AP:{:.4f}, {:.4f}, {:.4f}\n'.format(*mAPbbox[:, 0]) + if mAPbev is not None: + mAPbev = mAPbev.mean(axis=0) + result += 'bev AP:{:.4f}, {:.4f}, {:.4f}\n'.format(*mAPbev[:, 0]) + if mAP3d is not None: + mAP3d = mAP3d.mean(axis=0) + result += '3d AP:{:.4f}, {:.4f}, {:.4f}\n'.format(*mAP3d[:, 0]) + if compute_aos: + mAPaos = mAPaos.mean(axis=0) + result += 'aos AP:{:.2f}, {:.2f}, {:.2f}\n'.format(*mAPaos[:, 0]) + + # prepare results for logger + for idx in range(3): + postfix = 
f'{difficulty[idx]}'
+            if mAP3d is not None:
+                ret_dict[f'KITTI/Overall_3D_{postfix}'] = mAP3d[idx, 0]
+            if mAPbev is not None:
+                ret_dict[f'KITTI/Overall_BEV_{postfix}'] = mAPbev[idx, 0]
+            if mAPbbox is not None:
+                ret_dict[f'KITTI/Overall_2D_{postfix}'] = mAPbbox[idx, 0]
+
+    return result, ret_dict
+
+
+def kitti_eval_coco_style(gt_annos, dt_annos, current_classes):
+    """COCO-style evaluation of KITTI.
+
+    Args:
+        gt_annos (list[dict]): Contains GT information of each sample.
+        dt_annos (list[dict]): Contains detection information of each sample.
+        current_classes (list[str]): Classes to evaluate.
+
+    Returns:
+        string: Evaluation results.
+    """
+    class_to_name = {
+        0: 'Car',
+        1: 'Pedestrian',
+        2: 'Cyclist',
+        3: 'Van',
+        4: 'Person_sitting',
+    }
+    class_to_range = {
+        0: [0.5, 0.95, 10],
+        1: [0.25, 0.7, 10],
+        2: [0.25, 0.7, 10],
+        3: [0.5, 0.95, 10],
+        4: [0.25, 0.7, 10],
+    }
+    name_to_class = {v: n for n, v in class_to_name.items()}
+    if not isinstance(current_classes, (list, tuple)):
+        current_classes = [current_classes]
+    current_classes_int = []
+    for curcls in current_classes:
+        if isinstance(curcls, str):
+            current_classes_int.append(name_to_class[curcls])
+        else:
+            current_classes_int.append(curcls)
+    current_classes = current_classes_int
+    overlap_ranges = np.zeros([3, 3, len(current_classes)])
+    for i, curcls in enumerate(current_classes):
+        overlap_ranges[:, :, i] = np.array(class_to_range[curcls])[:,
+                                                                   np.newaxis]
+    result = ''
+    # check whether alpha is valid
+    compute_aos = False
+    for anno in dt_annos:
+        if anno['alpha'].shape[0] != 0:
+            if anno['alpha'][0] != -10:
+                compute_aos = True
+            break
+    mAPbbox, mAPbev, mAP3d, mAPaos = do_coco_style_eval(
+        gt_annos, dt_annos, current_classes, overlap_ranges, compute_aos)
+    for j, curcls in enumerate(current_classes):
+        # mAP threshold array: [num_minoverlap, metric, class]
+        # mAP result: [num_class, num_diff, num_minoverlap]
+        o_range = np.array(class_to_range[curcls])[[0, 2, 1]]
+        o_range[1] = (o_range[2] - o_range[0]) / (o_range[1] - 1)
+        result += print_str((f'{class_to_name[curcls]} '
+                             'coco AP@{:.2f}:{:.2f}:{:.2f}:'.format(*o_range)))
+        result += print_str((f'bbox AP:{mAPbbox[j, 0]:.2f}, '
+                             f'{mAPbbox[j, 1]:.2f}, '
+                             f'{mAPbbox[j, 2]:.2f}'))
+        result += print_str((f'bev AP:{mAPbev[j, 0]:.2f}, '
+                             f'{mAPbev[j, 1]:.2f}, '
+                             f'{mAPbev[j, 2]:.2f}'))
+        result += print_str((f'3d AP:{mAP3d[j, 0]:.2f}, '
+                             f'{mAP3d[j, 1]:.2f}, '
+                             f'{mAP3d[j, 2]:.2f}'))
+        if compute_aos:
+            result += print_str((f'aos AP:{mAPaos[j, 0]:.2f}, '
+                                 f'{mAPaos[j, 1]:.2f}, '
+                                 f'{mAPaos[j, 2]:.2f}'))
+    return result
diff --git a/mmcv/core/evaluation/kitti_utils/rotate_iou.py b/mmcv/core/evaluation/kitti_utils/rotate_iou.py
new file mode 100644
index 0000000..2f0c9c8
--- /dev/null
+++ b/mmcv/core/evaluation/kitti_utils/rotate_iou.py
@@ -0,0 +1,379 @@
+# Copyright (c) OpenMMLab. All rights reserved.
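Before the rotated-IoU helpers below, the annotation format that `kitti_eval` above expects is easiest to see with a toy call. A hedged usage sketch (not part of the patch; `make_anno` is a hypothetical helper and all values are dummies chosen to pass the `clean_data` filters, e.g. a 2D box taller than 40 px and `alpha=-10` so AOS stays disabled):

    import numpy as np
    from mmcv.core.evaluation import kitti_eval

    def make_anno(names, scores=None):
        # One KITTI-style annotation dict per sample; fields mirror what
        # clean_data and _prepare_data read.
        n = len(names)
        anno = dict(
            name=np.array(names),
            truncated=np.zeros(n),
            occluded=np.zeros(n, dtype=np.int64),
            alpha=np.full(n, -10.0),
            bbox=np.array([[0., 0., 60., 60.]] * n),
            # location/dimensions/rotation_y are only consumed by the
            # bev/3d metrics but are included so the dict works everywhere.
            location=np.array([[0., 1., 10.]] * n),
            dimensions=np.array([[1.6, 1.5, 3.9]] * n),
            rotation_y=np.zeros(n))
        if scores is not None:
            anno['score'] = np.array(scores)
        return anno

    gt_annos = [make_anno(['Car'])]
    dt_annos = [make_anno(['Car'], scores=[0.9])]
    result_str, result_dict = kitti_eval(
        gt_annos, dt_annos, ['Car'], eval_types=['bbox'])
    print(result_str)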
+##################### +# Based on https://github.com/hongzhenwang/RRPN-revise +# Licensed under The MIT License +# Author: yanyan, scrin@foxmail.com +##################### +import math +import numba +import numpy as np +from numba import cuda + + +@numba.jit(nopython=True) +def div_up(m, n): + return m // n + (m % n > 0) + + +@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True) +def trangle_area(a, b, c): + return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) * + (b[0] - c[0])) / 2.0 + + +@cuda.jit('(float32[:], int32)', device=True, inline=True) +def area(int_pts, num_of_inter): + area_val = 0.0 + for i in range(num_of_inter - 2): + area_val += abs( + trangle_area(int_pts[:2], int_pts[2 * i + 2:2 * i + 4], + int_pts[2 * i + 4:2 * i + 6])) + return area_val + + +@cuda.jit('(float32[:], int32)', device=True, inline=True) +def sort_vertex_in_convex_polygon(int_pts, num_of_inter): + if num_of_inter > 0: + center = cuda.local.array((2, ), dtype=numba.float32) + center[:] = 0.0 + for i in range(num_of_inter): + center[0] += int_pts[2 * i] + center[1] += int_pts[2 * i + 1] + center[0] /= num_of_inter + center[1] /= num_of_inter + v = cuda.local.array((2, ), dtype=numba.float32) + vs = cuda.local.array((16, ), dtype=numba.float32) + for i in range(num_of_inter): + v[0] = int_pts[2 * i] - center[0] + v[1] = int_pts[2 * i + 1] - center[1] + d = math.sqrt(v[0] * v[0] + v[1] * v[1]) + v[0] = v[0] / d + v[1] = v[1] / d + if v[1] < 0: + v[0] = -2 - v[0] + vs[i] = v[0] + j = 0 + temp = 0 + for i in range(1, num_of_inter): + if vs[i - 1] > vs[i]: + temp = vs[i] + tx = int_pts[2 * i] + ty = int_pts[2 * i + 1] + j = i + while j > 0 and vs[j - 1] > temp: + vs[j] = vs[j - 1] + int_pts[j * 2] = int_pts[j * 2 - 2] + int_pts[j * 2 + 1] = int_pts[j * 2 - 1] + j -= 1 + + vs[j] = temp + int_pts[j * 2] = tx + int_pts[j * 2 + 1] = ty + + +@cuda.jit( + '(float32[:], float32[:], int32, int32, float32[:])', + device=True, + inline=True) +def line_segment_intersection(pts1, pts2, i, j, temp_pts): + A = cuda.local.array((2, ), dtype=numba.float32) + B = cuda.local.array((2, ), dtype=numba.float32) + C = cuda.local.array((2, ), dtype=numba.float32) + D = cuda.local.array((2, ), dtype=numba.float32) + + A[0] = pts1[2 * i] + A[1] = pts1[2 * i + 1] + + B[0] = pts1[2 * ((i + 1) % 4)] + B[1] = pts1[2 * ((i + 1) % 4) + 1] + + C[0] = pts2[2 * j] + C[1] = pts2[2 * j + 1] + + D[0] = pts2[2 * ((j + 1) % 4)] + D[1] = pts2[2 * ((j + 1) % 4) + 1] + BA0 = B[0] - A[0] + BA1 = B[1] - A[1] + DA0 = D[0] - A[0] + CA0 = C[0] - A[0] + DA1 = D[1] - A[1] + CA1 = C[1] - A[1] + acd = DA1 * CA0 > CA1 * DA0 + bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0]) + if acd != bcd: + abc = CA1 * BA0 > BA1 * CA0 + abd = DA1 * BA0 > BA1 * DA0 + if abc != abd: + DC0 = D[0] - C[0] + DC1 = D[1] - C[1] + ABBA = A[0] * B[1] - B[0] * A[1] + CDDC = C[0] * D[1] - D[0] * C[1] + DH = BA1 * DC0 - BA0 * DC1 + Dx = ABBA * DC0 - BA0 * CDDC + Dy = ABBA * DC1 - BA1 * CDDC + temp_pts[0] = Dx / DH + temp_pts[1] = Dy / DH + return True + return False + + +@cuda.jit( + '(float32[:], float32[:], int32, int32, float32[:])', + device=True, + inline=True) +def line_segment_intersection_v1(pts1, pts2, i, j, temp_pts): + a = cuda.local.array((2, ), dtype=numba.float32) + b = cuda.local.array((2, ), dtype=numba.float32) + c = cuda.local.array((2, ), dtype=numba.float32) + d = cuda.local.array((2, ), dtype=numba.float32) + + a[0] = pts1[2 * i] + a[1] = pts1[2 * i + 1] + + b[0] = pts1[2 * ((i + 1) % 4)] + b[1] = pts1[2 * ((i + 1) % 4) + 1] + + 
c[0] = pts2[2 * j] + c[1] = pts2[2 * j + 1] + + d[0] = pts2[2 * ((j + 1) % 4)] + d[1] = pts2[2 * ((j + 1) % 4) + 1] + + area_abc = trangle_area(a, b, c) + area_abd = trangle_area(a, b, d) + + if area_abc * area_abd >= 0: + return False + + area_cda = trangle_area(c, d, a) + area_cdb = area_cda + area_abc - area_abd + + if area_cda * area_cdb >= 0: + return False + t = area_cda / (area_abd - area_abc) + + dx = t * (b[0] - a[0]) + dy = t * (b[1] - a[1]) + temp_pts[0] = a[0] + dx + temp_pts[1] = a[1] + dy + return True + + +@cuda.jit('(float32, float32, float32[:])', device=True, inline=True) +def point_in_quadrilateral(pt_x, pt_y, corners): + ab0 = corners[2] - corners[0] + ab1 = corners[3] - corners[1] + + ad0 = corners[6] - corners[0] + ad1 = corners[7] - corners[1] + + ap0 = pt_x - corners[0] + ap1 = pt_y - corners[1] + + abab = ab0 * ab0 + ab1 * ab1 + abap = ab0 * ap0 + ab1 * ap1 + adad = ad0 * ad0 + ad1 * ad1 + adap = ad0 * ap0 + ad1 * ap1 + + return abab >= abap and abap >= 0 and adad >= adap and adap >= 0 + + +@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True) +def quadrilateral_intersection(pts1, pts2, int_pts): + num_of_inter = 0 + for i in range(4): + if point_in_quadrilateral(pts1[2 * i], pts1[2 * i + 1], pts2): + int_pts[num_of_inter * 2] = pts1[2 * i] + int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1] + num_of_inter += 1 + if point_in_quadrilateral(pts2[2 * i], pts2[2 * i + 1], pts1): + int_pts[num_of_inter * 2] = pts2[2 * i] + int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1] + num_of_inter += 1 + temp_pts = cuda.local.array((2, ), dtype=numba.float32) + for i in range(4): + for j in range(4): + has_pts = line_segment_intersection(pts1, pts2, i, j, temp_pts) + if has_pts: + int_pts[num_of_inter * 2] = temp_pts[0] + int_pts[num_of_inter * 2 + 1] = temp_pts[1] + num_of_inter += 1 + + return num_of_inter + + +@cuda.jit('(float32[:], float32[:])', device=True, inline=True) +def rbbox_to_corners(corners, rbbox): + # generate clockwise corners and rotate it clockwise + angle = rbbox[4] + a_cos = math.cos(angle) + a_sin = math.sin(angle) + center_x = rbbox[0] + center_y = rbbox[1] + x_d = rbbox[2] + y_d = rbbox[3] + corners_x = cuda.local.array((4, ), dtype=numba.float32) + corners_y = cuda.local.array((4, ), dtype=numba.float32) + corners_x[0] = -x_d / 2 + corners_x[1] = -x_d / 2 + corners_x[2] = x_d / 2 + corners_x[3] = x_d / 2 + corners_y[0] = -y_d / 2 + corners_y[1] = y_d / 2 + corners_y[2] = y_d / 2 + corners_y[3] = -y_d / 2 + for i in range(4): + corners[2 * i] = a_cos * corners_x[i] + a_sin * corners_y[i] + center_x + corners[2 * i + + 1] = -a_sin * corners_x[i] + a_cos * corners_y[i] + center_y + + +@cuda.jit('(float32[:], float32[:])', device=True, inline=True) +def inter(rbbox1, rbbox2): + """Compute intersection of two rotated boxes. + + Args: + rbox1 (np.ndarray, shape=[5]): Rotated 2d box. + rbox2 (np.ndarray, shape=[5]): Rotated 2d box. + + Returns: + float: Intersection of two rotated boxes. 
+ """ + corners1 = cuda.local.array((8, ), dtype=numba.float32) + corners2 = cuda.local.array((8, ), dtype=numba.float32) + intersection_corners = cuda.local.array((16, ), dtype=numba.float32) + + rbbox_to_corners(corners1, rbbox1) + rbbox_to_corners(corners2, rbbox2) + + num_intersection = quadrilateral_intersection(corners1, corners2, + intersection_corners) + sort_vertex_in_convex_polygon(intersection_corners, num_intersection) + # print(intersection_corners.reshape([-1, 2])[:num_intersection]) + + return area(intersection_corners, num_intersection) + + +@cuda.jit('(float32[:], float32[:], int32)', device=True, inline=True) +def devRotateIoUEval(rbox1, rbox2, criterion=-1): + """Compute rotated iou on device. + + Args: + rbox1 (np.ndarray, shape=[5]): Rotated 2d box. + rbox2 (np.ndarray, shape=[5]): Rotated 2d box. + criterion (int, optional): Indicate different type of iou. + -1 indicate `area_inter / (area1 + area2 - area_inter)`, + 0 indicate `area_inter / area1`, + 1 indicate `area_inter / area2`. + + Returns: + float: iou between two input boxes. + """ + area1 = rbox1[2] * rbox1[3] + area2 = rbox2[2] * rbox2[3] + area_inter = inter(rbox1, rbox2) + if criterion == -1: + return area_inter / (area1 + area2 - area_inter) + elif criterion == 0: + return area_inter / area1 + elif criterion == 1: + return area_inter / area2 + else: + return area_inter + + +@cuda.jit( + '(int64, int64, float32[:], float32[:], float32[:], int32)', + fastmath=False) +def rotate_iou_kernel_eval(N, + K, + dev_boxes, + dev_query_boxes, + dev_iou, + criterion=-1): + """Kernel of computing rotated iou. + + Args: + N (int): The number of boxes. + K (int): The number of query boxes. + dev_boxes (np.ndarray): Boxes on device. + dev_query_boxes (np.ndarray): Query boxes on device. + dev_iou (np.ndarray): Computed iou to return. + criterion (int, optional): Indicate different type of iou. + -1 indicate `area_inter / (area1 + area2 - area_inter)`, + 0 indicate `area_inter / area1`, + 1 indicate `area_inter / area2`. 
+ """ + threadsPerBlock = 8 * 8 + row_start = cuda.blockIdx.x + col_start = cuda.blockIdx.y + tx = cuda.threadIdx.x + row_size = min(N - row_start * threadsPerBlock, threadsPerBlock) + col_size = min(K - col_start * threadsPerBlock, threadsPerBlock) + block_boxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32) + block_qboxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32) + + dev_query_box_idx = threadsPerBlock * col_start + tx + dev_box_idx = threadsPerBlock * row_start + tx + if (tx < col_size): + block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0] + block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1] + block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2] + block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3] + block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4] + if (tx < row_size): + block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0] + block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1] + block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2] + block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3] + block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4] + cuda.syncthreads() + if tx < row_size: + for i in range(col_size): + offset = ( + row_start * threadsPerBlock * K + col_start * threadsPerBlock + + tx * K + i) + dev_iou[offset] = devRotateIoUEval(block_qboxes[i * 5:i * 5 + 5], + block_boxes[tx * 5:tx * 5 + 5], + criterion) + + +def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0): + """Rotated box iou running in gpu. 500x faster than cpu version (take 5ms + in one example with numba.cuda code). convert from [this project]( + https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation). + + Args: + boxes (torch.Tensor): rbboxes. format: centers, dims, + angles(clockwise when positive) with the shape of [N, 5]. + query_boxes (float tensor: [K, 5]): rbboxes to compute iou with boxes. + device_id (int, optional): Defaults to 0. Device to use. + criterion (int, optional): Indicate different type of iou. + -1 indicate `area_inter / (area1 + area2 - area_inter)`, + 0 indicate `area_inter / area1`, + 1 indicate `area_inter / area2`. + + Returns: + np.ndarray: IoU results. + """ + boxes = boxes.astype(np.float32) + query_boxes = query_boxes.astype(np.float32) + N = boxes.shape[0] + K = query_boxes.shape[0] + iou = np.zeros((N, K), dtype=np.float32) + if N == 0 or K == 0: + return iou + threadsPerBlock = 8 * 8 + cuda.select_device(device_id) + blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock)) + + stream = cuda.stream() + with stream.auto_synchronize(): + boxes_dev = cuda.to_device(boxes.reshape([-1]), stream) + query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream) + iou_dev = cuda.to_device(iou.reshape([-1]), stream) + rotate_iou_kernel_eval[blockspergrid, threadsPerBlock, + stream](N, K, boxes_dev, query_boxes_dev, + iou_dev, criterion) + iou_dev.copy_to_host(iou.reshape([-1]), stream=stream) + return iou.astype(boxes.dtype) diff --git a/mmcv/core/evaluation/lyft_eval.py b/mmcv/core/evaluation/lyft_eval.py new file mode 100644 index 0000000..bfb95a1 --- /dev/null +++ b/mmcv/core/evaluation/lyft_eval.py @@ -0,0 +1,284 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import numpy as np +from lyft_dataset_sdk.eval.detection.mAP_evaluation import (Box3D, get_ap, + get_class_names, + get_ious, + group_by_key, + wrap_in_box) +from mmcv.utils import print_log, track_iter_progress +from mmcv.fileio.io import dump, load +from os import path as osp +from terminaltables import AsciiTable + + +# def load_lyft_gts(lyft, data_root, eval_split, logger=None): +# """Loads ground truth boxes from database. + +# Args: +# lyft (:obj:`LyftDataset`): Lyft class in the sdk. +# data_root (str): Root of data for reading splits. +# eval_split (str): Name of the split for evaluation. +# logger (logging.Logger | str | None): Logger used for printing +# related information during evaluation. Default: None. + +# Returns: +# list[dict]: List of annotation dictionaries. +# """ +# split_scenes = mmcv.list_from_file( +# osp.join(data_root, f'{eval_split}.txt')) + +# # Read out all sample_tokens in DB. +# sample_tokens_all = [s['token'] for s in lyft.sample] +# assert len(sample_tokens_all) > 0, 'Error: Database has no samples!' + +# if eval_split == 'test': +# # Check that you aren't trying to cheat :) +# assert len(lyft.sample_annotation) > 0, \ +# 'Error: You are trying to evaluate on the test set \ +# but you do not have the annotations!' + +# sample_tokens = [] +# for sample_token in sample_tokens_all: +# scene_token = lyft.get('sample', sample_token)['scene_token'] +# scene_record = lyft.get('scene', scene_token) +# if scene_record['name'] in split_scenes: +# sample_tokens.append(sample_token) + +# all_annotations = [] + +# print_log('Loading ground truth annotations...', logger=logger) +# # Load annotations and filter predictions and annotations. +# for sample_token in track_iter_progress(sample_tokens): +# sample = lyft.get('sample', sample_token) +# sample_annotation_tokens = sample['anns'] +# for sample_annotation_token in sample_annotation_tokens: +# # Get label name in detection task and filter unused labels. +# sample_annotation = \ +# lyft.get('sample_annotation', sample_annotation_token) +# detection_name = sample_annotation['category_name'] +# if detection_name is None: +# continue +# annotation = { +# 'sample_token': sample_token, +# 'translation': sample_annotation['translation'], +# 'size': sample_annotation['size'], +# 'rotation': sample_annotation['rotation'], +# 'name': detection_name, +# } +# all_annotations.append(annotation) + +# return all_annotations + + +def load_lyft_predictions(res_path): + """Load Lyft predictions from json file. + + Args: + res_path (str): Path of result json file recording detections. + + Returns: + list[dict]: List of prediction dictionaries. + """ + predictions = load(res_path) + predictions = predictions['results'] + all_preds = [] + for sample_token in predictions.keys(): + all_preds.extend(predictions[sample_token]) + return all_preds + + +def lyft_eval(lyft, data_root, res_path, eval_set, output_dir, logger=None): + """Evaluation API for Lyft dataset. + + Args: + lyft (:obj:`LyftDataset`): Lyft class in the sdk. + data_root (str): Root of data for reading splits. + res_path (str): Path of result json file recording detections. + eval_set (str): Name of the split for evaluation. + output_dir (str): Output directory for output json files. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + + Returns: + dict[str, float]: The evaluation results. 
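
The results file consumed by `load_lyft_predictions` above is a `{'results': {sample_token: [box_dict, ...]}}` mapping, which the helper flattens into one list. A small fabricated sketch (file name and tokens are made up):

```python
from mmcv.fileio.io import dump

results = {
    'results': {
        'token_a': [{'sample_token': 'token_a', 'name': 'car', 'score': 0.9}],
        'token_b': [{'sample_token': 'token_b', 'name': 'truck', 'score': 0.4}],
    }
}
dump(results, 'toy_lyft_results.json')

preds = load_lyft_predictions('toy_lyft_results.json')
print(len(preds))  # 2 -- the per-sample lists are concatenated
```
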
+ """ + # evaluate by lyft metrics + gts = load_lyft_gts(lyft, data_root, eval_set, logger) + predictions = load_lyft_predictions(res_path) + + class_names = get_class_names(gts) + print('Calculating mAP@0.5:0.95...') + + iou_thresholds = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95] + metrics = {} + average_precisions = \ + get_classwise_aps(gts, predictions, class_names, iou_thresholds) + APs_data = [['IOU', 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]] + + mAPs = np.mean(average_precisions, axis=0) + mAPs_cate = np.mean(average_precisions, axis=1) + final_mAP = np.mean(mAPs) + + metrics['average_precisions'] = average_precisions.tolist() + metrics['mAPs'] = mAPs.tolist() + metrics['Final mAP'] = float(final_mAP) + metrics['class_names'] = class_names + metrics['mAPs_cate'] = mAPs_cate.tolist() + + APs_data = [['class', 'mAP@0.5:0.95']] + for i in range(len(class_names)): + row = [class_names[i], round(mAPs_cate[i], 3)] + APs_data.append(row) + APs_data.append(['Overall', round(final_mAP, 3)]) + APs_table = AsciiTable(APs_data, title='mAPs@0.5:0.95') + APs_table.inner_footing_row_border = True + print_log(APs_table.table, logger=logger) + + res_path = osp.join(output_dir, 'lyft_metrics.json') + dump(metrics, res_path) + return metrics + + +def get_classwise_aps(gt, predictions, class_names, iou_thresholds): + """Returns an array with an average precision per class. + + Note: Ground truth and predictions should have the following format. + + .. code-block:: + + gt = [{ + 'sample_token': '0f0e3ce89d2324d8b45aa55a7b4f8207 + fbb039a550991a5149214f98cec136ac', + 'translation': [974.2811881299899, 1714.6815014457964, + -23.689857123368846], + 'size': [1.796, 4.488, 1.664], + 'rotation': [0.14882026466054782, 0, 0, 0.9888642620837121], + 'name': 'car' + }] + + predictions = [{ + 'sample_token': '0f0e3ce89d2324d8b45aa55a7b4f8207 + fbb039a550991a5149214f98cec136ac', + 'translation': [971.8343488872263, 1713.6816097857359, + -25.82534357061308], + 'size': [2.519726579986132, 7.810161372666739, 3.483438286096803], + 'rotation': [0.10913582721095375, 0.04099572636992043, + 0.01927712319721745, 1.029328402625659], + 'name': 'car', + 'score': 0.3077029437237213 + }] + + Args: + gt (list[dict]): list of dictionaries in the format described below. + predictions (list[dict]): list of dictionaries in the format + described below. + class_names (list[str]): list of the class names. + iou_thresholds (list[float]): IOU thresholds used to calculate + TP / FN + + Returns: + np.ndarray: an array with an average precision per class. + """ + assert all([0 <= iou_th <= 1 for iou_th in iou_thresholds]) + + gt_by_class_name = group_by_key(gt, 'name') + pred_by_class_name = group_by_key(predictions, 'name') + + average_precisions = np.zeros((len(class_names), len(iou_thresholds))) + + for class_id, class_name in enumerate(class_names): + if class_name in pred_by_class_name: + recalls, precisions, average_precision = get_single_class_aps( + gt_by_class_name[class_name], pred_by_class_name[class_name], + iou_thresholds) + average_precisions[class_id, :] = average_precision + + return average_precisions + + +def get_single_class_aps(gt, predictions, iou_thresholds): + """Compute recall and precision for all iou thresholds. Adapted from + LyftDatasetDevkit. + + Args: + gt (list[dict]): list of dictionaries in the format described above. + predictions (list[dict]): list of dictionaries in the format \ + described below. 
+ iou_thresholds (list[float]): IOU thresholds used to calculate \ + TP / FN + + Returns: + tuple[np.ndarray]: Returns (recalls, precisions, average precisions) + for each class. + """ + num_gts = len(gt) + image_gts = group_by_key(gt, 'sample_token') + image_gts = wrap_in_box(image_gts) + + sample_gt_checked = { + sample_token: np.zeros((len(boxes), len(iou_thresholds))) + for sample_token, boxes in image_gts.items() + } + + predictions = sorted(predictions, key=lambda x: x['score'], reverse=True) + + # go down dets and mark TPs and FPs + num_predictions = len(predictions) + tps = np.zeros((num_predictions, len(iou_thresholds))) + fps = np.zeros((num_predictions, len(iou_thresholds))) + + for prediction_index, prediction in enumerate(predictions): + predicted_box = Box3D(**prediction) + + sample_token = prediction['sample_token'] + + max_overlap = -np.inf + jmax = -1 + + if sample_token in image_gts: + gt_boxes = image_gts[sample_token] + # gt_boxes per sample + gt_checked = sample_gt_checked[sample_token] + # gt flags per sample + else: + gt_boxes = [] + gt_checked = None + + if len(gt_boxes) > 0: + overlaps = get_ious(gt_boxes, predicted_box) + + max_overlap = np.max(overlaps) + + jmax = np.argmax(overlaps) + + for i, iou_threshold in enumerate(iou_thresholds): + if max_overlap > iou_threshold: + if gt_checked[jmax, i] == 0: + tps[prediction_index, i] = 1.0 + gt_checked[jmax, i] = 1 + else: + fps[prediction_index, i] = 1.0 + else: + fps[prediction_index, i] = 1.0 + + # compute precision recall + fps = np.cumsum(fps, axis=0) + tps = np.cumsum(tps, axis=0) + + recalls = tps / float(num_gts) + # avoid divide by zero in case the first detection + # matches a difficult ground truth + precisions = tps / np.maximum(tps + fps, np.finfo(np.float64).eps) + + aps = [] + for i in range(len(iou_thresholds)): + recall = recalls[:, i] + precision = precisions[:, i] + assert np.all(0 <= recall) & np.all(recall <= 1) + assert np.all(0 <= precision) & np.all(precision <= 1) + ap = get_ap(recall, precision) + aps.append(ap) + + aps = np.array(aps) + + return recalls, precisions, aps diff --git a/mmcv/core/evaluation/mean_ap.py b/mmcv/core/evaluation/mean_ap.py new file mode 100644 index 0000000..dca238b --- /dev/null +++ b/mmcv/core/evaluation/mean_ap.py @@ -0,0 +1,467 @@ +from multiprocessing import Pool +import numpy as np +from mmcv.utils import print_log, is_str +from terminaltables import AsciiTable + +from .bbox_overlaps import bbox_overlaps +from .class_names import get_classes + + +def average_precision(recalls, precisions, mode='area'): + """Calculate average precision (for single or multiple scales). 
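
A worked check for `average_precision` in `'area'` mode: three detections where the second is a false positive, so the precision envelope is 1.0 up to recall 0.5 and 2/3 afterwards:

```python
import numpy as np

recalls = np.array([0.5, 0.5, 1.0], dtype=np.float32)
precisions = np.array([1.0, 0.5, 2.0 / 3.0], dtype=np.float32)

# Area under the interpolated PR curve:
# AP = 0.5 * 1.0 + 0.5 * (2/3) = 0.8333...
print(average_precision(recalls, precisions, mode='area'))
```
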
+ + Args: + recalls (ndarray): shape (num_scales, num_dets) or (num_dets, ) + precisions (ndarray): shape (num_scales, num_dets) or (num_dets, ) + mode (str): 'area' or '11points', 'area' means calculating the area + under precision-recall curve, '11points' means calculating + the average precision of recalls at [0, 0.1, ..., 1] + + Returns: + float or ndarray: calculated average precision + """ + no_scale = False + if recalls.ndim == 1: + no_scale = True + recalls = recalls[np.newaxis, :] + precisions = precisions[np.newaxis, :] + assert recalls.shape == precisions.shape and recalls.ndim == 2 + num_scales = recalls.shape[0] + ap = np.zeros(num_scales, dtype=np.float32) + if mode == 'area': + zeros = np.zeros((num_scales, 1), dtype=recalls.dtype) + ones = np.ones((num_scales, 1), dtype=recalls.dtype) + mrec = np.hstack((zeros, recalls, ones)) + mpre = np.hstack((zeros, precisions, zeros)) + for i in range(mpre.shape[1] - 1, 0, -1): + mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i]) + for i in range(num_scales): + ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0] + ap[i] = np.sum( + (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1]) + elif mode == '11points': + for i in range(num_scales): + for thr in np.arange(0, 1 + 1e-3, 0.1): + precs = precisions[i, recalls[i, :] >= thr] + prec = precs.max() if precs.size > 0 else 0 + ap[i] += prec + ap /= 11 + else: + raise ValueError( + 'Unrecognized mode, only "area" and "11points" are supported') + if no_scale: + ap = ap[0] + return ap + + +def tpfp_imagenet(det_bboxes, + gt_bboxes, + gt_bboxes_ignore=None, + default_iou_thr=0.5, + area_ranges=None): + """Check if detected bboxes are true positive or false positive. + + Args: + det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). + gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). + gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, + of shape (k, 4). Default: None + default_iou_thr (float): IoU threshold to be considered as matched for + medium and large bboxes (small ones have special rules). + Default: 0.5. + area_ranges (list[tuple] | None): Range of bbox areas to be evaluated, + in the format [(min1, max1), (min2, max2), ...]. Default: None. + + Returns: + tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of + each array is (num_scales, m). + """ + # an indicator of ignored gts + gt_ignore_inds = np.concatenate( + (np.zeros(gt_bboxes.shape[0], dtype=np.bool), + np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool))) + # stack gt_bboxes and gt_bboxes_ignore for convenience + gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore)) + + num_dets = det_bboxes.shape[0] + num_gts = gt_bboxes.shape[0] + if area_ranges is None: + area_ranges = [(None, None)] + num_scales = len(area_ranges) + # tp and fp are of shape (num_scales, num_gts), each row is tp or fp + # of a certain scale. + tp = np.zeros((num_scales, num_dets), dtype=np.float32) + fp = np.zeros((num_scales, num_dets), dtype=np.float32) + if gt_bboxes.shape[0] == 0: + if area_ranges == [(None, None)]: + fp[...] 
= 1 + else: + det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0]) * ( + det_bboxes[:, 3] - det_bboxes[:, 1]) + for i, (min_area, max_area) in enumerate(area_ranges): + fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 + return tp, fp + ious = bbox_overlaps(det_bboxes, gt_bboxes - 1) + gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0] + gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1] + iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)), + default_iou_thr) + # sort all detections by scores in descending order + sort_inds = np.argsort(-det_bboxes[:, -1]) + for k, (min_area, max_area) in enumerate(area_ranges): + gt_covered = np.zeros(num_gts, dtype=bool) + # if no area range is specified, gt_area_ignore is all False + if min_area is None: + gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool) + else: + gt_areas = gt_w * gt_h + gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) + for i in sort_inds: + max_iou = -1 + matched_gt = -1 + # find best overlapped available gt + for j in range(num_gts): + # different from PASCAL VOC: allow finding other gts if the + # best overlapped ones are already matched by other det bboxes + if gt_covered[j]: + continue + elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou: + max_iou = ious[i, j] + matched_gt = j + # there are 4 cases for a det bbox: + # 1. it matches a gt, tp = 1, fp = 0 + # 2. it matches an ignored gt, tp = 0, fp = 0 + # 3. it matches no gt and within area range, tp = 0, fp = 1 + # 4. it matches no gt but is beyond area range, tp = 0, fp = 0 + if matched_gt >= 0: + gt_covered[matched_gt] = 1 + if not (gt_ignore_inds[matched_gt] + or gt_area_ignore[matched_gt]): + tp[k, i] = 1 + elif min_area is None: + fp[k, i] = 1 + else: + bbox = det_bboxes[i, :4] + area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) + if area >= min_area and area < max_area: + fp[k, i] = 1 + return tp, fp + + +def tpfp_default(det_bboxes, + gt_bboxes, + gt_bboxes_ignore=None, + iou_thr=0.5, + area_ranges=None): + """Check if detected bboxes are true positive or false positive. + + Args: + det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). + gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). + gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, + of shape (k, 4). Default: None + iou_thr (float): IoU threshold to be considered as matched. + Default: 0.5. + area_ranges (list[tuple] | None): Range of bbox areas to be evaluated, + in the format [(min1, max1), (min2, max2), ...]. Default: None. + + Returns: + tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of + each array is (num_scales, m). + """ + # an indicator of ignored gts + gt_ignore_inds = np.concatenate( + (np.zeros(gt_bboxes.shape[0], dtype=np.bool), + np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool))) + # stack gt_bboxes and gt_bboxes_ignore for convenience + gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore)) + + num_dets = det_bboxes.shape[0] + num_gts = gt_bboxes.shape[0] + if area_ranges is None: + area_ranges = [(None, None)] + num_scales = len(area_ranges) + # tp and fp are of shape (num_scales, num_gts), each row is tp or fp of + # a certain scale + tp = np.zeros((num_scales, num_dets), dtype=np.float32) + fp = np.zeros((num_scales, num_dets), dtype=np.float32) + + # if there is no gt bboxes in this image, then all det bboxes + # within area range are false positives + if gt_bboxes.shape[0] == 0: + if area_ranges == [(None, None)]: + fp[...] 
= 1 + else: + det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0]) * ( + det_bboxes[:, 3] - det_bboxes[:, 1]) + for i, (min_area, max_area) in enumerate(area_ranges): + fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1 + return tp, fp + + ious = bbox_overlaps(det_bboxes, gt_bboxes) + # for each det, the max iou with all gts + ious_max = ious.max(axis=1) + # for each det, which gt overlaps most with it + ious_argmax = ious.argmax(axis=1) + # sort all dets in descending order by scores + sort_inds = np.argsort(-det_bboxes[:, -1]) + for k, (min_area, max_area) in enumerate(area_ranges): + gt_covered = np.zeros(num_gts, dtype=bool) + # if no area range is specified, gt_area_ignore is all False + if min_area is None: + gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool) + else: + gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * ( + gt_bboxes[:, 3] - gt_bboxes[:, 1]) + gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area) + for i in sort_inds: + if ious_max[i] >= iou_thr: + matched_gt = ious_argmax[i] + if not (gt_ignore_inds[matched_gt] + or gt_area_ignore[matched_gt]): + if not gt_covered[matched_gt]: + gt_covered[matched_gt] = True + tp[k, i] = 1 + else: + fp[k, i] = 1 + # otherwise ignore this detected bbox, tp = 0, fp = 0 + elif min_area is None: + fp[k, i] = 1 + else: + bbox = det_bboxes[i, :4] + area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) + if area >= min_area and area < max_area: + fp[k, i] = 1 + return tp, fp + + +def get_cls_results(det_results, annotations, class_id): + """Get det results and gt information of a certain class. + + Args: + det_results (list[list]): Same as `eval_map()`. + annotations (list[dict]): Same as `eval_map()`. + class_id (int): ID of a specific class. + + Returns: + tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes + """ + cls_dets = [img_res[class_id] for img_res in det_results] + cls_gts = [] + cls_gts_ignore = [] + for ann in annotations: + gt_inds = ann['labels'] == class_id + cls_gts.append(ann['bboxes'][gt_inds, :]) + + if ann.get('labels_ignore', None) is not None: + ignore_inds = ann['labels_ignore'] == class_id + cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :]) + else: + cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32)) + + return cls_dets, cls_gts, cls_gts_ignore + + +def eval_map(det_results, + annotations, + scale_ranges=None, + iou_thr=0.5, + dataset=None, + logger=None, + tpfp_fn=None, + nproc=4): + """Evaluate mAP of a dataset. + + Args: + det_results (list[list]): [[cls1_det, cls2_det, ...], ...]. + The outer list indicates images, and the inner list indicates + per-class detected bboxes. + annotations (list[dict]): Ground truth annotations where each item of + the list indicates an image. Keys of annotations are: + + - `bboxes`: numpy array of shape (n, 4) + - `labels`: numpy array of shape (n, ) + - `bboxes_ignore` (optional): numpy array of shape (k, 4) + - `labels_ignore` (optional): numpy array of shape (k, ) + scale_ranges (list[tuple] | None): Range of scales to be evaluated, + in the format [(min1, max1), (min2, max2), ...]. A range of + (32, 64) means the area range between (32**2, 64**2). + Default: None. + iou_thr (float): IoU threshold to be considered as matched. + Default: 0.5. + dataset (list[str] | str | None): Dataset name or dataset classes, + there are minor differences in metrics for different datsets, e.g. + "voc07", "imagenet_det", etc. Default: None. + logger (logging.Logger | str | None): The way to print the mAP + summary. 
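
A minimal end-to-end sketch for `eval_map`: one image, one class, and a single detection that matches the only gt box exactly. Note that `tpfp_default` uses the deprecated `np.bool` alias, so this only runs on NumPy versions that still provide it:

```python
import numpy as np

det_results = [[np.array([[10.0, 10.0, 50.0, 50.0, 0.9]], dtype=np.float32)]]
annotations = [dict(
    bboxes=np.array([[10.0, 10.0, 50.0, 50.0]], dtype=np.float32),
    labels=np.array([0]),
)]

if __name__ == '__main__':  # eval_map spawns a multiprocessing Pool
    mean_ap, per_class = eval_map(det_results, annotations, iou_thr=0.5, nproc=1)
    print(mean_ap)  # 1.0 for a perfect match
```
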
See `mmcv.utils.print_log()` for details. Default: None. + tpfp_fn (callable | None): The function used to determine true/ + false positives. If None, :func:`tpfp_default` is used as default + unless dataset is 'det' or 'vid' (:func:`tpfp_imagenet` in this + case). If it is given as a function, then this function is used + to evaluate tp & fp. Default None. + nproc (int): Processes used for computing TP and FP. + Default: 4. + + Returns: + tuple: (mAP, [dict, dict, ...]) + """ + assert len(det_results) == len(annotations) + + num_imgs = len(det_results) + num_scales = len(scale_ranges) if scale_ranges is not None else 1 + num_classes = len(det_results[0]) # positive class num + area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges] + if scale_ranges is not None else None) + + pool = Pool(nproc) + eval_results = [] + for i in range(num_classes): + # get gt and det bboxes of this class + cls_dets, cls_gts, cls_gts_ignore = get_cls_results( + det_results, annotations, i) + # choose proper function according to datasets to compute tp and fp + if tpfp_fn is None: + if dataset in ['det', 'vid']: + tpfp_fn = tpfp_imagenet + else: + tpfp_fn = tpfp_default + if not callable(tpfp_fn): + raise ValueError( + f'tpfp_fn has to be a function or None, but got {tpfp_fn}') + + # compute tp and fp for each image with multiple processes + tpfp = pool.starmap( + tpfp_fn, + zip(cls_dets, cls_gts, cls_gts_ignore, + [iou_thr for _ in range(num_imgs)], + [area_ranges for _ in range(num_imgs)])) + tp, fp = tuple(zip(*tpfp)) + # calculate gt number of each scale + # ignored gts or gts beyond the specific scale are not counted + num_gts = np.zeros(num_scales, dtype=int) + for j, bbox in enumerate(cls_gts): + if area_ranges is None: + num_gts[0] += bbox.shape[0] + else: + gt_areas = (bbox[:, 2] - bbox[:, 0]) * ( + bbox[:, 3] - bbox[:, 1]) + for k, (min_area, max_area) in enumerate(area_ranges): + num_gts[k] += np.sum((gt_areas >= min_area) + & (gt_areas < max_area)) + # sort all det bboxes by score, also sort tp and fp + cls_dets = np.vstack(cls_dets) + num_dets = cls_dets.shape[0] + sort_inds = np.argsort(-cls_dets[:, -1]) + tp = np.hstack(tp)[:, sort_inds] + fp = np.hstack(fp)[:, sort_inds] + # calculate recall and precision with tp and fp + tp = np.cumsum(tp, axis=1) + fp = np.cumsum(fp, axis=1) + eps = np.finfo(np.float32).eps + recalls = tp / np.maximum(num_gts[:, np.newaxis], eps) + precisions = tp / np.maximum((tp + fp), eps) + # calculate AP + if scale_ranges is None: + recalls = recalls[0, :] + precisions = precisions[0, :] + num_gts = num_gts.item() + mode = 'area' if dataset != 'voc07' else '11points' + ap = average_precision(recalls, precisions, mode) + eval_results.append({ + 'num_gts': num_gts, + 'num_dets': num_dets, + 'recall': recalls, + 'precision': precisions, + 'ap': ap + }) + pool.close() + if scale_ranges is not None: + # shape (num_classes, num_scales) + all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results]) + all_num_gts = np.vstack( + [cls_result['num_gts'] for cls_result in eval_results]) + mean_ap = [] + for i in range(num_scales): + if np.any(all_num_gts[:, i] > 0): + mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean()) + else: + mean_ap.append(0.0) + else: + aps = [] + for cls_result in eval_results: + if cls_result['num_gts'] > 0: + aps.append(cls_result['ap']) + mean_ap = np.array(aps).mean().item() if aps else 0.0 + + print_map_summary( + mean_ap, eval_results, dataset, area_ranges, logger=logger) + + return mean_ap, eval_results + + +def 
print_map_summary(mean_ap, + results, + dataset=None, + scale_ranges=None, + logger=None): + """Print mAP and results of each class. + + A table will be printed to show the gts/dets/recall/AP of each class and + the mAP. + + Args: + mean_ap (float): Calculated from `eval_map()`. + results (list[dict]): Calculated from `eval_map()`. + dataset (list[str] | str | None): Dataset name or dataset classes. + scale_ranges (list[tuple] | None): Range of scales to be evaluated. + logger (logging.Logger | str | None): The way to print the mAP + summary. See `mmcv.utils.print_log()` for details. Default: None. + """ + + if logger == 'silent': + return + + if isinstance(results[0]['ap'], np.ndarray): + num_scales = len(results[0]['ap']) + else: + num_scales = 1 + + if scale_ranges is not None: + assert len(scale_ranges) == num_scales + + num_classes = len(results) + + recalls = np.zeros((num_scales, num_classes), dtype=np.float32) + aps = np.zeros((num_scales, num_classes), dtype=np.float32) + num_gts = np.zeros((num_scales, num_classes), dtype=int) + for i, cls_result in enumerate(results): + if cls_result['recall'].size > 0: + recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1] + aps[:, i] = cls_result['ap'] + num_gts[:, i] = cls_result['num_gts'] + + if dataset is None: + label_names = [str(i) for i in range(num_classes)] + elif is_str(dataset): + label_names = get_classes(dataset) + else: + label_names = dataset + + if not isinstance(mean_ap, list): + mean_ap = [mean_ap] + + header = ['class', 'gts', 'dets', 'recall', 'ap'] + for i in range(num_scales): + if scale_ranges is not None: + print_log(f'Scale range {scale_ranges[i]}', logger=logger) + table_data = [header] + for j in range(num_classes): + row_data = [ + label_names[j], num_gts[i, j], results[j]['num_dets'], + f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}' + ] + table_data.append(row_data) + table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}']) + table = AsciiTable(table_data) + table.inner_footing_row_border = True + print_log('\n' + table.table, logger=logger) diff --git a/mmcv/core/evaluation/metric_motion.py b/mmcv/core/evaluation/metric_motion.py new file mode 100644 index 0000000..8219438 --- /dev/null +++ b/mmcv/core/evaluation/metric_motion.py @@ -0,0 +1,70 @@ +# + +"""This module evaluates the forecasted trajectories against the ground truth.""" + +import math +from typing import Dict, List, Optional + +import numpy as np +import torch + +LOW_PROB_THRESHOLD_FOR_METRICS = 0.05 + + +def get_ade(forecasted_trajectory: torch.Tensor, gt_trajectory: torch.Tensor) -> float: + """Compute Average Displacement Error. + Args: + forecasted_trajectory: Predicted trajectory with shape [fut_ts, 2] + gt_trajectory: Ground truth trajectory with shape [fut_ts, 2] + Returns: + ade: Average Displacement Error + """ + pred_len = forecasted_trajectory.shape[0] + ade = float( + sum( + torch.sqrt( + (forecasted_trajectory[i, 0] - gt_trajectory[i, 0]) ** 2 + + (forecasted_trajectory[i, 1] - gt_trajectory[i, 1]) ** 2 + ) + for i in range(pred_len) + ) + / pred_len + ) + return ade + +def get_best_preds( + forecasted_trajectory: torch.Tensor, + gt_trajectory: torch.Tensor +) -> float: + """Compute min Average Displacement Error. 
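
A worked toy for the displacement errors: a prediction offset from the ground truth by a constant 1 m in x has ADE = FDE = 1.0 (`get_fde` is defined just below):

```python
import torch

gt = torch.tensor([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])
pred = gt + torch.tensor([1.0, 0.0])  # shift every waypoint by 1 m in x

print(get_ade(pred, gt))  # 1.0
print(get_fde(pred, gt))  # 1.0
```
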
+    Args:
+        forecasted_trajectory: Predicted trajectory with shape [k, fut_ts, 2]
+        gt_trajectory: Ground truth trajectory with shape [fut_ts, 2]
+    Returns:
+        best_forecasted_trajectory: Predicted trajectory with shape [fut_ts, 2]
+    """
+
+    # [k, fut_ts]
+    dist = torch.linalg.norm(gt_trajectory[None] - forecasted_trajectory, dim=-1)
+    # select the mode whose final-timestep displacement is smallest
+    dist = dist[..., -1]
+    dist[torch.isnan(dist)] = 0
+    min_mode_idx = torch.argmin(dist, dim=-1)
+
+    return forecasted_trajectory[min_mode_idx]
+
+def get_fde(forecasted_trajectory: torch.Tensor, gt_trajectory: torch.Tensor) -> float:
+    """Compute Final Displacement Error.
+    Args:
+        forecasted_trajectory: Predicted trajectory with shape [fut_ts, 2]
+        gt_trajectory: Ground truth trajectory with shape [fut_ts, 2]
+    Returns:
+        fde: Final Displacement Error
+    """
+    fde = float(
+        torch.sqrt(
+            (forecasted_trajectory[-1, 0] - gt_trajectory[-1, 0]) ** 2
+            + (forecasted_trajectory[-1, 1] - gt_trajectory[-1, 1]) ** 2
+        )
+    )
+    return fde
diff --git a/mmcv/core/evaluation/metrics.py b/mmcv/core/evaluation/metrics.py
new file mode 100644
index 0000000..551203a
--- /dev/null
+++ b/mmcv/core/evaluation/metrics.py
@@ -0,0 +1,325 @@
+from collections import OrderedDict
+from mmcv.image import imread
+import numpy as np
+import torch
+
+
+def f_score(precision, recall, beta=1):
+    """Calculate the f-score value.
+
+    Args:
+        precision (float | torch.Tensor): The precision value.
+        recall (float | torch.Tensor): The recall value.
+        beta (int): Determines the weight of recall in the combined score.
+            Default: 1.
+
+    Returns:
+        torch.Tensor: The f-score value.
+    """
+    score = (1 + beta**2) * (precision * recall) / (
+        (beta**2 * precision) + recall)
+    return score
+
+
+def intersect_and_union(pred_label,
+                        label,
+                        num_classes,
+                        ignore_index,
+                        label_map=dict(),
+                        reduce_zero_label=False):
+    """Calculate intersection and Union.
+
+    Args:
+        pred_label (ndarray | str): Prediction segmentation map
+            or predict result filename.
+        label (ndarray | str): Ground truth segmentation map
+            or label filename.
+        num_classes (int): Number of categories.
+        ignore_index (int): Index that will be ignored in evaluation.
+        label_map (dict): Mapping old labels to new labels. The parameter will
+            work only when label is str. Default: dict().
+        reduce_zero_label (bool): Whether to ignore the zero label. The
+            parameter will work only when label is str. Default: False.
+
+    Returns:
+        torch.Tensor: The intersection of prediction and ground truth
+            histogram on all classes.
+        torch.Tensor: The union of prediction and ground truth histogram on
+            all classes.
+        torch.Tensor: The prediction histogram on all classes.
+        torch.Tensor: The ground truth histogram on all classes.
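
A quick numeric check of `f_score` above: with `beta=1` it is the harmonic mean of precision and recall, and larger `beta` weights recall more heavily:

```python
# F1 of precision 0.5 and recall 1.0: 2 * 0.5 * 1.0 / (0.5 + 1.0) = 2/3.
print(f_score(0.5, 1.0, beta=1))  # 0.666...

# F2 weights recall higher: (1 + 4) * 0.5 / (4 * 0.5 + 1.0) = 0.833...
print(f_score(0.5, 1.0, beta=2))
```
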
+ """ + + if isinstance(pred_label, str): + pred_label = torch.from_numpy(np.load(pred_label)) + else: + pred_label = torch.from_numpy((pred_label)) + + if isinstance(label, str): + label = torch.from_numpy( + imread(label, flag='unchanged', backend='pillow')) + else: + label = torch.from_numpy(label) + + if label_map is not None: + for old_id, new_id in label_map.items(): + label[label == old_id] = new_id + if reduce_zero_label: + label[label == 0] = 255 + label = label - 1 + label[label == 254] = 255 + + mask = (label != ignore_index) + pred_label = pred_label[mask] + label = label[mask] + + intersect = pred_label[pred_label == label] + area_intersect = torch.histc( + intersect.float(), bins=(num_classes), min=0, max=num_classes - 1) + area_pred_label = torch.histc( + pred_label.float(), bins=(num_classes), min=0, max=num_classes - 1) + area_label = torch.histc( + label.float(), bins=(num_classes), min=0, max=num_classes - 1) + area_union = area_pred_label + area_label - area_intersect + return area_intersect, area_union, area_pred_label, area_label + + +def total_intersect_and_union(results, + gt_seg_maps, + num_classes, + ignore_index, + label_map=dict(), + reduce_zero_label=False): + """Calculate Total Intersection and Union. + + Args: + results (list[ndarray] | list[str]): List of prediction segmentation + maps or list of prediction result filenames. + gt_seg_maps (list[ndarray] | list[str]): list of ground truth + segmentation maps or list of label filenames. + num_classes (int): Number of categories. + ignore_index (int): Index that will be ignored in evaluation. + label_map (dict): Mapping old labels to new labels. Default: dict(). + reduce_zero_label (bool): Wether ignore zero label. Default: False. + + Returns: + ndarray: The intersection of prediction and ground truth histogram + on all classes. + ndarray: The union of prediction and ground truth histogram on all + classes. + ndarray: The prediction histogram on all classes. + ndarray: The ground truth histogram on all classes. + """ + num_imgs = len(results) + assert len(gt_seg_maps) == num_imgs + total_area_intersect = torch.zeros((num_classes, ), dtype=torch.float64) + total_area_union = torch.zeros((num_classes, ), dtype=torch.float64) + total_area_pred_label = torch.zeros((num_classes, ), dtype=torch.float64) + total_area_label = torch.zeros((num_classes, ), dtype=torch.float64) + for i in range(num_imgs): + area_intersect, area_union, area_pred_label, area_label = \ + intersect_and_union( + results[i], gt_seg_maps[i], num_classes, ignore_index, + label_map, reduce_zero_label) + total_area_intersect += area_intersect + total_area_union += area_union + total_area_pred_label += area_pred_label + total_area_label += area_label + return total_area_intersect, total_area_union, total_area_pred_label, \ + total_area_label + + +def mean_iou(results, + gt_seg_maps, + num_classes, + ignore_index, + nan_to_num=None, + label_map=dict(), + reduce_zero_label=False): + """Calculate Mean Intersection and Union (mIoU) + + Args: + results (list[ndarray] | list[str]): List of prediction segmentation + maps or list of prediction result filenames. + gt_seg_maps (list[ndarray] | list[str]): list of ground truth + segmentation maps or list of label filenames. + num_classes (int): Number of categories. + ignore_index (int): Index that will be ignored in evaluation. + nan_to_num (int, optional): If specified, NaN values will be replaced + by the numbers defined by the user. Default: None. + label_map (dict): Mapping old labels to new labels. 
Default: dict().
+        reduce_zero_label (bool): Whether to ignore the zero label.
+            Default: False.
+
+    Returns:
+        dict[str, float | ndarray]:
+            float: Overall accuracy on all images.
+            ndarray: Per category accuracy, shape (num_classes, ).
+            ndarray: Per category IoU, shape (num_classes, ).
+    """
+    iou_result = eval_metrics(
+        results=results,
+        gt_seg_maps=gt_seg_maps,
+        num_classes=num_classes,
+        ignore_index=ignore_index,
+        metrics=['mIoU'],
+        nan_to_num=nan_to_num,
+        label_map=label_map,
+        reduce_zero_label=reduce_zero_label)
+    return iou_result
+
+
+def mean_dice(results,
+              gt_seg_maps,
+              num_classes,
+              ignore_index,
+              nan_to_num=None,
+              label_map=dict(),
+              reduce_zero_label=False):
+    """Calculate Mean Dice (mDice).
+
+    Args:
+        results (list[ndarray] | list[str]): List of prediction segmentation
+            maps or list of prediction result filenames.
+        gt_seg_maps (list[ndarray] | list[str]): list of ground truth
+            segmentation maps or list of label filenames.
+        num_classes (int): Number of categories.
+        ignore_index (int): Index that will be ignored in evaluation.
+        nan_to_num (int, optional): If specified, NaN values will be replaced
+            by the numbers defined by the user. Default: None.
+        label_map (dict): Mapping old labels to new labels. Default: dict().
+        reduce_zero_label (bool): Whether to ignore the zero label.
+            Default: False.
+
+    Returns:
+        dict[str, float | ndarray]: Default metrics.
+            float: Overall accuracy on all images.
+            ndarray: Per category accuracy, shape (num_classes, ).
+            ndarray: Per category dice, shape (num_classes, ).
+    """
+
+    dice_result = eval_metrics(
+        results=results,
+        gt_seg_maps=gt_seg_maps,
+        num_classes=num_classes,
+        ignore_index=ignore_index,
+        metrics=['mDice'],
+        nan_to_num=nan_to_num,
+        label_map=label_map,
+        reduce_zero_label=reduce_zero_label)
+    return dice_result
+
+
+def mean_fscore(results,
+                gt_seg_maps,
+                num_classes,
+                ignore_index,
+                nan_to_num=None,
+                label_map=dict(),
+                reduce_zero_label=False,
+                beta=1):
+    """Calculate Mean F-Score (mFscore).
+
+    Args:
+        results (list[ndarray] | list[str]): List of prediction segmentation
+            maps or list of prediction result filenames.
+        gt_seg_maps (list[ndarray] | list[str]): list of ground truth
+            segmentation maps or list of label filenames.
+        num_classes (int): Number of categories.
+        ignore_index (int): Index that will be ignored in evaluation.
+        nan_to_num (int, optional): If specified, NaN values will be replaced
+            by the numbers defined by the user. Default: None.
+        label_map (dict): Mapping old labels to new labels. Default: dict().
+        reduce_zero_label (bool): Whether to ignore the zero label.
+            Default: False.
+        beta (int): Determines the weight of recall in the combined score.
+            Default: 1.
+
+    Returns:
+        dict[str, float | ndarray]: Default metrics.
+            float: Overall accuracy on all images.
+            ndarray: Per category recall, shape (num_classes, ).
+            ndarray: Per category precision, shape (num_classes, ).
+            ndarray: Per category f-score, shape (num_classes, ).
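
The wrappers `mean_iou`, `mean_dice` and `mean_fscore` all route through `eval_metrics` (defined below) with a different `metrics` list; a small usage sketch:

```python
import numpy as np

results = [np.array([[0, 0], [1, 1]])]
gt_seg_maps = [np.array([[0, 1], [1, 1]])]

out = mean_iou(results, gt_seg_maps, num_classes=2, ignore_index=255)
print(out['aAcc'])  # 0.75 -- 3 of 4 pixels are correct
print(out['IoU'])   # [0.5, 0.6667]
```
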
+ """ + fscore_result = eval_metrics( + results=results, + gt_seg_maps=gt_seg_maps, + num_classes=num_classes, + ignore_index=ignore_index, + metrics=['mFscore'], + nan_to_num=nan_to_num, + label_map=label_map, + reduce_zero_label=reduce_zero_label, + beta=beta) + return fscore_result + + +def eval_metrics(results, + gt_seg_maps, + num_classes, + ignore_index, + metrics=['mIoU'], + nan_to_num=None, + label_map=dict(), + reduce_zero_label=False, + beta=1): + """Calculate evaluation metrics + Args: + results (list[ndarray] | list[str]): List of prediction segmentation + maps or list of prediction result filenames. + gt_seg_maps (list[ndarray] | list[str]): list of ground truth + segmentation maps or list of label filenames. + num_classes (int): Number of categories. + ignore_index (int): Index that will be ignored in evaluation. + metrics (list[str] | str): Metrics to be evaluated, 'mIoU' and 'mDice'. + nan_to_num (int, optional): If specified, NaN values will be replaced + by the numbers defined by the user. Default: None. + label_map (dict): Mapping old labels to new labels. Default: dict(). + reduce_zero_label (bool): Wether ignore zero label. Default: False. + Returns: + float: Overall accuracy on all images. + ndarray: Per category accuracy, shape (num_classes, ). + ndarray: Per category evaluation metrics, shape (num_classes, ). + """ + if isinstance(metrics, str): + metrics = [metrics] + allowed_metrics = ['mIoU', 'mDice', 'mFscore'] + if not set(metrics).issubset(set(allowed_metrics)): + raise KeyError('metrics {} is not supported'.format(metrics)) + + total_area_intersect, total_area_union, total_area_pred_label, \ + total_area_label = total_intersect_and_union( + results, gt_seg_maps, num_classes, ignore_index, label_map, + reduce_zero_label) + all_acc = total_area_intersect.sum() / total_area_label.sum() + ret_metrics = OrderedDict({'aAcc': all_acc}) + for metric in metrics: + if metric == 'mIoU': + iou = total_area_intersect / total_area_union + acc = total_area_intersect / total_area_label + ret_metrics['IoU'] = iou + ret_metrics['Acc'] = acc + elif metric == 'mDice': + dice = 2 * total_area_intersect / ( + total_area_pred_label + total_area_label) + acc = total_area_intersect / total_area_label + ret_metrics['Dice'] = dice + ret_metrics['Acc'] = acc + elif metric == 'mFscore': + precision = total_area_intersect / total_area_pred_label + recall = total_area_intersect / total_area_label + f_value = torch.tensor( + [f_score(x[0], x[1], beta) for x in zip(precision, recall)]) + ret_metrics['Fscore'] = f_value + ret_metrics['Precision'] = precision + ret_metrics['Recall'] = recall + + ret_metrics = { + metric: value.numpy() + for metric, value in ret_metrics.items() + } + if nan_to_num is not None: + ret_metrics = OrderedDict({ + metric: np.nan_to_num(metric_value, nan=nan_to_num) + for metric, metric_value in ret_metrics.items() + }) + return ret_metrics diff --git a/mmcv/core/evaluation/recall.py b/mmcv/core/evaluation/recall.py new file mode 100644 index 0000000..23ec744 --- /dev/null +++ b/mmcv/core/evaluation/recall.py @@ -0,0 +1,189 @@ +from collections.abc import Sequence + +import numpy as np +from mmcv.utils import print_log +from terminaltables import AsciiTable + +from .bbox_overlaps import bbox_overlaps + + +def _recalls(all_ious, proposal_nums, thrs): + + img_num = all_ious.shape[0] + total_gt_num = sum([ious.shape[0] for ious in all_ious]) + + _ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32) + for k, proposal_num in enumerate(proposal_nums): + 
tmp_ious = np.zeros(0) + for i in range(img_num): + ious = all_ious[i][:, :proposal_num].copy() + gt_ious = np.zeros((ious.shape[0])) + if ious.size == 0: + tmp_ious = np.hstack((tmp_ious, gt_ious)) + continue + for j in range(ious.shape[0]): + gt_max_overlaps = ious.argmax(axis=1) + max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps] + gt_idx = max_ious.argmax() + gt_ious[j] = max_ious[gt_idx] + box_idx = gt_max_overlaps[gt_idx] + ious[gt_idx, :] = -1 + ious[:, box_idx] = -1 + tmp_ious = np.hstack((tmp_ious, gt_ious)) + _ious[k, :] = tmp_ious + + _ious = np.fliplr(np.sort(_ious, axis=1)) + recalls = np.zeros((proposal_nums.size, thrs.size)) + for i, thr in enumerate(thrs): + recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num) + + return recalls + + +def set_recall_param(proposal_nums, iou_thrs): + """Check proposal_nums and iou_thrs and set correct format.""" + if isinstance(proposal_nums, Sequence): + _proposal_nums = np.array(proposal_nums) + elif isinstance(proposal_nums, int): + _proposal_nums = np.array([proposal_nums]) + else: + _proposal_nums = proposal_nums + + if iou_thrs is None: + _iou_thrs = np.array([0.5]) + elif isinstance(iou_thrs, Sequence): + _iou_thrs = np.array(iou_thrs) + elif isinstance(iou_thrs, float): + _iou_thrs = np.array([iou_thrs]) + else: + _iou_thrs = iou_thrs + + return _proposal_nums, _iou_thrs + + +def eval_recalls(gts, + proposals, + proposal_nums=None, + iou_thrs=0.5, + logger=None): + """Calculate recalls. + + Args: + gts (list[ndarray]): a list of arrays of shape (n, 4) + proposals (list[ndarray]): a list of arrays of shape (k, 4) or (k, 5) + proposal_nums (int | Sequence[int]): Top N proposals to be evaluated. + iou_thrs (float | Sequence[float]): IoU thresholds. Default: 0.5. + logger (logging.Logger | str | None): The way to print the recall + summary. See `mmcv.utils.print_log()` for details. Default: None. + + Returns: + ndarray: recalls of different ious and proposal nums + """ + + img_num = len(gts) + assert img_num == len(proposals) + + proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs) + + all_ious = [] + for i in range(img_num): + if proposals[i].ndim == 2 and proposals[i].shape[1] == 5: + scores = proposals[i][:, 4] + sort_idx = np.argsort(scores)[::-1] + img_proposal = proposals[i][sort_idx, :] + else: + img_proposal = proposals[i] + prop_num = min(img_proposal.shape[0], proposal_nums[-1]) + if gts[i] is None or gts[i].shape[0] == 0: + ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32) + else: + ious = bbox_overlaps(gts[i], img_proposal[:prop_num, :4]) + all_ious.append(ious) + all_ious = np.array(all_ious) + recalls = _recalls(all_ious, proposal_nums, iou_thrs) + + print_recall_summary(recalls, proposal_nums, iou_thrs, logger=logger) + return recalls + + +def print_recall_summary(recalls, + proposal_nums, + iou_thrs, + row_idxs=None, + col_idxs=None, + logger=None): + """Print recalls in a table. + + Args: + recalls (ndarray): calculated from `bbox_recalls` + proposal_nums (ndarray or list): top N proposals + iou_thrs (ndarray or list): iou thresholds + row_idxs (ndarray): which rows(proposal nums) to print + col_idxs (ndarray): which cols(iou thresholds) to print + logger (logging.Logger | str | None): The way to print the recall + summary. See `mmcv.utils.print_log()` for details. Default: None. 
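
A hedged toy for `eval_recalls` above: one image, one gt box, and two scored proposals of which only the first overlaps the gt:

```python
import numpy as np

gts = [np.array([[0.0, 0.0, 10.0, 10.0]])]
proposals = [np.array([[0.0, 0.0, 10.0, 10.0, 0.9],
                       [20.0, 20.0, 30.0, 30.0, 0.1]])]

recalls = eval_recalls(gts, proposals, proposal_nums=[1, 2], iou_thrs=[0.5])
print(recalls)  # [[1.0], [1.0]] -- the top-1 proposal already covers the gt
```
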
+ """ + proposal_nums = np.array(proposal_nums, dtype=np.int32) + iou_thrs = np.array(iou_thrs) + if row_idxs is None: + row_idxs = np.arange(proposal_nums.size) + if col_idxs is None: + col_idxs = np.arange(iou_thrs.size) + row_header = [''] + iou_thrs[col_idxs].tolist() + table_data = [row_header] + for i, num in enumerate(proposal_nums[row_idxs]): + row = [f'{val:.3f}' for val in recalls[row_idxs[i], col_idxs].tolist()] + row.insert(0, num) + table_data.append(row) + table = AsciiTable(table_data) + print_log('\n' + table.table, logger=logger) + + +def plot_num_recall(recalls, proposal_nums): + """Plot Proposal_num-Recalls curve. + + Args: + recalls(ndarray or list): shape (k,) + proposal_nums(ndarray or list): same shape as `recalls` + """ + if isinstance(proposal_nums, np.ndarray): + _proposal_nums = proposal_nums.tolist() + else: + _proposal_nums = proposal_nums + if isinstance(recalls, np.ndarray): + _recalls = recalls.tolist() + else: + _recalls = recalls + + import matplotlib.pyplot as plt + f = plt.figure() + plt.plot([0] + _proposal_nums, [0] + _recalls) + plt.xlabel('Proposal num') + plt.ylabel('Recall') + plt.axis([0, proposal_nums.max(), 0, 1]) + f.show() + + +def plot_iou_recall(recalls, iou_thrs): + """Plot IoU-Recalls curve. + + Args: + recalls(ndarray or list): shape (k,) + iou_thrs(ndarray or list): same shape as `recalls` + """ + if isinstance(iou_thrs, np.ndarray): + _iou_thrs = iou_thrs.tolist() + else: + _iou_thrs = iou_thrs + if isinstance(recalls, np.ndarray): + _recalls = recalls.tolist() + else: + _recalls = recalls + + import matplotlib.pyplot as plt + f = plt.figure() + plt.plot(_iou_thrs + [1.0], _recalls + [0.]) + plt.xlabel('IoU') + plt.ylabel('Recall') + plt.axis([iou_thrs.min(), 1, 0, 1]) + f.show() diff --git a/mmcv/core/evaluation/seg_eval.py b/mmcv/core/evaluation/seg_eval.py new file mode 100644 index 0000000..542fedc --- /dev/null +++ b/mmcv/core/evaluation/seg_eval.py @@ -0,0 +1,131 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +from mmcv.utils import print_log +from terminaltables import AsciiTable + + +def fast_hist(preds, labels, num_classes): + """Compute the confusion matrix for every batch. + + Args: + preds (np.ndarray): Prediction labels of points with shape of + (num_points, ). + labels (np.ndarray): Ground truth labels of points with shape of + (num_points, ). + num_classes (int): number of classes + + Returns: + np.ndarray: Calculated confusion matrix. + """ + + k = (labels >= 0) & (labels < num_classes) + bin_count = np.bincount( + num_classes * labels[k].astype(int) + preds[k], + minlength=num_classes**2) + return bin_count[:num_classes**2].reshape(num_classes, num_classes) + + +def per_class_iou(hist): + """Compute the per class iou. + + Args: + hist(np.ndarray): Overall confusion martix + (num_classes, num_classes ). + + Returns: + np.ndarray: Calculated per class iou + """ + + return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist)) + + +def get_acc(hist): + """Compute the overall accuracy. + + Args: + hist(np.ndarray): Overall confusion martix + (num_classes, num_classes ). + + Returns: + float: Calculated overall acc + """ + + return np.diag(hist).sum() / hist.sum() + + +def get_acc_cls(hist): + """Compute the class average accuracy. + + Args: + hist(np.ndarray): Overall confusion martix + (num_classes, num_classes ). 
+
+    Returns:
+        float: Calculated class average acc
+    """
+
+    return np.nanmean(np.diag(hist) / hist.sum(axis=1))
+
+
+def seg_eval(gt_labels, seg_preds, label2cat, ignore_index, logger=None):
+    """Semantic Segmentation Evaluation.
+
+    Evaluate the result of the Semantic Segmentation.
+
+    Args:
+        gt_labels (list[torch.Tensor]): Ground truth labels.
+        seg_preds (list[torch.Tensor]): Predictions.
+        label2cat (dict): Map from label to category name.
+        ignore_index (int): Index that will be ignored in evaluation.
+        logger (logging.Logger | str | None): The way to print the mAP
+            summary. See `mmcv.utils.print_log()` for details. Default: None.
+
+    Returns:
+        dict[str, float]: Dict of results.
+    """
+    assert len(seg_preds) == len(gt_labels)
+    num_classes = len(label2cat)
+
+    hist_list = []
+    for i in range(len(gt_labels)):
+        # use a concrete integer dtype; the deprecated `np.int` alias is
+        # removed in recent NumPy releases
+        gt_seg = gt_labels[i].clone().numpy().astype(np.int64)
+        pred_seg = seg_preds[i].clone().numpy().astype(np.int64)
+
+        # filter out ignored points
+        pred_seg[gt_seg == ignore_index] = -1
+        gt_seg[gt_seg == ignore_index] = -1
+
+        # calculate one instance result
+        hist_list.append(fast_hist(pred_seg, gt_seg, num_classes))
+
+    iou = per_class_iou(sum(hist_list))
+    miou = np.nanmean(iou)
+    acc = get_acc(sum(hist_list))
+    acc_cls = get_acc_cls(sum(hist_list))
+
+    header = ['classes']
+    for i in range(len(label2cat)):
+        header.append(label2cat[i])
+    header.extend(['miou', 'acc', 'acc_cls'])
+
+    ret_dict = dict()
+    table_columns = [['results']]
+    for i in range(len(label2cat)):
+        ret_dict[label2cat[i]] = float(iou[i])
+        table_columns.append([f'{iou[i]:.4f}'])
+    ret_dict['miou'] = float(miou)
+    ret_dict['acc'] = float(acc)
+    ret_dict['acc_cls'] = float(acc_cls)
+
+    table_columns.append([f'{miou:.4f}'])
+    table_columns.append([f'{acc:.4f}'])
+    table_columns.append([f'{acc_cls:.4f}'])
+
+    table_data = [header]
+    table_rows = list(zip(*table_columns))
+    table_data += table_rows
+    table = AsciiTable(table_data)
+    table.inner_footing_row_border = True
+    print_log('\n' + table.table, logger=logger)
+
+    return ret_dict
diff --git a/mmcv/core/evaluation/waymo_utils/prediction_kitti_to_waymo.py b/mmcv/core/evaluation/waymo_utils/prediction_kitti_to_waymo.py
new file mode 100644
index 0000000..014b480
--- /dev/null
+++ b/mmcv/core/evaluation/waymo_utils/prediction_kitti_to_waymo.py
@@ -0,0 +1,262 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+r"""Adapted from `Waymo to KITTI converter
+    <https://github.com/caizhongang/waymo_kitti_converter>`_.
+"""
+
+try:
+    from waymo_open_dataset import dataset_pb2 as open_dataset
+except ImportError:
+    raise ImportError(
+        'Please run "pip install waymo-open-dataset-tf-2-1-0==1.2.0" '
+        'to install the official devkit first.')
+
+from mmcv.utils import mkdir_or_exist, track_parallel_progress
+import numpy as np
+import tensorflow as tf
+from glob import glob
+from os.path import join
+from waymo_open_dataset import label_pb2
+from waymo_open_dataset.protos import metrics_pb2
+
+
+class KITTI2Waymo(object):
+    """KITTI predictions to Waymo converter.
+
+    This class serves as the converter to change predictions from KITTI to
+    Waymo format.
+
+    Args:
+        kitti_result_files (list[dict]): Predictions in KITTI format.
+        waymo_tfrecords_dir (str): Directory to load waymo raw data.
+        waymo_results_save_dir (str): Directory to save converted predictions
+            in waymo format (.bin files).
+        waymo_results_final_path (str): Path to save combined
+            predictions in waymo format (.bin file), like 'a/b/c.bin'.
+        prefix (str): Prefix of filename. In general, 0 for training, 1 for
+            validation and 2 for testing.
+ workers (str): Number of parallel processes. + """ + + def __init__(self, + kitti_result_files, + waymo_tfrecords_dir, + waymo_results_save_dir, + waymo_results_final_path, + prefix, + workers=64): + + self.kitti_result_files = kitti_result_files + self.waymo_tfrecords_dir = waymo_tfrecords_dir + self.waymo_results_save_dir = waymo_results_save_dir + self.waymo_results_final_path = waymo_results_final_path + self.prefix = prefix + self.workers = int(workers) + self.name2idx = {} + for idx, result in enumerate(kitti_result_files): + if len(result['sample_idx']) > 0: + self.name2idx[str(result['sample_idx'][0])] = idx + + # turn on eager execution for older tensorflow versions + if int(tf.__version__.split('.')[0]) < 2: + tf.enable_eager_execution() + + self.k2w_cls_map = { + 'Car': label_pb2.Label.TYPE_VEHICLE, + 'Pedestrian': label_pb2.Label.TYPE_PEDESTRIAN, + 'Sign': label_pb2.Label.TYPE_SIGN, + 'Cyclist': label_pb2.Label.TYPE_CYCLIST, + } + + self.T_ref_to_front_cam = np.array([[0.0, 0.0, 1.0, 0.0], + [-1.0, 0.0, 0.0, 0.0], + [0.0, -1.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0]]) + + self.get_file_names() + self.create_folder() + + def get_file_names(self): + """Get file names of waymo raw data.""" + self.waymo_tfrecord_pathnames = sorted( + glob(join(self.waymo_tfrecords_dir, '*.tfrecord'))) + print(len(self.waymo_tfrecord_pathnames), 'tfrecords found.') + + def create_folder(self): + """Create folder for data conversion.""" + mkdir_or_exist(self.waymo_results_save_dir) + + def parse_objects(self, kitti_result, T_k2w, context_name, + frame_timestamp_micros): + """Parse one prediction with several instances in kitti format and + convert them to `Object` proto. + + Args: + kitti_result (dict): Predictions in kitti format. + + - name (np.ndarray): Class labels of predictions. + - dimensions (np.ndarray): Height, width, length of boxes. + - location (np.ndarray): Bottom center of boxes (x, y, z). + - rotation_y (np.ndarray): Orientation of boxes. + - score (np.ndarray): Scores of predictions. + T_k2w (np.ndarray): Transformation matrix from kitti to waymo. + context_name (str): Context name of the frame. + frame_timestamp_micros (int): Frame timestamp. + + Returns: + :obj:`Object`: Predictions in waymo dataset Object proto. + """ + + def parse_one_object(instance_idx): + """Parse one instance in kitti format and convert them to `Object` + proto. + + Args: + instance_idx (int): Index of the instance to be converted. + + Returns: + :obj:`Object`: Predicted instance in waymo dataset \ + Object proto. 
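
The heading conversion inside `parse_one_object` below can be checked in isolation; this standalone sketch reproduces the KITTI `rotation_y` to Waymo heading convention, wrapped to `[-pi, pi]`:

```python
import numpy as np


def kitti_ry_to_waymo_heading(rotation_y):
    # Different axis conventions: Waymo heading = -(rotation_y + pi/2).
    heading = -(rotation_y + np.pi / 2)
    while heading < -np.pi:
        heading += 2 * np.pi
    while heading > np.pi:
        heading -= 2 * np.pi
    return heading


print(kitti_ry_to_waymo_heading(0.0))    # -pi/2
print(kitti_ry_to_waymo_heading(np.pi))  # +pi/2 after wrapping
```
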
+ """ + cls = kitti_result['name'][instance_idx] + length = round(kitti_result['dimensions'][instance_idx, 0], 4) + height = round(kitti_result['dimensions'][instance_idx, 1], 4) + width = round(kitti_result['dimensions'][instance_idx, 2], 4) + x = round(kitti_result['location'][instance_idx, 0], 4) + y = round(kitti_result['location'][instance_idx, 1], 4) + z = round(kitti_result['location'][instance_idx, 2], 4) + rotation_y = round(kitti_result['rotation_y'][instance_idx], 4) + score = round(kitti_result['score'][instance_idx], 4) + + # y: downwards; move box origin from bottom center (kitti) to + # true center (waymo) + y -= height / 2 + # frame transformation: kitti -> waymo + x, y, z = self.transform(T_k2w, x, y, z) + + # different conventions + heading = -(rotation_y + np.pi / 2) + while heading < -np.pi: + heading += 2 * np.pi + while heading > np.pi: + heading -= 2 * np.pi + + box = label_pb2.Label.Box() + box.center_x = x + box.center_y = y + box.center_z = z + box.length = length + box.width = width + box.height = height + box.heading = heading + + o = metrics_pb2.Object() + o.object.box.CopyFrom(box) + o.object.type = self.k2w_cls_map[cls] + o.score = score + + o.context_name = context_name + o.frame_timestamp_micros = frame_timestamp_micros + + return o + + objects = metrics_pb2.Objects() + + for instance_idx in range(len(kitti_result['name'])): + o = parse_one_object(instance_idx) + objects.objects.append(o) + + return objects + + def convert_one(self, file_idx): + """Convert action for single file. + + Args: + file_idx (int): Index of the file to be converted. + """ + file_pathname = self.waymo_tfrecord_pathnames[file_idx] + file_data = tf.data.TFRecordDataset(file_pathname, compression_type='') + + for frame_num, frame_data in enumerate(file_data): + frame = open_dataset.Frame() + frame.ParseFromString(bytearray(frame_data.numpy())) + + filename = f'{self.prefix}{file_idx:03d}{frame_num:03d}' + + for camera in frame.context.camera_calibrations: + # FRONT = 1, see dataset.proto for details + if camera.name == 1: + T_front_cam_to_vehicle = np.array( + camera.extrinsic.transform).reshape(4, 4) + + T_k2w = T_front_cam_to_vehicle @ self.T_ref_to_front_cam + + context_name = frame.context.name + frame_timestamp_micros = frame.timestamp_micros + + if filename in self.name2idx: + kitti_result = \ + self.kitti_result_files[self.name2idx[filename]] + objects = self.parse_objects(kitti_result, T_k2w, context_name, + frame_timestamp_micros) + else: + print(filename, 'not found.') + objects = metrics_pb2.Objects() + + with open( + join(self.waymo_results_save_dir, f'{filename}.bin'), + 'wb') as f: + f.write(objects.SerializeToString()) + + def convert(self): + """Convert action.""" + print('Start converting ...') + track_parallel_progress(self.convert_one, range(len(self)), + self.workers) + print('\nFinished ...') + + # combine all files into one .bin + pathnames = sorted(glob(join(self.waymo_results_save_dir, '*.bin'))) + combined = self.combine(pathnames) + + with open(self.waymo_results_final_path, 'wb') as f: + f.write(combined.SerializeToString()) + + def __len__(self): + """Length of the filename list.""" + return len(self.waymo_tfrecord_pathnames) + + def transform(self, T, x, y, z): + """Transform the coordinates with matrix T. + + Args: + T (np.ndarray): Transformation matrix. + x(float): Coordinate in x axis. + y(float): Coordinate in y axis. + z(float): Coordinate in z axis. + + Returns: + list: Coordinates after transformation. 
+ """ + pt_bef = np.array([x, y, z, 1.0]).reshape(4, 1) + pt_aft = np.matmul(T, pt_bef) + return pt_aft[:3].flatten().tolist() + + def combine(self, pathnames): + """Combine predictions in waymo format for each sample together. + + Args: + pathnames (str): Paths to save predictions. + + Returns: + :obj:`Objects`: Combined predictions in Objects proto. + """ + combined = metrics_pb2.Objects() + + for pathname in pathnames: + objects = metrics_pb2.Objects() + with open(pathname, 'rb') as f: + objects.ParseFromString(f.read()) + for o in objects.objects: + combined.objects.append(o) + + return combined diff --git a/mmcv/core/mask/__init__.py b/mmcv/core/mask/__init__.py new file mode 100644 index 0000000..02cbbc7 --- /dev/null +++ b/mmcv/core/mask/__init__.py @@ -0,0 +1,6 @@ +from .mask_target import mask_target +from .utils import encode_mask_results, split_combined_polys + +__all__ = [ + 'split_combined_polys', 'mask_target', 'encode_mask_results' +] diff --git a/mmcv/core/mask/mask_target.py b/mmcv/core/mask/mask_target.py new file mode 100644 index 0000000..e8f5461 --- /dev/null +++ b/mmcv/core/mask/mask_target.py @@ -0,0 +1,126 @@ +import numpy as np +import torch +from torch.nn.modules.utils import _pair + + +def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list, + cfg): + """Compute mask target for positive proposals in multiple images. + + Args: + pos_proposals_list (list[Tensor]): Positive proposals in multiple + images. + pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for each + positive proposals. + gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks of + each image. + cfg (dict): Config dict that specifies the mask size. + + Returns: + list[Tensor]: Mask target of each image. + + Example: + >>> import mmcv + >>> import mmdet + >>> from mmcv.core.mask import BitmapMasks + >>> from mmcv.core.mask.mask_target import * + >>> H, W = 17, 18 + >>> cfg = mmcv.Config({'mask_size': (13, 14)}) + >>> rng = np.random.RandomState(0) + >>> # Positive proposals (tl_x, tl_y, br_x, br_y) for each image + >>> pos_proposals_list = [ + >>> torch.Tensor([ + >>> [ 7.2425, 5.5929, 13.9414, 14.9541], + >>> [ 7.3241, 3.6170, 16.3850, 15.3102], + >>> ]), + >>> torch.Tensor([ + >>> [ 4.8448, 6.4010, 7.0314, 9.7681], + >>> [ 5.9790, 2.6989, 7.4416, 4.8580], + >>> [ 0.0000, 0.0000, 0.1398, 9.8232], + >>> ]), + >>> ] + >>> # Corresponding class index for each proposal for each image + >>> pos_assigned_gt_inds_list = [ + >>> torch.LongTensor([7, 0]), + >>> torch.LongTensor([5, 4, 1]), + >>> ] + >>> # Ground truth mask for each true object for each image + >>> gt_masks_list = [ + >>> BitmapMasks(rng.rand(8, H, W), height=H, width=W), + >>> BitmapMasks(rng.rand(6, H, W), height=H, width=W), + >>> ] + >>> mask_targets = mask_target( + >>> pos_proposals_list, pos_assigned_gt_inds_list, + >>> gt_masks_list, cfg) + >>> assert mask_targets.shape == (5,) + cfg['mask_size'] + """ + cfg_list = [cfg for _ in range(len(pos_proposals_list))] + mask_targets = map(mask_target_single, pos_proposals_list, + pos_assigned_gt_inds_list, gt_masks_list, cfg_list) + mask_targets = list(mask_targets) + if len(mask_targets) > 0: + mask_targets = torch.cat(mask_targets) + return mask_targets + + +def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg): + """Compute mask target for each positive proposal in the image. + + Args: + pos_proposals (Tensor): Positive proposals. + pos_assigned_gt_inds (Tensor): Assigned GT inds of positive proposals. 
+        gt_masks (:obj:`BaseInstanceMasks`): GT masks in the format of Bitmap
+            or Polygon.
+        cfg (dict): Config dict that indicates the mask size.
+
+    Returns:
+        Tensor: Mask target of each positive proposal in the image.
+
+    Example:
+        >>> import mmcv
+        >>> import mmdet
+        >>> from mmcv.core.mask import BitmapMasks
+        >>> from mmcv.core.mask.mask_target import *  # NOQA
+        >>> H, W = 32, 32
+        >>> cfg = mmcv.Config({'mask_size': (7, 11)})
+        >>> rng = np.random.RandomState(0)
+        >>> # Masks for each ground truth box (relative to the image)
+        >>> gt_masks_data = rng.rand(3, H, W)
+        >>> gt_masks = BitmapMasks(gt_masks_data, height=H, width=W)
+        >>> # Predicted positive boxes in one image
+        >>> pos_proposals = torch.FloatTensor([
+        >>>     [ 16.2,   5.5, 19.9, 20.9],
+        >>>     [ 17.3,  13.6, 19.3, 19.3],
+        >>>     [ 14.8,  16.4, 17.0, 23.7],
+        >>>     [  0.0,   0.0, 16.0, 16.0],
+        >>>     [  4.0,   0.0, 20.0, 16.0],
+        >>> ])
+        >>> # For each predicted proposal, its assignment to a gt mask
+        >>> pos_assigned_gt_inds = torch.LongTensor([0, 1, 2, 1, 1])
+        >>> mask_targets = mask_target_single(
+        >>>     pos_proposals, pos_assigned_gt_inds, gt_masks, cfg)
+        >>> assert mask_targets.shape == (5,) + cfg['mask_size']
+    """
+    device = pos_proposals.device
+    mask_size = _pair(cfg.mask_size)
+    binarize = not cfg.get('soft_mask_target', False)
+    num_pos = pos_proposals.size(0)
+    if num_pos > 0:
+        proposals_np = pos_proposals.cpu().numpy()
+        maxh, maxw = gt_masks.height, gt_masks.width
+        proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw)
+        proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh)
+        pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
+
+        mask_targets = gt_masks.crop_and_resize(
+            proposals_np,
+            mask_size,
+            device=device,
+            inds=pos_assigned_gt_inds,
+            binarize=binarize).to_ndarray()
+
+        mask_targets = torch.from_numpy(mask_targets).float().to(device)
+    else:
+        mask_targets = pos_proposals.new_zeros((0, ) + mask_size)
+
+    return mask_targets
diff --git a/mmcv/core/mask/structures.py b/mmcv/core/mask/structures.py
new file mode 100644
index 0000000..10d9155
--- /dev/null
+++ b/mmcv/core/mask/structures.py
@@ -0,0 +1,1037 @@
+from abc import ABCMeta, abstractmethod
+
+import cv2
+import numpy as np
+import pycocotools.mask as maskUtils
+import torch
+from mmcv.ops.roi_align import roi_align
+from mmcv.image import (rescale_size, imrescale, imresize, imflip, impad,
+                        imtranslate, imshear, imrotate)
+
+
+class BaseInstanceMasks(metaclass=ABCMeta):
+    """Base class for instance masks."""
+
+    @abstractmethod
+    def rescale(self, scale, interpolation='nearest'):
+        """Rescale masks as large as possible while keeping the aspect ratio.
+        For details, refer to :func:`mmcv.imrescale`.
+
+        Args:
+            scale (tuple[int]): The maximum size (h, w) of the rescaled mask.
+            interpolation (str): Same as :func:`mmcv.imrescale`.
+
+        Returns:
+            BaseInstanceMasks: The rescaled masks.
+        """
+
+    @abstractmethod
+    def resize(self, out_shape, interpolation='nearest'):
+        """Resize masks to the given out_shape.
+
+        Args:
+            out_shape: Target (h, w) of resized mask.
+            interpolation (str): See :func:`mmcv.imresize`.
+
+        Returns:
+            BaseInstanceMasks: The resized masks.
+        """
+
+    @abstractmethod
+    def flip(self, flip_direction='horizontal'):
+        """Flip masks along the given direction.
+
+        Args:
+            flip_direction (str): Either 'horizontal' or 'vertical'.
+
+        Returns:
+            BaseInstanceMasks: The flipped masks.
+        """
+
+    @abstractmethod
+    def pad(self, out_shape, pad_val):
+        """Pad masks to the given size of (h, w).
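+
+        A minimal sketch of the expected behavior (editor's illustration,
+        using the concrete ``BitmapMasks`` subclass defined below; padding
+        keeps the original content at the top-left corner):
+
+        >>> import numpy as np
+        >>> from mmcv.core.mask.structures import BitmapMasks
+        >>> masks = BitmapMasks(np.ones((1, 4, 4), dtype=np.uint8), 4, 4)
+        >>> padded = masks.pad((6, 8), pad_val=0)
+        >>> (padded.height, padded.width)
+        (6, 8)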
+ + Args: + out_shape (tuple[int]): Target (h, w) of padded mask. + pad_val (int): The padded value. + + Returns: + BaseInstanceMasks: The padded masks. + """ + + @abstractmethod + def crop(self, bbox): + """Crop each mask by the given bbox. + + Args: + bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ). + + Return: + BaseInstanceMasks: The cropped masks. + """ + + @abstractmethod + def crop_and_resize(self, + bboxes, + out_shape, + inds, + device, + interpolation='bilinear', + binarize=True): + """Crop and resize masks by the given bboxes. + + This function is mainly used in mask targets computation. + It firstly align mask to bboxes by assigned_inds, then crop mask by the + assigned bbox and resize to the size of (mask_h, mask_w) + + Args: + bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4) + out_shape (tuple[int]): Target (h, w) of resized mask + inds (ndarray): Indexes to assign masks to each bbox, + shape (N,) and values should be between [0, num_masks - 1]. + device (str): Device of bboxes + interpolation (str): See `mmcv.imresize` + binarize (bool): if True fractional values are rounded to 0 or 1 + after the resize operation. if False and unsupported an error + will be raised. Defaults to True. + + Return: + BaseInstanceMasks: the cropped and resized masks. + """ + + @abstractmethod + def expand(self, expanded_h, expanded_w, top, left): + """see :class:`Expand`.""" + + @property + @abstractmethod + def areas(self): + """ndarray: areas of each instance.""" + + @abstractmethod + def to_ndarray(self): + """Convert masks to the format of ndarray. + + Return: + ndarray: Converted masks in the format of ndarray. + """ + + @abstractmethod + def to_tensor(self, dtype, device): + """Convert masks to the format of Tensor. + + Args: + dtype (str): Dtype of converted mask. + device (torch.device): Device of converted masks. + + Returns: + Tensor: Converted masks in the format of Tensor. + """ + + @abstractmethod + def translate(self, + out_shape, + offset, + direction='horizontal', + fill_val=0, + interpolation='bilinear'): + """Translate the masks. + + Args: + out_shape (tuple[int]): Shape for output mask, format (h, w). + offset (int | float): The offset for translate. + direction (str): The translate direction, either "horizontal" + or "vertical". + fill_val (int | float): Border value. Default 0. + interpolation (str): Same as :func:`mmcv.imtranslate`. + + Returns: + Translated masks. + """ + + def shear(self, + out_shape, + magnitude, + direction='horizontal', + border_value=0, + interpolation='bilinear'): + """Shear the masks. + + Args: + out_shape (tuple[int]): Shape for output mask, format (h, w). + magnitude (int | float): The magnitude used for shear. + direction (str): The shear direction, either "horizontal" + or "vertical". + border_value (int | tuple[int]): Value used in case of a + constant border. Default 0. + interpolation (str): Same as in :func:`mmcv.imshear`. + + Returns: + ndarray: Sheared masks. + """ + + @abstractmethod + def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): + """Rotate the masks. + + Args: + out_shape (tuple[int]): Shape for output mask, format (h, w). + angle (int | float): Rotation angle in degrees. Positive values + mean counter-clockwise rotation. + center (tuple[float], optional): Center point (w, h) of the + rotation in source image. If not specified, the center of + the image will be used. + scale (int | float): Isotropic scale factor. + fill_val (int | float): Border value. Default 0 for masks. 
+ + Returns: + Rotated masks. + """ + + +class BitmapMasks(BaseInstanceMasks): + """This class represents masks in the form of bitmaps. + + Args: + masks (ndarray): ndarray of masks in shape (N, H, W), where N is + the number of objects. + height (int): height of masks + width (int): width of masks + + Example: + >>> from mmcv.core.mask.structures import * # NOQA + >>> num_masks, H, W = 3, 32, 32 + >>> rng = np.random.RandomState(0) + >>> masks = (rng.rand(num_masks, H, W) > 0.1).astype(np.int) + >>> self = BitmapMasks(masks, height=H, width=W) + + >>> # demo crop_and_resize + >>> num_boxes = 5 + >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes) + >>> out_shape = (14, 14) + >>> inds = torch.randint(0, len(self), size=(num_boxes,)) + >>> device = 'cpu' + >>> interpolation = 'bilinear' + >>> new = self.crop_and_resize( + ... bboxes, out_shape, inds, device, interpolation) + >>> assert len(new) == num_boxes + >>> assert new.height, new.width == out_shape + """ + + def __init__(self, masks, height, width): + self.height = height + self.width = width + if len(masks) == 0: + self.masks = np.empty((0, self.height, self.width), dtype=np.uint8) + else: + assert isinstance(masks, (list, np.ndarray)) + if isinstance(masks, list): + assert isinstance(masks[0], np.ndarray) + assert masks[0].ndim == 2 # (H, W) + else: + assert masks.ndim == 3 # (N, H, W) + + self.masks = np.stack(masks).reshape(-1, height, width) + assert self.masks.shape[1] == self.height + assert self.masks.shape[2] == self.width + + def __getitem__(self, index): + """Index the BitmapMask. + + Args: + index (int | ndarray): Indices in the format of integer or ndarray. + + Returns: + :obj:`BitmapMasks`: Indexed bitmap masks. + """ + masks = self.masks[index].reshape(-1, self.height, self.width) + return BitmapMasks(masks, self.height, self.width) + + def __iter__(self): + return iter(self.masks) + + def __repr__(self): + s = self.__class__.__name__ + '(' + s += f'num_masks={len(self.masks)}, ' + s += f'height={self.height}, ' + s += f'width={self.width})' + return s + + def __len__(self): + """Number of masks.""" + return len(self.masks) + + def rescale(self, scale, interpolation='nearest'): + """See :func:`BaseInstanceMasks.rescale`.""" + if len(self.masks) == 0: + new_w, new_h = rescale_size((self.width, self.height), scale) + rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8) + else: + rescaled_masks = np.stack([ + imrescale(mask, scale, interpolation=interpolation) + for mask in self.masks + ]) + height, width = rescaled_masks.shape[1:] + return BitmapMasks(rescaled_masks, height, width) + + def resize(self, out_shape, interpolation='nearest'): + """See :func:`BaseInstanceMasks.resize`.""" + if len(self.masks) == 0: + resized_masks = np.empty((0, *out_shape), dtype=np.uint8) + else: + resized_masks = np.stack([ + imresize( + mask, out_shape[::-1], interpolation=interpolation) + for mask in self.masks + ]) + return BitmapMasks(resized_masks, *out_shape) + + def flip(self, flip_direction='horizontal'): + """See :func:`BaseInstanceMasks.flip`.""" + assert flip_direction in ('horizontal', 'vertical', 'diagonal') + + if len(self.masks) == 0: + flipped_masks = self.masks + else: + flipped_masks = np.stack([ + imflip(mask, direction=flip_direction) + for mask in self.masks + ]) + return BitmapMasks(flipped_masks, self.height, self.width) + + def pad(self, out_shape, pad_val=0): + """See :func:`BaseInstanceMasks.pad`.""" + if len(self.masks) == 0: + padded_masks = np.empty((0, *out_shape), dtype=np.uint8) + else: + 
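+                # editor's note: pad each mask out to the requested (h, w);
+                # mmcv.impad keeps the original content at the top-left
+                # corner and fills the new border area with ``pad_val``
+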
padded_masks = np.stack([ + impad(mask, shape=out_shape, pad_val=pad_val) + for mask in self.masks + ]) + return BitmapMasks(padded_masks, *out_shape) + + def crop(self, bbox): + """See :func:`BaseInstanceMasks.crop`.""" + assert isinstance(bbox, np.ndarray) + assert bbox.ndim == 1 + + # clip the boundary + bbox = bbox.copy() + bbox[0::2] = np.clip(bbox[0::2], 0, self.width) + bbox[1::2] = np.clip(bbox[1::2], 0, self.height) + x1, y1, x2, y2 = bbox + w = np.maximum(x2 - x1, 1) + h = np.maximum(y2 - y1, 1) + + if len(self.masks) == 0: + cropped_masks = np.empty((0, h, w), dtype=np.uint8) + else: + cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w] + return BitmapMasks(cropped_masks, h, w) + + def crop_and_resize(self, + bboxes, + out_shape, + inds, + device='cpu', + interpolation='bilinear', + binarize=True): + """See :func:`BaseInstanceMasks.crop_and_resize`.""" + if len(self.masks) == 0: + empty_masks = np.empty((0, *out_shape), dtype=np.uint8) + return BitmapMasks(empty_masks, *out_shape) + + # convert bboxes to tensor + if isinstance(bboxes, np.ndarray): + bboxes = torch.from_numpy(bboxes).to(device=device) + if isinstance(inds, np.ndarray): + inds = torch.from_numpy(inds).to(device=device) + + num_bbox = bboxes.shape[0] + fake_inds = torch.arange( + num_bbox, device=device).to(dtype=bboxes.dtype)[:, None] + rois = torch.cat([fake_inds, bboxes], dim=1) # Nx5 + rois = rois.to(device=device) + if num_bbox > 0: + gt_masks_th = torch.from_numpy(self.masks).to(device).index_select( + 0, inds).to(dtype=rois.dtype) + targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape, + 1.0, 0, 'avg', True).squeeze(1) + if binarize: + resized_masks = (targets >= 0.5).cpu().numpy() + else: + resized_masks = targets.cpu().numpy() + else: + resized_masks = [] + return BitmapMasks(resized_masks, *out_shape) + + def expand(self, expanded_h, expanded_w, top, left): + """See :func:`BaseInstanceMasks.expand`.""" + if len(self.masks) == 0: + expanded_mask = np.empty((0, expanded_h, expanded_w), + dtype=np.uint8) + else: + expanded_mask = np.zeros((len(self), expanded_h, expanded_w), + dtype=np.uint8) + expanded_mask[:, top:top + self.height, + left:left + self.width] = self.masks + return BitmapMasks(expanded_mask, expanded_h, expanded_w) + + def translate(self, + out_shape, + offset, + direction='horizontal', + fill_val=0, + interpolation='bilinear'): + """Translate the BitmapMasks. + + Args: + out_shape (tuple[int]): Shape for output mask, format (h, w). + offset (int | float): The offset for translate. + direction (str): The translate direction, either "horizontal" + or "vertical". + fill_val (int | float): Border value. Default 0 for masks. + interpolation (str): Same as :func:`mmcv.imtranslate`. + + Returns: + BitmapMasks: Translated BitmapMasks. 
+ + Example: + >>> from mmcv.core.mask.structures import BitmapMasks + >>> self = BitmapMasks.random(dtype=np.uint8) + >>> out_shape = (32, 32) + >>> offset = 4 + >>> direction = 'horizontal' + >>> fill_val = 0 + >>> interpolation = 'bilinear' + >>> # Note, There seem to be issues when: + >>> # * out_shape is different than self's shape + >>> # * the mask dtype is not supported by cv2.AffineWarp + >>> new = self.translate(out_shape, offset, direction, fill_val, + >>> interpolation) + >>> assert len(new) == len(self) + >>> assert new.height, new.width == out_shape + """ + if len(self.masks) == 0: + translated_masks = np.empty((0, *out_shape), dtype=np.uint8) + else: + translated_masks = imtranslate( + self.masks.transpose((1, 2, 0)), + offset, + direction, + border_value=fill_val, + interpolation=interpolation) + if translated_masks.ndim == 2: + translated_masks = translated_masks[:, :, None] + translated_masks = translated_masks.transpose( + (2, 0, 1)).astype(self.masks.dtype) + return BitmapMasks(translated_masks, *out_shape) + + def shear(self, + out_shape, + magnitude, + direction='horizontal', + border_value=0, + interpolation='bilinear'): + """Shear the BitmapMasks. + + Args: + out_shape (tuple[int]): Shape for output mask, format (h, w). + magnitude (int | float): The magnitude used for shear. + direction (str): The shear direction, either "horizontal" + or "vertical". + border_value (int | tuple[int]): Value used in case of a + constant border. + interpolation (str): Same as in :func:`mmcv.imshear`. + + Returns: + BitmapMasks: The sheared masks. + """ + if len(self.masks) == 0: + sheared_masks = np.empty((0, *out_shape), dtype=np.uint8) + else: + sheared_masks = imshear( + self.masks.transpose((1, 2, 0)), + magnitude, + direction, + border_value=border_value, + interpolation=interpolation) + if sheared_masks.ndim == 2: + sheared_masks = sheared_masks[:, :, None] + sheared_masks = sheared_masks.transpose( + (2, 0, 1)).astype(self.masks.dtype) + return BitmapMasks(sheared_masks, *out_shape) + + def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): + """Rotate the BitmapMasks. + + Args: + out_shape (tuple[int]): Shape for output mask, format (h, w). + angle (int | float): Rotation angle in degrees. Positive values + mean counter-clockwise rotation. + center (tuple[float], optional): Center point (w, h) of the + rotation in source image. If not specified, the center of + the image will be used. + scale (int | float): Isotropic scale factor. + fill_val (int | float): Border value. Default 0 for masks. + + Returns: + BitmapMasks: Rotated BitmapMasks. 
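+
+        Example (editor's illustration; rotation preserves the mask count
+        and the requested output shape):
+
+        >>> from mmcv.core.mask.structures import BitmapMasks
+        >>> self = BitmapMasks.random(num_masks=2, height=32, width=32)
+        >>> new = self.rotate((32, 32), angle=90.)
+        >>> assert len(new) == len(self)
+        >>> assert (new.height, new.width) == (32, 32)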
+ """ + if len(self.masks) == 0: + rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype) + else: + rotated_masks = imrotate( + self.masks.transpose((1, 2, 0)), + angle, + center=center, + scale=scale, + border_value=fill_val) + if rotated_masks.ndim == 2: + # case when only one mask, (h, w) + rotated_masks = rotated_masks[:, :, None] # (h, w, 1) + rotated_masks = rotated_masks.transpose( + (2, 0, 1)).astype(self.masks.dtype) + return BitmapMasks(rotated_masks, *out_shape) + + @property + def areas(self): + """See :py:attr:`BaseInstanceMasks.areas`.""" + return self.masks.sum((1, 2)) + + def to_ndarray(self): + """See :func:`BaseInstanceMasks.to_ndarray`.""" + return self.masks + + def to_tensor(self, dtype, device): + """See :func:`BaseInstanceMasks.to_tensor`.""" + return torch.tensor(self.masks, dtype=dtype, device=device) + + @classmethod + def random(cls, + num_masks=3, + height=32, + width=32, + dtype=np.uint8, + rng=None): + """Generate random bitmap masks for demo / testing purposes. + + Example: + >>> from mmcv.core.mask.structures import BitmapMasks + >>> self = BitmapMasks.random() + >>> print('self = {}'.format(self)) + self = BitmapMasks(num_masks=3, height=32, width=32) + """ + from mmcv.utils.util_random import ensure_rng + rng = ensure_rng(rng) + masks = (rng.rand(num_masks, height, width) > 0.1).astype(dtype) + self = cls(masks, height=height, width=width) + return self + + +class PolygonMasks(BaseInstanceMasks): + """This class represents masks in the form of polygons. + + Polygons is a list of three levels. The first level of the list + corresponds to objects, the second level to the polys that compose the + object, the third level to the poly coordinates + + Args: + masks (list[list[ndarray]]): The first level of the list + corresponds to objects, the second level to the polys that + compose the object, the third level to the poly coordinates + height (int): height of masks + width (int): width of masks + + Example: + >>> from mmcv.core.mask.structures import * # NOQA + >>> masks = [ + >>> [ np.array([0, 0, 10, 0, 10, 10., 0, 10, 0, 0]) ] + >>> ] + >>> height, width = 16, 16 + >>> self = PolygonMasks(masks, height, width) + + >>> # demo translate + >>> new = self.translate((16, 16), 4., direction='horizontal') + >>> assert np.all(new.masks[0][0][1::2] == masks[0][0][1::2]) + >>> assert np.all(new.masks[0][0][0::2] == masks[0][0][0::2] + 4) + + >>> # demo crop_and_resize + >>> num_boxes = 3 + >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes) + >>> out_shape = (16, 16) + >>> inds = torch.randint(0, len(self), size=(num_boxes,)) + >>> device = 'cpu' + >>> interpolation = 'bilinear' + >>> new = self.crop_and_resize( + ... bboxes, out_shape, inds, device, interpolation) + >>> assert len(new) == num_boxes + >>> assert new.height, new.width == out_shape + """ + + def __init__(self, masks, height, width): + assert isinstance(masks, list) + if len(masks) > 0: + assert isinstance(masks[0], list) + assert isinstance(masks[0][0], np.ndarray) + + self.height = height + self.width = width + self.masks = masks + + def __getitem__(self, index): + """Index the polygon masks. + + Args: + index (ndarray | List): The indices. + + Returns: + :obj:`PolygonMasks`: The indexed polygon masks. 
+ """ + if isinstance(index, np.ndarray): + index = index.tolist() + if isinstance(index, list): + masks = [self.masks[i] for i in index] + else: + try: + masks = self.masks[index] + except Exception: + raise ValueError( + f'Unsupported input of type {type(index)} for indexing!') + if len(masks) and isinstance(masks[0], np.ndarray): + masks = [masks] # ensure a list of three levels + return PolygonMasks(masks, self.height, self.width) + + def __iter__(self): + return iter(self.masks) + + def __repr__(self): + s = self.__class__.__name__ + '(' + s += f'num_masks={len(self.masks)}, ' + s += f'height={self.height}, ' + s += f'width={self.width})' + return s + + def __len__(self): + """Number of masks.""" + return len(self.masks) + + def rescale(self, scale, interpolation=None): + """see :func:`BaseInstanceMasks.rescale`""" + new_w, new_h = rescale_size((self.width, self.height), scale) + if len(self.masks) == 0: + rescaled_masks = PolygonMasks([], new_h, new_w) + else: + rescaled_masks = self.resize((new_h, new_w)) + return rescaled_masks + + def resize(self, out_shape, interpolation=None): + """see :func:`BaseInstanceMasks.resize`""" + if len(self.masks) == 0: + resized_masks = PolygonMasks([], *out_shape) + else: + h_scale = out_shape[0] / self.height + w_scale = out_shape[1] / self.width + resized_masks = [] + for poly_per_obj in self.masks: + resized_poly = [] + for p in poly_per_obj: + p = p.copy() + p[0::2] *= w_scale + p[1::2] *= h_scale + resized_poly.append(p) + resized_masks.append(resized_poly) + resized_masks = PolygonMasks(resized_masks, *out_shape) + return resized_masks + + def flip(self, flip_direction='horizontal'): + """see :func:`BaseInstanceMasks.flip`""" + assert flip_direction in ('horizontal', 'vertical', 'diagonal') + if len(self.masks) == 0: + flipped_masks = PolygonMasks([], self.height, self.width) + else: + flipped_masks = [] + for poly_per_obj in self.masks: + flipped_poly_per_obj = [] + for p in poly_per_obj: + p = p.copy() + if flip_direction == 'horizontal': + p[0::2] = self.width - p[0::2] + elif flip_direction == 'vertical': + p[1::2] = self.height - p[1::2] + else: + p[0::2] = self.width - p[0::2] + p[1::2] = self.height - p[1::2] + flipped_poly_per_obj.append(p) + flipped_masks.append(flipped_poly_per_obj) + flipped_masks = PolygonMasks(flipped_masks, self.height, + self.width) + return flipped_masks + + def crop(self, bbox): + """see :func:`BaseInstanceMasks.crop`""" + assert isinstance(bbox, np.ndarray) + assert bbox.ndim == 1 + + # clip the boundary + bbox = bbox.copy() + bbox[0::2] = np.clip(bbox[0::2], 0, self.width) + bbox[1::2] = np.clip(bbox[1::2], 0, self.height) + x1, y1, x2, y2 = bbox + w = np.maximum(x2 - x1, 1) + h = np.maximum(y2 - y1, 1) + + if len(self.masks) == 0: + cropped_masks = PolygonMasks([], h, w) + else: + cropped_masks = [] + for poly_per_obj in self.masks: + cropped_poly_per_obj = [] + for p in poly_per_obj: + # pycocotools will clip the boundary + p = p.copy() + p[0::2] -= bbox[0] + p[1::2] -= bbox[1] + cropped_poly_per_obj.append(p) + cropped_masks.append(cropped_poly_per_obj) + cropped_masks = PolygonMasks(cropped_masks, h, w) + return cropped_masks + + def pad(self, out_shape, pad_val=0): + """padding has no effect on polygons`""" + return PolygonMasks(self.masks, *out_shape) + + def expand(self, *args, **kwargs): + """TODO: Add expand for polygon""" + raise NotImplementedError + + def crop_and_resize(self, + bboxes, + out_shape, + inds, + device='cpu', + interpolation='bilinear', + binarize=True): + """see 
:func:`BaseInstanceMasks.crop_and_resize`""" + out_h, out_w = out_shape + if len(self.masks) == 0: + return PolygonMasks([], out_h, out_w) + + if not binarize: + raise ValueError('Polygons are always binary, ' + 'setting binarize=False is unsupported') + + resized_masks = [] + for i in range(len(bboxes)): + mask = self.masks[inds[i]] + bbox = bboxes[i, :] + x1, y1, x2, y2 = bbox + w = np.maximum(x2 - x1, 1) + h = np.maximum(y2 - y1, 1) + h_scale = out_h / max(h, 0.1) # avoid too large scale + w_scale = out_w / max(w, 0.1) + + resized_mask = [] + for p in mask: + p = p.copy() + # crop + # pycocotools will clip the boundary + p[0::2] -= bbox[0] + p[1::2] -= bbox[1] + + # resize + p[0::2] *= w_scale + p[1::2] *= h_scale + resized_mask.append(p) + resized_masks.append(resized_mask) + return PolygonMasks(resized_masks, *out_shape) + + def translate(self, + out_shape, + offset, + direction='horizontal', + fill_val=None, + interpolation=None): + """Translate the PolygonMasks. + + Example: + >>> self = PolygonMasks.random(dtype=np.int) + >>> out_shape = (self.height, self.width) + >>> new = self.translate(out_shape, 4., direction='horizontal') + >>> assert np.all(new.masks[0][0][1::2] == self.masks[0][0][1::2]) + >>> assert np.all(new.masks[0][0][0::2] == self.masks[0][0][0::2] + 4) # noqa: E501 + """ + assert fill_val is None or fill_val == 0, 'Here fill_val is not '\ + f'used, and defaultly should be None or 0. got {fill_val}.' + if len(self.masks) == 0: + translated_masks = PolygonMasks([], *out_shape) + else: + translated_masks = [] + for poly_per_obj in self.masks: + translated_poly_per_obj = [] + for p in poly_per_obj: + p = p.copy() + if direction == 'horizontal': + p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1]) + elif direction == 'vertical': + p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0]) + translated_poly_per_obj.append(p) + translated_masks.append(translated_poly_per_obj) + translated_masks = PolygonMasks(translated_masks, *out_shape) + return translated_masks + + def shear(self, + out_shape, + magnitude, + direction='horizontal', + border_value=0, + interpolation='bilinear'): + """See :func:`BaseInstanceMasks.shear`.""" + if len(self.masks) == 0: + sheared_masks = PolygonMasks([], *out_shape) + else: + sheared_masks = [] + if direction == 'horizontal': + shear_matrix = np.stack([[1, magnitude], + [0, 1]]).astype(np.float32) + elif direction == 'vertical': + shear_matrix = np.stack([[1, 0], [magnitude, + 1]]).astype(np.float32) + for poly_per_obj in self.masks: + sheared_poly = [] + for p in poly_per_obj: + p = np.stack([p[0::2], p[1::2]], axis=0) # [2, n] + new_coords = np.matmul(shear_matrix, p) # [2, n] + new_coords[0, :] = np.clip(new_coords[0, :], 0, + out_shape[1]) + new_coords[1, :] = np.clip(new_coords[1, :], 0, + out_shape[0]) + sheared_poly.append( + new_coords.transpose((1, 0)).reshape(-1)) + sheared_masks.append(sheared_poly) + sheared_masks = PolygonMasks(sheared_masks, *out_shape) + return sheared_masks + + def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): + """See :func:`BaseInstanceMasks.rotate`.""" + if len(self.masks) == 0: + rotated_masks = PolygonMasks([], *out_shape) + else: + rotated_masks = [] + rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale) + for poly_per_obj in self.masks: + rotated_poly = [] + for p in poly_per_obj: + p = p.copy() + coords = np.stack([p[0::2], p[1::2]], axis=1) # [n, 2] + # pad 1 to convert from format [x, y] to homogeneous + # coordinates format [x, y, 1] + coords = np.concatenate( + (coords, 
                         np.ones((coords.shape[0], 1), coords.dtype)),
+                        axis=1)  # [n, 3]
+                    rotated_coords = np.matmul(
+                        rotate_matrix[None, :, :],
+                        coords[:, :, None])[..., 0]  # [n, 2, 1] -> [n, 2]
+                    rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0,
+                                                   out_shape[1])
+                    rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0,
+                                                   out_shape[0])
+                    rotated_poly.append(rotated_coords.reshape(-1))
+                rotated_masks.append(rotated_poly)
+            rotated_masks = PolygonMasks(rotated_masks, *out_shape)
+        return rotated_masks
+
+    def to_bitmap(self):
+        """Convert polygon masks to bitmap masks."""
+        bitmap_masks = self.to_ndarray()
+        return BitmapMasks(bitmap_masks, self.height, self.width)
+
+    @property
+    def areas(self):
+        """Compute areas of masks.
+
+        This function is modified from `detectron2
+        `_.
+        It only works with polygons, using the shoelace formula.
+
+        Returns:
+            ndarray: areas of each instance
+        """  # noqa: W501
+        area = []
+        for polygons_per_obj in self.masks:
+            area_per_obj = 0
+            for p in polygons_per_obj:
+                area_per_obj += self._polygon_area(p[0::2], p[1::2])
+            area.append(area_per_obj)
+        return np.asarray(area)
+
+    def _polygon_area(self, x, y):
+        """Compute the area of a component of a polygon.
+
+        Using the shoelace formula:
+        https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
+
+        Args:
+            x (ndarray): x coordinates of the component
+            y (ndarray): y coordinates of the component
+
+        Returns:
+            float: the area of the component
+        """  # noqa: E501
+        return 0.5 * np.abs(
+            np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
+
+    def to_ndarray(self):
+        """Convert masks to the format of ndarray."""
+        if len(self.masks) == 0:
+            return np.empty((0, self.height, self.width), dtype=np.uint8)
+        bitmap_masks = []
+        for poly_per_obj in self.masks:
+            bitmap_masks.append(
+                polygon_to_bitmap(poly_per_obj, self.height, self.width))
+        return np.stack(bitmap_masks)
+
+    def to_tensor(self, dtype, device):
+        """See :func:`BaseInstanceMasks.to_tensor`."""
+        if len(self.masks) == 0:
+            return torch.empty((0, self.height, self.width),
+                               dtype=dtype,
+                               device=device)
+        ndarray_masks = self.to_ndarray()
+        return torch.tensor(ndarray_masks, dtype=dtype, device=device)
+
+    @classmethod
+    def random(cls,
+               num_masks=3,
+               height=32,
+               width=32,
+               n_verts=5,
+               dtype=np.float32,
+               rng=None):
+        """Generate random polygon masks for demo / testing purposes.
+
+        Adapted from [1]_
+
+        References:
+            .. [1] https://gitlab.kitware.com/computer-vision/kwimage/-/blob/928cae35ca8/kwimage/structs/polygon.py#L379  # noqa: E501
+
+        Example:
+            >>> from mmcv.core.mask.structures import PolygonMasks
+            >>> self = PolygonMasks.random()
+            >>> print('self = {}'.format(self))
+        """
+        from mmcv.utils.util_random import ensure_rng
+        rng = ensure_rng(rng)
+
+        def _gen_polygon(n, irregularity, spikeyness):
+            """Creates the polygon by sampling points on a circle around the
+            centre. Random noise is added by varying the angular spacing
+            between sequential points, and by varying the radial distance of
+            each point from the centre.
+
+            Based on original code by Mike Ounsworth
+
+            Args:
+                n (int): number of vertices
+                irregularity (float): [0,1] indicating how much variance there
+                    is in the angular spacing of vertices. [0,1] will map to
+                    [0, 2pi/numberOfVerts]
+                spikeyness (float): [0,1] indicating how much variance there is
+                    in each vertex from the circle of radius aveRadius. [0,1]
+                    will map to [0, aveRadius]
+
+            Returns:
+                ndarray: an (n, 2) array of vertices, in CCW order.
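+
+            Note (editor's remark): with ``irregularity=0`` and a very small
+            ``spikeyness`` this degenerates into an (almost) regular n-gon
+            sampled on the unit circle.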
+ """ + from scipy.stats import truncnorm + # Generate around the unit circle + cx, cy = (0.0, 0.0) + radius = 1 + + tau = np.pi * 2 + + irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / n + spikeyness = np.clip(spikeyness, 1e-9, 1) + + # generate n angle steps + lower = (tau / n) - irregularity + upper = (tau / n) + irregularity + angle_steps = rng.uniform(lower, upper, n) + + # normalize the steps so that point 0 and point n+1 are the same + k = angle_steps.sum() / (2 * np.pi) + angles = (angle_steps / k).cumsum() + rng.uniform(0, tau) + + # Convert high and low values to be wrt the standard normal range + # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html + low = 0 + high = 2 * radius + mean = radius + std = spikeyness + a = (low - mean) / std + b = (high - mean) / std + tnorm = truncnorm(a=a, b=b, loc=mean, scale=std) + + # now generate the points + radii = tnorm.rvs(n, random_state=rng) + x_pts = cx + radii * np.cos(angles) + y_pts = cy + radii * np.sin(angles) + + points = np.hstack([x_pts[:, None], y_pts[:, None]]) + + # Scale to 0-1 space + points = points - points.min(axis=0) + points = points / points.max(axis=0) + + # Randomly place within 0-1 space + points = points * (rng.rand() * .8 + .2) + min_pt = points.min(axis=0) + max_pt = points.max(axis=0) + + high = (1 - max_pt) + low = (0 - min_pt) + offset = (rng.rand(2) * (high - low)) + low + points = points + offset + return points + + def _order_vertices(verts): + """ + References: + https://stackoverflow.com/questions/1709283/how-can-i-sort-a-coordinate-list-for-a-rectangle-counterclockwise + """ + mlat = verts.T[0].sum() / len(verts) + mlng = verts.T[1].sum() / len(verts) + + tau = np.pi * 2 + angle = (np.arctan2(mlat - verts.T[0], verts.T[1] - mlng) + + tau) % tau + sortx = angle.argsort() + verts = verts.take(sortx, axis=0) + return verts + + # Generate a random exterior for each requested mask + masks = [] + for _ in range(num_masks): + exterior = _order_vertices(_gen_polygon(n_verts, 0.9, 0.9)) + exterior = (exterior * [(width, height)]).astype(dtype) + masks.append([exterior.ravel()]) + + self = cls(masks, height, width) + return self + + +def polygon_to_bitmap(polygons, height, width): + """Convert masks from the form of polygons to bitmaps. + + Args: + polygons (list[ndarray]): masks in polygon representation + height (int): mask height + width (int): mask width + + Return: + ndarray: the converted masks in bitmap representation + """ + rles = maskUtils.frPyObjects(polygons, height, width) + rle = maskUtils.merge(rles) + bitmap_mask = maskUtils.decode(rle).astype(np.bool) + return bitmap_mask diff --git a/mmcv/core/mask/utils.py b/mmcv/core/mask/utils.py new file mode 100644 index 0000000..cc671b1 --- /dev/null +++ b/mmcv/core/mask/utils.py @@ -0,0 +1,63 @@ +from mmcv.utils import slice_list +import numpy as np +import pycocotools.mask as mask_util + + +def split_combined_polys(polys, poly_lens, polys_per_mask): + """Split the combined 1-D polys into masks. + + A mask is represented as a list of polys, and a poly is represented as + a 1-D array. In dataset, all masks are concatenated into a single 1-D + tensor. Here we need to split the tensor into original representations. 
+ + Args: + polys (list): a list (length = image num) of 1-D tensors + poly_lens (list): a list (length = image num) of poly length + polys_per_mask (list): a list (length = image num) of poly number + of each mask + + Returns: + list: a list (length = image num) of list (length = mask num) of \ + list (length = poly num) of numpy array. + """ + mask_polys_list = [] + for img_id in range(len(polys)): + polys_single = polys[img_id] + polys_lens_single = poly_lens[img_id].tolist() + polys_per_mask_single = polys_per_mask[img_id].tolist() + + split_polys = slice_list(polys_single, polys_lens_single) + mask_polys = slice_list(split_polys, polys_per_mask_single) + mask_polys_list.append(mask_polys) + return mask_polys_list + + +# TODO: move this function to more proper place +def encode_mask_results(mask_results): + """Encode bitmap mask to RLE code. + + Args: + mask_results (list | tuple[list]): bitmap mask results. + In mask scoring rcnn, mask_results is a tuple of (segm_results, + segm_cls_score). + + Returns: + list | tuple: RLE encoded mask. + """ + if isinstance(mask_results, tuple): # mask scoring + cls_segms, cls_mask_scores = mask_results + else: + cls_segms = mask_results + num_classes = len(cls_segms) + encoded_mask_results = [[] for _ in range(num_classes)] + for i in range(len(cls_segms)): + for cls_segm in cls_segms[i]: + encoded_mask_results[i].append( + mask_util.encode( + np.array( + cls_segm[:, :, np.newaxis], order='F', + dtype='uint8'))[0]) # encoded with RLE + if isinstance(mask_results, tuple): + return encoded_mask_results, cls_mask_scores + else: + return encoded_mask_results diff --git a/mmcv/core/points/__init__.py b/mmcv/core/points/__init__.py new file mode 100644 index 0000000..73d2d83 --- /dev/null +++ b/mmcv/core/points/__init__.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base_points import BasePoints +from .cam_points import CameraPoints +from .depth_points import DepthPoints +from .lidar_points import LiDARPoints + +__all__ = ['BasePoints', 'CameraPoints', 'DepthPoints', 'LiDARPoints'] + + +def get_points_type(points_type): + """Get the class of points according to coordinate type. + + Args: + points_type (str): The type of points coordinate. + The valid value are "CAMERA", "LIDAR", or "DEPTH". + + Returns: + class: Points type. + """ + if points_type == 'CAMERA': + points_cls = CameraPoints + elif points_type == 'LIDAR': + points_cls = LiDARPoints + elif points_type == 'DEPTH': + points_cls = DepthPoints + else: + raise ValueError('Only "points_type" of "CAMERA", "LIDAR", or "DEPTH"' + f' are supported, got {points_type}') + + return points_cls diff --git a/mmcv/core/points/base_points.py b/mmcv/core/points/base_points.py new file mode 100644 index 0000000..31b8cec --- /dev/null +++ b/mmcv/core/points/base_points.py @@ -0,0 +1,436 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +import warnings +from abc import abstractmethod + + +class BasePoints(object): + """Base class for Points. + + Args: + tensor (torch.Tensor | np.ndarray | list): a N x points_dim matrix. + points_dim (int): Number of the dimension of a point. + Each row is (x, y, z). Default to 3. + attribute_dims (dict): Dictionary to indicate the meaning of extra + dimension. Default to None. + + Attributes: + tensor (torch.Tensor): Float matrix of N x points_dim. + points_dim (int): Integer indicating the dimension of a point. + Each row is (x, y, z, ...). 
+ attribute_dims (bool): Dictionary to indicate the meaning of extra + dimension. Default to None. + rotation_axis (int): Default rotation axis for points rotation. + """ + + def __init__(self, tensor, points_dim=3, attribute_dims=None): + if isinstance(tensor, torch.Tensor): + device = tensor.device + else: + device = torch.device('cpu') + tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) + if tensor.numel() == 0: + # Use reshape, so we don't end up creating a new tensor that + # does not depend on the inputs (and consequently confuses jit) + tensor = tensor.reshape((0, points_dim)).to( + dtype=torch.float32, device=device) + assert tensor.dim() == 2 and tensor.size(-1) == \ + points_dim, tensor.size() + + self.tensor = tensor + self.points_dim = points_dim + self.attribute_dims = attribute_dims + self.rotation_axis = 0 + + @property + def coord(self): + """torch.Tensor: Coordinates of each point with size (N, 3).""" + return self.tensor[:, :3] + + @coord.setter + def coord(self, tensor): + """Set the coordinates of each point.""" + try: + tensor = tensor.reshape(self.shape[0], 3) + except (RuntimeError, ValueError): # for torch.Tensor and np.ndarray + raise ValueError(f'got unexpected shape {tensor.shape}') + if not isinstance(tensor, torch.Tensor): + tensor = self.tensor.new_tensor(tensor) + self.tensor[:, :3] = tensor + + @property + def height(self): + """torch.Tensor: A vector with height of each point.""" + if self.attribute_dims is not None and \ + 'height' in self.attribute_dims.keys(): + return self.tensor[:, self.attribute_dims['height']] + else: + return None + + @height.setter + def height(self, tensor): + """Set the height of each point.""" + try: + tensor = tensor.reshape(self.shape[0]) + except (RuntimeError, ValueError): # for torch.Tensor and np.ndarray + raise ValueError(f'got unexpected shape {tensor.shape}') + if not isinstance(tensor, torch.Tensor): + tensor = self.tensor.new_tensor(tensor) + if self.attribute_dims is not None and \ + 'height' in self.attribute_dims.keys(): + self.tensor[:, self.attribute_dims['height']] = tensor + else: + # add height attribute + if self.attribute_dims is None: + self.attribute_dims = dict() + attr_dim = self.shape[1] + self.tensor = torch.cat([self.tensor, tensor.unsqueeze(1)], dim=1) + self.attribute_dims.update(dict(height=attr_dim)) + self.points_dim += 1 + + @property + def color(self): + """torch.Tensor: A vector with color of each point.""" + if self.attribute_dims is not None and \ + 'color' in self.attribute_dims.keys(): + return self.tensor[:, self.attribute_dims['color']] + else: + return None + + @color.setter + def color(self, tensor): + """Set the color of each point.""" + try: + tensor = tensor.reshape(self.shape[0], 3) + except (RuntimeError, ValueError): # for torch.Tensor and np.ndarray + raise ValueError(f'got unexpected shape {tensor.shape}') + if tensor.max() >= 256 or tensor.min() < 0: + warnings.warn('point got color value beyond [0, 255]') + if not isinstance(tensor, torch.Tensor): + tensor = self.tensor.new_tensor(tensor) + if self.attribute_dims is not None and \ + 'color' in self.attribute_dims.keys(): + self.tensor[:, self.attribute_dims['color']] = tensor + else: + # add color attribute + if self.attribute_dims is None: + self.attribute_dims = dict() + attr_dim = self.shape[1] + self.tensor = torch.cat([self.tensor, tensor], dim=1) + self.attribute_dims.update( + dict(color=[attr_dim, attr_dim + 1, attr_dim + 2])) + self.points_dim += 3 + + @property + def shape(self): + """torch.Shape: 
Shape of points.""" + return self.tensor.shape + + def shuffle(self): + """Shuffle the points. + + Returns: + torch.Tensor: The shuffled index. + """ + idx = torch.randperm(self.__len__(), device=self.tensor.device) + self.tensor = self.tensor[idx] + return idx + + def rotate(self, rotation, axis=None): + """Rotate points with the given rotation matrix or angle. + + Args: + rotation (float, np.ndarray, torch.Tensor): Rotation matrix + or angle. + axis (int): Axis to rotate at. Defaults to None. + """ + if not isinstance(rotation, torch.Tensor): + rotation = self.tensor.new_tensor(rotation) + assert rotation.shape == torch.Size([3, 3]) or \ + rotation.numel() == 1, f'invalid rotation shape {rotation.shape}' + + if axis is None: + axis = self.rotation_axis + + if rotation.numel() == 1: + rot_sin = torch.sin(rotation) + rot_cos = torch.cos(rotation) + if axis == 1: + rot_mat_T = rotation.new_tensor([[rot_cos, 0, -rot_sin], + [0, 1, 0], + [rot_sin, 0, rot_cos]]) + elif axis == 2 or axis == -1: + rot_mat_T = rotation.new_tensor([[rot_cos, -rot_sin, 0], + [rot_sin, rot_cos, 0], + [0, 0, 1]]) + elif axis == 0: + rot_mat_T = rotation.new_tensor([[0, rot_cos, -rot_sin], + [0, rot_sin, rot_cos], + [1, 0, 0]]) + else: + raise ValueError('axis should in range') + rot_mat_T = rot_mat_T.T + elif rotation.numel() == 9: + rot_mat_T = rotation + else: + raise NotImplementedError + self.tensor[:, :3] = self.tensor[:, :3] @ rot_mat_T + + return rot_mat_T + + @abstractmethod + def flip(self, bev_direction='horizontal'): + """Flip the points in BEV along given BEV direction.""" + pass + + def translate(self, trans_vector): + """Translate points with the given translation vector. + + Args: + trans_vector (np.ndarray, torch.Tensor): Translation + vector of size 3 or nx3. + """ + if not isinstance(trans_vector, torch.Tensor): + trans_vector = self.tensor.new_tensor(trans_vector) + trans_vector = trans_vector.squeeze(0) + if trans_vector.dim() == 1: + assert trans_vector.shape[0] == 3 + elif trans_vector.dim() == 2: + assert trans_vector.shape[0] == self.tensor.shape[0] and \ + trans_vector.shape[1] == 3 + else: + raise NotImplementedError( + f'Unsupported translation vector of shape {trans_vector.shape}' + ) + self.tensor[:, :3] += trans_vector + + def in_range_3d(self, point_range): + """Check whether the points are in the given range. + + Args: + point_range (list | torch.Tensor): The range of point + (x_min, y_min, z_min, x_max, y_max, z_max) + + Note: + In the original implementation of SECOND, checking whether + a box in the range checks whether the points are in a convex + polygon, we try to reduce the burden for simpler cases. + + Returns: + torch.Tensor: A binary vector indicating whether each point is \ + inside the reference range. + """ + in_range_flags = ((self.tensor[:, 0] > point_range[0]) + & (self.tensor[:, 1] > point_range[1]) + & (self.tensor[:, 2] > point_range[2]) + & (self.tensor[:, 0] < point_range[3]) + & (self.tensor[:, 1] < point_range[4]) + & (self.tensor[:, 2] < point_range[5])) + return in_range_flags + + @abstractmethod + def in_range_bev(self, point_range): + """Check whether the points are in the given range. + + Args: + point_range (list | torch.Tensor): The range of point + in order of (x_min, y_min, x_max, y_max). + + Returns: + torch.Tensor: Indicating whether each point is inside \ + the reference range. + """ + pass + + @abstractmethod + def convert_to(self, dst, rt_mat=None): + """Convert self to ``dst`` mode. + + Args: + dst (:obj:`CoordMode`): The target Box mode. 
+ rt_mat (np.ndarray | torch.Tensor): The rotation and translation + matrix between different coordinates. Defaults to None. + The conversion from `src` coordinates to `dst` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + + Returns: + :obj:`BasePoints`: The converted box of the same type \ + in the `dst` mode. + """ + pass + + def scale(self, scale_factor): + """Scale the points with horizontal and vertical scaling factors. + + Args: + scale_factors (float): Scale factors to scale the points. + """ + self.tensor[:, :3] *= scale_factor + + def __getitem__(self, item): + """ + Note: + The following usage are allowed: + 1. `new_points = points[3]`: + return a `Points` that contains only one point. + 2. `new_points = points[2:10]`: + return a slice of points. + 3. `new_points = points[vector]`: + where vector is a torch.BoolTensor with `length = len(points)`. + Nonzero elements in the vector will be selected. + 4. `new_points = points[3:11, vector]`: + return a slice of points and attribute dims. + 5. `new_points = points[4:12, 2]`: + return a slice of points with single attribute. + Note that the returned Points might share storage with this Points, + subject to Pytorch's indexing semantics. + + Returns: + :obj:`BasePoints`: A new object of \ + :class:`BasePoints` after indexing. + """ + original_type = type(self) + if isinstance(item, int): + return original_type( + self.tensor[item].view(1, -1), + points_dim=self.points_dim, + attribute_dims=self.attribute_dims) + elif isinstance(item, tuple) and len(item) == 2: + if isinstance(item[1], slice): + start = 0 if item[1].start is None else item[1].start + stop = self.tensor.shape[1] if \ + item[1].stop is None else item[1].stop + step = 1 if item[1].step is None else item[1].step + item = list(item) + item[1] = list(range(start, stop, step)) + item = tuple(item) + elif isinstance(item[1], int): + item = list(item) + item[1] = [item[1]] + item = tuple(item) + p = self.tensor[item[0], item[1]] + + keep_dims = list( + set(item[1]).intersection(set(range(3, self.tensor.shape[1])))) + if self.attribute_dims is not None: + attribute_dims = self.attribute_dims.copy() + for key in self.attribute_dims.keys(): + cur_attribute_dims = attribute_dims[key] + if isinstance(cur_attribute_dims, int): + cur_attribute_dims = [cur_attribute_dims] + intersect_attr = list( + set(cur_attribute_dims).intersection(set(keep_dims))) + if len(intersect_attr) == 1: + attribute_dims[key] = intersect_attr[0] + elif len(intersect_attr) > 1: + attribute_dims[key] = intersect_attr + else: + attribute_dims.pop(key) + else: + attribute_dims = None + elif isinstance(item, (slice, np.ndarray, torch.Tensor)): + p = self.tensor[item] + attribute_dims = self.attribute_dims + else: + raise NotImplementedError(f'Invalid slice {item}!') + + assert p.dim() == 2, \ + f'Indexing on Points with {item} failed to return a matrix!' + return original_type( + p, points_dim=p.shape[1], attribute_dims=attribute_dims) + + def __len__(self): + """int: Number of points in the current object.""" + return self.tensor.shape[0] + + def __repr__(self): + """str: Return a strings that describes the object.""" + return self.__class__.__name__ + '(\n ' + str(self.tensor) + ')' + + @classmethod + def cat(cls, points_list): + """Concatenate a list of Points into a single Points. + + Args: + points_list (list[:obj:`BasePoints`]): List of points. + + Returns: + :obj:`BasePoints`: The concatenated Points. 
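+
+        Example (editor's illustration, using the LiDAR subclass defined
+        in this package):
+
+        >>> import torch
+        >>> from mmcv.core.points import LiDARPoints
+        >>> a = LiDARPoints(torch.rand(4, 3))
+        >>> b = LiDARPoints(torch.rand(6, 3))
+        >>> assert len(LiDARPoints.cat([a, b])) == 10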
+ """ + assert isinstance(points_list, (list, tuple)) + if len(points_list) == 0: + return cls(torch.empty(0)) + assert all(isinstance(points, cls) for points in points_list) + + # use torch.cat (v.s. layers.cat) + # so the returned points never share storage with input + cat_points = cls( + torch.cat([p.tensor for p in points_list], dim=0), + points_dim=points_list[0].tensor.shape[1], + attribute_dims=points_list[0].attribute_dims) + return cat_points + + def to(self, device): + """Convert current points to a specific device. + + Args: + device (str | :obj:`torch.device`): The name of the device. + + Returns: + :obj:`BasePoints`: A new boxes object on the \ + specific device. + """ + original_type = type(self) + return original_type( + self.tensor.to(device), + points_dim=self.points_dim, + attribute_dims=self.attribute_dims) + + def clone(self): + """Clone the Points. + + Returns: + :obj:`BasePoints`: Box object with the same properties \ + as self. + """ + original_type = type(self) + return original_type( + self.tensor.clone(), + points_dim=self.points_dim, + attribute_dims=self.attribute_dims) + + @property + def device(self): + """str: The device of the points are on.""" + return self.tensor.device + + def __iter__(self): + """Yield a point as a Tensor of shape (4,) at a time. + + Returns: + torch.Tensor: A point of shape (4,). + """ + yield from self.tensor + + def new_point(self, data): + """Create a new point object with data. + + The new point and its tensor has the similar properties \ + as self and self.tensor, respectively. + + Args: + data (torch.Tensor | numpy.array | list): Data to be copied. + + Returns: + :obj:`BasePoints`: A new point object with ``data``, \ + the object's other properties are similar to ``self``. + """ + new_tensor = self.tensor.new_tensor(data) \ + if not isinstance(data, torch.Tensor) else data.to(self.device) + original_type = type(self) + return original_type( + new_tensor, + points_dim=self.points_dim, + attribute_dims=self.attribute_dims) diff --git a/mmcv/core/points/cam_points.py b/mmcv/core/points/cam_points.py new file mode 100644 index 0000000..ba83cf0 --- /dev/null +++ b/mmcv/core/points/cam_points.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base_points import BasePoints + + +class CameraPoints(BasePoints): + """Points of instances in CAM coordinates. + + Args: + tensor (torch.Tensor | np.ndarray | list): a N x points_dim matrix. + points_dim (int): Number of the dimension of a point. + Each row is (x, y, z). Default to 3. + attribute_dims (dict): Dictionary to indicate the meaning of extra + dimension. Default to None. + + Attributes: + tensor (torch.Tensor): Float matrix of N x points_dim. + points_dim (int): Integer indicating the dimension of a point. + Each row is (x, y, z, ...). + attribute_dims (bool): Dictionary to indicate the meaning of extra + dimension. Default to None. + rotation_axis (int): Default rotation axis for points rotation. + """ + + def __init__(self, tensor, points_dim=3, attribute_dims=None): + super(CameraPoints, self).__init__( + tensor, points_dim=points_dim, attribute_dims=attribute_dims) + self.rotation_axis = 1 + + def flip(self, bev_direction='horizontal'): + """Flip the boxes in BEV along given BEV direction.""" + if bev_direction == 'horizontal': + self.tensor[:, 0] = -self.tensor[:, 0] + elif bev_direction == 'vertical': + self.tensor[:, 2] = -self.tensor[:, 2] + + def in_range_bev(self, point_range): + """Check whether the points are in the given range. 
+ + Args: + point_range (list | torch.Tensor): The range of point + in order of (x_min, y_min, x_max, y_max). + + Returns: + torch.Tensor: Indicating whether each point is inside \ + the reference range. + """ + in_range_flags = ((self.tensor[:, 0] > point_range[0]) + & (self.tensor[:, 2] > point_range[1]) + & (self.tensor[:, 0] < point_range[2]) + & (self.tensor[:, 2] < point_range[3])) + return in_range_flags + + def convert_to(self, dst, rt_mat=None): + """Convert self to ``dst`` mode. + + Args: + dst (:obj:`CoordMode`): The target Point mode. + rt_mat (np.ndarray | torch.Tensor): The rotation and translation + matrix between different coordinates. Defaults to None. + The conversion from `src` coordinates to `dst` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. + + Returns: + :obj:`BasePoints`: The converted point of the same type \ + in the `dst` mode. + """ + from mmcv.core.bbox import Coord3DMode + return Coord3DMode.convert_point( + point=self, src=Coord3DMode.CAM, dst=dst, rt_mat=rt_mat) diff --git a/mmcv/core/points/depth_points.py b/mmcv/core/points/depth_points.py new file mode 100644 index 0000000..1b12299 --- /dev/null +++ b/mmcv/core/points/depth_points.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base_points import BasePoints + + +class DepthPoints(BasePoints): + """Points of instances in DEPTH coordinates. + + Args: + tensor (torch.Tensor | np.ndarray | list): a N x points_dim matrix. + points_dim (int): Number of the dimension of a point. + Each row is (x, y, z). Default to 3. + attribute_dims (dict): Dictionary to indicate the meaning of extra + dimension. Default to None. + + Attributes: + tensor (torch.Tensor): Float matrix of N x points_dim. + points_dim (int): Integer indicating the dimension of a point. + Each row is (x, y, z, ...). + attribute_dims (bool): Dictionary to indicate the meaning of extra + dimension. Default to None. + rotation_axis (int): Default rotation axis for points rotation. + """ + + def __init__(self, tensor, points_dim=3, attribute_dims=None): + super(DepthPoints, self).__init__( + tensor, points_dim=points_dim, attribute_dims=attribute_dims) + self.rotation_axis = 2 + + def flip(self, bev_direction='horizontal'): + """Flip the boxes in BEV along given BEV direction.""" + if bev_direction == 'horizontal': + self.tensor[:, 0] = -self.tensor[:, 0] + elif bev_direction == 'vertical': + self.tensor[:, 1] = -self.tensor[:, 1] + + def in_range_bev(self, point_range): + """Check whether the points are in the given range. + + Args: + point_range (list | torch.Tensor): The range of point + in order of (x_min, y_min, x_max, y_max). + + Returns: + torch.Tensor: Indicating whether each point is inside \ + the reference range. + """ + in_range_flags = ((self.tensor[:, 0] > point_range[0]) + & (self.tensor[:, 1] > point_range[1]) + & (self.tensor[:, 0] < point_range[2]) + & (self.tensor[:, 1] < point_range[3])) + return in_range_flags + + def convert_to(self, dst, rt_mat=None): + """Convert self to ``dst`` mode. + + Args: + dst (:obj:`CoordMode`): The target Point mode. + rt_mat (np.ndarray | torch.Tensor): The rotation and translation + matrix between different coordinates. Defaults to None. + The conversion from `src` coordinates to `dst` coordinates + usually comes along the change of sensors, e.g., from camera + to LiDAR. This requires a transformation matrix. 
diff --git a/mmcv/core/points/lidar_points.py b/mmcv/core/points/lidar_points.py
new file mode 100644
index 0000000..bbfddd9
--- /dev/null
+++ b/mmcv/core/points/lidar_points.py
@@ -0,0 +1,70 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .base_points import BasePoints
+
+
+class LiDARPoints(BasePoints):
+    """Points of instances in LIDAR coordinates.
+
+    Args:
+        tensor (torch.Tensor | np.ndarray | list): a N x points_dim matrix.
+        points_dim (int): Number of the dimension of a point.
+            Each row is (x, y, z). Default to 3.
+        attribute_dims (dict): Dictionary to indicate the meaning of extra
+            dimension. Default to None.
+
+    Attributes:
+        tensor (torch.Tensor): Float matrix of N x points_dim.
+        points_dim (int): Integer indicating the dimension of a point.
+            Each row is (x, y, z, ...).
+        attribute_dims (dict): Dictionary to indicate the meaning of extra
+            dimension. Default to None.
+        rotation_axis (int): Default rotation axis for points rotation.
+    """
+
+    def __init__(self, tensor, points_dim=3, attribute_dims=None):
+        super(LiDARPoints, self).__init__(
+            tensor, points_dim=points_dim, attribute_dims=attribute_dims)
+        self.rotation_axis = 2
+
+    def flip(self, bev_direction='horizontal'):
+        """Flip the points in BEV along the given BEV direction."""
+        if bev_direction == 'horizontal':
+            self.tensor[:, 1] = -self.tensor[:, 1]
+        elif bev_direction == 'vertical':
+            self.tensor[:, 0] = -self.tensor[:, 0]
+
+    def in_range_bev(self, point_range):
+        """Check whether the points are in the given range.
+
+        Args:
+            point_range (list | torch.Tensor): The range of point
+                in order of (x_min, y_min, x_max, y_max).
+
+        Returns:
+            torch.Tensor: Indicating whether each point is inside \
+                the reference range.
+        """
+        in_range_flags = ((self.tensor[:, 0] > point_range[0])
+                          & (self.tensor[:, 1] > point_range[1])
+                          & (self.tensor[:, 0] < point_range[2])
+                          & (self.tensor[:, 1] < point_range[3]))
+        return in_range_flags
+
+    def convert_to(self, dst, rt_mat=None):
+        """Convert self to ``dst`` mode.
+
+        Args:
+            dst (:obj:`CoordMode`): The target Point mode.
+            rt_mat (np.ndarray | torch.Tensor): The rotation and translation
+                matrix between different coordinates. Defaults to None.
+                The conversion from `src` coordinates to `dst` coordinates
+                usually comes along the change of sensors, e.g., from camera
+                to LiDAR. This requires a transformation matrix.
+
+        Returns:
+            :obj:`BasePoints`: The converted point of the same type \
+                in the `dst` mode.
+        """
+        from mmcv.core.bbox import Coord3DMode
+        return Coord3DMode.convert_point(
+            point=self, src=Coord3DMode.LIDAR, dst=dst, rt_mat=rt_mat)
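The three subclasses differ only in axis conventions; converting between them goes through Coord3DMode. A minimal sketch (the 3x4 extrinsic matrix below is a placeholder; in practice it comes from sensor calibration):

import torch
from mmcv.core.bbox import Coord3DMode
from mmcv.core.points.lidar_points import LiDARPoints

# 100 points with an extra intensity channel.
pts = LiDARPoints(torch.rand(100, 4), points_dim=4,
                  attribute_dims=dict(intensity=3))
pts.flip('horizontal')  # in the LiDAR frame this negates y
cam_pts = pts.convert_to(Coord3DMode.CAM, rt_mat=torch.eye(4)[:3])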
+ """ + from mmcv.core.bbox import Coord3DMode + return Coord3DMode.convert_point( + point=self, src=Coord3DMode.LIDAR, dst=dst, rt_mat=rt_mat) diff --git a/mmcv/core/post_processing/__init__.py b/mmcv/core/post_processing/__init__.py new file mode 100644 index 0000000..5335741 --- /dev/null +++ b/mmcv/core/post_processing/__init__.py @@ -0,0 +1,9 @@ +# from .merge_augs import (merge_aug_bboxes, merge_aug_masks, +# merge_aug_scores, merge_aug_bboxes_3d) +from .box3d_nms import aligned_3d_nms, box3d_multiclass_nms, circle_nms + +# __all__ = [ +# 'merge_aug_bboxes', +# 'merge_aug_scores', 'merge_aug_masks', 'box3d_multiclass_nms', +# 'aligned_3d_nms', 'merge_aug_bboxes_3d', 'circle_nms' +# ] diff --git a/mmcv/core/post_processing/bbox_nms.py b/mmcv/core/post_processing/bbox_nms.py new file mode 100644 index 0000000..1b3c77a --- /dev/null +++ b/mmcv/core/post_processing/bbox_nms.py @@ -0,0 +1,170 @@ +import torch +from mmcv.ops.nms import batched_nms + +from mmcv.core.bbox.iou_calculators import bbox_overlaps + + +def multiclass_nms(multi_bboxes, + multi_scores, + score_thr, + nms_cfg, + max_num=-1, + score_factors=None, + return_inds=False): + """NMS for multi-class bboxes. + + Args: + multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) + multi_scores (Tensor): shape (n, #class), where the last column + contains scores of the background class, but this will be ignored. + score_thr (float): bbox threshold, bboxes with scores lower than it + will not be considered. + nms_thr (float): NMS IoU threshold + max_num (int, optional): if there are more than max_num bboxes after + NMS, only top max_num will be kept. Default to -1. + score_factors (Tensor, optional): The factors multiplied to scores + before applying NMS. Default to None. + return_inds (bool, optional): Whether return the indices of kept + bboxes. Default to False. + + Returns: + tuple: (dets, labels, indices (optional)), tensors of shape (k, 5), + (k), and (k). Dets are boxes with scores. Labels are 0-based. + """ + num_classes = multi_scores.size(1) - 1 + # exclude background category + if multi_bboxes.shape[1] > 4: + bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4) + else: + bboxes = multi_bboxes[:, None].expand( + multi_scores.size(0), num_classes, 4) + + scores = multi_scores[:, :-1] + + labels = torch.arange(num_classes, dtype=torch.long) + labels = labels.view(1, -1).expand_as(scores) + + bboxes = bboxes.reshape(-1, 4) + scores = scores.reshape(-1) + labels = labels.reshape(-1) + + if not torch.onnx.is_in_onnx_export(): + # NonZero not supported in TensorRT + # remove low scoring boxes + valid_mask = scores > score_thr + # multiply score_factor after threshold to preserve more bboxes, improve + # mAP by 1% for YOLOv3 + if score_factors is not None: + # expand the shape to match original shape of score + score_factors = score_factors.view(-1, 1).expand( + multi_scores.size(0), num_classes) + score_factors = score_factors.reshape(-1) + scores = scores * score_factors + + if not torch.onnx.is_in_onnx_export(): + # NonZero not supported in TensorRT + inds = valid_mask.nonzero(as_tuple=False).squeeze(1) + bboxes, scores, labels = bboxes[inds], scores[inds], labels[inds] + else: + # TensorRT NMS plugin has invalid output filled with -1 + # add dummy data to make detection output correct. 
+
+
+def fast_nms(multi_bboxes,
+             multi_scores,
+             multi_coeffs,
+             score_thr,
+             iou_thr,
+             top_k,
+             max_num=-1):
+    """Fast NMS in `YOLACT <https://arxiv.org/abs/1904.02689>`_.
+
+    Fast NMS allows already-removed detections to suppress other detections so
+    that every instance can be decided to be kept or discarded in parallel,
+    which is not possible in traditional NMS. This relaxation allows us to
+    implement Fast NMS entirely in standard GPU-accelerated matrix operations.
+
+    Args:
+        multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
+        multi_scores (Tensor): shape (n, #class+1), where the last column
+            contains scores of the background class, but this will be ignored.
+        multi_coeffs (Tensor): shape (n, #class*coeffs_dim).
+        score_thr (float): bbox threshold, bboxes with scores lower than it
+            will not be considered.
+        iou_thr (float): IoU threshold to be considered as conflicted.
+        top_k (int): if there are more than top_k bboxes before NMS,
+            only top top_k will be kept.
+        max_num (int): if there are more than max_num bboxes after NMS,
+            only top max_num will be kept. If -1, keep all the bboxes.
+            Default: -1.
+
+    Returns:
+        tuple: (dets, labels, coefficients), tensors of shape (k, 5), (k, 1),
+            and (k, coeffs_dim). Dets are boxes with scores.
+            Labels are 0-based.
+    """
+
+    scores = multi_scores[:, :-1].t()  # [#class, n]
+    scores, idx = scores.sort(1, descending=True)
+
+    idx = idx[:, :top_k].contiguous()
+    scores = scores[:, :top_k]  # [#class, topk]
+    num_classes, num_dets = idx.size()
+    boxes = multi_bboxes[idx.view(-1), :].view(num_classes, num_dets, 4)
+    coeffs = multi_coeffs[idx.view(-1), :].view(num_classes, num_dets, -1)
+
+    iou = bbox_overlaps(boxes, boxes)  # [#class, topk, topk]
+    iou.triu_(diagonal=1)
+    iou_max, _ = iou.max(dim=1)
+
+    # Now just filter out the ones higher than the threshold
+    keep = iou_max <= iou_thr
+
+    # Second thresholding introduces 0.2 mAP gain at negligible time cost
+    keep *= scores > score_thr
+
+    # Assign each kept detection to its corresponding class
+    classes = torch.arange(
+        num_classes, device=boxes.device)[:, None].expand_as(keep)
+    classes = classes[keep]
+
+    boxes = boxes[keep]
+    coeffs = coeffs[keep]
+    scores = scores[keep]
+
+    # Only keep the top max_num highest scores across all classes
+    scores, idx = scores.sort(0, descending=True)
+    if max_num > 0:
+        idx = idx[:max_num]
+        scores = scores[:max_num]
+
+    classes = classes[idx]
+    boxes = boxes[idx]
+    coeffs = coeffs[idx]
+
+    cls_dets = torch.cat([boxes, scores[:, None]], dim=1)
+    return cls_dets, classes, coeffs
diff --git a/mmcv/core/post_processing/box3d_nms.py b/mmcv/core/post_processing/box3d_nms.py
new file mode 100644
index 0000000..8bede1b
--- /dev/null
+++ b/mmcv/core/post_processing/box3d_nms.py
@@ -0,0 +1,220 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import numba
+import numpy as np
+import torch
+
+from mmcv.ops.iou3d_det.iou3d_utils import nms_gpu, nms_normal_gpu
+
+
+def box3d_multiclass_nms(mlvl_bboxes,
+                         mlvl_bboxes_for_nms,
+                         mlvl_scores,
+                         score_thr,
+                         max_num,
+                         cfg,
+                         mlvl_dir_scores=None,
+                         mlvl_attr_scores=None,
+                         mlvl_bboxes2d=None):
+    """Multi-class NMS for 3D boxes.
+
+    Args:
+        mlvl_bboxes (torch.Tensor): Multi-level boxes with shape (N, M).
+            M is the dimension of a box.
+        mlvl_bboxes_for_nms (torch.Tensor): Multi-level boxes with shape
+            (N, 5) ([x1, y1, x2, y2, ry]). N is the number of boxes.
+        mlvl_scores (torch.Tensor): Multi-level scores with shape
+            (N, C + 1). N is the number of boxes. C is the number of classes.
+        score_thr (float): Score threshold to filter boxes with low
+            confidence.
+        max_num (int): Maximum number of boxes that will be kept.
+        cfg (dict): Configuration dict of NMS.
+        mlvl_dir_scores (torch.Tensor, optional): Multi-level scores
+            of direction classifier. Defaults to None.
+        mlvl_attr_scores (torch.Tensor, optional): Multi-level scores
+            of attribute classifier. Defaults to None.
+        mlvl_bboxes2d (torch.Tensor, optional): Multi-level 2D bounding
+            boxes. Defaults to None.
+
+    Returns:
+        tuple[torch.Tensor]: Return results after nms, including 3D \
+            bounding boxes, scores, labels, direction scores, attribute \
+            scores (optional) and 2D bounding boxes (optional).
+    """
+    # do multi class nms
+    # the fg class id range: [0, num_classes-1]
+    num_classes = mlvl_scores.shape[1] - 1
+    bboxes = []
+    scores = []
+    labels = []
+    dir_scores = []
+    attr_scores = []
+    bboxes2d = []
+    for i in range(0, num_classes):
+        # get bboxes and scores of this class
+        cls_inds = mlvl_scores[:, i] > score_thr
+        if not cls_inds.any():
+            continue
+
+        _scores = mlvl_scores[cls_inds, i]
+        _bboxes_for_nms = mlvl_bboxes_for_nms[cls_inds, :]
+
+        if cfg.use_rotate_nms:
+            nms_func = nms_gpu
+        else:
+            nms_func = nms_normal_gpu
+
+        selected = nms_func(_bboxes_for_nms, _scores, cfg.nms_thr)
+        _mlvl_bboxes = mlvl_bboxes[cls_inds, :]
+        bboxes.append(_mlvl_bboxes[selected])
+        scores.append(_scores[selected])
+        cls_label = mlvl_bboxes.new_full((len(selected), ),
+                                         i,
+                                         dtype=torch.long)
+        labels.append(cls_label)
+
+        if mlvl_dir_scores is not None:
+            _mlvl_dir_scores = mlvl_dir_scores[cls_inds]
+            dir_scores.append(_mlvl_dir_scores[selected])
+        if mlvl_attr_scores is not None:
+            _mlvl_attr_scores = mlvl_attr_scores[cls_inds]
+            attr_scores.append(_mlvl_attr_scores[selected])
+        if mlvl_bboxes2d is not None:
+            _mlvl_bboxes2d = mlvl_bboxes2d[cls_inds]
+            bboxes2d.append(_mlvl_bboxes2d[selected])
+
+    if bboxes:
+        bboxes = torch.cat(bboxes, dim=0)
+        scores = torch.cat(scores, dim=0)
+        labels = torch.cat(labels, dim=0)
+        if mlvl_dir_scores is not None:
+            dir_scores = torch.cat(dir_scores, dim=0)
+        if mlvl_attr_scores is not None:
+            attr_scores = torch.cat(attr_scores, dim=0)
+        if mlvl_bboxes2d is not None:
+            bboxes2d = torch.cat(bboxes2d, dim=0)
+        if bboxes.shape[0] > max_num:
+            _, inds = scores.sort(descending=True)
+            inds = inds[:max_num]
+            bboxes = bboxes[inds, :]
+            labels = labels[inds]
+            scores = scores[inds]
+            if mlvl_dir_scores is not None:
+                dir_scores = dir_scores[inds]
+            if mlvl_attr_scores is not None:
+                attr_scores = attr_scores[inds]
+            if mlvl_bboxes2d is not None:
+                bboxes2d = bboxes2d[inds]
+    else:
+        bboxes = mlvl_scores.new_zeros((0, mlvl_bboxes.size(-1)))
+        scores = mlvl_scores.new_zeros((0, ))
+        labels = mlvl_scores.new_zeros((0, ), dtype=torch.long)
+        if mlvl_dir_scores is not None:
+            dir_scores = mlvl_scores.new_zeros((0, ))
+        if mlvl_attr_scores is not None:
+            attr_scores = mlvl_scores.new_zeros((0, ))
+        if mlvl_bboxes2d is not None:
+            bboxes2d = mlvl_scores.new_zeros((0, 4))
+
+    results = (bboxes, scores, labels)
+
+    if mlvl_dir_scores is not None:
+        results = results + (dir_scores, )
+    if mlvl_attr_scores is not None:
+        results = results + (attr_scores, )
+    if mlvl_bboxes2d is not None:
+        results = results + (bboxes2d, )
+
+    return results
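Both NMS kernels used here (nms_gpu / nms_normal_gpu) are CUDA ops, so the inputs must live on the GPU. A call sketch with made-up shapes and a minimal config (cfg must support attribute access, hence ConfigDict):

import torch
from mmcv import ConfigDict
from mmcv.core.post_processing.box3d_nms import box3d_multiclass_nms

mlvl_bboxes = torch.rand(500, 9).cuda()          # 9-dim 3D boxes
mlvl_bboxes_for_nms = torch.rand(500, 5).cuda()  # (x1, y1, x2, y2, ry) in BEV
mlvl_scores = torch.rand(500, 4).cuda()          # 3 classes + background
cfg = ConfigDict(dict(use_rotate_nms=True, nms_thr=0.2))
bboxes, scores, labels = box3d_multiclass_nms(
    mlvl_bboxes, mlvl_bboxes_for_nms, mlvl_scores,
    score_thr=0.1, max_num=100, cfg=cfg)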
+
+
+def aligned_3d_nms(boxes, scores, classes, thresh):
+    """3D NMS for aligned boxes.
+
+    Args:
+        boxes (torch.Tensor): Aligned box with shape [n, 6].
+        scores (torch.Tensor): Scores of each box.
+        classes (torch.Tensor): Class of each box.
+        thresh (float): IoU threshold for NMS.
+
+    Returns:
+        torch.Tensor: Indices of selected boxes.
+    """
+    x1 = boxes[:, 0]
+    y1 = boxes[:, 1]
+    z1 = boxes[:, 2]
+    x2 = boxes[:, 3]
+    y2 = boxes[:, 4]
+    z2 = boxes[:, 5]
+    area = (x2 - x1) * (y2 - y1) * (z2 - z1)
+    zero = boxes.new_zeros(1, )
+
+    score_sorted = torch.argsort(scores)
+    pick = []
+    while (score_sorted.shape[0] != 0):
+        last = score_sorted.shape[0]
+        i = score_sorted[-1]
+        pick.append(i)
+
+        xx1 = torch.max(x1[i], x1[score_sorted[:last - 1]])
+        yy1 = torch.max(y1[i], y1[score_sorted[:last - 1]])
+        zz1 = torch.max(z1[i], z1[score_sorted[:last - 1]])
+        xx2 = torch.min(x2[i], x2[score_sorted[:last - 1]])
+        yy2 = torch.min(y2[i], y2[score_sorted[:last - 1]])
+        zz2 = torch.min(z2[i], z2[score_sorted[:last - 1]])
+        classes1 = classes[i]
+        classes2 = classes[score_sorted[:last - 1]]
+        inter_l = torch.max(zero, xx2 - xx1)
+        inter_w = torch.max(zero, yy2 - yy1)
+        inter_h = torch.max(zero, zz2 - zz1)
+
+        inter = inter_l * inter_w * inter_h
+        iou = inter / (area[i] + area[score_sorted[:last - 1]] - inter)
+        iou = iou * (classes1 == classes2).float()
+        score_sorted = score_sorted[torch.nonzero(
+            iou <= thresh, as_tuple=False).flatten()]
+
+    indices = boxes.new_tensor(pick, dtype=torch.long)
+    return indices
+
+
+@numba.jit(nopython=True)
+def circle_nms(dets, thresh, post_max_size=83):
+    """Circular NMS.
+
+    An object is only counted as positive if no other center
+    with a higher confidence exists within a radius r using a
+    bird's-eye view distance metric.
+
+    Args:
+        dets (np.ndarray): Detection results with the shape of [N, 3],
+            each row being (center_x, center_y, score).
+        thresh (float): Squared center-distance threshold below which a
+            lower scoring detection is suppressed.
+        post_max_size (int): Max number of predictions to be kept.
+            Defaults to 83.
+
+    Returns:
+        list[int]: Indices of the detections to be kept.
+    """
+    x1 = dets[:, 0]
+    y1 = dets[:, 1]
+    scores = dets[:, 2]
+    order = scores.argsort()[::-1].astype(np.int32)  # highest->lowest
+    ndets = dets.shape[0]
+    suppressed = np.zeros((ndets), dtype=np.int32)
+    keep = []
+    for _i in range(ndets):
+        i = order[_i]  # start with highest score box
+        if suppressed[
+                i] == 1:  # if this box was already suppressed, skip it
+            continue
+        keep.append(i)
+        for _j in range(_i + 1, ndets):
+            j = order[_j]
+            if suppressed[j] == 1:
+                continue
+            # calculate center distance between i and j box
+            dist = (x1[i] - x1[j])**2 + (y1[i] - y1[j])**2
+
+            # ovr = inter / areas[j]
+            if dist <= thresh:
+                suppressed[j] = 1
+    return keep[:post_max_size]
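Note that thresh in circle_nms is compared against a squared center distance, not an IoU. For example:

import numpy as np
from mmcv.core.post_processing.box3d_nms import circle_nms

# Each row is (center_x, center_y, score).
dets = np.array([[0.0, 0.0, 0.9],
                 [0.5, 0.0, 0.8],   # squared distance 0.25 to det 0
                 [5.0, 5.0, 0.7]])
keep = circle_nms(dets, thresh=1.0)  # -> [0, 2]; det 1 is suppressed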
+ """ + x1 = dets[:, 0] + y1 = dets[:, 1] + scores = dets[:, 2] + order = scores.argsort()[::-1].astype(np.int32) # highest->lowest + ndets = dets.shape[0] + suppressed = np.zeros((ndets), dtype=np.int32) + keep = [] + for _i in range(ndets): + i = order[_i] # start with highest score box + if suppressed[ + i] == 1: # if any box have enough iou with this, remove it + continue + keep.append(i) + for _j in range(_i + 1, ndets): + j = order[_j] + if suppressed[j] == 1: + continue + # calculate center distance between i and j box + dist = (x1[i] - x1[j])**2 + (y1[i] - y1[j])**2 + + # ovr = inter / areas[j] + if dist <= thresh: + suppressed[j] = 1 + return keep[:post_max_size] diff --git a/mmcv/core/post_processing/merge_augs.py b/mmcv/core/post_processing/merge_augs.py new file mode 100644 index 0000000..e96dc3b --- /dev/null +++ b/mmcv/core/post_processing/merge_augs.py @@ -0,0 +1,241 @@ +import copy +import warnings + +import numpy as np +import torch +from mmcv import ConfigDict +from mmcv.ops import nms + +from mmcv.ops.iou3d_det.iou3d_utils import nms_gpu, nms_normal_gpu +from ..bbox.transforms import bbox_mapping_back, bbox3d2result, bbox3d_mapping_back +from ..bbox.structures.utils import xywhr2xyxyr + +def merge_aug_proposals(aug_proposals, img_metas, cfg): + """Merge augmented proposals (multiscale, flip, etc.) + + Args: + aug_proposals (list[Tensor]): proposals from different testing + schemes, shape (n, 5). Note that they are not rescaled to the + original image size. + + img_metas (list[dict]): list of image info dict where each dict has: + 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmcv/datasets/pipelines/formatting.py:Collect`. + + cfg (dict): rpn test config. + + Returns: + Tensor: shape (n, 4), proposals corresponding to original image scale. + """ + + cfg = copy.deepcopy(cfg) + + # deprecate arguments warning + if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: + warnings.warn( + 'In rpn_proposal or test_cfg, ' + 'nms_thr has been moved to a dict named nms as ' + 'iou_threshold, max_num has been renamed as max_per_img, ' + 'name of original arguments and the way to specify ' + 'iou_threshold of NMS will be deprecated.') + if 'nms' not in cfg: + cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) + if 'max_num' in cfg: + if 'max_per_img' in cfg: + assert cfg.max_num == cfg.max_per_img, f'You set max_num and ' \ + f'max_per_img at the same time, but get {cfg.max_num} ' \ + f'and {cfg.max_per_img} respectively' \ + f'Please delete max_num which will be deprecated.' + else: + cfg.max_per_img = cfg.max_num + if 'nms_thr' in cfg: + assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \ + f'iou_threshold in nms and ' \ + f'nms_thr at the same time, but get ' \ + f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \ + f' respectively. Please delete the nms_thr ' \ + f'which will be deprecated.' 
+
+    recovered_proposals = []
+    for proposals, img_info in zip(aug_proposals, img_metas):
+        img_shape = img_info['img_shape']
+        scale_factor = img_info['scale_factor']
+        flip = img_info['flip']
+        flip_direction = img_info['flip_direction']
+        _proposals = proposals.clone()
+        _proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape,
+                                              scale_factor, flip,
+                                              flip_direction)
+        recovered_proposals.append(_proposals)
+    aug_proposals = torch.cat(recovered_proposals, dim=0)
+    merged_proposals, _ = nms(aug_proposals[:, :4].contiguous(),
+                              aug_proposals[:, -1].contiguous(),
+                              cfg.nms.iou_threshold)
+    scores = merged_proposals[:, 4]
+    _, order = scores.sort(0, descending=True)
+    num = min(cfg.max_per_img, merged_proposals.shape[0])
+    order = order[:num]
+    merged_proposals = merged_proposals[order, :]
+    return merged_proposals
+
+
+def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg):
+    """Merge augmented detection bboxes and scores.
+
+    Args:
+        aug_bboxes (list[Tensor]): shape (n, 4*#class)
+        aug_scores (list[Tensor] or None): shape (n, #class)
+        img_metas (list[list[dict]]): meta information of each augmented
+            image.
+        rcnn_test_cfg (dict): rcnn test config.
+
+    Returns:
+        tuple: (bboxes, scores)
+    """
+    recovered_bboxes = []
+    for bboxes, img_info in zip(aug_bboxes, img_metas):
+        img_shape = img_info[0]['img_shape']
+        scale_factor = img_info[0]['scale_factor']
+        flip = img_info[0]['flip']
+        flip_direction = img_info[0]['flip_direction']
+        bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip,
+                                   flip_direction)
+        recovered_bboxes.append(bboxes)
+    bboxes = torch.stack(recovered_bboxes).mean(dim=0)
+    if aug_scores is None:
+        return bboxes
+    else:
+        scores = torch.stack(aug_scores).mean(dim=0)
+        return bboxes, scores
+
+
+def merge_aug_scores(aug_scores):
+    """Merge augmented bbox scores."""
+    if isinstance(aug_scores[0], torch.Tensor):
+        return torch.mean(torch.stack(aug_scores), dim=0)
+    else:
+        return np.mean(aug_scores, axis=0)
+
+
+def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None):
+    """Merge augmented mask predictions.
+
+    Args:
+        aug_masks (list[ndarray]): shape (n, #class, h, w)
+        img_metas (list[list[dict]]): meta information of each augmented
+            image.
+        rcnn_test_cfg (dict): rcnn test config.
+
+    Returns:
+        ndarray: Merged masks.
+    """
+    recovered_masks = []
+    for mask, img_info in zip(aug_masks, img_metas):
+        flip = img_info[0]['flip']
+        flip_direction = img_info[0]['flip_direction']
+        if flip:
+            if flip_direction == 'horizontal':
+                mask = mask[:, :, :, ::-1]
+            elif flip_direction == 'vertical':
+                mask = mask[:, :, ::-1, :]
+            elif flip_direction == 'diagonal':
+                mask = mask[:, :, :, ::-1]
+                mask = mask[:, :, ::-1, :]
+            else:
+                raise ValueError(
+                    f"Invalid flipping direction '{flip_direction}'")
+        recovered_masks.append(mask)
+
+    if weights is None:
+        merged_masks = np.mean(recovered_masks, axis=0)
+    else:
+        merged_masks = np.average(
+            np.array(recovered_masks), axis=0, weights=np.array(weights))
+    return merged_masks
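The scalar mergers are simple averages over the augmentations, e.g.:

import torch
from mmcv.core.post_processing.merge_augs import merge_aug_scores

# Class scores for the same proposals under two test-time augmentations.
aug_scores = [torch.tensor([[0.9, 0.1]]), torch.tensor([[0.7, 0.3]])]
merged = merge_aug_scores(aug_scores)  # tensor([[0.8, 0.2]])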
+
+def merge_aug_bboxes_3d(aug_results, img_metas, test_cfg):
+    """Merge augmented detection 3D bboxes and scores.
+
+    Args:
+        aug_results (list[dict]): List of detection result dicts.
+            Each dict contains the following keys
+
+            - boxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox.
+            - scores_3d (torch.Tensor): Detection scores.
+            - labels_3d (torch.Tensor): Predicted box labels.
+        img_metas (list[dict]): Meta information of each sample.
+        test_cfg (dict): Test config.
+
+    Returns:
+        dict: Bounding boxes results in cpu mode, containing merged results.
+
+            - boxes_3d (:obj:`BaseInstance3DBoxes`): Merged detection bbox.
+            - scores_3d (torch.Tensor): Merged detection scores.
+            - labels_3d (torch.Tensor): Merged predicted box labels.
+    """
+
+    assert len(aug_results) == len(img_metas), \
+        '"aug_results" should have the same length as "img_metas", got len(' \
+        f'aug_results)={len(aug_results)} and len(img_metas)={len(img_metas)}'
+
+    recovered_bboxes = []
+    recovered_scores = []
+    recovered_labels = []
+
+    for bboxes, img_info in zip(aug_results, img_metas):
+        scale_factor = img_info[0]['pcd_scale_factor']
+        pcd_horizontal_flip = img_info[0]['pcd_horizontal_flip']
+        pcd_vertical_flip = img_info[0]['pcd_vertical_flip']
+        recovered_scores.append(bboxes['scores_3d'])
+        recovered_labels.append(bboxes['labels_3d'])
+        bboxes = bbox3d_mapping_back(bboxes['boxes_3d'], scale_factor,
+                                     pcd_horizontal_flip, pcd_vertical_flip)
+        recovered_bboxes.append(bboxes)
+
+    aug_bboxes = recovered_bboxes[0].cat(recovered_bboxes)
+    aug_bboxes_for_nms = xywhr2xyxyr(aug_bboxes.bev)
+    aug_scores = torch.cat(recovered_scores, dim=0)
+    aug_labels = torch.cat(recovered_labels, dim=0)
+
+    # TODO: use a more elegant way to deal with nms
+    if test_cfg.use_rotate_nms:
+        nms_func = nms_gpu
+    else:
+        nms_func = nms_normal_gpu
+
+    merged_bboxes = []
+    merged_scores = []
+    merged_labels = []
+
+    # Apply multi-class nms when merging bboxes
+    if len(aug_labels) == 0:
+        return bbox3d2result(aug_bboxes, aug_scores, aug_labels)
+
+    for class_id in range(torch.max(aug_labels).item() + 1):
+        class_inds = (aug_labels == class_id)
+        bboxes_i = aug_bboxes[class_inds]
+        bboxes_nms_i = aug_bboxes_for_nms[class_inds, :]
+        scores_i = aug_scores[class_inds]
+        labels_i = aug_labels[class_inds]
+        if len(bboxes_nms_i) == 0:
+            continue
+        selected = nms_func(bboxes_nms_i, scores_i, test_cfg.nms_thr)
+
+        merged_bboxes.append(bboxes_i[selected, :])
+        merged_scores.append(scores_i[selected])
+        merged_labels.append(labels_i[selected])
+
+    merged_bboxes = merged_bboxes[0].cat(merged_bboxes)
+    merged_scores = torch.cat(merged_scores, dim=0)
+    merged_labels = torch.cat(merged_labels, dim=0)
+
+    _, order = merged_scores.sort(0, descending=True)
+    num = min(test_cfg.max_num, len(aug_bboxes))
+    order = order[:num]
+
+    merged_bboxes = merged_bboxes[order]
+    merged_scores = merged_scores[order]
+    merged_labels = merged_labels[order]
+
+    return bbox3d2result(merged_bboxes, merged_scores, merged_labels)
diff --git a/mmcv/core/utils/__init__.py b/mmcv/core/utils/__init__.py
new file mode 100644
index 0000000..b127388
--- /dev/null
+++ b/mmcv/core/utils/__init__.py
@@ -0,0 +1,9 @@
+from .dist_utils import DistOptimizerHook, allreduce_grads, reduce_mean
+from .misc import flip_tensor, mask2ndarray, multi_apply, unmap, add_prefix
+from .gaussian import draw_heatmap_gaussian, gaussian_2d, gaussian_radius
+
+__all__ = [
+    'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
+    'unmap', 'mask2ndarray', 'flip_tensor', 'add_prefix',
+    'gaussian_2d', 'gaussian_radius', 'draw_heatmap_gaussian'
+]
diff --git a/mmcv/core/utils/dist_utils.py b/mmcv/core/utils/dist_utils.py
new file mode 100644
index 0000000..5fe7775
--- /dev/null
+++ b/mmcv/core/utils/dist_utils.py
@@ -0,0 +1,69 @@
+import warnings
+from collections import OrderedDict
+
+import torch.distributed as dist
+from mmcv.runner import OptimizerHook
+from torch._utils import (_flatten_dense_tensors, _take_tensors,
+                          _unflatten_dense_tensors)
+
+
+def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
+    if bucket_size_mb > 0:
+        bucket_size_bytes = bucket_size_mb * 1024 * 1024
+        buckets = _take_tensors(tensors, bucket_size_bytes)
+    else:
+        buckets = OrderedDict()
+        for tensor in tensors:
+            tp = tensor.type()
+            if tp not in buckets:
+                buckets[tp] = []
+            buckets[tp].append(tensor)
+        buckets = buckets.values()
+
+    for bucket in buckets:
+        flat_tensors = _flatten_dense_tensors(bucket)
+        dist.all_reduce(flat_tensors)
+        flat_tensors.div_(world_size)
+        for tensor, synced in zip(
+                bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
+            tensor.copy_(synced)
+
+
+def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
+    """Allreduce gradients.
+
+    Args:
+        params (list[torch.Parameters]): List of parameters of a model.
+        coalesce (bool, optional): Whether to allreduce parameters as a
+            whole. Defaults to True.
+        bucket_size_mb (int, optional): Size of bucket, the unit is MB.
+            Defaults to -1.
+    """
+    grads = [
+        param.grad.data for param in params
+        if param.requires_grad and param.grad is not None
+    ]
+    world_size = dist.get_world_size()
+    if coalesce:
+        _allreduce_coalesced(grads, world_size, bucket_size_mb)
+    else:
+        for tensor in grads:
+            dist.all_reduce(tensor.div_(world_size))
+
+
+class DistOptimizerHook(OptimizerHook):
+    """Deprecated optimizer hook for distributed training."""
+
+    def __init__(self, *args, **kwargs):
+        warnings.warn('"DistOptimizerHook" is deprecated, please switch to '
+                      '"mmcv.runner.OptimizerHook".')
+        super().__init__(*args, **kwargs)
+
+
+def reduce_mean(tensor):
+    """Obtain the mean of a tensor across different GPUs."""
+    if not (dist.is_available() and dist.is_initialized()):
+        return tensor
+    tensor = tensor.clone()
+    dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
+    return tensor
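reduce_mean is the usual helper for averaging a loss normalizer or metric across ranks; a sketch assuming an already-initialized process group:

import torch
from mmcv.core.utils import reduce_mean

# Inside a DDP worker (dist.init_process_group has been called):
num_pos = torch.tensor(42.0, device='cuda')
# Every rank receives the mean of num_pos over all GPUs; without an
# initialized process group the tensor is returned unchanged.
avg_num_pos = reduce_mean(num_pos).clamp(min=1.0)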
diff --git a/mmcv/core/utils/gaussian.py b/mmcv/core/utils/gaussian.py
new file mode 100644
index 0000000..a07963e
--- /dev/null
+++ b/mmcv/core/utils/gaussian.py
@@ -0,0 +1,86 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import numpy as np
+import torch
+
+
+def gaussian_2d(shape, sigma=1):
+    """Generate gaussian map.
+
+    Args:
+        shape (list[int]): Shape of the map.
+        sigma (float): Sigma to generate gaussian map.
+            Defaults to 1.
+
+    Returns:
+        np.ndarray: Generated gaussian map.
+    """
+    m, n = [(ss - 1.) / 2. for ss in shape]
+    y, x = np.ogrid[-m:m + 1, -n:n + 1]
+
+    h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
+    h[h < np.finfo(h.dtype).eps * h.max()] = 0
+    return h
+
+
+def draw_heatmap_gaussian(heatmap, center, radius, k=1):
+    """Get gaussian-masked heatmap.
+
+    Args:
+        heatmap (torch.Tensor): Heatmap to be masked.
+        center (torch.Tensor): Center coord of the heatmap.
+        radius (int): Radius of the gaussian.
+        k (int): Multiple of masked_gaussian. Defaults to 1.
+
+    Returns:
+        torch.Tensor: Masked heatmap.
+    """
+    diameter = 2 * radius + 1
+    gaussian = gaussian_2d((diameter, diameter), sigma=diameter / 6)
+
+    x, y = int(center[0]), int(center[1])
+
+    height, width = heatmap.shape[0:2]
+
+    left, right = min(x, radius), min(width - x, radius + 1)
+    top, bottom = min(y, radius), min(height - y, radius + 1)
+
+    masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
+    masked_gaussian = torch.from_numpy(
+        gaussian[radius - top:radius + bottom,
+                 radius - left:radius + right]).to(heatmap.device,
+                                                   torch.float32)
+    if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:
+        torch.max(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
+    return heatmap
+
+
+def gaussian_radius(det_size, min_overlap=0.5):
+    """Get radius of gaussian.
+
+    Args:
+        det_size (tuple[torch.Tensor]): Size of the detection result.
+        min_overlap (float): Minimal required gaussian overlap.
+            Defaults to 0.5.
+
+    Returns:
+        torch.Tensor: Computed radius.
+    """
+    height, width = det_size
+
+    a1 = 1
+    b1 = (height + width)
+    c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
+    sq1 = torch.sqrt(b1**2 - 4 * a1 * c1)
+    r1 = (b1 + sq1) / 2
+
+    a2 = 4
+    b2 = 2 * (height + width)
+    c2 = (1 - min_overlap) * width * height
+    sq2 = torch.sqrt(b2**2 - 4 * a2 * c2)
+    r2 = (b2 + sq2) / 2
+
+    a3 = 4 * min_overlap
+    b3 = -2 * min_overlap * (height + width)
+    c3 = (min_overlap - 1) * width * height
+    sq3 = torch.sqrt(b3**2 - 4 * a3 * c3)
+    r3 = (b3 + sq3) / 2
+    return min(r1, r2, r3)
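Putting the helpers together, CenterPoint-style target drawing looks roughly like this (sizes are illustrative only):

import torch
from mmcv.core.utils import draw_heatmap_gaussian, gaussian_radius

heatmap = torch.zeros(128, 128)        # one class channel of a BEV heatmap
box_h, box_w = torch.tensor(6.0), torch.tensor(9.0)
radius = max(int(gaussian_radius((box_h, box_w), min_overlap=0.5)), 2)
center = torch.tensor([64.0, 40.0])    # (x, y) in heatmap coordinates
draw_heatmap_gaussian(heatmap, center, radius)  # modifies heatmap in place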
+ """ + + outputs = dict() + for name, value in inputs.items(): + outputs[f'{prefix}.{name}'] = value + + return outputs diff --git a/mmcv/core/visualization/__init__.py b/mmcv/core/visualization/__init__.py new file mode 100644 index 0000000..af64628 --- /dev/null +++ b/mmcv/core/visualization/__init__.py @@ -0,0 +1,4 @@ +from .image import (color_val_matplotlib, imshow_det_bboxes, + imshow_gt_det_bboxes, imshow, color_val) + +__all__ = ['imshow_det_bboxes', 'imshow_gt_det_bboxes', 'color_val_matplotlib'] diff --git a/mmcv/core/visualization/image.py b/mmcv/core/visualization/image.py new file mode 100644 index 0000000..a001853 --- /dev/null +++ b/mmcv/core/visualization/image.py @@ -0,0 +1,372 @@ +import matplotlib.pyplot as plt +import numpy as np +import cv2 +import pycocotools.mask as mask_util +from matplotlib.collections import PatchCollection +from matplotlib.patches import Polygon +from enum import Enum +from mmcv.utils import concat_list, is_str +from mmcv.image import imread, imwrite +from mmcv.image import bgr2rgb, rgb2bgr +from ..utils import mask2ndarray + +EPS = 1e-2 + + +class Color(Enum): + """An enum that defines common colors. + + Contains red, green, blue, cyan, yellow, magenta, white and black. + """ + red = (0, 0, 255) + green = (0, 255, 0) + blue = (255, 0, 0) + cyan = (255, 255, 0) + yellow = (0, 255, 255) + magenta = (255, 0, 255) + white = (255, 255, 255) + black = (0, 0, 0) + + +def color_val(color): + """Convert various input to color tuples. + + Args: + color (:obj:`Color`/str/tuple/int/ndarray): Color inputs + + Returns: + tuple[int]: A tuple of 3 integers indicating BGR channels. + """ + if is_str(color): + return Color[color].value + elif isinstance(color, Color): + return color.value + elif isinstance(color, tuple): + assert len(color) == 3 + for channel in color: + assert 0 <= channel <= 255 + return color + elif isinstance(color, int): + assert 0 <= color <= 255 + return color, color, color + elif isinstance(color, np.ndarray): + assert color.ndim == 1 and color.size == 3 + assert np.all((color >= 0) & (color <= 255)) + color = color.astype(np.uint8) + return tuple(color) + else: + raise TypeError(f'Invalid type for color: {type(color)}') + + + +def color_val_matplotlib(color): + """Convert various input in BGR order to normalized RGB matplotlib color + tuples, + + Args: + color (:obj:`Color`/str/tuple/int/ndarray): Color inputs + + Returns: + tuple[float]: A tuple of 3 normalized floats indicating RGB channels. + """ + color = color_val(color) + color = [color / 255 for color in color[::-1]] + return tuple(color) + +def imshow(img, win_name='', wait_time=0): + """Show an image. + + Args: + img (str or ndarray): The image to be displayed. + win_name (str): The window name. + wait_time (int): Value of waitKey param. + """ + cv2.imshow(win_name, imread(img)) + if wait_time == 0: # prevent from hanging if windows was closed + while True: + ret = cv2.waitKey(1) + + closed = cv2.getWindowProperty(win_name, cv2.WND_PROP_VISIBLE) < 1 + # if user closed window or if some key pressed + if closed or ret != -1: + break + else: + ret = cv2.waitKey(wait_time) + + +def imshow_det_bboxes(img, + bboxes, + labels, + segms=None, + class_names=None, + score_thr=0, + bbox_color='green', + text_color='green', + mask_color=None, + thickness=2, + font_size=13, + win_name='', + show=True, + wait_time=0, + out_file=None): + """Draw bboxes and class labels (with scores) on an image. + + Args: + img (str or ndarray): The image to be displayed. 
diff --git a/mmcv/core/visualization/__init__.py b/mmcv/core/visualization/__init__.py
new file mode 100644
index 0000000..af64628
--- /dev/null
+++ b/mmcv/core/visualization/__init__.py
@@ -0,0 +1,4 @@
+from .image import (color_val_matplotlib, imshow_det_bboxes,
+                    imshow_gt_det_bboxes, imshow, color_val)
+
+__all__ = ['imshow', 'color_val', 'color_val_matplotlib', 'imshow_det_bboxes', 'imshow_gt_det_bboxes']
diff --git a/mmcv/core/visualization/image.py b/mmcv/core/visualization/image.py
new file mode 100644
index 0000000..a001853
--- /dev/null
+++ b/mmcv/core/visualization/image.py
@@ -0,0 +1,372 @@
+import matplotlib.pyplot as plt
+import numpy as np
+import cv2
+import pycocotools.mask as mask_util
+from matplotlib.collections import PatchCollection
+from matplotlib.patches import Polygon
+from enum import Enum
+from mmcv.utils import concat_list, is_str
+from mmcv.image import imread, imwrite
+from mmcv.image import bgr2rgb, rgb2bgr
+from ..utils import mask2ndarray
+
+EPS = 1e-2
+
+
+class Color(Enum):
+    """An enum that defines common colors.
+
+    Contains red, green, blue, cyan, yellow, magenta, white and black.
+    """
+    red = (0, 0, 255)
+    green = (0, 255, 0)
+    blue = (255, 0, 0)
+    cyan = (255, 255, 0)
+    yellow = (0, 255, 255)
+    magenta = (255, 0, 255)
+    white = (255, 255, 255)
+    black = (0, 0, 0)
+
+
+def color_val(color):
+    """Convert various inputs to color tuples.
+
+    Args:
+        color (:obj:`Color`/str/tuple/int/ndarray): Color inputs
+
+    Returns:
+        tuple[int]: A tuple of 3 integers indicating BGR channels.
+    """
+    if is_str(color):
+        return Color[color].value
+    elif isinstance(color, Color):
+        return color.value
+    elif isinstance(color, tuple):
+        assert len(color) == 3
+        for channel in color:
+            assert 0 <= channel <= 255
+        return color
+    elif isinstance(color, int):
+        assert 0 <= color <= 255
+        return color, color, color
+    elif isinstance(color, np.ndarray):
+        assert color.ndim == 1 and color.size == 3
+        assert np.all((color >= 0) & (color <= 255))
+        color = color.astype(np.uint8)
+        return tuple(color)
+    else:
+        raise TypeError(f'Invalid type for color: {type(color)}')
+
+
+def color_val_matplotlib(color):
+    """Convert various inputs in BGR order to normalized RGB matplotlib color
+    tuples.
+
+    Args:
+        color (:obj:`Color`/str/tuple/int/ndarray): Color inputs
+
+    Returns:
+        tuple[float]: A tuple of 3 normalized floats indicating RGB channels.
+    """
+    color = color_val(color)
+    color = [color / 255 for color in color[::-1]]
+    return tuple(color)
+
+def imshow(img, win_name='', wait_time=0):
+    """Show an image.
+
+    Args:
+        img (str or ndarray): The image to be displayed.
+        win_name (str): The window name.
+        wait_time (int): Value of waitKey param.
+    """
+    cv2.imshow(win_name, imread(img))
+    if wait_time == 0:  # prevent from hanging if the window was closed
+        while True:
+            ret = cv2.waitKey(1)
+
+            closed = cv2.getWindowProperty(win_name, cv2.WND_PROP_VISIBLE) < 1
+            # if user closed window or if some key pressed
+            if closed or ret != -1:
+                break
+    else:
+        ret = cv2.waitKey(wait_time)
+
+
+def imshow_det_bboxes(img,
+                      bboxes,
+                      labels,
+                      segms=None,
+                      class_names=None,
+                      score_thr=0,
+                      bbox_color='green',
+                      text_color='green',
+                      mask_color=None,
+                      thickness=2,
+                      font_size=13,
+                      win_name='',
+                      show=True,
+                      wait_time=0,
+                      out_file=None):
+    """Draw bboxes and class labels (with scores) on an image.
+
+    Args:
+        img (str or ndarray): The image to be displayed.
+        bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or
+            (n, 5).
+        labels (ndarray): Labels of bboxes.
+        segms (ndarray or None): Masks, shaped (n, h, w) or None.
+        class_names (list[str]): Names of each class.
+        score_thr (float): Minimum score of bboxes to be shown. Default: 0
+        bbox_color (str or tuple(int) or :obj:`Color`): Color of bbox lines.
+            The tuple of color should be in BGR order. Default: 'green'
+        text_color (str or tuple(int) or :obj:`Color`): Color of texts.
+            The tuple of color should be in BGR order. Default: 'green'
+        mask_color (str or tuple(int) or :obj:`Color`, optional):
+            Color of masks. The tuple of color should be in BGR order.
+            Default: None
+        thickness (int): Thickness of lines. Default: 2
+        font_size (int): Font size of texts. Default: 13
+        win_name (str): The window name. Default: ''
+        show (bool): Whether to show the image. Default: True
+        wait_time (float): Value of waitKey param. Default: 0.
+        out_file (str, optional): The filename to write the image.
+            Default: None
+
+    Returns:
+        ndarray: The image with bboxes drawn on it.
+    """
+    assert bboxes.ndim == 2, \
+        f'bboxes ndim should be 2, but its ndim is {bboxes.ndim}.'
+    assert labels.ndim == 1, \
+        f'labels ndim should be 1, but its ndim is {labels.ndim}.'
+    assert bboxes.shape[0] == labels.shape[0], \
+        'bboxes and labels should have the same first dimension.'
+    assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5, \
+        f'bboxes.shape[1] should be 4 or 5, but it is {bboxes.shape[1]}.'
+    img = imread(img).astype(np.uint8)
+
+    if score_thr > 0:
+        assert bboxes.shape[1] == 5
+        scores = bboxes[:, -1]
+        inds = scores > score_thr
+        bboxes = bboxes[inds, :]
+        labels = labels[inds]
+        if segms is not None:
+            segms = segms[inds, ...]
+
+    mask_colors = []
+    if labels.shape[0] > 0:
+        if mask_color is None:
+            # random color
+            np.random.seed(42)
+            mask_colors = [
+                np.random.randint(0, 256, (1, 3), dtype=np.uint8)
+                for _ in range(max(labels) + 1)
+            ]
+        else:
+            # specify color
+            mask_colors = [
+                np.array(color_val(mask_color)[::-1], dtype=np.uint8)
+            ] * (
+                max(labels) + 1)
+
+    bbox_color = color_val_matplotlib(bbox_color)
+    text_color = color_val_matplotlib(text_color)
+
+    img = bgr2rgb(img)
+    width, height = img.shape[1], img.shape[0]
+    img = np.ascontiguousarray(img)
+
+    fig = plt.figure(win_name, frameon=False)
+    plt.title(win_name)
+    canvas = fig.canvas
+    dpi = fig.get_dpi()
+    # add a small EPS to avoid precision lost due to matplotlib's truncation
+    # (https://github.com/matplotlib/matplotlib/issues/15363)
+    fig.set_size_inches((width + EPS) / dpi, (height + EPS) / dpi)
+
+    # remove white edges by setting subplot margins
+    plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
+    ax = plt.gca()
+    ax.axis('off')
+
+    polygons = []
+    color = []
+    for i, (bbox, label) in enumerate(zip(bboxes, labels)):
+        bbox_int = bbox.astype(np.int32)
+        poly = [[bbox_int[0], bbox_int[1]], [bbox_int[0], bbox_int[3]],
+                [bbox_int[2], bbox_int[3]], [bbox_int[2], bbox_int[1]]]
+        np_poly = np.array(poly).reshape((4, 2))
+        polygons.append(Polygon(np_poly))
+        color.append(bbox_color)
+        label_text = class_names[
+            label] if class_names is not None else f'class {label}'
+        if len(bbox) > 4:
+            label_text += f'|{bbox[-1]:.02f}'
+        ax.text(
+            bbox_int[0],
+            bbox_int[1],
+            f'{label_text}',
+            bbox={
+                'facecolor': 'black',
+                'alpha': 0.8,
+                'pad': 0.7,
+                'edgecolor': 'none'
+            },
+            color=text_color,
+            fontsize=font_size,
+            verticalalignment='top',
+            horizontalalignment='left')
+        if segms is not None:
+            color_mask = mask_colors[labels[i]]
+            mask = segms[i].astype(bool)
+            img[mask] = img[mask] * 0.5 + color_mask * 0.5
+
+    plt.imshow(img)
+
+    p = PatchCollection(
+        polygons, facecolor='none', edgecolors=color, linewidths=thickness)
+    ax.add_collection(p)
+
+    stream, _ = canvas.print_to_buffer()
+    buffer = np.frombuffer(stream, dtype='uint8')
+    img_rgba = buffer.reshape(height, width, 4)
+    rgb, alpha = np.split(img_rgba, [3], axis=2)
+    img = rgb.astype('uint8')
+    img = rgb2bgr(img)
+
+    if show:
+        # We do not use cv2 for display because in some cases, opencv will
+        # conflict with Qt, it will output a warning: Current thread
+        # is not the object's thread. You can refer to
+        # https://github.com/opencv/opencv-python/issues/46 for details
+        if wait_time == 0:
+            plt.show()
+        else:
+            plt.show(block=False)
+            plt.pause(wait_time)
+    if out_file is not None:
+        imwrite(img, out_file)
+
+    plt.close()
+
+    return img
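A minimal sketch of calling imshow_det_bboxes on dummy data, written straight to disk rather than to a window (the file name is arbitrary):

import numpy as np
from mmcv.core.visualization import imshow_det_bboxes

img = np.full((240, 320, 3), 255, dtype=np.uint8)
bboxes = np.array([[30.0, 40.0, 120.0, 160.0, 0.92]])  # (x1, y1, x2, y2, score)
labels = np.array([0])
imshow_det_bboxes(img, bboxes, labels, class_names=['car'],
                  score_thr=0.3, show=False, out_file='demo_det.jpg')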
+
+
+def imshow_gt_det_bboxes(img,
+                         annotation,
+                         result,
+                         class_names=None,
+                         score_thr=0,
+                         gt_bbox_color=(255, 102, 61),
+                         gt_text_color=(255, 102, 61),
+                         gt_mask_color=(255, 102, 61),
+                         det_bbox_color=(72, 101, 241),
+                         det_text_color=(72, 101, 241),
+                         det_mask_color=(72, 101, 241),
+                         thickness=2,
+                         font_size=13,
+                         win_name='',
+                         show=True,
+                         wait_time=0,
+                         out_file=None):
+    """General function to visualize GT and detection results together.
+
+    Args:
+        img (str or ndarray): The image to be displayed.
+        annotation (dict): Ground truth annotations which contain keys of
+            'gt_bboxes' and 'gt_labels' or 'gt_masks'.
+        result (tuple[list] or list): The detection result, can be either
+            (bbox, segm) or just bbox.
+        class_names (list[str]): Names of each class.
+        score_thr (float): Minimum score of bboxes to be shown. Default: 0
+        gt_bbox_color (str or tuple(int) or :obj:`Color`): Color of bbox
+            lines. The tuple of color should be in BGR order.
+            Default: (255, 102, 61)
+        gt_text_color (str or tuple(int) or :obj:`Color`): Color of texts.
+            The tuple of color should be in BGR order. Default: (255, 102, 61)
+        gt_mask_color (str or tuple(int) or :obj:`Color`, optional):
+            Color of masks. The tuple of color should be in BGR order.
+            Default: (255, 102, 61)
+        det_bbox_color (str or tuple(int) or :obj:`Color`): Color of bbox
+            lines. The tuple of color should be in BGR order.
+            Default: (72, 101, 241)
+        det_text_color (str or tuple(int) or :obj:`Color`): Color of texts.
+            The tuple of color should be in BGR order. Default: (72, 101, 241)
+        det_mask_color (str or tuple(int) or :obj:`Color`, optional):
+            Color of masks. The tuple of color should be in BGR order.
+            Default: (72, 101, 241)
+        thickness (int): Thickness of lines. Default: 2
+        font_size (int): Font size of texts. Default: 13
+        win_name (str): The window name. Default: ''
+        show (bool): Whether to show the image. Default: True
+        wait_time (float): Value of waitKey param. Default: 0.
+        out_file (str, optional): The filename to write the image.
+            Default: None
+
+    Returns:
+        ndarray: The image with bboxes or masks drawn on it.
+    """
+ """ + assert 'gt_bboxes' in annotation + assert 'gt_labels' in annotation + assert isinstance( + result, + (tuple, list)), f'Expected tuple or list, but get {type(result)}' + + gt_masks = annotation.get('gt_masks', None) + if gt_masks is not None: + gt_masks = mask2ndarray(gt_masks) + + img = imread(img) + + img = imshow_det_bboxes( + img, + annotation['gt_bboxes'], + annotation['gt_labels'], + gt_masks, + class_names=class_names, + bbox_color=gt_bbox_color, + text_color=gt_text_color, + mask_color=gt_mask_color, + thickness=thickness, + font_size=font_size, + win_name=win_name, + show=False) + + if isinstance(result, tuple): + bbox_result, segm_result = result + if isinstance(segm_result, tuple): + segm_result = segm_result[0] # ms rcnn + else: + bbox_result, segm_result = result, None + + bboxes = np.vstack(bbox_result) + labels = [ + np.full(bbox.shape[0], i, dtype=np.int32) + for i, bbox in enumerate(bbox_result) + ] + labels = np.concatenate(labels) + + segms = None + if segm_result is not None and len(labels) > 0: # non empty + segms = concat_list(segm_result) + segms = mask_util.decode(segms) + segms = segms.transpose(2, 0, 1) + + img = imshow_det_bboxes( + img, + bboxes, + labels, + segms=segms, + class_names=class_names, + score_thr=score_thr, + bbox_color=det_bbox_color, + text_color=det_text_color, + mask_color=det_mask_color, + thickness=thickness, + font_size=font_size, + win_name=win_name, + show=show, + wait_time=wait_time, + out_file=out_file) + return img diff --git a/mmcv/core/visualizer/__init__.py b/mmcv/core/visualizer/__init__.py new file mode 100644 index 0000000..bbf1e60 --- /dev/null +++ b/mmcv/core/visualizer/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .show_result import (show_multi_modality_result, show_result, + show_seg_result) + +__all__ = ['show_result', 'show_seg_result', 'show_multi_modality_result'] diff --git a/mmcv/core/visualizer/image_vis.py b/mmcv/core/visualizer/image_vis.py new file mode 100644 index 0000000..60034f1 --- /dev/null +++ b/mmcv/core/visualizer/image_vis.py @@ -0,0 +1,198 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import cv2 +import numpy as np +import torch +from matplotlib import pyplot as plt + + +def project_pts_on_img(points, + raw_img, + lidar2img_rt, + max_distance=70, + thickness=-1): + """Project the 3D points cloud on 2D image. + + Args: + points (numpy.array): 3D points cloud (x, y, z) to visualize. + raw_img (numpy.array): The numpy array of image. + lidar2img_rt (numpy.array, shape=[4, 4]): The projection matrix + according to the camera intrinsic parameters. + max_distance (float): the max distance of the points cloud. + Default: 70. + thickness (int, optional): The thickness of 2D points. Default: -1. 
+ """ + img = raw_img.copy() + num_points = points.shape[0] + pts_4d = np.concatenate([points[:, :3], np.ones((num_points, 1))], axis=-1) + pts_2d = pts_4d @ lidar2img_rt.T + + # cam_points is Tensor of Nx4 whose last column is 1 + # transform camera coordinate to image coordinate + pts_2d[:, 2] = np.clip(pts_2d[:, 2], a_min=1e-5, a_max=99999) + pts_2d[:, 0] /= pts_2d[:, 2] + pts_2d[:, 1] /= pts_2d[:, 2] + + fov_inds = ((pts_2d[:, 0] < img.shape[1]) + & (pts_2d[:, 0] >= 0) + & (pts_2d[:, 1] < img.shape[0]) + & (pts_2d[:, 1] >= 0)) + + imgfov_pts_2d = pts_2d[fov_inds, :3] # u, v, d + + cmap = plt.cm.get_cmap('hsv', 256) + cmap = np.array([cmap(i) for i in range(256)])[:, :3] * 255 + for i in range(imgfov_pts_2d.shape[0]): + depth = imgfov_pts_2d[i, 2] + color = cmap[np.clip(int(max_distance * 10 / depth), 0, 255), :] + cv2.circle( + img, + center=(int(np.round(imgfov_pts_2d[i, 0])), + int(np.round(imgfov_pts_2d[i, 1]))), + radius=1, + color=tuple(color), + thickness=thickness, + ) + cv2.imshow('project_pts_img', img.astype(np.uint8)) + cv2.waitKey(100) + + +def plot_rect3d_on_img(img, + num_rects, + rect_corners, + color=(0, 255, 0), + thickness=1): + """Plot the boundary lines of 3D rectangular on 2D images. + + Args: + img (numpy.array): The numpy array of image. + num_rects (int): Number of 3D rectangulars. + rect_corners (numpy.array): Coordinates of the corners of 3D + rectangulars. Should be in the shape of [num_rect, 8, 2]. + color (tuple[int]): The color to draw bboxes. Default: (0, 255, 0). + thickness (int, optional): The thickness of bboxes. Default: 1. + """ + line_indices = ((0, 1), (0, 3), (0, 4), (1, 2), (1, 5), (3, 2), (3, 7), + (4, 5), (4, 7), (2, 6), (5, 6), (6, 7)) + for i in range(num_rects): + corners = rect_corners[i].astype(np.int) + for start, end in line_indices: + cv2.line(img, (corners[start, 0], corners[start, 1]), + (corners[end, 0], corners[end, 1]), color, thickness, + cv2.LINE_AA) + + return img.astype(np.uint8) + + +def draw_lidar_bbox3d_on_img(bboxes3d, + raw_img, + lidar2img_rt, + img_metas, + color=(0, 255, 0), + thickness=1): + """Project the 3D bbox on 2D plane and draw on input image. + + Args: + bboxes3d (:obj:`LiDARInstance3DBoxes`): + 3d bbox in lidar coordinate system to visualize. + raw_img (numpy.array): The numpy array of image. + lidar2img_rt (numpy.array, shape=[4, 4]): The projection matrix + according to the camera intrinsic parameters. + img_metas (dict): Useless here. + color (tuple[int]): The color to draw bboxes. Default: (0, 255, 0). + thickness (int, optional): The thickness of bboxes. Default: 1. + """ + img = raw_img.copy() + corners_3d = bboxes3d.corners + num_bbox = corners_3d.shape[0] + pts_4d = np.concatenate( + [corners_3d.reshape(-1, 3), + np.ones((num_bbox * 8, 1))], axis=-1) + lidar2img_rt = copy.deepcopy(lidar2img_rt).reshape(4, 4) + if isinstance(lidar2img_rt, torch.Tensor): + lidar2img_rt = lidar2img_rt.cpu().numpy() + pts_2d = pts_4d @ lidar2img_rt.T + + pts_2d[:, 2] = np.clip(pts_2d[:, 2], a_min=1e-5, a_max=1e5) + pts_2d[:, 0] /= pts_2d[:, 2] + pts_2d[:, 1] /= pts_2d[:, 2] + imgfov_pts_2d = pts_2d[..., :2].reshape(num_bbox, 8, 2) + + return plot_rect3d_on_img(img, num_bbox, imgfov_pts_2d, color, thickness) + + +# TODO: remove third parameter in all functions here in favour of img_metas +def draw_depth_bbox3d_on_img(bboxes3d, + raw_img, + calibs, + img_metas, + color=(0, 255, 0), + thickness=1): + """Project the 3D bbox on 2D plane and draw on input image. 
+
+
+# TODO: remove third parameter in all functions here in favour of img_metas
+def draw_depth_bbox3d_on_img(bboxes3d,
+                             raw_img,
+                             calibs,
+                             img_metas,
+                             color=(0, 255, 0),
+                             thickness=1):
+    """Project the 3D bbox on the 2D plane and draw on the input image.
+
+    Args:
+        bboxes3d (:obj:`DepthInstance3DBoxes`, shape=[M, 7]):
+            3d bbox in depth coordinate system to visualize.
+        raw_img (numpy.array): The numpy array of image.
+        calibs (dict): Camera calibration information, Rt and K.
+        img_metas (dict): Used in coordinates transformation.
+        color (tuple[int]): The color to draw bboxes. Default: (0, 255, 0).
+        thickness (int, optional): The thickness of bboxes. Default: 1.
+    """
+    from mmcv.core.bbox import points_cam2img
+    from mmcv.models import apply_3d_transformation
+
+    img = raw_img.copy()
+    img_metas = copy.deepcopy(img_metas)
+    corners_3d = bboxes3d.corners
+    num_bbox = corners_3d.shape[0]
+    points_3d = corners_3d.reshape(-1, 3)
+
+    # first reverse the data transformations
+    xyz_depth = apply_3d_transformation(
+        points_3d, 'DEPTH', img_metas, reverse=True)
+
+    # project to 2d to get image coords (uv)
+    uv_origin = points_cam2img(xyz_depth,
+                               xyz_depth.new_tensor(img_metas['depth2img']))
+    uv_origin = (uv_origin - 1).round()
+    imgfov_pts_2d = uv_origin[..., :2].reshape(num_bbox, 8, 2).numpy()
+
+    return plot_rect3d_on_img(img, num_bbox, imgfov_pts_2d, color, thickness)
+
+
+def draw_camera_bbox3d_on_img(bboxes3d,
+                              raw_img,
+                              cam2img,
+                              img_metas,
+                              color=(0, 255, 0),
+                              thickness=1):
+    """Project the 3D bbox on the 2D plane and draw on the input image.
+
+    Args:
+        bboxes3d (:obj:`CameraInstance3DBoxes`, shape=[M, 7]):
+            3d bbox in camera coordinate system to visualize.
+        raw_img (numpy.array): The numpy array of image.
+        cam2img (np.ndarray | torch.Tensor): Camera intrinsic matrix,
+            denoted as `K` in the depth bbox coordinate system.
+        img_metas (dict): Unused here.
+        color (tuple[int]): The color to draw bboxes. Default: (0, 255, 0).
+        thickness (int, optional): The thickness of bboxes. Default: 1.
+    """
+    from mmcv.core.bbox import points_cam2img
+
+    img = raw_img.copy()
+    cam2img = copy.deepcopy(cam2img)
+    corners_3d = bboxes3d.corners
+    num_bbox = corners_3d.shape[0]
+    points_3d = corners_3d.reshape(-1, 3)
+    if not isinstance(cam2img, torch.Tensor):
+        cam2img = torch.from_numpy(np.array(cam2img))
+    cam2img = cam2img.reshape(3, 3).float().cpu()
+
+    # project to 2d to get image coords (uv)
+    uv_origin = points_cam2img(points_3d, cam2img)
+    uv_origin = (uv_origin - 1).round()
+    imgfov_pts_2d = uv_origin[..., :2].reshape(num_bbox, 8, 2).numpy()
+
+    return plot_rect3d_on_img(img, num_bbox, imgfov_pts_2d, color, thickness)
diff --git a/mmcv/core/visualizer/open3d_vis.py b/mmcv/core/visualizer/open3d_vis.py
new file mode 100644
index 0000000..0790ee4
--- /dev/null
+++ b/mmcv/core/visualizer/open3d_vis.py
@@ -0,0 +1,443 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import copy
+import numpy as np
+import torch
+
+try:
+    import open3d as o3d
+    from open3d import geometry
+except ImportError:
+    raise ImportError(
+        'Please run "pip install open3d" to install open3d first.')
+ """ + vis.get_render_option().point_size = points_size # set points size + if isinstance(points, torch.Tensor): + points = points.cpu().numpy() + + points = points.copy() + pcd = geometry.PointCloud() + if mode == 'xyz': + pcd.points = o3d.utility.Vector3dVector(points[:, :3]) + points_colors = np.tile(np.array(point_color), (points.shape[0], 1)) + elif mode == 'xyzrgb': + pcd.points = o3d.utility.Vector3dVector(points[:, :3]) + points_colors = points[:, 3:6] + # normalize to [0, 1] for open3d drawing + if not ((points_colors >= 0.0) & (points_colors <= 1.0)).all(): + points_colors /= 255.0 + else: + raise NotImplementedError + + pcd.colors = o3d.utility.Vector3dVector(points_colors) + vis.add_geometry(pcd) + + return pcd, points_colors + + +def _draw_bboxes(bbox3d, + vis, + points_colors, + pcd=None, + bbox_color=(0, 1, 0), + points_in_box_color=(1, 0, 0), + rot_axis=2, + center_mode='lidar_bottom', + mode='xyz'): + """Draw bbox on visualizer and change the color of points inside bbox3d. + + Args: + bbox3d (numpy.array | torch.tensor, shape=[M, 7]): + 3d bbox (x, y, z, dx, dy, dz, yaw) to visualize. + vis (:obj:`open3d.visualization.Visualizer`): open3d visualizer. + points_colors (numpy.array): color of each points. + pcd (:obj:`open3d.geometry.PointCloud`): point cloud. Default: None. + bbox_color (tuple[float]): the color of bbox. Default: (0, 1, 0). + points_in_box_color (tuple[float]): + the color of points inside bbox3d. Default: (1, 0, 0). + rot_axis (int): rotation axis of bbox. Default: 2. + center_mode (bool): indicate the center of bbox is bottom center + or gravity center. avaliable mode + ['lidar_bottom', 'camera_bottom']. Default: 'lidar_bottom'. + mode (str): indicate type of the input points, avaliable mode + ['xyz', 'xyzrgb']. Default: 'xyz'. + """ + if isinstance(bbox3d, torch.Tensor): + bbox3d = bbox3d.cpu().numpy() + bbox3d = bbox3d.copy() + + in_box_color = np.array(points_in_box_color) + for i in range(len(bbox3d)): + center = bbox3d[i, 0:3] + dim = bbox3d[i, 3:6] + yaw = np.zeros(3) + yaw[rot_axis] = -bbox3d[i, 6] + rot_mat = geometry.get_rotation_matrix_from_xyz(yaw) + + if center_mode == 'lidar_bottom': + center[rot_axis] += dim[ + rot_axis] / 2 # bottom center to gravity center + elif center_mode == 'camera_bottom': + center[rot_axis] -= dim[ + rot_axis] / 2 # bottom center to gravity center + box3d = geometry.OrientedBoundingBox(center, rot_mat, dim) + + line_set = geometry.LineSet.create_from_oriented_bounding_box(box3d) + line_set.paint_uniform_color(bbox_color) + # draw bboxes on visualizer + vis.add_geometry(line_set) + + # change the color of points which are in box + if pcd is not None and mode == 'xyz': + indices = box3d.get_point_indices_within_bounding_box(pcd.points) + points_colors[indices] = in_box_color + + # update points colors + if pcd is not None: + pcd.colors = o3d.utility.Vector3dVector(points_colors) + vis.update_geometry(pcd) + + +def show_pts_boxes(points, + bbox3d=None, + show=True, + save_path=None, + points_size=2, + point_color=(0.5, 0.5, 0.5), + bbox_color=(0, 1, 0), + points_in_box_color=(1, 0, 0), + rot_axis=2, + center_mode='lidar_bottom', + mode='xyz'): + """Draw bbox and points on visualizer. + + Args: + points (numpy.array | torch.tensor, shape=[N, 3+C]): + points to visualize. + bbox3d (numpy.array | torch.tensor, shape=[M, 7]): + 3d bbox (x, y, z, dx, dy, dz, yaw) to visualize. Default: None. + show (bool): whether to show the visualization results. Default: True. + save_path (str): path to save visualized results. 
+
+
+def show_pts_boxes(points,
+                   bbox3d=None,
+                   show=True,
+                   save_path=None,
+                   points_size=2,
+                   point_color=(0.5, 0.5, 0.5),
+                   bbox_color=(0, 1, 0),
+                   points_in_box_color=(1, 0, 0),
+                   rot_axis=2,
+                   center_mode='lidar_bottom',
+                   mode='xyz'):
+    """Draw bbox and points on visualizer.
+
+    Args:
+        points (numpy.array | torch.tensor, shape=[N, 3+C]):
+            points to visualize.
+        bbox3d (numpy.array | torch.tensor, shape=[M, 7]):
+            3d bbox (x, y, z, dx, dy, dz, yaw) to visualize. Default: None.
+        show (bool): whether to show the visualization results. Default: True.
+        save_path (str): path to save visualized results. Default: None.
+        points_size (int): the size of points to show on visualizer.
+            Default: 2.
+        point_color (tuple[float]): the color of points.
+            Default: (0.5, 0.5, 0.5).
+        bbox_color (tuple[float]): the color of bbox. Default: (0, 1, 0).
+        points_in_box_color (tuple[float]):
+            the color of points which are in bbox3d. Default: (1, 0, 0).
+        rot_axis (int): rotation axis of bbox. Default: 2.
+        center_mode (str): indicates whether the center of bbox is the
+            bottom center or the gravity center. Available modes:
+            ['lidar_bottom', 'camera_bottom']. Default: 'lidar_bottom'.
+        mode (str): indicates the type of the input points. Available modes:
+            ['xyz', 'xyzrgb']. Default: 'xyz'.
+    """
+    # TODO: support score and class info
+    assert 0 <= rot_axis <= 2
+
+    # init visualizer
+    vis = o3d.visualization.Visualizer()
+    vis.create_window()
+    mesh_frame = geometry.TriangleMesh.create_coordinate_frame(
+        size=1, origin=[0, 0, 0])  # create coordinate frame
+    vis.add_geometry(mesh_frame)
+
+    # draw points
+    pcd, points_colors = _draw_points(points, vis, points_size, point_color,
+                                      mode)
+
+    # draw boxes
+    if bbox3d is not None:
+        _draw_bboxes(bbox3d, vis, points_colors, pcd, bbox_color,
+                     points_in_box_color, rot_axis, center_mode, mode)
+
+    if show:
+        vis.run()
+
+    if save_path is not None:
+        vis.capture_screen_image(save_path)
+
+    vis.destroy_window()
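With Open3D installed, the module can be smoke-tested on random data (this opens an interactive window; the file name is arbitrary):

import numpy as np
from mmcv.core.visualizer.open3d_vis import show_pts_boxes

points = np.random.rand(1024, 3) * 10 - 5
# One box: (x, y, z, dx, dy, dz, yaw), bottom-centered in LiDAR convention.
boxes = np.array([[0.0, 0.0, -1.0, 4.0, 2.0, 1.5, 0.3]])
show_pts_boxes(points, bbox3d=boxes, show=True, save_path='scene.png')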
+ """ + if isinstance(bbox3d, torch.Tensor): + bbox3d = bbox3d.cpu().numpy() + if isinstance(indices, torch.Tensor): + indices = indices.cpu().numpy() + bbox3d = bbox3d.copy() + + in_box_color = np.array(points_in_box_color) + for i in range(len(bbox3d)): + center = bbox3d[i, 0:3] + dim = bbox3d[i, 3:6] + yaw = np.zeros(3) + # TODO: fix problem of current coordinate system + # dim[0], dim[1] = dim[1], dim[0] # for current coordinate + # yaw[rot_axis] = -(bbox3d[i, 6] - 0.5 * np.pi) + yaw[rot_axis] = -bbox3d[i, 6] + rot_mat = geometry.get_rotation_matrix_from_xyz(yaw) + if center_mode == 'lidar_bottom': + center[rot_axis] += dim[ + rot_axis] / 2 # bottom center to gravity center + elif center_mode == 'camera_bottom': + center[rot_axis] -= dim[ + rot_axis] / 2 # bottom center to gravity center + box3d = geometry.OrientedBoundingBox(center, rot_mat, dim) + + line_set = geometry.LineSet.create_from_oriented_bounding_box(box3d) + line_set.paint_uniform_color(bbox_color) + # draw bboxes on visualizer + vis.add_geometry(line_set) + + # change the color of points which are in box + if pcd is not None and mode == 'xyz': + points_colors[indices[:, i].astype(np.bool)] = in_box_color + + # update points colors + if pcd is not None: + pcd.colors = o3d.utility.Vector3dVector(points_colors) + vis.update_geometry(pcd) + + +def show_pts_index_boxes(points, + bbox3d=None, + show=True, + indices=None, + save_path=None, + points_size=2, + point_color=(0.5, 0.5, 0.5), + bbox_color=(0, 1, 0), + points_in_box_color=(1, 0, 0), + rot_axis=2, + center_mode='lidar_bottom', + mode='xyz'): + """Draw bbox and points on visualizer with indices that indicate which + bbox3d that each point lies in. + + Args: + points (numpy.array | torch.tensor, shape=[N, 3+C]): + points to visualize. + bbox3d (numpy.array | torch.tensor, shape=[M, 7]): + 3d bbox (x, y, z, dx, dy, dz, yaw) to visualize. Default: None. + show (bool): whether to show the visualization results. Default: True. + indices (numpy.array | torch.tensor, shape=[N, M]): + indicate which bbox3d that each point lies in. Default: None. + save_path (str): path to save visualized results. Default: None. + points_size (int): the size of points to show on visualizer. + Default: 2. + point_color (tuple[float]): the color of points. + Default: (0.5, 0.5, 0.5). + bbox_color (tuple[float]): the color of bbox. Default: (0, 1, 0). + points_in_box_color (tuple[float]): + the color of points which are in bbox3d. Default: (1, 0, 0). + rot_axis (int): rotation axis of bbox. Default: 2. + center_mode (bool): indicate the center of bbox is bottom center + or gravity center. avaliable mode + ['lidar_bottom', 'camera_bottom']. Default: 'lidar_bottom'. + mode (str): indicate type of the input points, avaliable mode + ['xyz', 'xyzrgb']. Default: 'xyz'. 
+ """ + # TODO: support score and class info + assert 0 <= rot_axis <= 2 + + # init visualizer + vis = o3d.visualization.Visualizer() + vis.create_window() + mesh_frame = geometry.TriangleMesh.create_coordinate_frame( + size=1, origin=[0, 0, 0]) # create coordinate frame + vis.add_geometry(mesh_frame) + + # draw points + pcd, points_colors = _draw_points(points, vis, points_size, point_color, + mode) + + # draw boxes + if bbox3d is not None: + _draw_bboxes_ind(bbox3d, vis, indices, points_colors, pcd, bbox_color, + points_in_box_color, rot_axis, center_mode, mode) + + if show: + vis.run() + + if save_path is not None: + vis.capture_screen_image(save_path) + + vis.destroy_window() + + +class Visualizer(object): + r"""Online visualizer implemented with Open3d. + + Args: + points (numpy.array, shape=[N, 3+C]): Points to visualize. The Points + cloud is in mode of Coord3DMode.DEPTH (please refer to + core.structures.coord_3d_mode). + bbox3d (numpy.array, shape=[M, 7]): 3d bbox (x, y, z, dx, dy, dz, yaw) + to visualize. The 3d bbox is in mode of Box3DMode.DEPTH with + gravity_center (please refer to core.structures.box_3d_mode). + Default: None. + save_path (str): path to save visualized results. Default: None. + points_size (int): the size of points to show on visualizer. + Default: 2. + point_color (tuple[float]): the color of points. + Default: (0.5, 0.5, 0.5). + bbox_color (tuple[float]): the color of bbox. Default: (0, 1, 0). + points_in_box_color (tuple[float]): + the color of points which are in bbox3d. Default: (1, 0, 0). + rot_axis (int): rotation axis of bbox. Default: 2. + center_mode (bool): indicate the center of bbox is bottom center + or gravity center. avaliable mode + ['lidar_bottom', 'camera_bottom']. Default: 'lidar_bottom'. + mode (str): indicate type of the input points, avaliable mode + ['xyz', 'xyzrgb']. Default: 'xyz'. + """ + + def __init__(self, + points, + bbox3d=None, + save_path=None, + points_size=2, + point_color=(0.5, 0.5, 0.5), + bbox_color=(0, 1, 0), + points_in_box_color=(1, 0, 0), + rot_axis=2, + center_mode='lidar_bottom', + mode='xyz'): + super(Visualizer, self).__init__() + assert 0 <= rot_axis <= 2 + + # init visualizer + self.o3d_visualizer = o3d.visualization.Visualizer() + self.o3d_visualizer.create_window() + mesh_frame = geometry.TriangleMesh.create_coordinate_frame( + size=1, origin=[0, 0, 0]) # create coordinate frame + self.o3d_visualizer.add_geometry(mesh_frame) + + self.points_size = points_size + self.point_color = point_color + self.bbox_color = bbox_color + self.points_in_box_color = points_in_box_color + self.rot_axis = rot_axis + self.center_mode = center_mode + self.mode = mode + self.seg_num = 0 + + # draw points + if points is not None: + self.pcd, self.points_colors = _draw_points( + points, self.o3d_visualizer, points_size, point_color, mode) + + # draw boxes + if bbox3d is not None: + _draw_bboxes(bbox3d, self.o3d_visualizer, self.points_colors, + self.pcd, bbox_color, points_in_box_color, rot_axis, + center_mode, mode) + + def add_bboxes(self, bbox3d, bbox_color=None, points_in_box_color=None): + """Add bounding box to visualizer. + + Args: + bbox3d (numpy.array, shape=[M, 7]): + 3D bbox (x, y, z, dx, dy, dz, yaw) to be visualized. + The 3d bbox is in mode of Box3DMode.DEPTH with + gravity_center (please refer to core.structures.box_3d_mode). + bbox_color (tuple[float]): the color of bbox. Defaule: None. + points_in_box_color (tuple[float]): the color of points which + are in bbox3d. Defaule: None. 
+ """ + if bbox_color is None: + bbox_color = self.bbox_color + if points_in_box_color is None: + points_in_box_color = self.points_in_box_color + _draw_bboxes(bbox3d, self.o3d_visualizer, self.points_colors, self.pcd, + bbox_color, points_in_box_color, self.rot_axis, + self.center_mode, self.mode) + + def add_seg_mask(self, seg_mask_colors): + """Add segmentation mask to visualizer via per-point colorization. + + Args: + seg_mask_colors (numpy.array, shape=[N, 6]): + The segmentation mask whose first 3 dims are point coordinates + and last 3 dims are converted colors. + """ + # we can't draw the colors on existing points + # in case gt and pred mask would overlap + # instead we set a large offset along x-axis for each seg mask + self.seg_num += 1 + offset = (np.array(self.pcd.points).max(0) - + np.array(self.pcd.points).min(0))[0] * 1.2 * self.seg_num + mesh_frame = geometry.TriangleMesh.create_coordinate_frame( + size=1, origin=[offset, 0, 0]) # create coordinate frame for seg + self.o3d_visualizer.add_geometry(mesh_frame) + seg_points = copy.deepcopy(seg_mask_colors) + seg_points[:, 0] += offset + _draw_points( + seg_points, self.o3d_visualizer, self.points_size, mode='xyzrgb') + + def show(self, save_path=None): + """Visualize the points cloud. + + Args: + save_path (str): path to save image. Default: None. + """ + + self.o3d_visualizer.run() + + if save_path is not None: + self.o3d_visualizer.capture_screen_image(save_path) + + self.o3d_visualizer.destroy_window() + return diff --git a/mmcv/core/visualizer/show_result.py b/mmcv/core/visualizer/show_result.py new file mode 100644 index 0000000..eb50be1 --- /dev/null +++ b/mmcv/core/visualizer/show_result.py @@ -0,0 +1,272 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import trimesh +from os import path as osp + +from mmcv.utils import mkdir_or_exist +from mmcv.image import imwrite + +from .image_vis import (draw_camera_bbox3d_on_img, draw_depth_bbox3d_on_img, + draw_lidar_bbox3d_on_img) + + +def _write_obj(points, out_filename): + """Write points into ``obj`` format for meshlab visualization. + + Args: + points (np.ndarray): Points in shape (N, dim). + out_filename (str): Filename to be saved. + """ + N = points.shape[0] + fout = open(out_filename, 'w') + for i in range(N): + if points.shape[1] == 6: + c = points[i, 3:].astype(int) + fout.write( + 'v %f %f %f %d %d %d\n' % + (points[i, 0], points[i, 1], points[i, 2], c[0], c[1], c[2])) + + else: + fout.write('v %f %f %f\n' % + (points[i, 0], points[i, 1], points[i, 2])) + fout.close() + + +def _write_oriented_bbox(scene_bbox, out_filename): + """Export oriented (around Z axis) scene bbox to meshes. + + Args: + scene_bbox(list[ndarray] or ndarray): xyz pos of center and + 3 lengths (dx,dy,dz) and heading angle around Z axis. + Y forward, X right, Z upward. heading angle of positive X is 0, + heading angle of positive Y is 90 degrees. + out_filename(str): Filename. 
+ """ + + def heading2rotmat(heading_angle): + rotmat = np.zeros((3, 3)) + rotmat[2, 2] = 1 + cosval = np.cos(heading_angle) + sinval = np.sin(heading_angle) + rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]]) + return rotmat + + def convert_oriented_box_to_trimesh_fmt(box): + ctr = box[:3] + lengths = box[3:6] + trns = np.eye(4) + trns[0:3, 3] = ctr + trns[3, 3] = 1.0 + trns[0:3, 0:3] = heading2rotmat(box[6]) + box_trimesh_fmt = trimesh.creation.box(lengths, trns) + return box_trimesh_fmt + + if len(scene_bbox) == 0: + scene_bbox = np.zeros((1, 7)) + scene = trimesh.scene.Scene() + for box in scene_bbox: + scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box)) + + mesh_list = trimesh.util.concatenate(scene.dump()) + # save to obj file + trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='obj') + + return + + +def show_result(points, + gt_bboxes, + pred_bboxes, + out_dir, + filename, + show=True, + snapshot=False): + """Convert results into format that is directly readable for meshlab. + + Args: + points (np.ndarray): Points. + gt_bboxes (np.ndarray): Ground truth boxes. + pred_bboxes (np.ndarray): Predicted boxes. + out_dir (str): Path of output directory + filename (str): Filename of the current frame. + show (bool): Visualize the results online. Defaults to False. + snapshot (bool): Whether to save the online results. Defaults to False. + """ + result_path = osp.join(out_dir, filename) + mkdir_or_exist(result_path) + + if show: + from .open3d_vis import Visualizer + + vis = Visualizer(points) + if pred_bboxes is not None: + vis.add_bboxes(bbox3d=pred_bboxes) + if gt_bboxes is not None: + vis.add_bboxes(bbox3d=gt_bboxes, bbox_color=(0, 0, 1)) + show_path = osp.join(result_path, + f'{filename}_online.png') if snapshot else None + vis.show(show_path) + + if points is not None: + _write_obj(points, osp.join(result_path, f'{filename}_points.obj')) + + if gt_bboxes is not None: + # bottom center to gravity center + gt_bboxes[..., 2] += gt_bboxes[..., 5] / 2 + # the positive direction for yaw in meshlab is clockwise + gt_bboxes[:, 6] *= -1 + _write_oriented_bbox(gt_bboxes, + osp.join(result_path, f'{filename}_gt.obj')) + + if pred_bboxes is not None: + # bottom center to gravity center + pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2 + # the positive direction for yaw in meshlab is clockwise + pred_bboxes[:, 6] *= -1 + _write_oriented_bbox(pred_bboxes, + osp.join(result_path, f'{filename}_pred.obj')) + + +def show_seg_result(points, + gt_seg, + pred_seg, + out_dir, + filename, + palette, + ignore_index=None, + show=True, + snapshot=False): + """Convert results into format that is directly readable for meshlab. + + Args: + points (np.ndarray): Points. + gt_seg (np.ndarray): Ground truth segmentation mask. + pred_seg (np.ndarray): Predicted segmentation mask. + out_dir (str): Path of output directory + filename (str): Filename of the current frame. + palette (np.ndarray): Mapping between class labels and colors. + ignore_index (int, optional): The label index to be ignored, e.g. \ + unannotated points. Defaults to None. + show (bool, optional): Visualize the results online. Defaults to False. + snapshot (bool, optional): Whether to save the online results. \ + Defaults to False. 
+ """ + # we need 3D coordinates to visualize segmentation mask + if gt_seg is not None or pred_seg is not None: + assert points is not None, \ + '3D coordinates are required for segmentation visualization' + + # filter out ignored points + if gt_seg is not None and ignore_index is not None: + if points is not None: + points = points[gt_seg != ignore_index] + if pred_seg is not None: + pred_seg = pred_seg[gt_seg != ignore_index] + gt_seg = gt_seg[gt_seg != ignore_index] + + if gt_seg is not None: + gt_seg_color = palette[gt_seg] + gt_seg_color = np.concatenate([points[:, :3], gt_seg_color], axis=1) + if pred_seg is not None: + pred_seg_color = palette[pred_seg] + pred_seg_color = np.concatenate([points[:, :3], pred_seg_color], + axis=1) + + result_path = osp.join(out_dir, filename) + mkdir_or_exist(result_path) + + # online visualization of segmentation mask + # we show three masks in a row, scene_points, gt_mask, pred_mask + if show: + from .open3d_vis import Visualizer + mode = 'xyzrgb' if points.shape[1] == 6 else 'xyz' + vis = Visualizer(points, mode=mode) + if gt_seg is not None: + vis.add_seg_mask(gt_seg_color) + if pred_seg is not None: + vis.add_seg_mask(pred_seg_color) + show_path = osp.join(result_path, + f'{filename}_online.png') if snapshot else None + vis.show(show_path) + + if points is not None: + _write_obj(points, osp.join(result_path, f'{filename}_points.obj')) + + if gt_seg is not None: + _write_obj(gt_seg_color, osp.join(result_path, f'{filename}_gt.obj')) + + if pred_seg is not None: + _write_obj(pred_seg_color, osp.join(result_path, + f'{filename}_pred.obj')) + + +def show_multi_modality_result(img, + gt_bboxes, + pred_bboxes, + proj_mat, + out_dir, + filename, + box_mode='lidar', + img_metas=None, + show=True, + gt_bbox_color=(61, 102, 255), + pred_bbox_color=(241, 101, 72)): + """Convert multi-modality detection results into 2D results. + + Project the predicted 3D bbox to 2D image plane and visualize them. + + Args: + img (np.ndarray): The numpy array of image in cv2 fashion. + gt_bboxes (:obj:`BaseInstance3DBoxes`): Ground truth boxes. + pred_bboxes (:obj:`BaseInstance3DBoxes`): Predicted boxes. + proj_mat (numpy.array, shape=[4, 4]): The projection matrix + according to the camera intrinsic parameters. + out_dir (str): Path of output directory. + filename (str): Filename of the current frame. + box_mode (str): Coordinate system the boxes are in. Should be one of + 'depth', 'lidar' and 'camera'. Defaults to 'lidar'. + img_metas (dict): Used in projecting depth bbox. + show (bool): Visualize the results online. Defaults to False. + gt_bbox_color (str or tuple(int)): Color of bbox lines. + The tuple of color should be in BGR order. Default: (255, 102, 61) + pred_bbox_color (str or tuple(int)): Color of bbox lines. + The tuple of color should be in BGR order. 
Default: (72, 101, 241) + """ + if box_mode == 'depth': + draw_bbox = draw_depth_bbox3d_on_img + elif box_mode == 'lidar': + draw_bbox = draw_lidar_bbox3d_on_img + elif box_mode == 'camera': + draw_bbox = draw_camera_bbox3d_on_img + else: + raise NotImplementedError(f'unsupported box mode {box_mode}') + + result_path = osp.join(out_dir, filename) + mkdir_or_exist(result_path) + + if show: + show_img = img.copy() + if gt_bboxes is not None: + show_img = draw_bbox( + gt_bboxes, show_img, proj_mat, img_metas, color=gt_bbox_color) + if pred_bboxes is not None: + show_img = draw_bbox( + pred_bboxes, + show_img, + proj_mat, + img_metas, + color=pred_bbox_color) + mmcv.imshow(show_img, win_name='project_bbox3d_img', wait_time=0) + + if img is not None: + imwrite(img, osp.join(result_path, f'{filename}_img.png')) + + if gt_bboxes is not None: + gt_img = draw_bbox( + gt_bboxes, img, proj_mat, img_metas, color=gt_bbox_color) + imwrite(gt_img, osp.join(result_path, f'{filename}_gt.png')) + + if pred_bboxes is not None: + pred_img = draw_bbox( + pred_bboxes, img, proj_mat, img_metas, color=pred_bbox_color) + imwrite(pred_img, osp.join(result_path, f'{filename}_pred.png')) diff --git a/mmcv/core/voxel/__init__.py b/mmcv/core/voxel/__init__.py new file mode 100644 index 0000000..8d69543 --- /dev/null +++ b/mmcv/core/voxel/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .builder import build_voxel_generator +from .voxel_generator import VoxelGenerator + +__all__ = ['build_voxel_generator', 'VoxelGenerator'] diff --git a/mmcv/core/voxel/builder.py b/mmcv/core/voxel/builder.py new file mode 100644 index 0000000..d7fe494 --- /dev/null +++ b/mmcv/core/voxel/builder.py @@ -0,0 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from . import voxel_generator +from mmcv.utils import obj_from_dict + +def build_voxel_generator(cfg, **kwargs): + """Builder of voxel generator.""" + if isinstance(cfg, voxel_generator.VoxelGenerator): + return cfg + elif isinstance(cfg, dict): + return obj_from_dict( + cfg, voxel_generator, default_args=kwargs) + else: + raise TypeError('Invalid type {} for building a sampler'.format( + type(cfg))) diff --git a/mmcv/core/voxel/voxel_generator.py b/mmcv/core/voxel/voxel_generator.py new file mode 100644 index 0000000..615b749 --- /dev/null +++ b/mmcv/core/voxel/voxel_generator.py @@ -0,0 +1,280 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numba +import numpy as np + + +class VoxelGenerator(object): + """Voxel generator in numpy implementation. + + Args: + voxel_size (list[float]): Size of a single voxel + point_cloud_range (list[float]): Range of points + max_num_points (int): Maximum number of points in a single voxel + max_voxels (int, optional): Maximum number of voxels. + Defaults to 20000. 
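+
+    Example:
+        An illustrative KITTI-like configuration (all numbers here are
+        placeholders, not tuned values):
+
+        >>> import numpy as np
+        >>> generator = VoxelGenerator(
+        ...     voxel_size=[0.2, 0.2, 4.0],
+        ...     point_cloud_range=[0., -40., -3., 70.4, 40., 1.],
+        ...     max_num_points=35)
+        >>> points = np.random.rand(100, 4).astype(np.float32)
+        >>> voxels, coors, num_points = generator.generate(points)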
+ """ + + def __init__(self, + voxel_size, + point_cloud_range, + max_num_points, + max_voxels=20000): + + point_cloud_range = np.array(point_cloud_range, dtype=np.float32) + # [0, -40, -3, 70.4, 40, 1] + voxel_size = np.array(voxel_size, dtype=np.float32) + grid_size = (point_cloud_range[3:] - + point_cloud_range[:3]) / voxel_size + grid_size = np.round(grid_size).astype(np.int64) + + self._voxel_size = voxel_size + self._point_cloud_range = point_cloud_range + self._max_num_points = max_num_points + self._max_voxels = max_voxels + self._grid_size = grid_size + + def generate(self, points): + """Generate voxels given points.""" + return points_to_voxel(points, self._voxel_size, + self._point_cloud_range, self._max_num_points, + True, self._max_voxels) + + @property + def voxel_size(self): + """list[float]: Size of a single voxel.""" + return self._voxel_size + + @property + def max_num_points_per_voxel(self): + """int: Maximum number of points per voxel.""" + return self._max_num_points + + @property + def point_cloud_range(self): + """list[float]: Range of point cloud.""" + return self._point_cloud_range + + @property + def grid_size(self): + """np.ndarray: The size of grids.""" + return self._grid_size + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + indent = ' ' * (len(repr_str) + 1) + repr_str += f'(voxel_size={self._voxel_size},\n' + repr_str += indent + 'point_cloud_range=' + repr_str += f'{self._point_cloud_range.tolist()},\n' + repr_str += indent + f'max_num_points={self._max_num_points},\n' + repr_str += indent + f'max_voxels={self._max_voxels},\n' + repr_str += indent + f'grid_size={self._grid_size.tolist()}' + repr_str += ')' + return repr_str + + +def points_to_voxel(points, + voxel_size, + coors_range, + max_points=35, + reverse_index=True, + max_voxels=20000): + """convert kitti points(N, >=3) to voxels. + + Args: + points (np.ndarray): [N, ndim]. points[:, :3] contain xyz points and \ + points[:, 3:] contain other information such as reflectivity. + voxel_size (list, tuple, np.ndarray): [3] xyz, indicate voxel size + coors_range (list[float | tuple[float] | ndarray]): Voxel range. \ + format: xyzxyz, minmax + max_points (int): Indicate maximum points contained in a voxel. + reverse_index (bool): Whether return reversed coordinates. \ + if points has xyz format and reverse_index is True, output \ + coordinates will be zyx format, but points in features always \ + xyz format. + max_voxels (int): Maximum number of voxels this function creates. \ + For second, 20000 is a good choice. Points should be shuffled for \ + randomness before this function because max_voxels drops points. + + Returns: + tuple[np.ndarray]: + voxels: [M, max_points, ndim] float tensor. only contain points. + coordinates: [M, 3] int32 tensor. + num_points_per_voxel: [M] int32 tensor. + """ + if not isinstance(voxel_size, np.ndarray): + voxel_size = np.array(voxel_size, dtype=points.dtype) + if not isinstance(coors_range, np.ndarray): + coors_range = np.array(coors_range, dtype=points.dtype) + voxelmap_shape = (coors_range[3:] - coors_range[:3]) / voxel_size + voxelmap_shape = tuple(np.round(voxelmap_shape).astype(np.int32).tolist()) + if reverse_index: + voxelmap_shape = voxelmap_shape[::-1] + # don't create large array in jit(nopython=True) code. 
+ num_points_per_voxel = np.zeros(shape=(max_voxels, ), dtype=np.int32) + coor_to_voxelidx = -np.ones(shape=voxelmap_shape, dtype=np.int32) + voxels = np.zeros( + shape=(max_voxels, max_points, points.shape[-1]), dtype=points.dtype) + coors = np.zeros(shape=(max_voxels, 3), dtype=np.int32) + if reverse_index: + voxel_num = _points_to_voxel_reverse_kernel( + points, voxel_size, coors_range, num_points_per_voxel, + coor_to_voxelidx, voxels, coors, max_points, max_voxels) + + else: + voxel_num = _points_to_voxel_kernel(points, voxel_size, coors_range, + num_points_per_voxel, + coor_to_voxelidx, voxels, coors, + max_points, max_voxels) + + coors = coors[:voxel_num] + voxels = voxels[:voxel_num] + num_points_per_voxel = num_points_per_voxel[:voxel_num] + + return voxels, coors, num_points_per_voxel + + +@numba.jit(nopython=True) +def _points_to_voxel_reverse_kernel(points, + voxel_size, + coors_range, + num_points_per_voxel, + coor_to_voxelidx, + voxels, + coors, + max_points=35, + max_voxels=20000): + """convert kitti points(N, >=3) to voxels. + + Args: + points (np.ndarray): [N, ndim]. points[:, :3] contain xyz points and \ + points[:, 3:] contain other information such as reflectivity. + voxel_size (list, tuple, np.ndarray): [3] xyz, indicate voxel size \ + coors_range (list[float | tuple[float] | ndarray]): Range of voxels. \ + format: xyzxyz, minmax + num_points_per_voxel (int): Number of points per voxel. + coor_to_voxel_idx (np.ndarray): A voxel grid of shape (D, H, W), \ + which has the same shape as the complete voxel map. It indicates \ + the index of each corresponding voxel. + voxels (np.ndarray): Created empty voxels. + coors (np.ndarray): Created coordinates of each voxel. + max_points (int): Indicate maximum points contained in a voxel. + max_voxels (int): Maximum number of voxels this function create. \ + for second, 20000 is a good choice. Points should be shuffled for \ + randomness before this function because max_voxels drops points. + + Returns: + tuple[np.ndarray]: + voxels: Shape [M, max_points, ndim], only contain points. + coordinates: Shape [M, 3]. + num_points_per_voxel: Shape [M]. + """ + # put all computations to one loop. + # we shouldn't create large array in main jit code, otherwise + # reduce performance + N = points.shape[0] + # ndim = points.shape[1] - 1 + ndim = 3 + ndim_minus_1 = ndim - 1 + grid_size = (coors_range[3:] - coors_range[:3]) / voxel_size + # np.round(grid_size) + # grid_size = np.round(grid_size).astype(np.int64)(np.int32) + grid_size = np.round(grid_size, 0, grid_size).astype(np.int32) + coor = np.zeros(shape=(3, ), dtype=np.int32) + voxel_num = 0 + failed = False + for i in range(N): + failed = False + for j in range(ndim): + c = np.floor((points[i, j] - coors_range[j]) / voxel_size[j]) + if c < 0 or c >= grid_size[j]: + failed = True + break + coor[ndim_minus_1 - j] = c + if failed: + continue + voxelidx = coor_to_voxelidx[coor[0], coor[1], coor[2]] + if voxelidx == -1: + voxelidx = voxel_num + if voxel_num >= max_voxels: + continue + voxel_num += 1 + coor_to_voxelidx[coor[0], coor[1], coor[2]] = voxelidx + coors[voxelidx] = coor + num = num_points_per_voxel[voxelidx] + if num < max_points: + voxels[voxelidx, num] = points[i] + num_points_per_voxel[voxelidx] += 1 + return voxel_num + + +@numba.jit(nopython=True) +def _points_to_voxel_kernel(points, + voxel_size, + coors_range, + num_points_per_voxel, + coor_to_voxelidx, + voxels, + coors, + max_points=35, + max_voxels=20000): + """convert kitti points(N, >=3) to voxels. 
+ + Args: + points (np.ndarray): [N, ndim]. points[:, :3] contain xyz points and \ + points[:, 3:] contain other information such as reflectivity. + voxel_size (list, tuple, np.ndarray): [3] xyz, indicate voxel size. + coors_range (list[float | tuple[float] | ndarray]): Range of voxels. \ + format: xyzxyz, minmax + num_points_per_voxel (int): Number of points per voxel. + coor_to_voxel_idx (np.ndarray): A voxel grid of shape (D, H, W), \ + which has the same shape as the complete voxel map. It indicates \ + the index of each corresponding voxel. + voxels (np.ndarray): Created empty voxels. + coors (np.ndarray): Created coordinates of each voxel. + max_points (int): Indicate maximum points contained in a voxel. + max_voxels (int): Maximum number of voxels this function create. \ + for second, 20000 is a good choice. Points should be shuffled for \ + randomness before this function because max_voxels drops points. + + Returns: + tuple[np.ndarray]: + voxels: Shape [M, max_points, ndim], only contain points. + coordinates: Shape [M, 3]. + num_points_per_voxel: Shape [M]. + """ + N = points.shape[0] + # ndim = points.shape[1] - 1 + ndim = 3 + grid_size = (coors_range[3:] - coors_range[:3]) / voxel_size + # grid_size = np.round(grid_size).astype(np.int64)(np.int32) + grid_size = np.round(grid_size, 0, grid_size).astype(np.int32) + + # lower_bound = coors_range[:3] + # upper_bound = coors_range[3:] + coor = np.zeros(shape=(3, ), dtype=np.int32) + voxel_num = 0 + failed = False + for i in range(N): + failed = False + for j in range(ndim): + c = np.floor((points[i, j] - coors_range[j]) / voxel_size[j]) + if c < 0 or c >= grid_size[j]: + failed = True + break + coor[j] = c + if failed: + continue + voxelidx = coor_to_voxelidx[coor[0], coor[1], coor[2]] + if voxelidx == -1: + voxelidx = voxel_num + if voxel_num >= max_voxels: + continue + voxel_num += 1 + coor_to_voxelidx[coor[0], coor[1], coor[2]] = voxelidx + coors[voxelidx] = coor + num = num_points_per_voxel[voxelidx] + if num < max_points: + voxels[voxelidx, num] = points[i] + num_points_per_voxel[voxelidx] += 1 + return voxel_num diff --git a/mmcv/datasets/B2D_dataset.py b/mmcv/datasets/B2D_dataset.py new file mode 100644 index 0000000..530c8eb --- /dev/null +++ b/mmcv/datasets/B2D_dataset.py @@ -0,0 +1,504 @@ +import copy +import numpy as np +from mmcv.datasets import DATASETS +from os import path as osp +import torch +from pyquaternion import Quaternion +from mmcv.utils import save_tensor +from mmcv.parallel import DataContainer as DC +import random +from .custom_3d import Custom3DDataset +from .pipelines import Compose +from mmcv.core.bbox.structures.lidar_box3d import LiDARInstance3DBoxes +from mmcv.fileio.io import load, dump +from mmcv.utils import track_iter_progress, mkdir_or_exist +import tempfile +from .nuscenes_styled_eval_utils import DetectionMetrics, EvalBoxes, DetectionBox,center_distance,accumulate,DetectionMetricDataList,calc_ap, calc_tp, quaternion_yaw +import json + +@DATASETS.register_module() +class B2D_Dataset(Custom3DDataset): + + + def __init__(self, queue_length=4, bev_size=(200, 200),overlap_test=False,with_velocity=True,sample_interval=5,name_mapping= None,eval_cfg = None ,*args, **kwargs): + super().__init__(*args, **kwargs) + self.queue_length = queue_length + self.overlap_test = overlap_test + self.with_velocity = with_velocity + if name_mapping is not None: + self.NameMapping = name_mapping + else: + self.NameMapping = { + 'vehicle.bh.crossbike': 'bicycle', + "vehicle.diamondback.century": 'bicycle', + 
"vehicle.chevrolet.impala": 'car', + "vehicle.dodge.charger_2020": 'car', + "vehicle.dodge.charger_police_2020": 'car', + "vehicle.lincoln.mkz_2017": 'car', + "vehicle.lincoln.mkz_2020": 'car', + "vehicle.mini.cooper_s_2021": 'car', + "vehicle.mercedes.coupe_2020": 'car', + "vehicle.ford.mustang": 'car', + "vehicle.nissan.patrol_2021": 'car', + "vehicle.audi.tt": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/FordCrown/SM_FordCrown_parked.SM_FordCrown_parked": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/VolkswagenT2/SM_VolkswagenT2_2021_Parked.SM_VolkswagenT2_2021_Parked": "van", + "traffic.speed_limit.30": 'speed_limit', + "traffic.speed_limit.40": 'speed_limit', + "traffic.speed_limit.50": 'speed_limit', + "traffic.speed_limit.60": 'speed_limit', + "traffic.traffic_light": 'traffic_light', + "traffic.stop": 'stop', + } + if eval_cfg is not None: + self.eval_cfg = eval_cfg + else: + self.eval_cfg = { + "dist_ths": [0.5, 1.0, 2.0, 4.0], + "dist_th_tp": 2.0, + "min_recall": 0.1, + "min_precision": 0.1, + "mean_ap_weight": 5, + "class_names":['car','van','bicycle'], + "tp_metrics":['trans_err', 'scale_err', 'orient_err', 'vel_err'], + "err_name_maping":{'trans_err': 'mATE','scale_err': 'mASE','orient_err': 'mAOE','vel_err': 'mAVE','attr_err': 'mAAE'} + } + self.sample_interval = sample_interval + + + def invert_pose(self, pose): + inv_pose = np.eye(4) + inv_pose[:3, :3] = np.transpose(pose[:3, :3]) + inv_pose[:3, -1] = - inv_pose[:3, :3] @ pose[:3, -1] + return inv_pose + + def prepare_train_data(self, index): + """ + Training data preparation. + Args: + index (int): Index for accessing the target data. + Returns: + dict: Training data dict of the corresponding index. + """ + queue = [] + index_list = list(range(index-self.queue_length*self.sample_interval, index,self.sample_interval)) + random.shuffle(index_list) + index_list = sorted(index_list[1:]) + index_list.append(index) + for i in index_list: + i = max(0, i) + input_dict = self.get_data_info(i) + if input_dict is None: + return None + self.pre_pipeline(input_dict) + example = self.pipeline(input_dict) + if self.filter_empty_gt and \ + (example is None or ~(example['gt_labels_3d']._data != -1).any()): + return None + queue.append(example) + return self.union2one(queue) + + + def union2one(self, queue): + imgs_list = [each['img'].data for each in queue] + metas_map = {} + prev_scene_token = None + prev_pos = None + prev_angle = None + for i, each in enumerate(queue): + metas_map[i] = each['img_metas'].data + if metas_map[i]['folder'] != prev_scene_token: + metas_map[i]['prev_bev_exists'] = False + prev_scene_token = metas_map[i]['folder'] + prev_pos = copy.deepcopy(metas_map[i]['can_bus'][:3]) + prev_angle = copy.deepcopy(metas_map[i]['can_bus'][-1]) + metas_map[i]['can_bus'][:3] = 0 + metas_map[i]['can_bus'][-1] = 0 + else: + metas_map[i]['prev_bev_exists'] = True + tmp_pos = copy.deepcopy(metas_map[i]['can_bus'][:3]) + tmp_angle = copy.deepcopy(metas_map[i]['can_bus'][-1]) + metas_map[i]['can_bus'][:3] -= prev_pos + metas_map[i]['can_bus'][-1] -= prev_angle + prev_pos = copy.deepcopy(tmp_pos) + prev_angle = copy.deepcopy(tmp_angle) + queue[-1]['img'] = DC(torch.stack(imgs_list), cpu_only=False, stack=True) + queue[-1]['img_metas'] = DC(metas_map, cpu_only=True) + queue = queue[-1] + return queue + + def get_data_info(self, index): + """Get data info according to the given index. + + Args: + index (int): Index of the sample data to get. 
+
+        Returns:
+            dict: Data information that will be passed to the data \
+                preprocessing pipelines. It includes the following keys:
+
+                - sample_idx (str): Sample index.
+                - pts_filename (str): Filename of point clouds.
+                - sweeps (list[dict]): Infos of sweeps.
+                - timestamp (float): Sample timestamp.
+                - img_filename (str, optional): Image filename.
+                - lidar2img (list[np.ndarray], optional): Transformations \
+                    from lidar to different cameras.
+                - ann_info (dict): Annotation info.
+        """
+        info = self.data_infos[index]
+        for i in range(len(info['gt_names'])):
+            if info['gt_names'][i] in self.NameMapping.keys():
+                info['gt_names'][i] = self.NameMapping[info['gt_names'][i]]
+
+        input_dict = dict(
+            folder=info['folder'],
+            scene_token=info['folder'],
+            frame_idx=info['frame_idx'],
+            # ego_yaw is consumed in radians below, so invalid yaws fall back
+            # to np.pi / 2 (facing +Y), as in B2D_E2E_Dataset
+            ego_yaw=np.nan_to_num(info['ego_yaw'], nan=np.pi / 2),
+            ego_translation=info['ego_translation'],
+            sensors=info['sensors'],
+            gt_ids=info['gt_ids'],
+            gt_boxes=info['gt_boxes'],
+            gt_names=info['gt_names'],
+            ego_vel=info['ego_vel'],
+            ego_accel=info['ego_accel'],
+            ego_rotation_rate=info['ego_rotation_rate'],
+        )
+        if self.modality['use_camera']:
+            image_paths = []
+            lidar2img_rts = []
+            lidar2cam_rts = []
+            cam_intrinsics = []
+            lidar2ego = info['sensors']['LIDAR_TOP']['lidar2ego']
+            for sensor_type, cam_info in info['sensors'].items():
+                if not 'CAM' in sensor_type:
+                    continue
+                image_paths.append(osp.join(self.data_root, cam_info['data_path']))
+                cam2ego = cam_info['cam2ego']
+                intrinsic = cam_info['intrinsic']
+                intrinsic_pad = np.eye(4)
+                intrinsic_pad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic
+                lidar2cam = self.invert_pose(cam2ego) @ lidar2ego
+                lidar2img = intrinsic_pad @ lidar2cam
+                lidar2img_rts.append(lidar2img)
+                cam_intrinsics.append(intrinsic_pad)
+                lidar2cam_rts.append(lidar2cam)
+
+            input_dict.update(
+                dict(
+                    img_filename=image_paths,
+                    lidar2img=lidar2img_rts,
+                    cam_intrinsic=cam_intrinsics,
+                    lidar2cam=lidar2cam_rts,
+                ))
+
+        if not self.test_mode:
+            annos = self.get_ann_info(index)
+            input_dict['ann_info'] = annos
+        yaw = input_dict['ego_yaw']
+        rotation = list(Quaternion(axis=[0, 0, 1], radians=yaw))
+        if yaw < 0:
+            yaw += 2 * np.pi
+        yaw_in_degree = yaw / np.pi * 180
+        # can_bus layout (18 floats): [0:3] translation, [3:7] rotation
+        # quaternion, [7:10] velocity, [10:13] acceleration, [13:16] rotation
+        # rate, [16] yaw in radians, [17] yaw in degrees
+        can_bus = np.zeros(18)
+        can_bus[:3] = input_dict['ego_translation']
+        can_bus[3:7] = rotation
+        can_bus[7:10] = input_dict['ego_vel']
+        can_bus[10:13] = input_dict['ego_accel']
+        can_bus[13:16] = input_dict['ego_rotation_rate']
+        can_bus[16] = yaw
+        can_bus[17] = yaw_in_degree
+        input_dict['can_bus'] = can_bus
+
+        return input_dict
+
+    def get_ann_info(self, index):
+        """Get annotation info according to the given index.
+
+        Args:
+            index (int): Index of the annotation data to get.
+
+        Returns:
+            dict: Annotation information that consists of the following keys:
+
+                - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): \
+                    3D ground truth bboxes
+                - gt_labels_3d (np.ndarray): Labels of ground truths.
+                - gt_names (list[str]): Class names of ground truths.
+ """ + info = self.data_infos[index] + # filter out bbox containing no points + mask = (info['num_points'] >= -1) + gt_bboxes_3d = info['gt_boxes'][mask] + gt_names_3d = info['gt_names'][mask] + gt_labels_3d = [] + + for cat in gt_names_3d: + if cat in self.CLASSES: + gt_labels_3d.append(self.CLASSES.index(cat)) + else: + gt_labels_3d.append(-1) + gt_labels_3d = np.array(gt_labels_3d) + if not self.with_velocity: + gt_bboxes_3d = gt_bboxes_3d[:,0:7] + + gt_bboxes_3d = LiDARInstance3DBoxes( + gt_bboxes_3d, + box_dim=gt_bboxes_3d.shape[-1], + origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d) + anns_results = dict( + gt_bboxes_3d=gt_bboxes_3d, + gt_labels_3d=gt_labels_3d, + gt_names=gt_names_3d) + return anns_results + + def __getitem__(self, idx): + """Get item from infos according to the given index. + Returns: + dict: Data dictionary of the corresponding index. + """ + if self.test_mode: + return self.prepare_test_data(idx) + while True: + + data = self.prepare_train_data(idx) + if data is None: + idx = self._rand_another(idx) + continue + return data + + def evaluate(self, + results, + metric='bbox', + logger=None, + jsonfile_prefix=None, + result_names=['pts_bbox'], + show=False, + out_dir=None, + pipeline=None): + """Evaluation in nuScenes protocol. + + Args: + results (list[dict]): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + jsonfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + show (bool): Whether to visualize. + Default: False. + out_dir (str): Path to save the visualization results. + Default: None. + pipeline (list[dict], optional): raw data loading for showing. + Default: None. + + Returns: + dict[str, float]: Results of each evaluation metric. + """ + result_files, tmp_dir = self.format_results(results, jsonfile_prefix) + result_path = result_files['pts_bbox'] + with open(result_path) as f: + result_data = json.load(f) + pred_boxes = EvalBoxes.deserialize(result_data['results'], DetectionBox) + meta = result_data['meta'] + + + gt_boxes = self.load_gt() + + metric_data_list = DetectionMetricDataList() + for class_name in self.eval_cfg['class_names']: + for dist_th in self.eval_cfg['dist_ths']: + md = accumulate(gt_boxes, pred_boxes, class_name, center_distance, dist_th) + metric_data_list.set(class_name, dist_th, md) + metrics = DetectionMetrics(self.eval_cfg) + + for class_name in self.eval_cfg['class_names']: + # Compute APs. + for dist_th in self.eval_cfg['dist_ths']: + metric_data = metric_data_list[(class_name, dist_th)] + ap = calc_ap(metric_data, self.eval_cfg['min_recall'], self.eval_cfg['min_precision']) + metrics.add_label_ap(class_name, dist_th, ap) + + # Compute TP metrics. 
+ for metric_name in self.eval_cfg['tp_metrics']: + metric_data = metric_data_list[(class_name, self.eval_cfg['dist_th_tp'])] + tp = calc_tp(metric_data, self.eval_cfg['min_recall'], metric_name) + metrics.add_label_tp(class_name, metric_name, tp) + + metrics_summary = metrics.serialize() + metrics_summary['meta'] = meta.copy() + print('mAP: %.4f' % (metrics_summary['mean_ap'])) + err_name_mapping = { + 'trans_err': 'mATE', + 'scale_err': 'mASE', + 'orient_err': 'mAOE', + 'vel_err': 'mAVE', + } + for tp_name, tp_val in metrics_summary['tp_errors'].items(): + print('%s: %.4f' % (err_name_mapping[tp_name], tp_val)) + print('NDS: %.4f' % (metrics_summary['nd_score'])) + #print('Eval time: %.1fs' % metrics_summary['eval_time']) + + # Print per-class metrics. + print() + print('Per-class results:') + print('Object Class\tAP\tATE\tASE\tAOE\tAVE') + class_aps = metrics_summary['mean_dist_aps'] + class_tps = metrics_summary['label_tp_errors'] + for class_name in class_aps.keys(): + print('%s\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f' + % (class_name, class_aps[class_name], + class_tps[class_name]['trans_err'], + class_tps[class_name]['scale_err'], + class_tps[class_name]['orient_err'], + class_tps[class_name]['vel_err'])) + + detail = dict() + metric_prefix = 'bbox_NuScenes' + for name in self.eval_cfg['class_names']: + for k, v in metrics_summary['label_aps'][name].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val + for k, v in metrics_summary['label_tp_errors'][name].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}_{}'.format(metric_prefix, name, k)] = val + for k, v in metrics_summary['tp_errors'].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}'.format(metric_prefix,self.eval_cfg['err_name_maping'][k])] = val + detail['{}/NDS'.format(metric_prefix)] = metrics_summary['nd_score'] + detail['{}/mAP'.format(metric_prefix)] = metrics_summary['mean_ap'] + + + return detail + + + def load_gt(self): + all_annotations = EvalBoxes() + for i in range(len(self.data_infos)): + sample_boxes = [] + sample_data = self.data_infos[i] + + gt_boxes = sample_data['gt_boxes'] + + for j in range(gt_boxes.shape[0]): + class_name = self.NameMapping[sample_data['gt_names'][j]] + if not class_name in self.eval_cfg['class_range'].keys(): + continue + range_x, range_y = self.eval_cfg['class_range'][class_name] + if abs(gt_boxes[j,0]) > range_x or abs(gt_boxes[j,1]) > range_y: + continue + sample_boxes.append(DetectionBox( + sample_token=sample_data['folder']+'_'+str(sample_data['frame_idx']), + translation=gt_boxes[j,0:3], + size=gt_boxes[j,3:6], + rotation=list(Quaternion(axis=[0, 0, 1], radians=-gt_boxes[j,6]-np.pi/2)), + velocity=gt_boxes[j,7:9], + num_pts=int(sample_data['num_points'][j]), + detection_name=self.NameMapping[sample_data['gt_names'][j]], + detection_score=-1.0, + attribute_name=self.NameMapping[sample_data['gt_names'][j]] + )) + all_annotations.add_boxes(sample_data['folder']+'_'+str(sample_data['frame_idx']), sample_boxes) + return all_annotations + + def _format_bbox(self, results, jsonfile_prefix=None): + """Convert the results to the standard format. + + Args: + results (list[dict]): Testing results of the dataset. + jsonfile_prefix (str): The prefix of the output jsonfile. + You can specify the output directory/filename by + modifying the jsonfile_prefix. Default: None. + + Returns: + str: Path of the output json file. 
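+
+        Example:
+            One converted record looks roughly like this (every field value
+            below is made up for illustration):
+
+            >>> nusc_anno = dict(
+            ...     sample_token='Town01_0000_42',
+            ...     translation=[10.2, -3.1, 0.8],
+            ...     size=[4.5, 1.9, 1.6],
+            ...     rotation=[0.97, 0.0, 0.0, 0.24],
+            ...     velocity=[2.1, 0.0],
+            ...     detection_name='car',
+            ...     detection_score=0.87,
+            ...     attribute_name='car')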
+ """ + + + nusc_annos = {} + mapped_class_names = self.CLASSES + + print('Start to convert detection format...') + for sample_id, det in enumerate(track_iter_progress(results)): + #pdb.set_trace() + annos = [] + box3d = det['boxes_3d'] + scores = det['scores_3d'] + labels = det['labels_3d'] + box_gravity_center = box3d.gravity_center + box_dims = box3d.dims + box_yaw = box3d.yaw.numpy() + box_yaw = -box_yaw - np.pi / 2 + sample_token = self.data_infos[sample_id]['folder'] + '_' + str(self.data_infos[sample_id]['frame_idx']) + + + + for i in range(len(box3d)): + #import pdb;pdb.set_trace() + quat = list(Quaternion(axis=[0, 0, 1], radians=box_yaw[i])) + velocity = [box3d.tensor[i, 7].item(),box3d.tensor[i, 8].item()] + name = mapped_class_names[labels[i]] + nusc_anno = dict( + sample_token=sample_token, + translation=box_gravity_center[i].tolist(), + size=box_dims[i].tolist(), + rotation=quat, + velocity=velocity, + detection_name=name, + detection_score=scores[i].item(), + attribute_name=name) + annos.append(nusc_anno) + nusc_annos[sample_token] = annos + nusc_submissions = { + 'meta': self.modality, + 'results': nusc_annos, + } + + mkdir_or_exist(jsonfile_prefix) + res_path = osp.join(jsonfile_prefix, 'results_nusc.json') + print('Results writes to', res_path) + dump(nusc_submissions, res_path) + return res_path + + def format_results(self, results, jsonfile_prefix=None): + """Format the results to json (standard format for COCO evaluation). + + Args: + results (list[dict]): Testing results of the dataset. + jsonfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + + Returns: + tuple: Returns (result_files, tmp_dir), where `result_files` is a \ + dict containing the json filepaths, `tmp_dir` is the temporal \ + directory created for saving json files when \ + `jsonfile_prefix` is not specified. + """ + assert isinstance(results, list), 'results must be a list' + # assert len(results) == len(self), ( + # 'The length of results is not equal to the dataset len: {} != {}'. 
+ # format(len(results), len(self))) + + if jsonfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + jsonfile_prefix = osp.join(tmp_dir.name, 'results') + else: + tmp_dir = None + + if not ('pts_bbox' in results[0] or 'img_bbox' in results[0]): + result_files = self._format_bbox(results, jsonfile_prefix) + else: + # should take the inner dict out of 'pts_bbox' or 'img_bbox' dict + result_files = dict() + for name in results[0]: + print(f'\nFormating bboxes of {name}') + results_ = [out[name] for out in results] + tmp_file_ = osp.join(jsonfile_prefix, name) + result_files.update( + {name: self._format_bbox(results_, tmp_file_)}) + return result_files, tmp_dir + diff --git a/mmcv/datasets/B2D_e2e_dataset.py b/mmcv/datasets/B2D_e2e_dataset.py new file mode 100644 index 0000000..9f5b4e0 --- /dev/null +++ b/mmcv/datasets/B2D_e2e_dataset.py @@ -0,0 +1,855 @@ +import copy +import numpy as np +import os +from os import path as osp +import torch +import random +import json, pickle +import tempfile +import cv2 +from pyquaternion import Quaternion +from mmcv.datasets import DATASETS +from mmcv.utils import save_tensor +from mmcv.parallel import DataContainer as DC +from mmcv.core.bbox.structures.lidar_box3d import LiDARInstance3DBoxes +from mmcv.fileio.io import load, dump +from mmcv.utils import track_iter_progress, mkdir_or_exist +from mmcv.datasets.pipelines import to_tensor +from .custom_3d import Custom3DDataset +from .pipelines import Compose +from .nuscenes_styled_eval_utils import DetectionMetrics, EvalBoxes, DetectionBox,center_distance,accumulate,DetectionMetricDataList,calc_ap, calc_tp, quaternion_yaw +from prettytable import PrettyTable + + + +@DATASETS.register_module() +class B2D_E2E_Dataset(Custom3DDataset): + def __init__(self, queue_length=4, bev_size=(200, 200),overlap_test=False,with_velocity=True,sample_interval=5,name_mapping= None,eval_cfg = None, map_root =None,map_file=None,past_frames=4, future_frames=4,predict_frames=12,planning_frames=6,patch_size = [102.4, 102.4],point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0] ,occ_receptive_field=3,occ_n_future=6,occ_filter_invalid_sample=False,occ_filter_by_valid_flag=False,eval_mod=None,*args, **kwargs): + super().__init__(*args, **kwargs) + self.queue_length = queue_length + self.bev_size = (200, 200) + self.overlap_test = overlap_test + self.with_velocity = with_velocity + self.NameMapping = name_mapping + self.eval_cfg = eval_cfg + self.sample_interval = sample_interval + self.past_frames = past_frames + self.future_frames = future_frames + self.predict_frames = predict_frames + self.planning_frames = planning_frames + self.map_root = map_root + self.map_file = map_file + self.point_cloud_range = np.array(point_cloud_range) + self.patch_size = patch_size + self.occ_receptive_field = occ_receptive_field # past + current + self.occ_n_future = occ_n_future # future only + self.occ_filter_invalid_sample = occ_filter_invalid_sample + self.occ_filter_by_valid_flag = occ_filter_by_valid_flag + self.occ_only_total_frames = 7 # NOTE: hardcode, not influenced by planning + self.eval_mod = eval_mod + self.map_element_class = {'Broken':0, 'Solid':1, 'SolidSolid':2,'Center':3,'TrafficLight':4,'StopSign':5} + with open(self.map_file,'rb') as f: + self.map_infos = pickle.load(f) + + def invert_pose(self, pose): + inv_pose = np.eye(4) + inv_pose[:3, :3] = np.transpose(pose[:3, :3]) + inv_pose[:3, -1] = - inv_pose[:3, :3] @ pose[:3, -1] + return inv_pose + + def prepare_train_data(self, index): + """ + Training data preparation. 
+ Args: + index (int): Index for accessing the target data. + Returns: + dict: Training data dict of the corresponding index. + """ + queue = [] + index_list = list(range(index-self.queue_length*self.sample_interval, index,self.sample_interval)) + random.shuffle(index_list) + index_list = sorted(index_list[1:]) + index_list.append(index) + for i in index_list: + i = max(0, i) + input_dict = self.get_data_info(i) + if input_dict is None: + return None + self.pre_pipeline(input_dict) + example = self.pipeline(input_dict) + if self.filter_empty_gt and \ + (example is None or ~(example['gt_labels_3d']._data != -1).any()): + return None + queue.append(example) + return self.union2one(queue) + + def union2one(self, queue): + imgs_list = [each['img'].data for each in queue] + gt_labels_3d_list = [each['gt_labels_3d'].data for each in queue] + gt_sdc_label_list = [each['gt_sdc_label'].data for each in queue] + gt_inds_list = [to_tensor(each['gt_inds']) for each in queue] + gt_bboxes_3d_list = [each['gt_bboxes_3d'].data for each in queue] + gt_past_traj_list = [to_tensor(each['gt_past_traj']) for each in queue] + gt_past_traj_mask_list = [ to_tensor(each['gt_past_traj_mask']) for each in queue] + gt_sdc_bbox_list = [each['gt_sdc_bbox'].data for each in queue] + l2g_r_mat_list = [to_tensor(each['l2g_r_mat']) for each in queue] + l2g_t_list = [to_tensor(each['l2g_t']) for each in queue] + timestamp_list = [to_tensor(each['timestamp']) for each in queue] + gt_fut_traj = to_tensor(queue[-1]['gt_fut_traj']) + gt_fut_traj_mask = to_tensor(queue[-1]['gt_fut_traj_mask']) + if 'gt_future_boxes' in queue[-1]: + gt_future_boxes_list = queue[-1]['gt_future_boxes'] + else: + gt_future_boxes_list = None + if 'gt_future_labels' in queue[-1]: + gt_future_labels_list = [to_tensor(each) for each in queue[-1]['gt_future_labels']] + else: + gt_future_labels_list = None + + metas_map = {} + prev_scene_token = None + prev_pos = None + prev_angle = None + for i, each in enumerate(queue): + metas_map[i] = each['img_metas'].data + if metas_map[i]['folder'] != prev_scene_token: + metas_map[i]['prev_bev_exists'] = False + prev_scene_token = metas_map[i]['folder'] + prev_pos = copy.deepcopy(metas_map[i]['can_bus'][:3]) + prev_angle = copy.deepcopy(metas_map[i]['can_bus'][-1]) + metas_map[i]['can_bus'][:3] = 0 + metas_map[i]['can_bus'][-1] = 0 + else: + metas_map[i]['prev_bev_exists'] = True + tmp_pos = copy.deepcopy(metas_map[i]['can_bus'][:3]) + tmp_angle = copy.deepcopy(metas_map[i]['can_bus'][-1]) + metas_map[i]['can_bus'][:3] -= prev_pos + metas_map[i]['can_bus'][-1] -= prev_angle + prev_pos = copy.deepcopy(tmp_pos) + prev_angle = copy.deepcopy(tmp_angle) + queue[-1]['img'] = DC(torch.stack(imgs_list), cpu_only=False, stack=True) + queue[-1]['img_metas'] = DC(metas_map, cpu_only=True) + queue = queue[-1] + queue['gt_labels_3d'] = DC(gt_labels_3d_list) + queue['gt_sdc_label'] = DC(gt_sdc_label_list) + queue['gt_inds'] = DC(gt_inds_list) + queue['gt_bboxes_3d'] = DC(gt_bboxes_3d_list, cpu_only=True) + queue['gt_sdc_bbox'] = DC(gt_sdc_bbox_list, cpu_only=True) + queue['l2g_r_mat'] = DC(l2g_r_mat_list) + queue['l2g_t'] = DC(l2g_t_list) + queue['timestamp'] = DC(timestamp_list) + queue['gt_fut_traj'] = DC(gt_fut_traj) + queue['gt_fut_traj_mask'] = DC(gt_fut_traj_mask) + queue['gt_past_traj'] = DC(gt_past_traj_list) + queue['gt_past_traj_mask'] = DC(gt_past_traj_mask_list) + if gt_future_boxes_list is not None: + queue['gt_future_boxes'] = DC(gt_future_boxes_list, cpu_only=True) + if gt_future_labels_list is not None: + 
queue['gt_future_labels'] = DC(gt_future_labels_list) + + return queue + + def get_data_info(self, index): + """Get data info according to the given index. + + Args: + index (int): Index of the sample data to get. + + Returns: + dict: Data information that will be passed to the data \ + preprocessing pipelines. It includes the following keys: + + - sample_idx (str): Sample index. + - pts_filename (str): Filename of point clouds. + - sweeps (list[dict]): Infos of sweeps. + - timestamp (float): Sample timestamp. + - img_filename (str, optional): Image filename. + - lidar2img (list[np.ndarray], optional): Transformations \ + from lidar to different cameras. + - ann_info (dict): Annotation info. + """ + info = self.data_infos[index] + + for i in range(len(info['gt_names'])): + if info['gt_names'][i] in self.NameMapping.keys(): + info['gt_names'][i] = self.NameMapping[info['gt_names'][i]] + + + gt_masks,gt_labels,gt_bboxes = self.get_map_info(index) + + + input_dict = dict( + folder=info['folder'], + scene_token=info['folder'], + frame_idx=info['frame_idx'], + ego_yaw=np.nan_to_num(info['ego_yaw'],nan=np.pi/2), + ego_translation=info['ego_translation'], + sensors=info['sensors'], + world2lidar=info['sensors']['LIDAR_TOP']['world2lidar'], + gt_ids=info['gt_ids'], + gt_boxes=info['gt_boxes'], + gt_names=info['gt_names'], + ego_vel = info['ego_vel'], + ego_accel = info['ego_accel'], + ego_rotation_rate = info['ego_rotation_rate'], + npc2world = info['npc2world'], + gt_lane_labels=gt_labels, + gt_lane_bboxes=gt_bboxes, + gt_lane_masks=gt_masks, + timestamp=info['frame_idx']/10 + + ) + + if self.modality['use_camera']: + image_paths = [] + lidar2img_rts = [] + lidar2cam_rts = [] + cam_intrinsics = [] + lidar2ego = info['sensors']['LIDAR_TOP']['lidar2ego'] + for sensor_type, cam_info in info['sensors'].items(): + if not 'CAM' in sensor_type: + continue + image_paths.append(osp.join(self.data_root,cam_info['data_path'])) + # obtain lidar to image transformation matrix + cam2ego = cam_info['cam2ego'] + intrinsic = cam_info['intrinsic'] + intrinsic_pad = np.eye(4) + intrinsic_pad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic + lidar2cam = self.invert_pose(cam2ego) @ lidar2ego + lidar2img = intrinsic_pad @ lidar2cam + lidar2img_rts.append(lidar2img) + cam_intrinsics.append(intrinsic_pad) + lidar2cam_rts.append(lidar2cam) + ego2world = np.eye(4) + ego2world[0:3,0:3] = Quaternion(axis=[0, 0, 1], radians=input_dict['ego_yaw']).rotation_matrix + ego2world[0:3,3] = input_dict['ego_translation'] + lidar2global = ego2world @ lidar2ego + input_dict.update( + dict( + img_filename=image_paths, + lidar2img=lidar2img_rts, + cam_intrinsic=cam_intrinsics, + lidar2cam=lidar2cam_rts, + l2g_r_mat=lidar2global[0:3,0:3], + l2g_t=lidar2global[0:3,3] + + )) + + annos = self.get_ann_info(index) + input_dict['ann_info'] = annos + yaw = input_dict['ego_yaw'] + rotation = list(Quaternion(axis=[0, 0, 1], radians=yaw)) + if yaw < 0: + yaw += 2*np.pi + yaw_in_degree = yaw / np.pi * 180 + + can_bus = np.zeros(18) + can_bus[:3] = input_dict['ego_translation'] + can_bus[3:7] = rotation + can_bus[7:10] = input_dict['ego_vel'] + can_bus[10:13] = input_dict['ego_accel'] + can_bus[13:16] = input_dict['ego_rotation_rate'] + can_bus[16] = yaw + can_bus[17] = yaw_in_degree + input_dict['can_bus'] = can_bus + all_frames = [] + for adj_idx in range(index-self.occ_receptive_field+1,index+self.occ_n_future+1): + if adj_idx<0 or adj_idx>=len(self.data_infos): + all_frames.append(-1) + elif self.data_infos[adj_idx]['folder'] != 
self.data_infos[index]['folder']:
+                all_frames.append(-1)
+            else:
+                all_frames.append(adj_idx)
+
+        future_frames = all_frames[self.occ_receptive_field - 1:]
+        input_dict['occ_has_invalid_frame'] = (-1 in all_frames[:self.occ_only_total_frames])
+        input_dict['occ_img_is_valid'] = np.array(all_frames) >= 0
+        occ_future_ann_infos = []
+        for future_frame in future_frames:
+            if future_frame >= 0:
+                occ_future_ann_infos.append(
+                    self.get_ann_boxes_only(future_frame),
+                )
+            else:
+                occ_future_ann_infos.append(None)
+        input_dict['occ_future_ann_infos'] = occ_future_ann_infos
+
+        input_dict.update(self.occ_get_transforms(future_frames))
+        sdc_planning, sdc_planning_mask = self.get_ego_future_xy(index, self.sample_interval, self.planning_frames)
+        input_dict['sdc_planning'] = sdc_planning
+        input_dict['sdc_planning_mask'] = sdc_planning_mask
+        command = info['command_near']
+        if command < 0:
+            command = 4
+        command -= 1
+        input_dict['command'] = command
+
+        return input_dict
+
+    def get_map_info(self, index):
+
+        gt_masks = []
+        gt_labels = []
+        gt_bboxes = []
+
+        ann_info = self.data_infos[index]
+        town_name = ann_info['town_name']
+        map_info = self.map_infos[town_name]
+        lane_points = map_info['lane_points']
+        lane_sample_points = map_info['lane_sample_points']
+        lane_types = map_info['lane_types']
+        trigger_volumes_points = map_info['trigger_volumes_points']
+        trigger_volumes_sample_points = map_info['trigger_volumes_sample_points']
+        trigger_volumes_types = map_info['trigger_volumes_types']
+        world2lidar = np.array(ann_info['sensors']['LIDAR_TOP']['world2lidar'])
+        ego_xy = np.linalg.inv(world2lidar)[0:2, 3]
+
+        # 1st search: keep lanes whose sampled points come near the ego
+        max_distance = 100
+        chosed_idx = []
+        for idx in range(len(lane_sample_points)):
+            single_sample_points = lane_sample_points[idx]
+            distance = np.linalg.norm((single_sample_points[:, 0:2] - ego_xy), axis=-1)
+            if np.min(distance) < max_distance:
+                chosed_idx.append(idx)
+
+        for idx in chosed_idx:
+            if not lane_types[idx] in self.map_element_class.keys():
+                continue
+            points = lane_points[idx]
+            points = np.concatenate([points, np.ones((points.shape[0], 1))], axis=-1)
+            points_in_ego = (world2lidar @ points.T).T
+            mask = (points_in_ego[:, 0] > self.point_cloud_range[0]) & (points_in_ego[:, 0] < self.point_cloud_range[3]) & (points_in_ego[:, 1] > self.point_cloud_range[1]) & (points_in_ego[:, 1] < self.point_cloud_range[4])
+            points_in_ego_range = points_in_ego[mask][:, 0:2]
+            if points_in_ego_range.shape[0] > 1:
+                gt_mask = np.zeros(self.bev_size, dtype=np.uint8)
+                normalized_points = np.zeros_like(points_in_ego_range)
+                normalized_points[:, 0] = (points_in_ego_range[:, 0] + self.patch_size[0] / 2) * (self.bev_size[0] / self.patch_size[0])
+                normalized_points[:, 1] = (points_in_ego_range[:, 1] + self.patch_size[1] / 2) * (self.bev_size[1] / self.patch_size[1])
+                cv2.polylines(gt_mask, [normalized_points.astype(np.int32)], False, color=1, thickness=2)
+                gt_label = self.map_element_class[lane_types[idx]]
+                gt_masks.append(gt_mask)
+                gt_labels.append(gt_label)
+                ys, xs = np.where(gt_mask == 1)
+                gt_bboxes.append([min(xs), min(ys), max(xs), max(ys)])
+
+        for idx in range(len(trigger_volumes_points)):
+            if not trigger_volumes_types[idx] in self.map_element_class.keys():
+                continue
+            points = trigger_volumes_points[idx]
+            points = np.concatenate([points, np.ones((points.shape[0], 1))], axis=-1)
+            points_in_ego = (world2lidar @ points.T).T
+            mask = (points_in_ego[:, 0] > self.point_cloud_range[0]) & (points_in_ego[:, 0] < self.point_cloud_range[3]) & (points_in_ego[:, 1] > self.point_cloud_range[1]) & (points_in_ego[:, 1] < self.point_cloud_range[4])
+            if not mask.any():
+                continue
+            points_in_ego_range = points_in_ego[mask][:, 0:2]
+            gt_mask = np.zeros(self.bev_size, dtype=np.uint8)
+            normalized_points = np.zeros_like(points_in_ego_range)
+            normalized_points[:, 0] = (points_in_ego_range[:, 0] + self.patch_size[0] / 2) * (self.bev_size[0] / self.patch_size[0])
+            normalized_points[:, 1] = (points_in_ego_range[:, 1] + self.patch_size[1] / 2) * (self.bev_size[1] / self.patch_size[1])
+            cv2.fillPoly(gt_mask, [normalized_points.astype(np.int32)], color=1)
+            gt_label = self.map_element_class[trigger_volumes_types[idx]]
+            gt_masks.append(gt_mask)
+            gt_labels.append(gt_label)
+            ys, xs = np.where(gt_mask == 1)
+            gt_bboxes.append([min(xs), min(ys), max(xs), max(ys)])
+
+        return gt_masks, gt_labels, gt_bboxes
+
+    def get_ann_info(self, index):
+        """Get annotation info according to the given index.
+
+        Args:
+            index (int): Index of the annotation data to get.
+
+        Returns:
+            dict: Annotation information.
+        """
+        info = self.data_infos[index]
+        # filter out bbox containing no points
+        mask = (info['num_points'] >= -1)
+        gt_bboxes_3d = info['gt_boxes'][mask]
+        gt_names_3d = info['gt_names'][mask]
+        gt_inds = info['gt_ids']
+        gt_labels_3d = []
+
+        for cat in gt_names_3d:
+            if cat in self.CLASSES:
+                gt_labels_3d.append(self.CLASSES.index(cat))
+            else:
+                gt_labels_3d.append(-1)
+        gt_labels_3d = np.array(gt_labels_3d)
+        if not self.with_velocity:
+            gt_bboxes_3d = gt_bboxes_3d[:,0:7]
+        gt_bboxes_3d = LiDARInstance3DBoxes(
+            gt_bboxes_3d,
+            box_dim=gt_bboxes_3d.shape[-1],
+            origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)
+
+        ego_future_track, ego_future_mask = self.get_ego_future_xy(index, self.sample_interval, self.predict_frames)
+        past_track, past_mask = self.get_past_or_future_xy(index, self.sample_interval, self.past_frames, past_or_future='past', local_xy=True)
+        predict_track, predict_mask = self.get_past_or_future_xy(index, self.sample_interval, self.predict_frames, past_or_future='future', local_xy=False)
+        mask = (past_mask.sum((1,2)) > 0).astype(np.int64)
+        future_track = predict_track[:,0:self.future_frames,:]*mask[:,None,None]
+        future_mask = predict_mask[:,0:self.future_frames,:]*mask[:,None,None]
+        full_past_track = np.concatenate([past_track,future_track],axis=1)
+        full_past_mask = np.concatenate([past_mask,future_mask],axis=1)
+        gt_sdc_bbox, gt_sdc_label = self.generate_sdc_info(index)
+        anns_results = dict(
+            gt_bboxes_3d=gt_bboxes_3d,
+            gt_labels_3d=gt_labels_3d,
+            gt_names=gt_names_3d,
+            gt_inds=gt_inds,
+            gt_fut_traj=predict_track,
+            gt_fut_traj_mask=predict_mask,
+            gt_past_traj=full_past_track,
+            gt_past_traj_mask=full_past_mask,
+            gt_sdc_bbox=gt_sdc_bbox,
+            gt_sdc_label=gt_sdc_label,
+            gt_sdc_fut_traj=ego_future_track[:,:,0:2],
+            gt_sdc_fut_traj_mask=ego_future_mask,
+        )
+        return anns_results
+
+    def get_ann_boxes_only(self, index):
+
+        info = self.data_infos[index]
+        for i in range(len(info['gt_names'])):
+            if info['gt_names'][i] in self.NameMapping.keys():
+                info['gt_names'][i] = self.NameMapping[info['gt_names'][i]]
+        gt_bboxes_3d = info['gt_boxes']
+        gt_names_3d = info['gt_names']
+        gt_inds = info['gt_ids']
+        gt_labels_3d = []
+        for cat in gt_names_3d:
+            if cat in self.CLASSES:
+                gt_labels_3d.append(self.CLASSES.index(cat))
+            else:
+                gt_labels_3d.append(-1)
+        gt_labels_3d = np.array(gt_labels_3d)
+        if not self.with_velocity:
+            gt_bboxes_3d = gt_bboxes_3d[:,0:7]
+        gt_bboxes_3d = LiDARInstance3DBoxes(
+            gt_bboxes_3d,
+            box_dim=gt_bboxes_3d.shape[-1],
+            origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)
+        boxes_annos = dict(
+            gt_bboxes_3d=gt_bboxes_3d,
+            gt_labels_3d=gt_labels_3d,
+            gt_inds=gt_inds,
+            )
+        return boxes_annos
+
+    def __getitem__(self, idx):
+        """Get item from infos according to the given index.
+
+        Returns:
+            dict: Data dictionary of the corresponding index.
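+
+        Notes:
+            In training mode, samples whose pipeline output is invalid are
+            skipped and another randomly chosen index is used instead.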
+ """ + if self.test_mode: + return self.prepare_test_data(idx) + while True: + + data = self.prepare_train_data(idx) + if data is None: + idx = self._rand_another(idx) + continue + return data + + def generate_sdc_info(self,idx): + + info = self.data_infos[idx] + ego_size = info['ego_size'] + ego_vel = info['ego_vel'] + psudo_sdc_bbox = np.array([0.0, 0.0, 0.0, ego_size[0], ego_size[1], ego_size[2], -np.pi, ego_vel[1], ego_vel[0] ]) + if not self.with_velocity: + psudo_sdc_bbox = psudo_sdc_bbox[0:7] + gt_bboxes_3d = np.array([psudo_sdc_bbox]).astype(np.float32) + gt_names_3d = ['car'] + gt_labels_3d = [] + for cat in gt_names_3d: + if cat in self.CLASSES: + gt_labels_3d.append(self.CLASSES.index(cat)) + else: + gt_labels_3d.append(-1) + gt_labels_3d = np.array(gt_labels_3d) + + # the nuscenes box center is [0.5, 0.5, 0.5], we change it to be + # the same as KITTI (0.5, 0.5, 0) + gt_bboxes_3d = LiDARInstance3DBoxes( + gt_bboxes_3d, + box_dim=gt_bboxes_3d.shape[-1], + origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d) + + gt_labels_3d = DC(to_tensor(gt_labels_3d)) + gt_bboxes_3d = DC(gt_bboxes_3d, cpu_only=True) + + return gt_bboxes_3d, gt_labels_3d + + def get_past_or_future_xy(self,idx,sample_rate,frames,past_or_future,local_xy=False): + + assert past_or_future in ['past','future'] + if past_or_future == 'past': + adj_idx_list = range(idx-sample_rate,idx-(frames+1)*sample_rate,-sample_rate) + else: + adj_idx_list = range(idx+sample_rate,idx+(frames+1)*sample_rate,sample_rate) + + cur_frame = self.data_infos[idx] + box_ids = cur_frame['gt_ids'] + adj_track = np.zeros((len(box_ids),frames,2)) + adj_mask = np.zeros((len(box_ids),frames,2)) + world2lidar_ego_cur = cur_frame['sensors']['LIDAR_TOP']['world2lidar'] + for i in range(len(box_ids)): + box_id = box_ids[i] + cur_box2lidar = world2lidar_ego_cur @ cur_frame['npc2world'][i] + cur_xy = cur_box2lidar[0:2,3] + for j in range(len(adj_idx_list)): + adj_idx = adj_idx_list[j] + if adj_idx <0 or adj_idx>=len(self.data_infos): + break + adj_frame = self.data_infos[adj_idx] + if adj_frame['folder'] != cur_frame ['folder']: + break + if len(np.where(adj_frame['gt_ids']==box_id)[0])==0: + continue + assert len(np.where(adj_frame['gt_ids']==box_id)[0]) == 1 , np.where(adj_frame['gt_ids']==box_id)[0] + adj_idx = np.where(adj_frame['gt_ids']==box_id)[0][0] + adj_box2lidar = world2lidar_ego_cur @ adj_frame['npc2world'][adj_idx] + adj_xy = adj_box2lidar[0:2,3] + if local_xy: + adj_xy -= cur_xy + adj_track[i,j,:] = adj_xy + adj_mask[i,j,:] = 1 + return adj_track, adj_mask + + def get_ego_future_xy(self,idx,sample_rate,frames): + + adj_idx_list = range(idx+sample_rate,idx+(frames+1)*sample_rate,sample_rate) + cur_frame = self.data_infos[idx] + adj_track = np.zeros((1,frames,3)) + adj_mask = np.zeros((1,frames,2)) + world2lidar_ego_cur = cur_frame['sensors']['LIDAR_TOP']['world2lidar'] + for j in range(len(adj_idx_list)): + adj_idx = adj_idx_list[j] + if adj_idx <0 or adj_idx>=len(self.data_infos): + break + adj_frame = self.data_infos[adj_idx] + if adj_frame['folder'] != cur_frame ['folder']: + break + world2lidar_ego_adj = adj_frame['sensors']['LIDAR_TOP']['world2lidar'] + adj2cur_lidar = world2lidar_ego_cur @ np.linalg.inv(world2lidar_ego_adj) + xy = adj2cur_lidar[0:2,3] + yaw = np.arctan2(adj2cur_lidar[1,0],adj2cur_lidar[0,0]) + yaw = -yaw -np.pi + while yaw > np.pi: + yaw -= np.pi*2 + while yaw < -np.pi: + yaw += np.pi*2 + adj_track[0,j,0:2] = xy + adj_track[0,j,2] = yaw + adj_mask[0,j,:] = 1 + + return adj_track, adj_mask + + def 
occ_get_transforms(self, indices, data_type=torch.float32): + + l2e_r_mats = [] + l2e_t_vecs = [] + e2g_r_mats = [] + e2g_t_vecs = [] + + for index in indices: + if index == -1: + l2e_r_mats.append(None) + l2e_t_vecs.append(None) + e2g_r_mats.append(None) + e2g_t_vecs.append(None) + else: + info = self.data_infos[index] + lidar2ego = info['sensors']['LIDAR_TOP']['lidar2ego'] + l2e_r = lidar2ego[0:3,0:3] + l2e_t = lidar2ego[0:3,3] + ego2global = np.linalg.inv(info['world2ego']) + e2g_r = ego2global[0:3,0:3] + e2g_t = ego2global[0:3,3] + l2e_r_mats.append(torch.tensor(l2e_r).to(data_type)) + l2e_t_vecs.append(torch.tensor(l2e_t).to(data_type)) + e2g_r_mats.append(torch.tensor(e2g_r).to(data_type)) + e2g_t_vecs.append(torch.tensor(e2g_t).to(data_type)) + res = { + 'occ_l2e_r_mats': l2e_r_mats, + 'occ_l2e_t_vecs': l2e_t_vecs, + 'occ_e2g_r_mats': e2g_r_mats, + 'occ_e2g_t_vecs': e2g_t_vecs, + } + + return res + + def evaluate(self, + results, + metric='bbox', + logger=None, + jsonfile_prefix=None, + result_names=['pts_bbox'], + show=False, + out_dir=None, + pipeline=None): + """Evaluation in nuScenes protocol. + + Args: + results (list[dict]): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + jsonfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + show (bool): Whether to visualize. + Default: False. + out_dir (str): Path to save the visualization results. + Default: None. + pipeline (list[dict], optional): raw data loading for showing. + Default: None. + + Returns: + dict[str, float]: Results of each evaluation metric. + """ + + # NOTE:Curremtly we only support evaluation on detection and planning + + result_files, tmp_dir = self.format_results(results['bbox_results'], jsonfile_prefix) + result_path = result_files + with open(result_path) as f: + result_data = json.load(f) + pred_boxes = EvalBoxes.deserialize(result_data['results'], DetectionBox) + meta = result_data['meta'] + + gt_boxes = self.load_gt() + + metric_data_list = DetectionMetricDataList() + for class_name in self.eval_cfg['class_names']: + for dist_th in self.eval_cfg['dist_ths']: + md = accumulate(gt_boxes, pred_boxes, class_name, center_distance, dist_th) + metric_data_list.set(class_name, dist_th, md) + metrics = DetectionMetrics(self.eval_cfg) + + for class_name in self.eval_cfg['class_names']: + # Compute APs. + for dist_th in self.eval_cfg['dist_ths']: + metric_data = metric_data_list[(class_name, dist_th)] + ap = calc_ap(metric_data, self.eval_cfg['min_recall'], self.eval_cfg['min_precision']) + metrics.add_label_ap(class_name, dist_th, ap) + + # Compute TP metrics. 
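+            # TP errors (trans/scale/orient/vel) are computed only at the
+            # single matching threshold dist_th_tp, following the nuScenes
+            # detection protocol.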
+ for metric_name in self.eval_cfg['tp_metrics']: + metric_data = metric_data_list[(class_name, self.eval_cfg['dist_th_tp'])] + tp = calc_tp(metric_data, self.eval_cfg['min_recall'], metric_name) + metrics.add_label_tp(class_name, metric_name, tp) + + metrics_summary = metrics.serialize() + metrics_summary['meta'] = meta.copy() + print('mAP: %.4f' % (metrics_summary['mean_ap'])) + err_name_mapping = { + 'trans_err': 'mATE', + 'scale_err': 'mASE', + 'orient_err': 'mAOE', + 'vel_err': 'mAVE', + } + for tp_name, tp_val in metrics_summary['tp_errors'].items(): + print('%s: %.4f' % (err_name_mapping[tp_name], tp_val)) + print('NDS: %.4f' % (metrics_summary['nd_score'])) + #print('Eval time: %.1fs' % metrics_summary['eval_time']) + + # Print per-class metrics. + print() + print('Per-class results:') + print('Object Class\tAP\tATE\tASE\tAOE\tAVE') + class_aps = metrics_summary['mean_dist_aps'] + class_tps = metrics_summary['label_tp_errors'] + for class_name in class_aps.keys(): + print('%s\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f' + % (class_name, class_aps[class_name], + class_tps[class_name]['trans_err'], + class_tps[class_name]['scale_err'], + class_tps[class_name]['orient_err'], + class_tps[class_name]['vel_err'])) + + detail = dict() + metric_prefix = 'bbox_NuScenes' + for name in self.eval_cfg['class_names']: + for k, v in metrics_summary['label_aps'][name].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val + for k, v in metrics_summary['label_tp_errors'][name].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}_{}'.format(metric_prefix, name, k)] = val + for k, v in metrics_summary['tp_errors'].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}'.format(metric_prefix,self.eval_cfg['err_name_maping'][k])] = val + detail['{}/NDS'.format(metric_prefix)] = metrics_summary['nd_score'] + detail['{}/mAP'.format(metric_prefix)] = metrics_summary['mean_ap'] + + if 'planning_results_computed' in results.keys(): + planning_results_computed = results['planning_results_computed'] + planning_tab = PrettyTable() + planning_tab.field_names = [ + "metrics", "0.5s", "1.0s", "1.5s", "2.0s", "2.5s", "3.0s"] + for key in planning_results_computed.keys(): + value = planning_results_computed[key] + row_value = [] + row_value.append(key) + for i in range(len(value)): + row_value.append('%.4f' % float(value[i])) + planning_tab.add_row(row_value) + print(planning_tab) + + + return detail + + def load_gt(self): + all_annotations = EvalBoxes() + for i in range(len(self.data_infos)): + sample_boxes = [] + sample_data = self.data_infos[i] + + gt_boxes = sample_data['gt_boxes'] + + for j in range(gt_boxes.shape[0]): + class_name = self.NameMapping[sample_data['gt_names'][j]] + if not class_name in self.eval_cfg['class_range'].keys(): + continue + range_x, range_y = self.eval_cfg['class_range'][class_name] + if abs(gt_boxes[j,0]) > range_x or abs(gt_boxes[j,1]) > range_y: + continue + sample_boxes.append(DetectionBox( + sample_token=sample_data['folder']+'_'+str(sample_data['frame_idx']), + translation=gt_boxes[j,0:3], + size=gt_boxes[j,3:6], + rotation=list(Quaternion(axis=[0, 0, 1], radians=-gt_boxes[j,6]-np.pi/2)), + velocity=gt_boxes[j,7:9], + num_pts=int(sample_data['num_points'][j]), + detection_name=self.NameMapping[sample_data['gt_names'][j]], + detection_score=-1.0, + attribute_name=self.NameMapping[sample_data['gt_names'][j]] + )) + all_annotations.add_boxes(sample_data['folder']+'_'+str(sample_data['frame_idx']), sample_boxes) + return 
all_annotations + + def _format_bbox(self, results, jsonfile_prefix=None): + """Convert the results to the standard format. + + Args: + results (list[dict]): Testing results of the dataset. + jsonfile_prefix (str): The prefix of the output jsonfile. + You can specify the output directory/filename by + modifying the jsonfile_prefix. Default: None. + + Returns: + str: Path of the output json file. + """ + + + nusc_annos = {} + mapped_class_names = self.CLASSES + + print('Start to convert detection format...') + for sample_id, det in enumerate(track_iter_progress(results)): + #pdb.set_trace() + annos = [] + box3d = det['boxes_3d'] + scores = det['scores_3d'] + labels = det['labels_3d'] + box_gravity_center = box3d.gravity_center + box_dims = box3d.dims + box_yaw = box3d.yaw.numpy() + box_yaw = -box_yaw - np.pi / 2 + sample_token = self.data_infos[sample_id]['folder'] + '_' + str(self.data_infos[sample_id]['frame_idx']) + + + + for i in range(len(box3d)): + #import pdb;pdb.set_trace() + quat = list(Quaternion(axis=[0, 0, 1], radians=box_yaw[i])) + velocity = [box3d.tensor[i, 7].item(),box3d.tensor[i, 8].item()] + name = mapped_class_names[labels[i]] + nusc_anno = dict( + sample_token=sample_token, + translation=box_gravity_center[i].tolist(), + size=box_dims[i].tolist(), + rotation=quat, + velocity=velocity, + detection_name=name, + detection_score=scores[i].item(), + attribute_name=name) + annos.append(nusc_anno) + nusc_annos[sample_token] = annos + nusc_submissions = { + 'meta': self.modality, + 'results': nusc_annos, + } + + mkdir_or_exist(jsonfile_prefix) + res_path = osp.join(jsonfile_prefix, 'results_nusc.json') + print('Results writes to', res_path) + dump(nusc_submissions, res_path) + return res_path + + def format_results(self, results, jsonfile_prefix=None): + """Format the results to json (standard format for COCO evaluation). + + Args: + results (list[dict]): Testing results of the dataset. + jsonfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + + Returns: + tuple: Returns (result_files, tmp_dir), where `result_files` is a \ + dict containing the json filepaths, `tmp_dir` is the temporal \ + directory created for saving json files when \ + `jsonfile_prefix` is not specified. + """ + assert isinstance(results, list), 'results must be a list' + # assert len(results) == len(self), ( + # 'The length of results is not equal to the dataset len: {} != {}'. 
+ # format(len(results), len(self))) + + if jsonfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + jsonfile_prefix = osp.join(tmp_dir.name, 'results') + else: + tmp_dir = None + + if not ('pts_bbox' in results[0] or 'img_bbox' in results[0]): + result_files = self._format_bbox(results, jsonfile_prefix) + else: + # should take the inner dict out of 'pts_bbox' or 'img_bbox' dict + result_files = dict() + for name in results[0]: + print(f'\nFormating bboxes of {name}') + results_ = [out[name] for out in results] + tmp_file_ = osp.join(jsonfile_prefix, name) + result_files.update( + {name: self._format_bbox(results_, tmp_file_)}) + return result_files, tmp_dir + diff --git a/mmcv/datasets/B2D_vad_dataset.py b/mmcv/datasets/B2D_vad_dataset.py new file mode 100644 index 0000000..f32cf25 --- /dev/null +++ b/mmcv/datasets/B2D_vad_dataset.py @@ -0,0 +1,1037 @@ +import copy +import numpy as np +import os +from os import path as osp +import torch +import random +import json, pickle +import tempfile +import cv2 +import pyquaternion +from pyquaternion import Quaternion +import mmcv +from mmcv.datasets import DATASETS +from mmcv.utils import save_tensor +from mmcv.parallel import DataContainer as DC +from mmcv.core.bbox.structures.lidar_box3d import LiDARInstance3DBoxes +from mmcv.fileio.io import load, dump +from mmcv.utils import track_iter_progress, mkdir_or_exist +from mmcv.datasets.pipelines import to_tensor +from .custom_3d import Custom3DDataset +from .pipelines import Compose +from mmcv.datasets.map_utils.struct import LiDARInstanceLines +from shapely.geometry import LineString +from nuscenes.eval.common.utils import quaternion_yaw, Quaternion +from .vad_custom_nuscenes_eval import NuScenesEval_custom +from nuscenes.eval.common.utils import center_distance +import random +from nuscenes.utils.data_classes import Box as NuScenesBox +from mmcv.core.bbox.structures.nuscenes_box import CustomNuscenesBox +from shapely import affinity, ops +from shapely.geometry import LineString, box, MultiPolygon, MultiLineString +from nuscenes.map_expansion.map_api import NuScenesMap, NuScenesMapExplorer +from nuscenes.eval.detection.constants import DETECTION_NAMES +from mmcv.datasets.map_utils.mean_ap import eval_map +from mmcv.datasets.map_utils.mean_ap import format_res_gt_by_classes +from .nuscenes_styled_eval_utils import DetectionMetrics, EvalBoxes, DetectionBox,center_distance,accumulate,DetectionMetricDataList,calc_ap, calc_tp, quaternion_yaw + +@DATASETS.register_module() +class B2D_VAD_Dataset(Custom3DDataset): + + + def __init__(self, queue_length=4, bev_size=(200, 200),overlap_test=False,with_velocity=True,sample_interval=5,name_mapping= None,eval_cfg = None, map_root =None,map_file=None,past_frames=2, future_frames=6,point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0] ,polyline_points_num=20,*args, **kwargs): + super().__init__(*args, **kwargs) + self.queue_length = queue_length + self.bev_size = bev_size + self.overlap_test = overlap_test + self.with_velocity = with_velocity + self.NameMapping = name_mapping + self.eval_cfg = eval_cfg + self.sample_interval = sample_interval + self.past_frames = past_frames + self.future_frames = future_frames + self.map_root = map_root + self.map_file = map_file + self.point_cloud_range = np.array(point_cloud_range) + self.polyline_points_num = polyline_points_num + self.map_element_class = {'Broken':0, 'Solid':1, 'SolidSolid':2,'Center':3,'TrafficLight':4,'StopSign':5} + self.MAPCLASSES = list(self.map_element_class.keys()) + self.NUM_MAPCLASSES 
= len(self.MAPCLASSES) + self.map_eval_use_same_gt_sample_num_flag = True + self.map_ann_file = 'data/infos' + self.eval_cfg = eval_cfg + with open(self.map_file,'rb') as f: + self.map_infos = pickle.load(f) + + def invert_pose(self, pose): + inv_pose = np.eye(4) + inv_pose[:3, :3] = np.transpose(pose[:3, :3]) + inv_pose[:3, -1] = - inv_pose[:3, :3] @ pose[:3, -1] + return inv_pose + + def prepare_train_data(self, index): + """ + Training data preparation. + Args: + index (int): Index for accessing the target data. + Returns: + dict: Training data dict of the corresponding index. + """ + queue = [] + index_list = list(range(index-self.queue_length*self.sample_interval, index,self.sample_interval)) + random.shuffle(index_list) + index_list = sorted(index_list[1:]) + index_list.append(index) + for i in index_list: + i = max(0, i) + input_dict = self.get_data_info(i) + if input_dict is None: + return None + self.pre_pipeline(input_dict) + example = self.pipeline(input_dict) + gt_labels,gt_bboxes = self.get_map_info(index) + example['map_gt_labels_3d'] = DC(gt_labels, cpu_only=False) + example['map_gt_bboxes_3d'] = DC(gt_bboxes, cpu_only=True) + + if self.filter_empty_gt and \ + (example is None or ~(example['gt_labels_3d']._data != -1).any()): + return None + queue.append(example) + return self.union2one(queue) + + + def union2one(self, queue): + imgs_list = [each['img'].data for each in queue] + metas_map = {} + prev_scene_token = None + prev_pos = None + prev_angle = None + for i, each in enumerate(queue): + metas_map[i] = each['img_metas'].data + if metas_map[i]['folder'] != prev_scene_token: + metas_map[i]['prev_bev_exists'] = False + prev_scene_token = metas_map[i]['folder'] + prev_pos = copy.deepcopy(metas_map[i]['can_bus'][:3]) + prev_angle = copy.deepcopy(metas_map[i]['can_bus'][-1]) + metas_map[i]['can_bus'][:3] = 0 + metas_map[i]['can_bus'][-1] = 0 + else: + metas_map[i]['prev_bev_exists'] = True + tmp_pos = copy.deepcopy(metas_map[i]['can_bus'][:3]) + tmp_angle = copy.deepcopy(metas_map[i]['can_bus'][-1]) + metas_map[i]['can_bus'][:3] -= prev_pos + metas_map[i]['can_bus'][-1] -= prev_angle + prev_pos = copy.deepcopy(tmp_pos) + prev_angle = copy.deepcopy(tmp_angle) + queue[-1]['img'] = DC(torch.stack(imgs_list), cpu_only=False, stack=True) + queue[-1]['img_metas'] = DC(metas_map, cpu_only=True) + queue = queue[-1] + return queue + + def get_data_info(self, index): + """Get data info according to the given index. + + Args: + index (int): Index of the sample data to get. + + Returns: + dict: Data information that will be passed to the data \ + preprocessing pipelines. It includes the following keys: + + - sample_idx (str): Sample index. + - pts_filename (str): Filename of point clouds. + - sweeps (list[dict]): Infos of sweeps. + - timestamp (float): Sample timestamp. + - img_filename (str, optional): Image filename. + - lidar2img (list[np.ndarray], optional): Transformations \ + from lidar to different cameras. + - ann_info (dict): Annotation info. 
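+
+                - ego_his_trajs / ego_fut_trajs (np.ndarray): Past and
+                  future ego trajectory offsets (see get_ego_trajs).
+                - ego_fut_cmd (np.ndarray): One-hot navigation command.
+                - ego_lcf_feat (np.ndarray): Ego state feature vector.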
+ """ + info = self.data_infos[index] + + for i in range(len(info['gt_names'])): + if info['gt_names'][i] in self.NameMapping.keys(): + info['gt_names'][i] = self.NameMapping[info['gt_names'][i]] + + input_dict = dict( + folder=info['folder'], + scene_token=info['folder'], + frame_idx=info['frame_idx'], + ego_yaw=np.nan_to_num(info['ego_yaw'],nan=np.pi/2), + ego_translation=info['ego_translation'], + sensors=info['sensors'], + world2lidar=info['sensors']['LIDAR_TOP']['world2lidar'], + gt_ids=info['gt_ids'], + gt_boxes=info['gt_boxes'], + gt_names=info['gt_names'], + ego_vel = info['ego_vel'], + ego_accel = info['ego_accel'], + ego_rotation_rate = info['ego_rotation_rate'], + npc2world = info['npc2world'], + timestamp=info['frame_idx']/10 + ) + + if self.modality['use_camera']: + image_paths = [] + lidar2img_rts = [] + lidar2cam_rts = [] + cam_intrinsics = [] + lidar2ego = info['sensors']['LIDAR_TOP']['lidar2ego'] + lidar2global = self.invert_pose(info['sensors']['LIDAR_TOP']['world2lidar']) + for sensor_type, cam_info in info['sensors'].items(): + if not 'CAM' in sensor_type: + continue + image_paths.append(osp.join(self.data_root,cam_info['data_path'])) + # obtain lidar to image transformation matrix + cam2ego = cam_info['cam2ego'] + intrinsic = cam_info['intrinsic'] + intrinsic_pad = np.eye(4) + intrinsic_pad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic + lidar2cam = self.invert_pose(cam2ego) @ lidar2ego + lidar2img = intrinsic_pad @ lidar2cam + lidar2img_rts.append(lidar2img) + cam_intrinsics.append(intrinsic_pad) + lidar2cam_rts.append(lidar2cam) + + input_dict.update( + dict( + img_filename=image_paths, + lidar2img=lidar2img_rts, + cam_intrinsic=cam_intrinsics, + lidar2cam=lidar2cam_rts, + l2g_r_mat=lidar2global[0:3,0:3], + l2g_t=lidar2global[0:3,3] + + )) + + #if not self.test_mode: + annos = self.get_ann_info(index) + input_dict['ann_info'] = annos + yaw = input_dict['ego_yaw'] + rotation = list(Quaternion(axis=[0, 0, 1], radians=yaw)) + + if yaw < 0: + yaw += 2*np.pi + yaw_in_degree = yaw / np.pi * 180 + + can_bus = np.zeros(18) + can_bus[:3] = input_dict['ego_translation'] + can_bus[3:7] = rotation + can_bus[7:10] = input_dict['ego_vel'] + can_bus[10:13] = input_dict['ego_accel'] + can_bus[13:16] = input_dict['ego_rotation_rate'] + can_bus[16] = yaw + can_bus[17] = yaw_in_degree + input_dict['can_bus'] = can_bus + ego_lcf_feat = np.zeros(9) + ego_lcf_feat[0:2] = input_dict['ego_translation'][0:2] + ego_lcf_feat[2:4] = input_dict['ego_accel'][2:4] + ego_lcf_feat[4] = input_dict['ego_rotation_rate'][-1] + ego_lcf_feat[5] = info['ego_size'][1] + ego_lcf_feat[6] = info['ego_size'][0] + ego_lcf_feat[7] = np.sqrt(input_dict['ego_translation'][0]**2+input_dict['ego_translation'][1]**2) + ego_lcf_feat[8] = info['steer'] + ego_his_trajs, ego_fut_trajs, ego_fut_masks, command = self.get_ego_trajs(index,self.sample_interval,self.past_frames,self.future_frames) + input_dict['ego_his_trajs'] = ego_his_trajs + input_dict['ego_fut_trajs'] = ego_fut_trajs + input_dict['ego_fut_masks'] = ego_fut_masks + input_dict['ego_fut_cmd'] = command + input_dict['ego_lcf_feat'] = ego_lcf_feat + input_dict['fut_valid_flag'] = (ego_fut_masks==1).all() + + return input_dict + + + def get_map_info(self, index): + + gt_masks = [] + gt_labels = [] + gt_bboxes = [] + + ann_info = self.data_infos[index] + town_name = ann_info['town_name'] + map_info = self.map_infos[town_name] + lane_points = map_info['lane_points'] + lane_sample_points = map_info['lane_sample_points'] + lane_types = map_info['lane_types'] 
+        trigger_volumes_points = map_info['trigger_volumes_points']
+        trigger_volumes_sample_points = map_info['trigger_volumes_sample_points']
+        trigger_volumes_types = map_info['trigger_volumes_types']
+        world2lidar = np.array(ann_info['sensors']['LIDAR_TOP']['world2lidar'])
+        ego_xy = np.linalg.inv(world2lidar)[0:2,3]
+        max_distance = 50
+        chosed_idx = []
+
+        for idx in range(len(lane_sample_points)):
+            single_sample_points = lane_sample_points[idx]
+            distance = np.linalg.norm((single_sample_points[:,0:2]-ego_xy),axis=-1)
+            if np.min(distance) < max_distance:
+                chosed_idx.append(idx)
+
+        polylines = []
+        for idx in chosed_idx:
+            if not lane_types[idx] in self.map_element_class.keys():
+                continue
+            points = lane_points[idx]
+            points = np.concatenate([points,np.ones((points.shape[0],1))],axis=-1)
+            points_in_lidar = (world2lidar @ points.T).T
+            mask = (points_in_lidar[:,0]>self.point_cloud_range[0]) & (points_in_lidar[:,0]<self.point_cloud_range[3]) & (points_in_lidar[:,1]>self.point_cloud_range[1]) & (points_in_lidar[:,1]<self.point_cloud_range[4])
+            points_in_lidar_range = points_in_lidar[mask][:,0:2]
+            if points_in_lidar_range.shape[0] > 1:
+                polylines.append(LineString(points_in_lidar_range))
+                gt_label = self.map_element_class[lane_types[idx]]
+                gt_labels.append(gt_label)
+
+        for idx in range(len(trigger_volumes_points)):
+            if not trigger_volumes_types[idx] in self.map_element_class.keys():
+                continue
+            points = trigger_volumes_points[idx]
+            points = np.concatenate([points,np.ones((points.shape[0],1))],axis=-1)
+            points_in_lidar = (world2lidar @ points.T).T
+            mask = (points_in_lidar[:,0]>self.point_cloud_range[0]) & (points_in_lidar[:,0]<self.point_cloud_range[3]) & (points_in_lidar[:,1]>self.point_cloud_range[1]) & (points_in_lidar[:,1]<self.point_cloud_range[4])
+            if not mask.all():
+                continue
+            # trigger volumes are kept as vectorized map elements as well
+            polylines.append(LineString(points_in_lidar[:,0:2]))
+            gt_label = self.map_element_class[trigger_volumes_types[idx]]
+            gt_labels.append(gt_label)
+
+        gt_labels = torch.tensor(gt_labels)
+        gt_instances = LiDARInstanceLines(polylines, fixed_num=self.polyline_points_num)
+        return gt_labels, gt_instances
+
+    def get_ego_trajs(self, idx, sample_rate, past_frames, future_frames):
+
+        adj_idx_list = range(idx-past_frames*sample_rate, idx+(future_frames+1)*sample_rate, sample_rate)
+        cur_frame = self.data_infos[idx]
+        full_adj_track = np.zeros((past_frames+future_frames+1, 2))
+        full_adj_adj_mask = np.zeros(past_frames+future_frames+1)
+        world2lidar_lidar_cur = cur_frame['sensors']['LIDAR_TOP']['world2lidar']
+        for j in range(len(adj_idx_list)):
+            adj_idx = adj_idx_list[j]
+            if adj_idx < 0 or adj_idx >= len(self.data_infos):
+                break
+            adj_frame = self.data_infos[adj_idx]
+            if adj_frame['folder'] != cur_frame['folder']:
+                break
+            world2lidar_ego_adj = adj_frame['sensors']['LIDAR_TOP']['world2lidar']
+            adj2cur_lidar = world2lidar_lidar_cur @ np.linalg.inv(world2lidar_ego_adj)
+            xy = adj2cur_lidar[0:2,3]
+            full_adj_track[j,0:2] = xy
+            full_adj_adj_mask[j] = 1
+        offset_track = full_adj_track[1:] - full_adj_track[:-1]
+        for j in range(past_frames-1,-1,-1):
+            if full_adj_adj_mask[j] == 0:
+                offset_track[j] = offset_track[j+1]
+        for j in range(past_frames,past_frames+future_frames,1):
+            if full_adj_adj_mask[j+1] == 0:
+                offset_track[j] = 0
+        command = self.command2hot(cur_frame['command_near'])
+        return offset_track[:past_frames].copy(), offset_track[past_frames:].copy(), full_adj_adj_mask[-future_frames:].copy(), command
+
+    def command2hot(self, command, max_dim=6):
+        if command < 0:
+            command = 4
+        command -= 1
+        cmd_one_hot = np.zeros(max_dim)
+        cmd_one_hot[command] = 1
+        return cmd_one_hot
+
+    def get_box_attr_labels(self, idx, sample_rate, frames):
+
+        adj_idx_list = range(idx, idx+(frames+1)*sample_rate, sample_rate)
+        cur_frame = self.data_infos[idx]
+        cur_box_names = cur_frame['gt_names']
+        for i in range(len(cur_box_names)):
+            if cur_box_names[i] in self.NameMapping.keys():
+                cur_box_names[i] = self.NameMapping[cur_box_names[i]]
+        cur_boxes = cur_frame['gt_boxes'].copy()
+        box_ids = cur_frame['gt_ids']
+        future_track = np.zeros((len(box_ids),frames+1,2))
+        future_mask = np.zeros((len(box_ids),frames+1))
+        future_yaw = np.zeros((len(box_ids),frames+1))
+        gt_fut_goal = np.zeros((len(box_ids),1))
+        agent_lcf_feat = np.zeros((len(box_ids),9))
+        world2lidar_lidar_cur = cur_frame['sensors']['LIDAR_TOP']['world2lidar']
+        for i in range(len(box_ids)):
+            agent_lcf_feat[i,0:2] = cur_boxes[i,0:2]
+            agent_lcf_feat[i,2] = cur_boxes[i,6]
+            agent_lcf_feat[i,3:5] = cur_boxes[i,7:]
+            agent_lcf_feat[i,5:8] = cur_boxes[i,3:6]
+            cur_box_name = cur_box_names[i]
+            if cur_box_name in
self.CLASSES: + agent_lcf_feat[i, 8] = self.CLASSES.index(cur_box_name) + else: + agent_lcf_feat[i, 8] = -1 + + box_id = box_ids[i] + cur_box2lidar = world2lidar_lidar_cur @ cur_frame['npc2world'][i] + cur_xy = cur_box2lidar[0:2,3] + for j in range(len(adj_idx_list)): + adj_idx = adj_idx_list[j] + if adj_idx <0 or adj_idx>=len(self.data_infos): + break + adj_frame = self.data_infos[adj_idx] + if adj_frame['folder'] != cur_frame ['folder']: + break + if len(np.where(adj_frame['gt_ids']==box_id)[0])==0: + continue + assert len(np.where(adj_frame['gt_ids']==box_id)[0]) == 1 , np.where(adj_frame['gt_ids']==box_id)[0] + adj_idx = np.where(adj_frame['gt_ids']==box_id)[0][0] + adj_box2lidar = world2lidar_lidar_cur @ adj_frame['npc2world'][adj_idx] + adj_xy = adj_box2lidar[0:2,3] + future_track[i,j,:] = adj_xy + future_mask[i,j] = 1 + future_yaw[i,j] = np.arctan2(adj_box2lidar[1,0],adj_box2lidar[0,0]) + + coord_diff = future_track[i,-1] - future_track[i,0] + if coord_diff.max() < 1.0: # static + gt_fut_goal[i] = 9 + else: + box_mot_yaw = np.arctan2(coord_diff[1], coord_diff[0]) + np.pi + gt_fut_goal[i] = box_mot_yaw // (np.pi / 4) # 0-8: goal direction class + + future_track_offset = future_track[:,1:,:] - future_track[:,:-1,:] + future_mask_offset = future_mask[:,1:] + future_track_offset[future_mask_offset==0] = 0 + future_yaw_offset = future_yaw[:,1:] - future_yaw[:,:-1] + mask1 = np.where(future_yaw_offset>np.pi) + mask2 = np.where(future_yaw_offset<-np.pi) + future_yaw_offset[mask1] -=np.pi*2 + future_yaw_offset[mask2] +=np.pi*2 + attr_labels = np.concatenate([future_track_offset.reshape(-1,frames*2), future_mask_offset, gt_fut_goal, agent_lcf_feat, future_yaw_offset],axis=-1).astype(np.float32) + return attr_labels.copy() + + + + def load_gt(self): + all_annotations = EvalBoxes() + for i in range(len(self.data_infos)): + sample_boxes = [] + sample_data = self.data_infos[i] + gt_boxes = sample_data['gt_boxes'] + for j in range(gt_boxes.shape[0]): + class_name = self.NameMapping[sample_data['gt_names'][j]] + if not class_name in self.eval_cfg['class_range'].keys(): + continue + range_x, range_y = self.eval_cfg['class_range'][class_name] + if abs(gt_boxes[j,0]) > range_x or abs(gt_boxes[j,1]) > range_y: + continue + sample_boxes.append(DetectionBox( + sample_token=sample_data['folder']+'_'+str(sample_data['frame_idx']), + translation=gt_boxes[j,0:3], + size=gt_boxes[j,3:6], + rotation=list(Quaternion(axis=[0, 0, 1], radians=-gt_boxes[j,6]-np.pi/2)), + velocity=gt_boxes[j,7:9], + num_pts=int(sample_data['num_points'][j]), + detection_name=class_name, + detection_score=-1.0, + attribute_name=class_name + )) + all_annotations.add_boxes(sample_data['folder']+'_'+str(sample_data['frame_idx']), sample_boxes) + return all_annotations + + + + def _format_gt(self): + gt_annos = [] + print('Start to convert gt map format...') + # assert self.map_ann_file is not None + if (not os.path.exists(self.map_ann_file)) : + dataset_length = len(self) + prog_bar = mmcv.ProgressBar(dataset_length) + mapped_class_names = self.MAPCLASSES + for sample_id in range(dataset_length): + sample_token = self.data_infos[sample_id]['folder'] + '_' + str(self.data_infos[sample_id]['frame_idx']) + gt_anno = {} + gt_anno['sample_token'] = sample_token + # gt_sample_annos = [] + gt_sample_dict = {} + gt_labels , gt_bboxes = self.get_map_info(sample_id) + gt_vecs = gt_bboxes.instance_list + gt_vec_list = [] + for i, (gt_label, gt_vec) in enumerate(zip(gt_labels, gt_vecs)): + name = mapped_class_names[gt_label] + anno = dict( + 
pts=np.array(list(gt_vec.coords)), + pts_num=len(list(gt_vec.coords)), + cls_name=name, + type=gt_label, + ) + gt_vec_list.append(anno) + gt_anno['vectors']=gt_vec_list + gt_annos.append(gt_anno) + + prog_bar.update() + nusc_submissions = { + 'GTs': gt_annos + } + print('\n GT anns writes to', self.map_ann_file) + dump(nusc_submissions, self.map_ann_file) + else: + print(f'{self.map_ann_file} exist, not update') + + + def _format_bbox(self, results, jsonfile_prefix=None, score_thresh=0.2): + """Convert the results to the standard format. + + Args: + results (list[dict]): Testing results of the dataset. + jsonfile_prefix (str): The prefix of the output jsonfile. + You can specify the output directory/filename by + modifying the jsonfile_prefix. Default: None. + + Returns: + str: Path of the output json file. + """ + + nusc_annos = {} + det_mapped_class_names = self.CLASSES + # assert self.map_ann_file is not None + map_pred_annos = {} + map_mapped_class_names = self.MAPCLASSES + plan_annos = {} + print('Start to convert detection format...') + for sample_id, det in enumerate(track_iter_progress(results)): + #pdb.set_trace() + annos = [] + box3d = det['boxes_3d'] + scores = det['scores_3d'] + labels = det['labels_3d'] + box_gravity_center = box3d.gravity_center + box_dims = box3d.dims + box_yaw = box3d.yaw.numpy() + box_yaw = -box_yaw - np.pi / 2 + sample_token = self.data_infos[sample_id]['folder'] + '_' + str(self.data_infos[sample_id]['frame_idx']) + for i in range(len(box3d)): + #import pdb;pdb.set_trace() + if scores[i] < score_thresh: + continue + quat = list(Quaternion(axis=[0, 0, 1], radians=box_yaw[i])) + velocity = [box3d.tensor[i, 7].item(),box3d.tensor[i, 8].item()] + name = det_mapped_class_names[labels[i]] + nusc_anno = dict( + sample_token=sample_token, + translation=box_gravity_center[i].tolist(), + size=box_dims[i].tolist(), + rotation=quat, + velocity=velocity, + detection_name=name, + detection_score=scores[i].item(), + attribute_name=name) + annos.append(nusc_anno) + nusc_annos[sample_token] = annos + map_pred_anno = {} + vecs = output_to_vecs(det) + sample_token = self.data_infos[sample_id]['folder'] + '_' + str(self.data_infos[sample_id]['frame_idx']) + map_pred_anno['sample_token'] = sample_token + pred_vec_list=[] + for i, vec in enumerate(vecs): + name = map_mapped_class_names[vec['label']] + anno = dict( + # sample_token=sample_token, + pts=vec['pts'], + pts_num=len(vec['pts']), + cls_name=name, + type=vec['label'], + confidence_level=vec['score']) + pred_vec_list.append(anno) + # annos.append(nusc_anno) + # nusc_annos[sample_token] = annos + map_pred_anno['vectors'] = pred_vec_list + map_pred_annos[sample_token] = map_pred_anno + + # NOTE: Eval on map is VERY SLOW for the first time(about 3 hours) because load map ground trurh is slow. + # So we do not eval map by default. 
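+        # To enable it, uncomment the block below: the GT cache is written
+        # once by self._format_gt() and reused on later runs.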
+ # if not os.path.exists(self.map_ann_file): + # self._format_gt() + # else: + # print(f'{self.map_ann_file} exist, not update') + # with open(self.map_ann_file,'r') as f: + # GT_anns = json.load(f) + # gt_annos = GT_anns['GTs'] + + nusc_submissions = { + 'meta': self.modality, + 'results': nusc_annos, + 'map_results': map_pred_annos, + 'plan_results': plan_annos + # 'GTs': gt_annos + } + + mmcv.mkdir_or_exist(jsonfile_prefix) + + res_path = osp.join(jsonfile_prefix, 'results_nusc.json') + print('Results writes to', res_path) + dump(nusc_submissions, res_path) + return res_path + + def format_results(self, results, jsonfile_prefix=None): + """Format the results to json (standard format for COCO evaluation). + + Args: + results (list[dict]): Testing results of the dataset. + jsonfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + + Returns: + tuple: Returns (result_files, tmp_dir), where `result_files` is a \ + dict containing the json filepaths, `tmp_dir` is the temporal \ + directory created for saving json files when \ + `jsonfile_prefix` is not specified. + """ + if isinstance(results, dict): + # print(f'results must be a list, but get dict, keys={results.keys()}') + # assert isinstance(results, list) + results = results['bbox_results'] + assert isinstance(results, list) + # assert len(results) == len(self), ( + # 'The length of results is not equal to the dataset len: {} != {}'. + # format(len(results), len(self))) + + if jsonfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + jsonfile_prefix = osp.join(tmp_dir.name, 'results') + else: + tmp_dir = None + + # currently the output prediction results could be in two formats + # 1. list of dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...) + # 2. list of dict('pts_bbox' or 'img_bbox': + # dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...)) + # this is a workaround to enable evaluation of both formats on nuScenes + # refer to https://github.com/open-mmlab/mmdetection3d/issues/449 + if not ('pts_bbox' in results[0] or 'img_bbox' in results[0]): + result_files = self._format_bbox(results, jsonfile_prefix) + else: + # should take the inner dict out of 'pts_bbox' or 'img_bbox' dict + result_files = dict() + for name in results[0]: + if name == 'metric_results': + continue + print(f'\nFormating bboxes of {name}') + results_ = [out[name] for out in results] + tmp_file_ = osp.join(jsonfile_prefix, name) + result_files.update( + {name: self._format_bbox(results_, tmp_file_)}) + return result_files, tmp_dir + + def _evaluate_single(self, + result_path, + logger=None, + metric='bbox', + map_metric='chamfer', + result_name='pts_bbox'): + """Evaluation for a single model in nuScenes protocol. + + Args: + result_path (str): Path of the result file. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + metric (str): Metric name used for evaluation. Default: 'bbox'. + result_name (str): Result name in the metric prefix. + Default: 'pts_bbox'. + + Returns: + dict: Dictionary of evaluation details. 
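+
+            Keys are prefixed with ``bbox_NuScenes``, e.g.
+            ``bbox_NuScenes/NDS`` and ``bbox_NuScenes/mAP``.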
+ """ + detail = dict() + with open(result_path,'r') as f: + result_data = json.load(f) + pred_boxes = EvalBoxes.deserialize(result_data['results'], DetectionBox) + meta = result_data['meta'] + + + + gt_boxes = self.load_gt() + + metric_data_list = DetectionMetricDataList() + for class_name in self.eval_cfg['class_names']: + for dist_th in self.eval_cfg['dist_ths']: + md = accumulate(gt_boxes, pred_boxes, class_name, center_distance, dist_th) + metric_data_list.set(class_name, dist_th, md) + metrics = DetectionMetrics(self.eval_cfg) + + for class_name in self.eval_cfg['class_names']: + # Compute APs. + for dist_th in self.eval_cfg['dist_ths']: + metric_data = metric_data_list[(class_name, dist_th)] + ap = calc_ap(metric_data, self.eval_cfg['min_recall'], self.eval_cfg['min_precision']) + metrics.add_label_ap(class_name, dist_th, ap) + + # Compute TP metrics. + for metric_name in self.eval_cfg['tp_metrics']: + metric_data = metric_data_list[(class_name, self.eval_cfg['dist_th_tp'])] + tp = calc_tp(metric_data, self.eval_cfg['min_recall'], metric_name) + metrics.add_label_tp(class_name, metric_name, tp) + + metrics_summary = metrics.serialize() + metrics_summary['meta'] = meta.copy() + print('mAP: %.4f' % (metrics_summary['mean_ap'])) + err_name_mapping = { + 'trans_err': 'mATE', + 'scale_err': 'mASE', + 'orient_err': 'mAOE', + 'vel_err': 'mAVE', + } + for tp_name, tp_val in metrics_summary['tp_errors'].items(): + print('%s: %.4f' % (err_name_mapping[tp_name], tp_val)) + print('NDS: %.4f' % (metrics_summary['nd_score'])) + #print('Eval time: %.1fs' % metrics_summary['eval_time']) + + # Print per-class metrics. + print() + print('Per-class results:') + print('Object Class\tAP\tATE\tASE\tAOE\tAVE') + class_aps = metrics_summary['mean_dist_aps'] + class_tps = metrics_summary['label_tp_errors'] + for class_name in class_aps.keys(): + print('%s\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f' + % (class_name, class_aps[class_name], + class_tps[class_name]['trans_err'], + class_tps[class_name]['scale_err'], + class_tps[class_name]['orient_err'], + class_tps[class_name]['vel_err'])) + + detail = dict() + metric_prefix = 'bbox_NuScenes' + for name in self.eval_cfg['class_names']: + for k, v in metrics_summary['label_aps'][name].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val + for k, v in metrics_summary['label_tp_errors'][name].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}_{}'.format(metric_prefix, name, k)] = val + for k, v in metrics_summary['tp_errors'].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}'.format(metric_prefix,self.eval_cfg['err_name_maping'][k])] = val + detail['{}/NDS'.format(metric_prefix)] = metrics_summary['nd_score'] + detail['{}/mAP'.format(metric_prefix)] = metrics_summary['mean_ap'] + + + # from mmcv.datasets.map_utils.mean_ap import eval_map + # from mmcv.datasets.map_utils.mean_ap import format_res_gt_by_classes + # result_path = osp.abspath(result_path) + + # print('Formating results & gts by classes') + # pred_results = load(result_path) + # map_results = pred_results['map_results'] + # gt_anns = load(self.map_ann_file) + # map_annotations = gt_anns['GTs'] + # cls_gens, cls_gts = format_res_gt_by_classes(result_path, + # map_results, + # map_annotations, + # cls_names=self.MAPCLASSES, + # num_pred_pts_per_instance=self.polyline_points_num, + # eval_use_same_gt_sample_num_flag=self.map_eval_use_same_gt_sample_num_flag, + # pc_range=self.point_cloud_range) + # map_metrics = map_metric if 
isinstance(map_metric, list) else [map_metric] + # allowed_metrics = ['chamfer', 'iou'] + # for metric in map_metrics: + # if metric not in allowed_metrics: + # raise KeyError(f'metric {metric} is not supported') + # for metric in map_metrics: + # print('-*'*10+f'use metric:{metric}'+'-*'*10) + # if metric == 'chamfer': + # thresholds = [0.5,1.0,1.5] + # elif metric == 'iou': + # thresholds= np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) + # cls_aps = np.zeros((len(thresholds),self.NUM_MAPCLASSES)) + # for i, thr in enumerate(thresholds): + # print('-*'*10+f'threshhold:{thr}'+'-*'*10) + # mAP, cls_ap = eval_map( + # map_results, + # map_annotations, + # cls_gens, + # cls_gts, + # threshold=thr, + # cls_names=self.MAPCLASSES, + # logger=logger, + # num_pred_pts_per_instance=self.polyline_points_num, + # pc_range=self.point_cloud_range, + # metric=metric) + # for j in range(self.NUM_MAPCLASSES): + # cls_aps[i, j] = cls_ap[j]['ap'] + # for i, name in enumerate(self.MAPCLASSES): + # print('{}: {}'.format(name, cls_aps.mean(0)[i])) + # detail['NuscMap_{}/{}_AP'.format(metric,name)] = cls_aps.mean(0)[i] + # print('map: {}'.format(cls_aps.mean(0).mean())) + # detail['NuscMap_{}/mAP'.format(metric)] = cls_aps.mean(0).mean() + # for i, name in enumerate(self.MAPCLASSES): + # for j, thr in enumerate(thresholds): + # if metric == 'chamfer': + # detail['NuscMap_{}/{}_AP_thr_{}'.format(metric,name,thr)]=cls_aps[j][i] + # elif metric == 'iou': + # if thr == 0.5 or thr == 0.75: + # detail['NuscMap_{}/{}_AP_thr_{}'.format(metric,name,thr)]=cls_aps[j][i] + + return detail + + def evaluate(self, + results, + metric='bbox', + map_metric='chamfer', + logger=None, + jsonfile_prefix=None, + result_names=['pts_bbox'], + show=False, + out_dir=None, + pipeline=None): + """Evaluation in nuScenes protocol. + + Args: + results (list[dict]): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + jsonfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + show (bool): Whether to visualize. + Default: False. + out_dir (str): Path to save the visualization results. + Default: None. + pipeline (list[dict], optional): raw data loading for showing. + Default: None. + + Returns: + dict[str, float]: Results of each evaluation metric. 
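+
+            Motion-prediction metrics (EPA/ADE/FDE/MR) and planning metrics
+            are accumulated from each sample's ``metric_results`` and
+            printed before the detection metrics are computed.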
+ """ + result_metric_names = ['EPA', 'ADE', 'FDE', 'MR'] + motion_cls_names = ['car', 'pedestrian'] + motion_metric_names = ['gt', 'cnt_ade', 'cnt_fde', 'hit', + 'fp', 'ADE', 'FDE', 'MR'] + all_metric_dict = {} + for met in motion_metric_names: + for cls in motion_cls_names: + all_metric_dict[met+'_'+cls] = 0.0 + result_dict = {} + for met in result_metric_names: + for cls in motion_cls_names: + result_dict[met+'_'+cls] = 0.0 + + alpha = 0.5 + + for i in range(len(results)): + for key in all_metric_dict.keys(): + all_metric_dict[key] += results[i]['metric_results'][key] + + for cls in motion_cls_names: + result_dict['EPA_'+cls] = (all_metric_dict['hit_'+cls] - \ + alpha * all_metric_dict['fp_'+cls]) / all_metric_dict['gt_'+cls] + result_dict['ADE_'+cls] = all_metric_dict['ADE_'+cls] / all_metric_dict['cnt_ade_'+cls] + result_dict['FDE_'+cls] = all_metric_dict['FDE_'+cls] / all_metric_dict['cnt_fde_'+cls] + result_dict['MR_'+cls] = all_metric_dict['MR_'+cls] / all_metric_dict['cnt_fde_'+cls] + + print('\n') + print('-------------- Motion Prediction --------------') + for k, v in result_dict.items(): + print(f'{k}: {v}') + + # NOTE: print planning metric + print('\n') + print('-------------- Planning --------------') + metric_dict = None + num_valid = 0 + for res in results: + if res['metric_results']['fut_valid_flag']: + num_valid += 1 + else: + continue + if metric_dict is None: + metric_dict = copy.deepcopy(res['metric_results']) + else: + for k in res['metric_results'].keys(): + metric_dict[k] += res['metric_results'][k] + + for k in metric_dict: + metric_dict[k] = metric_dict[k] / num_valid + print("{}:{}".format(k, metric_dict[k])) + + result_files, tmp_dir = self.format_results(results, jsonfile_prefix) + + if isinstance(result_files, dict): + results_dict = dict() + for name in result_names: + print('Evaluating bboxes of {}'.format(name)) + ret_dict = self._evaluate_single(result_files[name], metric=metric, map_metric=map_metric) + results_dict.update(ret_dict) + elif isinstance(result_files, str): + results_dict = self._evaluate_single(result_files, metric=metric, map_metric=map_metric) + + if tmp_dir is not None: + tmp_dir.cleanup() + + if show: + self.show(results, out_dir, pipeline=pipeline) + return results_dict + +def output_to_nusc_box(detection): + """Convert the output to the box class in the nuScenes. + + Args: + detection (dict): Detection results. + + - boxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox. + - scores_3d (torch.Tensor): Detection scores. + - labels_3d (torch.Tensor): Predicted box labels. + + Returns: + list[:obj:`NuScenesBox`]: List of standard NuScenesBoxes. 
+ """ + box3d = detection['boxes_3d'] + scores = detection['scores_3d'].numpy() + labels = detection['labels_3d'].numpy() + trajs = detection['trajs_3d'].numpy() + + + box_gravity_center = box3d.gravity_center.numpy() + box_dims = box3d.dims.numpy() + box_yaw = box3d.yaw.numpy() + # TODO: check whether this is necessary + # with dir_offset & dir_limit in the head + box_yaw = -box_yaw - np.pi / 2 + + box_list = [] + for i in range(len(box3d)): + quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box_yaw[i]) + velocity = (*box3d.tensor[i, 7:9], 0.0) + # velo_val = np.linalg.norm(box3d[i, 7:9]) + # velo_ori = box3d[i, 6] + # velocity = ( + # velo_val * np.cos(velo_ori), velo_val * np.sin(velo_ori), 0.0) + box = CustomNuscenesBox( + center=box_gravity_center[i], + size=box_dims[i], + orientation=quat, + fut_trajs=trajs[i], + label=labels[i], + score=scores[i], + velocity=velocity) + box_list.append(box) + return box_list + + +def lidar_nusc_box_to_global(info, + boxes, + classes, + eval_configs, + eval_version='detection_cvpr_2019'): + """Convert the box from ego to global coordinate. + + Args: + info (dict): Info for a specific sample data, including the + calibration information. + boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes. + classes (list[str]): Mapped classes in the evaluation. + eval_configs (object): Evaluation configuration object. + eval_version (str): Evaluation version. + Default: 'detection_cvpr_2019' + + Returns: + list: List of standard NuScenesBoxes in the global + coordinate. + """ + box_list = [] + for box in boxes: + # Move box to ego vehicle coord system + box.rotate(pyquaternion.Quaternion(info['lidar2ego_rotation'])) + box.translate(np.array(info['lidar2ego_translation'])) + # filter det in ego. + cls_range_x_map = eval_configs.class_range_x + cls_range_y_map = eval_configs.class_range_y + x_distance, y_distance = box.center[0], box.center[1] + det_range_x = cls_range_x_map[classes[box.label]] + det_range_y = cls_range_y_map[classes[box.label]] + if abs(x_distance) > det_range_x or abs(y_distance) > det_range_y: + continue + # Move box to global coord system + box.rotate(pyquaternion.Quaternion(info['ego2global_rotation'])) + box.translate(np.array(info['ego2global_translation'])) + box_list.append(box) + return box_list + +def output_to_vecs(detection): + box3d = detection['map_boxes_3d'].numpy() + scores = detection['map_scores_3d'].numpy() + labels = detection['map_labels_3d'].numpy() + pts = detection['map_pts_3d'].numpy() + + vec_list = [] + # import pdb;pdb.set_trace() + for i in range(box3d.shape[0]): + vec = dict( + bbox = box3d[i], # xyxy + label=labels[i], + score=scores[i], + pts=pts[i], + ) + vec_list.append(vec) + return vec_list \ No newline at end of file diff --git a/mmcv/datasets/__init__.py b/mmcv/datasets/__init__.py new file mode 100644 index 0000000..a0093d3 --- /dev/null +++ b/mmcv/datasets/__init__.py @@ -0,0 +1,15 @@ +from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset +from .custom_3d import Custom3DDataset +from .custom import CustomDataset +from .nuscenes_dataset import NuScenesDataset +from .nuscenes_e2e_dataset import NuScenesE2EDataset +from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler +from .utils import replace_ImageToTensor +from .custom_nuscenes_dataset_v2 import CustomNuScenesDatasetV2 +from .custom_nuscenes_dataset import CustomNuScenesDataset +from .dd3d_nuscenes_dataset import DD3DNuscenesDataset +from .lyft_dataset import LyftDataset +from .B2D_dataset 
import B2D_Dataset +from .B2D_e2e_dataset import B2D_E2E_Dataset +from .nuscenes_vad_dataset import VADCustomNuScenesDataset +from .B2D_vad_dataset import B2D_VAD_Dataset \ No newline at end of file diff --git a/mmcv/datasets/api_wrappers/__init__.py b/mmcv/datasets/api_wrappers/__init__.py new file mode 100644 index 0000000..05f95c9 --- /dev/null +++ b/mmcv/datasets/api_wrappers/__init__.py @@ -0,0 +1,3 @@ +from .coco_api import COCO, COCOeval + +__all__ = ['COCO', 'COCOeval'] diff --git a/mmcv/datasets/api_wrappers/coco_api.py b/mmcv/datasets/api_wrappers/coco_api.py new file mode 100644 index 0000000..57077f9 --- /dev/null +++ b/mmcv/datasets/api_wrappers/coco_api.py @@ -0,0 +1,46 @@ +# This file add snake case alias for coco api + +import warnings + +import pycocotools +from pycocotools.coco import COCO as _COCO +from pycocotools.cocoeval import COCOeval as _COCOeval + + +class COCO(_COCO): + """This class is almost the same as official pycocotools package. + + It implements some snake case function aliases. So that the COCO class has + the same interface as LVIS class. + """ + + def __init__(self, annotation_file=None): + if getattr(pycocotools, '__version__', '0') >= '12.0.2': + warnings.warn( + 'mmpycocotools is deprecated. Please install official pycocotools by "pip install pycocotools"', # noqa: E501 + UserWarning) + super().__init__(annotation_file=annotation_file) + self.img_ann_map = self.imgToAnns + self.cat_img_map = self.catToImgs + + def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None): + return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd) + + def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]): + return self.getCatIds(cat_names, sup_names, cat_ids) + + def get_img_ids(self, img_ids=[], cat_ids=[]): + return self.getImgIds(img_ids, cat_ids) + + def load_anns(self, ids): + return self.loadAnns(ids) + + def load_cats(self, ids): + return self.loadCats(ids) + + def load_imgs(self, ids): + return self.loadImgs(ids) + + +# just for the ease of import +COCOeval = _COCOeval diff --git a/mmcv/datasets/builder.py b/mmcv/datasets/builder.py new file mode 100644 index 0000000..7f527d6 --- /dev/null +++ b/mmcv/datasets/builder.py @@ -0,0 +1,204 @@ +import copy +import platform +import random +from functools import partial + +import numpy as np +from mmcv.parallel import collate +from mmcv.utils import Registry, build_from_cfg, get_dist_info +from torch.utils.data import DataLoader + +# DATASETS = Registry('dataset') +# PIPELINES = Registry('pipeline') +# OBJECTSAMPLERS = Registry('Object sampler') + +from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler +# from .dataset_wrappers import CBGSDataset, ClassBalancedDataset, ConcatDataset, RepeatDataset +from .samplers.sampler import build_sampler + +if platform.system() != 'Windows': + # https://github.com/pytorch/pytorch/issues/973 + import resource + rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) + hard_limit = rlimit[1] + soft_limit = min(4096, hard_limit) + resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit)) + +DATASETS = Registry('dataset') +PIPELINES = Registry('pipeline') +OBJECTSAMPLERS = Registry('Object sampler') + + + +def _concat_dataset(cfg, default_args=None): + from .dataset_wrappers import ConcatDataset + ann_files = cfg['ann_file'] + img_prefixes = cfg.get('img_prefix', None) + seg_prefixes = cfg.get('seg_prefix', None) + proposal_files = cfg.get('proposal_file', None) + separate_eval = cfg.get('separate_eval', True) + + datasets = [] + 
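+    # one dataset is built per annotation file; the optional prefix lists
+    # are matched to ann_files by index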
num_dset = len(ann_files) + for i in range(num_dset): + data_cfg = copy.deepcopy(cfg) + # pop 'separate_eval' since it is not a valid key for common datasets. + if 'separate_eval' in data_cfg: + data_cfg.pop('separate_eval') + data_cfg['ann_file'] = ann_files[i] + if isinstance(img_prefixes, (list, tuple)): + data_cfg['img_prefix'] = img_prefixes[i] + if isinstance(seg_prefixes, (list, tuple)): + data_cfg['seg_prefix'] = seg_prefixes[i] + if isinstance(proposal_files, (list, tuple)): + data_cfg['proposal_file'] = proposal_files[i] + datasets.append(build_dataset(data_cfg, default_args)) + + return ConcatDataset(datasets, separate_eval) + + + + +def build_dataset(cfg, default_args=None): + from mmcv.datasets.dataset_wrappers import CBGSDataset + from mmcv.datasets.dataset_wrappers import (ClassBalancedDataset, + ConcatDataset, RepeatDataset) + if isinstance(cfg, (list, tuple)): + dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg]) + elif cfg['type'] == 'ConcatDataset': + dataset = ConcatDataset( + [build_dataset(c, default_args) for c in cfg['datasets']], + cfg.get('separate_eval', True)) + elif cfg['type'] == 'RepeatDataset': + dataset = RepeatDataset( + build_dataset(cfg['dataset'], default_args), cfg['times']) + elif cfg['type'] == 'ClassBalancedDataset': + dataset = ClassBalancedDataset( + build_dataset(cfg['dataset'], default_args), cfg['oversample_thr']) + elif cfg['type'] == 'CBGSDataset': + dataset = CBGSDataset(build_dataset(cfg['dataset'], default_args)) + elif isinstance(cfg.get('ann_file'), (list, tuple)): + dataset = _concat_dataset(cfg, default_args) + else: + dataset = build_from_cfg(cfg, DATASETS, default_args) + + return dataset + + +def build_dataloader(dataset, + samples_per_gpu, + workers_per_gpu, + num_gpus=1, + dist=True, + shuffle=True, + seed=None, + shuffler_sampler=None, + nonshuffler_sampler=None, + **kwargs): + """Build PyTorch DataLoader. + In distributed training, each GPU/process has a dataloader. + In non-distributed training, there is only one dataloader for all GPUs. + Args: + dataset (Dataset): A PyTorch dataset. + samples_per_gpu (int): Number of training samples on each GPU, i.e., + batch size of each GPU. + workers_per_gpu (int): How many subprocesses to use for data loading + for each GPU. + num_gpus (int): Number of GPUs. Only used in non-distributed training. + dist (bool): Distributed training/test or not. Default: True. + shuffle (bool): Whether to shuffle the data at every epoch. + Default: True. + kwargs: any keyword argument to be used to initialize DataLoader + Returns: + DataLoader: A PyTorch dataloader. 
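+
+    Example:
+        >>> # minimal sketch: a single-GPU, non-distributed debugging
+        >>> # loader (assumes `dataset` was built with build_dataset)
+        >>> loader = build_dataloader(dataset, samples_per_gpu=1,
+        ...                           workers_per_gpu=2, dist=False,
+        ...                           shuffle=False)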
+ """ + rank, world_size = get_dist_info() + if dist: + # DistributedGroupSampler will definitely shuffle the data to satisfy + # that images on each GPU are in the same group + if shuffle: + sampler = build_sampler(shuffler_sampler if shuffler_sampler is not None else dict(type='DistributedGroupSampler'), + dict( + dataset=dataset, + samples_per_gpu=samples_per_gpu, + num_replicas=world_size, + rank=rank, + seed=seed) + ) + + else: + sampler = build_sampler(nonshuffler_sampler if nonshuffler_sampler is not None else dict(type='DistributedSampler'), + dict( + dataset=dataset, + num_replicas=world_size, + rank=rank, + shuffle=shuffle, + seed=seed) + ) + + batch_size = samples_per_gpu + num_workers = workers_per_gpu + else: + # assert False, 'not support in bevformer' + print('WARNING!!!!, Only can be used for obtain inference speed!!!!') + sampler = GroupSampler(dataset, samples_per_gpu) if shuffle else None + batch_size = num_gpus * samples_per_gpu + num_workers = num_gpus * workers_per_gpu + + init_fn = partial( + worker_init_fn, num_workers=num_workers, rank=rank, + seed=seed) if seed is not None else None + data_loader = DataLoader( + dataset, + batch_size=batch_size, + sampler=sampler, + num_workers=num_workers, + collate_fn=partial(collate, samples_per_gpu=samples_per_gpu), + pin_memory=False, + worker_init_fn=init_fn, + **kwargs) + + return data_loader + + +def worker_init_fn(worker_id, num_workers, rank, seed): + # The seed of each worker equals to + # num_worker * rank + worker_id + user_seed + worker_seed = num_workers * rank + worker_id + seed + np.random.seed(worker_seed) + random.seed(worker_seed) + + +if platform.system() != 'Windows': + # https://github.com/pytorch/pytorch/issues/973 + import resource + rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) + base_soft_limit = rlimit[0] + hard_limit = rlimit[1] + soft_limit = min(max(4096, base_soft_limit), hard_limit) + resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit)) + +def custom_build_dataset(cfg, default_args=None): + from mmdet3d.datasets.dataset_wrappers import CBGSDataset + from mmdet.datasets.dataset_wrappers import (ClassBalancedDataset, + ConcatDataset, RepeatDataset) + if isinstance(cfg, (list, tuple)): + dataset = ConcatDataset([custom_build_dataset(c, default_args) for c in cfg]) + elif cfg['type'] == 'ConcatDataset': + dataset = ConcatDataset( + [custom_build_dataset(c, default_args) for c in cfg['datasets']], + cfg.get('separate_eval', True)) + elif cfg['type'] == 'RepeatDataset': + dataset = RepeatDataset( + custom_build_dataset(cfg['dataset'], default_args), cfg['times']) + elif cfg['type'] == 'ClassBalancedDataset': + dataset = ClassBalancedDataset( + custom_build_dataset(cfg['dataset'], default_args), cfg['oversample_thr']) + elif cfg['type'] == 'CBGSDataset': + dataset = CBGSDataset(custom_build_dataset(cfg['dataset'], default_args)) + elif isinstance(cfg.get('ann_file'), (list, tuple)): + dataset = _concat_dataset(cfg, default_args) + else: + dataset = build_from_cfg(cfg, DATASETS, default_args) + + return dataset \ No newline at end of file diff --git a/mmcv/datasets/coco.py b/mmcv/datasets/coco.py new file mode 100644 index 0000000..6d2f0c1 --- /dev/null +++ b/mmcv/datasets/coco.py @@ -0,0 +1,558 @@ +import itertools +import logging +import os.path as osp +import tempfile +import warnings +from collections import OrderedDict + +from mmcv.fileio.io import load, dump +import numpy as np +from mmcv.utils import print_log +from terminaltables import AsciiTable + +from mmcv.core import 
eval_recalls +from .api_wrappers import COCO, COCOeval +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class CocoDataset(CustomDataset): + + CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', + 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', + 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', + 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', + 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', + 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', + 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', + 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', + 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', + 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', + 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', + 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush') + + def load_annotations(self, ann_file): + """Load annotation from COCO style annotation file. + + Args: + ann_file (str): Path of annotation file. + + Returns: + list[dict]: Annotation info from COCO api. + """ + + self.coco = COCO(ann_file) + # The order of returned `cat_ids` will not + # change with the order of the CLASSES + self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES) + + self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} + self.img_ids = self.coco.get_img_ids() + data_infos = [] + total_ann_ids = [] + for i in self.img_ids: + info = self.coco.load_imgs([i])[0] + info['filename'] = info['file_name'] + data_infos.append(info) + ann_ids = self.coco.get_ann_ids(img_ids=[i]) + total_ann_ids.extend(ann_ids) + assert len(set(total_ann_ids)) == len( + total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!" + return data_infos + + def get_ann_info(self, idx): + """Get COCO annotation by index. + + Args: + idx (int): Index of data. + + Returns: + dict: Annotation info of specified index. + """ + + img_id = self.data_infos[idx]['id'] + ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) + ann_info = self.coco.load_anns(ann_ids) + return self._parse_ann_info(self.data_infos[idx], ann_info) + + def get_cat_ids(self, idx): + """Get COCO category ids by index. + + Args: + idx (int): Index of data. + + Returns: + list[int]: All categories in the image of specified index. 
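+ + Example (illustrative values; the returned ids are raw COCO + category ids):: + + >>> dataset.get_cat_ids(0) + [1, 3, 3]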
+ """ + + img_id = self.data_infos[idx]['id'] + ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) + ann_info = self.coco.load_anns(ann_ids) + return [ann['category_id'] for ann in ann_info] + + def _filter_imgs(self, min_size=32): + """Filter images too small or without ground truths.""" + valid_inds = [] + # obtain images that contain annotation + ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values()) + # obtain images that contain annotations of the required categories + ids_in_cat = set() + for i, class_id in enumerate(self.cat_ids): + ids_in_cat |= set(self.coco.cat_img_map[class_id]) + # merge the image id sets of the two conditions and use the merged set + # to filter out images if self.filter_empty_gt=True + ids_in_cat &= ids_with_ann + + valid_img_ids = [] + for i, img_info in enumerate(self.data_infos): + img_id = self.img_ids[i] + if self.filter_empty_gt and img_id not in ids_in_cat: + continue + if min(img_info['width'], img_info['height']) >= min_size: + valid_inds.append(i) + valid_img_ids.append(img_id) + self.img_ids = valid_img_ids + return valid_inds + + def _parse_ann_info(self, img_info, ann_info): + """Parse bbox and mask annotation. + + Args: + ann_info (list[dict]): Annotation info of an image. + with_mask (bool): Whether to parse mask annotations. + + Returns: + dict: A dict containing the following keys: bboxes, bboxes_ignore,\ + labels, masks, seg_map. "masks" are raw annotations and not \ + decoded into binary masks. + """ + gt_bboxes = [] + gt_labels = [] + gt_bboxes_ignore = [] + gt_masks_ann = [] + for i, ann in enumerate(ann_info): + if ann.get('ignore', False): + continue + x1, y1, w, h = ann['bbox'] + inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0)) + inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0)) + if inter_w * inter_h == 0: + continue + if ann['area'] <= 0 or w < 1 or h < 1: + continue + if ann['category_id'] not in self.cat_ids: + continue + bbox = [x1, y1, x1 + w, y1 + h] + if ann.get('iscrowd', False): + gt_bboxes_ignore.append(bbox) + else: + gt_bboxes.append(bbox) + gt_labels.append(self.cat2label[ann['category_id']]) + gt_masks_ann.append(ann.get('segmentation', None)) + + if gt_bboxes: + gt_bboxes = np.array(gt_bboxes, dtype=np.float32) + gt_labels = np.array(gt_labels, dtype=np.int64) + else: + gt_bboxes = np.zeros((0, 4), dtype=np.float32) + gt_labels = np.array([], dtype=np.int64) + + if gt_bboxes_ignore: + gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) + else: + gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) + + seg_map = img_info['filename'].replace('jpg', 'png') + + ann = dict( + bboxes=gt_bboxes, + labels=gt_labels, + bboxes_ignore=gt_bboxes_ignore, + masks=gt_masks_ann, + seg_map=seg_map) + + return ann + + def xyxy2xywh(self, bbox): + """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO + evaluation. + + Args: + bbox (numpy.ndarray): The bounding boxes, shape (4, ), in + ``xyxy`` order. + + Returns: + list[float]: The converted bounding boxes, in ``xywh`` order. 
+ """ + + _bbox = bbox.tolist() + return [ + _bbox[0], + _bbox[1], + _bbox[2] - _bbox[0], + _bbox[3] - _bbox[1], + ] + + def _proposal2json(self, results): + """Convert proposal results to COCO json style.""" + json_results = [] + for idx in range(len(self)): + img_id = self.img_ids[idx] + bboxes = results[idx] + for i in range(bboxes.shape[0]): + data = dict() + data['image_id'] = img_id + data['bbox'] = self.xyxy2xywh(bboxes[i]) + data['score'] = float(bboxes[i][4]) + data['category_id'] = 1 + json_results.append(data) + return json_results + + def _det2json(self, results): + """Convert detection results to COCO json style.""" + json_results = [] + for idx in range(len(self)): + img_id = self.img_ids[idx] + result = results[idx] + for label in range(len(result)): + bboxes = result[label] + for i in range(bboxes.shape[0]): + data = dict() + data['image_id'] = img_id + data['bbox'] = self.xyxy2xywh(bboxes[i]) + data['score'] = float(bboxes[i][4]) + data['category_id'] = self.cat_ids[label] + json_results.append(data) + return json_results + + def _segm2json(self, results): + """Convert instance segmentation results to COCO json style.""" + bbox_json_results = [] + segm_json_results = [] + for idx in range(len(self)): + img_id = self.img_ids[idx] + det, seg = results[idx] + for label in range(len(det)): + # bbox results + bboxes = det[label] + for i in range(bboxes.shape[0]): + data = dict() + data['image_id'] = img_id + data['bbox'] = self.xyxy2xywh(bboxes[i]) + data['score'] = float(bboxes[i][4]) + data['category_id'] = self.cat_ids[label] + bbox_json_results.append(data) + + # segm results + # some detectors use different scores for bbox and mask + if isinstance(seg, tuple): + segms = seg[0][label] + mask_score = seg[1][label] + else: + segms = seg[label] + mask_score = [bbox[4] for bbox in bboxes] + for i in range(bboxes.shape[0]): + data = dict() + data['image_id'] = img_id + data['bbox'] = self.xyxy2xywh(bboxes[i]) + data['score'] = float(mask_score[i]) + data['category_id'] = self.cat_ids[label] + if isinstance(segms[i]['counts'], bytes): + segms[i]['counts'] = segms[i]['counts'].decode() + data['segmentation'] = segms[i] + segm_json_results.append(data) + return bbox_json_results, segm_json_results + + def results2json(self, results, outfile_prefix): + """Dump the detection results to a COCO style json file. + + There are 3 types of results: proposals, bbox predictions, mask + predictions, and they have different data types. This method will + automatically recognize the type, and dump them to json files. + + Args: + results (list[list | tuple | ndarray]): Testing results of the + dataset. + outfile_prefix (str): The filename prefix of the json files. If the + prefix is "somepath/xxx", the json files will be named + "somepath/xxx.bbox.json", "somepath/xxx.segm.json", + "somepath/xxx.proposal.json". + + Returns: + dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \ + values are corresponding filenames. 
+ """ + result_files = dict() + if isinstance(results[0], list): + json_results = self._det2json(results) + result_files['bbox'] = f'{outfile_prefix}.bbox.json' + result_files['proposal'] = f'{outfile_prefix}.bbox.json' + dump(json_results, result_files['bbox']) + elif isinstance(results[0], tuple): + json_results = self._segm2json(results) + result_files['bbox'] = f'{outfile_prefix}.bbox.json' + result_files['proposal'] = f'{outfile_prefix}.bbox.json' + result_files['segm'] = f'{outfile_prefix}.segm.json' + dump(json_results[0], result_files['bbox']) + dump(json_results[1], result_files['segm']) + elif isinstance(results[0], np.ndarray): + json_results = self._proposal2json(results) + result_files['proposal'] = f'{outfile_prefix}.proposal.json' + dump(json_results, result_files['proposal']) + else: + raise TypeError('invalid type of results') + return result_files + + def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None): + gt_bboxes = [] + for i in range(len(self.img_ids)): + ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i]) + ann_info = self.coco.load_anns(ann_ids) + if len(ann_info) == 0: + gt_bboxes.append(np.zeros((0, 4))) + continue + bboxes = [] + for ann in ann_info: + if ann.get('ignore', False) or ann['iscrowd']: + continue + x1, y1, w, h = ann['bbox'] + bboxes.append([x1, y1, x1 + w, y1 + h]) + bboxes = np.array(bboxes, dtype=np.float32) + if bboxes.shape[0] == 0: + bboxes = np.zeros((0, 4)) + gt_bboxes.append(bboxes) + + recalls = eval_recalls( + gt_bboxes, results, proposal_nums, iou_thrs, logger=logger) + ar = recalls.mean(axis=1) + return ar + + def format_results(self, results, jsonfile_prefix=None, **kwargs): + """Format the results to json (standard format for COCO evaluation). + + Args: + results (list[tuple | numpy.ndarray]): Testing results of the + dataset. + jsonfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + + Returns: + tuple: (result_files, tmp_dir), result_files is a dict containing \ + the json filepaths, tmp_dir is the temporal directory created \ + for saving json files when jsonfile_prefix is not specified. + """ + assert isinstance(results, list), 'results must be a list' + assert len(results) == len(self), ( + 'The length of results is not equal to the dataset len: {} != {}'. + format(len(results), len(self))) + + if jsonfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + jsonfile_prefix = osp.join(tmp_dir.name, 'results') + else: + tmp_dir = None + result_files = self.results2json(results, jsonfile_prefix) + return result_files, tmp_dir + + def evaluate(self, + results, + metric='bbox', + logger=None, + jsonfile_prefix=None, + classwise=False, + proposal_nums=(100, 300, 1000), + iou_thrs=None, + metric_items=None): + """Evaluation in COCO protocol. + + Args: + results (list[list | tuple]): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. Options are + 'bbox', 'segm', 'proposal', 'proposal_fast'. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + jsonfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + classwise (bool): Whether to evaluating the AP for each class. 
+ proposal_nums (Sequence[int]): Proposal number used for evaluating + recalls, such as recall@100, recall@1000. + Default: (100, 300, 1000). + iou_thrs (Sequence[float], optional): IoU threshold used for + evaluating recalls/mAPs. If set to a list, the average of all + IoUs will also be computed. If not specified, [0.50, 0.55, + 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used. + Default: None. + metric_items (list[str] | str, optional): Metric items that will + be returned. If not specified, ``['AR@100', 'AR@300', + 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be + used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75', + 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when + ``metric=='bbox' or metric=='segm'``. + + Returns: + dict[str, float]: COCO style evaluation metric. + """ + + metrics = metric if isinstance(metric, list) else [metric] + allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast'] + for metric in metrics: + if metric not in allowed_metrics: + raise KeyError(f'metric {metric} is not supported') + if iou_thrs is None: + iou_thrs = np.linspace( + .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) + if metric_items is not None: + if not isinstance(metric_items, list): + metric_items = [metric_items] + + result_files, tmp_dir = self.format_results(results, jsonfile_prefix) + + eval_results = OrderedDict() + cocoGt = self.coco + for metric in metrics: + msg = f'Evaluating {metric}...' + if logger is None: + msg = '\n' + msg + print_log(msg, logger=logger) + + if metric == 'proposal_fast': + ar = self.fast_eval_recall( + results, proposal_nums, iou_thrs, logger='silent') + log_msg = [] + for i, num in enumerate(proposal_nums): + eval_results[f'AR@{num}'] = ar[i] + log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}') + log_msg = ''.join(log_msg) + print_log(log_msg, logger=logger) + continue + + iou_type = 'bbox' if metric == 'proposal' else metric + if metric not in result_files: + raise KeyError(f'{metric} is not in results') + try: + predictions = load(result_files[metric]) + if iou_type == 'segm': + # Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331 # noqa + # When evaluating mask AP, if the results contain bbox, + # cocoapi will use the box area instead of the mask area + # for calculating the instance area. Though the overall AP + # is not affected, this leads to different + # small/medium/large mask AP results. + for x in predictions: + x.pop('bbox') + warnings.simplefilter('once') + warnings.warn( + 'The key "bbox" is deleted for more accurate mask AP ' + 'of small/medium/large instances since v2.12.0. 
This ' + 'does not change the overall mAP calculation.', + UserWarning) + cocoDt = cocoGt.loadRes(predictions) + except IndexError: + print_log( + 'The testing results of the whole dataset is empty.', + logger=logger, + level=logging.ERROR) + break + + cocoEval = COCOeval(cocoGt, cocoDt, iou_type) + cocoEval.params.catIds = self.cat_ids + cocoEval.params.imgIds = self.img_ids + cocoEval.params.maxDets = list(proposal_nums) + cocoEval.params.iouThrs = iou_thrs + # mapping of cocoEval.stats + coco_metric_names = { + 'mAP': 0, + 'mAP_50': 1, + 'mAP_75': 2, + 'mAP_s': 3, + 'mAP_m': 4, + 'mAP_l': 5, + 'AR@100': 6, + 'AR@300': 7, + 'AR@1000': 8, + 'AR_s@1000': 9, + 'AR_m@1000': 10, + 'AR_l@1000': 11 + } + if metric_items is not None: + for metric_item in metric_items: + if metric_item not in coco_metric_names: + raise KeyError( + f'metric item {metric_item} is not supported') + + if metric == 'proposal': + cocoEval.params.useCats = 0 + cocoEval.evaluate() + cocoEval.accumulate() + cocoEval.summarize() + if metric_items is None: + metric_items = [ + 'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', + 'AR_m@1000', 'AR_l@1000' + ] + + for item in metric_items: + val = float( + f'{cocoEval.stats[coco_metric_names[item]]:.3f}') + eval_results[item] = val + else: + cocoEval.evaluate() + cocoEval.accumulate() + cocoEval.summarize() + if classwise: # Compute per-category AP + # Compute per-category AP + # from https://github.com/facebookresearch/detectron2/ + precisions = cocoEval.eval['precision'] + # precision: (iou, recall, cls, area range, max dets) + assert len(self.cat_ids) == precisions.shape[2] + + results_per_category = [] + for idx, catId in enumerate(self.cat_ids): + # area range index 0: all area ranges + # max dets index -1: typically 100 per image + nm = self.coco.loadCats(catId)[0] + precision = precisions[:, :, idx, 0, -1] + precision = precision[precision > -1] + if precision.size: + ap = np.mean(precision) + else: + ap = float('nan') + results_per_category.append( + (f'{nm["name"]}', f'{float(ap):0.3f}')) + + num_columns = min(6, len(results_per_category) * 2) + results_flatten = list( + itertools.chain(*results_per_category)) + headers = ['category', 'AP'] * (num_columns // 2) + results_2d = itertools.zip_longest(*[ + results_flatten[i::num_columns] + for i in range(num_columns) + ]) + table_data = [headers] + table_data += [result for result in results_2d] + table = AsciiTable(table_data) + print_log('\n' + table.table, logger=logger) + + if metric_items is None: + metric_items = [ + 'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l' + ] + + for metric_item in metric_items: + key = f'{metric}_{metric_item}' + val = float( + f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}' + ) + eval_results[key] = val + ap = cocoEval.stats[:6] + eval_results[f'{metric}_mAP_copypaste'] = ( + f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} ' + f'{ap[4]:.3f} {ap[5]:.3f}') + if tmp_dir is not None: + tmp_dir.cleanup() + return eval_results diff --git a/mmcv/datasets/custom.py b/mmcv/datasets/custom.py new file mode 100644 index 0000000..4cd8a1d --- /dev/null +++ b/mmcv/datasets/custom.py @@ -0,0 +1,362 @@ +import os.path as osp +import warnings +from collections import OrderedDict + +import numpy as np +from mmcv.utils import print_log +from mmcv.fileio.io import load +from mmcv.fileio.parse import list_from_file +from terminaltables import AsciiTable +from torch.utils.data import Dataset + +from mmcv.core import eval_map, eval_recalls +from .builder import DATASETS +from .pipelines import Compose + 
+ +@DATASETS.register_module() +class CustomDataset(Dataset): + """Custom dataset for detection. + + The annotation format is shown as follows. The `ann` field is optional for + testing. + + .. code-block:: none + + [ + { + 'filename': 'a.jpg', + 'width': 1280, + 'height': 720, + 'ann': { + 'bboxes': (n, 4) in (x1, y1, x2, y2) order. + 'labels': (n, ), + 'bboxes_ignore': (k, 4), (optional field) + 'labels_ignore': (k, 4) (optional field) + } + }, + ... + ] + + Args: + ann_file (str): Annotation file path. + pipeline (list[dict]): Processing pipeline. + classes (str | Sequence[str], optional): Specify classes to load. + If is None, ``cls.CLASSES`` will be used. Default: None. + data_root (str, optional): Data root for ``ann_file``, + ``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified. + test_mode (bool, optional): If set True, annotation will not be loaded. + filter_empty_gt (bool, optional): If set true, images without bounding + boxes of the dataset's classes will be filtered out. This option + only works when `test_mode=False`, i.e., we never filter images + during tests. + """ + + CLASSES = None + + def __init__(self, + ann_file, + pipeline, + classes=None, + data_root=None, + img_prefix='', + seg_prefix=None, + proposal_file=None, + test_mode=False, + filter_empty_gt=True): + self.ann_file = ann_file + self.data_root = data_root + self.img_prefix = img_prefix + self.seg_prefix = seg_prefix + self.proposal_file = proposal_file + self.test_mode = test_mode + self.filter_empty_gt = filter_empty_gt + self.CLASSES = self.get_classes(classes) + + # join paths if data_root is specified + if self.data_root is not None: + if not osp.isabs(self.ann_file): + self.ann_file = osp.join(self.data_root, self.ann_file) + if not (self.img_prefix is None or osp.isabs(self.img_prefix)): + self.img_prefix = osp.join(self.data_root, self.img_prefix) + if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)): + self.seg_prefix = osp.join(self.data_root, self.seg_prefix) + if not (self.proposal_file is None + or osp.isabs(self.proposal_file)): + self.proposal_file = osp.join(self.data_root, + self.proposal_file) + # load annotations (and proposals) + self.data_infos = self.load_annotations(self.ann_file) + + if self.proposal_file is not None: + self.proposals = self.load_proposals(self.proposal_file) + else: + self.proposals = None + + # filter images too small and containing no annotations + if not test_mode: + valid_inds = self._filter_imgs() + self.data_infos = [self.data_infos[i] for i in valid_inds] + if self.proposals is not None: + self.proposals = [self.proposals[i] for i in valid_inds] + # set group flag for the sampler + self._set_group_flag() + + # processing pipeline + self.pipeline = Compose(pipeline) + + def __len__(self): + """Total number of samples of data.""" + return len(self.data_infos) + + def load_annotations(self, ann_file): + """Load annotation from annotation file.""" + return load(ann_file) + + def load_proposals(self, proposal_file): + """Load proposal from proposal file.""" + return load(proposal_file) + + def get_ann_info(self, idx): + """Get annotation by index. + + Args: + idx (int): Index of data. + + Returns: + dict: Annotation info of specified index. + """ + + return self.data_infos[idx]['ann'] + + def get_cat_ids(self, idx): + """Get category ids by index. + + Args: + idx (int): Index of data. + + Returns: + list[int]: All categories in the image of specified index. 
+ """ + + return self.data_infos[idx]['ann']['labels'].astype(np.int).tolist() + + def pre_pipeline(self, results): + """Prepare results dict for pipeline.""" + results['img_prefix'] = self.img_prefix + results['seg_prefix'] = self.seg_prefix + results['proposal_file'] = self.proposal_file + results['bbox_fields'] = [] + results['mask_fields'] = [] + results['seg_fields'] = [] + + def _filter_imgs(self, min_size=32): + """Filter images too small.""" + if self.filter_empty_gt: + warnings.warn( + 'CustomDataset does not support filtering empty gt images.') + valid_inds = [] + for i, img_info in enumerate(self.data_infos): + if min(img_info['width'], img_info['height']) >= min_size: + valid_inds.append(i) + return valid_inds + + def _set_group_flag(self): + """Set flag according to image aspect ratio. + + Images with aspect ratio greater than 1 will be set as group 1, + otherwise group 0. + """ + self.flag = np.zeros(len(self), dtype=np.uint8) + for i in range(len(self)): + img_info = self.data_infos[i] + if img_info['width'] / img_info['height'] > 1: + self.flag[i] = 1 + + def _rand_another(self, idx): + """Get another random index from the same group as the given index.""" + pool = np.where(self.flag == self.flag[idx])[0] + return np.random.choice(pool) + + def __getitem__(self, idx): + """Get training/test data after pipeline. + + Args: + idx (int): Index of data. + + Returns: + dict: Training/test data (with annotation if `test_mode` is set \ + True). + """ + + if self.test_mode: + return self.prepare_test_img(idx) + while True: + data = self.prepare_train_img(idx) + if data is None: + idx = self._rand_another(idx) + continue + return data + + def prepare_train_img(self, idx): + """Get training data and annotations after pipeline. + + Args: + idx (int): Index of data. + + Returns: + dict: Training data and annotation after pipeline with new keys \ + introduced by pipeline. + """ + + img_info = self.data_infos[idx] + ann_info = self.get_ann_info(idx) + results = dict(img_info=img_info, ann_info=ann_info) + if self.proposals is not None: + results['proposals'] = self.proposals[idx] + self.pre_pipeline(results) + return self.pipeline(results) + + def prepare_test_img(self, idx): + """Get testing data after pipeline. + + Args: + idx (int): Index of data. + + Returns: + dict: Testing data after pipeline with new keys introduced by \ + pipeline. + """ + + img_info = self.data_infos[idx] + results = dict(img_info=img_info) + if self.proposals is not None: + results['proposals'] = self.proposals[idx] + self.pre_pipeline(results) + return self.pipeline(results) + + @classmethod + def get_classes(cls, classes=None): + """Get class names of current dataset. + + Args: + classes (Sequence[str] | str | None): If classes is None, use + default CLASSES defined by builtin dataset. If classes is a + string, take it as a file name. The file contains the name of + classes where each line contains one class name. If classes is + a tuple or list, override the CLASSES defined by the dataset. + + Returns: + tuple[str] or list[str]: Names of categories of the dataset. 
+ """ + if classes is None: + return cls.CLASSES + + if isinstance(classes, str): + # take it as a file path + class_names = list_from_file(classes) + elif isinstance(classes, (tuple, list)): + class_names = classes + else: + raise ValueError(f'Unsupported type {type(classes)} of classes.') + + return class_names + + def format_results(self, results, **kwargs): + """Place holder to format result to dataset specific output.""" + + def evaluate(self, + results, + metric='mAP', + logger=None, + proposal_nums=(100, 300, 1000), + iou_thr=0.5, + scale_ranges=None): + """Evaluate the dataset. + + Args: + results (list): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. + logger (logging.Logger | None | str): Logger used for printing + related information during evaluation. Default: None. + proposal_nums (Sequence[int]): Proposal number used for evaluating + recalls, such as recall@100, recall@1000. + Default: (100, 300, 1000). + iou_thr (float | list[float]): IoU threshold. Default: 0.5. + scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP. + Default: None. + """ + + if not isinstance(metric, str): + assert len(metric) == 1 + metric = metric[0] + allowed_metrics = ['mAP', 'recall'] + if metric not in allowed_metrics: + raise KeyError(f'metric {metric} is not supported') + annotations = [self.get_ann_info(i) for i in range(len(self))] + eval_results = OrderedDict() + iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr + if metric == 'mAP': + assert isinstance(iou_thrs, list) + mean_aps = [] + for iou_thr in iou_thrs: + print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}') + mean_ap, _ = eval_map( + results, + annotations, + scale_ranges=scale_ranges, + iou_thr=iou_thr, + dataset=self.CLASSES, + logger=logger) + mean_aps.append(mean_ap) + eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3) + eval_results['mAP'] = sum(mean_aps) / len(mean_aps) + elif metric == 'recall': + gt_bboxes = [ann['bboxes'] for ann in annotations] + recalls = eval_recalls( + gt_bboxes, results, proposal_nums, iou_thr, logger=logger) + for i, num in enumerate(proposal_nums): + for j, iou in enumerate(iou_thrs): + eval_results[f'recall@{num}@{iou}'] = recalls[i, j] + if recalls.shape[1] > 1: + ar = recalls.mean(axis=1) + for i, num in enumerate(proposal_nums): + eval_results[f'AR@{num}'] = ar[i] + return eval_results + + def __repr__(self): + """Print the number of instance number.""" + dataset_type = 'Test' if self.test_mode else 'Train' + result = (f'\n{self.__class__.__name__} {dataset_type} dataset ' + f'with number of images {len(self)}, ' + f'and instance counts: \n') + if self.CLASSES is None: + result += 'Category names are not provided. 
\n' + return result + instance_count = np.zeros(len(self.CLASSES) + 1).astype(int) + # count the instance number in each image + for idx in range(len(self)): + label = self.get_ann_info(idx)['labels'] + unique, counts = np.unique(label, return_counts=True) + if len(unique) > 0: + # add the occurrence number to each class + instance_count[unique] += counts + else: + # background is the last index + instance_count[-1] += 1 + # create a table with category count + table_data = [['category', 'count'] * 5] + row_data = [] + for cls, count in enumerate(instance_count): + if cls < len(self.CLASSES): + row_data += [f'{cls} [{self.CLASSES[cls]}]', f'{count}'] + else: + # add the background number + row_data += ['-1 background', f'{count}'] + if len(row_data) == 10: + table_data.append(row_data) + row_data = [] + + table = AsciiTable(table_data) + result += table.table + return result diff --git a/mmcv/datasets/custom_3d.py b/mmcv/datasets/custom_3d.py new file mode 100644 index 0000000..88c8bfb --- /dev/null +++ b/mmcv/datasets/custom_3d.py @@ -0,0 +1,370 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import tempfile +import warnings +from os import path as osp +from torch.utils.data import Dataset + +from mmcv.datasets.builder import DATASETS +from ..core.bbox import get_box_type +from .pipelines import Compose +from .utils import extract_result_dict, get_loading_pipeline +from mmcv.fileio.io import load, dump +from mmcv.fileio.parse import list_from_file + +@DATASETS.register_module() +class Custom3DDataset(Dataset): + """Customized 3D dataset. + + This is the base dataset of SUNRGB-D, ScanNet, nuScenes, and KITTI + dataset. + + Args: + data_root (str): Path of dataset root. + ann_file (str): Path of annotation file. + pipeline (list[dict], optional): Pipeline used for data processing. + Defaults to None. + classes (tuple[str], optional): Classes used in the dataset. + Defaults to None. + modality (dict, optional): Modality to specify the sensor data used + as input. Defaults to None. + box_type_3d (str, optional): Type of 3D box of this dataset. + Based on the `box_type_3d`, the dataset will encapsulate the box + to its original format then converted them to `box_type_3d`. + Defaults to 'LiDAR'. Available options includes + + - 'LiDAR': Box in LiDAR coordinates. + - 'Depth': Box in depth coordinates, usually for indoor dataset. + - 'Camera': Box in camera coordinates. + filter_empty_gt (bool, optional): Whether to filter empty GT. + Defaults to True. + test_mode (bool, optional): Whether the dataset is in test mode. + Defaults to False. + """ + + def __init__(self, + data_root, + ann_file, + pipeline=None, + classes=None, + modality=None, + box_type_3d='LiDAR', + filter_empty_gt=True, + test_mode=False): + super().__init__() + self.data_root = data_root + self.ann_file = ann_file + self.test_mode = test_mode + self.modality = modality + self.filter_empty_gt = filter_empty_gt + self.box_type_3d, self.box_mode_3d = get_box_type(box_type_3d) + + self.CLASSES = self.get_classes(classes) + self.cat2id = {name: i for i, name in enumerate(self.CLASSES)} + self.data_infos = self.load_annotations(self.ann_file) + + if pipeline is not None: + self.pipeline = Compose(pipeline) + + # set group flag for the sampler + if not self.test_mode: + self._set_group_flag() + + def load_annotations(self, ann_file): + """Load annotations from ann_file. + + Args: + ann_file (str): Path of the annotation file. + + Returns: + list[dict]: List of annotations. 
+ """ + return load(ann_file) + + def get_data_info(self, index): + """Get data info according to the given index. + + Args: + index (int): Index of the sample data to get. + + Returns: + dict: Data information that will be passed to the data \ + preprocessing pipelines. It includes the following keys: + + - sample_idx (str): Sample index. + - pts_filename (str): Filename of point clouds. + - file_name (str): Filename of point clouds. + - ann_info (dict): Annotation info. + """ + info = self.data_infos[index] + sample_idx = info['point_cloud']['lidar_idx'] + pts_filename = osp.join(self.data_root, info['pts_path']) + + input_dict = dict( + pts_filename=pts_filename, + sample_idx=sample_idx, + file_name=pts_filename) + + if not self.test_mode: + annos = self.get_ann_info(index) + input_dict['ann_info'] = annos + if self.filter_empty_gt and ~(annos['gt_labels_3d'] != -1).any(): + return None + return input_dict + + def pre_pipeline(self, results): + """Initialization before data preparation. + + Args: + results (dict): Dict before data preprocessing. + + - img_fields (list): Image fields. + - bbox3d_fields (list): 3D bounding boxes fields. + - pts_mask_fields (list): Mask fields of points. + - pts_seg_fields (list): Mask fields of point segments. + - bbox_fields (list): Fields of bounding boxes. + - mask_fields (list): Fields of masks. + - seg_fields (list): Segment fields. + - box_type_3d (str): 3D box type. + - box_mode_3d (str): 3D box mode. + """ + results['img_fields'] = [] + results['bbox3d_fields'] = [] + results['pts_mask_fields'] = [] + results['pts_seg_fields'] = [] + results['bbox_fields'] = [] + results['mask_fields'] = [] + results['seg_fields'] = [] + results['box_type_3d'] = self.box_type_3d + results['box_mode_3d'] = self.box_mode_3d + + def prepare_train_data(self, index): + """Training data preparation. + + Args: + index (int): Index for accessing the target data. + + Returns: + dict: Training data dict of the corresponding index. + """ + input_dict = self.get_data_info(index) + if input_dict is None: + return None + self.pre_pipeline(input_dict) + example = self.pipeline(input_dict) + if self.filter_empty_gt and \ + (example is None or + ~(example['gt_labels_3d']._data != -1).any()): + return None + return example + + def prepare_test_data(self, index): + """Prepare data for testing. + + Args: + index (int): Index for accessing the target data. + + Returns: + dict: Testing data dict of the corresponding index. + """ + input_dict = self.get_data_info(index) + self.pre_pipeline(input_dict) + example = self.pipeline(input_dict) + return example + + @classmethod + def get_classes(cls, classes=None): + """Get class names of current dataset. + + Args: + classes (Sequence[str] | str | None): If classes is None, use + default CLASSES defined by builtin dataset. If classes is a + string, take it as a file name. The file contains the name of + classes where each line contains one class name. If classes is + a tuple or list, override the CLASSES defined by the dataset. + + Return: + list[str]: A list of class names. + """ + if classes is None: + return cls.CLASSES + + if isinstance(classes, str): + # take it as a file path + class_names = list_from_file(classes) + elif isinstance(classes, (tuple, list)): + class_names = classes + else: + raise ValueError(f'Unsupported type {type(classes)} of classes.') + + return class_names + + def format_results(self, + outputs, + pklfile_prefix=None, + submission_prefix=None): + """Format the results to pkl file. 
+ + Args: + outputs (list[dict]): Testing results of the dataset. + pklfile_prefix (str | None): The prefix of pkl files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + + Returns: + tuple: (outputs, tmp_dir), outputs is the detection results, \ + tmp_dir is the temporal directory created for saving json \ + files when ``jsonfile_prefix`` is not specified. + """ + if pklfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + pklfile_prefix = osp.join(tmp_dir.name, 'results') + out = f'{pklfile_prefix}.pkl' + dump(outputs, out) + return outputs, tmp_dir + + def evaluate(self, + results, + metric=None, + iou_thr=(0.25, 0.5), + logger=None, + show=False, + out_dir=None, + pipeline=None): + """Evaluate. + + Evaluation in indoor protocol. + + Args: + results (list[dict]): List of results. + metric (str | list[str]): Metrics to be evaluated. + iou_thr (list[float]): AP IoU thresholds. + show (bool): Whether to visualize. + Default: False. + out_dir (str): Path to save the visualization results. + Default: None. + pipeline (list[dict], optional): raw data loading for showing. + Default: None. + + Returns: + dict: Evaluation results. + """ + from mmcv.core.evaluation import indoor_eval + assert isinstance( + results, list), f'Expect results to be list, got {type(results)}.' + assert len(results) > 0, 'Expect length of results > 0.' + assert len(results) == len(self.data_infos) + assert isinstance( + results[0], dict + ), f'Expect elements in results to be dict, got {type(results[0])}.' + gt_annos = [info['annos'] for info in self.data_infos] + label2cat = {i: cat_id for i, cat_id in enumerate(self.CLASSES)} + ret_dict = indoor_eval( + gt_annos, + results, + iou_thr, + label2cat, + logger=logger, + box_type_3d=self.box_type_3d, + box_mode_3d=self.box_mode_3d) + if show: + self.show(results, out_dir, pipeline=pipeline) + + return ret_dict + + def _build_default_pipeline(self): + """Build the default pipeline for this dataset.""" + raise NotImplementedError('_build_default_pipeline is not implemented ' + f'for dataset {self.__class__.__name__}') + + def _get_pipeline(self, pipeline): + """Get data loading pipeline in self.show/evaluate function. + + Args: + pipeline (list[dict] | None): Input pipeline. If None is given, \ + get from self.pipeline. + """ + if pipeline is None: + if not hasattr(self, 'pipeline') or self.pipeline is None: + warnings.warn( + 'Use default pipeline for data loading, this may cause ' + 'errors when data is on ceph') + return self._build_default_pipeline() + loading_pipeline = get_loading_pipeline(self.pipeline.transforms) + return Compose(loading_pipeline) + return Compose(pipeline) + + def _extract_data(self, index, pipeline, key, load_annos=False): + """Load data using input pipeline and extract data according to key. + + Args: + index (int): Index for accessing the target data. + pipeline (:obj:`Compose`): Composed data loading pipeline. + key (str | list[str]): One single or a list of data key. + load_annos (bool): Whether to load data annotations. + If True, need to set self.test_mode as False before loading. + + Returns: + np.ndarray | torch.Tensor | list[np.ndarray | torch.Tensor]: + A single or a list of loaded data. + """ + assert pipeline is not None, 'data loading pipeline is not provided' + # when we want to load ground-truth via pipeline (e.g. 
bbox, seg mask) + # we need to set self.test_mode as False so that we have 'annos' + if load_annos: + original_test_mode = self.test_mode + self.test_mode = False + input_dict = self.get_data_info(index) + self.pre_pipeline(input_dict) + example = pipeline(input_dict) + + # extract data items according to keys + if isinstance(key, str): + data = extract_result_dict(example, key) + else: + data = [extract_result_dict(example, k) for k in key] + if load_annos: + self.test_mode = original_test_mode + + return data + + def __len__(self): + """Return the length of data infos. + + Returns: + int: Length of data infos. + """ + return len(self.data_infos) + + def _rand_another(self, idx): + """Randomly get another item with the same flag. + + Returns: + int: Another index of item with the same flag. + """ + pool = np.where(self.flag == self.flag[idx])[0] + return np.random.choice(pool) + + def __getitem__(self, idx): + """Get item from infos according to the given index. + + Returns: + dict: Data dictionary of the corresponding index. + """ + if self.test_mode: + return self.prepare_test_data(idx) + while True: + data = self.prepare_train_data(idx) + if data is None: + idx = self._rand_another(idx) + continue + return data + + def _set_group_flag(self): + """Set flag according to image aspect ratio. + + Images with aspect ratio greater than 1 will be set as group 1, + otherwise group 0. In 3D datasets, they are all the same, thus are all + zeros. + """ + self.flag = np.zeros(len(self), dtype=np.uint8) diff --git a/mmcv/datasets/custom_nuscenes_dataset.py b/mmcv/datasets/custom_nuscenes_dataset.py new file mode 100644 index 0000000..17c9e5a --- /dev/null +++ b/mmcv/datasets/custom_nuscenes_dataset.py @@ -0,0 +1,246 @@ +import copy +import random + +import numpy as np +import torch +from mmcv.datasets import DATASETS +from mmcv.datasets import NuScenesDataset +from os import path as osp +from nuscenes.eval.common.utils import quaternion_yaw, Quaternion +from .nuscnes_eval import NuScenesEval_custom +from mmcv.utils import save_tensor +from mmcv.parallel import DataContainer as DC +from mmcv.fileio.io import load + + +@DATASETS.register_module() +class CustomNuScenesDataset(NuScenesDataset): + r"""NuScenes Dataset. + + This dataset only adds camera intrinsics and extrinsics to the results. + """ + + def __init__(self, queue_length=4, bev_size=(200, 200), overlap_test=False, *args, **kwargs): + super().__init__(*args, **kwargs) + self.queue_length = queue_length + self.overlap_test = overlap_test + self.bev_size = bev_size + + def prepare_train_data(self, index): + """ + Training data preparation. + Args: + index (int): Index for accessing the target data. + Returns: + dict: Training data dict of the corresponding index.
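+ + Note: with the default ``queue_length=4`` and e.g. ``index=10``, + three of the frames 6-9 are randomly sampled, kept in temporal + order, and frame 10 is appended, so four frames are fused into one + training sample.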
+ """ + queue = [] + index_list = list(range(index-self.queue_length, index)) + random.shuffle(index_list) + index_list = sorted(index_list[1:]) + index_list.append(index) + for i in index_list: + i = max(0, i) + input_dict = self.get_data_info(i) + if input_dict is None: + return None + self.pre_pipeline(input_dict) + example = self.pipeline(input_dict) + if self.filter_empty_gt and \ + (example is None or ~(example['gt_labels_3d']._data != -1).any()): + return None + queue.append(example) + return self.union2one(queue) + + + def union2one(self, queue): + imgs_list = [each['img'].data for each in queue] + metas_map = {} + prev_scene_token = None + prev_pos = None + prev_angle = None + for i, each in enumerate(queue): + metas_map[i] = each['img_metas'].data + if metas_map[i]['scene_token'] != prev_scene_token: + metas_map[i]['prev_bev_exists'] = False + prev_scene_token = metas_map[i]['scene_token'] + prev_pos = copy.deepcopy(metas_map[i]['can_bus'][:3]) + prev_angle = copy.deepcopy(metas_map[i]['can_bus'][-1]) + metas_map[i]['can_bus'][:3] = 0 + metas_map[i]['can_bus'][-1] = 0 + else: + metas_map[i]['prev_bev_exists'] = True + tmp_pos = copy.deepcopy(metas_map[i]['can_bus'][:3]) + tmp_angle = copy.deepcopy(metas_map[i]['can_bus'][-1]) + metas_map[i]['can_bus'][:3] -= prev_pos + metas_map[i]['can_bus'][-1] -= prev_angle + prev_pos = copy.deepcopy(tmp_pos) + prev_angle = copy.deepcopy(tmp_angle) + queue[-1]['img'] = DC(torch.stack(imgs_list), cpu_only=False, stack=True) + queue[-1]['img_metas'] = DC(metas_map, cpu_only=True) + queue = queue[-1] + return queue + + def get_data_info(self, index): + """Get data info according to the given index. + + Args: + index (int): Index of the sample data to get. + + Returns: + dict: Data information that will be passed to the data \ + preprocessing pipelines. It includes the following keys: + + - sample_idx (str): Sample index. + - pts_filename (str): Filename of point clouds. + - sweeps (list[dict]): Infos of sweeps. + - timestamp (float): Sample timestamp. + - img_filename (str, optional): Image filename. + - lidar2img (list[np.ndarray], optional): Transformations \ + from lidar to different cameras. + - ann_info (dict): Annotation info. 
+ """ + info = self.data_infos[index] + # standard protocal modified from SECOND.Pytorch + input_dict = dict( + sample_idx=info['token'], + pts_filename=info['lidar_path'], + sweeps=info['sweeps'], + ego2global_translation=info['ego2global_translation'], + ego2global_rotation=info['ego2global_rotation'], + prev_idx=info['prev'], + next_idx=info['next'], + scene_token=info['scene_token'], + can_bus=info['can_bus'], + frame_idx=info['frame_idx'], + timestamp=info['timestamp'] / 1e6, + ) +#(['CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_FRONT_LEFT', 'CAM_BACK', 'CAM_BACK_LEFT', 'CAM_BACK_RIGHT']) + if self.modality['use_camera']: + image_paths = [] + lidar2img_rts = [] + lidar2cam_rts = [] + cam_intrinsics = [] + for cam_type, cam_info in info['cams'].items(): + # if cam_type in ['CAM_FRONT','CAM_BACK_LEFT']: + # continue + image_paths.append(cam_info['data_path']) + # obtain lidar to image transformation matrix + lidar2cam_r = np.linalg.inv(cam_info['sensor2lidar_rotation']) + lidar2cam_t = cam_info[ + 'sensor2lidar_translation'] @ lidar2cam_r.T + lidar2cam_rt = np.eye(4) + lidar2cam_rt[:3, :3] = lidar2cam_r.T + lidar2cam_rt[3, :3] = -lidar2cam_t + intrinsic = cam_info['cam_intrinsic'] + viewpad = np.eye(4) + viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic + lidar2img_rt = (viewpad @ lidar2cam_rt.T) + lidar2img_rts.append(lidar2img_rt) + + cam_intrinsics.append(viewpad) + lidar2cam_rts.append(lidar2cam_rt.T) + + input_dict.update( + dict( + img_filename=image_paths, + lidar2img=lidar2img_rts, + cam_intrinsic=cam_intrinsics, + lidar2cam=lidar2cam_rts, + )) + + if not self.test_mode: + annos = self.get_ann_info(index) + input_dict['ann_info'] = annos + + rotation = Quaternion(input_dict['ego2global_rotation']) + translation = input_dict['ego2global_translation'] + can_bus = input_dict['can_bus'] + can_bus[:3] = translation + can_bus[3:7] = rotation + patch_angle = quaternion_yaw(rotation) / np.pi * 180 + if patch_angle < 0: + patch_angle += 360 + can_bus[-2] = patch_angle / 180 * np.pi + can_bus[-1] = patch_angle + + return input_dict + + def __getitem__(self, idx): + """Get item from infos according to the given index. + Returns: + dict: Data dictionary of the corresponding index. + """ + if self.test_mode: + return self.prepare_test_data(idx) + while True: + + data = self.prepare_train_data(idx) + if data is None: + idx = self._rand_another(idx) + continue + return data + + def _evaluate_single(self, + result_path, + logger=None, + metric='bbox', + result_name='pts_bbox'): + """Evaluation for a single model in nuScenes protocol. + + Args: + result_path (str): Path of the result file. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + metric (str): Metric name used for evaluation. Default: 'bbox'. + result_name (str): Result name in the metric prefix. + Default: 'pts_bbox'. + + Returns: + dict: Dictionary of evaluation details. 
+ """ + from nuscenes import NuScenes + self.nusc = NuScenes(version=self.version, dataroot=self.data_root, + verbose=True) + + import pdb + pdb.set_trace() + + output_dir = osp.join(*osp.split(result_path)[:-1]) + + eval_set_map = { + 'v1.0-mini': 'mini_val', + 'v1.0-trainval': 'val', + } + self.nusc_eval = NuScenesEval_custom( + self.nusc, + config=self.eval_detection_configs, + result_path=result_path, + eval_set=eval_set_map[self.version], + output_dir=output_dir, + verbose=True, + overlap_test=self.overlap_test, + data_infos=self.data_infos + ) + self.nusc_eval.main(plot_examples=0, render_curves=False) + # record metrics + metrics = load(osp.join(output_dir, 'metrics_summary.json')) + detail = dict() + metric_prefix = f'{result_name}_NuScenes' + for name in self.CLASSES: + for k, v in metrics['label_aps'][name].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val + for k, v in metrics['label_tp_errors'][name].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}_{}'.format(metric_prefix, name, k)] = val + for k, v in metrics['tp_errors'].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}'.format(metric_prefix, + self.ErrNameMapping[k])] = val + detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score'] + detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap'] + return detail diff --git a/mmcv/datasets/custom_nuscenes_dataset_v2.py b/mmcv/datasets/custom_nuscenes_dataset_v2.py new file mode 100644 index 0000000..305d6b3 --- /dev/null +++ b/mmcv/datasets/custom_nuscenes_dataset_v2.py @@ -0,0 +1,302 @@ +import copy +from .nuscenes_dataset import NuScenesDataset +from .dd3d_nuscenes_dataset import DD3DNuscenesDataset +from os import path as osp +from mmcv.datasets import DATASETS +import torch +import numpy as np +from nuscenes.eval.common.utils import quaternion_yaw, Quaternion +from .nuscnes_eval import NuScenesEval_custom +from mmcv.parallel import DataContainer as DC +from collections import defaultdict, OrderedDict + + +@DATASETS.register_module() +class CustomNuScenesDatasetV2(NuScenesDataset): + def __init__(self, frames=(),mono_cfg=None, overlap_test=False,*args, **kwargs): + super().__init__(*args, **kwargs) + self.frames = frames + self.queue_length = len(frames) + self.overlap_test = overlap_test + self.mono_cfg = mono_cfg + if not self.test_mode and mono_cfg is not None: + self.mono_dataset = DD3DNuscenesDataset(**mono_cfg) + + def prepare_test_data(self, index): + """Prepare data for testing. + + Args: + index (int): Index for accessing the target data. + + Returns: + dict: Testing data dict of the corresponding index. 
+ """ + data_queue = OrderedDict() + input_dict = self.get_data_info(index) + cur_scene_token = input_dict['scene_token'] + self.pre_pipeline(input_dict) + example = self.pipeline(input_dict) + data_queue[0] = example + + for frame_idx in self.frames: + chosen_idx = index + frame_idx + if frame_idx ==0 or chosen_idx <0 or chosen_idx >= len(self.data_infos): + continue + info = self.data_infos[chosen_idx] + input_dict = self.prepare_input_dict(info) + if input_dict['scene_token'] == cur_scene_token: + self.pre_pipeline(input_dict) + example = self.pipeline(input_dict) + data_queue[frame_idx] = example + + data_queue = OrderedDict(sorted(data_queue.items())) + ret = defaultdict(list) + for i in range(len(data_queue[0]['img'])): + single_aug_data_queue = {} + for t in data_queue.keys(): + single_example = {} + for key ,value in data_queue[t].items(): + single_example[key] = value[i] + single_aug_data_queue[t] = single_example + single_aug_data_queue = OrderedDict(sorted(single_aug_data_queue.items())) + single_aug_sample = self.union2one(single_aug_data_queue) + + for key, value in single_aug_sample.items(): + ret[key].append(value) + return ret + + def prepare_train_data(self, index): + """ + Training data preparation. + Args: + index (int): Index for accessing the target data. + Returns: + dict: Training data dict of the corresponding index. + """ + data_queue = OrderedDict() + input_dict = self.get_data_info(index) + if input_dict is None: + return None + cur_scene_token = input_dict['scene_token'] + # cur_frame_idx = input_dict['frame_idx'] + ann_info = copy.deepcopy(input_dict['ann_info']) + self.pre_pipeline(input_dict) + example = self.pipeline(input_dict) + if self.filter_empty_gt and \ + (example is None or ~(example['gt_labels_3d']._data != -1).any()): + return None + data_queue[0] = example + aug_param = copy.deepcopy(example['aug_param']) if 'aug_param' in example else {} + + # frame_idx_to_idx = self.scene_to_frame_idx_to_idx[cur_scene_token] + for frame_idx in self.frames: + chosen_idx = index + frame_idx + if frame_idx ==0 or chosen_idx <0 or chosen_idx >= len(self.data_infos): + continue + info = self.data_infos[chosen_idx] + input_dict = self.prepare_input_dict(info) + if input_dict['scene_token'] == cur_scene_token: + input_dict['ann_info'] = copy.deepcopy(ann_info) # only for pipeline, should never be used + self.pre_pipeline(input_dict) + input_dict['aug_param'] = copy.deepcopy(aug_param) + example = self.pipeline(input_dict) + data_queue[frame_idx] = example + + data_queue = OrderedDict(sorted(data_queue.items())) + return self.union2one(data_queue) + + def union2one(self, queue: dict): + """ + convert sample queue into one single sample. 
+ """ + imgs_list = [each['img'].data for each in queue.values()] + lidar2ego = np.eye(4, dtype=np.float32) + lidar2ego[:3, :3] = Quaternion(queue[0]['lidar2ego_rotation']).rotation_matrix + lidar2ego[:3, 3] = queue[0]['lidar2ego_translation'] + + egocurr2global = np.eye(4, dtype=np.float32) + egocurr2global[:3,:3] = Quaternion(queue[0]['ego2global_rotation']).rotation_matrix + egocurr2global[:3,3] = queue[0]['ego2global_translation'] + metas_map = {} + for i, each in queue.items(): + metas_map[i] = each['img_metas'].data + metas_map[i]['timestamp'] = each['timestamp'] + if 'aug_param' in each: + metas_map[i]['aug_param'] = each['aug_param'] + if i == 0: + metas_map[i]['lidaradj2lidarcurr'] = None + else: + egoadj2global = np.eye(4, dtype=np.float32) + egoadj2global[:3,:3] = Quaternion(each['ego2global_rotation']).rotation_matrix + egoadj2global[:3,3] = each['ego2global_translation'] + + lidaradj2lidarcurr = np.linalg.inv(lidar2ego) @ np.linalg.inv(egocurr2global) @ egoadj2global @ lidar2ego + metas_map[i]['lidaradj2lidarcurr'] = lidaradj2lidarcurr + for i_cam in range(len(metas_map[i]['lidar2img'])): + metas_map[i]['lidar2img'][i_cam] = metas_map[i]['lidar2img'][i_cam] @ np.linalg.inv(lidaradj2lidarcurr) + queue[0]['img'] = DC(torch.stack(imgs_list), + cpu_only=False, stack=True) + queue[0]['img_metas'] = DC(metas_map, cpu_only=True) + queue = queue[0] + return queue + + def prepare_input_dict(self, info): + # standard protocal modified from SECOND.Pytorch + input_dict = dict( + sample_idx=info['token'], + pts_filename=info['lidar_path'], + sweeps=info['sweeps'], + ego2global_translation=info['ego2global_translation'], + ego2global_rotation=info['ego2global_rotation'], + lidar2ego_translation=info['lidar2ego_translation'], + lidar2ego_rotation=info['lidar2ego_rotation'], + prev=info['prev'], + next=info['next'], + scene_token=info['scene_token'], + frame_idx=info['frame_idx'], + timestamp=info['timestamp'] / 1e6, + ) + + if self.modality['use_camera']: + image_paths = [] + lidar2img_rts = [] + lidar2cam_rts = [] + cam_intrinsics = [] + for cam_type, cam_info in info['cams'].items(): + image_paths.append(cam_info['data_path']) + # obtain lidar to image transformation matrix + lidar2cam_r = np.linalg.inv(cam_info['sensor2lidar_rotation']) + lidar2cam_t = cam_info[ + 'sensor2lidar_translation'] @ lidar2cam_r.T + lidar2cam_rt = np.eye(4) + lidar2cam_rt[:3, :3] = lidar2cam_r.T + lidar2cam_rt[3, :3] = -lidar2cam_t + intrinsic = cam_info['cam_intrinsic'] + viewpad = np.eye(4) + viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic + lidar2img_rt = (viewpad @ lidar2cam_rt.T) + lidar2img_rts.append(lidar2img_rt) + + cam_intrinsics.append(viewpad) + lidar2cam_rts.append(lidar2cam_rt.T) + + input_dict.update( + dict( + img_filename=image_paths, + lidar2img=lidar2img_rts, + cam2img=cam_intrinsics, + lidar2cam=lidar2cam_rts, + )) + + return input_dict + + def filter_crowd_annotations(self, data_dict): + for ann in data_dict["annotations"]: + if ann.get("iscrowd", 0) == 0: + return True + return False + + def get_data_info(self, index): + info = self.data_infos[index] + input_dict = self.prepare_input_dict(info) + if not self.test_mode: + annos = self.get_ann_info(index) + input_dict['ann_info'] = annos + + if not self.test_mode and self.mono_cfg is not None: + if input_dict is None: + return None + info = self.data_infos[index] + img_ids = [] + for cam_type, cam_info in info['cams'].items(): + img_ids.append(cam_info['sample_data_token']) + + mono_input_dict = []; mono_ann_index = [] + for i, 
img_id in enumerate(img_ids): + tmp_dict = self.mono_dataset.getitem_by_datumtoken(img_id) + if tmp_dict is not None: + if self.filter_crowd_annotations(tmp_dict): + mono_input_dict.append(tmp_dict) + mono_ann_index.append(i) + + # filter out samples without any valid annotation + if len(mono_ann_index) == 0: + return None + + mono_ann_index = DC(mono_ann_index, cpu_only=True) + input_dict['mono_input_dict'] = mono_input_dict + input_dict['mono_ann_idx'] = mono_ann_index + return input_dict + + def __getitem__(self, idx): + """Get item from infos according to the given index. + Returns: + dict: Data dictionary of the corresponding index. + """ + if self.test_mode: + return self.prepare_test_data(idx) + while True: + data = self.prepare_train_data(idx) + if data is None: + idx = self._rand_another(idx) + continue + return data + + def _evaluate_single(self, + result_path, + logger=None, + metric='bbox', + result_name='pts_bbox'): + """Evaluation for a single model in nuScenes protocol. + + Args: + result_path (str): Path of the result file. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + metric (str): Metric name used for evaluation. Default: 'bbox'. + result_name (str): Result name in the metric prefix. + Default: 'pts_bbox'. + + Returns: + dict: Dictionary of evaluation details. + """ + from nuscenes import NuScenes + self.nusc = NuScenes(version=self.version, dataroot=self.data_root, + verbose=True) + + output_dir = osp.join(*osp.split(result_path)[:-1]) + + eval_set_map = { + 'v1.0-mini': 'mini_val', + 'v1.0-trainval': 'val', + } + self.nusc_eval = NuScenesEval_custom( + self.nusc, + config=self.eval_detection_configs, + result_path=result_path, + eval_set=eval_set_map[self.version], + output_dir=output_dir, + verbose=True, + overlap_test=self.overlap_test, + data_infos=self.data_infos + ) + self.nusc_eval.main(plot_examples=0, render_curves=False) + # record metrics + metrics = load(osp.join(output_dir, 'metrics_summary.json')) + detail = dict() + metric_prefix = f'{result_name}_NuScenes' + for name in self.CLASSES: + for k, v in metrics['label_aps'][name].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val + for k, v in metrics['label_tp_errors'][name].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}_{}'.format(metric_prefix, name, k)] = val + for k, v in metrics['tp_errors'].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}'.format(metric_prefix, + self.ErrNameMapping[k])] = val + detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score'] + detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap'] + return detail \ No newline at end of file diff --git a/mmcv/datasets/data_utils/data_utils.py b/mmcv/datasets/data_utils/data_utils.py new file mode 100644 index 0000000..331e02f --- /dev/null +++ b/mmcv/datasets/data_utils/data_utils.py @@ -0,0 +1,174 @@ +import math +import numpy as np +from nuscenes.eval.common.utils import quaternion_yaw, Quaternion +from nuscenes.utils.data_classes import Box as NuScenesBox +import pyquaternion + + +def output_to_nusc_box(detection): + """Convert the output to the box class in the nuScenes. + Args: + detection (dict): Detection results. + - boxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox. + - scores_3d (torch.Tensor): Detection scores. + - labels_3d (torch.Tensor): Predicted box labels. + Returns: + list[:obj:`NuScenesBox`]: List of standard NuScenesBoxes.
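+ + Note: yaw is converted with ``-yaw - pi / 2`` to match the nuScenes + box convention, velocity is read from ``box3d.tensor[:, 7:9]`` with a + zero vertical component, and ``track_ids`` (when present) are stored + in ``box.token``.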
+ """ + box3d = detection['boxes_3d'] + scores = detection['scores_3d'].numpy() + labels = detection['labels_3d'].numpy() + if 'track_ids' in detection: + ids = detection['track_ids'].numpy() + else: + ids = np.ones_like(labels) + + box_gravity_center = box3d.gravity_center.numpy() + box_dims = box3d.dims.numpy() + box_yaw = box3d.yaw.numpy() + # TODO: check whether this is necessary + # with dir_offset & dir_limit in the head + box_yaw = -box_yaw - np.pi / 2 + + box_list = [] + for i in range(len(box3d)): + quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box_yaw[i]) + velocity = (*box3d.tensor[i, 7:9], 0.0) + # velo_val = np.linalg.norm(box3d[i, 7:9]) + # velo_ori = box3d[i, 6] + # velocity = ( + # velo_val * np.cos(velo_ori), velo_val * np.sin(velo_ori), 0.0) + box = NuScenesBox( + box_gravity_center[i], + box_dims[i], + quat, + label=labels[i], + score=scores[i], + velocity=velocity) + box.token = ids[i] + box_list.append(box) + return box_list + + +def output_to_nusc_box_det(detection): + """Convert the output to the box class in the nuScenes. + + Args: + detection (dict): Detection results. + + - boxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox. + - scores_3d (torch.Tensor): Detection scores. + - labels_3d (torch.Tensor): Predicted box labels. + + Returns: + list[:obj:`NuScenesBox`]: List of standard NuScenesBoxes. + """ + if 'boxes_3d_det' in detection: + box3d = detection['boxes_3d_det'] + scores = detection['scores_3d_det'].numpy() + labels = detection['labels_3d_det'].numpy() + else: + box3d = detection['boxes_3d'] + scores = detection['scores_3d'].numpy() + labels = detection['labels_3d'].numpy() + + box_gravity_center = box3d.gravity_center.numpy() + box_dims = box3d.dims.numpy() + box_yaw = box3d.yaw.numpy() + # TODO: check whether this is necessary + # with dir_offset & dir_limit in the head + box_yaw = -box_yaw - np.pi / 2 + + box_list = [] + for i in range(len(box3d)): + quat = Quaternion(axis=[0, 0, 1], radians=box_yaw[i]) + velocity = (*box3d.tensor[i, 7:9], 0.0) + box = NuScenesBox( + box_gravity_center[i], + box_dims[i], + quat, + label=labels[i], + score=scores[i], + velocity=velocity) + box_list.append(box) + return box_list + + +def lidar_nusc_box_to_global(info, + boxes, + classes, + eval_configs, + eval_version='detection_cvpr_2019'): + """Convert the box from ego to global coordinate. + Args: + info (dict): Info for a specific sample data, including the + calibration information. + boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes. + classes (list[str]): Mapped classes in the evaluation. + eval_configs (object): Evaluation configuration object. + eval_version (str, optional): Evaluation version. + Default: 'detection_cvpr_2019' + Returns: + list: List of standard NuScenesBoxes in the global + coordinate. + """ + box_list = [] + keep_idx = [] + for i, box in enumerate(boxes): + # Move box to ego vehicle coord system + box.rotate(Quaternion(info['lidar2ego_rotation'])) + box.translate(np.array(info['lidar2ego_translation'])) + # filter det in ego. 
+ cls_range_map = eval_configs.class_range + radius = np.linalg.norm(box.center[:2], 2) + det_range = cls_range_map[classes[box.label]] + if radius > det_range: + continue + # Move box to global coord system + box.rotate(Quaternion(info['ego2global_rotation'])) + box.translate(np.array(info['ego2global_translation'])) + box_list.append(box) + keep_idx.append(i) + return box_list, keep_idx + + +def obtain_map_info(nusc, + nusc_maps, + sample, + patch_size=(102.4, 102.4), + canvas_size=(256, 256), + layer_names=['lane_divider', 'road_divider'], + thickness=10): + """ + Export 2d annotation from the info file and raw data. + """ + l2e_r = sample['lidar2ego_rotation'] + l2e_t = sample['lidar2ego_translation'] + e2g_r = sample['ego2global_rotation'] + e2g_t = sample['ego2global_translation'] + l2e_r_mat = Quaternion(l2e_r).rotation_matrix + e2g_r_mat = Quaternion(e2g_r).rotation_matrix + + scene = nusc.get('scene', sample['scene_token']) + log = nusc.get('log', scene['log_token']) + nusc_map = nusc_maps[log['location']] + if layer_names is None: + layer_names = nusc_map.non_geometric_layers + + l2g_r_mat = (l2e_r_mat.T @ e2g_r_mat.T).T + l2g_t = l2e_t @ e2g_r_mat.T + e2g_t + patch_box = (l2g_t[0], l2g_t[1], patch_size[0], patch_size[1]) + patch_angle = math.degrees(Quaternion(matrix=l2g_r_mat).yaw_pitch_roll[0]) + + map_mask = nusc_map.get_map_mask( + patch_box, patch_angle, layer_names, canvas_size=canvas_size) + map_mask = map_mask[-2] | map_mask[-1] + map_mask = map_mask[np.newaxis, :] + map_mask = map_mask.transpose((2, 1, 0)).squeeze(2) # (H, W, C) + + erode = nusc_map.get_map_mask(patch_box, patch_angle, [ + 'drivable_area'], canvas_size=canvas_size) + erode = erode.transpose((2, 1, 0)).squeeze(2) + + map_mask = np.concatenate([erode[None], map_mask[None]], axis=0) + return map_mask diff --git a/mmcv/datasets/data_utils/rasterize.py b/mmcv/datasets/data_utils/rasterize.py new file mode 100644 index 0000000..c30a870 --- /dev/null +++ b/mmcv/datasets/data_utils/rasterize.py @@ -0,0 +1,160 @@ +import cv2 +import numpy as np +from shapely import affinity +from shapely.geometry import LineString, box + + +def get_patch_coord(patch_box, patch_angle=0.0): + patch_x, patch_y, patch_h, patch_w = patch_box + + x_min = patch_x - patch_w / 2.0 + y_min = patch_y - patch_h / 2.0 + x_max = patch_x + patch_w / 2.0 + y_max = patch_y + patch_h / 2.0 + + patch = box(x_min, y_min, x_max, y_max) + patch = affinity.rotate(patch, patch_angle, origin=( + patch_x, patch_y), use_radians=False) + + return patch + + +def get_discrete_degree(vec, angle_class=36): + deg = np.mod(np.degrees(np.arctan2(vec[1], vec[0])), 360) + deg = (int(deg / (360 / angle_class) + 0.5) % angle_class) + 1 + return deg + + +def mask_for_lines(lines, mask, thickness, idx, type='index', angle_class=36): + coords = np.asarray(list(lines.coords), np.int32) + coords = coords.reshape((-1, 2)) + if len(coords) < 2: + return mask, idx + if type == 'backward': + coords = np.flip(coords, 0) + + if type == 'index': + cv2.polylines(mask, [coords], False, color=idx, thickness=thickness) + idx += 1 + else: + for i in range(len(coords) - 1): + cv2.polylines(mask, [coords[i:]], False, color=get_discrete_degree( + coords[i + 1] - coords[i], angle_class=angle_class), thickness=thickness) + return mask, idx + + +def line_geom_to_mask(layer_geom, confidence_levels, local_box, canvas_size, thickness, idx, type='index', angle_class=36): + patch_x, patch_y, patch_h, patch_w = local_box + + patch = get_patch_coord(local_box) + + canvas_h = canvas_size[0] + 
canvas_w = canvas_size[1] + scale_height = canvas_h / patch_h + scale_width = canvas_w / patch_w + + trans_x = -patch_x + patch_w / 2.0 + trans_y = -patch_y + patch_h / 2.0 + + map_mask = np.zeros(canvas_size, np.uint8) + + for line in layer_geom: + if isinstance(line, tuple): + line, confidence = line + else: + confidence = None + new_line = line.intersection(patch) + if not new_line.is_empty: + new_line = affinity.affine_transform( + new_line, [1.0, 0.0, 0.0, 1.0, trans_x, trans_y]) + new_line = affinity.scale( + new_line, xfact=scale_width, yfact=scale_height, origin=(0, 0)) + confidence_levels.append(confidence) + if new_line.geom_type == 'MultiLineString': + for new_single_line in new_line: + map_mask, idx = mask_for_lines( + new_single_line, map_mask, thickness, idx, type, angle_class) + else: + map_mask, idx = mask_for_lines( + new_line, map_mask, thickness, idx, type, angle_class) + return map_mask, idx + + +def overlap_filter(mask, filter_mask): + C, _, _ = mask.shape + for c in range(C-1, -1, -1): + filter = np.repeat((filter_mask[c] != 0)[None, :], c, axis=0) + mask[:c][filter] = 0 + + return mask + + +def preprocess_map(vectors, patch_size, canvas_size, num_classes, thickness, angle_class): + confidence_levels = [-1] + vector_num_list = {} + for i in range(num_classes): + vector_num_list[i] = [] + + for vector in vectors: + if vector['pts_num'] >= 2: + vector_num_list[vector['type']].append( + LineString(vector['pts'][:vector['pts_num']])) + + local_box = (0.0, 0.0, patch_size[0], patch_size[1]) + + idx = 1 + filter_masks = [] + instance_masks = [] + forward_masks = [] + backward_masks = [] + for i in range(num_classes): + map_mask, idx = line_geom_to_mask( + vector_num_list[i], confidence_levels, local_box, canvas_size, thickness, idx) + instance_masks.append(map_mask) + filter_mask, _ = line_geom_to_mask( + vector_num_list[i], confidence_levels, local_box, canvas_size, thickness + 4, 1) + filter_masks.append(filter_mask) + forward_mask, _ = line_geom_to_mask( + vector_num_list[i], confidence_levels, local_box, canvas_size, thickness, 1, type='forward', angle_class=angle_class) + forward_masks.append(forward_mask) + backward_mask, _ = line_geom_to_mask( + vector_num_list[i], confidence_levels, local_box, canvas_size, thickness, 1, type='backward', angle_class=angle_class) + backward_masks.append(backward_mask) + + filter_masks = np.stack(filter_masks) + instance_masks = np.stack(instance_masks) + forward_masks = np.stack(forward_masks) + backward_masks = np.stack(backward_masks) + + instance_masks = overlap_filter(instance_masks, filter_masks) + forward_masks = overlap_filter( + forward_masks, filter_masks).sum(0).astype('int32') + backward_masks = overlap_filter( + backward_masks, filter_masks).sum(0).astype('int32') + + semantic_masks = instance_masks != 0 + + return semantic_masks, instance_masks, forward_masks, backward_masks + + +def rasterize_map(vectors, patch_size, canvas_size, num_classes, thickness): + confidence_levels = [-1] + vector_num_list = {} + for i in range(num_classes): + vector_num_list[i] = [] + + for vector in vectors: + if vector['pts_num'] >= 2: + vector_num_list[vector['type']].append( + (LineString(vector['pts'][:vector['pts_num']]), vector.get('confidence_level', 1))) + + local_box = (0.0, 0.0, patch_size[0], patch_size[1]) + + idx = 1 + masks = [] + for i in range(num_classes): + map_mask, idx = line_geom_to_mask( + vector_num_list[i], confidence_levels, local_box, canvas_size, thickness, idx) + masks.append(map_mask) + + return np.stack(masks), 
confidence_levels
diff --git a/mmcv/datasets/data_utils/trajectory_api.py b/mmcv/datasets/data_utils/trajectory_api.py
new file mode 100644
index 0000000..83b2c3b
--- /dev/null
+++ b/mmcv/datasets/data_utils/trajectory_api.py
@@ -0,0 +1,283 @@
+import numpy as np
+from nuscenes.prediction import (PredictHelper,
+ convert_local_coords_to_global,
+ convert_global_coords_to_local)
+from mmcv.core.bbox.structures.box_3d_mode import Box3DMode
+from mmcv.core.bbox.structures.coord_3d_mode import Coord3DMode
+from mmcv.core.bbox.structures.lidar_box3d import LiDARInstance3DBoxes
+from nuscenes.eval.common.utils import quaternion_yaw, Quaternion
+from mmcv.parallel import DataContainer as DC
+from mmcv.datasets.pipelines import to_tensor
+
+class NuScenesTraj(object):
+ def __init__(self,
+ nusc,
+ predict_steps,
+ planning_steps,
+ past_steps,
+ fut_steps,
+ with_velocity,
+ CLASSES,
+ box_mode_3d,
+ use_nonlinear_optimizer=False):
+ super().__init__()
+ self.nusc = nusc
+ self.prepare_sdc_vel_info()
+ self.predict_steps = predict_steps
+ self.planning_steps = planning_steps
+ self.past_steps = past_steps
+ self.fut_steps = fut_steps
+ self.with_velocity = with_velocity
+ self.CLASSES = CLASSES
+ self.box_mode_3d = box_mode_3d
+ self.predict_helper = PredictHelper(self.nusc)
+ self.use_nonlinear_optimizer = use_nonlinear_optimizer
+
+ def get_traj_label(self, sample_token, ann_tokens):
+ sd_rec = self.nusc.get('sample', sample_token)
+ fut_traj_all = []
+ fut_traj_valid_mask_all = []
+ past_traj_all = []
+ past_traj_valid_mask_all = []
+ _, boxes, _ = self.nusc.get_sample_data(sd_rec['data']['LIDAR_TOP'], selected_anntokens=ann_tokens)
+ for i, ann_token in enumerate(ann_tokens):
+ box = boxes[i]
+ instance_token = self.nusc.get('sample_annotation', ann_token)['instance_token']
+ fut_traj_local = self.predict_helper.get_future_for_agent(instance_token, sample_token, seconds=6, in_agent_frame=True)
+ past_traj_local = self.predict_helper.get_past_for_agent(instance_token, sample_token, seconds=2, in_agent_frame=True)
+
+ fut_traj = np.zeros((self.predict_steps, 2))
+ fut_traj_valid_mask = np.zeros((self.predict_steps, 2))
+ past_traj = np.zeros((self.past_steps + self.fut_steps, 2))
+ past_traj_valid_mask = np.zeros((self.past_steps + self.fut_steps, 2))
+ if fut_traj_local.shape[0] > 0:
+ if self.use_nonlinear_optimizer:
+ trans = box.center
+ else:
+ trans = np.array([0, 0, 0])
+ rot = Quaternion(matrix=box.rotation_matrix)
+ fut_traj_scene_centric = convert_local_coords_to_global(fut_traj_local, trans, rot)
+ fut_traj[:fut_traj_scene_centric.shape[0], :] = fut_traj_scene_centric
+ fut_traj_valid_mask[:fut_traj_scene_centric.shape[0], :] = 1
+ if past_traj_local.shape[0] > 0:
+ trans = np.array([0, 0, 0])
+ rot = Quaternion(matrix=box.rotation_matrix)
+ past_traj_scene_centric = convert_local_coords_to_global(past_traj_local, trans, rot)
+ past_traj[:past_traj_scene_centric.shape[0], :] = past_traj_scene_centric
+ past_traj_valid_mask[:past_traj_scene_centric.shape[0], :] = 1
+
+ if fut_traj_local.shape[0] > 0:
+ fut_steps = min(self.fut_steps, fut_traj_scene_centric.shape[0])
+ past_traj[self.past_steps:self.past_steps + fut_steps, :] = fut_traj_scene_centric[:fut_steps]
+ past_traj_valid_mask[self.past_steps:self.past_steps + fut_steps, :] = 1
+
+ fut_traj_all.append(fut_traj)
+ fut_traj_valid_mask_all.append(fut_traj_valid_mask)
+ past_traj_all.append(past_traj)
+ past_traj_valid_mask_all.append(past_traj_valid_mask)
+ if len(ann_tokens) > 0:
+ fut_traj_all = np.stack(fut_traj_all, axis=0)
+ fut_traj_valid_mask_all = np.stack(fut_traj_valid_mask_all, axis=0)
+ past_traj_all = np.stack(past_traj_all, axis=0)
+ past_traj_valid_mask_all = np.stack(past_traj_valid_mask_all, axis=0)
+ else:
+ fut_traj_all = np.zeros((0, self.predict_steps, 2))
+ fut_traj_valid_mask_all = np.zeros((0, self.predict_steps, 2))
+ past_traj_all = np.zeros((0, self.predict_steps, 2))
+ past_traj_valid_mask_all = np.zeros((0, self.predict_steps, 2))
+ return fut_traj_all, fut_traj_valid_mask_all, past_traj_all, past_traj_valid_mask_all
+
+ def get_vel_transform_mats(self, sample):
+ sd_rec = self.nusc.get('sample_data', sample['data']['LIDAR_TOP'])
+ cs_record = self.nusc.get('calibrated_sensor',
+ sd_rec['calibrated_sensor_token'])
+ pose_record = self.nusc.get('ego_pose', sd_rec['ego_pose_token'])
+
+ l2e_r = cs_record['rotation']
+ l2e_t = cs_record['translation']
+ e2g_r = pose_record['rotation']
+ e2g_t = pose_record['translation']
+ l2e_r_mat = Quaternion(l2e_r).rotation_matrix
+ e2g_r_mat = Quaternion(e2g_r).rotation_matrix
+
+ return l2e_r_mat, e2g_r_mat
+
+ def get_vel_and_time(self, sample):
+ lidar_token = sample['data']['LIDAR_TOP']
+ lidar_top = self.nusc.get('sample_data', lidar_token)
+ pose = self.nusc.get('ego_pose', lidar_top['ego_pose_token'])
+ xyz = pose['translation']
+ timestamp = sample['timestamp']
+ return xyz, timestamp
+
+ def prepare_sdc_vel_info(self):
+ # generate SDC (self-driving car) velocity info for all samples
+ # Note that these velocity values are converted from the global frame
+ # to the lidar frame, as aligned with the bbox GTs
+
+ self.sdc_vel_info = {}
+ for scene in self.nusc.scene:
+ scene_token = scene['token']
+
+ # a forward difference cannot be computed for the last sample,
+ # so the loop stops before it; its velocity is filled in below
+ last_sample_token = scene['last_sample_token']
+ sample_token = scene['first_sample_token']
+ sample = self.nusc.get('sample', sample_token)
+ xyz, time = self.get_vel_and_time(sample)
+ while sample['token'] != last_sample_token:
+ next_sample_token = sample['next']
+ next_sample = self.nusc.get('sample', next_sample_token)
+ next_xyz, next_time = self.get_vel_and_time(next_sample)
+ dc = np.array(next_xyz) - np.array(xyz)
+ dt = (next_time - time) / 1e6
+ vel = dc / dt
+
+ # global frame to lidar frame
+ l2e_r_mat, e2g_r_mat = self.get_vel_transform_mats(sample)
+ vel = vel @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(
+ l2e_r_mat).T
+ vel = vel[:2]
+
+ self.sdc_vel_info[sample['token']] = vel
+ xyz, time = next_xyz, next_time
+ sample = next_sample
+
+ # the last sample has no next frame, so reuse the velocity of the
+ # second-to-last sample
+ last_sample = self.nusc.get('sample', last_sample_token)
+ second_last_sample_token = last_sample['prev']
+ self.sdc_vel_info[last_sample_token] = self.sdc_vel_info[second_last_sample_token]
+
+ def generate_sdc_info(self, sdc_vel, as_lidar_instance3d_box=False):
+ # sdc dims from https://forum.nuscenes.org/t/dimensions-of-the-ego-vehicle-used-to-gather-data/550
+ pseudo_sdc_bbox = np.array([0.0, 0.0, 0.0, 1.73, 4.08, 1.56, -np.pi])
+ if self.with_velocity:
+ pseudo_sdc_bbox = np.concatenate([pseudo_sdc_bbox, sdc_vel], axis=-1)
+ gt_bboxes_3d = np.array([pseudo_sdc_bbox]).astype(np.float32)
+ gt_names_3d = ['car']
+ gt_labels_3d = []
+ for cat in gt_names_3d:
+ if cat in self.CLASSES:
+ gt_labels_3d.append(self.CLASSES.index(cat))
+ else:
+ gt_labels_3d.append(-1)
+ gt_labels_3d = np.array(gt_labels_3d)
+
+ # the nuscenes box center is [0.5, 0.5, 0.5], we change it to be
+ # the same as KITTI (0.5, 0.5, 0)
+ gt_bboxes_3d = LiDARInstance3DBoxes(
+ gt_bboxes_3d,
+ box_dim=gt_bboxes_3d.shape[-1],
+ origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)
+
+ if as_lidar_instance3d_box:
+ # return the raw box if we do not want to wrap it into a DataContainer
+ return gt_bboxes_3d
+
+ gt_labels_3d = DC(to_tensor(gt_labels_3d))
+ gt_bboxes_3d = DC(gt_bboxes_3d, cpu_only=True)
+
+ return gt_bboxes_3d, gt_labels_3d
+
+ def get_sdc_traj_label(self, sample_token):
+ sd_rec = self.nusc.get('sample', sample_token)
+ lidar_top_data_start = self.nusc.get('sample_data', sd_rec['data']['LIDAR_TOP'])
+ ego_pose_start = self.nusc.get('ego_pose', lidar_top_data_start['ego_pose_token'])
+
+ sdc_fut_traj = []
+ for _ in range(self.predict_steps):
+ next_annotation_token = sd_rec['next']
+ if next_annotation_token == '':
+ break
+ sd_rec = self.nusc.get('sample', next_annotation_token)
+ lidar_top_data_next = self.nusc.get('sample_data', sd_rec['data']['LIDAR_TOP'])
+ ego_pose_next = self.nusc.get('ego_pose', lidar_top_data_next['ego_pose_token'])
+ sdc_fut_traj.append(ego_pose_next['translation'][:2])  # global xy pos of sdc at future step i
+
+ sdc_fut_traj_all = np.zeros((1, self.predict_steps, 2))
+ sdc_fut_traj_valid_mask_all = np.zeros((1, self.predict_steps, 2))
+ n_valid_timestep = len(sdc_fut_traj)
+ if n_valid_timestep > 0:
+ sdc_fut_traj = np.stack(sdc_fut_traj, axis=0)  # (t, 2)
+ sdc_fut_traj = convert_global_coords_to_local(
+ coordinates=sdc_fut_traj,
+ translation=ego_pose_start['translation'],
+ rotation=ego_pose_start['rotation'],
+ )
+ sdc_fut_traj_all[:, :n_valid_timestep, :] = sdc_fut_traj
+ sdc_fut_traj_valid_mask_all[:, :n_valid_timestep, :] = 1
+
+ return sdc_fut_traj_all, sdc_fut_traj_valid_mask_all
+
+ def get_l2g_transform(self, sample):
+ sd_rec = self.nusc.get('sample_data', sample['data']['LIDAR_TOP'])
+ cs_record = self.nusc.get('calibrated_sensor',
+ sd_rec['calibrated_sensor_token'])
+ pose_record = self.nusc.get('ego_pose', sd_rec['ego_pose_token'])
+
+ l2e_r = cs_record['rotation']
+ l2e_t = np.array(cs_record['translation'])
+ e2g_r = pose_record['rotation']
+ e2g_t = np.array(pose_record['translation'])
+ l2e_r_mat = Quaternion(l2e_r).rotation_matrix
+ e2g_r_mat = Quaternion(e2g_r).rotation_matrix
+
+ return l2e_r_mat, l2e_t, e2g_r_mat, e2g_t
+
+ def get_sdc_planning_label(self, sample_token):
+ sd_rec = self.nusc.get('sample', sample_token)
+ l2e_r_mat_init, l2e_t_init, e2g_r_mat_init, e2g_t_init = self.get_l2g_transform(sd_rec)
+
+ planning = []
+ for _ in range(self.planning_steps):
+ next_annotation_token = sd_rec['next']
+ if next_annotation_token == '':
+ break
+ sd_rec = self.nusc.get('sample', next_annotation_token)
+ l2e_r_mat_curr, l2e_t_curr, e2g_r_mat_curr, e2g_t_curr = self.get_l2g_transform(sd_rec)  # lidar to global at the current (future) frame
+
+ # bbox of sdc under the current lidar frame
+ next_bbox3d = self.generate_sdc_info(self.sdc_vel_info[next_annotation_token], as_lidar_instance3d_box=True)
+
+ # to bbox under the current ego frame
+ next_bbox3d.rotate(l2e_r_mat_curr.T)
+ next_bbox3d.translate(l2e_t_curr)
+
+ # to bbox under the world frame
+ next_bbox3d.rotate(e2g_r_mat_curr.T)
+ next_bbox3d.translate(e2g_t_curr)
+
+ # to bbox under the initial ego frame: first inverse translate, then inverse rotate
+ next_bbox3d.translate(-e2g_t_init)
+ m1 = np.linalg.inv(e2g_r_mat_init)
+ next_bbox3d.rotate(m1.T)
+
+ # to bbox under the initial lidar frame: first inverse translate, then inverse rotate
+ next_bbox3d.translate(-l2e_t_init)
+ m2 = np.linalg.inv(l2e_r_mat_init)
+ next_bbox3d.rotate(m2.T)
+
+ planning.append(next_bbox3d)
+
+ planning_all = np.zeros((1, self.planning_steps, 3))
+ planning_mask_all =
np.zeros((1, self.planning_steps, 2)) + n_valid_timestep = len(planning) + if n_valid_timestep>0: + planning = [p.tensor.squeeze(0) for p in planning] + planning = np.stack(planning, axis=0) # (valid_t, 9) + planning = planning[:, [0,1,6]] # (x, y, yaw) + planning_all[:,:n_valid_timestep,:] = planning + planning_mask_all[:,:n_valid_timestep,:] = 1 + + mask = planning_mask_all[0].any(axis=1) + if mask.sum() == 0: + command = 2 #'FORWARD' + elif planning_all[0, mask][-1][0] >= 2: + command = 0 #'RIGHT' + elif planning_all[0, mask][-1][0] <= -2: + command = 1 #'LEFT' + else: + command = 2 #'FORWARD' + + return planning_all, planning_mask_all, command \ No newline at end of file diff --git a/mmcv/datasets/data_utils/vector_map.py b/mmcv/datasets/data_utils/vector_map.py new file mode 100644 index 0000000..1ea21a6 --- /dev/null +++ b/mmcv/datasets/data_utils/vector_map.py @@ -0,0 +1,246 @@ +import numpy as np +from nuscenes.map_expansion.map_api import NuScenesMap, NuScenesMapExplorer +from nuscenes.eval.common.utils import quaternion_yaw, Quaternion +from shapely import affinity, ops +from shapely.geometry import LineString, box, MultiPolygon, MultiLineString + +CLASS2LABEL = { + 'road_divider': 0, + 'lane_divider': 0, + 'ped_crossing': 1, + 'contours': 2, + 'others': -1 +} + +class VectorizedLocalMap(object): + def __init__(self, + dataroot, + patch_size, + canvas_size, + line_classes=['road_divider', 'lane_divider'], + ped_crossing_classes=['ped_crossing'], + contour_classes=['road_segment', 'lane'], + sample_dist=1, + num_samples=250, + padding=False, + normalize=False, + fixed_num=-1): + ''' + Args: + fixed_num = -1 : no fixed num + ''' + super().__init__() + self.data_root = dataroot + self.MAPS = ['boston-seaport', 'singapore-hollandvillage', + 'singapore-onenorth', 'singapore-queenstown'] + self.line_classes = line_classes + self.ped_crossing_classes = ped_crossing_classes + self.polygon_classes = contour_classes + self.nusc_maps = {} + self.map_explorer = {} + for loc in self.MAPS: + self.nusc_maps[loc] = NuScenesMap(dataroot=self.data_root, map_name=loc) + self.map_explorer[loc] = NuScenesMapExplorer(self.nusc_maps[loc]) + + self.patch_size = patch_size + self.canvas_size = canvas_size + self.sample_dist = sample_dist + self.num_samples = num_samples + self.padding = padding + self.normalize = normalize + self.fixed_num = fixed_num + + def gen_vectorized_samples(self, location, ego2global_translation, ego2global_rotation): + map_pose = ego2global_translation[:2] + rotation = Quaternion(ego2global_rotation) + + patch_box = (map_pose[0], map_pose[1], self.patch_size[0], self.patch_size[1]) + patch_angle = quaternion_yaw(rotation) / np.pi * 180 + + line_geom = self.get_map_geom(patch_box, patch_angle, self.line_classes, location) + line_vector_dict = self.line_geoms_to_vectors(line_geom) + + ped_geom = self.get_map_geom(patch_box, patch_angle, self.ped_crossing_classes, location) + # ped_vector_list = self.ped_geoms_to_vectors(ped_geom) + ped_vector_list = self.line_geoms_to_vectors(ped_geom)['ped_crossing'] + + polygon_geom = self.get_map_geom(patch_box, patch_angle, self.polygon_classes, location) + poly_bound_list = self.poly_geoms_to_vectors(polygon_geom) + + vectors = [] + for line_type, vects in line_vector_dict.items(): + for line, length in vects: + vectors.append((line.astype(float), length, CLASS2LABEL.get(line_type, -1))) + + for ped_line, length in ped_vector_list: + vectors.append((ped_line.astype(float), length, CLASS2LABEL.get('ped_crossing', -1))) + + for contour, length 
in poly_bound_list: + vectors.append((contour.astype(float), length, CLASS2LABEL.get('contours', -1))) + + # filter out -1 + filtered_vectors = [] + for pts, pts_num, type in vectors: + if type != -1: + filtered_vectors.append({ + 'pts': pts, + 'pts_num': pts_num, + 'type': type + }) + + return filtered_vectors + + def get_map_geom(self, patch_box, patch_angle, layer_names, location): + map_geom = [] + for layer_name in layer_names: + if layer_name in self.line_classes: + geoms = self.map_explorer[location]._get_layer_line(patch_box, patch_angle, layer_name) + map_geom.append((layer_name, geoms)) + elif layer_name in self.polygon_classes: + geoms = self.map_explorer[location]._get_layer_polygon(patch_box, patch_angle, layer_name) + map_geom.append((layer_name, geoms)) + elif layer_name in self.ped_crossing_classes: + geoms = self.get_ped_crossing_line(patch_box, patch_angle, location) + # geoms = self.map_explorer[location]._get_layer_polygon(patch_box, patch_angle, layer_name) + map_geom.append((layer_name, geoms)) + return map_geom + + def _one_type_line_geom_to_vectors(self, line_geom): + line_vectors = [] + for line in line_geom: + if not line.is_empty: + if line.geom_type == 'MultiLineString': + for single_line in line.geoms: + line_vectors.append(self.sample_pts_from_line(single_line)) + elif line.geom_type == 'LineString': + line_vectors.append(self.sample_pts_from_line(line)) + else: + raise NotImplementedError + return line_vectors + + def poly_geoms_to_vectors(self, polygon_geom): + roads = polygon_geom[0][1] + lanes = polygon_geom[1][1] + union_roads = ops.unary_union(roads) + union_lanes = ops.unary_union(lanes) + union_segments = ops.unary_union([union_roads, union_lanes]) + max_x = self.patch_size[1] / 2 + max_y = self.patch_size[0] / 2 + local_patch = box(-max_x + 0.2, -max_y + 0.2, max_x - 0.2, max_y - 0.2) + exteriors = [] + interiors = [] + if union_segments.geom_type != 'MultiPolygon': + union_segments = MultiPolygon([union_segments]) + for poly in union_segments.geoms: + exteriors.append(poly.exterior) + for inter in poly.interiors: + interiors.append(inter) + + results = [] + for ext in exteriors: + if ext.is_ccw: + ext.coords = list(ext.coords)[::-1] + lines = ext.intersection(local_patch) + if isinstance(lines, MultiLineString): + lines = ops.linemerge(lines) + results.append(lines) + + for inter in interiors: + if not inter.is_ccw: + inter.coords = list(inter.coords)[::-1] + lines = inter.intersection(local_patch) + if isinstance(lines, MultiLineString): + lines = ops.linemerge(lines) + results.append(lines) + + return self._one_type_line_geom_to_vectors(results) + + def line_geoms_to_vectors(self, line_geom): + line_vectors_dict = dict() + for line_type, a_type_of_lines in line_geom: + one_type_vectors = self._one_type_line_geom_to_vectors(a_type_of_lines) + line_vectors_dict[line_type] = one_type_vectors + + return line_vectors_dict + + def ped_geoms_to_vectors(self, ped_geom): + ped_geom = ped_geom[0][1] + union_ped = ops.unary_union(ped_geom) + if union_ped.geom_type != 'MultiPolygon': + union_ped = MultiPolygon([union_ped]) + + max_x = self.patch_size[1] / 2 + max_y = self.patch_size[0] / 2 + local_patch = box(-max_x + 0.2, -max_y + 0.2, max_x - 0.2, max_y - 0.2) + results = [] + for ped_poly in union_ped: + # rect = ped_poly.minimum_rotated_rectangle + ext = ped_poly.exterior + if not ext.is_ccw: + ext.coords = list(ext.coords)[::-1] + lines = ext.intersection(local_patch) + results.append(lines) + + return self._one_type_line_geom_to_vectors(results) + + def 
get_ped_crossing_line(self, patch_box, patch_angle, location): + def add_line(poly_xy, idx, patch, patch_angle, patch_x, patch_y, line_list): + points = [(p0, p1) for p0, p1 in zip(poly_xy[0, idx:idx + 2], poly_xy[1, idx:idx + 2])] + line = LineString(points) + line = line.intersection(patch) + if not line.is_empty: + line = affinity.rotate(line, -patch_angle, origin=(patch_x, patch_y), use_radians=False) + line = affinity.affine_transform(line, [1.0, 0.0, 0.0, 1.0, -patch_x, -patch_y]) + line_list.append(line) + + patch_x = patch_box[0] + patch_y = patch_box[1] + + patch = NuScenesMapExplorer.get_patch_coord(patch_box, patch_angle) + line_list = [] + records = getattr(self.nusc_maps[location], 'ped_crossing') + for record in records: + polygon = self.map_explorer[location].extract_polygon(record['polygon_token']) + poly_xy = np.array(polygon.exterior.xy) + dist = np.square(poly_xy[:, 1:] - poly_xy[:, :-1]).sum(0) + x1, x2 = np.argsort(dist)[-2:] + + add_line(poly_xy, x1, patch, patch_angle, patch_x, patch_y, line_list) + add_line(poly_xy, x2, patch, patch_angle, patch_x, patch_y, line_list) + + return line_list + + def sample_pts_from_line(self, line): + if self.fixed_num < 0: + distances = np.arange(0, line.length, self.sample_dist) + sampled_points = np.array([list(line.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + else: + # fixed number of points, so distance is line.length / self.fixed_num + distances = np.linspace(0, line.length, self.fixed_num) + sampled_points = np.array([list(line.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + + if self.normalize: + sampled_points = sampled_points / np.array([self.patch_size[1], self.patch_size[0]]) + + num_valid = len(sampled_points) + + if not self.padding or self.fixed_num > 0: + # fixed num sample can return now! + return sampled_points, num_valid + + # fixed distance sampling need padding! + num_valid = len(sampled_points) + + if self.fixed_num < 0: + if num_valid < self.num_samples: + padding = np.zeros((self.num_samples - len(sampled_points), 2)) + sampled_points = np.concatenate([sampled_points, padding], axis=0) + else: + sampled_points = sampled_points[:self.num_samples, :] + num_valid = self.num_samples + + if self.normalize: + sampled_points = sampled_points / np.array([self.patch_size[1], self.patch_size[0]]) + num_valid = len(sampled_points) + + return sampled_points, num_valid diff --git a/mmcv/datasets/dataset_wrappers.py b/mmcv/datasets/dataset_wrappers.py new file mode 100644 index 0000000..65921b7 --- /dev/null +++ b/mmcv/datasets/dataset_wrappers.py @@ -0,0 +1,353 @@ +import bisect +import math +from collections import defaultdict + +import numpy as np +from mmcv.utils import print_log +from torch.utils.data.dataset import ConcatDataset as _ConcatDataset + +from .builder import DATASETS +from .coco import CocoDataset + + +@DATASETS.register_module() +class ConcatDataset(_ConcatDataset): + """A wrapper of concatenated dataset. + + Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but + concat the group flag for image aspect ratio. + + Args: + datasets (list[:obj:`Dataset`]): A list of datasets. + separate_eval (bool): Whether to evaluate the results + separately if it is used as validation dataset. + Defaults to True. 
+ """ + + def __init__(self, datasets, separate_eval=True): + super(ConcatDataset, self).__init__(datasets) + self.CLASSES = datasets[0].CLASSES + self.separate_eval = separate_eval + if not separate_eval: + if any([isinstance(ds, CocoDataset) for ds in datasets]): + raise NotImplementedError( + 'Evaluating concatenated CocoDataset as a whole is not' + ' supported! Please set "separate_eval=True"') + elif len(set([type(ds) for ds in datasets])) != 1: + raise NotImplementedError( + 'All the datasets should have same types') + + if hasattr(datasets[0], 'flag'): + flags = [] + for i in range(0, len(datasets)): + flags.append(datasets[i].flag) + self.flag = np.concatenate(flags) + + def get_cat_ids(self, idx): + """Get category ids of concatenated dataset by index. + + Args: + idx (int): Index of data. + + Returns: + list[int]: All categories in the image of specified index. + """ + + if idx < 0: + if -idx > len(self): + raise ValueError( + 'absolute value of index should not exceed dataset length') + idx = len(self) + idx + dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) + if dataset_idx == 0: + sample_idx = idx + else: + sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] + return self.datasets[dataset_idx].get_cat_ids(sample_idx) + + def evaluate(self, results, logger=None, **kwargs): + """Evaluate the results. + + Args: + results (list[list | tuple]): Testing results of the dataset. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + + Returns: + dict[str: float]: AP results of the total dataset or each separate + dataset if `self.separate_eval=True`. + """ + assert len(results) == self.cumulative_sizes[-1], \ + ('Dataset and results have different sizes: ' + f'{self.cumulative_sizes[-1]} v.s. {len(results)}') + + # Check whether all the datasets support evaluation + for dataset in self.datasets: + assert hasattr(dataset, 'evaluate'), \ + f'{type(dataset)} does not implement evaluate function' + + if self.separate_eval: + dataset_idx = -1 + total_eval_results = dict() + for size, dataset in zip(self.cumulative_sizes, self.datasets): + start_idx = 0 if dataset_idx == -1 else \ + self.cumulative_sizes[dataset_idx] + end_idx = self.cumulative_sizes[dataset_idx + 1] + + results_per_dataset = results[start_idx:end_idx] + print_log( + f'\nEvaluateing {dataset.ann_file} with ' + f'{len(results_per_dataset)} images now', + logger=logger) + + eval_results_per_dataset = dataset.evaluate( + results_per_dataset, logger=logger, **kwargs) + dataset_idx += 1 + for k, v in eval_results_per_dataset.items(): + total_eval_results.update({f'{dataset_idx}_{k}': v}) + + return total_eval_results + elif any([isinstance(ds, CocoDataset) for ds in self.datasets]): + raise NotImplementedError( + 'Evaluating concatenated CocoDataset as a whole is not' + ' supported! Please set "separate_eval=True"') + elif len(set([type(ds) for ds in self.datasets])) != 1: + raise NotImplementedError( + 'All the datasets should have same types') + else: + original_data_infos = self.datasets[0].data_infos + self.datasets[0].data_infos = sum( + [dataset.data_infos for dataset in self.datasets], []) + eval_results = self.datasets[0].evaluate( + results, logger=logger, **kwargs) + self.datasets[0].data_infos = original_data_infos + return eval_results + + +@DATASETS.register_module() +class RepeatDataset: + """A wrapper of repeated dataset. + + The length of repeated dataset will be `times` larger than the original + dataset. 
This is useful when the data loading time is long but the dataset + is small. Using RepeatDataset can reduce the data loading time between + epochs. + + Args: + dataset (:obj:`Dataset`): The dataset to be repeated. + times (int): Repeat times. + """ + + def __init__(self, dataset, times): + self.dataset = dataset + self.times = times + self.CLASSES = dataset.CLASSES + if hasattr(self.dataset, 'flag'): + self.flag = np.tile(self.dataset.flag, times) + + self._ori_len = len(self.dataset) + + def __getitem__(self, idx): + return self.dataset[idx % self._ori_len] + + def get_cat_ids(self, idx): + """Get category ids of repeat dataset by index. + + Args: + idx (int): Index of data. + + Returns: + list[int]: All categories in the image of specified index. + """ + + return self.dataset.get_cat_ids(idx % self._ori_len) + + def __len__(self): + """Length after repetition.""" + return self.times * self._ori_len + + +# Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57 # noqa +@DATASETS.register_module() +class ClassBalancedDataset: + """A wrapper of repeated dataset with repeat factor. + + Suitable for training on class imbalanced datasets like LVIS. Following + the sampling strategy in the `paper `_, + in each epoch, an image may appear multiple times based on its + "repeat factor". + The repeat factor for an image is a function of the frequency the rarest + category labeled in that image. The "frequency of category c" in [0, 1] + is defined by the fraction of images in the training set (without repeats) + in which category c appears. + The dataset needs to instantiate :func:`self.get_cat_ids` to support + ClassBalancedDataset. + + The repeat factor is computed as followed. + + 1. For each category c, compute the fraction # of images + that contain it: :math:`f(c)` + 2. For each category c, compute the category-level repeat factor: + :math:`r(c) = max(1, sqrt(t/f(c)))` + 3. For each image I, compute the image-level repeat factor: + :math:`r(I) = max_{c in I} r(c)` + + Args: + dataset (:obj:`CustomDataset`): The dataset to be repeated. + oversample_thr (float): frequency threshold below which data is + repeated. For categories with ``f_c >= oversample_thr``, there is + no oversampling. For categories with ``f_c < oversample_thr``, the + degree of oversampling following the square-root inverse frequency + heuristic above. + filter_empty_gt (bool, optional): If set true, images without bounding + boxes will not be oversampled. Otherwise, they will be categorized + as the pure background class and involved into the oversampling. + Default: True. + """ + + def __init__(self, dataset, oversample_thr, filter_empty_gt=True): + self.dataset = dataset + self.oversample_thr = oversample_thr + self.filter_empty_gt = filter_empty_gt + self.CLASSES = dataset.CLASSES + + repeat_factors = self._get_repeat_factors(dataset, oversample_thr) + repeat_indices = [] + for dataset_idx, repeat_factor in enumerate(repeat_factors): + repeat_indices.extend([dataset_idx] * math.ceil(repeat_factor)) + self.repeat_indices = repeat_indices + + flags = [] + if hasattr(self.dataset, 'flag'): + for flag, repeat_factor in zip(self.dataset.flag, repeat_factors): + flags.extend([flag] * int(math.ceil(repeat_factor))) + assert len(flags) == len(repeat_indices) + self.flag = np.asarray(flags, dtype=np.uint8) + + def _get_repeat_factors(self, dataset, repeat_thr): + """Get repeat factor for each images in the dataset. 
+ + Args: + dataset (:obj:`CustomDataset`): The dataset + repeat_thr (float): The threshold of frequency. If an image + contains the categories whose frequency below the threshold, + it would be repeated. + + Returns: + list[float]: The repeat factors for each images in the dataset. + """ + + # 1. For each category c, compute the fraction # of images + # that contain it: f(c) + category_freq = defaultdict(int) + num_images = len(dataset) + for idx in range(num_images): + cat_ids = set(self.dataset.get_cat_ids(idx)) + if len(cat_ids) == 0 and not self.filter_empty_gt: + cat_ids = set([len(self.CLASSES)]) + for cat_id in cat_ids: + category_freq[cat_id] += 1 + for k, v in category_freq.items(): + category_freq[k] = v / num_images + + # 2. For each category c, compute the category-level repeat factor: + # r(c) = max(1, sqrt(t/f(c))) + category_repeat = { + cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq)) + for cat_id, cat_freq in category_freq.items() + } + + # 3. For each image I, compute the image-level repeat factor: + # r(I) = max_{c in I} r(c) + repeat_factors = [] + for idx in range(num_images): + cat_ids = set(self.dataset.get_cat_ids(idx)) + if len(cat_ids) == 0 and not self.filter_empty_gt: + cat_ids = set([len(self.CLASSES)]) + repeat_factor = 1 + if len(cat_ids) > 0: + repeat_factor = max( + {category_repeat[cat_id] + for cat_id in cat_ids}) + repeat_factors.append(repeat_factor) + + return repeat_factors + + def __getitem__(self, idx): + ori_index = self.repeat_indices[idx] + return self.dataset[ori_index] + + def __len__(self): + """Length after repetition.""" + return len(self.repeat_indices) + +@DATASETS.register_module() +class CBGSDataset(object): + """A wrapper of class sampled dataset with ann_file path. Implementation of + paper `Class-balanced Grouping and Sampling for Point Cloud 3D Object + Detection `_. + + Balance the number of scenes under different classes. + + Args: + dataset (:obj:`CustomDataset`): The dataset to be class sampled. + """ + + def __init__(self, dataset): + self.dataset = dataset + self.CLASSES = dataset.CLASSES + self.cat2id = {name: i for i, name in enumerate(self.CLASSES)} + self.sample_indices = self._get_sample_indices() + # self.dataset.data_infos = self.data_infos + if hasattr(self.dataset, 'flag'): + self.flag = np.array( + [self.dataset.flag[ind] for ind in self.sample_indices], + dtype=np.uint8) + + def _get_sample_indices(self): + """Load annotations from ann_file. + + Args: + ann_file (str): Path of the annotation file. + + Returns: + list[dict]: List of annotations after class sampling. + """ + class_sample_idxs = {cat_id: [] for cat_id in self.cat2id.values()} + for idx in range(len(self.dataset)): + sample_cat_ids = self.dataset.get_cat_ids(idx) + for cat_id in sample_cat_ids: + class_sample_idxs[cat_id].append(idx) + duplicated_samples = sum( + [len(v) for _, v in class_sample_idxs.items()]) + class_distribution = { + k: len(v) / duplicated_samples + for k, v in class_sample_idxs.items() + } + + sample_indices = [] + + frac = 1.0 / len(self.CLASSES) + ratios = [frac / v for v in class_distribution.values()] + for cls_inds, ratio in zip(list(class_sample_idxs.values()), ratios): + sample_indices += np.random.choice(cls_inds, + int(len(cls_inds) * + ratio)).tolist() + return sample_indices + + def __getitem__(self, idx): + """Get item from infos according to the given index. + + Returns: + dict: Data dictionary of the corresponding index. 
+ """ + ori_idx = self.sample_indices[idx] + return self.dataset[ori_idx] + + def __len__(self): + """Return the length of data infos. + + Returns: + int: Length of data infos. + """ + return len(self.sample_indices) diff --git a/mmcv/datasets/dd3d_nuscenes_dataset.py b/mmcv/datasets/dd3d_nuscenes_dataset.py new file mode 100644 index 0000000..6c77617 --- /dev/null +++ b/mmcv/datasets/dd3d_nuscenes_dataset.py @@ -0,0 +1,359 @@ +# Copyright 2021 Toyota Research Institute. All rights reserved. +#import functools +from collections import OrderedDict + +import numpy as np +import seaborn as sns +from torch.utils.data import Dataset +from tqdm import tqdm + +from mmcv.structures import BoxMode +from nuscenes.eval.detection.utils import category_to_detection_name +from nuscenes.nuscenes import NuScenes +from nuscenes.utils.splits import create_splits_scenes + +#from tridet.data import collect_dataset_dicts +from adzoo.bevformer.mmdet3d_plugin.dd3d.structures.boxes3d import GenericBoxes3D +from adzoo.bevformer.mmdet3d_plugin.dd3d.structures.pose import Pose +from adzoo.bevformer.mmdet3d_plugin.dd3d.utils.geometry import project_points3d +from adzoo.bevformer.mmdet3d_plugin.dd3d.utils.visualization import float_to_uint8_color + +# https://github.com/nutonomy/nuscenes-devkit/blob/9b209638ef3dee6d0cdc5ac700c493747f5b35fe/python-sdk/nuscenes/utils/splits.py#L189 +# - train/val/test: The standard splits of the nuScenes dataset (700/150/150 scenes). +# - mini_train/mini_val: Train and val splits of the mini subset used for visualization and debugging (8/2 scenes). +# - train_detect/train_track: Two halves of the train split used for separating the training sets of detector and +# tracker if required +DATASET_NAME_TO_VERSION = { + "nusc_train": "v1.0-trainval", + "nusc_val": "v1.0-trainval", + "nusc_val-subsample-8": "v1.0-trainval", + "nusc_trainval": "v1.0-trainval", + "nusc_test": "v1.0-test", + "nusc_mini_train": "v1.0-mini", + "nusc_mini_val": "v1.0-mini", +} + +CAMERA_NAMES = ('CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT') + +ATTRIBUTE_IDS = { + 'vehicle.moving': 0, + 'vehicle.parked': 1, + 'vehicle.stopped': 2, + 'pedestrian.moving': 0, + 'pedestrian.standing': 1, + 'pedestrian.sitting_lying_down': 2, + 'cycle.with_rider': 0, + 'cycle.without_rider': 1, +} + +CATEGORY_IDS = OrderedDict({ + 'barrier': 0, + 'bicycle': 1, + 'bus': 2, + 'car': 3, + 'construction_vehicle': 4, + 'motorcycle': 5, + 'pedestrian': 6, + 'traffic_cone': 7, + 'trailer': 8, + 'truck': 9, +}) + +COLORS = [float_to_uint8_color(clr) for clr in sns.color_palette("bright", n_colors=10)] +COLORMAP = OrderedDict({ + 'barrier': COLORS[8], # yellow + 'bicycle': COLORS[0], # blue + 'bus': COLORS[6], # pink + 'car': COLORS[2], # green + 'construction_vehicle': COLORS[7], # gray + 'motorcycle': COLORS[4], # purple + 'pedestrian': COLORS[1], # orange + 'traffic_cone': COLORS[3], # red + 'trailer': COLORS[9], # skyblue + 'truck': COLORS[5], # brown +}) + +MAX_NUM_ATTRIBUTES = 3 + + +def _compute_iou(box1, box2): + """ + Parameters + ---------- + box1, box2: + (x1, y1, x2, y2) + """ + xx1 = max(box1[0], box2[0]) + yy1 = max(box1[1], box2[1]) + xx2 = min(box1[2], box2[2]) + yy2 = min(box1[3], box2[3]) + if xx1 >= xx2 or yy1 >= yy2: + return 0. 
+ inter = (xx2 - xx1) * (yy2 - yy1) + a1 = (box1[2] - box1[0]) * (box1[3] - box1[1]) + a2 = (box2[2] - box2[0]) * (box2[3] - box2[1]) + return inter / (a1 + a2 - inter) + + +class DD3DNuscenesDataset(Dataset): + def __init__(self, name, data_root, datum_names=CAMERA_NAMES, min_num_lidar_points=3, min_box_visibility=0.2, **unused): + self.data_root = data_root + assert name in DATASET_NAME_TO_VERSION + version = DATASET_NAME_TO_VERSION[name] + self.nusc = NuScenes(version=version, dataroot=data_root, verbose=True) + + self.datum_names = datum_names + self.min_num_lidar_points = min_num_lidar_points + self.min_box_visibility = min_box_visibility + + self.dataset_item_info = self._build_dataset_item_info(name) + + # Index instance tokens to their IDs + self._instance_token_to_id = self._index_instance_tokens() + + # Construct the mapping from datum_token (image id) to index + print("Generating the mapping from image id to idx...") + self.datumtoken2idx = {} + for idx, (datum_token, _, _, _, _) in enumerate(self.dataset_item_info): + self.datumtoken2idx[datum_token] = idx + print("Done.") + + def _build_dataset_item_info(self, name): + scenes_in_split = self._get_split_scenes(name) + + dataset_items = [] + for _, scene_token in tqdm(scenes_in_split): + scene = self.nusc.get('scene', scene_token) + sample_token = scene['first_sample_token'] + for sample_idx in range(scene['nbr_samples']): + if name.endswith('subsample-8') and sample_idx % 8 > 0: + # Sample-level subsampling. + continue + + sample = self.nusc.get('sample', sample_token) + for datum_name, datum_token in sample['data'].items(): + if datum_name not in self.datum_names: + continue + dataset_items.append((datum_token, sample_token, scene['name'], sample_idx, datum_name)) + sample_token = sample['next'] + return dataset_items + + def _get_split_scenes(self, name): + scenes_in_splits = create_splits_scenes() + if name == "nusc_trainval": + scenes = scenes_in_splits["train"] + scenes_in_splits["val"] + elif name == "nusc_val-subsample-8": + scenes = scenes_in_splits["val"] + else: + assert name.startswith('nusc_'), f"Invalid dataset name: {name}" + split = name[5:] + assert split in scenes_in_splits, f"Invalid dataset: {split}" + scenes = scenes_in_splits[split] + + # Mapping from scene name to token. 
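+ # The returned list pairs each split scene name with its token, e.g.
+ # [('scene-0001', '<scene token>'), ...] (illustrative values); note that
+ # name_to_token raises a KeyError if a requested split scene is absent
+ # from the loaded nuScenes version.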
+ name_to_token = {scene['name']: scene['token'] for scene in self.nusc.scene} + return [(name, name_to_token[name]) for name in scenes] + + def __len__(self): + return len(self.dataset_item_info) + + def _build_id(self, scene_name, sample_idx, datum_name): + sample_id = f"{scene_name}_{sample_idx:03d}" + image_id = f"{sample_id}_{datum_name}" + return image_id, sample_id + + def _index_instance_tokens(self): + """Index instance tokens for uniquely identifying instances across samples""" + instance_token_to_id = {} + for record in self.nusc.sample_annotation: + instance_token = record['instance_token'] + if instance_token not in instance_token_to_id: + next_instance_id = len(instance_token_to_id) + instance_token_to_id[instance_token] = next_instance_id + return instance_token_to_id + + def get_instance_annotations(self, annotation_list, K, image_shape, pose_WS): + annotations = [] + for _ann in annotation_list: + ann = self.nusc.get('sample_annotation', _ann.token) + if ann['num_lidar_pts'] + ann['num_radar_pts'] < self.min_num_lidar_points: + continue + annotation = OrderedDict() + + # -------- + # Category + # -------- + category = category_to_detection_name(ann['category_name']) + if category is None: + continue + annotation['category_id'] = CATEGORY_IDS[category] + + # ------ + # 3D box + # ------ + # NOTE: ann['rotation'], ann['translation'] is in global frame. + pose_SO = Pose(wxyz=_ann.orientation, tvec=_ann.center) # pose in sensor frame + # DEBUG: + # pose_WO_1 = Pose(np.array(ann['rotation']), np.array(ann['translation'])) + # pose_WO_2 = pose_WS * pose_SO + # assert np.allclose(pose_WO_1.matrix, pose_WO_2.matrix) + bbox3d = GenericBoxes3D(_ann.orientation, _ann.center, _ann.wlh) + annotation['bbox3d'] = bbox3d.vectorize().tolist()[0] + + # -------------------------------------- + # 2D box -- project 8 corners of 3D bbox + # -------------------------------------- + corners = project_points3d(bbox3d.corners.cpu().numpy().squeeze(0), K) + l, t = corners[:, 0].min(), corners[:, 1].min() + r, b = corners[:, 0].max(), corners[:, 1].max() + + x1 = max(0, l) + y1 = max(0, t) + x2 = min(image_shape[1], r) + y2 = min(image_shape[0], b) + + iou = _compute_iou([l, t, r, b], [x1, y1, x2, y2]) + if iou < self.min_box_visibility: + continue + + annotation['bbox'] = [x1, y1, x2, y2] + annotation['bbox_mode'] = BoxMode.XYXY_ABS + + # -------- + # Track ID + # -------- + annotation['track_id'] = self._instance_token_to_id[ann['instance_token']] + + # --------- + # Attribute + # --------- + attr_tokens = ann['attribute_tokens'] + assert len(attr_tokens) < 2 # NOTE: Allow only single attrubute. + attribute_id = MAX_NUM_ATTRIBUTES # By default, MAX_NUM_ATTRIBUTES -- this is to be ignored in loss compute. + if attr_tokens: + attribute = self.nusc.get('attribute', attr_tokens[0])['name'] + attribute_id = ATTRIBUTE_IDS[attribute] + annotation['attribute_id'] = attribute_id + + # ----- + # Speed + # ----- + vel_global = self.nusc.box_velocity(ann['token']) + speed = np.linalg.norm(vel_global) # NOTE: This can be NaN. + # DEBUG: + # speed * Quaternion(ann['rotation']).rotation_matrix.T[0] ~= vel_global + annotation['speed'] = speed + + annotations.append(annotation) + + return annotations + + def _get_ego_velocity(self, current, max_time_diff=1.5): + """Velocity of ego-vehicle in m/s. + """ + has_prev = current['prev'] != '' + has_next = current['next'] != '' + + # Cannot estimate velocity for a single annotation. 
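+ # The estimate below is a finite difference of ego poses: a centered
+ # difference over (prev, next) when both neighbouring keyframes exist,
+ # otherwise a one-sided difference against the single available neighbour.
+ # max_time_diff is doubled in the centered case so the allowed window stays
+ # symmetric, and NaNs are returned when the time gap is too large to give
+ # a meaningful estimate.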
+ if not has_prev and not has_next: + return np.array([np.nan, np.nan, np.nan]) + + if has_prev: + first = self.nusc.get('sample_data', current['prev']) + else: + first = current + + if has_next: + last = self.nusc.get('sample_data', current['next']) + else: + last = current + + pos_first = self.nusc.get('ego_pose', first['ego_pose_token'])['translation'] + pos_last = self.nusc.get('ego_pose', last['ego_pose_token'])['translation'] + pos_diff = np.float32(pos_last) - np.float32(pos_first) + + time_last = 1e-6 * last['timestamp'] + time_first = 1e-6 * first['timestamp'] + time_diff = time_last - time_first + + if has_next and has_prev: + # If doing centered difference, allow for up to double the max_time_diff. + max_time_diff *= 2 + + if time_diff > max_time_diff: + # If time_diff is too big, don't return an estimate. + return np.array([np.nan, np.nan, np.nan]) + else: + return pos_diff / time_diff + + def __getitem__(self, idx): + datum_token, sample_token, scene_name, sample_idx, datum_name = self.dataset_item_info[idx] + datum = self.nusc.get('sample_data', datum_token) + assert datum['is_key_frame'] + + filename, _annotations, K = self.nusc.get_sample_data(datum_token) + image_id, sample_id = self._build_id(scene_name, sample_idx, datum_name) + height, width = datum['height'], datum['width'] + d2_dict = OrderedDict( + file_name=filename, + height=height, + width=width, + image_id=image_id, + sample_id=sample_id, + sample_token=sample_token + ) + + # Intrinsics + d2_dict['intrinsics'] = list(K.flatten()) + + # Get pose of the sensor (S) from vehicle (V) frame + _pose_VS = self.nusc.get('calibrated_sensor', datum['calibrated_sensor_token']) + pose_VS = Pose(wxyz=np.float64(_pose_VS['rotation']), tvec=np.float64(_pose_VS['translation'])) + + # Get ego-pose of the vehicle (V) from global/world (W) frame + _pose_WV = self.nusc.get('ego_pose', datum['ego_pose_token']) + pose_WV = Pose(wxyz=np.float64(_pose_WV['rotation']), tvec=np.float64(_pose_WV['translation'])) + pose_WS = pose_WV * pose_VS + + d2_dict['pose'] = {'wxyz': list(pose_WS.quat.elements), 'tvec': list(pose_WS.tvec)} + d2_dict['extrinsics'] = {'wxyz': list(pose_VS.quat.elements), 'tvec': list(pose_VS.tvec)} + + d2_dict['ego_speed'] = np.linalg.norm(self._get_ego_velocity(datum)) + + d2_dict['annotations'] = self.get_instance_annotations(_annotations, K, (height, width), pose_WS) + + return d2_dict + + def getitem_by_datumtoken(self, datum_token): + # idx = self.datumtoken2idx[datum_token] + # ret = self.__getitem__(idx) + + datum = self.nusc.get('sample_data', datum_token) + sample_token = datum['sample_token'] + filename, _annotations, K = self.nusc.get_sample_data(datum_token) + height, width = datum['height'], datum['width'] + d2_dict = OrderedDict( + file_name=filename, + height=height, + width=width, + image_id=0, + sample_id=0, + sample_token=sample_token + ) + # Intrinsics + d2_dict['intrinsics'] = list(K.flatten()) + # Get pose of the sensor (S) from vehicle (V) frame + _pose_VS = self.nusc.get('calibrated_sensor', datum['calibrated_sensor_token']) + pose_VS = Pose(wxyz=np.float64(_pose_VS['rotation']), tvec=np.float64(_pose_VS['translation'])) + # Get ego-pose of the vehicle (V) from global/world (W) frame + _pose_WV = self.nusc.get('ego_pose', datum['ego_pose_token']) + pose_WV = Pose(wxyz=np.float64(_pose_WV['rotation']), tvec=np.float64(_pose_WV['translation'])) + pose_WS = pose_WV * pose_VS + + d2_dict['pose'] = {'wxyz': list(pose_WS.quat.elements), 'tvec': list(pose_WS.tvec)} + d2_dict['extrinsics'] = {'wxyz': 
list(pose_VS.quat.elements), 'tvec': list(pose_VS.tvec)}
+
+ d2_dict['ego_speed'] = np.linalg.norm(self._get_ego_velocity(datum))
+
+ d2_dict['annotations'] = self.get_instance_annotations(_annotations, K, (height, width), pose_WS)
+ return d2_dict
\ No newline at end of file
diff --git a/mmcv/datasets/eval_utils/eval_utils.py b/mmcv/datasets/eval_utils/eval_utils.py
new file mode 100644
index 0000000..9d56923
--- /dev/null
+++ b/mmcv/datasets/eval_utils/eval_utils.py
@@ -0,0 +1,911 @@
+import json
+import torch
+import tqdm
+from typing import List, Dict, Tuple, Callable, Union
+from nuscenes import NuScenes
+from pyquaternion import Quaternion
+import numpy as np
+from .metric_utils import min_ade, min_fde, miss_rate
+
+from nuscenes.utils.splits import create_splits_scenes
+from nuscenes.eval.detection.utils import category_to_detection_name
+from nuscenes.prediction import PredictHelper, convert_local_coords_to_global
+from nuscenes.eval.common.data_classes import EvalBox, EvalBoxes
+from nuscenes.eval.detection.data_classes import DetectionBox, DetectionConfig  # DetectionConfig is used in deserialize below
+from nuscenes.eval.detection.data_classes import DetectionMetricData, DetectionMetricDataList, DetectionMetrics
+from nuscenes.eval.common.utils import center_distance, scale_iou, yaw_diff, velocity_l2, attr_acc, cummean
+
+def category_to_motion_name(category_name: str):
+ """
+ Label mapping from generic nuScenes categories to the coarse motion
+ classes ('car', 'pedestrian', 'barrier') used for motion evaluation.
+ Note that pedestrian does not include personal_mobility, stroller and wheelchair.
+ :param category_name: Generic nuScenes class.
+ :return: Motion class name, or None if the category is unmapped.
+ """
+ detection_mapping = {
+ 'movable_object.barrier': 'barrier',
+ 'vehicle.bicycle': 'car',
+ 'vehicle.bus.bendy': 'car',
+ 'vehicle.bus.rigid': 'car',
+ 'vehicle.car': 'car',
+ 'vehicle.construction': 'car',
+ 'vehicle.motorcycle': 'car',
+ 'human.pedestrian.adult': 'pedestrian',
+ 'human.pedestrian.child': 'pedestrian',
+ 'human.pedestrian.construction_worker': 'pedestrian',
+ 'human.pedestrian.police_officer': 'pedestrian',
+ 'movable_object.trafficcone': 'barrier',
+ 'vehicle.trailer': 'car',
+ 'vehicle.truck': 'car'
+ }
+
+ if category_name in detection_mapping:
+ return detection_mapping[category_name]
+ else:
+ return None
+
+def detection_prediction_category_to_motion_name(category_name: str):
+ """
+ Label mapping from nuScenes detection classes to the coarse motion
+ classes ('car', 'pedestrian', 'barrier') used for motion evaluation.
+ :param category_name: nuScenes detection class.
+ :return: Motion class name, or None if the class is unmapped.
+ """
+ detection_mapping = {
+ 'car': 'car',
+ 'truck': 'car',
+ 'construction_vehicle': 'car',
+ 'bus': 'car',
+ 'trailer': 'car',
+ 'motorcycle': 'car',
+ 'bicycle': 'car',
+ 'pedestrian': 'pedestrian',
+ 'traffic_cone': 'barrier',
+ 'barrier': 'barrier',
+ }
+
+ if category_name in detection_mapping:
+ return detection_mapping[category_name]
+ else:
+ return None
+
+class DetectionMotionMetrics(DetectionMetrics):
+ """ Stores average precision and true positive metric results. Provides properties to summarize. """
+
+ @classmethod
+ def deserialize(cls, content: dict):
+ """ Initialize from serialized dictionary.
""" + + cfg = DetectionConfig.deserialize(content['cfg']) + metrics = cls(cfg=cfg) + metrics.add_runtime(content['eval_time']) + + for detection_name, label_aps in content['label_aps'].items(): + for dist_th, ap in label_aps.items(): + metrics.add_label_ap(detection_name=detection_name, dist_th=float(dist_th), ap=float(ap)) + + for detection_name, label_tps in content['label_tp_errors'].items(): + for metric_name, tp in label_tps.items(): + metrics.add_label_tp(detection_name=detection_name, metric_name=metric_name, tp=float(tp)) + + return metrics + +class DetectionMotionMetricDataList(DetectionMetricDataList): + """ This stores a set of MetricData in a dict indexed by (name, match-distance). """ + @classmethod + def deserialize(cls, content: dict): + mdl = cls() + for key, md in content.items(): + name, distance = key.split(':') + mdl.set(name, float(distance), DetectionMotionMetricData.deserialize(md)) + return mdl + +class DetectionMotionMetricData(DetectionMetricData): + """ This class holds accumulated and interpolated data required to calculate the detection metrics. """ + + nelem = 101 + + def __init__(self, + recall: np.array, + precision: np.array, + confidence: np.array, + trans_err: np.array, + vel_err: np.array, + scale_err: np.array, + orient_err: np.array, + attr_err: np.array, + min_ade_err: np.array, + min_fde_err: np.array, + miss_rate_err: np.array): + + # Assert lengths. + assert len(recall) == self.nelem + assert len(precision) == self.nelem + assert len(confidence) == self.nelem + assert len(trans_err) == self.nelem + assert len(vel_err) == self.nelem + assert len(scale_err) == self.nelem + assert len(orient_err) == self.nelem + assert len(attr_err) == self.nelem + assert len(min_ade_err) == self.nelem + assert len(min_fde_err) == self.nelem + assert len(miss_rate_err) == self.nelem + + # Assert ordering. + assert all(confidence == sorted(confidence, reverse=True)) # Confidences should be descending. + assert all(recall == sorted(recall)) # Recalls should be ascending. + + # Set attributes explicitly to help IDEs figure out what is going on. + self.recall = recall + self.precision = precision + self.confidence = confidence + self.trans_err = trans_err + self.vel_err = vel_err + self.scale_err = scale_err + self.orient_err = orient_err + self.attr_err = attr_err + self.min_ade_err = min_ade_err + self.min_fde_err = min_fde_err + self.miss_rate_err = miss_rate_err + + def __eq__(self, other): + eq = True + for key in self.serialize().keys(): + eq = eq and np.array_equal(getattr(self, key), getattr(other, key)) + return eq + + @property + def max_recall_ind(self): + """ Returns index of max recall achieved. """ + + # Last instance of confidence > 0 is index of max achieved recall. + non_zero = np.nonzero(self.confidence)[0] + if len(non_zero) == 0: # If there are no matches, all the confidence values will be zero. + max_recall_ind = 0 + else: + max_recall_ind = non_zero[-1] + + return max_recall_ind + + @property + def max_recall(self): + """ Returns max recall achieved. """ + + return self.recall[self.max_recall_ind] + + def serialize(self): + """ Serialize instance into json-friendly format. 
""" + return { + 'recall': self.recall.tolist(), + 'precision': self.precision.tolist(), + 'confidence': self.confidence.tolist(), + 'trans_err': self.trans_err.tolist(), + 'vel_err': self.vel_err.tolist(), + 'scale_err': self.scale_err.tolist(), + 'orient_err': self.orient_err.tolist(), + 'attr_err': self.attr_err.tolist(), + 'min_ade_err': self.min_ade_err.tolist(), + 'min_fde_err': self.min_fde_err.tolist(), + 'miss_rate_err': self.miss_rate_err.tolist(), + } + + @classmethod + def deserialize(cls, content: dict): + """ Initialize from serialized content. """ + return cls(recall=np.array(content['recall']), + precision=np.array(content['precision']), + confidence=np.array(content['confidence']), + trans_err=np.array(content['trans_err']), + vel_err=np.array(content['vel_err']), + scale_err=np.array(content['scale_err']), + orient_err=np.array(content['orient_err']), + attr_err=np.array(content['attr_err']), + min_ade_err=np.array(content['min_ade_err']), + min_fde_err=np.array(content['min_fde_err']), + miss_rate_err=np.array(content['miss_rate_err'])) + + @classmethod + def no_predictions(cls): + """ Returns a md instance corresponding to having no predictions. """ + return cls(recall=np.linspace(0, 1, cls.nelem), + precision=np.zeros(cls.nelem), + confidence=np.zeros(cls.nelem), + trans_err=np.ones(cls.nelem), + vel_err=np.ones(cls.nelem), + scale_err=np.ones(cls.nelem), + orient_err=np.ones(cls.nelem), + attr_err=np.ones(cls.nelem), + min_ade_err=np.ones(cls.nelem), + min_fde_err=np.ones(cls.nelem), + miss_rate_err=np.ones(cls.nelem)) + + @classmethod + def random_md(cls): + """ Returns an md instance corresponding to a random results. """ + return cls(recall=np.linspace(0, 1, cls.nelem), + precision=np.random.random(cls.nelem), + confidence=np.linspace(0, 1, cls.nelem)[::-1], + trans_err=np.random.random(cls.nelem), + vel_err=np.random.random(cls.nelem), + scale_err=np.random.random(cls.nelem), + orient_err=np.random.random(cls.nelem), + attr_err=np.random.random(cls.nelem), + min_ade_err=np.random.random(cls.nelem), + min_fde_err=np.random.random(cls.nelem), + miss_rate_err=np.random.random(cls.nelem)) + + +class DetectionMotionBox(DetectionBox): + def __init__(self, + sample_token: str = "", + translation: Tuple[float, float, float] = (0, 0, 0), + size: Tuple[float, float, float] = (0, 0, 0), + rotation: Tuple[float, float, float, float] = (0, 0, 0, 0), + velocity: Tuple[float, float] = (0, 0), + ego_translation: [float, float, float] = (0, 0, 0), # Translation to ego vehicle in meters. + num_pts: int = -1, # Nbr. LIDAR or RADAR inside the box. Only for gt boxes. + detection_name: str = 'car', # The class name used in the detection challenge. + detection_score: float = -1.0, # GT samples do not have a score. + attribute_name: str = '', + traj=None, + traj_scores=None): # Box attribute. Each box can have at most 1 attribute. + super(DetectionBox, self).__init__(sample_token, translation, size, rotation, velocity, ego_translation, num_pts) + assert detection_name is not None, 'Error: detection_name cannot be empty!' + # assert detection_name in DETECTION_NAMES, 'Error: Unknown detection_name %s' % detection_name + + # assert attribute_name in ATTRIBUTE_NAMES or attribute_name == '', \ + # 'Error: Unknown attribute_name %s' % attribute_name + + assert type(detection_score) == float, 'Error: detection_score must be a float!' + assert not np.any(np.isnan(detection_score)), 'Error: detection_score may not be NaN!' + + # Assign. 
+ self.detection_name = detection_name + self.attribute_name = attribute_name + self.detection_score = detection_score + self.traj = traj + self.traj_scores = traj_scores + self.traj_index = None + + def __eq__(self, other): + return (self.sample_token == other.sample_token and + self.translation == other.translation and + self.size == other.size and + self.rotation == other.rotation and + self.velocity == other.velocity and + self.ego_translation == other.ego_translation and + self.num_pts == other.num_pts and + self.detection_name == other.detection_name and + self.detection_score == other.detection_score and + self.attribute_name == other.attribute_name and + np.all(self.traj == other.traj) and + np.all(self.traj_scores == other.traj_scores)) + + def serialize(self) -> dict: + """ Serialize instance into json-friendly format. """ + return { + 'sample_token': self.sample_token, + 'translation': self.translation, + 'size': self.size, + 'rotation': self.rotation, + 'velocity': self.velocity, + 'ego_translation': self.ego_translation, + 'num_pts': self.num_pts, + 'detection_name': self.detection_name, + 'detection_score': self.detection_score, + 'attribute_name': self.attribute_name, + 'traj': self.traj, + 'traj_scores': self.traj_scores + } + + @classmethod + def deserialize(cls, content: dict): + """ Initialize from serialized content. """ + return cls(sample_token=content['sample_token'], + translation=tuple(content['translation']), + size=tuple(content['size']), + rotation=tuple(content['rotation']), + velocity=tuple(content['velocity']), + ego_translation=(0.0, 0.0, 0.0) if 'ego_translation' not in content + else tuple(content['ego_translation']), + num_pts=-1 if 'num_pts' not in content else int(content['num_pts']), + detection_name=content['detection_name'], + detection_score=-1.0 if 'detection_score' not in content else float(content['detection_score']), + attribute_name=content['attribute_name'], + traj=content['predict_traj'], + traj_scores=content['predict_traj_score']) + +class DetectionMotionBox_modified(DetectionMotionBox): + def __init__(self, *args, token=None, visibility=None, index=None, **kwargs): + ''' + add annotation token + ''' + super().__init__(*args, **kwargs) + self.token = token + self.visibility = visibility + self.index = index + + def serialize(self) -> dict: + """ Serialize instance into json-friendly format. """ + return { + 'token': self.token, + 'sample_token': self.sample_token, + 'translation': self.translation, + 'size': self.size, + 'rotation': self.rotation, + 'velocity': self.velocity, + 'ego_translation': self.ego_translation, + 'num_pts': self.num_pts, + 'detection_name': self.detection_name, + 'detection_score': self.detection_score, + 'attribute_name': self.attribute_name, + 'visibility': self.visibility, + 'index': self.index, + 'traj': self.traj, + 'traj_scores': self.traj_scores + } + + @classmethod + def deserialize(cls, content: dict): + """ Initialize from serialized content. 
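+ Note that 'traj_scores' is not read back here; only the GT fields plus 'traj' are restored.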
""" + return cls( + token=content['token'], + sample_token=content['sample_token'], + translation=tuple(content['translation']), + size=tuple(content['size']), + rotation=tuple(content['rotation']), + velocity=tuple(content['velocity']), + ego_translation=(0.0, 0.0, 0.0) if 'ego_translation' not in content + else tuple(content['ego_translation']), + num_pts=-1 if 'num_pts' not in content else int(content['num_pts']), + detection_name=content['detection_name'], + detection_score=-1.0 if 'detection_score' not in content else float(content['detection_score']), + attribute_name=content['attribute_name'], + visibility=content['visibility'], + index=content['index'], + traj=content['traj'], + ) + + +def load_prediction(result_path: str, max_boxes_per_sample: int, box_cls, verbose: bool = False, category_convert_type='detection_category') \ + -> Tuple[EvalBoxes, Dict]: + """ + Loads object predictions from file. + :param result_path: Path to the .json result file provided by the user. + :param max_boxes_per_sample: Maximim number of boxes allowed per sample. + :param box_cls: Type of box to load, e.g. DetectionBox, DetectionMotionBox or TrackingBox. + :param verbose: Whether to print messages to stdout. + :return: The deserialized results and meta data. + """ + + # Load from file and check that the format is correct. + with open(result_path) as f: + data = json.load(f) + assert 'results' in data, 'Error: No field `results` in result file. Please note that the result format changed.' \ + 'See https://www.nuscenes.org/object-detection for more information.' + + if category_convert_type == 'motion_category': + for key in data['results'].keys(): + for i in range(len(data['results'][key])): + data['results'][key][i]['detection_name'] = detection_prediction_category_to_motion_name(data['results'][key][i]['detection_name']) + # Deserialize results and get meta data. + all_results = EvalBoxes.deserialize(data['results'], box_cls) + meta = data['meta'] + if verbose: + print("Loaded results from {}. Found detections for {} samples." + .format(result_path, len(all_results.sample_tokens))) + + # Check that each sample has no more than x predicted boxes. + for sample_token in all_results.sample_tokens: + assert len(all_results.boxes[sample_token]) <= max_boxes_per_sample, \ + "Error: Only <= %d boxes per sample allowed!" % max_boxes_per_sample + + return all_results, meta + +def load_gt(nusc: NuScenes, eval_split: str, box_cls, verbose: bool = False, category_convert_type='detection_category'): + """ + Loads ground truth boxes from DB. + :param nusc: A NuScenes instance. + :param eval_split: The evaluation split for which we load GT boxes. + :param box_cls: Type of box to load, e.g. DetectionBox or TrackingBox. + :param verbose: Whether to print messages to stdout. + :return: The GT boxes. + """ + predict_helper = PredictHelper(nusc) + # Init. + if box_cls == DetectionMotionBox_modified: + attribute_map = {a['token']: a['name'] for a in nusc.attribute} + + if verbose: + print('Loading annotations for {} split from nuScenes version: {}'.format(eval_split, nusc.version)) + # Read out all sample_tokens in DB. + sample_tokens_all = [s['token'] for s in nusc.sample] + assert len(sample_tokens_all) > 0, "Error: Database has no samples!" + + # Only keep samples from this split. + splits = create_splits_scenes() + + # Check compatibility of split with nusc_version. 
+ version = nusc.version + if eval_split in {'train', 'val', 'train_detect', 'train_track'}: + assert version.endswith('trainval'), \ + 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version) + elif eval_split in {'mini_train', 'mini_val'}: + assert version.endswith('mini'), \ + 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version) + elif eval_split == 'test': + assert version.endswith('test'), \ + 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version) + else: + raise ValueError('Error: Requested split {} which this function cannot map to the correct NuScenes version.' + .format(eval_split)) + + if eval_split == 'test': + # Check that you aren't trying to cheat :). + assert len(nusc.sample_annotation) > 0, \ + 'Error: You are trying to evaluate on the test set but you do not have the annotations!' + index_map = {} + for scene in nusc.scene: + first_sample_token = scene['first_sample_token'] + sample = nusc.get('sample', first_sample_token) + index_map[first_sample_token] = 1 + index = 2 + while sample['next'] != '': + sample = nusc.get('sample', sample['next']) + index_map[sample['token']] = index + index += 1 + + sample_tokens = [] + for sample_token in sample_tokens_all: + scene_token = nusc.get('sample', sample_token)['scene_token'] + scene_record = nusc.get('scene', scene_token) + if scene_record['name'] in splits[eval_split]: + sample_tokens.append(sample_token) + + all_annotations = EvalBoxes() + + # Load annotations and filter predictions and annotations. + tracking_id_set = set() + for sample_token in tqdm.tqdm(sample_tokens, leave=verbose): + + sample = nusc.get('sample', sample_token) + sample_annotation_tokens = sample['anns'] + + sample_boxes = [] + for sample_annotation_token in sample_annotation_tokens: + + sample_annotation = nusc.get('sample_annotation', sample_annotation_token) + if box_cls == DetectionMotionBox_modified: + # Get label name in detection task and filter unused labels. + if category_convert_type == 'detection_category': + detection_name = category_to_detection_name(sample_annotation['category_name']) + elif category_convert_type == 'motion_category': + detection_name = category_to_motion_name(sample_annotation['category_name']) + else: + raise NotImplementedError + if detection_name is None: + continue + # Get attribute_name. 
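+ # Each GT box carries at most one attribute (e.g. 'vehicle.moving' or 'pedestrian.standing'); + # boxes without one get an empty string.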
+ attr_tokens = sample_annotation['attribute_tokens'] + attr_count = len(attr_tokens) + if attr_count == 0: + attribute_name = '' + elif attr_count == 1: + attribute_name = attribute_map[attr_tokens[0]] + else: + raise Exception('Error: GT annotations must not have more than one attribute!') + instance_token = nusc.get('sample_annotation', sample_annotation['token'])['instance_token'] + fut_traj_local = predict_helper.get_future_for_agent(instance_token, sample_token, seconds=6, in_agent_frame=True) + fut_traj_scence_centric = np.zeros((0,)) + if fut_traj_local.shape[0] > 0: + _, boxes, _ = nusc.get_sample_data(sample['data']['LIDAR_TOP'], selected_anntokens=[sample_annotation['token']]) + box = boxes[0] + trans = box.center + rot = Quaternion(matrix=box.rotation_matrix) + fut_traj_scence_centric = convert_local_coords_to_global(fut_traj_local, trans, rot) + + sample_boxes.append( + box_cls( + token=sample_annotation_token, + sample_token=sample_token, + translation=sample_annotation['translation'], + size=sample_annotation['size'], + rotation=sample_annotation['rotation'], + velocity=nusc.box_velocity(sample_annotation['token'])[:2], + num_pts=sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts'], + detection_name=detection_name, + detection_score=-1.0, # GT samples do not have a score. + attribute_name=attribute_name, + visibility=sample_annotation['visibility_token'], + index=index_map[sample_token], + traj=fut_traj_scence_centric, + ) + ) + elif box_cls == TrackingBox: + assert False + else: + raise NotImplementedError('Error: Invalid box_cls %s!' % box_cls) + + all_annotations.add_boxes(sample_token, sample_boxes) + + if verbose: + print("Loaded ground truth annotations for {} samples.".format(len(all_annotations.sample_tokens))) + + return all_annotations + +def prediction_metrics(gt_box_match, pred_box): + pred_traj = np.array(pred_box.traj) + gt_traj_steps = gt_box_match.traj.reshape((-1, 2)) + valid_steps = gt_traj_steps.shape[0] + if valid_steps <= 0: + return np.array([0]), np.array([0]), 0 + nmodes = pred_traj.shape[0] + pred_steps = pred_traj.shape[1] + valid_mask = np.zeros((pred_steps, )) + gt_traj = np.zeros((pred_steps, 2)) + gt_traj[:valid_steps, :] = gt_traj_steps + valid_mask[: valid_steps] = 1 + pred_traj = torch.tensor(pred_traj[None]) + gt_traj = torch.tensor(gt_traj[None]) + valid_mask = torch.tensor(valid_mask[None]) + ade_err, inds = min_ade(pred_traj, gt_traj, 1 - valid_mask) + fde_err, inds = min_fde(pred_traj, gt_traj, 1 - valid_mask) + mr_err = miss_rate(pred_traj, gt_traj, 1 - valid_mask, dist_thresh=2) + return ade_err.numpy(), fde_err.numpy(), mr_err.numpy() + + +def accumulate(gt_boxes: EvalBoxes, + pred_boxes: EvalBoxes, + class_name: str, + dist_fcn: Callable, + dist_th: float, + verbose: bool = False) -> DetectionMotionMetricData: + """ + Average Precision over predefined different recall thresholds for a single distance threshold. + The recall/conf thresholds and other raw metrics will be used in secondary metrics. + :param gt_boxes: Maps every sample_token to a list of its sample_annotations. + :param pred_boxes: Maps every sample_token to a list of its sample_results. + :param class_name: Class to compute AP on. + :param dist_fcn: Distance function used to match detections and ground truths. + :param dist_th: Distance threshold for a match. + :param verbose: If true, print debug messages. + :return: (average_prec, metrics). The average precision value and raw data for a number of metrics. 
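+ Note: unlike the stock nuScenes accumulate(), this variant returns a 4-tuple (metric_data, N_tp, N_fp, npos).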
+ """ + # --------------------------------------------- + # Organize input and initialize accumulators. + # --------------------------------------------- + + # Count the positives. + npos = len([1 for gt_box in gt_boxes.all if gt_box.detection_name == class_name]) + if verbose: + print("Found {} GT of class {} out of {} total across {} samples.". + format(npos, class_name, len(gt_boxes.all), len(gt_boxes.sample_tokens))) + + # For missing classes in the GT, return a data structure corresponding to no predictions. + if npos == 0: + return DetectionMotionMetricData.no_predictions(), 0, 0, 0 + + # Organize the predictions in a single list. + pred_boxes_list = [box for box in pred_boxes.all if box.detection_name == class_name] + pred_confs = [box.detection_score for box in pred_boxes_list] + + if verbose: + print("Found {} PRED of class {} out of {} total across {} samples.". + format(len(pred_confs), class_name, len(pred_boxes.all), len(pred_boxes.sample_tokens))) + + # Sort by confidence. + sortind = [i for (v, i) in sorted((v, i) for (i, v) in enumerate(pred_confs))][::-1] + + # Do the actual matching. + tp = [] # Accumulator of true positives. + fp = [] # Accumulator of false positives. + conf = [] # Accumulator of confidences. + + # match_data holds the extra metrics we calculate for each match. + match_data = {'trans_err': [], + 'vel_err': [], + 'scale_err': [], + 'orient_err': [], + 'attr_err': [], + 'conf': [], + 'min_ade_err': [], + 'min_fde_err': [], + 'miss_rate_err': []} + + # --------------------------------------------- + # Match and accumulate match data. + # --------------------------------------------- + + taken = set() # Initially no gt bounding box is matched. + for ind in sortind: + pred_box = pred_boxes_list[ind] + min_dist = np.inf + match_gt_idx = None + + for gt_idx, gt_box in enumerate(gt_boxes[pred_box.sample_token]): + + # Find closest match among ground truth boxes + if gt_box.detection_name == class_name and not (pred_box.sample_token, gt_idx) in taken: + this_distance = dist_fcn(gt_box, pred_box) + if this_distance < min_dist: + min_dist = this_distance + match_gt_idx = gt_idx + + # If the closest match is close enough according to threshold we have a match! + is_match = min_dist < dist_th + + if is_match: + taken.add((pred_box.sample_token, match_gt_idx)) + + # Update tp, fp and confs. + tp.append(1) + fp.append(0) + conf.append(pred_box.detection_score) + + # Since it is a match, update match data also. + gt_box_match = gt_boxes[pred_box.sample_token][match_gt_idx] + + match_data['trans_err'].append(center_distance(gt_box_match, pred_box)) + match_data['vel_err'].append(velocity_l2(gt_box_match, pred_box)) + match_data['scale_err'].append(1 - scale_iou(gt_box_match, pred_box)) + + # Barrier orientation is only determined up to 180 degree. (For cones orientation is discarded later) + period = np.pi if class_name == 'barrier' else 2 * np.pi + match_data['orient_err'].append(yaw_diff(gt_box_match, pred_box, period=period)) + + match_data['attr_err'].append(1 - attr_acc(gt_box_match, pred_box)) + minade, minfde, m_r = prediction_metrics(gt_box_match, pred_box) + + match_data['min_ade_err'].append(minade) + match_data['min_fde_err'].append(minfde) + match_data['miss_rate_err'].append(m_r) + match_data['conf'].append(pred_box.detection_score) + + else: + # No match. Mark this as a false positive. + tp.append(0) + fp.append(1) + conf.append(pred_box.detection_score) + + # Check if we have any matches. If not, just return a "no predictions" array. 
+ if len(match_data['trans_err']) == 0: + return DetectionMotionMetricData.no_predictions(), 0, 0, 0 + + # --------------------------------------------- + # Calculate and interpolate precision and recall + # --------------------------------------------- + + # Accumulate. + N_tp = np.sum(tp) + N_fp = np.sum(fp) + tp = np.cumsum(tp).astype(float) + fp = np.cumsum(fp).astype(float) + conf = np.array(conf) + + + # Calculate precision and recall. + prec = tp / (fp + tp) + rec = tp / float(npos) + + rec_interp = np.linspace(0, 1, DetectionMotionMetricData.nelem) # 101 steps, from 0% to 100% recall. + prec = np.interp(rec_interp, rec, prec, right=0) + conf = np.interp(rec_interp, rec, conf, right=0) + rec = rec_interp + + # --------------------------------------------- + # Re-sample the match-data to match, prec, recall and conf. + # --------------------------------------------- + + for key in match_data.keys(): + if key == "conf": + continue # Confidence is used as reference to align with fp and tp. So skip in this step. + + else: + # For each match_data, we first calculate the accumulated mean. + tmp = cummean(np.array(match_data[key])) + + # Then interpolate based on the confidences. (Note reversing since np.interp needs increasing arrays) + match_data[key] = np.interp(conf[::-1], match_data['conf'][::-1], tmp[::-1])[::-1] + + # --------------------------------------------- + # Done. Instantiate MetricData and return + # --------------------------------------------- + return DetectionMotionMetricData(recall=rec, + precision=prec, + confidence=conf, + trans_err=match_data['trans_err'], + vel_err=match_data['vel_err'], + scale_err=match_data['scale_err'], + orient_err=match_data['orient_err'], + attr_err=match_data['attr_err'], + min_ade_err=match_data['min_ade_err'], + min_fde_err=match_data['min_fde_err'], + miss_rate_err=match_data['miss_rate_err'] + ), N_tp, N_fp, npos + + + +def accumulate_motion(gt_boxes: EvalBoxes, + pred_boxes: EvalBoxes, + class_name: str, + dist_fcn: Callable, + traj_fcn: Callable, + dist_th: float, + traj_dist_th: float, + verbose: bool = False, + final_step: float = 12) -> DetectionMotionMetricData: + """ + Average Precision over predefined different recall thresholds for a single distance threshold. + The recall/conf thresholds and other raw metrics will be used in secondary metrics. + :param gt_boxes: Maps every sample_token to a list of its sample_annotations. + :param pred_boxes: Maps every sample_token to a list of its sample_results. + :param class_name: Class to compute AP on. + :param dist_fcn: Distance function used to match detections and ground truths. + :param dist_th: Distance threshold for a match. + :param verbose: If true, print debug messages. + :return: (average_prec, metrics). The average precision value and raw data for a number of metrics. + """ + # --------------------------------------------- + # Organize input and initialize accumulators. + # --------------------------------------------- + + # Count the positives. + npos = len([1 for gt_box in gt_boxes.all if gt_box.detection_name == class_name]) + if verbose: + print("Found {} GT of class {} out of {} total across {} samples.". + format(npos, class_name, len(gt_boxes.all), len(gt_boxes.sample_tokens))) + + # For missing classes in the GT, return a data structure corresponding to no predictions. + if npos == 0: + return DetectionMotionMetricData.no_predictions(), 0, 0, 0 + + # + # Organize the predictions in a single list. 
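+ # (The commented-out block below sketches an alternative that weights each detection score by its per-mode trajectory score; it is kept for reference but disabled.)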
+ pred_boxes_list = [] + pred_confs = [] + + pred_boxes_list = [box for box in pred_boxes.all if box.detection_name == class_name] + pred_confs = [box.detection_score for box in pred_boxes_list] + # for box in pred_boxes.all: + # if box.detection_name == class_name: + # box.traj_scores = np.exp(box.traj_scores) + # for i in range(len(box.traj_scores)): + # box.traj_index = i + # pred_boxes_list.append(box) + # pred_confs = [box.detection_score * box.traj_scores[box.traj_index] for box in pred_boxes_list] + + if verbose: + print("Found {} PRED of class {} out of {} total across {} samples.". + format(len(pred_confs), class_name, len(pred_boxes.all), len(pred_boxes.sample_tokens))) + + # Sort by confidence. + sortind = [i for (v, i) in sorted((v, i) for (i, v) in enumerate(pred_confs))][::-1] + + # Do the actual matching. + tp = [] # Accumulator of true positives. + fp = [] # Accumulator of false positives. + conf = [] # Accumulator of confidences. + + # match_data holds the extra metrics we calculate for each match. + match_data = {'trans_err': [], + 'vel_err': [], + 'scale_err': [], + 'orient_err': [], + 'attr_err': [], + 'conf': [], + 'min_ade_err': [], + 'min_fde_err': [], + 'miss_rate_err': []} + + # --------------------------------------------- + # Match and accumulate match data. + # --------------------------------------------- + + taken = set() # Initially no gt bounding box is matched. + for ind in sortind: + pred_box = pred_boxes_list[ind] + min_dist = np.inf + match_gt_idx = None + + for gt_idx, gt_box in enumerate(gt_boxes[pred_box.sample_token]): + + # Find closest match among ground truth boxes + if gt_box.detection_name == class_name and not (pred_box.sample_token, gt_idx) in taken: + this_distance = dist_fcn(gt_box, pred_box) + if this_distance < min_dist: + min_dist = this_distance + match_gt_idx = gt_idx + fde_distance = traj_fcn(gt_box, pred_box, final_step) + # If the closest match is close enough according to threshold we have a match! + is_match = min_dist < dist_th and fde_distance < traj_dist_th + + if is_match: + taken.add((pred_box.sample_token, match_gt_idx)) + + # Update tp, fp and confs. + tp.append(1) + fp.append(0) + conf.append(pred_box.detection_score) + + # Since it is a match, update match data also. + gt_box_match = gt_boxes[pred_box.sample_token][match_gt_idx] + + match_data['trans_err'].append(center_distance(gt_box_match, pred_box)) + match_data['vel_err'].append(velocity_l2(gt_box_match, pred_box)) + match_data['scale_err'].append(1 - scale_iou(gt_box_match, pred_box)) + + # Barrier orientation is only determined up to 180 degree. (For cones orientation is discarded later) + period = np.pi if class_name == 'barrier' else 2 * np.pi + match_data['orient_err'].append(yaw_diff(gt_box_match, pred_box, period=period)) + + match_data['attr_err'].append(1 - attr_acc(gt_box_match, pred_box)) + minade, minfde, m_r = prediction_metrics(gt_box_match, pred_box) + + match_data['min_ade_err'].append(minade) + match_data['min_fde_err'].append(minfde) + match_data['miss_rate_err'].append(m_r) + match_data['conf'].append(pred_box.detection_score) + + else: + # No match. Mark this as a false positive. + tp.append(0) + fp.append(1) + conf.append(pred_box.detection_score) + # conf.append(pred_box.detection_score * pred_box.traj_scores[pred_box.traj_index]) + # + # Check if we have any matches. If not, just return a "no predictions" array. 
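+ # (Same early-exit as accumulate(); here a match additionally required the final-step trajectory error to fall below traj_dist_th.)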
+ if len(match_data['trans_err']) == 0: + return DetectionMotionMetricData.no_predictions(), 0, 0, 0 + + # --------------------------------------------- + # Calculate and interpolate precision and recall + # --------------------------------------------- + + # Accumulate. + N_tp = np.sum(tp) + N_fp = np.sum(fp) + tp = np.cumsum(tp).astype(float) + fp = np.cumsum(fp).astype(float) + conf = np.array(conf) + + # Calculate precision and recall. + prec = tp / (fp + tp) + rec = tp / float(npos) + + + + rec_interp = np.linspace(0, 1, DetectionMotionMetricData.nelem) # 101 steps, from 0% to 100% recall. + prec = np.interp(rec_interp, rec, prec, right=0) + conf = np.interp(rec_interp, rec, conf, right=0) + rec = rec_interp + + # --------------------------------------------- + # Re-sample the match-data to match, prec, recall and conf. + # --------------------------------------------- + + for key in match_data.keys(): + if key == "conf": + continue # Confidence is used as reference to align with fp and tp. So skip in this step. + + else: + # For each match_data, we first calculate the accumulated mean. + tmp = cummean(np.array(match_data[key])) + + # Then interpolate based on the confidences. (Note reversing since np.interp needs increasing arrays) + match_data[key] = np.interp(conf[::-1], match_data['conf'][::-1], tmp[::-1])[::-1] + + # --------------------------------------------- + # Done. Instantiate MetricData and return + # --------------------------------------------- + return DetectionMotionMetricData(recall=rec, + precision=prec, + confidence=conf, + trans_err=match_data['trans_err'], + vel_err=match_data['vel_err'], + scale_err=match_data['scale_err'], + orient_err=match_data['orient_err'], + attr_err=match_data['attr_err'], + min_ade_err=match_data['min_ade_err'], + min_fde_err=match_data['min_fde_err'], + miss_rate_err=match_data['miss_rate_err'] + ), N_tp, N_fp, npos \ No newline at end of file diff --git a/mmcv/datasets/eval_utils/map_api.py b/mmcv/datasets/eval_utils/map_api.py new file mode 100644 index 0000000..5f26e58 --- /dev/null +++ b/mmcv/datasets/eval_utils/map_api.py @@ -0,0 +1,2355 @@ +# nuScenes dev-kit. +# Code written by Sergi Adipraja Widjaja, 2019. +# + Map mask by Kiwoo Shin, 2019. +# + Methods operating on NuScenesMap and NuScenes by Holger Caesar, 2019. + +import json +import os +import random +from typing import Dict, List, Tuple, Optional, Union + +import cv2 +import math +import descartes +import matplotlib.gridspec as gridspec +import matplotlib.pyplot as plt +import numpy as np +from PIL import Image +from matplotlib.axes import Axes +from matplotlib.figure import Figure +from matplotlib.patches import Rectangle, Arrow +from mpl_toolkits.axes_grid1.inset_locator import mark_inset +from pyquaternion import Quaternion +from shapely import affinity +from shapely.geometry import Polygon, MultiPolygon, LineString, Point, box +from tqdm import tqdm + +from nuscenes.map_expansion.arcline_path_utils import discretize_lane, ArcLinePath +from nuscenes.map_expansion.bitmap import BitMap +from nuscenes.nuscenes import NuScenes +from nuscenes.utils.geometry_utils import view_points +from functools import partial + +# Recommended style to use as the plots will show grids. +plt.style.use('seaborn-whitegrid') + +# Define a map geometry type for polygons and lines. 
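+# (This alias is the element type returned by NuScenesMap.get_map_geom() below.)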
+Geometry = Union[Polygon, LineString] + +locations = ['singapore-onenorth', 'singapore-hollandvillage', 'singapore-queenstown', 'boston-seaport'] + + +class NuScenesMap: + """ + NuScenesMap database class for querying and retrieving information from the semantic maps. + Before using this class please see the provided tutorial `map_expansion_tutorial.ipynb`. + + Below you can find the map origins (south western corner, in [lat, lon]) for each of the 4 maps in nuScenes: + boston-seaport: [42.336849169438615, -71.05785369873047] + singapore-onenorth: [1.2882100868743724, 103.78475189208984] + singapore-hollandvillage: [1.2993652317780957, 103.78217697143555] + singapore-queenstown: [1.2782562240223188, 103.76741409301758] + + The dimensions of the maps are as follows ([width, height] in meters): + singapore-onenorth: [1585.6, 2025.0] + singapore-hollandvillage: [2808.3, 2922.9] + singapore-queenstown: [3228.6, 3687.1] + boston-seaport: [2979.5, 2118.1] + The rasterized semantic maps (e.g. singapore-onenorth.png) published with nuScenes v1.0 have a scale of 10px/m, + hence the above numbers are the image dimensions divided by 10. + + We use the same WGS 84 Web Mercator (EPSG:3857) projection as Google Maps/Earth. + """ + def __init__(self, + dataroot: str = '/data/sets/nuscenes', + map_name: str = 'singapore-onenorth'): + """ + Loads the layers, creates reverse indices and shortcuts, and initializes the explorer class. + :param dataroot: Path to the layers in the form of a .json file. + :param map_name: Which map out of `singapore-onenorth`, `singapore-hollandvillage`, `singapore-queenstown`, + `boston-seaport` that we want to load. + """ + assert map_name in locations, 'Error: Unknown map name %s!' % map_name + + self.dataroot = dataroot + self.map_name = map_name + + self.geometric_layers = ['polygon', 'line', 'node'] + + # These are the non-geometric layers which have polygons as the geometric descriptors. + self.non_geometric_polygon_layers = ['drivable_area', 'road_segment', 'road_block', 'lane', 'ped_crossing', + 'walkway', 'stop_line', 'carpark_area'] + + # We want to be able to search for lane connectors, but not render them. + self.lookup_polygon_layers = self.non_geometric_polygon_layers + ['lane_connector'] + + # These are the non-geometric layers which have line strings as the geometric descriptors. + self.non_geometric_line_layers = ['road_divider', 'lane_divider', 'traffic_light'] + self.non_geometric_layers = self.non_geometric_polygon_layers + self.non_geometric_line_layers + self.layer_names = self.geometric_layers + self.lookup_polygon_layers + self.non_geometric_line_layers + + # Load the selected map. + self.json_fname = os.path.join(self.dataroot, 'maps', 'expansion', '{}.json'.format(self.map_name)) + with open(self.json_fname, 'r') as fh: + self.json_obj = json.load(fh) + + # Parse the map version and raise an error for deprecated maps. + if 'version' in self.json_obj: + self.version = self.json_obj['version'] + else: + self.version = '1.0' + if self.version < '1.3': + raise Exception('Error: You are using an outdated map version (%s)! ' + 'Please go to https://www.nuscenes.org/download to download the latest map!' % self.version) + + self.canvas_edge = self.json_obj['canvas_edge'] + self._load_layers() + self._make_token2ind() + self._make_shortcuts() + + self.explorer = NuScenesMapExplorer(self) + + def _load_layer(self, layer_name: str) -> List[dict]: + """ + Returns a list of records corresponding to the layer name. + :param layer_name: Name of the layer that will be loaded.
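+ (E.g. 'lane', 'ped_crossing' or 'road_divider'.)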
+ :return: A list of records corresponding to a layer. + """ + return self.json_obj[layer_name] + + def _load_layer_dict(self, layer_name: str) -> Dict[str, Union[dict, list]]: + """ + Returns a dict of records corresponding to the layer name. + :param layer_name: Name of the layer that will be loaded. + :return: A dict of records corresponding to a layer. + """ + return self.json_obj[layer_name] + + def _load_layers(self) -> None: + """ Loads each available layer. """ + + # Explicit assignment of layers are necessary to help the IDE determine valid class members. + self.polygon = self._load_layer('polygon') + self.line = self._load_layer('line') + self.node = self._load_layer('node') + self.drivable_area = self._load_layer('drivable_area') + self.road_segment = self._load_layer('road_segment') + self.road_block = self._load_layer('road_block') + self.lane = self._load_layer('lane') + self.ped_crossing = self._load_layer('ped_crossing') + self.walkway = self._load_layer('walkway') + self.stop_line = self._load_layer('stop_line') + self.carpark_area = self._load_layer('carpark_area') + self.road_divider = self._load_layer('road_divider') + self.lane_divider = self._load_layer('lane_divider') + self.traffic_light = self._load_layer('traffic_light') + + self.arcline_path_3: Dict[str, List[dict]] = self._load_layer_dict('arcline_path_3') + self.connectivity: Dict[str, dict] = self._load_layer_dict('connectivity') + self.lane_connector = self._load_layer('lane_connector') + + def _make_token2ind(self) -> None: + """ Store the mapping from token to layer index for each layer. """ + self._token2ind = dict() + for layer_name in self.layer_names: + self._token2ind[layer_name] = dict() + + for ind, member in enumerate(getattr(self, layer_name)): + self._token2ind[layer_name][member['token']] = ind + + def _make_shortcuts(self) -> None: + """ Makes the record shortcuts. """ + + # Makes a shortcut between non geometric records to their nodes. + for layer_name in self.non_geometric_polygon_layers: + if layer_name == 'drivable_area': # Drivable area has more than one geometric representation. + pass + else: + for record in self.__dict__[layer_name]: + polygon_obj = self.get('polygon', record['polygon_token']) + record['exterior_node_tokens'] = polygon_obj['exterior_node_tokens'] + record['holes'] = polygon_obj['holes'] + + for layer_name in self.non_geometric_line_layers: + for record in self.__dict__[layer_name]: + record['node_tokens'] = self.get('line', record['line_token'])['node_tokens'] + + # Makes a shortcut between stop lines to their cues, there's different cues for different types of stop line. + # Refer to `_get_stop_line_cue()` for details. + for record in self.stop_line: + cue = self._get_stop_line_cue(record) + record['cue'] = cue + + # Makes a shortcut between lanes to their lane divider segment nodes. + for record in self.lane: + record['left_lane_divider_segment_nodes'] = [self.get('node', segment['node_token']) for segment in + record['left_lane_divider_segments']] + record['right_lane_divider_segment_nodes'] = [self.get('node', segment['node_token']) for segment in + record['right_lane_divider_segments']] + + def _get_stop_line_cue(self, stop_line_record: dict) -> List[dict]: + """ + Get the different cues for different types of stop lines. + :param stop_line_record: A single stop line record. + :return: The cue for that stop line. 
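+ PED_CROSSING and TURN_STOP stop lines map to ped_crossing records, TRAFFIC_LIGHT to traffic_light records, and STOP_SIGN/YIELD have no cue.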
+ """ + if stop_line_record['stop_line_type'] in ['PED_CROSSING', 'TURN_STOP']: + return [self.get('ped_crossing', token) for token in stop_line_record['ped_crossing_tokens']] + elif stop_line_record['stop_line_type'] in ['STOP_SIGN', 'YIELD']: + return [] + elif stop_line_record['stop_line_type'] == 'TRAFFIC_LIGHT': + return [self.get('traffic_light', token) for token in stop_line_record['traffic_light_tokens']] + + def get(self, layer_name: str, token: str) -> dict: + """ + Returns a record from the layer in constant runtime. + :param layer_name: Name of the layer that we are interested in. + :param token: Token of the record. + :return: A single layer record. + """ + assert layer_name in self.layer_names, "Layer {} not found".format(layer_name) + + return getattr(self, layer_name)[self.getind(layer_name, token)] + + def getind(self, layer_name: str, token: str) -> int: + """ + This returns the index of the record in a layer in constant runtime. + :param layer_name: Name of the layer we are interested in. + :param token: Token of the record. + :return: The index of the record in the layer, layer is an array. + """ + return self._token2ind[layer_name][token] + + def render_record(self, + layer_name: str, + token: str, + alpha: float = 0.5, + figsize: Tuple[float, float] = None, + other_layers: List[str] = None, + bitmap: Optional[BitMap] = None) -> Tuple[Figure, Tuple[Axes, Axes]]: + """ + Render a single map record. By default will also render 3 layers which are `drivable_area`, `lane`, + and `walkway` unless specified by `other_layers`. + :param layer_name: Name of the layer that we are interested in. + :param token: Token of the record that you want to render. + :param alpha: The opacity of each layer that gets rendered. + :param figsize: Size of the whole figure. + :param other_layers: What other layers to render aside from the one specified in `layer_name`. + :param bitmap: Optional BitMap object to render below the other map layers. + :return: The matplotlib figure and axes of the rendered layers. + """ + return self.explorer.render_record(layer_name, token, alpha, + figsize=figsize, other_layers=other_layers, bitmap=bitmap) + + def render_layers(self, + layer_names: List[str], + alpha: float = 0.5, + figsize: Union[None, float, Tuple[float, float]] = None, + tokens: List[str] = None, + bitmap: Optional[BitMap] = None) -> Tuple[Figure, Axes]: + """ + Render a list of layer names. + :param layer_names: A list of layer names. + :param alpha: The opacity of each layer that gets rendered. + :param figsize: Size of the whole figure. + :param tokens: Optional list of tokens to render. None means all tokens are rendered. + :param bitmap: Optional BitMap object to render below the other map layers. + :return: The matplotlib figure and axes of the rendered layers. + """ + return self.explorer.render_layers(layer_names, alpha, + figsize=figsize, tokens=tokens, bitmap=bitmap) + + def render_map_patch(self, + box_coords: Tuple[float, float, float, float], + layer_names: List[str] = None, + alpha: float = 0.5, + figsize: Tuple[int, int] = (15, 15), + render_egoposes_range: bool = True, + render_legend: bool = True, + bitmap: Optional[BitMap] = None) -> Tuple[Figure, Axes]: + """ + Renders a rectangular patch specified by `box_coords`. By default renders all layers. + :param box_coords: The rectangular patch coordinates (x_min, y_min, x_max, y_max). + :param layer_names: All the non geometric layers that we want to render. + :param alpha: The opacity of each layer. 
+ :param figsize: Size of the whole figure. + :param render_egoposes_range: Whether to render a rectangle around all ego poses. + :param render_legend: Whether to render the legend of map layers. + :param bitmap: Optional BitMap object to render below the other map layers. + :return: The matplotlib figure and axes of the rendered layers. + """ + return self.explorer.render_map_patch(box_coords, layer_names=layer_names, alpha=alpha, figsize=figsize, + render_egoposes_range=render_egoposes_range, + render_legend=render_legend, bitmap=bitmap) + + def render_map_in_image(self, + nusc: NuScenes, + sample_token: str, + camera_channel: str = 'CAM_FRONT', + alpha: float = 0.3, + patch_radius: float = 10000, + min_polygon_area: float = 1000, + render_behind_cam: bool = True, + render_outside_im: bool = True, + layer_names: List[str] = None, + verbose: bool = True, + out_path: str = None) -> Tuple[Figure, Axes]: + """ + Render a nuScenes camera image and overlay the polygons for the specified map layers. + Note that the projections are not always accurate as the localization is in 2d. + :param nusc: The NuScenes instance to load the image from. + :param sample_token: The image's corresponding sample_token. + :param camera_channel: Camera channel name, e.g. 'CAM_FRONT'. + :param alpha: The transparency value of the layers to render in [0, 1]. + :param patch_radius: The radius in meters around the ego car in which to select map records. + :param min_polygon_area: Minimum area a polygon needs to have to be rendered. + :param render_behind_cam: Whether to render polygons where any point is behind the camera. + :param render_outside_im: Whether to render polygons where any point is outside the image. + :param layer_names: The names of the layers to render, e.g. ['lane']. + If set to None, the recommended setting will be used. + :param verbose: Whether to print to stdout. + :param out_path: Optional path to save the rendered figure to disk. + """ + return self.explorer.render_map_in_image( + nusc, sample_token, camera_channel=camera_channel, alpha=alpha, + patch_radius=patch_radius, min_polygon_area=min_polygon_area, + render_behind_cam=render_behind_cam, render_outside_im=render_outside_im, + layer_names=layer_names, verbose=verbose, out_path=out_path) + + def get_map_mask_in_image(self, + nusc: NuScenes, + sample_token: str, + camera_channel: str = 'CAM_FRONT', + alpha: float = 0.3, + patch_radius: float = 10000, + min_polygon_area: float = 1000, + render_behind_cam: bool = True, + render_outside_im: bool = True, + layer_names: List[str] = None, + verbose: bool = False, + out_path: str = None): + """ + Render a nuScenes camera image and overlay the polygons for the specified map layers. + Note that the projections are not always accurate as the localization is in 2d. + :param nusc: The NuScenes instance to load the image from. + :param sample_token: The image's corresponding sample_token. + :param camera_channel: Camera channel name, e.g. 'CAM_FRONT'. + :param alpha: The transparency value of the layers to render in [0, 1]. + :param patch_radius: The radius in meters around the ego car in which to select map records. + :param min_polygon_area: Minimum area a polygon needs to have to be rendered. + :param render_behind_cam: Whether to render polygons where any point is behind the camera. + :param render_outside_im: Whether to render polygons where any point is outside the image. + :param layer_names: The names of the layers to render, e.g. ['lane']. + If set to None, the recommended setting will be used. 
+ :param verbose: Whether to print to stdout. + :param out_path: Optional path to save the rendered figure to disk. + """ + return self.explorer.get_map_mask_in_image( + nusc, sample_token, camera_channel=camera_channel, alpha=alpha, + patch_radius=patch_radius, min_polygon_area=min_polygon_area, + render_behind_cam=render_behind_cam, render_outside_im=render_outside_im, + layer_names=layer_names, verbose=verbose, out_path=out_path) + + def render_egoposes_on_fancy_map(self, + nusc: NuScenes, + scene_tokens: List = None, + verbose: bool = True, + out_path: str = None, + render_egoposes: bool = True, + render_egoposes_range: bool = True, + render_legend: bool = True, + bitmap: Optional[BitMap] = None) -> Tuple[np.ndarray, Figure, Axes]: + """ + Renders each ego pose of a list of scenes on the map (around 40 poses per scene). + This method is heavily inspired by NuScenes.render_egoposes_on_map(), but uses the map expansion pack maps. + :param nusc: The NuScenes instance to load the ego poses from. + :param scene_tokens: Optional list of scene tokens corresponding to the current map location. + :param verbose: Whether to show status messages and progress bar. + :param out_path: Optional path to save the rendered figure to disk. + :param render_egoposes: Whether to render ego poses. + :param render_egoposes_range: Whether to render a rectangle around all ego poses. + :param render_legend: Whether to render the legend of map layers. + :param bitmap: Optional BitMap object to render below the other map layers. + :return: . Returns a matrix with n ego poses in global map coordinates. + """ + return self.explorer.render_egoposes_on_fancy_map(nusc, scene_tokens=scene_tokens, + verbose=verbose, out_path=out_path, + render_egoposes=render_egoposes, + render_egoposes_range=render_egoposes_range, + render_legend=render_legend, bitmap=bitmap) + + def render_centerlines(self, + resolution_meters: float = 0.5, + figsize: Union[None, float, Tuple[float, float]] = None, + bitmap: Optional[BitMap] = None) -> Tuple[Figure, Axes]: + """ + Render the centerlines of all lanes and lane connectors. + :param resolution_meters: How finely to discretize the lane. Smaller values ensure curved + lanes are properly represented. + :param figsize: Size of the figure. + :param bitmap: Optional BitMap object to render below the other map layers. + """ + return self.explorer.render_centerlines(resolution_meters=resolution_meters, figsize=figsize, bitmap=bitmap) + + def render_map_mask(self, + patch_box: Tuple[float, float, float, float], + patch_angle: float, + layer_names: List[str] = None, + canvas_size: Tuple[int, int] = (100, 100), + figsize: Tuple[int, int] = (15, 15), + n_row: int = 2) -> Tuple[Figure, List[Axes]]: + """ + Render map mask of the patch specified by patch_box and patch_angle. + :param patch_box: Patch box defined as [x_center, y_center, height, width]. + :param patch_angle: Patch orientation in degrees. + :param layer_names: A list of layer names to be returned. + :param canvas_size: Size of the output mask (h, w). + :param figsize: Size of the figure. + :param n_row: Number of rows with plots. + :return: The matplotlib figure and a list of axes of the rendered layers. 
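+ Example (with a hypothetical `nusc_map` instance and patch values): + fig, axes = nusc_map.render_map_mask((300.0, 1700.0, 100.0, 100.0), 0.0, ['drivable_area', 'walkway'], (500, 500))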
+ """ + return self.explorer.render_map_mask(patch_box, patch_angle, + layer_names=layer_names, canvas_size=canvas_size, + figsize=figsize, n_row=n_row) + + def get_map_mask(self, + patch_box: Optional[Tuple[float, float, float, float]], + patch_angle: float, + layer_names: List[str] = None, + canvas_size: Optional[Tuple[int, int]] = (100, 100)) -> np.ndarray: + """ + Return list of map mask layers of the specified patch. + :param patch_box: Patch box defined as [x_center, y_center, height, width]. If None, this plots the entire map. + :param patch_angle: Patch orientation in degrees. North-facing corresponds to 0. + :param layer_names: A list of layer names to be extracted, or None for all non-geometric layers. + :param canvas_size: Size of the output mask (h, w). If None, we use the default resolution of 10px/m. + :return: Stacked numpy array of size [c x h x w] with c channels and the same width/height as the canvas. + """ + return self.explorer.get_map_mask(patch_box, patch_angle, layer_names=layer_names, canvas_size=canvas_size) + + def get_map_geom(self, + patch_box: Tuple[float, float, float, float], + patch_angle: float, + layer_names: List[str]) -> List[Tuple[str, List[Geometry]]]: + """ + Returns a list of geometries in the specified patch_box. + These are unscaled, but aligned with the patch angle. + :param patch_box: Patch box defined as [x_center, y_center, height, width]. + :param patch_angle: Patch orientation in degrees. + North-facing corresponds to 0. + :param layer_names: A list of layer names to be extracted, or None for all non-geometric layers. + :return: List of layer names and their corresponding geometries. + """ + return self.explorer.get_map_geom(patch_box, patch_angle, layer_names) + + def get_records_in_patch(self, + box_coords: Tuple[float, float, float, float], + layer_names: List[str] = None, + mode: str = 'intersect') -> Dict[str, List[str]]: + """ + Get all the record token that intersects or is within a particular rectangular patch. + :param box_coords: The rectangular patch coordinates (x_min, y_min, x_max, y_max). + :param layer_names: Names of the layers that we want to retrieve in a particular patch. By default will always + look at the all non geometric layers. + :param mode: "intersect" will return all non geometric records that intersects the patch, "within" will return + all non geometric records that are within the patch. + :return: Dictionary of layer_name - tokens pairs. + """ + return self.explorer.get_records_in_patch(box_coords, layer_names=layer_names, mode=mode) + + def is_record_in_patch(self, + layer_name: str, + token: str, + box_coords: Tuple[float, float, float, float], + mode: str = 'intersect') -> bool: + """ + Query whether a particular record is in a rectangular patch + :param layer_name: The layer name of the record. + :param token: The record token. + :param box_coords: The rectangular patch coordinates (x_min, y_min, x_max, y_max). + :param mode: "intersect" means it will return True if the geometric object intersects the patch, "within" will + return True if the geometric object is within the patch. + :return: Boolean value on whether a particular record intersects or within a particular patch. + """ + return self.explorer.is_record_in_patch(layer_name, token, box_coords, mode=mode) + + def layers_on_point(self, x: float, y: float, layer_names: List[str] = None) -> Dict[str, str]: + """ + Returns all the polygonal layers that a particular point is on. + :param x: x coordinate of the point of interest. 
+ :param y: y coordinate of the point of interest. + :param layer_names: The names of the layers to search for. + :return: All the polygonal layers that a particular point is on. {: } + """ + return self.explorer.layers_on_point(x, y, layer_names=layer_names) + + def record_on_point(self, x: float, y: float, layer_name: str) -> str: + """ + Query what record of a layer a particular point is on. + :param x: x coordinate of the point of interest. + :param y: y coordinate of the point of interest. + :param layer_name: The non geometric polygonal layer name that we are interested in. + :return: The first token of a layer a particular point is on or '' if no layer is found. + """ + return self.explorer.record_on_point(x, y, layer_name) + + def extract_polygon(self, polygon_token: str) -> Polygon: + """ + Construct a shapely Polygon object out of a polygon token. + :param polygon_token: The token of the polygon record. + :return: The polygon wrapped in a shapely Polygon object. + """ + return self.explorer.extract_polygon(polygon_token) + + def extract_line(self, line_token: str) -> LineString: + """ + Construct a shapely LineString object out of a line token. + :param line_token: The token of the line record. + :return: The line wrapped in a LineString object. + """ + return self.explorer.extract_line(line_token) + + def get_bounds(self, layer_name: str, token: str) -> Tuple[float, float, float, float]: + """ + Get the bounds of the geometric object that corresponds to a non geometric record. + :param layer_name: Name of the layer that we are interested in. + :param token: Token of the record. + :return: min_x, min_y, max_x, max_y of of the line representation. + """ + return self.explorer.get_bounds(layer_name, token) + + def get_records_in_radius(self, x: float, y: float, radius: float, + layer_names: List[str], mode: str = 'intersect') -> Dict[str, List[str]]: + """ + Get all the record tokens that intersect a square patch of side length 2*radius centered on (x,y). + :param x: X-coordinate in global frame. + :param y: y-coordinate in global frame. + :param radius: All records within radius meters of point (x, y) will be returned. + :param layer_names: Names of the layers that we want to retrieve. By default will always + look at the all non geometric layers. + :param mode: "intersect" will return all non geometric records that intersects the patch, "within" will return + all non geometric records that are within the patch. + :return: Dictionary of layer_name - tokens pairs. + """ + + patch = (x - radius, y - radius, x + radius, y + radius) + return self.explorer.get_records_in_patch(patch, layer_names, mode=mode) + + def discretize_centerlines(self, resolution_meters: float) -> List[np.array]: + """ + Discretize the centerlines of lanes and lane connectors. + :param resolution_meters: How finely to discretize the lane. Smaller values ensure curved + lanes are properly represented. + :return: A list of np.arrays with x, y and z values for each point. + """ + pose_lists = [] + for lane in self.lane + self.lane_connector: + my_lane = self.arcline_path_3.get(lane['token'], []) + discretized = np.array(discretize_lane(my_lane, resolution_meters)) + pose_lists.append(discretized) + + return pose_lists + + def discretize_lanes(self, tokens: List[str], + resolution_meters: float) -> Dict[str, List[Tuple[float, float, float]]]: + """ + Discretizes a list of lane/lane connector tokens. + :param tokens: List of lane and/or lane connector record tokens. 
Can be retrieved with + get_records_in_radius or get_records_in_patch. + :param resolution_meters: How finely to discretize the splines. + :return: Mapping from lane/lane connector token to sequence of poses along the lane. + """ + + return {ID: discretize_lane(self.arcline_path_3.get(ID, []), resolution_meters) for ID in tokens} + + def _get_connected_lanes(self, lane_token: str, incoming_outgoing: str) -> List[str]: + """ + Helper for getting the lanes connected to a given lane + :param lane_token: Token for the lane. + :param incoming_outgoing: Whether to get incoming or outgoing lanes + :return: List of lane tokens this lane is connected to. + """ + + if lane_token not in self.connectivity: + raise ValueError(f"{lane_token} is not a valid lane.") + + return self.connectivity[lane_token][incoming_outgoing] + + def get_outgoing_lane_ids(self, lane_token: str) -> List[str]: + """ + Get the out-going lanes. + :param lane_token: Token for the lane. + :return: List of lane tokens that start at the end of this lane. + """ + + return self._get_connected_lanes(lane_token, 'outgoing') + + def get_incoming_lane_ids(self, lane_token: str) -> List[str]: + """ + Get the incoming lanes. + :param lane_token: Token for the lane. + :return: List of lane tokens that end at the start of this lane. + """ + + return self._get_connected_lanes(lane_token, 'incoming') + + def get_arcline_path(self, lane_token: str) -> List[ArcLinePath]: + """ + Get the arcline path representation for a lane. + Note: This function was previously called `get_lane()`, but renamed to avoid confusion between lanes and + arcline paths. + :param lane_token: Token for the lane. + :return: Arc line path representation of the lane. + """ + + arcline_path = self.arcline_path_3.get(lane_token) + if not arcline_path: + raise ValueError(f'Error: Lane with token {lane_token} does not have a valid arcline path!') + + return arcline_path + + def get_closest_lane(self, x: float, y: float, radius: float = 5) -> str: + """ + Get closest lane id within a radius of query point. The distance from a point (x, y) to a lane is + the minimum l2 distance from (x, y) to a point on the lane. + :param x: X coordinate in global coordinate frame. + :param y: Y Coordinate in global coordinate frame. + :param radius: Radius around point to consider. + :return: Lane id of closest lane within radius. + """ + + lanes = self.get_records_in_radius(x, y, radius, ['lane', 'lane_connector']) + lanes = lanes['lane'] + lanes['lane_connector'] + + discrete_points = self.discretize_lanes(lanes, 0.5) + + current_min = np.inf + + min_id = "" + for lane_id, points in discrete_points.items(): + + distance = np.linalg.norm(np.array(points)[:, :2] - [x, y], axis=1).min() + if distance <= current_min: + current_min = distance + min_id = lane_id + + return min_id + + def render_next_roads(self, + x: float, + y: float, + alpha: float = 0.5, + figsize: Union[None, float, Tuple[float, float]] = None, + bitmap: Optional[BitMap] = None) -> Tuple[Figure, Axes]: + """ + Renders the possible next roads from a point of interest. + :param x: x coordinate of the point of interest. + :param y: y coordinate of the point of interest. + :param alpha: The opacity of each layer that gets rendered. + :param figsize: Size of the whole figure. + :param bitmap: Optional BitMap object to render below the other map layers. 
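+ Example (a minimal usage sketch; the dataroot, map name and query point
+ below are illustrative assumptions, not values from this codebase):
+ nusc_map = NuScenesMap(dataroot='/data/sets/nuscenes', map_name='singapore-onenorth')
+ fig, ax = nusc_map.render_next_roads(873.0, 1286.0)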
+ """ + return self.explorer.render_next_roads(x, y, alpha, figsize=figsize, bitmap=bitmap) + + def get_next_roads(self, x: float, y: float) -> Dict[str, List[str]]: + """ + Get the possible next roads from a point of interest. + Returns road_segment, road_block and lane. + :param x: x coordinate of the point of interest. + :param y: y coordinate of the point of interest. + :return: Dictionary of layer_name - tokens pairs. + """ + # Filter out irrelevant layers. + road_layers = ['road_segment', 'road_block', 'lane'] + layers = self.explorer.layers_on_point(x, y) + rel_layers = {layer: layers[layer] for layer in road_layers} + + # Pick most fine-grained road layer (lane, road_block, road_segment) object that contains the point. + rel_layer = None + rel_token = None + for layer in road_layers[::-1]: + if rel_layers[layer] != '': + rel_layer = layer + rel_token = rel_layers[layer] + break + assert rel_layer is not None, 'Error: No suitable layer in the specified point location!' + + # Get all records that overlap with the bounding box of the selected road. + box_coords = self.explorer.get_bounds(rel_layer, rel_token) + intersect = self.explorer.get_records_in_patch(box_coords, road_layers, mode='intersect') + + # Go through all objects within the bounding box. + result = {layer: [] for layer in road_layers} + if rel_layer == 'road_segment': + # For road segments, we do not have a direction. + # Return objects that have ANY exterior points in common with the relevant layer. + rel_exterior_nodes = self.get(rel_layer, rel_token)['exterior_node_tokens'] + for layer in road_layers: + for token in intersect[layer]: + exterior_nodes = self.get(layer, token)['exterior_node_tokens'] + if any(n in exterior_nodes for n in rel_exterior_nodes) \ + and token != rel_layers[layer]: + result[layer].append(token) + else: + # For lanes and road blocks, the next road is indicated by the edge line. + # Return objects where ALL edge line nodes are included in the exterior nodes. + to_edge_line = self.get(rel_layer, rel_token)['to_edge_line_token'] + to_edge_nodes = self.get('line', to_edge_line)['node_tokens'] + for layer in road_layers: + for token in intersect[layer]: + exterior_nodes = self.get(layer, token)['exterior_node_tokens'] + if all(n in exterior_nodes for n in to_edge_nodes) \ + and token != rel_layers[layer]: + result[layer].append(token) + return result + + +class NuScenesMapExplorer: + """ Helper class to explore the nuScenes map data. """ + def __init__(self, + map_api: NuScenesMap, + representative_layers: Tuple[str] = ('drivable_area', 'lane', 'walkway'), + color_map: dict = None): + """ + :param map_api: NuScenesMap database class. + :param representative_layers: These are the layers that we feel are representative of the whole mapping data. + :param color_map: Color map. + """ + # Mutable default argument. 
+ if color_map is None: + color_map = dict(drivable_area='#a6cee3', + road_segment='#1f78b4', + road_block='#b2df8a', + lane='#33a02c', + ped_crossing='#fb9a99', + walkway='#e31a1c', + stop_line='#fdbf6f', + carpark_area='#ff7f00', + road_divider='#cab2d6', + lane_divider='#6a3d9a', + traffic_light='#7e772e') + + self.map_api = map_api + self.representative_layers = representative_layers + self.color_map = color_map + + self.canvas_max_x = self.map_api.canvas_edge[0] + self.canvas_min_x = 0 + self.canvas_max_y = self.map_api.canvas_edge[1] + self.canvas_min_y = 0 + self.canvas_aspect_ratio = (self.canvas_max_x - self.canvas_min_x) / (self.canvas_max_y - self.canvas_min_y) + + def render_centerlines(self, + resolution_meters: float, + figsize: Union[None, float, Tuple[float, float]] = None, + bitmap: Optional[BitMap] = None) -> Tuple[Figure, Axes]: + """ + Render the centerlines of all lanes and lane connectors. + :param resolution_meters: How finely to discretize the lane. Smaller values ensure curved + lanes are properly represented. + :param figsize: Size of the figure. + :param bitmap: Optional BitMap object to render below the other map layers. + """ + # Discretize all lanes and lane connectors. + pose_lists = self.map_api.discretize_centerlines(resolution_meters) + + # Render connectivity lines. + fig = plt.figure(figsize=self._get_figsize(figsize)) + ax = fig.add_axes([0, 0, 1, 1 / self.canvas_aspect_ratio]) + + if bitmap is not None: + bitmap.render(self.map_api.canvas_edge, ax) + + for pose_list in pose_lists: + if len(pose_list) > 0: + plt.plot(pose_list[:, 0], pose_list[:, 1]) + + return fig, ax + + def render_map_mask(self, + patch_box: Tuple[float, float, float, float], + patch_angle: float, + layer_names: List[str], + canvas_size: Tuple[int, int], + figsize: Tuple[int, int], + n_row: int = 2) -> Tuple[Figure, List[Axes]]: + """ + Render map mask of the patch specified by patch_box and patch_angle. + :param patch_box: Patch box defined as [x_center, y_center, height, width]. + :param patch_angle: Patch orientation in degrees. + :param layer_names: A list of layer names to be extracted. + :param canvas_size: Size of the output mask (h, w). + :param figsize: Size of the figure. + :param n_row: Number of rows with plots. + :return: The matplotlib figure and a list of axes of the rendered layers. + """ + if layer_names is None: + layer_names = self.map_api.non_geometric_layers + + map_mask = self.get_map_mask(patch_box, patch_angle, layer_names, canvas_size) + + # If no canvas_size is specified, retrieve the default from the output of get_map_mask. + if canvas_size is None: + canvas_size = map_mask.shape[1:] + + fig = plt.figure(figsize=figsize) + ax = fig.add_axes([0, 0, 1, 1]) + ax.set_xlim(0, canvas_size[1]) + ax.set_ylim(0, canvas_size[0]) + + n_col = len(map_mask) // n_row + gs = gridspec.GridSpec(n_row, n_col) + gs.update(wspace=0.025, hspace=0.05) + for i in range(len(map_mask)): + r = i // n_col + c = i - r * n_col + subax = plt.subplot(gs[r, c]) + subax.imshow(map_mask[i], origin='lower') + subax.text(canvas_size[0] * 0.5, canvas_size[1] * 1.1, layer_names[i]) + subax.grid(False) + + return fig, fig.axes + + def get_map_geom(self, + patch_box: Tuple[float, float, float, float], + patch_angle: float, + layer_names: List[str]) -> List[Tuple[str, List[Geometry]]]: + """ + Returns a list of geometries in the specified patch_box. + These are unscaled, but aligned with the patch angle. + :param patch_box: Patch box defined as [x_center, y_center, height, width]. 
+ :param patch_angle: Patch orientation in degrees.
+ North-facing corresponds to 0.
+ :param layer_names: A list of layer names to be extracted, or None for all non-geometric layers.
+ :return: List of layer names and their corresponding geometries.
+ """
+ # If None, return all non-geometric layers.
+ if layer_names is None:
+ layer_names = self.map_api.non_geometric_layers
+
+ # Get each layer name and geometry and store them in a list.
+ map_geom = []
+ for layer_name in layer_names:
+ layer_geom = self._get_layer_geom(patch_box, patch_angle, layer_name)
+ if layer_geom is None:
+ continue
+ map_geom.append((layer_name, layer_geom))
+
+ return map_geom
+
+ def map_geom_to_mask(self,
+ map_geom: List[Tuple[str, List[Geometry]]],
+ local_box: Tuple[float, float, float, float],
+ canvas_size: Tuple[int, int]) -> np.ndarray:
+ """
+ Return list of map mask layers of the specified patch.
+ :param map_geom: List of layer names and their corresponding geometries.
+ :param local_box: The local patch box defined as (x_center, y_center, height, width), where typically
+ x_center = y_center = 0.
+ :param canvas_size: Size of the output mask (h, w).
+ :return: Stacked numpy array of size [c x h x w] with c channels and the same height/width as the canvas.
+ """
+ # Get each layer mask and stack them into a numpy tensor.
+ map_mask = []
+ for layer_name, layer_geom in map_geom:
+ layer_mask = self._layer_geom_to_mask(layer_name, layer_geom, local_box, canvas_size)
+ if layer_mask is not None:
+ map_mask.append(layer_mask)
+
+ return np.array(map_mask)
+
+ def get_map_mask(self,
+ patch_box: Optional[Tuple[float, float, float, float]],
+ patch_angle: float,
+ layer_names: List[str] = None,
+ canvas_size: Tuple[int, int] = (100, 100)) -> np.ndarray:
+ """
+ Return list of map mask layers of the specified patch.
+ :param patch_box: Patch box defined as [x_center, y_center, height, width]. If None, this plots the entire map.
+ :param patch_angle: Patch orientation in degrees. North-facing corresponds to 0.
+ :param layer_names: A list of layer names to be extracted, or None for all non-geometric layers.
+ :param canvas_size: Size of the output mask (h, w). If None, we use the default resolution of 10px/m.
+ :return: Stacked numpy array of size [c x h x w] with c channels and the same width/height as the canvas.
+ """
+ # For some combinations of parameters, we need to know the size of the current map.
+ if self.map_api.map_name == 'singapore-onenorth':
+ map_dims = [1585.6, 2025.0]
+ elif self.map_api.map_name == 'singapore-hollandvillage':
+ map_dims = [2808.3, 2922.9]
+ elif self.map_api.map_name == 'singapore-queenstown':
+ map_dims = [3228.6, 3687.1]
+ elif self.map_api.map_name == 'boston-seaport':
+ map_dims = [2979.5, 2118.1]
+ else:
+ raise Exception('Error: Invalid map!')
+
+ # If None, return the entire map.
+ if patch_box is None:
+ patch_box = [map_dims[0] / 2, map_dims[1] / 2, map_dims[1], map_dims[0]]
+
+ # If None, return all non-geometric layers.
+ if layer_names is None:
+ layer_names = self.map_api.non_geometric_layers
+
+ # If None, return the specified patch in the original scale of 10px/m.
+ if canvas_size is None:
+ map_scale = 10
+ canvas_size = np.array((patch_box[2], patch_box[3])) * map_scale
+ canvas_size = tuple(np.round(canvas_size).astype(np.int32))
+
+ # Get geometry of each layer.
+ map_geom = self.get_map_geom(patch_box, patch_angle, layer_names)
+
+ # Convert geometry of each layer into mask and stack them into a numpy tensor.
+ # Convert the patch box from global coordinates to local coordinates by setting the center to (0, 0).
+ local_box = (0.0, 0.0, patch_box[2], patch_box[3])
+ map_mask = self.map_geom_to_mask(map_geom, local_box, canvas_size)
+ assert np.all(map_mask.shape[1:] == canvas_size)
+
+ return map_mask
+
+ def render_record(self,
+ layer_name: str,
+ token: str,
+ alpha: float = 0.5,
+ figsize: Union[None, float, Tuple[float, float]] = None,
+ other_layers: List[str] = None,
+ bitmap: Optional[BitMap] = None) -> Tuple[Figure, Tuple[Axes, Axes]]:
+ """
+ Render a single map record.
+ By default will also render 3 layers which are `drivable_area`, `lane`, and `walkway` unless specified by
+ `other_layers`.
+ :param layer_name: Name of the layer that we are interested in.
+ :param token: Token of the record that you want to render.
+ :param alpha: The opacity of each layer that gets rendered.
+ :param figsize: Size of the whole figure.
+ :param other_layers: What other layers to render aside from the one specified in `layer_name`.
+ :param bitmap: Optional BitMap object to render below the other map layers.
+ :return: The matplotlib figure and axes of the rendered layers.
+ """
+ if other_layers is None:
+ other_layers = list(self.representative_layers)
+
+ for other_layer in other_layers:
+ if other_layer not in self.map_api.non_geometric_layers:
+ raise ValueError("{} is not a non geometric layer".format(other_layer))
+
+ x1, y1, x2, y2 = self.map_api.get_bounds(layer_name, token)
+
+ local_width = x2 - x1
+ local_height = y2 - y1
+ assert local_height > 0, 'Error: Map has 0 height!'
+ local_aspect_ratio = local_width / local_height
+
+ # We obtained the values 0.65 and 0.66 by trial and error.
+ fig = plt.figure(figsize=self._get_figsize(figsize))
+ global_ax = fig.add_axes([0, 0, 0.65, 0.65 / self.canvas_aspect_ratio])
+ local_ax = fig.add_axes([0.66, 0.66 / self.canvas_aspect_ratio, 0.34, 0.34 / local_aspect_ratio])
+
+ # To make sure the sequence of the layer overlays is always consistent after typesetting set().
+ random.seed('nutonomy')
+
+ if bitmap is not None:
+ bitmap.render(self.map_api.canvas_edge, global_ax)
+ bitmap.render(self.map_api.canvas_edge, local_ax)
+
+ layer_names = other_layers + [layer_name]
+ layer_names = list(set(layer_names))
+
+ for layer in layer_names:
+ self._render_layer(global_ax, layer, alpha)
+
+ for layer in layer_names:
+ self._render_layer(local_ax, layer, alpha)
+
+ if layer_name == 'drivable_area':
+ # Bad output aesthetically if we add spacing between the objects and the axes for drivable area.
+ local_ax_xlim = (x1, x2)
+ local_ax_ylim = (y1, y2)
+ else:
+ # Add some spacing between the object and the axes.
+ local_ax_xlim = (x1 - local_width / 3, x2 + local_width / 3)
+ local_ax_ylim = (y1 - local_height / 3, y2 + local_height / 3)
+
+ # Draws the rectangular patch on the local_ax.
+ local_ax.add_patch(Rectangle((x1, y1), local_width, local_height, linestyle='-.', color='red', fill=False,
+ lw=2))
+
+ local_ax.set_xlim(*local_ax_xlim)
+ local_ax.set_ylim(*local_ax_ylim)
+ local_ax.set_title('Local View')
+
+ global_ax.set_xlim(self.canvas_min_x, self.canvas_max_x)
+ global_ax.set_ylim(self.canvas_min_y, self.canvas_max_y)
+ global_ax.set_title('Global View')
+ global_ax.legend()
+
+ # Adds the zoomed in effect to the plot.
+ mark_inset(global_ax, local_ax, loc1=2, loc2=4) + + return fig, (global_ax, local_ax) + + def render_layers(self, + layer_names: List[str], + alpha: float, + figsize: Union[None, float, Tuple[float, float]], + tokens: List[str] = None, + bitmap: Optional[BitMap] = None) -> Tuple[Figure, Axes]: + """ + Render a list of layers. + :param layer_names: A list of layer names. + :param alpha: The opacity of each layer. + :param figsize: Size of the whole figure. + :param tokens: Optional list of tokens to render. None means all tokens are rendered. + :param bitmap: Optional BitMap object to render below the other map layers. + :return: The matplotlib figure and axes of the rendered layers. + """ + fig = plt.figure(figsize=self._get_figsize(figsize)) + ax = fig.add_axes([0, 0, 1, 1 / self.canvas_aspect_ratio]) + + ax.set_xlim(self.canvas_min_x, self.canvas_max_x) + ax.set_ylim(self.canvas_min_y, self.canvas_max_y) + + if bitmap is not None: + bitmap.render(self.map_api.canvas_edge, ax) + + layer_names = list(set(layer_names)) + for layer_name in layer_names: + self._render_layer(ax, layer_name, alpha, tokens) + + ax.legend() + + return fig, ax + + def render_map_patch(self, + box_coords: Tuple[float, float, float, float], + layer_names: List[str] = None, + alpha: float = 0.5, + figsize: Tuple[float, float] = (15, 15), + render_egoposes_range: bool = True, + render_legend: bool = True, + bitmap: Optional[BitMap] = None) -> Tuple[Figure, Axes]: + """ + Renders a rectangular patch specified by `box_coords`. By default renders all layers. + :param box_coords: The rectangular patch coordinates (x_min, y_min, x_max, y_max). + :param layer_names: All the non geometric layers that we want to render. + :param alpha: The opacity of each layer. + :param figsize: Size of the whole figure. + :param render_egoposes_range: Whether to render a rectangle around all ego poses. + :param render_legend: Whether to render the legend of map layers. + :param bitmap: Optional BitMap object to render below the other map layers. + :return: The matplotlib figure and axes of the rendered layers. + """ + x_min, y_min, x_max, y_max = box_coords + + if layer_names is None: + layer_names = self.map_api.non_geometric_layers + + fig = plt.figure(figsize=figsize) + + local_width = x_max - x_min + local_height = y_max - y_min + assert local_height > 0, 'Error: Map patch has 0 height!' 
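+ # The aspect ratio below sizes the axes so that a metre spans roughly the
+ # same figure length on both axes; the assert above guards this division.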
+ local_aspect_ratio = local_width / local_height + + ax = fig.add_axes([0, 0, 1, 1 / local_aspect_ratio]) + + if bitmap is not None: + bitmap.render(self.map_api.canvas_edge, ax) + + for layer_name in layer_names: + self._render_layer(ax, layer_name, alpha) + + x_margin = np.minimum(local_width / 4, 50) + y_margin = np.minimum(local_height / 4, 10) + ax.set_xlim(x_min - x_margin, x_max + x_margin) + ax.set_ylim(y_min - y_margin, y_max + y_margin) + + if render_egoposes_range: + ax.add_patch(Rectangle((x_min, y_min), local_width, local_height, fill=False, linestyle='-.', color='red', + lw=2)) + ax.text(x_min + local_width / 100, y_min + local_height / 2, "%g m" % local_height, + fontsize=14, weight='bold') + ax.text(x_min + local_width / 2, y_min + local_height / 100, "%g m" % local_width, + fontsize=14, weight='bold') + + if render_legend: + ax.legend(frameon=True, loc='upper right') + + return fig, ax + + def render_map_in_image(self, + nusc: NuScenes, + sample_token: str, + camera_channel: str = 'CAM_FRONT', + alpha: float = 0.3, + patch_radius: float = 10000, + min_polygon_area: float = 1000, + render_behind_cam: bool = True, + render_outside_im: bool = True, + layer_names: List[str] = None, + verbose: bool = True, + out_path: str = None) -> Tuple[Figure, Axes]: + """ + Render a nuScenes camera image and overlay the polygons for the specified map layers. + Note that the projections are not always accurate as the localization is in 2d. + :param nusc: The NuScenes instance to load the image from. + :param sample_token: The image's corresponding sample_token. + :param camera_channel: Camera channel name, e.g. 'CAM_FRONT'. + :param alpha: The transparency value of the layers to render in [0, 1]. + :param patch_radius: The radius in meters around the ego car in which to select map records. + :param min_polygon_area: Minimum area a polygon needs to have to be rendered. + :param render_behind_cam: Whether to render polygons where any point is behind the camera. + :param render_outside_im: Whether to render polygons where any point is outside the image. + :param layer_names: The names of the layers to render, e.g. ['lane']. + If set to None, the recommended setting will be used. + :param verbose: Whether to print to stdout. + :param out_path: Optional path to save the rendered figure to disk. + """ + near_plane = 1e-8 + + if verbose: + print('Warning: Note that the projections are not always accurate as the localization is in 2d.') + + # Default layers. + if layer_names is None: + layer_names = ['road_segment', 'lane', 'ped_crossing', 'walkway', 'stop_line', 'carpark_area'] + + # Check layers whether we can render them. + for layer_name in layer_names: + assert layer_name in self.map_api.non_geometric_polygon_layers, \ + 'Error: Can only render non-geometry polygons: %s' % layer_names + + # Check that NuScenesMap was loaded for the correct location. + sample_record = nusc.get('sample', sample_token) + scene_record = nusc.get('scene', sample_record['scene_token']) + log_record = nusc.get('log', scene_record['log_token']) + log_location = log_record['location'] + assert self.map_api.map_name == log_location, \ + 'Error: NuScenesMap loaded for location %s, should be %s!' % (self.map_api.map_name, log_location) + + # Grab the front camera image and intrinsics. 
+ cam_token = sample_record['data'][camera_channel] + cam_record = nusc.get('sample_data', cam_token) + cam_path = nusc.get_sample_data_path(cam_token) + im = Image.open(cam_path) + im_size = im.size + cs_record = nusc.get('calibrated_sensor', cam_record['calibrated_sensor_token']) + cam_intrinsic = np.array(cs_record['camera_intrinsic']) + + # Retrieve the current map. + poserecord = nusc.get('ego_pose', cam_record['ego_pose_token']) + ego_pose = poserecord['translation'] + box_coords = ( + ego_pose[0] - patch_radius, + ego_pose[1] - patch_radius, + ego_pose[0] + patch_radius, + ego_pose[1] + patch_radius, + ) + records_in_patch = self.get_records_in_patch(box_coords, layer_names, 'intersect') + + # Init axes. + fig = plt.figure(figsize=(9, 16)) + ax = fig.add_axes([0, 0, 1, 1]) + ax.set_xlim(0, im_size[0]) + ax.set_ylim(0, im_size[1]) + ax.imshow(im) + + # Retrieve and render each record. + for layer_name in layer_names: + for token in records_in_patch[layer_name]: + record = self.map_api.get(layer_name, token) + if layer_name == 'drivable_area': + polygon_tokens = record['polygon_tokens'] + else: + polygon_tokens = [record['polygon_token']] + + for polygon_token in polygon_tokens: + polygon = self.map_api.extract_polygon(polygon_token) + + # Convert polygon nodes to pointcloud with 0 height. + points = np.array(polygon.exterior.xy) + points = np.vstack((points, np.zeros((1, points.shape[1])))) + + # Transform into the ego vehicle frame for the timestamp of the image. + points = points - np.array(poserecord['translation']).reshape((-1, 1)) + points = np.dot(Quaternion(poserecord['rotation']).rotation_matrix.T, points) + + # Transform into the camera. + points = points - np.array(cs_record['translation']).reshape((-1, 1)) + points = np.dot(Quaternion(cs_record['rotation']).rotation_matrix.T, points) + + # Remove points that are partially behind the camera. + depths = points[2, :] + behind = depths < near_plane + if np.all(behind): + continue + + if render_behind_cam: + # Perform clipping on polygons that are partially behind the camera. + points = NuScenesMapExplorer._clip_points_behind_camera(points, near_plane) + elif np.any(behind): + # Otherwise ignore any polygon that is partially behind the camera. + continue + + # Ignore polygons with less than 3 points after clipping. + if len(points) == 0 or points.shape[1] < 3: + continue + + # Take the actual picture (matrix multiplication with camera-matrix + renormalization). + points = view_points(points, cam_intrinsic, normalize=True) + + # Skip polygons where all points are outside the image. + # Leave a margin of 1 pixel for aesthetic reasons. + inside = np.ones(points.shape[1], dtype=bool) + inside = np.logical_and(inside, points[0, :] > 1) + inside = np.logical_and(inside, points[0, :] < im.size[0] - 1) + inside = np.logical_and(inside, points[1, :] > 1) + inside = np.logical_and(inside, points[1, :] < im.size[1] - 1) + if render_outside_im: + if np.all(np.logical_not(inside)): + continue + else: + if np.any(np.logical_not(inside)): + continue + + points = points[:2, :] + points = [(p0, p1) for (p0, p1) in zip(points[0], points[1])] + polygon_proj = Polygon(points) + + # Filter small polygons + if polygon_proj.area < min_polygon_area: + continue + + label = layer_name + ax.add_patch(descartes.PolygonPatch(polygon_proj, fc=self.color_map[layer_name], alpha=alpha, + label=label)) + + # Display the image. 
+ plt.axis('off') + ax.invert_yaxis() + + if out_path is not None: + plt.tight_layout() + plt.savefig(out_path, bbox_inches='tight', pad_inches=0) + + return fig, ax + + @staticmethod + def points_transform(points, poserecord, cs_record, cam_intrinsic, im_size, near_plane=1e-8, + render_behind_cam=True, render_outside_im=True): + points = np.vstack((points, np.zeros((1, points.shape[1])))) + + # Transform into the ego vehicle frame for the timestamp of the image. + points = points - np.array(poserecord['translation']).reshape((-1, 1)) + points = np.dot(Quaternion(poserecord['rotation']).rotation_matrix.T, points) + + # Transform into the camera. + points = points - np.array(cs_record['translation']).reshape((-1, 1)) + points = np.dot(Quaternion(cs_record['rotation']).rotation_matrix.T, points) + + # Remove points that are partially behind the camera. + depths = points[2, :] + behind = depths < near_plane + if np.all(behind): + return None + + if render_behind_cam: + # Perform clipping on polygons that are partially behind the camera. + points = NuScenesMapExplorer._clip_points_behind_camera(points, near_plane) + + elif np.any(behind): + # Otherwise ignore any polygon that is partially behind the camera. + return None + + # Take the actual picture (matrix multiplication with camera-matrix + renormalization). + points = view_points(points, cam_intrinsic, normalize=True) + + # Skip polygons where all points are outside the image. + # Leave a margin of 1 pixel for aesthetic reasons. + inside = np.ones(points.shape[1], dtype=bool) + inside = np.logical_and(inside, points[0, :] > 1) + inside = np.logical_and(inside, points[0, :] < im_size[0] - 1) + inside = np.logical_and(inside, points[1, :] > 1) + inside = np.logical_and(inside, points[1, :] < im_size[1] - 1) + + if render_outside_im: + if np.all(np.logical_not(inside)): + return None + else: + if np.any(np.logical_not(inside)): + return None + + # points = points[:, inside] + + # Ignore polygons with less than 3 points after clipping. + if len(points) == 0 or points.shape[1] < 3: + return None + + points = points[:2, :] + points = [(p0, p1) for (p0, p1) in zip(points[0], points[1])] + return points + + def get_map_mask_in_image(self, + nusc: NuScenes, + sample_token: str, + camera_channel: str = 'CAM_FRONT', + alpha: float = 0.3, + patch_radius: float = 10000, + min_polygon_area: float = 1000, + render_behind_cam: bool = True, + render_outside_im: bool = True, + layer_names: List[str] = None, + verbose: bool = False, + out_path: str = None) -> np.ndarray: + """ + Render a nuScenes camera image and overlay the polygons for the specified map layers. + Note that the projections are not always accurate as the localization is in 2d. + :param nusc: The NuScenes instance to load the image from. + :param sample_token: The image's corresponding sample_token. + :param camera_channel: Camera channel name, e.g. 'CAM_FRONT'. + :param alpha: The transparency value of the layers to render in [0, 1]. + :param patch_radius: The radius in meters around the ego car in which to select map records. + :param min_polygon_area: Minimum area a polygon needs to have to be rendered. + :param render_behind_cam: Whether to render polygons where any point is behind the camera. + :param render_outside_im: Whether to render polygons where any point is outside the image. + :param layer_names: The names of the layers to render, e.g. ['lane']. + If set to None, the recommended setting will be used. + :param verbose: Whether to print to stdout. 
+ :param out_path: Optional path to save the rendered figure to disk. + """ + near_plane = 1e-8 + if verbose: + print('Warning: Note that the projections are not always accurate as the localization is in 2d.') + + # Default layers. + if layer_names is None: + layer_names = ['road_segment', 'lane', 'ped_crossing', 'walkway', 'stop_line', 'carpark_area'] + + # # Check layers whether we can render them. + # for layer_name in layer_names: + # assert layer_name in self.map_api.non_geometric_polygon_layers, \ + # 'Error: Can only render non-geometry polygons: %s' % layer_names + + # Check that NuScenesMap was loaded for the correct location. + sample_record = nusc.get('sample', sample_token) + scene_record = nusc.get('scene', sample_record['scene_token']) + log_record = nusc.get('log', scene_record['log_token']) + log_location = log_record['location'] + assert self.map_api.map_name == log_location, \ + 'Error: NuScenesMap loaded for location %s, should be %s!' % (self.map_api.map_name, log_location) + + # Grab the front camera image and intrinsics. + cam_token = sample_record['data'][camera_channel] + cam_record = nusc.get('sample_data', cam_token) + cam_path = nusc.get_sample_data_path(cam_token) + im = Image.open(cam_path) + im_size = im.size + cs_record = nusc.get('calibrated_sensor', cam_record['calibrated_sensor_token']) + cam_intrinsic = np.array(cs_record['camera_intrinsic']) + + # Retrieve the current map. + poserecord = nusc.get('ego_pose', cam_record['ego_pose_token']) + ego_pose = poserecord['translation'] + box_coords = ( + ego_pose[0] - patch_radius, + ego_pose[1] - patch_radius, + ego_pose[0] + patch_radius, + ego_pose[1] + patch_radius, + ) + records_in_patch = self.get_records_in_patch(box_coords, layer_names, 'intersect') + + if out_path is not None: + # Init axes. + fig = plt.figure(figsize=(9, 16)) + ax = fig.add_axes([0, 0, 1, 1]) + ax.set_xlim(0, im_size[0]) + ax.set_ylim(0, im_size[1]) + ax.imshow(im) + + points_transform = partial(self.points_transform, poserecord=poserecord, cs_record=cs_record, + cam_intrinsic=cam_intrinsic, near_plane=near_plane, im_size=im_size, + render_behind_cam=render_behind_cam, render_outside_im=render_outside_im) + + # Retrieve and render each record. + map_geom = [] + for layer_name in layer_names: + if layer_name in self.map_api.non_geometric_line_layers: + line_list = [] + for token in records_in_patch[layer_name]: + record = self.map_api.get(layer_name, token) + line = self.map_api.extract_line(record['line_token']) + if line.is_empty: # Skip lines without nodes. 
+ continue + points = np.array(line.xy) + points = points_transform(points) + if points is None: + continue + line = LineString(points) + line_list.append(line) + # For visualize + if out_path is not None: + polygon = Polygon(points) + ax.add_patch(descartes.PolygonPatch(polygon, fc=self.color_map[layer_name], + alpha=alpha, label=layer_name)) + map_geom.append((layer_name, line_list)) + elif layer_name == 'drivable_area': + polygon_list = [] + for token in records_in_patch[layer_name]: + record = self.map_api.get(layer_name, token) + polygons = [self.map_api.extract_polygon(polygon_token) for polygon_token in + record['polygon_tokens']] + for polygon in polygons: + ex_points = np.array(polygon.exterior.xy) + ex_points = points_transform(ex_points) + if ex_points is None: + continue + interiors = [] + for interior in polygon.interiors: + in_points = np.array(interior.xy) + in_points = points_transform(in_points) + if in_points is None: + continue + interiors.append(in_points) + polygon = Polygon(ex_points, interiors) + polygon = polygon.buffer(0.01) + if polygon.geom_type == 'Polygon': + polygon = MultiPolygon([polygon]) + # Filter small polygons + if polygon.area < min_polygon_area: + continue + polygon_list.append(polygon) + # For visualize + if out_path is not None: + ax.add_patch(descartes.PolygonPatch(polygon, fc=self.color_map[layer_name], + alpha=alpha, label=layer_name)) + map_geom.append((layer_name, polygon_list)) + else: + polygon_list = [] + for token in records_in_patch[layer_name]: + record = self.map_api.get(layer_name, token) + polygon = self.map_api.extract_polygon(record['polygon_token']) + if polygon.is_valid: + if not polygon.is_empty: + ex_points = np.array(polygon.exterior.xy) + ex_points = points_transform(ex_points) + if ex_points is None: + continue + interiors = [] + for interior in polygon.interiors: + in_points = np.array(interior.xy) + in_points = points_transform(in_points) + if in_points is None: + continue + interiors.append(in_points) + polygon = Polygon(ex_points, interiors) + polygon = polygon.buffer(0.01) + if polygon.geom_type == 'Polygon': + polygon = MultiPolygon([polygon]) + # Filter small polygons + if polygon.area < min_polygon_area: + continue + polygon_list.append(polygon) + # For visualize + if out_path is not None: + ax.add_patch(descartes.PolygonPatch(polygon, fc=self.color_map[layer_name], + alpha=alpha, label=layer_name)) + map_geom.append((layer_name, polygon_list)) + + # For visualize + if out_path is not None: + # Display the image. + plt.axis('off') + ax.invert_yaxis() + plt.tight_layout() + plt.savefig(out_path, bbox_inches='tight', pad_inches=0) + plt.close() + + # Convert geometry of each layer into mask and stack them into a numpy tensor. + # Convert the patch box from global coordinates to local coordinates by setting the center to (0, 0). + local_box = (im_size[0] // 2, im_size[1] // 2, im_size[1], im_size[0]) + canvas_size = (im_size[1], im_size[0]) + img_mask = self.map_geom_to_mask(map_geom, local_box, canvas_size) + assert np.all(img_mask.shape[1:] == canvas_size) + return img_mask + + def render_egoposes_on_fancy_map(self, + nusc: NuScenes, + scene_tokens: List = None, + verbose: bool = True, + out_path: str = None, + render_egoposes: bool = True, + render_egoposes_range: bool = True, + render_legend: bool = True, + bitmap: Optional[BitMap] = None) -> Tuple[np.ndarray, Figure, Axes]: + """ + Renders each ego pose of a list of scenes on the map (around 40 poses per scene). 
+ This method is heavily inspired by NuScenes.render_egoposes_on_map(), but uses the map expansion pack maps.
+ Note that the maps are constantly evolving, whereas we only released a single snapshot of the data.
+ Therefore, for some scenes there is a bad fit between ego poses and maps.
+ :param nusc: The NuScenes instance to load the ego poses from.
+ :param scene_tokens: Optional list of scene tokens corresponding to the current map location.
+ :param verbose: Whether to show status messages and progress bar.
+ :param out_path: Optional path to save the rendered figure to disk.
+ :param render_egoposes: Whether to render ego poses.
+ :param render_egoposes_range: Whether to render a rectangle around all ego poses.
+ :param render_legend: Whether to render the legend of map layers.
+ :param bitmap: Optional BitMap object to render below the other map layers.
+ :return: np.float32 array of shape (n, 2). Returns a matrix with n ego poses in global map coordinates.
+ """
+ # Settings
+ patch_margin = 2
+ min_diff_patch = 30
+
+ # IDs of scenes with a bad match between localization and map.
+ scene_blacklist = [499, 515, 517]
+
+ # Get logs by location.
+ log_location = self.map_api.map_name
+ log_tokens = [log['token'] for log in nusc.log if log['location'] == log_location]
+ assert len(log_tokens) > 0, 'Error: This split has 0 scenes for location %s!' % log_location
+
+ # Filter scenes.
+ scene_tokens_location = [e['token'] for e in nusc.scene if e['log_token'] in log_tokens]
+ if scene_tokens is not None:
+ scene_tokens_location = [t for t in scene_tokens_location if t in scene_tokens]
+ assert len(scene_tokens_location) > 0, 'Error: Found 0 valid scenes for location %s!' % log_location
+
+ map_poses = []
+ if verbose:
+ print('Adding ego poses to map...')
+ for scene_token in tqdm(scene_tokens_location, disable=not verbose):
+ # Check that the scene is from the correct location.
+ scene_record = nusc.get('scene', scene_token)
+ scene_name = scene_record['name']
+ scene_id = int(scene_name.replace('scene-', ''))
+ log_record = nusc.get('log', scene_record['log_token'])
+ assert log_record['location'] == log_location, \
+ 'Error: The provided scene_tokens do not correspond to the provided map location!'
+
+ # Print a warning if the localization is known to be bad.
+ if verbose and scene_id in scene_blacklist:
+ print('Warning: %s is known to have a bad fit between ego pose and map.' % scene_name)
+
+ # For each sample in the scene, store the ego pose.
+ sample_tokens = nusc.field2token('sample', 'scene_token', scene_token)
+ for sample_token in sample_tokens:
+ sample_record = nusc.get('sample', sample_token)
+
+ # Poses are associated with the sample_data. Here we use the lidar sample_data.
+ sample_data_record = nusc.get('sample_data', sample_record['data']['LIDAR_TOP'])
+ pose_record = nusc.get('ego_pose', sample_data_record['ego_pose_token'])
+
+ # Calculate the pose on the map and append.
+ map_poses.append(pose_record['translation'])
+
+ # Check that ego poses aren't empty.
+ assert len(map_poses) > 0, 'Error: Found 0 ego poses. Please check the inputs.'
+
+ # Stack the ego poses and keep only the x/y coordinates.
+ if verbose:
+ print('Creating plot...')
+ map_poses = np.vstack(map_poses)[:, :2]
+
+ # Render the map patch with the current ego poses.
+ min_patch = np.floor(map_poses.min(axis=0) - patch_margin) + max_patch = np.ceil(map_poses.max(axis=0) + patch_margin) + diff_patch = max_patch - min_patch + if any(diff_patch < min_diff_patch): + center_patch = (min_patch + max_patch) / 2 + diff_patch = np.maximum(diff_patch, min_diff_patch) + min_patch = center_patch - diff_patch / 2 + max_patch = center_patch + diff_patch / 2 + my_patch = (min_patch[0], min_patch[1], max_patch[0], max_patch[1]) + fig, ax = self.render_map_patch(my_patch, self.map_api.non_geometric_layers, figsize=(10, 10), + render_egoposes_range=render_egoposes_range, + render_legend=render_legend, bitmap=bitmap) + + # Plot in the same axis as the map. + # Make sure these are plotted "on top". + if render_egoposes: + ax.scatter(map_poses[:, 0], map_poses[:, 1], s=20, c='k', alpha=1.0, zorder=2) + plt.axis('off') + + if out_path is not None: + plt.savefig(out_path, bbox_inches='tight', pad_inches=0) + + return map_poses, fig, ax + + def render_next_roads(self, + x: float, + y: float, + alpha: float = 0.5, + figsize: Union[None, float, Tuple[float, float]] = None, + bitmap: Optional[BitMap] = None) -> Tuple[Figure, Axes]: + """ + Renders the possible next roads from a point of interest. + :param x: x coordinate of the point of interest. + :param y: y coordinate of the point of interest. + :param alpha: The opacity of each layer that gets rendered. + :param figsize: Size of the whole figure. + :param bitmap: Optional BitMap object to render below the other map layers. + """ + # Get next roads. + next_roads = self.map_api.get_next_roads(x, y) + layer_names = [] + tokens = [] + for layer_name, layer_tokens in next_roads.items(): + if len(layer_tokens) > 0: + layer_names.append(layer_name) + tokens.extend(layer_tokens) + + # Render them. + fig, ax = self.render_layers(layer_names, alpha, figsize, tokens=tokens, bitmap=bitmap) + + # Render current location with an x. + ax.plot(x, y, 'x', markersize=12, color='red') + + return fig, ax + + @staticmethod + def _clip_points_behind_camera(points, near_plane: float): + """ + Perform clipping on polygons that are partially behind the camera. + This method is necessary as the projection does not work for points behind the camera. + Hence we compute the line between the point and the camera and follow that line until we hit the near plane of + the camera. Then we use that point. + :param points: Matrix of points, where each point (x, y, z) is along each column. + :param near_plane: If we set the near_plane distance of the camera to 0 then some points will project to + infinity. Therefore we need to clip these points at the near plane. + :return: The clipped version of the polygon. This may have fewer points than the original polygon if some lines + were entirely behind the polygon. + """ + points_clipped = [] + # Loop through each line on the polygon. + # For each line where exactly 1 endpoints is behind the camera, move the point along the line until + # it hits the near plane of the camera (clipping). + assert points.shape[0] == 3 + point_count = points.shape[1] + for line_1 in range(point_count): + line_2 = (line_1 + 1) % point_count + point_1 = points[:, line_1] + point_2 = points[:, line_2] + z_1 = point_1[2] + z_2 = point_2[2] + + if z_1 >= near_plane and z_2 >= near_plane: + # Both points are in front. + # Add both points unless the first is already added. 
+ if len(points_clipped) == 0 or all(points_clipped[-1] != point_1):
+ points_clipped.append(point_1)
+ points_clipped.append(point_2)
+ elif z_1 < near_plane and z_2 < near_plane:
+ # Both points are behind the camera.
+ # Don't add anything.
+ continue
+ else:
+ # One point is in front, one behind.
+ # By convention pointA is behind the camera and pointB in front.
+ if z_1 <= z_2:
+ point_a = points[:, line_1]
+ point_b = points[:, line_2]
+ else:
+ point_a = points[:, line_2]
+ point_b = points[:, line_1]
+ z_a = point_a[2]
+ z_b = point_b[2]
+
+ # Clip line along near plane.
+ pointdiff = point_b - point_a
+ alpha = (near_plane - z_b) / (z_a - z_b)
+ clipped = point_a + (1 - alpha) * pointdiff
+ assert np.abs(clipped[2] - near_plane) < 1e-6
+
+ # Add the first point (if valid and not duplicate), the clipped point and the second point (if valid).
+ if z_1 >= near_plane and (len(points_clipped) == 0 or all(points_clipped[-1] != point_1)):
+ points_clipped.append(point_1)
+ points_clipped.append(clipped)
+ if z_2 >= near_plane:
+ points_clipped.append(point_2)
+
+ points_clipped = np.array(points_clipped).transpose()
+ return points_clipped
+
+ def get_records_in_patch(self,
+ box_coords: Tuple[float, float, float, float],
+ layer_names: List[str] = None,
+ mode: str = 'intersect') -> Dict[str, List[str]]:
+ """
+ Get all the record tokens that intersect or are within a particular rectangular patch.
+ :param box_coords: The rectangular patch coordinates (x_min, y_min, x_max, y_max).
+ :param layer_names: Names of the layers that we want to retrieve in a particular patch.
+ By default will always look at all the non geometric layers.
+ :param mode: "intersect" will return all non geometric records that intersect the patch,
+ "within" will return all non geometric records that are within the patch.
+ :return: Dictionary of layer_name - tokens pairs.
+ """
+ if mode not in ['intersect', 'within']:
+ raise ValueError("Mode {} is not valid, choice=('intersect', 'within')".format(mode))
+
+ if layer_names is None:
+ layer_names = self.map_api.non_geometric_layers
+
+ records_in_patch = dict()
+ for layer_name in layer_names:
+ layer_records = []
+ for record in getattr(self.map_api, layer_name):
+ token = record['token']
+ if self.is_record_in_patch(layer_name, token, box_coords, mode):
+ layer_records.append(token)
+
+ records_in_patch.update({layer_name: layer_records})
+
+ return records_in_patch
+
+ def is_record_in_patch(self,
+ layer_name: str,
+ token: str,
+ box_coords: Tuple[float, float, float, float],
+ mode: str = 'intersect') -> bool:
+ """
+ Query whether a particular record is in a rectangular patch.
+ :param layer_name: The layer name of the record.
+ :param token: The record token.
+ :param box_coords: The rectangular patch coordinates (x_min, y_min, x_max, y_max).
+ :param mode: "intersect" means it will return True if the geometric object intersects the patch and False
+ otherwise, "within" will return True if the geometric object is within the patch and False otherwise.
+ :return: Boolean value on whether a particular record intersects or is within a particular patch.
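+ Example (a minimal sketch; `lane_token` and the box coordinates are hypothetical):
+ inside = explorer.is_record_in_patch('lane', lane_token, (300.0, 1000.0, 500.0, 1200.0), mode='within')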
+ """ + if mode not in ['intersect', 'within']: + raise ValueError("Mode {} is not valid, choice=('intersect', 'within')".format(mode)) + + if layer_name in self.map_api.lookup_polygon_layers: + return self._is_polygon_record_in_patch(token, layer_name, box_coords, mode) + elif layer_name in self.map_api.non_geometric_line_layers: + return self._is_line_record_in_patch(token, layer_name, box_coords, mode) + else: + raise ValueError("{} is not a valid layer".format(layer_name)) + + def layers_on_point(self, x: float, y: float, layer_names: List[str] = None) -> Dict[str, str]: + """ + Returns all the polygonal layers that a particular point is on. + :param x: x coordinate of the point of interest. + :param y: y coordinate of the point of interest. + :param layer_names: The names of the layers to search for. + :return: All the polygonal layers that a particular point is on. + """ + # Default option. + if layer_names is None: + layer_names = self.map_api.non_geometric_polygon_layers + + layers_on_point = dict() + for layer_name in layer_names: + layers_on_point.update({layer_name: self.record_on_point(x, y, layer_name)}) + + return layers_on_point + + def record_on_point(self, x: float, y: float, layer_name: str) -> str: + """ + Query what record of a layer a particular point is on. + :param x: x coordinate of the point of interest. + :param y: y coordinate of the point of interest. + :param layer_name: The non geometric polygonal layer name that we are interested in. + :return: The first token of a layer a particular point is on or '' if no layer is found. + """ + if layer_name not in self.map_api.non_geometric_polygon_layers: + raise ValueError("{} is not a polygon layer".format(layer_name)) + + point = Point(x, y) + records = getattr(self.map_api, layer_name) + + if layer_name == 'drivable_area': + for record in records: + polygons = [self.map_api.extract_polygon(polygon_token) for polygon_token in record['polygon_tokens']] + for polygon in polygons: + if point.within(polygon): + return record['token'] + else: + pass + else: + for record in records: + polygon = self.map_api.extract_polygon(record['polygon_token']) + if point.within(polygon): + return record['token'] + else: + pass + + # If nothing is found, return an empty string. + return '' + + def extract_polygon(self, polygon_token: str) -> Polygon: + """ + Construct a shapely Polygon object out of a polygon token. + :param polygon_token: The token of the polygon record. + :return: The polygon wrapped in a shapely Polygon object. + """ + polygon_record = self.map_api.get('polygon', polygon_token) + + exterior_coords = [(self.map_api.get('node', token)['x'], self.map_api.get('node', token)['y']) + for token in polygon_record['exterior_node_tokens']] + + interiors = [] + for hole in polygon_record['holes']: + interior_coords = [(self.map_api.get('node', token)['x'], self.map_api.get('node', token)['y']) + for token in hole['node_tokens']] + if len(interior_coords) > 0: # Add only non-empty holes. + interiors.append(interior_coords) + + return Polygon(exterior_coords, interiors) + + def extract_line(self, line_token: str) -> LineString: + """ + Construct a shapely LineString object out of a line token. + :param line_token: The token of the line record. + :return: The line wrapped in a LineString object. 
+ """ + line_record = self.map_api.get('line', line_token) + line_nodes = [(self.map_api.get('node', token)['x'], self.map_api.get('node', token)['y']) + for token in line_record['node_tokens']] + + return LineString(line_nodes) + + def get_bounds(self, layer_name: str, token: str) -> Tuple[float, float, float, float]: + """ + Get the bounds of the geometric object that corresponds to a non geometric record. + :param layer_name: Name of the layer that we are interested in. + :param token: Token of the record. + :return: min_x, min_y, max_x, max_y of the line representation. + """ + if layer_name in self.map_api.non_geometric_polygon_layers: + return self._get_polygon_bounds(layer_name, token) + elif layer_name in self.map_api.non_geometric_line_layers: + return self._get_line_bounds(layer_name, token) + else: + raise ValueError("{} is not a valid layer".format(layer_name)) + + def _get_polygon_bounds(self, layer_name: str, token: str) -> Tuple[float, float, float, float]: + """ + Get the extremities of the polygon object that corresponds to a non geometric record. + :param layer_name: Name of the layer that we are interested in. + :param token: Token of the record. + :return: min_x, min_y, max_x, max_y of of the polygon or polygons (for drivable_area) representation. + """ + if layer_name not in self.map_api.non_geometric_polygon_layers: + raise ValueError("{} is not a record with polygon representation".format(token)) + + record = self.map_api.get(layer_name, token) + + if layer_name == 'drivable_area': + polygons = [self.map_api.get('polygon', polygon_token) for polygon_token in record['polygon_tokens']] + exterior_node_coords = [] + + for polygon in polygons: + nodes = [self.map_api.get('node', node_token) for node_token in polygon['exterior_node_tokens']] + node_coords = [(node['x'], node['y']) for node in nodes] + exterior_node_coords.extend(node_coords) + + exterior_node_coords = np.array(exterior_node_coords) + else: + exterior_nodes = [self.map_api.get('node', token) for token in record['exterior_node_tokens']] + exterior_node_coords = np.array([(node['x'], node['y']) for node in exterior_nodes]) + + xs = exterior_node_coords[:, 0] + ys = exterior_node_coords[:, 1] + + x2 = xs.max() + x1 = xs.min() + y2 = ys.max() + y1 = ys.min() + + return x1, y1, x2, y2 + + def _get_line_bounds(self, layer_name: str, token: str) -> Tuple[float, float, float, float]: + """ + Get the bounds of the line object that corresponds to a non geometric record. + :param layer_name: Name of the layer that we are interested in. + :param token: Token of the record. + :return: min_x, min_y, max_x, max_y of of the line representation. + """ + if layer_name not in self.map_api.non_geometric_line_layers: + raise ValueError("{} is not a record with line representation".format(token)) + + record = self.map_api.get(layer_name, token) + nodes = [self.map_api.get('node', node_token) for node_token in record['node_tokens']] + node_coords = [(node['x'], node['y']) for node in nodes] + node_coords = np.array(node_coords) + + xs = node_coords[:, 0] + ys = node_coords[:, 1] + + x2 = xs.max() + x1 = xs.min() + y2 = ys.max() + y1 = ys.min() + + return x1, y1, x2, y2 + + def _is_polygon_record_in_patch(self, + token: str, + layer_name: str, + box_coords: Tuple[float, float, float, float], + mode: str = 'intersect') -> bool: + """ + Query whether a particular polygon record is in a rectangular patch. + :param layer_name: The layer name of the record. + :param token: The record token. 
+ :param box_coords: The rectangular patch coordinates (x_min, y_min, x_max, y_max). + :param mode: "intersect" means it will return True if the geometric object intersects the patch and False + otherwise, "within" will return True if the geometric object is within the patch and False otherwise. + :return: Boolean value on whether a particular polygon record intersects or is within a particular patch. + """ + if layer_name not in self.map_api.lookup_polygon_layers: + raise ValueError('{} is not a polygonal layer'.format(layer_name)) + + x_min, y_min, x_max, y_max = box_coords + record = self.map_api.get(layer_name, token) + rectangular_patch = box(x_min, y_min, x_max, y_max) + + if layer_name == 'drivable_area': + polygons = [self.map_api.extract_polygon(polygon_token) for polygon_token in record['polygon_tokens']] + geom = MultiPolygon(polygons) + else: + geom = self.map_api.extract_polygon(record['polygon_token']) + + if mode == 'intersect': + return geom.intersects(rectangular_patch) + elif mode == 'within': + return geom.within(rectangular_patch) + + def _is_line_record_in_patch(self, + token: str, + layer_name: str, + box_coords: Tuple[float, float, float, float], + mode: str = 'intersect') -> bool: + """ + Query whether a particular line record is in a rectangular patch. + :param layer_name: The layer name of the record. + :param token: The record token. + :param box_coords: The rectangular patch coordinates (x_min, y_min, x_max, y_max). + :param mode: "intersect" means it will return True if the geometric object intersects the patch and False + otherwise, "within" will return True if the geometric object is within the patch and False otherwise. + :return: Boolean value on whether a particular line record intersects or is within a particular patch. + """ + if layer_name not in self.map_api.non_geometric_line_layers: + raise ValueError("{} is not a line layer".format(layer_name)) + + # Retrieve nodes of this line. + record = self.map_api.get(layer_name, token) + node_recs = [self.map_api.get('node', node_token) for node_token in record['node_tokens']] + node_coords = [[node['x'], node['y']] for node in node_recs] + node_coords = np.array(node_coords) + + # A few lines in Queenstown have zero nodes. In this case we return False. + if len(node_coords) == 0: + return False + + # Check that nodes fall inside the path. + x_min, y_min, x_max, y_max = box_coords + cond_x = np.logical_and(node_coords[:, 0] < x_max, node_coords[:, 0] > x_min) + cond_y = np.logical_and(node_coords[:, 1] < y_max, node_coords[:, 1] > y_min) + cond = np.logical_and(cond_x, cond_y) + if mode == 'intersect': + return np.any(cond) + elif mode == 'within': + return np.all(cond) + + def _render_layer(self, ax: Axes, layer_name: str, alpha: float, tokens: List[str] = None) -> None: + """ + Wrapper method that renders individual layers on an axis. + :param ax: The matplotlib axes where the layer will get rendered. + :param layer_name: Name of the layer that we are interested in. + :param alpha: The opacity of the layer to be rendered. + :param tokens: Optional list of tokens to render. None means all tokens are rendered. 
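+ Example (a minimal sketch; `ax` is any prepared matplotlib Axes):
+ explorer._render_layer(ax, 'ped_crossing', alpha=0.5)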
+ """ + if layer_name in self.map_api.non_geometric_polygon_layers: + self._render_polygon_layer(ax, layer_name, alpha, tokens) + elif layer_name in self.map_api.non_geometric_line_layers: + self._render_line_layer(ax, layer_name, alpha, tokens) + else: + raise ValueError("{} is not a valid layer".format(layer_name)) + + def _render_polygon_layer(self, ax: Axes, layer_name: str, alpha: float, tokens: List[str] = None) -> None: + """ + Renders an individual non-geometric polygon layer on an axis. + :param ax: The matplotlib axes where the layer will get rendered. + :param layer_name: Name of the layer that we are interested in. + :param alpha: The opacity of the layer to be rendered. + :param tokens: Optional list of tokens to render. None means all tokens are rendered. + """ + if layer_name not in self.map_api.non_geometric_polygon_layers: + raise ValueError('{} is not a polygonal layer'.format(layer_name)) + + first_time = True + records = getattr(self.map_api, layer_name) + if tokens is not None: + records = [r for r in records if r['token'] in tokens] + if layer_name == 'drivable_area': + for record in records: + polygons = [self.map_api.extract_polygon(polygon_token) for polygon_token in record['polygon_tokens']] + + for polygon in polygons: + if first_time: + label = layer_name + first_time = False + else: + label = None + ax.add_patch(descartes.PolygonPatch(polygon, fc=self.color_map[layer_name], alpha=alpha, + label=label)) + else: + for record in records: + polygon = self.map_api.extract_polygon(record['polygon_token']) + + if first_time: + label = layer_name + first_time = False + else: + label = None + + ax.add_patch(descartes.PolygonPatch(polygon, fc=self.color_map[layer_name], alpha=alpha, + label=label)) + + def _render_line_layer(self, ax: Axes, layer_name: str, alpha: float, tokens: List[str] = None) -> None: + """ + Renders an individual non-geometric line layer on an axis. + :param ax: The matplotlib axes where the layer will get rendered. + :param layer_name: Name of the layer that we are interested in. + :param alpha: The opacity of the layer to be rendered. + :param tokens: Optional list of tokens to render. None means all tokens are rendered. + """ + if layer_name not in self.map_api.non_geometric_line_layers: + raise ValueError("{} is not a line layer".format(layer_name)) + + first_time = True + records = getattr(self.map_api, layer_name) + if tokens is not None: + records = [r for r in records if r['token'] in tokens] + for record in records: + if first_time: + label = layer_name + first_time = False + else: + label = None + line = self.map_api.extract_line(record['line_token']) + if line.is_empty: # Skip lines without nodes + continue + xs, ys = line.xy + + if layer_name == 'traffic_light': + # Draws an arrow with the physical traffic light as the starting point, pointing to the direction on + # where the traffic light points. + ax.add_patch(Arrow(xs[0], ys[0], xs[1]-xs[0], ys[1]-ys[0], color=self.color_map[layer_name], + label=label)) + else: + ax.plot(xs, ys, color=self.color_map[layer_name], alpha=alpha, label=label) + + def _get_layer_geom(self, + patch_box: Tuple[float, float, float, float], + patch_angle: float, + layer_name: str) -> List[Geometry]: + """ + Wrapper method that gets the geometries for each layer. + :param patch_box: Patch box defined as [x_center, y_center, height, width]. + :param patch_angle: Patch orientation in degrees. + :param layer_name: Name of map layer to be converted to binary map mask patch. 
+ :return: List of geometries for the given layer. + """ + if layer_name in self.map_api.non_geometric_polygon_layers: + return self._get_layer_polygon(patch_box, patch_angle, layer_name) + elif layer_name in self.map_api.non_geometric_line_layers: + return self._get_layer_line(patch_box, patch_angle, layer_name) + else: + raise ValueError("{} is not a valid layer".format(layer_name)) + + def _layer_geom_to_mask(self, + layer_name: str, + layer_geom: List[Geometry], + local_box: Tuple[float, float, float, float], + canvas_size: Tuple[int, int]) -> np.ndarray: + """ + Wrapper method that gets the mask for each layer's geometries. + :param layer_name: The name of the layer for which we get the masks. + :param layer_geom: List of the geometries of the layer specified in layer_name. + :param local_box: The local patch box defined as (x_center, y_center, height, width), where typically + x_center = y_center = 0. + :param canvas_size: Size of the output mask (h, w). + """ + if layer_name in self.map_api.non_geometric_polygon_layers: + return self._polygon_geom_to_mask(layer_geom, local_box, layer_name, canvas_size) + elif layer_name in self.map_api.non_geometric_line_layers: + return self._line_geom_to_mask(layer_geom, local_box, layer_name, canvas_size) + else: + raise ValueError("{} is not a valid layer".format(layer_name)) + + @staticmethod + def mask_for_polygons(polygons: MultiPolygon, mask: np.ndarray) -> np.ndarray: + """ + Convert a polygon or multipolygon list to an image mask ndarray. + :param polygons: List of Shapely polygons to be converted to numpy array. + :param mask: Canvas where mask will be generated. + :return: Numpy ndarray polygon mask. + """ + if not polygons: + return mask + + def int_coords(x): + # function to round and convert to int + return np.array(x).round().astype(np.int32) + exteriors = [int_coords(poly.exterior.coords) for poly in polygons] + interiors = [int_coords(pi.coords) for poly in polygons for pi in poly.interiors] + cv2.fillPoly(mask, exteriors, 1) + cv2.fillPoly(mask, interiors, 0) + return mask + + @staticmethod + def mask_for_lines(lines: LineString, mask: np.ndarray) -> np.ndarray: + """ + Convert a Shapely LineString back to an image mask ndarray. + :param lines: List of shapely LineStrings to be converted to a numpy array. + :param mask: Canvas where mask will be generated. + :return: Numpy ndarray line mask. + """ + if lines.geom_type == 'MultiLineString': + for line in lines: + coords = np.asarray(list(line.coords), np.int32) + coords = coords.reshape((-1, 2)) + cv2.polylines(mask, [coords], False, 1, 2) + else: + coords = np.asarray(list(lines.coords), np.int32) + coords = coords.reshape((-1, 2)) + cv2.polylines(mask, [coords], False, 1, 2) + + return mask + + def _polygon_geom_to_mask(self, + layer_geom: List[Polygon], + local_box: Tuple[float, float, float, float], + layer_name: str, + canvas_size: Tuple[int, int]) -> np.ndarray: + """ + Convert polygon inside patch to binary mask and return the map patch. + :param layer_geom: list of polygons for each map layer + :param local_box: The local patch box defined as (x_center, y_center, height, width), where typically + x_center = y_center = 0. + :param layer_name: name of map layer to be converted to binary map mask patch. + :param canvas_size: Size of the output mask (h, w). + :return: Binary map mask patch with the size canvas_size. 
+ """ + if layer_name not in self.map_api.non_geometric_polygon_layers: + raise ValueError('{} is not a polygonal layer'.format(layer_name)) + + patch_x, patch_y, patch_h, patch_w = local_box + + patch = self.get_patch_coord(local_box) + + canvas_h = canvas_size[0] + canvas_w = canvas_size[1] + + scale_height = canvas_h / patch_h + scale_width = canvas_w / patch_w + + trans_x = -patch_x + patch_w / 2.0 + trans_y = -patch_y + patch_h / 2.0 + + map_mask = np.zeros(canvas_size, np.uint8) + + for polygon in layer_geom: + new_polygon = polygon.intersection(patch) + if not new_polygon.is_empty: + new_polygon = affinity.affine_transform(new_polygon, + [1.0, 0.0, 0.0, 1.0, trans_x, trans_y]) + new_polygon = affinity.scale(new_polygon, xfact=scale_width, yfact=scale_height, origin=(0, 0)) + + if new_polygon.geom_type == 'Polygon': + new_polygon = MultiPolygon([new_polygon]) + + # if new_polygon.area < 1000: + # continue + + if not isinstance(new_polygon, MultiPolygon): + print(new_polygon) + + continue + + map_mask = self.mask_for_polygons(new_polygon, map_mask) + + return map_mask + + def _line_geom_to_mask(self, + layer_geom: List[LineString], + local_box: Tuple[float, float, float, float], + layer_name: str, + canvas_size: Tuple[int, int]) -> Optional[np.ndarray]: + """ + Convert line inside patch to binary mask and return the map patch. + :param layer_geom: list of LineStrings for each map layer + :param local_box: The local patch box defined as (x_center, y_center, height, width), where typically + x_center = y_center = 0. + :param layer_name: name of map layer to be converted to binary map mask patch. + :param canvas_size: Size of the output mask (h, w). + :return: Binary map mask patch in a canvas size. + """ + if layer_name not in self.map_api.non_geometric_line_layers: + raise ValueError("{} is not a line layer".format(layer_name)) + + patch_x, patch_y, patch_h, patch_w = local_box + + patch = self.get_patch_coord(local_box) + + canvas_h = canvas_size[0] + canvas_w = canvas_size[1] + scale_height = canvas_h/patch_h + scale_width = canvas_w/patch_w + + trans_x = -patch_x + patch_w / 2.0 + trans_y = -patch_y + patch_h / 2.0 + + map_mask = np.zeros(canvas_size, np.uint8) + + if layer_name == 'traffic_light': + return None + + for line in layer_geom: + new_line = line.intersection(patch) + if not new_line.is_empty: + new_line = affinity.affine_transform(new_line, + [1.0, 0.0, 0.0, 1.0, trans_x, trans_y]) + new_line = affinity.scale(new_line, xfact=scale_width, yfact=scale_height, origin=(0, 0)) + + map_mask = self.mask_for_lines(new_line, map_mask) + return map_mask + + def _get_layer_polygon(self, + patch_box: Tuple[float, float, float, float], + patch_angle: float, + layer_name: str) -> List[Polygon]: + """ + Retrieve the polygons of a particular layer within the specified patch. + :param patch_box: Patch box defined as [x_center, y_center, height, width]. + :param patch_angle: Patch orientation in degrees. + :param layer_name: name of map layer to be extracted. + :return: List of Polygon in a patch box. 
+ """ + if layer_name not in self.map_api.non_geometric_polygon_layers: + raise ValueError('{} is not a polygonal layer'.format(layer_name)) + + patch_x = patch_box[0] + patch_y = patch_box[1] + + patch = self.get_patch_coord(patch_box, patch_angle) + + records = getattr(self.map_api, layer_name) + + polygon_list = [] + if layer_name == 'drivable_area': + for record in records: + polygons = [self.map_api.extract_polygon(polygon_token) for polygon_token in record['polygon_tokens']] + + for polygon in polygons: + new_polygon = polygon.intersection(patch) + if not new_polygon.is_empty: + new_polygon = affinity.rotate(new_polygon, -patch_angle, + origin=(patch_x, patch_y), use_radians=False) + new_polygon = affinity.affine_transform(new_polygon, + [1.0, 0.0, 0.0, 1.0, -patch_x, -patch_y]) + if new_polygon.geom_type == 'Polygon': + new_polygon = MultiPolygon([new_polygon]) + polygon_list.append(new_polygon) + + else: + for record in records: + polygon = self.map_api.extract_polygon(record['polygon_token']) + + if polygon.is_valid: + new_polygon = polygon.intersection(patch) + if not new_polygon.is_empty: + new_polygon = affinity.rotate(new_polygon, -patch_angle, + origin=(patch_x, patch_y), use_radians=False) + new_polygon = affinity.affine_transform(new_polygon, + [1.0, 0.0, 0.0, 1.0, -patch_x, -patch_y]) + if new_polygon.geom_type == 'Polygon': + new_polygon = MultiPolygon([new_polygon]) + polygon_list.append(new_polygon) + + return polygon_list + + def _get_layer_line(self, + patch_box: Tuple[float, float, float, float], + patch_angle: float, + layer_name: str) -> Optional[List[LineString]]: + """ + Retrieve the lines of a particular layer within the specified patch. + :param patch_box: Patch box defined as [x_center, y_center, height, width]. + :param patch_angle: Patch orientation in degrees. + :param layer_name: name of map layer to be converted to binary map mask patch. + :return: List of LineString in a patch box. + """ + if layer_name not in self.map_api.non_geometric_line_layers: + raise ValueError("{} is not a line layer".format(layer_name)) + + if layer_name == 'traffic_light': + return None + + patch_x = patch_box[0] + patch_y = patch_box[1] + + patch = self.get_patch_coord(patch_box, patch_angle) + + line_list = [] + records = getattr(self.map_api, layer_name) + for record in records: + line = self.map_api.extract_line(record['line_token']) + if line.is_empty: # Skip lines without nodes. + continue + + new_line = line.intersection(patch) + if not new_line.is_empty: + new_line = affinity.rotate(new_line, -patch_angle, + origin=(patch_x, patch_y), use_radians=False) + new_line = affinity.affine_transform(new_line, + [1.0, 0.0, 0.0, 1.0, -patch_x, -patch_y]) + line_list.append(new_line) + + return line_list + + @staticmethod + def get_patch_coord(patch_box: Tuple[float, float, float, float], + patch_angle: float = 0.0) -> Polygon: + """ + Convert patch_box to shapely Polygon coordinates. + :param patch_box: Patch box defined as [x_center, y_center, height, width]. + :param patch_angle: Patch orientation in degrees. + :return: Box Polygon for patch_box. 
+ """ + patch_x, patch_y, patch_h, patch_w = patch_box + + x_min = patch_x - patch_w / 2.0 + y_min = patch_y - patch_h / 2.0 + x_max = patch_x + patch_w / 2.0 + y_max = patch_y + patch_h / 2.0 + + patch = box(x_min, y_min, x_max, y_max) + patch = affinity.rotate(patch, patch_angle, origin=(patch_x, patch_y), use_radians=False) + + return patch + + def _get_figsize(self, figsize: Union[None, float, Tuple[float, float]]) -> Tuple[float, float]: + """ + Utility function that scales the figure size by the map canvas size. + If figsize is: + - None => Return default scale. + - Scalar => Scale canvas size. + - Two-tuple => Use the specified figure size. + :param figsize: The input figure size. + :return: The output figure size. + """ + # Divide canvas size by arbitrary scalar to get into cm range. + canvas_size = np.array(self.map_api.canvas_edge)[::-1] / 200 + + if figsize is None: + return tuple(canvas_size) + elif type(figsize) in [int, float]: + return tuple(canvas_size * figsize) + elif type(figsize) == tuple and len(figsize) == 2: + return figsize + else: + raise Exception('Error: Invalid figsize: %s' % figsize) diff --git a/mmcv/datasets/eval_utils/metric_utils.py b/mmcv/datasets/eval_utils/metric_utils.py new file mode 100644 index 0000000..1058703 --- /dev/null +++ b/mmcv/datasets/eval_utils/metric_utils.py @@ -0,0 +1,104 @@ +import torch +import math +import numpy as np +from typing import List, Dict, Tuple, Callable, Union + +def min_ade(traj: torch.Tensor, traj_gt: torch.Tensor, + masks: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Computes average displacement error for the best trajectory is a set, + with respect to ground truth + :param traj: predictions, shape [batch_size, num_modes, sequence_length, 2] + :param traj_gt: ground truth trajectory, shape + [batch_size, sequence_length, 2] + :param masks: masks for varying length ground truth, shape + [batch_size, sequence_length] + :return errs, inds: errors and indices for modes with min error, shape + [batch_size] + """ + num_modes = traj.shape[1] + traj_gt_rpt = traj_gt.unsqueeze(1).repeat(1, num_modes, 1, 1) + masks_rpt = masks.unsqueeze(1).repeat(1, num_modes, 1) + err = traj_gt_rpt - traj[:, :, :, 0:2] + err = torch.pow(err, exponent=2) + err = torch.sum(err, dim=3) + err = torch.pow(err, exponent=0.5) + err = torch.sum(err * (1 - masks_rpt), dim=2) / \ + torch.clip(torch.sum((1 - masks_rpt), dim=2), min=1) + err, inds = torch.min(err, dim=1) + + return err, inds + + +def min_fde(traj: torch.Tensor, traj_gt: torch.Tensor, + masks: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Computes final displacement error for the best trajectory is a set, + with respect to ground truth + :param traj: predictions, shape [batch_size, num_modes, sequence_length, 2] + :param traj_gt: ground truth trajectory, shape + [batch_size, sequence_length, 2] + :param masks: masks for varying length ground truth, shape + [batch_size, sequence_length] + :return errs, inds: errors and indices for modes with min error, + shape [batch_size] + """ + num_modes = traj.shape[1] + traj_gt_rpt = traj_gt.unsqueeze(1).repeat(1, num_modes, 1, 1) + lengths = torch.sum(1 - masks, dim=1).long() + inds = lengths.unsqueeze(1).unsqueeze( + 2).unsqueeze(3).repeat(1, num_modes, 1, 2) - 1 + + traj_last = torch.gather(traj[..., :2], dim=2, index=inds).squeeze(2) + traj_gt_last = torch.gather(traj_gt_rpt, dim=2, index=inds).squeeze(2) + + err = traj_gt_last - traj_last[..., 0:2] + err = torch.pow(err, exponent=2) + err = torch.sum(err, dim=2) + err 
+
+
+def miss_rate(
+        traj: torch.Tensor,
+        traj_gt: torch.Tensor,
+        masks: torch.Tensor,
+        dist_thresh: float = 2) -> torch.Tensor:
+    """
+    Computes miss rate for a mini batch of trajectories,
+    with respect to ground truth and a given distance threshold.
+    :param traj: predictions, shape [batch_size, num_modes, sequence_length, 2]
+    :param traj_gt: ground truth trajectory,
+        shape [batch_size, sequence_length, 2]
+    :param masks: masks for varying length ground truth,
+        shape [batch_size, sequence_length]
+    :param dist_thresh: distance threshold for computing miss rate.
+    :return m_r: fraction of trajectories in the batch whose best mode still
+        exceeds dist_thresh at some valid timestep (scalar tensor).
+    """
+    num_modes = traj.shape[1]
+
+    traj_gt_rpt = traj_gt.unsqueeze(1).repeat(1, num_modes, 1, 1)
+    masks_rpt = masks.unsqueeze(1).repeat(1, num_modes, 1)
+    dist = traj_gt_rpt - traj[:, :, :, 0:2]
+    dist = torch.pow(dist, exponent=2)
+    dist = torch.sum(dist, dim=3)
+    dist = torch.pow(dist, exponent=0.5)
+    dist[masks_rpt.bool()] = -math.inf
+    dist, _ = torch.max(dist, dim=2)
+    dist, _ = torch.min(dist, dim=1)
+    m_r = torch.sum(torch.as_tensor(dist > dist_thresh)) / len(dist)
+
+    return m_r
+
+def traj_fde(gt_box, pred_box, final_step):
+    """Final displacement error between a GT trajectory and the closest predicted mode."""
+    if gt_box.traj.shape[0] <= 0:
+        return np.inf
+    final_step = min(gt_box.traj.shape[0], final_step)
+    gt_final = gt_box.traj[None, final_step - 1]
+    pred_final = np.array(pred_box.traj)[:, final_step - 1, :]
+    err = np.sqrt(np.sum(np.square(gt_final - pred_final), axis=-1))
+    return np.min(err)
\ No newline at end of file
diff --git a/mmcv/datasets/eval_utils/nuscenes_eval.py b/mmcv/datasets/eval_utils/nuscenes_eval.py
new file mode 100644
index 0000000..48a136c
--- /dev/null
+++ b/mmcv/datasets/eval_utils/nuscenes_eval.py
@@ -0,0 +1,705 @@
+import argparse
+import copy
+import json
+import numpy as np
+import os
+import time
+from typing import Tuple, Dict, Any
+import tqdm
+from matplotlib import pyplot as plt
+from pyquaternion import Quaternion
+
+from nuscenes import NuScenes
+from nuscenes.eval.common.config import config_factory
+from nuscenes.eval.common.data_classes import EvalBoxes
+from nuscenes.eval.common.loaders import load_prediction, load_gt, add_center_dist, filter_eval_boxes
+from nuscenes.eval.common.render import setup_axis
+from nuscenes.eval.detection.algo import accumulate, calc_ap, calc_tp
+from nuscenes.eval.detection.constants import TP_METRICS, TP_METRICS_UNITS, PRETTY_DETECTION_NAMES, PRETTY_TP_METRICS
+from nuscenes.eval.detection.data_classes import DetectionConfig, DetectionMetrics, DetectionBox, DetectionMetricDataList
+from nuscenes.eval.detection.evaluate import NuScenesEval
+from nuscenes.eval.detection.render import summary_plot, class_pr_curve, dist_pr_curve
+from nuscenes.eval.tracking.data_classes import TrackingBox
+from nuscenes.utils.data_classes import Box
+from nuscenes.utils.geometry_utils import view_points, BoxVisibility
+from nuscenes.utils.splits import create_splits_scenes
+from nuscenes.eval.detection.utils import category_to_detection_name
+
+
+Axis = Any
+
+def class_tp_curve(md_list: DetectionMetricDataList,
+                   metrics: DetectionMetrics,
+                   detection_name: str,
+                   min_recall: float,
+                   dist_th_tp: float,
+                   savepath: str = None,
+                   ax: Axis = None) -> None:
+    """
+    Plot the true positive curve for the specified class.
+    :param md_list: DetectionMetricDataList instance.
+    :param metrics: DetectionMetrics instance.
+ :param detection_name: + :param min_recall: Minimum recall value. + :param dist_th_tp: The distance threshold used to determine matches. + :param savepath: If given, saves the the rendering here instead of displaying. + :param ax: Axes onto which to render. + """ + # Get metric data for given detection class with tp distance threshold. + + md = md_list[(detection_name, dist_th_tp)] + min_recall_ind = round(100 * min_recall) + if min_recall_ind <= md.max_recall_ind: + # For traffic_cone and barrier only a subset of the metrics are plotted. + rel_metrics = [m for m in TP_METRICS if not np.isnan(metrics.get_label_tp(detection_name, m))] + ylimit = max([max(getattr(md, metric)[min_recall_ind:md.max_recall_ind + 1]) for metric in rel_metrics]) * 1.1 + else: + ylimit = 1.0 + + # Prepare axis. + if ax is None: + ax = setup_axis(title=PRETTY_DETECTION_NAMES[detection_name], xlabel='Recall', ylabel='Error', xlim=1, + min_recall=min_recall) + ax.set_ylim(0, ylimit) + + # Plot the recall vs. error curve for each tp metric. + for metric in TP_METRICS: + tp = metrics.get_label_tp(detection_name, metric) + + # Plot only if we have valid data. + if tp is not np.nan and min_recall_ind <= md.max_recall_ind: + recall, error = md.recall[:md.max_recall_ind + 1], getattr(md, metric)[:md.max_recall_ind + 1] + else: + recall, error = [], [] + + # Change legend based on tp value + if tp is np.nan: + label = '{}: n/a'.format(PRETTY_TP_METRICS[metric]) + elif min_recall_ind > md.max_recall_ind: + label = '{}: nan'.format(PRETTY_TP_METRICS[metric]) + else: + label = '{}: {:.2f} ({})'.format(PRETTY_TP_METRICS[metric], tp, TP_METRICS_UNITS[metric]) + if metric == 'trans_err': + label += f' ({md.max_recall_ind})' # add recall + print(f'Recall: {detection_name}: {md.max_recall_ind/100}') + ax.plot(recall, error, label=label) + ax.axvline(x=md.max_recall, linestyle='-.', color=(0, 0, 0, 0.3)) + ax.legend(loc='best') + + if savepath is not None: + plt.savefig(savepath) + plt.close() + + +class DetectionBox_modified(DetectionBox): + def __init__(self, *args, token=None, visibility=None, index=None, **kwargs): + ''' + add annotation token + ''' + super().__init__(*args, **kwargs) + self.token = token + self.visibility = visibility + self.index = index + + def serialize(self) -> dict: + """ Serialize instance into json-friendly format. """ + return { + 'token': self.token, + 'sample_token': self.sample_token, + 'translation': self.translation, + 'size': self.size, + 'rotation': self.rotation, + 'velocity': self.velocity, + 'ego_translation': self.ego_translation, + 'num_pts': self.num_pts, + 'detection_name': self.detection_name, + 'detection_score': self.detection_score, + 'attribute_name': self.attribute_name, + 'visibility': self.visibility, + 'index': self.index + + } + + @classmethod + def deserialize(cls, content: dict): + """ Initialize from serialized content. 
""" + return cls( + token=content['token'], + sample_token=content['sample_token'], + translation=tuple(content['translation']), + size=tuple(content['size']), + rotation=tuple(content['rotation']), + velocity=tuple(content['velocity']), + ego_translation=(0.0, 0.0, 0.0) if 'ego_translation' not in content + else tuple(content['ego_translation']), + num_pts=-1 if 'num_pts' not in content else int(content['num_pts']), + detection_name=content['detection_name'], + detection_score=-1.0 if 'detection_score' not in content else float(content['detection_score']), + attribute_name=content['attribute_name'], + visibility=content['visibility'], + index=content['index'], + ) + + +def center_in_image(box, intrinsic: np.ndarray, imsize: Tuple[int, int], vis_level: int = BoxVisibility.ANY) -> bool: + """ + Check if a box is visible inside an image without accounting for occlusions. + :param box: The box to be checked. + :param intrinsic: . Intrinsic camera matrix. + :param imsize: (width, height). + :param vis_level: One of the enumerations of . + :return True if visibility condition is satisfied. + """ + + center_3d = box.center.reshape(3, 1) + center_img = view_points(center_3d, intrinsic, normalize=True)[:2, :] + + visible = np.logical_and(center_img[0, :] > 0, center_img[0, :] < imsize[0]) + visible = np.logical_and(visible, center_img[1, :] < imsize[1]) + visible = np.logical_and(visible, center_img[1, :] > 0) + visible = np.logical_and(visible, center_3d[2, :] > 1) + + in_front = center_3d[2, :] > 0.1 # True if a corner is at least 0.1 meter in front of the camera. + + if vis_level == BoxVisibility.ALL: + return all(visible) and all(in_front) + elif vis_level == BoxVisibility.ANY: + return any(visible) and all(in_front) + elif vis_level == BoxVisibility.NONE: + return True + else: + raise ValueError("vis_level: {} not valid".format(vis_level)) + + +def exist_corners_in_image_but_not_all(box, intrinsic: np.ndarray, imsize: Tuple[int, int], + vis_level: int = BoxVisibility.ANY) -> bool: + """ + Check if a box is visible in images but not all corners in image . + :param box: The box to be checked. + :param intrinsic: . Intrinsic camera matrix. + :param imsize: (width, height). + :param vis_level: One of the enumerations of . + :return True if visibility condition is satisfied. + """ + + corners_3d = box.corners() + corners_img = view_points(corners_3d, intrinsic, normalize=True)[:2, :] + + visible = np.logical_and(corners_img[0, :] > 0, corners_img[0, :] < imsize[0]) + visible = np.logical_and(visible, corners_img[1, :] < imsize[1]) + visible = np.logical_and(visible, corners_img[1, :] > 0) + visible = np.logical_and(visible, corners_3d[2, :] > 1) + + in_front = corners_3d[2, :] > 0.1 # True if a corner is at least 0.1 meter in front of the camera. + + if any(visible) and not all(visible) and all(in_front): + return True + else: + return False + + +def load_gt(nusc: NuScenes, eval_split: str, box_cls, verbose: bool = False): + """ + Loads ground truth boxes from DB. + :param nusc: A NuScenes instance. + :param eval_split: The evaluation split for which we load GT boxes. + :param box_cls: Type of box to load, e.g. DetectionBox or TrackingBox. + :param verbose: Whether to print messages to stdout. + :return: The GT boxes. + """ + + # Init. + if box_cls == DetectionBox_modified: + attribute_map = {a['token']: a['name'] for a in nusc.attribute} + + if verbose: + print('Loading annotations for {} split from nuScenes version: {}'.format(eval_split, nusc.version)) + # Read out all sample_tokens in DB. 
+ sample_tokens_all = [s['token'] for s in nusc.sample] + assert len(sample_tokens_all) > 0, "Error: Database has no samples!" + + # Only keep samples from this split. + splits = create_splits_scenes() + + # Check compatibility of split with nusc_version. + version = nusc.version + if eval_split in {'train', 'val', 'train_detect', 'train_track'}: + assert version.endswith('trainval'), \ + 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version) + elif eval_split in {'mini_train', 'mini_val'}: + assert version.endswith('mini'), \ + 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version) + elif eval_split == 'test': + assert version.endswith('test'), \ + 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version) + else: + raise ValueError('Error: Requested split {} which this function cannot map to the correct NuScenes version.' + .format(eval_split)) + + if eval_split == 'test': + # Check that you aren't trying to cheat :). + assert len(nusc.sample_annotation) > 0, \ + 'Error: You are trying to evaluate on the test set but you do not have the annotations!' + index_map = {} + for scene in nusc.scene: + first_sample_token = scene['first_sample_token'] + sample = nusc.get('sample', first_sample_token) + index_map[first_sample_token] = 1 + index = 2 + while sample['next'] != '': + sample = nusc.get('sample', sample['next']) + index_map[sample['token']] = index + index += 1 + + sample_tokens = [] + for sample_token in sample_tokens_all: + scene_token = nusc.get('sample', sample_token)['scene_token'] + scene_record = nusc.get('scene', scene_token) + if scene_record['name'] in splits[eval_split]: + sample_tokens.append(sample_token) + + all_annotations = EvalBoxes() + + # Load annotations and filter predictions and annotations. + tracking_id_set = set() + for sample_token in tqdm.tqdm(sample_tokens, leave=verbose): + + sample = nusc.get('sample', sample_token) + sample_annotation_tokens = sample['anns'] + + sample_boxes = [] + for sample_annotation_token in sample_annotation_tokens: + + sample_annotation = nusc.get('sample_annotation', sample_annotation_token) + if box_cls == DetectionBox_modified: + # Get label name in detection task and filter unused labels. + detection_name = category_to_detection_name(sample_annotation['category_name']) + if detection_name is None: + continue + + # Get attribute_name. + attr_tokens = sample_annotation['attribute_tokens'] + attr_count = len(attr_tokens) + if attr_count == 0: + attribute_name = '' + elif attr_count == 1: + attribute_name = attribute_map[attr_tokens[0]] + else: + raise Exception('Error: GT annotations must not have more than one attribute!') + + sample_boxes.append( + box_cls( + token=sample_annotation_token, + sample_token=sample_token, + translation=sample_annotation['translation'], + size=sample_annotation['size'], + rotation=sample_annotation['rotation'], + velocity=nusc.box_velocity(sample_annotation['token'])[:2], + num_pts=sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts'], + detection_name=detection_name, + detection_score=-1.0, # GT samples do not have a score. + attribute_name=attribute_name, + visibility=sample_annotation['visibility_token'], + index=index_map[sample_token] + ) + ) + elif box_cls == TrackingBox: + assert False + else: + raise NotImplementedError('Error: Invalid box_cls %s!' 
+                                              % box_cls)
+
+        all_annotations.add_boxes(sample_token, sample_boxes)
+
+    if verbose:
+        print("Loaded ground truth annotations for {} samples.".format(len(all_annotations.sample_tokens)))
+
+    return all_annotations
+
+
+def filter_eval_boxes_by_id(nusc: NuScenes,
+                            eval_boxes: EvalBoxes,
+                            id=None,
+                            verbose: bool = False) -> EvalBoxes:
+    """
+    Filters boxes, keeping only those whose annotation token is in the given id set.
+    :param nusc: An instance of the NuScenes class.
+    :param eval_boxes: An instance of the EvalBoxes class.
+    :param id: The annotation-token set used to keep boxes.
+    :param verbose: Whether to print to stdout.
+    """
+
+    # Accumulators for number of filtered boxes.
+    total, anns_filter = 0, 0
+    for ind, sample_token in enumerate(eval_boxes.sample_tokens):
+
+        # Filter on anns
+        total += len(eval_boxes[sample_token])
+        filtered_boxes = []
+        for box in eval_boxes[sample_token]:
+            if box.token in id:
+                filtered_boxes.append(box)
+        anns_filter += len(filtered_boxes)
+        eval_boxes.boxes[sample_token] = filtered_boxes
+
+    if verbose:
+        print("=> Original number of boxes: %d" % total)
+        print("=> After anns based filtering: %d" % anns_filter)
+
+    return eval_boxes
+
+
+def filter_eval_boxes_by_visibility(
+        ori_eval_boxes: EvalBoxes,
+        visibility=None,
+        verbose: bool = False) -> EvalBoxes:
+    """
+    Filters boxes, keeping only those with the given nuScenes visibility token.
+    :param ori_eval_boxes: An instance of the EvalBoxes class.
+    :param visibility: The visibility token used to keep boxes.
+    :param verbose: Whether to print to stdout.
+    """
+
+    # Work on a deep copy so the original boxes stay untouched.
+    eval_boxes = copy.deepcopy(ori_eval_boxes)
+    total, anns_filter = 0, 0
+    for ind, sample_token in enumerate(eval_boxes.sample_tokens):
+        # Filter on anns
+        total += len(eval_boxes[sample_token])
+        filtered_boxes = []
+        for box in eval_boxes[sample_token]:
+            if box.visibility == visibility:
+                filtered_boxes.append(box)
+        anns_filter += len(filtered_boxes)
+        eval_boxes.boxes[sample_token] = filtered_boxes
+
+    if verbose:
+        print("=> Original number of boxes: %d" % total)
+        print("=> After visibility based filtering: %d" % anns_filter)
+
+    return eval_boxes
+
+
+def filter_by_sample_token(ori_eval_boxes, valid_sample_tokens=[], verbose=False):
+    eval_boxes = copy.deepcopy(ori_eval_boxes)
+    for sample_token in eval_boxes.sample_tokens:
+        if sample_token not in valid_sample_tokens:
+            eval_boxes.boxes.pop(sample_token)
+    return eval_boxes
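+
+# A minimal usage sketch for the token-based filter above (the token value is a
+# hypothetical placeholder, not a real annotation token):
+#
+#     keep_tokens = {'6dd2cbf4c24b4caeb625035869bca7b5'}
+#     gt_subset = filter_eval_boxes_by_id(nusc, gt_boxes, id=keep_tokens, verbose=True)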
+
+
+def filter_eval_boxes_by_overlap(nusc: NuScenes,
+                                 eval_boxes: EvalBoxes,
+                                 verbose: bool = False) -> EvalBoxes:
+    """
+    Applies filtering to boxes based on camera overlap: keeps boxes whose center
+    is visible in more than one camera.
+    :param nusc: An instance of the NuScenes class.
+    :param eval_boxes: An instance of the EvalBoxes class.
+    :param verbose: Whether to print to stdout.
+    """
+
+    # Accumulators for number of filtered boxes.
+    cams = ['CAM_FRONT',
+            'CAM_FRONT_RIGHT',
+            'CAM_BACK_RIGHT',
+            'CAM_BACK',
+            'CAM_BACK_LEFT',
+            'CAM_FRONT_LEFT']
+
+    total, anns_filter = 0, 0
+    for ind, sample_token in enumerate(eval_boxes.sample_tokens):
+
+        # Filter on anns
+        total += len(eval_boxes[sample_token])
+        sample_record = nusc.get('sample', sample_token)
+        filtered_boxes = []
+        for box in eval_boxes[sample_token]:
+            count = 0
+            for cam in cams:
+                # Sensor-frame transformation below is adapted from nuscenes-devkit.
+                sample_data_token = sample_record['data'][cam]
+                sd_record = nusc.get('sample_data', sample_data_token)
+                cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
+                sensor_record = nusc.get('sensor', cs_record['sensor_token'])
+                pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])
+                cam_intrinsic = np.array(cs_record['camera_intrinsic'])
+                imsize = (sd_record['width'], sd_record['height'])
+                new_box = Box(box.translation, box.size, Quaternion(box.rotation),
+                              name=box.detection_name, token='')
+
+                # Move box to ego vehicle coord system.
+                new_box.translate(-np.array(pose_record['translation']))
+                new_box.rotate(Quaternion(pose_record['rotation']).inverse)
+
+                # Move box to sensor coord system.
+                new_box.translate(-np.array(cs_record['translation']))
+                new_box.rotate(Quaternion(cs_record['rotation']).inverse)
+
+                if center_in_image(new_box, cam_intrinsic, imsize, vis_level=BoxVisibility.ANY):
+                    count += 1
+                # if exist_corners_in_image_but_not_all(new_box, cam_intrinsic, imsize, vis_level=BoxVisibility.ANY):
+                #     count += 1
+
+            if count > 1:
+                with open('center_overlap.txt', 'a') as f:
+                    try:
+                        f.write(box.token + '\n')
+                    except Exception:
+                        pass
+                filtered_boxes.append(box)
+        anns_filter += len(filtered_boxes)
+        eval_boxes.boxes[sample_token] = filtered_boxes
+
+    if verbose:
+        print("=> Original number of boxes: %d" % total)
+        print("=> After anns based filtering: %d" % anns_filter)
+
+    return eval_boxes
+
+
+class NuScenesEval_custom(NuScenesEval):
+    """
+    Custom nuScenes detection evaluation with visibility- and frame-index-based
+    ground-truth filtering.
+    """
+
+    def __init__(self,
+                 nusc: NuScenes,
+                 config: DetectionConfig,
+                 result_path: str,
+                 eval_set: str,
+                 output_dir: str = None,
+                 verbose: bool = True,
+                 overlap_test=False,
+                 eval_mask=False,
+                 data_infos=None
+                 ):
+        """
+        Initialize a DetectionEval object.
+        :param nusc: A NuScenes object.
+        :param config: A DetectionConfig object.
+        :param result_path: Path of the nuScenes JSON result file.
+        :param eval_set: The dataset split to evaluate on, e.g. train, val or test.
+        :param output_dir: Folder to save plots and results to.
+        :param verbose: Whether to print to stdout.
+        """
+
+        self.nusc = nusc
+        self.result_path = result_path
+        self.eval_set = eval_set
+        self.output_dir = output_dir
+        self.verbose = verbose
+        self.cfg = config
+        self.overlap_test = overlap_test
+        self.eval_mask = eval_mask
+        self.data_infos = data_infos
+        # Check result file exists.
+        assert os.path.exists(result_path), 'Error: The result file does not exist!'
+
+        # Make dirs.
+        self.plot_dir = os.path.join(self.output_dir, 'plots')
+        if not os.path.isdir(self.output_dir):
+            os.makedirs(self.output_dir)
+        if not os.path.isdir(self.plot_dir):
+            os.makedirs(self.plot_dir)
+
+        # Load data.
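+        # The result file is expected to follow the standard nuScenes submission
+        # layout (hedged sketch; field values are illustrative):
+        #   {
+        #     "meta":    {"use_camera": true, "use_lidar": false, ...},
+        #     "results": {<sample_token>: [{"translation": [x, y, z],
+        #                                   "size": [w, l, h],
+        #                                   "rotation": [w, x, y, z],
+        #                                   "velocity": [vx, vy],
+        #                                   "detection_name": "car",
+        #                                   "detection_score": 0.83,
+        #                                   "attribute_name": "vehicle.moving"}, ...]}
+        #   }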
+ if verbose: + print('Initializing nuScenes detection evaluation') + self.pred_boxes, self.meta = load_prediction(self.result_path, self.cfg.max_boxes_per_sample, DetectionBox, + verbose=verbose) + self.gt_boxes = load_gt(self.nusc, self.eval_set, DetectionBox_modified, verbose=verbose) + + # assert set(self.pred_boxes.sample_tokens) == set(self.gt_boxes.sample_tokens), \ + # "Samples in split doesn't match samples in predictions." + + # Add center distances. + self.pred_boxes = add_center_dist(nusc, self.pred_boxes) + self.gt_boxes = add_center_dist(nusc, self.gt_boxes) + + # Filter boxes (distance, points per box, etc.). + + if verbose: + print('Filtering predictions') + self.pred_boxes = filter_eval_boxes(nusc, self.pred_boxes, self.cfg.class_range, verbose=verbose) + if verbose: + print('Filtering ground truth annotations') + self.gt_boxes = filter_eval_boxes(nusc, self.gt_boxes, self.cfg.class_range, verbose=verbose) + + if self.overlap_test: + self.pred_boxes = filter_eval_boxes_by_overlap(self.nusc, self.pred_boxes) + + self.gt_boxes = filter_eval_boxes_by_overlap(self.nusc, self.gt_boxes, verbose=True) + + self.all_gt = copy.deepcopy(self.gt_boxes) + self.all_preds = copy.deepcopy(self.pred_boxes) + self.sample_tokens = self.gt_boxes.sample_tokens + + self.index_map = {} + for scene in nusc.scene: + first_sample_token = scene['first_sample_token'] + sample = nusc.get('sample', first_sample_token) + self.index_map[first_sample_token] = 1 + index = 2 + while sample['next'] != '': + sample = nusc.get('sample', sample['next']) + self.index_map[sample['token']] = index + index += 1 + + def update_gt(self, type_='vis', visibility='1', index=1): + if type_ == 'vis': + self.visibility_test = True + if self.visibility_test: + '''[{'description': 'visibility of whole object is between 0 and 40%', + 'token': '1', + 'level': 'v0-40'}, + {'description': 'visibility of whole object is between 40 and 60%', + 'token': '2', + 'level': 'v40-60'}, + {'description': 'visibility of whole object is between 60 and 80%', + 'token': '3', + 'level': 'v60-80'}, + {'description': 'visibility of whole object is between 80 and 100%', + 'token': '4', + 'level': 'v80-100'}]''' + + self.gt_boxes = filter_eval_boxes_by_visibility(self.all_gt, visibility, verbose=True) + + elif type_ == 'ord': + + valid_tokens = [key for (key, value) in self.index_map.items() if value == index] + # from IPython import embed + # embed() + self.gt_boxes = filter_by_sample_token(self.all_gt, valid_tokens) + self.pred_boxes = filter_by_sample_token(self.all_preds, valid_tokens) + self.sample_tokens = self.gt_boxes.sample_tokens + + + def evaluate(self) -> Tuple[DetectionMetrics, DetectionMetricDataList]: + """ + Performs the actual evaluation. + :return: A tuple of high-level and the raw metric data. + """ + start_time = time.time() + + # ----------------------------------- + # Step 1: Accumulate metric data for all classes and distance thresholds. + # ----------------------------------- + if self.verbose: + print('Accumulating metric data...') + metric_data_list = DetectionMetricDataList() + + # print(self.cfg.dist_fcn_callable, self.cfg.dist_ths) + # self.cfg.dist_ths = [0.3] + # self.cfg.dist_fcn_callable + for class_name in self.cfg.class_names: + for dist_th in self.cfg.dist_ths: + md = accumulate(self.gt_boxes, self.pred_boxes, class_name, self.cfg.dist_fcn_callable, dist_th) + metric_data_list.set(class_name, dist_th, md) + + # ----------------------------------- + # Step 2: Calculate metrics from the data. 
+ # ----------------------------------- + if self.verbose: + print('Calculating metrics...') + metrics = DetectionMetrics(self.cfg) + for class_name in self.cfg.class_names: + # Compute APs. + for dist_th in self.cfg.dist_ths: + metric_data = metric_data_list[(class_name, dist_th)] + ap = calc_ap(metric_data, self.cfg.min_recall, self.cfg.min_precision) + metrics.add_label_ap(class_name, dist_th, ap) + # Compute TP metrics. + for metric_name in TP_METRICS: + metric_data = metric_data_list[(class_name, self.cfg.dist_th_tp)] + if class_name in ['traffic_cone'] and metric_name in ['attr_err', 'vel_err', 'orient_err']: + tp = np.nan + elif class_name in ['barrier'] and metric_name in ['attr_err', 'vel_err']: + tp = np.nan + else: + tp = calc_tp(metric_data, self.cfg.min_recall, metric_name) + metrics.add_label_tp(class_name, metric_name, tp) + + # Compute evaluation time. + metrics.add_runtime(time.time() - start_time) + + return metrics, metric_data_list + + def render(self, metrics: DetectionMetrics, md_list: DetectionMetricDataList) -> None: + """ + Renders various PR and TP curves. + :param metrics: DetectionMetrics instance. + :param md_list: DetectionMetricDataList instance. + """ + if self.verbose: + print('Rendering PR and TP curves') + + def savepath(name): + return os.path.join(self.plot_dir, name + '.pdf') + + summary_plot(md_list, metrics, min_precision=self.cfg.min_precision, min_recall=self.cfg.min_recall, + dist_th_tp=self.cfg.dist_th_tp, savepath=savepath('summary')) + + for detection_name in self.cfg.class_names: + class_pr_curve(md_list, metrics, detection_name, self.cfg.min_precision, self.cfg.min_recall, + savepath=savepath(detection_name + '_pr')) + + class_tp_curve(md_list, metrics, detection_name, self.cfg.min_recall, self.cfg.dist_th_tp, + savepath=savepath(detection_name + '_tp')) + + for dist_th in self.cfg.dist_ths: + dist_pr_curve(md_list, metrics, dist_th, self.cfg.min_precision, self.cfg.min_recall, + savepath=savepath('dist_pr_' + str(dist_th))) + + +if __name__ == "__main__": + + # Settings. + parser = argparse.ArgumentParser(description='Evaluate nuScenes detection results.', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('result_path', type=str, help='The submission as a JSON file.') + parser.add_argument('--output_dir', type=str, default='~/nuscenes-metrics', + help='Folder to store result metrics, graphs and example visualizations.') + parser.add_argument('--eval_set', type=str, default='val', + help='Which dataset split to evaluate on, train, val or test.') + parser.add_argument('--dataroot', type=str, default='data/nuscenes', + help='Default nuScenes data directory.') + parser.add_argument('--version', type=str, default='v1.0-trainval', + help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.') + parser.add_argument('--config_path', type=str, default='', + help='Path to the configuration file.' 
+                             'If no path given, the CVPR 2019 configuration will be used.')
+    parser.add_argument('--plot_examples', type=int, default=0,
+                        help='How many example visualizations to write to disk.')
+    parser.add_argument('--render_curves', type=int, default=1,
+                        help='Whether to render PR and TP curves to disk.')
+    parser.add_argument('--verbose', type=int, default=1,
+                        help='Whether to print to stdout.')
+    args = parser.parse_args()
+
+    result_path_ = os.path.expanduser(args.result_path)
+    output_dir_ = os.path.expanduser(args.output_dir)
+    eval_set_ = args.eval_set
+    dataroot_ = args.dataroot
+    version_ = args.version
+    config_path = args.config_path
+    plot_examples_ = args.plot_examples
+    render_curves_ = bool(args.render_curves)
+    verbose_ = bool(args.verbose)
+
+    if config_path == '':
+        cfg_ = config_factory('detection_cvpr_2019')
+    else:
+        with open(config_path, 'r') as _f:
+            cfg_ = DetectionConfig.deserialize(json.load(_f))
+
+    nusc_ = NuScenes(version=version_, verbose=verbose_, dataroot=dataroot_)
+    nusc_eval = NuScenesEval_custom(nusc_, config=cfg_, result_path=result_path_, eval_set=eval_set_,
+                                    output_dir=output_dir_, verbose=verbose_)
+    for vis in ['1', '2', '3', '4']:
+        nusc_eval.update_gt(type_='vis', visibility=vis)
+        print(f'================ {vis} ===============')
+        nusc_eval.main(plot_examples=plot_examples_, render_curves=render_curves_)
diff --git a/mmcv/datasets/eval_utils/nuscenes_eval_motion.py b/mmcv/datasets/eval_utils/nuscenes_eval_motion.py
new file mode 100644
index 0000000..8ff66f0
--- /dev/null
+++ b/mmcv/datasets/eval_utils/nuscenes_eval_motion.py
@@ -0,0 +1,933 @@
+import argparse
+import copy
+import json
+import os
+import random
+import time
+from typing import Tuple, Dict, Any
+
+import numpy as np
+from matplotlib import pyplot as plt
+import tqdm
+import pycocotools.mask as mask_util
+from pyquaternion import Quaternion
+from IPython import embed
+
+from nuscenes import NuScenes
+from nuscenes.eval.common.config import config_factory
+from nuscenes.eval.common.data_classes import EvalBoxes
+from nuscenes.eval.common.loaders import add_center_dist, filter_eval_boxes
+from nuscenes.eval.common.render import setup_axis
+from nuscenes.eval.common.utils import quaternion_yaw, boxes_to_sensor
+from nuscenes.eval.detection.algo import calc_ap, calc_tp
+from nuscenes.eval.detection.data_classes import DetectionConfig, DetectionBox
+from nuscenes.eval.detection.evaluate import NuScenesEval
+from nuscenes.eval.detection.render import summary_plot, class_pr_curve, dist_pr_curve, visualize_sample
+from nuscenes.utils.data_classes import Box
+from nuscenes.utils.geometry_utils import BoxVisibility
+from mmcv.core.bbox.iou_calculators import BboxOverlaps3D
+from 
nuscenes.eval.detection.constants import TP_METRICS, DETECTION_NAMES, DETECTION_COLORS, TP_METRICS_UNITS, \ + PRETTY_DETECTION_NAMES, PRETTY_TP_METRICS +from nuscenes.eval.detection.data_classes import DetectionMetrics, DetectionMetricData, DetectionMetricDataList +from nuscenes.utils.data_classes import LidarPointCloud +from nuscenes.utils.geometry_utils import view_points +from .eval_utils import load_prediction, load_gt, accumulate, accumulate_motion, \ + DetectionMotionBox, DetectionMotionBox_modified, DetectionMotionMetricData, \ + DetectionMotionMetrics, DetectionMotionMetricDataList +from .metric_utils import traj_fde +from prettytable import PrettyTable + +TP_METRICS = [ + 'trans_err', + 'scale_err', + 'orient_err', + 'vel_err', + 'attr_err', + 'min_ade_err', + 'min_fde_err', + 'miss_rate_err'] +TP_TRAJ_METRICS = ['min_ade_err', 'min_fde_err', 'miss_rate_err'] +Axis = Any + + +def class_tp_curve(md_list: DetectionMetricDataList, + metrics: DetectionMetrics, + detection_name: str, + min_recall: float, + dist_th_tp: float, + savepath: str = None, + ax: Axis = None) -> None: + """ + Plot the true positive curve for the specified class. + :param md_list: DetectionMetricDataList instance. + :param metrics: DetectionMetrics instance. + :param detection_name: + :param min_recall: Minimum recall value. + :param dist_th_tp: The distance threshold used to determine matches. + :param savepath: If given, saves the the rendering here instead of displaying. + :param ax: Axes onto which to render. + """ + # Get metric data for given detection class with tp distance threshold. + + md = md_list[(detection_name, dist_th_tp)] + min_recall_ind = round(100 * min_recall) + if min_recall_ind <= md.max_recall_ind: + # For traffic_cone and barrier only a subset of the metrics are + # plotted. + rel_metrics = [ + m for m in TP_METRICS if not np.isnan( + metrics.get_label_tp( + detection_name, m))] + ylimit = max([max(getattr(md, metric)[min_recall_ind:md.max_recall_ind + 1]) + for metric in rel_metrics]) * 1.1 + else: + ylimit = 1.0 + + # Prepare axis. + if ax is None: + ax = setup_axis( + title=PRETTY_DETECTION_NAMES[detection_name], + xlabel='Recall', + ylabel='Error', + xlim=1, + min_recall=min_recall) + ax.set_ylim(0, ylimit) + + # Plot the recall vs. error curve for each tp metric. + for metric in TP_METRICS: + tp = metrics.get_label_tp(detection_name, metric) + + # Plot only if we have valid data. + if tp is not np.nan and min_recall_ind <= md.max_recall_ind: + recall, error = md.recall[:md.max_recall_ind + + 1], getattr(md, metric)[:md.max_recall_ind + 1] + else: + recall, error = [], [] + + # Change legend based on tp value + if tp is np.nan: + label = '{}: n/a'.format(PRETTY_TP_METRICS[metric]) + elif min_recall_ind > md.max_recall_ind: + label = '{}: nan'.format(PRETTY_TP_METRICS[metric]) + else: + label = '{}: {:.2f} ({})'.format( + PRETTY_TP_METRICS[metric], tp, TP_METRICS_UNITS[metric]) + if metric == 'trans_err': + label += f' ({md.max_recall_ind})' # add recall + print(f'Recall: {detection_name}: {md.max_recall_ind/100}') + ax.plot(recall, error, label=label) + ax.axvline(x=md.max_recall, linestyle='-.', color=(0, 0, 0, 0.3)) + ax.legend(loc='best') + + if savepath is not None: + plt.savefig(savepath) + plt.close() + + +def center_in_image(box, + intrinsic: np.ndarray, + imsize: Tuple[int, + int], + vis_level: int = BoxVisibility.ANY) -> bool: + """ + Check if a box is visible inside an image without accounting for occlusions. + :param box: The box to be checked. + :param intrinsic: . 
Intrinsic camera matrix.
+    :param imsize: (width, height).
+    :param vis_level: One of the enumerations of BoxVisibility.
+    :return True if visibility condition is satisfied.
+    """
+
+    center_3d = box.center.reshape(3, 1)
+    center_img = view_points(center_3d, intrinsic, normalize=True)[:2, :]
+
+    visible = np.logical_and(
+        center_img[0, :] > 0, center_img[0, :] < imsize[0])
+    visible = np.logical_and(visible, center_img[1, :] < imsize[1])
+    visible = np.logical_and(visible, center_img[1, :] > 0)
+    visible = np.logical_and(visible, center_3d[2, :] > 1)
+
+    # True if the center is at least 0.1 meter in front of the camera.
+    in_front = center_3d[2, :] > 0.1
+
+    if vis_level == BoxVisibility.ALL:
+        return all(visible) and all(in_front)
+    elif vis_level == BoxVisibility.ANY:
+        return any(visible) and all(in_front)
+    elif vis_level == BoxVisibility.NONE:
+        return True
+    else:
+        raise ValueError("vis_level: {} not valid".format(vis_level))
+
+
+def exist_corners_in_image_but_not_all(box,
+                                       intrinsic: np.ndarray,
+                                       imsize: Tuple[int, int],
+                                       vis_level: int = BoxVisibility.ANY) -> bool:
+    """
+    Check if a box is visible in an image, but not with all of its corners.
+    :param box: The box to be checked.
+    :param intrinsic: Intrinsic camera matrix.
+    :param imsize: (width, height).
+    :param vis_level: One of the enumerations of BoxVisibility.
+    :return True if visibility condition is satisfied.
+    """
+
+    corners_3d = box.corners()
+    corners_img = view_points(corners_3d, intrinsic, normalize=True)[:2, :]
+
+    visible = np.logical_and(
+        corners_img[0, :] > 0, corners_img[0, :] < imsize[0])
+    visible = np.logical_and(visible, corners_img[1, :] < imsize[1])
+    visible = np.logical_and(visible, corners_img[1, :] > 0)
+    visible = np.logical_and(visible, corners_3d[2, :] > 1)
+
+    # True if a corner is at least 0.1 meter in front of the camera.
+    in_front = corners_3d[2, :] > 0.1
+
+    if any(visible) and not all(visible) and all(in_front):
+        return True
+    else:
+        return False
+
+
+def filter_eval_boxes_by_id(nusc: NuScenes,
+                            eval_boxes: EvalBoxes,
+                            id=None,
+                            verbose: bool = False) -> EvalBoxes:
+    """
+    Filters boxes, keeping only those whose annotation token is in the given id set.
+    :param nusc: An instance of the NuScenes class.
+    :param eval_boxes: An instance of the EvalBoxes class.
+    :param id: The annotation-token set used to keep boxes.
+    :param verbose: Whether to print to stdout.
+    """
+
+    # Accumulators for number of filtered boxes.
+    total, anns_filter = 0, 0
+    for ind, sample_token in enumerate(eval_boxes.sample_tokens):
+
+        # Filter on anns
+        total += len(eval_boxes[sample_token])
+        filtered_boxes = []
+        for box in eval_boxes[sample_token]:
+            if box.token in id:
+                filtered_boxes.append(box)
+        anns_filter += len(filtered_boxes)
+        eval_boxes.boxes[sample_token] = filtered_boxes
+
+    if verbose:
+        print("=> Original number of boxes: %d" % total)
+        print("=> After anns based filtering: %d" % anns_filter)
+
+    return eval_boxes
+
+
+def filter_eval_boxes_by_visibility(
+        ori_eval_boxes: EvalBoxes,
+        visibility=None,
+        verbose: bool = False) -> EvalBoxes:
+    """
+    Filters boxes, keeping only those with the given nuScenes visibility token.
+    :param ori_eval_boxes: An instance of the EvalBoxes class.
+    :param visibility: The visibility token used to keep boxes.
+    :param verbose: Whether to print to stdout.
+    """
+
+    # Accumulators for number of filtered boxes.
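+    # (nuScenes visibility tokens, per the table quoted in MotionEval.update_gt()
+    # below: '1' = 0-40% visible, '2' = 40-60%, '3' = 60-80%, '4' = 80-100%.)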
+    eval_boxes = copy.deepcopy(ori_eval_boxes)
+    total, anns_filter = 0, 0
+    for ind, sample_token in enumerate(eval_boxes.sample_tokens):
+        # Filter on anns
+        total += len(eval_boxes[sample_token])
+        filtered_boxes = []
+        for box in eval_boxes[sample_token]:
+            if box.visibility == visibility:
+                filtered_boxes.append(box)
+        anns_filter += len(filtered_boxes)
+        eval_boxes.boxes[sample_token] = filtered_boxes
+
+    if verbose:
+        print("=> Original number of boxes: %d" % total)
+        print("=> After visibility based filtering: %d" % anns_filter)
+
+    return eval_boxes
+
+
+def filter_by_sample_token(
+        ori_eval_boxes,
+        valid_sample_tokens=[],
+        verbose=False):
+    eval_boxes = copy.deepcopy(ori_eval_boxes)
+    for sample_token in eval_boxes.sample_tokens:
+        if sample_token not in valid_sample_tokens:
+            eval_boxes.boxes.pop(sample_token)
+    return eval_boxes
+
+
+def filter_eval_boxes_by_overlap(nusc: NuScenes,
+                                 eval_boxes: EvalBoxes,
+                                 verbose: bool = False) -> EvalBoxes:
+    """
+    Applies filtering to boxes based on camera overlap: keeps boxes whose center
+    is visible in more than one camera.
+    :param nusc: An instance of the NuScenes class.
+    :param eval_boxes: An instance of the EvalBoxes class.
+    :param verbose: Whether to print to stdout.
+    """
+
+    # Accumulators for number of filtered boxes.
+    cams = ['CAM_FRONT',
+            'CAM_FRONT_RIGHT',
+            'CAM_BACK_RIGHT',
+            'CAM_BACK',
+            'CAM_BACK_LEFT',
+            'CAM_FRONT_LEFT']
+
+    total, anns_filter = 0, 0
+    for ind, sample_token in enumerate(eval_boxes.sample_tokens):
+
+        # Filter on anns
+        total += len(eval_boxes[sample_token])
+        sample_record = nusc.get('sample', sample_token)
+        filtered_boxes = []
+        for box in eval_boxes[sample_token]:
+            count = 0
+            for cam in cams:
+                # Sensor-frame transformation below is adapted from nuscenes-devkit.
+                sample_data_token = sample_record['data'][cam]
+                sd_record = nusc.get('sample_data', sample_data_token)
+                cs_record = nusc.get(
+                    'calibrated_sensor',
+                    sd_record['calibrated_sensor_token'])
+                sensor_record = nusc.get('sensor', cs_record['sensor_token'])
+                pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])
+                cam_intrinsic = np.array(cs_record['camera_intrinsic'])
+                imsize = (sd_record['width'], sd_record['height'])
+                new_box = Box(
+                    box.translation,
+                    box.size,
+                    Quaternion(box.rotation),
+                    name=box.detection_name,
+                    token='')
+
+                # Move box to ego vehicle coord system.
+                new_box.translate(-np.array(pose_record['translation']))
+                new_box.rotate(Quaternion(pose_record['rotation']).inverse)
+
+                # Move box to sensor coord system.
+                new_box.translate(-np.array(cs_record['translation']))
+                new_box.rotate(Quaternion(cs_record['rotation']).inverse)
+
+                if center_in_image(
+                        new_box,
+                        cam_intrinsic,
+                        imsize,
+                        vis_level=BoxVisibility.ANY):
+                    count += 1
+                # if exist_corners_in_image_but_not_all(new_box, cam_intrinsic, imsize, vis_level=BoxVisibility.ANY):
+                #     count += 1
+
+            if count > 1:
+                with open('center_overlap.txt', 'a') as f:
+                    try:
+                        f.write(box.token + '\n')
+                    except BaseException:
+                        pass
+                filtered_boxes.append(box)
+        anns_filter += len(filtered_boxes)
+        eval_boxes.boxes[sample_token] = filtered_boxes
+
+    if verbose:
+        print("=> Original number of boxes: %d" % total)
+        print("=> After anns based filtering: %d" % anns_filter)
+
+    return eval_boxes
+
+
+class MotionEval(NuScenesEval):
+    """
+    Detection evaluation extended with trajectory (motion) metrics.
+ """ + + def __init__(self, + nusc: NuScenes, + config: DetectionConfig, + result_path: str, + eval_set: str, + output_dir: str = None, + verbose: bool = True, + overlap_test=False, + eval_mask=False, + data_infos=None, + category_convert_type='motion_category', + ): + """ + Initialize a DetectionEval object. + :param nusc: A NuScenes object. + :param config: A DetectionConfig object. + :param result_path: Path of the nuScenes JSON result file. + :param eval_set: The dataset split to evaluate on, e.g. train, val or test. + :param output_dir: Folder to save plots and results to. + :param verbose: Whether to print to stdout. + """ + + self.nusc = nusc + self.result_path = result_path + self.eval_set = eval_set + self.output_dir = output_dir + self.verbose = verbose + self.cfg = config + self.overlap_test = overlap_test + self.eval_mask = eval_mask + self.data_infos = data_infos + # Check result file exists. + assert os.path.exists( + result_path), 'Error: The result file does not exist!' + + # Make dirs. + self.plot_dir = os.path.join(self.output_dir, 'plots') + if not os.path.isdir(self.output_dir): + os.makedirs(self.output_dir) + if not os.path.isdir(self.plot_dir): + os.makedirs(self.plot_dir) + + # Load data. + if verbose: + print('Initializing nuScenes detection evaluation') + self.pred_boxes, self.meta = load_prediction(self.result_path, self.cfg.max_boxes_per_sample, DetectionMotionBox, + verbose=verbose, category_convert_type=category_convert_type) + self.gt_boxes = load_gt( + self.nusc, + self.eval_set, + DetectionMotionBox_modified, + verbose=verbose, + category_convert_type=category_convert_type) + + assert set(self.pred_boxes.sample_tokens) == set(self.gt_boxes.sample_tokens), \ + "Samples in split doesn't match samples in predictions." + + # Add center distances. + self.pred_boxes = add_center_dist(nusc, self.pred_boxes) + self.gt_boxes = add_center_dist(nusc, self.gt_boxes) + + # Filter boxes (distance, points per box, etc.). 
+ + if verbose: + print('Filtering predictions') + self.pred_boxes = filter_eval_boxes( + nusc, self.pred_boxes, self.cfg.class_range, verbose=verbose) + if verbose: + print('Filtering ground truth annotations') + self.gt_boxes = filter_eval_boxes( + nusc, self.gt_boxes, self.cfg.class_range, verbose=verbose) + + if self.overlap_test: + self.pred_boxes = filter_eval_boxes_by_overlap( + self.nusc, self.pred_boxes) + + self.gt_boxes = filter_eval_boxes_by_overlap( + self.nusc, self.gt_boxes, verbose=True) + + self.all_gt = copy.deepcopy(self.gt_boxes) + self.all_preds = copy.deepcopy(self.pred_boxes) + self.sample_tokens = self.gt_boxes.sample_tokens + + self.index_map = {} + for scene in nusc.scene: + first_sample_token = scene['first_sample_token'] + sample = nusc.get('sample', first_sample_token) + self.index_map[first_sample_token] = 1 + index = 2 + while sample['next'] != '': + sample = nusc.get('sample', sample['next']) + self.index_map[sample['token']] = index + index += 1 + + def update_gt(self, type_='vis', visibility='1', index=1): + if type_ == 'vis': + self.visibility_test = True + if self.visibility_test: + '''[{'description': 'visibility of whole object is between 0 and 40%', + 'token': '1', + 'level': 'v0-40'}, + {'description': 'visibility of whole object is between 40 and 60%', + 'token': '2', + 'level': 'v40-60'}, + {'description': 'visibility of whole object is between 60 and 80%', + 'token': '3', + 'level': 'v60-80'}, + {'description': 'visibility of whole object is between 80 and 100%', + 'token': '4', + 'level': 'v80-100'}]''' + + self.gt_boxes = filter_eval_boxes_by_visibility( + self.all_gt, visibility, verbose=True) + + elif type_ == 'ord': + + valid_tokens = [ + key for ( + key, + value) in self.index_map.items() if value == index] + # from IPython import embed + # embed() + self.gt_boxes = filter_by_sample_token(self.all_gt, valid_tokens) + self.pred_boxes = filter_by_sample_token( + self.all_preds, valid_tokens) + self.sample_tokens = self.gt_boxes.sample_tokens + + def evaluate(self) -> Tuple[DetectionMotionMetrics, + DetectionMotionMetricDataList]: + """ + Performs the actual evaluation. + :return: A tuple of high-level and the raw metric data. + """ + start_time = time.time() + + # ----------------------------------- + # Step 1: Accumulate metric data for all classes and distance thresholds. + # ----------------------------------- + if self.verbose: + print('Accumulating metric data...') + metric_data_list = DetectionMotionMetricDataList() + + # print(self.cfg.dist_fcn_callable, self.cfg.dist_ths) + # self.cfg.dist_ths = [0.3] + # self.cfg.dist_fcn_callable + for class_name in self.cfg.class_names: + for dist_th in self.cfg.dist_ths: + md, _, _, _ = accumulate( + self.gt_boxes, self.pred_boxes, class_name, self.cfg.dist_fcn_callable, dist_th) + metric_data_list.set(class_name, dist_th, md) + + # ----------------------------------- + # Step 2: Calculate metrics from the data. + # ----------------------------------- + if self.verbose: + print('Calculating metrics...') + metrics = DetectionMotionMetrics(self.cfg) + + traj_metrics = {} + for class_name in self.cfg.class_names: + # Compute APs. + for dist_th in self.cfg.dist_ths: + metric_data = metric_data_list[(class_name, dist_th)] + ap = calc_ap( + metric_data, + self.cfg.min_recall, + self.cfg.min_precision) + metrics.add_label_ap(class_name, dist_th, ap) + # Compute TP metrics. 
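+            # (TP_METRICS here is the extended list defined at the top of this
+            # file: the five standard nuScenes TP errors plus the trajectory
+            # metrics min_ade_err, min_fde_err and miss_rate_err.)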
+ for metric_name in TP_METRICS: + metric_data = metric_data_list[( + class_name, self.cfg.dist_th_tp)] + if class_name in ['traffic_cone'] and metric_name in [ + 'attr_err', 'vel_err', 'orient_err']: + tp = np.nan + elif class_name in ['barrier'] and metric_name in ['attr_err', 'vel_err']: + tp = np.nan + else: + tp = calc_tp(metric_data, self.cfg.min_recall, metric_name) + if metric_name in TP_TRAJ_METRICS: + if class_name not in traj_metrics: + traj_metrics[class_name] = {} + traj_metrics[class_name][metric_name] = tp + metrics.add_label_tp(class_name, metric_name, tp) + print_traj_metrics(traj_metrics) + + # Compute evaluation time. + metrics.add_runtime(time.time() - start_time) + + return metrics, metric_data_list + + def evaluate_motion( + self) -> Tuple[DetectionMotionMetrics, DetectionMotionMetricDataList]: + """ + Performs the actual evaluation. + :return: A tuple of high-level and the raw metric data. + """ + start_time = time.time() + + self.cfg.dist_ths = [1.0] + self.cfg.dist_th_tp = 1.0 # center dist for detection + traj_dist_th = 2.0 # FDE for traj + + # ----------------------------------- + # Step 1: Accumulate metric data for all classes and distance thresholds. + # ----------------------------------- + if self.verbose: + print('Accumulating metric data...') + metric_data_list = DetectionMotionMetricDataList() + + for class_name in self.cfg.class_names: + for dist_th in self.cfg.dist_ths: + md, _, _, _ = accumulate_motion( + self.gt_boxes, self.pred_boxes, class_name, self.cfg.dist_fcn_callable, traj_fde, dist_th, traj_dist_th) + metric_data_list.set(class_name, dist_th, md) + + # ----------------------------------- + # Step 2: Calculate metrics from the data. + # ----------------------------------- + if self.verbose: + print('Calculating metrics...') + metrics = DetectionMotionMetrics(self.cfg) + + traj_metrics = {} + for class_name in self.cfg.class_names: + # Compute APs. + for dist_th in self.cfg.dist_ths: + metric_data = metric_data_list[(class_name, dist_th)] + ap = calc_ap( + metric_data, + self.cfg.min_recall, + self.cfg.min_precision) + metrics.add_label_ap(class_name, dist_th, ap) + # Compute TP metrics. + for metric_name in TP_METRICS: + metric_data = metric_data_list[( + class_name, self.cfg.dist_th_tp)] + if class_name in ['traffic_cone'] and metric_name in [ + 'attr_err', 'vel_err', 'orient_err']: + tp = np.nan + elif class_name in ['barrier'] and metric_name in ['attr_err', 'vel_err']: + tp = np.nan + else: + tp = calc_tp(metric_data, self.cfg.min_recall, metric_name) + if metric_name in TP_TRAJ_METRICS: + if class_name not in traj_metrics: + traj_metrics[class_name] = {} + traj_metrics[class_name][metric_name] = tp + metrics.add_label_tp(class_name, metric_name, tp) + print_traj_metrics(traj_metrics) + + # Compute evaluation time. + metrics.add_runtime(time.time() - start_time) + + return metrics, metric_data_list + + def evaluate_epa( + self) -> Tuple[DetectionMotionMetrics, DetectionMotionMetricDataList]: + """ + Performs the actual evaluation. + :return: A tuple of high-level and the raw metric data. + """ + start_time = time.time() + + self.cfg.dist_ths = [2.0] + self.cfg.dist_th_tp = 2.0 # center dist for detection + traj_dist_th = 2.0 # FDE for traj + + # ----------------------------------- + # Step 1: Accumulate metric data for all classes and distance thresholds. 
+        # -----------------------------------
+        if self.verbose:
+            print('Accumulating metric data...')
+        metric_data_list = DetectionMotionMetricDataList()
+
+        for class_name in self.cfg.class_names:
+            for dist_th in self.cfg.dist_ths:
+                # First pass: detection-only counts; second pass adds the
+                # trajectory-FDE gate and provides the metric data used below.
+                md, N_det_tp, N_det_fp, N_det_gt = accumulate(
+                    self.gt_boxes, self.pred_boxes, class_name,
+                    self.cfg.dist_fcn_callable, dist_th)
+                md, N_det_traj_tp, N_det_traj_fp, N_det_traj_gt = accumulate_motion(
+                    self.gt_boxes, self.pred_boxes, class_name,
+                    self.cfg.dist_fcn_callable, traj_fde, dist_th, traj_dist_th)
+                metric_data_list.set(class_name, dist_th, md)
+                # End-to-end Prediction Accuracy: trajectory-correct true
+                # positives minus half the detection false positives, over
+                # the GT count. E.g. 80 trajectory TPs, 20 detection FPs and
+                # 100 GT boxes give (80 - 0.5 * 20) / 100 = 0.70.
+                EPA = (N_det_traj_tp - 0.5 * N_det_fp) / (N_det_gt + 1e-5)
+                print('EPA ', class_name, EPA)
+
+        # -----------------------------------
+        # Step 2: Calculate metrics from the data.
+        # -----------------------------------
+        if self.verbose:
+            print('Calculating metrics...')
+        metrics = DetectionMotionMetrics(self.cfg)
+
+        traj_metrics = {}
+        for class_name in self.cfg.class_names:
+            # Compute APs.
+            for dist_th in self.cfg.dist_ths:
+                metric_data = metric_data_list[(class_name, dist_th)]
+                ap = calc_ap(
+                    metric_data,
+                    self.cfg.min_recall,
+                    self.cfg.min_precision)
+                metrics.add_label_ap(class_name, dist_th, ap)
+            # Compute TP metrics.
+            for metric_name in TP_METRICS:
+                metric_data = metric_data_list[(
+                    class_name, self.cfg.dist_th_tp)]
+                if class_name in ['traffic_cone'] and metric_name in [
+                        'attr_err', 'vel_err', 'orient_err']:
+                    tp = np.nan
+                elif class_name in ['barrier'] and metric_name in ['attr_err', 'vel_err']:
+                    tp = np.nan
+                else:
+                    tp = calc_tp(metric_data, self.cfg.min_recall, metric_name)
+                if metric_name in TP_TRAJ_METRICS:
+                    if class_name not in traj_metrics:
+                        traj_metrics[class_name] = {}
+                    traj_metrics[class_name][metric_name] = tp
+                metrics.add_label_tp(class_name, metric_name, tp)
+        print_traj_metrics(traj_metrics)
+
+        # Compute evaluation time.
+        metrics.add_runtime(time.time() - start_time)
+
+        return metrics, metric_data_list
+
+    def main(self,
+             plot_examples: int = 0,
+             render_curves: bool = True,
+             eval_mode: str = 'standard') -> Dict[str, Any]:
+        """
+        Main function that loads the evaluation code, visualizes samples, runs the evaluation and renders stat plots.
+        :param plot_examples: How many example visualizations to write to disk.
+        :param render_curves: Whether to render PR and TP curves to disk.
+        :param eval_mode: One of 'standard', 'motion_map' or 'epa'.
+        :return: A dict that stores the high-level metrics and meta data.
+        """
+        if plot_examples > 0:
+            # Select a random but fixed subset to plot.
+            random.seed(42)
+            sample_tokens = list(self.sample_tokens)
+            random.shuffle(sample_tokens)
+            sample_tokens = sample_tokens[:plot_examples]
+
+            # Visualize samples.
+            example_dir = os.path.join(self.output_dir, 'examples')
+            if not os.path.isdir(example_dir):
+                os.mkdir(example_dir)
+            for sample_token in sample_tokens:
+                visualize_sample(
+                    self.nusc,
+                    sample_token,
+                    # Don't render test GT.
+                    self.gt_boxes if self.eval_set != 'test' else EvalBoxes(),
+                    self.pred_boxes,
+                    eval_range=max(self.cfg.class_range.values()),
+                    savepath=os.path.join(example_dir, '{}.png'.format(sample_token)))
+
+        # Run evaluation.
+        if eval_mode == 'motion_map':
+            metrics, metric_data_list = self.evaluate_motion()
+        elif eval_mode == 'standard':
+            metrics, metric_data_list = self.evaluate()
+        elif eval_mode == 'epa':
+            metrics, metric_data_list = self.evaluate_epa()
+        else:
+            raise NotImplementedError
+        # Render PR and TP curves.
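+        # render() below saves the summary plot, per-class PR and TP curves
+        # and per-threshold PR curves as PDFs into self.plot_dir; see the
+        # render method further down.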
+ if render_curves: + self.render(metrics, metric_data_list) + + # Dump the metric data, meta and metrics to disk. + if self.verbose: + print('Saving metrics to: %s' % self.output_dir) + metrics_summary = metrics.serialize() + metrics_summary['meta'] = self.meta.copy() + with open(os.path.join(self.output_dir, 'metrics_summary.json'), 'w') as f: + json.dump(metrics_summary, f, indent=2) + with open(os.path.join(self.output_dir, 'metrics_details.json'), 'w') as f: + json.dump(metric_data_list.serialize(), f, indent=2) + + # Print high-level metrics. + print('mAP: %.4f' % (metrics_summary['mean_ap'])) + err_name_mapping = { + 'trans_err': 'mATE', + 'scale_err': 'mASE', + 'orient_err': 'mAOE', + 'vel_err': 'mAVE', + 'attr_err': 'mAAE' + } + for tp_name, tp_val in metrics_summary['tp_errors'].items(): + print('%s: %.4f' % (err_name_mapping[tp_name], tp_val)) + print('NDS: %.4f' % (metrics_summary['nd_score'])) + print('Eval time: %.1fs' % metrics_summary['eval_time']) + + # Print per-class metrics. + print() + print('Per-class results:') + print('Object Class\tAP\tATE\tASE\tAOE\tAVE\tAAE') + class_aps = metrics_summary['mean_dist_aps'] + class_tps = metrics_summary['label_tp_errors'] + for class_name in class_aps.keys(): + print('%s\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f' + % (class_name, class_aps[class_name], + class_tps[class_name]['trans_err'], + class_tps[class_name]['scale_err'], + class_tps[class_name]['orient_err'], + class_tps[class_name]['vel_err'], + class_tps[class_name]['attr_err'])) + + return metrics_summary + + def render(self, metrics: DetectionMetrics, + md_list: DetectionMetricDataList) -> None: + """ + Renders various PR and TP curves. + :param metrics: DetectionMetrics instance. + :param md_list: DetectionMetricDataList instance. + """ + if self.verbose: + print('Rendering PR and TP curves') + + def savepath(name): + return os.path.join(self.plot_dir, name + '.pdf') + + summary_plot( + md_list, + metrics, + min_precision=self.cfg.min_precision, + min_recall=self.cfg.min_recall, + dist_th_tp=self.cfg.dist_th_tp, + savepath=savepath('summary')) + + for detection_name in self.cfg.class_names: + class_pr_curve( + md_list, + metrics, + detection_name, + self.cfg.min_precision, + self.cfg.min_recall, + savepath=savepath( + detection_name + + '_pr')) + + class_tp_curve( + md_list, + metrics, + detection_name, + self.cfg.min_recall, + self.cfg.dist_th_tp, + savepath=savepath( + detection_name + + '_tp')) + + for dist_th in self.cfg.dist_ths: + dist_pr_curve( + md_list, + metrics, + dist_th, + self.cfg.min_precision, + self.cfg.min_recall, + savepath=savepath( + 'dist_pr_' + + str(dist_th))) + + +def print_traj_metrics(metrics): + class_names = metrics.keys() + x = PrettyTable() + x.field_names = ["class names"] + TP_TRAJ_METRICS + for class_name in metrics.keys(): + row_data = [class_name] + for m in TP_TRAJ_METRICS: + row_data.append('%.4f' % metrics[class_name][m]) + x.add_row(row_data) + print(x) + + +if __name__ == "__main__": + + # Settings. 
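+    # Example invocation (script name and paths are placeholders, not part
+    # of the patch):
+    #   python nuscenes_eval_motion.py results_nusc.json \
+    #       --output_dir work_dirs/motion_eval --eval_set val \
+    #       --dataroot data/nuscenes --version v1.0-trainval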
+ parser = argparse.ArgumentParser( + description='Evaluate nuScenes detection results.', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument( + 'result_path', + type=str, + help='The submission as a JSON file.') + parser.add_argument( + '--output_dir', + type=str, + default='~/nuscenes-metrics', + help='Folder to store result metrics, graphs and example visualizations.') + parser.add_argument( + '--eval_set', + type=str, + default='val', + help='Which dataset split to evaluate on, train, val or test.') + parser.add_argument('--dataroot', type=str, default='data/nuscenes', + help='Default nuScenes data directory.') + parser.add_argument( + '--version', + type=str, + default='v1.0-trainval', + help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.') + parser.add_argument( + '--config_path', + type=str, + default='', + help='Path to the configuration file.' + 'If no path given, the CVPR 2019 configuration will be used.') + parser.add_argument( + '--plot_examples', + type=int, + default=0, + help='How many example visualizations to write to disk.') + parser.add_argument('--render_curves', type=int, default=1, + help='Whether to render PR and TP curves to disk.') + parser.add_argument('--verbose', type=int, default=1, + help='Whether to print to stdout.') + args = parser.parse_args() + + result_path_ = os.path.expanduser(args.result_path) + output_dir_ = os.path.expanduser(args.output_dir) + eval_set_ = args.eval_set + dataroot_ = args.dataroot + version_ = args.version + config_path = args.config_path + plot_examples_ = args.plot_examples + render_curves_ = bool(args.render_curves) + verbose_ = bool(args.verbose) + + if config_path == '': + cfg_ = config_factory('detection_cvpr_2019') + else: + with open(config_path, 'r') as _f: + cfg_ = DetectionConfig.deserialize(json.load(_f)) + + nusc_ = NuScenes(version=version_, verbose=verbose_, dataroot=dataroot_) + nusc_eval = MotionEval( + nusc_, + config=cfg_, + result_path=result_path_, + eval_set=eval_set_, + output_dir=output_dir_, + verbose=verbose_) + for vis in ['1', '2', '3', '4']: + nusc_eval.update_gt(type_='vis', visibility=vis) + print(f'================ {vis} ===============') + nusc_eval.main( + plot_examples=plot_examples_, + render_curves=render_curves_) diff --git a/mmcv/datasets/lyft_dataset.py b/mmcv/datasets/lyft_dataset.py new file mode 100644 index 0000000..34707ee --- /dev/null +++ b/mmcv/datasets/lyft_dataset.py @@ -0,0 +1,561 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import numpy as np +import os +import pandas as pd +import tempfile +from lyft_dataset_sdk.lyftdataset import LyftDataset as Lyft +from lyft_dataset_sdk.utils.data_classes import Box as LyftBox +from os import path as osp +from pyquaternion import Quaternion + +from mmcv.core.evaluation.lyft_eval import lyft_eval +from mmcv.datasets import DATASETS +from mmcv.core import show_result +# from mmcv.core.bbox import Box3DMode, Coord3DMode, LiDARInstance3DBoxes +from mmcv.core.bbox.structures.box_3d_mode import Box3DMode +from mmcv.core.bbox.structures.coord_3d_mode import Coord3DMode +from mmcv.core.bbox.structures.lidar_box3d import LiDARInstance3DBoxes +from .custom_3d import Custom3DDataset +from .pipelines import Compose + + +@DATASETS.register_module() +class LyftDataset(Custom3DDataset): + r"""Lyft Dataset. + + This class serves as the API for experiments on the Lyft Dataset. + + Please refer to + ``_ + for data downloading. + + Args: + ann_file (str): Path of annotation file. 
+ pipeline (list[dict], optional): Pipeline used for data processing. + Defaults to None. + data_root (str): Path of dataset root. + classes (tuple[str], optional): Classes used in the dataset. + Defaults to None. + load_interval (int, optional): Interval of loading the dataset. It is + used to uniformly sample the dataset. Defaults to 1. + modality (dict, optional): Modality to specify the sensor data used + as input. Defaults to None. + box_type_3d (str, optional): Type of 3D box of this dataset. + Based on the `box_type_3d`, the dataset will encapsulate the box + to its original format then converted them to `box_type_3d`. + Defaults to 'LiDAR' in this dataset. Available options includes + + - 'LiDAR': Box in LiDAR coordinates. + - 'Depth': Box in depth coordinates, usually for indoor dataset. + - 'Camera': Box in camera coordinates. + filter_empty_gt (bool, optional): Whether to filter empty GT. + Defaults to True. + test_mode (bool, optional): Whether the dataset is in test mode. + Defaults to False. + """ # noqa: E501 + NameMapping = { + 'bicycle': 'bicycle', + 'bus': 'bus', + 'car': 'car', + 'emergency_vehicle': 'emergency_vehicle', + 'motorcycle': 'motorcycle', + 'other_vehicle': 'other_vehicle', + 'pedestrian': 'pedestrian', + 'truck': 'truck', + 'animal': 'animal' + } + DefaultAttribute = { + 'car': 'is_stationary', + 'truck': 'is_stationary', + 'bus': 'is_stationary', + 'emergency_vehicle': 'is_stationary', + 'other_vehicle': 'is_stationary', + 'motorcycle': 'is_stationary', + 'bicycle': 'is_stationary', + 'pedestrian': 'is_stationary', + 'animal': 'is_stationary' + } + CLASSES = ('car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', + 'motorcycle', 'bicycle', 'pedestrian', 'animal') + + def __init__(self, + ann_file, + pipeline=None, + data_root=None, + classes=None, + load_interval=1, + modality=None, + box_type_3d='LiDAR', + filter_empty_gt=True, + test_mode=False): + self.load_interval = load_interval + super().__init__( + data_root=data_root, + ann_file=ann_file, + pipeline=pipeline, + classes=classes, + modality=modality, + box_type_3d=box_type_3d, + filter_empty_gt=filter_empty_gt, + test_mode=test_mode) + + if self.modality is None: + self.modality = dict( + use_camera=False, + use_lidar=True, + use_radar=False, + use_map=False, + use_external=False, + ) + + def load_annotations(self, ann_file): + """Load annotations from ann_file. + + Args: + ann_file (str): Path of the annotation file. + + Returns: + list[dict]: List of annotations sorted by timestamps. + """ + data = mmcv.load(ann_file) + data_infos = list(sorted(data['infos'], key=lambda e: e['timestamp'])) + data_infos = data_infos[::self.load_interval] + self.metadata = data['metadata'] + self.version = self.metadata['version'] + return data_infos + + def get_data_info(self, index): + """Get data info according to the given index. + + Args: + index (int): Index of the sample data to get. + + Returns: + dict: Data information that will be passed to the data \ + preprocessing pipelines. 
It includes the following keys: + + - sample_idx (str): sample index + - pts_filename (str): filename of point clouds + - sweeps (list[dict]): infos of sweeps + - timestamp (float): sample timestamp + - img_filename (str, optional): image filename + - lidar2img (list[np.ndarray], optional): transformations \ + from lidar to different cameras + - ann_info (dict): annotation info + """ + info = self.data_infos[index] + + # standard protocal modified from SECOND.Pytorch + input_dict = dict( + sample_idx=info['token'], + pts_filename=info['lidar_path'], + sweeps=info['sweeps'], + timestamp=info['timestamp'] / 1e6, + ) + + if self.modality['use_camera']: + image_paths = [] + lidar2img_rts = [] + for cam_type, cam_info in info['cams'].items(): + image_paths.append(cam_info['data_path']) + # obtain lidar to image transformation matrix + lidar2cam_r = np.linalg.inv(cam_info['sensor2lidar_rotation']) + lidar2cam_t = cam_info[ + 'sensor2lidar_translation'] @ lidar2cam_r.T + lidar2cam_rt = np.eye(4) + lidar2cam_rt[:3, :3] = lidar2cam_r.T + lidar2cam_rt[3, :3] = -lidar2cam_t + intrinsic = cam_info['cam_intrinsic'] + viewpad = np.eye(4) + viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic + lidar2img_rt = (viewpad @ lidar2cam_rt.T) + lidar2img_rts.append(lidar2img_rt) + + input_dict.update( + dict( + img_filename=image_paths, + lidar2img=lidar2img_rts, + )) + + if not self.test_mode: + annos = self.get_ann_info(index) + input_dict['ann_info'] = annos + + return input_dict + + def get_ann_info(self, index): + """Get annotation info according to the given index. + + Args: + index (int): Index of the annotation data to get. + + Returns: + dict: Annotation information consists of the following keys: + + - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): \ + 3D ground truth bboxes. + - gt_labels_3d (np.ndarray): Labels of ground truths. + - gt_names (list[str]): Class names of ground truths. + """ + info = self.data_infos[index] + gt_bboxes_3d = info['gt_boxes'] + gt_names_3d = info['gt_names'] + gt_labels_3d = [] + for cat in gt_names_3d: + if cat in self.CLASSES: + gt_labels_3d.append(self.CLASSES.index(cat)) + else: + gt_labels_3d.append(-1) + gt_labels_3d = np.array(gt_labels_3d) + + if 'gt_shape' in info: + gt_shape = info['gt_shape'] + gt_bboxes_3d = np.concatenate([gt_bboxes_3d, gt_shape], axis=-1) + + # the lyft box center is [0.5, 0.5, 0.5], we change it to be + # the same as KITTI (0.5, 0.5, 0) + gt_bboxes_3d = LiDARInstance3DBoxes( + gt_bboxes_3d, + box_dim=gt_bboxes_3d.shape[-1], + origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d) + + anns_results = dict( + gt_bboxes_3d=gt_bboxes_3d, + gt_labels_3d=gt_labels_3d, + ) + return anns_results + + def _format_bbox(self, results, jsonfile_prefix=None): + """Convert the results to the standard format. + + Args: + results (list[dict]): Testing results of the dataset. + jsonfile_prefix (str): The prefix of the output jsonfile. + You can specify the output directory/filename by + modifying the jsonfile_prefix. Default: None. + + Returns: + str: Path of the output json file. 
+ """ + lyft_annos = {} + mapped_class_names = self.CLASSES + + print('Start to convert detection format...') + for sample_id, det in enumerate(mmcv.track_iter_progress(results)): + annos = [] + boxes = output_to_lyft_box(det) + sample_token = self.data_infos[sample_id]['token'] + boxes = lidar_lyft_box_to_global(self.data_infos[sample_id], boxes) + for i, box in enumerate(boxes): + name = mapped_class_names[box.label] + lyft_anno = dict( + sample_token=sample_token, + translation=box.center.tolist(), + size=box.wlh.tolist(), + rotation=box.orientation.elements.tolist(), + name=name, + score=box.score) + annos.append(lyft_anno) + lyft_annos[sample_token] = annos + lyft_submissions = { + 'meta': self.modality, + 'results': lyft_annos, + } + + mmcv.mkdir_or_exist(jsonfile_prefix) + res_path = osp.join(jsonfile_prefix, 'results_lyft.json') + print('Results writes to', res_path) + mmcv.dump(lyft_submissions, res_path) + return res_path + + def _evaluate_single(self, + result_path, + logger=None, + metric='bbox', + result_name='pts_bbox'): + """Evaluation for a single model in Lyft protocol. + + Args: + result_path (str): Path of the result file. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + metric (str): Metric name used for evaluation. Default: 'bbox'. + result_name (str): Result name in the metric prefix. + Default: 'pts_bbox'. + + Returns: + dict: Dictionary of evaluation details. + """ + + output_dir = osp.join(*osp.split(result_path)[:-1]) + lyft = Lyft( + data_path=osp.join(self.data_root, self.version), + json_path=osp.join(self.data_root, self.version, self.version), + verbose=True) + eval_set_map = { + 'v1.01-train': 'val', + } + metrics = lyft_eval(lyft, self.data_root, result_path, + eval_set_map[self.version], output_dir, logger) + + # record metrics + detail = dict() + metric_prefix = f'{result_name}_Lyft' + + for i, name in enumerate(metrics['class_names']): + AP = float(metrics['mAPs_cate'][i]) + detail[f'{metric_prefix}/{name}_AP'] = AP + + detail[f'{metric_prefix}/mAP'] = metrics['Final mAP'] + return detail + + def format_results(self, results, jsonfile_prefix=None, csv_savepath=None): + """Format the results to json (standard format for COCO evaluation). + + Args: + results (list[dict]): Testing results of the dataset. + jsonfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + csv_savepath (str | None): The path for saving csv files. + It includes the file path and the csv filename, + e.g., "a/b/filename.csv". If not specified, + the result will not be converted to csv file. + + Returns: + tuple: Returns (result_files, tmp_dir), where `result_files` is a \ + dict containing the json filepaths, `tmp_dir` is the temporal \ + directory created for saving json files when \ + `jsonfile_prefix` is not specified. + """ + assert isinstance(results, list), 'results must be a list' + assert len(results) == len(self), ( + 'The length of results is not equal to the dataset len: {} != {}'. + format(len(results), len(self))) + + if jsonfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + jsonfile_prefix = osp.join(tmp_dir.name, 'results') + else: + tmp_dir = None + + # currently the output prediction results could be in two formats + # 1. list of dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...) + # 2. 
list of dict('pts_bbox' or 'img_bbox': + # dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...)) + # this is a workaround to enable evaluation of both formats on Lyft + # refer to https://github.com/open-mmlab/mmdetection3d/issues/449 + if not ('pts_bbox' in results[0] or 'img_bbox' in results[0]): + result_files = self._format_bbox(results, jsonfile_prefix) + else: + # should take the inner dict out of 'pts_bbox' or 'img_bbox' dict + result_files = dict() + for name in results[0]: + print(f'\nFormating bboxes of {name}') + results_ = [out[name] for out in results] + tmp_file_ = osp.join(jsonfile_prefix, name) + result_files.update( + {name: self._format_bbox(results_, tmp_file_)}) + if csv_savepath is not None: + self.json2csv(result_files['pts_bbox'], csv_savepath) + return result_files, tmp_dir + + def evaluate(self, + results, + metric='bbox', + logger=None, + jsonfile_prefix=None, + csv_savepath=None, + result_names=['pts_bbox'], + show=False, + out_dir=None, + pipeline=None): + """Evaluation in Lyft protocol. + + Args: + results (list[dict]): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + jsonfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + csv_savepath (str | None): The path for saving csv files. + It includes the file path and the csv filename, + e.g., "a/b/filename.csv". If not specified, + the result will not be converted to csv file. + show (bool): Whether to visualize. + Default: False. + out_dir (str): Path to save the visualization results. + Default: None. + pipeline (list[dict], optional): raw data loading for showing. + Default: None. + + Returns: + dict[str, float]: Evaluation results. + """ + result_files, tmp_dir = self.format_results(results, jsonfile_prefix, + csv_savepath) + + if isinstance(result_files, dict): + results_dict = dict() + for name in result_names: + print(f'Evaluating bboxes of {name}') + ret_dict = self._evaluate_single(result_files[name]) + results_dict.update(ret_dict) + elif isinstance(result_files, str): + results_dict = self._evaluate_single(result_files) + + if tmp_dir is not None: + tmp_dir.cleanup() + + if show: + self.show(results, out_dir, pipeline=pipeline) + return results_dict + + def _build_default_pipeline(self): + """Build the default pipeline for this dataset.""" + pipeline = [ + dict( + type='LoadPointsFromFile', + coord_type='LIDAR', + load_dim=5, + use_dim=5, + file_client_args=dict(backend='disk')), + dict( + type='LoadPointsFromMultiSweeps', + sweeps_num=10, + file_client_args=dict(backend='disk')), + dict( + type='DefaultFormatBundle3D', + class_names=self.CLASSES, + with_label=False), + dict(type='Collect3D', keys=['points']) + ] + return Compose(pipeline) + + def show(self, results, out_dir, show=True, pipeline=None): + """Results visualization. + + Args: + results (list[dict]): List of bounding boxes results. + out_dir (str): Output directory of visualization result. + show (bool): Visualize the results online. + pipeline (list[dict], optional): raw data loading for showing. + Default: None. + """ + assert out_dir is not None, 'Expect out_dir, got none.' 
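+        # The loop below renders each frame: points and boxes are converted
+        # from LiDAR to depth coordinates, since show_result visualizes in
+        # the depth frame, and predictions are kept only above a fixed
+        # score threshold of 0.1.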
+ pipeline = self._get_pipeline(pipeline) + for i, result in enumerate(results): + if 'pts_bbox' in result.keys(): + result = result['pts_bbox'] + data_info = self.data_infos[i] + pts_path = data_info['lidar_path'] + file_name = osp.split(pts_path)[-1].split('.')[0] + points = self._extract_data(i, pipeline, 'points').numpy() + points = Coord3DMode.convert_point(points, Coord3DMode.LIDAR, + Coord3DMode.DEPTH) + inds = result['scores_3d'] > 0.1 + gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor.numpy() + show_gt_bboxes = Box3DMode.convert(gt_bboxes, Box3DMode.LIDAR, + Box3DMode.DEPTH) + pred_bboxes = result['boxes_3d'][inds].tensor.numpy() + show_pred_bboxes = Box3DMode.convert(pred_bboxes, Box3DMode.LIDAR, + Box3DMode.DEPTH) + show_result(points, show_gt_bboxes, show_pred_bboxes, out_dir, + file_name, show) + + def json2csv(self, json_path, csv_savepath): + """Convert the json file to csv format for submission. + + Args: + json_path (str): Path of the result json file. + csv_savepath (str): Path to save the csv file. + """ + results = mmcv.load(json_path)['results'] + sample_list_path = osp.join(self.data_root, 'sample_submission.csv') + data = pd.read_csv(sample_list_path) + Id_list = list(data['Id']) + pred_list = list(data['PredictionString']) + cnt = 0 + print('Converting the json to csv...') + for token in results.keys(): + cnt += 1 + predictions = results[token] + prediction_str = '' + for i in range(len(predictions)): + prediction_str += \ + str(predictions[i]['score']) + ' ' + \ + str(predictions[i]['translation'][0]) + ' ' + \ + str(predictions[i]['translation'][1]) + ' ' + \ + str(predictions[i]['translation'][2]) + ' ' + \ + str(predictions[i]['size'][0]) + ' ' + \ + str(predictions[i]['size'][1]) + ' ' + \ + str(predictions[i]['size'][2]) + ' ' + \ + str(Quaternion(list(predictions[i]['rotation'])) + .yaw_pitch_roll[0]) + ' ' + \ + predictions[i]['name'] + ' ' + prediction_str = prediction_str[:-1] + idx = Id_list.index(token) + pred_list[idx] = prediction_str + df = pd.DataFrame({'Id': Id_list, 'PredictionString': pred_list}) + mmcv.mkdir_or_exist(os.path.dirname(csv_savepath)) + df.to_csv(csv_savepath, index=False) + + +def output_to_lyft_box(detection): + """Convert the output to the box class in the Lyft. + + Args: + detection (dict): Detection results. + + Returns: + list[:obj:`LyftBox`]: List of standard LyftBoxes. + """ + box3d = detection['boxes_3d'] + scores = detection['scores_3d'].numpy() + labels = detection['labels_3d'].numpy() + + box_gravity_center = box3d.gravity_center.numpy() + box_dims = box3d.dims.numpy() + box_yaw = box3d.yaw.numpy() + # TODO: check whether this is necessary + # with dir_offset & dir_limit in the head + box_yaw = -box_yaw - np.pi / 2 + + box_list = [] + for i in range(len(box3d)): + quat = Quaternion(axis=[0, 0, 1], radians=box_yaw[i]) + box = LyftBox( + box_gravity_center[i], + box_dims[i], + quat, + label=labels[i], + score=scores[i]) + box_list.append(box) + return box_list + + +def lidar_lyft_box_to_global(info, boxes): + """Convert the box from ego to global coordinate. + + Args: + info (dict): Info for a specific sample data, including the + calibration information. + boxes (list[:obj:`LyftBox`]): List of predicted LyftBoxes. + + Returns: + list: List of standard LyftBoxes in the global + coordinate. 
+ """ + box_list = [] + for box in boxes: + # Move box to ego vehicle coord system + box.rotate(Quaternion(info['lidar2ego_rotation'])) + box.translate(np.array(info['lidar2ego_translation'])) + # Move box to global coord system + box.rotate(Quaternion(info['ego2global_rotation'])) + box.translate(np.array(info['ego2global_translation'])) + box_list.append(box) + return box_list \ No newline at end of file diff --git a/mmcv/datasets/map_utils/mean_ap.py b/mmcv/datasets/map_utils/mean_ap.py new file mode 100644 index 0000000..9b3a49b --- /dev/null +++ b/mmcv/datasets/map_utils/mean_ap.py @@ -0,0 +1,390 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from multiprocessing import Pool +from shapely.geometry import LineString, Polygon +import mmcv +import numpy as np +from mmcv.utils import print_log +from terminaltables import AsciiTable +import json +from os import path as osp +import os +from functools import partial +from .tpfp import tpfp_gen, custom_tpfp_gen +from mmcv.fileio.io import dump,load + +def average_precision(recalls, precisions, mode='area'): + """Calculate average precision (for single or multiple scales). + + Args: + recalls (ndarray): shape (num_scales, num_dets) or (num_dets, ) + precisions (ndarray): shape (num_scales, num_dets) or (num_dets, ) + mode (str): 'area' or '11points', 'area' means calculating the area + under precision-recall curve, '11points' means calculating + the average precision of recalls at [0, 0.1, ..., 1] + + Returns: + float or ndarray: calculated average precision + """ + no_scale = False + if recalls.ndim == 1: + no_scale = True + recalls = recalls[np.newaxis, :] + precisions = precisions[np.newaxis, :] + assert recalls.shape == precisions.shape and recalls.ndim == 2 + num_scales = recalls.shape[0] + ap = np.zeros(num_scales, dtype=np.float32) + if mode == 'area': + zeros = np.zeros((num_scales, 1), dtype=recalls.dtype) + ones = np.ones((num_scales, 1), dtype=recalls.dtype) + mrec = np.hstack((zeros, recalls, ones)) + mpre = np.hstack((zeros, precisions, zeros)) + for i in range(mpre.shape[1] - 1, 0, -1): + mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i]) + for i in range(num_scales): + ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0] + ap[i] = np.sum( + (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1]) + elif mode == '11points': + for i in range(num_scales): + for thr in np.arange(0, 1 + 1e-3, 0.1): + precs = precisions[i, recalls[i, :] >= thr] + prec = precs.max() if precs.size > 0 else 0 + ap[i] += prec + ap /= 11 + else: + raise ValueError( + 'Unrecognized mode, only "area" and "11points" are supported') + if no_scale: + ap = ap[0] + return ap + +def get_cls_results(gen_results, + annotations, + num_sample=100, + num_pred_pts_per_instance=30, + eval_use_same_gt_sample_num_flag=False, + class_id=0, + fix_interval=False): + """Get det results and gt information of a certain class. + + Args: + gen_results (list[list]): Same as `eval_map()`. + annotations (list[dict]): Same as `eval_map()`. + class_id (int): ID of a specific class. 
+ + Returns: + tuple[list[np.ndarray]]: detected bboxes, gt bboxes + """ + # if len(gen_results) == 0 or + + cls_gens, cls_scores = [], [] + for res in gen_results['vectors']: + if res['type'] == class_id: + if len(res['pts']) < 2: + continue + if not eval_use_same_gt_sample_num_flag: + sampled_points = np.array(res['pts']) + else: + line = res['pts'] + line = LineString(line) + + if fix_interval: + distances = list(np.arange(1., line.length, 1.)) + distances = [0,] + distances + [line.length,] + sampled_points = np.array([list(line.interpolate(distance).coords) + for distance in distances]).reshape(-1, 2) + else: + distances = np.linspace(0, line.length, num_sample) + sampled_points = np.array([list(line.interpolate(distance).coords) + for distance in distances]).reshape(-1, 2) + + cls_gens.append(sampled_points) + cls_scores.append(res['confidence_level']) + num_res = len(cls_gens) + if num_res > 0: + cls_gens = np.stack(cls_gens).reshape(num_res,-1) + cls_scores = np.array(cls_scores)[:,np.newaxis] + cls_gens = np.concatenate([cls_gens,cls_scores],axis=-1) + # print(f'for class {i}, cls_gens has shape {cls_gens.shape}') + else: + if not eval_use_same_gt_sample_num_flag: + cls_gens = np.zeros((0,num_pred_pts_per_instance*2+1)) + else: + cls_gens = np.zeros((0,num_sample*2+1)) + # print(f'for class {i}, cls_gens has shape {cls_gens.shape}') + + cls_gts = [] + for ann in annotations['vectors']: + if ann['type'] == class_id: + # line = ann['pts'] + np.array((1,1)) # for hdmapnet + line = ann['pts'] + # line = ann['pts'].cumsum(0) + line = LineString(line) + distances = np.linspace(0, line.length, num_sample) + sampled_points = np.array([list(line.interpolate(distance).coords) + for distance in distances]).reshape(-1, 2) + + cls_gts.append(sampled_points) + num_gts = len(cls_gts) + if num_gts > 0: + cls_gts = np.stack(cls_gts).reshape(num_gts,-1) + else: + cls_gts = np.zeros((0,num_sample*2)) + return cls_gens, cls_gts + # ones = np.ones((num_gts,1)) + # tmp_cls_gens = np.concatenate([cls_gts,ones],axis=-1) + # return tmp_cls_gens, cls_gts + +def format_res_gt_by_classes(result_path, + gen_results, + annotations, + cls_names=None, + num_pred_pts_per_instance=30, + eval_use_same_gt_sample_num_flag=False, + pc_range=[-15.0, -30.0, -5.0, 15.0, 30.0, 3.0], + nproc=24): + assert cls_names is not None + timer = mmcv.Timer() + num_fixed_sample_pts = 100 + fix_interval = False + print('results path: {}'.format(result_path)) + + output_dir = osp.join(*osp.split(result_path)[:-1]) + assert len(gen_results) == len(annotations) + + pool = Pool(nproc) + cls_gens, cls_gts = {}, {} + print('Formatting ...') + formatting_file = 'cls_formatted.pkl' + formatting_file = osp.join(output_dir,formatting_file) + + # for vis + if False: + from PIL import Image + import matplotlib.pyplot as plt + from matplotlib import transforms + from matplotlib.patches import Rectangle + + show_dir = osp.join(output_dir,'vis_json') + mmcv.mkdir_or_exist(osp.abspath(show_dir)) + # import pdb;pdb.set_trace() + car_img = Image.open('./figs/lidar_car.png') + colors_plt = ['r', 'b', 'g'] + for i in range(20): + + plt.figure(figsize=(2, 4)) + plt.xlim(pc_range[0], pc_range[3]) + plt.ylim(pc_range[1], pc_range[4]) + plt.axis('off') + + for line in gen_results[i]['vectors']: + l = np.array(line['pts']) + plt.plot(l[:,0],l[:,1],'-', + # color=colors[line['type']] + color = 'red', + ) + + for line in annotations[i]['vectors']: + # l = np.array(line['pts']) + np.array((1,1)) + l = np.array(line['pts']) + # l = line['pts'] + 
plt.plot(l[:, 0], l[:, 1], '-', color='blue')
+            plt.imshow(car_img, extent=[-1.2, 1.2, -1.5, 1.5])
+            map_path = osp.join(show_dir, 'COMPARE_MAP_{}.jpg'.format(i))
+            plt.savefig(map_path, bbox_inches='tight', dpi=400)
+            plt.close()
+
+    for i, clsname in enumerate(cls_names):
+
+        gengts = pool.starmap(
+            partial(get_cls_results, num_sample=num_fixed_sample_pts,
+                    num_pred_pts_per_instance=num_pred_pts_per_instance,
+                    eval_use_same_gt_sample_num_flag=eval_use_same_gt_sample_num_flag,
+                    class_id=i, fix_interval=fix_interval),
+            zip(list(gen_results.values()), annotations))
+        gens, gts = tuple(zip(*gengts))
+        cls_gens[clsname] = gens
+        cls_gts[clsname] = gts
+
+    dump([cls_gens, cls_gts], formatting_file)
+    print('Cls data formatting done in {:.2f}s!! with {}'.format(
+        float(timer.since_start()), formatting_file))
+    pool.close()
+    return cls_gens, cls_gts
+
+def eval_map(gen_results,
+             annotations,
+             cls_gens,
+             cls_gts,
+             threshold=0.5,
+             cls_names=None,
+             logger=None,
+             tpfp_fn=None,
+             pc_range=[-15.0, -30.0, -5.0, 15.0, 30.0, 3.0],
+             metric=None,
+             num_pred_pts_per_instance=30,
+             nproc=24):
+    timer = mmcv.Timer()
+    pool = Pool(nproc)
+
+    eval_results = []
+
+    for i, clsname in enumerate(cls_names):
+
+        # Get the gt and predicted lines of this class.
+        cls_gen = cls_gens[clsname]
+        cls_gt = cls_gts[clsname]
+        tpfp_fn = custom_tpfp_gen
+        # Serialization note: Pool workers can only receive picklable
+        # callables, and only module-level functions pickle cleanly, so we
+        # bind the thresholds onto the top-level custom_tpfp_gen with
+        # functools.partial instead of defining a nested helper
+        # (illustrated just below).
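+        # Illustration of the pickling constraint (assumed snippet, not
+        # part of the original evaluation flow):
+        #   tpfp = partial(custom_tpfp_gen, threshold=0.5)  # picklable
+        #   tpfp = lambda g, t: custom_tpfp_gen(g, t, 0.5)  # fails in Pool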
+
+        # TODO: binding threshold/metric via partial is a workaround; a
+        # dedicated top-level wrapper would be cleaner.
+        tpfp_fn = partial(tpfp_fn, threshold=threshold, metric=metric)
+        args = []
+        # compute tp and fp for each sample with multiple processes
+        tpfp = pool.starmap(
+            tpfp_fn,
+            zip(cls_gen, cls_gt, *args))
+        tp, fp = tuple(zip(*tpfp))
+
+        num_gts = 0
+        for j, bbox in enumerate(cls_gt):
+            num_gts += bbox.shape[0]
+
+        # sort all det instances by score, and sort tp and fp accordingly
+        cls_gen = np.vstack(cls_gen)
+        num_dets = cls_gen.shape[0]
+        sort_inds = np.argsort(-cls_gen[:, -1])  # descending: highest scores first
+        tp = np.hstack(tp)[sort_inds]
+        fp = np.hstack(fp)[sort_inds]
+
+        # calculate recall and precision with tp and fp,
+        # cumulated over detections sorted by score
+        tp = np.cumsum(tp, axis=0)
+        fp = np.cumsum(fp, axis=0)
+        eps = np.finfo(np.float32).eps
+        recalls = tp / np.maximum(num_gts, eps)
+        precisions = tp / np.maximum((tp + fp), eps)
+
+        # calculate AP: 'area' integrates the PR curve
+        # (VOC07-style would use '11points')
+        mode = 'area'
+        ap = average_precision(recalls, precisions, mode)
+        eval_results.append({
+            'num_gts': num_gts,
+            'num_dets': num_dets,
+            'recall': recalls,
+            'precision': precisions,
+            'ap': ap
+        })
+        print('cls:{} done in {:.2f}s!!'.format(
+            clsname, float(timer.since_last_check())))
+    pool.close()
+    aps = []
+    for cls_result in eval_results:
+        if cls_result['num_gts'] > 0:
+            aps.append(cls_result['ap'])
+    mean_ap = np.array(aps).mean().item() if len(aps) else 0.0
+
+    print_map_summary(
+        mean_ap, eval_results, class_name=cls_names, logger=logger)
+
+    return mean_ap, eval_results
+
+
+
+def print_map_summary(mean_ap,
+                      results,
+                      class_name=None,
+                      scale_ranges=None,
+                      logger=None):
+    """Print mAP and results of each class.
+
+    A table will be printed to show the gts/dets/recall/AP of each class and
+    the mAP.
+
+    Args:
+        mean_ap (float): Calculated from `eval_map()`.
+        results (list[dict]): Calculated from `eval_map()`.
+        class_name (list[str] | None): Class names to print in the table.
+        scale_ranges (list[tuple] | None): Range of scales to be evaluated.
+        logger (logging.Logger | str | None): The way to print the mAP
+            summary. See `mmcv.utils.print_log()` for details. Default: None.
+ """ + + if logger == 'silent': + return + + if isinstance(results[0]['ap'], np.ndarray): + num_scales = len(results[0]['ap']) + else: + num_scales = 1 + + if scale_ranges is not None: + assert len(scale_ranges) == num_scales + + num_classes = len(results) + + recalls = np.zeros((num_scales, num_classes), dtype=np.float32) + aps = np.zeros((num_scales, num_classes), dtype=np.float32) + num_gts = np.zeros((num_scales, num_classes), dtype=int) + for i, cls_result in enumerate(results): + if cls_result['recall'].size > 0: + recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1] + aps[:, i] = cls_result['ap'] + num_gts[:, i] = cls_result['num_gts'] + + label_names = class_name + + if not isinstance(mean_ap, list): + mean_ap = [mean_ap] + + header = ['class', 'gts', 'dets', 'recall', 'ap'] + for i in range(num_scales): + if scale_ranges is not None: + print_log(f'Scale range {scale_ranges[i]}', logger=logger) + table_data = [header] + for j in range(num_classes): + row_data = [ + label_names[j], num_gts[i, j], results[j]['num_dets'], + f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}' + ] + table_data.append(row_data) + table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}']) + table = AsciiTable(table_data) + table.inner_footing_row_border = True + print_log('\n' + table.table, logger=logger) diff --git a/mmcv/datasets/map_utils/struct.py b/mmcv/datasets/map_utils/struct.py new file mode 100644 index 0000000..1f20fee --- /dev/null +++ b/mmcv/datasets/map_utils/struct.py @@ -0,0 +1,438 @@ +import numpy as np +import torch +from shapely.geometry import LineString +from mmcv.datasets.pipelines import to_tensor + +class LiDARInstanceLines(object): + """Line instance in LIDAR coordinates + + """ + def __init__(self, + instance_line_list, + sample_dist=1, + num_samples=250, + padding=False, + fixed_num=-1, + padding_value=-10000, + patch_size=None): + assert isinstance(instance_line_list, list) + assert patch_size is not None + if len(instance_line_list) != 0: + assert isinstance(instance_line_list[0], LineString) + self.patch_size = patch_size + self.max_x = self.patch_size[1] / 2 + self.max_y = self.patch_size[0] / 2 + self.sample_dist = sample_dist + self.num_samples = num_samples + self.padding = padding + self.fixed_num = fixed_num + self.padding_value = padding_value + + self.instance_list = instance_line_list + + @property + def start_end_points(self): + """ + return torch.Tensor([N,4]), in xstart, ystart, xend, yend form + """ + assert len(self.instance_list) != 0 + instance_se_points_list = [] + for instance in self.instance_list: + se_points = [] + se_points.extend(instance.coords[0]) + se_points.extend(instance.coords[-1]) + instance_se_points_list.append(se_points) + instance_se_points_array = np.array(instance_se_points_list) + instance_se_points_tensor = to_tensor(instance_se_points_array) + instance_se_points_tensor = instance_se_points_tensor.to( + dtype=torch.float32) + instance_se_points_tensor[:,0] = torch.clamp(instance_se_points_tensor[:,0], min=-self.max_x,max=self.max_x) + instance_se_points_tensor[:,1] = torch.clamp(instance_se_points_tensor[:,1], min=-self.max_y,max=self.max_y) + instance_se_points_tensor[:,2] = torch.clamp(instance_se_points_tensor[:,2], min=-self.max_x,max=self.max_x) + instance_se_points_tensor[:,3] = torch.clamp(instance_se_points_tensor[:,3], min=-self.max_y,max=self.max_y) + return instance_se_points_tensor + + @property + def bbox(self): + """ + return torch.Tensor([N,4]), in xmin, ymin, xmax, ymax form + """ + assert len(self.instance_list) != 0 
+ instance_bbox_list = [] + for instance in self.instance_list: + # bounds is bbox: [xmin, ymin, xmax, ymax] + instance_bbox_list.append(instance.bounds) + instance_bbox_array = np.array(instance_bbox_list) + instance_bbox_tensor = to_tensor(instance_bbox_array) + instance_bbox_tensor = instance_bbox_tensor.to( + dtype=torch.float32) + instance_bbox_tensor[:,0] = torch.clamp(instance_bbox_tensor[:,0], min=-self.max_x,max=self.max_x) + instance_bbox_tensor[:,1] = torch.clamp(instance_bbox_tensor[:,1], min=-self.max_y,max=self.max_y) + instance_bbox_tensor[:,2] = torch.clamp(instance_bbox_tensor[:,2], min=-self.max_x,max=self.max_x) + instance_bbox_tensor[:,3] = torch.clamp(instance_bbox_tensor[:,3], min=-self.max_y,max=self.max_y) + return instance_bbox_tensor + + @property + def fixed_num_sampled_points(self): + """ + return torch.Tensor([N,fixed_num,2]), in xmin, ymin, xmax, ymax form + N means the num of instances + """ + assert len(self.instance_list) != 0 + instance_points_list = [] + for instance in self.instance_list: + distances = np.linspace(0, instance.length, self.fixed_num) + sampled_points = np.array([list(instance.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + instance_points_list.append(sampled_points) + instance_points_array = np.array(instance_points_list) + instance_points_tensor = to_tensor(instance_points_array) + instance_points_tensor = instance_points_tensor.to( + dtype=torch.float32) + instance_points_tensor[:,:,0] = torch.clamp(instance_points_tensor[:,:,0], min=-self.max_x,max=self.max_x) + instance_points_tensor[:,:,1] = torch.clamp(instance_points_tensor[:,:,1], min=-self.max_y,max=self.max_y) + return instance_points_tensor + + @property + def fixed_num_sampled_points_ambiguity(self): + """ + return torch.Tensor([N,fixed_num,2]), in xmin, ymin, xmax, ymax form + N means the num of instances + """ + assert len(self.instance_list) != 0 + instance_points_list = [] + for instance in self.instance_list: + distances = np.linspace(0, instance.length, self.fixed_num) + sampled_points = np.array([list(instance.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + instance_points_list.append(sampled_points) + instance_points_array = np.array(instance_points_list) + instance_points_tensor = to_tensor(instance_points_array) + instance_points_tensor = instance_points_tensor.to( + dtype=torch.float32) + instance_points_tensor[:,:,0] = torch.clamp(instance_points_tensor[:,:,0], min=-self.max_x,max=self.max_x) + instance_points_tensor[:,:,1] = torch.clamp(instance_points_tensor[:,:,1], min=-self.max_y,max=self.max_y) + instance_points_tensor = instance_points_tensor.unsqueeze(1) + return instance_points_tensor + + @property + def fixed_num_sampled_points_torch(self): + """ + return torch.Tensor([N,fixed_num,2]), in xmin, ymin, xmax, ymax form + N means the num of instances + """ + assert len(self.instance_list) != 0 + instance_points_list = [] + for instance in self.instance_list: + # distances = np.linspace(0, instance.length, self.fixed_num) + # sampled_points = np.array([list(instance.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + poly_pts = to_tensor(np.array(list(instance.coords))) + poly_pts = poly_pts.unsqueeze(0).permute(0,2,1) + sampled_pts = torch.nn.functional.interpolate(poly_pts,size=(self.fixed_num),mode='linear',align_corners=True) + sampled_pts = sampled_pts.permute(0,2,1).squeeze(0) + instance_points_list.append(sampled_pts) + # instance_points_array = np.array(instance_points_list) + 
# instance_points_tensor = to_tensor(instance_points_array) + instance_points_tensor = torch.stack(instance_points_list,dim=0) + instance_points_tensor = instance_points_tensor.to( + dtype=torch.float32) + instance_points_tensor[:,:,0] = torch.clamp(instance_points_tensor[:,:,0], min=-self.max_x,max=self.max_x) + instance_points_tensor[:,:,1] = torch.clamp(instance_points_tensor[:,:,1], min=-self.max_y,max=self.max_y) + return instance_points_tensor + + @property + def shift_fixed_num_sampled_points(self): + """ + return [instances_num, num_shifts, fixed_num, 2] + """ + fixed_num_sampled_points = self.fixed_num_sampled_points + instances_list = [] + is_poly = False + # is_line = False + # import pdb;pdb.set_trace() + for fixed_num_pts in fixed_num_sampled_points: + # [fixed_num, 2] + is_poly = fixed_num_pts[0].equal(fixed_num_pts[-1]) + fixed_num = fixed_num_pts.shape[0] + shift_pts_list = [] + if is_poly: + # import pdb;pdb.set_trace() + for shift_right_i in range(fixed_num): + shift_pts_list.append(fixed_num_pts.roll(shift_right_i,0)) + else: + shift_pts_list.append(fixed_num_pts) + shift_pts_list.append(fixed_num_pts.flip(0)) + shift_pts = torch.stack(shift_pts_list,dim=0) + + shift_pts[:,:,0] = torch.clamp(shift_pts[:,:,0], min=-self.max_x,max=self.max_x) + shift_pts[:,:,1] = torch.clamp(shift_pts[:,:,1], min=-self.max_y,max=self.max_y) + + if not is_poly: + padding = torch.full([fixed_num-shift_pts.shape[0],fixed_num,2], self.padding_value) + shift_pts = torch.cat([shift_pts,padding],dim=0) + # padding = np.zeros((self.num_samples - len(sampled_points), 2)) + # sampled_points = np.concatenate([sampled_points, padding], axis=0) + instances_list.append(shift_pts) + instances_tensor = torch.stack(instances_list, dim=0) + instances_tensor = instances_tensor.to( + dtype=torch.float32) + return instances_tensor + + @property + def shift_fixed_num_sampled_points_v1(self): + """ + return [instances_num, num_shifts, fixed_num, 2] + """ + fixed_num_sampled_points = self.fixed_num_sampled_points + instances_list = [] + is_poly = False + # is_line = False + # import pdb;pdb.set_trace() + for fixed_num_pts in fixed_num_sampled_points: + # [fixed_num, 2] + is_poly = fixed_num_pts[0].equal(fixed_num_pts[-1]) + pts_num = fixed_num_pts.shape[0] + shift_num = pts_num - 1 + if is_poly: + pts_to_shift = fixed_num_pts[:-1,:] + shift_pts_list = [] + if is_poly: + for shift_right_i in range(shift_num): + shift_pts_list.append(pts_to_shift.roll(shift_right_i,0)) + else: + shift_pts_list.append(fixed_num_pts) + shift_pts_list.append(fixed_num_pts.flip(0)) + shift_pts = torch.stack(shift_pts_list,dim=0) + + if is_poly: + _, _, num_coords = shift_pts.shape + tmp_shift_pts = shift_pts.new_zeros((shift_num, pts_num, num_coords)) + tmp_shift_pts[:,:-1,:] = shift_pts + tmp_shift_pts[:,-1,:] = shift_pts[:,0,:] + shift_pts = tmp_shift_pts + + shift_pts[:,:,0] = torch.clamp(shift_pts[:,:,0], min=-self.max_x,max=self.max_x) + shift_pts[:,:,1] = torch.clamp(shift_pts[:,:,1], min=-self.max_y,max=self.max_y) + + if not is_poly: + padding = torch.full([shift_num-shift_pts.shape[0],pts_num,2], self.padding_value) + shift_pts = torch.cat([shift_pts,padding],dim=0) + # padding = np.zeros((self.num_samples - len(sampled_points), 2)) + # sampled_points = np.concatenate([sampled_points, padding], axis=0) + instances_list.append(shift_pts) + instances_tensor = torch.stack(instances_list, dim=0) + instances_tensor = instances_tensor.to( + dtype=torch.float32) + return instances_tensor + + @property + def 
shift_fixed_num_sampled_points_v2(self): + """ + return [instances_num, num_shifts, fixed_num, 2] + """ + assert len(self.instance_list) != 0 + instances_list = [] + for instance in self.instance_list: + distances = np.linspace(0, instance.length, self.fixed_num) + poly_pts = np.array(list(instance.coords)) + start_pts = poly_pts[0] + end_pts = poly_pts[-1] + is_poly = np.equal(start_pts, end_pts) + is_poly = is_poly.all() + shift_pts_list = [] + pts_num, coords_num = poly_pts.shape + shift_num = pts_num - 1 + final_shift_num = self.fixed_num - 1 + if is_poly: + pts_to_shift = poly_pts[:-1,:] + for shift_right_i in range(shift_num): + shift_pts = np.roll(pts_to_shift,shift_right_i,axis=0) + pts_to_concat = shift_pts[0] + pts_to_concat = np.expand_dims(pts_to_concat,axis=0) + shift_pts = np.concatenate((shift_pts,pts_to_concat),axis=0) + shift_instance = LineString(shift_pts) + shift_sampled_points = np.array([list(shift_instance.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + shift_pts_list.append(shift_sampled_points) + # import pdb;pdb.set_trace() + else: + sampled_points = np.array([list(instance.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + flip_sampled_points = np.flip(sampled_points, axis=0) + shift_pts_list.append(sampled_points) + shift_pts_list.append(flip_sampled_points) + + multi_shifts_pts = np.stack(shift_pts_list,axis=0) + shifts_num,_,_ = multi_shifts_pts.shape + + if shifts_num > final_shift_num: + index = np.random.choice(multi_shifts_pts.shape[0], final_shift_num, replace=False) + multi_shifts_pts = multi_shifts_pts[index] + + multi_shifts_pts_tensor = to_tensor(multi_shifts_pts) + multi_shifts_pts_tensor = multi_shifts_pts_tensor.to( + dtype=torch.float32) + + multi_shifts_pts_tensor[:,:,0] = torch.clamp(multi_shifts_pts_tensor[:,:,0], min=-self.max_x,max=self.max_x) + multi_shifts_pts_tensor[:,:,1] = torch.clamp(multi_shifts_pts_tensor[:,:,1], min=-self.max_y,max=self.max_y) + # if not is_poly: + if multi_shifts_pts_tensor.shape[0] < final_shift_num: + padding = torch.full([final_shift_num-multi_shifts_pts_tensor.shape[0],self.fixed_num,2], self.padding_value) + multi_shifts_pts_tensor = torch.cat([multi_shifts_pts_tensor,padding],dim=0) + instances_list.append(multi_shifts_pts_tensor) + instances_tensor = torch.stack(instances_list, dim=0) + instances_tensor = instances_tensor.to( + dtype=torch.float32) + return instances_tensor + + @property + def shift_fixed_num_sampled_points_v3(self): + """ + return [instances_num, num_shifts, fixed_num, 2] + """ + assert len(self.instance_list) != 0 + instances_list = [] + for instance in self.instance_list: + distances = np.linspace(0, instance.length, self.fixed_num) + poly_pts = np.array(list(instance.coords)) + start_pts = poly_pts[0] + end_pts = poly_pts[-1] + is_poly = np.equal(start_pts, end_pts) + is_poly = is_poly.all() + shift_pts_list = [] + pts_num, coords_num = poly_pts.shape + shift_num = pts_num - 1 + final_shift_num = self.fixed_num - 1 + if is_poly: + pts_to_shift = poly_pts[:-1,:] + for shift_right_i in range(shift_num): + shift_pts = np.roll(pts_to_shift,shift_right_i,axis=0) + pts_to_concat = shift_pts[0] + pts_to_concat = np.expand_dims(pts_to_concat,axis=0) + shift_pts = np.concatenate((shift_pts,pts_to_concat),axis=0) + shift_instance = LineString(shift_pts) + shift_sampled_points = np.array([list(shift_instance.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + shift_pts_list.append(shift_sampled_points) + flip_pts_to_shift = 
np.flip(pts_to_shift, axis=0) + for shift_right_i in range(shift_num): + shift_pts = np.roll(flip_pts_to_shift,shift_right_i,axis=0) + pts_to_concat = shift_pts[0] + pts_to_concat = np.expand_dims(pts_to_concat,axis=0) + shift_pts = np.concatenate((shift_pts,pts_to_concat),axis=0) + shift_instance = LineString(shift_pts) + shift_sampled_points = np.array([list(shift_instance.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + shift_pts_list.append(shift_sampled_points) + # import pdb;pdb.set_trace() + else: + sampled_points = np.array([list(instance.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + flip_sampled_points = np.flip(sampled_points, axis=0) + shift_pts_list.append(sampled_points) + shift_pts_list.append(flip_sampled_points) + + multi_shifts_pts = np.stack(shift_pts_list,axis=0) + shifts_num,_,_ = multi_shifts_pts.shape + # import pdb;pdb.set_trace() + if shifts_num > 2*final_shift_num: + index = np.random.choice(shift_num, final_shift_num, replace=False) + flip0_shifts_pts = multi_shifts_pts[index] + flip1_shifts_pts = multi_shifts_pts[index+shift_num] + multi_shifts_pts = np.concatenate((flip0_shifts_pts,flip1_shifts_pts),axis=0) + + multi_shifts_pts_tensor = to_tensor(multi_shifts_pts) + multi_shifts_pts_tensor = multi_shifts_pts_tensor.to( + dtype=torch.float32) + + multi_shifts_pts_tensor[:,:,0] = torch.clamp(multi_shifts_pts_tensor[:,:,0], min=-self.max_x,max=self.max_x) + multi_shifts_pts_tensor[:,:,1] = torch.clamp(multi_shifts_pts_tensor[:,:,1], min=-self.max_y,max=self.max_y) + # if not is_poly: + if multi_shifts_pts_tensor.shape[0] < 2*final_shift_num: + padding = torch.full([final_shift_num*2-multi_shifts_pts_tensor.shape[0],self.fixed_num,2], self.padding_value) + multi_shifts_pts_tensor = torch.cat([multi_shifts_pts_tensor,padding],dim=0) + instances_list.append(multi_shifts_pts_tensor) + instances_tensor = torch.stack(instances_list, dim=0) + instances_tensor = instances_tensor.to( + dtype=torch.float32) + return instances_tensor + + @property + def shift_fixed_num_sampled_points_v4(self): + """ + return [instances_num, num_shifts, fixed_num, 2] + """ + fixed_num_sampled_points = self.fixed_num_sampled_points + instances_list = [] + is_poly = False + # is_line = False + # import pdb;pdb.set_trace() + for fixed_num_pts in fixed_num_sampled_points: + # [fixed_num, 2] + is_poly = fixed_num_pts[0].equal(fixed_num_pts[-1]) + pts_num = fixed_num_pts.shape[0] + shift_num = pts_num - 1 + shift_pts_list = [] + if is_poly: + pts_to_shift = fixed_num_pts[:-1,:] + for shift_right_i in range(shift_num): + shift_pts_list.append(pts_to_shift.roll(shift_right_i,0)) + flip_pts_to_shift = pts_to_shift.flip(0) + for shift_right_i in range(shift_num): + shift_pts_list.append(flip_pts_to_shift.roll(shift_right_i,0)) + else: + shift_pts_list.append(fixed_num_pts) + shift_pts_list.append(fixed_num_pts.flip(0)) + shift_pts = torch.stack(shift_pts_list,dim=0) + + if is_poly: + _, _, num_coords = shift_pts.shape + tmp_shift_pts = shift_pts.new_zeros((shift_num*2, pts_num, num_coords)) + tmp_shift_pts[:,:-1,:] = shift_pts + tmp_shift_pts[:,-1,:] = shift_pts[:,0,:] + shift_pts = tmp_shift_pts + + shift_pts[:,:,0] = torch.clamp(shift_pts[:,:,0], min=-self.max_x,max=self.max_x) + shift_pts[:,:,1] = torch.clamp(shift_pts[:,:,1], min=-self.max_y,max=self.max_y) + + if not is_poly: + padding = torch.full([shift_num*2-shift_pts.shape[0],pts_num,2], self.padding_value) + shift_pts = torch.cat([shift_pts,padding],dim=0) + # padding = 
np.zeros((self.num_samples - len(sampled_points), 2)) + # sampled_points = np.concatenate([sampled_points, padding], axis=0) + instances_list.append(shift_pts) + instances_tensor = torch.stack(instances_list, dim=0) + instances_tensor = instances_tensor.to( + dtype=torch.float32) + return instances_tensor + + @property + def shift_fixed_num_sampled_points_torch(self): + """ + return [instances_num, num_shifts, fixed_num, 2] + """ + fixed_num_sampled_points = self.fixed_num_sampled_points_torch + instances_list = [] + is_poly = False + # is_line = False + # import pdb;pdb.set_trace() + for fixed_num_pts in fixed_num_sampled_points: + # [fixed_num, 2] + is_poly = fixed_num_pts[0].equal(fixed_num_pts[-1]) + fixed_num = fixed_num_pts.shape[0] + shift_pts_list = [] + if is_poly: + # import pdb;pdb.set_trace() + for shift_right_i in range(fixed_num): + shift_pts_list.append(fixed_num_pts.roll(shift_right_i,0)) + else: + shift_pts_list.append(fixed_num_pts) + shift_pts_list.append(fixed_num_pts.flip(0)) + shift_pts = torch.stack(shift_pts_list,dim=0) + + shift_pts[:,:,0] = torch.clamp(shift_pts[:,:,0], min=-self.max_x,max=self.max_x) + shift_pts[:,:,1] = torch.clamp(shift_pts[:,:,1], min=-self.max_y,max=self.max_y) + + if not is_poly: + padding = torch.full([fixed_num-shift_pts.shape[0],fixed_num,2], self.padding_value) + shift_pts = torch.cat([shift_pts,padding],dim=0) + # padding = np.zeros((self.num_samples - len(sampled_points), 2)) + # sampled_points = np.concatenate([sampled_points, padding], axis=0) + instances_list.append(shift_pts) + instances_tensor = torch.stack(instances_list, dim=0) + instances_tensor = instances_tensor.to( + dtype=torch.float32) + return instances_tensor \ No newline at end of file diff --git a/mmcv/datasets/map_utils/tpfp.py b/mmcv/datasets/map_utils/tpfp.py new file mode 100644 index 0000000..a40ea1d --- /dev/null +++ b/mmcv/datasets/map_utils/tpfp.py @@ -0,0 +1,363 @@ +import mmcv +import numpy as np + +from mmcv.core.evaluation.bbox_overlaps import bbox_overlaps +from .tpfp_chamfer import vec_iou, convex_iou, rbbox_iou, polyline_score, custom_polyline_score +from shapely.geometry import LineString, Polygon +# from vecmapnet_ops.ops.iou import convex_iou + +def tpfp_bbox(det_bboxes, + gt_bboxes, + gt_bbox_masks, + threshold=0.5): + """Check if detected bboxes are true positive or false positive. + + Args: + det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). + gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). + gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, + of shape (k, 4). Default: None + iou_thr (float): IoU threshold to be considered as matched. + Default: 0.5. + use_legacy_coordinate (bool): Whether to use coordinate system in + mmdet v1.x. which means width, height should be + calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. + Default: False. + + Returns: + tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of + each array is (num_scales, m). + """ + + num_dets = len(det_bboxes) + num_gts = len(gt_bboxes) + + # tp and fp + tp = np.zeros((num_dets), dtype=np.float32) + fp = np.zeros((num_dets), dtype=np.float32) + + # if there is no gt bboxes in this image, then all det bboxes + # within area range are false positives + # XXX + if num_gts == 0: + fp[...] 
= 1 + return tp, fp + + if num_dets == 0: + return tp, fp + + # # distance matrix: n x m + bbox_p = det_bboxes[:, :-1].reshape(num_dets,-1,2) + bbox_g = gt_bboxes.reshape(num_gts,-1,2) + bbox_gm = gt_bbox_masks.reshape(num_gts,-1,2) + matrix = convex_iou(bbox_p,bbox_g,bbox_gm) + + # for each det, the max iou with all gts + matrix_max = matrix.max(axis=1) + # for each det, which gt overlaps most with it + matrix_argmax = matrix.argmax(axis=1) + # sort all dets in descending order by scores + sort_inds = np.argsort(-det_bboxes[:, -1]) + + gt_covered = np.zeros(num_gts, dtype=bool) + + # tp = 0 and fp = 0 means ignore this detected bbox, + for i in sort_inds: + if matrix_max[i] >= threshold: + matched_gt = matrix_argmax[i] + if not gt_covered[matched_gt]: + gt_covered[matched_gt] = True + tp[i] = 1 + else: + fp[i] = 1 + else: + fp[i] = 1 + + return tp, fp + + +def tpfp_rbbox(det_bboxes, + gt_bboxes, + gt_bbox_masks, + threshold=0.5): + """Check if detected bboxes are true positive or false positive. + + Args: + det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). + gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). + gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, + of shape (k, 4). Default: None + iou_thr (float): IoU threshold to be considered as matched. + Default: 0.5. + use_legacy_coordinate (bool): Whether to use coordinate system in + mmdet v1.x. which means width, height should be + calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. + Default: False. + + Returns: + tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of + each array is (num_scales, m). + """ + + num_dets = len(det_bboxes) + num_gts = len(gt_bboxes) + + # tp and fp + tp = np.zeros((num_dets), dtype=np.float32) + fp = np.zeros((num_dets), dtype=np.float32) + + # if there is no gt bboxes in this image, then all det bboxes + # within area range are false positives + # XXX + if num_gts == 0: + fp[...] = 1 + return tp, fp + + if num_dets == 0: + return tp, fp + + # # distance matrix: n x m + bbox_p = det_bboxes[:, :-1].reshape(num_dets,-1,2) + bbox_g = gt_bboxes.reshape(num_gts,-1,2) + bbox_gm = gt_bbox_masks.reshape(num_gts,-1,2) + matrix = rbbox_iou(bbox_p,bbox_g,bbox_gm) + + # for each det, the max iou with all gts + matrix_max = matrix.max(axis=1) + # for each det, which gt overlaps most with it + matrix_argmax = matrix.argmax(axis=1) + # sort all dets in descending order by scores + sort_inds = np.argsort(-det_bboxes[:, -1]) + + gt_covered = np.zeros(num_gts, dtype=bool) + + # tp = 0 and fp = 0 means ignore this detected bbox, + for i in sort_inds: + if matrix_max[i] >= threshold: + matched_gt = matrix_argmax[i] + if not gt_covered[matched_gt]: + gt_covered[matched_gt] = True + tp[i] = 1 + else: + fp[i] = 1 + else: + fp[i] = 1 + + return tp, fp + + +def tpfp_det(det_bboxes, + gt_bboxes, + threshold=0.5): + """Check if detected bboxes are true positive or false positive. + + Args: + det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5). + gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4). + gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image, + of shape (k, 4). Default: None + iou_thr (float): IoU threshold to be considered as matched. + Default: 0.5. + use_legacy_coordinate (bool): Whether to use coordinate system in + mmdet v1.x. which means width, height should be + calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. + Default: False. + + Returns: + tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. 
The shape of each array is (num_dets, ).
+    """
+
+    num_dets = det_bboxes.shape[0]
+    num_gts = gt_bboxes.shape[0]
+
+    # tp and fp
+    tp = np.zeros((num_dets), dtype=np.float32)
+    fp = np.zeros((num_dets), dtype=np.float32)
+
+    # if there are no gt bboxes in this image, then all det bboxes
+    # within the area range are false positives
+    if num_gts == 0:
+        fp[...] = 1
+        return tp, fp
+
+    if num_dets == 0:
+        return tp, fp
+
+    # IoU matrix: num_dets x num_gts
+    matrix = vec_iou(
+        det_bboxes[:, :-1].reshape(num_dets, -1, 2),
+        gt_bboxes.reshape(num_gts, -1, 2))
+    # for each det, the max iou over all gts
+    matrix_max = matrix.max(axis=1)
+    # for each det, the gt that overlaps most with it
+    matrix_argmax = matrix.argmax(axis=1)
+    # sort all dets in descending order by score
+    sort_inds = np.argsort(-det_bboxes[:, -1])
+
+    gt_covered = np.zeros(num_gts, dtype=bool)
+
+    # greedy matching: each gt can be claimed by at most one detection
+    for i in sort_inds:
+        if matrix_max[i] >= threshold:
+            matched_gt = matrix_argmax[i]
+            if not gt_covered[matched_gt]:
+                gt_covered[matched_gt] = True
+                tp[i] = 1
+            else:
+                fp[i] = 1
+        else:
+            fp[i] = 1
+
+    return tp, fp
+
+
+def tpfp_gen(gen_lines,
+             gt_lines,
+             threshold=0.5,
+             metric='POR'):
+    """Check if generated polylines are true positive or false positive.
+
+    Args:
+        gen_lines (ndarray): Generated polylines of this sample, of shape
+            (m, npts * 2 + 1); the last column is the confidence score.
+        gt_lines (ndarray): GT polylines of this sample, of shape
+            (n, npts * 2).
+        threshold (float): Score threshold to be considered as matched.
+            Default: 0.5.
+        metric (str): Polyline matching metric passed to polyline_score.
+            Default: 'POR'.
+
+    Returns:
+        tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape
+            of each array is (num_gens, ).
+    """
+
+    num_gens = gen_lines.shape[0]
+    num_gts = gt_lines.shape[0]
+
+    # tp and fp
+    tp = np.zeros((num_gens), dtype=np.float32)
+    fp = np.zeros((num_gens), dtype=np.float32)
+
+    # if there are no gt lines in this sample, then all generated lines
+    # are false positives
+    if num_gts == 0:
+        fp[...] = 1
+        return tp, fp
+
+    if num_gens == 0:
+        return tp, fp
+
+    gen_scores = gen_lines[:, -1]  # n
+    # score matrix: num_gens x num_gts
+
+    # matrix = custom_polyline_score(
+    #     gen_lines[:, :-1].reshape(num_gens, -1, 2),
+    #     gt_lines.reshape(num_gts, -1, 2), linewidth=2., metric=metric)
+
+    # TODO: possibly a bug here -- polyline_score is used rather than the
+    # commented-out custom_polyline_score; verify which scorer is intended.
+    matrix = polyline_score(
+        gen_lines[:, :-1].reshape(num_gens, -1, 2),
+        gt_lines.reshape(num_gts, -1, 2), linewidth=2., metric=metric)
+    # for each gen, the max score over all gts
+    matrix_max = matrix.max(axis=1)
+    # for each gen, the gt that matches it best
+    matrix_argmax = matrix.argmax(axis=1)
+    # sort all gens in descending order by score
+    sort_inds = np.argsort(-gen_scores)
+
+    gt_covered = np.zeros(num_gts, dtype=bool)
+
+    # greedy matching: each gt can be claimed by at most one prediction
+    for i in sort_inds:
+        if matrix_max[i] >= threshold:
+            matched_gt = matrix_argmax[i]
+            if not gt_covered[matched_gt]:
+                gt_covered[matched_gt] = True
+                tp[i] = 1
+            else:
+                fp[i] = 1
+        else:
+            fp[i] = 1
+
+    return tp, fp
+
+
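The `tpfp_*` functions above all share this greedy, score-ordered matching loop. For context, a minimal sketch of how the resulting tp/fp arrays are typically accumulated into a precision/recall curve and an AP value; the helper name, the toy numbers, and the trapezoidal AP are illustrative stand-ins, not part of this codebase:

```python
import numpy as np

def pr_curve_from_tpfp(tp, fp, scores, num_gts):
    """Accumulate per-detection tp/fp flags into a precision/recall curve."""
    order = np.argsort(-scores)  # descending confidence, as in the loops above
    tp_cum = np.cumsum(tp[order])
    fp_cum = np.cumsum(fp[order])
    recall = tp_cum / max(num_gts, 1)
    precision = tp_cum / np.maximum(tp_cum + fp_cum, np.finfo(np.float32).eps)
    return recall, precision

# toy sample: three predictions matched against two gt polylines
tp = np.array([1., 0., 1.], dtype=np.float32)
fp = np.array([0., 1., 0.], dtype=np.float32)
scores = np.array([0.9, 0.75, 0.6])

recall, precision = pr_curve_from_tpfp(tp, fp, scores, num_gts=2)
ap = np.trapz(precision, recall)  # all-point interpolation (one of several options)
print(recall, precision, ap)
```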
+def custom_tpfp_gen(gen_lines,
+                    gt_lines,
+                    threshold=0.5,
+                    metric='chamfer'):
+    """Check if generated polylines are true positive or false positive.
+
+    Args:
+        gen_lines (ndarray): Generated polylines of this sample, of shape
+            (m, npts * 2 + 1); the last column is the confidence score.
+        gt_lines (ndarray): GT polylines of this sample, of shape
+            (n, npts * 2).
+        threshold (float): Matching threshold. For the 'chamfer' metric the
+            scores are negated distances, so a positive threshold is negated
+            below before matching. Default: 0.5.
+        metric (str): Polyline matching metric passed to
+            custom_polyline_score. Default: 'chamfer'.
+
+    Returns:
+        tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape
+            of each array is (num_gens, ).
+    """
+    if metric == 'chamfer':
+        if threshold > 0:
+            threshold = -threshold
+
+    num_gens = gen_lines.shape[0]
+    num_gts = gt_lines.shape[0]
+
+    # tp and fp
+    tp = np.zeros((num_gens), dtype=np.float32)
+    fp = np.zeros((num_gens), dtype=np.float32)
+
+    # if there are no gt lines in this sample, then all generated lines
+    # are false positives
+    if num_gts == 0:
+        fp[...] = 1
+        return tp, fp
+
+    if num_gens == 0:
+        return tp, fp
+
+    gen_scores = gen_lines[:, -1]  # n
+    # score matrix: num_gens x num_gts
+
+    matrix = custom_polyline_score(
+        gen_lines[:, :-1].reshape(num_gens, -1, 2),
+        gt_lines.reshape(num_gts, -1, 2), linewidth=2., metric=metric)
+    # for each gen, the max score over all gts
+    matrix_max = matrix.max(axis=1)
+    # for each gen, the gt that matches it best
+    matrix_argmax = matrix.argmax(axis=1)
+    # sort all gens in descending order by score
+    sort_inds = np.argsort(-gen_scores)
+
+    gt_covered = np.zeros(num_gts, dtype=bool)
+
+    # greedy matching: each gt can be claimed by at most one prediction
+    for i in sort_inds:
+        if matrix_max[i] >= threshold:
+            matched_gt = matrix_argmax[i]
+            if not gt_covered[matched_gt]:
+                gt_covered[matched_gt] = True
+                tp[i] = 1
+            else:
+                fp[i] = 1
+        else:
+            fp[i] = 1
+
+    return tp, fp
+
diff --git a/mmcv/datasets/map_utils/tpfp_chamfer.py b/mmcv/datasets/map_utils/tpfp_chamfer.py
new file mode 100644
index 0000000..db55fdd
--- /dev/null
+++ b/mmcv/datasets/map_utils/tpfp_chamfer.py
@@ -0,0 +1,335 @@
+# from ..chamfer_dist import ChamferDistance
+import numpy as np
+from shapely.geometry import LineString, Polygon
+from shapely.strtree import STRtree
+from shapely.geometry import CAP_STYLE, JOIN_STYLE
+from scipy.spatial import distance
+import similaritymeasures
+
+# def chamfer_distance(pred_bbox, gt_bbox):
+#     cd_dist_func = ChamferDistance.vec_cd_dist(
+#         pred, pred_mask, tgt, tgt_mask)()
+
+
+def vec_iou(pred_lines, gt_lines):
+    '''
+    IoU between polylines, each dilated to a 1-meter-wide polygon.
+    pred_lines: (num_preds, npts, 2)
+    gt_lines: (num_gts, npts, 2)
+    '''
+
+    num_preds = pred_lines.shape[0]
+    num_gts = gt_lines.shape[0]
+
+    pred_lines_shapely = \
+        [LineString(i).buffer(1.,
+            cap_style=CAP_STYLE.round, join_style=JOIN_STYLE.round)
+         for i in pred_lines]
+    gt_lines_shapely = \
+        [LineString(i).buffer(1.,
+            cap_style=CAP_STYLE.round, join_style=JOIN_STYLE.round)
+         for i in gt_lines]
+
+    # construct an STRtree over the gt polygons to skip disjoint pairs
+    # NOTE: the id()-based lookup assumes shapely 1.x, where STRtree.query
+    # returns geometries (shapely 2.x returns integer indices instead)
+    tree = STRtree(gt_lines_shapely)
+    index_by_id = dict((id(pt), i) for i, pt in enumerate(gt_lines_shapely))
+
+    iou_matrix = np.zeros((num_preds, num_gts))
+
+    for i, pline in enumerate(pred_lines_shapely):
+
+        for o in tree.query(pline):
+            if o.intersects(pline):
+                gt_id = index_by_id[id(o)]
+
+                inter = o.intersection(pline).area
+                union = o.union(pline).area
+                iou_matrix[i, gt_id] = inter / union
+
+    return iou_matrix
+
+
+def convex_iou(pred_lines, gt_lines, gt_mask):
+    '''
+    IoU between the convex hulls of predicted and (masked) gt point sets.
+    pred_lines: num_preds, List [npts, 2]
+    gt_lines: (num_gts, npts, 2)
+    gt_mask: (num_gts, npts, 2)
+    '''
+
+    num_preds = len(pred_lines)
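+    # IoU of convex hulls: each prediction's hull vs. each gt's hull (the gt
+    # points are first filtered by gt_mask); an STRtree prunes disjoint pairs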
num_gts = len(gt_lines) + + pred_lines_shapely = \ + [Polygon(i).convex_hull for i in pred_lines] + gt_lines_shapely =\ + [Polygon(i[m].reshape(-1,2)).convex_hull for i,m in zip(gt_lines,gt_mask)] + + # construct tree + tree = STRtree(pred_lines_shapely) + index_by_id = dict((id(pt), i) for i, pt in enumerate(pred_lines_shapely)) + + iou_matrix = np.zeros((num_preds, num_gts)) + + for i, pline in enumerate(gt_lines_shapely): + + for o in tree.query(pline): + if o.intersects(pline): + pred_id = index_by_id[id(o)] + + inter = o.intersection(pline).area + union = o.union(pline).area + iou_matrix[pred_id, i] = inter / union + + return iou_matrix + +def rbbox_iou(pred_lines, gt_lines, gt_mask): + ''' + each line with 1 meter width + pred_lines: num_preds, List [npts, 2] + gt_lines: num_gts, npts, 2 + gt_mask: num_gts, npts, 2 + ''' + + num_preds = len(pred_lines) + num_gts = len(gt_lines) + + pred_lines_shapely = \ + [Polygon(i).minimum_rotated_rectangle for i in pred_lines] + gt_lines_shapely =\ + [Polygon(i[m].reshape(-1,2)) for i,m in zip(gt_lines,gt_mask)] + + # construct tree + tree = STRtree(pred_lines_shapely) + index_by_id = dict((id(pt), i) for i, pt in enumerate(pred_lines_shapely)) + + iou_matrix = np.zeros((num_preds, num_gts)) + + for i, pline in enumerate(gt_lines_shapely): + + for o in tree.query(pline): + if o.intersects(pline): + pred_id = index_by_id[id(o)] + + inter = o.intersection(pline).area + union = o.union(pline).area + iou_matrix[pred_id, i] = inter / union + + return iou_matrix + + +def polyline_score(pred_lines, gt_lines, linewidth=1., metric='POR'): + ''' + each line with 1 meter width + pred_lines: num_preds, List [npts, 2] + gt_lines: num_gts, npts, 2 + gt_mask: num_gts, npts, 2 + ''' + positive_threshold = 1. + num_preds = len(pred_lines) + num_gts = len(gt_lines) + line_length = pred_lines.shape[1] + + # gt_lines = gt_lines + np.array((1.,1.)) + + pred_lines_shapely = \ + [LineString(i).buffer(linewidth, + cap_style=CAP_STYLE.flat, join_style=JOIN_STYLE.mitre) + for i in pred_lines] + gt_lines_shapely =\ + [LineString(i).buffer(linewidth, + cap_style=CAP_STYLE.flat, join_style=JOIN_STYLE.mitre) + for i in gt_lines] + + # construct tree + tree = STRtree(pred_lines_shapely) + index_by_id = dict((id(pt), i) for i, pt in enumerate(pred_lines_shapely)) + + if metric=='POR': + iou_matrix = np.zeros((num_preds, num_gts),dtype=np.float64) + elif metric=='frechet': + iou_matrix = np.full((num_preds, num_gts), -100.) + elif metric=='chamfer': + iou_matrix = np.full((num_preds, num_gts), -100.) + elif metric=='chamfer_v2': + iou_matrix = np.full((num_preds, num_gts), -100.) + + for i, pline in enumerate(gt_lines_shapely): + + for o in tree.query(pline): + if o.intersects(pline): + pred_id = index_by_id[id(o)] + + if metric=='POR': + dist_mat = distance.cdist( + pred_lines[pred_id], gt_lines[i], 'euclidean') + + valid_ab = (dist_mat.min(-1) < positive_threshold).sum() + valid_ba = (dist_mat.min(-2) < positive_threshold).sum() + + iou_matrix[pred_id, i] = min(valid_ba,valid_ab) / line_length + # iou_matrix[pred_id, i] = ((valid_ba+valid_ab)/2) / line_length + # assert iou_matrix[pred_id, i] <= 1. and iou_matrix[pred_id, i] >= 0. 
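+                # the metrics below return negated distances so that "larger
+                # is better" holds for every metric; matching thresholds for
+                # frechet/chamfer are therefore negative numbers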
+ elif metric=='frechet': + fdistance_1 = \ + -similaritymeasures.frechet_dist(pred_lines[pred_id], gt_lines[i]) + fdistance_2 = \ + -similaritymeasures.frechet_dist(pred_lines[pred_id][::-1], gt_lines[i]) + fdistance = max(fdistance_1,fdistance_2) + iou_matrix[pred_id, i] = fdistance + + elif metric=='chamfer': + dist_mat = distance.cdist( + pred_lines[pred_id], gt_lines[i], 'euclidean') + + valid_ab = dist_mat.min(-1).sum() + valid_ba = dist_mat.min(-2).sum() + + iou_matrix[pred_id, i] = -(valid_ba+valid_ab)/(2*line_length) + # if iou_matrix[pred_id, i] == 0: + # import ipdb; ipdb.set_trace() + elif metric=='chamfer_v2': + dist_mat = distance.cdist( + pred_lines[pred_id], gt_lines[i], 'euclidean') + + valid_ab = dist_mat.min(-1).sum() + valid_ba = dist_mat.min(-2).sum() + + iou_matrix[pred_id, i] = -(valid_ba/pred_lines[pred_id].shape[0] + +valid_ab/gt_lines[i].shape[0])/2 + # if iou_matrix[pred_id, i] == 0: + # import ipdb; ipdb.set_trace() + + + # if True: + # import matplotlib.pyplot as plt + # print('pred num', num_preds) + # print('gt num', num_gts) + # for i in range(num_preds): + # plt.plot(pred_lines[i][:,0],pred_lines[i][:,1],'-',color='red',alpha=0.5) + # for i in range(num_gts): + # plt.plot(gt_lines[i][:,0],gt_lines[i][:,1],'-',color='blue',alpha=0.5) + # plt.savefig('test.png') + # plt.close() + return iou_matrix + + +def custom_polyline_score(pred_lines, gt_lines, linewidth=1., metric='chamfer'): + ''' + each line with 1 meter width + pred_lines: num_preds, List [npts, 2] + gt_lines: num_gts, npts, 2 + gt_mask: num_gts, npts, 2 + ''' + if metric == 'iou': + linewidth = 1.0 + positive_threshold = 1. + num_preds = len(pred_lines) + num_gts = len(gt_lines) + line_length = pred_lines.shape[1] + + # gt_lines = gt_lines + np.array((1.,1.)) + + pred_lines_shapely = \ + [LineString(i).buffer(linewidth, + cap_style=CAP_STYLE.flat, join_style=JOIN_STYLE.mitre) + for i in pred_lines] + gt_lines_shapely =\ + [LineString(i).buffer(linewidth, + cap_style=CAP_STYLE.flat, join_style=JOIN_STYLE.mitre) + for i in gt_lines] + + # construct tree + tree = STRtree(pred_lines_shapely) + index_by_id = dict((id(pt), i) for i, pt in enumerate(pred_lines_shapely)) + + + if metric=='chamfer': + iou_matrix = np.full((num_preds, num_gts), -100.) 
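+        # -100. is a sentinel low score for pairs whose buffers never
+        # intersect; any real negated Chamfer distance will exceed it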
+ elif metric=='iou': + iou_matrix = np.zeros((num_preds, num_gts),dtype=np.float64) + else: + raise NotImplementedError + + for i, pline in enumerate(gt_lines_shapely): + + for o in tree.query(pline): + if o.intersects(pline): + pred_id = index_by_id[id(o)] + + if metric=='chamfer': + dist_mat = distance.cdist( + pred_lines[pred_id], gt_lines[i], 'euclidean') + # import pdb;pdb.set_trace() + valid_ab = dist_mat.min(-1).mean() + valid_ba = dist_mat.min(-2).mean() + + iou_matrix[pred_id, i] = -(valid_ba+valid_ab)/2 + elif metric=='iou': + inter = o.intersection(pline).area + union = o.union(pline).area + iou_matrix[pred_id, i] = inter / union + + return iou_matrix + +if __name__ == '__main__': + import torch + + line1 = torch.tensor([ + [1, 5], [3, 5], [5, 5] + ]) + + line0 = torch.tensor([ + [3, 6], [4, 8], [5, 6] + ]) + + line2 = torch.tensor([ + [1, 4], [3, 4], [5, 4] + ]) + + line3 = torch.tensor([ + [4, 4], [3, 3], [5, 3] + ]) + + gt = torch.stack((line2, line3), dim=0).type(torch.float32) + pred = torch.stack((line0, line1), dim=0).type(torch.float32) + + # import ipdb; ipdb.set_trace() + import mmcv + # with mmcv.Timer(): + # gt = upsampler(gt, pts=10) + # pred = upsampler(pred, pts=10) + + import matplotlib.pyplot as plt + from shapely.geometry import LineString + from descartes import PolygonPatch + + iou_matrix = vec_iou(pred,gt) + print(iou_matrix) + # import pdb;pdb.set_trace() + score_matrix = custom_polyline_score(pred, gt, linewidth=1., metric='chamfer') + print(score_matrix) + fig, ax = plt.subplots() + for i in gt: + i = i.numpy() + plt.plot(i[:, 0], i[:, 1], 'o', color='red') + plt.plot(i[:, 0], i[:, 1], '-', color='red') + + dilated = LineString(i).buffer(1, cap_style=CAP_STYLE.round, join_style=JOIN_STYLE.round) + patch1 = PolygonPatch(dilated, fc='red', ec='red', alpha=0.5, zorder=-1) + ax.add_patch(patch1) + + for i in pred: + i = i.numpy() + plt.plot(i[:, 0], i[:, 1], 'o', color='blue') + plt.plot(i[:, 0], i[:, 1], '-', color='blue') + + dilated = LineString(i).buffer(1, cap_style=CAP_STYLE.flat, join_style=JOIN_STYLE.mitre) + patch1 = PolygonPatch(dilated, fc='blue', ec='blue', alpha=0.5, zorder=-1) + ax.add_patch(patch1) + + + ax.axis('equal') + + + plt.savefig('test3.png') \ No newline at end of file diff --git a/mmcv/datasets/nuscenes_dataset.py b/mmcv/datasets/nuscenes_dataset.py new file mode 100644 index 0000000..e9c76e0 --- /dev/null +++ b/mmcv/datasets/nuscenes_dataset.py @@ -0,0 +1,658 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import pyquaternion +import tempfile +from nuscenes.utils.data_classes import Box as NuScenesBox +from os import path as osp + +from mmcv.datasets import DATASETS +from mmcv.fileio.io import load, dump +from mmcv.utils import track_iter_progress, mkdir_or_exist +from mmcv.core import show_result +from mmcv.core.bbox.structures.box_3d_mode import Box3DMode +from mmcv.core.bbox.structures.coord_3d_mode import Coord3DMode +from mmcv.core.bbox.structures.lidar_box3d import LiDARInstance3DBoxes +from .custom_3d import Custom3DDataset +from .pipelines import Compose + + +@DATASETS.register_module() +class NuScenesDataset(Custom3DDataset): + r"""NuScenes Dataset. + + This class serves as the API for experiments on the NuScenes Dataset. + + Please refer to `NuScenes Dataset `_ + for data downloading. + + Args: + ann_file (str): Path of annotation file. + pipeline (list[dict], optional): Pipeline used for data processing. + Defaults to None. + data_root (str): Path of dataset root. 
+        classes (tuple[str], optional): Classes used in the dataset.
+            Defaults to None.
+        load_interval (int, optional): Interval for subsampling the dataset;
+            used to uniformly sample a fraction of the data. Defaults to 1.
+        with_velocity (bool, optional): Whether to include velocity prediction
+            in the experiments. Defaults to True.
+        modality (dict, optional): Modality to specify the sensor data used
+            as input. Defaults to None.
+        box_type_3d (str, optional): Type of 3D box of this dataset.
+            Based on the `box_type_3d`, the dataset will encapsulate the boxes
+            in their original format and then convert them to `box_type_3d`.
+            Defaults to 'LiDAR' in this dataset. Available options include:
+
+            - 'LiDAR': Box in LiDAR coordinates.
+            - 'Depth': Box in depth coordinates, usually for indoor datasets.
+            - 'Camera': Box in camera coordinates.
+        filter_empty_gt (bool, optional): Whether to filter empty GT.
+            Defaults to True.
+        test_mode (bool, optional): Whether the dataset is in test mode.
+            Defaults to False.
+        eval_version (str, optional): Configuration version of the evaluation.
+            Defaults to 'detection_cvpr_2019'.
+        use_valid_flag (bool): Whether to use the `use_valid_flag` key in the
+            info file as a mask to filter gt_boxes and gt_names.
+            Defaults to False.
+    """
+    NameMapping = {
+        'movable_object.barrier': 'barrier',
+        'vehicle.bicycle': 'bicycle',
+        'vehicle.bus.bendy': 'bus',
+        'vehicle.bus.rigid': 'bus',
+        'vehicle.car': 'car',
+        'vehicle.construction': 'construction_vehicle',
+        'vehicle.motorcycle': 'motorcycle',
+        'human.pedestrian.adult': 'pedestrian',
+        'human.pedestrian.child': 'pedestrian',
+        'human.pedestrian.construction_worker': 'pedestrian',
+        'human.pedestrian.police_officer': 'pedestrian',
+        'movable_object.trafficcone': 'traffic_cone',
+        'vehicle.trailer': 'trailer',
+        'vehicle.truck': 'truck'
+    }
+    DefaultAttribute = {
+        'car': 'vehicle.parked',
+        'pedestrian': 'pedestrian.moving',
+        'trailer': 'vehicle.parked',
+        'truck': 'vehicle.parked',
+        'bus': 'vehicle.moving',
+        'motorcycle': 'cycle.without_rider',
+        'construction_vehicle': 'vehicle.parked',
+        'bicycle': 'cycle.without_rider',
+        'barrier': '',
+        'traffic_cone': '',
+    }
+    AttrMapping = {
+        'cycle.with_rider': 0,
+        'cycle.without_rider': 1,
+        'pedestrian.moving': 2,
+        'pedestrian.standing': 3,
+        'pedestrian.sitting_lying_down': 4,
+        'vehicle.moving': 5,
+        'vehicle.parked': 6,
+        'vehicle.stopped': 7,
+    }
+    AttrMapping_rev = [
+        'cycle.with_rider',
+        'cycle.without_rider',
+        'pedestrian.moving',
+        'pedestrian.standing',
+        'pedestrian.sitting_lying_down',
+        'vehicle.moving',
+        'vehicle.parked',
+        'vehicle.stopped',
+    ]
+    # https://github.com/nutonomy/nuscenes-devkit/blob/57889ff20678577025326cfc24e57424a829be0a/python-sdk/nuscenes/eval/detection/evaluate.py#L222 # noqa
+    ErrNameMapping = {
+        'trans_err': 'mATE',
+        'scale_err': 'mASE',
+        'orient_err': 'mAOE',
+        'vel_err': 'mAVE',
+        'attr_err': 'mAAE'
+    }
+    CLASSES = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle',
+               'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone',
+               'barrier')
+
+    def __init__(self,
+                 ann_file,
+                 pipeline=None,
+                 data_root=None,
+                 classes=None,
+                 load_interval=1,
+                 with_velocity=True,
+                 modality=None,
+                 box_type_3d='LiDAR',
+                 filter_empty_gt=True,
+                 test_mode=False,
+                 eval_version='detection_cvpr_2019',
+                 use_valid_flag=False):
+        self.load_interval = load_interval
+        self.use_valid_flag = use_valid_flag
+        super().__init__(
+            data_root=data_root,
+            ann_file=ann_file,
+            pipeline=pipeline,
+            classes=classes,
+            modality=modality,
+            box_type_3d=box_type_3d,
+            filter_empty_gt=filter_empty_gt,
+            test_mode=test_mode)
+
+        self.with_velocity = with_velocity
+        self.eval_version = eval_version
+        from nuscenes.eval.detection.config import config_factory
+        self.eval_detection_configs = config_factory(self.eval_version)
+        # self.eval_detection_configs.class_names = list(self.eval_detection_configs.class_names)
+        if self.modality is None:
+            self.modality = dict(
+                use_camera=False,
+                use_lidar=True,
+                use_radar=False,
+                use_map=False,
+                use_external=False,
+            )
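For reference, `config_factory` above returns a plain config object from the nuscenes devkit; its `class_range` field is what later drives the per-class distance filtering in `lidar_nusc_box_to_global`. A minimal sketch (assumes the nuscenes devkit is installed; the exact printed values depend on the devkit version):

```python
from nuscenes.eval.detection.config import config_factory

# the same factory call made in __init__ above
cfg = config_factory('detection_cvpr_2019')

# per-class evaluation radius in meters, e.g. {'car': 50, 'pedestrian': 40, ...};
# boxes farther than this from the ego vehicle are dropped before evaluation
print(cfg.class_range)
```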
+    def get_cat_ids(self, idx):
+        """Get the ids of the categories present in a single scene sample.
+
+        Args:
+            idx (int): Index of the data_info.
+
+        Returns:
+            list[int]: Ids of the categories whose boxes appear in this
+                sample (empty if none of the dataset classes are present).
+        """
+        info = self.data_infos[idx]
+        if self.use_valid_flag:
+            mask = info['valid_flag']
+            gt_names = set(info['gt_names'][mask])
+        else:
+            gt_names = set(info['gt_names'])
+
+        cat_ids = []
+        for name in gt_names:
+            if name in self.CLASSES:
+                cat_ids.append(self.cat2id[name])
+        return cat_ids
+
+    def load_annotations(self, ann_file):
+        """Load annotations from ann_file.
+
+        Args:
+            ann_file (str): Path of the annotation file.
+
+        Returns:
+            list[dict]: List of annotations sorted by timestamps.
+        """
+        data = load(ann_file)
+        data_infos = list(sorted(data['infos'], key=lambda e: e['timestamp']))
+        data_infos = data_infos[::self.load_interval]
+        self.metadata = data['metadata']
+        self.version = self.metadata['version']
+        return data_infos
+
+    def get_data_info(self, index):
+        """Get data info according to the given index.
+
+        Args:
+            index (int): Index of the sample data to get.
+
+        Returns:
+            dict: Data information that will be passed to the data \
+                preprocessing pipelines. It includes the following keys:
+
+                - sample_idx (str): Sample index.
+                - pts_filename (str): Filename of point clouds.
+                - sweeps (list[dict]): Infos of sweeps.
+                - timestamp (float): Sample timestamp.
+                - img_filename (str, optional): Image filename.
+                - lidar2img (list[np.ndarray], optional): Transformations \
+                    from lidar to different cameras.
+                - ann_info (dict): Annotation info.
+        """
+        info = self.data_infos[index]
+        # standard protocol modified from SECOND.Pytorch
+        input_dict = dict(
+            sample_idx=info['token'],
+            pts_filename=info['lidar_path'],
+            sweeps=info['sweeps'],
+            timestamp=info['timestamp'] / 1e6,
+        )
+
+        if self.modality['use_camera']:
+            image_paths = []
+            lidar2img_rts = []
+            for cam_type, cam_info in info['cams'].items():
+                image_paths.append(cam_info['data_path'])
+                # obtain lidar to image transformation matrix
+                lidar2cam_r = np.linalg.inv(cam_info['sensor2lidar_rotation'])
+                lidar2cam_t = cam_info[
+                    'sensor2lidar_translation'] @ lidar2cam_r.T
+                lidar2cam_rt = np.eye(4)
+                lidar2cam_rt[:3, :3] = lidar2cam_r.T
+                lidar2cam_rt[3, :3] = -lidar2cam_t
+                intrinsic = cam_info['cam_intrinsic']
+                viewpad = np.eye(4)
+                viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic
+                lidar2img_rt = (viewpad @ lidar2cam_rt.T)
+                lidar2img_rts.append(lidar2img_rt)
+
+            input_dict.update(
+                dict(
+                    img_filename=image_paths,
+                    lidar2img=lidar2img_rts,
+                ))
+
+        if not self.test_mode:
+            annos = self.get_ann_info(index)
+            input_dict['ann_info'] = annos
+
+        return input_dict
+
+    def get_ann_info(self, index):
+        """Get annotation info according to the given index.
+
+        Args:
+            index (int): Index of the annotation data to get.
+ + Returns: + dict: Annotation information consists of the following keys: + + - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): \ + 3D ground truth bboxes + - gt_labels_3d (np.ndarray): Labels of ground truths. + - gt_names (list[str]): Class names of ground truths. + """ + info = self.data_infos[index] + # filter out bbox containing no points + if self.use_valid_flag: + mask = info['valid_flag'] + else: + mask = info['num_lidar_pts'] > 0 + gt_bboxes_3d = info['gt_boxes'][mask] + gt_names_3d = info['gt_names'][mask] + gt_labels_3d = [] + for cat in gt_names_3d: + if cat in self.CLASSES: + gt_labels_3d.append(self.CLASSES.index(cat)) + else: + gt_labels_3d.append(-1) + gt_labels_3d = np.array(gt_labels_3d) + + if self.with_velocity: + gt_velocity = info['gt_velocity'][mask] + nan_mask = np.isnan(gt_velocity[:, 0]) + gt_velocity[nan_mask] = [0.0, 0.0] + gt_bboxes_3d = np.concatenate([gt_bboxes_3d, gt_velocity], axis=-1) + + # the nuscenes box center is [0.5, 0.5, 0.5], we change it to be + # the same as KITTI (0.5, 0.5, 0) + gt_bboxes_3d = LiDARInstance3DBoxes( + gt_bboxes_3d, + box_dim=gt_bboxes_3d.shape[-1], + origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d) + + anns_results = dict( + gt_bboxes_3d=gt_bboxes_3d, + gt_labels_3d=gt_labels_3d, + gt_names=gt_names_3d) + return anns_results + + def _format_bbox(self, results, jsonfile_prefix=None): + """Convert the results to the standard format. + + Args: + results (list[dict]): Testing results of the dataset. + jsonfile_prefix (str): The prefix of the output jsonfile. + You can specify the output directory/filename by + modifying the jsonfile_prefix. Default: None. + + Returns: + str: Path of the output json file. + """ + + # import pdb + # pdb.set_trace() + nusc_annos = {} + mapped_class_names = self.CLASSES + + print('Start to convert detection format...') + for sample_id, det in enumerate(track_iter_progress(results)): + annos = [] + boxes = output_to_nusc_box(det) + sample_token = self.data_infos[sample_id]['token'] + boxes = lidar_nusc_box_to_global(self.data_infos[sample_id], boxes, + mapped_class_names, + self.eval_detection_configs, + self.eval_version) + for i, box in enumerate(boxes): + name = mapped_class_names[box.label] + if np.sqrt(box.velocity[0]**2 + box.velocity[1]**2) > 0.2: + if name in [ + 'car', + 'construction_vehicle', + 'bus', + 'truck', + 'trailer', + ]: + attr = 'vehicle.moving' + elif name in ['bicycle', 'motorcycle']: + attr = 'cycle.with_rider' + else: + attr = NuScenesDataset.DefaultAttribute[name] + else: + if name in ['pedestrian']: + attr = 'pedestrian.standing' + elif name in ['bus']: + attr = 'vehicle.stopped' + else: + attr = NuScenesDataset.DefaultAttribute[name] + + nusc_anno = dict( + sample_token=sample_token, + translation=box.center.tolist(), + size=box.wlh.tolist(), + rotation=box.orientation.elements.tolist(), + velocity=box.velocity[:2].tolist(), + detection_name=name, + detection_score=box.score, + attribute_name=attr) + annos.append(nusc_anno) + nusc_annos[sample_token] = annos + nusc_submissions = { + 'meta': self.modality, + 'results': nusc_annos, + } + + #pdb.set_trace() + + mkdir_or_exist(jsonfile_prefix) + res_path = osp.join(jsonfile_prefix, 'results_nusc.json') + print('Results writes to', res_path) + dump(nusc_submissions, res_path) + return res_path + + def _evaluate_single(self, + result_path, + logger=None, + metric='bbox', + result_name='pts_bbox'): + """Evaluation for a single model in nuScenes protocol. + + Args: + result_path (str): Path of the result file. 
+            logger (logging.Logger | str | None): Logger used for printing
+                related information during evaluation. Default: None.
+            metric (str): Metric name used for evaluation. Default: 'bbox'.
+            result_name (str): Result name in the metric prefix.
+                Default: 'pts_bbox'.
+
+        Returns:
+            dict: Dictionary of evaluation details.
+        """
+        from nuscenes import NuScenes
+        from nuscenes.eval.detection.evaluate import NuScenesEval
+
+        output_dir = osp.join(*osp.split(result_path)[:-1])
+        nusc = NuScenes(
+            version=self.version, dataroot=self.data_root, verbose=False)
+        eval_set_map = {
+            'v1.0-mini': 'mini_val',
+            'v1.0-trainval': 'val',
+        }
+        nusc_eval = NuScenesEval(
+            nusc,
+            config=self.eval_detection_configs,
+            result_path=result_path,
+            eval_set=eval_set_map[self.version],
+            output_dir=output_dir,
+            verbose=False)
+        nusc_eval.main(render_curves=False)
+
+        # record metrics
+        metrics = load(osp.join(output_dir, 'metrics_summary.json'))
+        detail = dict()
+        metric_prefix = f'{result_name}_NuScenes'
+        for name in self.CLASSES:
+            for k, v in metrics['label_aps'][name].items():
+                val = float('{:.4f}'.format(v))
+                detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val
+            for k, v in metrics['label_tp_errors'][name].items():
+                val = float('{:.4f}'.format(v))
+                detail['{}/{}_{}'.format(metric_prefix, name, k)] = val
+        for k, v in metrics['tp_errors'].items():
+            val = float('{:.4f}'.format(v))
+            detail['{}/{}'.format(metric_prefix,
+                                  self.ErrNameMapping[k])] = val
+
+        detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']
+        detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']
+        return detail
+
+    def format_results(self, results, jsonfile_prefix=None):
+        """Format the results to json (standard format for COCO evaluation).
+
+        Args:
+            results (list[dict]): Testing results of the dataset.
+            jsonfile_prefix (str | None): The prefix of json files. It includes
+                the file path and the prefix of filename, e.g., "a/b/prefix".
+                If not specified, a temp file will be created. Default: None.
+
+        Returns:
+            tuple: Returns (result_files, tmp_dir), where `result_files` is a \
+                dict containing the json filepaths, `tmp_dir` is the temporary \
+                directory created for saving json files when \
+                `jsonfile_prefix` is not specified.
+        """
+        assert isinstance(results, list), 'results must be a list'
+        assert len(results) == len(self), (
+            'The length of results is not equal to the dataset len: {} != {}'.
+            format(len(results), len(self)))
+
+        if jsonfile_prefix is None:
+            tmp_dir = tempfile.TemporaryDirectory()
+            jsonfile_prefix = osp.join(tmp_dir.name, 'results')
+        else:
+            tmp_dir = None
+
+        # currently the output prediction results could be in two formats
+        # 1. list of dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...)
+        # 2. list of dict('pts_bbox' or 'img_bbox':
+        #     dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...))
+        # this is a workaround to enable evaluation of both formats on nuScenes
+        # refer to https://github.com/open-mmlab/mmdetection3d/issues/449
+        if not ('pts_bbox' in results[0] or 'img_bbox' in results[0]):
+            result_files = self._format_bbox(results, jsonfile_prefix)
+        else:
+            # should take the inner dict out of 'pts_bbox' or 'img_bbox' dict
+            result_files = dict()
+            for name in results[0]:
+                print(f'\nFormatting bboxes of {name}')
+                results_ = [out[name] for out in results]
+                tmp_file_ = osp.join(jsonfile_prefix, name)
+                result_files.update(
+                    {name: self._format_bbox(results_, tmp_file_)})
+        return result_files, tmp_dir
+
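To make the two accepted result layouts concrete, here is a hedged sketch with plain tensors standing in for the `LiDARInstance3DBoxes` instances the real pipeline produces:

```python
import torch

boxes = torch.rand(2, 9)   # placeholder: (x, y, z, w, l, h, yaw, vx, vy) per box
scores = torch.rand(2)
labels = torch.randint(0, 10, (2,))

# format 1: bare per-sample detection dicts
results_fmt1 = [{'boxes_3d': boxes, 'scores_3d': scores, 'labels_3d': labels}]

# format 2: the same dict namespaced under the head that produced it
results_fmt2 = [{'pts_bbox': {'boxes_3d': boxes,
                              'scores_3d': scores,
                              'labels_3d': labels}}]

# either layout can be passed to format_results, e.g.
# result_files, tmp_dir = dataset.format_results(results_fmt2, 'work_dirs/res')
```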
+    def evaluate(self,
+                 results,
+                 metric='bbox',
+                 logger=None,
+                 jsonfile_prefix=None,
+                 result_names=['pts_bbox'],
+                 show=False,
+                 out_dir=None,
+                 pipeline=None):
+        """Evaluation in nuScenes protocol.
+
+        Args:
+            results (list[dict]): Testing results of the dataset.
+            metric (str | list[str]): Metrics to be evaluated.
+            logger (logging.Logger | str | None): Logger used for printing
+                related information during evaluation. Default: None.
+            jsonfile_prefix (str | None): The prefix of json files. It includes
+                the file path and the prefix of filename, e.g., "a/b/prefix".
+                If not specified, a temp file will be created. Default: None.
+            show (bool): Whether to visualize.
+                Default: False.
+            out_dir (str): Path to save the visualization results.
+                Default: None.
+            pipeline (list[dict], optional): Raw data loading pipeline used
+                for visualization. Default: None.
+
+        Returns:
+            dict[str, float]: Results of each evaluation metric.
+        """
+        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
+
+        if isinstance(result_files, dict):
+            results_dict = dict()
+            for name in result_names:
+                print('Evaluating bboxes of {}'.format(name))
+                ret_dict = self._evaluate_single(result_files[name])
+                results_dict.update(ret_dict)
+        elif isinstance(result_files, str):
+            results_dict = self._evaluate_single(result_files)
+
+        if tmp_dir is not None:
+            tmp_dir.cleanup()
+
+        if show:
+            self.show(results, out_dir, pipeline=pipeline)
+        return results_dict
+
+    def _build_default_pipeline(self):
+        """Build the default pipeline for this dataset."""
+        pipeline = [
+            dict(
+                type='LoadPointsFromFile',
+                coord_type='LIDAR',
+                load_dim=5,
+                use_dim=5,
+                file_client_args=dict(backend='disk')),
+            dict(
+                type='LoadPointsFromMultiSweeps',
+                sweeps_num=10,
+                file_client_args=dict(backend='disk')),
+            dict(
+                type='DefaultFormatBundle3D',
+                class_names=self.CLASSES,
+                with_label=False),
+            dict(type='Collect3D', keys=['points'])
+        ]
+        return Compose(pipeline)
+
+    def show(self, results, out_dir, show=True, pipeline=None):
+        """Results visualization.
+
+        Args:
+            results (list[dict]): List of bounding boxes results.
+            out_dir (str): Output directory of visualization result.
+            show (bool): Visualize the results online.
+            pipeline (list[dict], optional): Raw data loading pipeline used
+                for visualization. Default: None.
+        """
+        assert out_dir is not None, 'Expect out_dir, got none.'
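+        # points and boxes below are converted LIDAR -> DEPTH because
+        # show_result renders in the depth coordinate convention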
+ pipeline = self._get_pipeline(pipeline) + for i, result in enumerate(results): + if 'pts_bbox' in result.keys(): + result = result['pts_bbox'] + data_info = self.data_infos[i] + pts_path = data_info['lidar_path'] + file_name = osp.split(pts_path)[-1].split('.')[0] + points = self._extract_data(i, pipeline, 'points').numpy() + # for now we convert points into depth mode + points = Coord3DMode.convert_point(points, Coord3DMode.LIDAR, + Coord3DMode.DEPTH) + inds = result['scores_3d'] > 0.1 + gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor.numpy() + show_gt_bboxes = Box3DMode.convert(gt_bboxes, Box3DMode.LIDAR, + Box3DMode.DEPTH) + pred_bboxes = result['boxes_3d'][inds].tensor.numpy() + show_pred_bboxes = Box3DMode.convert(pred_bboxes, Box3DMode.LIDAR, + Box3DMode.DEPTH) + show_result(points, show_gt_bboxes, show_pred_bboxes, out_dir, + file_name, show) + + +def output_to_nusc_box(detection): + """Convert the output to the box class in the nuScenes. + + Args: + detection (dict): Detection results. + + - boxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox. + - scores_3d (torch.Tensor): Detection scores. + - labels_3d (torch.Tensor): Predicted box labels. + + Returns: + list[:obj:`NuScenesBox`]: List of standard NuScenesBoxes. + """ + box3d = detection['boxes_3d'] + scores = detection['scores_3d'].numpy() + labels = detection['labels_3d'].numpy() + + box_gravity_center = box3d.gravity_center.numpy() + box_dims = box3d.dims.numpy() + box_yaw = box3d.yaw.numpy() + # TODO: check whether this is necessary + # with dir_offset & dir_limit in the head + box_yaw = -box_yaw - np.pi / 2 + + box_list = [] + for i in range(len(box3d)): + quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box_yaw[i]) + velocity = (*box3d.tensor[i, 7:9], 0.0) + # velo_val = np.linalg.norm(box3d[i, 7:9]) + # velo_ori = box3d[i, 6] + # velocity = ( + # velo_val * np.cos(velo_ori), velo_val * np.sin(velo_ori), 0.0) + box = NuScenesBox( + box_gravity_center[i], + box_dims[i], + quat, + label=labels[i], + score=scores[i], + velocity=velocity) + box_list.append(box) + return box_list + + +def lidar_nusc_box_to_global(info, + boxes, + classes, + eval_configs, + eval_version='detection_cvpr_2019'): + """Convert the box from ego to global coordinate. + + Args: + info (dict): Info for a specific sample data, including the + calibration information. + boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes. + classes (list[str]): Mapped classes in the evaluation. + eval_configs (object): Evaluation configuration object. + eval_version (str): Evaluation version. + Default: 'detection_cvpr_2019' + + Returns: + list: List of standard NuScenesBoxes in the global + coordinate. + """ + box_list = [] + for box in boxes: + # Move box to ego vehicle coord system + box.rotate(pyquaternion.Quaternion(info['lidar2ego_rotation'])) + box.translate(np.array(info['lidar2ego_translation'])) + # filter det in ego. 
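+        # the eval config carries a per-class evaluation radius (in meters);
+        # boxes whose ego-frame XY distance exceeds it are dropped here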
+        cls_range_map = eval_configs.class_range
+        radius = np.linalg.norm(box.center[:2], 2)
+        det_range = cls_range_map[classes[box.label]]
+        if radius > det_range:
+            continue
+        # Move box to global coord system
+        box.rotate(pyquaternion.Quaternion(info['ego2global_rotation']))
+        box.translate(np.array(info['ego2global_translation']))
+        box_list.append(box)
+    return box_list
diff --git a/mmcv/datasets/nuscenes_e2e_dataset.py b/mmcv/datasets/nuscenes_e2e_dataset.py
new file mode 100644
index 0000000..38b3ffc
--- /dev/null
+++ b/mmcv/datasets/nuscenes_e2e_dataset.py
@@ -0,0 +1,1247 @@
+#---------------------------------------------------------------------------------#
+# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156)   #
+# Source code: https://github.com/OpenDriveLab/UniAD                               #
+# Copyright (c) OpenDriveLab. All rights reserved.                                 #
+#---------------------------------------------------------------------------------#
+
+import copy
+import numpy as np
+import torch
+from mmcv.datasets import DATASETS
+from mmcv.datasets.pipelines import to_tensor
+from mmcv.datasets import NuScenesDataset
+from mmcv.core.bbox.structures.lidar_box3d import LiDARInstance3DBoxes
+from mmcv.fileio.file_client import FileClient
+from mmcv.fileio.io import load, dump
+from mmcv.utils import track_iter_progress, mkdir_or_exist
+from os import path as osp
+from nuscenes.eval.common.utils import quaternion_yaw, Quaternion
+from .eval_utils.nuscenes_eval import NuScenesEval_custom
+from nuscenes.eval.tracking.evaluate import TrackingEval
+from .eval_utils.nuscenes_eval_motion import MotionEval
+from nuscenes.eval.common.config import config_factory
+import tempfile
+from mmcv.parallel import DataContainer as DC
+import random
+import pickle
+from prettytable import PrettyTable
+
+from nuscenes import NuScenes
+from mmcv.datasets.data_utils.vector_map import VectorizedLocalMap
+from mmcv.datasets.data_utils.rasterize import preprocess_map
+from mmcv.datasets.eval_utils.map_api import NuScenesMap
+from mmcv.datasets.data_utils.trajectory_api import NuScenesTraj
+from .data_utils.data_utils import lidar_nusc_box_to_global, obtain_map_info, output_to_nusc_box, output_to_nusc_box_det
+from nuscenes.prediction import convert_local_coords_to_global
+
+
+@DATASETS.register_module()
+class NuScenesE2EDataset(NuScenesDataset):
+    r"""NuScenes E2E Dataset.
+
+    This dataset only adds camera intrinsics and extrinsics to the results.
+ """ + + def __init__(self, + queue_length=4, + bev_size=(200, 200), + patch_size=(102.4, 102.4), + canvas_size=(200, 200), + overlap_test=False, + predict_steps=12, + planning_steps=6, + past_steps=4, + fut_steps=4, + use_nonlinear_optimizer=False, + lane_ann_file=None, + eval_mod=None, + + # For debug + is_debug=False, + len_debug=30, + + # Occ dataset + enbale_temporal_aug=False, + occ_receptive_field=3, + occ_n_future=4, + occ_filter_invalid_sample=False, + occ_filter_by_valid_flag=False, + + file_client_args=dict(backend='disk'), + *args, + **kwargs): + # init before super init since it is called in parent class + self.file_client_args = file_client_args + self.file_client = FileClient(**file_client_args) + + self.is_debug = is_debug + self.len_debug = len_debug + super().__init__(*args, **kwargs) + self.queue_length = queue_length + self.overlap_test = overlap_test + self.bev_size = bev_size + self.predict_steps = predict_steps + self.planning_steps = planning_steps + self.past_steps = past_steps + self.fut_steps = fut_steps + self.scene_token = None + self.lane_infos = self.load_annotations(lane_ann_file) \ + if lane_ann_file else None + self.eval_mod = eval_mod + + self.use_nonlinear_optimizer = use_nonlinear_optimizer + + self.nusc = NuScenes(version=self.version, + dataroot=self.data_root, verbose=True) + + self.map_num_classes = 3 + if canvas_size[0] == 50: + self.thickness = 1 + elif canvas_size[0] == 200: + self.thickness = 2 + else: + assert False + self.angle_class = 36 + self.patch_size = patch_size + self.canvas_size = canvas_size + self.nusc_maps = { + 'boston-seaport': NuScenesMap(dataroot=self.data_root, map_name='boston-seaport'), + 'singapore-hollandvillage': NuScenesMap(dataroot=self.data_root, map_name='singapore-hollandvillage'), + 'singapore-onenorth': NuScenesMap(dataroot=self.data_root, map_name='singapore-onenorth'), + 'singapore-queenstown': NuScenesMap(dataroot=self.data_root, map_name='singapore-queenstown'), + } + self.vector_map = VectorizedLocalMap( + self.data_root, + patch_size=self.patch_size, + canvas_size=self.canvas_size) + self.traj_api = NuScenesTraj(self.nusc, + self.predict_steps, + self.planning_steps, + self.past_steps, + self.fut_steps, + self.with_velocity, + self.CLASSES, + self.box_mode_3d, + self.use_nonlinear_optimizer) + + # Occ + self.enbale_temporal_aug = enbale_temporal_aug + assert self.enbale_temporal_aug is False + + self.occ_receptive_field = occ_receptive_field # past + current + self.occ_n_future = occ_n_future # future only + self.occ_filter_invalid_sample = occ_filter_invalid_sample + self.occ_filter_by_valid_flag = occ_filter_by_valid_flag + self.occ_only_total_frames = 7 # NOTE: hardcode, not influenced by planning + + def __len__(self): + if not self.is_debug: + return len(self.data_infos) + else: + return self.len_debug + + def load_annotations(self, ann_file): + """Load annotations from ann_file. + Args: + ann_file (str): Path of the annotation file. + + Returns: + list[dict]: List of annotations sorted by timestamps. 
+ """ + if self.file_client_args['backend'] == 'disk': + # data_infos = mmcv.load(ann_file) + data = pickle.loads(self.file_client.get(ann_file)) + data_infos = list( + sorted(data['infos'], key=lambda e: e['timestamp'])) + data_infos = data_infos[::self.load_interval] + self.metadata = data['metadata'] + self.version = self.metadata['version'] + elif self.file_client_args['backend'] == 'petrel': + data = pickle.loads(self.file_client.get(ann_file)) + data_infos = list( + sorted(data['infos'], key=lambda e: e['timestamp'])) + data_infos = data_infos[::self.load_interval] + self.metadata = data['metadata'] + self.version = self.metadata['version'] + else: + assert False, 'Invalid file_client_args!' + return data_infos + + def prepare_train_data(self, index): + """ + Training data preparation. + Args: + index (int): Index for accessing the target data. + Returns: + dict: Training data dict of the corresponding index. + img: queue_length, 6, 3, H, W + img_metas: img_metas of each frame (list) + gt_globals_3d: gt_globals of each frame (list) + gt_bboxes_3d: gt_bboxes of each frame (list) + gt_inds: gt_inds of each frame (list) + """ + data_queue = [] + self.enbale_temporal_aug = False + if self.enbale_temporal_aug: + # temporal aug + prev_indexs_list = list(range(index-self.queue_length, index)) + random.shuffle(prev_indexs_list) + prev_indexs_list = sorted(prev_indexs_list[1:], reverse=True) + input_dict = self.get_data_info(index) + else: + # ensure the first and final frame in same scene + final_index = index + first_index = index - self.queue_length + 1 + if first_index < 0: + return None + if self.data_infos[first_index]['scene_token'] != \ + self.data_infos[final_index]['scene_token']: + return None + # current timestamp + input_dict = self.get_data_info(final_index) + prev_indexs_list = list(reversed(range(first_index, final_index))) + if input_dict is None: + return None + frame_idx = input_dict['frame_idx'] + scene_token = input_dict['scene_token'] + self.pre_pipeline(input_dict) + example = self.pipeline(input_dict) + + assert example['gt_labels_3d'].data.shape[0] == example['gt_fut_traj'].shape[0] + assert example['gt_labels_3d'].data.shape[0] == example['gt_past_traj'].shape[0] + + if self.filter_empty_gt and \ + (example is None or ~(example['gt_labels_3d']._data != -1).any()): + return None + data_queue.insert(0, example) + + # retrieve previous infos + + for i in prev_indexs_list: + if self.enbale_temporal_aug: + i = max(0, i) + input_dict = self.get_data_info(i) + if input_dict is None: + return None + if input_dict['frame_idx'] < frame_idx and input_dict['scene_token'] == scene_token: + self.pre_pipeline(input_dict) + example = self.pipeline(input_dict) + if self.filter_empty_gt and \ + (example is None or ~(example['gt_labels_3d']._data != -1).any()): + return None + frame_idx = input_dict['frame_idx'] + assert example['gt_labels_3d'].data.shape[0] == example['gt_fut_traj'].shape[0] + assert example['gt_labels_3d'].data.shape[0] == example['gt_past_traj'].shape[0] + data_queue.insert(0, copy.deepcopy(example)) + data_queue = self.union2one(data_queue) + return data_queue + + def prepare_test_data(self, index): + """ + Training data preparation. + Args: + index (int): Index for accessing the target data. + Returns: + dict: Training data dict of the corresponding index. 
+ img: queue_length, 6, 3, H, W + img_metas: img_metas of each frame (list) + gt_labels_3d: gt_labels of each frame (list) + gt_bboxes_3d: gt_bboxes of each frame (list) + gt_inds: gt_inds of each frame(list) + """ + + input_dict = self.get_data_info(index) + self.pre_pipeline(input_dict) + example = self.pipeline(input_dict) + data_dict = {} + for key, value in example.items(): + if 'l2g' in key: + data_dict[key] = to_tensor(value[0]) + else: + data_dict[key] = value + return data_dict + + def union2one(self, queue): + """ + convert sample dict into one single sample. + """ + imgs_list = [each['img'].data for each in queue] + gt_labels_3d_list = [each['gt_labels_3d'].data for each in queue] + gt_sdc_label_list = [each['gt_sdc_label'].data for each in queue] + gt_inds_list = [to_tensor(each['gt_inds']) for each in queue] + gt_bboxes_3d_list = [each['gt_bboxes_3d'].data for each in queue] + gt_past_traj_list = [to_tensor(each['gt_past_traj']) for each in queue] + gt_past_traj_mask_list = [ + to_tensor(each['gt_past_traj_mask']) for each in queue] + gt_sdc_bbox_list = [each['gt_sdc_bbox'].data for each in queue] + l2g_r_mat_list = [to_tensor(each['l2g_r_mat']) for each in queue] + l2g_t_list = [to_tensor(each['l2g_t']) for each in queue] + timestamp_list = [to_tensor(each['timestamp']) for each in queue] + gt_fut_traj = to_tensor(queue[-1]['gt_fut_traj']) + gt_fut_traj_mask = to_tensor(queue[-1]['gt_fut_traj_mask']) + # gt_sdc_fut_traj = to_tensor(queue[-1]['gt_sdc_fut_traj']) + # gt_sdc_fut_traj_mask = to_tensor(queue[-1]['gt_sdc_fut_traj_mask']) + # gt_future_boxes_list = queue[-1]['gt_future_boxes'] + # gt_future_labels_list = [to_tensor(each) + # for each in queue[-1]['gt_future_labels']] + + metas_map = {} + prev_pos = None + prev_angle = None + for i, each in enumerate(queue): + metas_map[i] = each['img_metas'].data + if i == 0: + metas_map[i]['prev_bev'] = False + prev_pos = copy.deepcopy(metas_map[i]['can_bus'][:3]) + prev_angle = copy.deepcopy(metas_map[i]['can_bus'][-1]) + metas_map[i]['can_bus'][:3] = 0 + metas_map[i]['can_bus'][-1] = 0 + else: + metas_map[i]['prev_bev'] = True + tmp_pos = copy.deepcopy(metas_map[i]['can_bus'][:3]) + tmp_angle = copy.deepcopy(metas_map[i]['can_bus'][-1]) + metas_map[i]['can_bus'][:3] -= prev_pos + metas_map[i]['can_bus'][-1] -= prev_angle + prev_pos = copy.deepcopy(tmp_pos) + prev_angle = copy.deepcopy(tmp_angle) + + queue[-1]['img'] = DC(torch.stack(imgs_list), + cpu_only=False, stack=True) + queue[-1]['img_metas'] = DC(metas_map, cpu_only=True) + queue = queue[-1] + + queue['gt_labels_3d'] = DC(gt_labels_3d_list) + queue['gt_sdc_label'] = DC(gt_sdc_label_list) + queue['gt_inds'] = DC(gt_inds_list) + queue['gt_bboxes_3d'] = DC(gt_bboxes_3d_list, cpu_only=True) + queue['gt_sdc_bbox'] = DC(gt_sdc_bbox_list, cpu_only=True) + queue['l2g_r_mat'] = DC(l2g_r_mat_list) + queue['l2g_t'] = DC(l2g_t_list) + queue['timestamp'] = DC(timestamp_list) + queue['gt_fut_traj'] = DC(gt_fut_traj) + queue['gt_fut_traj_mask'] = DC(gt_fut_traj_mask) + queue['gt_past_traj'] = DC(gt_past_traj_list) + queue['gt_past_traj_mask'] = DC(gt_past_traj_mask_list) + # queue['gt_future_boxes'] = DC(gt_future_boxes_list, cpu_only=True) + # queue['gt_future_labels'] = DC(gt_future_labels_list) + return queue + + def get_ann_info(self, index): + """Get annotation info according to the given index. + + Args: + index (int): Index of the annotation data to get. 
+ + Returns: + dict: Annotation information consists of the following keys: + + - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): \ + 3D ground truth bboxes + - gt_labels_3d (np.ndarray): Labels of ground truths. + - gt_names (list[str]): Class names of ground truths. + - gt_inds (np.ndarray): Instance ids of ground truths. + - gt_fut_traj (np.ndarray): . + - gt_fut_traj_mask (np.ndarray): . + """ + info = self.data_infos[index] + # filter out bbox containing no points + if self.use_valid_flag: + mask = info['valid_flag'] + else: + mask = info['num_lidar_pts'] > 0 + gt_bboxes_3d = info['gt_boxes'][mask] + gt_names_3d = info['gt_names'][mask] + gt_inds = info['gt_inds'][mask] + + sample = self.nusc.get('sample', info['token']) + ann_tokens = np.array(sample['anns'])[mask] + assert ann_tokens.shape[0] == gt_bboxes_3d.shape[0] + + gt_fut_traj, gt_fut_traj_mask, gt_past_traj, gt_past_traj_mask = self.traj_api.get_traj_label( + info['token'], ann_tokens) + + sdc_vel = self.traj_api.sdc_vel_info[info['token']] + gt_sdc_bbox, gt_sdc_label = self.traj_api.generate_sdc_info(sdc_vel) + gt_sdc_fut_traj, gt_sdc_fut_traj_mask = self.traj_api.get_sdc_traj_label( + info['token']) + + sdc_planning, sdc_planning_mask, command = self.traj_api.get_sdc_planning_label( + info['token']) + + gt_labels_3d = [] + for cat in gt_names_3d: + if cat in self.CLASSES: + gt_labels_3d.append(self.CLASSES.index(cat)) + else: + gt_labels_3d.append(-1) + gt_labels_3d = np.array(gt_labels_3d) + + if self.with_velocity: + gt_velocity = info['gt_velocity'][mask] + nan_mask = np.isnan(gt_velocity[:, 0]) + gt_velocity[nan_mask] = [0.0, 0.0] + gt_bboxes_3d = np.concatenate([gt_bboxes_3d, gt_velocity], axis=-1) + + # the nuscenes box center is [0.5, 0.5, 0.5], we change it to be + # the same as KITTI (0.5, 0.5, 0) + gt_bboxes_3d = LiDARInstance3DBoxes( + gt_bboxes_3d, + box_dim=gt_bboxes_3d.shape[-1], + origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d) + + anns_results = dict( + gt_bboxes_3d=gt_bboxes_3d, + gt_labels_3d=gt_labels_3d, + gt_names=gt_names_3d, + gt_inds=gt_inds, + gt_fut_traj=gt_fut_traj, + gt_fut_traj_mask=gt_fut_traj_mask, + gt_past_traj=gt_past_traj, + gt_past_traj_mask=gt_past_traj_mask, + gt_sdc_bbox=gt_sdc_bbox, + gt_sdc_label=gt_sdc_label, + gt_sdc_fut_traj=gt_sdc_fut_traj, + gt_sdc_fut_traj_mask=gt_sdc_fut_traj_mask, + sdc_planning=sdc_planning, + sdc_planning_mask=sdc_planning_mask, + command=command, + ) + assert gt_fut_traj.shape[0] == gt_labels_3d.shape[0] + assert gt_past_traj.shape[0] == gt_labels_3d.shape[0] + return anns_results + + def get_data_info(self, index): + """Get data info according to the given index. + + Args: + index (int): Index of the sample data to get. + Returns: + dict: Data information that will be passed to the data \ + preprocessing pipelines. It includes the following keys: + + - sample_idx (str): Sample index. + - pts_filename (str): Filename of point clouds. + - sweeps (list[dict]): Infos of sweeps. + - timestamp (float): Sample timestamp. + - img_filename (str, optional): Image filename. + - lidar2img (list[np.ndarray], optional): Transformations \ + from lidar to different cameras. + - ann_info (dict): Annotation info. 
+ """ + info = self.data_infos[index] + + # semantic format + lane_info = self.lane_infos[index] if self.lane_infos else None + # panoptic format + location = self.nusc.get('log', self.nusc.get( + 'scene', info['scene_token'])['log_token'])['location'] + vectors = self.vector_map.gen_vectorized_samples(location, + info['ego2global_translation'], + info['ego2global_rotation']) + semantic_masks, instance_masks, forward_masks, backward_masks = preprocess_map(vectors, + self.patch_size, + self.canvas_size, + self.map_num_classes, + self.thickness, + self.angle_class) + instance_masks = np.rot90(instance_masks, k=-1, axes=(1, 2)) + instance_masks = torch.tensor(instance_masks.copy()) + gt_labels = [] + gt_bboxes = [] + gt_masks = [] + for cls in range(self.map_num_classes): + for i in np.unique(instance_masks[cls]): + if i == 0: + continue + gt_mask = (instance_masks[cls] == i).to(torch.uint8) + ys, xs = np.where(gt_mask) + gt_bbox = [min(xs), min(ys), max(xs), max(ys)] + gt_labels.append(cls) + gt_bboxes.append(gt_bbox) + gt_masks.append(gt_mask) + map_mask = obtain_map_info(self.nusc, + self.nusc_maps, + info, + patch_size=self.patch_size, + canvas_size=self.canvas_size, + layer_names=['lane_divider', 'road_divider']) + map_mask = np.flip(map_mask, axis=1) + map_mask = np.rot90(map_mask, k=-1, axes=(1, 2)) + map_mask = torch.tensor(map_mask.copy()) + for i, gt_mask in enumerate(map_mask[:-1]): + ys, xs = np.where(gt_mask) + gt_bbox = [min(xs), min(ys), max(xs), max(ys)] + gt_labels.append(i + self.map_num_classes) + gt_bboxes.append(gt_bbox) + gt_masks.append(gt_mask) + gt_labels = torch.tensor(gt_labels) + gt_bboxes = torch.tensor(np.stack(gt_bboxes)) + gt_masks = torch.stack(gt_masks) + + # standard protocal modified from SECOND.Pytorch + input_dict = dict( + sample_idx=info['token'], + pts_filename=info['lidar_path'], + sweeps=info['sweeps'], + ego2global_translation=info['ego2global_translation'], + ego2global_rotation=info['ego2global_rotation'], + prev_idx=info['prev'], + next_idx=info['next'], + scene_token=info['scene_token'], + can_bus=info['can_bus'], + frame_idx=info['frame_idx'], + timestamp=info['timestamp'] / 1e6, + map_filename=lane_info['maps']['map_mask'] if lane_info else None, + gt_lane_labels=gt_labels, + gt_lane_bboxes=gt_bboxes, + gt_lane_masks=gt_masks, + ) + + l2e_r = info['lidar2ego_rotation'] + l2e_t = info['lidar2ego_translation'] + e2g_r = info['ego2global_rotation'] + e2g_t = info['ego2global_translation'] + l2e_r_mat = Quaternion(l2e_r).rotation_matrix + e2g_r_mat = Quaternion(e2g_r).rotation_matrix + + l2g_r_mat = l2e_r_mat.T @ e2g_r_mat.T + l2g_t = l2e_t @ e2g_r_mat.T + e2g_t + + input_dict.update( + dict( + l2g_r_mat=l2g_r_mat.astype(np.float32), + l2g_t=l2g_t.astype(np.float32))) + + if self.modality['use_camera']: + image_paths = [] + lidar2img_rts = [] + lidar2cam_rts = [] + cam_intrinsics = [] + for cam_type, cam_info in info['cams'].items(): + image_paths.append(cam_info['data_path']) + # obtain lidar to image transformation matrix + lidar2cam_r = np.linalg.inv(cam_info['sensor2lidar_rotation']) + lidar2cam_t = cam_info[ + 'sensor2lidar_translation'] @ lidar2cam_r.T + lidar2cam_rt = np.eye(4) + lidar2cam_rt[:3, :3] = lidar2cam_r.T + lidar2cam_rt[3, :3] = -lidar2cam_t + intrinsic = cam_info['cam_intrinsic'] + viewpad = np.eye(4) + viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic + lidar2img_rt = (viewpad @ lidar2cam_rt.T) + lidar2img_rts.append(lidar2img_rt) + + cam_intrinsics.append(viewpad) + lidar2cam_rts.append(lidar2cam_rt.T) + + 
+            input_dict.update(
+                dict(
+                    img_filename=image_paths,
+                    lidar2img=lidar2img_rts,
+                    cam_intrinsic=cam_intrinsics,
+                    lidar2cam=lidar2cam_rts,
+                ))
+
+        # if not self.test_mode:
+        annos = self.get_ann_info(index)
+        input_dict['ann_info'] = annos
+        if 'sdc_planning' in input_dict['ann_info'].keys():
+            input_dict['sdc_planning'] = input_dict['ann_info']['sdc_planning']
+            input_dict['sdc_planning_mask'] = input_dict['ann_info']['sdc_planning_mask']
+            input_dict['command'] = input_dict['ann_info']['command']
+
+        rotation = Quaternion(input_dict['ego2global_rotation'])
+        translation = input_dict['ego2global_translation']
+        can_bus = input_dict['can_bus']
+        can_bus[:3] = translation
+        can_bus[3:7] = rotation
+        patch_angle = quaternion_yaw(rotation) / np.pi * 180
+        if patch_angle < 0:
+            patch_angle += 360
+        can_bus[-2] = patch_angle / 180 * np.pi
+        can_bus[-1] = patch_angle
+
+        # TODO: wrap the occupancy-related code below into a function
+        prev_indices, future_indices = self.occ_get_temporal_indices(
+            index, self.occ_receptive_field, self.occ_n_future)
+
+        # ego motions of all frames are needed
+        all_frames = prev_indices + [index] + future_indices
+
+        # whether an invalid frame is present
+        has_invalid_frame = -1 in all_frames[:self.occ_only_total_frames]
+        # NOTE: this covers only 7 frames in total, as it influences evaluation
+        input_dict['occ_has_invalid_frame'] = has_invalid_frame
+        input_dict['occ_img_is_valid'] = np.array(all_frames) >= 0
+
+        # may contain -1 placeholders for frames outside the current scene
+        future_frames = [index] + future_indices
+
+        # get lidar-to-ego and ego-to-global transforms for the current and future indices
+        occ_transforms = self.occ_get_transforms(
+            future_frames)  # entries for invalid frames are None
+        input_dict.update(occ_transforms)
+
+        # detection labels are needed for the current and future frames
+        input_dict['occ_future_ann_infos'] = \
+            self.get_future_detection_infos(future_frames)
+        return input_dict
+
+    def get_future_detection_infos(self, future_frames):
+        detection_ann_infos = []
+        for future_frame in future_frames:
+            if future_frame >= 0:
+                detection_ann_infos.append(
+                    self.occ_get_detection_ann_info(future_frame),
+                )
+            else:
+                detection_ann_infos.append(None)
+        return detection_ann_infos
+
+    def occ_get_temporal_indices(self, index, receptive_field, n_future):
+        current_scene_token = self.data_infos[index]['scene_token']
+
+        # generate the past indices
+        previous_indices = []
+
+        for t in range(-receptive_field + 1, 0):
+            index_t = index + t
+            if index_t >= 0 and self.data_infos[index_t]['scene_token'] == current_scene_token:
+                previous_indices.append(index_t)
+            else:
+                previous_indices.append(-1)  # placeholder for invalid indices
+
+        # generate the future indices
+        future_indices = []
+
+        for t in range(1, n_future + 1):
+            index_t = index + t
+            if index_t < len(self.data_infos) and self.data_infos[index_t]['scene_token'] == current_scene_token:
+                future_indices.append(index_t)
+            else:
+                # indices outside the current scene are invalid
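+                # Example (illustrative values): with index=3, receptive_field=3,
+                # n_future=2 and a scene break after frame 4, this method returns
+                # previous_indices=[1, 2] and future_indices=[4, -1];
+                # occ_get_transforms() maps the -1 entries to None.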
+ future_indices.append(-1) + + return previous_indices, future_indices + + def occ_get_transforms(self, indices, data_type=torch.float32): + """ + get l2e, e2g rotation and translation for each valid frame + """ + l2e_r_mats = [] + l2e_t_vecs = [] + e2g_r_mats = [] + e2g_t_vecs = [] + + for index in indices: + if index == -1: + l2e_r_mats.append(None) + l2e_t_vecs.append(None) + e2g_r_mats.append(None) + e2g_t_vecs.append(None) + else: + info = self.data_infos[index] + l2e_r = info['lidar2ego_rotation'] + l2e_t = info['lidar2ego_translation'] + e2g_r = info['ego2global_rotation'] + e2g_t = info['ego2global_translation'] + + l2e_r_mat = torch.from_numpy(Quaternion(l2e_r).rotation_matrix) + e2g_r_mat = torch.from_numpy(Quaternion(e2g_r).rotation_matrix) + + l2e_r_mats.append(l2e_r_mat.to(data_type)) + l2e_t_vecs.append(torch.tensor(l2e_t).to(data_type)) + e2g_r_mats.append(e2g_r_mat.to(data_type)) + e2g_t_vecs.append(torch.tensor(e2g_t).to(data_type)) + + res = { + 'occ_l2e_r_mats': l2e_r_mats, + 'occ_l2e_t_vecs': l2e_t_vecs, + 'occ_e2g_r_mats': e2g_r_mats, + 'occ_e2g_t_vecs': e2g_t_vecs, + } + + return res + + def occ_get_detection_ann_info(self, index): + info = self.data_infos[index].copy() + gt_bboxes_3d = info['gt_boxes'].copy() + gt_names_3d = info['gt_names'].copy() + gt_ins_inds = info['gt_inds'].copy() + + gt_vis_tokens = info.get('visibility_tokens', None) + + if self.use_valid_flag: + gt_valid_flag = info['valid_flag'] + else: + gt_valid_flag = info['num_lidar_pts'] > 0 + + assert self.occ_filter_by_valid_flag is False + if self.occ_filter_by_valid_flag: + gt_bboxes_3d = gt_bboxes_3d[gt_valid_flag] + gt_names_3d = gt_names_3d[gt_valid_flag] + gt_ins_inds = gt_ins_inds[gt_valid_flag] + gt_vis_tokens = gt_vis_tokens[gt_valid_flag] + + # cls_name to cls_id + gt_labels_3d = [] + for cat in gt_names_3d: + if cat in self.CLASSES: + gt_labels_3d.append(self.CLASSES.index(cat)) + else: + gt_labels_3d.append(-1) + gt_labels_3d = np.array(gt_labels_3d) + + if self.with_velocity: + gt_velocity = info['gt_velocity'] + nan_mask = np.isnan(gt_velocity[:, 0]) + gt_velocity[nan_mask] = [0.0, 0.0] + gt_bboxes_3d = np.concatenate([gt_bboxes_3d, gt_velocity], axis=-1) + + # the nuscenes box center is [0.5, 0.5, 0.5], we change it to be + # the same as KITTI (0.5, 0.5, 0) + gt_bboxes_3d = LiDARInstance3DBoxes( + gt_bboxes_3d, + box_dim=gt_bboxes_3d.shape[-1], + origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d) + + anns_results = dict( + gt_bboxes_3d=gt_bboxes_3d, + gt_labels_3d=gt_labels_3d, + # gt_names=gt_names_3d, + gt_inds=gt_ins_inds, + gt_vis_tokens=gt_vis_tokens, + ) + + return anns_results + + def __getitem__(self, idx): + """Get item from infos according to the given index. + Returns: + dict: Data dictionary of the corresponding index. + """ + if self.test_mode: + return self.prepare_test_data(idx) + while True: + + data = self.prepare_train_data(idx) + if data is None: + idx = self._rand_another(idx) + continue + return data + + def _format_bbox(self, results, jsonfile_prefix=None): + """Convert the results to the standard format. + Args: + results (list[dict]): Testing results of the dataset. + jsonfile_prefix (str): The prefix of the output jsonfile. + You can specify the output directory/filename by + modifying the jsonfile_prefix. Default: None. + Returns: + str: Path of the output json file. 
+ """ + nusc_annos = {} + nusc_map_annos = {} + mapped_class_names = self.CLASSES + + print('Start to convert detection format...') + for sample_id, det in enumerate(track_iter_progress(results)): + annos = [] + sample_token = self.data_infos[sample_id]['token'] + + if 'map' in self.eval_mod: + map_annos = {} + for key, value in det['ret_iou'].items(): + map_annos[key] = float(value.numpy()[0]) + nusc_map_annos[sample_token] = map_annos + + if 'boxes_3d' not in det: + nusc_annos[sample_token] = annos + continue + + boxes = output_to_nusc_box(det) + boxes_ego = copy.deepcopy(boxes) + boxes, keep_idx = lidar_nusc_box_to_global(self.data_infos[sample_id], boxes, + mapped_class_names, + self.eval_detection_configs, + self.eval_version) + for i, box in enumerate(boxes): + name = mapped_class_names[box.label] + if np.sqrt(box.velocity[0]**2 + box.velocity[1]**2) > 0.2: + if name in [ + 'car', + 'construction_vehicle', + 'bus', + 'truck', + 'trailer', + ]: + attr = 'vehicle.moving' + elif name in ['bicycle', 'motorcycle']: + attr = 'cycle.with_rider' + else: + attr = NuScenesDataset.DefaultAttribute[name] + else: + if name in ['pedestrian']: + attr = 'pedestrian.standing' + elif name in ['bus']: + attr = 'vehicle.stopped' + else: + attr = NuScenesDataset.DefaultAttribute[name] + + # center_ = box.center.tolist() + # change from ground height to center height + # center_[2] = center_[2] + (box.wlh.tolist()[2] / 2.0) + if name not in ['car', 'truck', 'bus', 'trailer', 'motorcycle', + 'bicycle', 'pedestrian', ]: + continue + + box_ego = boxes_ego[keep_idx[i]] + trans = box_ego.center + if 'traj' in det: + traj_local = det['traj'][keep_idx[i]].numpy()[..., :2] + traj_scores = det['traj_scores'][keep_idx[i]].numpy() + else: + traj_local = np.zeros((0,)) + traj_scores = np.zeros((0,)) + traj_ego = np.zeros_like(traj_local) + rot = Quaternion(axis=np.array([0, 0.0, 1.0]), angle=np.pi/2) + for kk in range(traj_ego.shape[0]): + traj_ego[kk] = convert_local_coords_to_global( + traj_local[kk], trans, rot) + + nusc_anno = dict( + sample_token=sample_token, + translation=box.center.tolist(), + size=box.wlh.tolist(), + rotation=box.orientation.elements.tolist(), + velocity=box.velocity[:2].tolist(), + detection_name=name, + detection_score=box.score, + attribute_name=attr, + tracking_name=name, + tracking_score=box.score, + tracking_id=box.token, + predict_traj=traj_ego, + predict_traj_score=traj_scores, + ) + annos.append(nusc_anno) + nusc_annos[sample_token] = annos + nusc_submissions = { + 'meta': self.modality, + 'results': nusc_annos, + 'map_results': nusc_map_annos, + } + + mkdir_or_exist(jsonfile_prefix) + res_path = osp.join(jsonfile_prefix, 'results_nusc.json') + print('Results writes to', res_path) + dump(nusc_submissions, res_path) + return res_path + + def format_results(self, results, jsonfile_prefix=None): + """Format the results to json (standard format for COCO evaluation). + + Args: + results (list[dict]): Testing results of the dataset. + jsonfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + + Returns: + tuple: Returns (result_files, tmp_dir), where `result_files` is a \ + dict containing the json filepaths, `tmp_dir` is the temporal \ + directory created for saving json files when \ + `jsonfile_prefix` is not specified. 
+ """ + assert isinstance(results, list), 'results must be a list' + # assert len(results) == len(self), ( + # 'The length of results is not equal to the dataset len: {} != {}'. + # format(len(results), len(self))) + + if jsonfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + jsonfile_prefix = osp.join(tmp_dir.name, 'results') + else: + tmp_dir = None + + result_files = self._format_bbox(results, jsonfile_prefix) + + return result_files, tmp_dir + + def _format_bbox_det(self, results, jsonfile_prefix=None): + """Convert the results to the standard format. + Args: + results (list[dict]): Testing results of the dataset. + jsonfile_prefix (str): The prefix of the output jsonfile. + You can specify the output directory/filename by + modifying the jsonfile_prefix. Default: None. + Returns: + str: Path of the output json file. + """ + nusc_annos = {} + mapped_class_names = self.CLASSES + + print('Start to convert detection format...') + for sample_id, det in enumerate(track_iter_progress(results)): + annos = [] + sample_token = self.data_infos[sample_id]['token'] + + if det is None: + nusc_annos[sample_token] = annos + continue + + boxes = output_to_nusc_box_det(det) + boxes_ego = copy.deepcopy(boxes) + boxes, keep_idx = lidar_nusc_box_to_global(self.data_infos[sample_id], boxes, + mapped_class_names, + self.eval_detection_configs, + self.eval_version) + for i, box in enumerate(boxes): + name = mapped_class_names[box.label] + if np.sqrt(box.velocity[0]**2 + box.velocity[1]**2) > 0.2: + if name in [ + 'car', + 'construction_vehicle', + 'bus', + 'truck', + 'trailer', + ]: + attr = 'vehicle.moving' + elif name in ['bicycle', 'motorcycle']: + attr = 'cycle.with_rider' + else: + attr = NuScenesDataset.DefaultAttribute[name] + else: + if name in ['pedestrian']: + attr = 'pedestrian.standing' + elif name in ['bus']: + attr = 'vehicle.stopped' + else: + attr = NuScenesDataset.DefaultAttribute[name] + + nusc_anno = dict( + sample_token=sample_token, + translation=box.center.tolist(), + size=box.wlh.tolist(), + rotation=box.orientation.elements.tolist(), + velocity=box.velocity[:2].tolist(), + detection_name=name, + detection_score=box.score, + attribute_name=attr, + ) + annos.append(nusc_anno) + nusc_annos[sample_token] = annos + nusc_submissions = { + 'meta': self.modality, + 'results': nusc_annos, + } + + mkdir_or_exist(jsonfile_prefix) + res_path = osp.join(jsonfile_prefix, 'results_nusc_det.json') + print('Results writes to', res_path) + dump(nusc_submissions, res_path) + return res_path + + def format_results_det(self, results, jsonfile_prefix=None): + """Format the results to json (standard format for COCO evaluation). + + Args: + results (list[dict]): Testing results of the dataset. + jsonfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + + Returns: + tuple: Returns (result_files, tmp_dir), where `result_files` is a \ + dict containing the json filepaths, `tmp_dir` is the temporal \ + directory created for saving json files when \ + `jsonfile_prefix` is not specified. + """ + assert isinstance(results, list), 'results must be a list' + # assert len(results) == len(self), ( + # 'The length of results is not equal to the dataset len: {} != {}'. 
+ # format(len(results), len(self))) + + if jsonfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + jsonfile_prefix = osp.join(tmp_dir.name, 'results_det') + else: + tmp_dir = None + + result_files = self._format_bbox_det(results, jsonfile_prefix) + return result_files, tmp_dir + + def evaluate(self, + results, + metric='bbox', + logger=None, + jsonfile_prefix=None, + result_names=['pts_bbox'], + show=False, + out_dir=None, + pipeline=None): + """Evaluation in nuScenes protocol. + Args: + results (list[dict]): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + jsonfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + show (bool): Whether to visualize. + Default: False. + out_dir (str): Path to save the visualization results. + Default: None. + pipeline (list[dict], optional): raw data loading for showing. + Default: None. + Returns: + dict[str, float]: Results of each evaluation metric. + """ + if isinstance(results, dict): + if 'occ_results_computed' in results.keys(): + occ_results_computed = results['occ_results_computed'] + out_metrics = ['iou'] + + # pan_eval + if occ_results_computed.get('pq', None) is not None: + out_metrics = ['iou', 'pq', 'sq', 'rq'] + + print("Occ-flow Val Results:") + for panoptic_key in out_metrics: + print(panoptic_key) + # HERE!! connect + print(' & '.join( + [f'{x:.1f}' for x in occ_results_computed[panoptic_key]])) + + if 'num_occ' in occ_results_computed.keys() and 'ratio_occ' in occ_results_computed.keys(): + print( + f"num occ evaluated:{occ_results_computed['num_occ']}") + print( + f"ratio occ evaluated: {occ_results_computed['ratio_occ'] * 100:.1f}%") + if 'planning_results_computed' in results.keys(): + planning_results_computed = results['planning_results_computed'] + planning_tab = PrettyTable() + planning_tab.field_names = [ + "metrics", "0.5s", "1.0s", "1.5s", "2.0s", "2.5s", "3.0s"] + for key in planning_results_computed.keys(): + value = planning_results_computed[key] + row_value = [] + row_value.append(key) + for i in range(len(value)): + row_value.append('%.4f' % float(value[i])) + planning_tab.add_row(row_value) + print(planning_tab) + results = results['bbox_results'] # get bbox_results + + result_files, tmp_dir = self.format_results(results, jsonfile_prefix) + result_files_det, tmp_dir = self.format_results_det( + results, jsonfile_prefix) + + if isinstance(result_files, dict): + results_dict = dict() + for name in result_names: + print('Evaluating bboxes of {}'.format(name)) + ret_dict = self._evaluate_single( + result_files[name], result_files_det[name]) + results_dict.update(ret_dict) + elif isinstance(result_files, str): + results_dict = self._evaluate_single( + result_files, result_files_det) + + if 'map' in self.eval_mod: + drivable_intersection = 0 + drivable_union = 0 + lanes_intersection = 0 + lanes_union = 0 + divider_intersection = 0 + divider_union = 0 + crossing_intersection = 0 + crossing_union = 0 + contour_intersection = 0 + contour_union = 0 + for i in range(len(results)): + drivable_intersection += results[i]['ret_iou']['drivable_intersection'] + drivable_union += results[i]['ret_iou']['drivable_union'] + lanes_intersection += results[i]['ret_iou']['lanes_intersection'] + lanes_union += 
results[i]['ret_iou']['lanes_union'] + divider_intersection += results[i]['ret_iou']['divider_intersection'] + divider_union += results[i]['ret_iou']['divider_union'] + crossing_intersection += results[i]['ret_iou']['crossing_intersection'] + crossing_union += results[i]['ret_iou']['crossing_union'] + contour_intersection += results[i]['ret_iou']['contour_intersection'] + contour_union += results[i]['ret_iou']['contour_union'] + results_dict.update({'drivable_iou': float(drivable_intersection / drivable_union), + 'lanes_iou': float(lanes_intersection / lanes_union), + 'divider_iou': float(divider_intersection / divider_union), + 'crossing_iou': float(crossing_intersection / crossing_union), + 'contour_iou': float(contour_intersection / contour_union)}) + + print(results_dict) + + if tmp_dir is not None: + tmp_dir.cleanup() + + if show: + self.show(results, out_dir, pipeline=pipeline) + return results_dict + + def _evaluate_single(self, + result_path, + result_path_det, + logger=None, + metric='bbox', + result_name='pts_bbox'): + """Evaluation for a single model in nuScenes protocol. + + Args: + result_path (str): Path of the result file. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + metric (str): Metric name used for evaluation. Default: 'bbox'. + result_name (str): Result name in the metric prefix. + Default: 'pts_bbox'. + + Returns: + dict: Dictionary of evaluation details. + """ + + # TODO: fix the evaluation pipelines + + output_dir = osp.join(*osp.split(result_path)[:-1]) + output_dir_det = osp.join(output_dir, 'det') + output_dir_track = osp.join(output_dir, 'track') + output_dir_motion = osp.join(output_dir, 'motion') + mkdir_or_exist(output_dir_det) + mkdir_or_exist(output_dir_track) + mkdir_or_exist(output_dir_motion) + + eval_set_map = { + 'v1.0-mini': 'mini_val', + 'v1.0-trainval': 'val', + } + detail = dict() + + if 'det' in self.eval_mod: + self.nusc_eval = NuScenesEval_custom( + self.nusc, + config=self.eval_detection_configs, + result_path=result_path_det, + eval_set=eval_set_map[self.version], + output_dir=output_dir_det, + verbose=True, + overlap_test=self.overlap_test, + data_infos=self.data_infos + ) + self.nusc_eval.main(plot_examples=0, render_curves=False) + # record metrics + metrics = load( + osp.join( + output_dir_det, + 'metrics_summary.json')) + metric_prefix = f'{result_name}_NuScenes' + for name in self.CLASSES: + for k, v in metrics['label_aps'][name].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}_AP_dist_{}'.format( + metric_prefix, name, k)] = val + for k, v in metrics['label_tp_errors'][name].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}_{}'.format(metric_prefix, name, k)] = val + for k, v in metrics['tp_errors'].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}'.format(metric_prefix, + self.ErrNameMapping[k])] = val + detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score'] + detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap'] + + if 'track' in self.eval_mod: + cfg = config_factory("tracking_nips_2019") + self.nusc_eval_track = TrackingEval( + config=cfg, + result_path=result_path, + eval_set=eval_set_map[self.version], + output_dir=output_dir_track, + verbose=True, + nusc_version=self.version, + nusc_dataroot=self.data_root + ) + self.nusc_eval_track.main() + # record metrics + metrics = load( + osp.join( + output_dir_track, + 'metrics_summary.json')) + keys = ['amota', 'amotp', 'recall', 'motar', + 'gt', 'mota', 'motp', 'mt', 'ml', 
'faf',
+                    'tp', 'fp', 'fn', 'ids', 'frag', 'tid', 'lgd']
+            metric_prefix = f'{result_name}_NuScenes'
+            for key in keys:
+                detail['{}/{}'.format(metric_prefix, key)] = metrics[key]
+
+        # if 'map' in self.eval_mod:
+        #     for i, ret_iou in enumerate(ret_ious):
+        #         detail['iou_{}'.format(i)] = ret_iou
+
+        if 'motion' in self.eval_mod:
+            self.nusc_eval_motion = MotionEval(
+                self.nusc,
+                config=self.eval_detection_configs,
+                result_path=result_path,
+                eval_set=eval_set_map[self.version],
+                output_dir=output_dir,
+                verbose=True,
+                overlap_test=self.overlap_test,
+                data_infos=self.data_infos,
+                category_convert_type='motion_category'
+            )
+            print('-'*50)
+            print(
+                'Evaluate on motion category, merging classes for vehicles and pedestrians...')
+            print('evaluate standard motion metrics...')
+            self.nusc_eval_motion.main(
+                plot_examples=0,
+                render_curves=False,
+                eval_mode='standard')
+            print('evaluate motion mAP-minFDE metrics...')
+            self.nusc_eval_motion.main(
+                plot_examples=0,
+                render_curves=False,
+                eval_mode='motion_map')
+            print('evaluate EPA motion metrics...')
+            self.nusc_eval_motion.main(
+                plot_examples=0,
+                render_curves=False,
+                eval_mode='epa')
+            print('-'*50)
+            print('Evaluate on detection category...')
+            self.nusc_eval_motion = MotionEval(
+                self.nusc,
+                config=self.eval_detection_configs,
+                result_path=result_path,
+                eval_set=eval_set_map[self.version],
+                output_dir=output_dir,
+                verbose=True,
+                overlap_test=self.overlap_test,
+                data_infos=self.data_infos,
+                category_convert_type='detection_category'
+            )
+            print('evaluate standard motion metrics...')
+            self.nusc_eval_motion.main(
+                plot_examples=0,
+                render_curves=False,
+                eval_mode='standard')
+            print('evaluate motion mAP-minFDE metrics...')
+            self.nusc_eval_motion.main(
+                plot_examples=0,
+                render_curves=False,
+                eval_mode='motion_map')
+            print('evaluate EPA motion metrics...')
+            self.nusc_eval_motion.main(
+                plot_examples=0,
+                render_curves=False,
+                eval_mode='epa')
+
+        return detail
diff --git a/mmcv/datasets/nuscenes_eval.py b/mmcv/datasets/nuscenes_eval.py
new file mode 100644
index 0000000..a0dc0b7
--- /dev/null
+++ b/mmcv/datasets/nuscenes_eval.py
@@ -0,0 +1,752 @@
+import argparse
+import copy
+import json
+import os
+import time
+import cv2
+import random
+import tqdm
+import torch
+from typing import Tuple, Dict, Any
+from mmcv.fileio.io import dump, load
+from torchvision.transforms.functional import rotate
+import numpy as np
+from pyquaternion import Quaternion
+from nuscenes import NuScenes
+from nuscenes.eval.common.config import config_factory
+from nuscenes.eval.common.data_classes import EvalBoxes
+from nuscenes.eval.detection.data_classes import DetectionConfig
+from nuscenes.eval.detection.evaluate import NuScenesEval
+from nuscenes.eval.detection.data_classes import DetectionBox
+from nuscenes.eval.detection.utils import category_to_detection_name
+from nuscenes.eval.tracking.data_classes import TrackingBox
+from nuscenes.utils.data_classes import Box
+from nuscenes.utils.splits import create_splits_scenes
+from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility, transform_matrix
+import pycocotools.mask as mask_util
+# from projects.mmdet3d_plugin.models.utils.visual import save_tensor
+from nuscenes.eval.common.loaders import load_gt, add_center_dist, filter_eval_boxes
+from nuscenes.eval.detection.algo import accumulate, calc_ap, calc_tp
+from nuscenes.eval.detection.data_classes import DetectionConfig, DetectionMetrics, DetectionBox, \
+    DetectionMetricDataList
+from nuscenes.eval.detection.render import summary_plot, 
class_pr_curve, dist_pr_curve, visualize_sample
+from matplotlib import pyplot as plt
+from nuscenes.eval.common.render import setup_axis
+from nuscenes.eval.detection.constants import TP_METRICS, DETECTION_NAMES, DETECTION_COLORS, TP_METRICS_UNITS, \
+    PRETTY_DETECTION_NAMES, PRETTY_TP_METRICS
+from nuscenes.eval.detection.data_classes import DetectionMetrics, DetectionMetricData, DetectionMetricDataList
+import mmcv
+
+
+Axis = Any
+
+def class_tp_curve(md_list: DetectionMetricDataList,
+                   metrics: DetectionMetrics,
+                   detection_name: str,
+                   min_recall: float,
+                   dist_th_tp: float,
+                   savepath: str = None,
+                   ax: Axis = None) -> None:
+    """
+    Plot the true positive curve for the specified class.
+    :param md_list: DetectionMetricDataList instance.
+    :param metrics: DetectionMetrics instance.
+    :param detection_name: The detection class to plot.
+    :param min_recall: Minimum recall value.
+    :param dist_th_tp: The distance threshold used to determine matches.
+    :param savepath: If given, saves the rendering here instead of displaying.
+    :param ax: Axes onto which to render.
+    """
+    # Get metric data for given detection class with tp distance threshold.
+
+    md = md_list[(detection_name, dist_th_tp)]
+    min_recall_ind = round(100 * min_recall)
+    if min_recall_ind <= md.max_recall_ind:
+        # For traffic_cone and barrier only a subset of the metrics are plotted.
+        rel_metrics = [m for m in TP_METRICS if not np.isnan(metrics.get_label_tp(detection_name, m))]
+        ylimit = max([max(getattr(md, metric)[min_recall_ind:md.max_recall_ind + 1]) for metric in rel_metrics]) * 1.1
+    else:
+        ylimit = 1.0
+
+    # Prepare axis.
+    if ax is None:
+        ax = setup_axis(title=PRETTY_DETECTION_NAMES[detection_name], xlabel='Recall', ylabel='Error', xlim=1,
+                        min_recall=min_recall)
+    ax.set_ylim(0, ylimit)
+
+    # Plot the recall vs. error curve for each tp metric.
+    for metric in TP_METRICS:
+        tp = metrics.get_label_tp(detection_name, metric)
+
+        # Plot only if we have valid data.
+        if tp is not np.nan and min_recall_ind <= md.max_recall_ind:
+            recall, error = md.recall[:md.max_recall_ind + 1], getattr(md, metric)[:md.max_recall_ind + 1]
+        else:
+            recall, error = [], []
+
+        # Change legend based on tp value.
+        if tp is np.nan:
+            label = '{}: n/a'.format(PRETTY_TP_METRICS[metric])
+        elif min_recall_ind > md.max_recall_ind:
+            label = '{}: nan'.format(PRETTY_TP_METRICS[metric])
+        else:
+            label = '{}: {:.2f} ({})'.format(PRETTY_TP_METRICS[metric], tp, TP_METRICS_UNITS[metric])
+        if metric == 'trans_err':
+            label += f' ({md.max_recall_ind})'  # add recall
+            print(f'Recall: {detection_name}: {md.max_recall_ind/100}')
+        ax.plot(recall, error, label=label)
+    ax.axvline(x=md.max_recall, linestyle='-.', color=(0, 0, 0, 0.3))
+    ax.legend(loc='best')
+
+    if savepath is not None:
+        plt.savefig(savepath)
+        plt.close()
+
+
+class DetectionBox_modified(DetectionBox):
+    def __init__(self, *args, token=None, visibility=None, index=None, **kwargs):
+        '''
+        Adds the annotation token, visibility level and per-scene index to DetectionBox.
+        '''
+        super().__init__(*args, **kwargs)
+        self.token = token
+        self.visibility = visibility
+        self.index = index
+
+    def serialize(self) -> dict:
+        """ Serialize instance into json-friendly format. 
""" + return { + 'token': self.token, + 'sample_token': self.sample_token, + 'translation': self.translation, + 'size': self.size, + 'rotation': self.rotation, + 'velocity': self.velocity, + 'ego_translation': self.ego_translation, + 'num_pts': self.num_pts, + 'detection_name': self.detection_name, + 'detection_score': self.detection_score, + 'attribute_name': self.attribute_name, + 'visibility': self.visibility, + 'index': self.index + + } + + @classmethod + def deserialize(cls, content: dict): + """ Initialize from serialized content. """ + return cls( + token=content['token'], + sample_token=content['sample_token'], + translation=tuple(content['translation']), + size=tuple(content['size']), + rotation=tuple(content['rotation']), + velocity=tuple(content['velocity']), + ego_translation=(0.0, 0.0, 0.0) if 'ego_translation' not in content + else tuple(content['ego_translation']), + num_pts=-1 if 'num_pts' not in content else int(content['num_pts']), + detection_name=content['detection_name'], + detection_score=-1.0 if 'detection_score' not in content else float(content['detection_score']), + attribute_name=content['attribute_name'], + visibility=content['visibility'], + index=content['index'], + ) + + +def center_in_image(box, intrinsic: np.ndarray, imsize: Tuple[int, int], vis_level: int = BoxVisibility.ANY) -> bool: + """ + Check if a box is visible inside an image without accounting for occlusions. + :param box: The box to be checked. + :param intrinsic: . Intrinsic camera matrix. + :param imsize: (width, height). + :param vis_level: One of the enumerations of . + :return True if visibility condition is satisfied. + """ + + center_3d = box.center.reshape(3, 1) + center_img = view_points(center_3d, intrinsic, normalize=True)[:2, :] + + visible = np.logical_and(center_img[0, :] > 0, center_img[0, :] < imsize[0]) + visible = np.logical_and(visible, center_img[1, :] < imsize[1]) + visible = np.logical_and(visible, center_img[1, :] > 0) + visible = np.logical_and(visible, center_3d[2, :] > 1) + + in_front = center_3d[2, :] > 0.1 # True if a corner is at least 0.1 meter in front of the camera. + + if vis_level == BoxVisibility.ALL: + return all(visible) and all(in_front) + elif vis_level == BoxVisibility.ANY: + return any(visible) and all(in_front) + elif vis_level == BoxVisibility.NONE: + return True + else: + raise ValueError("vis_level: {} not valid".format(vis_level)) + + +def exist_corners_in_image_but_not_all(box, intrinsic: np.ndarray, imsize: Tuple[int, int], + vis_level: int = BoxVisibility.ANY) -> bool: + """ + Check if a box is visible in images but not all corners in image . + :param box: The box to be checked. + :param intrinsic: . Intrinsic camera matrix. + :param imsize: (width, height). + :param vis_level: One of the enumerations of . + :return True if visibility condition is satisfied. + """ + + corners_3d = box.corners() + corners_img = view_points(corners_3d, intrinsic, normalize=True)[:2, :] + + visible = np.logical_and(corners_img[0, :] > 0, corners_img[0, :] < imsize[0]) + visible = np.logical_and(visible, corners_img[1, :] < imsize[1]) + visible = np.logical_and(visible, corners_img[1, :] > 0) + visible = np.logical_and(visible, corners_3d[2, :] > 1) + + in_front = corners_3d[2, :] > 0.1 # True if a corner is at least 0.1 meter in front of the camera. 
+
+    if any(visible) and not all(visible) and all(in_front):
+        return True
+    else:
+        return False
+
+
+def load_prediction(result_path: str, max_boxes_per_sample: int, box_cls, verbose: bool = False) \
+        -> Tuple[EvalBoxes, Dict]:
+    """
+    Loads object predictions from file.
+    :param result_path: Path to the .json result file provided by the user.
+    :param max_boxes_per_sample: Maximum number of boxes allowed per sample.
+    :param box_cls: Type of box to load, e.g. DetectionBox or TrackingBox.
+    :param verbose: Whether to print messages to stdout.
+    :return: The deserialized results and meta data.
+    """
+
+    # Load from file and check that the format is correct.
+    # with open(result_path) as f:
+    #     data = json.load(f)
+    data = load(result_path)
+    assert 'results' in data, 'Error: No field `results` in result file. Please note that the result format changed. ' \
+        'See https://www.nuscenes.org/object-detection for more information.'
+
+    # Deserialize results and get meta data.
+    all_results = EvalBoxes.deserialize(data['results'], box_cls)
+    meta = data['meta']
+    if verbose:
+        print("Loaded results from {}. Found detections for {} samples."
+              .format(result_path, len(all_results.sample_tokens)))
+
+    # Check that each sample has no more than max_boxes_per_sample predicted boxes.
+    for sample_token in all_results.sample_tokens:
+        assert len(all_results.boxes[sample_token]) <= max_boxes_per_sample, \
+            "Error: Only <= %d boxes per sample allowed!" % max_boxes_per_sample
+
+    return all_results, meta
+
+def load_gt(nusc: NuScenes, eval_split: str, box_cls, verbose: bool = False):
+    """
+    Loads ground truth boxes from the database.
+    :param nusc: A NuScenes instance.
+    :param eval_split: The evaluation split for which we load GT boxes.
+    :param box_cls: Type of box to load, e.g. DetectionBox or TrackingBox.
+    :param verbose: Whether to print messages to stdout.
+    :return: The GT boxes.
+    """
+
+    # Init.
+    if box_cls == DetectionBox_modified:
+        attribute_map = {a['token']: a['name'] for a in nusc.attribute}
+
+    if verbose:
+        print('Loading annotations for {} split from nuScenes version: {}'.format(eval_split, nusc.version))
+    # Read out all sample_tokens in DB.
+    sample_tokens_all = [s['token'] for s in nusc.sample]
+    assert len(sample_tokens_all) > 0, "Error: Database has no samples!"
+
+    # Only keep samples from this split.
+    splits = create_splits_scenes()
+
+    # Check compatibility of split with nusc_version.
+    version = nusc.version
+    if eval_split in {'train', 'val', 'train_detect', 'train_track'}:
+        assert version.endswith('trainval'), \
+            'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)
+    elif eval_split in {'mini_train', 'mini_val'}:
+        assert version.endswith('mini'), \
+            'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)
+    elif eval_split == 'test':
+        assert version.endswith('test'), \
+            'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)
+    else:
+        raise ValueError('Error: Requested split {} which this function cannot map to the correct NuScenes version.'
+                         .format(eval_split))
+
+    if eval_split == 'test':
+        # Check that you aren't trying to cheat :).
+        assert len(nusc.sample_annotation) > 0, \
+            'Error: You are trying to evaluate on the test set but you do not have the annotations!'
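+    # Build a per-scene temporal index: the first sample of every scene gets
+    # index 1, its successor 2, and so on. The index is attached to each GT box
+    # (DetectionBox_modified.index) so update_gt(type_='ord') can later filter
+    # boxes by their position within a scene.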
+    index_map = {}
+    for scene in nusc.scene:
+        first_sample_token = scene['first_sample_token']
+        sample = nusc.get('sample', first_sample_token)
+        index_map[first_sample_token] = 1
+        index = 2
+        while sample['next'] != '':
+            sample = nusc.get('sample', sample['next'])
+            index_map[sample['token']] = index
+            index += 1
+
+    sample_tokens = []
+    for sample_token in sample_tokens_all:
+        scene_token = nusc.get('sample', sample_token)['scene_token']
+        scene_record = nusc.get('scene', scene_token)
+        if scene_record['name'] in splits[eval_split]:
+            sample_tokens.append(sample_token)
+
+    all_annotations = EvalBoxes()
+
+    # Load annotations and filter predictions and annotations.
+    tracking_id_set = set()
+    for sample_token in tqdm.tqdm(sample_tokens, leave=verbose):
+
+        sample = nusc.get('sample', sample_token)
+        sample_annotation_tokens = sample['anns']
+
+        sample_boxes = []
+        for sample_annotation_token in sample_annotation_tokens:
+
+            sample_annotation = nusc.get('sample_annotation', sample_annotation_token)
+            if box_cls == DetectionBox_modified:
+                # Get label name in detection task and filter unused labels.
+                detection_name = category_to_detection_name(sample_annotation['category_name'])
+                if detection_name is None:
+                    continue
+
+                # Get attribute_name.
+                attr_tokens = sample_annotation['attribute_tokens']
+                attr_count = len(attr_tokens)
+                if attr_count == 0:
+                    attribute_name = ''
+                elif attr_count == 1:
+                    attribute_name = attribute_map[attr_tokens[0]]
+                else:
+                    raise Exception('Error: GT annotations must not have more than one attribute!')
+
+                sample_boxes.append(
+                    box_cls(
+                        token=sample_annotation_token,
+                        sample_token=sample_token,
+                        translation=sample_annotation['translation'],
+                        size=sample_annotation['size'],
+                        rotation=sample_annotation['rotation'],
+                        velocity=nusc.box_velocity(sample_annotation['token'])[:2],
+                        num_pts=sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts'],
+                        detection_name=detection_name,
+                        detection_score=-1.0,  # GT samples do not have a score.
+                        attribute_name=attribute_name,
+                        visibility=sample_annotation['visibility_token'],
+                        index=index_map[sample_token]
+                    )
+                )
+            elif box_cls == TrackingBox:
+                assert False
+            else:
+                raise NotImplementedError('Error: Invalid box_cls %s!' % box_cls)
+
+        all_annotations.add_boxes(sample_token, sample_boxes)
+
+    if verbose:
+        print("Loaded ground truth annotations for {} samples.".format(len(all_annotations.sample_tokens)))
+
+    return all_annotations
+
+
+def filter_eval_boxes_by_id(nusc: NuScenes,
+                            eval_boxes: EvalBoxes,
+                            id=None,
+                            verbose: bool = False) -> EvalBoxes:
+    """
+    Keeps only the boxes whose annotation token is contained in `id`.
+    :param nusc: An instance of the NuScenes class.
+    :param eval_boxes: An instance of the EvalBoxes class.
+    :param id: The set of annotation tokens used to keep boxes.
+    :param verbose: Whether to print to stdout.
+    """
+
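+    # Usage sketch (tokens are placeholders):
+    #   kept = filter_eval_boxes_by_id(nusc, boxes, id={'ann_token_a', 'ann_token_b'})
+    # Note: unlike the visibility variant below, this filters eval_boxes in place.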
+    # Accumulators for number of filtered boxes.
+    total, anns_filter = 0, 0
+    for ind, sample_token in enumerate(eval_boxes.sample_tokens):
+
+        # Filter on annotation tokens.
+        total += len(eval_boxes[sample_token])
+        filtered_boxes = []
+        for box in eval_boxes[sample_token]:
+            if box.token in id:
+                filtered_boxes.append(box)
+        anns_filter += len(filtered_boxes)
+        eval_boxes.boxes[sample_token] = filtered_boxes
+
+    if verbose:
+        print("=> Original number of boxes: %d" % total)
+        print("=> After anns based filtering: %d" % anns_filter)
+
+    return eval_boxes
+
+
+def filter_eval_boxes_by_visibility(
+        ori_eval_boxes: EvalBoxes,
+        visibility=None,
+        verbose: bool = False) -> EvalBoxes:
+    """
+    Keeps only the boxes whose visibility token matches `visibility`.
+    :param ori_eval_boxes: An instance of the EvalBoxes class; a deep copy is
+        filtered and returned, the original is left untouched.
+    :param visibility: The visibility token used to keep boxes.
+    :param verbose: Whether to print to stdout.
+    """
+
+    # Accumulators for number of filtered boxes.
+    eval_boxes = copy.deepcopy(ori_eval_boxes)
+    total, anns_filter = 0, 0
+    for ind, sample_token in enumerate(eval_boxes.sample_tokens):
+        # Filter on visibility.
+        total += len(eval_boxes[sample_token])
+        filtered_boxes = []
+        for box in eval_boxes[sample_token]:
+            if box.visibility == visibility:
+                filtered_boxes.append(box)
+        anns_filter += len(filtered_boxes)
+        eval_boxes.boxes[sample_token] = filtered_boxes
+
+    if verbose:
+        print("=> Original number of boxes: %d" % total)
+        print("=> After visibility based filtering: %d" % anns_filter)
+
+    return eval_boxes
+
+
+def filter_by_sample_token(ori_eval_boxes, valid_sample_tokens=[], verbose=False):
+    eval_boxes = copy.deepcopy(ori_eval_boxes)
+    for sample_token in eval_boxes.sample_tokens:
+        if sample_token not in valid_sample_tokens:
+            eval_boxes.boxes.pop(sample_token)
+    return eval_boxes
+
+
+def filter_eval_boxes_by_overlap(nusc: NuScenes,
+                                 eval_boxes: EvalBoxes,
+                                 verbose: bool = False) -> EvalBoxes:
+    """
+    Filters boxes based on camera overlap: only boxes whose center is visible
+    in more than one camera are kept.
+    :param nusc: An instance of the NuScenes class.
+    :param eval_boxes: An instance of the EvalBoxes class.
+    :param verbose: Whether to print to stdout.
+    """
+
+    # Cameras used for the overlap test.
+    cams = ['CAM_FRONT',
+            'CAM_FRONT_RIGHT',
+            'CAM_BACK_RIGHT',
+            'CAM_BACK',
+            'CAM_BACK_LEFT',
+            'CAM_FRONT_LEFT']
+
+    total, anns_filter = 0, 0
+    for ind, sample_token in enumerate(eval_boxes.sample_tokens):
+
+        # Filter on camera overlap.
+        total += len(eval_boxes[sample_token])
+        sample_record = nusc.get('sample', sample_token)
+        filtered_boxes = []
+        for box in eval_boxes[sample_token]:
+            count = 0
+            for cam in cams:
+                # copy-paste from the nuscenes devkit
+                sample_data_token = sample_record['data'][cam]
+                sd_record = nusc.get('sample_data', sample_data_token)
+                cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
+                sensor_record = nusc.get('sensor', cs_record['sensor_token'])
+                pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])
+                cam_intrinsic = np.array(cs_record['camera_intrinsic'])
+                imsize = (sd_record['width'], sd_record['height'])
+                new_box = Box(box.translation, box.size, Quaternion(box.rotation),
+                              name=box.detection_name, token='')
+
+                # Move box to ego vehicle coord system.
+                new_box.translate(-np.array(pose_record['translation']))
+                new_box.rotate(Quaternion(pose_record['rotation']).inverse)
+
+                # Move box to sensor coord system.
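+                # Chain so far: global -> ego (pose undone above) -> camera
+                # (calibrated-sensor extrinsics undone next), after which the
+                # intrinsics can be applied for the center-visibility check.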
+                new_box.translate(-np.array(cs_record['translation']))
+                new_box.rotate(Quaternion(cs_record['rotation']).inverse)
+
+                if center_in_image(new_box, cam_intrinsic, imsize, vis_level=BoxVisibility.ANY):
+                    count += 1
+                # if exist_corners_in_image_but_not_all(new_box, cam_intrinsic, imsize, vis_level=BoxVisibility.ANY):
+                #     count += 1
+
+            if count > 1:
+                with open('center_overlap.txt', 'a') as f:
+                    try:
+                        f.write(box.token + '\n')
+                    except Exception:
+                        pass
+                filtered_boxes.append(box)
+        anns_filter += len(filtered_boxes)
+        eval_boxes.boxes[sample_token] = filtered_boxes
+
+    verbose = True  # force a summary of the overlap filtering
+
+    if verbose:
+        print("=> Original number of boxes: %d" % total)
+        print("=> After overlap based filtering: %d" % anns_filter)
+
+    return eval_boxes
+
+
+class NuScenesEval_custom(NuScenesEval):
+    """
+    Custom nuScenes detection evaluation that supports filtering ground truth
+    by camera overlap, visibility level and per-scene temporal index.
+    """
+
+    def __init__(self,
+                 nusc: NuScenes,
+                 config: DetectionConfig,
+                 result_path: str,
+                 eval_set: str,
+                 output_dir: str = None,
+                 verbose: bool = True,
+                 overlap_test=False,
+                 eval_mask=False,
+                 data_infos=None
+                 ):
+        """
+        Initialize a DetectionEval object.
+        :param nusc: A NuScenes object.
+        :param config: A DetectionConfig object.
+        :param result_path: Path of the nuScenes JSON result file.
+        :param eval_set: The dataset split to evaluate on, e.g. train, val or test.
+        :param output_dir: Folder to save plots and results to.
+        :param verbose: Whether to print to stdout.
+        """
+
+        self.nusc = nusc
+        self.result_path = result_path
+        self.eval_set = eval_set
+        self.output_dir = output_dir
+        self.verbose = verbose
+        self.cfg = config
+        self.overlap_test = overlap_test
+        self.eval_mask = eval_mask
+        self.data_infos = data_infos
+        # Check result file exists.
+        assert os.path.exists(result_path), 'Error: The result file does not exist!'
+
+        # Make dirs.
+        self.plot_dir = os.path.join(self.output_dir, 'plots')
+        if not os.path.isdir(self.output_dir):
+            os.makedirs(self.output_dir)
+        if not os.path.isdir(self.plot_dir):
+            os.makedirs(self.plot_dir)
+
+        # Load data.
+        if verbose:
+            print('Initializing nuScenes detection evaluation')
+        self.pred_boxes, self.meta = load_prediction(self.result_path, self.cfg.max_boxes_per_sample, DetectionBox,
+                                                     verbose=verbose)
+        self.gt_boxes = load_gt(self.nusc, self.eval_set, DetectionBox_modified, verbose=verbose)
+
+        assert set(self.pred_boxes.sample_tokens) == set(self.gt_boxes.sample_tokens), \
+            "Samples in split doesn't match samples in predictions."
+
+        # Add center distances.
+        self.pred_boxes = add_center_dist(nusc, self.pred_boxes)
+        self.gt_boxes = add_center_dist(nusc, self.gt_boxes)
+
+        # Filter boxes (distance, points per box, etc.).
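+        # Standard nuScenes range/point filtering comes first; when overlap_test
+        # is set, boxes are additionally restricted to those whose centers are
+        # seen by more than one camera (see filter_eval_boxes_by_overlap above).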
+ + if verbose: + print('Filtering predictions') + self.pred_boxes = filter_eval_boxes(nusc, self.pred_boxes, self.cfg.class_range, verbose=verbose) + if verbose: + print('Filtering ground truth annotations') + self.gt_boxes = filter_eval_boxes(nusc, self.gt_boxes, self.cfg.class_range, verbose=verbose) + + if self.overlap_test: + self.pred_boxes = filter_eval_boxes_by_overlap(self.nusc, self.pred_boxes) + + self.gt_boxes = filter_eval_boxes_by_overlap(self.nusc, self.gt_boxes, verbose=True) + + self.all_gt = copy.deepcopy(self.gt_boxes) + self.all_preds = copy.deepcopy(self.pred_boxes) + self.sample_tokens = self.gt_boxes.sample_tokens + + self.index_map = {} + for scene in nusc.scene: + first_sample_token = scene['first_sample_token'] + sample = nusc.get('sample', first_sample_token) + self.index_map[first_sample_token] = 1 + index = 2 + while sample['next'] != '': + sample = nusc.get('sample', sample['next']) + self.index_map[sample['token']] = index + index += 1 + + def update_gt(self, type_='vis', visibility='1', index=1): + if type_ == 'vis': + self.visibility_test = True + if self.visibility_test: + '''[{'description': 'visibility of whole object is between 0 and 40%', + 'token': '1', + 'level': 'v0-40'}, + {'description': 'visibility of whole object is between 40 and 60%', + 'token': '2', + 'level': 'v40-60'}, + {'description': 'visibility of whole object is between 60 and 80%', + 'token': '3', + 'level': 'v60-80'}, + {'description': 'visibility of whole object is between 80 and 100%', + 'token': '4', + 'level': 'v80-100'}]''' + + self.gt_boxes = filter_eval_boxes_by_visibility(self.all_gt, visibility, verbose=True) + + elif type_ == 'ord': + + valid_tokens = [key for (key, value) in self.index_map.items() if value == index] + # from IPython import embed + # embed() + self.gt_boxes = filter_by_sample_token(self.all_gt, valid_tokens) + self.pred_boxes = filter_by_sample_token(self.all_preds, valid_tokens) + self.sample_tokens = self.gt_boxes.sample_tokens + + + def evaluate(self) -> Tuple[DetectionMetrics, DetectionMetricDataList]: + """ + Performs the actual evaluation. + :return: A tuple of high-level and the raw metric data. + """ + start_time = time.time() + + # ----------------------------------- + # Step 1: Accumulate metric data for all classes and distance thresholds. + # ----------------------------------- + if self.verbose: + print('Accumulating metric data...') + metric_data_list = DetectionMetricDataList() + + # print(self.cfg.dist_fcn_callable, self.cfg.dist_ths) + # self.cfg.dist_ths = [0.3] + # self.cfg.dist_fcn_callable + for class_name in self.cfg.class_names: + for dist_th in self.cfg.dist_ths: + md = accumulate(self.gt_boxes, self.pred_boxes, class_name, self.cfg.dist_fcn_callable, dist_th) + metric_data_list.set(class_name, dist_th, md) + + # ----------------------------------- + # Step 2: Calculate metrics from the data. + # ----------------------------------- + if self.verbose: + print('Calculating metrics...') + metrics = DetectionMetrics(self.cfg) + for class_name in self.cfg.class_names: + # Compute APs. + for dist_th in self.cfg.dist_ths: + metric_data = metric_data_list[(class_name, dist_th)] + ap = calc_ap(metric_data, self.cfg.min_recall, self.cfg.min_precision) + metrics.add_label_ap(class_name, dist_th, ap) + # Compute TP metrics. 
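+            # TP metrics are measured at the single dist_th_tp threshold; metrics
+            # that are undefined for a class (e.g. velocity error for traffic
+            # cones) are recorded as NaN below.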
+ for metric_name in TP_METRICS: + metric_data = metric_data_list[(class_name, self.cfg.dist_th_tp)] + if class_name in ['traffic_cone'] and metric_name in ['attr_err', 'vel_err', 'orient_err']: + tp = np.nan + elif class_name in ['barrier'] and metric_name in ['attr_err', 'vel_err']: + tp = np.nan + else: + tp = calc_tp(metric_data, self.cfg.min_recall, metric_name) + metrics.add_label_tp(class_name, metric_name, tp) + + # Compute evaluation time. + metrics.add_runtime(time.time() - start_time) + + return metrics, metric_data_list + + def render(self, metrics: DetectionMetrics, md_list: DetectionMetricDataList) -> None: + """ + Renders various PR and TP curves. + :param metrics: DetectionMetrics instance. + :param md_list: DetectionMetricDataList instance. + """ + if self.verbose: + print('Rendering PR and TP curves') + + def savepath(name): + return os.path.join(self.plot_dir, name + '.pdf') + + summary_plot(md_list, metrics, min_precision=self.cfg.min_precision, min_recall=self.cfg.min_recall, + dist_th_tp=self.cfg.dist_th_tp, savepath=savepath('summary')) + + for detection_name in self.cfg.class_names: + class_pr_curve(md_list, metrics, detection_name, self.cfg.min_precision, self.cfg.min_recall, + savepath=savepath(detection_name + '_pr')) + + class_tp_curve(md_list, metrics, detection_name, self.cfg.min_recall, self.cfg.dist_th_tp, + savepath=savepath(detection_name + '_tp')) + + for dist_th in self.cfg.dist_ths: + dist_pr_curve(md_list, metrics, dist_th, self.cfg.min_precision, self.cfg.min_recall, + savepath=savepath('dist_pr_' + str(dist_th))) + + +if __name__ == "__main__": + + # Settings. + parser = argparse.ArgumentParser(description='Evaluate nuScenes detection results.', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('result_path', type=str, help='The submission as a JSON file.') + parser.add_argument('--output_dir', type=str, default='~/nuscenes-metrics', + help='Folder to store result metrics, graphs and example visualizations.') + parser.add_argument('--eval_set', type=str, default='val', + help='Which dataset split to evaluate on, train, val or test.') + parser.add_argument('--dataroot', type=str, default='data/nuscenes', + help='Default nuScenes data directory.') + parser.add_argument('--version', type=str, default='v1.0-trainval', + help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.') + parser.add_argument('--config_path', type=str, default='', + help='Path to the configuration file.' 
+                             'If no path given, the CVPR 2019 configuration will be used.')
+    parser.add_argument('--plot_examples', type=int, default=0,
+                        help='How many example visualizations to write to disk.')
+    parser.add_argument('--render_curves', type=int, default=1,
+                        help='Whether to render PR and TP curves to disk.')
+    parser.add_argument('--verbose', type=int, default=1,
+                        help='Whether to print to stdout.')
+    args = parser.parse_args()
+
+    result_path_ = os.path.expanduser(args.result_path)
+    output_dir_ = os.path.expanduser(args.output_dir)
+    eval_set_ = args.eval_set
+    dataroot_ = args.dataroot
+    version_ = args.version
+    config_path = args.config_path
+    plot_examples_ = args.plot_examples
+    render_curves_ = bool(args.render_curves)
+    verbose_ = bool(args.verbose)
+
+    if config_path == '':
+        cfg_ = config_factory('detection_cvpr_2019')
+    else:
+        with open(config_path, 'r') as _f:
+            cfg_ = DetectionConfig.deserialize(json.load(_f))
+
+    nusc_ = NuScenes(version=version_, verbose=verbose_, dataroot=dataroot_)
+    nusc_eval = NuScenesEval_custom(nusc_, config=cfg_, result_path=result_path_, eval_set=eval_set_,
+                                    output_dir=output_dir_, verbose=verbose_)
+    for vis in ['1', '2', '3', '4']:
+        nusc_eval.update_gt(type_='vis', visibility=vis)
+        print(f'================ {vis} ===============')
+        nusc_eval.main(plot_examples=plot_examples_, render_curves=render_curves_)
+    # for index in range(1, 41):
+    #     nusc_eval.update_gt(type_='ord', index=index)
diff --git a/mmcv/datasets/nuscenes_mono_dataset.py b/mmcv/datasets/nuscenes_mono_dataset.py
new file mode 100644
index 0000000..b036b87
--- /dev/null
+++ b/mmcv/datasets/nuscenes_mono_dataset.py
@@ -0,0 +1,777 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import copy
+import mmcv
+import numpy as np
+import pyquaternion
+import tempfile
+import torch
+import warnings
+from nuscenes.utils.data_classes import Box as NuScenesBox
+from os import path as osp
+
+from mmdet3d.core import bbox3d2result, box3d_multiclass_nms, xywhr2xyxyr
+from mmdet.datasets import DATASETS, CocoDataset
+from mmdet3d.core import show_multi_modality_result
+from mmdet3d.core.bbox import CameraInstance3DBoxes, get_box_type
+from mmdet3d.datasets.pipelines import Compose
+from mmdet3d.datasets.utils import extract_result_dict, get_loading_pipeline
+
+
+@DATASETS.register_module()
+class CustomNuScenesMonoDataset(CocoDataset):
+    r"""Monocular 3D detection on NuScenes Dataset.
+    This class serves as the API for experiments on the NuScenes Dataset.
+    Please refer to `NuScenes Dataset <https://www.nuscenes.org/download>`_
+    for data downloading.
+    Args:
+        ann_file (str): Path of annotation file.
+        data_root (str): Path of dataset root.
+        load_interval (int, optional): Interval of loading the dataset. It is
+            used to uniformly sample the dataset. Defaults to 1.
+        with_velocity (bool, optional): Whether to include velocity prediction
+            in the experiments. Defaults to True.
+        modality (dict, optional): Modality to specify the sensor data used
+            as input. Defaults to None.
+        box_type_3d (str, optional): Type of 3D box of this dataset.
+            Based on the `box_type_3d`, the dataset will encapsulate the box
+            in its original format and then convert it to `box_type_3d`.
+            Defaults to 'Camera' in this class. Available options include:
+            - 'LiDAR': Box in LiDAR coordinates.
+            - 'Depth': Box in depth coordinates, usually for indoor datasets.
+            - 'Camera': Box in camera coordinates.
+        eval_version (str, optional): Configuration version of evaluation.
+            Defaults to 'detection_cvpr_2019'. 
+ use_valid_flag (bool): Whether to use `use_valid_flag` key in the info + file as mask to filter gt_boxes and gt_names. Defaults to False. + version (str, optional): Dataset version. Defaults to 'v1.0-trainval'. + """ + CLASSES = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle', + 'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone', + 'barrier') + DefaultAttribute = { + 'car': 'vehicle.parked', + 'pedestrian': 'pedestrian.moving', + 'trailer': 'vehicle.parked', + 'truck': 'vehicle.parked', + 'bus': 'vehicle.moving', + 'motorcycle': 'cycle.without_rider', + 'construction_vehicle': 'vehicle.parked', + 'bicycle': 'cycle.without_rider', + 'barrier': '', + 'traffic_cone': '', + } + # https://github.com/nutonomy/nuscenes-devkit/blob/57889ff20678577025326cfc24e57424a829be0a/python-sdk/nuscenes/eval/detection/evaluate.py#L222 # noqa + ErrNameMapping = { + 'trans_err': 'mATE', + 'scale_err': 'mASE', + 'orient_err': 'mAOE', + 'vel_err': 'mAVE', + 'attr_err': 'mAAE' + } + + def __init__(self, + data_root, + load_interval=1, + with_velocity=True, + modality=None, + box_type_3d='Camera', + eval_version='detection_cvpr_2019', + use_valid_flag=False, + overlap_test=False, + version='v1.0-trainval', + **kwargs): + super().__init__(**kwargs) + # overlap_test = True + self.data_root = data_root + self.overlap_test = overlap_test + self.load_interval = load_interval + self.with_velocity = with_velocity + self.modality = modality + self.box_type_3d, self.box_mode_3d = get_box_type(box_type_3d) + self.eval_version = eval_version + self.use_valid_flag = use_valid_flag + self.bbox_code_size = 9 + self.version = version + if self.eval_version is not None: + from nuscenes.eval.detection.config import config_factory + self.eval_detection_configs = config_factory(self.eval_version) + if self.modality is None: + self.modality = dict( + use_camera=True, + use_lidar=False, + use_radar=False, + use_map=False, + use_external=False) + + def pre_pipeline(self, results): + """Initialization before data preparation. + Args: + results (dict): Dict before data preprocessing. + - img_fields (list): Image fields. + - bbox3d_fields (list): 3D bounding boxes fields. + - pts_mask_fields (list): Mask fields of points. + - pts_seg_fields (list): Mask fields of point segments. + - bbox_fields (list): Fields of bounding boxes. + - mask_fields (list): Fields of masks. + - seg_fields (list): Segment fields. + - box_type_3d (str): 3D box type. + - box_mode_3d (str): 3D box mode. + """ + results['img_prefix'] = '' # self.img_prefix + # print('img_prefix', self.img_prefix) + results['seg_prefix'] = self.seg_prefix + results['proposal_file'] = self.proposal_file + results['img_fields'] = [] + results['bbox3d_fields'] = [] + results['pts_mask_fields'] = [] + results['pts_seg_fields'] = [] + results['bbox_fields'] = [] + results['mask_fields'] = [] + results['seg_fields'] = [] + results['box_type_3d'] = self.box_type_3d + results['box_mode_3d'] = self.box_mode_3d + + def _parse_ann_info(self, img_info, ann_info): + """Parse bbox annotation. + Args: + img_info (list[dict]): Image info. + ann_info (list[dict]): Annotation info of an image. 
+ Returns: + dict: A dict containing the following keys: bboxes, labels, \ + gt_bboxes_3d, gt_labels_3d, attr_labels, centers2d, \ + depths, bboxes_ignore, masks, seg_map + """ + gt_bboxes = [] + gt_labels = [] + attr_labels = [] + gt_bboxes_ignore = [] + gt_masks_ann = [] + gt_bboxes_cam3d = [] + centers2d = [] + depths = [] + for i, ann in enumerate(ann_info): + if ann.get('ignore', False): + continue + x1, y1, w, h = ann['bbox'] + inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0)) + inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0)) + if inter_w * inter_h == 0: + continue + if ann['area'] <= 0 or w < 1 or h < 1: + continue + if ann['category_id'] not in self.cat_ids: + continue + bbox = [x1, y1, x1 + w, y1 + h] + if ann.get('iscrowd', False): + gt_bboxes_ignore.append(bbox) + else: + gt_bboxes.append(bbox) + gt_labels.append(self.cat2label[ann['category_id']]) + attr_labels.append(ann['attribute_id']) + gt_masks_ann.append(ann.get('segmentation', None)) + # 3D annotations in camera coordinates + bbox_cam3d = np.array(ann['bbox_cam3d']).reshape(1, -1) + velo_cam3d = np.array(ann['velo_cam3d']).reshape(1, 2) + nan_mask = np.isnan(velo_cam3d[:, 0]) + velo_cam3d[nan_mask] = [0.0, 0.0] + bbox_cam3d = np.concatenate([bbox_cam3d, velo_cam3d], axis=-1) + gt_bboxes_cam3d.append(bbox_cam3d.squeeze()) + # 2.5D annotations in camera coordinates + center2d = ann['center2d'][:2] + depth = ann['center2d'][2] + centers2d.append(center2d) + depths.append(depth) + + if gt_bboxes: + gt_bboxes = np.array(gt_bboxes, dtype=np.float32) + gt_labels = np.array(gt_labels, dtype=np.int64) + attr_labels = np.array(attr_labels, dtype=np.int64) + else: + gt_bboxes = np.zeros((0, 4), dtype=np.float32) + gt_labels = np.array([], dtype=np.int64) + attr_labels = np.array([], dtype=np.int64) + + if gt_bboxes_cam3d: + gt_bboxes_cam3d = np.array(gt_bboxes_cam3d, dtype=np.float32) + centers2d = np.array(centers2d, dtype=np.float32) + depths = np.array(depths, dtype=np.float32) + else: + gt_bboxes_cam3d = np.zeros((0, self.bbox_code_size), + dtype=np.float32) + centers2d = np.zeros((0, 2), dtype=np.float32) + depths = np.zeros((0), dtype=np.float32) + + gt_bboxes_cam3d = CameraInstance3DBoxes( + gt_bboxes_cam3d, + box_dim=gt_bboxes_cam3d.shape[-1], + origin=(0.5, 0.5, 0.5)) + gt_labels_3d = copy.deepcopy(gt_labels) + + if gt_bboxes_ignore: + gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) + else: + gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) + + seg_map = img_info['filename'].replace('jpg', 'png') + + ann = dict( + bboxes=gt_bboxes, + labels=gt_labels, + gt_bboxes_3d=gt_bboxes_cam3d, + gt_labels_3d=gt_labels_3d, + attr_labels=attr_labels, + centers2d=centers2d, + depths=depths, + bboxes_ignore=gt_bboxes_ignore, + masks=gt_masks_ann, + seg_map=seg_map) + + return ann + + def get_attr_name(self, attr_idx, label_name): + """Get attribute from predicted index. + This is a workaround to predict attribute when the predicted velocity + is not reliable. We map the predicted attribute index to the one + in the attribute set. If it is consistent with the category, we will + keep it. Otherwise, we will use the default attribute. + Args: + attr_idx (int): Attribute index. + label_name (str): Predicted category name. + Returns: + str: Predicted attribute name. 
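The attribute fallback documented above is easy to get wrong, so here is a standalone sketch of the same rule; ATTRS, VALID_ATTRS, and pick_attr are illustrative names, and the table below abbreviates the full per-class mapping (truck/trailer/bus/construction_vehicle behave like car, motorcycle like bicycle):

ATTRS = ['cycle.with_rider', 'cycle.without_rider', 'pedestrian.moving',
         'pedestrian.standing', 'pedestrian.sitting_lying_down',
         'vehicle.moving', 'vehicle.parked', 'vehicle.stopped', 'None']
VALID_ATTRS = {
    'car': {'vehicle.moving', 'vehicle.parked', 'vehicle.stopped'},
    'pedestrian': {'pedestrian.moving', 'pedestrian.standing',
                   'pedestrian.sitting_lying_down'},
    'bicycle': {'cycle.with_rider', 'cycle.without_rider'},
}


def pick_attr(attr_idx, label_name, defaults):
    # Keep the predicted attribute only if it is plausible for the
    # predicted class; otherwise fall back to the per-class default.
    candidate = ATTRS[attr_idx]
    if candidate in VALID_ATTRS.get(label_name, set()):
        return candidate
    return defaults[label_name]


print(pick_attr(5, 'car', {'car': 'vehicle.parked'}))  # vehicle.moving (kept)
print(pick_attr(0, 'car', {'car': 'vehicle.parked'}))  # vehicle.parked (fallback)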
+ """ + # TODO: Simplify the variable name + AttrMapping_rev2 = [ + 'cycle.with_rider', 'cycle.without_rider', 'pedestrian.moving', + 'pedestrian.standing', 'pedestrian.sitting_lying_down', + 'vehicle.moving', 'vehicle.parked', 'vehicle.stopped', 'None' + ] + if label_name == 'car' or label_name == 'bus' \ + or label_name == 'truck' or label_name == 'trailer' \ + or label_name == 'construction_vehicle': + if AttrMapping_rev2[attr_idx] == 'vehicle.moving' or \ + AttrMapping_rev2[attr_idx] == 'vehicle.parked' or \ + AttrMapping_rev2[attr_idx] == 'vehicle.stopped': + return AttrMapping_rev2[attr_idx] + else: + return CustomNuScenesMonoDataset.DefaultAttribute[label_name] + elif label_name == 'pedestrian': + if AttrMapping_rev2[attr_idx] == 'pedestrian.moving' or \ + AttrMapping_rev2[attr_idx] == 'pedestrian.standing' or \ + AttrMapping_rev2[attr_idx] == \ + 'pedestrian.sitting_lying_down': + return AttrMapping_rev2[attr_idx] + else: + return CustomNuScenesMonoDataset.DefaultAttribute[label_name] + elif label_name == 'bicycle' or label_name == 'motorcycle': + if AttrMapping_rev2[attr_idx] == 'cycle.with_rider' or \ + AttrMapping_rev2[attr_idx] == 'cycle.without_rider': + return AttrMapping_rev2[attr_idx] + else: + return CustomNuScenesMonoDataset.DefaultAttribute[label_name] + else: + return CustomNuScenesMonoDataset.DefaultAttribute[label_name] + + def _format_bbox(self, results, jsonfile_prefix=None): + """Convert the results to the standard format. + Args: + results (list[dict]): Testing results of the dataset. + jsonfile_prefix (str): The prefix of the output jsonfile. + You can specify the output directory/filename by + modifying the jsonfile_prefix. Default: None. + Returns: + str: Path of the output json file. + """ + nusc_annos = {} + mapped_class_names = self.CLASSES + + print('Start to convert detection format...') + + CAM_NUM = 6 + + for sample_id, det in enumerate(mmcv.track_iter_progress(results)): + + if sample_id % CAM_NUM == 0: + boxes_per_frame = [] + attrs_per_frame = [] + + # need to merge results from images of the same sample + annos = [] + boxes, attrs = output_to_nusc_box(det) + sample_token = self.data_infos[sample_id]['token'] + boxes, attrs = cam_nusc_box_to_global(self.data_infos[sample_id], + boxes, attrs, + mapped_class_names, + self.eval_detection_configs, + self.eval_version) + + boxes_per_frame.extend(boxes) + attrs_per_frame.extend(attrs) + # Remove redundant predictions caused by overlap of images + if (sample_id + 1) % CAM_NUM != 0: + continue + boxes = global_nusc_box_to_cam( + self.data_infos[sample_id + 1 - CAM_NUM], boxes_per_frame, + mapped_class_names, self.eval_detection_configs, + self.eval_version) + cam_boxes3d, scores, labels = nusc_box_to_cam_box3d(boxes) + # box nms 3d over 6 images in a frame + # TODO: move this global setting into config + nms_cfg = dict( + use_rotate_nms=True, + nms_across_levels=False, + nms_pre=4096, + nms_thr=0.05, + score_thr=0.01, + min_bbox_size=0, + max_per_frame=500) + from mmcv import Config + nms_cfg = Config(nms_cfg) + cam_boxes3d_for_nms = xywhr2xyxyr(cam_boxes3d.bev) + boxes3d = cam_boxes3d.tensor + # generate attr scores from attr labels + attrs = labels.new_tensor([attr for attr in attrs_per_frame]) + boxes3d, scores, labels, attrs = box3d_multiclass_nms( + boxes3d, + cam_boxes3d_for_nms, + scores, + nms_cfg.score_thr, + nms_cfg.max_per_frame, + nms_cfg, + mlvl_attr_scores=attrs) + cam_boxes3d = CameraInstance3DBoxes(boxes3d, box_dim=9) + det = bbox3d2result(cam_boxes3d, scores, labels, attrs) + boxes, attrs = 
output_to_nusc_box(det) + boxes, attrs = cam_nusc_box_to_global( + self.data_infos[sample_id + 1 - CAM_NUM], boxes, attrs, + mapped_class_names, self.eval_detection_configs, + self.eval_version) + + for i, box in enumerate(boxes): + name = mapped_class_names[box.label] + attr = self.get_attr_name(attrs[i], name) + nusc_anno = dict( + sample_token=sample_token, + translation=box.center.tolist(), + size=box.wlh.tolist(), + rotation=box.orientation.elements.tolist(), + velocity=box.velocity[:2].tolist(), + detection_name=name, + detection_score=box.score, + attribute_name=attr) + annos.append(nusc_anno) + # other views results of the same frame should be concatenated + if sample_token in nusc_annos: + nusc_annos[sample_token].extend(annos) + else: + nusc_annos[sample_token] = annos + + nusc_submissions = { + 'meta': self.modality, + 'results': nusc_annos, + } + + mmcv.mkdir_or_exist(jsonfile_prefix) + res_path = osp.join(jsonfile_prefix, 'results_nusc.json') + print('Results writes to', res_path) + mmcv.dump(nusc_submissions, res_path) + return res_path + + def _evaluate_single(self, + result_path, + logger=None, + metric='bbox', + result_name='img_bbox'): + """Evaluation for a single model in nuScenes protocol. + Args: + result_path (str): Path of the result file. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + metric (str): Metric name used for evaluation. Default: 'bbox'. + result_name (str): Result name in the metric prefix. + Default: 'img_bbox'. + Returns: + dict: Dictionary of evaluation details. + """ + from nuscenes import NuScenes + #from nuscenes.eval.detection.evaluate import NuScenesEval + from .nuscnes_eval import NuScenesEval_custom + output_dir = osp.join(*osp.split(result_path)[:-1]) + self.nusc = NuScenes( + version=self.version, dataroot=self.data_root, verbose=False) + eval_set_map = { + 'v1.0-mini': 'mini_val', + 'v1.0-trainval': 'val', + } + # nusc_eval = NuScenesEval( + # nusc, + # config=self.eval_detection_configs, + # result_path=result_path, + # eval_set=eval_set_map[self.version], + # output_dir=output_dir, + # verbose=False) + self.nusc_eval = NuScenesEval_custom( + self.nusc, + config=self.eval_detection_configs, + result_path=result_path, + eval_set=eval_set_map[self.version], + output_dir=output_dir, + verbose=True, + overlap_test=self.overlap_test, + data_infos=self.data_infos + ) + + self.nusc_eval.main(render_curves=True) + + # record metrics + metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json')) + detail = dict() + metric_prefix = f'{result_name}_NuScenes' + for name in self.CLASSES: + for k, v in metrics['label_aps'][name].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val + for k, v in metrics['label_tp_errors'][name].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}_{}'.format(metric_prefix, name, k)] = val + for k, v in metrics['tp_errors'].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}'.format(metric_prefix, + self.ErrNameMapping[k])] = val + + detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score'] + detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap'] + return detail + + def format_results(self, results, jsonfile_prefix=None, **kwargs): + """Format the results to json (standard format for COCO evaluation). + Args: + results (list[tuple | numpy.ndarray]): Testing results of the + dataset. + jsonfile_prefix (str | None): The prefix of json files. 
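The tail of _evaluate_single above simply flattens metrics_summary.json into logger-friendly keys. A self-contained sketch of that step; the function name and arguments are placeholders, not part of the patch:

import json


def flatten_metrics(summary_path, class_names, err_name_mapping,
                    prefix='img_bbox_NuScenes'):
    # Mirror of the flattening loop in _evaluate_single above.
    with open(summary_path, 'r') as f:
        metrics = json.load(f)
    detail = {}
    for name in class_names:
        for k, v in metrics['label_aps'][name].items():
            detail[f'{prefix}/{name}_AP_dist_{k}'] = float(f'{v:.4f}')
        for k, v in metrics['label_tp_errors'][name].items():
            detail[f'{prefix}/{name}_{k}'] = float(f'{v:.4f}')
    for k, v in metrics['tp_errors'].items():
        detail[f'{prefix}/{err_name_mapping[k]}'] = float(f'{v:.4f}')
    detail[f'{prefix}/NDS'] = metrics['nd_score']
    detail[f'{prefix}/mAP'] = metrics['mean_ap']
    return detail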
It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + Returns: + tuple: (result_files, tmp_dir), result_files is a dict containing \ + the json filepaths, tmp_dir is the temporal directory created \ + for saving json files when jsonfile_prefix is not specified. + """ + assert isinstance(results, list), 'results must be a list' + assert len(results) == len(self), ( + 'The length of results is not equal to the dataset len: {} != {}'. + format(len(results), len(self))) + + if jsonfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + jsonfile_prefix = osp.join(tmp_dir.name, 'results') + else: + tmp_dir = None + + # currently the output prediction results could be in two formats + # 1. list of dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...) + # 2. list of dict('pts_bbox' or 'img_bbox': + # dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...)) + # this is a workaround to enable evaluation of both formats on nuScenes + # refer to https://github.com/open-mmlab/mmdetection3d/issues/449 + if not ('pts_bbox' in results[0] or 'img_bbox' in results[0]): + result_files = self._format_bbox(results, jsonfile_prefix) + else: + # should take the inner dict out of 'pts_bbox' or 'img_bbox' dict + result_files = dict() + for name in results[0]: + # not evaluate 2D predictions on nuScenes + if '2d' in name: + continue + print(f'\nFormating bboxes of {name}') + results_ = [out[name] for out in results] + tmp_file_ = osp.join(jsonfile_prefix, name) + result_files.update( + {name: self._format_bbox(results_, tmp_file_)}) + + return result_files, tmp_dir + + def evaluate(self, + results, + metric='bbox', + logger=None, + jsonfile_prefix=None, + result_names=['img_bbox'], + show=False, + out_dir=None, + pipeline=None): + """Evaluation in nuScenes protocol. + Args: + results (list[dict]): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + jsonfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + show (bool): Whether to visualize. + Default: False. + out_dir (str): Path to save the visualization results. + Default: None. + pipeline (list[dict], optional): raw data loading for showing. + Default: None. + Returns: + dict[str, float]: Results of each evaluation metric. + """ + + result_files, tmp_dir = self.format_results(results, jsonfile_prefix) + + if isinstance(result_files, dict): + results_dict = dict() + for name in result_names: + print('Evaluating bboxes of {}'.format(name)) + ret_dict = self._evaluate_single(result_files[name]) + results_dict.update(ret_dict) + elif isinstance(result_files, str): + results_dict = self._evaluate_single(result_files) + + if tmp_dir is not None: + tmp_dir.cleanup() + + if show: + self.show(results, out_dir, pipeline=pipeline) + return results_dict + + def _extract_data(self, index, pipeline, key, load_annos=False): + """Load data using input pipeline and extract data according to key. + Args: + index (int): Index for accessing the target data. + pipeline (:obj:`Compose`): Composed data loading pipeline. + key (str | list[str]): One single or a list of data key. + load_annos (bool): Whether to load data annotations. + If True, need to set self.test_mode as False before loading. 
+ Returns: + np.ndarray | torch.Tensor | list[np.ndarray | torch.Tensor]: + A single or a list of loaded data. + """ + assert pipeline is not None, 'data loading pipeline is not provided' + img_info = self.data_infos[index] + input_dict = dict(img_info=img_info) + + if load_annos: + ann_info = self.get_ann_info(index) + input_dict.update(dict(ann_info=ann_info)) + + self.pre_pipeline(input_dict) + example = pipeline(input_dict) + + # extract data items according to keys + if isinstance(key, str): + data = extract_result_dict(example, key) + else: + data = [extract_result_dict(example, k) for k in key] + + return data + + def _get_pipeline(self, pipeline): + """Get data loading pipeline in self.show/evaluate function. + Args: + pipeline (list[dict] | None): Input pipeline. If None is given, \ + get from self.pipeline. + """ + if pipeline is None: + if not hasattr(self, 'pipeline') or self.pipeline is None: + warnings.warn( + 'Use default pipeline for data loading, this may cause ' + 'errors when data is on ceph') + return self._build_default_pipeline() + loading_pipeline = get_loading_pipeline(self.pipeline.transforms) + return Compose(loading_pipeline) + return Compose(pipeline) + + def _build_default_pipeline(self): + """Build the default pipeline for this dataset.""" + pipeline = [ + dict(type='LoadImageFromFileMono3D'), + dict( + type='DefaultFormatBundle3D', + class_names=self.CLASSES, + with_label=False), + dict(type='Collect3D', keys=['img']) + ] + return Compose(pipeline) + + def show(self, results, out_dir, show=True, pipeline=None): + """Results visualization. + Args: + results (list[dict]): List of bounding boxes results. + out_dir (str): Output directory of visualization result. + show (bool): Visualize the results online. + pipeline (list[dict], optional): raw data loading for showing. + Default: None. + """ + assert out_dir is not None, 'Expect out_dir, got none.' + pipeline = self._get_pipeline(pipeline) + for i, result in enumerate(results): + if 'img_bbox' in result.keys(): + result = result['img_bbox'] + data_info = self.data_infos[i] + img_path = data_info['file_name'] + file_name = osp.split(img_path)[-1].split('.')[0] + img, img_metas = self._extract_data(i, pipeline, + ['img', 'img_metas']) + # need to transpose channel to first dim + img = img.numpy().transpose(1, 2, 0) + gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'] + pred_bboxes = result['boxes_3d'] + show_multi_modality_result( + img, + gt_bboxes, + pred_bboxes, + img_metas['cam2img'], + out_dir, + file_name, + box_mode='camera', + show=show) + + +def output_to_nusc_box(detection): + """Convert the output to the box class in the nuScenes. + Args: + detection (dict): Detection results. + - boxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox. + - scores_3d (torch.Tensor): Detection scores. + - labels_3d (torch.Tensor): Predicted box labels. + - attrs_3d (torch.Tensor, optional): Predicted attributes. + Returns: + list[:obj:`NuScenesBox`]: List of standard NuScenesBoxes. 
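The body that follows permutes the box dimensions and flips the yaw sign before composing the devkit quaternion. In isolation, with dummy values (a sketch, not the full function):

import numpy as np
import pyquaternion

box_dims = np.array([[1.6, 1.9, 4.5]])           # dims of one camera box
box_dims[:, [0, 1, 2]] = box_dims[:, [2, 0, 1]]  # same index permutation as below
box_yaw = -np.array([0.3])                       # flip yaw sign for the nusc box

q1 = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box_yaw[0])
q2 = pyquaternion.Quaternion(axis=[1, 0, 0], radians=np.pi / 2)
quat = q2 * q1   # yaw about z, then the fixed camera-to-box-frame tilt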
+ """ + box3d = detection['boxes_3d'] + scores = detection['scores_3d'].numpy() + labels = detection['labels_3d'].numpy() + attrs = None + if 'attrs_3d' in detection: + attrs = detection['attrs_3d'].numpy() + + box_gravity_center = box3d.gravity_center.numpy() + box_dims = box3d.dims.numpy() + box_yaw = box3d.yaw.numpy() + + # convert the dim/rot to nuscbox convention + box_dims[:, [0, 1, 2]] = box_dims[:, [2, 0, 1]] + box_yaw = -box_yaw + + box_list = [] + for i in range(len(box3d)): + q1 = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box_yaw[i]) + q2 = pyquaternion.Quaternion(axis=[1, 0, 0], radians=np.pi / 2) + quat = q2 * q1 + velocity = (box3d.tensor[i, 7], 0.0, box3d.tensor[i, 8]) + box = NuScenesBox( + box_gravity_center[i], + box_dims[i], + quat, + label=labels[i], + score=scores[i], + velocity=velocity) + box_list.append(box) + return box_list, attrs + + +def cam_nusc_box_to_global(info, + boxes, + attrs, + classes, + eval_configs, + eval_version='detection_cvpr_2019'): + """Convert the box from camera to global coordinate. + Args: + info (dict): Info for a specific sample data, including the + calibration information. + boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes. + classes (list[str]): Mapped classes in the evaluation. + eval_configs (object): Evaluation configuration object. + eval_version (str): Evaluation version. + Default: 'detection_cvpr_2019' + Returns: + list: List of standard NuScenesBoxes in the global + coordinate. + """ + box_list = [] + attr_list = [] + for (box, attr) in zip(boxes, attrs): + # Move box to ego vehicle coord system + box.rotate(pyquaternion.Quaternion(info['cam2ego_rotation'])) + box.translate(np.array(info['cam2ego_translation'])) + # filter det in ego. + cls_range_map = eval_configs.class_range + radius = np.linalg.norm(box.center[:2], 2) + det_range = cls_range_map[classes[box.label]] + if radius > det_range: + continue + # Move box to global coord system + box.rotate(pyquaternion.Quaternion(info['ego2global_rotation'])) + box.translate(np.array(info['ego2global_translation'])) + box_list.append(box) + attr_list.append(attr) + return box_list, attr_list + + +def global_nusc_box_to_cam(info, + boxes, + classes, + eval_configs, + eval_version='detection_cvpr_2019'): + """Convert the box from global to camera coordinate. + Args: + info (dict): Info for a specific sample data, including the + calibration information. + boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes. + classes (list[str]): Mapped classes in the evaluation. + eval_configs (object): Evaluation configuration object. + eval_version (str): Evaluation version. + Default: 'detection_cvpr_2019' + Returns: + list: List of standard NuScenesBoxes in the global + coordinate. + """ + box_list = [] + for box in boxes: + # Move box to ego vehicle coord system + box.translate(-np.array(info['ego2global_translation'])) + box.rotate( + pyquaternion.Quaternion(info['ego2global_rotation']).inverse) + # filter det in ego. + cls_range_map = eval_configs.class_range + radius = np.linalg.norm(box.center[:2], 2) + det_range = cls_range_map[classes[box.label]] + if radius > det_range: + continue + # Move box to camera coord system + box.translate(-np.array(info['cam2ego_translation'])) + box.rotate(pyquaternion.Quaternion(info['cam2ego_rotation']).inverse) + box_list.append(box) + return box_list + + +def nusc_box_to_cam_box3d(boxes): + """Convert boxes from :obj:`NuScenesBox` to :obj:`CameraInstance3DBoxes`. 
+ Args: + boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes. + Returns: + tuple (:obj:`CameraInstance3DBoxes` | torch.Tensor | torch.Tensor): \ + Converted 3D bounding boxes, scores and labels. + """ + locs = torch.Tensor([b.center for b in boxes]).view(-1, 3) + dims = torch.Tensor([b.wlh for b in boxes]).view(-1, 3) + rots = torch.Tensor([b.orientation.yaw_pitch_roll[0] + for b in boxes]).view(-1, 1) + velocity = torch.Tensor([b.velocity[:2] for b in boxes]).view(-1, 2) + + # convert nusbox to cambox convention + dims[:, [0, 1, 2]] = dims[:, [1, 2, 0]] + rots = -rots + + boxes_3d = torch.cat([locs, dims, rots, velocity], dim=1).cuda() + cam_boxes3d = CameraInstance3DBoxes( + boxes_3d, box_dim=9, origin=(0.5, 0.5, 0.5)) + scores = torch.Tensor([b.score for b in boxes]).cuda() + labels = torch.LongTensor([b.label for b in boxes]).cuda() + nms_scores = scores.new_zeros(scores.shape[0], 10 + 1) + indices = labels.new_tensor(list(range(scores.shape[0]))) + nms_scores[indices, labels] = scores + return cam_boxes3d, nms_scores, labels \ No newline at end of file diff --git a/mmcv/datasets/nuscenes_styled_eval_utils.py b/mmcv/datasets/nuscenes_styled_eval_utils.py new file mode 100644 index 0000000..a8053aa --- /dev/null +++ b/mmcv/datasets/nuscenes_styled_eval_utils.py @@ -0,0 +1,755 @@ +from collections import defaultdict +from typing import List, Dict, Tuple, Union, Callable +import abc +import numpy as np +from pyquaternion import Quaternion + + +def center_distance(gt_box, pred_box) -> float: + """ + L2 distance between the box centers (xy only). + :param gt_box: GT annotation sample. + :param pred_box: Predicted sample. + :return: L2 distance. + """ + return np.linalg.norm(np.array(pred_box.translation[:2]) - np.array(gt_box.translation[:2])) + + +def velocity_l2(gt_box, pred_box) -> float: + """ + L2 distance between the velocity vectors (xy only). + If the predicted velocities are nan, we return inf, which is subsequently clipped to 1. + :param gt_box: GT annotation sample. + :param pred_box: Predicted sample. + :return: L2 distance. + """ + return np.linalg.norm(np.array(pred_box.velocity) - np.array(gt_box.velocity)) + + +def yaw_diff(gt_box, eval_box, period: float = 2*np.pi) -> float: + """ + Returns the yaw angle difference between the orientation of two boxes. + :param gt_box: Ground truth box. + :param eval_box: Predicted box. + :param period: Periodicity in radians for assessing angle difference. + :return: Yaw angle difference in radians in [0, pi]. + """ + yaw_gt = quaternion_yaw(Quaternion(gt_box.rotation)) + yaw_est = quaternion_yaw(Quaternion(eval_box.rotation)) + + return abs(angle_diff(yaw_gt, yaw_est, period)) + + +def angle_diff(x: float, y: float, period: float) -> float: + """ + Get the smallest angle difference between 2 angles: the angle from y to x. + :param x: To angle. + :param y: From angle. + :param period: Periodicity in radians for assessing angle difference. + :return: . Signed smallest between-angle difference in range (-pi, pi). + """ + + # calculate angle difference, modulo to [0, 2*pi] + diff = (x - y + period / 2) % period - period / 2 + if diff > np.pi: + diff = diff - (2 * np.pi) # shift (pi, 2*pi] to (-pi, 0] + + return diff + + +def attr_acc(gt_box, pred_box) -> float: + """ + Computes the classification accuracy for the attribute of this class (if any). + If the GT class has no attributes or the annotation is missing attributes, we assign an accuracy of nan, which is + ignored later on. + :param gt_box: GT annotation sample. 
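A quick sanity check of the matching metrics above, using stand-in boxes; SimpleNamespace only mimics the translation/velocity/rotation fields these helpers read:

from types import SimpleNamespace

import numpy as np

gt = SimpleNamespace(translation=(0.0, 0.0, 1.0), velocity=(1.0, 0.0),
                     rotation=(1.0, 0.0, 0.0, 0.0))    # identity quaternion
pred = SimpleNamespace(translation=(3.0, 4.0, 1.0), velocity=(0.0, 0.0),
                       rotation=(0.0, 0.0, 0.0, 1.0))  # 180 deg about z

print(center_distance(gt, pred))   # 5.0 (3-4-5 triangle in the xy plane)
print(velocity_l2(gt, pred))       # 1.0
print(yaw_diff(gt, pred))          # 3.14159... (pi)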
+    :param pred_box: Predicted sample.
+    :return: Attribute classification accuracy (0 or 1) or nan if GT annotation does not have any attributes.
+    """
+    if gt_box.attribute_name == '':
+        # If the class does not have attributes or this particular sample is missing attributes, return nan, which is
+        # ignored later. Note that about 0.4% of the sample_annotations have no attributes, although they should.
+        acc = np.nan
+    else:
+        # Check whether the attribute label is correct.
+        acc = float(gt_box.attribute_name == pred_box.attribute_name)
+    return acc
+
+
+def scale_iou(sample_annotation, sample_result) -> float:
+    """
+    This method compares predictions to the ground truth in terms of scale.
+    It is equivalent to intersection over union (IOU) between the two boxes in 3D,
+    if we assume that the boxes are aligned, i.e. translation and rotation are considered identical.
+    :param sample_annotation: GT annotation sample.
+    :param sample_result: Predicted sample.
+    :return: Scale IOU.
+    """
+    # Validate inputs.
+    sa_size = np.array(sample_annotation.size)
+    sr_size = np.array(sample_result.size)
+    assert all(sa_size > 0), 'Error: sample_annotation sizes must be >0.'
+    assert all(sr_size > 0), 'Error: sample_result sizes must be >0.'
+
+    # Compute IOU.
+    min_wlh = np.minimum(sa_size, sr_size)
+    volume_annotation = np.prod(sa_size)
+    volume_result = np.prod(sr_size)
+    intersection = np.prod(min_wlh)  # type: float
+    union = volume_annotation + volume_result - intersection  # type: float
+    iou = intersection / union
+
+    return iou
+
+
+def quaternion_yaw(q: Quaternion) -> float:
+    """
+    Calculate the yaw angle from a quaternion.
+    Note that this only works for a quaternion that represents a box in lidar or global coordinate frame.
+    It does not work for a box in the camera frame.
+    :param q: Quaternion of interest.
+    :return: Yaw angle in radians.
+    """
+
+    # Project into the xy plane.
+    v = np.dot(q.rotation_matrix, np.array([1, 0, 0]))
+
+    # Measure yaw using arctan.
+    yaw = np.arctan2(v[1], v[0])
+
+    return yaw
+
+
+def cummean(x: np.array) -> np.array:
+    """
+    Computes the cumulative mean up to each position in a NaN-sensitive way.
+    - If all values are NaN, return an array of ones.
+    - If some values are NaN, accumulate arrays discarding those entries.
+    """
+    if sum(np.isnan(x)) == len(x):
+        # All values in the array are NaN.
+        return np.ones(len(x))  # If all errors are NaN, set the error to 1 for all operating points.
+    else:
+        # Accumulate in a nan-aware manner.
+        sum_vals = np.nancumsum(x.astype(float))  # Cumulative sum ignoring nans.
+        count_vals = np.cumsum(~np.isnan(x))  # Number of non-nans up to each position.
+        return np.divide(sum_vals, count_vals, out=np.zeros_like(sum_vals), where=count_vals != 0)
+
+
+class DetectionMetricData(abc.ABC):
+    """ This class holds accumulated and interpolated data required to calculate the detection metrics. """
+
+    nelem = 101
+
+    def __init__(self,
+                 recall: np.array,
+                 precision: np.array,
+                 confidence: np.array,
+                 trans_err: np.array,
+                 vel_err: np.array,
+                 scale_err: np.array,
+                 orient_err: np.array,
+                 attr_err: np.array):
+
+        # Assert lengths.
+        assert len(recall) == self.nelem
+        assert len(precision) == self.nelem
+        assert len(confidence) == self.nelem
+        assert len(trans_err) == self.nelem
+        assert len(vel_err) == self.nelem
+        assert len(scale_err) == self.nelem
+        assert len(orient_err) == self.nelem
+        assert len(attr_err) == self.nelem
+
+        # Assert ordering.
+        assert all(confidence == sorted(confidence, reverse=True))  # Confidences should be descending.
+ assert all(recall == sorted(recall)) # Recalls should be ascending. + + # Set attributes explicitly to help IDEs figure out what is going on. + self.recall = recall + self.precision = precision + self.confidence = confidence + self.trans_err = trans_err + self.vel_err = vel_err + self.scale_err = scale_err + self.orient_err = orient_err + self.attr_err = attr_err + + def __eq__(self, other): + eq = True + for key in self.serialize().keys(): + eq = eq and np.array_equal(getattr(self, key), getattr(other, key)) + return eq + + @property + def max_recall_ind(self): + """ Returns index of max recall achieved. """ + + # Last instance of confidence > 0 is index of max achieved recall. + non_zero = np.nonzero(self.confidence)[0] + if len(non_zero) == 0: # If there are no matches, all the confidence values will be zero. + max_recall_ind = 0 + else: + max_recall_ind = non_zero[-1] + + return max_recall_ind + + @property + def max_recall(self): + """ Returns max recall achieved. """ + + return self.recall[self.max_recall_ind] + + def serialize(self): + """ Serialize instance into json-friendly format. """ + return { + 'recall': self.recall.tolist(), + 'precision': self.precision.tolist(), + 'confidence': self.confidence.tolist(), + 'trans_err': self.trans_err.tolist(), + 'vel_err': self.vel_err.tolist(), + 'scale_err': self.scale_err.tolist(), + 'orient_err': self.orient_err.tolist(), + 'attr_err': self.attr_err.tolist(), + } + + @classmethod + def deserialize(cls, content: dict): + """ Initialize from serialized content. """ + return cls(recall=np.array(content['recall']), + precision=np.array(content['precision']), + confidence=np.array(content['confidence']), + trans_err=np.array(content['trans_err']), + vel_err=np.array(content['vel_err']), + scale_err=np.array(content['scale_err']), + orient_err=np.array(content['orient_err']), + attr_err=np.array(content['attr_err'])) + + @classmethod + def no_predictions(cls): + """ Returns a md instance corresponding to having no predictions. """ + return cls(recall=np.linspace(0, 1, cls.nelem), + precision=np.zeros(cls.nelem), + confidence=np.zeros(cls.nelem), + trans_err=np.ones(cls.nelem), + vel_err=np.ones(cls.nelem), + scale_err=np.ones(cls.nelem), + orient_err=np.ones(cls.nelem), + attr_err=np.ones(cls.nelem)) + + @classmethod + def random_md(cls): + """ Returns an md instance corresponding to a random results. """ + return cls(recall=np.linspace(0, 1, cls.nelem), + precision=np.random.random(cls.nelem), + confidence=np.linspace(0, 1, cls.nelem)[::-1], + trans_err=np.random.random(cls.nelem), + vel_err=np.random.random(cls.nelem), + scale_err=np.random.random(cls.nelem), + orient_err=np.random.random(cls.nelem), + attr_err=np.random.random(cls.nelem)) + + +class DetectionMetricDataList: + """ This stores a set of MetricData in a dict indexed by (name, match-distance). """ + + def __init__(self): + self.md = {} + + def __getitem__(self, key): + return self.md[key] + + def __eq__(self, other): + eq = True + for key in self.md.keys(): + eq = eq and self[key] == other[key] + return eq + + def get_class_data(self, detection_name: str) -> List[Tuple[DetectionMetricData, float]]: + """ Get all the MetricData entries for a certain detection_name. """ + return [(md, dist_th) for (name, dist_th), md in self.md.items() if name == detection_name] + + def get_dist_data(self, dist_th: float) -> List[Tuple[DetectionMetricData, str]]: + """ Get all the MetricData entries for a certain match_distance. 
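A small consistency check for DetectionMetricData above, needing no external data:

md = DetectionMetricData.no_predictions()
md_rt = DetectionMetricData.deserialize(md.serialize())
assert md == md_rt                    # serialize/deserialize round-trips
assert md.max_recall_ind == 0         # all confidences zero -> index 0
assert md.max_recall == md.recall[0]  # i.e. recall at the first bin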
""" + return [(md, detection_name) for (detection_name, dist), md in self.md.items() if dist == dist_th] + + def set(self, detection_name: str, match_distance: float, data: DetectionMetricData): + """ Sets the MetricData entry for a certain detection_name and match_distance. """ + self.md[(detection_name, match_distance)] = data + + def serialize(self) -> dict: + return {key[0] + ':' + str(key[1]): value.serialize() for key, value in self.md.items()} + + @classmethod + def deserialize(cls, content: dict): + mdl = cls() + for key, md in content.items(): + name, distance = key.split(':') + mdl.set(name, float(distance), DetectionMetricData.deserialize(md)) + return mdl + +class DetectionMetrics: + """ Stores average precision and true positive metric results. Provides properties to summarize. """ + + def __init__(self, cfg: dict): + + self.cfg = cfg + self._label_aps = defaultdict(lambda: defaultdict(float)) + self._label_tp_errors = defaultdict(lambda: defaultdict(float)) + self.eval_time = None + + def add_label_ap(self, detection_name: str, dist_th: float, ap: float) -> None: + self._label_aps[detection_name][dist_th] = ap + + def get_label_ap(self, detection_name: str, dist_th: float) -> float: + return self._label_aps[detection_name][dist_th] + + def add_label_tp(self, detection_name: str, metric_name: str, tp: float): + self._label_tp_errors[detection_name][metric_name] = tp + + def get_label_tp(self, detection_name: str, metric_name: str) -> float: + return self._label_tp_errors[detection_name][metric_name] + + def add_runtime(self, eval_time: float) -> None: + self.eval_time = eval_time + + @property + def mean_dist_aps(self) -> Dict[str, float]: + """ Calculates the mean over distance thresholds for each label. """ + return {class_name: np.mean(list(d.values())) for class_name, d in self._label_aps.items()} + + @property + def mean_ap(self) -> float: + """ Calculates the mean AP by averaging over distance thresholds and classes. """ + return float(np.mean(list(self.mean_dist_aps.values()))) + + @property + def tp_errors(self) -> Dict[str, float]: + """ Calculates the mean true positive error across all classes for each metric. """ + errors = {} + for metric_name in self.cfg['tp_metrics']: + class_errors = [] + for detection_name in self.cfg['class_names']: + class_errors.append(self.get_label_tp(detection_name, metric_name)) + + errors[metric_name] = float(np.nanmean(class_errors)) + + return errors + + @property + def tp_scores(self) -> Dict[str, float]: + scores = {} + tp_errors = self.tp_errors + for metric_name in self.cfg['tp_metrics']: + + # We convert the true positive errors to "scores" by 1-error. + score = 1.0 - tp_errors[metric_name] + + # Some of the true positive errors are unbounded, so we bound the scores to min 0. + score = max(0.0, score) + + scores[metric_name] = score + + return scores + + @property + def nd_score(self) -> float: + """ + Compute the nuScenes detection score (NDS, weighted sum of the individual scores). + :return: The NDS. + """ + # Summarize. + total = float(self.cfg['mean_ap_weight'] * self.mean_ap + np.sum(list(self.tp_scores.values()))) + + # Normalize. 
+ total = total / float(self.cfg['mean_ap_weight'] + len(self.tp_scores.keys())) + + return total + + + def serialize(self): + return { + 'label_aps': self._label_aps, + 'mean_dist_aps': self.mean_dist_aps, + 'mean_ap': self.mean_ap, + 'label_tp_errors': self._label_tp_errors, + 'tp_errors': self.tp_errors, + 'tp_scores': self.tp_scores, + 'nd_score': self.nd_score, + 'eval_time': self.eval_time, + 'cfg': self.cfg + } + + @classmethod + def deserialize(cls, content: dict): + """ Initialize from serialized dictionary. """ + + cfg = content['cfg'] + metrics = cls(cfg=cfg) + metrics.add_runtime(content['eval_time']) + + for detection_name, label_aps in content['label_aps'].items(): + for dist_th, ap in label_aps.items(): + metrics.add_label_ap(detection_name=detection_name, dist_th=float(dist_th), ap=float(ap)) + + for detection_name, label_tps in content['label_tp_errors'].items(): + for metric_name, tp in label_tps.items(): + metrics.add_label_tp(detection_name=detection_name, metric_name=metric_name, tp=float(tp)) + + return metrics + + def __eq__(self, other): + eq = True + eq = eq and self._label_aps == other._label_aps + eq = eq and self._label_tp_errors == other._label_tp_errors + eq = eq and self.eval_time == other.eval_time + eq = eq and self.cfg == other.cfg + + return eq + + +class DetectionBox(abc.ABC): + """ Data class used during detection evaluation. Can be a prediction or ground truth.""" + + def __init__(self, + sample_token: str = "", + translation: Tuple[float, float, float] = (0, 0, 0), + size: Tuple[float, float, float] = (0, 0, 0), + rotation: Tuple[float, float, float, float] = (0, 0, 0, 0), + velocity: Tuple[float, float] = (0, 0), + ego_translation: Tuple[float, float, float] = (0, 0, 0), # Translation to ego vehicle in meters. + num_pts: int = -1, # Nbr. LIDAR or RADAR inside the box. Only for gt boxes. + detection_name: str = 'car', # The class name used in the detection challenge. + detection_score: float = -1.0, # GT samples do not have a score. + attribute_name: str = ''): # Box attribute. Each box can have at most 1 attribute. + + + assert detection_name is not None, 'Error: detection_name cannot be empty!' + # assert detection_name in DETECTION_NAMES, 'Error: Unknown detection_name %s' % detection_name + + # assert attribute_name in ATTRIBUTE_NAMES or attribute_name == '', \ + # 'Error: Unknown attribute_name %s' % attribute_name + + assert type(detection_score) == float, 'Error: detection_score must be a float!' + assert not np.any(np.isnan(detection_score)), 'Error: detection_score may not be NaN!' + self.sample_token = sample_token + self.translation = translation + self.size = size + self.rotation = rotation + self.velocity = velocity + self.ego_translation = ego_translation + self.num_pts = num_pts + self.detection_name = detection_name + self.detection_score = detection_score + self.attribute_name = attribute_name + + def __eq__(self, other): + return (self.sample_token == other.sample_token and + self.translation == other.translation and + self.size == other.size and + self.rotation == other.rotation and + self.velocity == other.velocity and + self.ego_translation == other.ego_translation and + self.num_pts == other.num_pts and + self.detection_name == other.detection_name and + self.detection_score == other.detection_score and + self.attribute_name == other.attribute_name) + + def serialize(self) -> dict: + """ Serialize instance into json-friendly format. 
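To make the nd_score formula above concrete, a by-hand computation with illustrative numbers (in the CVPR 2019 config, mean_ap_weight is 5 and there are five TP metrics):

mean_ap = 0.40
tp_scores = [0.7, 0.6, 0.5, 0.8, 0.9]  # illustrative 1 - error values
mean_ap_weight = 5

nds = (mean_ap_weight * mean_ap + sum(tp_scores)) \
    / (mean_ap_weight + len(tp_scores))
print(round(nds, 3))                   # 0.55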
""" + return { + 'sample_token': self.sample_token, + 'translation': self.translation, + 'size': self.size, + 'rotation': self.rotation, + 'velocity': self.velocity, + 'ego_translation': self.ego_translation, + 'num_pts': self.num_pts, + 'detection_name': self.detection_name, + 'detection_score': self.detection_score, + 'attribute_name': self.attribute_name + } + + @classmethod + def deserialize(cls, content: dict): + """ Initialize from serialized content. """ + return cls(sample_token=content['sample_token'], + translation=tuple(content['translation']), + size=tuple(content['size']), + rotation=tuple(content['rotation']), + velocity=tuple(content['velocity']), + ego_translation=(0.0, 0.0, 0.0) if 'ego_translation' not in content + else tuple(content['ego_translation']), + num_pts=-1 if 'num_pts' not in content else int(content['num_pts']), + detection_name=content['detection_name'], + detection_score=-1.0 if 'detection_score' not in content else float(content['detection_score']), + attribute_name=content['attribute_name']) + @property + def ego_dist(self) -> float: + """ Compute the distance from this box to the ego vehicle in 2D. """ + return np.sqrt(np.sum(np.array(self.ego_translation[:2]) ** 2)) + + + + + +class EvalBoxes: + """ Data class that groups EvalBox instances by sample. """ + + def __init__(self): + """ + Initializes the EvalBoxes for GT or predictions. + """ + self.boxes = defaultdict(list) + + def __repr__(self): + return "EvalBoxes with {} boxes across {} samples".format(len(self.all), len(self.sample_tokens)) + + def __getitem__(self, item) -> List[DetectionBox]: + return self.boxes[item] + + def __eq__(self, other): + if not set(self.sample_tokens) == set(other.sample_tokens): + return False + for token in self.sample_tokens: + if not len(self[token]) == len(other[token]): + return False + for box1, box2 in zip(self[token], other[token]): + if box1 != box2: + return False + return True + + def __len__(self): + return len(self.boxes) + + @property + def all(self) -> List[DetectionBox]: + """ Returns all EvalBoxes in a list. """ + ab = [] + for sample_token in self.sample_tokens: + ab.extend(self[sample_token]) + return ab + + @property + def sample_tokens(self) -> List[str]: + """ Returns a list of all keys. """ + return list(self.boxes.keys()) + + def add_boxes(self, sample_token: str, boxes: List[DetectionBox]) -> None: + """ Adds a list of boxes. """ + self.boxes[sample_token].extend(boxes) + + def serialize(self) -> dict: + """ Serialize instance into json-friendly format. """ + return {key: [box.serialize() for box in boxes] for key, boxes in self.boxes.items()} + + @classmethod + def deserialize(cls, content: dict, box_cls): + """ + Initialize from serialized content. + :param content: A dictionary with the serialized content of the box. + :param box_cls: The class of the boxes, DetectionBox or TrackingBox. + """ + eb = cls() + for sample_token, boxes in content.items(): + eb.add_boxes(sample_token, [box_cls.deserialize(box) for box in boxes]) + return eb + + +def accumulate(gt_boxes, + pred_boxes, + class_name: str, + dist_fcn: Callable, + dist_th: float, + verbose: bool = False) -> DetectionMetricData: + """ + Average Precision over predefined different recall thresholds for a single distance threshold. + The recall/conf thresholds and other raw metrics will be used in secondary metrics. + :param gt_boxes: Maps every sample_token to a list of its sample_annotations. + :param pred_boxes: Maps every sample_token to a list of its sample_results. 
+ :param class_name: Class to compute AP on. + :param dist_fcn: Distance function used to match detections and ground truths. + :param dist_th: Distance threshold for a match. + :param verbose: If true, print debug messages. + :return: (average_prec, metrics). The average precision value and raw data for a number of metrics. + """ + # --------------------------------------------- + # Organize input and initialize accumulators. + # --------------------------------------------- + + # Count the positives. + npos = len([1 for gt_box in gt_boxes.all if gt_box.detection_name == class_name]) + if verbose: + print("Found {} GT of class {} out of {} total across {} samples.". + format(npos, class_name, len(gt_boxes.all), len(gt_boxes.sample_tokens))) + + # For missing classes in the GT, return a data structure corresponding to no predictions. + if npos == 0: + return DetectionMetricData.no_predictions() + + # Organize the predictions in a single list. + pred_boxes_list = [box for box in pred_boxes.all if box.detection_name == class_name] + pred_confs = [box.detection_score for box in pred_boxes_list] + + if verbose: + print("Found {} PRED of class {} out of {} total across {} samples.". + format(len(pred_confs), class_name, len(pred_boxes.all), len(pred_boxes.sample_tokens))) + + # Sort by confidence. + sortind = [i for (v, i) in sorted((v, i) for (i, v) in enumerate(pred_confs))][::-1] + + # Do the actual matching. + tp = [] # Accumulator of true positives. + fp = [] # Accumulator of false positives. + conf = [] # Accumulator of confidences. + + # match_data holds the extra metrics we calculate for each match. + match_data = {'trans_err': [], + 'vel_err': [], + 'scale_err': [], + 'orient_err': [], + 'attr_err': [], + 'conf': []} + + # --------------------------------------------- + # Match and accumulate match data. + # --------------------------------------------- + + taken = set() # Initially no gt bounding box is matched. + for ind in sortind: + pred_box = pred_boxes_list[ind] + min_dist = np.inf + match_gt_idx = None + + for gt_idx, gt_box in enumerate(gt_boxes[pred_box.sample_token]): + + # Find closest match among ground truth boxes + if gt_box.detection_name == class_name and not (pred_box.sample_token, gt_idx) in taken: + this_distance = dist_fcn(gt_box, pred_box) + if this_distance < min_dist: + min_dist = this_distance + match_gt_idx = gt_idx + + # If the closest match is close enough according to threshold we have a match! + is_match = min_dist < dist_th + + if is_match: + taken.add((pred_box.sample_token, match_gt_idx)) + + # Update tp, fp and confs. + tp.append(1) + fp.append(0) + conf.append(pred_box.detection_score) + + # Since it is a match, update match data also. + gt_box_match = gt_boxes[pred_box.sample_token][match_gt_idx] + + match_data['trans_err'].append(center_distance(gt_box_match, pred_box)) + match_data['vel_err'].append(velocity_l2(gt_box_match, pred_box)) + match_data['scale_err'].append(1 - scale_iou(gt_box_match, pred_box)) + + # Barrier orientation is only determined up to 180 degree. (For cones orientation is discarded later) + period = np.pi if class_name == 'barrier' else 2 * np.pi + match_data['orient_err'].append(yaw_diff(gt_box_match, pred_box, period=period)) + + match_data['attr_err'].append(1 - attr_acc(gt_box_match, pred_box)) + match_data['conf'].append(pred_box.detection_score) + + else: + # No match. Mark this as a false positive. + tp.append(0) + fp.append(1) + conf.append(pred_box.detection_score) + + # Check if we have any matches. 
If not, just return a "no predictions" array. + if len(match_data['trans_err']) == 0: + return DetectionMetricData.no_predictions() + + # --------------------------------------------- + # Calculate and interpolate precision and recall + # --------------------------------------------- + + # Accumulate. + tp = np.cumsum(tp).astype(float) + fp = np.cumsum(fp).astype(float) + conf = np.array(conf) + + # Calculate precision and recall. + prec = tp / (fp + tp) + rec = tp / float(npos) + + rec_interp = np.linspace(0, 1, DetectionMetricData.nelem) # 101 steps, from 0% to 100% recall. + prec = np.interp(rec_interp, rec, prec, right=0) + conf = np.interp(rec_interp, rec, conf, right=0) + rec = rec_interp + + # --------------------------------------------- + # Re-sample the match-data to match, prec, recall and conf. + # --------------------------------------------- + + for key in match_data.keys(): + if key == "conf": + continue # Confidence is used as reference to align with fp and tp. So skip in this step. + + else: + # For each match_data, we first calculate the accumulated mean. + tmp = cummean(np.array(match_data[key])) + + # Then interpolate based on the confidences. (Note reversing since np.interp needs increasing arrays) + match_data[key] = np.interp(conf[::-1], match_data['conf'][::-1], tmp[::-1])[::-1] + + # --------------------------------------------- + # Done. Instantiate MetricData and return + # --------------------------------------------- + return DetectionMetricData(recall=rec, + precision=prec, + confidence=conf, + trans_err=match_data['trans_err'], + vel_err=match_data['vel_err'], + scale_err=match_data['scale_err'], + orient_err=match_data['orient_err'], + attr_err=match_data['attr_err']) + + + +def calc_ap(md: DetectionMetricData, min_recall: float, min_precision: float) -> float: + """ Calculated average precision. """ + + assert 0 <= min_precision < 1 + assert 0 <= min_recall <= 1 + + prec = np.copy(md.precision) + prec = prec[round(100 * min_recall) + 1:] # Clip low recalls. +1 to exclude the min recall bin. + prec -= min_precision # Clip low precision + prec[prec < 0] = 0 + return float(np.mean(prec)) / (1.0 - min_precision) + + +def calc_tp(md: DetectionMetricData, min_recall: float, metric_name: str) -> float: + """ Calculates true positive errors. """ + + first_ind = round(100 * min_recall) + 1 # +1 to exclude the error at min recall. + last_ind = md.max_recall_ind # First instance of confidence = 0 is index of max achieved recall. + if last_ind < first_ind: + return 1.0 # Assign 1 here. If this happens for all classes, the score for that TP metric will be 0. + else: + return float(np.mean(getattr(md, metric_name)[first_ind: last_ind + 1])) # +1 to include error at max recall. + + +def quaternion_yaw(q: Quaternion) -> float: + """ + Calculate the yaw angle from a quaternion. + Note that this only works for a quaternion that represents a box in lidar or global coordinate frame. + It does not work for a box in the camera frame. + :param q: Quaternion of interest. + :return: Yaw angle in radians. + """ + + # Project into xy plane. + v = np.dot(q.rotation_matrix, np.array([1, 0, 0])) + + # Measure yaw using arctan. 
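The clipping in calc_ap above, computed by hand for a flat precision curve (the values are illustrative):

import numpy as np

min_recall, min_precision = 0.1, 0.1
prec = np.full(101, 0.6)                   # flat 60% precision over 101 recall bins
prec = prec[round(100 * min_recall) + 1:]  # drop bins at/below 10% recall
prec -= min_precision                      # subtract the precision floor
prec[prec < 0] = 0
ap = float(np.mean(prec)) / (1.0 - min_precision)
print(round(ap, 4))                        # 0.5556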
+ yaw = np.arctan2(v[1], v[0]) + + return yaw \ No newline at end of file diff --git a/mmcv/datasets/nuscenes_vad_dataset.py b/mmcv/datasets/nuscenes_vad_dataset.py new file mode 100644 index 0000000..a552afb --- /dev/null +++ b/mmcv/datasets/nuscenes_vad_dataset.py @@ -0,0 +1,1933 @@ +import os +import json +import copy +import tempfile +from typing import Dict, List +from mmcv.fileio.io import dump,load +import numpy as np +from .builder import DATASETS +from mmcv.datasets import NuScenesDataset +import pyquaternion +import mmcv +from os import path as osp +import torch +import numpy as np +from nuscenes.eval.common.utils import quaternion_yaw, Quaternion +from .vad_custom_nuscenes_eval import NuScenesEval_custom +from nuscenes.eval.common.utils import center_distance +from mmcv.utils.visual import save_tensor +from mmcv.parallel import DataContainer as DC +import random +from mmcv.core.bbox.structures.lidar_box3d import LiDARInstance3DBoxes +from nuscenes.utils.data_classes import Box as NuScenesBox +from mmcv.core.bbox.structures.nuscenes_box import CustomNuscenesBox +from shapely import affinity, ops +from shapely.geometry import LineString, box, MultiPolygon, MultiLineString +from mmcv.datasets.pipelines import to_tensor +from nuscenes.map_expansion.map_api import NuScenesMap, NuScenesMapExplorer +from nuscenes.eval.detection.constants import DETECTION_NAMES + + +class LiDARInstanceLines(object): + """Line instance in LIDAR coordinates + + """ + def __init__(self, + instance_line_list, + sample_dist=1, + num_samples=250, + padding=False, + fixed_num=-1, + padding_value=-10000, + patch_size=None): + assert isinstance(instance_line_list, list) + assert patch_size is not None + if len(instance_line_list) != 0: + assert isinstance(instance_line_list[0], LineString) + self.patch_size = patch_size + self.max_x = self.patch_size[1] / 2 + self.max_y = self.patch_size[0] / 2 + self.sample_dist = sample_dist + self.num_samples = num_samples + self.padding = padding + self.fixed_num = fixed_num + self.padding_value = padding_value + + self.instance_list = instance_line_list + + @property + def start_end_points(self): + """ + return torch.Tensor([N,4]), in xstart, ystart, xend, yend form + """ + assert len(self.instance_list) != 0 + instance_se_points_list = [] + for instance in self.instance_list: + se_points = [] + se_points.extend(instance.coords[0]) + se_points.extend(instance.coords[-1]) + instance_se_points_list.append(se_points) + instance_se_points_array = np.array(instance_se_points_list) + instance_se_points_tensor = to_tensor(instance_se_points_array) + instance_se_points_tensor = instance_se_points_tensor.to( + dtype=torch.float32) + instance_se_points_tensor[:,0] = torch.clamp(instance_se_points_tensor[:,0], min=-self.max_x,max=self.max_x) + instance_se_points_tensor[:,1] = torch.clamp(instance_se_points_tensor[:,1], min=-self.max_y,max=self.max_y) + instance_se_points_tensor[:,2] = torch.clamp(instance_se_points_tensor[:,2], min=-self.max_x,max=self.max_x) + instance_se_points_tensor[:,3] = torch.clamp(instance_se_points_tensor[:,3], min=-self.max_y,max=self.max_y) + return instance_se_points_tensor + + @property + def bbox(self): + """ + return torch.Tensor([N,4]), in xmin, ymin, xmax, ymax form + """ + assert len(self.instance_list) != 0 + instance_bbox_list = [] + for instance in self.instance_list: + # bounds is bbox: [xmin, ymin, xmax, ymax] + instance_bbox_list.append(instance.bounds) + instance_bbox_array = np.array(instance_bbox_list) + instance_bbox_tensor = 
to_tensor(instance_bbox_array) + instance_bbox_tensor = instance_bbox_tensor.to( + dtype=torch.float32) + instance_bbox_tensor[:,0] = torch.clamp(instance_bbox_tensor[:,0], min=-self.max_x,max=self.max_x) + instance_bbox_tensor[:,1] = torch.clamp(instance_bbox_tensor[:,1], min=-self.max_y,max=self.max_y) + instance_bbox_tensor[:,2] = torch.clamp(instance_bbox_tensor[:,2], min=-self.max_x,max=self.max_x) + instance_bbox_tensor[:,3] = torch.clamp(instance_bbox_tensor[:,3], min=-self.max_y,max=self.max_y) + return instance_bbox_tensor + + @property + def fixed_num_sampled_points(self): + """ + return torch.Tensor([N,fixed_num,2]), in xmin, ymin, xmax, ymax form + N means the num of instances + """ + assert len(self.instance_list) != 0 + instance_points_list = [] + for instance in self.instance_list: + distances = np.linspace(0, instance.length, self.fixed_num) + sampled_points = np.array([list(instance.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + instance_points_list.append(sampled_points) + instance_points_array = np.array(instance_points_list) + instance_points_tensor = to_tensor(instance_points_array) + instance_points_tensor = instance_points_tensor.to( + dtype=torch.float32) + instance_points_tensor[:,:,0] = torch.clamp(instance_points_tensor[:,:,0], min=-self.max_x,max=self.max_x) + instance_points_tensor[:,:,1] = torch.clamp(instance_points_tensor[:,:,1], min=-self.max_y,max=self.max_y) + return instance_points_tensor + + @property + def fixed_num_sampled_points_ambiguity(self): + """ + return torch.Tensor([N,fixed_num,2]), in xmin, ymin, xmax, ymax form + N means the num of instances + """ + assert len(self.instance_list) != 0 + instance_points_list = [] + for instance in self.instance_list: + distances = np.linspace(0, instance.length, self.fixed_num) + sampled_points = np.array([list(instance.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + instance_points_list.append(sampled_points) + instance_points_array = np.array(instance_points_list) + instance_points_tensor = to_tensor(instance_points_array) + instance_points_tensor = instance_points_tensor.to( + dtype=torch.float32) + instance_points_tensor[:,:,0] = torch.clamp(instance_points_tensor[:,:,0], min=-self.max_x,max=self.max_x) + instance_points_tensor[:,:,1] = torch.clamp(instance_points_tensor[:,:,1], min=-self.max_y,max=self.max_y) + instance_points_tensor = instance_points_tensor.unsqueeze(1) + return instance_points_tensor + + @property + def fixed_num_sampled_points_torch(self): + """ + return torch.Tensor([N,fixed_num,2]), in xmin, ymin, xmax, ymax form + N means the num of instances + """ + assert len(self.instance_list) != 0 + instance_points_list = [] + for instance in self.instance_list: + # distances = np.linspace(0, instance.length, self.fixed_num) + # sampled_points = np.array([list(instance.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + poly_pts = to_tensor(np.array(list(instance.coords))) + poly_pts = poly_pts.unsqueeze(0).permute(0,2,1) + sampled_pts = torch.nn.functional.interpolate(poly_pts,size=(self.fixed_num),mode='linear',align_corners=True) + sampled_pts = sampled_pts.permute(0,2,1).squeeze(0) + instance_points_list.append(sampled_pts) + # instance_points_array = np.array(instance_points_list) + # instance_points_tensor = to_tensor(instance_points_array) + instance_points_tensor = torch.stack(instance_points_list,dim=0) + instance_points_tensor = instance_points_tensor.to( + dtype=torch.float32) + 
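The fixed-number resampling used by these properties, reduced to its shapely core on a toy 10 m straight line (a sketch under the same interpolate-at-equal-arc-length scheme as above):

import numpy as np
from shapely.geometry import LineString

line = LineString([(0.0, 0.0), (10.0, 0.0)])
fixed_num = 5
distances = np.linspace(0, line.length, fixed_num)
pts = np.array([list(line.interpolate(d).coords)
                for d in distances]).reshape(-1, 2)
print(pts[:, 0])   # [ 0.   2.5  5.   7.5 10. ]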
instance_points_tensor[:,:,0] = torch.clamp(instance_points_tensor[:,:,0], min=-self.max_x,max=self.max_x) + instance_points_tensor[:,:,1] = torch.clamp(instance_points_tensor[:,:,1], min=-self.max_y,max=self.max_y) + return instance_points_tensor + + @property + def shift_fixed_num_sampled_points(self): + """ + return [instances_num, num_shifts, fixed_num, 2] + """ + fixed_num_sampled_points = self.fixed_num_sampled_points + instances_list = [] + is_poly = False + # is_line = False + # import pdb;pdb.set_trace() + for fixed_num_pts in fixed_num_sampled_points: + # [fixed_num, 2] + is_poly = fixed_num_pts[0].equal(fixed_num_pts[-1]) + fixed_num = fixed_num_pts.shape[0] + shift_pts_list = [] + if is_poly: + # import pdb;pdb.set_trace() + for shift_right_i in range(fixed_num): + shift_pts_list.append(fixed_num_pts.roll(shift_right_i,0)) + else: + shift_pts_list.append(fixed_num_pts) + shift_pts_list.append(fixed_num_pts.flip(0)) + shift_pts = torch.stack(shift_pts_list,dim=0) + + shift_pts[:,:,0] = torch.clamp(shift_pts[:,:,0], min=-self.max_x,max=self.max_x) + shift_pts[:,:,1] = torch.clamp(shift_pts[:,:,1], min=-self.max_y,max=self.max_y) + + if not is_poly: + padding = torch.full([fixed_num-shift_pts.shape[0],fixed_num,2], self.padding_value) + shift_pts = torch.cat([shift_pts,padding],dim=0) + # padding = np.zeros((self.num_samples - len(sampled_points), 2)) + # sampled_points = np.concatenate([sampled_points, padding], axis=0) + instances_list.append(shift_pts) + instances_tensor = torch.stack(instances_list, dim=0) + instances_tensor = instances_tensor.to( + dtype=torch.float32) + return instances_tensor + + @property + def shift_fixed_num_sampled_points_v1(self): + """ + return [instances_num, num_shifts, fixed_num, 2] + """ + fixed_num_sampled_points = self.fixed_num_sampled_points + instances_list = [] + is_poly = False + # is_line = False + # import pdb;pdb.set_trace() + for fixed_num_pts in fixed_num_sampled_points: + # [fixed_num, 2] + is_poly = fixed_num_pts[0].equal(fixed_num_pts[-1]) + pts_num = fixed_num_pts.shape[0] + shift_num = pts_num - 1 + if is_poly: + pts_to_shift = fixed_num_pts[:-1,:] + shift_pts_list = [] + if is_poly: + for shift_right_i in range(shift_num): + shift_pts_list.append(pts_to_shift.roll(shift_right_i,0)) + else: + shift_pts_list.append(fixed_num_pts) + shift_pts_list.append(fixed_num_pts.flip(0)) + shift_pts = torch.stack(shift_pts_list,dim=0) + + if is_poly: + _, _, num_coords = shift_pts.shape + tmp_shift_pts = shift_pts.new_zeros((shift_num, pts_num, num_coords)) + tmp_shift_pts[:,:-1,:] = shift_pts + tmp_shift_pts[:,-1,:] = shift_pts[:,0,:] + shift_pts = tmp_shift_pts + + shift_pts[:,:,0] = torch.clamp(shift_pts[:,:,0], min=-self.max_x,max=self.max_x) + shift_pts[:,:,1] = torch.clamp(shift_pts[:,:,1], min=-self.max_y,max=self.max_y) + + if not is_poly: + padding = torch.full([shift_num-shift_pts.shape[0],pts_num,2], self.padding_value) + shift_pts = torch.cat([shift_pts,padding],dim=0) + # padding = np.zeros((self.num_samples - len(sampled_points), 2)) + # sampled_points = np.concatenate([sampled_points, padding], axis=0) + instances_list.append(shift_pts) + instances_tensor = torch.stack(instances_list, dim=0) + instances_tensor = instances_tensor.to( + dtype=torch.float32) + return instances_tensor + + @property + def shift_fixed_num_sampled_points_v2(self): + """ + return [instances_num, num_shifts, fixed_num, 2] + """ + assert len(self.instance_list) != 0 + instances_list = [] + for instance in self.instance_list: + distances = np.linspace(0, 
instance.length, self.fixed_num) + poly_pts = np.array(list(instance.coords)) + start_pts = poly_pts[0] + end_pts = poly_pts[-1] + is_poly = np.equal(start_pts, end_pts) + is_poly = is_poly.all() + shift_pts_list = [] + pts_num, coords_num = poly_pts.shape + shift_num = pts_num - 1 + final_shift_num = self.fixed_num - 1 + if is_poly: + pts_to_shift = poly_pts[:-1,:] + for shift_right_i in range(shift_num): + shift_pts = np.roll(pts_to_shift,shift_right_i,axis=0) + pts_to_concat = shift_pts[0] + pts_to_concat = np.expand_dims(pts_to_concat,axis=0) + shift_pts = np.concatenate((shift_pts,pts_to_concat),axis=0) + shift_instance = LineString(shift_pts) + shift_sampled_points = np.array([list(shift_instance.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + shift_pts_list.append(shift_sampled_points) + # import pdb;pdb.set_trace() + else: + sampled_points = np.array([list(instance.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + flip_sampled_points = np.flip(sampled_points, axis=0) + shift_pts_list.append(sampled_points) + shift_pts_list.append(flip_sampled_points) + + multi_shifts_pts = np.stack(shift_pts_list,axis=0) + shifts_num,_,_ = multi_shifts_pts.shape + + if shifts_num > final_shift_num: + index = np.random.choice(multi_shifts_pts.shape[0], final_shift_num, replace=False) + multi_shifts_pts = multi_shifts_pts[index] + + multi_shifts_pts_tensor = to_tensor(multi_shifts_pts) + multi_shifts_pts_tensor = multi_shifts_pts_tensor.to( + dtype=torch.float32) + + multi_shifts_pts_tensor[:,:,0] = torch.clamp(multi_shifts_pts_tensor[:,:,0], min=-self.max_x,max=self.max_x) + multi_shifts_pts_tensor[:,:,1] = torch.clamp(multi_shifts_pts_tensor[:,:,1], min=-self.max_y,max=self.max_y) + # if not is_poly: + if multi_shifts_pts_tensor.shape[0] < final_shift_num: + padding = torch.full([final_shift_num-multi_shifts_pts_tensor.shape[0],self.fixed_num,2], self.padding_value) + multi_shifts_pts_tensor = torch.cat([multi_shifts_pts_tensor,padding],dim=0) + instances_list.append(multi_shifts_pts_tensor) + instances_tensor = torch.stack(instances_list, dim=0) + instances_tensor = instances_tensor.to( + dtype=torch.float32) + return instances_tensor + + @property + def shift_fixed_num_sampled_points_v3(self): + """ + return [instances_num, num_shifts, fixed_num, 2] + """ + assert len(self.instance_list) != 0 + instances_list = [] + for instance in self.instance_list: + distances = np.linspace(0, instance.length, self.fixed_num) + poly_pts = np.array(list(instance.coords)) + start_pts = poly_pts[0] + end_pts = poly_pts[-1] + is_poly = np.equal(start_pts, end_pts) + is_poly = is_poly.all() + shift_pts_list = [] + pts_num, coords_num = poly_pts.shape + shift_num = pts_num - 1 + final_shift_num = self.fixed_num - 1 + if is_poly: + pts_to_shift = poly_pts[:-1,:] + for shift_right_i in range(shift_num): + shift_pts = np.roll(pts_to_shift,shift_right_i,axis=0) + pts_to_concat = shift_pts[0] + pts_to_concat = np.expand_dims(pts_to_concat,axis=0) + shift_pts = np.concatenate((shift_pts,pts_to_concat),axis=0) + shift_instance = LineString(shift_pts) + shift_sampled_points = np.array([list(shift_instance.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + shift_pts_list.append(shift_sampled_points) + flip_pts_to_shift = np.flip(pts_to_shift, axis=0) + for shift_right_i in range(shift_num): + shift_pts = np.roll(flip_pts_to_shift,shift_right_i,axis=0) + pts_to_concat = shift_pts[0] + pts_to_concat = np.expand_dims(pts_to_concat,axis=0) + shift_pts = 
np.concatenate((shift_pts,pts_to_concat),axis=0) + shift_instance = LineString(shift_pts) + shift_sampled_points = np.array([list(shift_instance.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + shift_pts_list.append(shift_sampled_points) + # import pdb;pdb.set_trace() + else: + sampled_points = np.array([list(instance.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + flip_sampled_points = np.flip(sampled_points, axis=0) + shift_pts_list.append(sampled_points) + shift_pts_list.append(flip_sampled_points) + + multi_shifts_pts = np.stack(shift_pts_list,axis=0) + shifts_num,_,_ = multi_shifts_pts.shape + # import pdb;pdb.set_trace() + if shifts_num > 2*final_shift_num: + index = np.random.choice(shift_num, final_shift_num, replace=False) + flip0_shifts_pts = multi_shifts_pts[index] + flip1_shifts_pts = multi_shifts_pts[index+shift_num] + multi_shifts_pts = np.concatenate((flip0_shifts_pts,flip1_shifts_pts),axis=0) + + multi_shifts_pts_tensor = to_tensor(multi_shifts_pts) + multi_shifts_pts_tensor = multi_shifts_pts_tensor.to( + dtype=torch.float32) + + multi_shifts_pts_tensor[:,:,0] = torch.clamp(multi_shifts_pts_tensor[:,:,0], min=-self.max_x,max=self.max_x) + multi_shifts_pts_tensor[:,:,1] = torch.clamp(multi_shifts_pts_tensor[:,:,1], min=-self.max_y,max=self.max_y) + # if not is_poly: + if multi_shifts_pts_tensor.shape[0] < 2*final_shift_num: + padding = torch.full([final_shift_num*2-multi_shifts_pts_tensor.shape[0],self.fixed_num,2], self.padding_value) + multi_shifts_pts_tensor = torch.cat([multi_shifts_pts_tensor,padding],dim=0) + instances_list.append(multi_shifts_pts_tensor) + instances_tensor = torch.stack(instances_list, dim=0) + instances_tensor = instances_tensor.to( + dtype=torch.float32) + return instances_tensor + + @property + def shift_fixed_num_sampled_points_v4(self): + """ + return [instances_num, num_shifts, fixed_num, 2] + """ + fixed_num_sampled_points = self.fixed_num_sampled_points + instances_list = [] + is_poly = False + # is_line = False + # import pdb;pdb.set_trace() + for fixed_num_pts in fixed_num_sampled_points: + # [fixed_num, 2] + is_poly = fixed_num_pts[0].equal(fixed_num_pts[-1]) + pts_num = fixed_num_pts.shape[0] + shift_num = pts_num - 1 + shift_pts_list = [] + if is_poly: + pts_to_shift = fixed_num_pts[:-1,:] + for shift_right_i in range(shift_num): + shift_pts_list.append(pts_to_shift.roll(shift_right_i,0)) + flip_pts_to_shift = pts_to_shift.flip(0) + for shift_right_i in range(shift_num): + shift_pts_list.append(flip_pts_to_shift.roll(shift_right_i,0)) + else: + shift_pts_list.append(fixed_num_pts) + shift_pts_list.append(fixed_num_pts.flip(0)) + shift_pts = torch.stack(shift_pts_list,dim=0) + + if is_poly: + _, _, num_coords = shift_pts.shape + tmp_shift_pts = shift_pts.new_zeros((shift_num*2, pts_num, num_coords)) + tmp_shift_pts[:,:-1,:] = shift_pts + tmp_shift_pts[:,-1,:] = shift_pts[:,0,:] + shift_pts = tmp_shift_pts + + shift_pts[:,:,0] = torch.clamp(shift_pts[:,:,0], min=-self.max_x,max=self.max_x) + shift_pts[:,:,1] = torch.clamp(shift_pts[:,:,1], min=-self.max_y,max=self.max_y) + + if not is_poly: + padding = torch.full([shift_num*2-shift_pts.shape[0],pts_num,2], self.padding_value) + shift_pts = torch.cat([shift_pts,padding],dim=0) + # padding = np.zeros((self.num_samples - len(sampled_points), 2)) + # sampled_points = np.concatenate([sampled_points, padding], axis=0) + instances_list.append(shift_pts) + instances_tensor = torch.stack(instances_list, dim=0) + instances_tensor = 
instances_tensor.to( + dtype=torch.float32) + return instances_tensor + + @property + def shift_fixed_num_sampled_points_torch(self): + """ + return [instances_num, num_shifts, fixed_num, 2] + """ + fixed_num_sampled_points = self.fixed_num_sampled_points_torch + instances_list = [] + is_poly = False + # is_line = False + # import pdb;pdb.set_trace() + for fixed_num_pts in fixed_num_sampled_points: + # [fixed_num, 2] + is_poly = fixed_num_pts[0].equal(fixed_num_pts[-1]) + fixed_num = fixed_num_pts.shape[0] + shift_pts_list = [] + if is_poly: + # import pdb;pdb.set_trace() + for shift_right_i in range(fixed_num): + shift_pts_list.append(fixed_num_pts.roll(shift_right_i,0)) + else: + shift_pts_list.append(fixed_num_pts) + shift_pts_list.append(fixed_num_pts.flip(0)) + shift_pts = torch.stack(shift_pts_list,dim=0) + + shift_pts[:,:,0] = torch.clamp(shift_pts[:,:,0], min=-self.max_x,max=self.max_x) + shift_pts[:,:,1] = torch.clamp(shift_pts[:,:,1], min=-self.max_y,max=self.max_y) + + if not is_poly: + padding = torch.full([fixed_num-shift_pts.shape[0],fixed_num,2], self.padding_value) + shift_pts = torch.cat([shift_pts,padding],dim=0) + # padding = np.zeros((self.num_samples - len(sampled_points), 2)) + # sampled_points = np.concatenate([sampled_points, padding], axis=0) + instances_list.append(shift_pts) + instances_tensor = torch.stack(instances_list, dim=0) + instances_tensor = instances_tensor.to( + dtype=torch.float32) + return instances_tensor + + # @property + # def polyline_points(self): + # """ + # return [[x0,y0],[x1,y1],...] + # """ + # assert len(self.instance_list) != 0 + # for instance in self.instance_list: + + +class VectorizedLocalMap(object): + CLASS2LABEL = { + 'road_divider': 0, + 'lane_divider': 0, + 'ped_crossing': 1, + 'contours': 2, + 'others': -1 + } + def __init__(self, + dataroot, + patch_size, + map_classes=['divider','ped_crossing','boundary'], + line_classes=['road_divider', 'lane_divider'], + ped_crossing_classes=['ped_crossing'], + contour_classes=['road_segment', 'lane'], + sample_dist=1, + num_samples=250, + padding=False, + fixed_ptsnum_per_line=-1, + padding_value=-10000,): + ''' + Args: + fixed_ptsnum_per_line = -1 : no fixed num + ''' + super().__init__() + self.data_root = dataroot + self.MAPS = ['boston-seaport', 'singapore-hollandvillage', + 'singapore-onenorth', 'singapore-queenstown'] + self.vec_classes = map_classes + self.line_classes = line_classes + self.ped_crossing_classes = ped_crossing_classes + self.polygon_classes = contour_classes + self.nusc_maps = {} + self.map_explorer = {} + for loc in self.MAPS: + self.nusc_maps[loc] = NuScenesMap(dataroot=self.data_root, map_name=loc) + self.map_explorer[loc] = NuScenesMapExplorer(self.nusc_maps[loc]) + + self.patch_size = patch_size + self.sample_dist = sample_dist + self.num_samples = num_samples + self.padding = padding + self.fixed_num = fixed_ptsnum_per_line + self.padding_value = padding_value + + def gen_vectorized_samples(self, location, lidar2global_translation, lidar2global_rotation): + ''' + use lidar2global to get gt map layers + ''' + + map_pose = lidar2global_translation[:2] + rotation = Quaternion(lidar2global_rotation) + + patch_box = (map_pose[0], map_pose[1], self.patch_size[0], self.patch_size[1]) + patch_angle = quaternion_yaw(rotation) / np.pi * 180 + # import pdb;pdb.set_trace() + vectors = [] + for vec_class in self.vec_classes: + if vec_class == 'divider': + line_geom = self.get_map_geom(patch_box, patch_angle, self.line_classes, location) + line_instances_dict = 
self.line_geoms_to_instances(line_geom) + for line_type, instances in line_instances_dict.items(): + for instance in instances: + vectors.append((instance, self.CLASS2LABEL.get(line_type, -1))) + elif vec_class == 'ped_crossing': + ped_geom = self.get_map_geom(patch_box, patch_angle, self.ped_crossing_classes, location) + # ped_vector_list = self.ped_geoms_to_vectors(ped_geom) + ped_instance_list = self.ped_poly_geoms_to_instances(ped_geom) + # import pdb;pdb.set_trace() + for instance in ped_instance_list: + vectors.append((instance, self.CLASS2LABEL.get('ped_crossing', -1))) + elif vec_class == 'boundary': + polygon_geom = self.get_map_geom(patch_box, patch_angle, self.polygon_classes, location) + # import pdb;pdb.set_trace() + poly_bound_list = self.poly_geoms_to_instances(polygon_geom) + # import pdb;pdb.set_trace() + for contour in poly_bound_list: + vectors.append((contour, self.CLASS2LABEL.get('contours', -1))) + else: + raise ValueError(f'WRONG vec_class: {vec_class}') + + # filter out -1 + filtered_vectors = [] + gt_pts_loc_3d = [] + gt_pts_num_3d = [] + gt_labels = [] + gt_instance = [] + for instance, type in vectors: + if type != -1: + gt_instance.append(instance) + gt_labels.append(type) + + gt_instance = LiDARInstanceLines(gt_instance,self.sample_dist, + self.num_samples, self.padding, self.fixed_num,self.padding_value, patch_size=self.patch_size) + + anns_results = dict( + gt_vecs_pts_loc=gt_instance, + gt_vecs_label=gt_labels, + + ) + # import pdb;pdb.set_trace() + return anns_results + + def get_map_geom(self, patch_box, patch_angle, layer_names, location): + map_geom = [] + for layer_name in layer_names: + if layer_name in self.line_classes: + # import pdb;pdb.set_trace() + geoms = self.get_divider_line(patch_box, patch_angle, layer_name, location) + # import pdb;pdb.set_trace() + # geoms = self.map_explorer[location]._get_layer_line(patch_box, patch_angle, layer_name) + map_geom.append((layer_name, geoms)) + elif layer_name in self.polygon_classes: + geoms = self.get_contour_line(patch_box, patch_angle, layer_name, location) + # geoms = self.map_explorer[location]._get_layer_polygon(patch_box, patch_angle, layer_name) + map_geom.append((layer_name, geoms)) + elif layer_name in self.ped_crossing_classes: + geoms = self.get_ped_crossing_line(patch_box, patch_angle, location) + # geoms = self.map_explorer[location]._get_layer_polygon(patch_box, patch_angle, layer_name) + map_geom.append((layer_name, geoms)) + return map_geom + + def _one_type_line_geom_to_vectors(self, line_geom): + line_vectors = [] + + for line in line_geom: + if not line.is_empty: + if line.geom_type == 'MultiLineString': + for single_line in line.geoms: + line_vectors.append(self.sample_pts_from_line(single_line)) + elif line.geom_type == 'LineString': + line_vectors.append(self.sample_pts_from_line(line)) + else: + raise NotImplementedError + return line_vectors + + def _one_type_line_geom_to_instances(self, line_geom): + line_instances = [] + + for line in line_geom: + if not line.is_empty: + if line.geom_type == 'MultiLineString': + for single_line in line.geoms: + line_instances.append(single_line) + elif line.geom_type == 'LineString': + line_instances.append(line) + else: + raise NotImplementedError + return line_instances + + def poly_geoms_to_vectors(self, polygon_geom): + roads = polygon_geom[0][1] + lanes = polygon_geom[1][1] + union_roads = ops.unary_union(roads) + union_lanes = ops.unary_union(lanes) + union_segments = ops.unary_union([union_roads, union_lanes]) + max_x = self.patch_size[1] 
/ 2 + max_y = self.patch_size[0] / 2 + local_patch = box(-max_x + 0.2, -max_y + 0.2, max_x - 0.2, max_y - 0.2) + exteriors = [] + interiors = [] + if union_segments.geom_type != 'MultiPolygon': + union_segments = MultiPolygon([union_segments]) + for poly in union_segments.geoms: + exteriors.append(poly.exterior) + for inter in poly.interiors: + interiors.append(inter) + + results = [] + for ext in exteriors: + if ext.is_ccw: + ext.coords = list(ext.coords)[::-1] + lines = ext.intersection(local_patch) + if isinstance(lines, MultiLineString): + lines = ops.linemerge(lines) + results.append(lines) + + for inter in interiors: + if not inter.is_ccw: + inter.coords = list(inter.coords)[::-1] + lines = inter.intersection(local_patch) + if isinstance(lines, MultiLineString): + lines = ops.linemerge(lines) + results.append(lines) + + return self._one_type_line_geom_to_vectors(results) + + def ped_poly_geoms_to_instances(self, ped_geom): + # import pdb;pdb.set_trace() + ped = ped_geom[0][1] + union_segments = ops.unary_union(ped) + max_x = self.patch_size[1] / 2 + max_y = self.patch_size[0] / 2 + # local_patch = box(-max_x + 0.2, -max_y + 0.2, max_x - 0.2, max_y - 0.2) + local_patch = box(-max_x - 0.2, -max_y - 0.2, max_x + 0.2, max_y + 0.2) + exteriors = [] + interiors = [] + if union_segments.geom_type != 'MultiPolygon': + union_segments = MultiPolygon([union_segments]) + for poly in union_segments.geoms: + exteriors.append(poly.exterior) + for inter in poly.interiors: + interiors.append(inter) + + results = [] + for ext in exteriors: + if ext.is_ccw: + ext.coords = list(ext.coords)[::-1] + lines = ext.intersection(local_patch) + if isinstance(lines, MultiLineString): + lines = ops.linemerge(lines) + results.append(lines) + + for inter in interiors: + if not inter.is_ccw: + inter.coords = list(inter.coords)[::-1] + lines = inter.intersection(local_patch) + if isinstance(lines, MultiLineString): + lines = ops.linemerge(lines) + results.append(lines) + + return self._one_type_line_geom_to_instances(results) + + + def poly_geoms_to_instances(self, polygon_geom): + roads = polygon_geom[0][1] + lanes = polygon_geom[1][1] + union_roads = ops.unary_union(roads) + union_lanes = ops.unary_union(lanes) + union_segments = ops.unary_union([union_roads, union_lanes]) + max_x = self.patch_size[1] / 2 + max_y = self.patch_size[0] / 2 + local_patch = box(-max_x + 0.2, -max_y + 0.2, max_x - 0.2, max_y - 0.2) + exteriors = [] + interiors = [] + if union_segments.geom_type != 'MultiPolygon': + union_segments = MultiPolygon([union_segments]) + for poly in union_segments.geoms: + exteriors.append(poly.exterior) + for inter in poly.interiors: + interiors.append(inter) + + results = [] + for ext in exteriors: + if ext.is_ccw: + ext.coords = list(ext.coords)[::-1] + lines = ext.intersection(local_patch) + if isinstance(lines, MultiLineString): + lines = ops.linemerge(lines) + results.append(lines) + + for inter in interiors: + if not inter.is_ccw: + inter.coords = list(inter.coords)[::-1] + lines = inter.intersection(local_patch) + if isinstance(lines, MultiLineString): + lines = ops.linemerge(lines) + results.append(lines) + + return self._one_type_line_geom_to_instances(results) + + def line_geoms_to_vectors(self, line_geom): + line_vectors_dict = dict() + for line_type, a_type_of_lines in line_geom: + one_type_vectors = self._one_type_line_geom_to_vectors(a_type_of_lines) + line_vectors_dict[line_type] = one_type_vectors + + return line_vectors_dict + def line_geoms_to_instances(self, line_geom): + 
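        '''Collect every LineString of each line type into per-type instance lists.'''
+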
line_instances_dict = dict() + for line_type, a_type_of_lines in line_geom: + one_type_instances = self._one_type_line_geom_to_instances(a_type_of_lines) + line_instances_dict[line_type] = one_type_instances + + return line_instances_dict + + def ped_geoms_to_vectors(self, ped_geom): + ped_geom = ped_geom[0][1] + union_ped = ops.unary_union(ped_geom) + if union_ped.geom_type != 'MultiPolygon': + union_ped = MultiPolygon([union_ped]) + + max_x = self.patch_size[1] / 2 + max_y = self.patch_size[0] / 2 + local_patch = box(-max_x + 0.2, -max_y + 0.2, max_x - 0.2, max_y - 0.2) + results = [] + for ped_poly in union_ped: + # rect = ped_poly.minimum_rotated_rectangle + ext = ped_poly.exterior + if not ext.is_ccw: + ext.coords = list(ext.coords)[::-1] + lines = ext.intersection(local_patch) + results.append(lines) + + return self._one_type_line_geom_to_vectors(results) + + def get_contour_line(self,patch_box,patch_angle,layer_name,location): + if layer_name not in self.map_explorer[location].map_api.non_geometric_polygon_layers: + raise ValueError('{} is not a polygonal layer'.format(layer_name)) + + patch_x = patch_box[0] + patch_y = patch_box[1] + + patch = self.map_explorer[location].get_patch_coord(patch_box, patch_angle) + + records = getattr(self.map_explorer[location].map_api, layer_name) + + polygon_list = [] + if layer_name == 'drivable_area': + for record in records: + polygons = [self.map_explorer[location].map_api.extract_polygon(polygon_token) for polygon_token in record['polygon_tokens']] + + for polygon in polygons: + new_polygon = polygon.intersection(patch) + if not new_polygon.is_empty: + new_polygon = affinity.rotate(new_polygon, -patch_angle, + origin=(patch_x, patch_y), use_radians=False) + new_polygon = affinity.affine_transform(new_polygon, + [1.0, 0.0, 0.0, 1.0, -patch_x, -patch_y]) + if new_polygon.geom_type == 'Polygon': + new_polygon = MultiPolygon([new_polygon]) + polygon_list.append(new_polygon) + + else: + for record in records: + polygon = self.map_explorer[location].map_api.extract_polygon(record['polygon_token']) + + if polygon.is_valid: + new_polygon = polygon.intersection(patch) + if not new_polygon.is_empty: + new_polygon = affinity.rotate(new_polygon, -patch_angle, + origin=(patch_x, patch_y), use_radians=False) + new_polygon = affinity.affine_transform(new_polygon, + [1.0, 0.0, 0.0, 1.0, -patch_x, -patch_y]) + if new_polygon.geom_type == 'Polygon': + new_polygon = MultiPolygon([new_polygon]) + polygon_list.append(new_polygon) + + return polygon_list + + def get_divider_line(self,patch_box,patch_angle,layer_name,location): + if layer_name not in self.map_explorer[location].map_api.non_geometric_line_layers: + raise ValueError("{} is not a line layer".format(layer_name)) + + if layer_name == 'traffic_light': + return None + + patch_x = patch_box[0] + patch_y = patch_box[1] + + patch = self.map_explorer[location].get_patch_coord(patch_box, patch_angle) + + line_list = [] + records = getattr(self.map_explorer[location].map_api, layer_name) + for record in records: + line = self.map_explorer[location].map_api.extract_line(record['line_token']) + if line.is_empty: # Skip lines without nodes. 
+ continue + + new_line = line.intersection(patch) + if not new_line.is_empty: + new_line = affinity.rotate(new_line, -patch_angle, origin=(patch_x, patch_y), use_radians=False) + new_line = affinity.affine_transform(new_line, + [1.0, 0.0, 0.0, 1.0, -patch_x, -patch_y]) + line_list.append(new_line) + + return line_list + + def get_ped_crossing_line(self, patch_box, patch_angle, location): + patch_x = patch_box[0] + patch_y = patch_box[1] + + patch = self.map_explorer[location].get_patch_coord(patch_box, patch_angle) + polygon_list = [] + records = getattr(self.map_explorer[location].map_api, 'ped_crossing') + # records = getattr(self.nusc_maps[location], 'ped_crossing') + for record in records: + polygon = self.map_explorer[location].map_api.extract_polygon(record['polygon_token']) + if polygon.is_valid: + new_polygon = polygon.intersection(patch) + if not new_polygon.is_empty: + new_polygon = affinity.rotate(new_polygon, -patch_angle, + origin=(patch_x, patch_y), use_radians=False) + new_polygon = affinity.affine_transform(new_polygon, + [1.0, 0.0, 0.0, 1.0, -patch_x, -patch_y]) + if new_polygon.geom_type == 'Polygon': + new_polygon = MultiPolygon([new_polygon]) + polygon_list.append(new_polygon) + + return polygon_list + + def sample_pts_from_line(self, line): + if self.fixed_num < 0: + distances = np.arange(0, line.length, self.sample_dist) + sampled_points = np.array([list(line.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + else: + # fixed number of points, so distance is line.length / self.fixed_num + distances = np.linspace(0, line.length, self.fixed_num) + sampled_points = np.array([list(line.interpolate(distance).coords) for distance in distances]).reshape(-1, 2) + + # tmpdistances = np.linspace(0, line.length, 2) + # tmpsampled_points = np.array([list(line.interpolate(tmpdistance).coords) for tmpdistance in tmpdistances]).reshape(-1, 2) + # import pdb;pdb.set_trace() + # if self.normalize: + # sampled_points = sampled_points / np.array([self.patch_size[1], self.patch_size[0]]) + + num_valid = len(sampled_points) + + if not self.padding or self.fixed_num > 0: + # fixed num sample can return now! + return sampled_points, num_valid + + # fixed distance sampling need padding! + num_valid = len(sampled_points) + + if self.fixed_num < 0: + if num_valid < self.num_samples: + padding = np.zeros((self.num_samples - len(sampled_points), 2)) + sampled_points = np.concatenate([sampled_points, padding], axis=0) + else: + sampled_points = sampled_points[:self.num_samples, :] + num_valid = self.num_samples + + # if self.normalize: + # sampled_points = sampled_points / np.array([self.patch_size[1], self.patch_size[0]]) + # num_valid = len(sampled_points) + + return sampled_points, num_valid + + +############################################################################################################### +############################################################################################################### +############################################################################################################### + +class v1CustomDetectionConfig: + """ Data class that specifies the detection evaluation settings. """ + + def __init__(self, + class_range_x: Dict[str, int], + class_range_y: Dict[str, int], + dist_fcn: str, + dist_ths: List[float], + dist_th_tp: float, + min_recall: float, + min_precision: float, + max_boxes_per_sample: int, + mean_ap_weight: int): + + assert set(class_range_x.keys()) == set(DETECTION_NAMES), "Class count mismatch." 
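+        # Only class_range_x is validated against DETECTION_NAMES here;
+        # class_range_y is assumed to carry the same class keys.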
+ assert dist_th_tp in dist_ths, "dist_th_tp must be in set of dist_ths." + + self.class_range_x = class_range_x + self.class_range_y = class_range_y + self.dist_fcn = dist_fcn + self.dist_ths = dist_ths + self.dist_th_tp = dist_th_tp + self.min_recall = min_recall + self.min_precision = min_precision + self.max_boxes_per_sample = max_boxes_per_sample + self.mean_ap_weight = mean_ap_weight + + self.class_names = self.class_range_y.keys() + + def __eq__(self, other): + eq = True + for key in self.serialize().keys(): + eq = eq and np.array_equal(getattr(self, key), getattr(other, key)) + return eq + + def serialize(self) -> dict: + """ Serialize instance into json-friendly format. """ + return { + 'class_range_x': self.class_range_x, + 'class_range_y': self.class_range_y, + 'dist_fcn': self.dist_fcn, + 'dist_ths': self.dist_ths, + 'dist_th_tp': self.dist_th_tp, + 'min_recall': self.min_recall, + 'min_precision': self.min_precision, + 'max_boxes_per_sample': self.max_boxes_per_sample, + 'mean_ap_weight': self.mean_ap_weight + } + + @classmethod + def deserialize(cls, content: dict): + """ Initialize from serialized dictionary. """ + return cls(content['class_range_x'], + content['class_range_y'], + content['dist_fcn'], + content['dist_ths'], + content['dist_th_tp'], + content['min_recall'], + content['min_precision'], + content['max_boxes_per_sample'], + content['mean_ap_weight']) + + @property + def dist_fcn_callable(self): + """ Return the distance function corresponding to the dist_fcn string. """ + if self.dist_fcn == 'center_distance': + return center_distance + else: + raise Exception('Error: Unknown distance function %s!' % self.dist_fcn) + +@DATASETS.register_module() +class VADCustomNuScenesDataset(NuScenesDataset): + r"""Custom NuScenes Dataset. + """ + MAPCLASSES = ('divider',) + def __init__( + self, + queue_length=4, + bev_size=(200, 200), + overlap_test=False, + with_attr=True, + fut_ts=6, + pc_range=[-51.2, -51.2, -5.0, 51.2, 51.2, 3.0], + map_classes=None, + map_ann_file=None, + map_fixed_ptsnum_per_line=-1, + map_eval_use_same_gt_sample_num_flag=False, + padding_value=-10000, + use_pkl_result=False, + custom_eval_version='vad_nusc_detection_cvpr_2019', + *args, + **kwargs + ): + super().__init__(*args, **kwargs) + self.queue_length = queue_length + self.overlap_test = overlap_test + self.bev_size = bev_size + self.with_attr = with_attr + self.fut_ts = fut_ts + self.use_pkl_result = use_pkl_result + + self.custom_eval_version = custom_eval_version + # Check if config exists. + this_dir = os.path.dirname(os.path.abspath(__file__)) + cfg_path = os.path.join(this_dir, '%s.json' % self.custom_eval_version) + assert os.path.exists(cfg_path), \ + 'Requested unknown configuration {}'.format(self.custom_eval_version) + # Load config file and deserialize it. 
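+        # Illustrative (not verbatim) schema, mirroring v1CustomDetectionConfig.serialize():
+        # {"class_range_x": {"car": ..., ...}, "class_range_y": {...},
+        #  "dist_fcn": "center_distance", "dist_ths": [...], "dist_th_tp": ...,
+        #  "min_recall": ..., "min_precision": ..., "max_boxes_per_sample": ...,
+        #  "mean_ap_weight": ...}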
+ with open(cfg_path, 'r') as f: + data = json.load(f) + self.custom_eval_detection_configs = v1CustomDetectionConfig.deserialize(data) + + self.map_ann_file = map_ann_file + self.MAPCLASSES = self.get_map_classes(map_classes) + self.NUM_MAPCLASSES = len(self.MAPCLASSES) + self.pc_range = pc_range + patch_h = pc_range[4]-pc_range[1] + patch_w = pc_range[3]-pc_range[0] + self.patch_size = (patch_h, patch_w) + self.padding_value = padding_value + self.fixed_num = map_fixed_ptsnum_per_line + self.eval_use_same_gt_sample_num_flag = map_eval_use_same_gt_sample_num_flag + self.vector_map = VectorizedLocalMap(kwargs['data_root'], + patch_size=self.patch_size, map_classes=self.MAPCLASSES, + fixed_ptsnum_per_line=map_fixed_ptsnum_per_line, + padding_value=self.padding_value) + self.is_vis_on_test = True + + @classmethod + def get_map_classes(cls, map_classes=None): + """Get class names of current dataset. + + Args: + classes (Sequence[str] | str | None): If classes is None, use + default CLASSES defined by builtin dataset. If classes is a + string, take it as a file name. The file contains the name of + classes where each line contains one class name. If classes is + a tuple or list, override the CLASSES defined by the dataset. + + Return: + list[str]: A list of class names. + """ + if map_classes is None: + return cls.MAPCLASSES + + if isinstance(map_classes, str): + # take it as a file path + class_names = mmcv.list_from_file(map_classes) + elif isinstance(map_classes, (tuple, list)): + class_names = map_classes + else: + raise ValueError(f'Unsupported type {type(map_classes)} of map classes.') + + return class_names + + def vectormap_pipeline(self, example, input_dict): + ''' + `example` type: + keys: 'img_metas', 'gt_bboxes_3d', 'gt_labels_3d', 'img'; + all keys type is 'DataContainer'; + 'img_metas' cpu_only=True, type is dict, others are false; + 'gt_labels_3d' shape torch.size([num_samples]), stack=False, + padding_value=0, cpu_only=False + 'gt_bboxes_3d': stack=False, cpu_only=True + ''' + # import pdb;pdb.set_trace() + lidar2ego = np.eye(4) + lidar2ego[:3,:3] = Quaternion(input_dict['lidar2ego_rotation']).rotation_matrix + lidar2ego[:3, 3] = input_dict['lidar2ego_translation'] + ego2global = np.eye(4) + ego2global[:3,:3] = Quaternion(input_dict['ego2global_rotation']).rotation_matrix + ego2global[:3, 3] = input_dict['ego2global_translation'] + + lidar2global = ego2global @ lidar2ego + + lidar2global_translation = list(lidar2global[:3,3]) + lidar2global_rotation = list(Quaternion(matrix=lidar2global).q) + + location = input_dict['map_location'] + ego2global_translation = input_dict['ego2global_translation'] + ego2global_rotation = input_dict['ego2global_rotation'] + anns_results = self.vector_map.gen_vectorized_samples( + location, lidar2global_translation, lidar2global_rotation + ) + + ''' + anns_results, type: dict + 'gt_vecs_pts_loc': list[num_vecs], vec with num_points*2 coordinates + 'gt_vecs_pts_num': list[num_vecs], vec with num_points + 'gt_vecs_label': list[num_vecs], vec with cls index + ''' + gt_vecs_label = to_tensor(anns_results['gt_vecs_label']) + if isinstance(anns_results['gt_vecs_pts_loc'], LiDARInstanceLines): + gt_vecs_pts_loc = anns_results['gt_vecs_pts_loc'] + else: + gt_vecs_pts_loc = to_tensor(anns_results['gt_vecs_pts_loc']) + try: + gt_vecs_pts_loc = gt_vecs_pts_loc.flatten(1).to(dtype=torch.float32) + except: + # empty tensor, will be passed in train, + # but we preserve it for test + gt_vecs_pts_loc = gt_vecs_pts_loc + + example['map_gt_labels_3d'] = 
DC(gt_vecs_label, cpu_only=False) + example['map_gt_bboxes_3d'] = DC(gt_vecs_pts_loc, cpu_only=True) + + return example + + def prepare_train_data(self, index): + """ + Training data preparation. + Args: + index (int): Index for accessing the target data. + Returns: + dict: Training data dict of the corresponding index. + """ + data_queue = [] + + # temporal aug + prev_indexs_list = list(range(index-self.queue_length, index)) + random.shuffle(prev_indexs_list) + prev_indexs_list = sorted(prev_indexs_list[1:], reverse=True) + ## + + input_dict = self.get_data_info(index) + if input_dict is None: + return None + frame_idx = input_dict['frame_idx'] + scene_token = input_dict['scene_token'] + self.pre_pipeline(input_dict) + example = self.pipeline(input_dict) + example = self.vectormap_pipeline(example,input_dict) + if self.filter_empty_gt and \ + ((example is None or ~(example['gt_labels_3d']._data != -1).any()) or \ + (example is None or ~(example['map_gt_labels_3d']._data != -1).any())): + return None + data_queue.insert(0, example) + for i in prev_indexs_list: + i = max(0, i) + input_dict = self.get_data_info(i) + if input_dict is None: + return None + if input_dict['frame_idx'] < frame_idx and input_dict['scene_token'] == scene_token: + self.pre_pipeline(input_dict) + example = self.pipeline(input_dict) + example = self.vectormap_pipeline(example,input_dict) + if self.filter_empty_gt and \ + (example is None or ~(example['gt_labels_3d']._data != -1).any()) and \ + (example is None or ~(example['map_gt_labels_3d']._data != -1).any()): + return None + frame_idx = input_dict['frame_idx'] + data_queue.insert(0, copy.deepcopy(example)) + return self.union2one(data_queue) + + def prepare_test_data(self, index): + """Prepare data for testing. + + Args: + index (int): Index for accessing the target data. + + Returns: + dict: Testing data dict of the corresponding index. + """ + input_dict = self.get_data_info(index) + self.pre_pipeline(input_dict) + example = self.pipeline(input_dict) + if self.is_vis_on_test: + example = self.vectormap_pipeline(example, input_dict) + return example + + def union2one(self, queue): + """ + convert sample queue into one single sample. + """ + imgs_list = [each['img'].data for each in queue] + metas_map = {} + prev_pos = None + prev_angle = None + for i, each in enumerate(queue): + metas_map[i] = each['img_metas'].data + if i == 0: + metas_map[i]['prev_bev'] = False + prev_pos = copy.deepcopy(metas_map[i]['can_bus'][:3]) + prev_angle = copy.deepcopy(metas_map[i]['can_bus'][-1]) + metas_map[i]['can_bus'][:3] = 0 + metas_map[i]['can_bus'][-1] = 0 + else: + metas_map[i]['prev_bev'] = True + tmp_pos = copy.deepcopy(metas_map[i]['can_bus'][:3]) + tmp_angle = copy.deepcopy(metas_map[i]['can_bus'][-1]) + metas_map[i]['can_bus'][:3] -= prev_pos + metas_map[i]['can_bus'][-1] -= prev_angle + prev_pos = copy.deepcopy(tmp_pos) + prev_angle = copy.deepcopy(tmp_angle) + + queue[-1]['img'] = DC(torch.stack(imgs_list), + cpu_only=False, stack=True) + queue[-1]['img_metas'] = DC(metas_map, cpu_only=True) + queue = queue[-1] + return queue + + def get_ann_info(self, index): + """Get annotation info according to the given index. + + Args: + index (int): Index of the annotation data to get. + + Returns: + dict: Annotation information consists of the following keys: + + - gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): \ + 3D ground truth bboxes + - gt_labels_3d (np.ndarray): Labels of ground truths. + - gt_names (list[str]): Class names of ground truths. 
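+                - attr_labels (np.ndarray): Per-agent future trajectories, \
+                    masks, goals, LCF features and yaws, concatenated \
+                    along the last axis (populated when ``with_attr`` is True).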
+ """ + info = self.data_infos[index] + # filter out bbox containing no points + if self.use_valid_flag: + mask = info['valid_flag'] + else: + mask = info['num_lidar_pts'] > 0 + gt_bboxes_3d = info['gt_boxes'][mask] + gt_names_3d = info['gt_names'][mask] + gt_labels_3d = [] + for cat in gt_names_3d: + if cat in self.CLASSES: + gt_labels_3d.append(self.CLASSES.index(cat)) + else: + gt_labels_3d.append(-1) + gt_labels_3d = np.array(gt_labels_3d) + + if self.with_velocity: + gt_velocity = info['gt_velocity'][mask] + nan_mask = np.isnan(gt_velocity[:, 0]) + gt_velocity[nan_mask] = [0.0, 0.0] + gt_bboxes_3d = np.concatenate([gt_bboxes_3d, gt_velocity], axis=-1) + + if self.with_attr: + gt_fut_trajs = info['gt_agent_fut_trajs'][mask] + gt_fut_masks = info['gt_agent_fut_masks'][mask] + gt_fut_goal = info['gt_agent_fut_goal'][mask] + gt_lcf_feat = info['gt_agent_lcf_feat'][mask] + gt_fut_yaw = info['gt_agent_fut_yaw'][mask] + attr_labels = np.concatenate( + [gt_fut_trajs, gt_fut_masks, gt_fut_goal[..., None], gt_lcf_feat, gt_fut_yaw], axis=-1 + ).astype(np.float32) + + # the nuscenes box center is [0.5, 0.5, 0.5], we change it to be + # the same as KITTI (0.5, 0.5, 0) + gt_bboxes_3d = LiDARInstance3DBoxes( + gt_bboxes_3d, + box_dim=gt_bboxes_3d.shape[-1], + origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d) + + anns_results = dict( + gt_bboxes_3d=gt_bboxes_3d, + gt_labels_3d=gt_labels_3d, + gt_names=gt_names_3d, + attr_labels=attr_labels) + + return anns_results + + def get_data_info(self, index): + """Get data info according to the given index. + + Args: + index (int): Index of the sample data to get. + + Returns: + dict: Data information that will be passed to the data \ + preprocessing pipelines. It includes the following keys: + + - sample_idx (str): Sample index. + - pts_filename (str): Filename of point clouds. + - sweeps (list[dict]): Infos of sweeps. + - timestamp (float): Sample timestamp. + - img_filename (str, optional): Image filename. + - lidar2img (list[np.ndarray], optional): Transformations \ + from lidar to different cameras. + - ann_info (dict): Annotation info. 
+ """ + info = self.data_infos[index] + # standard protocal modified from SECOND.Pytorch + input_dict = dict( + sample_idx=info['token'], + #pts_filename=info['lidar_path'], + #sweeps=info['sweeps'], + ego2global_translation=info['ego2global_translation'], + ego2global_rotation=info['ego2global_rotation'], + lidar2ego_translation=info['lidar2ego_translation'], + lidar2ego_rotation=info['lidar2ego_rotation'], + prev_idx=info['prev'], + next_idx=info['next'], + scene_token=info['scene_token'], + can_bus=info['can_bus'], + frame_idx=info['frame_idx'], + timestamp=info['timestamp'] / 1e6, + fut_valid_flag=info['fut_valid_flag'], + map_location=info['map_location'], + ego_his_trajs=info['gt_ego_his_trajs'], + ego_fut_trajs=info['gt_ego_fut_trajs'], + ego_fut_masks=info['gt_ego_fut_masks'], + ego_fut_cmd=info['gt_ego_fut_cmd'], + ego_lcf_feat=info['gt_ego_lcf_feat'] + ) + # lidar to ego transform + lidar2ego = np.eye(4).astype(np.float32) + lidar2ego[:3, :3] = Quaternion(info["lidar2ego_rotation"]).rotation_matrix + lidar2ego[:3, 3] = info["lidar2ego_translation"] + input_dict["lidar2ego"] = lidar2ego + + if self.modality['use_camera']: + image_paths = [] + lidar2img_rts = [] + lidar2cam_rts = [] + cam_intrinsics = [] + input_dict["camera2ego"] = [] + input_dict["camera_intrinsics"] = [] + for cam_type, cam_info in info['cams'].items(): + image_paths.append(cam_info['data_path']) + # obtain lidar to image transformation matrix + lidar2cam_r = np.linalg.inv(cam_info['sensor2lidar_rotation']) + lidar2cam_t = cam_info[ + 'sensor2lidar_translation'] @ lidar2cam_r.T + lidar2cam_rt = np.eye(4) + lidar2cam_rt[:3, :3] = lidar2cam_r.T + lidar2cam_rt[3, :3] = -lidar2cam_t + intrinsic = cam_info['cam_intrinsic'] + viewpad = np.eye(4) + viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic + lidar2img_rt = (viewpad @ lidar2cam_rt.T) + lidar2img_rts.append(lidar2img_rt) + + cam_intrinsics.append(viewpad) + lidar2cam_rts.append(lidar2cam_rt.T) + + # camera to ego transform + camera2ego = np.eye(4).astype(np.float32) + camera2ego[:3, :3] = Quaternion( + cam_info["sensor2ego_rotation"] + ).rotation_matrix + camera2ego[:3, 3] = cam_info["sensor2ego_translation"] + input_dict["camera2ego"].append(camera2ego) + # camera intrinsics + camera_intrinsics = np.eye(4).astype(np.float32) + camera_intrinsics[:3, :3] = cam_info["cam_intrinsic"] + input_dict["camera_intrinsics"].append(camera_intrinsics) + + input_dict.update( + dict( + img_filename=image_paths, + lidar2img=lidar2img_rts, + cam_intrinsic=cam_intrinsics, + lidar2cam=lidar2cam_rts, + )) + + # NOTE: now we load gt in test_mode for evaluating + # if not self.test_mode: + # annos = self.get_ann_info(index) + # input_dict['ann_info'] = annos + + annos = self.get_ann_info(index) + input_dict['ann_info'] = annos + + rotation = Quaternion(input_dict['ego2global_rotation']) + translation = input_dict['ego2global_translation'] + can_bus = input_dict['can_bus'] + can_bus[:3] = translation + can_bus[3:7] = rotation + patch_angle = quaternion_yaw(rotation) / np.pi * 180 + if patch_angle < 0: + patch_angle += 360 + can_bus[-2] = patch_angle / 180 * np.pi + can_bus[-1] = patch_angle + + lidar2ego = np.eye(4) + lidar2ego[:3,:3] = Quaternion(input_dict['lidar2ego_rotation']).rotation_matrix + lidar2ego[:3, 3] = input_dict['lidar2ego_translation'] + ego2global = np.eye(4) + ego2global[:3,:3] = Quaternion(input_dict['ego2global_rotation']).rotation_matrix + ego2global[:3, 3] = input_dict['ego2global_translation'] + lidar2global = ego2global @ lidar2ego + 
input_dict['lidar2global'] = lidar2global
+
+        return input_dict
+
+    def __getitem__(self, idx):
+        """Get item from infos according to the given index.
+        Returns:
+            dict: Data dictionary of the corresponding index.
+        """
+        if self.test_mode:
+            return self.prepare_test_data(idx)
+        while True:
+            data = self.prepare_train_data(idx)
+            if data is None:
+                idx = self._rand_another(idx)
+                continue
+            return data
+
+    def _format_gt(self):
+        gt_annos = []
+        print('Start converting gt map format...')
+        # assert self.map_ann_file is not None
+        if not os.path.exists(self.map_ann_file):
+            dataset_length = len(self)
+            prog_bar = mmcv.ProgressBar(dataset_length)
+            mapped_class_names = self.MAPCLASSES
+            for sample_id in range(dataset_length):
+                sample_token = self.data_infos[sample_id]['token']
+                gt_anno = {}
+                gt_anno['sample_token'] = sample_token
+                # gt_sample_annos = []
+                gt_sample_dict = {}
+                gt_sample_dict = self.vectormap_pipeline(gt_sample_dict, self.data_infos[sample_id])
+                gt_labels = gt_sample_dict['map_gt_labels_3d'].data.numpy()
+                gt_vecs = gt_sample_dict['map_gt_bboxes_3d'].data.instance_list
+                gt_vec_list = []
+                for i, (gt_label, gt_vec) in enumerate(zip(gt_labels, gt_vecs)):
+                    name = mapped_class_names[gt_label]
+                    anno = dict(
+                        pts=np.array(list(gt_vec.coords)),
+                        pts_num=len(list(gt_vec.coords)),
+                        cls_name=name,
+                        type=gt_label,
+                    )
+                    gt_vec_list.append(anno)
+                gt_anno['vectors'] = gt_vec_list
+                gt_annos.append(gt_anno)
+
+                prog_bar.update()
+            nusc_submissions = {
+                'GTs': gt_annos
+            }
+            print('\n GT anns written to', self.map_ann_file)
+            dump(nusc_submissions, self.map_ann_file)
+        else:
+            print(f'{self.map_ann_file} exists, skipping update')
+
+    def _format_bbox(self, results, jsonfile_prefix=None, score_thresh=0.2):
+        """Convert the results to the standard format.
+
+        Args:
+            results (list[dict]): Testing results of the dataset.
+            jsonfile_prefix (str): The prefix of the output jsonfile.
+                You can specify the output directory/filename by
+                modifying the jsonfile_prefix. Default: None.
+            score_thresh (float): Boxes with a detection score below this
+                threshold are dropped. Default: 0.2.
+
+        Returns:
+            str: Path of the output json file.
+ """ + nusc_annos = {} + det_mapped_class_names = self.CLASSES + + # assert self.map_ann_file is not None + map_pred_annos = {} + map_mapped_class_names = self.MAPCLASSES + + plan_annos = {} + + print('Start to convert detection format...') + for sample_id, det in enumerate(mmcv.track_iter_progress(results)): + annos = [] + boxes = output_to_nusc_box(det) + sample_token = self.data_infos[sample_id]['token'] + + plan_annos[sample_token] = [det['ego_fut_preds'], det['ego_fut_cmd']] + + boxes = lidar_nusc_box_to_global(self.data_infos[sample_id], boxes, + det_mapped_class_names, + self.custom_eval_detection_configs, + self.eval_version) + for i, box in enumerate(boxes): + if box.score < score_thresh: + continue + name = det_mapped_class_names[box.label] + if np.sqrt(box.velocity[0]**2 + box.velocity[1]**2) > 0.2: + if name in [ + 'car', + 'construction_vehicle', + 'bus', + 'truck', + 'trailer', + ]: + attr = 'vehicle.moving' + elif name in ['bicycle', 'motorcycle']: + attr = 'cycle.with_rider' + else: + attr = NuScenesDataset.DefaultAttribute[name] + else: + if name in ['pedestrian']: + attr = 'pedestrian.standing' + elif name in ['bus']: + attr = 'vehicle.stopped' + else: + attr = NuScenesDataset.DefaultAttribute[name] + + nusc_anno = dict( + sample_token=sample_token, + translation=box.center.tolist(), + size=box.wlh.tolist(), + rotation=box.orientation.elements.tolist(), + velocity=box.velocity[:2].tolist(), + detection_name=name, + detection_score=box.score, + attribute_name=attr, + fut_traj=box.fut_trajs.tolist()) + annos.append(nusc_anno) + nusc_annos[sample_token] = annos + + + map_pred_anno = {} + vecs = output_to_vecs(det) + sample_token = self.data_infos[sample_id]['token'] + map_pred_anno['sample_token'] = sample_token + pred_vec_list=[] + for i, vec in enumerate(vecs): + name = map_mapped_class_names[vec['label']] + anno = dict( + # sample_token=sample_token, + pts=vec['pts'], + pts_num=len(vec['pts']), + cls_name=name, + type=vec['label'], + confidence_level=vec['score']) + pred_vec_list.append(anno) + # annos.append(nusc_anno) + # nusc_annos[sample_token] = annos + map_pred_anno['vectors'] = pred_vec_list + map_pred_annos[sample_token] = map_pred_anno + + if not os.path.exists(self.map_ann_file): + self._format_gt() + else: + print(f'{self.map_ann_file} exist, not update') + # with open(self.map_ann_file,'r') as f: + # GT_anns = json.load(f) + # gt_annos = GT_anns['GTs'] + + nusc_submissions = { + 'meta': self.modality, + 'results': nusc_annos, + 'map_results': map_pred_annos, + 'plan_results': plan_annos + # 'GTs': gt_annos + } + + mmcv.mkdir_or_exist(jsonfile_prefix) + if self.use_pkl_result: + res_path = osp.join(jsonfile_prefix, 'results_nusc.pkl') + else: + res_path = osp.join(jsonfile_prefix, 'results_nusc.json') + print('Results writes to', res_path) + dump(nusc_submissions, res_path) + return res_path + + def format_results(self, results, jsonfile_prefix=None): + """Format the results to json (standard format for COCO evaluation). + + Args: + results (list[dict]): Testing results of the dataset. + jsonfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + + Returns: + tuple: Returns (result_files, tmp_dir), where `result_files` is a \ + dict containing the json filepaths, `tmp_dir` is the temporal \ + directory created for saving json files when \ + `jsonfile_prefix` is not specified. 
+ """ + if isinstance(results, dict): + # print(f'results must be a list, but get dict, keys={results.keys()}') + # assert isinstance(results, list) + results = results['bbox_results'] + assert isinstance(results, list) + # assert len(results) == len(self), ( + # 'The length of results is not equal to the dataset len: {} != {}'. + # format(len(results), len(self))) + + if jsonfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + jsonfile_prefix = osp.join(tmp_dir.name, 'results') + else: + tmp_dir = None + + # currently the output prediction results could be in two formats + # 1. list of dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...) + # 2. list of dict('pts_bbox' or 'img_bbox': + # dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...)) + # this is a workaround to enable evaluation of both formats on nuScenes + # refer to https://github.com/open-mmlab/mmdetection3d/issues/449 + if not ('pts_bbox' in results[0] or 'img_bbox' in results[0]): + result_files = self._format_bbox(results, jsonfile_prefix) + else: + # should take the inner dict out of 'pts_bbox' or 'img_bbox' dict + result_files = dict() + for name in results[0]: + if name == 'metric_results': + continue + print(f'\nFormating bboxes of {name}') + results_ = [out[name] for out in results] + tmp_file_ = osp.join(jsonfile_prefix, name) + result_files.update( + {name: self._format_bbox(results_, tmp_file_)}) + return result_files, tmp_dir + + def _evaluate_single(self, + result_path, + logger=None, + metric='bbox', + map_metric='chamfer', + result_name='pts_bbox'): + """Evaluation for a single model in nuScenes protocol. + + Args: + result_path (str): Path of the result file. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + metric (str): Metric name used for evaluation. Default: 'bbox'. + result_name (str): Result name in the metric prefix. + Default: 'pts_bbox'. + + Returns: + dict: Dictionary of evaluation details. 
+ """ + detail = dict() + from nuscenes import NuScenes + self.nusc = NuScenes(version=self.version, dataroot=self.data_root, + verbose=False) + + output_dir = osp.join(*osp.split(result_path)[:-1]) + + eval_set_map = { + 'v1.0-mini': 'mini_val', + 'v1.0-trainval': 'val', + } + self.nusc_eval = NuScenesEval_custom( + self.nusc, + config=self.custom_eval_detection_configs, + result_path=result_path, + eval_set=eval_set_map[self.version], + output_dir=output_dir, + verbose=False, + overlap_test=self.overlap_test, + data_infos=self.data_infos + ) + self.nusc_eval.main(plot_examples=0, render_curves=False) + # record metrics + metrics = load(osp.join(output_dir, 'metrics_summary.json')) + metric_prefix = f'{result_name}_NuScenes' + for name in self.CLASSES: + for k, v in metrics['label_aps'][name].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val + for k, v in metrics['label_tp_errors'][name].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}_{}'.format(metric_prefix, name, k)] = val + for k, v in metrics['tp_errors'].items(): + val = float('{:.4f}'.format(v)) + detail['{}/{}'.format(metric_prefix, + self.ErrNameMapping[k])] = val + detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score'] + detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap'] + + + from mmcv.datasets.map_utils.mean_ap import eval_map + from mmcv.datasets.map_utils.mean_ap import format_res_gt_by_classes + result_path = osp.abspath(result_path) + + print('Formating results & gts by classes') + pred_results = load(result_path) + map_results = pred_results['map_results'] + gt_anns = load(self.map_ann_file) + map_annotations = gt_anns['GTs'] + cls_gens, cls_gts = format_res_gt_by_classes(result_path, + map_results, + map_annotations, + cls_names=self.MAPCLASSES, + num_pred_pts_per_instance=self.fixed_num, + eval_use_same_gt_sample_num_flag=self.eval_use_same_gt_sample_num_flag, + pc_range=self.pc_range) + map_metrics = map_metric if isinstance(map_metric, list) else [map_metric] + allowed_metrics = ['chamfer', 'iou'] + for metric in map_metrics: + if metric not in allowed_metrics: + raise KeyError(f'metric {metric} is not supported') + for metric in map_metrics: + print('-*'*10+f'use metric:{metric}'+'-*'*10) + if metric == 'chamfer': + thresholds = [0.5,1.0,1.5] + elif metric == 'iou': + thresholds= np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) + cls_aps = np.zeros((len(thresholds),self.NUM_MAPCLASSES)) + for i, thr in enumerate(thresholds): + print('-*'*10+f'threshhold:{thr}'+'-*'*10) + mAP, cls_ap = eval_map( + map_results, + map_annotations, + cls_gens, + cls_gts, + threshold=thr, + cls_names=self.MAPCLASSES, + logger=logger, + num_pred_pts_per_instance=self.fixed_num, + pc_range=self.pc_range, + metric=metric) + for j in range(self.NUM_MAPCLASSES): + cls_aps[i, j] = cls_ap[j]['ap'] + for i, name in enumerate(self.MAPCLASSES): + print('{}: {}'.format(name, cls_aps.mean(0)[i])) + detail['NuscMap_{}/{}_AP'.format(metric,name)] = cls_aps.mean(0)[i] + print('map: {}'.format(cls_aps.mean(0).mean())) + detail['NuscMap_{}/mAP'.format(metric)] = cls_aps.mean(0).mean() + for i, name in enumerate(self.MAPCLASSES): + for j, thr in enumerate(thresholds): + if metric == 'chamfer': + detail['NuscMap_{}/{}_AP_thr_{}'.format(metric,name,thr)]=cls_aps[j][i] + elif metric == 'iou': + if thr == 0.5 or thr == 0.75: + detail['NuscMap_{}/{}_AP_thr_{}'.format(metric,name,thr)]=cls_aps[j][i] + + return detail + + def evaluate(self, + results, + 
metric='bbox', + map_metric='chamfer', + logger=None, + jsonfile_prefix=None, + result_names=['pts_bbox'], + show=False, + out_dir=None, + pipeline=None): + """Evaluation in nuScenes protocol. + + Args: + results (list[dict]): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + jsonfile_prefix (str | None): The prefix of json files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + show (bool): Whether to visualize. + Default: False. + out_dir (str): Path to save the visualization results. + Default: None. + pipeline (list[dict], optional): raw data loading for showing. + Default: None. + + Returns: + dict[str, float]: Results of each evaluation metric. + """ + result_metric_names = ['EPA', 'ADE', 'FDE', 'MR'] + motion_cls_names = ['car', 'pedestrian'] + motion_metric_names = ['gt', 'cnt_ade', 'cnt_fde', 'hit', + 'fp', 'ADE', 'FDE', 'MR'] + all_metric_dict = {} + for met in motion_metric_names: + for cls in motion_cls_names: + all_metric_dict[met+'_'+cls] = 0.0 + result_dict = {} + for met in result_metric_names: + for cls in motion_cls_names: + result_dict[met+'_'+cls] = 0.0 + + alpha = 0.5 + + for i in range(len(results)): + for key in all_metric_dict.keys(): + all_metric_dict[key] += results[i]['metric_results'][key] + + for cls in motion_cls_names: + result_dict['EPA_'+cls] = (all_metric_dict['hit_'+cls] - \ + alpha * all_metric_dict['fp_'+cls]) / all_metric_dict['gt_'+cls] + result_dict['ADE_'+cls] = all_metric_dict['ADE_'+cls] / all_metric_dict['cnt_ade_'+cls] + result_dict['FDE_'+cls] = all_metric_dict['FDE_'+cls] / all_metric_dict['cnt_fde_'+cls] + result_dict['MR_'+cls] = all_metric_dict['MR_'+cls] / all_metric_dict['cnt_fde_'+cls] + + print('\n') + print('-------------- Motion Prediction --------------') + for k, v in result_dict.items(): + print(f'{k}: {v}') + + # NOTE: print planning metric + print('\n') + print('-------------- Planning --------------') + metric_dict = None + num_valid = 0 + for res in results: + if res['metric_results']['fut_valid_flag']: + num_valid += 1 + else: + continue + if metric_dict is None: + metric_dict = copy.deepcopy(res['metric_results']) + else: + for k in res['metric_results'].keys(): + metric_dict[k] += res['metric_results'][k] + + for k in metric_dict: + metric_dict[k] = metric_dict[k] / num_valid + print("{}:{}".format(k, metric_dict[k])) + + result_files, tmp_dir = self.format_results(results, jsonfile_prefix) + + if isinstance(result_files, dict): + results_dict = dict() + for name in result_names: + print('Evaluating bboxes of {}'.format(name)) + ret_dict = self._evaluate_single(result_files[name], metric=metric, map_metric=map_metric) + results_dict.update(ret_dict) + elif isinstance(result_files, str): + results_dict = self._evaluate_single(result_files, metric=metric, map_metric=map_metric) + + if tmp_dir is not None: + tmp_dir.cleanup() + + if show: + self.show(results, out_dir, pipeline=pipeline) + return results_dict + +def output_to_nusc_box(detection): + """Convert the output to the box class in the nuScenes. + + Args: + detection (dict): Detection results. + + - boxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox. + - scores_3d (torch.Tensor): Detection scores. + - labels_3d (torch.Tensor): Predicted box labels. + + Returns: + list[:obj:`NuScenesBox`]: List of standard NuScenesBoxes. 
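+            Each box also carries the predicted future trajectory of its
+            agent as ``fut_trajs``.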
+ """ + box3d = detection['boxes_3d'] + scores = detection['scores_3d'].numpy() + labels = detection['labels_3d'].numpy() + trajs = detection['trajs_3d'].numpy() + + + box_gravity_center = box3d.gravity_center.numpy() + box_dims = box3d.dims.numpy() + box_yaw = box3d.yaw.numpy() + # TODO: check whether this is necessary + # with dir_offset & dir_limit in the head + box_yaw = -box_yaw - np.pi / 2 + + box_list = [] + for i in range(len(box3d)): + quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box_yaw[i]) + velocity = (*box3d.tensor[i, 7:9], 0.0) + # velo_val = np.linalg.norm(box3d[i, 7:9]) + # velo_ori = box3d[i, 6] + # velocity = ( + # velo_val * np.cos(velo_ori), velo_val * np.sin(velo_ori), 0.0) + box = CustomNuscenesBox( + center=box_gravity_center[i], + size=box_dims[i], + orientation=quat, + fut_trajs=trajs[i], + label=labels[i], + score=scores[i], + velocity=velocity) + box_list.append(box) + return box_list + + +def lidar_nusc_box_to_global(info, + boxes, + classes, + eval_configs, + eval_version='detection_cvpr_2019'): + """Convert the box from ego to global coordinate. + + Args: + info (dict): Info for a specific sample data, including the + calibration information. + boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes. + classes (list[str]): Mapped classes in the evaluation. + eval_configs (object): Evaluation configuration object. + eval_version (str): Evaluation version. + Default: 'detection_cvpr_2019' + + Returns: + list: List of standard NuScenesBoxes in the global + coordinate. + """ + box_list = [] + for box in boxes: + # Move box to ego vehicle coord system + box.rotate(pyquaternion.Quaternion(info['lidar2ego_rotation'])) + box.translate(np.array(info['lidar2ego_translation'])) + # filter det in ego. + cls_range_x_map = eval_configs.class_range_x + cls_range_y_map = eval_configs.class_range_y + x_distance, y_distance = box.center[0], box.center[1] + det_range_x = cls_range_x_map[classes[box.label]] + det_range_y = cls_range_y_map[classes[box.label]] + if abs(x_distance) > det_range_x or abs(y_distance) > det_range_y: + continue + # Move box to global coord system + box.rotate(pyquaternion.Quaternion(info['ego2global_rotation'])) + box.translate(np.array(info['ego2global_translation'])) + box_list.append(box) + return box_list + +def output_to_vecs(detection): + box3d = detection['map_boxes_3d'].numpy() + scores = detection['map_scores_3d'].numpy() + labels = detection['map_labels_3d'].numpy() + pts = detection['map_pts_3d'].numpy() + + vec_list = [] + # import pdb;pdb.set_trace() + for i in range(box3d.shape[0]): + vec = dict( + bbox = box3d[i], # xyxy + label=labels[i], + score=scores[i], + pts=pts[i], + ) + vec_list.append(vec) + return vec_list \ No newline at end of file diff --git a/mmcv/datasets/nuscnes_eval.py b/mmcv/datasets/nuscnes_eval.py new file mode 100644 index 0000000..2b14535 --- /dev/null +++ b/mmcv/datasets/nuscnes_eval.py @@ -0,0 +1,756 @@ +import argparse +import copy +import json +import os +import time +from typing import Tuple, Dict, Any +import torch +import numpy as np + +from nuscenes import NuScenes +from nuscenes.eval.common.config import config_factory +from nuscenes.eval.common.data_classes import EvalBoxes +from nuscenes.eval.detection.data_classes import DetectionConfig +from nuscenes.eval.detection.evaluate import NuScenesEval +from pyquaternion import Quaternion + +from nuscenes import NuScenes +from nuscenes.eval.common.data_classes import EvalBoxes +from nuscenes.eval.detection.data_classes import DetectionBox +from 
diff --git a/mmcv/datasets/nuscnes_eval.py b/mmcv/datasets/nuscnes_eval.py
new file mode 100644
index 0000000..2b14535
--- /dev/null
+++ b/mmcv/datasets/nuscnes_eval.py
@@ -0,0 +1,756 @@
+import argparse
+import copy
+import json
+import os
+import random
+import time
+from typing import Tuple, Dict, Any
+
+import cv2
+import numpy as np
+import pycocotools.mask as mask_util
+import torch
+import tqdm
+from matplotlib import pyplot as plt
+from pyquaternion import Quaternion
+from torchvision.transforms.functional import rotate
+
+from nuscenes import NuScenes
+from nuscenes.eval.common.config import config_factory
+from nuscenes.eval.common.data_classes import EvalBoxes
+# NOTE: the devkit's load_gt is deliberately not imported; a modified version is defined below.
+from nuscenes.eval.common.loaders import load_prediction, add_center_dist, filter_eval_boxes
+from nuscenes.eval.common.render import setup_axis
+from nuscenes.eval.common.utils import quaternion_yaw, boxes_to_sensor
+from nuscenes.eval.detection.algo import accumulate, calc_ap, calc_tp
+from nuscenes.eval.detection.constants import TP_METRICS, DETECTION_NAMES, DETECTION_COLORS, \
+    TP_METRICS_UNITS, PRETTY_DETECTION_NAMES, PRETTY_TP_METRICS
+from nuscenes.eval.detection.data_classes import DetectionConfig, DetectionMetrics, DetectionBox, \
+    DetectionMetricData, DetectionMetricDataList
+from nuscenes.eval.detection.evaluate import NuScenesEval
+from nuscenes.eval.detection.render import summary_plot, class_pr_curve, dist_pr_curve, visualize_sample
+from nuscenes.eval.detection.utils import category_to_detection_name
+from nuscenes.eval.tracking.data_classes import TrackingBox
+from nuscenes.utils.data_classes import Box, LidarPointCloud
+from nuscenes.utils.geometry_utils import points_in_box, view_points, box_in_image, \
+    BoxVisibility, transform_matrix
+from nuscenes.utils.splits import create_splits_scenes
+
+from mmcv.core.bbox import BboxOverlaps3D
+
+Axis = Any
+
+
+def class_tp_curve(md_list: DetectionMetricDataList,
+                   metrics: DetectionMetrics,
+                   detection_name: str,
+                   min_recall: float,
+                   dist_th_tp: float,
+                   savepath: str = None,
+                   ax: Axis = None) -> None:
+    """
+    Plot the true positive curve for the specified class.
+    :param md_list: DetectionMetricDataList instance.
+    :param metrics: DetectionMetrics instance.
+    :param detection_name: The detection class to plot.
+    :param min_recall: Minimum recall value.
+    :param dist_th_tp: The distance threshold used to determine matches.
+    :param savepath: If given, saves the rendering here instead of displaying.
+    :param ax: Axes onto which to render.
+    """
+    # Get metric data for given detection class with tp distance threshold.
+    md = md_list[(detection_name, dist_th_tp)]
+    min_recall_ind = round(100 * min_recall)
+    if min_recall_ind <= md.max_recall_ind:
+        # For traffic_cone and barrier only a subset of the metrics are plotted.
+        rel_metrics = [m for m in TP_METRICS if not np.isnan(metrics.get_label_tp(detection_name, m))]
+        ylimit = max([max(getattr(md, metric)[min_recall_ind:md.max_recall_ind + 1]) for metric in rel_metrics]) * 1.1
+    else:
+        ylimit = 1.0
+
+    # Prepare axis.
+    if ax is None:
+        ax = setup_axis(title=PRETTY_DETECTION_NAMES[detection_name], xlabel='Recall', ylabel='Error', xlim=1,
+                        min_recall=min_recall)
+    ax.set_ylim(0, ylimit)
+
+    # Plot the recall vs. error curve for each tp metric.
+    for metric in TP_METRICS:
+        tp = metrics.get_label_tp(detection_name, metric)
+
+        # Plot only if we have valid data.
+        if tp is not np.nan and min_recall_ind <= md.max_recall_ind:
+            recall, error = md.recall[:md.max_recall_ind + 1], getattr(md, metric)[:md.max_recall_ind + 1]
+        else:
+            recall, error = [], []
+
+        # Change legend based on tp value
+        if tp is np.nan:
+            label = '{}: n/a'.format(PRETTY_TP_METRICS[metric])
+        elif min_recall_ind > md.max_recall_ind:
+            label = '{}: nan'.format(PRETTY_TP_METRICS[metric])
+        else:
+            label = '{}: {:.2f} ({})'.format(PRETTY_TP_METRICS[metric], tp, TP_METRICS_UNITS[metric])
+        if metric == 'trans_err':
+            label += f' ({md.max_recall_ind})'  # add recall
+            print(f'Recall: {detection_name}: {md.max_recall_ind/100}')
+        ax.plot(recall, error, label=label)
+    ax.axvline(x=md.max_recall, linestyle='-.', color=(0, 0, 0, 0.3))
+    ax.legend(loc='best')
+
+    if savepath is not None:
+        plt.savefig(savepath)
+        plt.close()
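As a usage note, `class_tp_curve` is normally driven by the evaluator defined later in this file; a hypothetical call once an evaluation run has produced its metrics might look like this:

```python
# Assumes a finished run of NuScenesEval_custom (defined below):
# metrics, md_list = nusc_eval.evaluate()
class_tp_curve(md_list, metrics, detection_name='car',
               min_recall=0.1, dist_th_tp=2.0, savepath='car_tp.pdf')
```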
+
+
+class DetectionBox_modified(DetectionBox):
+    def __init__(self, *args, token=None, visibility=None, index=None, **kwargs):
+        """Extend DetectionBox with the annotation token, visibility level and
+        temporal index of the sample."""
+        super().__init__(*args, **kwargs)
+        self.token = token
+        self.visibility = visibility
+        self.index = index
+
+    def serialize(self) -> dict:
+        """ Serialize instance into json-friendly format. """
+        return {
+            'token': self.token,
+            'sample_token': self.sample_token,
+            'translation': self.translation,
+            'size': self.size,
+            'rotation': self.rotation,
+            'velocity': self.velocity,
+            'ego_translation': self.ego_translation,
+            'num_pts': self.num_pts,
+            'detection_name': self.detection_name,
+            'detection_score': self.detection_score,
+            'attribute_name': self.attribute_name,
+            'visibility': self.visibility,
+            'index': self.index
+        }
+
+    @classmethod
+    def deserialize(cls, content: dict):
+        """ Initialize from serialized content. """
+        return cls(
+            token=content['token'],
+            sample_token=content['sample_token'],
+            translation=tuple(content['translation']),
+            size=tuple(content['size']),
+            rotation=tuple(content['rotation']),
+            velocity=tuple(content['velocity']),
+            ego_translation=(0.0, 0.0, 0.0) if 'ego_translation' not in content
+            else tuple(content['ego_translation']),
+            num_pts=-1 if 'num_pts' not in content else int(content['num_pts']),
+            detection_name=content['detection_name'],
+            detection_score=-1.0 if 'detection_score' not in content else float(content['detection_score']),
+            attribute_name=content['attribute_name'],
+            visibility=content['visibility'],
+            index=content['index'],
+        )
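A quick round-trip sketch (with hypothetical tokens and values) showing that the extra fields survive `serialize`/`deserialize`:

```python
box = DetectionBox_modified(
    token='ann_token_xyz', sample_token='sample_token_abc',
    translation=(600.0, 1640.0, 0.0), size=(1.9, 4.6, 1.7),
    rotation=(1.0, 0.0, 0.0, 0.0), velocity=(0.5, 0.1),
    detection_name='car', detection_score=0.87,
    attribute_name='vehicle.moving', visibility='4', index=12)

restored = DetectionBox_modified.deserialize(box.serialize())
assert restored.token == 'ann_token_xyz' and restored.index == 12
```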
+ """ + + center_3d = box.center.reshape(3, 1) + center_img = view_points(center_3d, intrinsic, normalize=True)[:2, :] + + visible = np.logical_and(center_img[0, :] > 0, center_img[0, :] < imsize[0]) + visible = np.logical_and(visible, center_img[1, :] < imsize[1]) + visible = np.logical_and(visible, center_img[1, :] > 0) + visible = np.logical_and(visible, center_3d[2, :] > 1) + + in_front = center_3d[2, :] > 0.1 # True if a corner is at least 0.1 meter in front of the camera. + + if vis_level == BoxVisibility.ALL: + return all(visible) and all(in_front) + elif vis_level == BoxVisibility.ANY: + return any(visible) and all(in_front) + elif vis_level == BoxVisibility.NONE: + return True + else: + raise ValueError("vis_level: {} not valid".format(vis_level)) + + +def exist_corners_in_image_but_not_all(box, intrinsic: np.ndarray, imsize: Tuple[int, int], + vis_level: int = BoxVisibility.ANY) -> bool: + """ + Check if a box is visible in images but not all corners in image . + :param box: The box to be checked. + :param intrinsic: . Intrinsic camera matrix. + :param imsize: (width, height). + :param vis_level: One of the enumerations of . + :return True if visibility condition is satisfied. + """ + + corners_3d = box.corners() + corners_img = view_points(corners_3d, intrinsic, normalize=True)[:2, :] + + visible = np.logical_and(corners_img[0, :] > 0, corners_img[0, :] < imsize[0]) + visible = np.logical_and(visible, corners_img[1, :] < imsize[1]) + visible = np.logical_and(visible, corners_img[1, :] > 0) + visible = np.logical_and(visible, corners_3d[2, :] > 1) + + in_front = corners_3d[2, :] > 0.1 # True if a corner is at least 0.1 meter in front of the camera. + + if any(visible) and not all(visible) and all(in_front): + return True + else: + return False + + +def load_gt(nusc: NuScenes, eval_split: str, box_cls, verbose: bool = False): + """ + Loads ground truth boxes from DB. + :param nusc: A NuScenes instance. + :param eval_split: The evaluation split for which we load GT boxes. + :param box_cls: Type of box to load, e.g. DetectionBox or TrackingBox. + :param verbose: Whether to print messages to stdout. + :return: The GT boxes. + """ + + # Init. + if box_cls == DetectionBox_modified: + attribute_map = {a['token']: a['name'] for a in nusc.attribute} + + if verbose: + print('Loading annotations for {} split from nuScenes version: {}'.format(eval_split, nusc.version)) + # Read out all sample_tokens in DB. + sample_tokens_all = [s['token'] for s in nusc.sample] + assert len(sample_tokens_all) > 0, "Error: Database has no samples!" + + # Only keep samples from this split. + splits = create_splits_scenes() + + # Check compatibility of split with nusc_version. + version = nusc.version + if eval_split in {'train', 'val', 'train_detect', 'train_track'}: + assert version.endswith('trainval'), \ + 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version) + elif eval_split in {'mini_train', 'mini_val'}: + assert version.endswith('mini'), \ + 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version) + elif eval_split == 'test': + assert version.endswith('test'), \ + 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version) + else: + raise ValueError('Error: Requested split {} which this function cannot map to the correct NuScenes version.' + .format(eval_split)) + + if eval_split == 'test': + # Check that you aren't trying to cheat :). 
+
+
+def load_gt(nusc: NuScenes, eval_split: str, box_cls, verbose: bool = False):
+    """
+    Loads ground truth boxes from DB.
+    :param nusc: A NuScenes instance.
+    :param eval_split: The evaluation split for which we load GT boxes.
+    :param box_cls: Type of box to load, e.g. DetectionBox or TrackingBox.
+    :param verbose: Whether to print messages to stdout.
+    :return: The GT boxes.
+    """
+    # Init.
+    if box_cls == DetectionBox_modified:
+        attribute_map = {a['token']: a['name'] for a in nusc.attribute}
+
+    if verbose:
+        print('Loading annotations for {} split from nuScenes version: {}'.format(eval_split, nusc.version))
+    # Read out all sample_tokens in DB.
+    sample_tokens_all = [s['token'] for s in nusc.sample]
+    assert len(sample_tokens_all) > 0, "Error: Database has no samples!"
+
+    # Only keep samples from this split.
+    splits = create_splits_scenes()
+
+    # Check compatibility of split with nusc_version.
+    version = nusc.version
+    if eval_split in {'train', 'val', 'train_detect', 'train_track'}:
+        assert version.endswith('trainval'), \
+            'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)
+    elif eval_split in {'mini_train', 'mini_val'}:
+        assert version.endswith('mini'), \
+            'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)
+    elif eval_split == 'test':
+        assert version.endswith('test'), \
+            'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)
+    else:
+        raise ValueError('Error: Requested split {} which this function cannot map to the correct NuScenes version.'
+                         .format(eval_split))
+
+    if eval_split == 'test':
+        # Check that you aren't trying to cheat :).
+        assert len(nusc.sample_annotation) > 0, \
+            'Error: You are trying to evaluate on the test set but you do not have the annotations!'
+
+    # Map each sample token to its temporal index within its scene (1-based).
+    index_map = {}
+    for scene in nusc.scene:
+        first_sample_token = scene['first_sample_token']
+        sample = nusc.get('sample', first_sample_token)
+        index_map[first_sample_token] = 1
+        index = 2
+        while sample['next'] != '':
+            sample = nusc.get('sample', sample['next'])
+            index_map[sample['token']] = index
+            index += 1
+
+    sample_tokens = []
+    for sample_token in sample_tokens_all:
+        scene_token = nusc.get('sample', sample_token)['scene_token']
+        scene_record = nusc.get('scene', scene_token)
+        if scene_record['name'] in splits[eval_split]:
+            sample_tokens.append(sample_token)
+
+    all_annotations = EvalBoxes()
+
+    # Load annotations and filter predictions and annotations.
+    tracking_id_set = set()
+    for sample_token in tqdm.tqdm(sample_tokens, leave=verbose):
+
+        sample = nusc.get('sample', sample_token)
+        sample_annotation_tokens = sample['anns']
+
+        sample_boxes = []
+        for sample_annotation_token in sample_annotation_tokens:
+
+            sample_annotation = nusc.get('sample_annotation', sample_annotation_token)
+            if box_cls == DetectionBox_modified:
+                # Get label name in detection task and filter unused labels.
+                detection_name = category_to_detection_name(sample_annotation['category_name'])
+                if detection_name is None:
+                    continue
+
+                # Get attribute_name.
+                attr_tokens = sample_annotation['attribute_tokens']
+                attr_count = len(attr_tokens)
+                if attr_count == 0:
+                    attribute_name = ''
+                elif attr_count == 1:
+                    attribute_name = attribute_map[attr_tokens[0]]
+                else:
+                    raise Exception('Error: GT annotations must not have more than one attribute!')
+
+                sample_boxes.append(
+                    box_cls(
+                        token=sample_annotation_token,
+                        sample_token=sample_token,
+                        translation=sample_annotation['translation'],
+                        size=sample_annotation['size'],
+                        rotation=sample_annotation['rotation'],
+                        velocity=nusc.box_velocity(sample_annotation['token'])[:2],
+                        num_pts=sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts'],
+                        detection_name=detection_name,
+                        detection_score=-1.0,  # GT samples do not have a score.
+                        attribute_name=attribute_name,
+                        visibility=sample_annotation['visibility_token'],
+                        index=index_map[sample_token]
+                    )
+                )
+            elif box_cls == TrackingBox:
+                assert False, 'Error: TrackingBox is not supported by this modified loader.'
+            else:
+                raise NotImplementedError('Error: Invalid box_cls %s!' % box_cls)
+
+        all_annotations.add_boxes(sample_token, sample_boxes)
+
+    if verbose:
+        print("Loaded ground truth annotations for {} samples.".format(len(all_annotations.sample_tokens)))
+
+    return all_annotations
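A hypothetical invocation on the mini split (the dataroot follows the defaults used in the `__main__` block at the bottom of this file):

```python
from nuscenes import NuScenes

nusc = NuScenes(version='v1.0-mini', dataroot='data/nuscenes', verbose=False)
gt_boxes = load_gt(nusc, 'mini_val', DetectionBox_modified, verbose=True)
print(len(gt_boxes.sample_tokens), 'samples with GT boxes')
```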
+
+
+def filter_eval_boxes_by_id(nusc: NuScenes,
+                            eval_boxes: EvalBoxes,
+                            id=None,
+                            verbose: bool = False) -> EvalBoxes:
+    """
+    Filter boxes by annotation token, keeping only boxes whose token is in ``id``.
+    :param nusc: An instance of the NuScenes class.
+    :param eval_boxes: An instance of the EvalBoxes class.
+    :param id: The set of annotation tokens of the boxes to keep.
+    :param verbose: Whether to print to stdout.
+    """
+    # Accumulators for number of filtered boxes.
+    total, anns_filter = 0, 0
+    for ind, sample_token in enumerate(eval_boxes.sample_tokens):
+
+        # Filter on anns
+        total += len(eval_boxes[sample_token])
+        filtered_boxes = []
+        for box in eval_boxes[sample_token]:
+            if box.token in id:
+                filtered_boxes.append(box)
+        anns_filter += len(filtered_boxes)
+        eval_boxes.boxes[sample_token] = filtered_boxes
+
+    if verbose:
+        print("=> Original number of boxes: %d" % total)
+        print("=> After anns based filtering: %d" % anns_filter)
+
+    return eval_boxes
+
+
+def filter_eval_boxes_by_visibility(
+        ori_eval_boxes: EvalBoxes,
+        visibility=None,
+        verbose: bool = False) -> EvalBoxes:
+    """
+    Filter boxes by visibility level, keeping only boxes whose visibility token
+    equals ``visibility``. Works on a deep copy, so the input is left unchanged.
+    :param ori_eval_boxes: An instance of the EvalBoxes class.
+    :param visibility: The visibility token ('1'-'4') of the boxes to keep.
+    :param verbose: Whether to print to stdout.
+    """
+    # Accumulators for number of filtered boxes.
+    eval_boxes = copy.deepcopy(ori_eval_boxes)
+    total, anns_filter = 0, 0
+    for ind, sample_token in enumerate(eval_boxes.sample_tokens):
+        # Filter on anns
+        total += len(eval_boxes[sample_token])
+        filtered_boxes = []
+        for box in eval_boxes[sample_token]:
+            if box.visibility == visibility:
+                filtered_boxes.append(box)
+        anns_filter += len(filtered_boxes)
+        eval_boxes.boxes[sample_token] = filtered_boxes
+
+    if verbose:
+        print("=> Original number of boxes: %d" % total)
+        print("=> After visibility based filtering: %d" % anns_filter)
+
+    return eval_boxes
+
+
+def filter_by_sample_token(ori_eval_boxes, valid_sample_tokens=(), verbose=False):
+    """Keep only the samples whose token is in ``valid_sample_tokens``."""
+    eval_boxes = copy.deepcopy(ori_eval_boxes)
+    for sample_token in eval_boxes.sample_tokens:
+        if sample_token not in valid_sample_tokens:
+            eval_boxes.boxes.pop(sample_token)
+    return eval_boxes
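The filters compose naturally, since each returns an `EvalBoxes`; a sketch with hypothetical inputs:

```python
# all_gt is an EvalBoxes instance, e.g. the result of load_gt above (hypothetical).
visible_gt = filter_eval_boxes_by_visibility(all_gt, visibility='4', verbose=True)
subset_gt = filter_by_sample_token(visible_gt,
                                   valid_sample_tokens={'sample_token_abc'})
```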
+
+
+def filter_eval_boxes_by_overlap(nusc: NuScenes,
+                                 eval_boxes: EvalBoxes,
+                                 verbose: bool = False) -> EvalBoxes:
+    """
+    Filter boxes based on overlap between camera views: only boxes whose center
+    is visible in more than one camera are kept.
+    :param nusc: An instance of the NuScenes class.
+    :param eval_boxes: An instance of the EvalBoxes class.
+    :param verbose: Whether to print to stdout.
+    """
+    # Accumulators for number of filtered boxes.
+    cams = ['CAM_FRONT',
+            'CAM_FRONT_RIGHT',
+            'CAM_BACK_RIGHT',
+            'CAM_BACK',
+            'CAM_BACK_LEFT',
+            'CAM_FRONT_LEFT']
+
+    total, anns_filter = 0, 0
+    for ind, sample_token in enumerate(eval_boxes.sample_tokens):
+
+        # Filter on anns
+        total += len(eval_boxes[sample_token])
+        sample_record = nusc.get('sample', sample_token)
+        filtered_boxes = []
+        for box in eval_boxes[sample_token]:
+            count = 0
+            for cam in cams:
+                # Adapted from the nuScenes devkit.
+                sample_data_token = sample_record['data'][cam]
+                sd_record = nusc.get('sample_data', sample_data_token)
+                cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
+                sensor_record = nusc.get('sensor', cs_record['sensor_token'])
+                pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])
+                cam_intrinsic = np.array(cs_record['camera_intrinsic'])
+                imsize = (sd_record['width'], sd_record['height'])
+                new_box = Box(box.translation, box.size, Quaternion(box.rotation),
+                              name=box.detection_name, token='')
+
+                # Move box to ego vehicle coord system.
+                new_box.translate(-np.array(pose_record['translation']))
+                new_box.rotate(Quaternion(pose_record['rotation']).inverse)
+
+                # Move box to sensor coord system.
+                new_box.translate(-np.array(cs_record['translation']))
+                new_box.rotate(Quaternion(cs_record['rotation']).inverse)
+
+                if center_in_image(new_box, cam_intrinsic, imsize, vis_level=BoxVisibility.ANY):
+                    count += 1
+                # if exist_corners_in_image_but_not_all(new_box, cam_intrinsic, imsize, vis_level=BoxVisibility.ANY):
+                #     count += 1
+
+            if count > 1:
+                with open('center_overlap.txt', 'a') as f:
+                    try:
+                        f.write(box.token + '\n')
+                    except Exception:
+                        # Predicted boxes may not carry an annotation token.
+                        pass
+                filtered_boxes.append(box)
+        anns_filter += len(filtered_boxes)
+        eval_boxes.boxes[sample_token] = filtered_boxes
+
+    if verbose:
+        print("=> Original number of boxes: %d" % total)
+        print("=> After overlap based filtering: %d" % anns_filter)
+
+    return eval_boxes
+
+
+class NuScenesEval_custom(NuScenesEval):
+    """
+    Variant of the official detection evaluation that additionally supports
+    filtering boxes by visibility, temporal index and cross-camera overlap.
+    """
+
+    def __init__(self,
+                 nusc: NuScenes,
+                 config: DetectionConfig,
+                 result_path: str,
+                 eval_set: str,
+                 output_dir: str = None,
+                 verbose: bool = True,
+                 overlap_test=False,
+                 eval_mask=False,
+                 data_infos=None
+                 ):
+        """
+        Initialize a DetectionEval object.
+        :param nusc: A NuScenes object.
+        :param config: A DetectionConfig object.
+        :param result_path: Path of the nuScenes JSON result file.
+        :param eval_set: The dataset split to evaluate on, e.g. train, val or test.
+        :param output_dir: Folder to save plots and results to.
+        :param verbose: Whether to print to stdout.
+        :param overlap_test: Whether to filter boxes by cross-camera overlap.
+        :param eval_mask: Whether to evaluate with masks.
+        :param data_infos: Dataset info dicts used by the custom filtering.
+        """
+        self.nusc = nusc
+        self.result_path = result_path
+        self.eval_set = eval_set
+        self.output_dir = output_dir
+        self.verbose = verbose
+        self.cfg = config
+        self.overlap_test = overlap_test
+        self.eval_mask = eval_mask
+        self.data_infos = data_infos
+        # Check result file exists.
+        assert os.path.exists(result_path), 'Error: The result file does not exist!'
+
+        # Make dirs.
+        self.plot_dir = os.path.join(self.output_dir, 'plots')
+        if not os.path.isdir(self.output_dir):
+            os.makedirs(self.output_dir)
+        if not os.path.isdir(self.plot_dir):
+            os.makedirs(self.plot_dir)
+
+        # Load data.
+        if verbose:
+            print('Initializing nuScenes detection evaluation')
+        self.pred_boxes, self.meta = load_prediction(self.result_path, self.cfg.max_boxes_per_sample, DetectionBox,
+                                                     verbose=verbose)
+        self.gt_boxes = load_gt(self.nusc, self.eval_set, DetectionBox_modified, verbose=verbose)
+
+        assert set(self.pred_boxes.sample_tokens) == set(self.gt_boxes.sample_tokens), \
+            "Samples in split doesn't match samples in predictions."
+
+        # Add center distances.
+        # self.pred_boxes = add_center_dist(nusc, self.pred_boxes)
+        # self.gt_boxes = add_center_dist(nusc, self.gt_boxes)
+
+        # Filter boxes (distance, points per box, etc.).
+        if verbose:
+            print('Filtering predictions')
+        self.pred_boxes = filter_eval_boxes(nusc, self.pred_boxes, self.cfg.class_range, verbose=verbose)
+        if verbose:
+            print('Filtering ground truth annotations')
+        self.gt_boxes = filter_eval_boxes(nusc, self.gt_boxes, self.cfg.class_range, verbose=verbose)
+
+        if self.overlap_test:
+            self.pred_boxes = filter_eval_boxes_by_overlap(self.nusc, self.pred_boxes)
+            self.gt_boxes = filter_eval_boxes_by_overlap(self.nusc, self.gt_boxes, verbose=True)
+
+        self.all_gt = copy.deepcopy(self.gt_boxes)
+        self.all_preds = copy.deepcopy(self.pred_boxes)
+        self.sample_tokens = self.gt_boxes.sample_tokens
+
+        # Map each sample token to its temporal index within its scene (1-based).
+        self.index_map = {}
+        for scene in nusc.scene:
+            first_sample_token = scene['first_sample_token']
+            sample = nusc.get('sample', first_sample_token)
+            self.index_map[first_sample_token] = 1
+            index = 2
+            while sample['next'] != '':
+                sample = nusc.get('sample', sample['next'])
+                self.index_map[sample['token']] = index
+                index += 1
+
+    def update_gt(self, type_='vis', visibility='1', index=1):
+        """Re-derive self.gt_boxes from the full GT set, either by visibility
+        level ('vis') or by temporal index within the scene ('ord')."""
+        if type_ == 'vis':
+            self.visibility_test = True
+            if self.visibility_test:
+                '''[{'description': 'visibility of whole object is between 0 and 40%',
+                  'token': '1',
+                  'level': 'v0-40'},
+                 {'description': 'visibility of whole object is between 40 and 60%',
+                  'token': '2',
+                  'level': 'v40-60'},
+                 {'description': 'visibility of whole object is between 60 and 80%',
+                  'token': '3',
+                  'level': 'v60-80'},
+                 {'description': 'visibility of whole object is between 80 and 100%',
+                  'token': '4',
+                  'level': 'v80-100'}]'''
+                self.gt_boxes = filter_eval_boxes_by_visibility(self.all_gt, visibility, verbose=True)
+
+        elif type_ == 'ord':
+            valid_tokens = [key for (key, value) in self.index_map.items() if value == index]
+            self.gt_boxes = filter_by_sample_token(self.all_gt, valid_tokens)
+            self.pred_boxes = filter_by_sample_token(self.all_preds, valid_tokens)
+        self.sample_tokens = self.gt_boxes.sample_tokens
+
+    def evaluate(self) -> Tuple[DetectionMetrics, DetectionMetricDataList]:
+        """
+        Performs the actual evaluation.
+        :return: A tuple of high-level and the raw metric data.
+        """
+        start_time = time.time()
+
+        # -----------------------------------
+        # Step 1: Accumulate metric data for all classes and distance thresholds.
+        # -----------------------------------
+        if self.verbose:
+            print('Accumulating metric data...')
+        metric_data_list = DetectionMetricDataList()
+
+        for class_name in self.cfg.class_names:
+            for dist_th in self.cfg.dist_ths:
+                md = accumulate(self.gt_boxes, self.pred_boxes, class_name, self.cfg.dist_fcn_callable, dist_th)
+                metric_data_list.set(class_name, dist_th, md)
+
+        # -----------------------------------
+        # Step 2: Calculate metrics from the data.
+        # -----------------------------------
+        if self.verbose:
+            print('Calculating metrics...')
+        metrics = DetectionMetrics(self.cfg)
+        for class_name in self.cfg.class_names:
+            # Compute APs.
+            for dist_th in self.cfg.dist_ths:
+                metric_data = metric_data_list[(class_name, dist_th)]
+                ap = calc_ap(metric_data, self.cfg.min_recall, self.cfg.min_precision)
+                metrics.add_label_ap(class_name, dist_th, ap)
+            # Compute TP metrics.
+ for metric_name in TP_METRICS: + metric_data = metric_data_list[(class_name, self.cfg.dist_th_tp)] + if class_name in ['traffic_cone'] and metric_name in ['attr_err', 'vel_err', 'orient_err']: + tp = np.nan + elif class_name in ['barrier'] and metric_name in ['attr_err', 'vel_err']: + tp = np.nan + else: + tp = calc_tp(metric_data, self.cfg.min_recall, metric_name) + metrics.add_label_tp(class_name, metric_name, tp) + + # Compute evaluation time. + metrics.add_runtime(time.time() - start_time) + + return metrics, metric_data_list + + def render(self, metrics: DetectionMetrics, md_list: DetectionMetricDataList) -> None: + """ + Renders various PR and TP curves. + :param metrics: DetectionMetrics instance. + :param md_list: DetectionMetricDataList instance. + """ + if self.verbose: + print('Rendering PR and TP curves') + + def savepath(name): + return os.path.join(self.plot_dir, name + '.pdf') + + summary_plot(md_list, metrics, min_precision=self.cfg.min_precision, min_recall=self.cfg.min_recall, + dist_th_tp=self.cfg.dist_th_tp, savepath=savepath('summary')) + + for detection_name in self.cfg.class_names: + class_pr_curve(md_list, metrics, detection_name, self.cfg.min_precision, self.cfg.min_recall, + savepath=savepath(detection_name + '_pr')) + + class_tp_curve(md_list, metrics, detection_name, self.cfg.min_recall, self.cfg.dist_th_tp, + savepath=savepath(detection_name + '_tp')) + + for dist_th in self.cfg.dist_ths: + dist_pr_curve(md_list, metrics, dist_th, self.cfg.min_precision, self.cfg.min_recall, + savepath=savepath('dist_pr_' + str(dist_th))) + + +if __name__ == "__main__": + + # Settings. + parser = argparse.ArgumentParser(description='Evaluate nuScenes detection results.', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('result_path', type=str, help='The submission as a JSON file.') + parser.add_argument('--output_dir', type=str, default='~/nuscenes-metrics', + help='Folder to store result metrics, graphs and example visualizations.') + parser.add_argument('--eval_set', type=str, default='val', + help='Which dataset split to evaluate on, train, val or test.') + parser.add_argument('--dataroot', type=str, default='data/nuscenes', + help='Default nuScenes data directory.') + parser.add_argument('--version', type=str, default='v1.0-trainval', + help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.') + parser.add_argument('--config_path', type=str, default='', + help='Path to the configuration file.' 
+ 'If no path given, the CVPR 2019 configuration will be used.') + parser.add_argument('--plot_examples', type=int, default=0, + help='How many example visualizations to write to disk.') + parser.add_argument('--render_curves', type=int, default=1, + help='Whether to render PR and TP curves to disk.') + parser.add_argument('--verbose', type=int, default=1, + help='Whether to print to stdout.') + args = parser.parse_args() + + result_path_ = os.path.expanduser(args.result_path) + output_dir_ = os.path.expanduser(args.output_dir) + eval_set_ = args.eval_set + dataroot_ = args.dataroot + version_ = args.version + config_path = args.config_path + plot_examples_ = args.plot_examples + render_curves_ = bool(args.render_curves) + verbose_ = bool(args.verbose) + + if config_path == '': + cfg_ = config_factory('detection_cvpr_2019') + else: + with open(config_path, 'r') as _f: + cfg_ = DetectionConfig.deserialize(json.load(_f)) + + nusc_ = NuScenes(version=version_, verbose=verbose_, dataroot=dataroot_) + nusc_eval = NuScenesEval_custom(nusc_, config=cfg_, result_path=result_path_, eval_set=eval_set_, + output_dir=output_dir_, verbose=verbose_) + for vis in ['1', '2', '3', '4']: + nusc_eval.update_gt(type_='vis', visibility=vis) + print(f'================ {vis} ===============') + nusc_eval.main(plot_examples=plot_examples_, render_curves=render_curves_) + #for index in range(1, 41): + # nusc_eval.update_gt(type_='ord', index=index) + # diff --git a/mmcv/datasets/pipelines/__init__.py b/mmcv/datasets/pipelines/__init__.py new file mode 100644 index 0000000..04e195d --- /dev/null +++ b/mmcv/datasets/pipelines/__init__.py @@ -0,0 +1,50 @@ +from .compose import Compose +from .formating import (Collect, Collect3D, DefaultFormatBundle, DefaultFormatBundle3D, + CustomDefaultFormatBundle3D, ImageToTensor, + ToDataContainer, ToTensor, Transpose, to_tensor,VADFormatBundle3D) +from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam, + LoadMultiChannelImageFromFiles, LoadProposals, + LoadAnnotations3D, LoadImageFromFileMono3D, + LoadMultiViewImageFromFiles, LoadPointsFromFile, + LoadPointsFromMultiSweeps, NormalizePointsColor, + PointSegClassMapping, LoadAnnotations3D_E2E, CustomLoadPointsFromMultiSweeps, CustomLoadPointsFromFile) +from .test_time_aug import MultiScaleFlipAug, MultiScaleFlipAug3D +from .transforms_3d import (BackgroundPointsFilter, GlobalAlignment, + GlobalRotScaleTrans, IndoorPatchPointSample, + IndoorPointSample, ObjectNameFilter, ObjectNoise, + ObjectRangeFilter, ObjectSample, PointSample, + PointShuffle, PointsRangeFilter, + RandomDropPointsColor, RandomFlip3D, + RandomJitterPoints, VoxelBasedPointSampler, + PadMultiViewImage, NormalizeMultiviewImage, + PhotoMetricDistortionMultiViewImage, CustomCollect3D, + RandomScaleImageMultiViewImage,VADObjectRangeFilter,VADObjectNameFilter,CustomPointsRangeFilter) +from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, Normalize, + Pad, PhotoMetricDistortion, RandomCenterCropPad, + RandomCrop, RandomFlip, RandomShift, Resize, + SegRescale) +from .occflow_label import GenerateOccFlowLabels + +# __all__ = [ +# 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer', +# 'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations', +# 'LoadImageFromFile', 'LoadImageFromWebcam', +# 'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug', +# 'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale', +# 'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu', +# 
'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear', +# 'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform', +# 'ContrastTransform', 'Translate', 'RandomShift', +# 'ObjectSample', 'RandomFlip3D', 'ObjectNoise', 'GlobalRotScaleTrans', +# 'PointShuffle', 'ObjectRangeFilter', 'PointsRangeFilter', 'Collect3D', +# 'LoadMultiViewImageFromFiles', 'LoadPointsFromFile', +# 'DefaultFormatBundle3D', 'DataBaseSampler', +# 'NormalizePointsColor', 'LoadAnnotations3D', 'IndoorPointSample', +# 'PointSample', 'PointSegClassMapping', 'MultiScaleFlipAug3D', +# 'LoadPointsFromMultiSweeps', 'BackgroundPointsFilter', +# 'VoxelBasedPointSampler', 'GlobalAlignment', 'IndoorPatchPointSample', +# 'LoadImageFromFileMono3D', 'ObjectNameFilter', 'RandomDropPointsColor', +# 'RandomJitterPoints', 'CustomDefaultFormatBundle3D', 'LoadAnnotations3D_E2E', +# 'GenerateOccFlowLabels', 'PadMultiViewImage', 'NormalizeMultiviewImage', +# 'PhotoMetricDistortionMultiViewImage', 'CustomCollect3D', 'RandomScaleImageMultiViewImage' +# ] diff --git a/mmcv/datasets/pipelines/compose.py b/mmcv/datasets/pipelines/compose.py new file mode 100644 index 0000000..1567530 --- /dev/null +++ b/mmcv/datasets/pipelines/compose.py @@ -0,0 +1,51 @@ +import collections + +from mmcv.utils import build_from_cfg + +from ..builder import PIPELINES + + +@PIPELINES.register_module() +class Compose: + """Compose multiple transforms sequentially. + + Args: + transforms (Sequence[dict | callable]): Sequence of transform object or + config dict to be composed. + """ + + def __init__(self, transforms): + assert isinstance(transforms, collections.abc.Sequence) + self.transforms = [] + for transform in transforms: + if isinstance(transform, dict): + transform = build_from_cfg(transform, PIPELINES) + self.transforms.append(transform) + elif callable(transform): + self.transforms.append(transform) + else: + raise TypeError('transform must be callable or a dict') + + def __call__(self, data): + """Call function to apply transforms sequentially. + + Args: + data (dict): A result dict contains the data to transform. + + Returns: + dict: Transformed data. + """ + + for t in self.transforms: + data = t(data) + if data is None: + return None + return data + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + for t in self.transforms: + format_string += '\n' + format_string += f' {t}' + format_string += '\n)' + return format_string diff --git a/mmcv/datasets/pipelines/data_augment_utils.py b/mmcv/datasets/pipelines/data_augment_utils.py new file mode 100644 index 0000000..231ab80 --- /dev/null +++ b/mmcv/datasets/pipelines/data_augment_utils.py @@ -0,0 +1,409 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numba +import numpy as np +import warnings +from numba.errors import NumbaPerformanceWarning + +from mmcv.core.bbox import box_np_ops + +warnings.filterwarnings('ignore', category=NumbaPerformanceWarning) + + +@numba.njit +def _rotation_box2d_jit_(corners, angle, rot_mat_T): + """Rotate 2D boxes. + + Args: + corners (np.ndarray): Corners of boxes. + angle (float): Rotation angle. + rot_mat_T (np.ndarray): Transposed rotation matrix. + """ + rot_sin = np.sin(angle) + rot_cos = np.cos(angle) + rot_mat_T[0, 0] = rot_cos + rot_mat_T[0, 1] = -rot_sin + rot_mat_T[1, 0] = rot_sin + rot_mat_T[1, 1] = rot_cos + corners[:] = corners @ rot_mat_T + + +@numba.jit(nopython=True) +def box_collision_test(boxes, qboxes, clockwise=True): + """Box collision test. + + Args: + boxes (np.ndarray): Corners of current boxes. 
+ qboxes (np.ndarray): Boxes to be avoid colliding. + clockwise (bool): Whether the corners are in clockwise order. + Default: True. + """ + N = boxes.shape[0] + K = qboxes.shape[0] + ret = np.zeros((N, K), dtype=np.bool_) + slices = np.array([1, 2, 3, 0]) + lines_boxes = np.stack((boxes, boxes[:, slices, :]), + axis=2) # [N, 4, 2(line), 2(xy)] + lines_qboxes = np.stack((qboxes, qboxes[:, slices, :]), axis=2) + # vec = np.zeros((2,), dtype=boxes.dtype) + boxes_standup = box_np_ops.corner_to_standup_nd_jit(boxes) + qboxes_standup = box_np_ops.corner_to_standup_nd_jit(qboxes) + for i in range(N): + for j in range(K): + # calculate standup first + iw = ( + min(boxes_standup[i, 2], qboxes_standup[j, 2]) - + max(boxes_standup[i, 0], qboxes_standup[j, 0])) + if iw > 0: + ih = ( + min(boxes_standup[i, 3], qboxes_standup[j, 3]) - + max(boxes_standup[i, 1], qboxes_standup[j, 1])) + if ih > 0: + for k in range(4): + for box_l in range(4): + A = lines_boxes[i, k, 0] + B = lines_boxes[i, k, 1] + C = lines_qboxes[j, box_l, 0] + D = lines_qboxes[j, box_l, 1] + acd = (D[1] - A[1]) * (C[0] - + A[0]) > (C[1] - A[1]) * ( + D[0] - A[0]) + bcd = (D[1] - B[1]) * (C[0] - + B[0]) > (C[1] - B[1]) * ( + D[0] - B[0]) + if acd != bcd: + abc = (C[1] - A[1]) * (B[0] - A[0]) > ( + B[1] - A[1]) * ( + C[0] - A[0]) + abd = (D[1] - A[1]) * (B[0] - A[0]) > ( + B[1] - A[1]) * ( + D[0] - A[0]) + if abc != abd: + ret[i, j] = True # collision. + break + if ret[i, j] is True: + break + if ret[i, j] is False: + # now check complete overlap. + # box overlap qbox: + box_overlap_qbox = True + for box_l in range(4): # point l in qboxes + for k in range(4): # corner k in boxes + vec = boxes[i, k] - boxes[i, (k + 1) % 4] + if clockwise: + vec = -vec + cross = vec[1] * ( + boxes[i, k, 0] - qboxes[j, box_l, 0]) + cross -= vec[0] * ( + boxes[i, k, 1] - qboxes[j, box_l, 1]) + if cross >= 0: + box_overlap_qbox = False + break + if box_overlap_qbox is False: + break + + if box_overlap_qbox is False: + qbox_overlap_box = True + for box_l in range(4): # point box_l in boxes + for k in range(4): # corner k in qboxes + vec = qboxes[j, k] - qboxes[j, (k + 1) % 4] + if clockwise: + vec = -vec + cross = vec[1] * ( + qboxes[j, k, 0] - boxes[i, box_l, 0]) + cross -= vec[0] * ( + qboxes[j, k, 1] - boxes[i, box_l, 1]) + if cross >= 0: # + qbox_overlap_box = False + break + if qbox_overlap_box is False: + break + if qbox_overlap_box: + ret[i, j] = True # collision. + else: + ret[i, j] = True # collision. + return ret + + +@numba.njit +def noise_per_box(boxes, valid_mask, loc_noises, rot_noises): + """Add noise to every box (only on the horizontal plane). + + Args: + boxes (np.ndarray): Input boxes with shape (N, 5). + valid_mask (np.ndarray): Mask to indicate which boxes are valid + with shape (N). + loc_noises (np.ndarray): Location noises with shape (N, M, 3). + rot_noises (np.ndarray): Rotation noises with shape (N, M). + + Returns: + np.ndarray: Mask to indicate whether the noise is + added successfully (pass the collision test). 
+ """ + num_boxes = boxes.shape[0] + num_tests = loc_noises.shape[1] + box_corners = box_np_ops.box2d_to_corner_jit(boxes) + current_corners = np.zeros((4, 2), dtype=boxes.dtype) + rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) + success_mask = -np.ones((num_boxes, ), dtype=np.int64) + # print(valid_mask) + for i in range(num_boxes): + if valid_mask[i]: + for j in range(num_tests): + current_corners[:] = box_corners[i] + current_corners -= boxes[i, :2] + _rotation_box2d_jit_(current_corners, rot_noises[i, j], + rot_mat_T) + current_corners += boxes[i, :2] + loc_noises[i, j, :2] + coll_mat = box_collision_test( + current_corners.reshape(1, 4, 2), box_corners) + coll_mat[0, i] = False + # print(coll_mat) + if not coll_mat.any(): + success_mask[i] = j + box_corners[i] = current_corners + break + return success_mask + + +@numba.njit +def noise_per_box_v2_(boxes, valid_mask, loc_noises, rot_noises, + global_rot_noises): + """Add noise to every box (only on the horizontal plane). Version 2 used + when enable global rotations. + + Args: + boxes (np.ndarray): Input boxes with shape (N, 5). + valid_mask (np.ndarray): Mask to indicate which boxes are valid + with shape (N). + loc_noises (np.ndarray): Location noises with shape (N, M, 3). + rot_noises (np.ndarray): Rotation noises with shape (N, M). + + Returns: + np.ndarray: Mask to indicate whether the noise is + added successfully (pass the collision test). + """ + num_boxes = boxes.shape[0] + num_tests = loc_noises.shape[1] + box_corners = box_np_ops.box2d_to_corner_jit(boxes) + current_corners = np.zeros((4, 2), dtype=boxes.dtype) + current_box = np.zeros((1, 5), dtype=boxes.dtype) + rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype) + dst_pos = np.zeros((2, ), dtype=boxes.dtype) + success_mask = -np.ones((num_boxes, ), dtype=np.int64) + corners_norm = np.zeros((4, 2), dtype=boxes.dtype) + corners_norm[1, 1] = 1.0 + corners_norm[2] = 1.0 + corners_norm[3, 0] = 1.0 + corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype) + corners_norm = corners_norm.reshape(4, 2) + for i in range(num_boxes): + if valid_mask[i]: + for j in range(num_tests): + current_box[0, :] = boxes[i] + current_radius = np.sqrt(boxes[i, 0]**2 + boxes[i, 1]**2) + current_grot = np.arctan2(boxes[i, 0], boxes[i, 1]) + dst_grot = current_grot + global_rot_noises[i, j] + dst_pos[0] = current_radius * np.sin(dst_grot) + dst_pos[1] = current_radius * np.cos(dst_grot) + current_box[0, :2] = dst_pos + current_box[0, -1] += (dst_grot - current_grot) + + rot_sin = np.sin(current_box[0, -1]) + rot_cos = np.cos(current_box[0, -1]) + rot_mat_T[0, 0] = rot_cos + rot_mat_T[0, 1] = -rot_sin + rot_mat_T[1, 0] = rot_sin + rot_mat_T[1, 1] = rot_cos + current_corners[:] = current_box[ + 0, 2:4] * corners_norm @ rot_mat_T + current_box[0, :2] + current_corners -= current_box[0, :2] + _rotation_box2d_jit_(current_corners, rot_noises[i, j], + rot_mat_T) + current_corners += current_box[0, :2] + loc_noises[i, j, :2] + coll_mat = box_collision_test( + current_corners.reshape(1, 4, 2), box_corners) + coll_mat[0, i] = False + if not coll_mat.any(): + success_mask[i] = j + box_corners[i] = current_corners + loc_noises[i, j, :2] += (dst_pos - boxes[i, :2]) + rot_noises[i, j] += (dst_grot - current_grot) + break + return success_mask + + +def _select_transform(transform, indices): + """Select transform. + + Args: + transform (np.ndarray): Transforms to select from. + indices (np.ndarray): Mask to indicate which transform to select. + + Returns: + np.ndarray: Selected transforms. 
+ """ + result = np.zeros((transform.shape[0], *transform.shape[2:]), + dtype=transform.dtype) + for i in range(transform.shape[0]): + if indices[i] != -1: + result[i] = transform[i, indices[i]] + return result + + +@numba.njit +def _rotation_matrix_3d_(rot_mat_T, angle, axis): + """Get the 3D rotation matrix. + + Args: + rot_mat_T (np.ndarray): Transposed rotation matrix. + angle (float): Rotation angle. + axis (int): Rotation axis. + """ + rot_sin = np.sin(angle) + rot_cos = np.cos(angle) + rot_mat_T[:] = np.eye(3) + if axis == 1: + rot_mat_T[0, 0] = rot_cos + rot_mat_T[0, 2] = -rot_sin + rot_mat_T[2, 0] = rot_sin + rot_mat_T[2, 2] = rot_cos + elif axis == 2 or axis == -1: + rot_mat_T[0, 0] = rot_cos + rot_mat_T[0, 1] = -rot_sin + rot_mat_T[1, 0] = rot_sin + rot_mat_T[1, 1] = rot_cos + elif axis == 0: + rot_mat_T[1, 1] = rot_cos + rot_mat_T[1, 2] = -rot_sin + rot_mat_T[2, 1] = rot_sin + rot_mat_T[2, 2] = rot_cos + + +@numba.njit +def points_transform_(points, centers, point_masks, loc_transform, + rot_transform, valid_mask): + """Apply transforms to points and box centers. + + Args: + points (np.ndarray): Input points. + centers (np.ndarray): Input box centers. + point_masks (np.ndarray): Mask to indicate which points need + to be transformed. + loc_transform (np.ndarray): Location transform to be applied. + rot_transform (np.ndarray): Rotation transform to be applied. + valid_mask (np.ndarray): Mask to indicate which boxes are valid. + """ + num_box = centers.shape[0] + num_points = points.shape[0] + rot_mat_T = np.zeros((num_box, 3, 3), dtype=points.dtype) + for i in range(num_box): + _rotation_matrix_3d_(rot_mat_T[i], rot_transform[i], 2) + for i in range(num_points): + for j in range(num_box): + if valid_mask[j]: + if point_masks[i, j] == 1: + points[i, :3] -= centers[j, :3] + points[i:i + 1, :3] = points[i:i + 1, :3] @ rot_mat_T[j] + points[i, :3] += centers[j, :3] + points[i, :3] += loc_transform[j] + break # only apply first box's transform + + +@numba.njit +def box3d_transform_(boxes, loc_transform, rot_transform, valid_mask): + """Transform 3D boxes. + + Args: + boxes (np.ndarray): 3D boxes to be transformed. + loc_transform (np.ndarray): Location transform to be applied. + rot_transform (np.ndarray): Rotation transform to be applied. + valid_mask (np.ndarray | None): Mask to indicate which boxes are valid. + """ + num_box = boxes.shape[0] + for i in range(num_box): + if valid_mask[i]: + boxes[i, :3] += loc_transform[i] + boxes[i, 6] += rot_transform[i] + + +def noise_per_object_v3_(gt_boxes, + points=None, + valid_mask=None, + rotation_perturb=np.pi / 4, + center_noise_std=1.0, + global_random_rot_range=np.pi / 4, + num_try=100): + """Random rotate or remove each groundtruth independently. use kitti viewer + to test this function points_transform_ + + Args: + gt_boxes (np.ndarray): Ground truth boxes with shape (N, 7). + points (np.ndarray | None): Input point cloud with shape (M, 4). + Default: None. + valid_mask (np.ndarray | None): Mask to indicate which boxes are valid. + Default: None. + rotation_perturb (float): Rotation perturbation. Default: pi / 4. + center_noise_std (float): Center noise standard deviation. + Default: 1.0. + global_random_rot_range (float): Global random rotation range. + Default: pi/4. + num_try (int): Number of try. Default: 100. 
+ """ + num_boxes = gt_boxes.shape[0] + if not isinstance(rotation_perturb, (list, tuple, np.ndarray)): + rotation_perturb = [-rotation_perturb, rotation_perturb] + if not isinstance(global_random_rot_range, (list, tuple, np.ndarray)): + global_random_rot_range = [ + -global_random_rot_range, global_random_rot_range + ] + enable_grot = np.abs(global_random_rot_range[0] - + global_random_rot_range[1]) >= 1e-3 + + if not isinstance(center_noise_std, (list, tuple, np.ndarray)): + center_noise_std = [ + center_noise_std, center_noise_std, center_noise_std + ] + if valid_mask is None: + valid_mask = np.ones((num_boxes, ), dtype=np.bool_) + center_noise_std = np.array(center_noise_std, dtype=gt_boxes.dtype) + + loc_noises = np.random.normal( + scale=center_noise_std, size=[num_boxes, num_try, 3]) + rot_noises = np.random.uniform( + rotation_perturb[0], rotation_perturb[1], size=[num_boxes, num_try]) + gt_grots = np.arctan2(gt_boxes[:, 0], gt_boxes[:, 1]) + grot_lowers = global_random_rot_range[0] - gt_grots + grot_uppers = global_random_rot_range[1] - gt_grots + global_rot_noises = np.random.uniform( + grot_lowers[..., np.newaxis], + grot_uppers[..., np.newaxis], + size=[num_boxes, num_try]) + + origin = (0.5, 0.5, 0) + gt_box_corners = box_np_ops.center_to_corner_box3d( + gt_boxes[:, :3], + gt_boxes[:, 3:6], + gt_boxes[:, 6], + origin=origin, + axis=2) + + # TODO: rewrite this noise box function? + if not enable_grot: + selected_noise = noise_per_box(gt_boxes[:, [0, 1, 3, 4, 6]], + valid_mask, loc_noises, rot_noises) + else: + selected_noise = noise_per_box_v2_(gt_boxes[:, [0, 1, 3, 4, 6]], + valid_mask, loc_noises, rot_noises, + global_rot_noises) + + loc_transforms = _select_transform(loc_noises, selected_noise) + rot_transforms = _select_transform(rot_noises, selected_noise) + surfaces = box_np_ops.corner_to_surfaces_3d_jit(gt_box_corners) + if points is not None: + # TODO: replace this points_in_convex function by my tools? + point_masks = box_np_ops.points_in_convex_polygon_3d_jit( + points[:, :3], surfaces) + points_transform_(points, gt_boxes[:, :3], point_masks, loc_transforms, + rot_transforms, valid_mask) + + box3d_transform_(gt_boxes, loc_transforms, rot_transforms, valid_mask) diff --git a/mmcv/datasets/pipelines/formating.py b/mmcv/datasets/pipelines/formating.py new file mode 100644 index 0000000..a7b3e61 --- /dev/null +++ b/mmcv/datasets/pipelines/formating.py @@ -0,0 +1,700 @@ +from collections.abc import Sequence + +import numpy as np +import torch +from mmcv.parallel import DataContainer as DC + +from mmcv.core.bbox.structures.base_box3d import BaseInstance3DBoxes +from mmcv.core.points import BasePoints +from mmcv.utils import is_str +from ..builder import PIPELINES + + +def to_tensor(data): + """Convert objects of various python types to :obj:`torch.Tensor`. + + Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, + :class:`Sequence`, :class:`int` and :class:`float`. + + Args: + data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to + be converted. 
+ """ + + if isinstance(data, torch.Tensor): + return data + elif isinstance(data, np.ndarray): + return torch.from_numpy(data) + elif isinstance(data, Sequence) and not is_str(data): + return torch.tensor(data) + elif isinstance(data, int): + return torch.LongTensor([data]) + elif isinstance(data, float): + return torch.FloatTensor([data]) + else: + raise TypeError(f'type {type(data)} cannot be converted to tensor.') + + +@PIPELINES.register_module() +class ToTensor: + """Convert some results to :obj:`torch.Tensor` by given keys. + + Args: + keys (Sequence[str]): Keys that need to be converted to Tensor. + """ + + def __init__(self, keys): + self.keys = keys + + def __call__(self, results): + """Call function to convert data in results to :obj:`torch.Tensor`. + + Args: + results (dict): Result dict contains the data to convert. + + Returns: + dict: The result dict contains the data converted + to :obj:`torch.Tensor`. + """ + for key in self.keys: + results[key] = to_tensor(results[key]) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(keys={self.keys})' + + +@PIPELINES.register_module() +class ImageToTensor: + """Convert image to :obj:`torch.Tensor` by given keys. + + The dimension order of input image is (H, W, C). The pipeline will convert + it to (C, H, W). If only 2 dimension (H, W) is given, the output would be + (1, H, W). + + Args: + keys (Sequence[str]): Key of images to be converted to Tensor. + """ + + def __init__(self, keys): + self.keys = keys + + def __call__(self, results): + """Call function to convert image in results to :obj:`torch.Tensor` and + transpose the channel order. + + Args: + results (dict): Result dict contains the image data to convert. + + Returns: + dict: The result dict contains the image converted + to :obj:`torch.Tensor` and transposed to (C, H, W) order. + """ + for key in self.keys: + img = results[key] + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + results[key] = to_tensor(img.transpose(2, 0, 1)) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(keys={self.keys})' + + +@PIPELINES.register_module() +class Transpose: + """Transpose some results by given keys. + + Args: + keys (Sequence[str]): Keys of results to be transposed. + order (Sequence[int]): Order of transpose. + """ + + def __init__(self, keys, order): + self.keys = keys + self.order = order + + def __call__(self, results): + """Call function to transpose the channel order of data in results. + + Args: + results (dict): Result dict contains the data to transpose. + + Returns: + dict: The result dict contains the data transposed to \ + ``self.order``. + """ + for key in self.keys: + results[key] = results[key].transpose(self.order) + return results + + def __repr__(self): + return self.__class__.__name__ + \ + f'(keys={self.keys}, order={self.order})' + + +@PIPELINES.register_module() +class ToDataContainer: + """Convert results to :obj:`mmcv.DataContainer` by given fields. + + Args: + fields (Sequence[dict]): Each field is a dict like + ``dict(key='xxx', **kwargs)``. The ``key`` in result will + be converted to :obj:`mmcv.DataContainer` with ``**kwargs``. + Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'), + dict(key='gt_labels'))``. + """ + + def __init__(self, + fields=(dict(key='img', stack=True), dict(key='gt_bboxes'), + dict(key='gt_labels'))): + self.fields = fields + + def __call__(self, results): + """Call function to convert data in results to + :obj:`mmcv.DataContainer`. 
+ + Args: + results (dict): Result dict contains the data to convert. + + Returns: + dict: The result dict contains the data converted to \ + :obj:`mmcv.DataContainer`. + """ + + for field in self.fields: + field = field.copy() + key = field.pop('key') + results[key] = DC(results[key], **field) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(fields={self.fields})' + + +@PIPELINES.register_module() +class DefaultFormatBundle: + """Default formatting bundle. + + It simplifies the pipeline of formatting common fields, including "img", + "proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg". + These fields are formatted as follows. + + - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) + - proposals: (1)to tensor, (2)to DataContainer + - gt_bboxes: (1)to tensor, (2)to DataContainer + - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer + - gt_labels: (1)to tensor, (2)to DataContainer + - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True) + - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \ + (3)to DataContainer (stack=True) + """ + + def __call__(self, results): + """Call function to transform and format common fields in results. + + Args: + results (dict): Result dict contains the data to convert. + + Returns: + dict: The result dict contains the data that is formatted with \ + default bundle. + """ + + if 'img' in results: + img = results['img'] + # add default meta keys + results = self._add_default_meta_keys(results) + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + img = np.ascontiguousarray(img.transpose(2, 0, 1)) + results['img'] = DC(to_tensor(img), stack=True) + for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']: + if key not in results: + continue + results[key] = DC(to_tensor(results[key])) + if 'gt_masks' in results: + results['gt_masks'] = DC(results['gt_masks'], cpu_only=True) + if 'gt_semantic_seg' in results: + results['gt_semantic_seg'] = DC( + to_tensor(results['gt_semantic_seg'][None, ...]), stack=True) + return results + + def _add_default_meta_keys(self, results): + """Add default meta keys. + + We set default meta keys including `pad_shape`, `scale_factor` and + `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and + `Pad` are implemented during the whole pipeline. + + Args: + results (dict): Result dict contains the data to convert. + + Returns: + results (dict): Updated result dict contains the data to convert. + """ + img = results['img'] + results.setdefault('pad_shape', img.shape) + results.setdefault('scale_factor', 1.0) + num_channels = 1 if len(img.shape) < 3 else img.shape[2] + results.setdefault( + 'img_norm_cfg', + dict( + mean=np.zeros(num_channels, dtype=np.float32), + std=np.ones(num_channels, dtype=np.float32), + to_rgb=False)) + return results + + def __repr__(self): + return self.__class__.__name__ + + +@PIPELINES.register_module() +class Collect: + """Collect data from the loader relevant to the specific task. + + This is usually the last stage of the data loader pipeline. Typically keys + is set to some subset of "img", "proposals", "gt_bboxes", + "gt_bboxes_ignore", "gt_labels", and/or "gt_masks". + + The "img_meta" item is always populated. The contents of the "img_meta" + dictionary depends on "meta_keys". By default this includes: + + - "img_shape": shape of the image input to the network as a tuple \ + (h, w, c). Note that images may be zero padded on the \ + bottom/right if the batch tensor is larger than this shape. 
+ + - "scale_factor": a float indicating the preprocessing scale + + - "flip": a boolean indicating if image flip transform was used + + - "filename": path to the image file + + - "ori_shape": original shape of the image as a tuple (h, w, c) + + - "pad_shape": image shape after padding + + - "img_norm_cfg": a dict of normalization information: + + - mean - per channel mean subtraction + - std - per channel std divisor + - to_rgb - bool indicating if bgr was converted to rgb + + Args: + keys (Sequence[str]): Keys of results to be collected in ``data``. + meta_keys (Sequence[str], optional): Meta keys to be converted to + ``mmcv.DataContainer`` and collected in ``data[img_metas]``. + Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape', + 'pad_shape', 'scale_factor', 'flip', 'flip_direction', + 'img_norm_cfg')`` + """ + + def __init__(self, + keys, + meta_keys=('filename', 'ori_filename', 'ori_shape', + 'img_shape', 'pad_shape', 'scale_factor', 'flip', + 'flip_direction', 'img_norm_cfg')): + self.keys = keys + self.meta_keys = meta_keys + + def __call__(self, results): + """Call function to collect keys in results. The keys in ``meta_keys`` + will be converted to :obj:mmcv.DataContainer. + + Args: + results (dict): Result dict contains the data to collect. + + Returns: + dict: The result dict contains the following keys + + - keys in``self.keys`` + - ``img_metas`` + """ + + data = {} + img_meta = {} + for key in self.meta_keys: + img_meta[key] = results[key] + data['img_metas'] = DC(img_meta, cpu_only=True) + for key in self.keys: + data[key] = results[key] + return data + + def __repr__(self): + return self.__class__.__name__ + \ + f'(keys={self.keys}, meta_keys={self.meta_keys})' + + +@PIPELINES.register_module() +class WrapFieldsToLists: + """Wrap fields of the data dictionary into lists for evaluation. + + This class can be used as a last step of a test or validation + pipeline for single image evaluation or inference. + + Example: + >>> test_pipeline = [ + >>> dict(type='LoadImageFromFile'), + >>> dict(type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + >>> dict(type='Pad', size_divisor=32), + >>> dict(type='ImageToTensor', keys=['img']), + >>> dict(type='Collect', keys=['img']), + >>> dict(type='WrapFieldsToLists') + >>> ] + """ + + def __call__(self, results): + """Call function to wrap fields into lists. + + Args: + results (dict): Result dict contains the data to wrap. + + Returns: + dict: The result dict where value of ``self.keys`` are wrapped \ + into list. + """ + + # Wrap dict fields into lists + for key, val in results.items(): + results[key] = [val] + return results + + def __repr__(self): + return f'{self.__class__.__name__}()' + + +PIPELINES._module_dict.pop('DefaultFormatBundle') + +@PIPELINES.register_module() +class DefaultFormatBundle(object): + """Default formatting bundle. + + It simplifies the pipeline of formatting common fields, including "img", + "proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg". + These fields are formatted as follows. 
+ + - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) + - proposals: (1)to tensor, (2)to DataContainer + - gt_bboxes: (1)to tensor, (2)to DataContainer + - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer + - gt_labels: (1)to tensor, (2)to DataContainer + - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True) + - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \ + (3)to DataContainer (stack=True) + """ + + def __init__(self, ): + return + + def __call__(self, results): + """Call function to transform and format common fields in results. + + Args: + results (dict): Result dict contains the data to convert. + + Returns: + dict: The result dict contains the data that is formatted with + default bundle. + """ + if 'img' in results: + if isinstance(results['img'], list): + # process multiple imgs in single frame + imgs = [img.transpose(2, 0, 1) for img in results['img']] + imgs = np.ascontiguousarray(np.stack(imgs, axis=0)) + results['img'] = DC(to_tensor(imgs), stack=True) + else: + img = np.ascontiguousarray(results['img'].transpose(2, 0, 1)) + results['img'] = DC(to_tensor(img), stack=True) + for key in [ + 'proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels', + 'gt_labels_3d', 'attr_labels', 'pts_instance_mask', + 'pts_semantic_mask', 'centers2d', 'depths' + ]: + if key not in results: + continue + if isinstance(results[key], list): + results[key] = DC([to_tensor(res) for res in results[key]]) + else: + results[key] = DC(to_tensor(results[key])) + if 'gt_bboxes_3d' in results: + if isinstance(results['gt_bboxes_3d'], BaseInstance3DBoxes): + results['gt_bboxes_3d'] = DC( + results['gt_bboxes_3d'], cpu_only=True) + else: + results['gt_bboxes_3d'] = DC( + to_tensor(results['gt_bboxes_3d'])) + + if 'gt_masks' in results: + results['gt_masks'] = DC(results['gt_masks'], cpu_only=True) + if 'gt_semantic_seg' in results: + results['gt_semantic_seg'] = DC( + to_tensor(results['gt_semantic_seg'][None, ...]), stack=True) + + return results + + def __repr__(self): + return self.__class__.__name__ + + +@PIPELINES.register_module() +class Collect3D(object): + """Collect data from the loader relevant to the specific task. + + This is usually the last stage of the data loader pipeline. Typically keys + is set to some subset of "img", "proposals", "gt_bboxes", + "gt_bboxes_ignore", "gt_labels", and/or "gt_masks". + + The "img_meta" item is always populated. The contents of the "img_meta" + dictionary depends on "meta_keys". By default this includes: + + - 'img_shape': shape of the image input to the network as a tuple \ + (h, w, c). Note that images may be zero padded on the \ + bottom/right if the batch tensor is larger than this shape. 
+ - 'scale_factor': a float indicating the preprocessing scale + - 'flip': a boolean indicating if image flip transform was used + - 'filename': path to the image file + - 'ori_shape': original shape of the image as a tuple (h, w, c) + - 'pad_shape': image shape after padding + - 'lidar2img': transform from lidar to image + - 'depth2img': transform from depth to image + - 'cam2img': transform from camera to image + - 'pcd_horizontal_flip': a boolean indicating if point cloud is \ + flipped horizontally + - 'pcd_vertical_flip': a boolean indicating if point cloud is \ + flipped vertically + - 'box_mode_3d': 3D box mode + - 'box_type_3d': 3D box type + - 'img_norm_cfg': a dict of normalization information: + - mean: per channel mean subtraction + - std: per channel std divisor + - to_rgb: bool indicating if bgr was converted to rgb + - 'pcd_trans': point cloud transformations + - 'sample_idx': sample index + - 'pcd_scale_factor': point cloud scale factor + - 'pcd_rotation': rotation applied to point cloud + - 'pts_filename': path to point cloud file. + + Args: + keys (Sequence[str]): Keys of results to be collected in ``data``. + meta_keys (Sequence[str], optional): Meta keys to be converted to + ``mmcv.DataContainer`` and collected in ``data[img_metas]``. + Default: ('filename', 'ori_shape', 'img_shape', 'lidar2img', + 'depth2img', 'cam2img', 'pad_shape', 'scale_factor', 'flip', + 'pcd_horizontal_flip', 'pcd_vertical_flip', 'box_mode_3d', + 'box_type_3d', 'img_norm_cfg', 'pcd_trans', + 'sample_idx', 'pcd_scale_factor', 'pcd_rotation', 'pts_filename') + """ + + def __init__(self, + keys, + meta_keys=('filename', 'ori_shape', 'img_shape', 'lidar2img', + 'depth2img', 'cam2img', 'pad_shape', + 'scale_factor', 'flip', 'pcd_horizontal_flip', + 'pcd_vertical_flip', 'box_mode_3d', 'box_type_3d', + 'img_norm_cfg', 'pcd_trans', 'sample_idx', + 'pcd_scale_factor', 'pcd_rotation', 'pts_filename', + 'transformation_3d_flow')): + self.keys = keys + self.meta_keys = meta_keys + + def __call__(self, results): + """Call function to collect keys in results. The keys in ``meta_keys`` + will be converted to :obj:`mmcv.DataContainer`. + + Args: + results (dict): Result dict contains the data to collect. + + Returns: + dict: The result dict contains the following keys + - keys in ``self.keys`` + - ``img_metas`` + """ + data = {} + img_metas = {} + for key in self.meta_keys: + if key in results: + img_metas[key] = results[key] + + data['img_metas'] = DC(img_metas, cpu_only=True) + for key in self.keys: + data[key] = results[key] + return data + + def __repr__(self): + """str: Return a string that describes the module.""" + return self.__class__.__name__ + \ + f'(keys={self.keys}, meta_keys={self.meta_keys})' + + +@PIPELINES.register_module() +class DefaultFormatBundle3D(DefaultFormatBundle): + """Default formatting bundle. + + It simplifies the pipeline of formatting common fields for voxels, + including "proposals", "gt_bboxes", "gt_labels", "gt_masks" and + "gt_semantic_seg". + These fields are formatted as follows. 
+ + - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) + - proposals: (1)to tensor, (2)to DataContainer + - gt_bboxes: (1)to tensor, (2)to DataContainer + - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer + - gt_labels: (1)to tensor, (2)to DataContainer + """ + + def __init__(self, class_names, with_gt=True, with_label=True): + super(DefaultFormatBundle3D, self).__init__() + self.class_names = class_names + self.with_gt = with_gt + self.with_label = with_label + + def __call__(self, results): + """Call function to transform and format common fields in results. + + Args: + results (dict): Result dict contains the data to convert. + + Returns: + dict: The result dict contains the data that is formatted with + default bundle. + """ + # Format 3D data + if 'points' in results: + assert isinstance(results['points'], BasePoints) + results['points'] = DC(results['points'].tensor) + + for key in ['voxels', 'coors', 'voxel_centers', 'num_points']: + if key not in results: + continue + results[key] = DC(to_tensor(results[key]), stack=False) + + if self.with_gt: + # Clean GT bboxes in the final + if 'gt_bboxes_3d_mask' in results: + gt_bboxes_3d_mask = results['gt_bboxes_3d_mask'] + results['gt_bboxes_3d'] = results['gt_bboxes_3d'][ + gt_bboxes_3d_mask] + if 'gt_names_3d' in results: + results['gt_names_3d'] = results['gt_names_3d'][ + gt_bboxes_3d_mask] + if 'centers2d' in results: + results['centers2d'] = results['centers2d'][ + gt_bboxes_3d_mask] + if 'depths' in results: + results['depths'] = results['depths'][gt_bboxes_3d_mask] + if 'gt_bboxes_mask' in results: + gt_bboxes_mask = results['gt_bboxes_mask'] + if 'gt_bboxes' in results: + results['gt_bboxes'] = results['gt_bboxes'][gt_bboxes_mask] + results['gt_names'] = results['gt_names'][gt_bboxes_mask] + if self.with_label: + if 'gt_names' in results and len(results['gt_names']) == 0: + results['gt_labels'] = np.array([], dtype=np.int64) + results['attr_labels'] = np.array([], dtype=np.int64) + elif 'gt_names' in results and isinstance( + results['gt_names'][0], list): + # gt_labels might be a list of list in multi-view setting + results['gt_labels'] = [ + np.array([self.class_names.index(n) for n in res], + dtype=np.int64) for res in results['gt_names'] + ] + elif 'gt_names' in results: + results['gt_labels'] = np.array([ + self.class_names.index(n) for n in results['gt_names'] + ], + dtype=np.int64) + # we still assume one pipeline for one frame LiDAR + # thus, the 3D name is list[string] + if 'gt_names_3d' in results: + results['gt_labels_3d'] = np.array([ + self.class_names.index(n) + for n in results['gt_names_3d'] + ], + dtype=np.int64) + results = super(DefaultFormatBundle3D, self).__call__(results) + return results + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(class_names={self.class_names}, ' + repr_str += f'with_gt={self.with_gt}, with_label={self.with_label})' + return repr_str + +@PIPELINES.register_module() +class CustomDefaultFormatBundle3D(DefaultFormatBundle3D): + """Default formatting bundle. + It simplifies the pipeline of formatting common fields for voxels, + including "proposals", "gt_bboxes", "gt_labels", "gt_masks" and + "gt_semantic_seg". + These fields are formatted as follows. 
+ - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) + - proposals: (1)to tensor, (2)to DataContainer + - gt_bboxes: (1)to tensor, (2)to DataContainer + - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer + - gt_labels: (1)to tensor, (2)to DataContainer + """ + + def __call__(self, results): + """Call function to transform and format common fields in results. + Args: + results (dict): Result dict contains the data to convert. + Returns: + dict: The result dict contains the data that is formatted with + default bundle. + """ + # Format 3D data + results = super(CustomDefaultFormatBundle3D, self).__call__(results) + results['gt_map_masks'] = DC( + to_tensor(results['gt_map_masks']), stack=True) + + return results + +@PIPELINES.register_module() +class VADFormatBundle3D(DefaultFormatBundle3D): + """Default formatting bundle. + It simplifies the pipeline of formatting common fields for voxels, + including "proposals", "gt_bboxes", "gt_labels", "gt_masks" and + "gt_semantic_seg". + These fields are formatted as follows. + - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) + - proposals: (1)to tensor, (2)to DataContainer + - gt_bboxes: (1)to tensor, (2)to DataContainer + - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer + - gt_labels: (1)to tensor, (2)to DataContainer + """ + def __init__(self, class_names, with_gt=True, with_label=True, with_ego=True): + super(VADFormatBundle3D, self).__init__(class_names, with_gt, with_label) + self.with_ego = with_ego + + + def __call__(self, results): + """Call function to transform and format common fields in results. + Args: + results (dict): Result dict contains the data to convert. + Returns: + dict: The result dict contains the data that is formatted with + default bundle. + """ + # Format 3D data + results = super(VADFormatBundle3D, self).__call__(results) + # results['gt_map_masks'] = DC(to_tensor(results['gt_map_masks']), stack=True) + if self.with_ego: + if 'ego_his_trajs' in results: + results['ego_his_trajs'] = DC(to_tensor(results['ego_his_trajs'][None, ...]), stack=True) + if 'ego_fut_trajs' in results: + results['ego_fut_trajs'] = DC(to_tensor(results['ego_fut_trajs'][None, ...]), stack=True) + if 'ego_fut_masks' in results: + results['ego_fut_masks'] = DC(to_tensor(results['ego_fut_masks'][None, None, ...]), stack=True) + if 'ego_fut_cmd' in results: + results['ego_fut_cmd'] = DC(to_tensor(results['ego_fut_cmd'][None, None, ...]), stack=True) + if 'ego_lcf_feat' in results: + results['ego_lcf_feat'] = DC(to_tensor(results['ego_lcf_feat'][None, None, ...]), stack=True) + if 'gt_attr_labels' in results: + results['gt_attr_labels'] = DC(to_tensor(results['gt_attr_labels']), cpu_only=False) + + return results + diff --git a/mmcv/datasets/pipelines/loading.py b/mmcv/datasets/pipelines/loading.py new file mode 100644 index 0000000..dbf494e --- /dev/null +++ b/mmcv/datasets/pipelines/loading.py @@ -0,0 +1,1709 @@ +import os +import os.path as osp +import torch +import mmcv +import numpy as np +import pycocotools.mask as maskUtils +from einops import rearrange +from mmcv.core.points import BasePoints, get_points_type +from mmcv.fileio.file_client import FileClient +from mmcv.image import imfrombytes, imread +from mmcv.utils import check_file_exist +from mmcv.core.mask.structures import BitmapMasks, PolygonMasks +# from mmcv.datasets.pipelines.loading import LoadAnnotations, LoadImageFromFile +from ..builder import PIPELINES + + +@PIPELINES.register_module() +class LoadImageFromFile: + """Load an image from file. 
+ + Required keys are "img_prefix" and "img_info" (a dict that must contain the + key "filename"). Added or updated keys are "filename", "img", "img_shape", + "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`), + "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1). + + Args: + to_float32 (bool): Whether to convert the loaded image to a float32 + numpy array. If set to False, the loaded image is an uint8 array. + Defaults to False. + color_type (str): The flag argument for :func:`mmcv.imfrombytes`. + Defaults to 'color'. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmcv.fileio.FileClient` for details. + Defaults to ``dict(backend='disk')``. + """ + + def __init__(self, + to_float32=False, + color_type='color', + file_client_args=dict(backend='disk')): + self.to_float32 = to_float32 + self.color_type = color_type + self.file_client_args = file_client_args.copy() + self.file_client = None + + def __call__(self, results): + """Call functions to load image and get image meta information. + + Args: + results (dict): Result dict from :obj:`mmcv.CustomDataset`. + + Returns: + dict: The dict contains loaded image and meta information. + """ + + if self.file_client is None: + self.file_client = FileClient(**self.file_client_args) + + if results['img_prefix'] is not None: + filename = osp.join(results['img_prefix'], + results['img_info']['filename']) + else: + filename = results['img_info']['filename'] + + img_bytes = self.file_client.get(filename) + img = imfrombytes(img_bytes, flag=self.color_type) + if self.to_float32: + img = img.astype(np.float32) + + results['filename'] = filename + results['ori_filename'] = results['img_info']['filename'] + results['img'] = img + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img'] + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'to_float32={self.to_float32}, ' + f"color_type='{self.color_type}', " + f'file_client_args={self.file_client_args})') + return repr_str + + +@PIPELINES.register_module() +class LoadImageFromWebcam(LoadImageFromFile): + """Load an image from webcam. + + Similar with :obj:`LoadImageFromFile`, but the image read from webcam is in + ``results['img']``. + """ + + def __call__(self, results): + """Call functions to add image meta information. + + Args: + results (dict): Result dict with Webcam read image in + ``results['img']``. + + Returns: + dict: The dict contains loaded image and meta information. + """ + + img = results['img'] + if self.to_float32: + img = img.astype(np.float32) + + results['filename'] = None + results['ori_filename'] = None + results['img'] = img + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img'] + return results + + +@PIPELINES.register_module() +class LoadMultiChannelImageFromFiles: + """Load multi-channel images from a list of separate channel files. + + Required keys are "img_prefix" and "img_info" (a dict that must contain the + key "filename", which is expected to be a list of filenames). + Added or updated keys are "filename", "img", "img_shape", + "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`), + "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1). + + Args: + to_float32 (bool): Whether to convert the loaded image to a float32 + numpy array. If set to False, the loaded image is an uint8 array. + Defaults to False. 
+ color_type (str): The flag argument for :func:`mmcv.imfrombytes`. + Defaults to 'color'. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmcv.fileio.FileClient` for details. + Defaults to ``dict(backend='disk')``. + """ + + def __init__(self, + to_float32=False, + color_type='unchanged', + file_client_args=dict(backend='disk')): + self.to_float32 = to_float32 + self.color_type = color_type + self.file_client_args = file_client_args.copy() + self.file_client = None + + def __call__(self, results): + """Call functions to load multiple images and get images meta + information. + + Args: + results (dict): Result dict from :obj:`mmcv.CustomDataset`. + + Returns: + dict: The dict contains loaded images and meta information. + """ + + if self.file_client is None: + self.file_client = FileClient(**self.file_client_args) + + if results['img_prefix'] is not None: + filename = [ + osp.join(results['img_prefix'], fname) + for fname in results['img_info']['filename'] + ] + else: + filename = results['img_info']['filename'] + + img = [] + for name in filename: + img_bytes = self.file_client.get(name) + img.append(imfrombytes(img_bytes, flag=self.color_type)) + img = np.stack(img, axis=-1) + if self.to_float32: + img = img.astype(np.float32) + + results['filename'] = filename + results['ori_filename'] = results['img_info']['filename'] + results['img'] = img + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + # Set initial values for default meta_keys + results['pad_shape'] = img.shape + results['scale_factor'] = 1.0 + num_channels = 1 if len(img.shape) < 3 else img.shape[2] + results['img_norm_cfg'] = dict( + mean=np.zeros(num_channels, dtype=np.float32), + std=np.ones(num_channels, dtype=np.float32), + to_rgb=False) + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'to_float32={self.to_float32}, ' + f"color_type='{self.color_type}', " + f'file_client_args={self.file_client_args})') + return repr_str + + +@PIPELINES.register_module() +class LoadAnnotations: + """Load multiple types of annotations. + + Args: + with_bbox (bool): Whether to parse and load the bbox annotation. + Default: True. + with_label (bool): Whether to parse and load the label annotation. + Default: True. + with_mask (bool): Whether to parse and load the mask annotation. + Default: False. + with_seg (bool): Whether to parse and load the semantic segmentation + annotation. Default: False. + poly2mask (bool): Whether to convert the instance masks from polygons + to bitmaps. Default: True. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmcv.fileio.FileClient` for details. + Defaults to ``dict(backend='disk')``. + """ + + def __init__(self, + with_bbox=True, + with_label=True, + with_mask=False, + with_seg=False, + poly2mask=True, + file_client_args=dict(backend='disk')): + self.with_bbox = with_bbox + self.with_label = with_label + self.with_mask = with_mask + self.with_seg = with_seg + self.poly2mask = poly2mask + self.file_client_args = file_client_args.copy() + self.file_client = None + + def _load_bboxes(self, results): + """Private function to load bounding box annotations. + + Args: + results (dict): Result dict from :obj:`mmcv.CustomDataset`. + + Returns: + dict: The dict contains loaded bounding box annotations. 
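+
+        Example (illustrative sketch only, not from the original file; the
+            empty box array is a hypothetical input):
+            >>> results = dict(
+            >>>     ann_info=dict(bboxes=np.zeros((0, 4), dtype=np.float32)),
+            >>>     bbox_fields=[])
+            >>> results = LoadAnnotations()._load_bboxes(results)
+            >>> assert results['bbox_fields'] == ['gt_bboxes']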
+ """ + + ann_info = results['ann_info'] + results['gt_bboxes'] = ann_info['bboxes'].copy() + + gt_bboxes_ignore = ann_info.get('bboxes_ignore', None) + if gt_bboxes_ignore is not None: + results['gt_bboxes_ignore'] = gt_bboxes_ignore.copy() + results['bbox_fields'].append('gt_bboxes_ignore') + results['bbox_fields'].append('gt_bboxes') + return results + + def _load_labels(self, results): + """Private function to load label annotations. + + Args: + results (dict): Result dict from :obj:`mmcv.CustomDataset`. + + Returns: + dict: The dict contains loaded label annotations. + """ + + results['gt_labels'] = results['ann_info']['labels'].copy() + return results + + def _poly2mask(self, mask_ann, img_h, img_w): + """Private function to convert masks represented with polygon to + bitmaps. + + Args: + mask_ann (list | dict): Polygon mask annotation input. + img_h (int): The height of output mask. + img_w (int): The width of output mask. + + Returns: + numpy.ndarray: The decode bitmap mask of shape (img_h, img_w). + """ + + if isinstance(mask_ann, list): + # polygon -- a single object might consist of multiple parts + # we merge all parts into one mask rle code + rles = maskUtils.frPyObjects(mask_ann, img_h, img_w) + rle = maskUtils.merge(rles) + elif isinstance(mask_ann['counts'], list): + # uncompressed RLE + rle = maskUtils.frPyObjects(mask_ann, img_h, img_w) + else: + # rle + rle = mask_ann + mask = maskUtils.decode(rle) + return mask + + def process_polygons(self, polygons): + """Convert polygons to list of ndarray and filter invalid polygons. + + Args: + polygons (list[list]): Polygons of one instance. + + Returns: + list[numpy.ndarray]: Processed polygons. + """ + + polygons = [np.array(p) for p in polygons] + valid_polygons = [] + for polygon in polygons: + if len(polygon) % 2 == 0 and len(polygon) >= 6: + valid_polygons.append(polygon) + return valid_polygons + + def _load_masks(self, results): + """Private function to load mask annotations. + + Args: + results (dict): Result dict from :obj:`mmcv.CustomDataset`. + + Returns: + dict: The dict contains loaded mask annotations. + If ``self.poly2mask`` is set ``True``, `gt_mask` will contain + :obj:`PolygonMasks`. Otherwise, :obj:`BitmapMasks` is used. + """ + + h, w = results['img_info']['height'], results['img_info']['width'] + gt_masks = results['ann_info']['masks'] + if self.poly2mask: + gt_masks = BitmapMasks( + [self._poly2mask(mask, h, w) for mask in gt_masks], h, w) + else: + gt_masks = PolygonMasks( + [self.process_polygons(polygons) for polygons in gt_masks], h, + w) + results['gt_masks'] = gt_masks + results['mask_fields'].append('gt_masks') + return results + + def _load_semantic_seg(self, results): + """Private function to load semantic segmentation annotations. + + Args: + results (dict): Result dict from :obj:`dataset`. + + Returns: + dict: The dict contains loaded semantic segmentation annotations. + """ + + if self.file_client is None: + self.file_client = FileClient(**self.file_client_args) + + filename = osp.join(results['seg_prefix'], + results['ann_info']['seg_map']) + img_bytes = self.file_client.get(filename) + results['gt_semantic_seg'] = imfrombytes( + img_bytes, flag='unchanged').squeeze() + results['seg_fields'].append('gt_semantic_seg') + return results + + def __call__(self, results): + """Call function to load multiple types annotations. + + Args: + results (dict): Result dict from :obj:`mmcv.CustomDataset`. 
+
+        Returns:
+            dict: The dict contains loaded bounding box, label, mask and
+                semantic segmentation annotations.
+        """
+
+        if self.with_bbox:
+            results = self._load_bboxes(results)
+            if results is None:
+                return None
+        if self.with_label:
+            results = self._load_labels(results)
+        if self.with_mask:
+            results = self._load_masks(results)
+        if self.with_seg:
+            results = self._load_semantic_seg(results)
+        return results
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        repr_str += f'(with_bbox={self.with_bbox}, '
+        repr_str += f'with_label={self.with_label}, '
+        repr_str += f'with_mask={self.with_mask}, '
+        repr_str += f'with_seg={self.with_seg}, '
+        repr_str += f'poly2mask={self.poly2mask}, '
+        repr_str += f'file_client_args={self.file_client_args})'
+        return repr_str
+
+
+@PIPELINES.register_module()
+class LoadProposals:
+    """Load proposal pipeline.
+
+    Required key is "proposals". Updated keys are "proposals", "bbox_fields".
+
+    Args:
+        num_max_proposals (int, optional): Maximum number of proposals to
+            load. If not specified, all proposals will be loaded.
+    """
+
+    def __init__(self, num_max_proposals=None):
+        self.num_max_proposals = num_max_proposals
+
+    def __call__(self, results):
+        """Call function to load proposals from file.
+
+        Args:
+            results (dict): Result dict from :obj:`mmcv.CustomDataset`.
+
+        Returns:
+            dict: The dict contains loaded proposal annotations.
+        """
+
+        proposals = results['proposals']
+        if proposals.shape[1] not in (4, 5):
+            raise AssertionError(
+                'proposals should have shapes (n, 4) or (n, 5), '
+                f'but found {proposals.shape}')
+        proposals = proposals[:, :4]
+
+        if self.num_max_proposals is not None:
+            proposals = proposals[:self.num_max_proposals]
+
+        if len(proposals) == 0:
+            proposals = np.array([[0, 0, 0, 0]], dtype=np.float32)
+        results['proposals'] = proposals
+        results['bbox_fields'].append('proposals')
+        return results
+
+    def __repr__(self):
+        return self.__class__.__name__ + \
+            f'(num_max_proposals={self.num_max_proposals})'
+
+
+@PIPELINES.register_module()
+class FilterAnnotations:
+    """Filter invalid annotations.
+
+    Args:
+        min_gt_bbox_wh (tuple[int]): Minimum width and height of ground truth
+            boxes.
+    """
+
+    def __init__(self, min_gt_bbox_wh):
+        # TODO: add more filter options
+        self.min_gt_bbox_wh = min_gt_bbox_wh
+
+    def __call__(self, results):
+        """Keep only boxes at least ``min_gt_bbox_wh`` wide and high; drop
+        the whole sample when no box survives."""
+        assert 'gt_bboxes' in results
+        gt_bboxes = results['gt_bboxes']
+        w = gt_bboxes[:, 2] - gt_bboxes[:, 0]
+        h = gt_bboxes[:, 3] - gt_bboxes[:, 1]
+        keep = (w > self.min_gt_bbox_wh[0]) & (h > self.min_gt_bbox_wh[1])
+        if not keep.any():
+            return None
+        else:
+            keys = ('gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg')
+            for key in keys:
+                if key in results:
+                    results[key] = results[key][keep]
+            return results
+
+
+@PIPELINES.register_module()
+class LoadMultiViewImageFromFiles(object):
+    """Load multi-view images from a list of separate per-view image files.
+
+    Expects results['img_filename'] to be a list of filenames.
+
+    Args:
+        to_float32 (bool): Whether to convert the img to float32.
+            Defaults to False.
+        color_type (str): Color type of the file. Defaults to 'unchanged'.
+    """
+
+    def __init__(self, to_float32=False, color_type='unchanged'):
+        self.to_float32 = to_float32
+        self.color_type = color_type
+
+    def __call__(self, results):
+        """Call function to load multi-view image from files.
+
+        Args:
+            results (dict): Result dict containing multi-view image filenames.
+
+        Returns:
+            dict: The result dict containing the multi-view image data. \
+                Added keys and values are described below.
+
+                - filename (str): Multi-view image filenames.
+                - img (np.ndarray): Multi-view image arrays.
+                - img_shape (tuple[int]): Shape of multi-view image arrays.
+                - ori_shape (tuple[int]): Shape of original image arrays.
+                - pad_shape (tuple[int]): Shape of padded image arrays.
+                - scale_factor (float): Scale factor.
+                - img_norm_cfg (dict): Normalization configuration of images.
+        """
+        filename = results['img_filename']
+        # img is of shape (h, w, c, num_views)
+        img = np.stack(
+            [imread(name, self.color_type) for name in filename], axis=-1)
+        if self.to_float32:
+            img = img.astype(np.float32)
+        results['filename'] = filename
+        # unravel to list, see `DefaultFormatBundle` in formating.py
+        # which will transpose each image separately and then stack into array
+        results['img'] = [img[..., i] for i in range(img.shape[-1])]
+        results['img_shape'] = img.shape
+        results['ori_shape'] = img.shape
+        # Set initial values for default meta_keys
+        results['pad_shape'] = img.shape
+        results['scale_factor'] = 1.0
+        num_channels = 1 if len(img.shape) < 3 else img.shape[2]
+        results['img_norm_cfg'] = dict(
+            mean=np.zeros(num_channels, dtype=np.float32),
+            std=np.ones(num_channels, dtype=np.float32),
+            to_rgb=False)
+        return results
+
+    def __repr__(self):
+        """str: Return a string that describes the module."""
+        repr_str = self.__class__.__name__
+        repr_str += f'(to_float32={self.to_float32}, '
+        repr_str += f"color_type='{self.color_type}')"
+        return repr_str
+
+
+@PIPELINES.register_module()
+class LoadImageFromFileMono3D(LoadImageFromFile):
+    """Load an image from file in monocular 3D object detection. Compared to
+    2D detection, additional camera parameters need to be loaded.
+
+    Args:
+        kwargs (dict): Arguments are the same as those in \
+            :class:`LoadImageFromFile`.
+    """
+
+    def __call__(self, results):
+        """Call functions to load image and get image meta information.
+
+        Args:
+            results (dict): Result dict from :obj:`mmcv.CustomDataset`.
+
+        Returns:
+            dict: The dict contains loaded image and meta information.
+        """
+        super().__call__(results)
+        results['cam2img'] = results['img_info']['cam_intrinsic']
+        return results
+
+
+@PIPELINES.register_module()
+class LoadPointsFromMultiSweeps(object):
+    """Load points from multiple sweeps.
+
+    This is usually used for nuScenes dataset to utilize previous sweeps.
+
+    Args:
+        sweeps_num (int): Number of sweeps. Defaults to 10.
+        load_dim (int): Dimension number of the loaded points. Defaults to 5.
+        use_dim (list[int]): Which dimension to use. Defaults to [0, 1, 2, 4].
+        file_client_args (dict): Config dict of file clients, refer to
+            https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py
+            for more details. Defaults to dict(backend='disk').
+        pad_empty_sweeps (bool): Whether to repeat keyframe when
+            sweeps is empty. Defaults to False.
+        remove_close (bool): Whether to remove close points.
+            Defaults to False.
+        test_mode (bool): If test_mode=True, it will not randomly sample
+            sweeps but select the nearest N frames. Defaults to False.
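+
+    Example (an illustrative nuScenes-style config, not taken from the
+        original file):
+        >>> load_sweeps = dict(
+        >>>     type='LoadPointsFromMultiSweeps',
+        >>>     sweeps_num=10,
+        >>>     load_dim=5,
+        >>>     use_dim=[0, 1, 2, 4],
+        >>>     pad_empty_sweeps=True,
+        >>>     remove_close=True)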
+ """ + + def __init__(self, + sweeps_num=10, + load_dim=5, + use_dim=[0, 1, 2, 4], + file_client_args=dict(backend='disk'), + pad_empty_sweeps=False, + remove_close=False, + test_mode=False): + self.load_dim = load_dim + self.sweeps_num = sweeps_num + self.use_dim = use_dim + self.file_client_args = file_client_args.copy() + self.file_client = None + self.pad_empty_sweeps = pad_empty_sweeps + self.remove_close = remove_close + self.test_mode = test_mode + + def _load_points(self, pts_filename): + """Private function to load point clouds data. + + Args: + pts_filename (str): Filename of point clouds data. + + Returns: + np.ndarray: An array containing point clouds data. + """ + if self.file_client is None: + self.file_client = FileClient(**self.file_client_args) + try: + pts_bytes = self.file_client.get(pts_filename) + points = np.frombuffer(pts_bytes, dtype=np.float32) + except ConnectionError: + check_file_exist(pts_filename) + if pts_filename.endswith('.npy'): + points = np.load(pts_filename) + else: + points = np.fromfile(pts_filename, dtype=np.float32) + return points + + def _remove_close(self, points, radius=1.0): + """Removes point too close within a certain radius from origin. + + Args: + points (np.ndarray | :obj:`BasePoints`): Sweep points. + radius (float): Radius below which points are removed. + Defaults to 1.0. + + Returns: + np.ndarray: Points after removing. + """ + if isinstance(points, np.ndarray): + points_numpy = points + elif isinstance(points, BasePoints): + points_numpy = points.tensor.numpy() + else: + raise NotImplementedError + x_filt = np.abs(points_numpy[:, 0]) < radius + y_filt = np.abs(points_numpy[:, 1]) < radius + not_close = np.logical_not(np.logical_and(x_filt, y_filt)) + return points[not_close] + + def __call__(self, results): + """Call function to load multi-sweep point clouds from files. + + Args: + results (dict): Result dict containing multi-sweep point cloud \ + filenames. + + Returns: + dict: The result dict containing the multi-sweep points data. \ + Added key and value are described below. + + - points (np.ndarray | :obj:`BasePoints`): Multi-sweep point \ + cloud arrays. 
+ """ + points = results['points'] + points.tensor[:, 4] = 0 + sweep_points_list = [points] + ts = results['timestamp'] + if self.pad_empty_sweeps and len(results['sweeps']) == 0: + for i in range(self.sweeps_num): + if self.remove_close: + sweep_points_list.append(self._remove_close(points)) + else: + sweep_points_list.append(points) + else: + if len(results['sweeps']) <= self.sweeps_num: + choices = np.arange(len(results['sweeps'])) + elif self.test_mode: + choices = np.arange(self.sweeps_num) + else: + choices = np.random.choice( + len(results['sweeps']), self.sweeps_num, replace=False) + for idx in choices: + sweep = results['sweeps'][idx] + points_sweep = self._load_points(sweep['data_path']) + points_sweep = np.copy(points_sweep).reshape(-1, self.load_dim) + if self.remove_close: + points_sweep = self._remove_close(points_sweep) + sweep_ts = sweep['timestamp'] / 1e6 + points_sweep[:, :3] = points_sweep[:, :3] @ sweep[ + 'sensor2lidar_rotation'].T + points_sweep[:, :3] += sweep['sensor2lidar_translation'] + points_sweep[:, 4] = ts - sweep_ts + points_sweep = points.new_point(points_sweep) + sweep_points_list.append(points_sweep) + + points = points.cat(sweep_points_list) + points = points[:, self.use_dim] + results['points'] = points + return results + + def __repr__(self): + """str: Return a string that describes the module.""" + return f'{self.__class__.__name__}(sweeps_num={self.sweeps_num})' + + +@PIPELINES.register_module() +class PointSegClassMapping(object): + """Map original semantic class to valid category ids. + + Map valid classes as 0~len(valid_cat_ids)-1 and + others as len(valid_cat_ids). + + Args: + valid_cat_ids (tuple[int]): A tuple of valid category. + max_cat_id (int): The max possible cat_id in input segmentation mask. + Defaults to 40. + """ + + def __init__(self, valid_cat_ids, max_cat_id=40): + assert max_cat_id >= np.max(valid_cat_ids), \ + 'max_cat_id should be greater than maximum id in valid_cat_ids' + + self.valid_cat_ids = valid_cat_ids + self.max_cat_id = int(max_cat_id) + + # build cat_id to class index mapping + neg_cls = len(valid_cat_ids) + self.cat_id2class = np.ones( + self.max_cat_id + 1, dtype=np.int) * neg_cls + for cls_idx, cat_id in enumerate(valid_cat_ids): + self.cat_id2class[cat_id] = cls_idx + + def __call__(self, results): + """Call function to map original semantic class to valid category ids. + + Args: + results (dict): Result dict containing point semantic masks. + + Returns: + dict: The result dict containing the mapped category ids. \ + Updated key and value are described below. + + - pts_semantic_mask (np.ndarray): Mapped semantic masks. + """ + assert 'pts_semantic_mask' in results + pts_semantic_mask = results['pts_semantic_mask'] + + converted_pts_sem_mask = self.cat_id2class[pts_semantic_mask] + + results['pts_semantic_mask'] = converted_pts_sem_mask + return results + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(valid_cat_ids={self.valid_cat_ids}, ' + repr_str += f'max_cat_id={self.max_cat_id})' + return repr_str + + +@PIPELINES.register_module() +class NormalizePointsColor(object): + """Normalize color of points. + + Args: + color_mean (list[float]): Mean color of the point cloud. + """ + + def __init__(self, color_mean): + self.color_mean = color_mean + + def __call__(self, results): + """Call function to normalize color of points. + + Args: + results (dict): Result dict containing point clouds data. 
+ + Returns: + dict: The result dict containing the normalized points. \ + Updated key and value are described below. + + - points (:obj:`BasePoints`): Points after color normalization. + """ + points = results['points'] + assert points.attribute_dims is not None and \ + 'color' in points.attribute_dims.keys(), \ + 'Expect points have color attribute' + if self.color_mean is not None: + points.color = points.color - \ + points.color.new_tensor(self.color_mean) + points.color = points.color / 255.0 + results['points'] = points + return results + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(color_mean={self.color_mean})' + return repr_str + + +@PIPELINES.register_module() +class LoadPointsFromFile(object): + """Load Points From File. + + Load sunrgbd and scannet points from file. + + Args: + coord_type (str): The type of coordinates of points cloud. + Available options includes: + - 'LIDAR': Points in LiDAR coordinates. + - 'DEPTH': Points in depth coordinates, usually for indoor dataset. + - 'CAMERA': Points in camera coordinates. + load_dim (int): The dimension of the loaded points. + Defaults to 6. + use_dim (list[int]): Which dimensions of the points to be used. + Defaults to [0, 1, 2]. For KITTI dataset, set use_dim=4 + or use_dim=[0, 1, 2, 3] to use the intensity dimension. + shift_height (bool): Whether to use shifted height. Defaults to False. + use_color (bool): Whether to use color features. Defaults to False. + file_client_args (dict): Config dict of file clients, refer to + https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py + for more details. Defaults to dict(backend='disk'). + """ + + def __init__(self, + coord_type, + load_dim=6, + use_dim=[0, 1, 2], + shift_height=False, + use_color=False, + file_client_args=dict(backend='disk')): + self.shift_height = shift_height + self.use_color = use_color + if isinstance(use_dim, int): + use_dim = list(range(use_dim)) + assert max(use_dim) < load_dim, \ + f'Expect all used dimensions < {load_dim}, got {use_dim}' + assert coord_type in ['CAMERA', 'LIDAR', 'DEPTH'] + + self.coord_type = coord_type + self.load_dim = load_dim + self.use_dim = use_dim + self.file_client_args = file_client_args.copy() + self.file_client = None + + def _load_points(self, pts_filename): + """Private function to load point clouds data. + + Args: + pts_filename (str): Filename of point clouds data. + + Returns: + np.ndarray: An array containing point clouds data. + """ + if self.file_client is None: + self.file_client = FileClient(**self.file_client_args) + try: + pts_bytes = self.file_client.get(pts_filename) + points = np.frombuffer(pts_bytes, dtype=np.float32) + except ConnectionError: + check_file_exist(pts_filename) + if pts_filename.endswith('.npy'): + points = np.load(pts_filename) + else: + points = np.fromfile(pts_filename, dtype=np.float32) + + return points + + def __call__(self, results): + """Call function to load points data from file. + + Args: + results (dict): Result dict containing point clouds data. + + Returns: + dict: The result dict containing the point clouds data. \ + Added key and value are described below. + + - points (:obj:`BasePoints`): Point clouds data. 
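+
+        Example (illustrative config; the KITTI-style dimensions follow the
+            class docstring, everything else is an assumption):
+            >>> load_points = dict(
+            >>>     type='LoadPointsFromFile',
+            >>>     coord_type='LIDAR',
+            >>>     load_dim=4,
+            >>>     use_dim=[0, 1, 2, 3])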
+ """ + pts_filename = results['pts_filename'] + points = self._load_points(pts_filename) + points = points.reshape(-1, self.load_dim) + points = points[:, self.use_dim] + attribute_dims = None + + if self.shift_height: + floor_height = np.percentile(points[:, 2], 0.99) + height = points[:, 2] - floor_height + points = np.concatenate( + [points[:, :3], + np.expand_dims(height, 1), points[:, 3:]], 1) + attribute_dims = dict(height=3) + + if self.use_color: + assert len(self.use_dim) >= 6 + if attribute_dims is None: + attribute_dims = dict() + attribute_dims.update( + dict(color=[ + points.shape[1] - 3, + points.shape[1] - 2, + points.shape[1] - 1, + ])) + + points_class = get_points_type(self.coord_type) + points = points_class( + points, points_dim=points.shape[-1], attribute_dims=attribute_dims) + results['points'] = points + + return results + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + '(' + repr_str += f'shift_height={self.shift_height}, ' + repr_str += f'use_color={self.use_color}, ' + repr_str += f'file_client_args={self.file_client_args}, ' + repr_str += f'load_dim={self.load_dim}, ' + repr_str += f'use_dim={self.use_dim})' + return repr_str + + +@PIPELINES.register_module() +class LoadAnnotations3D(LoadAnnotations): + """Load Annotations3D. + + Load instance mask and semantic mask of points and + encapsulate the items into related fields. + + Args: + with_bbox_3d (bool, optional): Whether to load 3D boxes. + Defaults to True. + with_label_3d (bool, optional): Whether to load 3D labels. + Defaults to True. + with_attr_label (bool, optional): Whether to load attribute label. + Defaults to False. + with_mask_3d (bool, optional): Whether to load 3D instance masks. + for points. Defaults to False. + with_seg_3d (bool, optional): Whether to load 3D semantic masks. + for points. Defaults to False. + with_bbox (bool, optional): Whether to load 2D boxes. + Defaults to False. + with_label (bool, optional): Whether to load 2D labels. + Defaults to False. + with_mask (bool, optional): Whether to load 2D instance masks. + Defaults to False. + with_seg (bool, optional): Whether to load 2D semantic masks. + Defaults to False. + with_bbox_depth (bool, optional): Whether to load 2.5D boxes. + Defaults to False. + poly2mask (bool, optional): Whether to convert polygon annotations + to bitmasks. Defaults to True. + seg_3d_dtype (dtype, optional): Dtype of 3D semantic masks. + Defaults to int64 + file_client_args (dict): Config dict of file clients, refer to + https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py + for more details. + """ + + def __init__(self, + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + with_mask_3d=False, + with_seg_3d=False, + with_bbox=False, + with_label=False, + with_mask=False, + with_seg=False, + with_bbox_depth=False, + poly2mask=True, + seg_3d_dtype='int', + file_client_args=dict(backend='disk')): + super().__init__( + with_bbox, + with_label, + with_mask, + with_seg, + poly2mask, + file_client_args=file_client_args) + self.with_bbox_3d = with_bbox_3d + self.with_bbox_depth = with_bbox_depth + self.with_label_3d = with_label_3d + self.with_attr_label = with_attr_label + self.with_mask_3d = with_mask_3d + self.with_seg_3d = with_seg_3d + self.seg_3d_dtype = seg_3d_dtype + + def _load_bboxes_3d(self, results): + """Private function to load 3D bounding box annotations. + + Args: + results (dict): Result dict from :obj:`mmcv3d.CustomDataset`. 
+
+        Returns:
+            dict: The dict containing loaded 3D bounding box annotations.
+        """
+        results['gt_bboxes_3d'] = results['ann_info']['gt_bboxes_3d']
+        results['bbox3d_fields'].append('gt_bboxes_3d')
+        return results
+
+    def _load_bboxes_depth(self, results):
+        """Private function to load 2.5D bounding box annotations.
+
+        Args:
+            results (dict): Result dict from :obj:`mmcv3d.CustomDataset`.
+
+        Returns:
+            dict: The dict containing loaded 2.5D bounding box annotations.
+        """
+        results['centers2d'] = results['ann_info']['centers2d']
+        results['depths'] = results['ann_info']['depths']
+        return results
+
+    def _load_labels_3d(self, results):
+        """Private function to load 3D label annotations.
+
+        Args:
+            results (dict): Result dict from :obj:`mmcv3d.CustomDataset`.
+
+        Returns:
+            dict: The dict containing loaded 3D label annotations.
+        """
+        results['gt_labels_3d'] = results['ann_info']['gt_labels_3d']
+        return results
+
+    def _load_attr_labels(self, results):
+        """Private function to load attribute label annotations.
+
+        Args:
+            results (dict): Result dict from :obj:`mmcv3d.CustomDataset`.
+
+        Returns:
+            dict: The dict containing loaded attribute label annotations.
+        """
+        results['attr_labels'] = results['ann_info']['attr_labels']
+        return results
+
+    def _load_masks_3d(self, results):
+        """Private function to load 3D mask annotations.
+
+        Args:
+            results (dict): Result dict from :obj:`mmcv3d.CustomDataset`.
+
+        Returns:
+            dict: The dict containing loaded 3D mask annotations.
+        """
+        pts_instance_mask_path = results['ann_info']['pts_instance_mask_path']
+
+        if self.file_client is None:
+            self.file_client = FileClient(**self.file_client_args)
+        try:
+            mask_bytes = self.file_client.get(pts_instance_mask_path)
+            pts_instance_mask = np.frombuffer(mask_bytes, dtype=np.int64)
+        except ConnectionError:
+            check_file_exist(pts_instance_mask_path)
+            pts_instance_mask = np.fromfile(
+                pts_instance_mask_path, dtype=np.int64)
+
+        results['pts_instance_mask'] = pts_instance_mask
+        results['pts_mask_fields'].append('pts_instance_mask')
+        return results
+
+    def _load_semantic_seg_3d(self, results):
+        """Private function to load 3D semantic segmentation annotations.
+
+        Args:
+            results (dict): Result dict from :obj:`mmcv3d.CustomDataset`.
+
+        Returns:
+            dict: The dict containing the semantic segmentation annotations.
+        """
+        pts_semantic_mask_path = results['ann_info']['pts_semantic_mask_path']
+
+        if self.file_client is None:
+            self.file_client = FileClient(**self.file_client_args)
+        try:
+            mask_bytes = self.file_client.get(pts_semantic_mask_path)
+            # add .copy() to fix read-only bug
+            pts_semantic_mask = np.frombuffer(
+                mask_bytes, dtype=self.seg_3d_dtype).copy()
+        except ConnectionError:
+            check_file_exist(pts_semantic_mask_path)
+            pts_semantic_mask = np.fromfile(
+                pts_semantic_mask_path, dtype=np.int64)
+
+        results['pts_semantic_mask'] = pts_semantic_mask
+        results['pts_seg_fields'].append('pts_semantic_mask')
+        return results
+
+    def __call__(self, results):
+        """Call function to load multiple types of annotations.
+
+        Args:
+            results (dict): Result dict from :obj:`mmcv3d.CustomDataset`.
+
+        Returns:
+            dict: The dict containing loaded 3D bounding box, label, mask and
+                semantic segmentation annotations.
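+
+        Example (illustrative config, not taken from the original file):
+            >>> load_anns = dict(
+            >>>     type='LoadAnnotations3D',
+            >>>     with_bbox_3d=True,
+            >>>     with_label_3d=True,
+            >>>     with_attr_label=False)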
+ """ + results = super().__call__(results) + if self.with_bbox_3d: + results = self._load_bboxes_3d(results) + if results is None: + return None + if self.with_bbox_depth: + results = self._load_bboxes_depth(results) + if results is None: + return None + if self.with_label_3d: + results = self._load_labels_3d(results) + if self.with_attr_label: + results = self._load_attr_labels(results) + if self.with_mask_3d: + results = self._load_masks_3d(results) + if self.with_seg_3d: + results = self._load_semantic_seg_3d(results) + + return results + + def __repr__(self): + """str: Return a string that describes the module.""" + indent_str = ' ' + repr_str = self.__class__.__name__ + '(\n' + repr_str += f'{indent_str}with_bbox_3d={self.with_bbox_3d}, ' + repr_str += f'{indent_str}with_label_3d={self.with_label_3d}, ' + repr_str += f'{indent_str}with_attr_label={self.with_attr_label}, ' + repr_str += f'{indent_str}with_mask_3d={self.with_mask_3d}, ' + repr_str += f'{indent_str}with_seg_3d={self.with_seg_3d}, ' + repr_str += f'{indent_str}with_bbox={self.with_bbox}, ' + repr_str += f'{indent_str}with_label={self.with_label}, ' + repr_str += f'{indent_str}with_mask={self.with_mask}, ' + repr_str += f'{indent_str}with_seg={self.with_seg}, ' + repr_str += f'{indent_str}with_bbox_depth={self.with_bbox_depth}, ' + repr_str += f'{indent_str}poly2mask={self.poly2mask})' + return repr_str + +@PIPELINES.register_module() +class LoadMultiViewImageFromFilesInCeph(object): + """Load multi channel images from a list of separate channel files. + + Expects results['img_filename'] to be a list of filenames. + + Args: + to_float32 (bool): Whether to convert the img to float32. + Defaults to False. + color_type (str): Color type of the file. Defaults to 'unchanged'. + """ + + def __init__(self, to_float32=False, color_type='unchanged', file_client_args=dict(backend='disk'), img_root=''): + self.to_float32 = to_float32 + self.color_type = color_type + self.file_client_args = file_client_args.copy() + self.file_client = FileClient(**self.file_client_args) + self.img_root = img_root + + def __call__(self, results): + """Call function to load multi-view image from files. + + Args: + results (dict): Result dict containing multi-view image filenames. + + Returns: + dict: The result dict containing the multi-view image data. \ + Added keys and values are described below. + + - filename (list of str): Multi-view image filenames. + - img (np.ndarray): Multi-view image arrays. + - img_shape (tuple[int]): Shape of multi-view image arrays. + - ori_shape (tuple[int]): Shape of original image arrays. + - pad_shape (tuple[int]): Shape of padded image arrays. + - scale_factor (float): Scale factor. + - img_norm_cfg (dict): Normalization configuration of images. 
+ """ + images_multiView = [] + filename = results['img_filename'] + for img_path in filename: + # img_path = os.path.join(self.img_root, img_path) + if self.file_client_args['backend'] == 'petrel': + img_bytes = self.file_client.get(img_path) + img = imfrombytes(img_bytes) + elif self.file_client_args['backend'] == 'disk': + img = imread(img_path, self.color_type) + images_multiView.append(img) + # img is of shape (h, w, c, num_views) + img = np.stack( + #[mmcv.imread(name, self.color_type) for name in filename], axis=-1) + images_multiView, axis=-1) + if self.to_float32: + img = img.astype(np.float32) + results['filename'] = filename + # unravel to list, see `DefaultFormatBundle` in formating.py + # which will transpose each image separately and then stack into array + results['img'] = [img[..., i] for i in range(img.shape[-1])] + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + # Set initial values for default meta_keys + results['pad_shape'] = img.shape + results['scale_factor'] = 1.0 + num_channels = 1 if len(img.shape) < 3 else img.shape[2] + results['img_norm_cfg'] = dict( + mean=np.zeros(num_channels, dtype=np.float32), + std=np.ones(num_channels, dtype=np.float32), + to_rgb=False) + return results + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(to_float32={self.to_float32}, ' + repr_str += f"color_type='{self.color_type}')" + return repr_str + + +@PIPELINES.register_module() +class LoadAnnotations3D_E2E(LoadAnnotations3D): + """Load Annotations3D. + + Load instance mask and semantic mask of points and + encapsulate the items into related fields. + + Args: + with_bbox_3d (bool, optional): Whether to load 3D boxes. + Defaults to True. + with_label_3d (bool, optional): Whether to load 3D labels. + Defaults to True. + with_attr_label (bool, optional): Whether to load attribute label. + Defaults to False. + with_mask_3d (bool, optional): Whether to load 3D instance masks. + for points. Defaults to False. + with_seg_3d (bool, optional): Whether to load 3D semantic masks. + for points. Defaults to False. + with_bbox (bool, optional): Whether to load 2D boxes. + Defaults to False. + with_label (bool, optional): Whether to load 2D labels. + Defaults to False. + with_mask (bool, optional): Whether to load 2D instance masks. + Defaults to False. + with_seg (bool, optional): Whether to load 2D semantic masks. + Defaults to False. + with_bbox_depth (bool, optional): Whether to load 2.5D boxes. + Defaults to False. + poly2mask (bool, optional): Whether to convert polygon annotations + to bitmasks. Defaults to True. + seg_3d_dtype (dtype, optional): Dtype of 3D semantic masks. + Defaults to int64 + file_client_args (dict): Config dict of file clients, refer to + https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py + for more details. + """ + def __init__(self, + with_future_anns=False, + with_ins_inds_3d=False, + with_vis_token=True, + ins_inds_add_1=False, # NOTE: make ins_inds start from 1, not 0 + **kwargs): + super().__init__(**kwargs) + self.with_future_anns = with_future_anns + self.with_ins_inds_3d = with_ins_inds_3d + self.with_vis_token = with_vis_token + self.ins_inds_add_1 = ins_inds_add_1 + + def _load_future_anns(self, results): + """Private function to load 3D bounding box annotations. + + Args: + results (dict): Result dict from :obj:`mmcv3d.CustomDataset`. + + Returns: + dict: The dict containing loaded 3D bounding box annotations. 
+ """ + + gt_bboxes_3d = [] + gt_labels_3d = [] + gt_inds_3d = [] + # gt_valid_flags = [] + gt_vis_tokens = [] + + for ann_info in results['occ_future_ann_infos']: + if ann_info is not None: + gt_bboxes_3d.append(ann_info['gt_bboxes_3d']) + gt_labels_3d.append(ann_info['gt_labels_3d']) + + ann_gt_inds = ann_info['gt_inds'] + if self.ins_inds_add_1: + ann_gt_inds += 1 + # NOTE: sdc query is changed from -10 -> -9 + gt_inds_3d.append(ann_gt_inds) + + # gt_valid_flags.append(ann_info['gt_valid_flag']) + if self.with_vis_token: + gt_vis_tokens.append(ann_info['gt_vis_tokens']) + else: + # invalid frame + gt_bboxes_3d.append(None) + gt_labels_3d.append(None) + gt_inds_3d.append(None) + # gt_valid_flags.append(None) + if self.with_vis_token: + gt_vis_tokens.append(None) + + results['future_gt_bboxes_3d'] = gt_bboxes_3d + # results['future_bbox3d_fields'].append('gt_bboxes_3d') # Field is used for augmentations, not needed here + results['future_gt_labels_3d'] = gt_labels_3d + results['future_gt_inds'] = gt_inds_3d + # results['future_gt_valid_flag'] = gt_valid_flags + if self.with_vis_token: + results['future_gt_vis_tokens'] = gt_vis_tokens + + return results + + def _load_ins_inds_3d(self, results): + ann_gt_inds = results['ann_info']['gt_inds'].copy() # TODO: note here + + # NOTE: Avoid gt_inds generated twice + results['ann_info'].pop('gt_inds') + + if self.ins_inds_add_1: + ann_gt_inds += 1 + results['gt_inds'] = ann_gt_inds + return results + + def __call__(self, results): + results = super().__call__(results) + + if self.with_future_anns: + results = self._load_future_anns(results) + if self.with_ins_inds_3d: + results = self._load_ins_inds_3d(results) + + # Generate ann for plan + if 'occ_future_ann_infos_for_plan' in results.keys(): + results = self._load_future_anns_plan(results) + + return results + + def __repr__(self): + repr_str = super().__repr__() + indent_str = ' ' + repr_str += f'{indent_str}with_future_anns={self.with_future_anns}, ' + repr_str += f'{indent_str}with_ins_inds_3d={self.with_ins_inds_3d}, ' + + return repr_str + + +def load_augmented_point_cloud(path, virtual=False, reduce_beams=32): + # NOTE: following Tianwei's implementation, it is hard coded for nuScenes + points = np.fromfile(path, dtype=np.float32).reshape(-1, 5) + # NOTE: path definition different from Tianwei's implementation. + tokens = path.split("/") + vp_dir = "_VIRTUAL" if reduce_beams == 32 else f"_VIRTUAL_{reduce_beams}BEAMS" + seg_path = os.path.join( + *tokens[:-3], + "virtual_points", + tokens[-3], + tokens[-2] + vp_dir, + tokens[-1] + ".pkl.npy", + ) + assert os.path.exists(seg_path) + data_dict = np.load(seg_path, allow_pickle=True).item() + + virtual_points1 = data_dict["real_points"] + # NOTE: add zero reflectance to virtual points instead of removing them from real points + virtual_points2 = np.concatenate( + [ + data_dict["virtual_points"][:, :3], + np.zeros([data_dict["virtual_points"].shape[0], 1]), + data_dict["virtual_points"][:, 3:], + ], + axis=-1, + ) + + points = np.concatenate( + [ + points, + np.ones([points.shape[0], virtual_points1.shape[1] - points.shape[1] + 1]), + ], + axis=1, + ) + virtual_points1 = np.concatenate( + [virtual_points1, np.zeros([virtual_points1.shape[0], 1])], axis=1 + ) + # note: this part is different from Tianwei's implementation, we don't have duplicate foreground real points. 
+ if len(data_dict["real_points_indice"]) > 0: + points[data_dict["real_points_indice"]] = virtual_points1 + if virtual: + virtual_points2 = np.concatenate( + [virtual_points2, -1 * np.ones([virtual_points2.shape[0], 1])], axis=1 + ) + points = np.concatenate([points, virtual_points2], axis=0).astype(np.float32) + return points + + +def reduce_LiDAR_beams(pts, reduce_beams_to=32): + # print(pts.size()) + if isinstance(pts, np.ndarray): + pts = torch.from_numpy(pts) + radius = torch.sqrt(pts[:, 0].pow(2) + pts[:, 1].pow(2) + pts[:, 2].pow(2)) + sine_theta = pts[:, 2] / radius + # [-pi/2, pi/2] + theta = torch.asin(sine_theta) + phi = torch.atan2(pts[:, 1], pts[:, 0]) + + top_ang = 0.1862 + down_ang = -0.5353 + + beam_range = torch.zeros(32) + beam_range[0] = top_ang + beam_range[31] = down_ang + + for i in range(1, 31): + beam_range[i] = beam_range[i - 1] - 0.023275 + # beam_range = [1, 0.18, 0.15, 0.13, 0.11, 0.085, 0.065, 0.03, 0.01, -0.01, -0.03, -0.055, -0.08, -0.105, -0.13, -0.155, -0.18, -0.205, -0.228, -0.251, -0.275, + # -0.295, -0.32, -0.34, -0.36, -0.38, -0.40, -0.425, -0.45, -0.47, -0.49, -0.52, -0.54] + + num_pts, _ = pts.size() + mask = torch.zeros(num_pts) + if reduce_beams_to == 16: + for id in [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31]: + beam_mask = (theta < (beam_range[id - 1] - 0.012)) * ( + theta > (beam_range[id] - 0.012) + ) + mask = mask + beam_mask + mask = mask.bool() + elif reduce_beams_to == 4: + for id in [7, 9, 11, 13]: + beam_mask = (theta < (beam_range[id - 1] - 0.012)) * ( + theta > (beam_range[id] - 0.012) + ) + mask = mask + beam_mask + mask = mask.bool() + # [?] pick the 14th beam + elif reduce_beams_to == 1: + chosen_beam_id = 9 + mask = (theta < (beam_range[chosen_beam_id - 1] - 0.012)) * ( + theta > (beam_range[chosen_beam_id] - 0.012) + ) + else: + raise NotImplementedError + # points = copy.copy(pts) + points = pts[mask] + # print(points.size()) + return points.numpy() + +@PIPELINES.register_module() +class CustomLoadPointsFromMultiSweeps: + """Load points from multiple sweeps. + + This is usually used for nuScenes dataset to utilize previous sweeps. + + Args: + sweeps_num (int): Number of sweeps. Defaults to 10. + load_dim (int): Dimension number of the loaded points. Defaults to 5. + use_dim (list[int]): Which dimension to use. Defaults to [0, 1, 2, 4]. + pad_empty_sweeps (bool): Whether to repeat keyframe when + sweeps is empty. Defaults to False. + remove_close (bool): Whether to remove close points. + Defaults to False. + test_mode (bool): If test_model=True used for testing, it will not + randomly sample sweeps but select the nearest N frames. + Defaults to False. + """ + + def __init__( + self, + sweeps_num=10, + load_dim=5, + use_dim=[0, 1, 2, 4], + pad_empty_sweeps=False, + remove_close=False, + test_mode=False, + load_augmented=None, + reduce_beams=None, + ): + self.load_dim = load_dim + self.sweeps_num = sweeps_num + if isinstance(use_dim, int): + use_dim = list(range(use_dim)) + self.use_dim = use_dim + self.pad_empty_sweeps = pad_empty_sweeps + self.remove_close = remove_close + self.test_mode = test_mode + self.load_augmented = load_augmented + self.reduce_beams = reduce_beams + + def _load_points(self, lidar_path): + """Private function to load point clouds data. + + Args: + lidar_path (str): Filename of point clouds data. + + Returns: + np.ndarray: An array containing point clouds data. 
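+
+        Example (illustrative sketch; the path is hypothetical, and 'mvp'
+            selects the virtual-point variant of the augmented clouds):
+            >>> loader = CustomLoadPointsFromMultiSweeps(
+            >>>     sweeps_num=10, load_augmented='mvp', reduce_beams=32)
+            >>> pts = loader._load_points('sweeps/LIDAR_TOP/xxx.bin')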
+ """ + mmcv.check_file_exist(lidar_path) + if self.load_augmented: + assert self.load_augmented in ["pointpainting", "mvp"] + virtual = self.load_augmented == "mvp" + points = load_augmented_point_cloud( + lidar_path, virtual=virtual, reduce_beams=self.reduce_beams + ) + elif lidar_path.endswith(".npy"): + points = np.load(lidar_path) + else: + points = np.fromfile(lidar_path, dtype=np.float32) + return points + + def _remove_close(self, points, radius=1.0): + """Removes point too close within a certain radius from origin. + + Args: + points (np.ndarray | :obj:`BasePoints`): Sweep points. + radius (float): Radius below which points are removed. + Defaults to 1.0. + + Returns: + np.ndarray: Points after removing. + """ + if isinstance(points, np.ndarray): + points_numpy = points + elif isinstance(points, BasePoints): + points_numpy = points.tensor.numpy() + else: + raise NotImplementedError + x_filt = np.abs(points_numpy[:, 0]) < radius + y_filt = np.abs(points_numpy[:, 1]) < radius + not_close = np.logical_not(np.logical_and(x_filt, y_filt)) + return points[not_close] + + def __call__(self, results): + """Call function to load multi-sweep point clouds from files. + + Args: + results (dict): Result dict containing multi-sweep point cloud \ + filenames. + + Returns: + dict: The result dict containing the multi-sweep points data. \ + Added key and value are described below. + + - points (np.ndarray | :obj:`BasePoints`): Multi-sweep point \ + cloud arrays. + """ + points = results["points"] + points.tensor[:, 4] = 0 + sweep_points_list = [points] + ts = results["timestamp"] / 1e6 + if self.pad_empty_sweeps and len(results["sweeps"]) == 0: + for i in range(self.sweeps_num): + if self.remove_close: + sweep_points_list.append(self._remove_close(points)) + else: + sweep_points_list.append(points) + else: + if len(results["sweeps"]) <= self.sweeps_num: + choices = np.arange(len(results["sweeps"])) + elif self.test_mode: + choices = np.arange(self.sweeps_num) + else: + # NOTE: seems possible to load frame -11? + if not self.load_augmented: + choices = np.random.choice( + len(results["sweeps"]), self.sweeps_num, replace=False + ) + else: + # don't allow to sample the earliest frame, match with Tianwei's implementation. + choices = np.random.choice( + len(results["sweeps"]) - 1, self.sweeps_num, replace=False + ) + for idx in choices: + sweep = results["sweeps"][idx] + points_sweep = self._load_points(sweep["data_path"]) + points_sweep = np.copy(points_sweep).reshape(-1, self.load_dim) + + # TODO: make it more general + if self.reduce_beams and self.reduce_beams < 32: + points_sweep = reduce_LiDAR_beams(points_sweep, self.reduce_beams) + + if self.remove_close: + points_sweep = self._remove_close(points_sweep) + sweep_ts = sweep["timestamp"] / 1e6 + points_sweep[:, :3] = ( + points_sweep[:, :3] @ sweep["sensor2lidar_rotation"].T + ) + points_sweep[:, :3] += sweep["sensor2lidar_translation"] + points_sweep[:, 4] = ts - sweep_ts + points_sweep = points.new_point(points_sweep) + sweep_points_list.append(points_sweep) + + points = points.cat(sweep_points_list) + points = points[:, self.use_dim] + results["points"] = points + return results + + def __repr__(self): + """str: Return a string that describes the module.""" + return f"{self.__class__.__name__}(sweeps_num={self.sweeps_num})" + + + +@PIPELINES.register_module() +class CustomLoadPointsFromFile: + """Load Points From File. + + Load sunrgbd and scannet points from file. + + Args: + coord_type (str): The type of coordinates of points cloud. 
+ Available options includes: + - 'LIDAR': Points in LiDAR coordinates. + - 'DEPTH': Points in depth coordinates, usually for indoor dataset. + - 'CAMERA': Points in camera coordinates. + load_dim (int): The dimension of the loaded points. + Defaults to 6. + use_dim (list[int]): Which dimensions of the points to be used. + Defaults to [0, 1, 2]. For KITTI dataset, set use_dim=4 + or use_dim=[0, 1, 2, 3] to use the intensity dimension. + shift_height (bool): Whether to use shifted height. Defaults to False. + use_color (bool): Whether to use color features. Defaults to False. + """ + + def __init__( + self, + coord_type, + load_dim=6, + use_dim=[0, 1, 2], + shift_height=False, + use_color=False, + load_augmented=None, + reduce_beams=None, + ): + self.shift_height = shift_height + self.use_color = use_color + if isinstance(use_dim, int): + use_dim = list(range(use_dim)) + assert ( + max(use_dim) < load_dim + ), f"Expect all used dimensions < {load_dim}, got {use_dim}" + assert coord_type in ["CAMERA", "LIDAR", "DEPTH"] + + self.coord_type = coord_type + self.load_dim = load_dim + self.use_dim = use_dim + self.load_augmented = load_augmented + self.reduce_beams = reduce_beams + + def _load_points(self, lidar_path): + """Private function to load point clouds data. + + Args: + lidar_path (str): Filename of point clouds data. + + Returns: + np.ndarray: An array containing point clouds data. + """ + mmcv.check_file_exist(lidar_path) + if self.load_augmented: + assert self.load_augmented in ["pointpainting", "mvp"] + virtual = self.load_augmented == "mvp" + points = load_augmented_point_cloud( + lidar_path, virtual=virtual, reduce_beams=self.reduce_beams + ) + elif lidar_path.endswith(".npy"): + points = np.load(lidar_path) + else: + points = np.fromfile(lidar_path, dtype=np.float32) + + return points + + def __call__(self, results): + """Call function to load points data from file. + + Args: + results (dict): Result dict containing point clouds data. + + Returns: + dict: The result dict containing the point clouds data. \ + Added key and value are described below. + + - points (:obj:`BasePoints`): Point clouds data. 
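+
+        Example (a minimal sketch; the file path is hypothetical)::
+
+            >>> loader = CustomLoadPointsFromFile(
+            ...     coord_type='LIDAR', load_dim=5, use_dim=[0, 1, 2, 3])
+            >>> results = loader(dict(pts_filename='demo/lidar.bin'))
+            >>> results['points']  # :obj:`BasePoints` with 4 dims per point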
+ """ + lidar_path = results["pts_filename"] + points = self._load_points(lidar_path) + points = points.reshape(-1, self.load_dim) + # TODO: make it more general + if self.reduce_beams and self.reduce_beams < 32: + points = reduce_LiDAR_beams(points, self.reduce_beams) + points = points[:, self.use_dim] + attribute_dims = None + + if self.shift_height: + floor_height = np.percentile(points[:, 2], 0.99) + height = points[:, 2] - floor_height + points = np.concatenate( + [points[:, :3], np.expand_dims(height, 1), points[:, 3:]], 1 + ) + attribute_dims = dict(height=3) + + if self.use_color: + assert len(self.use_dim) >= 6 + if attribute_dims is None: + attribute_dims = dict() + attribute_dims.update( + dict( + color=[ + points.shape[1] - 3, + points.shape[1] - 2, + points.shape[1] - 1, + ] + ) + ) + + points_class = get_points_type(self.coord_type) + points = points_class( + points, points_dim=points.shape[-1], attribute_dims=attribute_dims + ) + results["points"] = points + + return results diff --git a/mmcv/datasets/pipelines/occflow_label.py b/mmcv/datasets/pipelines/occflow_label.py new file mode 100644 index 0000000..5ed8fe4 --- /dev/null +++ b/mmcv/datasets/pipelines/occflow_label.py @@ -0,0 +1,286 @@ +import torch +import numpy as np +import cv2 + +from mmcv.models.dense_heads.occ_head_plugin import calculate_birds_eye_view_parameters + +from mmcv.datasets.builder import PIPELINES +import os + +@PIPELINES.register_module() +class GenerateOccFlowLabels(object): + def __init__(self, grid_conf, ignore_index=255, only_vehicle=True, filter_invisible=True, deal_instance_255=False,all_classes = None,vehicle_classes = None,plan_classes = None): + self.grid_conf = grid_conf + self.bev_resolution, self.bev_start_position, self.bev_dimension = calculate_birds_eye_view_parameters( + grid_conf['xbound'], grid_conf['ybound'], grid_conf['zbound'], + ) + # convert numpy + self.bev_resolution = self.bev_resolution.numpy() + self.bev_start_position = self.bev_start_position.numpy() + self.bev_dimension = self.bev_dimension.numpy() + self.spatial_extent = (grid_conf['xbound'][1], grid_conf['ybound'][1]) + self.ignore_index = ignore_index + self.only_vehicle = only_vehicle + self.filter_invisible = filter_invisible + self.deal_instance_255 = deal_instance_255 + assert self.deal_instance_255 is False + + + if all_classes is None: + all_classes = ['car', 'truck', 'construction_vehicle', 'bus', 'trailer', + 'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'] + if vehicle_classes is None: + vehicle_classes = ['car', 'bus', 'construction_vehicle', + 'bicycle', 'motorcycle', 'truck', 'trailer'] + if plan_classes is None: + plan_classes = vehicle_classes + ['pedestrian'] + + self.vehicle_cls_ids = np.array([all_classes.index( + cls_name) for cls_name in vehicle_classes]) + + self.plan_cls_ids = np.array([all_classes.index( + cls_name) for cls_name in plan_classes]) + + if only_vehicle: + self.filter_cls_ids = self.vehicle_cls_ids + else: + self.filter_cls_ids = self.plan_cls_ids + + def reframe_boxes(self, boxes, t_init, t_curr): + l2e_r_mat_curr = t_curr['l2e_r'] + l2e_t_curr = t_curr['l2e_t'] + e2g_r_mat_curr = t_curr['e2g_r'] + e2g_t_curr = t_curr['e2g_t'] + + l2e_r_mat_init = t_init['l2e_r'] + l2e_t_init = t_init['l2e_t'] + e2g_r_mat_init = t_init['e2g_r'] + e2g_t_init = t_init['e2g_t'] + + # to bbox under curr ego frame # TODO: Uncomment + boxes.rotate(l2e_r_mat_curr.T) + boxes.translate(l2e_t_curr) + + # to bbox under world frame + boxes.rotate(e2g_r_mat_curr.T) + boxes.translate(e2g_t_curr) 
+ + # to bbox under initial ego frame, first inverse translate, then inverse rotate + boxes.translate(- e2g_t_init) + m1 = np.linalg.inv(e2g_r_mat_init) + boxes.rotate(m1.T) + + # to bbox under curr ego frame, first inverse translate, then inverse rotate + boxes.translate(- l2e_t_init) + m2 = np.linalg.inv(l2e_r_mat_init) + boxes.rotate(m2.T) + + return boxes + + def __call__(self, results): + """ + # Given lidar frame bboxes for curr frame and each future frame, + # generate segmentation, instance, centerness, offset, and fwd flow map + """ + # Avoid ignoring obj with index = self.ignore_index + SPECIAL_INDEX = -20 + + all_gt_bboxes_3d = results['future_gt_bboxes_3d'] + all_gt_labels_3d = results['future_gt_labels_3d'] + all_gt_inds = results['future_gt_inds'] + if 'future_gt_vis_tokens' in results.keys(): + all_vis_tokens = results['future_gt_vis_tokens'] + else: + all_vis_tokens = None + num_frame = len(all_gt_bboxes_3d) + + # motion related transforms, of seq lengths + l2e_r_mats = results['occ_l2e_r_mats'] + l2e_t_vecs = results['occ_l2e_t_vecs'] + e2g_r_mats = results['occ_e2g_r_mats'] + e2g_t_vecs = results['occ_e2g_t_vecs'] + + # reference frame transform + t_ref = dict(l2e_r=l2e_r_mats[0], l2e_t=l2e_t_vecs[0], e2g_r=e2g_r_mats[0], e2g_t=e2g_t_vecs[0]) + + segmentations = [] + instances = [] + gt_future_boxes = [] + gt_future_labels = [] + + # num_frame is 5 + for i in range(num_frame): + # bbox, label, index of curr frame + gt_bboxes_3d, gt_labels_3d = all_gt_bboxes_3d[i], all_gt_labels_3d[i] + ins_inds = all_gt_inds[i] + if all_vis_tokens is not None: + vis_tokens = all_vis_tokens[i] + else: + vis_tokens = None + + if gt_bboxes_3d is None: + # for invalid samples, no loss calculated + segmentation = np.ones( + (self.bev_dimension[1], self.bev_dimension[0])) * self.ignore_index + instance = np.ones( + (self.bev_dimension[1], self.bev_dimension[0])) * self.ignore_index + else: + # reframe bboxes to reference frame + t_curr = dict(l2e_r=l2e_r_mats[i], l2e_t=l2e_t_vecs[i], e2g_r=e2g_r_mats[i], e2g_t=e2g_t_vecs[i]) + ref_bboxes_3d = self.reframe_boxes(gt_bboxes_3d, t_ref, t_curr) + gt_future_boxes.append(ref_bboxes_3d) + gt_future_labels.append(gt_labels_3d) + + # for valid samples + segmentation = np.zeros( + (self.bev_dimension[1], self.bev_dimension[0])) + instance = np.zeros( + (self.bev_dimension[1], self.bev_dimension[0])) + + if self.only_vehicle: + vehicle_mask = np.isin(gt_labels_3d, self.filter_cls_ids) + ref_bboxes_3d = ref_bboxes_3d[vehicle_mask] + gt_labels_3d = gt_labels_3d[vehicle_mask] + ins_inds = ins_inds[vehicle_mask] + if vis_tokens is not None: + vis_tokens = vis_tokens[vehicle_mask] + + if self.filter_invisible: + assert vis_tokens is not None + visible_mask = (vis_tokens != 1) # obj are filtered out with visibility(1) between 0 and 40% + ref_bboxes_3d = ref_bboxes_3d[visible_mask] + gt_labels_3d = gt_labels_3d[visible_mask] + ins_inds = ins_inds[visible_mask] + + # valid sample and has objects + if len(ref_bboxes_3d.tensor) > 0: + bbox_corners = ref_bboxes_3d.corners[:, [ + 0, 3, 7, 4], :2].numpy() + bbox_corners = np.round( + (bbox_corners - self.bev_start_position[:2] + self.bev_resolution[:2] / 2.0) / self.bev_resolution[:2]).astype(np.int32) + + for index, gt_ind in enumerate(ins_inds): + if gt_ind == self.ignore_index: + gt_ind = SPECIAL_INDEX # 255 -> -20 + poly_region = bbox_corners[index] + + cv2.fillPoly(segmentation, [poly_region], 1.0) + cv2.fillPoly(instance, [poly_region], int(gt_ind)) + + segmentations.append(segmentation) + 
instances.append(instance) + + # segmentation = 1 where objects are located + segmentations = torch.from_numpy( + np.stack(segmentations, axis=0)).long() + instances = torch.from_numpy(np.stack(instances, axis=0)).long() + + # generate heatmap & offset from segmentation & instance + instance_centerness, instance_offset, instance_flow, instance_backward_flow = self.center_offset_flow( + instances, + all_gt_inds, + ignore_index=255, + ) + + invalid_mask = (segmentations[:, 0, 0] == self.ignore_index) + instance_centerness[invalid_mask] = self.ignore_index + + results['gt_occ_has_invalid_frame'] = results.pop('occ_has_invalid_frame') + results['gt_occ_img_is_valid'] = results.pop('occ_img_is_valid') + results.update({ + 'gt_segmentation': segmentations, + 'gt_instance': instances, + 'gt_centerness': instance_centerness, + 'gt_offset': instance_offset, + 'gt_flow': instance_flow, + 'gt_backward_flow': instance_backward_flow, + 'gt_future_boxes': gt_future_boxes, + 'gt_future_labels': gt_future_labels + }) + return results + + def center_offset_flow(self, instance_img, all_gt_inds, ignore_index=255, sigma=3.0): + seq_len, h, w = instance_img.shape + # heatmap + center_label = torch.zeros(seq_len, 1, h, w) + # offset from parts to centers + offset_label = ignore_index * torch.ones(seq_len, 2, h, w) + # future flow + future_displacement_label = ignore_index * torch.ones(seq_len, 2, h, w) + + # backward flow + backward_flow = ignore_index * torch.ones(seq_len, 2, h, w) + + # x is vertical displacement, y is horizontal displacement + x, y = torch.meshgrid(torch.arange(h, dtype=torch.float), + torch.arange(w, dtype=torch.float)) + + gt_inds_all = [] + for ins_inds_per_frame in all_gt_inds: + if ins_inds_per_frame is None: + continue + for ins_ind in ins_inds_per_frame: + gt_inds_all.append(ins_ind) + gt_inds_unique = np.unique(np.array(gt_inds_all)) + + # iterate over all instances across this sequence + for instance_id in gt_inds_unique: + instance_id = int(instance_id) + prev_xc = None + prev_yc = None + prev_mask = None + for t in range(seq_len): + instance_mask = (instance_img[t] == instance_id) + if instance_mask.sum() == 0: + # this instance is not in this frame + prev_xc = None + prev_yc = None + prev_mask = None + continue + + # the Bird-Eye-View center of the instance + xc = x[instance_mask].mean() + yc = y[instance_mask].mean() + + off_x = xc - x + off_y = yc - y + g = torch.exp(-(off_x ** 2 + off_y ** 2) / sigma ** 2) + center_label[t, 0] = torch.maximum(center_label[t, 0], g) + offset_label[t, 0, instance_mask] = off_x[instance_mask] + offset_label[t, 1, instance_mask] = off_y[instance_mask] + + if prev_xc is not None and instance_mask.sum() > 0: + delta_x = xc - prev_xc + delta_y = yc - prev_yc + future_displacement_label[t-1, 0, prev_mask] = delta_x + future_displacement_label[t-1, 1, prev_mask] = delta_y + backward_flow[t-1, 0, instance_mask] = -1 * delta_x + backward_flow[t-1, 1, instance_mask] = -1 * delta_y + + prev_xc = xc + prev_yc = yc + prev_mask = instance_mask + + return center_label, offset_label, future_displacement_label, backward_flow + + + def visualize_instances(self, instances, vis_root=''): + if vis_root is not None and vis_root != '': + os.makedirs(vis_root, exist_ok=True) + + for i, ins in enumerate(instances): + ins_c = ins.astype(np.uint8) + ins_c = cv2.applyColorMap(ins_c, cv2.COLORMAP_JET) + save_path = os.path.join(vis_root, '{}.png'.format(i)) + cv2.imwrite(save_path, ins_c) + + vid_path = os.path.join(vis_root, 'vid_ins.avi') + height, width = 
instances[0].shape
+        # cv2.VideoWriter expects the frame size as (width, height)
+        size = (width, height)
+        v_out = cv2.VideoWriter(vid_path, cv2.VideoWriter_fourcc(*'DIVX'), 4, size)
+        for i in range(len(instances)):
+            ins_c = instances[i].astype(np.uint8)
+            ins_c = cv2.applyColorMap(ins_c, cv2.COLORMAP_JET)
+            v_out.write(ins_c)
+        v_out.release()
+        return
diff --git a/mmcv/datasets/pipelines/test_time_aug.py b/mmcv/datasets/pipelines/test_time_aug.py
new file mode 100644
index 0000000..4c21d4e
--- /dev/null
+++ b/mmcv/datasets/pipelines/test_time_aug.py
@@ -0,0 +1,233 @@
+import warnings
+
+from mmcv.utils import is_list_of
+from copy import deepcopy
+from ..builder import PIPELINES
+from .compose import Compose
+
+
+@PIPELINES.register_module()
+class MultiScaleFlipAug:
+    """Test-time augmentation with multiple scales and flipping.
+
+    An example configuration is as follows:
+
+    .. code-block::
+
+        img_scale=[(1333, 400), (1333, 800)],
+        flip=True,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ]
+
+    After MultiScaleFlipAug with the above configuration, the results are
+    wrapped into lists of the same length as follows:
+
+    .. code-block::
+
+        dict(
+            img=[...],
+            img_shape=[...],
+            scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)],
+            flip=[False, True, False, True],
+            ...
+        )
+
+    Args:
+        transforms (list[dict]): Transforms to apply in each augmentation.
+        img_scale (tuple | list[tuple] | None): Image scales for resizing.
+        scale_factor (float | list[float] | None): Scale factors for resizing.
+        flip (bool): Whether to apply flip augmentation. Default: False.
+        flip_direction (str | list[str]): Flip augmentation directions,
+            options are "horizontal", "vertical" and "diagonal". If
+            flip_direction is a list, multiple flip augmentations will be
+            applied. It has no effect when flip == False. Default:
+            "horizontal".
+    """
+
+    def __init__(self,
+                 transforms,
+                 img_scale=None,
+                 scale_factor=None,
+                 flip=False,
+                 flip_direction='horizontal'):
+        self.transforms = Compose(transforms)
+        assert (img_scale is None) ^ (scale_factor is None), (
+            'Exactly one of img_scale and scale_factor must be set')
+        if img_scale is not None:
+            self.img_scale = img_scale if isinstance(img_scale,
+                                                     list) else [img_scale]
+            self.scale_key = 'scale'
+            assert is_list_of(self.img_scale, tuple)
+        else:
+            self.img_scale = scale_factor if isinstance(
+                scale_factor, list) else [scale_factor]
+            self.scale_key = 'scale_factor'
+
+        self.flip = flip
+        self.flip_direction = flip_direction if isinstance(
+            flip_direction, list) else [flip_direction]
+        assert is_list_of(self.flip_direction, str)
+        if not self.flip and self.flip_direction != ['horizontal']:
+            warnings.warn(
+                'flip_direction has no effect when flip is set to False')
+        if (self.flip
+                and not any([t['type'] == 'RandomFlip' for t in transforms])):
+            warnings.warn(
+                'flip has no effect when RandomFlip is not in transforms')
+
+    def __call__(self, results):
+        """Call function to apply test time augment transforms on results.
+
+        Args:
+            results (dict): Result dict contains the data to transform.
+
+        Returns:
+            dict[str: list]: The augmented data, where each value is wrapped
+                into a list.
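+
+        Example (a sketch of the enumeration, assuming
+        ``img_scale=[(1333, 400), (1333, 800)]`` and ``flip=True``)::
+
+            >>> scales = [(1333, 400), (1333, 800)]
+            >>> flips = [False, True]
+            >>> len([(s, f) for s in scales for f in flips])
+            4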
+ """ + + aug_data = [] + flip_args = [(False, None)] + if self.flip: + flip_args += [(True, direction) + for direction in self.flip_direction] + for scale in self.img_scale: + for flip, direction in flip_args: + _results = results.copy() + _results[self.scale_key] = scale + _results['flip'] = flip + _results['flip_direction'] = direction + data = self.transforms(_results) + aug_data.append(data) + # list of dict to dict of list + aug_data_dict = {key: [] for key in aug_data[0]} + for data in aug_data: + for key, val in data.items(): + aug_data_dict[key].append(val) + return aug_data_dict + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(transforms={self.transforms}, ' + repr_str += f'img_scale={self.img_scale}, flip={self.flip}, ' + repr_str += f'flip_direction={self.flip_direction})' + return repr_str + +@PIPELINES.register_module() +class MultiScaleFlipAug3D(object): + """Test-time augmentation with multiple scales and flipping. + + Args: + transforms (list[dict]): Transforms to apply in each augmentation. + img_scale (tuple | list[tuple]: Images scales for resizing. + pts_scale_ratio (float | list[float]): Points scale ratios for + resizing. + flip (bool): Whether apply flip augmentation. Defaults to False. + flip_direction (str | list[str]): Flip augmentation directions + for images, options are "horizontal" and "vertical". + If flip_direction is list, multiple flip augmentations will + be applied. It has no effect when ``flip == False``. + Defaults to "horizontal". + pcd_horizontal_flip (bool): Whether apply horizontal flip augmentation + to point cloud. Defaults to True. Note that it works only when + 'flip' is turned on. + pcd_vertical_flip (bool): Whether apply vertical flip augmentation + to point cloud. Defaults to True. Note that it works only when + 'flip' is turned on. + """ + + def __init__(self, + transforms, + img_scale, + pts_scale_ratio, + flip=False, + flip_direction='horizontal', + pcd_horizontal_flip=False, + pcd_vertical_flip=False): + self.transforms = Compose(transforms) + self.img_scale = img_scale if isinstance(img_scale, + list) else [img_scale] + self.pts_scale_ratio = pts_scale_ratio \ + if isinstance(pts_scale_ratio, list) else[float(pts_scale_ratio)] + + assert is_list_of(self.img_scale, tuple) + assert is_list_of(self.pts_scale_ratio, float) + + self.flip = flip + self.pcd_horizontal_flip = pcd_horizontal_flip + self.pcd_vertical_flip = pcd_vertical_flip + + self.flip_direction = flip_direction if isinstance( + flip_direction, list) else [flip_direction] + assert is_list_of(self.flip_direction, str) + if not self.flip and self.flip_direction != ['horizontal']: + warnings.warn( + 'flip_direction has no effect when flip is set to False') + if (self.flip and not any([(t['type'] == 'RandomFlip3D' + or t['type'] == 'RandomFlip') + for t in transforms])): + warnings.warn( + 'flip has no effect when RandomFlip is not in transforms') + + def __call__(self, results): + """Call function to augment common fields in results. + + Args: + results (dict): Result dict contains the data to augment. + + Returns: + dict: The result dict contains the data that is augmented with \ + different scales and flips. 
+ """ + aug_data = [] + + # modified from `flip_aug = [False, True] if self.flip else [False]` + # to reduce unnecessary scenes when using double flip augmentation + # during test time + flip_aug = [True] if self.flip else [False] + pcd_horizontal_flip_aug = [False, True] \ + if self.flip and self.pcd_horizontal_flip else [False] + pcd_vertical_flip_aug = [False, True] \ + if self.flip and self.pcd_vertical_flip else [False] + for scale in self.img_scale: + for pts_scale_ratio in self.pts_scale_ratio: + for flip in flip_aug: + for pcd_horizontal_flip in pcd_horizontal_flip_aug: + for pcd_vertical_flip in pcd_vertical_flip_aug: + for direction in self.flip_direction: + # results.copy will cause bug + # since it is shallow copy + _results = deepcopy(results) + _results['scale'] = scale + _results['flip'] = flip + _results['pcd_scale_factor'] = \ + pts_scale_ratio + _results['flip_direction'] = direction + _results['pcd_horizontal_flip'] = \ + pcd_horizontal_flip + _results['pcd_vertical_flip'] = \ + pcd_vertical_flip + data = self.transforms(_results) + aug_data.append(data) + # list of dict to dict of list + aug_data_dict = {key: [] for key in aug_data[0]} + for data in aug_data: + for key, val in data.items(): + aug_data_dict[key].append(val) + return aug_data_dict + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(transforms={self.transforms}, ' + repr_str += f'img_scale={self.img_scale}, flip={self.flip}, ' + repr_str += f'pts_scale_ratio={self.pts_scale_ratio}, ' + repr_str += f'flip_direction={self.flip_direction})' + return repr_str + diff --git a/mmcv/datasets/pipelines/transforms.py b/mmcv/datasets/pipelines/transforms.py new file mode 100644 index 0000000..e7776cd --- /dev/null +++ b/mmcv/datasets/pipelines/transforms.py @@ -0,0 +1,1906 @@ +import copy +import inspect + +import numpy as np +from numpy import random + +from mmcv.core.mask.structures import PolygonMasks +from mmcv.core.evaluation.bbox_overlaps import bbox_overlaps +from mmcv.utils import is_list_of, is_str +from mmcv.image import imrescale, imresize, imflip, impad, impad_to_multiple, imnormalize, bgr2hsv, hsv2bgr +from ..builder import PIPELINES + +try: + from imagecorruptions import corrupt +except ImportError: + corrupt = None + +try: + import albumentations + from albumentations import Compose +except ImportError: + albumentations = None + Compose = None + + +@PIPELINES.register_module() +class Resize: + """Resize images & bbox & mask. + + This transform resizes the input image to some scale. Bboxes and masks are + then resized with the same scale factor. If the input dict contains the key + "scale", then the scale in the input dict is used, otherwise the specified + scale in the init method is used. If the input dict contains the key + "scale_factor" (if MultiScaleFlipAug does not give img_scale but + scale_factor), the actual scale will be computed by image shape and + scale_factor. + + `img_scale` can either be a tuple (single-scale) or a list of tuple + (multi-scale). There are 3 multiscale modes: + + - ``ratio_range is not None``: randomly sample a ratio from the ratio \ + range and multiply it with the image scale. + - ``ratio_range is None`` and ``multiscale_mode == "range"``: randomly \ + sample a scale from the multiscale range. + - ``ratio_range is None`` and ``multiscale_mode == "value"``: randomly \ + sample a scale from multiple scales. + + Args: + img_scale (tuple or list[tuple]): Images scales for resizing. 
+        multiscale_mode (str): Either "range" or "value".
+        ratio_range (tuple[float]): (min_ratio, max_ratio).
+        keep_ratio (bool): Whether to keep the aspect ratio when resizing the
+            image.
+        bbox_clip_border (bool, optional): Whether to clip the objects outside
+            the border of the image. Defaults to True.
+        backend (str): Image resize backend, choices are 'cv2' and 'pillow'.
+            These two backends generate slightly different results. Defaults
+            to 'cv2'.
+        override (bool, optional): Whether to override `scale` and
+            `scale_factor` so as to call resize twice. Default False. If True,
+            after the first resizing, the existing `scale` and `scale_factor`
+            will be ignored so that the second resizing can take effect.
+            This option is a workaround for resizing multiple times in DETR.
+            Defaults to False.
+    """
+
+    def __init__(self,
+                 img_scale=None,
+                 multiscale_mode='range',
+                 ratio_range=None,
+                 keep_ratio=True,
+                 bbox_clip_border=True,
+                 backend='cv2',
+                 override=False):
+        if img_scale is None:
+            self.img_scale = None
+        else:
+            if isinstance(img_scale, list):
+                self.img_scale = img_scale
+            else:
+                self.img_scale = [img_scale]
+            assert is_list_of(self.img_scale, tuple)
+
+        if ratio_range is not None:
+            # mode 1: given a scale and a range of image ratios
+            assert len(self.img_scale) == 1
+        else:
+            # mode 2: given multiple scales or a range of scales
+            assert multiscale_mode in ['value', 'range']
+
+        self.backend = backend
+        self.multiscale_mode = multiscale_mode
+        self.ratio_range = ratio_range
+        self.keep_ratio = keep_ratio
+        # TODO: refactor the override option in Resize
+        self.override = override
+        self.bbox_clip_border = bbox_clip_border
+
+    @staticmethod
+    def random_select(img_scales):
+        """Randomly select an img_scale from given candidates.
+
+        Args:
+            img_scales (list[tuple]): Image scales for selection.
+
+        Returns:
+            (tuple, int): Returns a tuple ``(img_scale, scale_idx)``, \
+                where ``img_scale`` is the selected image scale and \
+                ``scale_idx`` is the selected index in the given candidates.
+        """
+
+        assert is_list_of(img_scales, tuple)
+        scale_idx = np.random.randint(len(img_scales))
+        img_scale = img_scales[scale_idx]
+        return img_scale, scale_idx
+
+    @staticmethod
+    def random_sample(img_scales):
+        """Randomly sample an img_scale when ``multiscale_mode=='range'``.
+
+        Args:
+            img_scales (list[tuple]): Image scale range for sampling.
+                There must be two tuples in img_scales, which specify the lower
+                and upper bound of image scales.
+
+        Returns:
+            (tuple, None): Returns a tuple ``(img_scale, None)``, where \
+                ``img_scale`` is sampled scale and None is just a placeholder \
+                to be consistent with :func:`random_select`.
+        """
+
+        assert is_list_of(img_scales, tuple) and len(img_scales) == 2
+        img_scale_long = [max(s) for s in img_scales]
+        img_scale_short = [min(s) for s in img_scales]
+        long_edge = np.random.randint(
+            min(img_scale_long),
+            max(img_scale_long) + 1)
+        short_edge = np.random.randint(
+            min(img_scale_short),
+            max(img_scale_short) + 1)
+        img_scale = (long_edge, short_edge)
+        return img_scale, None
+
+    @staticmethod
+    def random_sample_ratio(img_scale, ratio_range):
+        """Randomly sample an img_scale when ``ratio_range`` is specified.
+
+        A ratio is randomly sampled from the range specified by
+        ``ratio_range``, then multiplied with ``img_scale`` to generate the
+        sampled scale.
+
+        Args:
+            img_scale (tuple): Image scale base to multiply with the ratio.
+            ratio_range (tuple[float]): The minimum and maximum ratio to scale
+                the ``img_scale``.
+ + Returns: + (tuple, None): Returns a tuple ``(scale, None)``, where \ + ``scale`` is sampled ratio multiplied with ``img_scale`` and \ + None is just a placeholder to be consistent with \ + :func:`random_select`. + """ + + assert isinstance(img_scale, tuple) and len(img_scale) == 2 + min_ratio, max_ratio = ratio_range + assert min_ratio <= max_ratio + ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio + scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio) + return scale, None + + def _random_scale(self, results): + """Randomly sample an img_scale according to ``ratio_range`` and + ``multiscale_mode``. + + If ``ratio_range`` is specified, a ratio will be sampled and be + multiplied with ``img_scale``. + If multiple scales are specified by ``img_scale``, a scale will be + sampled according to ``multiscale_mode``. + Otherwise, single scale will be used. + + Args: + results (dict): Result dict from :obj:`dataset`. + + Returns: + dict: Two new keys 'scale` and 'scale_idx` are added into \ + ``results``, which would be used by subsequent pipelines. + """ + + if self.ratio_range is not None: + scale, scale_idx = self.random_sample_ratio( + self.img_scale[0], self.ratio_range) + elif len(self.img_scale) == 1: + scale, scale_idx = self.img_scale[0], 0 + elif self.multiscale_mode == 'range': + scale, scale_idx = self.random_sample(self.img_scale) + elif self.multiscale_mode == 'value': + scale, scale_idx = self.random_select(self.img_scale) + else: + raise NotImplementedError + + results['scale'] = scale + results['scale_idx'] = scale_idx + + def _resize_img(self, results): + """Resize images with ``results['scale']``.""" + for key in results.get('img_fields', ['img']): + if self.keep_ratio: + img, scale_factor = imrescale( + results[key], + results['scale'], + return_scale=True, + backend=self.backend) + # the w_scale and h_scale has minor difference + # a real fix should be done in the mmcv.imrescale in the future + new_h, new_w = img.shape[:2] + h, w = results[key].shape[:2] + w_scale = new_w / w + h_scale = new_h / h + else: + img, w_scale, h_scale = imresize( + results[key], + results['scale'], + return_scale=True, + backend=self.backend) + results[key] = img + + scale_factor = np.array([w_scale, h_scale, w_scale, h_scale], + dtype=np.float32) + results['img_shape'] = img.shape + # in case that there is no padding + results['pad_shape'] = img.shape + results['scale_factor'] = scale_factor + results['keep_ratio'] = self.keep_ratio + + def _resize_bboxes(self, results): + """Resize bounding boxes with ``results['scale_factor']``.""" + for key in results.get('bbox_fields', []): + bboxes = results[key] * results['scale_factor'] + if self.bbox_clip_border: + img_shape = results['img_shape'] + bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1]) + bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0]) + results[key] = bboxes + + def _resize_masks(self, results): + """Resize masks with ``results['scale']``""" + for key in results.get('mask_fields', []): + if results[key] is None: + continue + if self.keep_ratio: + results[key] = results[key].rescale(results['scale']) + else: + results[key] = results[key].resize(results['img_shape'][:2]) + + def _resize_seg(self, results): + """Resize semantic segmentation map with ``results['scale']``.""" + for key in results.get('seg_fields', []): + if self.keep_ratio: + gt_seg = imrescale( + results[key], + results['scale'], + interpolation='nearest', + backend=self.backend) + else: + gt_seg = imresize( + results[key], + 
results['scale'],
+                    interpolation='nearest',
+                    backend=self.backend)
+            results[key] = gt_seg
+
+    def __call__(self, results):
+        """Call function to resize images, bounding boxes, masks, semantic
+        segmentation map.
+
+        Args:
+            results (dict): Result dict from loading pipeline.
+
+        Returns:
+            dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', \
+                'keep_ratio' keys are added into result dict.
+        """
+
+        if 'scale' not in results:
+            if 'scale_factor' in results:
+                img_shape = results['img'].shape[:2]
+                scale_factor = results['scale_factor']
+                assert isinstance(scale_factor, float)
+                results['scale'] = tuple(
+                    [int(x * scale_factor) for x in img_shape][::-1])
+            else:
+                self._random_scale(results)
+        else:
+            if not self.override:
+                assert 'scale_factor' not in results, (
+                    'scale and scale_factor cannot be both set.')
+            else:
+                results.pop('scale')
+                if 'scale_factor' in results:
+                    results.pop('scale_factor')
+                self._random_scale(results)
+
+        self._resize_img(results)
+        self._resize_bboxes(results)
+        self._resize_masks(results)
+        self._resize_seg(results)
+        return results
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        repr_str += f'(img_scale={self.img_scale}, '
+        repr_str += f'multiscale_mode={self.multiscale_mode}, '
+        repr_str += f'ratio_range={self.ratio_range}, '
+        repr_str += f'keep_ratio={self.keep_ratio}, '
+        repr_str += f'bbox_clip_border={self.bbox_clip_border})'
+        return repr_str
+
+
+@PIPELINES.register_module()
+class RandomFlip:
+    """Flip the image & bbox & mask.
+
+    If the input dict contains the key "flip", then the flag will be used,
+    otherwise it will be randomly decided by a ratio specified in the init
+    method.
+
+    When random flip is enabled, ``flip_ratio``/``direction`` can either be a
+    float/string or tuple of float/string. There are 3 flip modes:
+
+    - ``flip_ratio`` is float, ``direction`` is string: the image will be
+      ``direction``ly flipped with probability of ``flip_ratio``.
+      E.g., ``flip_ratio=0.5``, ``direction='horizontal'``,
+      then the image will be horizontally flipped with probability of 0.5.
+    - ``flip_ratio`` is float, ``direction`` is list of string: the image will
+      be ``direction[i]``ly flipped with probability of
+      ``flip_ratio/len(direction)``.
+      E.g., ``flip_ratio=0.5``, ``direction=['horizontal', 'vertical']``,
+      then the image will be horizontally flipped with probability of 0.25,
+      vertically with probability of 0.25.
+    - ``flip_ratio`` is list of float, ``direction`` is list of string:
+      given ``len(flip_ratio) == len(direction)``, the image will
+      be ``direction[i]``ly flipped with probability of ``flip_ratio[i]``.
+      E.g., ``flip_ratio=[0.3, 0.5]``, ``direction=['horizontal',
+      'vertical']``, then the image will be horizontally flipped with
+      probability of 0.3, vertically with probability of 0.5.
+
+    Args:
+        flip_ratio (float | list[float], optional): The flipping probability.
+            Default: None.
+        direction(str | list[str], optional): The flipping direction. Options
+            are 'horizontal', 'vertical', 'diagonal'. Default: 'horizontal'.
+            If input is a list, the length must equal ``flip_ratio``. Each
+            element in ``flip_ratio`` indicates the flip probability of
+            corresponding direction.
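+
+    Example (a minimal sketch; the ratios are illustrative)::
+
+        >>> # horizontal flip with p=0.25, vertical flip with p=0.25,
+        >>> # and no flip with the remaining p=0.5
+        >>> flip = RandomFlip(flip_ratio=0.5,
+        ...                   direction=['horizontal', 'vertical'])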
+ """ + + def __init__(self, flip_ratio=None, direction='horizontal'): + if isinstance(flip_ratio, list): + assert is_list_of(flip_ratio, float) + assert 0 <= sum(flip_ratio) <= 1 + elif isinstance(flip_ratio, float): + assert 0 <= flip_ratio <= 1 + elif flip_ratio is None: + pass + else: + raise ValueError('flip_ratios must be None, float, ' + 'or list of float') + self.flip_ratio = flip_ratio + + valid_directions = ['horizontal', 'vertical', 'diagonal'] + if isinstance(direction, str): + assert direction in valid_directions + elif isinstance(direction, list): + assert is_list_of(direction, str) + assert set(direction).issubset(set(valid_directions)) + else: + raise ValueError('direction must be either str or list of str') + self.direction = direction + + if isinstance(flip_ratio, list): + assert len(self.flip_ratio) == len(self.direction) + + def bbox_flip(self, bboxes, img_shape, direction): + """Flip bboxes horizontally. + + Args: + bboxes (numpy.ndarray): Bounding boxes, shape (..., 4*k) + img_shape (tuple[int]): Image shape (height, width) + direction (str): Flip direction. Options are 'horizontal', + 'vertical'. + + Returns: + numpy.ndarray: Flipped bounding boxes. + """ + + assert bboxes.shape[-1] % 4 == 0 + flipped = bboxes.copy() + if direction == 'horizontal': + w = img_shape[1] + flipped[..., 0::4] = w - bboxes[..., 2::4] + flipped[..., 2::4] = w - bboxes[..., 0::4] + elif direction == 'vertical': + h = img_shape[0] + flipped[..., 1::4] = h - bboxes[..., 3::4] + flipped[..., 3::4] = h - bboxes[..., 1::4] + elif direction == 'diagonal': + w = img_shape[1] + h = img_shape[0] + flipped[..., 0::4] = w - bboxes[..., 2::4] + flipped[..., 1::4] = h - bboxes[..., 3::4] + flipped[..., 2::4] = w - bboxes[..., 0::4] + flipped[..., 3::4] = h - bboxes[..., 1::4] + else: + raise ValueError(f"Invalid flipping direction '{direction}'") + return flipped + + def __call__(self, results): + """Call function to flip bounding boxes, masks, semantic segmentation + maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Flipped results, 'flip', 'flip_direction' keys are added \ + into result dict. 
+ """ + + if 'flip' not in results: + if isinstance(self.direction, list): + # None means non-flip + direction_list = self.direction + [None] + else: + # None means non-flip + direction_list = [self.direction, None] + + if isinstance(self.flip_ratio, list): + non_flip_ratio = 1 - sum(self.flip_ratio) + flip_ratio_list = self.flip_ratio + [non_flip_ratio] + else: + non_flip_ratio = 1 - self.flip_ratio + # exclude non-flip + single_ratio = self.flip_ratio / (len(direction_list) - 1) + flip_ratio_list = [single_ratio] * (len(direction_list) - + 1) + [non_flip_ratio] + + cur_dir = np.random.choice(direction_list, p=flip_ratio_list) + + results['flip'] = cur_dir is not None + if 'flip_direction' not in results: + results['flip_direction'] = cur_dir + if results['flip']: + # flip image + for key in results.get('img_fields', ['img']): + results[key] = imflip( + results[key], direction=results['flip_direction']) + # flip bboxes + for key in results.get('bbox_fields', []): + results[key] = self.bbox_flip(results[key], + results['img_shape'], + results['flip_direction']) + # flip masks + for key in results.get('mask_fields', []): + results[key] = results[key].flip(results['flip_direction']) + + # flip segs + for key in results.get('seg_fields', []): + results[key] = imflip( + results[key], direction=results['flip_direction']) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})' + + +@PIPELINES.register_module() +class RandomShift: + """Shift the image and box given shift pixels and probability. + + Args: + shift_ratio (float): Probability of shifts. Default 0.5. + max_shift_px (int): The max pixels for shifting. Default 32. + filter_thr_px (int): The width and height threshold for filtering. + The bbox and the rest of the targets below the width and + height threshold will be filtered. Default 1. + """ + + def __init__(self, shift_ratio=0.5, max_shift_px=32, filter_thr_px=1): + assert 0 <= shift_ratio <= 1 + assert max_shift_px >= 0 + self.shift_ratio = shift_ratio + self.max_shift_px = max_shift_px + self.filter_thr_px = int(filter_thr_px) + # The key correspondence from bboxes to labels. + self.bbox2label = { + 'gt_bboxes': 'gt_labels', + 'gt_bboxes_ignore': 'gt_labels_ignore' + } + + def __call__(self, results): + """Call function to random shift images, bounding boxes. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Shift results. + """ + if random.random() < self.shift_ratio: + img_shape = results['img'].shape[:2] + + random_shift_x = random.randint(-self.max_shift_px, + self.max_shift_px) + random_shift_y = random.randint(-self.max_shift_px, + self.max_shift_px) + new_x = max(0, random_shift_x) + orig_x = max(0, -random_shift_x) + new_y = max(0, random_shift_y) + orig_y = max(0, -random_shift_y) + + # TODO: support mask and semantic segmentation maps. + for key in results.get('bbox_fields', []): + bboxes = results[key].copy() + bboxes[..., 0::2] += random_shift_x + bboxes[..., 1::2] += random_shift_y + + # clip border + bboxes[..., 0::2] = np.clip(bboxes[..., 0::2], 0, img_shape[1]) + bboxes[..., 1::2] = np.clip(bboxes[..., 1::2], 0, img_shape[0]) + + # remove invalid bboxes + bbox_w = bboxes[..., 2] - bboxes[..., 0] + bbox_h = bboxes[..., 3] - bboxes[..., 1] + valid_inds = (bbox_w > self.filter_thr_px) & ( + bbox_h > self.filter_thr_px) + # If the shift does not contain any gt-bbox area, skip this + # image. 
+ if key == 'gt_bboxes' and not valid_inds.any(): + return results + bboxes = bboxes[valid_inds] + results[key] = bboxes + + # label fields. e.g. gt_labels and gt_labels_ignore + label_key = self.bbox2label.get(key) + if label_key in results: + results[label_key] = results[label_key][valid_inds] + + for key in results.get('img_fields', ['img']): + img = results[key] + new_img = np.zeros_like(img) + img_h, img_w = img.shape[:2] + new_h = img_h - np.abs(random_shift_y) + new_w = img_w - np.abs(random_shift_x) + new_img[new_y:new_y + new_h, new_x:new_x + new_w] \ + = img[orig_y:orig_y + new_h, orig_x:orig_x + new_w] + results[key] = new_img + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(max_shift_px={self.max_shift_px}, ' + return repr_str + + +@PIPELINES.register_module() +class Pad: + """Pad the image & mask. + + There are two padding modes: (1) pad to a fixed size and (2) pad to the + minimum size that is divisible by some number. + Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor", + + Args: + size (tuple, optional): Fixed padding size. + size_divisor (int, optional): The divisor of padded size. + pad_val (float, optional): Padding value, 0 by default. + """ + + def __init__(self, size=None, size_divisor=None, pad_val=0): + self.size = size + self.size_divisor = size_divisor + self.pad_val = pad_val + # only one of size and size_divisor should be valid + assert size is not None or size_divisor is not None + assert size is None or size_divisor is None + + def _pad_img(self, results): + """Pad images according to ``self.size``.""" + for key in results.get('img_fields', ['img']): + if self.size is not None: + padded_img = impad( + results[key], shape=self.size, pad_val=self.pad_val) + elif self.size_divisor is not None: + padded_img = impad_to_multiple( + results[key], self.size_divisor, pad_val=self.pad_val) + results[key] = padded_img + results['pad_shape'] = padded_img.shape + results['pad_fixed_size'] = self.size + results['pad_size_divisor'] = self.size_divisor + + def _pad_masks(self, results): + """Pad masks according to ``results['pad_shape']``.""" + pad_shape = results['pad_shape'][:2] + for key in results.get('mask_fields', []): + results[key] = results[key].pad(pad_shape, pad_val=self.pad_val) + + def _pad_seg(self, results): + """Pad semantic segmentation map according to + ``results['pad_shape']``.""" + for key in results.get('seg_fields', []): + results[key] = impad( + results[key], shape=results['pad_shape'][:2]) + + def __call__(self, results): + """Call function to pad images, masks, semantic segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Updated result dict. + """ + self._pad_img(results) + self._pad_masks(results) + self._pad_seg(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(size={self.size}, ' + repr_str += f'size_divisor={self.size_divisor}, ' + repr_str += f'pad_val={self.pad_val})' + return repr_str + + +@PIPELINES.register_module() +class Normalize: + """Normalize the image. + + Added key is "img_norm_cfg". + + Args: + mean (sequence): Mean values of 3 channels. + std (sequence): Std values of 3 channels. + to_rgb (bool): Whether to convert the image from BGR to RGB, + default is true. 
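+
+    Example (the values are the ImageNet statistics commonly used in
+    detection configs, shown purely for illustration)::
+
+        >>> norm = Normalize(mean=[123.675, 116.28, 103.53],
+        ...                  std=[58.395, 57.12, 57.375], to_rgb=True)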
+ """ + + def __init__(self, mean, std, to_rgb=True): + self.mean = np.array(mean, dtype=np.float32) + self.std = np.array(std, dtype=np.float32) + self.to_rgb = to_rgb + + def __call__(self, results): + """Call function to normalize images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Normalized results, 'img_norm_cfg' key is added into + result dict. + """ + for key in results.get('img_fields', ['img']): + results[key] = imnormalize(results[key], self.mean, self.std, + self.to_rgb) + results['img_norm_cfg'] = dict( + mean=self.mean, std=self.std, to_rgb=self.to_rgb) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})' + return repr_str + + +@PIPELINES.register_module() +class RandomCrop: + """Random crop the image & bboxes & masks. + + The absolute `crop_size` is sampled based on `crop_type` and `image_size`, + then the cropped results are generated. + + Args: + crop_size (tuple): The relative ratio or absolute pixels of + height and width. + crop_type (str, optional): one of "relative_range", "relative", + "absolute", "absolute_range". "relative" randomly crops + (h * crop_size[0], w * crop_size[1]) part from an input of size + (h, w). "relative_range" uniformly samples relative crop size from + range [crop_size[0], 1] and [crop_size[1], 1] for height and width + respectively. "absolute" crops from an input with absolute size + (crop_size[0], crop_size[1]). "absolute_range" uniformly samples + crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w + in range [crop_size[0], min(w, crop_size[1])]. Default "absolute". + allow_negative_crop (bool, optional): Whether to allow a crop that does + not contain any bbox area. Default False. + bbox_clip_border (bool, optional): Whether clip the objects outside + the border of the image. Defaults to True. + + Note: + - If the image is smaller than the absolute crop size, return the + original image. + - The keys for bboxes, labels and masks must be aligned. That is, + `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and + `gt_bboxes_ignore` corresponds to `gt_labels_ignore` and + `gt_masks_ignore`. + - If the crop does not contain any gt-bbox region and + `allow_negative_crop` is set to False, skip this image. + """ + + def __init__(self, + crop_size, + crop_type='absolute', + allow_negative_crop=False, + bbox_clip_border=True): + if crop_type not in [ + 'relative_range', 'relative', 'absolute', 'absolute_range' + ]: + raise ValueError(f'Invalid crop_type {crop_type}.') + if crop_type in ['absolute', 'absolute_range']: + assert crop_size[0] > 0 and crop_size[1] > 0 + assert isinstance(crop_size[0], int) and isinstance( + crop_size[1], int) + else: + assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1 + self.crop_size = crop_size + self.crop_type = crop_type + self.allow_negative_crop = allow_negative_crop + self.bbox_clip_border = bbox_clip_border + # The key correspondence from bboxes to labels and masks. + self.bbox2label = { + 'gt_bboxes': 'gt_labels', + 'gt_bboxes_ignore': 'gt_labels_ignore' + } + self.bbox2mask = { + 'gt_bboxes': 'gt_masks', + 'gt_bboxes_ignore': 'gt_masks_ignore' + } + + def _crop_data(self, results, crop_size, allow_negative_crop): + """Function to randomly crop images, bounding boxes, masks, semantic + segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + crop_size (tuple): Expected absolute size after cropping, (h, w). 
+ allow_negative_crop (bool): Whether to allow a crop that does not + contain any bbox area. Default to False. + + Returns: + dict: Randomly cropped results, 'img_shape' key in result dict is + updated according to crop size. + """ + assert crop_size[0] > 0 and crop_size[1] > 0 + for key in results.get('img_fields', ['img']): + img = results[key] + margin_h = max(img.shape[0] - crop_size[0], 0) + margin_w = max(img.shape[1] - crop_size[1], 0) + offset_h = np.random.randint(0, margin_h + 1) + offset_w = np.random.randint(0, margin_w + 1) + crop_y1, crop_y2 = offset_h, offset_h + crop_size[0] + crop_x1, crop_x2 = offset_w, offset_w + crop_size[1] + + # crop the image + img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...] + img_shape = img.shape + results[key] = img + results['img_shape'] = img_shape + + # crop bboxes accordingly and clip to the image boundary + for key in results.get('bbox_fields', []): + # e.g. gt_bboxes and gt_bboxes_ignore + bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h], + dtype=np.float32) + bboxes = results[key] - bbox_offset + if self.bbox_clip_border: + bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1]) + bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0]) + valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & ( + bboxes[:, 3] > bboxes[:, 1]) + # If the crop does not contain any gt-bbox area and + # allow_negative_crop is False, skip this image. + if (key == 'gt_bboxes' and not valid_inds.any() + and not allow_negative_crop): + return None + results[key] = bboxes[valid_inds, :] + # label fields. e.g. gt_labels and gt_labels_ignore + label_key = self.bbox2label.get(key) + if label_key in results: + results[label_key] = results[label_key][valid_inds] + + # mask fields, e.g. gt_masks and gt_masks_ignore + mask_key = self.bbox2mask.get(key) + if mask_key in results: + results[mask_key] = results[mask_key][ + valid_inds.nonzero()[0]].crop( + np.asarray([crop_x1, crop_y1, crop_x2, crop_y2])) + + # crop semantic seg + for key in results.get('seg_fields', []): + results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2] + + return results + + def _get_crop_size(self, image_size): + """Randomly generates the absolute crop size based on `crop_type` and + `image_size`. + + Args: + image_size (tuple): (h, w). + + Returns: + crop_size (tuple): (crop_h, crop_w) in absolute pixels. + """ + h, w = image_size + if self.crop_type == 'absolute': + return (min(self.crop_size[0], h), min(self.crop_size[1], w)) + elif self.crop_type == 'absolute_range': + assert self.crop_size[0] <= self.crop_size[1] + crop_h = np.random.randint( + min(h, self.crop_size[0]), + min(h, self.crop_size[1]) + 1) + crop_w = np.random.randint( + min(w, self.crop_size[0]), + min(w, self.crop_size[1]) + 1) + return crop_h, crop_w + elif self.crop_type == 'relative': + crop_h, crop_w = self.crop_size + return int(h * crop_h + 0.5), int(w * crop_w + 0.5) + elif self.crop_type == 'relative_range': + crop_size = np.asarray(self.crop_size, dtype=np.float32) + crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size) + return int(h * crop_h + 0.5), int(w * crop_w + 0.5) + + def __call__(self, results): + """Call function to randomly crop images, bounding boxes, masks, + semantic segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Randomly cropped results, 'img_shape' key in result dict is + updated according to crop size. 
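+
+        Example (a sketch; sizes are toy values)::
+
+            >>> # 'absolute' mode crops a fixed 300x400 window at a random
+            >>> # offset (the image is returned as-is if it is smaller)
+            >>> crop = RandomCrop(crop_size=(300, 400), crop_type='absolute')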
+ """ + image_size = results['img'].shape[:2] + crop_size = self._get_crop_size(image_size) + results = self._crop_data(results, crop_size, self.allow_negative_crop) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(crop_size={self.crop_size}, ' + repr_str += f'crop_type={self.crop_type}, ' + repr_str += f'allow_negative_crop={self.allow_negative_crop}, ' + repr_str += f'bbox_clip_border={self.bbox_clip_border})' + return repr_str + + +@PIPELINES.register_module() +class SegRescale: + """Rescale semantic segmentation maps. + + Args: + scale_factor (float): The scale factor of the final output. + backend (str): Image rescale backend, choices are 'cv2' and 'pillow'. + These two backends generates slightly different results. Defaults + to 'cv2'. + """ + + def __init__(self, scale_factor=1, backend='cv2'): + self.scale_factor = scale_factor + self.backend = backend + + def __call__(self, results): + """Call function to scale the semantic segmentation map. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Result dict with semantic segmentation map scaled. + """ + + for key in results.get('seg_fields', []): + if self.scale_factor != 1: + results[key] = imrescale( + results[key], + self.scale_factor, + interpolation='nearest', + backend=self.backend) + return results + + def __repr__(self): + return self.__class__.__name__ + f'(scale_factor={self.scale_factor})' + + +@PIPELINES.register_module() +class PhotoMetricDistortion: + """Apply photometric distortion to image sequentially, every transformation + is applied with a probability of 0.5. The position of random contrast is in + second or second to last. + + 1. random brightness + 2. random contrast (mode 0) + 3. convert color from BGR to HSV + 4. random saturation + 5. random hue + 6. convert color from HSV to BGR + 7. random contrast (mode 1) + 8. randomly swap channels + + Args: + brightness_delta (int): delta of brightness. + contrast_range (tuple): range of contrast. + saturation_range (tuple): range of saturation. + hue_delta (int): delta of hue. + """ + + def __init__(self, + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18): + self.brightness_delta = brightness_delta + self.contrast_lower, self.contrast_upper = contrast_range + self.saturation_lower, self.saturation_upper = saturation_range + self.hue_delta = hue_delta + + def __call__(self, results): + """Call function to perform photometric distortion on images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Result dict with images distorted. 
+ """ + + if 'img_fields' in results: + assert results['img_fields'] == ['img'], \ + 'Only single img_fields is allowed' + img = results['img'] + assert img.dtype == np.float32, \ + 'PhotoMetricDistortion needs the input image of dtype np.float32,'\ + ' please set "to_float32=True" in "LoadImageFromFile" pipeline' + # random brightness + if random.randint(2): + delta = random.uniform(-self.brightness_delta, + self.brightness_delta) + img += delta + + # mode == 0 --> do random contrast first + # mode == 1 --> do random contrast last + mode = random.randint(2) + if mode == 1: + if random.randint(2): + alpha = random.uniform(self.contrast_lower, + self.contrast_upper) + img *= alpha + + # convert color from BGR to HSV + img = bgr2hsv(img) + + # random saturation + if random.randint(2): + img[..., 1] *= random.uniform(self.saturation_lower, + self.saturation_upper) + + # random hue + if random.randint(2): + img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta) + img[..., 0][img[..., 0] > 360] -= 360 + img[..., 0][img[..., 0] < 0] += 360 + + # convert color from HSV to BGR + img = hsv2bgr(img) + + # random contrast + if mode == 0: + if random.randint(2): + alpha = random.uniform(self.contrast_lower, + self.contrast_upper) + img *= alpha + + # randomly swap channels + if random.randint(2): + img = img[..., random.permutation(3)] + + results['img'] = img + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(\nbrightness_delta={self.brightness_delta},\n' + repr_str += 'contrast_range=' + repr_str += f'{(self.contrast_lower, self.contrast_upper)},\n' + repr_str += 'saturation_range=' + repr_str += f'{(self.saturation_lower, self.saturation_upper)},\n' + repr_str += f'hue_delta={self.hue_delta})' + return repr_str + + +@PIPELINES.register_module() +class Expand: + """Random expand the image & bboxes. + + Randomly place the original image on a canvas of 'ratio' x original image + size filled with mean values. The ratio is in the range of ratio_range. + + Args: + mean (tuple): mean value of dataset. + to_rgb (bool): if need to convert the order of mean to align with RGB. + ratio_range (tuple): range of expand ratio. + prob (float): probability of applying this transformation + """ + + def __init__(self, + mean=(0, 0, 0), + to_rgb=True, + ratio_range=(1, 4), + seg_ignore_label=None, + prob=0.5): + self.to_rgb = to_rgb + self.ratio_range = ratio_range + if to_rgb: + self.mean = mean[::-1] + else: + self.mean = mean + self.min_ratio, self.max_ratio = ratio_range + self.seg_ignore_label = seg_ignore_label + self.prob = prob + + def __call__(self, results): + """Call function to expand images, bounding boxes. + + Args: + results (dict): Result dict from loading pipeline. 
+ + Returns: + dict: Result dict with images, bounding boxes expanded + """ + + if random.uniform(0, 1) > self.prob: + return results + + if 'img_fields' in results: + assert results['img_fields'] == ['img'], \ + 'Only single img_fields is allowed' + img = results['img'] + + h, w, c = img.shape + ratio = random.uniform(self.min_ratio, self.max_ratio) + # speedup expand when meets large image + if np.all(self.mean == self.mean[0]): + expand_img = np.empty((int(h * ratio), int(w * ratio), c), + img.dtype) + expand_img.fill(self.mean[0]) + else: + expand_img = np.full((int(h * ratio), int(w * ratio), c), + self.mean, + dtype=img.dtype) + left = int(random.uniform(0, w * ratio - w)) + top = int(random.uniform(0, h * ratio - h)) + expand_img[top:top + h, left:left + w] = img + + results['img'] = expand_img + # expand bboxes + for key in results.get('bbox_fields', []): + results[key] = results[key] + np.tile( + (left, top), 2).astype(results[key].dtype) + + # expand masks + for key in results.get('mask_fields', []): + results[key] = results[key].expand( + int(h * ratio), int(w * ratio), top, left) + + # expand segs + for key in results.get('seg_fields', []): + gt_seg = results[key] + expand_gt_seg = np.full((int(h * ratio), int(w * ratio)), + self.seg_ignore_label, + dtype=gt_seg.dtype) + expand_gt_seg[top:top + h, left:left + w] = gt_seg + results[key] = expand_gt_seg + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, ' + repr_str += f'ratio_range={self.ratio_range}, ' + repr_str += f'seg_ignore_label={self.seg_ignore_label})' + return repr_str + + +@PIPELINES.register_module() +class MinIoURandomCrop: + """Random crop the image & bboxes, the cropped patches have minimum IoU + requirement with original image & bboxes, the IoU threshold is randomly + selected from min_ious. + + Args: + min_ious (tuple): minimum IoU threshold for all intersections with + bounding boxes + min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w, + where a >= min_crop_size). + bbox_clip_border (bool, optional): Whether clip the objects outside + the border of the image. Defaults to True. + + Note: + The keys for bboxes, labels and masks should be paired. That is, \ + `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and \ + `gt_bboxes_ignore` to `gt_labels_ignore` and `gt_masks_ignore`. + """ + + def __init__(self, + min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), + min_crop_size=0.3, + bbox_clip_border=True): + # 1: return ori img + self.min_ious = min_ious + self.sample_mode = (1, *min_ious, 0) + self.min_crop_size = min_crop_size + self.bbox_clip_border = bbox_clip_border + self.bbox2label = { + 'gt_bboxes': 'gt_labels', + 'gt_bboxes_ignore': 'gt_labels_ignore' + } + self.bbox2mask = { + 'gt_bboxes': 'gt_masks', + 'gt_bboxes_ignore': 'gt_masks_ignore' + } + + def __call__(self, results): + """Call function to crop images and bounding boxes with minimum IoU + constraint. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Result dict with images and bounding boxes cropped, \ + 'img_shape' key is updated. 
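+
+        Example (a sketch; the thresholds shown are the defaults)::
+
+            >>> # each attempt draws a mode from (1, 0.1, 0.3, 0.5, 0.7,
+            >>> # 0.9, 0): 1 keeps the original image, 0 drops the IoU
+            >>> # constraint on the sampled patch
+            >>> crop = MinIoURandomCrop(min_ious=(0.1, 0.3, 0.5, 0.7, 0.9))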
+ """ + + if 'img_fields' in results: + assert results['img_fields'] == ['img'], \ + 'Only single img_fields is allowed' + img = results['img'] + assert 'bbox_fields' in results + boxes = [results[key] for key in results['bbox_fields']] + boxes = np.concatenate(boxes, 0) + h, w, c = img.shape + while True: + mode = random.choice(self.sample_mode) + self.mode = mode + if mode == 1: + return results + + min_iou = mode + for i in range(50): + new_w = random.uniform(self.min_crop_size * w, w) + new_h = random.uniform(self.min_crop_size * h, h) + + # h / w in [0.5, 2] + if new_h / new_w < 0.5 or new_h / new_w > 2: + continue + + left = random.uniform(w - new_w) + top = random.uniform(h - new_h) + + patch = np.array( + (int(left), int(top), int(left + new_w), int(top + new_h))) + # Line or point crop is not allowed + if patch[2] == patch[0] or patch[3] == patch[1]: + continue + overlaps = bbox_overlaps( + patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1) + if len(overlaps) > 0 and overlaps.min() < min_iou: + continue + + # center of boxes should inside the crop img + # only adjust boxes and instance masks when the gt is not empty + if len(overlaps) > 0: + # adjust boxes + def is_center_of_bboxes_in_patch(boxes, patch): + center = (boxes[:, :2] + boxes[:, 2:]) / 2 + mask = ((center[:, 0] > patch[0]) * + (center[:, 1] > patch[1]) * + (center[:, 0] < patch[2]) * + (center[:, 1] < patch[3])) + return mask + + mask = is_center_of_bboxes_in_patch(boxes, patch) + if not mask.any(): + continue + for key in results.get('bbox_fields', []): + boxes = results[key].copy() + mask = is_center_of_bboxes_in_patch(boxes, patch) + boxes = boxes[mask] + if self.bbox_clip_border: + boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:]) + boxes[:, :2] = boxes[:, :2].clip(min=patch[:2]) + boxes -= np.tile(patch[:2], 2) + + results[key] = boxes + # labels + label_key = self.bbox2label.get(key) + if label_key in results: + results[label_key] = results[label_key][mask] + + # mask fields + mask_key = self.bbox2mask.get(key) + if mask_key in results: + results[mask_key] = results[mask_key][ + mask.nonzero()[0]].crop(patch) + # adjust the img no matter whether the gt is empty before crop + img = img[patch[1]:patch[3], patch[0]:patch[2]] + results['img'] = img + results['img_shape'] = img.shape + + # seg fields + for key in results.get('seg_fields', []): + results[key] = results[key][patch[1]:patch[3], + patch[0]:patch[2]] + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(min_ious={self.min_ious}, ' + repr_str += f'min_crop_size={self.min_crop_size}, ' + repr_str += f'bbox_clip_border={self.bbox_clip_border})' + return repr_str + + +@PIPELINES.register_module() +class Corrupt: + """Corruption augmentation. + + Corruption transforms implemented based on + `imagecorruptions `_. + + Args: + corruption (str): Corruption name. + severity (int, optional): The severity of corruption. Default: 1. + """ + + def __init__(self, corruption, severity=1): + self.corruption = corruption + self.severity = severity + + def __call__(self, results): + """Call function to corrupt image. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Result dict with images corrupted. 
+ """ + + if corrupt is None: + raise RuntimeError('imagecorruptions is not installed') + if 'img_fields' in results: + assert results['img_fields'] == ['img'], \ + 'Only single img_fields is allowed' + results['img'] = corrupt( + results['img'].astype(np.uint8), + corruption_name=self.corruption, + severity=self.severity) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(corruption={self.corruption}, ' + repr_str += f'severity={self.severity})' + return repr_str + + +@PIPELINES.register_module() +class Albu: + """Albumentation augmentation. + + Adds custom transformations from Albumentations library. + Please, visit `https://albumentations.readthedocs.io` + to get more information. + + An example of ``transforms`` is as followed: + + .. code-block:: + + [ + dict( + type='ShiftScaleRotate', + shift_limit=0.0625, + scale_limit=0.0, + rotate_limit=0, + interpolation=1, + p=0.5), + dict( + type='RandomBrightnessContrast', + brightness_limit=[0.1, 0.3], + contrast_limit=[0.1, 0.3], + p=0.2), + dict(type='ChannelShuffle', p=0.1), + dict( + type='OneOf', + transforms=[ + dict(type='Blur', blur_limit=3, p=1.0), + dict(type='MedianBlur', blur_limit=3, p=1.0) + ], + p=0.1), + ] + + Args: + transforms (list[dict]): A list of albu transformations + bbox_params (dict): Bbox_params for albumentation `Compose` + keymap (dict): Contains {'input key':'albumentation-style key'} + skip_img_without_anno (bool): Whether to skip the image if no ann left + after aug + """ + + def __init__(self, + transforms, + bbox_params=None, + keymap=None, + update_pad_shape=False, + skip_img_without_anno=False): + if Compose is None: + raise RuntimeError('albumentations is not installed') + + # Args will be modified later, copying it will be safer + transforms = copy.deepcopy(transforms) + if bbox_params is not None: + bbox_params = copy.deepcopy(bbox_params) + if keymap is not None: + keymap = copy.deepcopy(keymap) + self.transforms = transforms + self.filter_lost_elements = False + self.update_pad_shape = update_pad_shape + self.skip_img_without_anno = skip_img_without_anno + + # A simple workaround to remove masks without boxes + if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params + and 'filter_lost_elements' in bbox_params): + self.filter_lost_elements = True + self.origin_label_fields = bbox_params['label_fields'] + bbox_params['label_fields'] = ['idx_mapper'] + del bbox_params['filter_lost_elements'] + + self.bbox_params = ( + self.albu_builder(bbox_params) if bbox_params else None) + self.aug = Compose([self.albu_builder(t) for t in self.transforms], + bbox_params=self.bbox_params) + + if not keymap: + self.keymap_to_albu = { + 'img': 'image', + 'gt_masks': 'masks', + 'gt_bboxes': 'bboxes' + } + else: + self.keymap_to_albu = keymap + self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()} + + def albu_builder(self, cfg): + """Import a module from albumentations. + + It inherits some of :func:`build_from_cfg` logic. + + Args: + cfg (dict): Config dict. It should at least contain the key "type". + + Returns: + obj: The constructed object. 
+ """ + + assert isinstance(cfg, dict) and 'type' in cfg + args = cfg.copy() + + obj_type = args.pop('type') + if is_str(obj_type): + if albumentations is None: + raise RuntimeError('albumentations is not installed') + obj_cls = getattr(albumentations, obj_type) + elif inspect.isclass(obj_type): + obj_cls = obj_type + else: + raise TypeError( + f'type must be a str or valid type, but got {type(obj_type)}') + + if 'transforms' in args: + args['transforms'] = [ + self.albu_builder(transform) + for transform in args['transforms'] + ] + + return obj_cls(**args) + + @staticmethod + def mapper(d, keymap): + """Dictionary mapper. Renames keys according to keymap provided. + + Args: + d (dict): old dict + keymap (dict): {'old_key':'new_key'} + Returns: + dict: new dict. + """ + + updated_dict = {} + for k, v in zip(d.keys(), d.values()): + new_k = keymap.get(k, k) + updated_dict[new_k] = d[k] + return updated_dict + + def __call__(self, results): + # dict to albumentations format + results = self.mapper(results, self.keymap_to_albu) + # TODO: add bbox_fields + if 'bboxes' in results: + # to list of boxes + if isinstance(results['bboxes'], np.ndarray): + results['bboxes'] = [x for x in results['bboxes']] + # add pseudo-field for filtration + if self.filter_lost_elements: + results['idx_mapper'] = np.arange(len(results['bboxes'])) + + # TODO: Support mask structure in albu + if 'masks' in results: + if isinstance(results['masks'], PolygonMasks): + raise NotImplementedError( + 'Albu only supports BitMap masks now') + ori_masks = results['masks'] + if albumentations.__version__ < '0.5': + results['masks'] = results['masks'].masks + else: + results['masks'] = [mask for mask in results['masks'].masks] + + results = self.aug(**results) + + if 'bboxes' in results: + if isinstance(results['bboxes'], list): + results['bboxes'] = np.array( + results['bboxes'], dtype=np.float32) + results['bboxes'] = results['bboxes'].reshape(-1, 4) + + # filter label_fields + if self.filter_lost_elements: + + for label in self.origin_label_fields: + results[label] = np.array( + [results[label][i] for i in results['idx_mapper']]) + if 'masks' in results: + results['masks'] = np.array( + [results['masks'][i] for i in results['idx_mapper']]) + results['masks'] = ori_masks.__class__( + results['masks'], results['image'].shape[0], + results['image'].shape[1]) + + if (not len(results['idx_mapper']) + and self.skip_img_without_anno): + return None + + if 'gt_labels' in results: + if isinstance(results['gt_labels'], list): + results['gt_labels'] = np.array(results['gt_labels']) + results['gt_labels'] = results['gt_labels'].astype(np.int64) + + # back to the original format + results = self.mapper(results, self.keymap_back) + + # update final shape + if self.update_pad_shape: + results['pad_shape'] = results['img'].shape + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + f'(transforms={self.transforms})' + return repr_str + + +@PIPELINES.register_module() +class RandomCenterCropPad: + """Random center crop and random around padding for CornerNet. + + This operation generates randomly cropped image from the original image and + pads it simultaneously. Different from :class:`RandomCrop`, the output + shape may not equal to ``crop_size`` strictly. We choose a random value + from ``ratios`` and the output shape could be larger or smaller than + ``crop_size``. The padding operation is also different from :class:`Pad`, + here we use around padding instead of right-bottom padding. 
+
+    The relation between output image (padding image) and original image:
+
+    .. code:: text
+
+                          output image
+
+               +----------------------------+
+               |          padded area       |
+        +------|----------------------------|----------+
+        |      |         cropped area       |          |
+        |      |      +---------------+     |          |
+        |      |      |    .  center  |     |          | original image
+        |      |      |     range     |     |          |
+        |      |      +---------------+     |          |
+        +------|----------------------------|----------+
+               |          padded area       |
+               +----------------------------+
+
+    There are 5 main areas in the figure:
+
+    - output image: output image of this operation, also called the padding
+      image in the following instructions.
+    - original image: input image of this operation.
+    - padded area: the non-intersecting area of the output image and the
+      original image.
+    - cropped area: the overlap of the output image and the original image.
+    - center range: a smaller area from which the random center is chosen.
+      The center range is computed from ``border`` and the original image's
+      shape, to keep the random center away from the original image's border.
+
+    Also, this operation acts differently in train and test modes; the
+    summary pipeline is listed below.
+
+    Train pipeline:
+
+    1. Choose a ``random_ratio`` from ``ratios``; the shape of the padding
+       image will be ``random_ratio * crop_size``.
+    2. Choose a ``random_center`` in the center range.
+    3. Generate the padding image with its center matching ``random_center``.
+    4. Initialize the padding image with pixel values equal to ``mean``.
+    5. Copy the cropped area to the padding image.
+    6. Refine annotations.
+
+    Test pipeline:
+
+    1. Compute the output shape according to ``test_pad_mode``.
+    2. Generate the padding image with its center matching the original
+       image center.
+    3. Initialize the padding image with pixel values equal to ``mean``.
+    4. Copy the ``cropped area`` to the padding image.
+
+    Args:
+        crop_size (tuple | None): Expected size after crop; the final size
+            will be computed according to the ratio. Requires (h, w) in train
+            mode, and None in test mode.
+        ratios (tuple): Randomly select a ratio from the tuple and crop the
+            image to (crop_size[0] * ratio) * (crop_size[1] * ratio).
+            Only available in train mode.
+        border (int): Max distance from the center-selection area to the
+            image border. Only available in train mode.
+        mean (sequence): Mean values of 3 channels.
+        std (sequence): Std values of 3 channels.
+        to_rgb (bool): Whether to convert the image from BGR to RGB.
+        test_mode (bool): Whether to involve random variables in the
+            transform. In train mode, crop_size is fixed, and center coords
+            and ratio are randomly selected from predefined lists. In test
+            mode, crop_size is the image's original shape, and center coords
+            and ratio are fixed.
+        test_pad_mode (tuple): Padding method and padding shape value, only
+            available in test mode. Default is using 'logical_or' with
+            127 as the padding shape value.
+
+            - 'logical_or': final_shape = input_shape | padding_shape_value
+            - 'size_divisor': final_shape = int(
+              ceil(input_shape / padding_shape_value) * padding_shape_value)
+        test_pad_add_pix (int): Extra padding pixel in test mode. Default 0.
+        bbox_clip_border (bool, optional): Whether to clip the objects outside
+            the border of the image. Defaults to True.
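+
+    Example (illustrative train-mode config; the values are assumptions, not
+    recommendations):
+
+        >>> aug = RandomCenterCropPad(
+        ...     crop_size=(512, 512),
+        ...     ratios=(0.9, 1.0, 1.1),
+        ...     mean=[0, 0, 0],
+        ...     std=[1, 1, 1],
+        ...     to_rgb=True,
+        ...     test_pad_mode=None)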
+ """ + + def __init__(self, + crop_size=None, + ratios=(0.9, 1.0, 1.1), + border=128, + mean=None, + std=None, + to_rgb=None, + test_mode=False, + test_pad_mode=('logical_or', 127), + test_pad_add_pix=0, + bbox_clip_border=True): + if test_mode: + assert crop_size is None, 'crop_size must be None in test mode' + assert ratios is None, 'ratios must be None in test mode' + assert border is None, 'border must be None in test mode' + assert isinstance(test_pad_mode, (list, tuple)) + assert test_pad_mode[0] in ['logical_or', 'size_divisor'] + else: + assert isinstance(crop_size, (list, tuple)) + assert crop_size[0] > 0 and crop_size[1] > 0, ( + 'crop_size must > 0 in train mode') + assert isinstance(ratios, (list, tuple)) + assert test_pad_mode is None, ( + 'test_pad_mode must be None in train mode') + + self.crop_size = crop_size + self.ratios = ratios + self.border = border + # We do not set default value to mean, std and to_rgb because these + # hyper-parameters are easy to forget but could affect the performance. + # Please use the same setting as Normalize for performance assurance. + assert mean is not None and std is not None and to_rgb is not None + self.to_rgb = to_rgb + self.input_mean = mean + self.input_std = std + if to_rgb: + self.mean = mean[::-1] + self.std = std[::-1] + else: + self.mean = mean + self.std = std + self.test_mode = test_mode + self.test_pad_mode = test_pad_mode + self.test_pad_add_pix = test_pad_add_pix + self.bbox_clip_border = bbox_clip_border + + def _get_border(self, border, size): + """Get final border for the target size. + + This function generates a ``final_border`` according to image's shape. + The area between ``final_border`` and ``size - final_border`` is the + ``center range``. We randomly choose center from the ``center range`` + to avoid our random center is too close to original image's border. + Also ``center range`` should be larger than 0. + + Args: + border (int): The initial border, default is 128. + size (int): The width or height of original image. + Returns: + int: The final border. + """ + k = 2 * border / size + i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k))) + return border // i + + def _filter_boxes(self, patch, boxes): + """Check whether the center of each box is in the patch. + + Args: + patch (list[int]): The cropped area, [left, top, right, bottom]. + boxes (numpy array, (N x 4)): Ground truth boxes. + + Returns: + mask (numpy array, (N,)): Each box is inside or outside the patch. + """ + center = (boxes[:, :2] + boxes[:, 2:]) / 2 + mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * ( + center[:, 0] < patch[2]) * ( + center[:, 1] < patch[3]) + return mask + + def _crop_image_and_paste(self, image, center, size): + """Crop image with a given center and size, then paste the cropped + image to a blank image with two centers align. + + This function is equivalent to generating a blank image with ``size`` + as its shape. Then cover it on the original image with two centers ( + the center of blank image and the random center of original image) + aligned. The overlap area is paste from the original image and the + outside area is filled with ``mean pixel``. + + Args: + image (np array, H x W x C): Original image. + center (list[int]): Target crop center coord. + size (list[int]): Target crop size. [target_h, target_w] + + Returns: + cropped_img (np array, target_h x target_w x C): Cropped image. 
+            border (np array, 4): The distance of four borders of
+                ``cropped_img`` to the original image area, [top, bottom,
+                left, right]
+            patch (list[int]): The cropped area, [left, top, right, bottom].
+        """
+        center_y, center_x = center
+        target_h, target_w = size
+        img_h, img_w, img_c = image.shape
+
+        x0 = max(0, center_x - target_w // 2)
+        x1 = min(center_x + target_w // 2, img_w)
+        y0 = max(0, center_y - target_h // 2)
+        y1 = min(center_y + target_h // 2, img_h)
+        patch = np.array((int(x0), int(y0), int(x1), int(y1)))
+
+        left, right = center_x - x0, x1 - center_x
+        top, bottom = center_y - y0, y1 - center_y
+
+        cropped_center_y, cropped_center_x = target_h // 2, target_w // 2
+        cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype)
+        for i in range(img_c):
+            cropped_img[:, :, i] += self.mean[i]
+        y_slice = slice(cropped_center_y - top, cropped_center_y + bottom)
+        x_slice = slice(cropped_center_x - left, cropped_center_x + right)
+        cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]
+
+        border = np.array([
+            cropped_center_y - top, cropped_center_y + bottom,
+            cropped_center_x - left, cropped_center_x + right
+        ],
+                          dtype=np.float32)
+
+        return cropped_img, border, patch
+
+    def _train_aug(self, results):
+        """Randomly crop and around-pad the original image.
+
+        Args:
+            results (dict): Image information in the augmentation pipeline.
+
+        Returns:
+            results (dict): The updated dict.
+        """
+        img = results['img']
+        h, w, c = img.shape
+        boxes = results['gt_bboxes']
+        while True:
+            scale = random.choice(self.ratios)
+            new_h = int(self.crop_size[0] * scale)
+            new_w = int(self.crop_size[1] * scale)
+            h_border = self._get_border(self.border, h)
+            w_border = self._get_border(self.border, w)
+
+            for i in range(50):
+                center_x = random.randint(low=w_border, high=w - w_border)
+                center_y = random.randint(low=h_border, high=h - h_border)
+
+                cropped_img, border, patch = self._crop_image_and_paste(
+                    img, [center_y, center_x], [new_h, new_w])
+
+                mask = self._filter_boxes(patch, boxes)
+                # if the image does not have a valid bbox, any crop patch is
+                # valid.
+                if not mask.any() and len(boxes) > 0:
+                    continue
+
+                results['img'] = cropped_img
+                results['img_shape'] = cropped_img.shape
+                results['pad_shape'] = cropped_img.shape
+
+                x0, y0, x1, y1 = patch
+
+                left_w, top_h = center_x - x0, center_y - y0
+                cropped_center_x, cropped_center_y = new_w // 2, new_h // 2
+
+                # crop bboxes accordingly and clip to the image boundary
+                for key in results.get('bbox_fields', []):
+                    mask = self._filter_boxes(patch, results[key])
+                    bboxes = results[key][mask]
+                    bboxes[:, 0:4:2] += cropped_center_x - left_w - x0
+                    bboxes[:, 1:4:2] += cropped_center_y - top_h - y0
+                    if self.bbox_clip_border:
+                        bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w)
+                        bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)
+                    keep = (bboxes[:, 2] > bboxes[:, 0]) & (
+                        bboxes[:, 3] > bboxes[:, 1])
+                    bboxes = bboxes[keep]
+                    results[key] = bboxes
+                    if key in ['gt_bboxes']:
+                        if 'gt_labels' in results:
+                            labels = results['gt_labels'][mask]
+                            labels = labels[keep]
+                            results['gt_labels'] = labels
+                        if 'gt_masks' in results:
+                            raise NotImplementedError(
+                                'RandomCenterCropPad only supports bbox.')
+
+                # crop semantic seg
+                for key in results.get('seg_fields', []):
+                    raise NotImplementedError(
+                        'RandomCenterCropPad only supports bbox.')
+                return results
+
+    def _test_aug(self, results):
+        """Around-pad the original image without cropping.
+
+        The padding mode and value are from ``test_pad_mode``.
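+        For example, with the default ``test_pad_mode=('logical_or', 127)``
+        a 480 x 640 input is padded to ``(480 | 127, 640 | 127) = (511, 767)``
+        before ``test_pad_add_pix`` is added.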
+
+        Args:
+            results (dict): Image information in the augmentation pipeline.
+
+        Returns:
+            results (dict): The updated dict.
+        """
+        img = results['img']
+        h, w, c = img.shape
+        results['img_shape'] = img.shape
+        if self.test_pad_mode[0] in ['logical_or']:
+            # self.test_pad_add_pix is only used for centernet
+            target_h = (h | self.test_pad_mode[1]) + self.test_pad_add_pix
+            target_w = (w | self.test_pad_mode[1]) + self.test_pad_add_pix
+        elif self.test_pad_mode[0] in ['size_divisor']:
+            divisor = self.test_pad_mode[1]
+            target_h = int(np.ceil(h / divisor)) * divisor
+            target_w = int(np.ceil(w / divisor)) * divisor
+        else:
+            raise NotImplementedError(
+                'RandomCenterCropPad only supports two testing pad modes: '
+                'logical_or and size_divisor.')
+
+        cropped_img, border, _ = self._crop_image_and_paste(
+            img, [h // 2, w // 2], [target_h, target_w])
+        results['img'] = cropped_img
+        results['pad_shape'] = cropped_img.shape
+        results['border'] = border
+        return results
+
+    def __call__(self, results):
+        img = results['img']
+        assert img.dtype == np.float32, (
+            'RandomCenterCropPad needs the input image of dtype np.float32,'
+            ' please set "to_float32=True" in "LoadImageFromFile" pipeline')
+        h, w, c = img.shape
+        assert c == len(self.mean)
+        if self.test_mode:
+            return self._test_aug(results)
+        else:
+            return self._train_aug(results)
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        repr_str += f'(crop_size={self.crop_size}, '
+        repr_str += f'ratios={self.ratios}, '
+        repr_str += f'border={self.border}, '
+        repr_str += f'mean={self.input_mean}, '
+        repr_str += f'std={self.input_std}, '
+        repr_str += f'to_rgb={self.to_rgb}, '
+        repr_str += f'test_mode={self.test_mode}, '
+        repr_str += f'test_pad_mode={self.test_pad_mode}, '
+        repr_str += f'bbox_clip_border={self.bbox_clip_border})'
+        return repr_str
+
+
+@PIPELINES.register_module()
+class CutOut:
+    """CutOut operation.
+
+    Randomly drop some regions of the image, as used in
+    `Cutout <https://arxiv.org/abs/1708.04552>`_.
+
+    Args:
+        n_holes (int | tuple[int, int]): Number of regions to be dropped.
+            If it is given as a list, the number of holes will be randomly
+            selected from the closed interval [`n_holes[0]`, `n_holes[1]`].
+        cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate
+            shape of dropped regions. It can be `tuple[int, int]` to use a
+            fixed cutout shape, or `list[tuple[int, int]]` to randomly choose
+            a shape from the list.
+        cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The
+            candidate ratio of dropped regions. It can be
+            `tuple[float, float]` to use a fixed ratio or
+            `list[tuple[float, float]]` to randomly choose a ratio from the
+            list. Please note that `cutout_shape` and `cutout_ratio` cannot
+            be both given at the same time.
+        fill_in (tuple[float, float, float] | tuple[int, int, int]): The value
+            of pixel to fill in the dropped regions. Default: (0, 0, 0).
+    """
+
+    def __init__(self,
+                 n_holes,
+                 cutout_shape=None,
+                 cutout_ratio=None,
+                 fill_in=(0, 0, 0)):
+
+        assert (cutout_shape is None) ^ (cutout_ratio is None), \
+            'Either cutout_shape or cutout_ratio should be specified.'
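+        # Exactly one of `cutout_shape` / `cutout_ratio` may be given: the
+        # XOR assert above enforces this, and the check below requires the
+        # chosen one to be a list or tuple.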
+        assert (isinstance(cutout_shape, (list, tuple))
+                or isinstance(cutout_ratio, (list, tuple)))
+        if isinstance(n_holes, tuple):
+            assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1]
+        else:
+            n_holes = (n_holes, n_holes)
+        self.n_holes = n_holes
+        self.fill_in = fill_in
+        self.with_ratio = cutout_ratio is not None
+        self.candidates = cutout_ratio if self.with_ratio else cutout_shape
+        if not isinstance(self.candidates, list):
+            self.candidates = [self.candidates]
+
+    def __call__(self, results):
+        """Call function to drop some regions of image."""
+        h, w, c = results['img'].shape
+        n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)
+        for _ in range(n_holes):
+            x1 = np.random.randint(0, w)
+            y1 = np.random.randint(0, h)
+            index = np.random.randint(0, len(self.candidates))
+            if not self.with_ratio:
+                cutout_w, cutout_h = self.candidates[index]
+            else:
+                cutout_w = int(self.candidates[index][0] * w)
+                cutout_h = int(self.candidates[index][1] * h)
+
+            x2 = np.clip(x1 + cutout_w, 0, w)
+            y2 = np.clip(y1 + cutout_h, 0, h)
+            results['img'][y1:y2, x1:x2, :] = self.fill_in
+
+        return results
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        repr_str += f'(n_holes={self.n_holes}, '
+        repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio
+                     else f'cutout_shape={self.candidates}, ')
+        repr_str += f'fill_in={self.fill_in})'
+        return repr_str
diff --git a/mmcv/datasets/pipelines/transforms_3d.py b/mmcv/datasets/pipelines/transforms_3d.py
new file mode 100644
index 0000000..aa7ab25
--- /dev/null
+++ b/mmcv/datasets/pipelines/transforms_3d.py
@@ -0,0 +1,2042 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import numpy as np
+from numpy import random
+import warnings
+from mmcv import is_tuple_of
+from mmcv.utils import build_from_cfg
+from mmcv.parallel import DataContainer as DC
+
+from mmcv.core.voxel.voxel_generator import VoxelGenerator
+from mmcv.core.bbox.structures.cam_box3d import CameraInstance3DBoxes
+from mmcv.core.bbox.structures.depth_box3d import DepthInstance3DBoxes
+from mmcv.core.bbox.structures.lidar_box3d import LiDARInstance3DBoxes
+from mmcv.core.bbox import box_np_ops
+from mmcv.datasets.builder import PIPELINES
+from mmcv.datasets.pipelines.transforms import RandomFlip
+from mmcv.image import impad, impad_to_multiple, imnormalize, imresize, bgr2hsv, hsv2bgr
+from ..builder import OBJECTSAMPLERS
+from .data_augment_utils import noise_per_object_v3_
+
+
+@PIPELINES.register_module()
+class RandomDropPointsColor(object):
+    r"""Randomly set the color of points to all zeros.
+
+    Once this transform is executed, all the points' color will be dropped.
+    Refer to `PAConv <https://arxiv.org/abs/2103.14635>`_ for more details.
+
+    Args:
+        drop_ratio (float): The probability of dropping point colors.
+            Defaults to 0.2.
+    """
+
+    def __init__(self, drop_ratio=0.2):
+        assert isinstance(drop_ratio, (int, float)) and 0 <= drop_ratio <= 1, \
+            f'invalid drop_ratio value {drop_ratio}'
+        self.drop_ratio = drop_ratio
+
+    def __call__(self, input_dict):
+        """Call function to drop point colors.
+
+        Args:
+            input_dict (dict): Result dict from loading pipeline.
+
+        Returns:
+            dict: Results after color dropping, \
+                'points' key is updated in the result dict.
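+
+        Example (illustrative; assumes the loaded points carry a 'color'
+        attribute):
+
+            >>> drop_color = RandomDropPointsColor(drop_ratio=0.2)
+            >>> input_dict = drop_color(input_dict)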
+ """ + points = input_dict['points'] + assert points.attribute_dims is not None and \ + 'color' in points.attribute_dims, \ + 'Expect points have color attribute' + + # this if-expression is a bit strange + # `RandomDropPointsColor` is used in training 3D segmentor PAConv + # we discovered in our experiments that, using + # `if np.random.rand() > 1.0 - self.drop_ratio` consistently leads to + # better results than using `if np.random.rand() < self.drop_ratio` + # so we keep this hack in our codebase + if np.random.rand() > 1.0 - self.drop_ratio: + points.color = points.color * 0.0 + return input_dict + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(drop_ratio={self.drop_ratio})' + return repr_str + + +@PIPELINES.register_module() +class RandomFlip3D(RandomFlip): + """Flip the points & bbox. + + If the input dict contains the key "flip", then the flag will be used, + otherwise it will be randomly decided by a ratio specified in the init + method. + + Args: + sync_2d (bool, optional): Whether to apply flip according to the 2D + images. If True, it will apply the same flip as that to 2D images. + If False, it will decide whether to flip randomly and independently + to that of 2D images. Defaults to True. + flip_ratio_bev_horizontal (float, optional): The flipping probability + in horizontal direction. Defaults to 0.0. + flip_ratio_bev_vertical (float, optional): The flipping probability + in vertical direction. Defaults to 0.0. + """ + + def __init__(self, + sync_2d=True, + flip_ratio_bev_horizontal=0.0, + flip_ratio_bev_vertical=0.0, + **kwargs): + super(RandomFlip3D, self).__init__( + flip_ratio=flip_ratio_bev_horizontal, **kwargs) + self.sync_2d = sync_2d + self.flip_ratio_bev_vertical = flip_ratio_bev_vertical + if flip_ratio_bev_horizontal is not None: + assert isinstance( + flip_ratio_bev_horizontal, + (int, float)) and 0 <= flip_ratio_bev_horizontal <= 1 + if flip_ratio_bev_vertical is not None: + assert isinstance( + flip_ratio_bev_vertical, + (int, float)) and 0 <= flip_ratio_bev_vertical <= 1 + + def random_flip_data_3d(self, input_dict, direction='horizontal'): + """Flip 3D data randomly. + + Args: + input_dict (dict): Result dict from loading pipeline. + direction (str): Flip direction. Default: horizontal. + + Returns: + dict: Flipped results, 'points', 'bbox3d_fields' keys are \ + updated in the result dict. 
+ """ + assert direction in ['horizontal', 'vertical'] + if len(input_dict['bbox3d_fields']) == 0: # test mode + input_dict['bbox3d_fields'].append('empty_box3d') + input_dict['empty_box3d'] = input_dict['box_type_3d']( + np.array([], dtype=np.float32)) + assert len(input_dict['bbox3d_fields']) == 1 + for key in input_dict['bbox3d_fields']: + if 'points' in input_dict: + input_dict['points'] = input_dict[key].flip( + direction, points=input_dict['points']) + else: + input_dict[key].flip(direction) + if 'centers2d' in input_dict: + assert self.sync_2d is True and direction == 'horizontal', \ + 'Only support sync_2d=True and horizontal flip with images' + w = input_dict['ori_shape'][1] + input_dict['centers2d'][..., 0] = \ + w - input_dict['centers2d'][..., 0] + # need to modify the horizontal position of camera center + # along u-axis in the image (flip like centers2d) + # ['cam2img'][0][2] = c_u + # see more details and examples at + # https://github.com/open-mmlab/mmcvection3d/pull/744 + input_dict['cam2img'][0][2] = w - input_dict['cam2img'][0][2] + + def __call__(self, input_dict): + """Call function to flip points, values in the ``bbox3d_fields`` and \ + also flip 2D image and its annotations. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Flipped results, 'flip', 'flip_direction', \ + 'pcd_horizontal_flip' and 'pcd_vertical_flip' keys are added \ + into result dict. + """ + # filp 2D image and its annotations + super(RandomFlip3D, self).__call__(input_dict) + + if self.sync_2d: + input_dict['pcd_horizontal_flip'] = input_dict['flip'] + input_dict['pcd_vertical_flip'] = False + else: + if 'pcd_horizontal_flip' not in input_dict: + flip_horizontal = True if np.random.rand( + ) < self.flip_ratio else False + input_dict['pcd_horizontal_flip'] = flip_horizontal + if 'pcd_vertical_flip' not in input_dict: + flip_vertical = True if np.random.rand( + ) < self.flip_ratio_bev_vertical else False + input_dict['pcd_vertical_flip'] = flip_vertical + + if 'transformation_3d_flow' not in input_dict: + input_dict['transformation_3d_flow'] = [] + + if input_dict['pcd_horizontal_flip']: + self.random_flip_data_3d(input_dict, 'horizontal') + input_dict['transformation_3d_flow'].extend(['HF']) + if input_dict['pcd_vertical_flip']: + self.random_flip_data_3d(input_dict, 'vertical') + input_dict['transformation_3d_flow'].extend(['VF']) + return input_dict + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(sync_2d={self.sync_2d},' + repr_str += f' flip_ratio_bev_vertical={self.flip_ratio_bev_vertical})' + return repr_str + + +@PIPELINES.register_module() +class RandomJitterPoints(object): + """Randomly jitter point coordinates. + + Different from the global translation in ``GlobalRotScaleTrans``, here we \ + apply different noises to each point in a scene. + + Args: + jitter_std (list[float]): The standard deviation of jittering noise. + This applies random noise to all points in a 3D scene, which is \ + sampled from a gaussian distribution whose standard deviation is \ + set by ``jitter_std``. Defaults to [0.01, 0.01, 0.01] + clip_range (list[float] | None): Clip the randomly generated jitter \ + noise into this range. If None is given, don't perform clipping. + Defaults to [-0.05, 0.05] + + Note: + This transform should only be used in point cloud segmentation tasks \ + because we don't transform ground-truth bboxes accordingly. 
+        For a similar transform in detection tasks, please refer to
+        `ObjectNoise`.
+    """
+
+    def __init__(self,
+                 jitter_std=[0.01, 0.01, 0.01],
+                 clip_range=[-0.05, 0.05]):
+        seq_types = (list, tuple, np.ndarray)
+        if not isinstance(jitter_std, seq_types):
+            assert isinstance(jitter_std, (int, float)), \
+                f'unsupported jitter_std type {type(jitter_std)}'
+            jitter_std = [jitter_std, jitter_std, jitter_std]
+        self.jitter_std = jitter_std
+
+        if clip_range is not None:
+            if not isinstance(clip_range, seq_types):
+                assert isinstance(clip_range, (int, float)), \
+                    f'unsupported clip_range type {type(clip_range)}'
+                clip_range = [-clip_range, clip_range]
+        self.clip_range = clip_range
+
+    def __call__(self, input_dict):
+        """Call function to jitter all the points in the scene.
+
+        Args:
+            input_dict (dict): Result dict from loading pipeline.
+
+        Returns:
+            dict: Results after adding noise to each point, \
+                'points' key is updated in the result dict.
+        """
+        points = input_dict['points']
+        jitter_std = np.array(self.jitter_std, dtype=np.float32)
+        jitter_noise = \
+            np.random.randn(points.shape[0], 3) * jitter_std[None, :]
+        if self.clip_range is not None:
+            jitter_noise = np.clip(jitter_noise, self.clip_range[0],
+                                   self.clip_range[1])
+
+        points.translate(jitter_noise)
+        return input_dict
+
+    def __repr__(self):
+        """str: Return a string that describes the module."""
+        repr_str = self.__class__.__name__
+        repr_str += f'(jitter_std={self.jitter_std},'
+        repr_str += f' clip_range={self.clip_range})'
+        return repr_str
+
+
+@PIPELINES.register_module()
+class ObjectSample(object):
+    """Sample GT objects to the data.
+
+    Args:
+        db_sampler (dict): Config dict of the database sampler.
+        sample_2d (bool): Whether to also paste the 2D image patch to the
+            images. This should be True when applying multi-modality
+            cut-and-paste. Defaults to False.
+    """
+
+    def __init__(self, db_sampler, sample_2d=False):
+        self.sampler_cfg = db_sampler
+        self.sample_2d = sample_2d
+        if 'type' not in db_sampler.keys():
+            db_sampler['type'] = 'DataBaseSampler'
+        self.db_sampler = build_from_cfg(db_sampler, OBJECTSAMPLERS)
+
+    @staticmethod
+    def remove_points_in_boxes(points, boxes):
+        """Remove the points in the sampled bounding boxes.
+
+        Args:
+            points (:obj:`BasePoints`): Input point cloud array.
+            boxes (np.ndarray): Sampled ground truth boxes.
+
+        Returns:
+            np.ndarray: Points with those in the boxes removed.
+        """
+        masks = box_np_ops.points_in_rbbox(points.coord.numpy(), boxes)
+        points = points[np.logical_not(masks.any(-1))]
+        return points
+
+    def __call__(self, input_dict):
+        """Call function to sample ground truth objects to the data.
+
+        Args:
+            input_dict (dict): Result dict from loading pipeline.
+
+        Returns:
+            dict: Results after object sampling augmentation, \
+                'points', 'gt_bboxes_3d', 'gt_labels_3d' keys are updated \
+                in the result dict.
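+
+        Example (illustrative ``db_sampler`` config; the field values are
+        assumptions, the keys mirror ``sampler_cfg`` as used in ``__repr__``):
+
+            >>> db_sampler = dict(
+            ...     data_root='data/kitti/',
+            ...     info_path='data/kitti/kitti_dbinfos_train.pkl',
+            ...     rate=1.0,
+            ...     prepare=dict(filter_by_min_points=dict(Car=5)),
+            ...     classes=['Car'],
+            ...     sample_groups=dict(Car=15))
+            >>> obj_sample = ObjectSample(db_sampler)
+            >>> input_dict = obj_sample(input_dict)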
+ """ + gt_bboxes_3d = input_dict['gt_bboxes_3d'] + gt_labels_3d = input_dict['gt_labels_3d'] + + # change to float for blending operation + points = input_dict['points'] + if self.sample_2d: + img = input_dict['img'] + gt_bboxes_2d = input_dict['gt_bboxes'] + # Assume for now 3D & 2D bboxes are the same + sampled_dict = self.db_sampler.sample_all( + gt_bboxes_3d.tensor.numpy(), + gt_labels_3d, + gt_bboxes_2d=gt_bboxes_2d, + img=img) + else: + sampled_dict = self.db_sampler.sample_all( + gt_bboxes_3d.tensor.numpy(), gt_labels_3d, img=None) + + if sampled_dict is not None: + sampled_gt_bboxes_3d = sampled_dict['gt_bboxes_3d'] + sampled_points = sampled_dict['points'] + sampled_gt_labels = sampled_dict['gt_labels_3d'] + + gt_labels_3d = np.concatenate([gt_labels_3d, sampled_gt_labels], + axis=0) + gt_bboxes_3d = gt_bboxes_3d.new_box( + np.concatenate( + [gt_bboxes_3d.tensor.numpy(), sampled_gt_bboxes_3d])) + + points = self.remove_points_in_boxes(points, sampled_gt_bboxes_3d) + # check the points dimension + points = points.cat([sampled_points, points]) + + if self.sample_2d: + sampled_gt_bboxes_2d = sampled_dict['gt_bboxes_2d'] + gt_bboxes_2d = np.concatenate( + [gt_bboxes_2d, sampled_gt_bboxes_2d]).astype(np.float32) + + input_dict['gt_bboxes'] = gt_bboxes_2d + input_dict['img'] = sampled_dict['img'] + + input_dict['gt_bboxes_3d'] = gt_bboxes_3d + input_dict['gt_labels_3d'] = gt_labels_3d.astype(np.long) + input_dict['points'] = points + + return input_dict + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f' sample_2d={self.sample_2d},' + repr_str += f' data_root={self.sampler_cfg.data_root},' + repr_str += f' info_path={self.sampler_cfg.info_path},' + repr_str += f' rate={self.sampler_cfg.rate},' + repr_str += f' prepare={self.sampler_cfg.prepare},' + repr_str += f' classes={self.sampler_cfg.classes},' + repr_str += f' sample_groups={self.sampler_cfg.sample_groups}' + return repr_str + + +@PIPELINES.register_module() +class ObjectNoise(object): + """Apply noise to each GT objects in the scene. + + Args: + translation_std (list[float], optional): Standard deviation of the + distribution where translation noise are sampled from. + Defaults to [0.25, 0.25, 0.25]. + global_rot_range (list[float], optional): Global rotation to the scene. + Defaults to [0.0, 0.0]. + rot_range (list[float], optional): Object rotation range. + Defaults to [-0.15707963267, 0.15707963267]. + num_try (int, optional): Number of times to try if the noise applied is + invalid. Defaults to 100. + """ + + def __init__(self, + translation_std=[0.25, 0.25, 0.25], + global_rot_range=[0.0, 0.0], + rot_range=[-0.15707963267, 0.15707963267], + num_try=100): + self.translation_std = translation_std + self.global_rot_range = global_rot_range + self.rot_range = rot_range + self.num_try = num_try + + def __call__(self, input_dict): + """Call function to apply noise to each ground truth in the scene. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after adding noise to each object, \ + 'points', 'gt_bboxes_3d' keys are updated in the result dict. 
+ """ + gt_bboxes_3d = input_dict['gt_bboxes_3d'] + points = input_dict['points'] + + # TODO: check this inplace function + numpy_box = gt_bboxes_3d.tensor.numpy() + numpy_points = points.tensor.numpy() + + noise_per_object_v3_( + numpy_box, + numpy_points, + rotation_perturb=self.rot_range, + center_noise_std=self.translation_std, + global_random_rot_range=self.global_rot_range, + num_try=self.num_try) + + input_dict['gt_bboxes_3d'] = gt_bboxes_3d.new_box(numpy_box) + input_dict['points'] = points.new_point(numpy_points) + return input_dict + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(num_try={self.num_try},' + repr_str += f' translation_std={self.translation_std},' + repr_str += f' global_rot_range={self.global_rot_range},' + repr_str += f' rot_range={self.rot_range})' + return repr_str + + +@PIPELINES.register_module() +class GlobalAlignment(object): + """Apply global alignment to 3D scene points by rotation and translation. + + Args: + rotation_axis (int): Rotation axis for points and bboxes rotation. + + Note: + We do not record the applied rotation and translation as in \ + GlobalRotScaleTrans. Because usually, we do not need to reverse \ + the alignment step. + For example, ScanNet 3D detection task uses aligned ground-truth \ + bounding boxes for evaluation. + """ + + def __init__(self, rotation_axis): + self.rotation_axis = rotation_axis + + def _trans_points(self, input_dict, trans_factor): + """Private function to translate points. + + Args: + input_dict (dict): Result dict from loading pipeline. + trans_factor (np.ndarray): Translation vector to be applied. + + Returns: + dict: Results after translation, 'points' is updated in the dict. + """ + input_dict['points'].translate(trans_factor) + + def _rot_points(self, input_dict, rot_mat): + """Private function to rotate bounding boxes and points. + + Args: + input_dict (dict): Result dict from loading pipeline. + rot_mat (np.ndarray): Rotation matrix to be applied. + + Returns: + dict: Results after rotation, 'points' is updated in the dict. + """ + # input should be rot_mat_T so I transpose it here + input_dict['points'].rotate(rot_mat.T) + + def _check_rot_mat(self, rot_mat): + """Check if rotation matrix is valid for self.rotation_axis. + + Args: + rot_mat (np.ndarray): Rotation matrix to be applied. + """ + is_valid = np.allclose(np.linalg.det(rot_mat), 1.0) + valid_array = np.zeros(3) + valid_array[self.rotation_axis] = 1.0 + is_valid &= (rot_mat[self.rotation_axis, :] == valid_array).all() + is_valid &= (rot_mat[:, self.rotation_axis] == valid_array).all() + assert is_valid, f'invalid rotation matrix {rot_mat}' + + def __call__(self, input_dict): + """Call function to shuffle points. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after global alignment, 'points' and keys in \ + input_dict['bbox3d_fields'] are updated in the result dict. 
+ """ + assert 'axis_align_matrix' in input_dict['ann_info'].keys(), \ + 'axis_align_matrix is not provided in GlobalAlignment' + + axis_align_matrix = input_dict['ann_info']['axis_align_matrix'] + assert axis_align_matrix.shape == (4, 4), \ + f'invalid shape {axis_align_matrix.shape} for axis_align_matrix' + rot_mat = axis_align_matrix[:3, :3] + trans_vec = axis_align_matrix[:3, -1] + + self._check_rot_mat(rot_mat) + self._rot_points(input_dict, rot_mat) + self._trans_points(input_dict, trans_vec) + + return input_dict + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(rotation_axis={self.rotation_axis})' + return repr_str + + +@PIPELINES.register_module() +class GlobalRotScaleTrans(object): + """Apply global rotation, scaling and translation to a 3D scene. + + Args: + rot_range (list[float]): Range of rotation angle. + Defaults to [-0.78539816, 0.78539816] (close to [-pi/4, pi/4]). + scale_ratio_range (list[float]): Range of scale ratio. + Defaults to [0.95, 1.05]. + translation_std (list[float]): The standard deviation of translation + noise. This applies random translation to a scene by a noise, which + is sampled from a gaussian distribution whose standard deviation + is set by ``translation_std``. Defaults to [0, 0, 0] + shift_height (bool): Whether to shift height. + (the fourth dimension of indoor points) when scaling. + Defaults to False. + """ + + def __init__(self, + rot_range=[-0.78539816, 0.78539816], + scale_ratio_range=[0.95, 1.05], + translation_std=[0, 0, 0], + shift_height=False): + seq_types = (list, tuple, np.ndarray) + if not isinstance(rot_range, seq_types): + assert isinstance(rot_range, (int, float)), \ + f'unsupported rot_range type {type(rot_range)}' + rot_range = [-rot_range, rot_range] + self.rot_range = rot_range + + assert isinstance(scale_ratio_range, seq_types), \ + f'unsupported scale_ratio_range type {type(scale_ratio_range)}' + self.scale_ratio_range = scale_ratio_range + + if not isinstance(translation_std, seq_types): + assert isinstance(translation_std, (int, float)), \ + f'unsupported translation_std type {type(translation_std)}' + translation_std = [ + translation_std, translation_std, translation_std + ] + assert all([std >= 0 for std in translation_std]), \ + 'translation_std should be positive' + self.translation_std = translation_std + self.shift_height = shift_height + + def _trans_bbox_points(self, input_dict): + """Private function to translate bounding boxes and points. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after translation, 'points', 'pcd_trans' \ + and keys in input_dict['bbox3d_fields'] are updated \ + in the result dict. + """ + translation_std = np.array(self.translation_std, dtype=np.float32) + trans_factor = np.random.normal(scale=translation_std, size=3).T + + input_dict['points'].translate(trans_factor) + input_dict['pcd_trans'] = trans_factor + for key in input_dict['bbox3d_fields']: + input_dict[key].translate(trans_factor) + + def _rot_bbox_points(self, input_dict): + """Private function to rotate bounding boxes and points. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after rotation, 'points', 'pcd_rotation' \ + and keys in input_dict['bbox3d_fields'] are updated \ + in the result dict. 
+ """ + rotation = self.rot_range + noise_rotation = np.random.uniform(rotation[0], rotation[1]) + + # if no bbox in input_dict, only rotate points + if len(input_dict['bbox3d_fields']) == 0: + rot_mat_T = input_dict['points'].rotate(noise_rotation) + input_dict['pcd_rotation'] = rot_mat_T + return + + # rotate points with bboxes + for key in input_dict['bbox3d_fields']: + if len(input_dict[key].tensor) != 0: + points, rot_mat_T = input_dict[key].rotate( + noise_rotation, input_dict['points']) + input_dict['points'] = points + input_dict['pcd_rotation'] = rot_mat_T + + def _scale_bbox_points(self, input_dict): + """Private function to scale bounding boxes and points. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after scaling, 'points'and keys in \ + input_dict['bbox3d_fields'] are updated in the result dict. + """ + scale = input_dict['pcd_scale_factor'] + points = input_dict['points'] + points.scale(scale) + if self.shift_height: + assert 'height' in points.attribute_dims.keys(), \ + 'setting shift_height=True but points have no height attribute' + points.tensor[:, points.attribute_dims['height']] *= scale + input_dict['points'] = points + + for key in input_dict['bbox3d_fields']: + input_dict[key].scale(scale) + + def _random_scale(self, input_dict): + """Private function to randomly set the scale factor. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after scaling, 'pcd_scale_factor' are updated \ + in the result dict. + """ + scale_factor = np.random.uniform(self.scale_ratio_range[0], + self.scale_ratio_range[1]) + input_dict['pcd_scale_factor'] = scale_factor + + def __call__(self, input_dict): + """Private function to rotate, scale and translate bounding boxes and \ + points. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after scaling, 'points', 'pcd_rotation', + 'pcd_scale_factor', 'pcd_trans' and keys in \ + input_dict['bbox3d_fields'] are updated in the result dict. + """ + if 'transformation_3d_flow' not in input_dict: + input_dict['transformation_3d_flow'] = [] + + self._rot_bbox_points(input_dict) + + if 'pcd_scale_factor' not in input_dict: + self._random_scale(input_dict) + self._scale_bbox_points(input_dict) + + self._trans_bbox_points(input_dict) + + input_dict['transformation_3d_flow'].extend(['R', 'S', 'T']) + return input_dict + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(rot_range={self.rot_range},' + repr_str += f' scale_ratio_range={self.scale_ratio_range},' + repr_str += f' translation_std={self.translation_std},' + repr_str += f' shift_height={self.shift_height})' + return repr_str + + +@PIPELINES.register_module() +class PointShuffle(object): + """Shuffle input points.""" + + def __call__(self, input_dict): + """Call function to shuffle points. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after filtering, 'points', 'pts_instance_mask' \ + and 'pts_semantic_mask' keys are updated in the result dict. 
+ """ + idx = input_dict['points'].shuffle() + idx = idx.numpy() + + pts_instance_mask = input_dict.get('pts_instance_mask', None) + pts_semantic_mask = input_dict.get('pts_semantic_mask', None) + + if pts_instance_mask is not None: + input_dict['pts_instance_mask'] = pts_instance_mask[idx] + + if pts_semantic_mask is not None: + input_dict['pts_semantic_mask'] = pts_semantic_mask[idx] + + return input_dict + + def __repr__(self): + return self.__class__.__name__ + + +@PIPELINES.register_module() +class ObjectRangeFilter(object): + """Filter objects by the range. + + Args: + point_cloud_range (list[float]): Point cloud range. + """ + + def __init__(self, point_cloud_range): + self.pcd_range = np.array(point_cloud_range, dtype=np.float32) + + def __call__(self, input_dict): + """Call function to filter objects by the range. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after filtering, 'gt_bboxes_3d', 'gt_labels_3d' \ + keys are updated in the result dict. + """ + # Check points instance type and initialise bev_range + if isinstance(input_dict['gt_bboxes_3d'], + (LiDARInstance3DBoxes, DepthInstance3DBoxes)): + bev_range = self.pcd_range[[0, 1, 3, 4]] + elif isinstance(input_dict['gt_bboxes_3d'], CameraInstance3DBoxes): + bev_range = self.pcd_range[[0, 2, 3, 5]] + + gt_bboxes_3d = input_dict['gt_bboxes_3d'] + gt_labels_3d = input_dict['gt_labels_3d'] + mask = gt_bboxes_3d.in_range_bev(bev_range) + gt_bboxes_3d = gt_bboxes_3d[mask] + # mask is a torch tensor but gt_labels_3d is still numpy array + # using mask to index gt_labels_3d will cause bug when + # len(gt_labels_3d) == 1, where mask=1 will be interpreted + # as gt_labels_3d[1] and cause out of index error + gt_labels_3d = gt_labels_3d[mask.numpy().astype(np.bool)] + + # limit rad to [-pi, pi] + gt_bboxes_3d.limit_yaw(offset=0.5, period=2 * np.pi) + input_dict['gt_bboxes_3d'] = gt_bboxes_3d + input_dict['gt_labels_3d'] = gt_labels_3d + + return input_dict + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(point_cloud_range={self.pcd_range.tolist()})' + return repr_str + + +@PIPELINES.register_module() +class PointsRangeFilter(object): + """Filter points by the range. + + Args: + point_cloud_range (list[float]): Point cloud range. + """ + + def __init__(self, point_cloud_range): + self.pcd_range = np.array(point_cloud_range, dtype=np.float32) + + def __call__(self, input_dict): + """Call function to filter points by the range. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after filtering, 'points', 'pts_instance_mask' \ + and 'pts_semantic_mask' keys are updated in the result dict. 
+ """ + points = input_dict['points'] + points_mask = points.in_range_3d(self.pcd_range) + clean_points = points[points_mask] + input_dict['points'] = clean_points + points_mask = points_mask.numpy() + + pts_instance_mask = input_dict.get('pts_instance_mask', None) + pts_semantic_mask = input_dict.get('pts_semantic_mask', None) + + if pts_instance_mask is not None: + input_dict['pts_instance_mask'] = pts_instance_mask[points_mask] + + if pts_semantic_mask is not None: + input_dict['pts_semantic_mask'] = pts_semantic_mask[points_mask] + + return input_dict + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(point_cloud_range={self.pcd_range.tolist()})' + return repr_str + + +@PIPELINES.register_module() +class ObjectNameFilter(object): + """Filter GT objects by their names. + + Args: + classes (list[str]): List of class names to be kept for training. + """ + + def __init__(self, classes): + self.classes = classes + self.labels = list(range(len(self.classes))) + + def __call__(self, input_dict): + """Call function to filter objects by their names. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after filtering, 'gt_bboxes_3d', 'gt_labels_3d' \ + keys are updated in the result dict. + """ + gt_labels_3d = input_dict['gt_labels_3d'] + gt_bboxes_mask = np.array([n in self.labels for n in gt_labels_3d], + dtype=np.bool_) + input_dict['gt_bboxes_3d'] = input_dict['gt_bboxes_3d'][gt_bboxes_mask] + input_dict['gt_labels_3d'] = input_dict['gt_labels_3d'][gt_bboxes_mask] + + return input_dict + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(classes={self.classes})' + return repr_str + + +@PIPELINES.register_module() +class PointSample(object): + """Point sample. + + Sampling data to a certain number. + + Args: + num_points (int): Number of points to be sampled. + sample_range (float, optional): The range where to sample points. + If not None, the points with depth larger than `sample_range` are + prior to be sampled. Defaults to None. + replace (bool, optional): Whether the sampling is with or without + replacement. Defaults to False. + """ + + def __init__(self, num_points, sample_range=None, replace=False): + self.num_points = num_points + self.sample_range = sample_range + self.replace = replace + + def _points_random_sampling(self, + points, + num_samples, + sample_range=None, + replace=False, + return_choices=False): + """Points random sampling. + + Sample points to a certain number. + + Args: + points (np.ndarray | :obj:`BasePoints`): 3D Points. + num_samples (int): Number of samples to be sampled. + sample_range (float, optional): Indicating the range where the + points will be sampled. Defaults to None. + replace (bool, optional): Sampling with or without replacement. + Defaults to None. + return_choices (bool, optional): Whether return choice. + Defaults to False. + Returns: + tuple[np.ndarray] | np.ndarray: + - points (np.ndarray | :obj:`BasePoints`): 3D Points. + - choices (np.ndarray, optional): The generated random samples. 
+ """ + if not replace: + replace = (points.shape[0] < num_samples) + point_range = range(len(points)) + if sample_range is not None and not replace: + # Only sampling the near points when len(points) >= num_samples + depth = np.linalg.norm(points.tensor, axis=1) + far_inds = np.where(depth > sample_range)[0] + near_inds = np.where(depth <= sample_range)[0] + # in case there are too many far points + if len(far_inds) > num_samples: + far_inds = np.random.choice( + far_inds, num_samples, replace=False) + point_range = near_inds + num_samples -= len(far_inds) + choices = np.random.choice(point_range, num_samples, replace=replace) + if sample_range is not None and not replace: + choices = np.concatenate((far_inds, choices)) + # Shuffle points after sampling + np.random.shuffle(choices) + if return_choices: + return points[choices], choices + else: + return points[choices] + + def __call__(self, results): + """Call function to sample points to in indoor scenes. + + Args: + input_dict (dict): Result dict from loading pipeline. + Returns: + dict: Results after sampling, 'points', 'pts_instance_mask' \ + and 'pts_semantic_mask' keys are updated in the result dict. + """ + points = results['points'] + # Points in Camera coord can provide the depth information. + # TODO: Need to suport distance-based sampling for other coord system. + if self.sample_range is not None: + from mmcv.core.points import CameraPoints + assert isinstance(points, CameraPoints), \ + 'Sampling based on distance is only appliable for CAMERA coord' + points, choices = self._points_random_sampling( + points, + self.num_points, + self.sample_range, + self.replace, + return_choices=True) + results['points'] = points + + pts_instance_mask = results.get('pts_instance_mask', None) + pts_semantic_mask = results.get('pts_semantic_mask', None) + + if pts_instance_mask is not None: + pts_instance_mask = pts_instance_mask[choices] + results['pts_instance_mask'] = pts_instance_mask + + if pts_semantic_mask is not None: + pts_semantic_mask = pts_semantic_mask[choices] + results['pts_semantic_mask'] = pts_semantic_mask + + return results + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(num_points={self.num_points},' + repr_str += f' sample_range={self.sample_range},' + repr_str += f' replace={self.replace})' + + return repr_str + + +@PIPELINES.register_module() +class IndoorPointSample(PointSample): + """Indoor point sample. + + Sampling data to a certain number. + NOTE: IndoorPointSample is deprecated in favor of PointSample + + Args: + num_points (int): Number of points to be sampled. + """ + + def __init__(self, *args, **kwargs): + warnings.warn( + 'IndoorPointSample is deprecated in favor of PointSample') + super(IndoorPointSample, self).__init__(*args, **kwargs) + + +@PIPELINES.register_module() +class IndoorPatchPointSample(object): + r"""Indoor point sample within a patch. Modified from `PointNet++ `_. + + Sampling data to a certain number for semantic segmentation. + + Args: + num_points (int): Number of points to be sampled. + block_size (float, optional): Size of a block to sample points from. + Defaults to 1.5. + sample_rate (float, optional): Stride used in sliding patch generation. + This parameter is unused in `IndoorPatchPointSample` and thus has + been deprecated. We plan to remove it in the future. + Defaults to None. + ignore_index (int, optional): Label index that won't be used for the + segmentation task. 
This is set in PointSegClassMapping as neg_cls. + If not None, will be used as a patch selection criterion. + Defaults to None. + use_normalized_coord (bool, optional): Whether to use normalized xyz as + additional features. Defaults to False. + num_try (int, optional): Number of times to try if the patch selected + is invalid. Defaults to 10. + enlarge_size (float | None, optional): Enlarge the sampled patch to + [-block_size / 2 - enlarge_size, block_size / 2 + enlarge_size] as + an augmentation. If None, set it as 0. Defaults to 0.2. + min_unique_num (int | None, optional): Minimum number of unique points + the sampled patch should contain. If None, use PointNet++'s method + to judge uniqueness. Defaults to None. + eps (float, optional): A value added to patch boundary to guarantee + points coverage. Defaults to 1e-2. + + Note: + This transform should only be used in the training process of point + cloud segmentation tasks. For the sliding patch generation and + inference process in testing, please refer to the `slide_inference` + function of `EncoderDecoder3D` class. + """ + + def __init__(self, + num_points, + block_size=1.5, + sample_rate=None, + ignore_index=None, + use_normalized_coord=False, + num_try=10, + enlarge_size=0.2, + min_unique_num=None, + eps=1e-2): + self.num_points = num_points + self.block_size = block_size + self.ignore_index = ignore_index + self.use_normalized_coord = use_normalized_coord + self.num_try = num_try + self.enlarge_size = enlarge_size if enlarge_size is not None else 0.0 + self.min_unique_num = min_unique_num + self.eps = eps + + if sample_rate is not None: + warnings.warn( + "'sample_rate' has been deprecated and will be removed in " + 'the future. Please remove them from your code.') + + def _input_generation(self, coords, patch_center, coord_max, attributes, + attribute_dims, point_type): + """Generating model input. + + Generate input by subtracting patch center and adding additional \ + features. Currently support colors and normalized xyz as features. + + Args: + coords (np.ndarray): Sampled 3D Points. + patch_center (np.ndarray): Center coordinate of the selected patch. + coord_max (np.ndarray): Max coordinate of all 3D Points. + attributes (np.ndarray): features of input points. + attribute_dims (dict): Dictionary to indicate the meaning of extra + dimension. + point_type (type): class of input points inherited from BasePoints. + + Returns: + :obj:`BasePoints`: The generated input data. + """ + # subtract patch center, the z dimension is not centered + centered_coords = coords.copy() + centered_coords[:, 0] -= patch_center[0] + centered_coords[:, 1] -= patch_center[1] + + if self.use_normalized_coord: + normalized_coord = coords / coord_max + attributes = np.concatenate([attributes, normalized_coord], axis=1) + if attribute_dims is None: + attribute_dims = dict() + attribute_dims.update( + dict(normalized_coord=[ + attributes.shape[1], attributes.shape[1] + + 1, attributes.shape[1] + 2 + ])) + + points = np.concatenate([centered_coords, attributes], axis=1) + points = point_type( + points, points_dim=points.shape[1], attribute_dims=attribute_dims) + + return points + + def _patch_points_sampling(self, points, sem_mask): + """Patch points sampling. + + First sample a valid patch. + Then sample points within that patch to a certain number. + + Args: + points (:obj:`BasePoints`): 3D Points. + sem_mask (np.ndarray): semantic segmentation mask for input points. 
+ + Returns: + tuple[:obj:`BasePoints`, np.ndarray] | :obj:`BasePoints`: + + - points (:obj:`BasePoints`): 3D Points. + - choices (np.ndarray): The generated random samples. + """ + coords = points.coord.numpy() + attributes = points.tensor[:, 3:].numpy() + attribute_dims = points.attribute_dims + point_type = type(points) + + coord_max = np.amax(coords, axis=0) + coord_min = np.amin(coords, axis=0) + + for _ in range(self.num_try): + # random sample a point as patch center + cur_center = coords[np.random.choice(coords.shape[0])] + + # boundary of a patch, which would be enlarged by + # `self.enlarge_size` as an augmentation + cur_max = cur_center + np.array( + [self.block_size / 2.0, self.block_size / 2.0, 0.0]) + cur_min = cur_center - np.array( + [self.block_size / 2.0, self.block_size / 2.0, 0.0]) + cur_max[2] = coord_max[2] + cur_min[2] = coord_min[2] + cur_choice = np.sum( + (coords >= (cur_min - self.enlarge_size)) * + (coords <= (cur_max + self.enlarge_size)), + axis=1) == 3 + + if not cur_choice.any(): # no points in this patch + continue + + cur_coords = coords[cur_choice, :] + cur_sem_mask = sem_mask[cur_choice] + point_idxs = np.where(cur_choice)[0] + mask = np.sum( + (cur_coords >= (cur_min - self.eps)) * (cur_coords <= + (cur_max + self.eps)), + axis=1) == 3 + + # two criteria for patch sampling, adopted from PointNet++ + # 1. selected patch should contain enough unique points + if self.min_unique_num is None: + # use PointNet++'s method as default + # [31, 31, 62] are just some big values used to transform + # coords from 3d array to 1d and then check their uniqueness + # this is used in all the ScanNet code following PointNet++ + vidx = np.ceil( + (cur_coords[mask, :] - cur_min) / (cur_max - cur_min) * + np.array([31.0, 31.0, 62.0])) + vidx = np.unique(vidx[:, 0] * 31.0 * 62.0 + vidx[:, 1] * 62.0 + + vidx[:, 2]) + flag1 = len(vidx) / 31.0 / 31.0 / 62.0 >= 0.02 + else: + # if `min_unique_num` is provided, directly compare with it + flag1 = mask.sum() >= self.min_unique_num + + # 2. selected patch should contain enough annotated points + if self.ignore_index is None: + flag2 = True + else: + flag2 = np.sum(cur_sem_mask != self.ignore_index) / \ + len(cur_sem_mask) >= 0.7 + + if flag1 and flag2: + break + + # sample idx to `self.num_points` + if point_idxs.size >= self.num_points: + # no duplicate in sub-sampling + choices = np.random.choice( + point_idxs, self.num_points, replace=False) + else: + # do not use random choice here to avoid some points not counted + dup = np.random.choice(point_idxs.size, + self.num_points - point_idxs.size) + idx_dup = np.concatenate( + [np.arange(point_idxs.size), + np.array(dup)], 0) + choices = point_idxs[idx_dup] + + # construct model input + points = self._input_generation(coords[choices], cur_center, coord_max, + attributes[choices], attribute_dims, + point_type) + + return points, choices + + def __call__(self, results): + """Call function to sample points in indoor scenes. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Results after sampling, 'points', 'pts_instance_mask' \ + and 'pts_semantic_mask' keys are updated in the result dict.
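The densest step in ``_patch_points_sampling`` above is the uniqueness test; read in isolation, under the same 31/31/62 constants, it amounts to:

```python
import numpy as np

def patch_coverage_ok(patch_xyz, cur_min, cur_max, threshold=0.02):
    """Sketch of the PointNet++-style uniqueness test above: quantize the
    patch onto a 31 x 31 x 62 grid and require that at least ~2% of the
    cells are occupied before the patch is accepted."""
    vidx = np.ceil((patch_xyz - cur_min) / (cur_max - cur_min)
                   * np.array([31.0, 31.0, 62.0]))
    flat = vidx[:, 0] * 31.0 * 62.0 + vidx[:, 1] * 62.0 + vidx[:, 2]
    return len(np.unique(flat)) / (31.0 * 31.0 * 62.0) >= threshold

pts = np.random.default_rng(0).uniform(0.0, 1.0, (2048, 3))
print(patch_coverage_ok(pts, pts.min(axis=0), pts.max(axis=0)))  # True
```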
+ """ + points = results['points'] + + assert 'pts_semantic_mask' in results.keys(), \ + 'semantic mask should be provided in training and evaluation' + pts_semantic_mask = results['pts_semantic_mask'] + + points, choices = self._patch_points_sampling(points, + pts_semantic_mask) + + results['points'] = points + results['pts_semantic_mask'] = pts_semantic_mask[choices] + pts_instance_mask = results.get('pts_instance_mask', None) + if pts_instance_mask is not None: + results['pts_instance_mask'] = pts_instance_mask[choices] + + return results + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(num_points={self.num_points},' + repr_str += f' block_size={self.block_size},' + repr_str += f' ignore_index={self.ignore_index},' + repr_str += f' use_normalized_coord={self.use_normalized_coord},' + repr_str += f' num_try={self.num_try},' + repr_str += f' enlarge_size={self.enlarge_size},' + repr_str += f' min_unique_num={self.min_unique_num},' + repr_str += f' eps={self.eps})' + return repr_str + + +@PIPELINES.register_module() +class BackgroundPointsFilter(object): + """Filter background points near the bounding box. + + Args: + bbox_enlarge_range (tuple[float], float): Bbox enlarge range. + """ + + def __init__(self, bbox_enlarge_range): + assert (is_tuple_of(bbox_enlarge_range, float) + and len(bbox_enlarge_range) == 3) \ + or isinstance(bbox_enlarge_range, float), \ + f'Invalid arguments bbox_enlarge_range {bbox_enlarge_range}' + + if isinstance(bbox_enlarge_range, float): + bbox_enlarge_range = [bbox_enlarge_range] * 3 + self.bbox_enlarge_range = np.array( + bbox_enlarge_range, dtype=np.float32)[np.newaxis, :] + + def __call__(self, input_dict): + """Call function to filter points by the range. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after filtering, 'points', 'pts_instance_mask' \ + and 'pts_semantic_mask' keys are updated in the result dict. + """ + points = input_dict['points'] + gt_bboxes_3d = input_dict['gt_bboxes_3d'] + + # avoid groundtruth being modified + gt_bboxes_3d_np = gt_bboxes_3d.tensor.clone().numpy() + gt_bboxes_3d_np[:, :3] = gt_bboxes_3d.gravity_center.clone().numpy() + + enlarged_gt_bboxes_3d = gt_bboxes_3d_np.copy() + enlarged_gt_bboxes_3d[:, 3:6] += self.bbox_enlarge_range + points_numpy = points.tensor.clone().numpy() + foreground_masks = box_np_ops.points_in_rbbox( + points_numpy, gt_bboxes_3d_np, origin=(0.5, 0.5, 0.5)) + enlarge_foreground_masks = box_np_ops.points_in_rbbox( + points_numpy, enlarged_gt_bboxes_3d, origin=(0.5, 0.5, 0.5)) + foreground_masks = foreground_masks.max(1) + enlarge_foreground_masks = enlarge_foreground_masks.max(1) + valid_masks = ~np.logical_and(~foreground_masks, + enlarge_foreground_masks) + + input_dict['points'] = points[valid_masks] + pts_instance_mask = input_dict.get('pts_instance_mask', None) + if pts_instance_mask is not None: + input_dict['pts_instance_mask'] = pts_instance_mask[valid_masks] + + pts_semantic_mask = input_dict.get('pts_semantic_mask', None) + if pts_semantic_mask is not None: + input_dict['pts_semantic_mask'] = pts_semantic_mask[valid_masks] + return input_dict + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(bbox_enlarge_range={self.bbox_enlarge_range.tolist()})' + return repr_str + + +@PIPELINES.register_module() +class VoxelBasedPointSampler(object): + """Voxel based point sampler. 
+ + Apply voxel sampling to multiple sweep points. + + Args: + cur_sweep_cfg (dict): Config for sampling current points. + prev_sweep_cfg (dict): Config for sampling previous points. + time_dim (int): Index that indicates the time dimension + for input points. + """ + + def __init__(self, cur_sweep_cfg, prev_sweep_cfg=None, time_dim=3): + self.cur_voxel_generator = VoxelGenerator(**cur_sweep_cfg) + self.cur_voxel_num = self.cur_voxel_generator._max_voxels + self.time_dim = time_dim + if prev_sweep_cfg is not None: + assert prev_sweep_cfg['max_num_points'] == \ + cur_sweep_cfg['max_num_points'] + self.prev_voxel_generator = VoxelGenerator(**prev_sweep_cfg) + self.prev_voxel_num = self.prev_voxel_generator._max_voxels + else: + self.prev_voxel_generator = None + self.prev_voxel_num = 0 + + def _sample_points(self, points, sampler, point_dim): + """Sample points for each point subset. + + Args: + points (np.ndarray): Points subset to be sampled. + sampler (VoxelGenerator): Voxel based sampler for + each point subset. + point_dim (int): The dimension of each point. + + Returns: + np.ndarray: Sampled points. + """ + voxels, coors, num_points_per_voxel = sampler.generate(points) + if voxels.shape[0] < sampler._max_voxels: + padding_points = np.zeros([ + sampler._max_voxels - voxels.shape[0], sampler._max_num_points, + point_dim + ], + dtype=points.dtype) + padding_points[:] = voxels[0] + sample_points = np.concatenate([voxels, padding_points], axis=0) + else: + sample_points = voxels + + return sample_points + + def __call__(self, results): + """Call function to sample points from multiple sweeps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Results after sampling, 'points', 'pts_instance_mask' \ + and 'pts_semantic_mask' keys are updated in the result dict. + """ + points = results['points'] + original_dim = points.shape[1] + + # TODO: process instance and semantic mask while _max_num_points + # is larger than 1 + # Extend points with seg and mask fields + map_fields2dim = [] + start_dim = original_dim + points_numpy = points.tensor.numpy() + extra_channel = [points_numpy] + for idx, key in enumerate(results['pts_mask_fields']): + map_fields2dim.append((key, idx + start_dim)) + extra_channel.append(results[key][..., None]) + + start_dim += len(results['pts_mask_fields']) + for idx, key in enumerate(results['pts_seg_fields']): + map_fields2dim.append((key, idx + start_dim)) + extra_channel.append(results[key][..., None]) + + points_numpy = np.concatenate(extra_channel, axis=-1) + + # Split points into two parts: current sweep points and + # previous sweep points. + # TODO: support different sampling methods for next sweeps points + # and previous sweeps points.
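For intuition, the sweep split performed next keys entirely off the time channel; a minimal sketch with hand-written points (the timestamps are made up):

```python
import numpy as np

# The time channel (column ``time_dim``) is 0 for the current sweep and
# non-zero for history, so one comparison separates the two point sets.
points = np.array([[1.0, 2.0, 0.5, 0.0],     # current sweep (t == 0)
                   [3.0, 1.0, 0.2, -0.5],    # previous sweep (t == -0.5)
                   [0.5, 4.0, 0.1, -1.0]])   # older sweep (t == -1.0)
time_dim = 3
cur_sweep = points[points[:, time_dim] == 0]
prev_sweeps = points[points[:, time_dim] != 0]
assert len(cur_sweep) == 1 and len(prev_sweeps) == 2
```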
+ cur_points_flag = (points_numpy[:, self.time_dim] == 0) + cur_sweep_points = points_numpy[cur_points_flag] + prev_sweeps_points = points_numpy[~cur_points_flag] + if prev_sweeps_points.shape[0] == 0: + prev_sweeps_points = cur_sweep_points + + # Shuffle points before sampling + np.random.shuffle(cur_sweep_points) + np.random.shuffle(prev_sweeps_points) + + cur_sweep_points = self._sample_points(cur_sweep_points, + self.cur_voxel_generator, + points_numpy.shape[1]) + if self.prev_voxel_generator is not None: + prev_sweeps_points = self._sample_points(prev_sweeps_points, + self.prev_voxel_generator, + points_numpy.shape[1]) + + points_numpy = np.concatenate( + [cur_sweep_points, prev_sweeps_points], 0) + else: + points_numpy = cur_sweep_points + + if self.cur_voxel_generator._max_num_points == 1: + points_numpy = points_numpy.squeeze(1) + results['points'] = points.new_point(points_numpy[..., :original_dim]) + + # Restore the corresponding seg and mask fields + for key, dim_index in map_fields2dim: + results[key] = points_numpy[..., dim_index] + + return results + + def __repr__(self): + """str: Return a string that describes the module.""" + + def _auto_indent(repr_str, indent): + repr_str = repr_str.split('\n') + repr_str = [' ' * indent + t + '\n' for t in repr_str] + repr_str = ''.join(repr_str)[:-1] + return repr_str + + repr_str = self.__class__.__name__ + indent = 4 + repr_str += '(\n' + repr_str += ' ' * indent + f'num_cur_sweep={self.cur_voxel_num},\n' + repr_str += ' ' * indent + f'num_prev_sweep={self.prev_voxel_num},\n' + repr_str += ' ' * indent + f'time_dim={self.time_dim},\n' + repr_str += ' ' * indent + 'cur_voxel_generator=\n' + repr_str += f'{_auto_indent(repr(self.cur_voxel_generator), 8)},\n' + repr_str += ' ' * indent + 'prev_voxel_generator=\n' + repr_str += f'{_auto_indent(repr(self.prev_voxel_generator), 8)})' + return repr_str + +@PIPELINES.register_module() +class PadMultiViewImage(object): + """Pad the multi-view image. + There are two padding modes: (1) pad to a fixed size and (2) pad to the + minimum size that is divisible by some number. + Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor". + Args: + size (tuple, optional): Fixed padding size. + size_divisor (int, optional): The divisor of padded size. + pad_val (float, optional): Padding value, 0 by default. + """ + + def __init__(self, size=None, size_divisor=None, pad_val=0): + self.size = size + self.size_divisor = size_divisor + self.pad_val = pad_val + # only one of size and size_divisor should be valid + assert size is not None or size_divisor is not None + assert size is None or size_divisor is None + + def _pad_img(self, results): + """Pad images according to ``self.size``.""" + if self.size is not None: + padded_img = [impad( + img, shape=self.size, pad_val=self.pad_val) for img in results['img']] + elif self.size_divisor is not None: + padded_img = [impad_to_multiple( + img, self.size_divisor, pad_val=self.pad_val) for img in results['img']] + + results['ori_shape'] = [img.shape for img in results['img']] + results['img'] = padded_img + results['img_shape'] = [img.shape for img in padded_img] + results['pad_shape'] = [img.shape for img in padded_img] + results['pad_fixed_size'] = self.size + results['pad_size_divisor'] = self.size_divisor + + def __call__(self, results): + """Call function to pad images, masks, semantic segmentation maps. + Args: + results (dict): Result dict from loading pipeline. + Returns: + dict: Updated result dict.
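The divisor branch of ``_pad_img`` above delegates to mmcv's ``impad_to_multiple``; the implied size arithmetic is a simple round-up, sketched here:

```python
import numpy as np

def padded_shape(h, w, divisor):
    """Sketch of the size arithmetic behind the divisor branch above:
    each side is rounded up to the next multiple of ``divisor``."""
    return (int(np.ceil(h / divisor)) * divisor,
            int(np.ceil(w / divisor)) * divisor)

assert padded_shape(900, 1600, 32) == (928, 1600)  # e.g. a nuScenes-sized image
```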
+ """ + self._pad_img(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(size={self.size}, ' + repr_str += f'size_divisor={self.size_divisor}, ' + repr_str += f'pad_val={self.pad_val})' + return repr_str + + +@PIPELINES.register_module() +class NormalizeMultiviewImage(object): + """Normalize the image. + Added key is "img_norm_cfg". + Args: + mean (sequence): Mean values of 3 channels. + std (sequence): Std values of 3 channels. + to_rgb (bool): Whether to convert the image from BGR to RGB, + default is true. + """ + + def __init__(self, mean, std, to_rgb=True): + self.mean = np.array(mean, dtype=np.float32) + self.std = np.array(std, dtype=np.float32) + self.to_rgb = to_rgb + + + def __call__(self, results): + """Call function to normalize images. + Args: + results (dict): Result dict from loading pipeline. + Returns: + dict: Normalized results, 'img_norm_cfg' key is added into + result dict. + """ + + results['img'] = [imnormalize(img, self.mean, self.std, self.to_rgb) for img in results['img']] + results['img_norm_cfg'] = dict( + mean=self.mean, std=self.std, to_rgb=self.to_rgb) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})' + return repr_str + + +@PIPELINES.register_module() +class PhotoMetricDistortionMultiViewImage: + """Apply photometric distortion to image sequentially, every transformation + is applied with a probability of 0.5. The position of random contrast is in + second or second to last. + 1. random brightness + 2. random contrast (mode 0) + 3. convert color from BGR to HSV + 4. random saturation + 5. random hue + 6. convert color from HSV to BGR + 7. random contrast (mode 1) + 8. randomly swap channels + Args: + brightness_delta (int): delta of brightness. + contrast_range (tuple): range of contrast. + saturation_range (tuple): range of saturation. + hue_delta (int): delta of hue. + """ + + def __init__(self, + brightness_delta=32, + contrast_range=(0.5, 1.5), + saturation_range=(0.5, 1.5), + hue_delta=18): + self.brightness_delta = brightness_delta + self.contrast_lower, self.contrast_upper = contrast_range + self.saturation_lower, self.saturation_upper = saturation_range + self.hue_delta = hue_delta + + def __call__(self, results): + """Call function to perform photometric distortion on images. + Args: + results (dict): Result dict from loading pipeline. + Returns: + dict: Result dict with images distorted. 
+ """ + imgs = results['img'] + new_imgs = [] + for img in imgs: + assert img.dtype == np.float32, \ + 'PhotoMetricDistortion needs the input image of dtype np.float32,'\ + ' please set "to_float32=True" in "LoadImageFromFile" pipeline' + # random brightness + if random.randint(2): + delta = random.uniform(-self.brightness_delta, + self.brightness_delta) + img += delta + + # mode == 0 --> do random contrast first + # mode == 1 --> do random contrast last + mode = random.randint(2) + if mode == 1: + if random.randint(2): + alpha = random.uniform(self.contrast_lower, + self.contrast_upper) + img *= alpha + + # convert color from BGR to HSV + img = bgr2hsv(img) + + # random saturation + if random.randint(2): + img[..., 1] *= random.uniform(self.saturation_lower, + self.saturation_upper) + + # random hue + if random.randint(2): + img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta) + img[..., 0][img[..., 0] > 360] -= 360 + img[..., 0][img[..., 0] < 0] += 360 + + # convert color from HSV to BGR + img = hsv2bgr(img) + + # random contrast + if mode == 0: + if random.randint(2): + alpha = random.uniform(self.contrast_lower, + self.contrast_upper) + img *= alpha + + # randomly swap channels + if random.randint(2): + img = img[..., random.permutation(3)] + new_imgs.append(img) + results['img'] = new_imgs + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(\nbrightness_delta={self.brightness_delta},\n' + repr_str += 'contrast_range=' + repr_str += f'{(self.contrast_lower, self.contrast_upper)},\n' + repr_str += 'saturation_range=' + repr_str += f'{(self.saturation_lower, self.saturation_upper)},\n' + repr_str += f'hue_delta={self.hue_delta})' + return repr_str + + + +@PIPELINES.register_module() +class CustomCollect3D(object): + """Collect data from the loader relevant to the specific task. + This is usually the last stage of the data loader pipeline. Typically keys + is set to some subset of "img", "proposals", "gt_bboxes", + "gt_bboxes_ignore", "gt_labels", and/or "gt_masks". + The "img_meta" item is always populated. The contents of the "img_meta" + dictionary depends on "meta_keys". By default this includes: + - 'img_shape': shape of the image input to the network as a tuple \ + (h, w, c). Note that images may be zero padded on the \ + bottom/right if the batch tensor is larger than this shape. + - 'scale_factor': a float indicating the preprocessing scale + - 'flip': a boolean indicating if image flip transform was used + - 'filename': path to the image file + - 'ori_shape': original shape of the image as a tuple (h, w, c) + - 'pad_shape': image shape after padding + - 'lidar2img': transform from lidar to image + - 'depth2img': transform from depth to image + - 'cam2img': transform from camera to image + - 'pcd_horizontal_flip': a boolean indicating if point cloud is \ + flipped horizontally + - 'pcd_vertical_flip': a boolean indicating if point cloud is \ + flipped vertically + - 'box_mode_3d': 3D box mode + - 'box_type_3d': 3D box type + - 'img_norm_cfg': a dict of normalization information: + - mean: per channel mean subtraction + - std: per channel std divisor + - to_rgb: bool indicating if bgr was converted to rgb + - 'pcd_trans': point cloud transformations + - 'sample_idx': sample index + - 'pcd_scale_factor': point cloud scale factor + - 'pcd_rotation': rotation applied to point cloud + - 'pts_filename': path to point cloud file. + Args: + keys (Sequence[str]): Keys of results to be collected in ``data``. 
+ meta_keys (Sequence[str], optional): Meta keys to be converted to + ``mmcv.DataContainer`` and collected in ``data[img_metas]``. + Default: ('filename', 'ori_shape', 'img_shape', 'lidar2img', + 'depth2img', 'cam2img', 'pad_shape', 'scale_factor', 'flip', + 'pcd_horizontal_flip', 'pcd_vertical_flip', 'box_mode_3d', + 'box_type_3d', 'img_norm_cfg', 'pcd_trans', + 'sample_idx', 'pcd_scale_factor', 'pcd_rotation', 'pts_filename') + """ + + def __init__(self, + keys, + meta_keys=('filename', 'ori_shape', 'img_shape', 'lidar2img', + 'depth2img', 'cam2img', 'pad_shape', + 'scale_factor', 'flip', 'pcd_horizontal_flip', + 'pcd_vertical_flip', 'box_mode_3d', 'box_type_3d', + 'img_norm_cfg', 'pcd_trans', 'sample_idx', 'prev_idx', 'next_idx', + 'pcd_scale_factor', 'pcd_rotation', 'pts_filename', + 'transformation_3d_flow', 'scene_token', + 'can_bus','folder','frame_idx' + )): + # TODO(yzj) bevformer meta_keys has lidar2cam + self.keys = keys + self.meta_keys = meta_keys + + def __call__(self, results): + """Call function to collect keys in results. The keys in ``meta_keys`` + will be converted to :obj:`mmcv.DataContainer`. + Args: + results (dict): Result dict contains the data to collect. + Returns: + dict: The result dict contains the following keys + - keys in ``self.keys`` + - ``img_metas`` + """ + + data = {} + img_metas = {} + for key in self.meta_keys: + if key in results: + img_metas[key] = results[key] + + data['img_metas'] = DC(img_metas, cpu_only=True) + for key in self.keys: + data[key] = results[key] + return data + + def __repr__(self): + """str: Return a string that describes the module.""" + return self.__class__.__name__ + \ + f'(keys={self.keys}, meta_keys={self.meta_keys})' + + + +@PIPELINES.register_module() +class RandomScaleImageMultiViewImage(object): + """Randomly scale the images and update the camera projections accordingly. + Args: + scales (list[float]): Candidate scale factors; exactly one scale is + currently supported. + """ + + def __init__(self, scales=[]): + self.scales = scales + assert len(self.scales)==1 + + def __call__(self, results): + """Call function to scale images and update the lidar2img projections. + Args: + results (dict): Result dict from loading pipeline. + Returns: + dict: Updated result dict. + """ + rand_ind = np.random.permutation(range(len(self.scales)))[0] + rand_scale = self.scales[rand_ind] + + y_size = [int(img.shape[0] * rand_scale) for img in results['img']] + x_size = [int(img.shape[1] * rand_scale) for img in results['img']] + scale_factor = np.eye(4) + scale_factor[0, 0] *= rand_scale + scale_factor[1, 1] *= rand_scale + results['img'] = [imresize(img, (x_size[idx], y_size[idx]), return_scale=False) for idx, img in + enumerate(results['img'])] + lidar2img = [scale_factor @ l2i for l2i in results['lidar2img']] + results['lidar2img'] = lidar2img + results['img_shape'] = [img.shape for img in results['img']] + results['ori_shape'] = [img.shape for img in results['img']] + + return results + + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(scales={self.scales})' + return repr_str + +@PIPELINES.register_module() +class ObjectRangeFilterTrack(object): + """Filter objects by the range. + Args: + point_cloud_range (list[float]): Point cloud range. + """ + + def __init__(self, point_cloud_range): + self.pcd_range = np.array(point_cloud_range, dtype=np.float32) + + def __call__(self, input_dict): + """Call function to filter objects by the range. + Args: + input_dict (dict): Result dict from loading pipeline. + Returns: + dict: Results after filtering, 'gt_bboxes_3d', 'gt_labels_3d' \ + keys are updated in the result dict.
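Returning to ``RandomScaleImageMultiViewImage`` above: resizing an image by a factor ``s`` scales pixel coordinates, which is why ``lidar2img`` is premultiplied by a diagonal scale matrix. A sketch with a placeholder projection:

```python
import numpy as np

# Scaling an image by ``s`` scales pixel coordinates, so the 4x4
# lidar-to-image matrix is premultiplied by diag(s, s, 1, 1).
s = 0.5
scale = np.eye(4)
scale[0, 0] *= s
scale[1, 1] *= s
lidar2img = np.eye(4)              # placeholder projection matrix
lidar2img_scaled = scale @ lidar2img
assert lidar2img_scaled[0, 0] == 0.5
```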
+ """ + # Check points instance type and initialise bev_range + if isinstance(input_dict['gt_bboxes_3d'], + (LiDARInstance3DBoxes, DepthInstance3DBoxes)): + bev_range = self.pcd_range[[0, 1, 3, 4]] + elif isinstance(input_dict['gt_bboxes_3d'], CameraInstance3DBoxes): + bev_range = self.pcd_range[[0, 2, 3, 5]] + + if 'gt_inds' in input_dict['ann_info'].keys(): + input_dict['gt_inds'] = input_dict['ann_info']['gt_inds'] + if 'gt_fut_traj' in input_dict['ann_info'].keys(): + input_dict['gt_fut_traj'] = input_dict['ann_info']['gt_fut_traj'] + if 'gt_fut_traj_mask' in input_dict['ann_info'].keys(): + input_dict['gt_fut_traj_mask'] = input_dict['ann_info']['gt_fut_traj_mask'] + if 'gt_past_traj' in input_dict['ann_info'].keys(): + input_dict['gt_past_traj'] = input_dict['ann_info']['gt_past_traj'] + if 'gt_past_traj_mask' in input_dict['ann_info'].keys(): + input_dict['gt_past_traj_mask'] = input_dict['ann_info']['gt_past_traj_mask'] + if 'gt_sdc_bbox' in input_dict['ann_info'].keys(): + input_dict['gt_sdc_bbox'] = input_dict['ann_info']['gt_sdc_bbox'] + input_dict['gt_sdc_label'] = input_dict['ann_info']['gt_sdc_label'] + input_dict['gt_sdc_fut_traj'] = input_dict['ann_info']['gt_sdc_fut_traj'] + input_dict['gt_sdc_fut_traj_mask'] = input_dict['ann_info']['gt_sdc_fut_traj_mask'] + + gt_bboxes_3d = input_dict['gt_bboxes_3d'] + gt_labels_3d = input_dict['gt_labels_3d'] + gt_inds = input_dict['gt_inds'] + gt_fut_traj = input_dict['gt_fut_traj'] + gt_fut_traj_mask = input_dict['gt_fut_traj_mask'] + gt_past_traj = input_dict['gt_past_traj'] + gt_past_traj_mask = input_dict['gt_past_traj_mask'] + + mask = gt_bboxes_3d.in_range_bev(bev_range) + gt_bboxes_3d = gt_bboxes_3d[mask] + # mask is a torch tensor but gt_labels_3d is still numpy array + # using mask to index gt_labels_3d will cause bug when + # len(gt_labels_3d) == 1, where mask=1 will be interpreted + # as gt_labels_3d[1] and cause out of index error + mask = mask.numpy().astype(np.bool) + gt_labels_3d = gt_labels_3d[mask] + gt_inds = gt_inds[mask] + gt_fut_traj = gt_fut_traj[mask] + gt_fut_traj_mask = gt_fut_traj_mask[mask] + gt_past_traj = gt_past_traj[mask] + gt_past_traj_mask = gt_past_traj_mask[mask] + + # limit rad to [-pi, pi] + gt_bboxes_3d.limit_yaw(offset=0.5, period=2 * np.pi) + input_dict['gt_bboxes_3d'] = gt_bboxes_3d + input_dict['gt_labels_3d'] = gt_labels_3d + input_dict['gt_inds'] = gt_inds + input_dict['gt_fut_traj'] = gt_fut_traj + input_dict['gt_fut_traj_mask'] = gt_fut_traj_mask + input_dict['gt_past_traj'] = gt_past_traj + input_dict['gt_past_traj_mask'] = gt_past_traj_mask + return input_dict + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(point_cloud_range={self.pcd_range.tolist()})' + return repr_str + +@PIPELINES.register_module() +class ObjectNameFilterTrack(object): + """Filter GT objects by their names. + Args: + classes (list[str]): List of class names to be kept for training. + """ + + def __init__(self, classes): + self.classes = classes + self.labels = list(range(len(self.classes))) + + def __call__(self, input_dict): + """Call function to filter objects by their names. + Args: + input_dict (dict): Result dict from loading pipeline. + Returns: + dict: Results after filtering, 'gt_bboxes_3d', 'gt_labels_3d' \ + keys are updated in the result dict. 
+ """ + gt_labels_3d = input_dict['gt_labels_3d'] + gt_bboxes_mask = np.array([n in self.labels for n in gt_labels_3d], + dtype=np.bool_) + input_dict['gt_bboxes_3d'] = input_dict['gt_bboxes_3d'][gt_bboxes_mask] + input_dict['gt_labels_3d'] = input_dict['gt_labels_3d'][gt_bboxes_mask] + input_dict['gt_inds'] = input_dict['gt_inds'][gt_bboxes_mask] + input_dict['gt_fut_traj'] = input_dict['gt_fut_traj'][gt_bboxes_mask] + input_dict['gt_fut_traj_mask'] = input_dict['gt_fut_traj_mask'][gt_bboxes_mask] + input_dict['gt_past_traj'] = input_dict['gt_past_traj'][gt_bboxes_mask] + input_dict['gt_past_traj_mask'] = input_dict['gt_past_traj_mask'][gt_bboxes_mask] + return input_dict + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(classes={self.classes})' + return repr_str + +@PIPELINES.register_module() +class CustomObjectRangeFilter(ObjectRangeFilter): + def __call__(self, results): + """Call function to filter objects by the range. + Args: + results (dict): Result dict from loading pipeline. + Returns: + dict: Results after filtering, 'gt_bboxes_3d', 'gt_labels_3d' + keys are updated in the result dict. + """ + # Check points instance type and initialise bev_range + if isinstance(results['gt_bboxes_3d'], + (LiDARInstance3DBoxes, DepthInstance3DBoxes)): + bev_range = self.pcd_range[[0, 1, 3, 4]] + elif isinstance(results['gt_bboxes_3d'], CameraInstance3DBoxes): + bev_range = self.pcd_range[[0, 2, 3, 5]] + + gt_bboxes_3d = results['gt_bboxes_3d'] + gt_labels_3d = results['gt_labels_3d'] + mask = gt_bboxes_3d.in_range_bev(bev_range) + gt_bboxes_3d = gt_bboxes_3d[mask] + # mask is a torch tensor but gt_labels_3d is still numpy array + # using mask to index gt_labels_3d will cause bug when + # len(gt_labels_3d) == 1, where mask=1 will be interpreted + # as gt_labels_3d[1] and cause out of index error + gt_labels_3d = gt_labels_3d[mask.numpy().astype(np.bool)] + + # limit rad to [-pi, pi] + gt_bboxes_3d.limit_yaw(offset=0.5, period=2 * np.pi) + results['gt_bboxes_3d'] = gt_bboxes_3d + results['gt_labels_3d'] = gt_labels_3d + # results['ann_tokens'] = results['ann_tokens'][mask.numpy().astype(np.bool)] + + return results + +@PIPELINES.register_module() +class CustomObjectNameFilter(ObjectNameFilter): + def __call__(self, results): + """Call function to filter objects by their names. + Args: + results (dict): Result dict from loading pipeline. + Returns: + dict: Results after filtering, 'gt_bboxes_3d', 'gt_labels_3d' + keys are updated in the result dict. + """ + gt_labels_3d = results['gt_labels_3d'] + gt_bboxes_mask = np.array([n in self.labels for n in gt_labels_3d], + dtype=np.bool_) + results['gt_bboxes_3d'] = results['gt_bboxes_3d'][gt_bboxes_mask] + results['gt_labels_3d'] = results['gt_labels_3d'][gt_bboxes_mask] + # results['ann_tokens'] = results['ann_tokens'][gt_bboxes_mask] + + return results + + +@PIPELINES.register_module() +class VADObjectRangeFilter(object): + """Filter objects by the range, and also filter corresponding fut trajs + + Args: + point_cloud_range (list[float]): Point cloud range. + """ + + def __init__(self, point_cloud_range): + self.pcd_range = np.array(point_cloud_range, dtype=np.float32) + + def __call__(self, input_dict): + """Call function to filter objects by the range. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after filtering, 'gt_bboxes_3d', 'gt_labels_3d' \ + keys are updated in the result dict. 
+ """ + # Check points instance type and initialise bev_range + if isinstance(input_dict['gt_bboxes_3d'], + (LiDARInstance3DBoxes, DepthInstance3DBoxes)): + bev_range = self.pcd_range[[0, 1, 3, 4]] + elif isinstance(input_dict['gt_bboxes_3d'], CameraInstance3DBoxes): + bev_range = self.pcd_range[[0, 2, 3, 5]] + + gt_bboxes_3d = input_dict['gt_bboxes_3d'] + gt_labels_3d = input_dict['gt_labels_3d'] + + + mask = gt_bboxes_3d.in_range_bev(bev_range) + gt_bboxes_3d = gt_bboxes_3d[mask] + # mask is a torch tensor but gt_labels_3d is still numpy array + # using mask to index gt_labels_3d will cause bug when + # len(gt_labels_3d) == 1, where mask=1 will be interpreted + # as gt_labels_3d[1] and cause out of index error + gt_labels_3d = gt_labels_3d[mask.numpy().astype(np.bool)] + + + # limit rad to [-pi, pi] + gt_bboxes_3d.limit_yaw(offset=0.5, period=2 * np.pi) + input_dict['gt_bboxes_3d'] = gt_bboxes_3d + input_dict['gt_labels_3d'] = gt_labels_3d + + if 'attr_labels' in input_dict: + gt_attr_labels = input_dict['attr_labels'] + gt_attr_labels = gt_attr_labels[mask.numpy().astype(np.bool)] + input_dict['gt_attr_labels'] = gt_attr_labels + + return input_dict + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(point_cloud_range={self.pcd_range.tolist()})' + return repr_str + + +@PIPELINES.register_module() +class VADObjectNameFilter(object): + """Filter GT objects by their names, , and also filter corresponding fut trajs + + Args: + classes (list[str]): List of class names to be kept for training. + """ + + def __init__(self, classes): + self.classes = classes + self.labels = list(range(len(self.classes))) + + def __call__(self, input_dict): + """Call function to filter objects by their names. + + Args: + input_dict (dict): Result dict from loading pipeline. + + Returns: + dict: Results after filtering, 'gt_bboxes_3d', 'gt_labels_3d' \ + keys are updated in the result dict. + """ + gt_labels_3d = input_dict['gt_labels_3d'] + gt_bboxes_mask = np.array([n in self.labels for n in gt_labels_3d], + dtype=np.bool_) + input_dict['gt_bboxes_3d'] = input_dict['gt_bboxes_3d'][gt_bboxes_mask] + input_dict['gt_labels_3d'] = input_dict['gt_labels_3d'][gt_bboxes_mask] + if 'gt_attr_labels' in input_dict: + input_dict['gt_attr_labels'] = input_dict['gt_attr_labels'][gt_bboxes_mask] + + return input_dict + + def __repr__(self): + """str: Return a string that describes the module.""" + repr_str = self.__class__.__name__ + repr_str += f'(classes={self.classes})' + return repr_str + +@PIPELINES.register_module() +class CustomPointsRangeFilter: + """Filter points by the range. + Args: + point_cloud_range (list[float]): Point cloud range. + """ + + def __init__(self, point_cloud_range): + self.pcd_range = np.array(point_cloud_range, dtype=np.float32) + + def __call__(self, data): + """Call function to filter points by the range. + Args: + data (dict): Result dict from loading pipeline. + Returns: + dict: Results after filtering, 'points', 'pts_instance_mask' \ + and 'pts_semantic_mask' keys are updated in the result dict. 
+ """ + points = data["points"] + points_mask = points.in_range_3d(self.pcd_range) + clean_points = points[points_mask] + data["points"] = clean_points + return data \ No newline at end of file diff --git a/mmcv/datasets/prepare_B2D.py b/mmcv/datasets/prepare_B2D.py new file mode 100644 index 0000000..a9b2f33 --- /dev/null +++ b/mmcv/datasets/prepare_B2D.py @@ -0,0 +1,401 @@ +import os +from os.path import join +import gzip, json, pickle +import numpy as np +from pyquaternion import Quaternion +from tqdm import tqdm +from vis_utils import calculate_cube_vertices,calculate_occlusion_stats,edges,DIS_CAR_SAVE +import cv2 +import multiprocessing +import argparse +# All data in the Bench2Drive dataset are in the left-handed coordinate system. +# This code converts all coordinate systems (world coordinate system, vehicle coordinate system, +# camera coordinate system, and lidar coordinate system) to the right-handed coordinate system +# consistent with the nuscenes dataset. + +DATAROOT = '../../data/bench2drive' +MAP_ROOT = '../../data/bench2drive/maps' +OUT_DIR = '../../data/infos' + +MAX_DISTANCE = 75 # Filter bounding boxes that are too far from the vehicle +FILTER_Z_SHRESHOLD = 10 # Filter bounding boxes that are too high/low from the vehicle +FILTER_INVISINLE = True # Filter bounding boxes based on visibility +NUM_VISIBLE_SHRESHOLD = 1 # Filter bounding boxes with fewer visible vertices than this value +NUM_OUTPOINT_SHRESHOLD = 7 # Filter bounding boxes where the number of vertices outside the frame is greater than this value in all cameras +CAMERAS = ['CAM_FRONT', 'CAM_FRONT_LEFT', 'CAM_FRONT_RIGHT', 'CAM_BACK', 'CAM_BACK_LEFT', 'CAM_BACK_RIGHT'] +CAMERA_TO_FOLDER_MAP = {'CAM_FRONT':'rgb_front', 'CAM_FRONT_LEFT':'rgb_front_left', 'CAM_FRONT_RIGHT':'rgb_front_right', 'CAM_BACK':'rgb_back', 'CAM_BACK_LEFT':'rgb_back_left', 'CAM_BACK_RIGHT':'rgb_back_right'} + + +stand_to_ue4_rotate = np.array([[ 0, 0, 1, 0], + [ 1, 0, 0, 0], + [ 0,-1, 0, 0], + [ 0, 0, 0, 1]]) + + + +lidar_to_righthand_ego = np.array([[ 0, 1, 0, 0], + [ -1, 0, 0, 0], + [ 0, 0, 1, 0], + [ 0, 0, 0, 1]]) + +lefthand_ego_to_lidar = np.array([[ 0, 1, 0, 0], + [ 1, 0, 0, 0], + [ 0, 0, 1, 0], + [ 0, 0, 0, 1]]) + + + +left2right = np.eye(4) +left2right[1,1] = -1 + +def apply_trans(vec,world2ego): + vec = np.concatenate((vec,np.array([1]))) + t = world2ego @ vec + return t[0:3] + +def get_pose_matrix(dic): + new_matrix = np.zeros((4,4)) + new_matrix[0:3,0:3] = Quaternion(axis=[0, 0, 1], radians=dic['theta']-np.pi/2).rotation_matrix + new_matrix[0,3] = dic['x'] + new_matrix[1,3] = dic['y'] + new_matrix[3,3] = 1 + return new_matrix + +def get_npc2world(npc): + for key in ['world2vehicle','world2ego','world2sign','world2ped']: + if key in npc.keys(): + npc2world = np.linalg.inv(np.array(npc[key])) + yaw_from_matrix = np.arctan2(npc2world[1,0], npc2world[0,0]) + yaw = npc['rotation'][-1]/180*np.pi + if abs(yaw-yaw_from_matrix)> 0.01: + npc2world[0:3,0:3] = Quaternion(axis=[0, 0, 1], radians=yaw).rotation_matrix + npc2world = left2right@npc2world@left2right + return npc2world + npc2world = np.eye(4) + npc2world[0:3,0:3] = Quaternion(axis=[0, 0, 1], radians=npc['rotation'][-1]/180*np.pi).rotation_matrix + npc2world[0:3,3] = np.array(npc['location']) + return left2right@npc2world@left2right + + +def get_global_trigger_vertex(center,extent,yaw_in_degree): + x,y = center[0],-center[1] + dx,dy = extent[0],extent[1] + yaw_in_radians = -yaw_in_degree/180*np.pi + vertex_in_self = np.array([[dx,dy], + [-dx,dy], + [-dx,-dy], + [dx,-dy]]) + 
rotate_matrix = np.array([[np.cos(yaw_in_radians),-np.sin(yaw_in_radians)], + [np.sin(yaw_in_radians), np.cos(yaw_in_radians)]]) + rotated_vertex = (rotate_matrix @ vertex_in_self.T).T + vertex_in_global = np.array([[x,y]]).repeat(4,axis=0) + rotated_vertex + return vertex_in_global + + + +def get_image_point(loc, K, w2c): + point = np.array([loc[0], loc[1], loc[2], 1]) + point_camera = np.dot(w2c, point) + point_camera = point_camera[0:3] + depth = point_camera[2] + point_img = np.dot(K, point_camera) + point_img[0] /= point_img[2] + point_img[1] /= point_img[2] + return point_img[0:2], depth + +def get_action(index): + Discrete_Actions_DICT = { + 0: (0, 0, 1, False), + 1: (0.7, -0.5, 0, False), + 2: (0.7, -0.3, 0, False), + 3: (0.7, -0.2, 0, False), + 4: (0.7, -0.1, 0, False), + 5: (0.7, 0, 0, False), + 6: (0.7, 0.1, 0, False), + 7: (0.7, 0.2, 0, False), + 8: (0.7, 0.3, 0, False), + 9: (0.7, 0.5, 0, False), + 10: (0.3, -0.7, 0, False), + 11: (0.3, -0.5, 0, False), + 12: (0.3, -0.3, 0, False), + 13: (0.3, -0.2, 0, False), + 14: (0.3, -0.1, 0, False), + 15: (0.3, 0, 0, False), + 16: (0.3, 0.1, 0, False), + 17: (0.3, 0.2, 0, False), + 18: (0.3, 0.3, 0, False), + 19: (0.3, 0.5, 0, False), + 20: (0.3, 0.7, 0, False), + 21: (0, -1, 0, False), + 22: (0, -0.6, 0, False), + 23: (0, -0.3, 0, False), + 24: (0, -0.1, 0, False), + 25: (1, 0, 0, False), + 26: (0, 0.1, 0, False), + 27: (0, 0.3, 0, False), + 28: (0, 0.6, 0, False), + 29: (0, 1.0, 0, False), + 30: (0.5, -0.5, 0, True), + 31: (0.5, -0.3, 0, True), + 32: (0.5, -0.2, 0, True), + 33: (0.5, -0.1, 0, True), + 34: (0.5, 0, 0, True), + 35: (0.5, 0.1, 0, True), + 36: (0.5, 0.2, 0, True), + 37: (0.5, 0.3, 0, True), + 38: (0.5, 0.5, 0, True), + } + throttle, steer, brake, reverse = Discrete_Actions_DICT[index] + return throttle, steer, brake + + +def gengrate_map(map_root): + map_infos = {} + for file_name in os.listdir(map_root): + if '.npz' in file_name: + map_info = dict(np.load(join(map_root,file_name), allow_pickle=True)['arr']) + town_name = file_name.split('_')[0] + map_infos[town_name] = {} + lane_points = [] + lane_types = [] + lane_sample_points = [] + trigger_volumes_points = [] + trigger_volumes_types = [] + trigger_volumes_sample_points = [] + for road_id, road in map_info.items(): + for lane_id, lane in road.items(): + if lane_id == 'Trigger_Volumes': + for single_trigger_volume in lane: + points = np.array(single_trigger_volume['Points']) + points[:,1] *= -1 + trigger_volumes_points.append(points) + trigger_volumes_sample_points.append(points.mean(axis=0)) + trigger_volumes_types.append(single_trigger_volume['Type']) + else: + for single_lane in lane: + points = np.array([raw_point[0] for raw_point in single_lane['Points']]) + points[:,1] *= -1 + lane_points.append(points) + lane_types.append(single_lane['Type']) + lane_lenth = points.shape[0] + if lane_lenth % 50 !=0: + devide_points = [50*i for i in range(lane_lenth//50+1)] + else: + devide_points = [50*i for i in range(lane_lenth//50)] + devide_points.append(lane_lenth-1) + lane_sample_points_tmp = points[devide_points] + lane_sample_points.append(lane_sample_points_tmp) + map_infos[town_name]['lane_points'] = lane_points + map_infos[town_name]['lane_sample_points'] = lane_sample_points + map_infos[town_name]['lane_types'] = lane_types + map_infos[town_name]['trigger_volumes_points'] = trigger_volumes_points + map_infos[town_name]['trigger_volumes_sample_points'] = trigger_volumes_sample_points + map_infos[town_name]['trigger_volumes_types'] = trigger_volumes_types + with 
open(join(OUT_DIR,'b2d_map_infos.pkl'),'wb') as f: + pickle.dump(map_infos,f) + +def preprocess(folder_list,idx,tmp_dir,train_or_val): + + data_root = DATAROOT + cameras = CAMERAS + final_data = [] + if idx == 0: + folders = tqdm(folder_list) + else: + folders = folder_list + + for folder_name in folders: + folder_path = join(data_root, folder_name) + last_position_dict = {} + for ann_name in sorted(os.listdir(join(folder_path,'anno')),key= lambda x: int(x.split('.')[0])): + position_dict = {} + frame_data = {} + cam_gray_depth = {} + with gzip.open(join(folder_path,'anno',ann_name), 'rt', encoding='utf-8') as gz_file: + anno = json.load(gz_file) + frame_data['folder'] = folder_name + frame_data['town_name'] = folder_name.split('/')[1].split('_')[1] + frame_data['command_far_xy'] = np.array([anno['x_command_far'],-anno['y_command_far']]) + frame_data['command_far'] = anno['command_far'] + frame_data['command_near_xy'] = np.array([anno['x_command_near'],-anno['y_command_near']]) + frame_data['command_near'] = anno['command_near'] + frame_data['frame_idx'] = int(ann_name.split('.')[0]) + frame_data['ego_yaw'] = -np.nan_to_num(anno['theta'],nan=np.pi)+np.pi/2 + frame_data['ego_translation'] = np.array([anno['x'],-anno['y'],0]) + frame_data['ego_vel'] = np.array([anno['speed'],0,0]) + frame_data['ego_accel'] = np.array([anno['acceleration'][0],-anno['acceleration'][1],anno['acceleration'][2]]) + frame_data['ego_rotation_rate'] = -np.array(anno['angular_velocity']) + frame_data['ego_size'] = np.array([anno['bounding_boxes'][0]['extent'][1],anno['bounding_boxes'][0]['extent'][0],anno['bounding_boxes'][0]['extent'][2]])*2 + world2ego = left2right@anno['bounding_boxes'][0]['world2ego']@left2right + frame_data['world2ego'] = world2ego + if frame_data['frame_idx'] == 0: + expert_file_path = join(folder_path,'expert_assessment','-0001.npz') + else: + expert_file_path = join(folder_path,'expert_assessment',str(frame_data['frame_idx']-1).zfill(5)+'.npz') + expert_data = np.load(expert_file_path,allow_pickle=True)['arr_0'] + action_id = expert_data[-1] + # value = expert_data[-2] + # expert_feature = expert_data[:-2] + throttle, steer, brake = get_action(action_id) + frame_data['brake'] = brake + frame_data['throttle'] = throttle + frame_data['steer'] = steer + #frame_data['action_id'] = action_id + #frame_data['value'] = value + #frame_data['expert_feature'] = expert_feature + ###get sensor infos### + sensor_infos = {} + for cam in CAMERAS: + sensor_infos[cam] = {} + sensor_infos[cam]['cam2ego'] = left2right @ np.array(anno['sensors'][cam]['cam2ego']) @stand_to_ue4_rotate + sensor_infos[cam]['intrinsic'] = np.array(anno['sensors'][cam]['intrinsic']) + sensor_infos[cam]['world2cam'] = np.linalg.inv(stand_to_ue4_rotate) @ np.array(anno['sensors'][cam]['world2cam']) @left2right + sensor_infos[cam]['data_path'] = join(folder_name,'camera',CAMERA_TO_FOLDER_MAP[cam],ann_name.split('.')[0]+'.jpg') + cam_gray_depth[cam] = cv2.imread(join(data_root,sensor_infos[cam]['data_path']).replace('rgb_','depth_').replace('.jpg','.png'))[:,:,0] + sensor_infos['LIDAR_TOP'] = {} + sensor_infos['LIDAR_TOP']['lidar2ego'] = np.array(anno['sensors']['LIDAR_TOP']['lidar2ego']) @ lidar_to_righthand_ego + world2lidar = lefthand_ego_to_lidar @ np.array(anno['sensors']['LIDAR_TOP']['world2lidar']) @ left2right + sensor_infos['LIDAR_TOP']['world2lidar'] = world2lidar + frame_data['sensors'] = sensor_infos + ###get bounding_boxes infos### + gt_boxes = [] + gt_names = [] + gt_ids = [] + num_points_list = [] + npc2world_list = [] + for 
npc in anno['bounding_boxes']: + if npc['class'] == 'ego_vehicle': continue + if npc['distance'] > MAX_DISTANCE: continue + if abs(npc['location'][2] - anno['bounding_boxes'][0]['location'][2]) > FILTER_Z_SHRESHOLD: continue + center = np.array([npc['center'][0],-npc['center'][1],npc['center'][2]]) # left hand -> right hand + extent = np.array([npc['extent'][1],npc['extent'][0],npc['extent'][2]]) # lwh -> wlh + position_dict[npc['id']] = center + local_center = apply_trans(center, world2lidar) + size = extent*2 + if 'world2vehicle' in npc.keys(): + world2vehicle = left2right@np.array(npc['world2vehicle'])@left2right + vehicle2lidar = world2lidar @ np.linalg.inv(world2vehicle) + yaw_local = np.arctan2(vehicle2lidar[1,0], vehicle2lidar[0,0]) + + else: + yaw_local = -npc['rotation'][-1]/180*np.pi - frame_data['ego_yaw'] +np.pi / 2 + yaw_local_in_lidar_box = -yaw_local - np.pi / 2 + while yaw_local < -np.pi: + yaw_local += 2*np.pi + while yaw_local > np.pi: + yaw_local -= 2*np.pi + if 'speed' in npc.keys(): + if 'vehicle' in npc['class']: # only vehicles have correct speed + speed = npc['speed'] + else: + if npc['id'] in last_position_dict.keys(): #calculate speed for other object + speed = np.linalg.norm((center-last_position_dict[npc['id']])[0:2]) * 10 + else: + speed = 0 + else: + speed = 0 + if 'num_points' in npc.keys(): + num_points = npc['num_points'] + else: + num_points = -1 + npc2world = get_npc2world(npc) + speed_x = speed * np.cos(yaw_local) + speed_y = speed * np.sin(yaw_local) + + ###fliter_bounding_boxes### + if FILTER_INVISINLE: + valid = False + box2lidar = np.eye(4) + box2lidar[0:3,0:3] = Quaternion(axis=[0, 0, 1], radians=yaw_local).rotation_matrix + box2lidar[0:3,3] = local_center + lidar2box = np.linalg.inv(box2lidar) + raw_verts = calculate_cube_vertices(local_center,extent) + verts = [] + for raw_vert in raw_verts: + tmp = np.dot(lidar2box, [raw_vert[0], raw_vert[1], raw_vert[2],1]) + tmp[0:3] += local_center + verts.append(tmp.tolist()[:-1]) + for cam in cameras: + lidar2cam = np.linalg.inv(frame_data['sensors'][cam]['cam2ego']) @ sensor_infos['LIDAR_TOP']['lidar2ego'] + test_points = [] + test_depth = [] + for vert in verts: + point, depth = get_image_point(vert, frame_data['sensors'][cam]['intrinsic'], lidar2cam) + if depth > 0: + test_points.append(point) + test_depth.append(depth) + + num_visible_vertices, num_invisible_vertices, num_vertices_outside_camera, colored_points = calculate_occlusion_stats(np.array(test_points), np.array(test_depth), cam_gray_depth[cam], max_render_depth=MAX_DISTANCE) + if num_visible_vertices>NUM_VISIBLE_SHRESHOLD and num_vertices_outside_camera 0: + indice = np.where(self.flag == i)[0] + assert len(indice) == size + # TODO: check whether torch.randperm() can be replaced by + # numpy.random.permutation(). 
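The padding computed just below rounds each group up to a whole number of global batches (``samples_per_gpu * num_replicas``) and then repeats leading indices to fill the gap; with illustrative numbers:

```python
import math

# Each group is grown to the next multiple of samples_per_gpu * num_replicas
# so every rank receives whole batches; the first ``extra`` indices repeat.
size, samples_per_gpu, num_replicas = 10, 4, 2
target = int(math.ceil(size * 1.0 / samples_per_gpu / num_replicas)) \
    * samples_per_gpu * num_replicas
extra = target - size
assert (target, extra) == (16, 6)
```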
+ indice = indice[list( + torch.randperm(int(size), generator=g).numpy())].tolist() + extra = int( + math.ceil( + size * 1.0 / self.samples_per_gpu / self.num_replicas) + ) * self.samples_per_gpu * self.num_replicas - len(indice) + # pad indice + tmp = indice.copy() + for _ in range(extra // size): + indice.extend(tmp) + indice.extend(tmp[:extra % size]) + indices.extend(indice) + + assert len(indices) == self.total_size + + indices = [ + indices[j] for i in list( + torch.randperm( + len(indices) // self.samples_per_gpu, generator=g)) + for j in range(i * self.samples_per_gpu, (i + 1) * + self.samples_per_gpu) + ] + + # subsample + offset = self.num_samples * self.rank + indices = indices[offset:offset + self.num_samples] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + def set_epoch(self, epoch): + self.epoch = epoch diff --git a/mmcv/datasets/samplers/sampler.py b/mmcv/datasets/samplers/sampler.py new file mode 100644 index 0000000..1906049 --- /dev/null +++ b/mmcv/datasets/samplers/sampler.py @@ -0,0 +1,7 @@ +from mmcv.utils.registry import Registry, build_from_cfg + +SAMPLER = Registry('sampler') + + +def build_sampler(cfg, default_args): + return build_from_cfg(cfg, SAMPLER, default_args) diff --git a/mmcv/datasets/utils.py b/mmcv/datasets/utils.py new file mode 100644 index 0000000..02cf96d --- /dev/null +++ b/mmcv/datasets/utils.py @@ -0,0 +1,298 @@ +import copy +import warnings +from mmcv.models import VGG +from mmcv.runner.hooks import HOOKS, Hook + +from mmcv.datasets.pipelines import (Collect3D, DefaultFormatBundle3D, + LoadAnnotations3D, + LoadImageFromFileMono3D, + LoadMultiViewImageFromFiles, + LoadPointsFromFile, + LoadPointsFromMultiSweeps, + MultiScaleFlipAug3D, + PointSegClassMapping) + +from mmcv.datasets.builder import PIPELINES +from mmcv.datasets.pipelines import LoadAnnotations, LoadImageFromFile +from mmcv.models.dense_heads import GARPNHead, RPNHead +from mmcv.models.roi_heads.mask_heads import FusedSemanticHead +from mmcv.parallel import DataContainer + + +def replace_ImageToTensor(pipelines): + """Replace the ImageToTensor transform in a data pipeline to + DefaultFormatBundle, which is normally useful in batch inference. + + Args: + pipelines (list[dict]): Data pipeline configs. + + Returns: + list: The new pipeline list with all ImageToTensor replaced by + DefaultFormatBundle. + + Examples: + >>> pipelines = [ + ... dict(type='LoadImageFromFile'), + ... dict( + ... type='MultiScaleFlipAug', + ... img_scale=(1333, 800), + ... flip=False, + ... transforms=[ + ... dict(type='Resize', keep_ratio=True), + ... dict(type='RandomFlip'), + ... dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]), + ... dict(type='Pad', size_divisor=32), + ... dict(type='ImageToTensor', keys=['img']), + ... dict(type='Collect', keys=['img']), + ... ]) + ... ] + >>> expected_pipelines = [ + ... dict(type='LoadImageFromFile'), + ... dict( + ... type='MultiScaleFlipAug', + ... img_scale=(1333, 800), + ... flip=False, + ... transforms=[ + ... dict(type='Resize', keep_ratio=True), + ... dict(type='RandomFlip'), + ... dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]), + ... dict(type='Pad', size_divisor=32), + ... dict(type='DefaultFormatBundle'), + ... dict(type='Collect', keys=['img']), + ... ]) + ... 
] + >>> assert expected_pipelines == replace_ImageToTensor(pipelines) + """ + pipelines = copy.deepcopy(pipelines) + for i, pipeline in enumerate(pipelines): + if pipeline['type'] == 'MultiScaleFlipAug': + assert 'transforms' in pipeline + pipeline['transforms'] = replace_ImageToTensor( + pipeline['transforms']) + elif pipeline['type'] == 'ImageToTensor': + warnings.warn( + '"ImageToTensor" pipeline is replaced by ' + '"DefaultFormatBundle" for batch inference. It is ' + 'recommended to manually replace it in the test ' + 'data pipeline in your config file.', UserWarning) + pipelines[i] = {'type': 'DefaultFormatBundle'} + return pipelines + + +# def get_loading_pipeline(pipeline): +# """Only keep loading image and annotations related configuration. + +# Args: +# pipeline (list[dict]): Data pipeline configs. + +# Returns: +# list[dict]: The new pipeline list with only keep +# loading image and annotations related configuration. + +# Examples: +# >>> pipelines = [ +# ... dict(type='LoadImageFromFile'), +# ... dict(type='LoadAnnotations', with_bbox=True), +# ... dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), +# ... dict(type='RandomFlip', flip_ratio=0.5), +# ... dict(type='Normalize', **img_norm_cfg), +# ... dict(type='Pad', size_divisor=32), +# ... dict(type='DefaultFormatBundle'), +# ... dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +# ... ] +# >>> expected_pipelines = [ +# ... dict(type='LoadImageFromFile'), +# ... dict(type='LoadAnnotations', with_bbox=True) +# ... ] +# >>> assert expected_pipelines ==\ +# ... get_loading_pipeline(pipelines) +# """ +# loading_pipeline_cfg = [] +# for cfg in pipeline: +# obj_cls = PIPELINES.get(cfg['type']) +# # TODO:use more elegant way to distinguish loading modules +# if obj_cls is not None and obj_cls in (LoadImageFromFile, +# LoadAnnotations): +# loading_pipeline_cfg.append(cfg) +# assert len(loading_pipeline_cfg) == 2, \ +# 'The data pipeline in your config file must include ' \ +# 'loading image and annotations related pipeline.' +# return loading_pipeline_cfg + + +@HOOKS.register_module() +class NumClassCheckHook(Hook): + + def _check_head(self, runner): + """Check whether the `num_classes` in head matches the length of + `CLASSSES` in `dataset`. + + Args: + runner (obj:`EpochBasedRunner`): Epoch based Runner. + """ + model = runner.model + dataset = runner.data_loader.dataset + if dataset.CLASSES is None: + runner.logger.warning( + f'Please set `CLASSES` ' + f'in the {dataset.__class__.__name__} and ' + f'check if it is consistent with the `num_classes` ' + f'of head') + else: + assert type(dataset.CLASSES) is not str, \ + (f'`CLASSES` in {dataset.__class__.__name__} ' + f'should be a tuple of str. ' + f'Add comma if number of classes is 1 as ' + f'CLASSES = ({dataset.CLASSES},)') + for name, module in model.named_modules(): + if hasattr(module, 'num_classes') and not isinstance( + module, (RPNHead, VGG, FusedSemanticHead, GARPNHead)): + assert module.num_classes == len(dataset.CLASSES), \ + (f'The `num_classes` ({module.num_classes}) in ' + f'{module.__class__.__name__} of ' + f'{model.__class__.__name__} does not match ' + f'the length of `CLASSES` ' + f'({len(dataset.CLASSES)}) in ' + f'{dataset.__class__.__name__}') + + def before_train_epoch(self, runner): + """Check whether the training dataset is compatible with head. + + Args: + runner (obj:`EpochBasedRunner`): Epoch based Runner.
+ """ + self._check_head(runner) + + def before_val_epoch(self, runner): + """Check whether the dataset in val epoch is compatible with head. + + Args: + runner (obj:`EpochBasedRunner`): Epoch based Runner. + """ + self._check_head(runner) + + +def is_loading_function(transform): + """Judge whether a transform function is a loading function. + + Note: `MultiScaleFlipAug3D` is a wrapper for multiple pipeline functions, + so we need to search if its inner transforms contain any loading function. + + Args: + transform (dict | :obj:`Pipeline`): A transform config or a function. + + Returns: + bool | None: Whether it is a loading function. None means can't judge. + When transform is `MultiScaleFlipAug3D`, we return None. + """ + # TODO: use more elegant way to distinguish loading modules + loading_functions = (LoadImageFromFile, LoadPointsFromFile, + LoadAnnotations3D, LoadMultiViewImageFromFiles, + LoadPointsFromMultiSweeps, DefaultFormatBundle3D, + Collect3D, LoadImageFromFileMono3D, + PointSegClassMapping) + if isinstance(transform, dict): + obj_cls = PIPELINES.get(transform['type']) + if obj_cls is None: + return False + if obj_cls in loading_functions: + return True + if obj_cls in (MultiScaleFlipAug3D, ): + return None + elif callable(transform): + if isinstance(transform, loading_functions): + return True + if isinstance(transform, MultiScaleFlipAug3D): + return None + return False + + +def get_loading_pipeline(pipeline): + """Only keep loading image, points and annotations related configuration. + + Args: + pipeline (list[dict] | list[:obj:`Pipeline`]): + Data pipeline configs or list of pipeline functions. + + Returns: + list[dict] | list[:obj:`Pipeline`]): The new pipeline list with only + keep loading image, points and annotations related configuration. + + Examples: + >>> pipelines = [ + ... dict(type='LoadPointsFromFile', + ... coord_type='LIDAR', load_dim=4, use_dim=4), + ... dict(type='LoadImageFromFile'), + ... dict(type='LoadAnnotations3D', + ... with_bbox=True, with_label_3d=True), + ... dict(type='Resize', + ... img_scale=[(640, 192), (2560, 768)], keep_ratio=True), + ... dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + ... dict(type='PointsRangeFilter', + ... point_cloud_range=point_cloud_range), + ... dict(type='ObjectRangeFilter', + ... point_cloud_range=point_cloud_range), + ... dict(type='PointShuffle'), + ... dict(type='Normalize', **img_norm_cfg), + ... dict(type='Pad', size_divisor=32), + ... dict(type='DefaultFormatBundle3D', class_names=class_names), + ... dict(type='Collect3D', + ... keys=['points', 'img', 'gt_bboxes_3d', 'gt_labels_3d']) + ... ] + >>> expected_pipelines = [ + ... dict(type='LoadPointsFromFile', + ... coord_type='LIDAR', load_dim=4, use_dim=4), + ... dict(type='LoadImageFromFile'), + ... dict(type='LoadAnnotations3D', + ... with_bbox=True, with_label_3d=True), + ... dict(type='DefaultFormatBundle3D', class_names=class_names), + ... dict(type='Collect3D', + ... keys=['points', 'img', 'gt_bboxes_3d', 'gt_labels_3d']) + ... ] + >>> assert expected_pipelines ==\ + ... 
+        ...     get_loading_pipeline(pipelines)
+    """
+    loading_pipeline = []
+    for transform in pipeline:
+        is_loading = is_loading_function(transform)
+        if is_loading is None:  # MultiScaleFlipAug3D
+            # extract its inner pipeline
+            if isinstance(transform, dict):
+                inner_pipeline = transform.get('transforms', [])
+            else:
+                inner_pipeline = transform.transforms.transforms
+            loading_pipeline.extend(get_loading_pipeline(inner_pipeline))
+        elif is_loading:
+            loading_pipeline.append(transform)
+    assert len(loading_pipeline) > 0, \
+        'The data pipeline in your config file must include ' \
+        'a loading step.'
+    return loading_pipeline
+
+
+def extract_result_dict(results, key):
+    """Extract and return the data corresponding to key in result dict.
+
+    ``results`` is a dict output from `pipeline(input_dict)`, which is the
+    loaded data from ``Dataset`` class.
+    The data terms inside may be wrapped in list, tuple and DataContainer, so
+    this function essentially extracts data from these wrappers.
+
+    Args:
+        results (dict): Data loaded using pipeline.
+        key (str): Key of the desired data.
+
+    Returns:
+        np.ndarray | torch.Tensor | None: Data term.
+    """
+    if key not in results.keys():
+        return None
+    # results[key] may be data or list[data] or tuple[data]
+    # data may be wrapped inside DataContainer
+    data = results[key]
+    if isinstance(data, (list, tuple)):
+        data = data[0]
+    if isinstance(data, DataContainer):
+        data = data._data
+    return data
+
diff --git a/mmcv/datasets/vad_custom_nuscenes_eval.py b/mmcv/datasets/vad_custom_nuscenes_eval.py
new file mode 100644
index 0000000..0285591
--- /dev/null
+++ b/mmcv/datasets/vad_custom_nuscenes_eval.py
@@ -0,0 +1,834 @@
+import argparse
+import copy
+import json
+import os
+import time
+from typing import Tuple, Dict, Any
+from mmcv.fileio.io import dump, load
+import torch
+import numpy as np
+from nuscenes import NuScenes
+from nuscenes.eval.common.config import config_factory
+from nuscenes.eval.common.data_classes import EvalBoxes
+from nuscenes.eval.detection.evaluate import NuScenesEval
+from pyquaternion import Quaternion
+from nuscenes.eval.detection.data_classes import DetectionBox
+from nuscenes.eval.detection.utils import category_to_detection_name
+from nuscenes.eval.tracking.data_classes import TrackingBox
+from nuscenes.utils.data_classes import Box
+from nuscenes.utils.geometry_utils import points_in_box
+from nuscenes.utils.splits import create_splits_scenes
+import tqdm
+from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility, transform_matrix
+import pycocotools.mask as mask_util
+from torchvision.transforms.functional import rotate
+import cv2
+import random
+from nuscenes.eval.common.loaders import load_gt, add_center_dist
+from nuscenes.eval.detection.algo import accumulate, calc_ap, calc_tp
+from nuscenes.eval.detection.data_classes import DetectionConfig, DetectionMetrics, DetectionBox, DetectionMetricData, DetectionMetricDataList
+from nuscenes.eval.detection.render import summary_plot, class_pr_curve, dist_pr_curve, visualize_sample
+from nuscenes.eval.common.utils import quaternion_yaw, Quaternion
+from matplotlib import pyplot as plt
+from nuscenes.eval.common.render import setup_axis
+from nuscenes.eval.common.utils import boxes_to_sensor
+from nuscenes.eval.detection.constants import TP_METRICS, DETECTION_NAMES, DETECTION_COLORS, TP_METRICS_UNITS, \
+    PRETTY_DETECTION_NAMES, PRETTY_TP_METRICS
+from nuscenes.utils.data_classes import LidarPointCloud
+import mmcv
+
+
+Axis = Any
+
+def class_tp_curve(md_list: DetectionMetricDataList,
+                   metrics: DetectionMetrics,
+                   detection_name: str,
+                   min_recall: float,
+                   dist_th_tp: float,
+                   savepath: str = None,
+                   ax: Axis = None) -> None:
+    """
+    Plot the true positive curve for the specified class.
+    :param md_list: DetectionMetricDataList instance.
+    :param metrics: DetectionMetrics instance.
+    :param detection_name: The detection class.
+    :param min_recall: Minimum recall value.
+    :param dist_th_tp: The distance threshold used to determine matches.
+    :param savepath: If given, saves the rendering here instead of displaying.
+    :param ax: Axes onto which to render.
+    """
+    # Get metric data for given detection class with tp distance threshold.
+    md = md_list[(detection_name, dist_th_tp)]
+    min_recall_ind = round(100 * min_recall)
+    if min_recall_ind <= md.max_recall_ind:
+        # For traffic_cone and barrier only a subset of the metrics are plotted.
+        rel_metrics = [m for m in TP_METRICS if not np.isnan(metrics.get_label_tp(detection_name, m))]
+        ylimit = max([max(getattr(md, metric)[min_recall_ind:md.max_recall_ind + 1]) for metric in rel_metrics]) * 1.1
+    else:
+        ylimit = 1.0
+
+    # Prepare axis.
+    if ax is None:
+        ax = setup_axis(title=PRETTY_DETECTION_NAMES[detection_name], xlabel='Recall', ylabel='Error', xlim=1,
+                        min_recall=min_recall)
+    ax.set_ylim(0, ylimit)
+
+    # Plot the recall vs. error curve for each tp metric.
+    for metric in TP_METRICS:
+        tp = metrics.get_label_tp(detection_name, metric)
+
+        # Plot only if we have valid data.
+        if tp is not np.nan and min_recall_ind <= md.max_recall_ind:
+            recall, error = md.recall[:md.max_recall_ind + 1], getattr(md, metric)[:md.max_recall_ind + 1]
+        else:
+            recall, error = [], []
+
+        # Change legend based on tp value.
+        if tp is np.nan:
+            label = '{}: n/a'.format(PRETTY_TP_METRICS[metric])
+        elif min_recall_ind > md.max_recall_ind:
+            label = '{}: nan'.format(PRETTY_TP_METRICS[metric])
+        else:
+            label = '{}: {:.2f} ({})'.format(PRETTY_TP_METRICS[metric], tp, TP_METRICS_UNITS[metric])
+        if metric == 'trans_err':
+            label += f' ({md.max_recall_ind})'  # append the max recall index
+            print(f'Recall: {detection_name}: {md.max_recall_ind / 100}')
+        ax.plot(recall, error, label=label)
+    ax.axvline(x=md.max_recall, linestyle='-.', color=(0, 0, 0, 0.3))
+    ax.legend(loc='best')
+
+    if savepath is not None:
+        plt.savefig(savepath)
+        plt.close()
+
+
+class DetectionBox_modified(DetectionBox):
+    def __init__(self, *args, token=None, visibility=None, index=None, **kwargs):
+        """
+        DetectionBox with an additional annotation token, visibility token and
+        per-scene frame index.
+        """
+        super().__init__(*args, **kwargs)
+        self.token = token
+        self.visibility = visibility
+        self.index = index
+
+    def serialize(self) -> dict:
+        """ Serialize instance into json-friendly format. """
+        return {
+            'token': self.token,
+            'sample_token': self.sample_token,
+            'translation': self.translation,
+            'size': self.size,
+            'rotation': self.rotation,
+            'velocity': self.velocity,
+            'ego_translation': self.ego_translation,
+            'num_pts': self.num_pts,
+            'detection_name': self.detection_name,
+            'detection_score': self.detection_score,
+            'attribute_name': self.attribute_name,
+            'visibility': self.visibility,
+            'index': self.index
+        }
+
+    @classmethod
+    def deserialize(cls, content: dict):
+        """ Initialize from serialized content. """
""" + return cls( + token=content['token'], + sample_token=content['sample_token'], + translation=tuple(content['translation']), + size=tuple(content['size']), + rotation=tuple(content['rotation']), + velocity=tuple(content['velocity']), + ego_translation=(0.0, 0.0, 0.0) if 'ego_translation' not in content + else tuple(content['ego_translation']), + num_pts=-1 if 'num_pts' not in content else int(content['num_pts']), + detection_name=content['detection_name'], + detection_score=-1.0 if 'detection_score' not in content else float(content['detection_score']), + attribute_name=content['attribute_name'], + visibility=content['visibility'], + index=content['index'], + ) + + +def center_in_image(box, intrinsic: np.ndarray, imsize: Tuple[int, int], vis_level: int = BoxVisibility.ANY) -> bool: + """ + Check if a box is visible inside an image without accounting for occlusions. + :param box: The box to be checked. + :param intrinsic: . Intrinsic camera matrix. + :param imsize: (width, height). + :param vis_level: One of the enumerations of . + :return True if visibility condition is satisfied. + """ + + center_3d = box.center.reshape(3, 1) + center_img = view_points(center_3d, intrinsic, normalize=True)[:2, :] + + visible = np.logical_and(center_img[0, :] > 0, center_img[0, :] < imsize[0]) + visible = np.logical_and(visible, center_img[1, :] < imsize[1]) + visible = np.logical_and(visible, center_img[1, :] > 0) + visible = np.logical_and(visible, center_3d[2, :] > 1) + + in_front = center_3d[2, :] > 0.1 # True if a corner is at least 0.1 meter in front of the camera. + + if vis_level == BoxVisibility.ALL: + return all(visible) and all(in_front) + elif vis_level == BoxVisibility.ANY: + return any(visible) and all(in_front) + elif vis_level == BoxVisibility.NONE: + return True + else: + raise ValueError("vis_level: {} not valid".format(vis_level)) + + +def exist_corners_in_image_but_not_all(box, intrinsic: np.ndarray, imsize: Tuple[int, int], + vis_level: int = BoxVisibility.ANY) -> bool: + """ + Check if a box is visible in images but not all corners in image . + :param box: The box to be checked. + :param intrinsic: . Intrinsic camera matrix. + :param imsize: (width, height). + :param vis_level: One of the enumerations of . + :return True if visibility condition is satisfied. + """ + + corners_3d = box.corners() + corners_img = view_points(corners_3d, intrinsic, normalize=True)[:2, :] + + visible = np.logical_and(corners_img[0, :] > 0, corners_img[0, :] < imsize[0]) + visible = np.logical_and(visible, corners_img[1, :] < imsize[1]) + visible = np.logical_and(visible, corners_img[1, :] > 0) + visible = np.logical_and(visible, corners_3d[2, :] > 1) + + in_front = corners_3d[2, :] > 0.1 # True if a corner is at least 0.1 meter in front of the camera. + + if any(visible) and not all(visible) and all(in_front): + return True + else: + return False + +def load_prediction(result_path: str, max_boxes_per_sample: int, box_cls, verbose: bool = False) \ + -> Tuple[EvalBoxes, Dict]: + """ + Loads object predictions from file. + :param result_path: Path to the .json result file provided by the user. + :param max_boxes_per_sample: Maximim number of boxes allowed per sample. + :param box_cls: Type of box to load, e.g. DetectionBox or TrackingBox. + :param verbose: Whether to print messages to stdout. + :return: The deserialized results and meta data. + """ + + # Load from file and check that the format is correct. 
+ # with open(result_path) as f: + # data = json.load(f) + data = load(result_path) + assert 'results' in data, 'Error: No field `results` in result file. Please note that the result format changed.' \ + 'See https://www.nuscenes.org/object-detection for more information.' + + # Deserialize results and get meta data. + all_results = EvalBoxes.deserialize(data['results'], box_cls) + meta = data['meta'] + if verbose: + print("Loaded results from {}. Found detections for {} samples." + .format(result_path, len(all_results.sample_tokens))) + + # Check that each sample has no more than x predicted boxes. + for sample_token in all_results.sample_tokens: + assert len(all_results.boxes[sample_token]) <= max_boxes_per_sample, \ + "Error: Only <= %d boxes per sample allowed!" % max_boxes_per_sample + + return all_results, meta + +def load_gt(nusc: NuScenes, eval_split: str, box_cls, verbose: bool = False): + """ + Loads ground truth boxes from DB. + :param nusc: A NuScenes instance. + :param eval_split: The evaluation split for which we load GT boxes. + :param box_cls: Type of box to load, e.g. DetectionBox or TrackingBox. + :param verbose: Whether to print messages to stdout. + :return: The GT boxes. + """ + + # Init. + if box_cls == DetectionBox_modified: + attribute_map = {a['token']: a['name'] for a in nusc.attribute} + + if verbose: + print('Loading annotations for {} split from nuScenes version: {}'.format(eval_split, nusc.version)) + # Read out all sample_tokens in DB. + sample_tokens_all = [s['token'] for s in nusc.sample] + assert len(sample_tokens_all) > 0, "Error: Database has no samples!" + + # Only keep samples from this split. + splits = create_splits_scenes() + + # Check compatibility of split with nusc_version. + version = nusc.version + if eval_split in {'train', 'val', 'train_detect', 'train_track'}: + assert version.endswith('trainval'), \ + 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version) + elif eval_split in {'mini_train', 'mini_val'}: + assert version.endswith('mini'), \ + 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version) + elif eval_split == 'test': + assert version.endswith('test'), \ + 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version) + else: + raise ValueError('Error: Requested split {} which this function cannot map to the correct NuScenes version.' + .format(eval_split)) + + if eval_split == 'test': + # Check that you aren't trying to cheat :). + assert len(nusc.sample_annotation) > 0, \ + 'Error: You are trying to evaluate on the test set but you do not have the annotations!' + index_map = {} + for scene in nusc.scene: + first_sample_token = scene['first_sample_token'] + sample = nusc.get('sample', first_sample_token) + index_map[first_sample_token] = 1 + index = 2 + while sample['next'] != '': + sample = nusc.get('sample', sample['next']) + index_map[sample['token']] = index + index += 1 + + sample_tokens = [] + for sample_token in sample_tokens_all: + scene_token = nusc.get('sample', sample_token)['scene_token'] + scene_record = nusc.get('scene', scene_token) + if scene_record['name'] in splits[eval_split]: + sample_tokens.append(sample_token) + + all_annotations = EvalBoxes() + + # Load annotations and filter predictions and annotations. 
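+    # `index_map` (built above) gives each sample its 1-based frame index
+    # within its scene; it is attached to every GT box below so that
+    # `NuScenesEval_custom.update_gt(type_='ord', index=...)` can later
+    # evaluate a single frame position across scenes.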
+    tracking_id_set = set()
+    for sample_token in tqdm.tqdm(sample_tokens, leave=verbose):
+
+        sample = nusc.get('sample', sample_token)
+        sample_annotation_tokens = sample['anns']
+
+        sample_boxes = []
+        for sample_annotation_token in sample_annotation_tokens:
+
+            sample_annotation = nusc.get('sample_annotation', sample_annotation_token)
+            if box_cls == DetectionBox_modified:
+                # Get label name in detection task and filter unused labels.
+                detection_name = category_to_detection_name(sample_annotation['category_name'])
+                if detection_name is None:
+                    continue
+
+                # Get attribute_name.
+                attr_tokens = sample_annotation['attribute_tokens']
+                attr_count = len(attr_tokens)
+                if attr_count == 0:
+                    attribute_name = ''
+                elif attr_count == 1:
+                    attribute_name = attribute_map[attr_tokens[0]]
+                else:
+                    raise Exception('Error: GT annotations must not have more than one attribute!')
+
+                sample_boxes.append(
+                    box_cls(
+                        token=sample_annotation_token,
+                        sample_token=sample_token,
+                        translation=sample_annotation['translation'],
+                        size=sample_annotation['size'],
+                        rotation=sample_annotation['rotation'],
+                        velocity=nusc.box_velocity(sample_annotation['token'])[:2],
+                        num_pts=sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts'],
+                        detection_name=detection_name,
+                        detection_score=-1.0,  # GT samples do not have a score.
+                        attribute_name=attribute_name,
+                        visibility=sample_annotation['visibility_token'],
+                        index=index_map[sample_token]
+                    )
+                )
+            elif box_cls == TrackingBox:
+                assert False, 'Error: TrackingBox is not supported by this custom loader.'
+            else:
+                raise NotImplementedError('Error: Invalid box_cls %s!' % box_cls)
+
+        all_annotations.add_boxes(sample_token, sample_boxes)
+
+    if verbose:
+        print("Loaded ground truth annotations for {} samples.".format(len(all_annotations.sample_tokens)))
+
+    return all_annotations
+
+
+def filter_eval_boxes_by_id(nusc: NuScenes,
+                            eval_boxes: EvalBoxes,
+                            id=None,
+                            verbose: bool = False) -> EvalBoxes:
+    """
+    Filters the boxes, keeping only the ones whose annotation token is in ``id``.
+    :param nusc: An instance of the NuScenes class.
+    :param eval_boxes: An instance of the EvalBoxes class.
+    :param id: The set of annotation tokens of the boxes to keep.
+    :param verbose: Whether to print to stdout.
+    """
+
+    # Accumulators for number of filtered boxes.
+    total, anns_filter = 0, 0
+    for ind, sample_token in enumerate(eval_boxes.sample_tokens):
+
+        # Filter on anns
+        total += len(eval_boxes[sample_token])
+        filtered_boxes = []
+        for box in eval_boxes[sample_token]:
+            if box.token in id:
+                filtered_boxes.append(box)
+        anns_filter += len(filtered_boxes)
+        eval_boxes.boxes[sample_token] = filtered_boxes
+
+    if verbose:
+        print("=> Original number of boxes: %d" % total)
+        print("=> After anns based filtering: %d" % anns_filter)
+
+    return eval_boxes
+
+
+def filter_eval_boxes_by_visibility(
+        ori_eval_boxes: EvalBoxes,
+        visibility=None,
+        verbose: bool = False) -> EvalBoxes:
+    """
+    Filters the boxes, keeping only the ones with the requested visibility token.
+    :param ori_eval_boxes: An instance of the EvalBoxes class.
+    :param visibility: The visibility token ('1'-'4') of the boxes to keep.
+    :param verbose: Whether to print to stdout.
+    """
+
+    # Accumulators for number of filtered boxes.
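+    # A deep copy keeps `ori_eval_boxes` intact, so `update_gt` can re-filter
+    # from the unfiltered `all_gt` boxes repeatedly with different settings.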
+    eval_boxes = copy.deepcopy(ori_eval_boxes)
+    total, anns_filter = 0, 0
+    for ind, sample_token in enumerate(eval_boxes.sample_tokens):
+        # Filter on anns
+        total += len(eval_boxes[sample_token])
+        filtered_boxes = []
+        for box in eval_boxes[sample_token]:
+            if box.visibility == visibility:
+                filtered_boxes.append(box)
+        anns_filter += len(filtered_boxes)
+        eval_boxes.boxes[sample_token] = filtered_boxes
+
+    if verbose:
+        print("=> Original number of boxes: %d" % total)
+        print("=> After visibility based filtering: %d" % anns_filter)
+
+    return eval_boxes
+
+
+def filter_by_sample_token(ori_eval_boxes, valid_sample_tokens=[], verbose=False):
+    eval_boxes = copy.deepcopy(ori_eval_boxes)
+    for sample_token in eval_boxes.sample_tokens:
+        if sample_token not in valid_sample_tokens:
+            eval_boxes.boxes.pop(sample_token)
+    return eval_boxes
+
+
+def filter_eval_boxes_by_overlap(nusc: NuScenes,
+                                 eval_boxes: EvalBoxes,
+                                 verbose: bool = False) -> EvalBoxes:
+    """
+    Filters the boxes, keeping only the ones whose center is visible in more
+    than one camera, i.e. boxes lying in camera overlap regions.
+    :param nusc: An instance of the NuScenes class.
+    :param eval_boxes: An instance of the EvalBoxes class.
+    :param verbose: Whether to print to stdout.
+    """
+
+    # Accumulators for number of filtered boxes.
+    cams = ['CAM_FRONT',
+            'CAM_FRONT_RIGHT',
+            'CAM_BACK_RIGHT',
+            'CAM_BACK',
+            'CAM_BACK_LEFT',
+            'CAM_FRONT_LEFT']
+
+    total, anns_filter = 0, 0
+    for ind, sample_token in enumerate(eval_boxes.sample_tokens):
+
+        # Filter on anns
+        total += len(eval_boxes[sample_token])
+        sample_record = nusc.get('sample', sample_token)
+        filtered_boxes = []
+        for box in eval_boxes[sample_token]:
+            count = 0
+            for cam in cams:
+                # Copied from the nuScenes devkit.
+                sample_data_token = sample_record['data'][cam]
+                sd_record = nusc.get('sample_data', sample_data_token)
+                cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
+                sensor_record = nusc.get('sensor', cs_record['sensor_token'])
+                pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])
+                cam_intrinsic = np.array(cs_record['camera_intrinsic'])
+                imsize = (sd_record['width'], sd_record['height'])
+                new_box = Box(box.translation, box.size, Quaternion(box.rotation),
+                              name=box.detection_name, token='')
+
+                # Move box to ego vehicle coord system.
+                new_box.translate(-np.array(pose_record['translation']))
+                new_box.rotate(Quaternion(pose_record['rotation']).inverse)
+
+                # Move box to sensor coord system.
+                new_box.translate(-np.array(cs_record['translation']))
+                new_box.rotate(Quaternion(cs_record['rotation']).inverse)
+
+                if center_in_image(new_box, cam_intrinsic, imsize, vis_level=BoxVisibility.ANY):
+                    count += 1
+                # if exist_corners_in_image_but_not_all(new_box, cam_intrinsic, imsize, vis_level=BoxVisibility.ANY):
+                #     count += 1
+
+            if count > 1:
+                with open('center_overlap.txt', 'a') as f:
+                    try:
+                        f.write(box.token + '\n')
+                    except Exception:
+                        pass
+                filtered_boxes.append(box)
+        anns_filter += len(filtered_boxes)
+        eval_boxes.boxes[sample_token] = filtered_boxes
+
+    if verbose:
+        print("=> Original number of boxes: %d" % total)
+        print("=> After overlap based filtering: %d" % anns_filter)
+
+    return eval_boxes
+
+def _get_box_class_field(eval_boxes: EvalBoxes) -> str:
+    """
+    Retrieve the name of the class field in the boxes.
+    This parses through all boxes until it finds a valid box.
+    If there are no valid boxes, this function throws an exception.
+    :param eval_boxes: The EvalBoxes used for evaluation.
+    :return: The name of the class field in the boxes, e.g. detection_name or tracking_name.
+ """ + assert len(eval_boxes.boxes) > 0 + box = None + for val in eval_boxes.boxes.values(): + if len(val) > 0: + box = val[0] + break + if isinstance(box, DetectionBox): + class_field = 'detection_name' + elif isinstance(box, TrackingBox): + class_field = 'tracking_name' + else: + raise Exception('Error: Invalid box type: %s' % box) + + return class_field + +def filter_eval_boxes(nusc: NuScenes, + eval_boxes: EvalBoxes, + max_dist_x: Dict[str, float], + max_dist_y: Dict[str, float], + verbose: bool = False) -> EvalBoxes: + """ + Applies filtering to boxes. Distance, bike-racks and points per box. + :param nusc: An instance of the NuScenes class. + :param eval_boxes: An instance of the EvalBoxes class. + :param max_dist: Maps the detection name to the eval distance threshold for that class. + :param verbose: Whether to print to stdout. + """ + # Retrieve box type for detectipn/tracking boxes. + class_field = _get_box_class_field(eval_boxes) + + # Accumulators for number of filtered boxes. + total, dist_filter, point_filter, bike_rack_filter = 0, 0, 0, 0 + for ind, sample_token in enumerate(eval_boxes.sample_tokens): + + # Filter on distance first. + total += len(eval_boxes[sample_token]) + eval_boxes.boxes[sample_token] = [box for box in eval_boxes[sample_token] if + abs(box.ego_translation[0]) < max_dist_x[box.__getattribute__(class_field)] \ + and abs(box.ego_translation[1]) < max_dist_y[box.__getattribute__(class_field)]] + dist_filter += len(eval_boxes[sample_token]) + + # Then remove boxes with zero points in them. Eval boxes have -1 points by default. + eval_boxes.boxes[sample_token] = [box for box in eval_boxes[sample_token] if not box.num_pts == 0] + point_filter += len(eval_boxes[sample_token]) + + # Perform bike-rack filtering. + sample_anns = nusc.get('sample', sample_token)['anns'] + bikerack_recs = [nusc.get('sample_annotation', ann) for ann in sample_anns if + nusc.get('sample_annotation', ann)['category_name'] == 'static_object.bicycle_rack'] + bikerack_boxes = [Box(rec['translation'], rec['size'], Quaternion(rec['rotation'])) for rec in bikerack_recs] + filtered_boxes = [] + for box in eval_boxes[sample_token]: + if box.__getattribute__(class_field) in ['bicycle', 'motorcycle']: + in_a_bikerack = False + for bikerack_box in bikerack_boxes: + if np.sum(points_in_box(bikerack_box, np.expand_dims(np.array(box.translation), axis=1))) > 0: + in_a_bikerack = True + if not in_a_bikerack: + filtered_boxes.append(box) + else: + filtered_boxes.append(box) + + eval_boxes.boxes[sample_token] = filtered_boxes + bike_rack_filter += len(eval_boxes.boxes[sample_token]) + + if verbose: + print("=> Original number of boxes: %d" % total) + print("=> After distance based filtering: %d" % dist_filter) + print("=> After LIDAR and RADAR points based filtering: %d" % point_filter) + print("=> After bike rack filtering: %d" % bike_rack_filter) + + return eval_boxes + +class NuScenesEval_custom(NuScenesEval): + """ + Dummy class for backward-compatibility. Same as DetectionEval. + """ + + def __init__(self, + nusc: NuScenes, + config: DetectionConfig, + result_path: str, + eval_set: str, + output_dir: str = None, + verbose: bool = True, + overlap_test=False, + eval_mask=False, + data_infos=None + ): + """ + Initialize a DetectionEval object. + :param nusc: A NuScenes object. + :param config: A DetectionConfig object. + :param result_path: Path of the nuScenes JSON result file. + :param eval_set: The dataset split to evaluate on, e.g. train, val or test. 
+ :param output_dir: Folder to save plots and results to. + :param verbose: Whether to print to stdout. + """ + + self.nusc = nusc + self.result_path = result_path + self.eval_set = eval_set + self.output_dir = output_dir + self.verbose = verbose + self.cfg = config + self.overlap_test = overlap_test + self.eval_mask = eval_mask + self.data_infos = data_infos + # Check result file exists. + assert os.path.exists(result_path), 'Error: The result file does not exist!' + + # Make dirs. + self.plot_dir = os.path.join(self.output_dir, 'plots') + if not os.path.isdir(self.output_dir): + os.makedirs(self.output_dir) + if not os.path.isdir(self.plot_dir): + os.makedirs(self.plot_dir) + + # Load data. + if verbose: + print('Initializing nuScenes detection evaluation') + self.pred_boxes, self.meta = load_prediction(self.result_path, self.cfg.max_boxes_per_sample, DetectionBox, + verbose=verbose) + self.gt_boxes = load_gt(self.nusc, self.eval_set, DetectionBox_modified, verbose=verbose) + + # assert set(self.pred_boxes.sample_tokens) == set(self.gt_boxes.sample_tokens), \ + # "Samples in split doesn't match samples in predictions." + + # Add center distances. + self.pred_boxes = add_center_dist(nusc, self.pred_boxes) + self.gt_boxes = add_center_dist(nusc, self.gt_boxes) + + # Filter boxes (distance, points per box, etc.). + + if verbose: + print('Filtering predictions') + self.pred_boxes = filter_eval_boxes(nusc, self.pred_boxes, self.cfg.class_range_x, self.cfg.class_range_y, verbose=verbose) + if verbose: + print('Filtering ground truth annotations') + self.gt_boxes = filter_eval_boxes(nusc, self.gt_boxes, self.cfg.class_range_x, self.cfg.class_range_y, verbose=verbose) + + if self.overlap_test: + self.pred_boxes = filter_eval_boxes_by_overlap(self.nusc, self.pred_boxes) + + self.gt_boxes = filter_eval_boxes_by_overlap(self.nusc, self.gt_boxes, verbose=True) + + self.all_gt = copy.deepcopy(self.gt_boxes) + self.all_preds = copy.deepcopy(self.pred_boxes) + self.sample_tokens = self.gt_boxes.sample_tokens + + self.index_map = {} + for scene in nusc.scene: + first_sample_token = scene['first_sample_token'] + sample = nusc.get('sample', first_sample_token) + self.index_map[first_sample_token] = 1 + index = 2 + while sample['next'] != '': + sample = nusc.get('sample', sample['next']) + self.index_map[sample['token']] = index + index += 1 + + def update_gt(self, type_='vis', visibility='1', index=1): + if type_ == 'vis': + self.visibility_test = True + if self.visibility_test: + '''[{'description': 'visibility of whole object is between 0 and 40%', + 'token': '1', + 'level': 'v0-40'}, + {'description': 'visibility of whole object is between 40 and 60%', + 'token': '2', + 'level': 'v40-60'}, + {'description': 'visibility of whole object is between 60 and 80%', + 'token': '3', + 'level': 'v60-80'}, + {'description': 'visibility of whole object is between 80 and 100%', + 'token': '4', + 'level': 'v80-100'}]''' + + self.gt_boxes = filter_eval_boxes_by_visibility(self.all_gt, visibility, verbose=True) + + elif type_ == 'ord': + + valid_tokens = [key for (key, value) in self.index_map.items() if value == index] + # from IPython import embed + # embed() + self.gt_boxes = filter_by_sample_token(self.all_gt, valid_tokens) + self.pred_boxes = filter_by_sample_token(self.all_preds, valid_tokens) + self.sample_tokens = self.gt_boxes.sample_tokens + + + def evaluate(self) -> Tuple[DetectionMetrics, DetectionMetricDataList]: + """ + Performs the actual evaluation. 
+ :return: A tuple of high-level and the raw metric data. + """ + start_time = time.time() + + # ----------------------------------- + # Step 1: Accumulate metric data for all classes and distance thresholds. + # ----------------------------------- + if self.verbose: + print('Accumulating metric data...') + metric_data_list = DetectionMetricDataList() + + # print(self.cfg.dist_fcn_callable, self.cfg.dist_ths) + # self.cfg.dist_ths = [0.3] + # self.cfg.dist_fcn_callable + for class_name in self.cfg.class_names: + for dist_th in self.cfg.dist_ths: + md = accumulate(self.gt_boxes, self.pred_boxes, class_name, self.cfg.dist_fcn_callable, dist_th) + metric_data_list.set(class_name, dist_th, md) + + # ----------------------------------- + # Step 2: Calculate metrics from the data. + # ----------------------------------- + if self.verbose: + print('Calculating metrics...') + metrics = DetectionMetrics(self.cfg) + for class_name in self.cfg.class_names: + # Compute APs. + for dist_th in self.cfg.dist_ths: + metric_data = metric_data_list[(class_name, dist_th)] + ap = calc_ap(metric_data, self.cfg.min_recall, self.cfg.min_precision) + metrics.add_label_ap(class_name, dist_th, ap) + # Compute TP metrics. + for metric_name in TP_METRICS: + metric_data = metric_data_list[(class_name, self.cfg.dist_th_tp)] + if class_name in ['traffic_cone'] and metric_name in ['attr_err', 'vel_err', 'orient_err']: + tp = np.nan + elif class_name in ['barrier'] and metric_name in ['attr_err', 'vel_err']: + tp = np.nan + else: + tp = calc_tp(metric_data, self.cfg.min_recall, metric_name) + metrics.add_label_tp(class_name, metric_name, tp) + + # Compute evaluation time. + metrics.add_runtime(time.time() - start_time) + + return metrics, metric_data_list + + def render(self, metrics: DetectionMetrics, md_list: DetectionMetricDataList) -> None: + """ + Renders various PR and TP curves. + :param metrics: DetectionMetrics instance. + :param md_list: DetectionMetricDataList instance. + """ + if self.verbose: + print('Rendering PR and TP curves') + + def savepath(name): + return os.path.join(self.plot_dir, name + '.pdf') + + summary_plot(md_list, metrics, min_precision=self.cfg.min_precision, min_recall=self.cfg.min_recall, + dist_th_tp=self.cfg.dist_th_tp, savepath=savepath('summary')) + + for detection_name in self.cfg.class_names: + class_pr_curve(md_list, metrics, detection_name, self.cfg.min_precision, self.cfg.min_recall, + savepath=savepath(detection_name + '_pr')) + + class_tp_curve(md_list, metrics, detection_name, self.cfg.min_recall, self.cfg.dist_th_tp, + savepath=savepath(detection_name + '_tp')) + + for dist_th in self.cfg.dist_ths: + dist_pr_curve(md_list, metrics, dist_th, self.cfg.min_precision, self.cfg.min_recall, + savepath=savepath('dist_pr_' + str(dist_th))) + + +if __name__ == "__main__": + + # Settings. 
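+    # Example invocation (illustrative paths; adjust to your setup):
+    #   python mmcv/datasets/vad_custom_nuscenes_eval.py results_nusc.json \
+    #       --output_dir work_dirs/nusc_eval --version v1.0-mini --eval_set mini_val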
+ parser = argparse.ArgumentParser(description='Evaluate nuScenes detection results.', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('result_path', type=str, help='The submission as a JSON file.') + parser.add_argument('--output_dir', type=str, default='~/nuscenes-metrics', + help='Folder to store result metrics, graphs and example visualizations.') + parser.add_argument('--eval_set', type=str, default='val', + help='Which dataset split to evaluate on, train, val or test.') + parser.add_argument('--dataroot', type=str, default='data/nuscenes', + help='Default nuScenes data directory.') + parser.add_argument('--version', type=str, default='v1.0-trainval', + help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.') + parser.add_argument('--config_path', type=str, default='', + help='Path to the configuration file.' + 'If no path given, the CVPR 2019 configuration will be used.') + parser.add_argument('--plot_examples', type=int, default=0, + help='How many example visualizations to write to disk.') + parser.add_argument('--render_curves', type=int, default=1, + help='Whether to render PR and TP curves to disk.') + parser.add_argument('--verbose', type=int, default=1, + help='Whether to print to stdout.') + args = parser.parse_args() + + result_path_ = os.path.expanduser(args.result_path) + output_dir_ = os.path.expanduser(args.output_dir) + eval_set_ = args.eval_set + dataroot_ = args.dataroot + version_ = args.version + config_path = args.config_path + plot_examples_ = args.plot_examples + render_curves_ = bool(args.render_curves) + verbose_ = bool(args.verbose) + + if config_path == '': + cfg_ = config_factory('detection_cvpr_2019') + else: + with open(config_path, 'r') as _f: + cfg_ = DetectionConfig.deserialize(json.load(_f)) + + nusc_ = NuScenes(version=version_, verbose=verbose_, dataroot=dataroot_) + nusc_eval = NuScenesEval_custom(nusc_, config=cfg_, result_path=result_path_, eval_set=eval_set_, + output_dir=output_dir_, verbose=verbose_) + for vis in ['1', '2', '3', '4']: + nusc_eval.update_gt(type_='vis', visibility=vis) + print(f'================ {vis} ===============') + nusc_eval.main(plot_examples=plot_examples_, render_curves=render_curves_) + #for index in range(1, 41): + # nusc_eval.update_gt(type_='ord', index=index) + # diff --git a/mmcv/datasets/vis_utils.py b/mmcv/datasets/vis_utils.py new file mode 100644 index 0000000..281703f --- /dev/null +++ b/mmcv/datasets/vis_utils.py @@ -0,0 +1,670 @@ +import numpy as np +import cv2 +from matplotlib import cm +import math +import open3d as o3d +import os + +WINDOW_HEIGHT = 900 +WINDOW_WIDTH = 1600 + +DIS_CAR_SAVE = 50 +DIS_WALKER_SAVE = 50 +DIS_SIGN_SAVE = 50 +DIS_LIGHT_SAVE = 50 + +edges = [[0,1], [1,3], [3,2], [2,0], [0,4], [4,5], [5,1], [5,7], [7,6], [6,4], [6,2], [7,3]] + +carla_bbox_edges = [ + (0, 1), (1, 2), (2, 3), (3, 0), # Bottom face + (4, 5), (5, 6), (6, 7), (7, 4), # Top face + (0, 4), (1, 5), (2, 6), (3, 7) # Side edges connecting top and bottom faces +] + +VIRIDIS = np.array(cm.get_cmap('plasma').colors) +VID_RANGE = np.linspace(0.0, 1.0, VIRIDIS.shape[0]) +LABEL_COLORS = np.array([ + (255, 255, 255), # None + (70, 70, 70), # Building + (100, 40, 40), # Fences + (55, 90, 80), # Other + (220, 20, 60), # Pedestrian + (153, 153, 153), # Pole + (157, 234, 50), # RoadLines + (128, 64, 128), # Road + (244, 35, 232), # Sidewalk + (107, 142, 35), # Vegetation + (0, 0, 142), # Vehicle + (102, 102, 156), # Wall + (220, 220, 0), # TrafficSign + (70, 130, 180), # Sky + 
(81, 0, 81), # Ground + (150, 100, 100), # Bridge + (230, 150, 140), # RailTrack + (180, 165, 180), # GuardRail + (250, 170, 30), # TrafficLight + (110, 190, 160), # Static + (170, 120, 50), # Dynamic + (45, 60, 150), # Water + (145, 170, 100), # Terrain +]) / 255.0 # normalize each channel [0-1] since is what Open3D uses + +SEM_SEG_LABEL_COLORS = { + 0 : ( 0, 0, 0), # unlabeled + # cityscape + 1 : (128, 64, 128), # road + 2 : (244, 35, 232), # sidewalk + 3 : ( 70, 70, 70), # building + 4 : (102, 102, 156), # wall + 5 : (190, 153, 153), # fence + 6 : (153, 153, 153), # pole + 7 : (250, 170, 30), # traffic light + 8 : (220, 220, 0), # traffic sign + 9 : (107, 142, 35), # vegetation + 10 : (152, 251, 152), # terrain + 11 : ( 70, 130, 180), # sky + 12 : (220, 20, 60), # pedestrian + 13 : (255, 0, 0), # rider + 14 : ( 0, 0, 142), # Car + 15 : ( 0, 0, 70), # truck + 16 : ( 0, 60, 100), # bus + 17 : ( 0, 80, 100), # train + 18 : ( 0, 0, 230), # motorcycle + 19 : (119, 11, 32), # bicycle + # custom + 20 : (110, 190, 160), # static + 21 : (170, 120, 50), # dynamic + 22 : ( 55, 90, 80), # other + 23 : ( 45, 60, 150), # water + 24 : (157, 234, 50), # road line + 25 : ( 81, 0, 81), # ground + 26 : (150, 100, 100), # bridge + 27 : (230, 150, 140), # rail track + 28 : (180, 165, 180), # guard rail +} + +uniad_class_names = [ + 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', + 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier' +] + +carla_class_name = [ + 'car', 'truck', 'bus', 'van', 'motorcycle', 'bicycle', 'pedestrian', +] + +TYPE_ID_MAP = { + #=================vehicle================= + # bicycle + 'vehicle.bh.crossbike': 'bicycle', + "vehicle.diamondback.century": 'bicycle', + # car + "vehicle.chevrolet.impala": 'car', + "vehicle.dodge.charger_2020": 'car', + "vehicle.dodge.charger_police_2020": 'car', + "vehicle.lincoln.mkz_2017": 'car', + "vehicle.lincoln.mkz_2020": 'car', + "vehicle.mini.cooper_s_2021": 'car', + "vehicle.mercedes.coupe_2020": 'car', + "vehicle.ford.mustang": 'car', + "vehicle.nissan.patrol_2021": 'car', + "vehicle.audi.tt": 'car', + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/FordCrown/SM_FordCrown_parked.SM_FordCrown_parked": 'car', + # bus + # van + "/Game/Carla/Static/Car/4Wheeled/ParkedVehicles/VolkswagenT2/SM_VolkswagenT2_2021_Parked.SM_VolkswagenT2_2021_Parked": "van", + #========================================= + + #=================traffic sign============ + # traffic.speed_limit + "traffic.speed_limit.30": 'speed_limit', + "traffic.speed_limit.40": 'speed_limit', + "traffic.speed_limit.50": 'speed_limit', + # traffic.traffic_light + "traffic.traffic_light": 'traffic_light', + # traffic.stop + "traffic.stop": 'stop', + #========================================= +} + +def calc_projected_2d_bbox(vertices_pos2d): + """ Takes in all vertices in pixel projection and calculates min and max of all x and y coordinates. + Returns left top, right bottom pixel coordinates for the 2d bounding box as a list of four values. + Note that vertices_pos2d contains a list of (y_pos2d, x_pos2d) tuples, or None + """ + x_coords = vertices_pos2d[:, 0] + y_coords = vertices_pos2d[:, 1] + min_x, max_x = np.min(x_coords), np.max(x_coords) + min_y, max_y = np.min(y_coords), np.max(y_coords) + return [min_x, min_y, max_x, max_y] + +def calculate_occlusion(bbox, point_depth, agent, depth_map): + """Calculate the occlusion value of a 2D bounding box. 
+    Iterate through each point (pixel) in the bounding box and declare it occluded only
+    if the 4 surrounding points (pixels) are closer to the camera (with the help of the depth map)
+    than the actual distance to the middle of the 3D bounding box, plus some margin (the extent of the object).
+    """
+    bbox_3d_mid = np.mean(point_depth)
+    min_x, min_y, max_x, max_y = calc_projected_2d_bbox(bbox)
+    height, width, length = agent.bounding_box.extent.z, agent.bounding_box.extent.x, agent.bounding_box.extent.y
+
+    # depth_margin should depend on the rotation of the object but this solution works fine
+    depth_margin = np.max([2 * width, 2 * length])
+    is_occluded = []
+
+    for x in range(int(min_x), int(max_x)):
+        for y in range(int(min_y), int(max_y)):
+            is_occluded.append(point_is_occluded(
+                (y, x), bbox_3d_mid - depth_margin, depth_map))
+
+    occlusion = ((float(np.sum(is_occluded))) / ((max_x - min_x) * (max_y - min_y)))
+    # discretize the 0-1 occlusion value into KITTI's {0,1,2,3} labels by equally dividing the interval into 4 parts
+    # occlusion = np.digitize(occlusion, bins=[0.25, 0.50, 0.75])
+    return occlusion
+
+def calculate_occlusion_vectorized(bbox, point_depth, extent, depth_map):
+    """Calculate the occlusion value of a 2D bounding box.
+    Iterate through each point (pixel) in the bounding box and declare it occluded only
+    if the 4 surrounding points (pixels) are closer to the camera (with the help of the depth map)
+    than the actual distance to the middle of the 3D bounding box, plus some margin (the extent of the object).
+    """
+    bbox_3d_mid = np.mean(point_depth)
+    min_x, min_y, max_x, max_y = calc_projected_2d_bbox(bbox)
+    height, width, length = extent[2], extent[0], extent[1]
+    depth_margin = np.max([2 * width, 2 * length])
+    # Subsample large boxes: aim for roughly 100 sampled pixels per box.
+    count_num = (max_x - min_x) * (max_y - min_y)
+    if count_num > 100:
+        p = 100 / count_num
+    else:
+        p = 1
+    sample_step_approx = int(np.sqrt(1 / p))
+
+    x, y = np.meshgrid(np.arange(min_x, max_x, sample_step_approx), np.arange(min_y, max_y, sample_step_approx))
+    points = np.stack((y.flatten(), x.flatten()), axis=1)
+    is_occluded_array = point_is_occluded_single(points, bbox_3d_mid - depth_margin, depth_map)
+    occlusion = is_occluded_array.mean()
+    # discretize the 0-1 occlusion value into KITTI's {0,1,2,3} labels by equally dividing the interval into 4 parts
+    # occlusion = np.digitize(occlusion, bins=[0.25, 0.50, 0.75])
+    return occlusion
+
+def calc_bbox2d_area(bbox_2d):
+    """ Calculate the area of the given 2d bbox.
+    Input is assumed to be an xmin, ymin, xmax, ymax tuple.
+    """
+    xmin, ymin, xmax, ymax = bbox_2d
+    return (ymax - ymin) * (xmax - xmin)
+
+def calculate_truncation(uncropped_bbox, cropped_bbox):
+    """Calculate how much of the object's 2D uncropped bounding box is outside the image boundary."""
+
+    area_cropped = calc_bbox2d_area(cropped_bbox)
+    area_uncropped = calc_bbox2d_area(uncropped_bbox)
+    truncation = 1.0 - float(area_cropped / area_uncropped)
+    return truncation
+
+def crop_boxes_in_canvas(cam_bboxes):
+    """Clamp projected (x, y) vertices to the image canvas."""
+    neg_x_inds = np.where(cam_bboxes[:, 0] < 0)[0]
+    out_x_inds = np.where(cam_bboxes[:, 0] > WINDOW_WIDTH)[0]
+    neg_y_inds = np.where(cam_bboxes[:, 1] < 0)[0]
+    out_y_inds = np.where(cam_bboxes[:, 1] > WINDOW_HEIGHT)[0]
+    cam_bboxes[neg_x_inds, 0] = 0
+    cam_bboxes[out_x_inds, 0] = WINDOW_WIDTH
+    cam_bboxes[neg_y_inds, 1] = 0
+    cam_bboxes[out_y_inds, 1] = WINDOW_HEIGHT
+    return cam_bboxes
+
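+# Usage sketch (illustrative only, not called by this module): truncation is
+# the fraction of the uncropped 2D box area lost when clamping to the canvas.
+#   uncropped_bbox = calc_projected_2d_bbox(vertices_pos2d)
+#   cropped_bbox = calc_projected_2d_bbox(crop_boxes_in_canvas(vertices_pos2d.copy()))
+#   truncation = calculate_truncation(uncropped_bbox, cropped_bbox)
+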
+def point_is_occluded(point, vertex_depth, depth_map):
+    """ Checks whether the four pixels directly around the given point have less depth than the given vertex depth.
+    If True, this means that the point is occluded.
+    """
+    y, x = map(int, point)
+    from itertools import product
+    neighbours = product((1, -1), repeat=2)
+    is_occluded = []
+    for dy, dx in neighbours:
+        if point_in_canvas_hw((dy + y, dx + x)):
+            # If the depth map says the pixel is closer to the camera than the actual vertex
+            if depth_map[y + dy, x + dx] < vertex_depth:
+                is_occluded.append(True)
+            else:
+                is_occluded.append(False)
+    # Only say point is occluded if all four neighbours are closer to camera than vertex
+    return all(is_occluded)
+
+def point_is_occluded_single(points, vertex_depth, depth_map, canvas_shape=(WINDOW_HEIGHT, WINDOW_WIDTH)):
+    """
+    Simplified version that checks occlusion based only on the points' own depth.
+    """
+    points = np.asarray(points).astype(np.int32)
+    y, x = points[:, 0], points[:, 1]
+
+    valid = (y >= 0) & (y < canvas_shape[0]) & \
+            (x >= 0) & (x < canvas_shape[1])
+
+    is_occluded = np.zeros(len(points), dtype=bool)
+    try:
+        is_occluded[valid] = depth_map[y[valid], x[valid]] < vertex_depth
+    except Exception:
+        pass
+    return is_occluded
+
+def point_is_occluded_vectorized(points, vertex_depth, depth_map, canvas_shape=(WINDOW_HEIGHT, WINDOW_WIDTH)):
+    """
+    Vectorized equivalent of point_is_occluded.
+    """
+    points = np.asarray(points).astype(np.int32)
+    y, x = points[:, 0], points[:, 1]
+
+    dy, dx = np.array([1, 1, -1, -1]), np.array([1, -1, 1, -1])
+    neighbour_y = y[:, np.newaxis] + dy
+    neighbour_x = x[:, np.newaxis] + dx
+
+    valid = (neighbour_y >= 0) & (neighbour_y < canvas_shape[0]) & \
+            (neighbour_x >= 0) & (neighbour_x < canvas_shape[1])
+
+    neighbour_depths = np.full(neighbour_y.shape, np.inf)
+    for i in range(4):
+        mask = valid[:, i]
+        neighbour_depths[mask, i] = depth_map[neighbour_y[mask, i], neighbour_x[mask, i]]
+
+    is_occluded = np.logical_and.reduce(neighbour_depths < vertex_depth, axis=1) & np.any(valid, axis=1)
+    return is_occluded
+
+def draw_3d_bbox_vertex(image, points):
+    for x_2d, y_2d, vertex_color in points:
+        cv2.circle(image, (int(x_2d), int(y_2d)), radius=3, color=vertex_color, thickness=1)
+
+def calculate_occlusion_stats(bbox_points, depth, depth_map, max_render_depth):
+    """ Draws each vertex in vertices_pos2d if it is in front of the camera.
+    The color is based on whether the object is occluded or not.
+    Returns the number of visible vertices and the number of vertices outside the camera.
+ """ + num_visible_vertices = 0 + num_invisible_vertices = 0 + num_vertices_outside_camera = 0 + points = [] + + for i in range(len(bbox_points)): + x_2d = bbox_points[i][0] + y_2d = bbox_points[i][1] + point_depth = depth[i] + + # if the point is in front of the camera but not too far away + if max_render_depth > point_depth > 0 and point_in_canvas_hw((y_2d, x_2d)): + #is_occluded_v = point_is_occluded_vectorized([[y_2d, x_2d]], point_depth, depth_map) + is_occluded = point_is_occluded( + (y_2d, x_2d), point_depth, depth_map) + + if is_occluded: + vertex_color = (0,0,255) # bgr, red + num_invisible_vertices += 1 + else: + num_visible_vertices += 1 + vertex_color = (0,255,0) # bgr, green + points.append((x_2d, y_2d, vertex_color)) + else: + num_vertices_outside_camera += 1 + return num_visible_vertices, num_invisible_vertices, num_vertices_outside_camera, points + +def get_intrinsic_matrix(camera): + + width = int(camera.attributes['image_size_x']) + height = int(camera.attributes['image_size_y']) + fov = float(camera.attributes['fov']) + + k = np.identity(3) + k[0, 2] = width / 2.0 + k[1, 2] = height / 2.0 + k[0, 0] = k[1, 1] = width / (2.0 * np.tan(fov * np.pi / 360.0)) + + return k + +def get_image_point(loc, K, w2c): + # Calculate 2D projection of 3D coordinate + + # Format the input coordinate (loc is a carla.Position object) + point = np.array([loc[0], loc[1], loc[2], 1]) + # transform to camera coordinates + point_camera = np.dot(w2c, point) + + # New we must change from UE4's coordinate system to an "standard" + # (x, y ,z) -> (y, -z, x) + # and we remove the fourth componebonent also + point_camera = [point_camera[1], -point_camera[2], point_camera[0]] + + depth = point_camera[2] + + # now project 3D->2D using the camera matrix + point_img = np.dot(K, point_camera) + # normalize + point_img[0] /= point_img[2] + point_img[1] /= point_img[2] + + return point_img[0:2], depth + +def point_in_canvas_hw(pos): + """Return true if point is in canvas""" + if (pos[0] >= 0) and (pos[0] < WINDOW_HEIGHT) and (pos[1] >= 0) and (pos[1] < WINDOW_WIDTH): + return True + return False + +def point_in_canvas_wh(pos): + """Return true if point is in canvas""" + if (pos[0] >= 0) and (pos[0] < WINDOW_WIDTH) and (pos[1] >= 0) and (pos[1] < WINDOW_HEIGHT): + return True + return False + +def build_projection_matrix(w, h, fov, is_behind_camera=False): + focal = w / (2.0 * np.tan(fov * np.pi / 360.0)) + K = np.identity(3) + + if is_behind_camera: + K[0, 0] = K[1, 1] = -focal + else: + K[0, 0] = K[1, 1] = focal + + K[0, 2] = w / 2.0 + K[1, 2] = h / 2.0 + return K + +def rotate_3d(vector, theta): + theta = np.radians(theta) + R = np.array([[np.cos(theta), -np.sin(theta), 0], + [np.sin(theta), np.cos(theta), 0], + [0, 0, 1]]) + + v_rotated = np.dot(R, vector) + return np.array([v_rotated[0], v_rotated[1], v_rotated[2]]) + +def normalize_angle_degree(x): + x = x % 360.0 + if x > 180.0: + x -= 360.0 + return x + + +def algin_lidar(lidar, translation, yaw): + """ + Translates and rotates a LiDAR into a new coordinate system. + Rotation is inverse to translation and yaw + :param lidar: numpy LiDAR point cloud (N,3) + :param translation: translations in meters + :param yaw: yaw angle in radians + :return: numpy LiDAR point cloud in the new coordinate system. 
+ """ + + rotation_matrix = np.array([[np.cos(yaw), -np.sin(yaw), 0.0], [np.sin(yaw), np.cos(yaw), 0.0], [0.0, 0.0, 1.0]]) + + aligned_lidar = (rotation_matrix.T @ (lidar - translation).T).T + + return aligned_lidar + +def convert_depth(data): + """ + Computes the normalized depth from a CARLA depth map. + """ + data = data.astype(np.float16) + + normalized = np.dot(data, [65536.0, 256.0, 1.0]) + normalized /= (256 * 256 * 256 - 1) + return normalized * 1000 + +def get_relative_transform(ego_matrix, vehicle_matrix): + """ + Returns the position of the vehicle matrix in the ego coordinate system. + :param ego_matrix: ndarray 4x4 Matrix of the ego vehicle in global + coordinates + :param vehicle_matrix: ndarray 4x4 Matrix of another actor in global + coordinates + :return: ndarray position of the other vehicle in the ego coordinate system + """ + relative_pos = vehicle_matrix[:3, 3] - ego_matrix[:3, 3] + rot = ego_matrix[:3, :3].T + relative_pos = rot @ relative_pos + + return relative_pos + +def normalize_angle(x): + x = x % (2 * np.pi) # force in range [0, 2 pi) + if x > np.pi: # move to [-pi, pi) + x -= 2 * np.pi + return x + +def build_skeleton(ped, sk_links): + + ######## get the pedestrian skeleton ######### + bones = ped.get_bones() + + # list where we will store the lines we will project + # onto the camera output + lines_3d = [] + + # cycle through the bone pairs in skeleton.txt and retrieve the joint positions + for link in sk_links[1:]: + + # get the roots of the two bones to be joined + bone_transform_1 = next(filter(lambda b: b.name == link[0], bones.bone_transforms), None) + bone_transform_2 = next(filter(lambda b: b.name == link[1], bones.bone_transforms), None) + + # some bone names aren't matched + if bone_transform_1 is not None and bone_transform_2 is not None: + lines_3d.append([(bone_transform_1.world.location.x, bone_transform_1.world.location.y, bone_transform_1.world.location.z), + (bone_transform_2.world.location.x, bone_transform_2.world.location.y, bone_transform_2.world.location.z)] + ) + return lines_3d + +def get_center_and_extent(verts): + sum_x = sum_y = sum_z = 0 + max_x = max_y = max_z = float('-inf') + min_x = min_y = min_z = float('inf') + + for pos in verts: + sum_x += pos.x + sum_y += pos.y + sum_z += pos.z + + max_x = max(max_x, pos.x) + max_y = max(max_y, pos.y) + max_z = max(max_z, pos.z) + + min_x = min(min_x, pos.x) + min_y = min(min_y, pos.y) + min_z = min(min_z, pos.z) + + center = (sum_x / 8, sum_y / 8, sum_z / 8) + + extent = ((max_x - min_x)/2, (max_y - min_y)/2, (max_z - min_z)/2) + return center, extent + +def get_forward_vector(yaw): + + yaw_rad = math.radians(yaw) + + x = math.cos(yaw_rad) + y = math.sin(yaw_rad) + + z = 0 + return np.array([x, y, z]) + +def calculate_cube_vertices(center, extent): + cx, cy, cz = center + x, y, z = extent + vertices = [ + (cx + x, cy + y, cz + z), + (cx + x, cy + y, cz - z), + (cx + x, cy - y, cz + z), + (cx + x, cy - y, cz - z), + (cx - x, cy + y, cz + z), + (cx - x, cy + y, cz - z), + (cx - x, cy - y, cz + z), + (cx - x, cy - y, cz - z) + ] + return vertices + + +def calculate_cube_vertices_2(center, extent): + cx, cy, cz = center.x, center.y, center.z + x, y, z = extent.x, extent.y, extent.z + vertices = [ + (cx + x, cy + y, cz + z), + (cx + x, cy + y, cz - z), + (cx + x, cy - y, cz + z), + (cx + x, cy - y, cz - z), + (cx - x, cy + y, cz + z), + (cx - x, cy + y, cz - z), + (cx - x, cy - y, cz + z), + (cx - x, cy - y, cz - z) + ] + return vertices + +def calculate_cube_vertices_3(center, extent): + cx, 
cy, cz = center[0], center[1], center[2] + x, y, z = extent[0], extent[1], extent[2] + vertices = [ + (cx + x, cy + y, cz + z), + (cx + x, cy + y, cz - z), + (cx + x, cy - y, cz + z), + (cx + x, cy - y, cz - z), + (cx - x, cy + y, cz + z), + (cx - x, cy + y, cz - z), + (cx - x, cy - y, cz + z), + (cx - x, cy - y, cz - z) + ] + return vertices + + + + +def draw_dashed_line(img, start_point, end_point, color, thickness=1, dash_length=5): + + d = np.sqrt((end_point[0] - start_point[0])**2 + (end_point[1] - start_point[1])**2) + dx = (end_point[0] - start_point[0]) / d + dy = (end_point[1] - start_point[1]) / d + + x, y = start_point[0], start_point[1] + + while d >= dash_length: + + x_end = x + dx * dash_length + y_end = y + dy * dash_length + cv2.line(img, (int(x), int(y)), (int(x_end), int(y_end)), color, thickness) + x = x_end + dx * dash_length + y = y_end + dy * dash_length + d -= 2 * dash_length + +def get_matrix(location, rotation): + """ + Creates matrix from carla transform. + """ + pitch, roll, yaw = rotation + x, y, z = location + c_y = np.cos(np.radians(yaw)) + s_y = np.sin(np.radians(yaw)) + c_r = np.cos(np.radians(roll)) + s_r = np.sin(np.radians(roll)) + c_p = np.cos(np.radians(pitch)) + s_p = np.sin(np.radians(pitch)) + matrix = np.matrix(np.identity(4)) + matrix[0, 3] = x + matrix[1, 3] = y + matrix[2, 3] = z + matrix[0, 0] = c_p * c_y + matrix[0, 1] = c_y * s_p * s_r - s_y * c_r + matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r + matrix[1, 0] = s_y * c_p + matrix[1, 1] = s_y * s_p * s_r + c_y * c_r + matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r + matrix[2, 0] = s_p + matrix[2, 1] = -c_p * s_r + matrix[2, 2] = c_p * c_r + return matrix + +def euler_to_rotation_matrix(pitch, roll, yaw): + Ry_pitch = np.array([ + [np.cos(pitch), 0, np.sin(pitch)], + [0, 1, 0], + [-np.sin(pitch), 0, np.cos(pitch)] + ]) + Rx_roll = np.array([ + [1, 0, 0], + [0, np.cos(roll), -np.sin(roll)], + [0, np.sin(roll), np.cos(roll)] + ]) + Rz_yaw = np.array([ + [np.cos(yaw), -np.sin(yaw), 0], + [np.sin(yaw), np.cos(yaw), 0], + [0, 0, 1] + ]) + return np.dot(Rz_yaw, np.dot(Rx_roll, Ry_pitch)) + +def world_to_ego_no(point_world, ego_location, ego_rotation): + rotation_matrix = euler_to_rotation_matrix(np.radians(ego_rotation[0]), + np.radians(ego_rotation[1]), + np.radians(ego_rotation[2])) + + point_relative = np.array(point_world) - np.array(ego_location) + point = np.dot(rotation_matrix, point_relative) + # (x, y ,z) -> (y, -x, z) + point = [point[0], -point[1], point[2]] + return point + +def world_to_ego(point_world, w2e): + point_world = np.array([point_world[0], point_world[1], point_world[2], 1]) + point_ego = np.dot(w2e, point_world) + point_ego = [point_ego[1], -point_ego[0], point_ego[2]] + return point_ego + +def world_to_lidar(point_world, w2l): + point_world = np.array([point_world[0], point_world[1], point_world[2], 1]) + point_lidar = np.dot(w2l, point_world) + return point_lidar + +def vector_angle(v1, v2): + dot_product = np.dot(v1, v2) + magnitude_v1 = np.linalg.norm(v1) + magnitude_v2 = np.linalg.norm(v2) + cos_theta = dot_product / (magnitude_v1 * magnitude_v2) + angle_radians = np.arccos(cos_theta) + angle_degrees = np.degrees(angle_radians) + return angle_degrees + +def get_weather_id(weather_conditions): + from xml.etree import ElementTree as ET + tree = ET.parse('./weather.xml') + root = tree.getroot() + def conditions_match(weather, conditions): + for (key, value) in weather: + if key == 'route_percentage' : continue + if str(conditions[key]) != value: + return False + return True + for 
case in root.findall('case'): + weather = case[0].items() + if conditions_match(weather, weather_conditions): + return case.items()[0][1] + return None + + +def static_weather(path): + import gzip + import json + static_dict = {} + for dir in os.listdir(path): + for d1 in os.listdir(os.path.join(path, dir)): + if os.path.exists(os.path.join(path, dir, d1, 'anno/00000.json.gz')): + with gzip.open(os.path.join(path, dir, d1, 'anno/00000.json.gz'), 'rt', encoding='utf-8') as gz_file: + anno = json.load(gz_file) + weather = anno['weather'] + weather_id = get_weather_id(weather) + static_dict[weather_id] = static_dict.get(weather_id, 0) + 1 + print(static_dict) + return + +if __name__ == '__main__': + + path = '' + static_weather(path) \ No newline at end of file diff --git a/mmcv/fileio/__init__.py b/mmcv/fileio/__init__.py new file mode 100644 index 0000000..b08824c --- /dev/null +++ b/mmcv/fileio/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# from .file_client import BaseStorageBackend, FileClient +# from .io import dump, load, imread #register_handler +# from .handlers import PickleHandler, JsonHandler +# from .parse import * \ No newline at end of file diff --git a/mmcv/fileio/file_client.py b/mmcv/fileio/file_client.py new file mode 100644 index 0000000..705eb65 --- /dev/null +++ b/mmcv/fileio/file_client.py @@ -0,0 +1,1146 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import inspect +import os +import os.path as osp +import re +import tempfile +import warnings +from abc import ABCMeta, abstractmethod +from contextlib import contextmanager +from pathlib import Path +from typing import Iterable, Iterator, Optional, Tuple, Union +from urllib.request import urlopen +from mmcv.utils.misc import has_method +from mmcv.utils.path import is_filepath, mkdir_or_exist + + +class BaseStorageBackend(metaclass=ABCMeta): + """Abstract class of storage backends. + + All backends need to implement two apis: ``get()`` and ``get_text()``. + ``get()`` reads the file as a byte stream and ``get_text()`` reads the file + as texts. + """ + + # a flag to indicate whether the backend can create a symlink for a file + _allow_symlink = False + + @property + def name(self): + return self.__class__.__name__ + + @property + def allow_symlink(self): + return self._allow_symlink + + @abstractmethod + def get(self, filepath): + pass + + @abstractmethod + def get_text(self, filepath): + pass + + +class CephBackend(BaseStorageBackend): + """Ceph storage backend (for internal use). + + Args: + path_mapping (dict|None): path mapping dict from local path to Petrel + path. When ``path_mapping={'src': 'dst'}``, ``src`` in ``filepath`` + will be replaced by ``dst``. Default: None. + + .. warning:: + :class:`mmcv.fileio.file_client.CephBackend` will be deprecated, + please use :class:`mmcv.fileio.file_client.PetrelBackend` instead. 
+ """ + + def __init__(self, path_mapping=None): + try: + import ceph + except ImportError: + raise ImportError('Please install ceph to enable CephBackend.') + + warnings.warn( + 'CephBackend will be deprecated, please use PetrelBackend instead') + self._client = ceph.S3Client() + assert isinstance(path_mapping, dict) or path_mapping is None + self.path_mapping = path_mapping + + def get(self, filepath): + filepath = str(filepath) + if self.path_mapping is not None: + for k, v in self.path_mapping.items(): + filepath = filepath.replace(k, v) + value = self._client.Get(filepath) + value_buf = memoryview(value) + return value_buf + + def get_text(self, filepath, encoding=None): + raise NotImplementedError + + +class PetrelBackend(BaseStorageBackend): + """Petrel storage backend (for internal use). + + PetrelBackend supports reading and writing data to multiple clusters. + If the file path contains the cluster name, PetrelBackend will read data + from specified cluster or write data to it. Otherwise, PetrelBackend will + access the default cluster. + + Args: + path_mapping (dict, optional): Path mapping dict from local path to + Petrel path. When ``path_mapping={'src': 'dst'}``, ``src`` in + ``filepath`` will be replaced by ``dst``. Default: None. + enable_mc (bool, optional): Whether to enable memcached support. + Default: True. + + Examples: + >>> filepath1 = 's3://path/of/file' + >>> filepath2 = 'cluster-name:s3://path/of/file' + >>> client = PetrelBackend() + >>> client.get(filepath1) # get data from default cluster + >>> client.get(filepath2) # get data from 'cluster-name' cluster + """ + + def __init__(self, + path_mapping: Optional[dict] = None, + enable_mc: bool = True): + try: + from petrel_client import client + except ImportError: + raise ImportError('Please install petrel_client to enable ' + 'PetrelBackend.') + + self._client = client.Client(enable_mc=enable_mc) + assert isinstance(path_mapping, dict) or path_mapping is None + self.path_mapping = path_mapping + + def _map_path(self, filepath: Union[str, Path]) -> str: + """Map ``filepath`` to a string path whose prefix will be replaced by + :attr:`self.path_mapping`. + + Args: + filepath (str): Path to be mapped. + """ + filepath = str(filepath) + if self.path_mapping is not None: + for k, v in self.path_mapping.items(): + filepath = filepath.replace(k, v) + return filepath + + def _format_path(self, filepath: str) -> str: + """Convert a ``filepath`` to standard format of petrel oss. + + If the ``filepath`` is concatenated by ``os.path.join``, in a Windows + environment, the ``filepath`` will be the format of + 's3://bucket_name\\image.jpg'. By invoking :meth:`_format_path`, the + above ``filepath`` will be converted to 's3://bucket_name/image.jpg'. + + Args: + filepath (str): Path to be formatted. + """ + return re.sub(r'\\+', '/', filepath) + + def get(self, filepath: Union[str, Path]) -> memoryview: + """Read data from a given ``filepath`` with 'rb' mode. + + Args: + filepath (str or Path): Path to read data. + + Returns: + memoryview: A memory view of expected bytes object to avoid + copying. The memoryview object can be converted to bytes by + ``value_buf.tobytes()``. + """ + filepath = self._map_path(filepath) + filepath = self._format_path(filepath) + value = self._client.Get(filepath) + value_buf = memoryview(value) + return value_buf + + def get_text(self, + filepath: Union[str, Path], + encoding: str = 'utf-8') -> str: + """Read data from a given ``filepath`` with 'r' mode. 
+ + Args: + filepath (str or Path): Path to read data. + encoding (str): The encoding format used to open the ``filepath``. + Default: 'utf-8'. + + Returns: + str: Expected text reading from ``filepath``. + """ + return str(self.get(filepath), encoding=encoding) + + def put(self, obj: bytes, filepath: Union[str, Path]) -> None: + """Save data to a given ``filepath``. + + Args: + obj (bytes): Data to be saved. + filepath (str or Path): Path to write data. + """ + filepath = self._map_path(filepath) + filepath = self._format_path(filepath) + self._client.put(filepath, obj) + + def put_text(self, + obj: str, + filepath: Union[str, Path], + encoding: str = 'utf-8') -> None: + """Save data to a given ``filepath``. + + Args: + obj (str): Data to be written. + filepath (str or Path): Path to write data. + encoding (str): The encoding format used to encode the ``obj``. + Default: 'utf-8'. + """ + self.put(bytes(obj, encoding=encoding), filepath) + + def remove(self, filepath: Union[str, Path]) -> None: + """Remove a file. + + Args: + filepath (str or Path): Path to be removed. + """ + if not has_method(self._client, 'delete'): + raise NotImplementedError( + ('Current version of Petrel Python SDK has not supported ' + 'the `delete` method, please use a higher version or dev' + ' branch instead.')) + + filepath = self._map_path(filepath) + filepath = self._format_path(filepath) + self._client.delete(filepath) + + def exists(self, filepath: Union[str, Path]) -> bool: + """Check whether a file path exists. + + Args: + filepath (str or Path): Path to be checked whether exists. + + Returns: + bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise. + """ + if not (has_method(self._client, 'contains') + and has_method(self._client, 'isdir')): + raise NotImplementedError( + ('Current version of Petrel Python SDK has not supported ' + 'the `contains` and `isdir` methods, please use a higher' + 'version or dev branch instead.')) + + filepath = self._map_path(filepath) + filepath = self._format_path(filepath) + return self._client.contains(filepath) or self._client.isdir(filepath) + + def isdir(self, filepath: Union[str, Path]) -> bool: + """Check whether a file path is a directory. + + Args: + filepath (str or Path): Path to be checked whether it is a + directory. + + Returns: + bool: Return ``True`` if ``filepath`` points to a directory, + ``False`` otherwise. + """ + if not has_method(self._client, 'isdir'): + raise NotImplementedError( + ('Current version of Petrel Python SDK has not supported ' + 'the `isdir` method, please use a higher version or dev' + ' branch instead.')) + + filepath = self._map_path(filepath) + filepath = self._format_path(filepath) + return self._client.isdir(filepath) + + def isfile(self, filepath: Union[str, Path]) -> bool: + """Check whether a file path is a file. + + Args: + filepath (str or Path): Path to be checked whether it is a file. + + Returns: + bool: Return ``True`` if ``filepath`` points to a file, ``False`` + otherwise. + """ + if not has_method(self._client, 'contains'): + raise NotImplementedError( + ('Current version of Petrel Python SDK has not supported ' + 'the `contains` method, please use a higher version or ' + 'dev branch instead.')) + + filepath = self._map_path(filepath) + filepath = self._format_path(filepath) + return self._client.contains(filepath) + + def join_path(self, filepath: Union[str, Path], + *filepaths: Union[str, Path]) -> str: + """Concatenate all file paths. + + Args: + filepath (str or Path): Path to be concatenated. 
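+            *filepaths (str or Path): Other paths to be concatenated.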
+ + Returns: + str: The result after concatenation. + """ + filepath = self._format_path(self._map_path(filepath)) + if filepath.endswith('/'): + filepath = filepath[:-1] + formatted_paths = [filepath] + for path in filepaths: + formatted_paths.append(self._format_path(self._map_path(path))) + return '/'.join(formatted_paths) + + @contextmanager + def get_local_path(self, filepath: Union[str, Path]) -> Iterable[str]: + """Download a file from ``filepath`` and return a temporary path. + + ``get_local_path`` is decorated by :meth:`contxtlib.contextmanager`. It + can be called with ``with`` statement, and when exists from the + ``with`` statement, the temporary path will be released. + + Args: + filepath (str | Path): Download a file from ``filepath``. + + Examples: + >>> client = PetrelBackend() + >>> # After existing from the ``with`` clause, + >>> # the path will be removed + >>> with client.get_local_path('s3://path/of/your/file') as path: + ... # do something here + + Yields: + Iterable[str]: Only yield one temporary path. + """ + filepath = self._map_path(filepath) + filepath = self._format_path(filepath) + assert self.isfile(filepath) + try: + f = tempfile.NamedTemporaryFile(delete=False) + f.write(self.get(filepath)) + f.close() + yield f.name + finally: + os.remove(f.name) + + def list_dir_or_file(self, + dir_path: Union[str, Path], + list_dir: bool = True, + list_file: bool = True, + suffix: Optional[Union[str, Tuple[str]]] = None, + recursive: bool = False) -> Iterator[str]: + """Scan a directory to find the interested directories or files in + arbitrary order. + + Note: + Petrel has no concept of directories but it simulates the directory + hierarchy in the filesystem through public prefixes. In addition, + if the returned path ends with '/', it means the path is a public + prefix which is a logical directory. + + Note: + :meth:`list_dir_or_file` returns the path relative to ``dir_path``. + In addition, the returned path of directory will not contains the + suffix '/' which is consistent with other backends. + + Args: + dir_path (str | Path): Path of the directory. + list_dir (bool): List the directories. Default: True. + list_file (bool): List the path of files. Default: True. + suffix (str or tuple[str], optional): File suffix + that we are interested in. Default: None. + recursive (bool): If set to True, recursively scan the + directory. Default: False. + + Yields: + Iterable[str]: A relative path to ``dir_path``. 
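+
+        Examples:
+            >>> # Hypothetical sketch; assumes a reachable petrel cluster and
+            >>> # that the bucket path below exists.
+            >>> client = PetrelBackend()
+            >>> for rel_path in client.list_dir_or_file(
+            ...         's3://path/of/your/dir', list_dir=False, suffix='.jpg'):
+            ...     print(rel_path)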
+ """ + if not has_method(self._client, 'list'): + raise NotImplementedError( + ('Current version of Petrel Python SDK has not supported ' + 'the `list` method, please use a higher version or dev' + ' branch instead.')) + + dir_path = self._map_path(dir_path) + dir_path = self._format_path(dir_path) + if list_dir and suffix is not None: + raise TypeError( + '`list_dir` should be False when `suffix` is not None') + + if (suffix is not None) and not isinstance(suffix, (str, tuple)): + raise TypeError('`suffix` must be a string or tuple of strings') + + # Petrel's simulated directory hierarchy assumes that directory paths + # should end with `/` + if not dir_path.endswith('/'): + dir_path += '/' + + root = dir_path + + def _list_dir_or_file(dir_path, list_dir, list_file, suffix, + recursive): + for path in self._client.list(dir_path): + # the `self.isdir` is not used here to determine whether path + # is a directory, because `self.isdir` relies on + # `self._client.list` + if path.endswith('/'): # a directory path + next_dir_path = self.join_path(dir_path, path) + if list_dir: + # get the relative path and exclude the last + # character '/' + rel_dir = next_dir_path[len(root):-1] + yield rel_dir + if recursive: + yield from _list_dir_or_file(next_dir_path, list_dir, + list_file, suffix, + recursive) + else: # a file path + absolute_path = self.join_path(dir_path, path) + rel_path = absolute_path[len(root):] + if (suffix is None + or rel_path.endswith(suffix)) and list_file: + yield rel_path + + return _list_dir_or_file(dir_path, list_dir, list_file, suffix, + recursive) + + +class MemcachedBackend(BaseStorageBackend): + """Memcached storage backend. + + Attributes: + server_list_cfg (str): Config file for memcached server list. + client_cfg (str): Config file for memcached client. + sys_path (str | None): Additional path to be appended to `sys.path`. + Default: None. + """ + + def __init__(self, server_list_cfg, client_cfg, sys_path=None): + if sys_path is not None: + import sys + sys.path.append(sys_path) + try: + import mc + except ImportError: + raise ImportError( + 'Please install memcached to enable MemcachedBackend.') + + self.server_list_cfg = server_list_cfg + self.client_cfg = client_cfg + self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg, + self.client_cfg) + # mc.pyvector servers as a point which points to a memory cache + self._mc_buffer = mc.pyvector() + + def get(self, filepath): + filepath = str(filepath) + import mc + self._client.Get(filepath, self._mc_buffer) + value_buf = mc.ConvertBuffer(self._mc_buffer) + return value_buf + + def get_text(self, filepath, encoding=None): + raise NotImplementedError + + +class LmdbBackend(BaseStorageBackend): + """Lmdb storage backend. + + Args: + db_path (str): Lmdb database path. + readonly (bool, optional): Lmdb environment parameter. If True, + disallow any write operations. Default: True. + lock (bool, optional): Lmdb environment parameter. If False, when + concurrent access occurs, do not lock the database. Default: False. + readahead (bool, optional): Lmdb environment parameter. If False, + disable the OS filesystem readahead mechanism, which may improve + random read performance when a database is larger than RAM. + Default: False. + + Attributes: + db_path (str): Lmdb database path. 
+ """ + + def __init__(self, + db_path, + readonly=True, + lock=False, + readahead=False, + **kwargs): + try: + import lmdb + except ImportError: + raise ImportError('Please install lmdb to enable LmdbBackend.') + + self.db_path = str(db_path) + self._client = lmdb.open( + self.db_path, + readonly=readonly, + lock=lock, + readahead=readahead, + **kwargs) + + def get(self, filepath): + """Get values according to the filepath. + + Args: + filepath (str | obj:`Path`): Here, filepath is the lmdb key. + """ + filepath = str(filepath) + with self._client.begin(write=False) as txn: + value_buf = txn.get(filepath.encode('ascii')) + return value_buf + + def get_text(self, filepath, encoding=None): + raise NotImplementedError + + +class HardDiskBackend(BaseStorageBackend): + """Raw hard disks storage backend.""" + + _allow_symlink = True + + def get(self, filepath: Union[str, Path]) -> bytes: + """Read data from a given ``filepath`` with 'rb' mode. + + Args: + filepath (str or Path): Path to read data. + + Returns: + bytes: Expected bytes object. + """ + with open(filepath, 'rb') as f: + value_buf = f.read() + return value_buf + + def get_text(self, + filepath: Union[str, Path], + encoding: str = 'utf-8') -> str: + """Read data from a given ``filepath`` with 'r' mode. + + Args: + filepath (str or Path): Path to read data. + encoding (str): The encoding format used to open the ``filepath``. + Default: 'utf-8'. + + Returns: + str: Expected text reading from ``filepath``. + """ + with open(filepath, 'r', encoding=encoding) as f: + value_buf = f.read() + return value_buf + + def put(self, obj: bytes, filepath: Union[str, Path]) -> None: + """Write data to a given ``filepath`` with 'wb' mode. + + Note: + ``put`` will create a directory if the directory of ``filepath`` + does not exist. + + Args: + obj (bytes): Data to be written. + filepath (str or Path): Path to write data. + """ + mkdir_or_exist(osp.dirname(filepath)) + with open(filepath, 'wb') as f: + f.write(obj) + + def put_text(self, + obj: str, + filepath: Union[str, Path], + encoding: str = 'utf-8') -> None: + """Write data to a given ``filepath`` with 'w' mode. + + Note: + ``put_text`` will create a directory if the directory of + ``filepath`` does not exist. + + Args: + obj (str): Data to be written. + filepath (str or Path): Path to write data. + encoding (str): The encoding format used to open the ``filepath``. + Default: 'utf-8'. + """ + mkdir_or_exist(osp.dirname(filepath)) + with open(filepath, 'w', encoding=encoding) as f: + f.write(obj) + + def remove(self, filepath: Union[str, Path]) -> None: + """Remove a file. + + Args: + filepath (str or Path): Path to be removed. + """ + os.remove(filepath) + + def exists(self, filepath: Union[str, Path]) -> bool: + """Check whether a file path exists. + + Args: + filepath (str or Path): Path to be checked whether exists. + + Returns: + bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise. + """ + return osp.exists(filepath) + + def isdir(self, filepath: Union[str, Path]) -> bool: + """Check whether a file path is a directory. + + Args: + filepath (str or Path): Path to be checked whether it is a + directory. + + Returns: + bool: Return ``True`` if ``filepath`` points to a directory, + ``False`` otherwise. + """ + return osp.isdir(filepath) + + def isfile(self, filepath: Union[str, Path]) -> bool: + """Check whether a file path is a file. + + Args: + filepath (str or Path): Path to be checked whether it is a file. 
+
+        Returns:
+            bool: Return ``True`` if ``filepath`` points to a file, ``False``
+            otherwise.
+        """
+        return osp.isfile(filepath)
+
+    def join_path(self, filepath: Union[str, Path],
+                  *filepaths: Union[str, Path]) -> str:
+        """Concatenate all file paths.
+
+        Join one or more filepath components intelligently. The return value
+        is the concatenation of filepath and any members of *filepaths.
+
+        Args:
+            filepath (str or Path): Path to be concatenated.
+            *filepaths (str or Path): Other paths to be concatenated.
+
+        Returns:
+            str: The result of concatenation.
+        """
+        return osp.join(filepath, *filepaths)
+
+    @contextmanager
+    def get_local_path(
+            self, filepath: Union[str, Path]) -> Iterable[Union[str, Path]]:
+        """Only for a unified API; it does nothing but yield ``filepath``."""
+        yield filepath
+
+    def list_dir_or_file(self,
+                         dir_path: Union[str, Path],
+                         list_dir: bool = True,
+                         list_file: bool = True,
+                         suffix: Optional[Union[str, Tuple[str]]] = None,
+                         recursive: bool = False) -> Iterator[str]:
+        """Scan a directory to find the directories or files of interest, in
+        arbitrary order.
+
+        Note:
+            :meth:`list_dir_or_file` returns the path relative to ``dir_path``.
+
+        Args:
+            dir_path (str | Path): Path of the directory.
+            list_dir (bool): List the directories. Default: True.
+            list_file (bool): List the path of files. Default: True.
+            suffix (str or tuple[str], optional): File suffix
+                that we are interested in. Default: None.
+            recursive (bool): If set to True, recursively scan the
+                directory. Default: False.
+
+        Yields:
+            Iterable[str]: A relative path to ``dir_path``.
+        """
+        if list_dir and suffix is not None:
+            raise TypeError('`suffix` should be None when `list_dir` is True')
+
+        if (suffix is not None) and not isinstance(suffix, (str, tuple)):
+            raise TypeError('`suffix` must be a string or tuple of strings')
+
+        root = dir_path
+
+        def _list_dir_or_file(dir_path, list_dir, list_file, suffix,
+                              recursive):
+            for entry in os.scandir(dir_path):
+                if not entry.name.startswith('.') and entry.is_file():
+                    rel_path = osp.relpath(entry.path, root)
+                    if (suffix is None
+                            or rel_path.endswith(suffix)) and list_file:
+                        yield rel_path
+                elif osp.isdir(entry.path):
+                    if list_dir:
+                        rel_dir = osp.relpath(entry.path, root)
+                        yield rel_dir
+                    if recursive:
+                        yield from _list_dir_or_file(entry.path, list_dir,
+                                                     list_file, suffix,
+                                                     recursive)
+
+        return _list_dir_or_file(dir_path, list_dir, list_file, suffix,
+                                 recursive)
+
+
+class HTTPBackend(BaseStorageBackend):
+    """HTTP and HTTPS storage backend."""
+
+    def get(self, filepath):
+        value_buf = urlopen(filepath).read()
+        return value_buf
+
+    def get_text(self, filepath, encoding='utf-8'):
+        value_buf = urlopen(filepath).read()
+        return value_buf.decode(encoding)
+
+    @contextmanager
+    def get_local_path(self, filepath: str) -> Iterable[str]:
+        """Download a file from ``filepath``.
+
+        ``get_local_path`` is decorated by :meth:`contextlib.contextmanager`.
+        It can be called with the ``with`` statement, and when exiting the
+        ``with`` statement, the temporary path will be released.
+
+        Args:
+            filepath (str): Download a file from ``filepath``.
+
+        Examples:
+            >>> client = HTTPBackend()
+            >>> # After exiting from the ``with`` clause,
+            >>> # the path will be removed
+            >>> with client.get_local_path('http://path/of/your/file') as path:
+            ...     # do something here
+        """
+        try:
+            f = tempfile.NamedTemporaryFile(delete=False)
+            f.write(self.get(filepath))
+            f.close()
+            yield f.name
+        finally:
+            os.remove(f.name)
+
+
+class FileClient:
+    """A general file client to access files in different backends.
+
+    The client loads a file or text in a specified backend from its path
+    and returns it as a binary or text file. There are two ways to choose a
+    backend: the name of the backend and the prefix of the path. Although both
+    of them can be used to choose a storage backend, ``backend`` has a higher
+    priority: if both are set, the storage backend will be chosen by the
+    ``backend`` argument. If both are ``None``, the disk backend will be
+    chosen. Note that it can also register other backend accessors with a
+    given name, prefixes, and backend class. In addition, we use the singleton
+    pattern to avoid repeated object creation. If the arguments are the same,
+    the same object will be returned.
+
+    Args:
+        backend (str, optional): The storage backend type. Options are "disk",
+            "ceph", "memcached", "lmdb", "http" and "petrel". Default: None.
+        prefix (str, optional): The prefix of the registered storage backend.
+            Options are "s3", "http", "https". Default: None.
+
+    Examples:
+        >>> # only set backend
+        >>> file_client = FileClient(backend='petrel')
+        >>> # only set prefix
+        >>> file_client = FileClient(prefix='s3')
+        >>> # set both backend and prefix but use backend to choose client
+        >>> file_client = FileClient(backend='petrel', prefix='s3')
+        >>> # if the arguments are the same, the same object is returned
+        >>> file_client1 = FileClient(backend='petrel')
+        >>> file_client1 is file_client
+        True
+
+    Attributes:
+        client (:obj:`BaseStorageBackend`): The backend object.
+    """
+
+    _backends = {
+        'disk': HardDiskBackend,
+        'ceph': CephBackend,
+        'memcached': MemcachedBackend,
+        'lmdb': LmdbBackend,
+        'petrel': PetrelBackend,
+        'http': HTTPBackend,
+    }
+    # This collection is used to record the overridden backends; when a
+    # backend appears in the collection, the singleton pattern is disabled for
+    # that backend, because if the singleton pattern were used, the object
+    # returned would be the one created before the backend was overridden.
+    _overridden_backends = set()
+    _prefix_to_backends = {
+        's3': PetrelBackend,
+        'http': HTTPBackend,
+        'https': HTTPBackend,
+    }
+    _overridden_prefixes = set()
+
+    _instances = {}
+
+    def __new__(cls, backend=None, prefix=None, **kwargs):
+        if backend is None and prefix is None:
+            backend = 'disk'
+        if backend is not None and backend not in cls._backends:
+            raise ValueError(
+                f'Backend {backend} is not supported. Currently supported ones'
+                f' are {list(cls._backends.keys())}')
+        if prefix is not None and prefix not in cls._prefix_to_backends:
+            raise ValueError(
+                f'prefix {prefix} is not supported.
Currently supported ones ' + f'are {list(cls._prefix_to_backends.keys())}') + + # concatenate the arguments to a unique key for determining whether + # objects with the same arguments were created + arg_key = f'{backend}:{prefix}' + for key, value in kwargs.items(): + arg_key += f':{key}:{value}' + + # if a backend was overridden, it will create a new object + if (arg_key in cls._instances + and backend not in cls._overridden_backends + and prefix not in cls._overridden_prefixes): + _instance = cls._instances[arg_key] + else: + # create a new object and put it to _instance + _instance = super().__new__(cls) + if backend is not None: + _instance.client = cls._backends[backend](**kwargs) + else: + _instance.client = cls._prefix_to_backends[prefix](**kwargs) + + cls._instances[arg_key] = _instance + + return _instance + + @property + def name(self): + return self.client.name + + @property + def allow_symlink(self): + return self.client.allow_symlink + + @staticmethod + def parse_uri_prefix(uri: Union[str, Path]) -> Optional[str]: + """Parse the prefix of a uri. + + Args: + uri (str | Path): Uri to be parsed that contains the file prefix. + + Examples: + >>> FileClient.parse_uri_prefix('s3://path/of/your/file') + 's3' + + Returns: + str | None: Return the prefix of uri if the uri contains '://' + else ``None``. + """ + assert is_filepath(uri) + uri = str(uri) + if '://' not in uri: + return None + else: + prefix, _ = uri.split('://') + # In the case of PetrelBackend, the prefix may contains the cluster + # name like clusterName:s3 + if ':' in prefix: + _, prefix = prefix.split(':') + return prefix + + @classmethod + def infer_client(cls, + file_client_args: Optional[dict] = None, + uri: Optional[Union[str, Path]] = None) -> 'FileClient': + """Infer a suitable file client based on the URI and arguments. + + Args: + file_client_args (dict, optional): Arguments to instantiate a + FileClient. Default: None. + uri (str | Path, optional): Uri to be parsed that contains the file + prefix. Default: None. + + Examples: + >>> uri = 's3://path/of/your/file' + >>> file_client = FileClient.infer_client(uri=uri) + >>> file_client_args = {'backend': 'petrel'} + >>> file_client = FileClient.infer_client(file_client_args) + + Returns: + FileClient: Instantiated FileClient object. 
+ """ + assert file_client_args is not None or uri is not None + if file_client_args is None: + file_prefix = cls.parse_uri_prefix(uri) # type: ignore + return cls(prefix=file_prefix) + else: + return cls(**file_client_args) + + @classmethod + def _register_backend(cls, name, backend, force=False, prefixes=None): + if not isinstance(name, str): + raise TypeError('the backend name should be a string, ' + f'but got {type(name)}') + if not inspect.isclass(backend): + raise TypeError( + f'backend should be a class but got {type(backend)}') + if not issubclass(backend, BaseStorageBackend): + raise TypeError( + f'backend {backend} is not a subclass of BaseStorageBackend') + if not force and name in cls._backends: + raise KeyError( + f'{name} is already registered as a storage backend, ' + 'add "force=True" if you want to override it') + + if name in cls._backends and force: + cls._overridden_backends.add(name) + cls._backends[name] = backend + + if prefixes is not None: + if isinstance(prefixes, str): + prefixes = [prefixes] + else: + assert isinstance(prefixes, (list, tuple)) + for prefix in prefixes: + if prefix not in cls._prefix_to_backends: + cls._prefix_to_backends[prefix] = backend + elif (prefix in cls._prefix_to_backends) and force: + cls._overridden_prefixes.add(prefix) + cls._prefix_to_backends[prefix] = backend + else: + raise KeyError( + f'{prefix} is already registered as a storage backend,' + ' add "force=True" if you want to override it') + + @classmethod + def register_backend(cls, name, backend=None, force=False, prefixes=None): + """Register a backend to FileClient. + + This method can be used as a normal class method or a decorator. + + .. code-block:: python + + class NewBackend(BaseStorageBackend): + + def get(self, filepath): + return filepath + + def get_text(self, filepath): + return filepath + + FileClient.register_backend('new', NewBackend) + + or + + .. code-block:: python + + @FileClient.register_backend('new') + class NewBackend(BaseStorageBackend): + + def get(self, filepath): + return filepath + + def get_text(self, filepath): + return filepath + + Args: + name (str): The name of the registered backend. + backend (class, optional): The backend class to be registered, + which must be a subclass of :class:`BaseStorageBackend`. + When this method is used as a decorator, backend is None. + Defaults to None. + force (bool, optional): Whether to override the backend if the name + has already been registered. Defaults to False. + prefixes (str or list[str] or tuple[str], optional): The prefixes + of the registered storage backend. Default: None. + `New in version 1.3.15.` + """ + if backend is not None: + cls._register_backend( + name, backend, force=force, prefixes=prefixes) + return + + def _register(backend_cls): + cls._register_backend( + name, backend_cls, force=force, prefixes=prefixes) + return backend_cls + + return _register + + def get(self, filepath: Union[str, Path]) -> Union[bytes, memoryview]: + """Read data from a given ``filepath`` with 'rb' mode. + + Note: + There are two types of return values for ``get``, one is ``bytes`` + and the other is ``memoryview``. The advantage of using memoryview + is that you can avoid copying, and if you want to convert it to + ``bytes``, you can use ``.tobytes()``. + + Args: + filepath (str or Path): Path to read data. + + Returns: + bytes | memoryview: Expected bytes object or a memory view of the + bytes object. 
+ """ + return self.client.get(filepath) + + def get_text(self, filepath: Union[str, Path], encoding='utf-8') -> str: + """Read data from a given ``filepath`` with 'r' mode. + + Args: + filepath (str or Path): Path to read data. + encoding (str): The encoding format used to open the ``filepath``. + Default: 'utf-8'. + + Returns: + str: Expected text reading from ``filepath``. + """ + return self.client.get_text(filepath, encoding) + + def put(self, obj: bytes, filepath: Union[str, Path]) -> None: + """Write data to a given ``filepath`` with 'wb' mode. + + Note: + ``put`` should create a directory if the directory of ``filepath`` + does not exist. + + Args: + obj (bytes): Data to be written. + filepath (str or Path): Path to write data. + """ + self.client.put(obj, filepath) + + def put_text(self, obj: str, filepath: Union[str, Path]) -> None: + """Write data to a given ``filepath`` with 'w' mode. + + Note: + ``put_text`` should create a directory if the directory of + ``filepath`` does not exist. + + Args: + obj (str): Data to be written. + filepath (str or Path): Path to write data. + encoding (str, optional): The encoding format used to open the + `filepath`. Default: 'utf-8'. + """ + self.client.put_text(obj, filepath) + + def remove(self, filepath: Union[str, Path]) -> None: + """Remove a file. + + Args: + filepath (str, Path): Path to be removed. + """ + self.client.remove(filepath) + + def exists(self, filepath: Union[str, Path]) -> bool: + """Check whether a file path exists. + + Args: + filepath (str or Path): Path to be checked whether exists. + + Returns: + bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise. + """ + return self.client.exists(filepath) + + def isdir(self, filepath: Union[str, Path]) -> bool: + """Check whether a file path is a directory. + + Args: + filepath (str or Path): Path to be checked whether it is a + directory. + + Returns: + bool: Return ``True`` if ``filepath`` points to a directory, + ``False`` otherwise. + """ + return self.client.isdir(filepath) + + def isfile(self, filepath: Union[str, Path]) -> bool: + """Check whether a file path is a file. + + Args: + filepath (str or Path): Path to be checked whether it is a file. + + Returns: + bool: Return ``True`` if ``filepath`` points to a file, ``False`` + otherwise. + """ + return self.client.isfile(filepath) + + def join_path(self, filepath: Union[str, Path], + *filepaths: Union[str, Path]) -> str: + """Concatenate all file paths. + + Join one or more filepath components intelligently. The return value + is the concatenation of filepath and any members of *filepaths. + + Args: + filepath (str or Path): Path to be concatenated. + + Returns: + str: The result of concatenation. + """ + return self.client.join_path(filepath, *filepaths) + + @contextmanager + def get_local_path(self, filepath: Union[str, Path]) -> Iterable[str]: + """Download data from ``filepath`` and write the data to local path. + + ``get_local_path`` is decorated by :meth:`contxtlib.contextmanager`. It + can be called with ``with`` statement, and when exists from the + ``with`` statement, the temporary path will be released. + + Note: + If the ``filepath`` is a local path, just return itself. + + .. warning:: + ``get_local_path`` is an experimental interface that may change in + the future. + + Args: + filepath (str or Path): Path to be read data. + + Examples: + >>> file_client = FileClient(prefix='s3') + >>> with file_client.get_local_path('s3://bucket/abc.jpg') as path: + ... 
# do something here + + Yields: + Iterable[str]: Only yield one path. + """ + with self.client.get_local_path(str(filepath)) as local_path: + yield local_path + + def list_dir_or_file(self, + dir_path: Union[str, Path], + list_dir: bool = True, + list_file: bool = True, + suffix: Optional[Union[str, Tuple[str]]] = None, + recursive: bool = False) -> Iterator[str]: + """Scan a directory to find the interested directories or files in + arbitrary order. + + Note: + :meth:`list_dir_or_file` returns the path relative to ``dir_path``. + + Args: + dir_path (str | Path): Path of the directory. + list_dir (bool): List the directories. Default: True. + list_file (bool): List the path of files. Default: True. + suffix (str or tuple[str], optional): File suffix + that we are interested in. Default: None. + recursive (bool): If set to True, recursively scan the + directory. Default: False. + + Yields: + Iterable[str]: A relative path to ``dir_path``. + """ + yield from self.client.list_dir_or_file(dir_path, list_dir, list_file, + suffix, recursive) diff --git a/mmcv/fileio/handlers/__init__.py b/mmcv/fileio/handlers/__init__.py new file mode 100644 index 0000000..4756674 --- /dev/null +++ b/mmcv/fileio/handlers/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base import BaseFileHandler +from .json_handler import JsonHandler +from .pickle_handler import PickleHandler \ No newline at end of file diff --git a/mmcv/fileio/handlers/base.py b/mmcv/fileio/handlers/base.py new file mode 100644 index 0000000..288878b --- /dev/null +++ b/mmcv/fileio/handlers/base.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod + + +class BaseFileHandler(metaclass=ABCMeta): + # `str_like` is a flag to indicate whether the type of file object is + # str-like object or bytes-like object. Pickle only processes bytes-like + # objects but json only processes str-like object. If it is str-like + # object, `StringIO` will be used to process the buffer. + str_like = True + + @abstractmethod + def load_from_fileobj(self, file, **kwargs): + pass + + @abstractmethod + def dump_to_fileobj(self, obj, file, **kwargs): + pass + + @abstractmethod + def dump_to_str(self, obj, **kwargs): + pass + + def load_from_path(self, filepath, mode='r', **kwargs): + with open(filepath, mode) as f: + return self.load_from_fileobj(f, **kwargs) + + def dump_to_path(self, obj, filepath, mode='w', **kwargs): + with open(filepath, mode) as f: + self.dump_to_fileobj(obj, f, **kwargs) diff --git a/mmcv/fileio/handlers/json_handler.py b/mmcv/fileio/handlers/json_handler.py new file mode 100644 index 0000000..18d4f15 --- /dev/null +++ b/mmcv/fileio/handlers/json_handler.py @@ -0,0 +1,36 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import json + +import numpy as np + +from .base import BaseFileHandler + + +def set_default(obj): + """Set default json values for non-serializable values. + + It helps convert ``set``, ``range`` and ``np.ndarray`` data types to list. + It also converts ``np.generic`` (including ``np.int32``, ``np.float32``, + etc.) into plain numbers of plain python built-in types. 
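+
+    Examples:
+        >>> # Behaviour sketch when used via ``json.dumps(..., default=...)``.
+        >>> json.dumps({'a': np.float32(1.5), 'b': {1, 2}}, default=set_default)
+        '{"a": 1.5, "b": [1, 2]}'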
+ """ + if isinstance(obj, (set, range)): + return list(obj) + elif isinstance(obj, np.ndarray): + return obj.tolist() + elif isinstance(obj, np.generic): + return obj.item() + raise TypeError(f'{type(obj)} is unsupported for json dump') + + +class JsonHandler(BaseFileHandler): + + def load_from_fileobj(self, file): + return json.load(file) + + def dump_to_fileobj(self, obj, file, **kwargs): + kwargs.setdefault('default', set_default) + json.dump(obj, file, **kwargs) + + def dump_to_str(self, obj, **kwargs): + kwargs.setdefault('default', set_default) + return json.dumps(obj, **kwargs) diff --git a/mmcv/fileio/handlers/pickle_handler.py b/mmcv/fileio/handlers/pickle_handler.py new file mode 100644 index 0000000..b37c79b --- /dev/null +++ b/mmcv/fileio/handlers/pickle_handler.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pickle + +from .base import BaseFileHandler + + +class PickleHandler(BaseFileHandler): + + str_like = False + + def load_from_fileobj(self, file, **kwargs): + return pickle.load(file, **kwargs) + + def load_from_path(self, filepath, **kwargs): + return super(PickleHandler, self).load_from_path( + filepath, mode='rb', **kwargs) + + def dump_to_str(self, obj, **kwargs): + kwargs.setdefault('protocol', 2) + return pickle.dumps(obj, **kwargs) + + def dump_to_fileobj(self, obj, file, **kwargs): + kwargs.setdefault('protocol', 2) + pickle.dump(obj, file, **kwargs) + + def dump_to_path(self, obj, filepath, **kwargs): + super(PickleHandler, self).dump_to_path( + obj, filepath, mode='wb', **kwargs) diff --git a/mmcv/fileio/io.py b/mmcv/fileio/io.py new file mode 100644 index 0000000..6155a5d --- /dev/null +++ b/mmcv/fileio/io.py @@ -0,0 +1,154 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from io import BytesIO, StringIO +from pathlib import Path + +from ..utils.misc import is_list_of, is_str +from .handlers import BaseFileHandler, JsonHandler, PickleHandler + +file_handlers = { + 'json': JsonHandler(), + # 'yaml': YamlHandler(), + # 'yml': YamlHandler(), + 'pickle': PickleHandler(), + 'pkl': PickleHandler() +} + + +def load(file, file_format=None, file_client_args=None, **kwargs): + """Load data from json/yaml/pickle files. + + This method provides a unified api for loading data from serialized files. + + Note: + In v1.3.16 and later, ``load`` supports loading data from serialized + files those can be storaged in different backends. + + Args: + file (str or :obj:`Path` or file-like object): Filename or a file-like + object. + file_format (str, optional): If not specified, the file format will be + inferred from the file extension, otherwise use the specified one. + Currently supported formats include "json", "yaml/yml" and + "pickle/pkl". + file_client_args (dict, optional): Arguments to instantiate a + FileClient. See :class:`mmcv.fileio.FileClient` for details. + Default: None. + + Examples: + >>> load('/path/of/your/file') # file is storaged in disk + >>> load('https://path/of/your/file') # file is storaged in Internet + >>> load('s3://path/of/your/file') # file is storaged in petrel + + Returns: + The content from the file. 
+ """ + #TODO(JIAZI) + from .file_client import FileClient + if isinstance(file, Path): + file = str(file) + if file_format is None and is_str(file): + file_format = file.split('.')[-1] + if file_format not in file_handlers: + raise TypeError(f'Unsupported format: {file_format}') + + handler = file_handlers[file_format] + if is_str(file): + file_client = FileClient.infer_client(file_client_args, file) + if handler.str_like: + with StringIO(file_client.get_text(file)) as f: + obj = handler.load_from_fileobj(f, **kwargs) + else: + with BytesIO(file_client.get(file)) as f: + obj = handler.load_from_fileobj(f, **kwargs) + elif hasattr(file, 'read'): + obj = handler.load_from_fileobj(file, **kwargs) + else: + raise TypeError('"file" must be a filepath str or a file-object') + return obj + + +def dump(obj, file=None, file_format=None, file_client_args=None, **kwargs): + """Dump data to json/yaml/pickle strings or files. + + This method provides a unified api for dumping data as strings or to files, + and also supports custom arguments for each file format. + + Note: + In v1.3.16 and later, ``dump`` supports dumping data as strings or to + files which is saved to different backends. + + Args: + obj (any): The python object to be dumped. + file (str or :obj:`Path` or file-like object, optional): If not + specified, then the object is dumped to a str, otherwise to a file + specified by the filename or file-like object. + file_format (str, optional): Same as :func:`load`. + file_client_args (dict, optional): Arguments to instantiate a + FileClient. See :class:`mmcv.fileio.FileClient` for details. + Default: None. + + Examples: + >>> dump('hello world', '/path/of/your/file') # disk + >>> dump('hello world', 's3://path/of/your/file') # ceph or petrel + + Returns: + bool: True for success, False otherwise. + """ + #TODO(JIAZI) + from .file_client import FileClient + if isinstance(file, Path): + file = str(file) + if file_format is None: + if is_str(file): + file_format = file.split('.')[-1] + elif file is None: + raise ValueError( + 'file_format must be specified since file is None') + if file_format not in file_handlers: + raise TypeError(f'Unsupported format: {file_format}') + + handler = file_handlers[file_format] + if file is None: + return handler.dump_to_str(obj, **kwargs) + elif is_str(file): + file_client = FileClient.infer_client(file_client_args, file) + if handler.str_like: + with StringIO() as f: + handler.dump_to_fileobj(obj, f, **kwargs) + file_client.put_text(f.getvalue(), file) + else: + with BytesIO() as f: + handler.dump_to_fileobj(obj, f, **kwargs) + file_client.put(f.getvalue(), file) + elif hasattr(file, 'write'): + handler.dump_to_fileobj(obj, file, **kwargs) + else: + raise TypeError('"file" must be a filename str or a file-object') + + +def _register_handler(handler, file_formats): + """Register a handler for some file extensions. + + Args: + handler (:obj:`BaseFileHandler`): Handler to be registered. + file_formats (str or list[str]): File formats to be handled by this + handler. 
+ """ + if not isinstance(handler, BaseFileHandler): + raise TypeError( + f'handler must be a child of BaseFileHandler, not {type(handler)}') + if isinstance(file_formats, str): + file_formats = [file_formats] + if not is_list_of(file_formats, str): + raise TypeError('file_formats must be a str or a list of str') + for ext in file_formats: + file_handlers[ext] = handler + + +def register_handler(file_formats, **kwargs): + + def wrap(cls): + _register_handler(cls(**kwargs), file_formats) + return cls + + return wrap diff --git a/mmcv/fileio/parse.py b/mmcv/fileio/parse.py new file mode 100644 index 0000000..f60f0d6 --- /dev/null +++ b/mmcv/fileio/parse.py @@ -0,0 +1,97 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from io import StringIO + +from .file_client import FileClient + + +def list_from_file(filename, + prefix='', + offset=0, + max_num=0, + encoding='utf-8', + file_client_args=None): + """Load a text file and parse the content as a list of strings. + + Note: + In v1.3.16 and later, ``list_from_file`` supports loading a text file + which can be storaged in different backends and parsing the content as + a list for strings. + + Args: + filename (str): Filename. + prefix (str): The prefix to be inserted to the beginning of each item. + offset (int): The offset of lines. + max_num (int): The maximum number of lines to be read, + zeros and negatives mean no limitation. + encoding (str): Encoding used to open the file. Default utf-8. + file_client_args (dict, optional): Arguments to instantiate a + FileClient. See :class:`mmcv.fileio.FileClient` for details. + Default: None. + + Examples: + >>> list_from_file('/path/of/your/file') # disk + ['hello', 'world'] + >>> list_from_file('s3://path/of/your/file') # ceph or petrel + ['hello', 'world'] + + Returns: + list[str]: A list of strings. + """ + cnt = 0 + item_list = [] + file_client = FileClient.infer_client(file_client_args, filename) + with StringIO(file_client.get_text(filename, encoding)) as f: + for _ in range(offset): + f.readline() + for line in f: + if 0 < max_num <= cnt: + break + item_list.append(prefix + line.rstrip('\n\r')) + cnt += 1 + return item_list + + +def dict_from_file(filename, + key_type=str, + encoding='utf-8', + file_client_args=None): + """Load a text file and parse the content as a dict. + + Each line of the text file will be two or more columns split by + whitespaces or tabs. The first column will be parsed as dict keys, and + the following columns will be parsed as dict values. + + Note: + In v1.3.16 and later, ``dict_from_file`` supports loading a text file + which can be storaged in different backends and parsing the content as + a dict. + + Args: + filename(str): Filename. + key_type(type): Type of the dict keys. str is user by default and + type conversion will be performed if specified. + encoding (str): Encoding used to open the file. Default utf-8. + file_client_args (dict, optional): Arguments to instantiate a + FileClient. See :class:`mmcv.fileio.FileClient` for details. + Default: None. + + Examples: + >>> dict_from_file('/path/of/your/file') # disk + {'key1': 'value1', 'key2': 'value2'} + >>> dict_from_file('s3://path/of/your/file') # ceph or petrel + {'key1': 'value1', 'key2': 'value2'} + + Returns: + dict: The parsed contents. 
+ """ + mapping = {} + file_client = FileClient.infer_client(file_client_args, filename) + with StringIO(file_client.get_text(filename, encoding)) as f: + for line in f: + items = line.rstrip('\n').split() + assert len(items) >= 2 + key = key_type(items[0]) + val = items[1:] if len(items) > 2 else items[1] + mapping[key] = val + return mapping diff --git a/mmcv/image/__init__.py b/mmcv/image/__init__.py new file mode 100644 index 0000000..5d7edb2 --- /dev/null +++ b/mmcv/image/__init__.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .geometric import (cutout, imcrop, imflip, imflip_, impad, + impad_to_multiple, imrescale, imresize, imresize_like, + imresize_to_multiple, imrotate, imshear, imtranslate, + rescale_size) +from .io import imfrombytes, imread, imwrite, supported_backends, use_backend +from .photometric import (adjust_brightness, adjust_color, adjust_contrast, + adjust_lighting, adjust_sharpness, auto_contrast, + clahe, imdenormalize, imequalize, iminvert, + imnormalize, imnormalize_, lut_transform, posterize, + solarize) +from .misc import tensor2imgs +from .colorspace import (bgr2gray, bgr2hls, bgr2hsv, bgr2rgb, bgr2ycbcr, + gray2bgr, gray2rgb, hls2bgr, hsv2bgr, imconvert, + rgb2bgr, rgb2gray, rgb2ycbcr, ycbcr2bgr, ycbcr2rgb) +# __all__ = [ +# 'bgr2gray', 'bgr2hls', 'bgr2hsv', 'bgr2rgb', 'gray2bgr', 'gray2rgb', +# 'hls2bgr', 'hsv2bgr', 'imconvert', 'rgb2bgr', 'rgb2gray', 'imrescale', +# 'imresize', 'imresize_like', 'imresize_to_multiple', 'rescale_size', +# 'imcrop', 'imflip', 'imflip_', 'impad', 'impad_to_multiple', 'imrotate', +# 'imfrombytes', 'imread', 'imwrite', 'supported_backends', 'use_backend', +# 'imdenormalize', 'imnormalize', 'imnormalize_', 'iminvert', 'posterize', +# 'solarize', 'rgb2ycbcr', 'bgr2ycbcr', 'ycbcr2rgb', 'ycbcr2bgr', +# 'tensor2imgs', 'imshear', 'imtranslate', 'adjust_color', 'imequalize', +# 'adjust_brightness', 'adjust_contrast', 'lut_transform', 'clahe', +# 'adjust_sharpness', 'auto_contrast', 'cutout', 'adjust_lighting' +# ] diff --git a/mmcv/image/colorspace.py b/mmcv/image/colorspace.py new file mode 100644 index 0000000..8145339 --- /dev/null +++ b/mmcv/image/colorspace.py @@ -0,0 +1,306 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import cv2 +import numpy as np + + +def imconvert(img, src, dst): + """Convert an image from the src colorspace to dst colorspace. + + Args: + img (ndarray): The input image. + src (str): The source colorspace, e.g., 'rgb', 'hsv'. + dst (str): The destination colorspace, e.g., 'rgb', 'hsv'. + + Returns: + ndarray: The converted image. + """ + code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}') + out_img = cv2.cvtColor(img, code) + return out_img + + +def bgr2gray(img, keepdim=False): + """Convert a BGR image to grayscale image. + + Args: + img (ndarray): The input image. + keepdim (bool): If False (by default), then return the grayscale image + with 2 dims, otherwise 3 dims. + + Returns: + ndarray: The converted grayscale image. + """ + out_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + if keepdim: + out_img = out_img[..., None] + return out_img + + +def rgb2gray(img, keepdim=False): + """Convert a RGB image to grayscale image. + + Args: + img (ndarray): The input image. + keepdim (bool): If False (by default), then return the grayscale image + with 2 dims, otherwise 3 dims. + + Returns: + ndarray: The converted grayscale image. 
+ """ + out_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) + if keepdim: + out_img = out_img[..., None] + return out_img + + +def gray2bgr(img): + """Convert a grayscale image to BGR image. + + Args: + img (ndarray): The input image. + + Returns: + ndarray: The converted BGR image. + """ + img = img[..., None] if img.ndim == 2 else img + out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) + return out_img + + +def gray2rgb(img): + """Convert a grayscale image to RGB image. + + Args: + img (ndarray): The input image. + + Returns: + ndarray: The converted RGB image. + """ + img = img[..., None] if img.ndim == 2 else img + out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) + return out_img + + +def _convert_input_type_range(img): + """Convert the type and range of the input image. + + It converts the input image to np.float32 type and range of [0, 1]. + It is mainly used for pre-processing the input image in colorspace + conversion functions such as rgb2ycbcr and ycbcr2rgb. + + Args: + img (ndarray): The input image. It accepts: + 1. np.uint8 type with range [0, 255]; + 2. np.float32 type with range [0, 1]. + + Returns: + (ndarray): The converted image with type of np.float32 and range of + [0, 1]. + """ + img_type = img.dtype + img = img.astype(np.float32) + if img_type == np.float32: + pass + elif img_type == np.uint8: + img /= 255. + else: + raise TypeError('The img type should be np.float32 or np.uint8, ' + f'but got {img_type}') + return img + + +def _convert_output_type_range(img, dst_type): + """Convert the type and range of the image according to dst_type. + + It converts the image to desired type and range. If `dst_type` is np.uint8, + images will be converted to np.uint8 type with range [0, 255]. If + `dst_type` is np.float32, it converts the image to np.float32 type with + range [0, 1]. + It is mainly used for post-processing images in colorspace conversion + functions such as rgb2ycbcr and ycbcr2rgb. + + Args: + img (ndarray): The image to be converted with np.float32 type and + range [0, 255]. + dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it + converts the image to np.uint8 type with range [0, 255]. If + dst_type is np.float32, it converts the image to np.float32 type + with range [0, 1]. + + Returns: + (ndarray): The converted image with desired type and range. + """ + if dst_type not in (np.uint8, np.float32): + raise TypeError('The dst_type should be np.float32 or np.uint8, ' + f'but got {dst_type}') + if dst_type == np.uint8: + img = img.round() + else: + img /= 255. + return img.astype(dst_type) + + +def rgb2ycbcr(img, y_only=False): + """Convert a RGB image to YCbCr image. + + This function produces the same results as Matlab's `rgb2ycbcr` function. + It implements the ITU-R BT.601 conversion for standard-definition + television. See more details in + https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. + + It differs from a similar function in cv2.cvtColor: `RGB <-> YCrCb`. + In OpenCV, it implements a JPEG conversion. See more details in + https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. + + Args: + img (ndarray): The input image. It accepts: + 1. np.uint8 type with range [0, 255]; + 2. np.float32 type with range [0, 1]. + y_only (bool): Whether to only return Y channel. Default: False. + + Returns: + ndarray: The converted YCbCr image. The output image has the same type + and range as input image. 
+ """ + img_type = img.dtype + img = _convert_input_type_range(img) + if y_only: + out_img = np.dot(img, [65.481, 128.553, 24.966]) + 16.0 + else: + out_img = np.matmul( + img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786], + [24.966, 112.0, -18.214]]) + [16, 128, 128] + out_img = _convert_output_type_range(out_img, img_type) + return out_img + + +def bgr2ycbcr(img, y_only=False): + """Convert a BGR image to YCbCr image. + + The bgr version of rgb2ycbcr. + It implements the ITU-R BT.601 conversion for standard-definition + television. See more details in + https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. + + It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`. + In OpenCV, it implements a JPEG conversion. See more details in + https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. + + Args: + img (ndarray): The input image. It accepts: + 1. np.uint8 type with range [0, 255]; + 2. np.float32 type with range [0, 1]. + y_only (bool): Whether to only return Y channel. Default: False. + + Returns: + ndarray: The converted YCbCr image. The output image has the same type + and range as input image. + """ + img_type = img.dtype + img = _convert_input_type_range(img) + if y_only: + out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0 + else: + out_img = np.matmul( + img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], + [65.481, -37.797, 112.0]]) + [16, 128, 128] + out_img = _convert_output_type_range(out_img, img_type) + return out_img + + +def ycbcr2rgb(img): + """Convert a YCbCr image to RGB image. + + This function produces the same results as Matlab's ycbcr2rgb function. + It implements the ITU-R BT.601 conversion for standard-definition + television. See more details in + https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. + + It differs from a similar function in cv2.cvtColor: `YCrCb <-> RGB`. + In OpenCV, it implements a JPEG conversion. See more details in + https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. + + Args: + img (ndarray): The input image. It accepts: + 1. np.uint8 type with range [0, 255]; + 2. np.float32 type with range [0, 1]. + + Returns: + ndarray: The converted RGB image. The output image has the same type + and range as input image. + """ + img_type = img.dtype + img = _convert_input_type_range(img) * 255 + out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], + [0, -0.00153632, 0.00791071], + [0.00625893, -0.00318811, 0]]) * 255.0 + [ + -222.921, 135.576, -276.836 + ] + out_img = _convert_output_type_range(out_img, img_type) + return out_img + + +def ycbcr2bgr(img): + """Convert a YCbCr image to BGR image. + + The bgr version of ycbcr2rgb. + It implements the ITU-R BT.601 conversion for standard-definition + television. See more details in + https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. + + It differs from a similar function in cv2.cvtColor: `YCrCb <-> BGR`. + In OpenCV, it implements a JPEG conversion. See more details in + https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. + + Args: + img (ndarray): The input image. It accepts: + 1. np.uint8 type with range [0, 255]; + 2. np.float32 type with range [0, 1]. + + Returns: + ndarray: The converted BGR image. The output image has the same type + and range as input image. 
+ """ + img_type = img.dtype + img = _convert_input_type_range(img) * 255 + out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], + [0.00791071, -0.00153632, 0], + [0, -0.00318811, 0.00625893]]) * 255.0 + [ + -276.836, 135.576, -222.921 + ] + out_img = _convert_output_type_range(out_img, img_type) + return out_img + + +def convert_color_factory(src, dst): + + code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}') + + def convert_color(img): + out_img = cv2.cvtColor(img, code) + return out_img + + convert_color.__doc__ = f"""Convert a {src.upper()} image to {dst.upper()} + image. + + Args: + img (ndarray or str): The input image. + + Returns: + ndarray: The converted {dst.upper()} image. + """ + + return convert_color + + +bgr2rgb = convert_color_factory('bgr', 'rgb') + +rgb2bgr = convert_color_factory('rgb', 'bgr') + +bgr2hsv = convert_color_factory('bgr', 'hsv') + +hsv2bgr = convert_color_factory('hsv', 'bgr') + +bgr2hls = convert_color_factory('bgr', 'hls') + +hls2bgr = convert_color_factory('hls', 'bgr') diff --git a/mmcv/image/geometric.py b/mmcv/image/geometric.py new file mode 100644 index 0000000..cf97c20 --- /dev/null +++ b/mmcv/image/geometric.py @@ -0,0 +1,728 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numbers + +import cv2 +import numpy as np + +from ..utils import to_2tuple +from .io import imread_backend + +try: + from PIL import Image +except ImportError: + Image = None + + +def _scale_size(size, scale): + """Rescale a size by a ratio. + + Args: + size (tuple[int]): (w, h). + scale (float | tuple(float)): Scaling factor. + + Returns: + tuple[int]: scaled size. + """ + if isinstance(scale, (float, int)): + scale = (scale, scale) + w, h = size + return int(w * float(scale[0]) + 0.5), int(h * float(scale[1]) + 0.5) + + +cv2_interp_codes = { + 'nearest': cv2.INTER_NEAREST, + 'bilinear': cv2.INTER_LINEAR, + 'bicubic': cv2.INTER_CUBIC, + 'area': cv2.INTER_AREA, + 'lanczos': cv2.INTER_LANCZOS4 +} + +if Image is not None: + pillow_interp_codes = { + 'nearest': Image.NEAREST, + 'bilinear': Image.BILINEAR, + 'bicubic': Image.BICUBIC, + 'box': Image.BOX, + 'lanczos': Image.LANCZOS, + 'hamming': Image.HAMMING + } + + +def imresize(img, + size, + return_scale=False, + interpolation='bilinear', + out=None, + backend=None): + """Resize image to a given size. + + Args: + img (ndarray): The input image. + size (tuple[int]): Target size (w, h). + return_scale (bool): Whether to return `w_scale` and `h_scale`. + interpolation (str): Interpolation method, accepted values are + "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' + backend, "nearest", "bilinear" for 'pillow' backend. + out (ndarray): The output destination. + backend (str | None): The image resize backend type. Options are `cv2`, + `pillow`, `None`. If backend is None, the global imread_backend + specified by ``mmcv.use_backend()`` will be used. Default: None. + + Returns: + tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or + `resized_img`. + """ + h, w = img.shape[:2] + if backend is None: + backend = imread_backend + if backend not in ['cv2', 'pillow']: + raise ValueError(f'backend: {backend} is not supported for resize.' 
+ f"Supported backends are 'cv2', 'pillow'") + + if backend == 'pillow': + assert img.dtype == np.uint8, 'Pillow backend only support uint8 type' + pil_image = Image.fromarray(img) + pil_image = pil_image.resize(size, pillow_interp_codes[interpolation]) + resized_img = np.array(pil_image) + else: + resized_img = cv2.resize( + img, size, dst=out, interpolation=cv2_interp_codes[interpolation]) + if not return_scale: + return resized_img + else: + w_scale = size[0] / w + h_scale = size[1] / h + return resized_img, w_scale, h_scale + + +def imresize_to_multiple(img, + divisor, + size=None, + scale_factor=None, + keep_ratio=False, + return_scale=False, + interpolation='bilinear', + out=None, + backend=None): + """Resize image according to a given size or scale factor and then rounds + up the the resized or rescaled image size to the nearest value that can be + divided by the divisor. + + Args: + img (ndarray): The input image. + divisor (int | tuple): Resized image size will be a multiple of + divisor. If divisor is a tuple, divisor should be + (w_divisor, h_divisor). + size (None | int | tuple[int]): Target size (w, h). Default: None. + scale_factor (None | float | tuple[float]): Multiplier for spatial + size. Should match input size if it is a tuple and the 2D style is + (w_scale_factor, h_scale_factor). Default: None. + keep_ratio (bool): Whether to keep the aspect ratio when resizing the + image. Default: False. + return_scale (bool): Whether to return `w_scale` and `h_scale`. + interpolation (str): Interpolation method, accepted values are + "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' + backend, "nearest", "bilinear" for 'pillow' backend. + out (ndarray): The output destination. + backend (str | None): The image resize backend type. Options are `cv2`, + `pillow`, `None`. If backend is None, the global imread_backend + specified by ``mmcv.use_backend()`` will be used. Default: None. + + Returns: + tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or + `resized_img`. + """ + h, w = img.shape[:2] + if size is not None and scale_factor is not None: + raise ValueError('only one of size or scale_factor should be defined') + elif size is None and scale_factor is None: + raise ValueError('one of size or scale_factor should be defined') + elif size is not None: + size = to_2tuple(size) + if keep_ratio: + size = rescale_size((w, h), size, return_scale=False) + else: + size = _scale_size((w, h), scale_factor) + + divisor = to_2tuple(divisor) + size = tuple([int(np.ceil(s / d)) * d for s, d in zip(size, divisor)]) + resized_img, w_scale, h_scale = imresize( + img, + size, + return_scale=True, + interpolation=interpolation, + out=out, + backend=backend) + if return_scale: + return resized_img, w_scale, h_scale + else: + return resized_img + + +def imresize_like(img, + dst_img, + return_scale=False, + interpolation='bilinear', + backend=None): + """Resize image to the same size of a given image. + + Args: + img (ndarray): The input image. + dst_img (ndarray): The target image. + return_scale (bool): Whether to return `w_scale` and `h_scale`. + interpolation (str): Same as :func:`resize`. + backend (str | None): Same as :func:`resize`. + + Returns: + tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or + `resized_img`. + """ + h, w = dst_img.shape[:2] + return imresize(img, (w, h), return_scale, interpolation, backend=backend) + + +def rescale_size(old_size, scale, return_scale=False): + """Calculate the new size to be rescaled to. 
+ + Args: + old_size (tuple[int]): The old size (w, h) of image. + scale (float | tuple[int]): The scaling factor or maximum size. + If it is a float number, then the image will be rescaled by this + factor, else if it is a tuple of 2 integers, then the image will + be rescaled as large as possible within the scale. + return_scale (bool): Whether to return the scaling factor besides the + rescaled image size. + + Returns: + tuple[int]: The new rescaled image size. + """ + w, h = old_size + if isinstance(scale, (float, int)): + if scale <= 0: + raise ValueError(f'Invalid scale {scale}, must be positive.') + scale_factor = scale + elif isinstance(scale, tuple): + max_long_edge = max(scale) + max_short_edge = min(scale) + scale_factor = min(max_long_edge / max(h, w), + max_short_edge / min(h, w)) + else: + raise TypeError( + f'Scale must be a number or tuple of int, but got {type(scale)}') + + new_size = _scale_size((w, h), scale_factor) + + if return_scale: + return new_size, scale_factor + else: + return new_size + + +def imrescale(img, + scale, + return_scale=False, + interpolation='bilinear', + backend=None): + """Resize image while keeping the aspect ratio. + + Args: + img (ndarray): The input image. + scale (float | tuple[int]): The scaling factor or maximum size. + If it is a float number, then the image will be rescaled by this + factor, else if it is a tuple of 2 integers, then the image will + be rescaled as large as possible within the scale. + return_scale (bool): Whether to return the scaling factor besides the + rescaled image. + interpolation (str): Same as :func:`resize`. + backend (str | None): Same as :func:`resize`. + + Returns: + ndarray: The rescaled image. + """ + h, w = img.shape[:2] + new_size, scale_factor = rescale_size((w, h), scale, return_scale=True) + rescaled_img = imresize( + img, new_size, interpolation=interpolation, backend=backend) + if return_scale: + return rescaled_img, scale_factor + else: + return rescaled_img + + +def imflip(img, direction='horizontal'): + """Flip an image horizontally or vertically. + + Args: + img (ndarray): Image to be flipped. + direction (str): The flip direction, either "horizontal" or + "vertical" or "diagonal". + + Returns: + ndarray: The flipped image. + """ + assert direction in ['horizontal', 'vertical', 'diagonal'] + if direction == 'horizontal': + return np.flip(img, axis=1) + elif direction == 'vertical': + return np.flip(img, axis=0) + else: + return np.flip(img, axis=(0, 1)) + + +def imflip_(img, direction='horizontal'): + """Inplace flip an image horizontally or vertically. + + Args: + img (ndarray): Image to be flipped. + direction (str): The flip direction, either "horizontal" or + "vertical" or "diagonal". + + Returns: + ndarray: The flipped image (inplace). + """ + assert direction in ['horizontal', 'vertical', 'diagonal'] + if direction == 'horizontal': + return cv2.flip(img, 1, img) + elif direction == 'vertical': + return cv2.flip(img, 0, img) + else: + return cv2.flip(img, -1, img) + + +def imrotate(img, + angle, + center=None, + scale=1.0, + border_value=0, + interpolation='bilinear', + auto_bound=False): + """Rotate an image. + + Args: + img (ndarray): Image to be rotated. + angle (float): Rotation angle in degrees, positive values mean + clockwise rotation. + center (tuple[float], optional): Center point (w, h) of the rotation in + the source image. If not specified, the center of the image will be + used. + scale (float): Isotropic scale factor. + border_value (int): Border value. 
+ interpolation (str): Same as :func:`resize`. + auto_bound (bool): Whether to adjust the image size to cover the whole + rotated image. + + Returns: + ndarray: The rotated image. + """ + if center is not None and auto_bound: + raise ValueError('`auto_bound` conflicts with `center`') + h, w = img.shape[:2] + if center is None: + center = ((w - 1) * 0.5, (h - 1) * 0.5) + assert isinstance(center, tuple) + + matrix = cv2.getRotationMatrix2D(center, -angle, scale) + if auto_bound: + cos = np.abs(matrix[0, 0]) + sin = np.abs(matrix[0, 1]) + new_w = h * sin + w * cos + new_h = h * cos + w * sin + matrix[0, 2] += (new_w - w) * 0.5 + matrix[1, 2] += (new_h - h) * 0.5 + w = int(np.round(new_w)) + h = int(np.round(new_h)) + rotated = cv2.warpAffine( + img, + matrix, (w, h), + flags=cv2_interp_codes[interpolation], + borderValue=border_value) + return rotated + + +def bbox_clip(bboxes, img_shape): + """Clip bboxes to fit the image shape. + + Args: + bboxes (ndarray): Shape (..., 4*k) + img_shape (tuple[int]): (height, width) of the image. + + Returns: + ndarray: Clipped bboxes. + """ + assert bboxes.shape[-1] % 4 == 0 + cmin = np.empty(bboxes.shape[-1], dtype=bboxes.dtype) + cmin[0::2] = img_shape[1] - 1 + cmin[1::2] = img_shape[0] - 1 + clipped_bboxes = np.maximum(np.minimum(bboxes, cmin), 0) + return clipped_bboxes + + +def bbox_scaling(bboxes, scale, clip_shape=None): + """Scaling bboxes w.r.t the box center. + + Args: + bboxes (ndarray): Shape(..., 4). + scale (float): Scaling factor. + clip_shape (tuple[int], optional): If specified, bboxes that exceed the + boundary will be clipped according to the given shape (h, w). + + Returns: + ndarray: Scaled bboxes. + """ + if float(scale) == 1.0: + scaled_bboxes = bboxes.copy() + else: + w = bboxes[..., 2] - bboxes[..., 0] + 1 + h = bboxes[..., 3] - bboxes[..., 1] + 1 + dw = (w * (scale - 1)) * 0.5 + dh = (h * (scale - 1)) * 0.5 + scaled_bboxes = bboxes + np.stack((-dw, -dh, dw, dh), axis=-1) + if clip_shape is not None: + return bbox_clip(scaled_bboxes, clip_shape) + else: + return scaled_bboxes + + +def imcrop(img, bboxes, scale=1.0, pad_fill=None): + """Crop image patches. + + 3 steps: scale the bboxes -> clip bboxes -> crop and pad. + + Args: + img (ndarray): Image to be cropped. + bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes. + scale (float, optional): Scale ratio of bboxes, the default value + 1.0 means no padding. + pad_fill (Number | list[Number]): Value to be filled for padding. + Default: None, which means no padding. + + Returns: + list[ndarray] | ndarray: The cropped image patches. + """ + chn = 1 if img.ndim == 2 else img.shape[2] + if pad_fill is not None: + if isinstance(pad_fill, (int, float)): + pad_fill = [pad_fill for _ in range(chn)] + assert len(pad_fill) == chn + + _bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes + scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32) + clipped_bbox = bbox_clip(scaled_bboxes, img.shape) + + patches = [] + for i in range(clipped_bbox.shape[0]): + x1, y1, x2, y2 = tuple(clipped_bbox[i, :]) + if pad_fill is None: + patch = img[y1:y2 + 1, x1:x2 + 1, ...] 
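+            # (note) `clipped_bbox` was clamped to the image above, so this
+            # slice cannot go out of bounds; padding is only needed in the
+            # branch below.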
+ else: + _x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :]) + if chn == 1: + patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1) + else: + patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn) + patch = np.array( + pad_fill, dtype=img.dtype) * np.ones( + patch_shape, dtype=img.dtype) + x_start = 0 if _x1 >= 0 else -_x1 + y_start = 0 if _y1 >= 0 else -_y1 + w = x2 - x1 + 1 + h = y2 - y1 + 1 + patch[y_start:y_start + h, x_start:x_start + w, + ...] = img[y1:y1 + h, x1:x1 + w, ...] + patches.append(patch) + + if bboxes.ndim == 1: + return patches[0] + else: + return patches + + +def impad(img, + *, + shape=None, + padding=None, + pad_val=0, + padding_mode='constant'): + """Pad the given image to a certain shape or pad on all sides with + specified padding mode and padding value. + + Args: + img (ndarray): Image to be padded. + shape (tuple[int]): Expected padding shape (h, w). Default: None. + padding (int or tuple[int]): Padding on each border. If a single int is + provided this is used to pad all borders. If tuple of length 2 is + provided this is the padding on left/right and top/bottom + respectively. If a tuple of length 4 is provided this is the + padding for the left, top, right and bottom borders respectively. + Default: None. Note that `shape` and `padding` can not be both + set. + pad_val (Number | Sequence[Number]): Values to be filled in padding + areas when padding_mode is 'constant'. Default: 0. + padding_mode (str): Type of padding. Should be: constant, edge, + reflect or symmetric. Default: constant. + + - constant: pads with a constant value, this value is specified + with pad_val. + - edge: pads with the last value at the edge of the image. + - reflect: pads with reflection of image without repeating the + last value on the edge. For example, padding [1, 2, 3, 4] + with 2 elements on both sides in reflect mode will result + in [3, 2, 1, 2, 3, 4, 3, 2]. + - symmetric: pads with reflection of image repeating the last + value on the edge. For example, padding [1, 2, 3, 4] with + 2 elements on both sides in symmetric mode will result in + [2, 1, 1, 2, 3, 4, 4, 3] + + Returns: + ndarray: The padded image. + """ + + assert (shape is not None) ^ (padding is not None) + if shape is not None: + padding = (0, 0, shape[1] - img.shape[1], shape[0] - img.shape[0]) + + # check pad_val + if isinstance(pad_val, tuple): + assert len(pad_val) == img.shape[-1] + elif not isinstance(pad_val, numbers.Number): + raise TypeError('pad_val must be a int or a tuple. ' + f'But received {type(pad_val)}') + + # check padding + if isinstance(padding, tuple) and len(padding) in [2, 4]: + if len(padding) == 2: + padding = (padding[0], padding[1], padding[0], padding[1]) + elif isinstance(padding, numbers.Number): + padding = (padding, padding, padding, padding) + else: + raise ValueError('Padding must be a int or a 2, or 4 element tuple.' + f'But received {padding}') + + # check padding mode + assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'] + + border_type = { + 'constant': cv2.BORDER_CONSTANT, + 'edge': cv2.BORDER_REPLICATE, + 'reflect': cv2.BORDER_REFLECT_101, + 'symmetric': cv2.BORDER_REFLECT + } + img = cv2.copyMakeBorder( + img, + padding[1], + padding[3], + padding[0], + padding[2], + border_type[padding_mode], + value=pad_val) + + return img + + +def impad_to_multiple(img, divisor, pad_val=0): + """Pad an image to ensure each edge to be multiple to some number. + + Args: + img (ndarray): Image to be padded. + divisor (int): Padded image edges will be multiple to divisor. 
+ pad_val (Number | Sequence[Number]): Same as :func:`impad`. + + Returns: + ndarray: The padded image. + """ + pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor + pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor + return impad(img, shape=(pad_h, pad_w), pad_val=pad_val) + + +def cutout(img, shape, pad_val=0): + """Randomly cut out a rectangle from the original img. + + Args: + img (ndarray): Image to be cutout. + shape (int | tuple[int]): Expected cutout shape (h, w). If given as a + int, the value will be used for both h and w. + pad_val (int | float | tuple[int | float]): Values to be filled in the + cut area. Defaults to 0. + + Returns: + ndarray: The cutout image. + """ + + channels = 1 if img.ndim == 2 else img.shape[2] + if isinstance(shape, int): + cut_h, cut_w = shape, shape + else: + assert isinstance(shape, tuple) and len(shape) == 2, \ + f'shape must be a int or a tuple with length 2, but got type ' \ + f'{type(shape)} instead.' + cut_h, cut_w = shape + if isinstance(pad_val, (int, float)): + pad_val = tuple([pad_val] * channels) + elif isinstance(pad_val, tuple): + assert len(pad_val) == channels, \ + 'Expected the num of elements in tuple equals the channels' \ + 'of input image. Found {} vs {}'.format( + len(pad_val), channels) + else: + raise TypeError(f'Invalid type {type(pad_val)} for `pad_val`') + + img_h, img_w = img.shape[:2] + y0 = np.random.uniform(img_h) + x0 = np.random.uniform(img_w) + + y1 = int(max(0, y0 - cut_h / 2.)) + x1 = int(max(0, x0 - cut_w / 2.)) + y2 = min(img_h, y1 + cut_h) + x2 = min(img_w, x1 + cut_w) + + if img.ndim == 2: + patch_shape = (y2 - y1, x2 - x1) + else: + patch_shape = (y2 - y1, x2 - x1, channels) + + img_cutout = img.copy() + patch = np.array( + pad_val, dtype=img.dtype) * np.ones( + patch_shape, dtype=img.dtype) + img_cutout[y1:y2, x1:x2, ...] = patch + + return img_cutout + + +def _get_shear_matrix(magnitude, direction='horizontal'): + """Generate the shear matrix for transformation. + + Args: + magnitude (int | float): The magnitude used for shear. + direction (str): The flip direction, either "horizontal" + or "vertical". + + Returns: + ndarray: The shear matrix with dtype float32. + """ + if direction == 'horizontal': + shear_matrix = np.float32([[1, magnitude, 0], [0, 1, 0]]) + elif direction == 'vertical': + shear_matrix = np.float32([[1, 0, 0], [magnitude, 1, 0]]) + return shear_matrix + + +def imshear(img, + magnitude, + direction='horizontal', + border_value=0, + interpolation='bilinear'): + """Shear an image. + + Args: + img (ndarray): Image to be sheared with format (h, w) + or (h, w, c). + magnitude (int | float): The magnitude used for shear. + direction (str): The flip direction, either "horizontal" + or "vertical". + border_value (int | tuple[int]): Value used in case of a + constant border. + interpolation (str): Same as :func:`resize`. + + Returns: + ndarray: The sheared image. + """ + assert direction in ['horizontal', + 'vertical'], f'Invalid direction: {direction}' + height, width = img.shape[:2] + if img.ndim == 2: + channels = 1 + elif img.ndim == 3: + channels = img.shape[-1] + if isinstance(border_value, int): + border_value = tuple([border_value] * channels) + elif isinstance(border_value, tuple): + assert len(border_value) == channels, \ + 'Expected the num of elements in tuple equals the channels' \ + 'of input image. 
Found {} vs {}'.format( + len(border_value), channels) + else: + raise ValueError( + f'Invalid type {type(border_value)} for `border_value`') + shear_matrix = _get_shear_matrix(magnitude, direction) + sheared = cv2.warpAffine( + img, + shear_matrix, + (width, height), + # Note case when the number elements in `border_value` + # greater than 3 (e.g. shearing masks whose channels large + # than 3) will raise TypeError in `cv2.warpAffine`. + # Here simply slice the first 3 values in `border_value`. + borderValue=border_value[:3], + flags=cv2_interp_codes[interpolation]) + return sheared + + +def _get_translate_matrix(offset, direction='horizontal'): + """Generate the translate matrix. + + Args: + offset (int | float): The offset used for translate. + direction (str): The translate direction, either + "horizontal" or "vertical". + + Returns: + ndarray: The translate matrix with dtype float32. + """ + if direction == 'horizontal': + translate_matrix = np.float32([[1, 0, offset], [0, 1, 0]]) + elif direction == 'vertical': + translate_matrix = np.float32([[1, 0, 0], [0, 1, offset]]) + return translate_matrix + + +def imtranslate(img, + offset, + direction='horizontal', + border_value=0, + interpolation='bilinear'): + """Translate an image. + + Args: + img (ndarray): Image to be translated with format + (h, w) or (h, w, c). + offset (int | float): The offset used for translate. + direction (str): The translate direction, either "horizontal" + or "vertical". + border_value (int | tuple[int]): Value used in case of a + constant border. + interpolation (str): Same as :func:`resize`. + + Returns: + ndarray: The translated image. + """ + assert direction in ['horizontal', + 'vertical'], f'Invalid direction: {direction}' + height, width = img.shape[:2] + if img.ndim == 2: + channels = 1 + elif img.ndim == 3: + channels = img.shape[-1] + if isinstance(border_value, int): + border_value = tuple([border_value] * channels) + elif isinstance(border_value, tuple): + assert len(border_value) == channels, \ + 'Expected the num of elements in tuple equals the channels' \ + 'of input image. Found {} vs {}'.format( + len(border_value), channels) + else: + raise ValueError( + f'Invalid type {type(border_value)} for `border_value`.') + translate_matrix = _get_translate_matrix(offset, direction) + translated = cv2.warpAffine( + img, + translate_matrix, + (width, height), + # Note case when the number elements in `border_value` + # greater than 3 (e.g. translating masks whose channels + # large than 3) will raise TypeError in `cv2.warpAffine`. + # Here simply slice the first 3 values in `border_value`. + borderValue=border_value[:3], + flags=cv2_interp_codes[interpolation]) + return translated diff --git a/mmcv/image/io.py b/mmcv/image/io.py new file mode 100644 index 0000000..69369f0 --- /dev/null +++ b/mmcv/image/io.py @@ -0,0 +1,262 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
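+#
+# Usage sketch (illustrative; the file path below is hypothetical):
+#
+#   import mmcv
+#   mmcv.use_backend('pillow')     # switch the global decoding backend
+#   img = mmcv.imread('demo.jpg')  # a BGR ndarray by default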
+import io +import os.path as osp +from pathlib import Path + +import cv2 +import numpy as np +from cv2 import (IMREAD_COLOR, IMREAD_GRAYSCALE, IMREAD_IGNORE_ORIENTATION, + IMREAD_UNCHANGED) + +from mmcv.utils import check_file_exist, is_str, mkdir_or_exist + +try: + from turbojpeg import TJCS_RGB, TJPF_BGR, TJPF_GRAY, TurboJPEG +except ImportError: + TJCS_RGB = TJPF_GRAY = TJPF_BGR = TurboJPEG = None + +try: + from PIL import Image, ImageOps +except ImportError: + Image = None + +try: + import tifffile +except ImportError: + tifffile = None + +jpeg = None +supported_backends = ['cv2', 'turbojpeg', 'pillow', 'tifffile'] + +imread_flags = { + 'color': IMREAD_COLOR, + 'grayscale': IMREAD_GRAYSCALE, + 'unchanged': IMREAD_UNCHANGED, + 'color_ignore_orientation': IMREAD_IGNORE_ORIENTATION | IMREAD_COLOR, + 'grayscale_ignore_orientation': + IMREAD_IGNORE_ORIENTATION | IMREAD_GRAYSCALE +} + +imread_backend = 'cv2' + + +def use_backend(backend): + """Select a backend for image decoding. + + Args: + backend (str): The image decoding backend type. Options are `cv2`, + `pillow`, `turbojpeg` (see https://github.com/lilohuang/PyTurboJPEG) + and `tifffile`. `turbojpeg` is faster but it only supports `.jpeg` + file format. + """ + assert backend in supported_backends + global imread_backend + imread_backend = backend + if imread_backend == 'turbojpeg': + if TurboJPEG is None: + raise ImportError('`PyTurboJPEG` is not installed') + global jpeg + if jpeg is None: + jpeg = TurboJPEG() + elif imread_backend == 'pillow': + if Image is None: + raise ImportError('`Pillow` is not installed') + elif imread_backend == 'tifffile': + if tifffile is None: + raise ImportError('`tifffile` is not installed') + + +def _jpegflag(flag='color', channel_order='bgr'): + channel_order = channel_order.lower() + if channel_order not in ['rgb', 'bgr']: + raise ValueError('channel order must be either "rgb" or "bgr"') + + if flag == 'color': + if channel_order == 'bgr': + return TJPF_BGR + elif channel_order == 'rgb': + return TJCS_RGB + elif flag == 'grayscale': + return TJPF_GRAY + else: + raise ValueError('flag must be "color" or "grayscale"') + + +def _pillow2array(img, flag='color', channel_order='bgr'): + """Convert a pillow image to numpy array. + + Args: + img (:obj:`PIL.Image.Image`): The image loaded using PIL + flag (str): Flags specifying the color type of a loaded image, + candidates are 'color', 'grayscale' and 'unchanged'. + Default to 'color'. + channel_order (str): The channel order of the output image array, + candidates are 'bgr' and 'rgb'. Default to 'bgr'. + + Returns: + np.ndarray: The converted numpy array + """ + channel_order = channel_order.lower() + if channel_order not in ['rgb', 'bgr']: + raise ValueError('channel order must be either "rgb" or "bgr"') + + if flag == 'unchanged': + array = np.array(img) + if array.ndim >= 3 and array.shape[2] >= 3: # color image + array[:, :, :3] = array[:, :, (2, 1, 0)] # RGB to BGR + else: + # Handle exif orientation tag + if flag in ['color', 'grayscale']: + img = ImageOps.exif_transpose(img) + # If the image mode is not 'RGB', convert it to 'RGB' first. + if img.mode != 'RGB': + if img.mode != 'LA': + # Most formats except 'LA' can be directly converted to RGB + img = img.convert('RGB') + else: + # When the mode is 'LA', the default conversion will fill in + # the canvas with black, which sometimes shadows black objects + # in the foreground. 
+ # + # Therefore, a random color (124, 117, 104) is used for canvas + img_rgba = img.convert('RGBA') + img = Image.new('RGB', img_rgba.size, (124, 117, 104)) + img.paste(img_rgba, mask=img_rgba.split()[3]) # 3 is alpha + if flag in ['color', 'color_ignore_orientation']: + array = np.array(img) + if channel_order != 'rgb': + array = array[:, :, ::-1] # RGB to BGR + elif flag in ['grayscale', 'grayscale_ignore_orientation']: + img = img.convert('L') + array = np.array(img) + else: + raise ValueError( + 'flag must be "color", "grayscale", "unchanged", ' + f'"color_ignore_orientation" or "grayscale_ignore_orientation"' + f' but got {flag}') + return array + + +def imread(img_or_path, flag='color', channel_order='bgr', backend=None): + """Read an image. + + Args: + img_or_path (ndarray or str or Path): Either a numpy array or str or + pathlib.Path. If it is a numpy array (loaded image), then + it will be returned as is. + flag (str): Flags specifying the color type of a loaded image, + candidates are `color`, `grayscale`, `unchanged`, + `color_ignore_orientation` and `grayscale_ignore_orientation`. + By default, `cv2` and `pillow` backend would rotate the image + according to its EXIF info unless called with `unchanged` or + `*_ignore_orientation` flags. `turbojpeg` and `tifffile` backend + always ignore image's EXIF info regardless of the flag. + The `turbojpeg` backend only supports `color` and `grayscale`. + channel_order (str): Order of channel, candidates are `bgr` and `rgb`. + backend (str | None): The image decoding backend type. Options are + `cv2`, `pillow`, `turbojpeg`, `tifffile`, `None`. + If backend is None, the global imread_backend specified by + ``mmcv.use_backend()`` will be used. Default: None. + + Returns: + ndarray: Loaded image array. + """ + + if backend is None: + backend = imread_backend + if backend not in supported_backends: + raise ValueError(f'backend: {backend} is not supported. Supported ' + "backends are 'cv2', 'turbojpeg', 'pillow'") + if isinstance(img_or_path, Path): + img_or_path = str(img_or_path) + + if isinstance(img_or_path, np.ndarray): + return img_or_path + elif is_str(img_or_path): + check_file_exist(img_or_path, + f'img file does not exist: {img_or_path}') + if backend == 'turbojpeg': + with open(img_or_path, 'rb') as in_file: + img = jpeg.decode(in_file.read(), + _jpegflag(flag, channel_order)) + if img.shape[-1] == 1: + img = img[:, :, 0] + return img + elif backend == 'pillow': + img = Image.open(img_or_path) + img = _pillow2array(img, flag, channel_order) + return img + elif backend == 'tifffile': + img = tifffile.imread(img_or_path) + return img + else: + flag = imread_flags[flag] if is_str(flag) else flag + img = cv2.imread(img_or_path, flag) + if flag == IMREAD_COLOR and channel_order == 'rgb': + cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) + return img + else: + raise TypeError('"img" must be a numpy array or a str or ' + 'a pathlib.Path object') + + +def imfrombytes(content, flag='color', channel_order='bgr', backend=None): + """Read an image from bytes. + + Args: + content (bytes): Image bytes got from files or other streams. + flag (str): Same as :func:`imread`. + backend (str | None): The image decoding backend type. Options are + `cv2`, `pillow`, `turbojpeg`, `None`. If backend is None, the + global imread_backend specified by ``mmcv.use_backend()`` will be + used. Default: None. + + Returns: + ndarray: Loaded image array. 
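+
+    Examples:
+        >>> # illustrative only; the path is hypothetical
+        >>> with open('/path/to/img.jpg', 'rb') as f:
+        ...     img = imfrombytes(f.read())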
+ """ + + if backend is None: + backend = imread_backend + if backend not in supported_backends: + raise ValueError(f'backend: {backend} is not supported. Supported ' + "backends are 'cv2', 'turbojpeg', 'pillow'") + if backend == 'turbojpeg': + img = jpeg.decode(content, _jpegflag(flag, channel_order)) + if img.shape[-1] == 1: + img = img[:, :, 0] + return img + elif backend == 'pillow': + buff = io.BytesIO(content) + img = Image.open(buff) + img = _pillow2array(img, flag, channel_order) + return img + else: + img_np = np.frombuffer(content, np.uint8) + flag = imread_flags[flag] if is_str(flag) else flag + img = cv2.imdecode(img_np, flag) + if flag == IMREAD_COLOR and channel_order == 'rgb': + cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) + return img + + +def imwrite(img, file_path, params=None, auto_mkdir=True): + """Write image to file. + + Args: + img (ndarray): Image array to be written. + file_path (str): Image file path. + params (None or list): Same as opencv :func:`imwrite` interface. + auto_mkdir (bool): If the parent folder of `file_path` does not exist, + whether to create it automatically. + + Returns: + bool: Successful or not. + """ + if auto_mkdir: + dir_name = osp.abspath(osp.dirname(file_path)) + mkdir_or_exist(dir_name) + return cv2.imwrite(file_path, img, params) + + + + diff --git a/mmcv/image/misc.py b/mmcv/image/misc.py new file mode 100644 index 0000000..a52304a --- /dev/null +++ b/mmcv/image/misc.py @@ -0,0 +1,43 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +from mmcv.image import imdenormalize + +try: + import torch +except ImportError: + torch = None + + +def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True): + """Convert tensor to 3-channel images. + + Args: + tensor (torch.Tensor): Tensor that contains multiple images, shape ( + N, C, H, W). + mean (tuple[float], optional): Mean of images. Defaults to (0, 0, 0). + std (tuple[float], optional): Standard deviation of images. + Defaults to (1, 1, 1). + to_rgb (bool, optional): Whether the tensor was converted to RGB + format in the first place. If so, convert it back to BGR. + Defaults to True. + + Returns: + list[np.ndarray]: A list that contains multiple images. + """ + + if torch is None: + raise RuntimeError('pytorch is not installed') + assert torch.is_tensor(tensor) and tensor.ndim == 4 + assert len(mean) == 3 + assert len(std) == 3 + + num_imgs = tensor.size(0) + mean = np.array(mean, dtype=np.float32) + std = np.array(std, dtype=np.float32) + imgs = [] + for img_id in range(num_imgs): + img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0) + img = imdenormalize( + img, mean, std, to_bgr=to_rgb).astype(np.uint8) + imgs.append(np.ascontiguousarray(img)) + return imgs diff --git a/mmcv/image/photometric.py b/mmcv/image/photometric.py new file mode 100644 index 0000000..5085d01 --- /dev/null +++ b/mmcv/image/photometric.py @@ -0,0 +1,428 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import cv2 +import numpy as np + +from ..utils import is_tuple_of +from .colorspace import bgr2gray, gray2bgr + + +def imnormalize(img, mean, std, to_rgb=True): + """Normalize an image with mean and std. + + Args: + img (ndarray): Image to be normalized. + mean (ndarray): The mean to be used for normalize. + std (ndarray): The std to be used for normalize. + to_rgb (bool): Whether to convert to rgb. + + Returns: + ndarray: The normalized image. 
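+
+    Example (illustrative; the mean/std values are the common ImageNet
+    statistics, not required by this function)::
+
+        >>> import numpy as np
+        >>> img = np.random.randint(0, 256, (32, 32, 3)).astype(np.float32)
+        >>> mean = np.array([123.675, 116.28, 103.53])
+        >>> std = np.array([58.395, 57.12, 57.375])
+        >>> out = imnormalize(img, mean, std)  # BGR -> RGB, then (x - mean) / std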
+ """ + img = img.copy().astype(np.float32) + return imnormalize_(img, mean, std, to_rgb) + + +def imnormalize_(img, mean, std, to_rgb=True): + """Inplace normalize an image with mean and std. + + Args: + img (ndarray): Image to be normalized. + mean (ndarray): The mean to be used for normalize. + std (ndarray): The std to be used for normalize. + to_rgb (bool): Whether to convert to rgb. + + Returns: + ndarray: The normalized image. + """ + # cv2 inplace normalization does not accept uint8 + assert img.dtype != np.uint8 + mean = np.float64(mean.reshape(1, -1)) + stdinv = 1 / np.float64(std.reshape(1, -1)) + if to_rgb: + cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) # inplace + cv2.subtract(img, mean, img) # inplace + cv2.multiply(img, stdinv, img) # inplace + return img + + +def imdenormalize(img, mean, std, to_bgr=True): + assert img.dtype != np.uint8 + mean = mean.reshape(1, -1).astype(np.float64) + std = std.reshape(1, -1).astype(np.float64) + img = cv2.multiply(img, std) # make a copy + cv2.add(img, mean, img) # inplace + if to_bgr: + cv2.cvtColor(img, cv2.COLOR_RGB2BGR, img) # inplace + return img + + +def iminvert(img): + """Invert (negate) an image. + + Args: + img (ndarray): Image to be inverted. + + Returns: + ndarray: The inverted image. + """ + return np.full_like(img, 255) - img + + +def solarize(img, thr=128): + """Solarize an image (invert all pixel values above a threshold) + + Args: + img (ndarray): Image to be solarized. + thr (int): Threshold for solarizing (0 - 255). + + Returns: + ndarray: The solarized image. + """ + img = np.where(img < thr, img, 255 - img) + return img + + +def posterize(img, bits): + """Posterize an image (reduce the number of bits for each color channel) + + Args: + img (ndarray): Image to be posterized. + bits (int): Number of bits (1 to 8) to use for posterizing. + + Returns: + ndarray: The posterized image. + """ + shift = 8 - bits + img = np.left_shift(np.right_shift(img, shift), shift) + return img + + +def adjust_color(img, alpha=1, beta=None, gamma=0): + r"""It blends the source image and its gray image: + + .. math:: + output = img * alpha + gray\_img * beta + gamma + + Args: + img (ndarray): The input source image. + alpha (int | float): Weight for the source image. Default 1. + beta (int | float): Weight for the converted gray image. + If None, it's assigned the value (1 - `alpha`). + gamma (int | float): Scalar added to each sum. + Same as :func:`cv2.addWeighted`. Default 0. + + Returns: + ndarray: Colored image which has the same size and dtype as input. + """ + gray_img = bgr2gray(img) + gray_img = np.tile(gray_img[..., None], [1, 1, 3]) + if beta is None: + beta = 1 - alpha + colored_img = cv2.addWeighted(img, alpha, gray_img, beta, gamma) + if not colored_img.dtype == np.uint8: + # Note when the dtype of `img` is not the default `np.uint8` + # (e.g. np.float32), the value in `colored_img` got from cv2 + # is not guaranteed to be in range [0, 255], so here clip + # is needed. + colored_img = np.clip(colored_img, 0, 255) + return colored_img + + +def imequalize(img): + """Equalize the image histogram. + + This function applies a non-linear mapping to the input image, + in order to create a uniform distribution of grayscale values + in the output image. + + Args: + img (ndarray): Image to be equalized. + + Returns: + ndarray: The equalized image. + """ + + def _scale_channel(im, c): + """Scale the data in the corresponding channel.""" + im = im[:, :, c] + # Compute the histogram of the image channel. 
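+        # (clarifying note) The look-up table built below follows
+        # PIL.ImageOps.equalize: `step` is the average pixel count per
+        # output level, and the shifted cumulative histogram spreads the
+        # levels uniformly over [0, 255].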
+ histo = np.histogram(im, 256, (0, 255))[0] + # For computing the step, filter out the nonzeros. + nonzero_histo = histo[histo > 0] + step = (np.sum(nonzero_histo) - nonzero_histo[-1]) // 255 + if not step: + lut = np.array(range(256)) + else: + # Compute the cumulative sum, shifted by step // 2 + # and then normalized by step. + lut = (np.cumsum(histo) + (step // 2)) // step + # Shift lut, prepending with 0. + lut = np.concatenate([[0], lut[:-1]], 0) + # handle potential integer overflow + lut[lut > 255] = 255 + # If step is zero, return the original image. + # Otherwise, index from lut. + return np.where(np.equal(step, 0), im, lut[im]) + + # Scales each channel independently and then stacks + # the result. + s1 = _scale_channel(img, 0) + s2 = _scale_channel(img, 1) + s3 = _scale_channel(img, 2) + equalized_img = np.stack([s1, s2, s3], axis=-1) + return equalized_img.astype(img.dtype) + + +def adjust_brightness(img, factor=1.): + """Adjust image brightness. + + This function controls the brightness of an image. An + enhancement factor of 0.0 gives a black image. + A factor of 1.0 gives the original image. This function + blends the source image and the degenerated black image: + + .. math:: + output = img * factor + degenerated * (1 - factor) + + Args: + img (ndarray): Image to be brightened. + factor (float): A value controls the enhancement. + Factor 1.0 returns the original image, lower + factors mean less color (brightness, contrast, + etc), and higher values more. Default 1. + + Returns: + ndarray: The brightened image. + """ + degenerated = np.zeros_like(img) + # Note manually convert the dtype to np.float32, to + # achieve as close results as PIL.ImageEnhance.Brightness. + # Set beta=1-factor, and gamma=0 + brightened_img = cv2.addWeighted( + img.astype(np.float32), factor, degenerated.astype(np.float32), + 1 - factor, 0) + brightened_img = np.clip(brightened_img, 0, 255) + return brightened_img.astype(img.dtype) + + +def adjust_contrast(img, factor=1.): + """Adjust image contrast. + + This function controls the contrast of an image. An + enhancement factor of 0.0 gives a solid grey + image. A factor of 1.0 gives the original image. It + blends the source image and the degenerated mean image: + + .. math:: + output = img * factor + degenerated * (1 - factor) + + Args: + img (ndarray): Image to be contrasted. BGR order. + factor (float): Same as :func:`mmcv.adjust_brightness`. + + Returns: + ndarray: The contrasted image. + """ + gray_img = bgr2gray(img) + hist = np.histogram(gray_img, 256, (0, 255))[0] + mean = round(np.sum(gray_img) / np.sum(hist)) + degenerated = (np.ones_like(img[..., 0]) * mean).astype(img.dtype) + degenerated = gray2bgr(degenerated) + contrasted_img = cv2.addWeighted( + img.astype(np.float32), factor, degenerated.astype(np.float32), + 1 - factor, 0) + contrasted_img = np.clip(contrasted_img, 0, 255) + return contrasted_img.astype(img.dtype) + + +def auto_contrast(img, cutoff=0): + """Auto adjust image contrast. + + This function maximize (normalize) image contrast by first removing cutoff + percent of the lightest and darkest pixels from the histogram and remapping + the image so that the darkest pixel becomes black (0), and the lightest + becomes white (255). + + Args: + img (ndarray): Image to be contrasted. BGR order. + cutoff (int | float | tuple): The cutoff percent of the lightest and + darkest pixels to be removed. If given as tuple, it shall be + (low, high). Otherwise, the single value will be used for both. + Defaults to 0. 
+ + Returns: + ndarray: The contrasted image. + """ + + def _auto_contrast_channel(im, c, cutoff): + im = im[:, :, c] + # Compute the histogram of the image channel. + histo = np.histogram(im, 256, (0, 255))[0] + # Remove cut-off percent pixels from histo + histo_sum = np.cumsum(histo) + cut_low = histo_sum[-1] * cutoff[0] // 100 + cut_high = histo_sum[-1] - histo_sum[-1] * cutoff[1] // 100 + histo_sum = np.clip(histo_sum, cut_low, cut_high) - cut_low + histo = np.concatenate([[histo_sum[0]], np.diff(histo_sum)], 0) + + # Compute mapping + low, high = np.nonzero(histo)[0][0], np.nonzero(histo)[0][-1] + # If all the values have been cut off, return the origin img + if low >= high: + return im + scale = 255.0 / (high - low) + offset = -low * scale + lut = np.array(range(256)) + lut = lut * scale + offset + lut = np.clip(lut, 0, 255) + return lut[im] + + if isinstance(cutoff, (int, float)): + cutoff = (cutoff, cutoff) + else: + assert isinstance(cutoff, tuple), 'cutoff must be of type int, ' \ + f'float or tuple, but got {type(cutoff)} instead.' + # Auto adjusts contrast for each channel independently and then stacks + # the result. + s1 = _auto_contrast_channel(img, 0, cutoff) + s2 = _auto_contrast_channel(img, 1, cutoff) + s3 = _auto_contrast_channel(img, 2, cutoff) + contrasted_img = np.stack([s1, s2, s3], axis=-1) + return contrasted_img.astype(img.dtype) + + +def adjust_sharpness(img, factor=1., kernel=None): + """Adjust image sharpness. + + This function controls the sharpness of an image. An + enhancement factor of 0.0 gives a blurred image. A + factor of 1.0 gives the original image. And a factor + of 2.0 gives a sharpened image. It blends the source + image and the degenerated mean image: + + .. math:: + output = img * factor + degenerated * (1 - factor) + + Args: + img (ndarray): Image to be sharpened. BGR order. + factor (float): Same as :func:`mmcv.adjust_brightness`. + kernel (np.ndarray, optional): Filter kernel to be applied on the img + to obtain the degenerated img. Defaults to None. + + Note: + No value sanity check is enforced on the kernel set by users. So with + an inappropriate kernel, the ``adjust_sharpness`` may fail to perform + the function its name indicates but end up performing whatever + transform determined by the kernel. + + Returns: + ndarray: The sharpened image. + """ + + if kernel is None: + # adopted from PIL.ImageFilter.SMOOTH + kernel = np.array([[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]]) / 13 + assert isinstance(kernel, np.ndarray), \ + f'kernel must be of type np.ndarray, but got {type(kernel)} instead.' + assert kernel.ndim == 2, \ + f'kernel must have a dimension of 2, but got {kernel.ndim} instead.' + + degenerated = cv2.filter2D(img, -1, kernel) + sharpened_img = cv2.addWeighted( + img.astype(np.float32), factor, degenerated.astype(np.float32), + 1 - factor, 0) + sharpened_img = np.clip(sharpened_img, 0, 255) + return sharpened_img.astype(img.dtype) + + +def adjust_lighting(img, eigval, eigvec, alphastd=0.1, to_rgb=True): + """AlexNet-style PCA jitter. + + This data augmentation is proposed in `ImageNet Classification with Deep + Convolutional Neural Networks + `_. + + Args: + img (ndarray): Image to be adjusted lighting. BGR order. + eigval (ndarray): the eigenvalue of the convariance matrix of pixel + values, respectively. + eigvec (ndarray): the eigenvector of the convariance matrix of pixel + values, respectively. + alphastd (float): The standard deviation for distribution of alpha. 
+ Defaults to 0.1 + to_rgb (bool): Whether to convert img to rgb. + + Returns: + ndarray: The adjusted image. + """ + assert isinstance(eigval, np.ndarray) and isinstance(eigvec, np.ndarray), \ + f'eigval and eigvec should both be of type np.ndarray, got ' \ + f'{type(eigval)} and {type(eigvec)} instead.' + + assert eigval.ndim == 1 and eigvec.ndim == 2 + assert eigvec.shape == (3, eigval.shape[0]) + n_eigval = eigval.shape[0] + assert isinstance(alphastd, float), 'alphastd should be of type float, ' \ + f'got {type(alphastd)} instead.' + + img = img.copy().astype(np.float32) + if to_rgb: + cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) # inplace + + alpha = np.random.normal(0, alphastd, n_eigval) + alter = eigvec \ + * np.broadcast_to(alpha.reshape(1, n_eigval), (3, n_eigval)) \ + * np.broadcast_to(eigval.reshape(1, n_eigval), (3, n_eigval)) + alter = np.broadcast_to(alter.sum(axis=1).reshape(1, 1, 3), img.shape) + img_adjusted = img + alter + return img_adjusted + + +def lut_transform(img, lut_table): + """Transform array by look-up table. + + The function lut_transform fills the output array with values from the + look-up table. Indices of the entries are taken from the input array. + + Args: + img (ndarray): Image to be transformed. + lut_table (ndarray): look-up table of 256 elements; in case of + multi-channel input array, the table should either have a single + channel (in this case the same table is used for all channels) or + the same number of channels as in the input array. + + Returns: + ndarray: The transformed image. + """ + assert isinstance(img, np.ndarray) + assert 0 <= np.min(img) and np.max(img) <= 255 + assert isinstance(lut_table, np.ndarray) + assert lut_table.shape == (256, ) + + return cv2.LUT(np.array(img, dtype=np.uint8), lut_table) + + +def clahe(img, clip_limit=40.0, tile_grid_size=(8, 8)): + """Use CLAHE method to process the image. + + See `ZUIDERVELD,K. Contrast Limited Adaptive Histogram Equalization[J]. + Graphics Gems, 1994:474-485.` for more information. + + Args: + img (ndarray): Image to be processed. + clip_limit (float): Threshold for contrast limiting. Default: 40.0. + tile_grid_size (tuple[int]): Size of grid for histogram equalization. + Input image will be divided into equally sized rectangular tiles. + It defines the number of tiles in row and column. Default: (8, 8). + + Returns: + ndarray: The processed image. + """ + assert isinstance(img, np.ndarray) + assert img.ndim == 2 + assert isinstance(clip_limit, (float, int)) + assert is_tuple_of(tile_grid_size, int) + assert len(tile_grid_size) == 2 + + clahe = cv2.createCLAHE(clip_limit, tile_grid_size) + return clahe.apply(np.array(img, dtype=np.uint8)) diff --git a/mmcv/layers/__init__.py b/mmcv/layers/__init__.py new file mode 100644 index 0000000..53f735c --- /dev/null +++ b/mmcv/layers/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from .batch_norm import get_norm +from .nms import batched_nms +from .shape_spec import ShapeSpec +from .wrappers import cat, Conv2d +from .roi_align import ROIAlign \ No newline at end of file diff --git a/mmcv/layers/aspp.py b/mmcv/layers/aspp.py new file mode 100644 index 0000000..14861aa --- /dev/null +++ b/mmcv/layers/aspp.py @@ -0,0 +1,144 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
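+#
+# Usage sketch (illustrative; all argument values below are assumptions):
+#
+#   from torch import nn
+#   aspp = ASPP(in_channels=256, out_channels=256, dilations=[6, 12, 18],
+#               norm='BN', activation=nn.ReLU())
+#   y = aspp(x)  # x: (N, 256, H, W) -> y: (N, 256, H, W)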
+ +from copy import deepcopy +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from .batch_norm import get_norm +from .blocks import DepthwiseSeparableConv2d +from .wrappers import Conv2d + + +class ASPP(nn.Module): + """ + Atrous Spatial Pyramid Pooling (ASPP). + """ + + def __init__( + self, + in_channels, + out_channels, + dilations, + *, + norm, + activation, + pool_kernel_size=None, + dropout: float = 0.0, + use_depthwise_separable_conv=False, + ): + """ + Args: + in_channels (int): number of input channels for ASPP. + out_channels (int): number of output channels. + dilations (list): a list of 3 dilations in ASPP. + norm (str or callable): normalization for all conv layers. + See :func:`layers.get_norm` for supported format. norm is + applied to all conv layers except the conv following + global average pooling. + activation (callable): activation function. + pool_kernel_size (tuple, list): the average pooling size (kh, kw) + for image pooling layer in ASPP. If set to None, it always + performs global average pooling. If not None, it must be + divisible by the shape of inputs in forward(). It is recommended + to use a fixed input feature size in training, and set this + option to match this size, so that it performs global average + pooling in training, and the size of the pooling window stays + consistent in inference. + dropout (float): apply dropout on the output of ASPP. It is used in + the official DeepLab implementation with a rate of 0.1: + https://github.com/tensorflow/models/blob/21b73d22f3ed05b650e85ac50849408dd36de32e/research/deeplab/model.py#L532 # noqa + use_depthwise_separable_conv (bool): use DepthwiseSeparableConv2d + for 3x3 convs in ASPP, proposed in :paper:`DeepLabV3+`. + """ + super(ASPP, self).__init__() + assert len(dilations) == 3, "ASPP expects 3 dilations, got {}".format(len(dilations)) + self.pool_kernel_size = pool_kernel_size + self.dropout = dropout + use_bias = norm == "" + self.convs = nn.ModuleList() + # conv 1x1 + self.convs.append( + Conv2d( + in_channels, + out_channels, + kernel_size=1, + bias=use_bias, + norm=get_norm(norm, out_channels), + activation=deepcopy(activation), + ) + ) + weight_init.c2_xavier_fill(self.convs[-1]) + # atrous convs + for dilation in dilations: + if use_depthwise_separable_conv: + self.convs.append( + DepthwiseSeparableConv2d( + in_channels, + out_channels, + kernel_size=3, + padding=dilation, + dilation=dilation, + norm1=norm, + activation1=deepcopy(activation), + norm2=norm, + activation2=deepcopy(activation), + ) + ) + else: + self.convs.append( + Conv2d( + in_channels, + out_channels, + kernel_size=3, + padding=dilation, + dilation=dilation, + bias=use_bias, + norm=get_norm(norm, out_channels), + activation=deepcopy(activation), + ) + ) + weight_init.c2_xavier_fill(self.convs[-1]) + # image pooling + # We do not add BatchNorm because the spatial resolution is 1x1, + # the original TF implementation has BatchNorm. 
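+        # (note) With `pool_kernel_size` fixed to the training feature size,
+        # this branch acts as global average pooling during training while
+        # keeping a constant pooling window at inference.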
+ if pool_kernel_size is None: + image_pooling = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)), + ) + else: + image_pooling = nn.Sequential( + nn.AvgPool2d(kernel_size=pool_kernel_size, stride=1), + Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)), + ) + weight_init.c2_xavier_fill(image_pooling[1]) + self.convs.append(image_pooling) + + self.project = Conv2d( + 5 * out_channels, + out_channels, + kernel_size=1, + bias=use_bias, + norm=get_norm(norm, out_channels), + activation=deepcopy(activation), + ) + weight_init.c2_xavier_fill(self.project) + + def forward(self, x): + size = x.shape[-2:] + if self.pool_kernel_size is not None: + if size[0] % self.pool_kernel_size[0] or size[1] % self.pool_kernel_size[1]: + raise ValueError( + "`pool_kernel_size` must be divisible by the shape of inputs. " + "Input size: {} `pool_kernel_size`: {}".format(size, self.pool_kernel_size) + ) + res = [] + for conv in self.convs: + res.append(conv(x)) + res[-1] = F.interpolate(res[-1], size=size, mode="bilinear", align_corners=False) + res = torch.cat(res, dim=1) + res = self.project(res) + res = F.dropout(res, self.dropout, training=self.training) if self.dropout > 0 else res + return res diff --git a/mmcv/layers/batch_norm.py b/mmcv/layers/batch_norm.py new file mode 100644 index 0000000..9c9d19f --- /dev/null +++ b/mmcv/layers/batch_norm.py @@ -0,0 +1,384 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import torch +import torch.distributed as dist +from torch import nn +from torch.nn import functional as F +from torch.autograd.function import Function +from .wrappers import BatchNorm2d +TORCH_VERSION = tuple(int(x) for x in torch.__version__.split(".")[:2]) + +def get_world_size() -> int: + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size() + +class _AllReduce(Function): + @staticmethod + def forward(ctx, input: torch.Tensor) -> torch.Tensor: + input_list = [torch.zeros_like(input) for k in range(dist.get_world_size())] + # Use allgather instead of allreduce since I don't trust in-place operations .. + dist.all_gather(input_list, input, async_op=False) + inputs = torch.stack(input_list, dim=0) + return torch.sum(inputs, dim=0) + + @staticmethod + def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor: + dist.all_reduce(grad_output, async_op=False) + return grad_output + +def differentiable_all_reduce(input: torch.Tensor) -> torch.Tensor: + """ + Differentiable counterpart of `dist.all_reduce`. + """ + if ( + not dist.is_available() + or not dist.is_initialized() + or dist.get_world_size() == 1 + ): + return input + return _AllReduce.apply(input) + + +class FrozenBatchNorm2d(nn.Module): + """ + BatchNorm2d where the batch statistics and the affine parameters are fixed. + + It contains non-trainable buffers called + "weight" and "bias", "running_mean", "running_var", + initialized to perform identity transformation. + + The pre-trained backbone models from Caffe2 only contain "weight" and "bias", + which are computed from the original four parameters of BN. + The affine transform `x * weight + bias` will perform the equivalent + computation of `(x - running_mean) / sqrt(running_var) * weight + bias`. + When loading a backbone model from Caffe2, "running_mean" and "running_var" + will be left unchanged as identity transformation. + + Other pre-trained backbone models may contain all 4 parameters. 
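+
+    A minimal sketch (shapes are assumptions, for illustration only)::
+
+        bn = FrozenBatchNorm2d(8)
+        out = bn(torch.randn(2, 8, 4, 4))  # identity transform by default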
+ + The forward is implemented by `F.batch_norm(..., training=False)`. + """ + + _version = 3 + + def __init__(self, num_features, eps=1e-5): + super().__init__() + self.num_features = num_features + self.eps = eps + self.register_buffer("weight", torch.ones(num_features)) + self.register_buffer("bias", torch.zeros(num_features)) + self.register_buffer("running_mean", torch.zeros(num_features)) + self.register_buffer("running_var", torch.ones(num_features) - eps) + self.register_buffer("num_batches_tracked", None) + + def forward(self, x): + if x.requires_grad: + # When gradients are needed, F.batch_norm will use extra memory + # because its backward op computes gradients for weight/bias as well. + scale = self.weight * (self.running_var + self.eps).rsqrt() + bias = self.bias - self.running_mean * scale + scale = scale.reshape(1, -1, 1, 1) + bias = bias.reshape(1, -1, 1, 1) + out_dtype = x.dtype # may be half + return x * scale.to(out_dtype) + bias.to(out_dtype) + else: + # When gradients are not needed, F.batch_norm is a single fused op + # and provide more optimization opportunities. + return F.batch_norm( + x, + self.running_mean, + self.running_var, + self.weight, + self.bias, + training=False, + eps=self.eps, + ) + + def _load_from_state_dict( + self, + state_dict, + prefix, + local_metadata, + strict, + missing_keys, + unexpected_keys, + error_msgs, + ): + version = local_metadata.get("version", None) + + if version is None or version < 2: + # No running_mean/var in early versions + # This will silent the warnings + if prefix + "running_mean" not in state_dict: + state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean) + if prefix + "running_var" not in state_dict: + state_dict[prefix + "running_var"] = torch.ones_like(self.running_var) + + super()._load_from_state_dict( + state_dict, + prefix, + local_metadata, + strict, + missing_keys, + unexpected_keys, + error_msgs, + ) + + def __repr__(self): + return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps) + + @classmethod + def convert_frozen_batchnorm(cls, module): + """ + Convert all BatchNorm/SyncBatchNorm in module into FrozenBatchNorm. + + Args: + module (torch.nn.Module): + + Returns: + If module is BatchNorm/SyncBatchNorm, returns a new module. + Otherwise, in-place convert module and return it. + + Similar to convert_sync_batchnorm in + https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py + """ + bn_module = nn.modules.batchnorm + bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm) + res = module + if isinstance(module, bn_module): + res = cls(module.num_features) + if module.affine: + res.weight.data = module.weight.data.clone().detach() + res.bias.data = module.bias.data.clone().detach() + res.running_mean.data = module.running_mean.data + res.running_var.data = module.running_var.data + res.eps = module.eps + res.num_batches_tracked = module.num_batches_tracked + else: + for name, child in module.named_children(): + new_child = cls.convert_frozen_batchnorm(child) + if new_child is not child: + res.add_module(name, new_child) + return res + + @classmethod + def convert_frozenbatchnorm2d_to_batchnorm2d(cls, module: nn.Module) -> nn.Module: + """ + Convert all FrozenBatchNorm2d to BatchNorm2d + + Args: + module (torch.nn.Module): + + Returns: + If module is FrozenBatchNorm2d, returns a new module. + Otherwise, in-place convert module and return it. 
+
+        This is needed for quantization:
+            https://fb.workplace.com/groups/1043663463248667/permalink/1296330057982005/
+        """
+
+        res = module
+        if isinstance(module, FrozenBatchNorm2d):
+            res = torch.nn.BatchNorm2d(module.num_features, module.eps)
+
+            res.weight.data = module.weight.data.clone().detach()
+            res.bias.data = module.bias.data.clone().detach()
+            res.running_mean.data = module.running_mean.data.clone().detach()
+            res.running_var.data = module.running_var.data.clone().detach()
+            res.eps = module.eps
+            res.num_batches_tracked = module.num_batches_tracked
+        else:
+            for name, child in module.named_children():
+                new_child = cls.convert_frozenbatchnorm2d_to_batchnorm2d(child)
+                if new_child is not child:
+                    res.add_module(name, new_child)
+        return res
+
+
+def get_norm(norm, out_channels):
+    """
+    Args:
+        norm (str or callable): either one of BN, SyncBN, FrozenBN, GN;
+            or a callable that takes a channel number and returns
+            the normalization layer as a nn.Module.
+
+    Returns:
+        nn.Module or None: the normalization layer
+    """
+    if norm is None:
+        return None
+    if isinstance(norm, str):
+        if len(norm) == 0:
+            return None
+        norm = {
+            "BN": BatchNorm2d,
+            # Fixed in https://github.com/pytorch/pytorch/pull/36382
+            "SyncBN": NaiveSyncBatchNorm if TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm,
+            "FrozenBN": FrozenBatchNorm2d,
+            "GN": lambda channels: nn.GroupNorm(32, channels),
+            # for debugging:
+            "nnSyncBN": nn.SyncBatchNorm,
+            "naiveSyncBN": NaiveSyncBatchNorm,
+            # expose stats_mode N as an option to caller, required for zero-len inputs
+            "naiveSyncBN_N": lambda channels: NaiveSyncBatchNorm(channels, stats_mode="N"),
+            "LN": lambda channels: LayerNorm(channels),
+        }[norm]
+    return norm(out_channels)
+
+
+class NaiveSyncBatchNorm(BatchNorm2d):
+    """
+    In PyTorch<=1.5, ``nn.SyncBatchNorm`` has incorrect gradient
+    when the batch size on each worker is different.
+    (e.g., when scale augmentation is used, or when it is applied to mask head).
+
+    This is a slower but correct alternative to `nn.SyncBatchNorm`.
+
+    Note:
+        There isn't a single definition of Sync BatchNorm.
+
+        When ``stats_mode==""``, this module computes overall statistics by using
+        statistics of each worker with equal weight. The result is true statistics
+        of all samples (as if they are all on one worker) only when all workers
+        have the same (N, H, W). This mode does not support inputs with zero batch size.
+
+        When ``stats_mode=="N"``, this module computes overall statistics by weighting
+        the statistics of each worker by their ``N``. The result is true statistics
+        of all samples (as if they are all on one worker) only when all workers
+        have the same (H, W). It is slower than ``stats_mode==""``.
+
+        Even though the result of this module may not be the true statistics of all samples,
+        it may still be reasonable because it might be preferable to assign equal weights
+        to all workers, regardless of their (H, W) dimension, instead of putting larger weight
+        on larger images. From preliminary experiments, little difference is found between such
+        a simplified implementation and an accurate computation of overall mean & variance.
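+
+    A usage sketch (illustrative; with a single process this falls back to
+    the parent ``BatchNorm2d`` behavior)::
+
+        sync_bn = NaiveSyncBatchNorm(16, stats_mode="N")
+        out = sync_bn(torch.randn(4, 16, 8, 8))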
+ """ + + def __init__(self, *args, stats_mode="", **kwargs): + super().__init__(*args, **kwargs) + assert stats_mode in ["", "N"] + self._stats_mode = stats_mode + + def forward(self, input): + if get_world_size() == 1 or not self.training: + return super().forward(input) + + B, C = input.shape[0], input.shape[1] + + half_input = input.dtype == torch.float16 + if half_input: + # fp16 does not have good enough numerics for the reduction here + input = input.float() + mean = torch.mean(input, dim=[0, 2, 3]) + meansqr = torch.mean(input * input, dim=[0, 2, 3]) + + if self._stats_mode == "": + assert B > 0, 'SyncBatchNorm(stats_mode="") does not support zero batch size.' + vec = torch.cat([mean, meansqr], dim=0) + vec = differentiable_all_reduce(vec) * (1.0 / dist.get_world_size()) + mean, meansqr = torch.split(vec, C) + momentum = self.momentum + else: + if B == 0: + vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype) + vec = vec + input.sum() # make sure there is gradient w.r.t input + else: + vec = torch.cat( + [ + mean, + meansqr, + torch.ones([1], device=mean.device, dtype=mean.dtype), + ], + dim=0, + ) + vec = differentiable_all_reduce(vec * B) + + total_batch = vec[-1].detach() + momentum = total_batch.clamp(max=1) * self.momentum # no update if total_batch is 0 + mean, meansqr, _ = torch.split(vec / total_batch.clamp(min=1), C) # avoid div-by-zero + + var = meansqr - mean * mean + invstd = torch.rsqrt(var + self.eps) + scale = self.weight * invstd + bias = self.bias - mean * scale + scale = scale.reshape(1, -1, 1, 1) + bias = bias.reshape(1, -1, 1, 1) + + self.running_mean += momentum * (mean.detach() - self.running_mean) + self.running_var += momentum * (var.detach() - self.running_var) + ret = input * scale + bias + if half_input: + ret = ret.half() + return ret + + +class CycleBatchNormList(nn.ModuleList): + """ + Implement domain-specific BatchNorm by cycling. + + When a BatchNorm layer is used for multiple input domains or input + features, it might need to maintain a separate test-time statistics + for each domain. See Sec 5.2 in :paper:`rethinking-batchnorm`. + + This module implements it by using N separate BN layers + and it cycles through them every time a forward() is called. + + NOTE: The caller of this module MUST guarantee to always call + this module by multiple of N times. Otherwise its test-time statistics + will be incorrect. + """ + + def __init__(self, length: int, bn_class=nn.BatchNorm2d, **kwargs): + """ + Args: + length: number of BatchNorm layers to cycle. + bn_class: the BatchNorm class to use + kwargs: arguments of the BatchNorm class, such as num_features. + """ + self._affine = kwargs.pop("affine", True) + super().__init__([bn_class(**kwargs, affine=False) for k in range(length)]) + if self._affine: + # shared affine, domain-specific BN + channels = self[0].num_features + self.weight = nn.Parameter(torch.ones(channels)) + self.bias = nn.Parameter(torch.zeros(channels)) + self._pos = 0 + + def forward(self, x): + ret = self[self._pos](x) + self._pos = (self._pos + 1) % len(self) + + if self._affine: + w = self.weight.reshape(1, -1, 1, 1) + b = self.bias.reshape(1, -1, 1, 1) + return ret * w + b + else: + return ret + + def extra_repr(self): + return f"affine={self._affine}" + + +class LayerNorm(nn.Module): + """ + A LayerNorm variant, popularized by Transformers, that performs point-wise mean and + variance normalization over the channel dimension for inputs that have shape + (batch_size, channels, height, width). 
+    https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119  # noqa B950
+    """
+
+    def __init__(self, normalized_shape, eps=1e-6):
+        super().__init__()
+        self.weight = nn.Parameter(torch.ones(normalized_shape))
+        self.bias = nn.Parameter(torch.zeros(normalized_shape))
+        self.eps = eps
+        self.normalized_shape = (normalized_shape,)
+
+    def forward(self, x):
+        u = x.mean(1, keepdim=True)
+        s = (x - u).pow(2).mean(1, keepdim=True)
+        x = (x - u) / torch.sqrt(s + self.eps)
+        x = self.weight[:, None, None] * x + self.bias[:, None, None]
+        return x
diff --git a/mmcv/layers/blocks.py b/mmcv/layers/blocks.py
new file mode 100644
index 0000000..1995a4b
--- /dev/null
+++ b/mmcv/layers/blocks.py
@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import fvcore.nn.weight_init as weight_init
+from torch import nn
+
+from .batch_norm import FrozenBatchNorm2d, get_norm
+from .wrappers import Conv2d
+
+
+"""
+CNN building blocks.
+"""
+
+
+class CNNBlockBase(nn.Module):
+    """
+    A CNN block is assumed to have input channels, output channels and a stride.
+    The input and output of the `forward()` method must be NCHW tensors.
+    The method can perform arbitrary computation but must match the given
+    channels and stride specification.
+
+    Attributes:
+        in_channels (int):
+        out_channels (int):
+        stride (int):
+    """
+
+    def __init__(self, in_channels, out_channels, stride):
+        """
+        The `__init__` method of any subclass should also contain these arguments.
+
+        Args:
+            in_channels (int):
+            out_channels (int):
+            stride (int):
+        """
+        super().__init__()
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.stride = stride
+
+    def freeze(self):
+        """
+        Make this block not trainable.
+        This method sets all parameters to `requires_grad=False`,
+        and converts all BatchNorm layers to FrozenBatchNorm.
+
+        Returns:
+            the block itself
+        """
+        for p in self.parameters():
+            p.requires_grad = False
+        FrozenBatchNorm2d.convert_frozen_batchnorm(self)
+        return self
+
+
+class DepthwiseSeparableConv2d(nn.Module):
+    """
+    A kxk depthwise convolution + a 1x1 convolution.
+
+    In :paper:`xception`, norm & activation are applied on the second conv.
+    :paper:`mobilenet` uses norm & activation on both convs.
+    """
+
+    def __init__(
+        self,
+        in_channels,
+        out_channels,
+        kernel_size=3,
+        padding=1,
+        dilation=1,
+        *,
+        norm1=None,
+        activation1=None,
+        norm2=None,
+        activation2=None,
+    ):
+        """
+        Args:
+            norm1, norm2 (str or callable): normalization for the two conv layers.
+            activation1, activation2 (callable(Tensor) -> Tensor): activation
+                function for the two conv layers.
+        """
+        super().__init__()
+        self.depthwise = Conv2d(
+            in_channels,
+            in_channels,
+            kernel_size=kernel_size,
+            padding=padding,
+            dilation=dilation,
+            groups=in_channels,
+            bias=not norm1,
+            norm=get_norm(norm1, in_channels),
+            activation=activation1,
+        )
+        self.pointwise = Conv2d(
+            in_channels,
+            out_channels,
+            kernel_size=1,
+            bias=not norm2,
+            norm=get_norm(norm2, out_channels),
+            activation=activation2,
+        )
+
+        # default initialization
+        weight_init.c2_msra_fill(self.depthwise)
+        weight_init.c2_msra_fill(self.pointwise)
+
+    def forward(self, x):
+        return self.pointwise(self.depthwise(x))
diff --git a/mmcv/layers/csrc/README.md b/mmcv/layers/csrc/README.md
new file mode 100644
index 0000000..778ed3d
--- /dev/null
+++ b/mmcv/layers/csrc/README.md
@@ -0,0 +1,7 @@
+
+
+To add a new Op:
+
+1. Create a new directory
+2. Implement new ops there
+3. 
Declare its Python interface in `vision.cpp`.
diff --git a/mmcv/layers/csrc/ROIAlignRotated/ROIAlignRotated.h b/mmcv/layers/csrc/ROIAlignRotated/ROIAlignRotated.h
new file mode 100644
index 0000000..03f4211
--- /dev/null
+++ b/mmcv/layers/csrc/ROIAlignRotated/ROIAlignRotated.h
@@ -0,0 +1,115 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#pragma once
+#include <torch/types.h>
+
+namespace detectron2 {
+
+at::Tensor ROIAlignRotated_forward_cpu(
+    const at::Tensor& input,
+    const at::Tensor& rois,
+    const float spatial_scale,
+    const int pooled_height,
+    const int pooled_width,
+    const int sampling_ratio);
+
+at::Tensor ROIAlignRotated_backward_cpu(
+    const at::Tensor& grad,
+    const at::Tensor& rois,
+    const float spatial_scale,
+    const int pooled_height,
+    const int pooled_width,
+    const int batch_size,
+    const int channels,
+    const int height,
+    const int width,
+    const int sampling_ratio);
+
+#if defined(WITH_CUDA) || defined(WITH_HIP)
+at::Tensor ROIAlignRotated_forward_cuda(
+    const at::Tensor& input,
+    const at::Tensor& rois,
+    const float spatial_scale,
+    const int pooled_height,
+    const int pooled_width,
+    const int sampling_ratio);
+
+at::Tensor ROIAlignRotated_backward_cuda(
+    const at::Tensor& grad,
+    const at::Tensor& rois,
+    const float spatial_scale,
+    const int pooled_height,
+    const int pooled_width,
+    const int batch_size,
+    const int channels,
+    const int height,
+    const int width,
+    const int sampling_ratio);
+#endif
+
+// Interface for Python
+inline at::Tensor ROIAlignRotated_forward(
+    const at::Tensor& input,
+    const at::Tensor& rois,
+    const double spatial_scale,
+    const int64_t pooled_height,
+    const int64_t pooled_width,
+    const int64_t sampling_ratio) {
+  if (input.is_cuda()) {
+#if defined(WITH_CUDA) || defined(WITH_HIP)
+    return ROIAlignRotated_forward_cuda(
+        input,
+        rois,
+        spatial_scale,
+        pooled_height,
+        pooled_width,
+        sampling_ratio);
+#else
+    AT_ERROR("Detectron2 is not compiled with GPU support!");
+#endif
+  }
+  return ROIAlignRotated_forward_cpu(
+      input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio);
+}
+
+inline at::Tensor ROIAlignRotated_backward(
+    const at::Tensor& grad,
+    const at::Tensor& rois,
+    const double spatial_scale,
+    const int64_t pooled_height,
+    const int64_t pooled_width,
+    const int64_t batch_size,
+    const int64_t channels,
+    const int64_t height,
+    const int64_t width,
+    const int64_t sampling_ratio) {
+  if (grad.is_cuda()) {
+#if defined(WITH_CUDA) || defined(WITH_HIP)
+    return ROIAlignRotated_backward_cuda(
+        grad,
+        rois,
+        spatial_scale,
+        pooled_height,
+        pooled_width,
+        batch_size,
+        channels,
+        height,
+        width,
+        sampling_ratio);
+#else
+    AT_ERROR("Detectron2 is not compiled with GPU support!");
+#endif
+  }
+  return ROIAlignRotated_backward_cpu(
+      grad,
+      rois,
+      spatial_scale,
+      pooled_height,
+      pooled_width,
+      batch_size,
+      channels,
+      height,
+      width,
+      sampling_ratio);
+}
+
+} // namespace detectron2
diff --git a/mmcv/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp b/mmcv/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp
new file mode 100644
index 0000000..2a3d305
--- /dev/null
+++ b/mmcv/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp
@@ -0,0 +1,522 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#include
+#include "ROIAlignRotated.h"
+
+// Note: this implementation originates from the Caffe2 ROIAlignRotated Op
+// and PyTorch ROIAlign (non-rotated) Op implementations.
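A note on the calling convention before the kernels: each RoI row carries six values, `(batch_index, x_center, y_center, width, height, angle_in_degrees)`, which is what the `rois + n * 6` indexing and the `M_PI / 180.0` conversion in the code below imply. A shape-level sketch (the Python binding name is hypothetical; only the tensor layout and output shape are taken from these sources):

```python
# Shape-level sketch of the ROIAlignRotated contract; the Python binding
# name is hypothetical -- only the tensor layout is taken from the C++
# sources (rois + n * 6, angle stored in degrees).
import torch

N, C, H, W = 2, 256, 64, 64
feat = torch.randn(N, C, H, W)

# One RoI row: (batch_idx, x_ctr, y_ctr, w, h, angle_deg)
rois = torch.tensor([
    [0.0, 32.0, 32.0, 20.0, 10.0, 30.0],   # 30-degree box on image 0
    [1.0, 16.0, 48.0, 12.0, 12.0, -45.0],  # -45-degree box on image 1
])

pooled_h, pooled_w = 7, 7
spatial_scale = 1.0 / 4.0  # feature stride of this backbone level
sampling_ratio = 2

# out = roi_align_rotated(feat, rois, spatial_scale,
#                         pooled_h, pooled_w, sampling_ratio)
# expected shape: (num_rois, C, pooled_h, pooled_w) -> (2, 256, 7, 7)
```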
+// The key difference between this implementation and those ones is +// we don't do "legacy offset" in this version, as there aren't many previous +// works, if any, using the "legacy" ROIAlignRotated Op. +// This would make the interface a bit cleaner. + +namespace detectron2 { + +namespace { +template +struct PreCalc { + int pos1; + int pos2; + int pos3; + int pos4; + T w1; + T w2; + T w3; + T w4; +}; + +template +void pre_calc_for_bilinear_interpolate( + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int iy_upper, + const int ix_upper, + T roi_start_h, + T roi_start_w, + T bin_size_h, + T bin_size_w, + int roi_bin_grid_h, + int roi_bin_grid_w, + T roi_center_h, + T roi_center_w, + T cos_theta, + T sin_theta, + std::vector>& pre_calc) { + int pre_calc_index = 0; + for (int ph = 0; ph < pooled_height; ph++) { + for (int pw = 0; pw < pooled_width; pw++) { + for (int iy = 0; iy < iy_upper; iy++) { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < ix_upper; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + // In image space, (y, x) is the order for Right Handed System, + // and this is essentially multiplying the point by a rotation matrix + // to rotate it counterclockwise through angle theta. + T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + // deal with: inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + PreCalc pc; + pc.pos1 = 0; + pc.pos2 = 0; + pc.pos3 = 0; + pc.pos4 = 0; + pc.w1 = 0; + pc.w2 = 0; + pc.w3 = 0; + pc.w4 = 0; + pre_calc[pre_calc_index] = pc; + pre_calc_index += 1; + continue; + } + + if (y < 0) { + y = 0; + } + if (x < 0) { + x = 0; + } + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. 
- lx; + T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + // save weights and indices + PreCalc pc; + pc.pos1 = y_low * width + x_low; + pc.pos2 = y_low * width + x_high; + pc.pos3 = y_high * width + x_low; + pc.pos4 = y_high * width + x_high; + pc.w1 = w1; + pc.w2 = w2; + pc.w3 = w3; + pc.w4 = w4; + pre_calc[pre_calc_index] = pc; + + pre_calc_index += 1; + } + } + } + } +} + +template +void bilinear_interpolate_gradient( + const int height, + const int width, + T y, + T x, + T& w1, + T& w2, + T& w3, + T& w4, + int& x_low, + int& x_high, + int& y_low, + int& y_high) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y < 0) { + y = 0; + } + + if (x < 0) { + x = 0; + } + + y_low = (int)y; + x_low = (int)x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + + // reference in forward + // T v1 = input[y_low * width + x_low]; + // T v2 = input[y_low * width + x_high]; + // T v3 = input[y_high * width + x_low]; + // T v4 = input[y_high * width + x_high]; + // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + return; +} + +template +inline void add(T* address, const T& val) { + *address += val; +} + +} // namespace + +template +void ROIAlignRotatedForward( + const int nthreads, + const T* input, + const T& spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + const T* rois, + T* output) { + int n_rois = nthreads / channels / pooled_width / pooled_height; + // (n, c, ph, pw) is an element in the pooled output + // can be parallelized using omp + // #pragma omp parallel for num_threads(32) + for (int n = 0; n < n_rois; n++) { + int index_n = n * channels * pooled_width * pooled_height; + + const T* current_roi = rois + n * 6; + int roi_batch_ind = current_roi[0]; + + // Do not use rounding; this implementation detail is critical + // ROIAlignRotated supports align == true, i.e., continuous coordinate + // by default, thus the 0.5 offset + T offset = (T)0.5; + T roi_center_w = current_roi[1] * spatial_scale - offset; + T roi_center_h = current_roi[2] * spatial_scale - offset; + T roi_width = current_roi[3] * spatial_scale; + T roi_height = current_roi[4] * spatial_scale; + T theta = current_roi[5] * M_PI / 180.0; + T cos_theta = cos(theta); + T sin_theta = sin(theta); + + AT_ASSERTM( + roi_width >= 0 && roi_height >= 0, + "ROIs in ROIAlignRotated do not have non-negative size!"); + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + const T count = std::max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. 
= 4 + + // we want to precalculate indices and weights shared by all channels, + // this is the key point of optimization + std::vector> pre_calc( + roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. + T roi_start_h = -roi_height / 2.0; + T roi_start_w = -roi_width / 2.0; + + pre_calc_for_bilinear_interpolate( + height, + width, + pooled_height, + pooled_width, + roi_bin_grid_h, + roi_bin_grid_w, + roi_start_h, + roi_start_w, + bin_size_h, + bin_size_w, + roi_bin_grid_h, + roi_bin_grid_w, + roi_center_h, + roi_center_w, + cos_theta, + sin_theta, + pre_calc); + + for (int c = 0; c < channels; c++) { + int index_n_c = index_n + c * pooled_width * pooled_height; + const T* offset_input = + input + (roi_batch_ind * channels + c) * height * width; + int pre_calc_index = 0; + + for (int ph = 0; ph < pooled_height; ph++) { + for (int pw = 0; pw < pooled_width; pw++) { + int index = index_n_c + ph * pooled_width + pw; + + T output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + PreCalc pc = pre_calc[pre_calc_index]; + output_val += pc.w1 * offset_input[pc.pos1] + + pc.w2 * offset_input[pc.pos2] + + pc.w3 * offset_input[pc.pos3] + pc.w4 * offset_input[pc.pos4]; + + pre_calc_index += 1; + } + } + output_val /= count; + + output[index] = output_val; + } // for pw + } // for ph + } // for c + } // for n +} + +template +void ROIAlignRotatedBackward( + const int nthreads, + // may not be contiguous. should index using n_stride, etc + const T* grad_output, + const T& spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + T* grad_input, + const T* rois, + const int n_stride, + const int c_stride, + const int h_stride, + const int w_stride) { + for (int index = 0; index < nthreads; index++) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* current_roi = rois + n * 6; + int roi_batch_ind = current_roi[0]; + + // Do not use rounding; this implementation detail is critical + // ROIAlignRotated supports align == true, i.e., continuous coordinate + // by default, thus the 0.5 offset + T offset = (T)0.5; + T roi_center_w = current_roi[1] * spatial_scale - offset; + T roi_center_h = current_roi[2] * spatial_scale - offset; + T roi_width = current_roi[3] * spatial_scale; + T roi_height = current_roi[4] * spatial_scale; + T theta = current_roi[5] * M_PI / 180.0; + T cos_theta = cos(theta); + T sin_theta = sin(theta); + + AT_ASSERTM( + roi_width >= 0 && roi_height >= 0, + "ROIs in ROIAlignRotated do not have non-negative size!"); + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + T* offset_grad_input = + grad_input + ((roi_batch_ind * channels + c) * height * width); + + int output_offset = n * n_stride + c * c_stride; + const T* offset_grad_output = grad_output + output_offset; + const T grad_output_this_bin = + offset_grad_output[ph * h_stride + pw * w_stride]; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? 
sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. + T roi_start_h = -roi_height / 2.0; + T roi_start_w = -roi_width / 2.0; + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 + + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient( + height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high); + + T g1 = grad_output_this_bin * w1 / count; + T g2 = grad_output_this_bin * w2 / count; + T g3 = grad_output_this_bin * w3 / count; + T g4 = grad_output_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + // atomic add is not needed for now since it is single threaded + add(offset_grad_input + y_low * width + x_low, static_cast(g1)); + add(offset_grad_input + y_low * width + x_high, static_cast(g2)); + add(offset_grad_input + y_high * width + x_low, static_cast(g3)); + add(offset_grad_input + y_high * width + x_high, static_cast(g4)); + } // if + } // ix + } // iy + } // for +} // ROIAlignRotatedBackward + +at::Tensor ROIAlignRotated_forward_cpu( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio) { + AT_ASSERTM(input.device().is_cpu(), "input must be a CPU tensor"); + AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); + + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlign_forward_cpu"; + at::checkAllSameType(c, {input_t, rois_t}); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + at::Tensor output = at::zeros( + {num_rois, channels, pooled_height, pooled_width}, input.options()); + + auto output_size = num_rois * pooled_height * pooled_width * channels; + + if (output.numel() == 0) { + return output; + } + + auto input_ = input.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), "ROIAlignRotated_forward", [&] { + ROIAlignRotatedForward( + output_size, + input_.data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + rois_.data_ptr(), + output.data_ptr()); + }); + return output; +} + +at::Tensor ROIAlignRotated_backward_cpu( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio) { + AT_ASSERTM(grad.device().is_cpu(), "grad must be a CPU tensor"); + AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, 
rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlignRotated_backward_cpu"; + at::checkAllSameType(c, {grad_t, rois_t}); + + at::Tensor grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + // handle possibly empty gradients + if (grad.numel() == 0) { + return grad_input; + } + + // get stride values to ensure indexing into gradients is correct. + int n_stride = grad.stride(0); + int c_stride = grad.stride(1); + int h_stride = grad.stride(2); + int w_stride = grad.stride(3); + + auto rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad.scalar_type(), "ROIAlignRotated_forward", [&] { + ROIAlignRotatedBackward( + grad.numel(), + grad.data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + grad_input.data_ptr(), + rois_.data_ptr(), + n_stride, + c_stride, + h_stride, + w_stride); + }); + return grad_input; +} + +} // namespace detectron2 diff --git a/mmcv/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu b/mmcv/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu new file mode 100644 index 0000000..fca1865 --- /dev/null +++ b/mmcv/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu @@ -0,0 +1,443 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#include +#include +#include +#include + +// TODO make it in a common file +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ + i += blockDim.x * gridDim.x) + +// Note: this implementation originates from the Caffe2 ROIAlignRotated Op +// and PyTorch ROIAlign (non-rotated) Op implementations. +// The key difference between this implementation and those ones is +// we don't do "legacy offset" in this version, as there aren't many previous +// works, if any, using the "legacy" ROIAlignRotated Op. +// This would make the interface a bit cleaner. + +namespace detectron2 { + +namespace { + +template +__device__ T bilinear_interpolate( + const T* input, + const int height, + const int width, + T y, + T x) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + return 0; + } + + if (y < 0) { + y = 0; + } + + if (x < 0) { + x = 0; + } + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. 
- lx;
+  // do bilinear interpolation
+  T v1 = input[y_low * width + x_low];
+  T v2 = input[y_low * width + x_high];
+  T v3 = input[y_high * width + x_low];
+  T v4 = input[y_high * width + x_high];
+  T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
+
+  T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
+
+  return val;
+}
+
+template <typename T>
+__device__ void bilinear_interpolate_gradient(
+    const int height,
+    const int width,
+    T y,
+    T x,
+    T& w1,
+    T& w2,
+    T& w3,
+    T& w4,
+    int& x_low,
+    int& x_high,
+    int& y_low,
+    int& y_high) {
+  // deal with cases that inverse elements are out of feature map boundary
+  if (y < -1.0 || y > height || x < -1.0 || x > width) {
+    // empty
+    w1 = w2 = w3 = w4 = 0.;
+    x_low = x_high = y_low = y_high = -1;
+    return;
+  }
+
+  if (y < 0) {
+    y = 0;
+  }
+
+  if (x < 0) {
+    x = 0;
+  }
+
+  y_low = (int)y;
+  x_low = (int)x;
+
+  if (y_low >= height - 1) {
+    y_high = y_low = height - 1;
+    y = (T)y_low;
+  } else {
+    y_high = y_low + 1;
+  }
+
+  if (x_low >= width - 1) {
+    x_high = x_low = width - 1;
+    x = (T)x_low;
+  } else {
+    x_high = x_low + 1;
+  }
+
+  T ly = y - y_low;
+  T lx = x - x_low;
+  T hy = 1. - ly, hx = 1. - lx;
+
+  // reference in forward
+  // T v1 = input[y_low * width + x_low];
+  // T v2 = input[y_low * width + x_high];
+  // T v3 = input[y_high * width + x_low];
+  // T v4 = input[y_high * width + x_high];
+  // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
+
+  w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
+
+  return;
+}
+
+} // namespace
+
+template <typename T>
+__global__ void RoIAlignRotatedForward(
+    const int nthreads,
+    const T* input,
+    const T spatial_scale,
+    const int channels,
+    const int height,
+    const int width,
+    const int pooled_height,
+    const int pooled_width,
+    const int sampling_ratio,
+    const T* rois,
+    T* top_data) {
+  CUDA_1D_KERNEL_LOOP(index, nthreads) {
+    // (n, c, ph, pw) is an element in the pooled output
+    int pw = index % pooled_width;
+    int ph = (index / pooled_width) % pooled_height;
+    int c = (index / pooled_width / pooled_height) % channels;
+    int n = index / pooled_width / pooled_height / channels;
+
+    const T* current_roi = rois + n * 6;
+    int roi_batch_ind = current_roi[0];
+
+    // Do not use rounding; this implementation detail is critical
+    // ROIAlignRotated supports align == true, i.e., continuous coordinate
+    // by default, thus the 0.5 offset
+    T offset = (T)0.5;
+    T roi_center_w = current_roi[1] * spatial_scale - offset;
+    T roi_center_h = current_roi[2] * spatial_scale - offset;
+    T roi_width = current_roi[3] * spatial_scale;
+    T roi_height = current_roi[4] * spatial_scale;
+    T theta = current_roi[5] * M_PI / 180.0;
+    T cos_theta = cos(theta);
+    T sin_theta = sin(theta);
+
+    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
+    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
+
+    const T* offset_input =
+        input + (roi_batch_ind * channels + c) * height * width;
+
+    // We use roi_bin_grid to sample the grid and mimic integral
+    int roi_bin_grid_h = (sampling_ratio > 0)
+        ? sampling_ratio
+        : ceil(roi_height / pooled_height); // e.g., = 2
+    int roi_bin_grid_w =
+        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
+
+    // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
+    // Appropriate translation needs to be applied after.
+    T roi_start_h = -roi_height / 2.0;
+    T roi_start_w = -roi_width / 2.0;
+
+    // We do average (integral) pooling inside a bin
+    const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g.
= 4 + + T output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 + { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + + T val = bilinear_interpolate(offset_input, height, width, y, x); + output_val += val; + } + } + output_val /= count; + + top_data[index] = output_val; + } +} + +template +__global__ void RoIAlignRotatedBackwardFeature( + const int nthreads, + const T* top_diff, + const int num_rois, + const T spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + T* bottom_diff, + const T* rois) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* current_roi = rois + n * 6; + int roi_batch_ind = current_roi[0]; + + // Do not use rounding; this implementation detail is critical + // ROIAlignRotated supports align == true, i.e., continuous coordinate + // by default, thus the 0.5 offset + T offset = (T)0.5; + T roi_center_w = current_roi[1] * spatial_scale - offset; + T roi_center_h = current_roi[2] * spatial_scale - offset; + T roi_width = current_roi[3] * spatial_scale; + T roi_height = current_roi[4] * spatial_scale; + T theta = current_roi[5] * M_PI / 180.0; + T cos_theta = cos(theta); + T sin_theta = sin(theta); + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + T* offset_bottom_diff = + bottom_diff + (roi_batch_ind * channels + c) * height * width; + + int top_offset = (n * channels + c) * pooled_height * pooled_width; + const T* offset_top_diff = top_diff + top_offset; + const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. + T roi_start_h = -roi_height / 2.0; + T roi_start_w = -roi_width / 2.0; + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 + + for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 + { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient( + height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high); + + T g1 = top_diff_this_bin * w1 / count; + T g2 = top_diff_this_bin * w2 / count; + T g3 = top_diff_this_bin * w3 / count; + T g4 = top_diff_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + atomicAdd( + offset_bottom_diff + y_low * width + x_low, static_cast(g1)); + atomicAdd( + offset_bottom_diff + y_low * width + x_high, static_cast(g2)); + atomicAdd( + offset_bottom_diff + y_high * width + x_low, static_cast(g3)); + atomicAdd( + offset_bottom_diff + y_high * width + x_high, static_cast(g4)); + } // if + } // ix + } // iy + } // CUDA_1D_KERNEL_LOOP +} // RoIAlignRotatedBackward + +at::Tensor ROIAlignRotated_forward_cuda( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio) { + AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor"); + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlignRotated_forward_cuda"; + at::checkAllSameGPU(c, {input_t, rois_t}); + at::checkAllSameType(c, {input_t, rois_t}); + at::cuda::CUDAGuard device_guard(input.device()); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + auto output = at::empty( + {num_rois, channels, pooled_height, pooled_width}, input.options()); + auto output_size = num_rois * pooled_height * pooled_width * channels; + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min( + at::cuda::ATenCeilDiv( + static_cast(output_size), static_cast(512)), + static_cast(4096))); + dim3 block(512); + + if (output.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return output; + } + + auto input_ = input.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES( + input.scalar_type(), "ROIAlignRotated_forward", [&] { + RoIAlignRotatedForward<<>>( + output_size, + input_.data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + rois_.data_ptr(), + output.data_ptr()); + }); + cudaDeviceSynchronize(); + AT_CUDA_CHECK(cudaGetLastError()); + return output; +} + +// TODO remove the dependency on input and use instead its sizes -> save memory +at::Tensor ROIAlignRotated_backward_cuda( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio) { + AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor"); + AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, 
rois_t{rois, "rois", 2}; + at::CheckedFrom c = "ROIAlign_backward_cuda"; + at::checkAllSameGPU(c, {grad_t, rois_t}); + at::checkAllSameType(c, {grad_t, rois_t}); + at::cuda::CUDAGuard device_guard(grad.device()); + + auto num_rois = rois.size(0); + auto grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min( + at::cuda::ATenCeilDiv( + static_cast(grad.numel()), static_cast(512)), + static_cast(4096))); + dim3 block(512); + + // handle possibly empty gradients + if (grad.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; + } + + auto grad_ = grad.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES( + grad.scalar_type(), "ROIAlignRotated_backward", [&] { + RoIAlignRotatedBackwardFeature<<>>( + grad.numel(), + grad_.data_ptr(), + num_rois, + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + grad_input.data_ptr(), + rois_.data_ptr()); + }); + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; +} + +} // namespace detectron2 diff --git a/mmcv/layers/csrc/box_iou_rotated/box_iou_rotated.h b/mmcv/layers/csrc/box_iou_rotated/box_iou_rotated.h new file mode 100644 index 0000000..3bf383b --- /dev/null +++ b/mmcv/layers/csrc/box_iou_rotated/box_iou_rotated.h @@ -0,0 +1,35 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#pragma once +#include + +namespace detectron2 { + +at::Tensor box_iou_rotated_cpu( + const at::Tensor& boxes1, + const at::Tensor& boxes2); + +#if defined(WITH_CUDA) || defined(WITH_HIP) +at::Tensor box_iou_rotated_cuda( + const at::Tensor& boxes1, + const at::Tensor& boxes2); +#endif + +// Interface for Python +// inline is needed to prevent multiple function definitions when this header is +// included by different cpps +inline at::Tensor box_iou_rotated( + const at::Tensor& boxes1, + const at::Tensor& boxes2) { + assert(boxes1.device().is_cuda() == boxes2.device().is_cuda()); + if (boxes1.device().is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + return box_iou_rotated_cuda(boxes1.contiguous(), boxes2.contiguous()); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + + return box_iou_rotated_cpu(boxes1.contiguous(), boxes2.contiguous()); +} + +} // namespace detectron2 diff --git a/mmcv/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp b/mmcv/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp new file mode 100644 index 0000000..c843487 --- /dev/null +++ b/mmcv/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp @@ -0,0 +1,39 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
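A convenient sanity check for the `box_iou_rotated` interface declared above: with `angle = 0` the boxes are axis-aligned, so the kernel must agree with ordinary IoU computed from `(x_ctr, y_ctr, w, h)`. Below is a self-contained PyTorch reference for that degenerate case (the compiled op itself is not invoked here).

```python
# Reference IoU for the angle == 0 case of box_iou_rotated: boxes are
# (x_ctr, y_ctr, w, h, angle_deg); with angle 0 this reduces to plain
# axis-aligned IoU, which the rotated kernel must reproduce.
import torch

def axis_aligned_iou(boxes1: torch.Tensor, boxes2: torch.Tensor) -> torch.Tensor:
    """Pairwise IoU for (x_ctr, y_ctr, w, h) boxes. Returns an (N, M) matrix."""
    def to_xyxy(b):
        x1 = b[:, 0] - b[:, 2] / 2
        y1 = b[:, 1] - b[:, 3] / 2
        x2 = b[:, 0] + b[:, 2] / 2
        y2 = b[:, 1] + b[:, 3] / 2
        return torch.stack([x1, y1, x2, y2], dim=1)

    a, b = to_xyxy(boxes1), to_xyxy(boxes2)
    lt = torch.max(a[:, None, :2], b[None, :, :2])  # intersection top-left
    rb = torch.min(a[:, None, 2:], b[None, :, 2:])  # intersection bottom-right
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    area_a = boxes1[:, 2] * boxes1[:, 3]
    area_b = boxes2[:, 2] * boxes2[:, 3]
    return inter / (area_a[:, None] + area_b[None, :] - inter)

b1 = torch.tensor([[10.0, 10.0, 4.0, 4.0]])
b2 = torch.tensor([[11.0, 10.0, 4.0, 4.0], [30.0, 30.0, 2.0, 2.0]])
print(axis_aligned_iou(b1, b2))  # IoU ~ [[0.6, 0.0]]
```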
+#include "box_iou_rotated.h" +#include "box_iou_rotated_utils.h" + +namespace detectron2 { + +template +void box_iou_rotated_cpu_kernel( + const at::Tensor& boxes1, + const at::Tensor& boxes2, + at::Tensor& ious) { + auto num_boxes1 = boxes1.size(0); + auto num_boxes2 = boxes2.size(0); + + for (int i = 0; i < num_boxes1; i++) { + for (int j = 0; j < num_boxes2; j++) { + ious[i * num_boxes2 + j] = single_box_iou_rotated( + boxes1[i].data_ptr(), boxes2[j].data_ptr()); + } + } +} + +at::Tensor box_iou_rotated_cpu( + // input must be contiguous: + const at::Tensor& boxes1, + const at::Tensor& boxes2) { + auto num_boxes1 = boxes1.size(0); + auto num_boxes2 = boxes2.size(0); + at::Tensor ious = + at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat)); + + box_iou_rotated_cpu_kernel(boxes1, boxes2, ious); + + // reshape from 1d array to 2d array + auto shape = std::vector{num_boxes1, num_boxes2}; + return ious.reshape(shape); +} + +} // namespace detectron2 diff --git a/mmcv/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu b/mmcv/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu new file mode 100644 index 0000000..952710e --- /dev/null +++ b/mmcv/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu @@ -0,0 +1,130 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#include +#include +#include +#include +#include "box_iou_rotated_utils.h" + +namespace detectron2 { + +// 2D block with 32 * 16 = 512 threads per block +const int BLOCK_DIM_X = 32; +const int BLOCK_DIM_Y = 16; + +template +__global__ void box_iou_rotated_cuda_kernel( + const int n_boxes1, + const int n_boxes2, + const T* dev_boxes1, + const T* dev_boxes2, + T* dev_ious) { + const int row_start = blockIdx.x * blockDim.x; + const int col_start = blockIdx.y * blockDim.y; + + const int row_size = min(n_boxes1 - row_start, blockDim.x); + const int col_size = min(n_boxes2 - col_start, blockDim.y); + + __shared__ float block_boxes1[BLOCK_DIM_X * 5]; + __shared__ float block_boxes2[BLOCK_DIM_Y * 5]; + + // It's safe to copy using threadIdx.x since BLOCK_DIM_X >= BLOCK_DIM_Y + if (threadIdx.x < row_size && threadIdx.y == 0) { + block_boxes1[threadIdx.x * 5 + 0] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 0]; + block_boxes1[threadIdx.x * 5 + 1] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 1]; + block_boxes1[threadIdx.x * 5 + 2] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 2]; + block_boxes1[threadIdx.x * 5 + 3] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 3]; + block_boxes1[threadIdx.x * 5 + 4] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 4]; + } + + if (threadIdx.x < col_size && threadIdx.y == 0) { + block_boxes2[threadIdx.x * 5 + 0] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 0]; + block_boxes2[threadIdx.x * 5 + 1] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 1]; + block_boxes2[threadIdx.x * 5 + 2] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 2]; + block_boxes2[threadIdx.x * 5 + 3] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 3]; + block_boxes2[threadIdx.x * 5 + 4] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 4]; + } + __syncthreads(); + + if (threadIdx.x < row_size && threadIdx.y < col_size) { + int offset = (row_start + threadIdx.x) * n_boxes2 + col_start + threadIdx.y; + dev_ious[offset] = single_box_iou_rotated( + block_boxes1 + threadIdx.x * 5, block_boxes2 + threadIdx.y * 5); + } +} + +at::Tensor box_iou_rotated_cuda( + // input must be contiguous + const at::Tensor& boxes1, + const at::Tensor& boxes2) { + using scalar_t = float; + AT_ASSERTM( + boxes1.scalar_type() == at::kFloat, "boxes1 
must be a float tensor"); + AT_ASSERTM( + boxes2.scalar_type() == at::kFloat, "boxes2 must be a float tensor"); + AT_ASSERTM(boxes1.is_cuda(), "boxes1 must be a CUDA tensor"); + AT_ASSERTM(boxes2.is_cuda(), "boxes2 must be a CUDA tensor"); + at::cuda::CUDAGuard device_guard(boxes1.device()); + + auto num_boxes1 = boxes1.size(0); + auto num_boxes2 = boxes2.size(0); + + at::Tensor ious = + at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat)); + + bool transpose = false; + if (num_boxes1 > 0 && num_boxes2 > 0) { + scalar_t *data1 = boxes1.data_ptr(), + *data2 = boxes2.data_ptr(); + + if (num_boxes2 > 65535 * BLOCK_DIM_Y) { + AT_ASSERTM( + num_boxes1 <= 65535 * BLOCK_DIM_Y, + "Too many boxes for box_iou_rotated_cuda!"); + // x dim is allowed to be large, but y dim cannot, + // so we transpose the two to avoid "invalid configuration argument" + // error. We assume one of them is small. Otherwise the result is hard to + // fit in memory anyway. + std::swap(num_boxes1, num_boxes2); + std::swap(data1, data2); + transpose = true; + } + + const int blocks_x = + at::cuda::ATenCeilDiv(static_cast(num_boxes1), BLOCK_DIM_X); + const int blocks_y = + at::cuda::ATenCeilDiv(static_cast(num_boxes2), BLOCK_DIM_Y); + + dim3 blocks(blocks_x, blocks_y); + dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + box_iou_rotated_cuda_kernel<<>>( + num_boxes1, + num_boxes2, + data1, + data2, + (scalar_t*)ious.data_ptr()); + + AT_CUDA_CHECK(cudaGetLastError()); + } + + // reshape from 1d array to 2d array + auto shape = std::vector{num_boxes1, num_boxes2}; + if (transpose) { + return ious.view(shape).t(); + } else { + return ious.view(shape); + } +} + +} // namespace detectron2 diff --git a/mmcv/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h b/mmcv/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h new file mode 100644 index 0000000..bc6967a --- /dev/null +++ b/mmcv/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h @@ -0,0 +1,391 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#pragma once + +#include +#include + +#if defined(__CUDACC__) || __HCC__ == 1 || __HIP__ == 1 +// Designates functions callable from the host (CPU) and the device (GPU) +#define HOST_DEVICE __host__ __device__ +#define HOST_DEVICE_INLINE HOST_DEVICE __forceinline__ +#else +#include +#define HOST_DEVICE +#define HOST_DEVICE_INLINE HOST_DEVICE inline +#endif + +namespace detectron2 { + +namespace { + +template +struct RotatedBox { + T x_ctr, y_ctr, w, h, a; +}; + +template +struct Point { + T x, y; + HOST_DEVICE_INLINE Point(const T& px = 0, const T& py = 0) : x(px), y(py) {} + HOST_DEVICE_INLINE Point operator+(const Point& p) const { + return Point(x + p.x, y + p.y); + } + HOST_DEVICE_INLINE Point& operator+=(const Point& p) { + x += p.x; + y += p.y; + return *this; + } + HOST_DEVICE_INLINE Point operator-(const Point& p) const { + return Point(x - p.x, y - p.y); + } + HOST_DEVICE_INLINE Point operator*(const T coeff) const { + return Point(x * coeff, y * coeff); + } +}; + +template +HOST_DEVICE_INLINE T dot_2d(const Point& A, const Point& B) { + return A.x * B.x + A.y * B.y; +} + +// R: result type. can be different from input type +template +HOST_DEVICE_INLINE R cross_2d(const Point& A, const Point& B) { + return static_cast(A.x) * static_cast(B.y) - + static_cast(B.x) * static_cast(A.y); +} + +template +HOST_DEVICE_INLINE void get_rotated_vertices( + const RotatedBox& box, + Point (&pts)[4]) { + // M_PI / 180. 
== 0.01745329251 + double theta = box.a * 0.01745329251; + T cosTheta2 = (T)cos(theta) * 0.5f; + T sinTheta2 = (T)sin(theta) * 0.5f; + + // y: top --> down; x: left --> right + pts[0].x = box.x_ctr + sinTheta2 * box.h + cosTheta2 * box.w; + pts[0].y = box.y_ctr + cosTheta2 * box.h - sinTheta2 * box.w; + pts[1].x = box.x_ctr - sinTheta2 * box.h + cosTheta2 * box.w; + pts[1].y = box.y_ctr - cosTheta2 * box.h - sinTheta2 * box.w; + pts[2].x = 2 * box.x_ctr - pts[0].x; + pts[2].y = 2 * box.y_ctr - pts[0].y; + pts[3].x = 2 * box.x_ctr - pts[1].x; + pts[3].y = 2 * box.y_ctr - pts[1].y; +} + +template +HOST_DEVICE_INLINE int get_intersection_points( + const Point (&pts1)[4], + const Point (&pts2)[4], + Point (&intersections)[24]) { + // Line vector + // A line from p1 to p2 is: p1 + (p2-p1)*t, t=[0,1] + Point vec1[4], vec2[4]; + for (int i = 0; i < 4; i++) { + vec1[i] = pts1[(i + 1) % 4] - pts1[i]; + vec2[i] = pts2[(i + 1) % 4] - pts2[i]; + } + + // When computing the intersection area, it doesn't hurt if we have + // more (duplicated/approximate) intersections/vertices than needed, + // while it can cause drastic difference if we miss an intersection/vertex. + // Therefore, we add an epsilon to relax the comparisons between + // the float point numbers that decide the intersection points. + double EPS = 1e-5; + + // Line test - test all line combos for intersection + int num = 0; // number of intersections + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + // Solve for 2x2 Ax=b + T det = cross_2d(vec2[j], vec1[i]); + + // This takes care of parallel lines + if (fabs(det) <= 1e-14) { + continue; + } + + auto vec12 = pts2[j] - pts1[i]; + + T t1 = cross_2d(vec2[j], vec12) / det; + T t2 = cross_2d(vec1[i], vec12) / det; + + if (t1 > -EPS && t1 < 1.0f + EPS && t2 > -EPS && t2 < 1.0f + EPS) { + intersections[num++] = pts1[i] + vec1[i] * t1; + } + } + } + + // Check for vertices of rect1 inside rect2 + { + const auto& AB = vec2[0]; + const auto& DA = vec2[3]; + auto ABdotAB = dot_2d(AB, AB); + auto ADdotAD = dot_2d(DA, DA); + for (int i = 0; i < 4; i++) { + // assume ABCD is the rectangle, and P is the point to be judged + // P is inside ABCD iff. P's projection on AB lies within AB + // and P's projection on AD lies within AD + + auto AP = pts1[i] - pts2[0]; + + auto APdotAB = dot_2d(AP, AB); + auto APdotAD = -dot_2d(AP, DA); + + if ((APdotAB > -EPS) && (APdotAD > -EPS) && (APdotAB < ABdotAB + EPS) && + (APdotAD < ADdotAD + EPS)) { + intersections[num++] = pts1[i]; + } + } + } + + // Reverse the check - check for vertices of rect2 inside rect1 + { + const auto& AB = vec1[0]; + const auto& DA = vec1[3]; + auto ABdotAB = dot_2d(AB, AB); + auto ADdotAD = dot_2d(DA, DA); + for (int i = 0; i < 4; i++) { + auto AP = pts2[i] - pts1[0]; + + auto APdotAB = dot_2d(AP, AB); + auto APdotAD = -dot_2d(AP, DA); + + if ((APdotAB > -EPS) && (APdotAD > -EPS) && (APdotAB < ABdotAB + EPS) && + (APdotAD < ADdotAD + EPS)) { + intersections[num++] = pts2[i]; + } + } + } + + return num; +} + +template +HOST_DEVICE_INLINE int convex_hull_graham( + const Point (&p)[24], + const int& num_in, + Point (&q)[24], + bool shift_to_zero = false) { + assert(num_in >= 2); + + // Step 1: + // Find point with minimum y + // if more than 1 points have the same minimum y, + // pick the one with the minimum x. 
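Stepping back to `get_rotated_vertices` above: the corner arithmetic is easy to cross-check outside the kernel. Here is a NumPy transcription of the same math (image coordinates with y pointing down, angle in degrees), mirroring the half-extent trick in the C++ code.

```python
# NumPy transcription of get_rotated_vertices for cross-checking: returns
# the 4 corners of (x_ctr, y_ctr, w, h, angle_deg) in image coordinates
# (y axis pointing down).
import numpy as np

def rotated_box_corners(x_ctr, y_ctr, w, h, angle_deg):
    theta = np.deg2rad(angle_deg)
    cos2, sin2 = np.cos(theta) * 0.5, np.sin(theta) * 0.5
    pts = np.empty((4, 2))
    pts[0] = [x_ctr + sin2 * h + cos2 * w, y_ctr + cos2 * h - sin2 * w]
    pts[1] = [x_ctr - sin2 * h + cos2 * w, y_ctr - cos2 * h - sin2 * w]
    pts[2] = 2 * np.array([x_ctr, y_ctr]) - pts[0]  # point-reflect pts[0]
    pts[3] = 2 * np.array([x_ctr, y_ctr]) - pts[1]  # point-reflect pts[1]
    return pts

# A 4x2 box centered at the origin with angle 0 has corners at (+-2, +-1).
print(rotated_box_corners(0.0, 0.0, 4.0, 2.0, 0.0))
```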
+ int t = 0; + for (int i = 1; i < num_in; i++) { + if (p[i].y < p[t].y || (p[i].y == p[t].y && p[i].x < p[t].x)) { + t = i; + } + } + auto& start = p[t]; // starting point + + // Step 2: + // Subtract starting point from every points (for sorting in the next step) + for (int i = 0; i < num_in; i++) { + q[i] = p[i] - start; + } + + // Swap the starting point to position 0 + auto tmp = q[0]; + q[0] = q[t]; + q[t] = tmp; + + // Step 3: + // Sort point 1 ~ num_in according to their relative cross-product values + // (essentially sorting according to angles) + // If the angles are the same, sort according to their distance to origin + T dist[24]; +#if defined(__CUDACC__) || __HCC__ == 1 || __HIP__ == 1 + // compute distance to origin before sort, and sort them together with the + // points + for (int i = 0; i < num_in; i++) { + dist[i] = dot_2d(q[i], q[i]); + } + + // CUDA version + // In the future, we can potentially use thrust + // for sorting here to improve speed (though not guaranteed) + for (int i = 1; i < num_in - 1; i++) { + for (int j = i + 1; j < num_in; j++) { + T crossProduct = cross_2d(q[i], q[j]); + if ((crossProduct < -1e-6) || + (fabs(crossProduct) < 1e-6 && dist[i] > dist[j])) { + auto q_tmp = q[i]; + q[i] = q[j]; + q[j] = q_tmp; + auto dist_tmp = dist[i]; + dist[i] = dist[j]; + dist[j] = dist_tmp; + } + } + } +#else + // CPU version + // std::sort( + // q + 1, q + num_in, [](const Point& A, const Point& B) -> bool { + // T temp = cross_2d(A, B); + + // if (fabs(temp) < 1e-6) { + // return dot_2d(A, A) < dot_2d(B, B); + // } else { + // return temp > 0; + // } + // }); + for (int i = 0; i < num_in; i++) { + dist[i] = dot_2d(q[i], q[i]); + } + + for (int i = 1; i < num_in - 1; i++) { + for (int j = i + 1; j < num_in; j++) { + T crossProduct = cross_2d(q[i], q[j]); + if ((crossProduct < -1e-6) || + (fabs(crossProduct) < 1e-6 && dist[i] > dist[j])) { + auto q_tmp = q[i]; + q[i] = q[j]; + q[j] = q_tmp; + auto dist_tmp = dist[i]; + dist[i] = dist[j]; + dist[j] = dist_tmp; + } + } + } + + // compute distance to origin after sort, since the points are now different. + for (int i = 0; i < num_in; i++) { + dist[i] = dot_2d(q[i], q[i]); + } + +#endif + + // Step 4: + // Make sure there are at least 2 points (that don't overlap with each other) + // in the stack + int k; // index of the non-overlapped second point + for (k = 1; k < num_in; k++) { + if (dist[k] > 1e-8) { + break; + } + } + if (k == num_in) { + // We reach the end, which means the convex hull is just one point + q[0] = p[t]; + return 1; + } + q[1] = q[k]; + int m = 2; // 2 points in the stack + // Step 5: + // Finally we can start the scanning process. + // When a non-convex relationship between the 3 points is found + // (either concave shape or duplicated points), + // we pop the previous point from the stack + // until the 3-point relationship is convex again, or + // until the stack only contains two points + for (int i = k + 1; i < num_in; i++) { + while (m > 1) { + auto q1 = q[i] - q[m - 2], q2 = q[m - 1] - q[m - 2]; + // cross_2d() uses FMA and therefore computes round(round(q1.x*q2.y) - + // q2.x*q1.y) So it may not return 0 even when q1==q2. Therefore we + // compare round(q1.x*q2.y) and round(q2.x*q1.y) directly. (round means + // round to nearest floating point). + if (q1.x * q2.y >= q2.x * q1.y) + m--; + else + break; + } + // Using double also helps, but float can solve the issue for now. 
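The sort-then-scan structure above is the classic Graham scan. For reference, here is a compact Python analogue in standard y-up coordinates (so the hull comes out counterclockwise, whereas the kernel's image coordinates and sign conventions yield a clockwise order); ties in angle are broken by distance to the start point, as in Step 3.

```python
# Compact Python analogue of convex_hull_graham above: pick the lowest
# point, sort the rest by polar angle around it (distance as tie-break),
# then scan, popping turns that are not strictly left. Illustration only.
import math

def cross(o, a, b):
    return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])

def convex_hull(points):
    start = min(points, key=lambda p: (p[1], p[0]))  # min y, then min x
    rest = [p for p in points if p != start]
    rest.sort(key=lambda p: (math.atan2(p[1] - start[1], p[0] - start[0]),
                             (p[0] - start[0]) ** 2 + (p[1] - start[1]) ** 2))
    hull = [start]
    for p in rest:
        while len(hull) > 1 and cross(hull[-2], hull[-1], p) <= 0:
            hull.pop()
        hull.append(p)
    return hull

print(convex_hull([(0, 0), (2, 0), (2, 2), (0, 2), (1, 1)]))
# -> [(0, 0), (2, 0), (2, 2), (0, 2)]; the interior point is dropped
```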
+ // while (m > 1 && cross_2d(q[i] - q[m - 2], q[m - 1] - q[m - 2]) + // >= 0) { + // m--; + // } + q[m++] = q[i]; + } + + // Step 6 (Optional): + // In general sense we need the original coordinates, so we + // need to shift the points back (reverting Step 2) + // But if we're only interested in getting the area/perimeter of the shape + // We can simply return. + if (!shift_to_zero) { + for (int i = 0; i < m; i++) { + q[i] += start; + } + } + + return m; +} + +template +HOST_DEVICE_INLINE T polygon_area(const Point (&q)[24], const int& m) { + if (m <= 2) { + return 0; + } + + T area = 0; + for (int i = 1; i < m - 1; i++) { + area += fabs(cross_2d(q[i] - q[0], q[i + 1] - q[0])); + } + + return area / 2.0; +} + +template +HOST_DEVICE_INLINE T rotated_boxes_intersection( + const RotatedBox& box1, + const RotatedBox& box2) { + // There are up to 4 x 4 + 4 + 4 = 24 intersections (including dups) returned + // from rotated_rect_intersection_pts + Point intersectPts[24], orderedPts[24]; + + Point pts1[4]; + Point pts2[4]; + get_rotated_vertices(box1, pts1); + get_rotated_vertices(box2, pts2); + + int num = get_intersection_points(pts1, pts2, intersectPts); + + if (num <= 2) { + return 0.0; + } + + // Convex Hull to order the intersection points in clockwise order and find + // the contour area. + int num_convex = convex_hull_graham(intersectPts, num, orderedPts, true); + return polygon_area(orderedPts, num_convex); +} + +} // namespace + +template +HOST_DEVICE_INLINE T +single_box_iou_rotated(T const* const box1_raw, T const* const box2_raw) { + // shift center to the middle point to achieve higher precision in result + RotatedBox box1, box2; + auto center_shift_x = (box1_raw[0] + box2_raw[0]) / 2.0; + auto center_shift_y = (box1_raw[1] + box2_raw[1]) / 2.0; + box1.x_ctr = box1_raw[0] - center_shift_x; + box1.y_ctr = box1_raw[1] - center_shift_y; + box1.w = box1_raw[2]; + box1.h = box1_raw[3]; + box1.a = box1_raw[4]; + box2.x_ctr = box2_raw[0] - center_shift_x; + box2.y_ctr = box2_raw[1] - center_shift_y; + box2.w = box2_raw[2]; + box2.h = box2_raw[3]; + box2.a = box2_raw[4]; + + T area1 = box1.w * box1.h; + T area2 = box2.w * box2.h; + if (area1 < 1e-14 || area2 < 1e-14) { + return 0.f; + } + + T intersection = rotated_boxes_intersection(box1, box2); + T iou = intersection / (area1 + area2 - intersection); + return iou; +} + +} // namespace detectron2 diff --git a/mmcv/layers/csrc/cocoeval/cocoeval.cpp b/mmcv/layers/csrc/cocoeval/cocoeval.cpp new file mode 100644 index 0000000..0a5b7b9 --- /dev/null +++ b/mmcv/layers/csrc/cocoeval/cocoeval.cpp @@ -0,0 +1,507 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#include "cocoeval.h" +#include +#include +#include +#include + +using namespace pybind11::literals; + +namespace detectron2 { + +namespace COCOeval { + +// Sort detections from highest score to lowest, such that +// detection_instances[detection_sorted_indices[t]] >= +// detection_instances[detection_sorted_indices[t+1]]. 
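`polygon_area` above is a triangle-fan form of the shoelace formula: the ordered hull is fanned into triangles `(q[0], q[i], q[i+1])` whose cross-product magnitudes sum to twice the enclosed area. The same computation in a few lines of NumPy:

```python
# Triangle-fan (shoelace) polygon area, matching polygon_area above:
# sum |cross(q[i]-q[0], q[i+1]-q[0])| over the fan, then halve.
import numpy as np

def polygon_area(q: np.ndarray) -> float:
    """q: (m, 2) vertices of a convex polygon in order. Returns its area."""
    if len(q) <= 2:
        return 0.0
    v = q[1:] - q[0]  # fan edges from q[0]
    cross = v[:-1, 0] * v[1:, 1] - v[1:, 0] * v[:-1, 1]
    return float(np.abs(cross).sum()) / 2.0

square = np.array([[0.0, 0.0], [2.0, 0.0], [2.0, 2.0], [0.0, 2.0]])
print(polygon_area(square))  # 4.0
```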
Use stable_sort to match +// original COCO API +void SortInstancesByDetectionScore( + const std::vector& detection_instances, + std::vector* detection_sorted_indices) { + detection_sorted_indices->resize(detection_instances.size()); + std::iota( + detection_sorted_indices->begin(), detection_sorted_indices->end(), 0); + std::stable_sort( + detection_sorted_indices->begin(), + detection_sorted_indices->end(), + [&detection_instances](size_t j1, size_t j2) { + return detection_instances[j1].score > detection_instances[j2].score; + }); +} + +// Partition the ground truth objects based on whether or not to ignore them +// based on area +void SortInstancesByIgnore( + const std::array& area_range, + const std::vector& ground_truth_instances, + std::vector* ground_truth_sorted_indices, + std::vector* ignores) { + ignores->clear(); + ignores->reserve(ground_truth_instances.size()); + for (auto o : ground_truth_instances) { + ignores->push_back( + o.ignore || o.area < area_range[0] || o.area > area_range[1]); + } + + ground_truth_sorted_indices->resize(ground_truth_instances.size()); + std::iota( + ground_truth_sorted_indices->begin(), + ground_truth_sorted_indices->end(), + 0); + std::stable_sort( + ground_truth_sorted_indices->begin(), + ground_truth_sorted_indices->end(), + [&ignores](size_t j1, size_t j2) { + return (int)(*ignores)[j1] < (int)(*ignores)[j2]; + }); +} + +// For each IOU threshold, greedily match each detected instance to a ground +// truth instance (if possible) and store the results +void MatchDetectionsToGroundTruth( + const std::vector& detection_instances, + const std::vector& detection_sorted_indices, + const std::vector& ground_truth_instances, + const std::vector& ground_truth_sorted_indices, + const std::vector& ignores, + const std::vector>& ious, + const std::vector& iou_thresholds, + const std::array& area_range, + ImageEvaluation* results) { + // Initialize memory to store return data matches and ignore + const int num_iou_thresholds = iou_thresholds.size(); + const int num_ground_truth = ground_truth_sorted_indices.size(); + const int num_detections = detection_sorted_indices.size(); + std::vector ground_truth_matches( + num_iou_thresholds * num_ground_truth, 0); + std::vector& detection_matches = results->detection_matches; + std::vector& detection_ignores = results->detection_ignores; + std::vector& ground_truth_ignores = results->ground_truth_ignores; + detection_matches.resize(num_iou_thresholds * num_detections, 0); + detection_ignores.resize(num_iou_thresholds * num_detections, false); + ground_truth_ignores.resize(num_ground_truth); + for (auto g = 0; g < num_ground_truth; ++g) { + ground_truth_ignores[g] = ignores[ground_truth_sorted_indices[g]]; + } + + for (auto t = 0; t < num_iou_thresholds; ++t) { + for (auto d = 0; d < num_detections; ++d) { + // information about best match so far (match=-1 -> unmatched) + double best_iou = std::min(iou_thresholds[t], 1 - 1e-10); + int match = -1; + for (auto g = 0; g < num_ground_truth; ++g) { + // if this ground truth instance is already matched and not a + // crowd, it cannot be matched to another detection + if (ground_truth_matches[t * num_ground_truth + g] > 0 && + !ground_truth_instances[ground_truth_sorted_indices[g]].is_crowd) { + continue; + } + + // if detected instance matched to a regular ground truth + // instance, we can break on the first ground truth instance + // tagged as ignore (because they are sorted by the ignore tag) + if (match >= 0 && !ground_truth_ignores[match] && + ground_truth_ignores[g]) 
{ + break; + } + + // if IOU overlap is the best so far, store the match appropriately + if (ious[d][ground_truth_sorted_indices[g]] >= best_iou) { + best_iou = ious[d][ground_truth_sorted_indices[g]]; + match = g; + } + } + // if match was made, store id of match for both detection and + // ground truth + if (match >= 0) { + detection_ignores[t * num_detections + d] = ground_truth_ignores[match]; + detection_matches[t * num_detections + d] = + ground_truth_instances[ground_truth_sorted_indices[match]].id; + ground_truth_matches[t * num_ground_truth + match] = + detection_instances[detection_sorted_indices[d]].id; + } + + // set unmatched detections outside of area range to ignore + const InstanceAnnotation& detection = + detection_instances[detection_sorted_indices[d]]; + detection_ignores[t * num_detections + d] = + detection_ignores[t * num_detections + d] || + (detection_matches[t * num_detections + d] == 0 && + (detection.area < area_range[0] || detection.area > area_range[1])); + } + } + + // store detection score results + results->detection_scores.resize(detection_sorted_indices.size()); + for (size_t d = 0; d < detection_sorted_indices.size(); ++d) { + results->detection_scores[d] = + detection_instances[detection_sorted_indices[d]].score; + } +} + +std::vector EvaluateImages( + const std::vector>& area_ranges, + int max_detections, + const std::vector& iou_thresholds, + const ImageCategoryInstances>& image_category_ious, + const ImageCategoryInstances& + image_category_ground_truth_instances, + const ImageCategoryInstances& + image_category_detection_instances) { + const int num_area_ranges = area_ranges.size(); + const int num_images = image_category_ground_truth_instances.size(); + const int num_categories = + image_category_ious.size() > 0 ? image_category_ious[0].size() : 0; + std::vector detection_sorted_indices; + std::vector ground_truth_sorted_indices; + std::vector ignores; + std::vector results_all( + num_images * num_area_ranges * num_categories); + + // Store results for each image, category, and area range combination. 
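The loop above is the standard COCO greedy assignment: detections are visited in descending score order, and each takes the best still-available ground truth whose IoU clears the threshold. A deliberately simplified Python rendition follows (crowd regions and area-based ignores, which the C++ version also handles, are omitted).

```python
# Simplified rendition of MatchDetectionsToGroundTruth: greedy, score-ordered
# assignment at one IoU threshold. Crowd and ignore handling are left out.
def greedy_match(scores, ious, iou_threshold):
    """scores: per-detection scores; ious[d][g]: detection/GT overlaps.
    Returns matches[d] = matched GT index, or -1 if unmatched."""
    order = sorted(range(len(scores)), key=lambda d: -scores[d])
    gt_taken = set()
    matches = [-1] * len(scores)
    for d in order:
        best_iou, best_g = min(iou_threshold, 1 - 1e-10), -1
        for g in range(len(ious[d])):
            if g in gt_taken:
                continue
            if ious[d][g] >= best_iou:
                best_iou, best_g = ious[d][g], g
        if best_g >= 0:
            gt_taken.add(best_g)
            matches[d] = best_g
    return matches

# Two detections, two GTs: the higher-scoring detection claims GT 0 first.
print(greedy_match([0.9, 0.8], [[0.7, 0.2], [0.6, 0.1]], 0.5))  # [0, -1]
```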
Results + // for each IOU threshold are packed into the same ImageEvaluation object + for (auto i = 0; i < num_images; ++i) { + for (auto c = 0; c < num_categories; ++c) { + const std::vector& ground_truth_instances = + image_category_ground_truth_instances[i][c]; + const std::vector& detection_instances = + image_category_detection_instances[i][c]; + + SortInstancesByDetectionScore( + detection_instances, &detection_sorted_indices); + if ((int)detection_sorted_indices.size() > max_detections) { + detection_sorted_indices.resize(max_detections); + } + + for (size_t a = 0; a < area_ranges.size(); ++a) { + SortInstancesByIgnore( + area_ranges[a], + ground_truth_instances, + &ground_truth_sorted_indices, + &ignores); + + MatchDetectionsToGroundTruth( + detection_instances, + detection_sorted_indices, + ground_truth_instances, + ground_truth_sorted_indices, + ignores, + image_category_ious[i][c], + iou_thresholds, + area_ranges[a], + &results_all + [c * num_area_ranges * num_images + a * num_images + i]); + } + } + } + + return results_all; +} + +// Convert a python list to a vector +template +std::vector list_to_vec(const py::list& l) { + std::vector v(py::len(l)); + for (int i = 0; i < (int)py::len(l); ++i) { + v[i] = l[i].cast(); + } + return v; +} + +// Helper function to Accumulate() +// Considers the evaluation results applicable to a particular category, area +// range, and max_detections parameter setting, which begin at +// evaluations[evaluation_index]. Extracts a sorted list of length n of all +// applicable detection instances concatenated across all images in the dataset, +// which are represented by the outputs evaluation_indices, detection_scores, +// image_detection_indices, and detection_sorted_indices--all of which are +// length n. evaluation_indices[i] stores the applicable index into +// evaluations[] for instance i, which has detection score detection_score[i], +// and is the image_detection_indices[i]'th of the list of detections +// for the image containing i. 
detection_sorted_indices[] defines a sorted +// permutation of the 3 other outputs +int BuildSortedDetectionList( + const std::vector& evaluations, + const int64_t evaluation_index, + const int64_t num_images, + const int max_detections, + std::vector* evaluation_indices, + std::vector* detection_scores, + std::vector* detection_sorted_indices, + std::vector* image_detection_indices) { + assert(evaluations.size() >= evaluation_index + num_images); + + // Extract a list of object instances of the applicable category, area + // range, and max detections requirements such that they can be sorted + image_detection_indices->clear(); + evaluation_indices->clear(); + detection_scores->clear(); + image_detection_indices->reserve(num_images * max_detections); + evaluation_indices->reserve(num_images * max_detections); + detection_scores->reserve(num_images * max_detections); + int num_valid_ground_truth = 0; + for (auto i = 0; i < num_images; ++i) { + const ImageEvaluation& evaluation = evaluations[evaluation_index + i]; + + for (int d = 0; + d < (int)evaluation.detection_scores.size() && d < max_detections; + ++d) { // detected instances + evaluation_indices->push_back(evaluation_index + i); + image_detection_indices->push_back(d); + detection_scores->push_back(evaluation.detection_scores[d]); + } + for (auto ground_truth_ignore : evaluation.ground_truth_ignores) { + if (!ground_truth_ignore) { + ++num_valid_ground_truth; + } + } + } + + // Sort detections by decreasing score, using stable sort to match + // python implementation + detection_sorted_indices->resize(detection_scores->size()); + std::iota( + detection_sorted_indices->begin(), detection_sorted_indices->end(), 0); + std::stable_sort( + detection_sorted_indices->begin(), + detection_sorted_indices->end(), + [&detection_scores](size_t j1, size_t j2) { + return (*detection_scores)[j1] > (*detection_scores)[j2]; + }); + + return num_valid_ground_truth; +} + +// Helper function to Accumulate() +// Compute a precision recall curve given a sorted list of detected instances +// encoded in evaluations, evaluation_indices, detection_scores, +// detection_sorted_indices, image_detection_indices (see +// BuildSortedDetectionList()). Using vectors precisions and recalls +// and temporary storage, output the results into precisions_out, recalls_out, +// and scores_out, which are large buffers containing many precion/recall curves +// for all possible parameter settings, with precisions_out_index and +// recalls_out_index defining the applicable indices to store results. 
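//
// [Editorial note] The std::iota + std::stable_sort idiom used in
// BuildSortedDetectionList() above recurs throughout this file; here is a
// minimal, self-contained sketch of the same argsort pattern (all names in
// this sketch are hypothetical, not part of the patch):
//
//   #include <algorithm>
//   #include <numeric>
//   #include <vector>
//
//   // Return indices that order `scores` from highest to lowest. A stable
//   // sort keeps tied scores in their original order, matching the behavior
//   // of Python's sorted() in the COCO API.
//   std::vector<size_t> ArgsortDescending(const std::vector<double>& scores) {
//     std::vector<size_t> indices(scores.size());
//     std::iota(indices.begin(), indices.end(), 0); // fill with 0, 1, 2, ...
//     std::stable_sort(
//         indices.begin(), indices.end(), [&scores](size_t a, size_t b) {
//           return scores[a] > scores[b];
//         });
//     return indices; // e.g. scores {0.3, 0.9, 0.9} -> indices {1, 2, 0}
//   }
//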
+void ComputePrecisionRecallCurve( + const int64_t precisions_out_index, + const int64_t precisions_out_stride, + const int64_t recalls_out_index, + const std::vector& recall_thresholds, + const int iou_threshold_index, + const int num_iou_thresholds, + const int num_valid_ground_truth, + const std::vector& evaluations, + const std::vector& evaluation_indices, + const std::vector& detection_scores, + const std::vector& detection_sorted_indices, + const std::vector& image_detection_indices, + std::vector* precisions, + std::vector* recalls, + std::vector* precisions_out, + std::vector* scores_out, + std::vector* recalls_out) { + assert(recalls_out->size() > recalls_out_index); + + // Compute precision/recall for each instance in the sorted list of detections + int64_t true_positives_sum = 0, false_positives_sum = 0; + precisions->clear(); + recalls->clear(); + precisions->reserve(detection_sorted_indices.size()); + recalls->reserve(detection_sorted_indices.size()); + assert(!evaluations.empty() || detection_sorted_indices.empty()); + for (auto detection_sorted_index : detection_sorted_indices) { + const ImageEvaluation& evaluation = + evaluations[evaluation_indices[detection_sorted_index]]; + const auto num_detections = + evaluation.detection_matches.size() / num_iou_thresholds; + const auto detection_index = iou_threshold_index * num_detections + + image_detection_indices[detection_sorted_index]; + assert(evaluation.detection_matches.size() > detection_index); + assert(evaluation.detection_ignores.size() > detection_index); + const int64_t detection_match = + evaluation.detection_matches[detection_index]; + const bool detection_ignores = + evaluation.detection_ignores[detection_index]; + const auto true_positive = detection_match > 0 && !detection_ignores; + const auto false_positive = detection_match == 0 && !detection_ignores; + if (true_positive) { + ++true_positives_sum; + } + if (false_positive) { + ++false_positives_sum; + } + + const double recall = + static_cast(true_positives_sum) / num_valid_ground_truth; + recalls->push_back(recall); + const int64_t num_valid_detections = + true_positives_sum + false_positives_sum; + const double precision = num_valid_detections > 0 + ? static_cast(true_positives_sum) / num_valid_detections + : 0.0; + precisions->push_back(precision); + } + + (*recalls_out)[recalls_out_index] = !recalls->empty() ? 
recalls->back() : 0; + + for (int64_t i = static_cast(precisions->size()) - 1; i > 0; --i) { + if ((*precisions)[i] > (*precisions)[i - 1]) { + (*precisions)[i - 1] = (*precisions)[i]; + } + } + + // Sample the per instance precision/recall list at each recall threshold + for (size_t r = 0; r < recall_thresholds.size(); ++r) { + // first index in recalls >= recall_thresholds[r] + std::vector::iterator low = std::lower_bound( + recalls->begin(), recalls->end(), recall_thresholds[r]); + size_t precisions_index = low - recalls->begin(); + + const auto results_ind = precisions_out_index + r * precisions_out_stride; + assert(results_ind < precisions_out->size()); + assert(results_ind < scores_out->size()); + if (precisions_index < precisions->size()) { + (*precisions_out)[results_ind] = (*precisions)[precisions_index]; + (*scores_out)[results_ind] = + detection_scores[detection_sorted_indices[precisions_index]]; + } else { + (*precisions_out)[results_ind] = 0; + (*scores_out)[results_ind] = 0; + } + } +} +py::dict Accumulate( + const py::object& params, + const std::vector& evaluations) { + const std::vector recall_thresholds = + list_to_vec(params.attr("recThrs")); + const std::vector max_detections = + list_to_vec(params.attr("maxDets")); + const int num_iou_thresholds = py::len(params.attr("iouThrs")); + const int num_recall_thresholds = py::len(params.attr("recThrs")); + const int num_categories = params.attr("useCats").cast() == 1 + ? py::len(params.attr("catIds")) + : 1; + const int num_area_ranges = py::len(params.attr("areaRng")); + const int num_max_detections = py::len(params.attr("maxDets")); + const int num_images = py::len(params.attr("imgIds")); + + std::vector precisions_out( + num_iou_thresholds * num_recall_thresholds * num_categories * + num_area_ranges * num_max_detections, + -1); + std::vector recalls_out( + num_iou_thresholds * num_categories * num_area_ranges * + num_max_detections, + -1); + std::vector scores_out( + num_iou_thresholds * num_recall_thresholds * num_categories * + num_area_ranges * num_max_detections, + -1); + + // Consider the list of all detected instances in the entire dataset in one + // large list. evaluation_indices, detection_scores, + // image_detection_indices, and detection_sorted_indices all have the same + // length as this list, such that each entry corresponds to one detected + // instance + std::vector evaluation_indices; // indices into evaluations[] + std::vector detection_scores; // detection scores of each instance + std::vector detection_sorted_indices; // sorted indices of all + // instances in the dataset + std::vector + image_detection_indices; // indices into the list of detected instances in + // the same image as each instance + std::vector precisions, recalls; + + for (auto c = 0; c < num_categories; ++c) { + for (auto a = 0; a < num_area_ranges; ++a) { + for (auto m = 0; m < num_max_detections; ++m) { + // The COCO PythonAPI assumes evaluations[] (the return value of + // COCOeval::EvaluateImages() is one long list storing results for each + // combination of category, area range, and image id, with categories in + // the outermost loop and images in the innermost loop. 
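//
// [Editorial note] A worked example of the layout described above, using
// hypothetical sizes num_area_ranges = 4 and num_images = 3: the block of
// results for category c = 2 and area range a = 1 starts at index
// 2 * 4 * 3 + 1 * 3 = 27, and evaluations[27 + i] holds the result for
// image i (i = 0..2) of that (category, area range) pair, which is exactly
// how BuildSortedDetectionList() reads evaluations[evaluation_index + i].
//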
+          const int64_t evaluations_index =
+              c * num_area_ranges * num_images + a * num_images;
+          int num_valid_ground_truth = BuildSortedDetectionList(
+              evaluations,
+              evaluations_index,
+              num_images,
+              max_detections[m],
+              &evaluation_indices,
+              &detection_scores,
+              &detection_sorted_indices,
+              &image_detection_indices);
+
+          if (num_valid_ground_truth == 0) {
+            continue;
+          }
+
+          for (auto t = 0; t < num_iou_thresholds; ++t) {
+            // recalls_out is a flattened vector representing a
+            // num_iou_thresholds X num_categories X num_area_ranges X
+            // num_max_detections matrix
+            const int64_t recalls_out_index =
+                t * num_categories * num_area_ranges * num_max_detections +
+                c * num_area_ranges * num_max_detections +
+                a * num_max_detections + m;
+
+            // precisions_out and scores_out are flattened vectors
+            // representing a num_iou_thresholds X num_recall_thresholds X
+            // num_categories X num_area_ranges X num_max_detections matrix
+            const int64_t precisions_out_stride =
+                num_categories * num_area_ranges * num_max_detections;
+            const int64_t precisions_out_index = t * num_recall_thresholds *
+                    num_categories * num_area_ranges * num_max_detections +
+                c * num_area_ranges * num_max_detections +
+                a * num_max_detections + m;
+
+            ComputePrecisionRecallCurve(
+                precisions_out_index,
+                precisions_out_stride,
+                recalls_out_index,
+                recall_thresholds,
+                t,
+                num_iou_thresholds,
+                num_valid_ground_truth,
+                evaluations,
+                evaluation_indices,
+                detection_scores,
+                detection_sorted_indices,
+                image_detection_indices,
+                &precisions,
+                &recalls,
+                &precisions_out,
+                &scores_out,
+                &recalls_out);
+          }
+        }
+      }
+    }
+
+  time_t rawtime;
+  struct tm local_time;
+  std::array<char, 200> buffer;
+  time(&rawtime);
+#ifdef _WIN32
+  localtime_s(&local_time, &rawtime);
+#else
+  localtime_r(&rawtime, &local_time);
+#endif
+  strftime(buffer.data(), 200, "%Y-%m-%d %H:%M:%S", &local_time);
+  return py::dict(
+      "params"_a = params,
+      "counts"_a = std::vector<int64_t>(
+          {num_iou_thresholds,
+           num_recall_thresholds,
+           num_categories,
+           num_area_ranges,
+           num_max_detections}),
+      "date"_a = buffer,
+      "precision"_a = precisions_out,
+      "recall"_a = recalls_out,
+      "scores"_a = scores_out);
+}
+
+} // namespace COCOeval
+
+} // namespace detectron2
diff --git a/mmcv/layers/csrc/cocoeval/cocoeval.h b/mmcv/layers/csrc/cocoeval/cocoeval.h
new file mode 100644
index 0000000..db246e4
--- /dev/null
+++ b/mmcv/layers/csrc/cocoeval/cocoeval.h
@@ -0,0 +1,88 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#pragma once
+
+#include <pybind11/numpy.h>
+#include <pybind11/pybind11.h>
+#include <pybind11/stl.h>
+#include <pybind11/stl_bind.h>
+#include <vector>
+
+namespace py = pybind11;
+
+namespace detectron2 {
+
+namespace COCOeval {
+
+// Annotation data for a single object instance in an image
+struct InstanceAnnotation {
+  InstanceAnnotation(
+      uint64_t id,
+      double score,
+      double area,
+      bool is_crowd,
+      bool ignore)
+      : id{id}, score{score}, area{area}, is_crowd{is_crowd}, ignore{ignore} {}
+  uint64_t id;
+  double score = 0.;
+  double area = 0.;
+  bool is_crowd = false;
+  bool ignore = false;
+};
+
+// Stores intermediate results for evaluating detection results for a single
+// image that has D detected instances and G ground truth instances. This
+// stores matches between detected and ground truth instances
+struct ImageEvaluation {
+  // For each of the D detected instances, the id of the matched ground truth
+  // instance, or 0 if unmatched
+  std::vector<uint64_t> detection_matches;
+
+  // The detection score of each of the D detected instances
+  std::vector<double> detection_scores;
+
+  // Marks whether or not each of G instances was ignored from evaluation
+  // (e.g., because it's outside area_range)
+  std::vector<bool> ground_truth_ignores;
+
+  // Marks whether or not each of D instances was ignored from evaluation
+  // (e.g., because it's outside aRng)
+  std::vector<bool> detection_ignores;
+};
+
+template <class T>
+using ImageCategoryInstances = std::vector<std::vector<std::vector<T>>>;
+
+// C++ implementation of COCO API cocoeval.py::COCOeval.evaluateImg(). For
+// each combination of image, category, area range settings, and IOU
+// thresholds to evaluate, it matches detected instances to ground truth
+// instances and stores the results into a vector of ImageEvaluation results,
+// which will be interpreted by the COCOeval::Accumulate() function to
+// produce precision-recall curves. The parameters of nested vectors have the
+// following semantics:
+//   image_category_ious[i][c][d][g] is the intersection over union of the
+//     d'th detected instance and g'th ground truth instance of
+//     category category_ids[c] in image image_ids[i]
+//   image_category_ground_truth_instances[i][c] is a vector of ground truth
+//     instances in image image_ids[i] of category category_ids[c]
+//   image_category_detection_instances[i][c] is a vector of detected
+//     instances in image image_ids[i] of category category_ids[c]
+std::vector<ImageEvaluation> EvaluateImages(
+    const std::vector<std::array<double, 2>>& area_ranges, // vector of 2-tuples
+    int max_detections,
+    const std::vector<double>& iou_thresholds,
+    const ImageCategoryInstances<std::vector<double>>& image_category_ious,
+    const ImageCategoryInstances<InstanceAnnotation>&
+        image_category_ground_truth_instances,
+    const ImageCategoryInstances<InstanceAnnotation>&
+        image_category_detection_instances);
+
+// C++ implementation of COCOeval.accumulate(), which generates precision
+// recall curves for each set of category, IOU threshold, detection area
+// range, and max number of detections parameters. It is assumed that the
+// parameter evaluations is the return value of the function
+// COCOeval::EvaluateImages(), which was called with the same parameter
+// settings params
+py::dict Accumulate(
+    const py::object& params,
+    const std::vector<ImageEvaluation>& evaluations);
+
+} // namespace COCOeval
+} // namespace detectron2
diff --git a/mmcv/layers/csrc/cuda_version.cu b/mmcv/layers/csrc/cuda_version.cu
new file mode 100644
index 0000000..b74fdda
--- /dev/null
+++ b/mmcv/layers/csrc/cuda_version.cu
@@ -0,0 +1,26 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+
+#include <cuda_runtime_api.h>
+
+namespace detectron2 {
+int get_cudart_version() {
+// Not a ROCM platform: Either HIP is not used, or
+// it is used, but platform is not ROCM (i.e. it is CUDA)
+#if !defined(__HIP_PLATFORM_AMD__)
+  return CUDART_VERSION;
+#else
+  int version = 0;
+
+#if HIP_VERSION_MAJOR != 0
+  // Create a convention similar to that of CUDA, as assumed by other
+  // parts of the code.
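//
// [Editorial note] For reference, CUDART_VERSION packs the CUDA runtime
// version as major * 1000 + minor * 10 (e.g. CUDA 11.8 -> 11080), while the
// HIP branch below packs major * 100 + minor, so e.g. HIP 5.2 -> 502.
//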
+ + version = HIP_VERSION_MINOR; + version += (HIP_VERSION_MAJOR * 100); +#else + hipRuntimeGetVersion(&version); +#endif + return version; +#endif +} +} // namespace detectron2 diff --git a/mmcv/layers/csrc/deformable/deform_conv.h b/mmcv/layers/csrc/deformable/deform_conv.h new file mode 100644 index 0000000..965c1bf --- /dev/null +++ b/mmcv/layers/csrc/deformable/deform_conv.h @@ -0,0 +1,377 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#pragma once +#include + +namespace detectron2 { + +#if defined(WITH_CUDA) || defined(WITH_HIP) +int deform_conv_forward_cuda( + at::Tensor input, + at::Tensor weight, + at::Tensor offset, + at::Tensor output, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step); + +int deform_conv_backward_input_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradInput, + at::Tensor gradOffset, + at::Tensor weight, + at::Tensor columns, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step); + +int deform_conv_backward_parameters_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + float scale, + int im2col_step); + +void modulated_deform_conv_cuda_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor output, + at::Tensor columns, + int kernel_h, + int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const bool with_bias); + +void modulated_deform_conv_cuda_backward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor columns, + at::Tensor grad_input, + at::Tensor grad_weight, + at::Tensor grad_bias, + at::Tensor grad_offset, + at::Tensor grad_mask, + at::Tensor grad_output, + int kernel_h, + int kernel_w, + int stride_h, + int stride_w, + int pad_h, + int pad_w, + int dilation_h, + int dilation_w, + int group, + int deformable_group, + const bool with_bias); + +#endif + +inline int deform_conv_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor offset, + at::Tensor output, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + if (input.is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return deform_conv_forward_cuda( + input, + weight, + offset, + output, + columns, + ones, + kW, + kH, + dW, + dH, + padW, + padH, + dilationW, + dilationH, + group, + deformable_group, + im2col_step); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + AT_ERROR("This operator is not implemented on CPU"); +} + +inline int deform_conv_backward_input( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradInput, + 
at::Tensor gradOffset, + at::Tensor weight, + at::Tensor columns, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + if (gradOutput.is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + TORCH_CHECK(input.is_cuda(), "input tensor is not on GPU!"); + TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return deform_conv_backward_input_cuda( + input, + offset, + gradOutput, + gradInput, + gradOffset, + weight, + columns, + kW, + kH, + dW, + dH, + padW, + padH, + dilationW, + dilationH, + group, + deformable_group, + im2col_step); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + AT_ERROR("This operator is not implemented on CPU"); +} + +inline int deform_conv_backward_filter( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + float scale, + int im2col_step) { + if (gradOutput.is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + TORCH_CHECK(input.is_cuda(), "input tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return deform_conv_backward_parameters_cuda( + input, + offset, + gradOutput, + gradWeight, + columns, + ones, + kW, + kH, + dW, + dH, + padW, + padH, + dilationW, + dilationH, + group, + deformable_group, + scale, + im2col_step); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + AT_ERROR("This operator is not implemented on CPU"); +} + +inline void modulated_deform_conv_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor output, + at::Tensor columns, + int kernel_h, + int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const bool with_bias) { + if (input.is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!"); + TORCH_CHECK(bias.is_cuda(), "bias tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return modulated_deform_conv_cuda_forward( + input, + weight, + bias, + ones, + offset, + mask, + output, + columns, + kernel_h, + kernel_w, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + group, + deformable_group, + with_bias); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + AT_ERROR("This operator is not implemented on CPU"); +} + +inline void modulated_deform_conv_backward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor columns, + at::Tensor grad_input, + at::Tensor grad_weight, + at::Tensor grad_bias, + at::Tensor grad_offset, + at::Tensor grad_mask, + at::Tensor grad_output, + int kernel_h, + int kernel_w, + int stride_h, + int stride_w, + int pad_h, + int pad_w, + int dilation_h, + int dilation_w, + int group, + int deformable_group, + const bool with_bias) { + if (grad_output.is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + TORCH_CHECK(input.is_cuda(), "input tensor is 
not on GPU!"); + TORCH_CHECK(weight.is_cuda(), "weight tensor is not on GPU!"); + TORCH_CHECK(bias.is_cuda(), "bias tensor is not on GPU!"); + TORCH_CHECK(offset.is_cuda(), "offset tensor is not on GPU!"); + return modulated_deform_conv_cuda_backward( + input, + weight, + bias, + ones, + offset, + mask, + columns, + grad_input, + grad_weight, + grad_bias, + grad_offset, + grad_mask, + grad_output, + kernel_h, + kernel_w, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + group, + deformable_group, + with_bias); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + AT_ERROR("This operator is not implemented on CPU"); +} + +} // namespace detectron2 diff --git a/mmcv/layers/csrc/deformable/deform_conv_cuda.cu b/mmcv/layers/csrc/deformable/deform_conv_cuda.cu new file mode 100644 index 0000000..2072bb8 --- /dev/null +++ b/mmcv/layers/csrc/deformable/deform_conv_cuda.cu @@ -0,0 +1,1223 @@ +// Copyright (c) Facebook, Inc. and its affiliates. + +// modified from +// https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda.cpp +// Original license: Apache 2.0 + +// modify from +// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda.c +// Original license: Apache 2.0 + +#include + +#include "deform_conv.h" + +#include +#include + +namespace detectron2 { + +void deformable_im2col( + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor data_col); + +void deformable_col2im( + const at::Tensor data_col, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor grad_im); + +void deformable_col2im_coord( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor grad_offset); + +void modulated_deformable_im2col_cuda( + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor data_col); + +void modulated_deformable_col2im_cuda( + const at::Tensor data_col, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const 
int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_im); + +void modulated_deformable_col2im_coord_cuda( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_offset, + at::Tensor grad_mask); + +void shape_check( + at::Tensor input, + at::Tensor offset, + at::Tensor* gradOutput, + at::Tensor weight, + int kH, + int kW, + int dH, + int dW, + int padH, + int padW, + int dilationH, + int dilationW, + int group, + int deformable_group) { + TORCH_CHECK( + weight.ndimension() == 4, + "4D weight tensor (nOutputPlane,nInputPlane,kH,kW) expected, " + "but got: %s", + weight.ndimension()); + + TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + TORCH_CHECK( + kW > 0 && kH > 0, + "kernel size should be greater than zero, but got kH: %d kW: %d", + kH, + kW); + + TORCH_CHECK( + (weight.size(2) == kH && weight.size(3) == kW), + "kernel size should be consistent with weight, ", + "but got kH: %d kW: %d weight.size(2): %d, weight.size(3): %d", + kH, + kW, + weight.size(2), + weight.size(3)); + + TORCH_CHECK( + dW > 0 && dH > 0, + "stride should be greater than zero, but got dH: %d dW: %d", + dH, + dW); + + TORCH_CHECK( + dilationW > 0 && dilationH > 0, + "dilation should be greater than 0, but got dilationH: %d dilationW: %d", + dilationH, + dilationW); + + int ndim = input.ndimension(); + int dimf = 0; + int dimh = 1; + int dimw = 2; + + if (ndim == 4) { + dimf++; + dimh++; + dimw++; + } + + TORCH_CHECK( + ndim == 3 || ndim == 4, + "3D or 4D input tensor expected but got: %s", + ndim); + + long nInputPlane = weight.size(1) * group; + long inputHeight = input.size(dimh); + long inputWidth = input.size(dimw); + long nOutputPlane = weight.size(0); + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + + TORCH_CHECK( + nInputPlane % deformable_group == 0, + "input channels must divide deformable group size"); + + if (outputWidth < 1 || outputHeight < 1) + AT_ERROR( + "Given input size: (%ld x %ld x %ld). " + "Calculated output size: (%ld x %ld x %ld). 
Output size is too small", + nInputPlane, + inputHeight, + inputWidth, + nOutputPlane, + outputHeight, + outputWidth); + + TORCH_CHECK( + input.size(1) == nInputPlane, + "invalid number of input planes, expected: %d, but got: %d", + nInputPlane, + input.size(1)); + + TORCH_CHECK( + (inputHeight + 2 * padH >= kH && inputWidth + 2 * padW >= kW), + "input image is smaller than kernel"); + + TORCH_CHECK( + (offset.size(2) == outputHeight && offset.size(3) == outputWidth), + "invalid spatial size of offset, expected height: %d width: %d, but " + "got height: %d width: %d", + outputHeight, + outputWidth, + offset.size(2), + offset.size(3)); + + TORCH_CHECK( + (offset.size(1) == deformable_group * 2 * kH * kW), + "invalid number of channels of offset"); + + if (gradOutput != NULL) { + TORCH_CHECK( + gradOutput->size(dimf) == nOutputPlane, + "invalid number of gradOutput planes, expected: %d, but got: %d", + nOutputPlane, + gradOutput->size(dimf)); + + TORCH_CHECK( + (gradOutput->size(dimh) == outputHeight && + gradOutput->size(dimw) == outputWidth), + "invalid size of gradOutput, expected height: %d width: %d , but " + "got height: %d width: %d", + outputHeight, + outputWidth, + gradOutput->size(dimh), + gradOutput->size(dimw)); + } +} + +int deform_conv_forward_cuda( + at::Tensor input, + at::Tensor weight, + at::Tensor offset, + at::Tensor output, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + // todo: resize columns to include im2col: done + // todo: add im2col_step as input + // todo: add new output buffer and transpose it to output (or directly + // transpose output) todo: possibly change data indexing because of + // parallel_imgs + + shape_check( + input, + offset, + NULL, + weight, + kH, + kW, + dH, + dW, + padH, + padW, + dilationH, + dilationW, + group, + deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + weight = weight.contiguous(); + + int batch = 1; + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input.unsqueeze_(0); + offset.unsqueeze_(0); + } + + // todo: assert batchsize dividable by im2col_step + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = weight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset"); + + output = output.view( + {batchSize / im2col_step, + im2col_step, + nOutputPlane, + outputHeight, + outputWidth}); + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < outputHeight * outputWidth) { + ones = at::ones({outputHeight, outputWidth}, input.options()); + } + + input = input.view( + {batchSize / im2col_step, + im2col_step, + nInputPlane, + inputHeight, + inputWidth}); + offset = offset.view( + {batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + + at::Tensor output_buffer = at::zeros( + {batchSize / im2col_step, + nOutputPlane, + im2col_step * outputHeight, + outputWidth}, + output.options()); + + output_buffer = output_buffer.view( + {output_buffer.size(0), + group, + 
output_buffer.size(1) / group, + output_buffer.size(2), + output_buffer.size(3)}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + deformable_im2col( + input[elt], + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + columns); + + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view( + {group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + + for (int g = 0; g < group; g++) { + output_buffer[elt][g] = output_buffer[elt][g] + .flatten(1) + .addmm_(weight[g].flatten(1), columns[g]) + .view_as(output_buffer[elt][g]); + } + } + + output_buffer = output_buffer.view( + {output_buffer.size(0), + output_buffer.size(1) * output_buffer.size(2), + output_buffer.size(3), + output_buffer.size(4)}); + + output_buffer = output_buffer.view( + {batchSize / im2col_step, + nOutputPlane, + im2col_step, + outputHeight, + outputWidth}); + output_buffer.transpose_(1, 2); + output.copy_(output_buffer); + output = output.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + output = output.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); + } + + return 1; +} + +int deform_conv_backward_input_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradInput, + at::Tensor gradOffset, + at::Tensor weight, + at::Tensor columns, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + shape_check( + input, + offset, + &gradOutput, + weight, + kH, + kW, + dH, + dW, + padH, + padW, + dilationH, + dilationW, + group, + deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + gradOutput = gradOutput.contiguous(); + weight = weight.contiguous(); + + int batch = 1; + + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input = input.view({1, input.size(0), input.size(1), input.size(2)}); + offset = offset.view({1, offset.size(0), offset.size(1), offset.size(2)}); + gradOutput = gradOutput.view( + {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)}); + } + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = weight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + TORCH_CHECK((offset.size(0) == batchSize), 3, "invalid batch size of offset"); + gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + // change order of grad output + gradOutput = gradOutput.view( + {batchSize / im2col_step, + im2col_step, + nOutputPlane, + outputHeight, + outputWidth}); + gradOutput.transpose_(1, 2); + + gradInput = gradInput.view( + {batchSize / im2col_step, + im2col_step, + nInputPlane, + inputHeight, + inputWidth}); + input = input.view( + 
{batchSize / im2col_step, + im2col_step, + nInputPlane, + inputHeight, + inputWidth}); + gradOffset = gradOffset.view( + {batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + offset = offset.view( + {batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + // divide into groups + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view( + {group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + gradOutput = gradOutput.view( + {gradOutput.size(0), + group, + gradOutput.size(1) / group, + gradOutput.size(2), + gradOutput.size(3), + gradOutput.size(4)}); + + for (int g = 0; g < group; g++) { + columns[g] = columns[g].addmm_( + weight[g].flatten(1).transpose(0, 1), + gradOutput[elt][g].flatten(1), + 0.0f, + 1.0f); + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + gradOutput = gradOutput.view( + {gradOutput.size(0), + gradOutput.size(1) * gradOutput.size(2), + gradOutput.size(3), + gradOutput.size(4), + gradOutput.size(5)}); + + deformable_col2im_coord( + columns, + input[elt], + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + gradOffset[elt]); + + deformable_col2im( + columns, + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + gradInput[elt]); + } + + gradOutput.transpose_(1, 2); + gradOutput = + gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + gradOffset = gradOffset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + gradInput = gradInput.view({nInputPlane, inputHeight, inputWidth}); + offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); + gradOffset = + gradOffset.view({offset.size(1), offset.size(2), offset.size(3)}); + } + + return 1; +} + +int deform_conv_backward_parameters_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + float scale, + int im2col_step) { + // todo: transpose and reshape outGrad + // todo: reshape columns + // todo: add im2col_step as input + + shape_check( + input, + offset, + &gradOutput, + gradWeight, + kH, + kW, + dH, + dW, + padH, + padW, + dilationH, + dilationW, + group, + deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + gradOutput = gradOutput.contiguous(); + + int batch = 1; + + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input = input.view( + at::IntList({1, input.size(0), input.size(1), input.size(2)})); + gradOutput = gradOutput.view( + {1, gradOutput.size(0), gradOutput.size(1), 
gradOutput.size(2)}); + } + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = gradWeight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset"); + + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + gradOutput = gradOutput.view( + {batchSize / im2col_step, + im2col_step, + nOutputPlane, + outputHeight, + outputWidth}); + gradOutput.transpose_(1, 2); + + at::Tensor gradOutputBuffer = at::zeros_like(gradOutput); + gradOutputBuffer = gradOutputBuffer.view( + {batchSize / im2col_step, + nOutputPlane, + im2col_step, + outputHeight, + outputWidth}); + gradOutputBuffer.copy_(gradOutput); + // gradOutput is not contiguous, so we do reshape (instead of view) next + gradOutputBuffer = gradOutputBuffer.reshape( + {batchSize / im2col_step, + nOutputPlane, + im2col_step * outputHeight, + outputWidth}); + + gradOutput.transpose_(1, 2); + gradOutput = + gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + input = input.view( + {batchSize / im2col_step, + im2col_step, + nInputPlane, + inputHeight, + inputWidth}); + offset = offset.view( + {batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + deformable_im2col( + input[elt], + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + columns); + + // divide into group + gradOutputBuffer = gradOutputBuffer.view( + {gradOutputBuffer.size(0), + group, + gradOutputBuffer.size(1) / group, + gradOutputBuffer.size(2), + gradOutputBuffer.size(3)}); + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + gradWeight = gradWeight.view( + {group, + gradWeight.size(0) / group, + gradWeight.size(1), + gradWeight.size(2), + gradWeight.size(3)}); + + for (int g = 0; g < group; g++) { + gradWeight[g] = gradWeight[g] + .flatten(1) + .addmm_( + gradOutputBuffer[elt][g].flatten(1), + columns[g].transpose(1, 0), + 1.0, + scale) + .view_as(gradWeight[g]); + } + gradOutputBuffer = gradOutputBuffer.view( + {gradOutputBuffer.size(0), + gradOutputBuffer.size(1) * gradOutputBuffer.size(2), + gradOutputBuffer.size(3), + gradOutputBuffer.size(4)}); + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + gradWeight = gradWeight.view( + {gradWeight.size(0) * gradWeight.size(1), + gradWeight.size(2), + gradWeight.size(3), + gradWeight.size(4)}); + } + + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + } + + return 1; +} + +void modulated_deform_conv_cuda_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor output, + at::Tensor columns, + int kernel_h, + int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const 
int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const bool with_bias) { + shape_check( + input, + offset, + NULL, + weight, + kernel_h, + kernel_w, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + group, + deformable_group); + + TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + + const int channels_out = weight.size(0); + const int channels_kernel = weight.size(1); + const int kernel_h_ = weight.size(2); + const int kernel_w_ = weight.size(3); + + if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) + AT_ERROR( + "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", + kernel_h_, + kernel_w, + kernel_h_, + kernel_w_); + if (channels != channels_kernel * group) + AT_ERROR( + "Input shape and kernel channels wont match: (%d vs %d).", + channels, + channels_kernel * group); + + const int height_out = + (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; + const int width_out = + (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; + + // mask shape check + TORCH_CHECK( + (mask.size(2) == height_out && mask.size(3) == width_out), + "invalid spatial size of mask, expected height: %d width: %d, but " + "got height: %d width: %d", + height_out, + width_out, + mask.size(2), + mask.size(3)); + + TORCH_CHECK( + (mask.size(1) == deformable_group * kernel_h * kernel_w), + "invalid number of channels of mask"); + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < height_out * width_out) { + // Resize plane and fill with ones... 
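//
// [Editorial note] As far as this patch shows, the `ones` buffer resized
// here is not consumed by the forward pass itself (the bias is added by
// broadcasting at the end of this function); it is the same kind of buffer
// that modulated_deform_conv_cuda_backward below multiplies against
// grad_output[b][g].flatten(1) via addmm_ to sum the gradient over spatial
// positions when accumulating grad_bias.
//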
+ ones = at::ones({height_out, width_out}, input.options()); + } + + // resize output + output = output.view({batch, channels_out, height_out, width_out}).zero_(); + // resize temporary columns + columns = at::zeros( + {channels * kernel_h * kernel_w, 1 * height_out * width_out}, + input.options()); + + output = output.view( + {output.size(0), + group, + output.size(1) / group, + output.size(2), + output.size(3)}); + + for (int b = 0; b < batch; b++) { + modulated_deformable_im2col_cuda( + input[b], + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + columns); + + // divide into group + weight = weight.view( + {group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + + for (int g = 0; g < group; g++) { + output[b][g] = output[b][g] + .flatten(1) + .addmm_(weight[g].flatten(1), columns[g]) + .view_as(output[b][g]); + } + + weight = weight.view( + {weight.size(0) * weight.size(1), + weight.size(2), + weight.size(3), + weight.size(4)}); + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + } + + output = output.view( + {output.size(0), + output.size(1) * output.size(2), + output.size(3), + output.size(4)}); + + if (with_bias) { + output += bias.view({1, bias.size(0), 1, 1}); + } +} + +void modulated_deform_conv_cuda_backward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor columns, + at::Tensor grad_input, + at::Tensor grad_weight, + at::Tensor grad_bias, + at::Tensor grad_offset, + at::Tensor grad_mask, + at::Tensor grad_output, + int kernel_h, + int kernel_w, + int stride_h, + int stride_w, + int pad_h, + int pad_w, + int dilation_h, + int dilation_w, + int group, + int deformable_group, + const bool with_bias) { + shape_check( + input, + offset, + &grad_output, + weight, + kernel_h, + kernel_w, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + group, + deformable_group); + + TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + + const int channels_kernel = weight.size(1); + const int kernel_h_ = weight.size(2); + const int kernel_w_ = weight.size(3); + if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) + AT_ERROR( + "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", + kernel_h_, + kernel_w, + kernel_h_, + kernel_w_); + if (channels != channels_kernel * group) + AT_ERROR( + "Input shape and kernel channels wont match: (%d vs %d).", + channels, + channels_kernel * group); + + const int height_out = + (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; + const int width_out = + (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; + + // mask shape check + TORCH_CHECK( + (mask.size(2) == height_out && mask.size(3) == width_out), + "invalid spatial size of mask, expected height: %d width: %d, but " + "got height: %d width: %d", + height_out, + width_out, + mask.size(2), + mask.size(3)); + + TORCH_CHECK( + (mask.size(1) == deformable_group * kernel_h * kernel_w), + "invalid number of channels of mask"); + + if 
(ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < height_out * width_out) { + // Resize plane and fill with ones... + ones = at::ones({height_out, width_out}, input.options()); + } + + grad_input = grad_input.view({batch, channels, height, width}); + columns = at::zeros( + {channels * kernel_h * kernel_w, height_out * width_out}, + input.options()); + + grad_output = grad_output.view( + {grad_output.size(0), + group, + grad_output.size(1) / group, + grad_output.size(2), + grad_output.size(3)}); + + for (int b = 0; b < batch; b++) { + // divide int group + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view( + {group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + + for (int g = 0; g < group; g++) { + columns[g].addmm_( + weight[g].flatten(1).transpose(0, 1), + grad_output[b][g].flatten(1), + 0.0f, + 1.0f); + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + weight = weight.view( + {weight.size(0) * weight.size(1), + weight.size(2), + weight.size(3), + weight.size(4)}); + + // gradient w.r.t. input coordinate data + modulated_deformable_col2im_coord_cuda( + columns, + input[b], + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + grad_offset[b], + grad_mask[b]); + // gradient w.r.t. input data + modulated_deformable_col2im_cuda( + columns, + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + grad_input[b]); + + // gradient w.r.t. weight, dWeight should accumulate across the batch and + // group + modulated_deformable_im2col_cuda( + input[b], + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + columns); + + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + grad_weight = grad_weight.view( + {group, + grad_weight.size(0) / group, + grad_weight.size(1), + grad_weight.size(2), + grad_weight.size(3)}); + if (with_bias) + grad_bias = grad_bias.view({group, grad_bias.size(0) / group}); + + for (int g = 0; g < group; g++) { + grad_weight[g] = + grad_weight[g] + .flatten(1) + .addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1)) + .view_as(grad_weight[g]); + if (with_bias) { + grad_bias[g] = + grad_bias[g] + .view({-1, 1}) + .addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1})) + .view(-1); + } + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + grad_weight = grad_weight.view( + {grad_weight.size(0) * grad_weight.size(1), + grad_weight.size(2), + grad_weight.size(3), + grad_weight.size(4)}); + if (with_bias) + grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)}); + } + grad_output = grad_output.view( + {grad_output.size(0) * grad_output.size(1), + grad_output.size(2), + grad_output.size(3), + grad_output.size(4)}); +} + +} // namespace detectron2 diff --git a/mmcv/layers/csrc/deformable/deform_conv_cuda_kernel.cu b/mmcv/layers/csrc/deformable/deform_conv_cuda_kernel.cu new file mode 100644 index 0000000..f299c7a --- /dev/null +++ b/mmcv/layers/csrc/deformable/deform_conv_cuda_kernel.cu @@ -0,0 +1,1288 @@ +// Copyright (c) Facebook, Inc. 
and its affiliates. + +// modified from +// https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu +// Original license: Apache 2.0 +// clang-format off + +// modify from +// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu + +/*! + ******************* BEGIN Caffe Copyright Notice and Disclaimer ***************** + * + * COPYRIGHT + * + * All contributions by the University of California: + * Copyright (c) 2014-2017 The Regents of the University of California (Regents) + * All rights reserved. + * + * All other contributions: + * Copyright (c) 2014-2017, the respective contributors + * All rights reserved. + * + * Caffe uses a shared copyright model: each contributor holds copyright over + * their contributions to Caffe. The project versioning records all such + * contribution and copyright details. If a contributor wants to further mark + * their specific copyright on a particular contribution, they should indicate + * their copyright solely in the commit message of the change when it is + * committed. + * + * LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + *DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + *OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + *OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * CONTRIBUTION AGREEMENT + * + * By contributing to the BVLC/caffe repository through pull-request, comment, + * or otherwise, the contributor releases their content to the + * license and copyright terms herein. + * + ***************** END Caffe Copyright Notice and Disclaimer ********************* + * + * Copyright (c) 2018 Microsoft + * Licensed under The MIT License [see LICENSE for details] + * \file modulated_deformable_im2col.cuh + * \brief Function definitions of converting an image to + * column matrix based on kernel, padding, dilation, and offset. + * These functions are mainly used in deformable convolution operators. 
+ * \ref: https://arxiv.org/abs/1703.06211 + * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng + */ + +#include +#include +#include +#include +#include +#include + +using namespace at; + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ + i += blockDim.x * gridDim.x) + + +namespace { + +const int CUDA_NUM_THREADS = 1024; +const int kMaxGridNum = 65535; + +inline int GET_BLOCKS(const int N) { + return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); +} + +} + +template +__device__ scalar_t deformable_im2col_bilinear( + const scalar_t* bottom_data, + const int data_width, + const int height, + const int width, + scalar_t h, + scalar_t w) { + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = bottom_data[h_low * data_width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = bottom_data[h_low * data_width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = bottom_data[h_high * data_width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = bottom_data[h_high * data_width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ scalar_t get_gradient_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int h, + const int w, + const int height, + const int width) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +__device__ scalar_t get_coordinate_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int height, + const int width, + const scalar_t* im_data, + const int data_width, + const int bp_dir) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + + if (bp_dir == 0) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_w - argmax_w_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } 
else if (bp_dir == 1) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template +__global__ void deformable_im2col_gpu_kernel( + const int n, + const scalar_t* data_im, + const scalar_t* data_offset, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int num_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* data_col) { + CUDA_KERNEL_LOOP(index, n) { + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + scalar_t* data_col_ptr = data_col + + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + // const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * + // height + h_in) * width + w_in; + const scalar_t* data_im_ptr = + data_im + (b_col * num_channels + c_im) * height * width; + const scalar_t* data_offset_ptr = data_offset + + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + + w_col; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t val = static_cast(0); + const scalar_t h_im = h_in + i * dilation_h + offset_h; + const scalar_t w_im = w_in + j * dilation_w + offset_w; + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { + // const scalar_t map_h = i * dilation_h + offset_h; + // const scalar_t map_w = j * dilation_w + offset_w; + // const int cur_height = height - h_in; + // const int cur_width = width - w_in; + // val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, + // cur_width, map_h, map_w); + val = deformable_im2col_bilinear( + data_im_ptr, width, height, width, h_im, w_im); + } + *data_col_ptr = val; + data_col_ptr += batch_size * height_col * width_col; + } + } + } +} + + +template +__global__ void deformable_col2im_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_offset, + const int channels, + const int height, + const int width, + const int kernel_h, 
+ const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_im) { + CUDA_KERNEL_LOOP(index, n) { + const int j = (index / width_col / height_col / batch_size) % kernel_w; + const int i = + (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = + index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; + const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const scalar_t cur_top_grad = data_col[index]; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) { + for (int dx = -2; dx <= 2; dx++) { + if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && + cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && + abs(cur_inv_w_data - (cur_w + dx)) < 1) { + int cur_bottom_grad_pos = + ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; + scalar_t weight = get_gradient_weight( + cur_inv_h_data, + cur_inv_w_data, + cur_h + dy, + cur_w + dx, + height, + width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + + +template +__global__ void deformable_col2im_coord_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_im, + const scalar_t* data_offset, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int offset_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_offset) { + CUDA_KERNEL_LOOP(index, n) { + scalar_t val = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const scalar_t* data_col_ptr = data_col + + deformable_group_index * channel_per_deformable_group * batch_size * + width_col * height_col; + const scalar_t* data_im_ptr = data_im + + (b * deformable_group + deformable_group_index) * + channel_per_deformable_group / kernel_h / 
kernel_w * height * width; + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; + col_c += col_step) { + const int col_pos = + (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = + (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = + (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = + (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + + w_out); + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t inv_h = h_in + i * dilation_h + offset_h; + scalar_t inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { + inv_h = inv_w = -2; + } + const scalar_t weight = get_coordinate_weight( + inv_h, + inv_w, + height, + width, + data_im_ptr + cnt * height * width, + width, + bp_dir); + val += weight * data_col_ptr[col_pos]; + cnt += 1; + } + + grad_offset[index] = val; + } +} + + +namespace detectron2 { + +void deformable_im2col( + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor data_col) { + // num_axes should be smaller than block size + // todo: check parallel_imgs is correctly passed in + int height_col = + (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = + (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = channels * height_col * width_col * parallel_imgs; + int channel_per_deformable_group = channels / deformable_group; + + at::cuda::CUDAGuard device_guard(data_im.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_im.scalar_type(), "deformable_im2col_gpu", ([&] { + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + scalar_t* data_col_ = data_col.data_ptr(); + + deformable_im2col_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_im_, + data_offset_, + height, + width, + ksize_h, + ksize_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + parallel_imgs, + channels, + deformable_group, + height_col, + width_col, + data_col_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in deformable_im2col: %s\n", cudaGetErrorString(err)); + } +} + + +void deformable_col2im( + const at::Tensor data_col, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const 
int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor grad_im) { + // todo: make sure parallel_imgs is passed in correctly + int height_col = + (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = + (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = + channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; + int channel_per_deformable_group = channels / deformable_group; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "deformable_col2im_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + scalar_t* grad_im_ = grad_im.data_ptr(); + + deformable_col2im_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_col_, + data_offset_, + channels, + height, + width, + ksize_h, + ksize_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + parallel_imgs, + deformable_group, + height_col, + width_col, + grad_im_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in deformable_col2im: %s\n", cudaGetErrorString(err)); + } +} + + +void deformable_col2im_coord( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor grad_offset) { + int height_col = + (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = + (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * + deformable_group * parallel_imgs; + int channel_per_deformable_group = + channels * ksize_h * ksize_w / deformable_group; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + scalar_t* grad_offset_ = grad_offset.data_ptr(); + + deformable_col2im_coord_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_col_, + data_im_, + data_offset_, + channels, + height, + width, + ksize_h, + ksize_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + parallel_imgs, + 2 * ksize_h * ksize_w * deformable_group, + deformable_group, + height_col, + width_col, + grad_offset_); + })); +} + +} // namespace detectron2 + + +template +__device__ scalar_t dmcn_im2col_bilinear( + const scalar_t* bottom_data, + const int data_width, + const int height, + const int width, + scalar_t h, + scalar_t w) { + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + 
scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = bottom_data[h_low * data_width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = bottom_data[h_low * data_width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = bottom_data[h_high * data_width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = bottom_data[h_high * data_width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ scalar_t dmcn_get_gradient_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int h, + const int w, + const int height, + const int width) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +__device__ scalar_t dmcn_get_coordinate_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int height, + const int width, + const scalar_t* im_data, + const int data_width, + const int bp_dir) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + + if (bp_dir == 0) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_w - argmax_w_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } else if (bp_dir == 1) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template +__global__ void modulated_deformable_im2col_gpu_kernel( + const int n, + const scalar_t* data_im, + const scalar_t* data_offset, + const scalar_t* data_mask, + const int height, + const int width, + const int 
kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int num_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* data_col) { + CUDA_KERNEL_LOOP(index, n) { + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + + scalar_t* data_col_ptr = data_col + + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + // const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * + // height + h_in) * width + w_in; + const scalar_t* data_im_ptr = + data_im + (b_col * num_channels + c_im) * height * width; + const scalar_t* data_offset_ptr = data_offset + + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + + const scalar_t* data_mask_ptr = data_mask + + (b_col * deformable_group + deformable_group_index) * kernel_h * + kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + + w_col; + const int data_mask_hw_ptr = + ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + scalar_t val = static_cast(0); + const scalar_t h_im = h_in + i * dilation_h + offset_h; + const scalar_t w_im = w_in + j * dilation_w + offset_w; + // if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { + // const float map_h = i * dilation_h + offset_h; + // const float map_w = j * dilation_w + offset_w; + // const int cur_height = height - h_in; + // const int cur_width = width - w_in; + // val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, + // cur_width, map_h, map_w); + val = dmcn_im2col_bilinear( + data_im_ptr, width, height, width, h_im, w_im); + } + *data_col_ptr = val * mask; + data_col_ptr += batch_size * height_col * width_col; + // data_col_ptr += height_col * width_col; + } + } + } +} + +template +__global__ void modulated_deformable_col2im_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_offset, + const scalar_t* data_mask, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_im) { + CUDA_KERNEL_LOOP(index, n) { + const int j = (index / width_col / height_col / 
batch_size) % kernel_w; + const int i = + (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = + index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + const scalar_t* data_mask_ptr = data_mask + + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * + height_col * width_col; + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const int data_mask_hw_ptr = + ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; + const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const scalar_t cur_top_grad = data_col[index] * mask; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) { + for (int dx = -2; dx <= 2; dx++) { + if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && + cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && + abs(cur_inv_w_data - (cur_w + dx)) < 1) { + int cur_bottom_grad_pos = + ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; + scalar_t weight = dmcn_get_gradient_weight( + cur_inv_h_data, + cur_inv_w_data, + cur_h + dy, + cur_w + dx, + height, + width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + +template +__global__ void modulated_deformable_col2im_coord_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_im, + const scalar_t* data_offset, + const scalar_t* data_mask, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int offset_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_offset, + scalar_t* grad_mask) { + CUDA_KERNEL_LOOP(index, n) { + scalar_t val = 0, mval = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const scalar_t* data_col_ptr = data_col + + deformable_group_index * channel_per_deformable_group * batch_size * + width_col * height_col; + const scalar_t* data_im_ptr = data_im + + (b * deformable_group + deformable_group_index) * + channel_per_deformable_group / kernel_h / kernel_w * height 
* width; + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + const scalar_t* data_mask_ptr = data_mask + + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * + height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; + col_c += col_step) { + const int col_pos = + (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = + (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = + (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = + (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + + w_out); + const int data_mask_hw_ptr = + (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + scalar_t inv_h = h_in + i * dilation_h + offset_h; + scalar_t inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { + inv_h = inv_w = -2; + } else { + mval += data_col_ptr[col_pos] * + dmcn_im2col_bilinear( + data_im_ptr + cnt * height * width, + width, + height, + width, + inv_h, + inv_w); + } + const scalar_t weight = dmcn_get_coordinate_weight( + inv_h, + inv_w, + height, + width, + data_im_ptr + cnt * height * width, + width, + bp_dir); + val += weight * data_col_ptr[col_pos] * mask; + cnt += 1; + } + // KERNEL_ASSIGN(grad_offset[index], offset_req, val); + grad_offset[index] = val; + if (offset_c % 2 == 0) + // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + + // deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * + // height_col + h) * width_col + w], mask_req, mval); + grad_mask + [(((b * deformable_group + deformable_group_index) * kernel_h * + kernel_w + + offset_c / 2) * + height_col + + h) * + width_col + + w] = mval; + } +} + + +namespace detectron2 { + +void modulated_deformable_im2col_cuda( + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor data_col) { + // num_axes should be smaller than block size + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = channels * batch_size * height_col * width_col; + + at::cuda::CUDAGuard device_guard(data_im.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] { + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + const scalar_t* data_mask_ = data_mask.data_ptr(); 
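+        // AT_DISPATCH_FLOATING_TYPES_AND_HALF instantiates this lambda once
+        // per floating dtype with scalar_t bound accordingly; the raw
+        // pointers are extracted up front so the kernel indexes contiguous
+        // storage directly.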
+ scalar_t* data_col_ = data_col.data_ptr(); + + modulated_deformable_im2col_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_im_, + data_offset_, + data_mask_, + height_im, + width_im, + kernel_h, + kenerl_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + batch_size, + channels, + deformable_group, + height_col, + width_col, + data_col_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf( + "error in modulated_deformable_im2col_cuda: %s\n", + cudaGetErrorString(err)); + } +} + +void modulated_deformable_col2im_cuda( + const at::Tensor data_col, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_im) { + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = + channels * kernel_h * kernel_w * batch_size * height_col * width_col; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + const scalar_t* data_mask_ = data_mask.data_ptr(); + scalar_t* grad_im_ = grad_im.data_ptr(); + + modulated_deformable_col2im_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_col_, + data_offset_, + data_mask_, + channels, + height_im, + width_im, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + batch_size, + deformable_group, + height_col, + width_col, + grad_im_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf( + "error in modulated_deformable_col2im_cuda: %s\n", + cudaGetErrorString(err)); + } +} + +void modulated_deformable_col2im_coord_cuda( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_offset, + at::Tensor grad_mask) { + const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * + kernel_w * deformable_group; + const int channel_per_deformable_group = + channels * kernel_h * kernel_w / deformable_group; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + const scalar_t* data_mask_ = data_mask.data_ptr(); + scalar_t* grad_offset_ = 
grad_offset.data_ptr(); + scalar_t* grad_mask_ = grad_mask.data_ptr(); + + modulated_deformable_col2im_coord_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_col_, + data_im_, + data_offset_, + data_mask_, + channels, + height_im, + width_im, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + batch_size, + 2 * kernel_h * kernel_w * deformable_group, + deformable_group, + height_col, + width_col, + grad_offset_, + grad_mask_); + })); + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf( + "error in modulated_deformable_col2im_coord_cuda: %s\n", + cudaGetErrorString(err)); + } +} + +} // namespace detectron2 diff --git a/mmcv/layers/csrc/nms_rotated/nms_rotated.h b/mmcv/layers/csrc/nms_rotated/nms_rotated.h new file mode 100644 index 0000000..12aca38 --- /dev/null +++ b/mmcv/layers/csrc/nms_rotated/nms_rotated.h @@ -0,0 +1,39 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#pragma once +#include + +namespace detectron2 { + +at::Tensor nms_rotated_cpu( + const at::Tensor& dets, + const at::Tensor& scores, + const double iou_threshold); + +#if defined(WITH_CUDA) || defined(WITH_HIP) +at::Tensor nms_rotated_cuda( + const at::Tensor& dets, + const at::Tensor& scores, + const double iou_threshold); +#endif + +// Interface for Python +// inline is needed to prevent multiple function definitions when this header is +// included by different cpps +inline at::Tensor nms_rotated( + const at::Tensor& dets, + const at::Tensor& scores, + const double iou_threshold) { + assert(dets.device().is_cuda() == scores.device().is_cuda()); + if (dets.device().is_cuda()) { +#if defined(WITH_CUDA) || defined(WITH_HIP) + return nms_rotated_cuda( + dets.contiguous(), scores.contiguous(), iou_threshold); +#else + AT_ERROR("Detectron2 is not compiled with GPU support!"); +#endif + } + + return nms_rotated_cpu(dets.contiguous(), scores.contiguous(), iou_threshold); +} + +} // namespace detectron2 diff --git a/mmcv/layers/csrc/nms_rotated/nms_rotated_cpu.cpp b/mmcv/layers/csrc/nms_rotated/nms_rotated_cpu.cpp new file mode 100644 index 0000000..d7556e6 --- /dev/null +++ b/mmcv/layers/csrc/nms_rotated/nms_rotated_cpu.cpp @@ -0,0 +1,75 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
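+// Greedy NMS over rotated boxes: boxes are visited in descending score
+// order, and a box is kept only if its rotated IoU with every previously
+// kept box stays below iou_threshold (O(n^2) pairwise tests on CPU).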
+#include "../box_iou_rotated/box_iou_rotated_utils.h" +#include "nms_rotated.h" + +namespace detectron2 { + +template +at::Tensor nms_rotated_cpu_kernel( + const at::Tensor& dets, + const at::Tensor& scores, + const double iou_threshold) { + // nms_rotated_cpu_kernel is modified from torchvision's nms_cpu_kernel, + // however, the code in this function is much shorter because + // we delegate the IoU computation for rotated boxes to + // the single_box_iou_rotated function in box_iou_rotated_utils.h + AT_ASSERTM(dets.device().is_cpu(), "dets must be a CPU tensor"); + AT_ASSERTM(scores.device().is_cpu(), "scores must be a CPU tensor"); + AT_ASSERTM( + dets.scalar_type() == scores.scalar_type(), + "dets should have the same type as scores"); + + if (dets.numel() == 0) { + return at::empty({0}, dets.options().dtype(at::kLong)); + } + + auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); + + auto ndets = dets.size(0); + at::Tensor suppressed_t = at::zeros({ndets}, dets.options().dtype(at::kByte)); + at::Tensor keep_t = at::zeros({ndets}, dets.options().dtype(at::kLong)); + + auto suppressed = suppressed_t.data_ptr(); + auto keep = keep_t.data_ptr(); + auto order = order_t.data_ptr(); + + int64_t num_to_keep = 0; + + for (int64_t _i = 0; _i < ndets; _i++) { + auto i = order[_i]; + if (suppressed[i] == 1) { + continue; + } + + keep[num_to_keep++] = i; + + for (int64_t _j = _i + 1; _j < ndets; _j++) { + auto j = order[_j]; + if (suppressed[j] == 1) { + continue; + } + + auto ovr = single_box_iou_rotated( + dets[i].data_ptr(), dets[j].data_ptr()); + if (ovr >= iou_threshold) { + suppressed[j] = 1; + } + } + } + return keep_t.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep); +} + +at::Tensor nms_rotated_cpu( + // input must be contiguous + const at::Tensor& dets, + const at::Tensor& scores, + const double iou_threshold) { + auto result = at::empty({0}, dets.options()); + + AT_DISPATCH_FLOATING_TYPES(dets.scalar_type(), "nms_rotated", [&] { + result = nms_rotated_cpu_kernel(dets, scores, iou_threshold); + }); + return result; +} + +} // namespace detectron2 diff --git a/mmcv/layers/csrc/nms_rotated/nms_rotated_cuda.cu b/mmcv/layers/csrc/nms_rotated/nms_rotated_cuda.cu new file mode 100644 index 0000000..2a3db5c --- /dev/null +++ b/mmcv/layers/csrc/nms_rotated/nms_rotated_cuda.cu @@ -0,0 +1,145 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +#include +#include +#include +#include +#ifdef WITH_CUDA +#include "../box_iou_rotated/box_iou_rotated_utils.h" +#endif +// TODO avoid this when pytorch supports "same directory" hipification +#ifdef WITH_HIP +#include "box_iou_rotated/box_iou_rotated_utils.h" +#endif + +using namespace detectron2; + +namespace { +int const threadsPerBlock = sizeof(unsigned long long) * 8; +} + +template +__global__ void nms_rotated_cuda_kernel( + const int n_boxes, + const double iou_threshold, + const T* dev_boxes, + unsigned long long* dev_mask) { + // nms_rotated_cuda_kernel is modified from torchvision's nms_cuda_kernel + + const int row_start = blockIdx.y; + const int col_start = blockIdx.x; + + // if (row_start > col_start) return; + + const int row_size = + min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); + const int col_size = + min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); + + // Compared to nms_cuda_kernel, where each box is represented with 4 values + // (x1, y1, x2, y2), each rotated box is represented with 5 values + // (x_center, y_center, width, height, angle_degrees) here. 
+ __shared__ T block_boxes[threadsPerBlock * 5]; + if (threadIdx.x < col_size) { + block_boxes[threadIdx.x * 5 + 0] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; + block_boxes[threadIdx.x * 5 + 1] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; + block_boxes[threadIdx.x * 5 + 2] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; + block_boxes[threadIdx.x * 5 + 3] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; + block_boxes[threadIdx.x * 5 + 4] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; + } + __syncthreads(); + + if (threadIdx.x < row_size) { + const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; + const T* cur_box = dev_boxes + cur_box_idx * 5; + int i = 0; + unsigned long long t = 0; + int start = 0; + if (row_start == col_start) { + start = threadIdx.x + 1; + } + for (i = start; i < col_size; i++) { + // Instead of devIoU used by original horizontal nms, here + // we use the single_box_iou_rotated function from box_iou_rotated_utils.h + if (single_box_iou_rotated(cur_box, block_boxes + i * 5) > + iou_threshold) { + t |= 1ULL << i; + } + } + const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock); + dev_mask[cur_box_idx * col_blocks + col_start] = t; + } +} + +namespace detectron2 { + +at::Tensor nms_rotated_cuda( + // input must be contiguous + const at::Tensor& dets, + const at::Tensor& scores, + double iou_threshold) { + // using scalar_t = float; + AT_ASSERTM(dets.is_cuda(), "dets must be a CUDA tensor"); + AT_ASSERTM(scores.is_cuda(), "scores must be a CUDA tensor"); + at::cuda::CUDAGuard device_guard(dets.device()); + + auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); + auto dets_sorted = dets.index_select(0, order_t); + + auto dets_num = dets.size(0); + + const int col_blocks = + at::cuda::ATenCeilDiv(static_cast(dets_num), threadsPerBlock); + + at::Tensor mask = + at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong)); + + dim3 blocks(col_blocks, col_blocks); + dim3 threads(threadsPerBlock); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES( + dets_sorted.scalar_type(), "nms_rotated_kernel_cuda", [&] { + nms_rotated_cuda_kernel<<>>( + dets_num, + iou_threshold, + dets_sorted.data_ptr(), + (unsigned long long*)mask.data_ptr()); + }); + + at::Tensor mask_cpu = mask.to(at::kCPU); + unsigned long long* mask_host = + (unsigned long long*)mask_cpu.data_ptr(); + + std::vector remv(col_blocks); + memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); + + at::Tensor keep = + at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU)); + int64_t* keep_out = keep.data_ptr(); + + int num_to_keep = 0; + for (int i = 0; i < dets_num; i++) { + int nblock = i / threadsPerBlock; + int inblock = i % threadsPerBlock; + + if (!(remv[nblock] & (1ULL << inblock))) { + keep_out[num_to_keep++] = i; + unsigned long long* p = mask_host + i * col_blocks; + for (int j = nblock; j < col_blocks; j++) { + remv[j] |= p[j]; + } + } + } + + AT_CUDA_CHECK(cudaGetLastError()); + return order_t.index( + {keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep) + .to(order_t.device(), keep.scalar_type())}); +} + +} // namespace detectron2 diff --git a/mmcv/layers/csrc/vision.cpp b/mmcv/layers/csrc/vision.cpp new file mode 100644 index 0000000..c9a2cd4 --- /dev/null +++ b/mmcv/layers/csrc/vision.cpp @@ -0,0 +1,117 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
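+//
+// Binding overview (descriptive comment; not in the upstream file): the
+// PYBIND11_MODULE block below exposes the deformable-conv and COCOeval entry
+// points as attributes of the compiled module (consumed in Python as
+// `from detectron2 import _C`), while TORCH_LIBRARY additionally registers
+// the rotated-box ops with the PyTorch dispatcher, e.g. reachable as
+// torch.ops.detectron2.nms_rotated(dets, scores, 0.5).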
+ +#include +#include "ROIAlignRotated/ROIAlignRotated.h" +#include "box_iou_rotated/box_iou_rotated.h" +#include "cocoeval/cocoeval.h" +#include "deformable/deform_conv.h" +#include "nms_rotated/nms_rotated.h" + +namespace detectron2 { + +#if defined(WITH_CUDA) || defined(WITH_HIP) +extern int get_cudart_version(); +#endif + +std::string get_cuda_version() { +#if defined(WITH_CUDA) || defined(WITH_HIP) + std::ostringstream oss; + +#if defined(WITH_CUDA) + oss << "CUDA "; +#else + oss << "HIP "; +#endif + + // copied from + // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/CUDAHooks.cpp#L231 + auto printCudaStyleVersion = [&](int v) { + oss << (v / 1000) << "." << (v / 10 % 100); + if (v % 10 != 0) { + oss << "." << (v % 10); + } + }; + printCudaStyleVersion(get_cudart_version()); + return oss.str(); +#else // neither CUDA nor HIP + return std::string("not available"); +#endif +} + +bool has_cuda() { +#if defined(WITH_CUDA) + return true; +#else + return false; +#endif +} + +// similar to +// https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Version.cpp +std::string get_compiler_version() { + std::ostringstream ss; +#if defined(__GNUC__) +#ifndef __clang__ + +#if ((__GNUC__ <= 4) && (__GNUC_MINOR__ <= 8)) +#error "GCC >= 4.9 is required!" +#endif + + { ss << "GCC " << __GNUC__ << "." << __GNUC_MINOR__; } +#endif +#endif + +#if defined(__clang_major__) + { + ss << "clang " << __clang_major__ << "." << __clang_minor__ << "." + << __clang_patchlevel__; + } +#endif + +#if defined(_MSC_VER) + { ss << "MSVC " << _MSC_FULL_VER; } +#endif + return ss.str(); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("get_compiler_version", &get_compiler_version, "get_compiler_version"); + m.def("get_cuda_version", &get_cuda_version, "get_cuda_version"); + m.def("has_cuda", &has_cuda, "has_cuda"); + + m.def("deform_conv_forward", &deform_conv_forward, "deform_conv_forward"); + m.def( + "deform_conv_backward_input", + &deform_conv_backward_input, + "deform_conv_backward_input"); + m.def( + "deform_conv_backward_filter", + &deform_conv_backward_filter, + "deform_conv_backward_filter"); + m.def( + "modulated_deform_conv_forward", + &modulated_deform_conv_forward, + "modulated_deform_conv_forward"); + m.def( + "modulated_deform_conv_backward", + &modulated_deform_conv_backward, + "modulated_deform_conv_backward"); + + m.def("COCOevalAccumulate", &COCOeval::Accumulate, "COCOeval::Accumulate"); + m.def( + "COCOevalEvaluateImages", + &COCOeval::EvaluateImages, + "COCOeval::EvaluateImages"); + pybind11::class_(m, "InstanceAnnotation") + .def(pybind11::init()); + pybind11::class_(m, "ImageEvaluation") + .def(pybind11::init<>()); +} + +TORCH_LIBRARY(detectron2, m) { + m.def("nms_rotated", &nms_rotated); + m.def("box_iou_rotated", &box_iou_rotated); + m.def("roi_align_rotated_forward", &ROIAlignRotated_forward); + m.def("roi_align_rotated_backward", &ROIAlignRotated_backward); +} +} // namespace detectron2 diff --git a/mmcv/layers/deform_conv.py b/mmcv/layers/deform_conv.py new file mode 100644 index 0000000..dffb720 --- /dev/null +++ b/mmcv/layers/deform_conv.py @@ -0,0 +1,514 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
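+# Usage sketch (added for clarity; not part of the upstream file -- names
+# such as `offset_conv` are placeholders): DeformConv takes the offset field
+# as a second input with 2 * deformable_groups * kH * kW channels, typically
+# predicted by a small parallel conv branch:
+#
+#   conv = DeformConv(256, 256, kernel_size=3, padding=1)
+#   offset = offset_conv(x)   # hypothetical 3x3 conv -> (N, 18, H, W)
+#   out = conv(x, offset)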
+import math +from functools import lru_cache +import torch +from torch import nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair +from torchvision.ops import deform_conv2d + +from detectron2.utils.develop import create_dummy_class, create_dummy_func + +from .wrappers import _NewEmptyTensorOp + + +class _DeformConv(Function): + @staticmethod + def forward( + ctx, + input, + offset, + weight, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + im2col_step=64, + ): + if input is not None and input.dim() != 4: + raise ValueError( + "Expected 4D tensor as input, got {}D tensor instead.".format(input.dim()) + ) + ctx.stride = _pair(stride) + ctx.padding = _pair(padding) + ctx.dilation = _pair(dilation) + ctx.groups = groups + ctx.deformable_groups = deformable_groups + ctx.im2col_step = im2col_step + + ctx.save_for_backward(input, offset, weight) + + output = input.new_empty( + _DeformConv._output_size(input, weight, ctx.padding, ctx.dilation, ctx.stride) + ) + + ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones + + if not input.is_cuda: + # TODO: let torchvision support full features of our deformconv. + if deformable_groups != 1: + raise NotImplementedError( + "Deformable Conv with deformable_groups != 1 is not supported on CPUs!" + ) + return deform_conv2d( + input, offset, weight, stride=stride, padding=padding, dilation=dilation + ) + else: + cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step) + assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize" + + _C.deform_conv_forward( + input, + weight, + offset, + output, + ctx.bufs_[0], + ctx.bufs_[1], + weight.size(3), + weight.size(2), + ctx.stride[1], + ctx.stride[0], + ctx.padding[1], + ctx.padding[0], + ctx.dilation[1], + ctx.dilation[0], + ctx.groups, + ctx.deformable_groups, + cur_im2col_step, + ) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + input, offset, weight = ctx.saved_tensors + + grad_input = grad_offset = grad_weight = None + + if not grad_output.is_cuda: + raise NotImplementedError("Deformable Conv is not supported on CPUs!") + else: + cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step) + assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize" + + if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + _C.deform_conv_backward_input( + input, + offset, + grad_output, + grad_input, + grad_offset, + weight, + ctx.bufs_[0], + weight.size(3), + weight.size(2), + ctx.stride[1], + ctx.stride[0], + ctx.padding[1], + ctx.padding[0], + ctx.dilation[1], + ctx.dilation[0], + ctx.groups, + ctx.deformable_groups, + cur_im2col_step, + ) + + if ctx.needs_input_grad[2]: + grad_weight = torch.zeros_like(weight) + _C.deform_conv_backward_filter( + input, + offset, + grad_output, + grad_weight, + ctx.bufs_[0], + ctx.bufs_[1], + weight.size(3), + weight.size(2), + ctx.stride[1], + ctx.stride[0], + ctx.padding[1], + ctx.padding[0], + ctx.dilation[1], + ctx.dilation[0], + ctx.groups, + ctx.deformable_groups, + 1, + cur_im2col_step, + ) + + return grad_input, grad_offset, grad_weight, None, None, None, None, None, None + + @staticmethod + def _output_size(input, weight, padding, dilation, stride): + channels = weight.size(0) + output_size = (input.size(0), channels) + for d in 
range(input.dim() - 2): + in_size = input.size(d + 2) + pad = padding[d] + kernel = dilation[d] * (weight.size(d + 2) - 1) + 1 + stride_ = stride[d] + output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1,) + if not all(map(lambda s: s > 0, output_size)): + raise ValueError( + "convolution input is too small (output would be {})".format( + "x".join(map(str, output_size)) + ) + ) + return output_size + + @staticmethod + @lru_cache(maxsize=128) + def _cal_im2col_step(input_size, default_size): + """ + Calculate proper im2col step size, which should be divisible by input_size and not larger + than prefer_size. Meanwhile the step size should be as large as possible to be more + efficient. So we choose the largest one among all divisors of input_size which are smaller + than prefer_size. + :param input_size: input batch size . + :param default_size: default preferred im2col step size. + :return: the largest proper step size. + """ + if input_size <= default_size: + return input_size + best_step = 1 + for step in range(2, min(int(math.sqrt(input_size)) + 1, default_size)): + if input_size % step == 0: + if input_size // step <= default_size: + return input_size // step + best_step = step + + return best_step + + +class _ModulatedDeformConv(Function): + @staticmethod + def forward( + ctx, + input, + offset, + mask, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + ): + ctx.stride = stride + ctx.padding = padding + ctx.dilation = dilation + ctx.groups = groups + ctx.deformable_groups = deformable_groups + ctx.with_bias = bias is not None + if not ctx.with_bias: + bias = input.new_empty(1) # fake tensor + if not input.is_cuda: + raise NotImplementedError("Deformable Conv is not supported on CPUs!") + if ( + weight.requires_grad + or mask.requires_grad + or offset.requires_grad + or input.requires_grad + ): + ctx.save_for_backward(input, offset, mask, weight, bias) + output = input.new_empty(_ModulatedDeformConv._infer_shape(ctx, input, weight)) + ctx._bufs = [input.new_empty(0), input.new_empty(0)] + _C.modulated_deform_conv_forward( + input, + weight, + bias, + ctx._bufs[0], + offset, + mask, + output, + ctx._bufs[1], + weight.shape[2], + weight.shape[3], + ctx.stride, + ctx.stride, + ctx.padding, + ctx.padding, + ctx.dilation, + ctx.dilation, + ctx.groups, + ctx.deformable_groups, + ctx.with_bias, + ) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + if not grad_output.is_cuda: + raise NotImplementedError("Deformable Conv is not supported on CPUs!") + input, offset, mask, weight, bias = ctx.saved_tensors + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + grad_mask = torch.zeros_like(mask) + grad_weight = torch.zeros_like(weight) + grad_bias = torch.zeros_like(bias) + _C.modulated_deform_conv_backward( + input, + weight, + bias, + ctx._bufs[0], + offset, + mask, + ctx._bufs[1], + grad_input, + grad_weight, + grad_bias, + grad_offset, + grad_mask, + grad_output, + weight.shape[2], + weight.shape[3], + ctx.stride, + ctx.stride, + ctx.padding, + ctx.padding, + ctx.dilation, + ctx.dilation, + ctx.groups, + ctx.deformable_groups, + ctx.with_bias, + ) + if not ctx.with_bias: + grad_bias = None + + return ( + grad_input, + grad_offset, + grad_mask, + grad_weight, + grad_bias, + None, + None, + None, + None, + None, + ) + + @staticmethod + def _infer_shape(ctx, input, weight): + n = input.size(0) + channels_out = weight.size(0) + height, width = input.shape[2:4] + kernel_h, 
kernel_w = weight.shape[2:4] + height_out = ( + height + 2 * ctx.padding - (ctx.dilation * (kernel_h - 1) + 1) + ) // ctx.stride + 1 + width_out = ( + width + 2 * ctx.padding - (ctx.dilation * (kernel_w - 1) + 1) + ) // ctx.stride + 1 + return n, channels_out, height_out, width_out + + +deform_conv = _DeformConv.apply +modulated_deform_conv = _ModulatedDeformConv.apply + + +class DeformConv(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + bias=False, + norm=None, + activation=None, + ): + """ + Deformable convolution from :paper:`deformconv`. + + Arguments are similar to :class:`Conv2D`. Extra arguments: + + Args: + deformable_groups (int): number of groups used in deformable convolution. + norm (nn.Module, optional): a normalization layer + activation (callable(Tensor) -> Tensor): a callable activation function + """ + super(DeformConv, self).__init__() + + assert not bias + assert in_channels % groups == 0, "in_channels {} cannot be divisible by groups {}".format( + in_channels, groups + ) + assert ( + out_channels % groups == 0 + ), "out_channels {} cannot be divisible by groups {}".format(out_channels, groups) + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = _pair(stride) + self.padding = _pair(padding) + self.dilation = _pair(dilation) + self.groups = groups + self.deformable_groups = deformable_groups + self.norm = norm + self.activation = activation + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // self.groups, *self.kernel_size) + ) + self.bias = None + + nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") + + def forward(self, x, offset): + if x.numel() == 0: + # When input is empty, we want to return a empty tensor with "correct" shape, + # So that the following operations will not panic + # if they check for the shape of the tensor. + # This computes the height and width of the output tensor + output_shape = [ + (i + 2 * p - (di * (k - 1) + 1)) // s + 1 + for i, p, di, k, s in zip( + x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride + ) + ] + output_shape = [x.shape[0], self.weight.shape[0]] + output_shape + return _NewEmptyTensorOp.apply(x, output_shape) + + x = deform_conv( + x, + offset, + self.weight, + self.stride, + self.padding, + self.dilation, + self.groups, + self.deformable_groups, + ) + if self.norm is not None: + x = self.norm(x) + if self.activation is not None: + x = self.activation(x) + return x + + def extra_repr(self): + tmpstr = "in_channels=" + str(self.in_channels) + tmpstr += ", out_channels=" + str(self.out_channels) + tmpstr += ", kernel_size=" + str(self.kernel_size) + tmpstr += ", stride=" + str(self.stride) + tmpstr += ", padding=" + str(self.padding) + tmpstr += ", dilation=" + str(self.dilation) + tmpstr += ", groups=" + str(self.groups) + tmpstr += ", deformable_groups=" + str(self.deformable_groups) + tmpstr += ", bias=False" + return tmpstr + + +class ModulatedDeformConv(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + bias=True, + norm=None, + activation=None, + ): + """ + Modulated deformable convolution from :paper:`deformconv2`. + + Arguments are similar to :class:`Conv2D`. Extra arguments: + + Args: + deformable_groups (int): number of groups used in deformable convolution. 
+ norm (nn.Module, optional): a normalization layer + activation (callable(Tensor) -> Tensor): a callable activation function + """ + super(ModulatedDeformConv, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = stride + self.padding = padding + self.dilation = dilation + self.groups = groups + self.deformable_groups = deformable_groups + self.with_bias = bias + self.norm = norm + self.activation = activation + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // groups, *self.kernel_size) + ) + if bias: + self.bias = nn.Parameter(torch.Tensor(out_channels)) + else: + self.bias = None + + nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") + if self.bias is not None: + nn.init.constant_(self.bias, 0) + + def forward(self, x, offset, mask): + if x.numel() == 0: + output_shape = [ + (i + 2 * p - (di * (k - 1) + 1)) // s + 1 + for i, p, di, k, s in zip( + x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride + ) + ] + output_shape = [x.shape[0], self.weight.shape[0]] + output_shape + return _NewEmptyTensorOp.apply(x, output_shape) + + x = modulated_deform_conv( + x, + offset, + mask, + self.weight, + self.bias, + self.stride, + self.padding, + self.dilation, + self.groups, + self.deformable_groups, + ) + if self.norm is not None: + x = self.norm(x) + if self.activation is not None: + x = self.activation(x) + return x + + def extra_repr(self): + tmpstr = "in_channels=" + str(self.in_channels) + tmpstr += ", out_channels=" + str(self.out_channels) + tmpstr += ", kernel_size=" + str(self.kernel_size) + tmpstr += ", stride=" + str(self.stride) + tmpstr += ", padding=" + str(self.padding) + tmpstr += ", dilation=" + str(self.dilation) + tmpstr += ", groups=" + str(self.groups) + tmpstr += ", deformable_groups=" + str(self.deformable_groups) + tmpstr += ", bias=" + str(self.with_bias) + return tmpstr + + +try: + from detectron2 import _C +except ImportError: + # TODO: register ops natively so there is no need to import _C. + _msg = "detectron2 is not compiled successfully, please build following the instructions!" + _args = ("detectron2._C", _msg) + DeformConv = create_dummy_class("DeformConv", *_args) + ModulatedDeformConv = create_dummy_class("ModulatedDeformConv", *_args) + deform_conv = create_dummy_func("deform_conv", *_args) + modulated_deform_conv = create_dummy_func("modulated_deform_conv", *_args) diff --git a/mmcv/layers/losses.py b/mmcv/layers/losses.py new file mode 100644 index 0000000..850a852 --- /dev/null +++ b/mmcv/layers/losses.py @@ -0,0 +1,133 @@ +import math +import torch + + +def diou_loss( + boxes1: torch.Tensor, + boxes2: torch.Tensor, + reduction: str = "none", + eps: float = 1e-7, +) -> torch.Tensor: + """ + Distance Intersection over Union Loss (Zhaohui Zheng et. al) + https://arxiv.org/abs/1911.08287 + Args: + boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,). + reduction: 'none' | 'mean' | 'sum' + 'none': No reduction will be applied to the output. + 'mean': The output will be averaged. + 'sum': The output will be summed. 
+ eps (float): small number to prevent division by zero + """ + + x1, y1, x2, y2 = boxes1.unbind(dim=-1) + x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) + + # TODO: use torch._assert_async() when pytorch 1.8 support is dropped + assert (x2 >= x1).all(), "bad box: x1 larger than x2" + assert (y2 >= y1).all(), "bad box: y1 larger than y2" + + # Intersection keypoints + xkis1 = torch.max(x1, x1g) + ykis1 = torch.max(y1, y1g) + xkis2 = torch.min(x2, x2g) + ykis2 = torch.min(y2, y2g) + + intsct = torch.zeros_like(x1) + mask = (ykis2 > ykis1) & (xkis2 > xkis1) + intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) + union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps + iou = intsct / union + + # smallest enclosing box + xc1 = torch.min(x1, x1g) + yc1 = torch.min(y1, y1g) + xc2 = torch.max(x2, x2g) + yc2 = torch.max(y2, y2g) + diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps + + # centers of boxes + x_p = (x2 + x1) / 2 + y_p = (y2 + y1) / 2 + x_g = (x1g + x2g) / 2 + y_g = (y1g + y2g) / 2 + distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2) + + # Eqn. (7) + loss = 1 - iou + (distance / diag_len) + if reduction == "mean": + loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() + elif reduction == "sum": + loss = loss.sum() + + return loss + + +def ciou_loss( + boxes1: torch.Tensor, + boxes2: torch.Tensor, + reduction: str = "none", + eps: float = 1e-7, +) -> torch.Tensor: + """ + Complete Intersection over Union Loss (Zhaohui Zheng et. al) + https://arxiv.org/abs/1911.08287 + Args: + boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,). + reduction: 'none' | 'mean' | 'sum' + 'none': No reduction will be applied to the output. + 'mean': The output will be averaged. + 'sum': The output will be summed. + eps (float): small number to prevent division by zero + """ + + x1, y1, x2, y2 = boxes1.unbind(dim=-1) + x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) + + # TODO: use torch._assert_async() when pytorch 1.8 support is dropped + assert (x2 >= x1).all(), "bad box: x1 larger than x2" + assert (y2 >= y1).all(), "bad box: y1 larger than y2" + + # Intersection keypoints + xkis1 = torch.max(x1, x1g) + ykis1 = torch.max(y1, y1g) + xkis2 = torch.min(x2, x2g) + ykis2 = torch.min(y2, y2g) + + intsct = torch.zeros_like(x1) + mask = (ykis2 > ykis1) & (xkis2 > xkis1) + intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) + union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps + iou = intsct / union + + # smallest enclosing box + xc1 = torch.min(x1, x1g) + yc1 = torch.min(y1, y1g) + xc2 = torch.max(x2, x2g) + yc2 = torch.max(y2, y2g) + diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps + + # centers of boxes + x_p = (x2 + x1) / 2 + y_p = (y2 + y1) / 2 + x_g = (x1g + x2g) / 2 + y_g = (y1g + y2g) / 2 + distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2) + + # width and height of boxes + w_pred = x2 - x1 + h_pred = y2 - y1 + w_gt = x2g - x1g + h_gt = y2g - y1g + v = (4 / (math.pi**2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2) + with torch.no_grad(): + alpha = v / (1 - iou + v + eps) + + # Eqn. 
(10) + loss = 1 - iou + (distance / diag_len) + alpha * v + if reduction == "mean": + loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() + elif reduction == "sum": + loss = loss.sum() + + return loss diff --git a/mmcv/layers/mask_ops.py b/mmcv/layers/mask_ops.py new file mode 100644 index 0000000..990d04a --- /dev/null +++ b/mmcv/layers/mask_ops.py @@ -0,0 +1,275 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +from typing import Tuple +import torch +from PIL import Image +from torch.nn import functional as F + +__all__ = ["paste_masks_in_image"] + + +BYTES_PER_FLOAT = 4 +# TODO: This memory limit may be too much or too little. It would be better to +# determine it based on available resources. +GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit + + +def _do_paste_mask(masks, boxes, img_h: int, img_w: int, skip_empty: bool = True): + """ + Args: + masks: N, 1, H, W + boxes: N, 4 + img_h, img_w (int): + skip_empty (bool): only paste masks within the region that + tightly bound all boxes, and returns the results this region only. + An important optimization for CPU. + + Returns: + if skip_empty == False, a mask of shape (N, img_h, img_w) + if skip_empty == True, a mask of shape (N, h', w'), and the slice + object for the corresponding region. + """ + # On GPU, paste all masks together (up to chunk size) + # by using the entire image to sample the masks + # Compared to pasting them one by one, + # this has more operations but is faster on COCO-scale dataset. + device = masks.device + + if skip_empty and not torch.jit.is_scripting(): + x0_int, y0_int = torch.clamp(boxes.min(dim=0).values.floor()[:2] - 1, min=0).to( + dtype=torch.int32 + ) + x1_int = torch.clamp(boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32) + y1_int = torch.clamp(boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32) + else: + x0_int, y0_int = 0, 0 + x1_int, y1_int = img_w, img_h + x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1 + + N = masks.shape[0] + + img_y = torch.arange(y0_int, y1_int, device=device, dtype=torch.float32) + 0.5 + img_x = torch.arange(x0_int, x1_int, device=device, dtype=torch.float32) + 0.5 + img_y = (img_y - y0) / (y1 - y0) * 2 - 1 + img_x = (img_x - x0) / (x1 - x0) * 2 - 1 + # img_x, img_y have shapes (N, w), (N, h) + + gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1)) + gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1)) + grid = torch.stack([gx, gy], dim=3) + + if not torch.jit.is_scripting(): + if not masks.dtype.is_floating_point: + masks = masks.float() + img_masks = F.grid_sample(masks, grid.to(masks.dtype), align_corners=False) + + if skip_empty and not torch.jit.is_scripting(): + return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int)) + else: + return img_masks[:, 0], () + + +# Annotate boxes as Tensor (but not Boxes) in order to use scripting +@torch.jit.script_if_tracing +def paste_masks_in_image( + masks: torch.Tensor, boxes: torch.Tensor, image_shape: Tuple[int, int], threshold: float = 0.5 +): + """ + Paste a set of masks that are of a fixed resolution (e.g., 28 x 28) into an image. + The location, height, and width for pasting each mask is determined by their + corresponding bounding boxes in boxes. + + Note: + This is a complicated but more accurate implementation. In actual deployment, it is + often enough to use a faster but less accurate implementation. + See :func:`paste_mask_in_image_old` in this file for an alternative implementation. 
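+
+    Example (a hypothetical call, shapes only; values are illustrative)::
+
+        masks = torch.rand(3, 28, 28)                     # 3 soft 28x28 instance masks
+        boxes = torch.tensor([[10., 10., 60., 60.]] * 3)  # XYXY boxes in the image
+        out = paste_masks_in_image(masks, boxes, (120, 160))
+        # out: (3, 120, 160) bool masks (threshold defaults to 0.5)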
+ + Args: + masks (tensor): Tensor of shape (Bimg, Hmask, Wmask), where Bimg is the number of + detected object instances in the image and Hmask, Wmask are the mask width and mask + height of the predicted mask (e.g., Hmask = Wmask = 28). Values are in [0, 1]. + boxes (Boxes or Tensor): A Boxes of length Bimg or Tensor of shape (Bimg, 4). + boxes[i] and masks[i] correspond to the same object instance. + image_shape (tuple): height, width + threshold (float): A threshold in [0, 1] for converting the (soft) masks to + binary masks. + + Returns: + img_masks (Tensor): A tensor of shape (Bimg, Himage, Wimage), where Bimg is the + number of detected object instances and Himage, Wimage are the image width + and height. img_masks[i] is a binary mask for object instance i. + """ + + assert masks.shape[-1] == masks.shape[-2], "Only square mask predictions are supported" + N = len(masks) + if N == 0: + return masks.new_empty((0,) + image_shape, dtype=torch.uint8) + if not isinstance(boxes, torch.Tensor): + boxes = boxes.tensor + device = boxes.device + assert len(boxes) == N, boxes.shape + + img_h, img_w = image_shape + + # The actual implementation split the input into chunks, + # and paste them chunk by chunk. + if device.type == "cpu" or torch.jit.is_scripting(): + # CPU is most efficient when they are pasted one by one with skip_empty=True + # so that it performs minimal number of operations. + num_chunks = N + else: + # GPU benefits from parallelism for larger chunks, but may have memory issue + # int(img_h) because shape may be tensors in tracing + num_chunks = int(np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT / GPU_MEM_LIMIT)) + assert ( + num_chunks <= N + ), "Default GPU_MEM_LIMIT in mask_ops.py is too small; try increasing it" + chunks = torch.chunk(torch.arange(N, device=device), num_chunks) + + img_masks = torch.zeros( + N, img_h, img_w, device=device, dtype=torch.bool if threshold >= 0 else torch.uint8 + ) + for inds in chunks: + masks_chunk, spatial_inds = _do_paste_mask( + masks[inds, None, :, :], boxes[inds], img_h, img_w, skip_empty=device.type == "cpu" + ) + + if threshold >= 0: + masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool) + else: + # for visualization and debugging + masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8) + + if torch.jit.is_scripting(): # Scripting does not use the optimized codepath + img_masks[inds] = masks_chunk + else: + img_masks[(inds,) + spatial_inds] = masks_chunk + return img_masks + + +# The below are the original paste function (from Detectron1) which has +# larger quantization error. +# It is faster on CPU, while the aligned one is faster on GPU thanks to grid_sample. + + +def paste_mask_in_image_old(mask, box, img_h, img_w, threshold): + """ + Paste a single mask in an image. + This is a per-box implementation of :func:`paste_masks_in_image`. + This function has larger quantization error due to incorrect pixel + modeling and is not used any more. + + Args: + mask (Tensor): A tensor of shape (Hmask, Wmask) storing the mask of a single + object instance. Values are in [0, 1]. + box (Tensor): A tensor of shape (4, ) storing the x0, y0, x1, y1 box corners + of the object instance. + img_h, img_w (int): Image height and width. + threshold (float): Mask binarization threshold in [0, 1]. + + Returns: + im_mask (Tensor): + The resized and binarized object mask pasted into the original + image plane (a tensor of shape (img_h, img_w)). 
+ """ + # Conversion from continuous box coordinates to discrete pixel coordinates + # via truncation (cast to int32). This determines which pixels to paste the + # mask onto. + box = box.to(dtype=torch.int32) # Continuous to discrete coordinate conversion + # An example (1D) box with continuous coordinates (x0=0.7, x1=4.3) will map to + # a discrete coordinates (x0=0, x1=4). Note that box is mapped to 5 = x1 - x0 + 1 + # pixels (not x1 - x0 pixels). + samples_w = box[2] - box[0] + 1 # Number of pixel samples, *not* geometric width + samples_h = box[3] - box[1] + 1 # Number of pixel samples, *not* geometric height + + # Resample the mask from it's original grid to the new samples_w x samples_h grid + mask = Image.fromarray(mask.cpu().numpy()) + mask = mask.resize((samples_w, samples_h), resample=Image.BILINEAR) + mask = np.array(mask, copy=False) + + if threshold >= 0: + mask = np.array(mask > threshold, dtype=np.uint8) + mask = torch.from_numpy(mask) + else: + # for visualization and debugging, we also + # allow it to return an unmodified mask + mask = torch.from_numpy(mask * 255).to(torch.uint8) + + im_mask = torch.zeros((img_h, img_w), dtype=torch.uint8) + x_0 = max(box[0], 0) + x_1 = min(box[2] + 1, img_w) + y_0 = max(box[1], 0) + y_1 = min(box[3] + 1, img_h) + + im_mask[y_0:y_1, x_0:x_1] = mask[ + (y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0]) + ] + return im_mask + + +# Our pixel modeling requires extrapolation for any continuous +# coordinate < 0.5 or > length - 0.5. When sampling pixels on the masks, +# we would like this extrapolation to be an interpolation between boundary values and zero, +# instead of using absolute zero or boundary values. +# Therefore `paste_mask_in_image_old` is often used with zero padding around the masks like this: +# masks, scale = pad_masks(masks[:, 0, :, :], 1) +# boxes = scale_boxes(boxes.tensor, scale) + + +def pad_masks(masks, padding): + """ + Args: + masks (tensor): A tensor of shape (B, M, M) representing B masks. + padding (int): Number of cells to pad on all sides. + + Returns: + The padded masks and the scale factor of the padding size / original size. + """ + B = masks.shape[0] + M = masks.shape[-1] + pad2 = 2 * padding + scale = float(M + pad2) / M + padded_masks = masks.new_zeros((B, M + pad2, M + pad2)) + padded_masks[:, padding:-padding, padding:-padding] = masks + return padded_masks, scale + + +def scale_boxes(boxes, scale): + """ + Args: + boxes (tensor): A tensor of shape (B, 4) representing B boxes with 4 + coords representing the corners x0, y0, x1, y1, + scale (float): The box scaling factor. + + Returns: + Scaled boxes. + """ + w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5 + h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5 + x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5 + y_c = (boxes[:, 3] + boxes[:, 1]) * 0.5 + + w_half *= scale + h_half *= scale + + scaled_boxes = torch.zeros_like(boxes) + scaled_boxes[:, 0] = x_c - w_half + scaled_boxes[:, 2] = x_c + w_half + scaled_boxes[:, 1] = y_c - h_half + scaled_boxes[:, 3] = y_c + h_half + return scaled_boxes + + +@torch.jit.script_if_tracing +def _paste_masks_tensor_shape( + masks: torch.Tensor, + boxes: torch.Tensor, + image_shape: Tuple[torch.Tensor, torch.Tensor], + threshold: float = 0.5, +): + """ + A wrapper of paste_masks_in_image where image_shape is Tensor. + During tracing, shapes might be tensors instead of ints. The Tensor->int + conversion should be scripted rather than traced. 
+ """ + return paste_masks_in_image(masks, boxes, (int(image_shape[0]), int(image_shape[1])), threshold) diff --git a/mmcv/layers/nms.py b/mmcv/layers/nms.py new file mode 100644 index 0000000..1019e7f --- /dev/null +++ b/mmcv/layers/nms.py @@ -0,0 +1,144 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. + +import torch +from torchvision.ops import boxes as box_ops +from torchvision.ops import nms # noqa . for compatibility + + +def batched_nms( + boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float +): + """ + Same as torchvision.ops.boxes.batched_nms, but with float(). + """ + assert boxes.shape[-1] == 4 + # Note: Torchvision already has a strategy (https://github.com/pytorch/vision/issues/1311) + # to decide whether to use coordinate trick or for loop to implement batched_nms. So we + # just call it directly. + # Fp16 does not have enough range for batched NMS, so adding float(). + return box_ops.batched_nms(boxes.float(), scores, idxs, iou_threshold) + + +# Note: this function (nms_rotated) might be moved into +# torchvision/ops/boxes.py in the future +def nms_rotated(boxes: torch.Tensor, scores: torch.Tensor, iou_threshold: float): + """ + Performs non-maximum suppression (NMS) on the rotated boxes according + to their intersection-over-union (IoU). + + Rotated NMS iteratively removes lower scoring rotated boxes which have an + IoU greater than iou_threshold with another (higher scoring) rotated box. + + Note that RotatedBox (5, 3, 4, 2, -90) covers exactly the same region as + RotatedBox (5, 3, 4, 2, 90) does, and their IoU will be 1. However, they + can be representing completely different objects in certain tasks, e.g., OCR. + + As for the question of whether rotated-NMS should treat them as faraway boxes + even though their IOU is 1, it depends on the application and/or ground truth annotation. + + As an extreme example, consider a single character v and the square box around it. + + If the angle is 0 degree, the object (text) would be read as 'v'; + + If the angle is 90 degrees, the object (text) would become '>'; + + If the angle is 180 degrees, the object (text) would become '^'; + + If the angle is 270/-90 degrees, the object (text) would become '<' + + All of these cases have IoU of 1 to each other, and rotated NMS that only + uses IoU as criterion would only keep one of them with the highest score - + which, practically, still makes sense in most cases because typically + only one of theses orientations is the correct one. Also, it does not matter + as much if the box is only used to classify the object (instead of transcribing + them with a sequential OCR recognition model) later. + + On the other hand, when we use IoU to filter proposals that are close to the + ground truth during training, we should definitely take the angle into account if + we know the ground truth is labeled with the strictly correct orientation (as in, + upside-down words are annotated with -180 degrees even though they can be covered + with a 0/90/-90 degree box, etc.) + + The way the original dataset is annotated also matters. For example, if the dataset + is a 4-point polygon dataset that does not enforce ordering of vertices/orientation, + we can estimate a minimum rotated bounding box to this polygon, but there's no way + we can tell the correct angle with 100% confidence (as shown above, there could be 4 different + rotated boxes, with angles differed by 90 degrees to each other, covering the exactly + same region). 
In that case we have to just use IoU to determine the box + proximity (as many detection benchmarks (even for text) do) unless there're other + assumptions we can make (like width is always larger than height, or the object is not + rotated by more than 90 degrees CCW/CW, etc.) + + In summary, not considering angles in rotated NMS seems to be a good option for now, + but we should be aware of its implications. + + Args: + boxes (Tensor[N, 5]): Rotated boxes to perform NMS on. They are expected to be in + (x_center, y_center, width, height, angle_degrees) format. + scores (Tensor[N]): Scores for each one of the rotated boxes + iou_threshold (float): Discards all overlapping rotated boxes with IoU < iou_threshold + + Returns: + keep (Tensor): int64 tensor with the indices of the elements that have been kept + by Rotated NMS, sorted in decreasing order of scores + """ + return torch.ops.detectron2.nms_rotated(boxes, scores, iou_threshold) + + +# Note: this function (batched_nms_rotated) might be moved into +# torchvision/ops/boxes.py in the future + + +@torch.jit.script_if_tracing +def batched_nms_rotated( + boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float +): + """ + Performs non-maximum suppression in a batched fashion. + + Each index value correspond to a category, and NMS + will not be applied between elements of different categories. + + Args: + boxes (Tensor[N, 5]): + boxes where NMS will be performed. They + are expected to be in (x_ctr, y_ctr, width, height, angle_degrees) format + scores (Tensor[N]): + scores for each one of the boxes + idxs (Tensor[N]): + indices of the categories for each one of the boxes. + iou_threshold (float): + discards all overlapping boxes + with IoU < iou_threshold + + Returns: + Tensor: + int64 tensor with the indices of the elements that have been kept + by NMS, sorted in decreasing order of scores + """ + assert boxes.shape[-1] == 5 + + if boxes.numel() == 0: + return torch.empty((0,), dtype=torch.int64, device=boxes.device) + boxes = boxes.float() # fp16 does not have enough range for batched NMS + # Strategy: in order to perform NMS independently per class, + # we add an offset to all the boxes. The offset is dependent + # only on the class idx, and is large enough so that boxes + # from different classes do not overlap + + # Note that batched_nms in torchvision/ops/boxes.py only uses max_coordinate, + # which won't handle negative coordinates correctly. + # Here by using min_coordinate we can make sure the negative coordinates are + # correctly handled. + max_coordinate = ( + torch.max(boxes[:, 0], boxes[:, 1]) + torch.max(boxes[:, 2], boxes[:, 3]) / 2 + ).max() + min_coordinate = ( + torch.min(boxes[:, 0], boxes[:, 1]) - torch.max(boxes[:, 2], boxes[:, 3]) / 2 + ).min() + offsets = idxs.to(boxes) * (max_coordinate - min_coordinate + 1) + boxes_for_nms = boxes.clone() # avoid modifying the original values in boxes + boxes_for_nms[:, :2] += offsets[:, None] + keep = nms_rotated(boxes_for_nms, scores, iou_threshold) + return keep diff --git a/mmcv/layers/roi_align.py b/mmcv/layers/roi_align.py new file mode 100644 index 0000000..163462e --- /dev/null +++ b/mmcv/layers/roi_align.py @@ -0,0 +1,74 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
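+
+# A minimal usage sketch of the ROIAlign module defined below (shapes and
+# values are illustrative, not taken from this repo):
+#
+#   pool = ROIAlign(output_size=(7, 7), spatial_scale=1 / 16, sampling_ratio=0)
+#   feats = torch.randn(2, 256, 50, 50)            # NCHW feature map
+#   rois = torch.tensor([[0., 4., 4., 40., 40.]])  # (batch_idx, x1, y1, x2, y2)
+#   out = pool(feats, rois)                        # -> (1, 256, 7, 7)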
+from torch import nn +from torchvision.ops import roi_align + + +# NOTE: torchvision's RoIAlign has a different default aligned=False +class ROIAlign(nn.Module): + def __init__(self, output_size, spatial_scale, sampling_ratio, aligned=True): + """ + Args: + output_size (tuple): h, w + spatial_scale (float): scale the input boxes by this number + sampling_ratio (int): number of inputs samples to take for each output + sample. 0 to take samples densely. + aligned (bool): if False, use the legacy implementation in + Detectron. If True, align the results more perfectly. + + Note: + The meaning of aligned=True: + + Given a continuous coordinate c, its two neighboring pixel indices (in our + pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example, + c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled + from the underlying signal at continuous coordinates 0.5 and 1.5). But the original + roi_align (aligned=False) does not subtract the 0.5 when computing neighboring + pixel indices and therefore it uses pixels with a slightly incorrect alignment + (relative to our pixel model) when performing bilinear interpolation. + + With `aligned=True`, + we first appropriately scale the ROI and then shift it by -0.5 + prior to calling roi_align. This produces the correct neighbors; see + detectron2/tests/test_roi_align.py for verification. + + The difference does not make a difference to the model's performance if + ROIAlign is used together with conv layers. + """ + super().__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + self.sampling_ratio = sampling_ratio + self.aligned = aligned + + from torchvision import __version__ + + version = tuple(int(x) for x in __version__.split(".")[:2]) + # https://github.com/pytorch/vision/pull/2438 + assert version >= (0, 7), "Require torchvision >= 0.7" + + def forward(self, input, rois): + """ + Args: + input: NCHW images + rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy. + """ + assert rois.dim() == 2 and rois.size(1) == 5 + if input.is_quantized: + input = input.dequantize() + return roi_align( + input, + rois.to(dtype=input.dtype), + self.output_size, + self.spatial_scale, + self.sampling_ratio, + self.aligned, + ) + + def __repr__(self): + tmpstr = self.__class__.__name__ + "(" + tmpstr += "output_size=" + str(self.output_size) + tmpstr += ", spatial_scale=" + str(self.spatial_scale) + tmpstr += ", sampling_ratio=" + str(self.sampling_ratio) + tmpstr += ", aligned=" + str(self.aligned) + tmpstr += ")" + return tmpstr diff --git a/mmcv/layers/roi_align_rotated.py b/mmcv/layers/roi_align_rotated.py new file mode 100644 index 0000000..2a52399 --- /dev/null +++ b/mmcv/layers/roi_align_rotated.py @@ -0,0 +1,100 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
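+
+# A minimal usage sketch of the ROIAlignRotated module defined below
+# (illustrative values; rois carry an extra rotation column):
+#
+#   pool = ROIAlignRotated(output_size=(7, 7), spatial_scale=1 / 16, sampling_ratio=0)
+#   rois = torch.tensor([[0., 24., 24., 32., 16., 30.]])  # (batch_idx, x_ctr, y_ctr, w, h, angle_deg)
+#   out = pool(torch.randn(1, 256, 50, 50), rois)         # -> (1, 256, 7, 7)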
+import torch +from torch import nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair + + +class _ROIAlignRotated(Function): + @staticmethod + def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio): + ctx.save_for_backward(roi) + ctx.output_size = _pair(output_size) + ctx.spatial_scale = spatial_scale + ctx.sampling_ratio = sampling_ratio + ctx.input_shape = input.size() + output = torch.ops.detectron2.roi_align_rotated_forward( + input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio + ) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + (rois,) = ctx.saved_tensors + output_size = ctx.output_size + spatial_scale = ctx.spatial_scale + sampling_ratio = ctx.sampling_ratio + bs, ch, h, w = ctx.input_shape + grad_input = torch.ops.detectron2.roi_align_rotated_backward( + grad_output, + rois, + spatial_scale, + output_size[0], + output_size[1], + bs, + ch, + h, + w, + sampling_ratio, + ) + return grad_input, None, None, None, None, None + + +roi_align_rotated = _ROIAlignRotated.apply + + +class ROIAlignRotated(nn.Module): + def __init__(self, output_size, spatial_scale, sampling_ratio): + """ + Args: + output_size (tuple): h, w + spatial_scale (float): scale the input boxes by this number + sampling_ratio (int): number of inputs samples to take for each output + sample. 0 to take samples densely. + + Note: + ROIAlignRotated supports continuous coordinate by default: + Given a continuous coordinate c, its two neighboring pixel indices (in our + pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example, + c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled + from the underlying signal at continuous coordinates 0.5 and 1.5). + """ + super(ROIAlignRotated, self).__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + self.sampling_ratio = sampling_ratio + + def forward(self, input, rois): + """ + Args: + input: NCHW images + rois: Bx6 boxes. First column is the index into N. + The other 5 columns are (x_ctr, y_ctr, width, height, angle_degrees). + """ + assert rois.dim() == 2 and rois.size(1) == 6 + orig_dtype = input.dtype + if orig_dtype == torch.float16: + input = input.float() + rois = rois.float() + output_size = _pair(self.output_size) + + # Scripting for Autograd is currently unsupported. + # This is a quick fix without having to rewrite code on the C++ side + if torch.jit.is_scripting() or torch.jit.is_tracing(): + return torch.ops.detectron2.roi_align_rotated_forward( + input, rois, self.spatial_scale, output_size[0], output_size[1], self.sampling_ratio + ).to(dtype=orig_dtype) + + return roi_align_rotated( + input, rois, self.output_size, self.spatial_scale, self.sampling_ratio + ).to(dtype=orig_dtype) + + def __repr__(self): + tmpstr = self.__class__.__name__ + "(" + tmpstr += "output_size=" + str(self.output_size) + tmpstr += ", spatial_scale=" + str(self.spatial_scale) + tmpstr += ", sampling_ratio=" + str(self.sampling_ratio) + tmpstr += ")" + return tmpstr diff --git a/mmcv/layers/rotated_boxes.py b/mmcv/layers/rotated_boxes.py new file mode 100644 index 0000000..03f73b3 --- /dev/null +++ b/mmcv/layers/rotated_boxes.py @@ -0,0 +1,21 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
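+
+# A minimal usage sketch (illustrative values; requires the compiled
+# detectron2 custom ops):
+#
+#   boxes1 = torch.tensor([[10., 10., 8., 4., 0.]])   # (x_ctr, y_ctr, w, h, angle_deg)
+#   boxes2 = torch.tensor([[10., 10., 8., 4., 90.]])
+#   iou = pairwise_iou_rotated(boxes1, boxes2)        # -> Tensor[1, 1]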
+from __future__ import absolute_import, division, print_function, unicode_literals +import torch + + +def pairwise_iou_rotated(boxes1, boxes2): + """ + Return intersection-over-union (Jaccard index) of boxes. + + Both sets of boxes are expected to be in + (x_center, y_center, width, height, angle) format. + + Arguments: + boxes1 (Tensor[N, 5]) + boxes2 (Tensor[M, 5]) + + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + return torch.ops.detectron2.box_iou_rotated(boxes1, boxes2) diff --git a/mmcv/layers/shape_spec.py b/mmcv/layers/shape_spec.py new file mode 100644 index 0000000..8dac3c5 --- /dev/null +++ b/mmcv/layers/shape_spec.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. +from dataclasses import dataclass +from typing import Optional + + +@dataclass +class ShapeSpec: + """ + A simple structure that contains basic shape specification about a tensor. + It is often used as the auxiliary inputs/outputs of models, + to complement the lack of shape inference ability among pytorch modules. + """ + + channels: Optional[int] = None + height: Optional[int] = None + width: Optional[int] = None + stride: Optional[int] = None diff --git a/mmcv/layers/wrappers.py b/mmcv/layers/wrappers.py new file mode 100644 index 0000000..c9d63f1 --- /dev/null +++ b/mmcv/layers/wrappers.py @@ -0,0 +1,162 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +""" +Wrappers around on some nn functions, mainly to support empty tensors. + +Ideally, add support directly in PyTorch to empty tensors in those functions. + +These can be removed once https://github.com/pytorch/pytorch/issues/12013 +is implemented +""" + +import warnings +from typing import List, Optional +import torch +from torch.nn import functional as F + +TORCH_VERSION = tuple(int(x) for x in torch.__version__.split(".")[:2]) + + +def shapes_to_tensor(x: List[int], device: Optional[torch.device] = None) -> torch.Tensor: + """ + Turn a list of integer scalars or integer Tensor scalars into a vector, + in a way that's both traceable and scriptable. + + In tracing, `x` should be a list of scalar Tensor, so the output can trace to the inputs. + In scripting or eager, `x` should be a list of int. + """ + if torch.jit.is_scripting(): + return torch.as_tensor(x, device=device) + if torch.jit.is_tracing(): + assert all( + [isinstance(t, torch.Tensor) for t in x] + ), "Shape should be tensor during tracing!" + # as_tensor should not be used in tracing because it records a constant + ret = torch.stack(x) + if ret.device != device: # avoid recording a hard-coded device if not necessary + ret = ret.to(device=device) + return ret + return torch.as_tensor(x, device=device) + + +def check_if_dynamo_compiling(): + if TORCH_VERSION >= (1, 14): + from torch._dynamo import is_compiling + + return is_compiling() + else: + return False + + +def cat(tensors: List[torch.Tensor], dim: int = 0): + """ + Efficient version of torch.cat that avoids a copy if there is only a single element in a list + """ + assert isinstance(tensors, (list, tuple)) + if len(tensors) == 1: + return tensors[0] + return torch.cat(tensors, dim) + + +def empty_input_loss_func_wrapper(loss_func): + def wrapped_loss_func(input, target, *, reduction="mean", **kwargs): + """ + Same as `loss_func`, but returns 0 (instead of nan) for empty inputs. 
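+
+        A behaviour sketch (shapes only): with an empty batch and the default
+        ``reduction="mean"``, the wrapped loss returns a differentiable zero
+        instead of nan::
+
+            cross_entropy(torch.empty(0, 10), torch.empty(0, dtype=torch.long))
+            # -> tensor(0.)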
+ """ + if target.numel() == 0 and reduction == "mean": + return input.sum() * 0.0 # connect the gradient + return loss_func(input, target, reduction=reduction, **kwargs) + + return wrapped_loss_func + + +cross_entropy = empty_input_loss_func_wrapper(F.cross_entropy) + + +class _NewEmptyTensorOp(torch.autograd.Function): + @staticmethod + def forward(ctx, x, new_shape): + ctx.shape = x.shape + return x.new_empty(new_shape) + + @staticmethod + def backward(ctx, grad): + shape = ctx.shape + return _NewEmptyTensorOp.apply(grad, shape), None + + +class Conv2d(torch.nn.Conv2d): + """ + A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features. + """ + + def __init__(self, *args, **kwargs): + """ + Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`: + + Args: + norm (nn.Module, optional): a normalization layer + activation (callable(Tensor) -> Tensor): a callable activation function + + It assumes that norm layer is used before activation. + """ + norm = kwargs.pop("norm", None) + activation = kwargs.pop("activation", None) + super().__init__(*args, **kwargs) + + self.norm = norm + self.activation = activation + + def forward(self, x): + # torchscript does not support SyncBatchNorm yet + # https://github.com/pytorch/pytorch/issues/40507 + # and we skip these codes in torchscript since: + # 1. currently we only support torchscript in evaluation mode + # 2. features needed by exporting module to torchscript are added in PyTorch 1.6 or + # later version, `Conv2d` in these PyTorch versions has already supported empty inputs. + if not torch.jit.is_scripting(): + # Dynamo doesn't support context managers yet + is_dynamo_compiling = check_if_dynamo_compiling() + if not is_dynamo_compiling: + with warnings.catch_warnings(record=True): + if x.numel() == 0 and self.training: + # https://github.com/pytorch/pytorch/issues/12013 + assert not isinstance( + self.norm, torch.nn.SyncBatchNorm + ), "SyncBatchNorm does not support empty inputs!" + + x = F.conv2d( + x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups + ) + if self.norm is not None: + x = self.norm(x) + if self.activation is not None: + x = self.activation(x) + return x + + +ConvTranspose2d = torch.nn.ConvTranspose2d +BatchNorm2d = torch.nn.BatchNorm2d +interpolate = F.interpolate +Linear = torch.nn.Linear + + +def nonzero_tuple(x): + """ + A 'as_tuple=True' version of torch.nonzero to support torchscript. + because of https://github.com/pytorch/pytorch/issues/38718 + """ + if torch.jit.is_scripting(): + if x.dim() == 0: + return x.unsqueeze(0).nonzero().unbind(1) + return x.nonzero().unbind(1) + else: + return x.nonzero(as_tuple=True) + + +@torch.jit.script_if_tracing +def move_device_like(src: torch.Tensor, dst: torch.Tensor) -> torch.Tensor: + """ + Tracing friendly way to cast tensor to another tensor's device. Device will be treated + as constant during tracing, scripting the casting process as whole can workaround this issue. 
+ """ + return src.to(dst.device) diff --git a/mmcv/losses/__init__.py b/mmcv/losses/__init__.py new file mode 100644 index 0000000..ed8a93b --- /dev/null +++ b/mmcv/losses/__init__.py @@ -0,0 +1,7 @@ +from .track_loss import ClipMatcher +from .dice_loss import DiceLoss +from .occflow_loss import * +from .traj_loss import TrajLoss +from .planning_loss import PlanningLoss, CollisionLoss +from .fvcore_smooth_l1_loss import smooth_l1_loss +from .focal_loss import sigmoid_focal_loss \ No newline at end of file diff --git a/mmcv/losses/dice_loss.py b/mmcv/losses/dice_loss.py new file mode 100644 index 0000000..39dd231 --- /dev/null +++ b/mmcv/losses/dice_loss.py @@ -0,0 +1,61 @@ +import torch +import torch +import torch.nn as nn + +from mmcv.models.losses.utils import weighted_loss +from mmcv.models.builder import LOSSES + +@weighted_loss +def dice_loss(input, target,mask=None,eps=0.001): + N,H,W = input.shape + + input = input.contiguous().view(N, H*W) + target = target.contiguous().view(N, H*W).float() + if mask is not None: + mask = mask.contiguous().view(N, H*W).float() + input = input * mask + target = target * mask + a = torch.sum(input * target, 1) + b = torch.sum(input * input, 1) + eps + c = torch.sum(target * target, 1) + eps + d = (2 * a) / (b + c) + return 1 - d + +@LOSSES.register_module() +class DiceLoss(nn.Module): + + def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): + super(DiceLoss, self).__init__() + self.eps = eps + self.reduction = reduction + self.loss_weight = loss_weight + self.count = 0 + def forward(self, + pred, + target, + weight=None, + mask=None, + avg_factor=None, + reduction_override=None, + **kwargs): + + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + #if weight is not None and weight.dim() > 1: + # TODO: remove this in the future + # reduce the weight of shape (n,w,h) to (n,) to match the + # giou_loss of shape (n,) + #assert weight.shape == pred.shape + #weight = weight.mean((-2,-1)) + loss = self.loss_weight * dice_loss( + pred, + target, + weight, + mask=mask, + eps=self.eps, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + #print('DiceLoss',loss, avg_factor) + return loss diff --git a/mmcv/losses/focal_loss.py b/mmcv/losses/focal_loss.py new file mode 100644 index 0000000..d4f357c --- /dev/null +++ b/mmcv/losses/focal_loss.py @@ -0,0 +1,105 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +# pyre-strict + +import torch +from torch.nn import functional as F + + +def sigmoid_focal_loss( + inputs: torch.Tensor, + targets: torch.Tensor, + alpha: float = -1, + gamma: float = 2, + reduction: str = "none", +) -> torch.Tensor: + """ + Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). + alpha: (optional) Weighting factor in range (0,1) to balance + positive vs negative examples. Default = -1 (no weighting). + gamma: Exponent of the modulating factor (1 - p_t) to + balance easy vs hard examples. + reduction: 'none' | 'mean' | 'sum' + 'none': No reduction will be applied to the output. + 'mean': The output will be averaged. + 'sum': The output will be summed. + Returns: + Loss tensor with the reduction option applied. 
+ """ + inputs = inputs.float() + targets = targets.float() + p = torch.sigmoid(inputs) + ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") + p_t = p * targets + (1 - p) * (1 - targets) + loss = ce_loss * ((1 - p_t) ** gamma) + + if alpha >= 0: + alpha_t = alpha * targets + (1 - alpha) * (1 - targets) + loss = alpha_t * loss + + if reduction == "mean": + loss = loss.mean() + elif reduction == "sum": + loss = loss.sum() + + return loss + + +# pyre-fixme[9]: sigmoid_focal_loss_jit has type `ScriptModule`; used as +# `ScriptFunction[..., typing.Any]`. +sigmoid_focal_loss_jit: "torch.jit.ScriptModule" = torch.jit.script(sigmoid_focal_loss) + + +def sigmoid_focal_loss_star( + inputs: torch.Tensor, + targets: torch.Tensor, + alpha: float = -1, + gamma: float = 1, + reduction: str = "none", +) -> torch.Tensor: + """ + FL* described in RetinaNet paper Appendix: https://arxiv.org/abs/1708.02002. + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). + alpha: (optional) Weighting factor in range (0,1) to balance + positive vs negative examples. Default = -1 (no weighting). + gamma: Gamma parameter described in FL*. Default = 1 (no weighting). + reduction: 'none' | 'mean' | 'sum' + 'none': No reduction will be applied to the output. + 'mean': The output will be averaged. + 'sum': The output will be summed. + Returns: + Loss tensor with the reduction option applied. + """ + inputs = inputs.float() + targets = targets.float() + shifted_inputs = gamma * (inputs * (2 * targets - 1)) + loss = -(F.logsigmoid(shifted_inputs)) / gamma + + if alpha >= 0: + alpha_t = alpha * targets + (1 - alpha) * (1 - targets) + loss *= alpha_t + + if reduction == "mean": + loss = loss.mean() + elif reduction == "sum": + loss = loss.sum() + + return loss + + +# pyre-fixme[9]: sigmoid_focal_loss_star_jit has type `ScriptModule`; used as +# `ScriptFunction[..., typing.Any]`. +sigmoid_focal_loss_star_jit: "torch.jit.ScriptModule" = torch.jit.script( + sigmoid_focal_loss_star +) \ No newline at end of file diff --git a/mmcv/losses/fvcore_smooth_l1_loss.py b/mmcv/losses/fvcore_smooth_l1_loss.py new file mode 100644 index 0000000..df4f541 --- /dev/null +++ b/mmcv/losses/fvcore_smooth_l1_loss.py @@ -0,0 +1,76 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +# pyre-strict + +import torch + + +def smooth_l1_loss( + input: torch.Tensor, target: torch.Tensor, beta: float, reduction: str = "none" +) -> torch.Tensor: + """ + Smooth L1 loss defined in the Fast R-CNN paper as: + :: + | 0.5 * x ** 2 / beta if abs(x) < beta + smoothl1(x) = | + | abs(x) - 0.5 * beta otherwise, + + where x = input - target. + + Smooth L1 loss is related to Huber loss, which is defined as: + :: + | 0.5 * x ** 2 if abs(x) < beta + huber(x) = | + | beta * (abs(x) - 0.5 * beta) otherwise + + Smooth L1 loss is equal to huber(x) / beta. This leads to the following + differences: + + - As beta -> 0, Smooth L1 loss converges to L1 loss, while Huber loss + converges to a constant 0 loss. + - As beta -> +inf, Smooth L1 converges to a constant 0 loss, while Huber loss + converges to L2 loss. + - For Smooth L1 loss, as beta varies, the L1 segment of the loss has a constant + slope of 1. For Huber loss, the slope of the L1 segment is beta. 
+ + Smooth L1 loss can be seen as exactly L1 loss, but with the abs(x) < beta + portion replaced with a quadratic function such that at abs(x) = beta, its + slope is 1. The quadratic segment smooths the L1 loss near x = 0. + + Args: + input (Tensor): input tensor of any shape + target (Tensor): target value tensor with the same shape as input + beta (float): L1 to L2 change point. + For beta values < 1e-5, L1 loss is computed. + reduction: 'none' | 'mean' | 'sum' + 'none': No reduction will be applied to the output. + 'mean': The output will be averaged. + 'sum': The output will be summed. + + Returns: + The loss with the reduction option applied. + + Note: + PyTorch's builtin "Smooth L1 loss" implementation does not actually + implement Smooth L1 loss, nor does it implement Huber loss. It implements + the special case of both in which they are equal (beta=1). + See: https://pytorch.org/docs/stable/nn.html#torch.nn.SmoothL1Loss. + """ + if beta < 1e-5: + # if beta == 0, then torch.where will result in nan gradients when + # the chain rule is applied due to pytorch implementation details + # (the False branch "0.5 * n ** 2 / 0" has an incoming gradient of + # zeros, rather than "no gradient"). To avoid this issue, we define + # small values of beta to be exactly l1 loss. + loss = torch.abs(input - target) + else: + n = torch.abs(input - target) + cond = n < beta + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. + loss = torch.where(cond, 0.5 * n**2 / beta, n - 0.5 * beta) + + if reduction == "mean": + loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() + elif reduction == "sum": + loss = loss.sum() + return loss \ No newline at end of file diff --git a/mmcv/losses/occflow_loss.py b/mmcv/losses/occflow_loss.py new file mode 100644 index 0000000..ff80306 --- /dev/null +++ b/mmcv/losses/occflow_loss.py @@ -0,0 +1,226 @@ +#---------------------------------------------------------------------------------# +# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156) # +# Source code: https://github.com/OpenDriveLab/UniAD # +# Copyright (c) OpenDriveLab. All rights reserved. 
# +# Modified from Fiery (https://github.com/wayveai/fiery) # +#---------------------------------------------------------------------------------# +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange +from mmcv.models.builder import LOSSES +from mmcv.models.losses.utils import weight_reduce_loss + +@LOSSES.register_module() +class FieryBinarySegmentationLoss(nn.Module): + def __init__(self, use_top_k=False, top_k_ratio=1.0, future_discount=1.0, loss_weight=1.0, ignore_index=255): + super().__init__() + self.use_top_k = use_top_k + self.top_k_ratio = top_k_ratio + self.future_discount = future_discount + self.loss_weight = loss_weight + self.ignore_index = ignore_index + + def forward(self, prediction, target, frame_mask=None): + n_gt, s, h, w = prediction.size() + assert prediction.size() == target.size(), f"{prediction.size()}, {target.size()}" + + # Deal target > 1 (ignore_index) + keep_mask = (target.long() != self.ignore_index).float() + target = target * keep_mask + + loss = F.binary_cross_entropy_with_logits( + prediction, + target.float(), + reduction='none', + ) + assert loss.size() == prediction.size(), f"{loss.size()}, {prediction.size()}" + + # Deal ignore_index + if self.ignore_index is not None: + # keep_mask = (target.long() != self.ignore_index).float() + loss = loss * keep_mask + + # Filter out losses of invalid future sample + if frame_mask is not None: + assert frame_mask.size(0) == s, f"{frame_mask.size()}" + if frame_mask.sum().item() == 0: + return prediction.sum() * 0. + frame_mask = frame_mask.view(1, s, 1, 1) + loss = loss * frame_mask.float() + + future_discounts = self.future_discount ** torch.arange(s, device=loss.device, dtype=loss.dtype) + future_discounts = future_discounts.view(1, s, 1, 1) + loss = loss * future_discounts + + loss = loss.view(n_gt, s, -1) + if self.use_top_k: + # Penalises the top-k hardest pixels + k = int(self.top_k_ratio * loss.shape[2]) + loss, _ = torch.sort(loss, dim=2, descending=True) + loss = loss[:, :, :k] + + return self.loss_weight * torch.mean(loss) + +def dice_loss(pred, + target, + weight=None, + eps=1e-3, + reduction='mean', + naive_dice=False, + avg_factor=None, + ignore_index=None, + frame_mask=None): + """Calculate dice loss, there are two forms of dice loss is supported: + + - the one proposed in `V-Net: Fully Convolutional Neural + Networks for Volumetric Medical Image Segmentation + `_. + - the dice loss in which the power of the number in the + denominator is the first power instead of the second + power. + + Args: + pred (torch.Tensor): The prediction, has a shape (n, *) + target (torch.Tensor): The learning label of the prediction, + shape (n, *), same shape of pred. + weight (torch.Tensor, optional): The weight of loss for each + prediction, has a shape (n,). Defaults to None. + eps (float): Avoid dividing by zero. Default: 1e-3. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. + Options are "none", "mean" and "sum". + naive_dice (bool, optional): If false, use the dice + loss defined in the V-Net paper, otherwise, use the + naive dice loss in which the power of the number in the + denominator is the first power instead of the second + power.Defaults to False. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. 
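+        ignore_index (int, optional): Label value that marks invalid pixels;
+            such pixels are zeroed out of both ``pred`` and ``target`` before
+            the dice computation. Defaults to None.
+        frame_mask (torch.Tensor, optional): Per-frame validity mask of shape
+            (s,); frames whose mask is 0 contribute no loss, and an all-zero
+            mask returns a zero loss. Defaults to None.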
+ """ + n, s, h, w = pred.size() + assert pred.size() == target.size(), \ + f"{pred.size()}, {target.size()}" + + # Ignore invalid index(255) + if ignore_index is not None: + keep_mask = (target.long() != ignore_index) + target = target * keep_mask.float() + pred = pred * keep_mask.float() + + # Ignore invalid frame + if frame_mask is not None: + assert frame_mask.size(0) == s, f"{frame_mask.size()}" + if frame_mask.sum().item() == 0: + return pred.sum() * 0. + frame_mask = frame_mask.view(1, s, 1, 1) + target = target * frame_mask.float() + pred = pred * frame_mask.float() + + input = pred.flatten(1) + target = target.flatten(1).float() + + a = torch.sum(input * target, 1) + if naive_dice: + b = torch.sum(input, 1) + c = torch.sum(target, 1) + d = (2 * a + eps) / (b + c + eps) + else: + b = torch.sum(input * input, 1) + eps + c = torch.sum(target * target, 1) + eps + d = (2 * a) / (b + c) + + loss = 1 - d + if weight is not None: + assert weight.ndim == loss.ndim + assert len(weight) == len(pred) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + +@LOSSES.register_module() +class DiceLossWithMasks(nn.Module): + def __init__(self, + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=False, + loss_weight=1.0, + ignore_index=255, + eps=1e-3): + """Compute dice loss. + + Args: + use_sigmoid (bool, optional): Whether to the prediction is + used for sigmoid or softmax. Defaults to True. + activate (bool): Whether to activate the predictions inside, + this will disable the inside sigmoid operation. + Defaults to True. + reduction (str, optional): The method used + to reduce the loss. Options are "none", + "mean" and "sum". Defaults to 'mean'. + naive_dice (bool, optional): If false, use the dice + loss defined in the V-Net paper, otherwise, use the + naive dice loss in which the power of the number in the + denominator is the first power instead of the second + power. Defaults to False. + loss_weight (float, optional): Weight of loss. Defaults to 1.0. + eps (float): Avoid dividing by zero. Defaults to 1e-3. + """ + + super(DiceLossWithMasks, self).__init__() + self.use_sigmoid = use_sigmoid + self.reduction = reduction + self.naive_dice = naive_dice + self.loss_weight = loss_weight + self.eps = eps + self.activate = activate + self.ignore_index = ignore_index + + def forward(self, + pred, + target, + weight=None, + reduction_override=None, + avg_factor=None, + frame_mask=None + ): + """Forward function. + + Args: + pred (torch.Tensor): The prediction, has a shape (n, *). + target (torch.Tensor): The label of the prediction, + shape (n, *), same shape of pred. + weight (torch.Tensor, optional): The weight of loss for each + prediction, has a shape (n,). Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Options are "none", "mean" and "sum". 
+ + Returns: + torch.Tensor: The calculated loss + """ + + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + + if self.activate: + if self.use_sigmoid: + pred = pred.sigmoid() + else: + raise NotImplementedError + + loss = self.loss_weight * dice_loss( + pred, + target, + weight, + eps=self.eps, + reduction=reduction, + naive_dice=self.naive_dice, + avg_factor=avg_factor, + ignore_index=self.ignore_index, + frame_mask=frame_mask) + + return loss \ No newline at end of file diff --git a/mmcv/losses/planning_loss.py b/mmcv/losses/planning_loss.py new file mode 100644 index 0000000..6b6fbfc --- /dev/null +++ b/mmcv/losses/planning_loss.py @@ -0,0 +1,77 @@ +#---------------------------------------------------------------------------------# +# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156) # +# Source code: https://github.com/OpenDriveLab/UniAD # +# Copyright (c) OpenDriveLab. All rights reserved. # +#---------------------------------------------------------------------------------# + +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Tuple +import pickle +from mmcv.models import LOSSES + + +@LOSSES.register_module() +class PlanningLoss(nn.Module): + def __init__(self, loss_type='L2'): + super(PlanningLoss, self).__init__() + self.loss_type = loss_type + + def forward(self, sdc_traj, gt_sdc_fut_traj, mask): + err = sdc_traj[..., :2] - gt_sdc_fut_traj[..., :2] + err = torch.pow(err, exponent=2) + err = torch.sum(err, dim=-1) + err = torch.pow(err, exponent=0.5) + return torch.sum(err * mask)/(torch.sum(mask) + 1e-5) + + +@LOSSES.register_module() +class CollisionLoss(nn.Module): + def __init__(self, delta=0.5, weight=1.0): + super(CollisionLoss, self).__init__() + self.w = 1.85 + delta + self.h = 4.084 + delta + self.weight = weight + + def forward(self, sdc_traj_all, sdc_planning_gt, sdc_planning_gt_mask, future_gt_bbox): + # sdc_traj_all (1, 6, 2) + # sdc_planning_gt (1,6,3) + # sdc_planning_gt_mask (1, 6) + # future_gt_bbox 6x[lidarboxinstance] + n_futures = len(future_gt_bbox) + inter_sum = sdc_traj_all.new_zeros(1, ) + dump_sdc = [] + for i in range(n_futures): + if len(future_gt_bbox[i].tensor) > 0: + future_gt_bbox_corners = future_gt_bbox[i].corners[:, [0,3,4,7], :2] # (N, 8, 3) -> (N, 4, 2) only bev + # sdc_yaw = -sdc_planning_gt[0, i, 2].to(sdc_traj_all.dtype) - 1.5708 + sdc_yaw = sdc_planning_gt[0, i, 2].to(sdc_traj_all.dtype) + sdc_bev_box = self.to_corners([sdc_traj_all[0, i, 0], sdc_traj_all[0, i, 1], self.w, self.h, sdc_yaw]) + dump_sdc.append(sdc_bev_box.cpu().detach().numpy()) + for j in range(future_gt_bbox_corners.shape[0]): + inter_sum += self.inter_bbox(sdc_bev_box, future_gt_bbox_corners[j].to(sdc_traj_all.device)) + return inter_sum * self.weight + + def inter_bbox(self, corners_a, corners_b): + xa1, ya1 = torch.max(corners_a[:, 0]), torch.max(corners_a[:, 1]) + xa2, ya2 = torch.min(corners_a[:, 0]), torch.min(corners_a[:, 1]) + xb1, yb1 = torch.max(corners_b[:, 0]), torch.max(corners_b[:, 1]) + xb2, yb2 = torch.min(corners_b[:, 0]), torch.min(corners_b[:, 1]) + + xi1, yi1 = min(xa1, xb1), min(ya1, yb1) + xi2, yi2 = max(xa2, xb2), max(ya2, yb2) + intersect = max((xi1 - xi2), xi1.new_zeros(1, ).to(xi1.device)) * max((yi1 - yi2), xi1.new_zeros(1,).to(xi1.device)) + return intersect + + def to_corners(self, bbox): + x, y, w, l, theta = bbox + corners = torch.tensor([ + [w/2, -l/2], [w/2, l/2], [-w/2, l/2], 
[-w/2, -l/2]
+        ]).to(x.device)  # (4, 2)
+        rot_mat = torch.tensor(
+            [[torch.cos(theta), torch.sin(theta)],
+             [-torch.sin(theta), torch.cos(theta)]]
+        ).to(x.device)
+        new_corners = rot_mat @ corners.T + torch.tensor(bbox[:2])[:, None].to(x.device)
+        return new_corners.T
\ No newline at end of file
diff --git a/mmcv/losses/track_loss.py b/mmcv/losses/track_loss.py
new file mode 100644
index 0000000..603116f
--- /dev/null
+++ b/mmcv/losses/track_loss.py
@@ -0,0 +1,619 @@
+#---------------------------------------------------------------------------------#
+# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156) #
+# Source code: https://github.com/OpenDriveLab/UniAD #
+# Copyright (c) OpenDriveLab. All rights reserved. #
+# Modified from MOTR (https://github.com/megvii-research/MOTR) #
+#---------------------------------------------------------------------------------#
+
+import copy
+import math
+import numpy as np
+import torch
+import torch.nn.functional as F
+import torch.distributed as dist
+import torch.nn as nn
+from typing import List
+from mmcv.models.dense_heads.track_head_plugin import Instances
+from mmcv.core import build_assigner
+from mmcv.models import build_loss
+from mmcv.models.builder import LOSSES
+from mmcv.core import reduce_mean
+from mmcv.core.bbox.iou_calculators.iou3d_calculator import (
+    bbox_overlaps_nearest_3d as iou_3d, )
+from mmcv.core.bbox.util import denormalize_bbox
+
+
+def is_dist_avail_and_initialized():
+    if not dist.is_available():
+        return False
+    if not dist.is_initialized():
+        return False
+    return True
+
+
+def get_world_size():
+    if not is_dist_avail_and_initialized():
+        return 1
+    return dist.get_world_size()
+
+
+@torch.no_grad()
+def accuracy(output, target, topk=(1, )):
+    """Computes the precision@k for the specified values of k"""
+    if target.numel() == 0:
+        return [torch.zeros([], device=output.device)]
+    maxk = max(topk)
+    batch_size = target.size(0)
+
+    _, pred = output.topk(maxk, 1, True, True)
+    pred = pred.t()
+    correct = pred.eq(target.view(1, -1).expand_as(pred))
+
+    res = []
+    for k in topk:
+        correct_k = correct[:k].view(-1).float().sum(0)
+        res.append(correct_k.mul_(100.0 / batch_size))
+    return res
+
+
+@LOSSES.register_module()
+class ClipMatcher(nn.Module):
+    def __init__(
+        self,
+        num_classes,
+        weight_dict,
+        code_weights=[
+            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2, 0.2
+        ],
+        loss_past_traj_weight=1.0,
+        assigner=dict(
+            type="HungarianAssigner3D",
+            cls_cost=dict(type="FocalLossCost", weight=2.0),
+            reg_cost=dict(type="BBox3DL1Cost", weight=0.25),
+            pc_range=[-51.2, -51.2, -5.0, 51.2, 51.2, 3.0],
+        ),
+        loss_cls=dict(type="FocalLoss",
+                      use_sigmoid=True,
+                      gamma=2.0,
+                      alpha=0.25,
+                      loss_weight=2.0),
+        loss_bbox=dict(type="L1Loss", loss_weight=0.25),
+    ):
+        """Create the criterion.
+        Parameters:
+            num_classes: number of object categories, omitting the special no-object category
+            weight_dict: dict containing as key the names of the losses and as values their relative weight.
+ eos_coef: relative classification weight applied to the no-object category + """ + super().__init__() + self.num_classes = num_classes + self.matcher = build_assigner(assigner) + self.loss_cls = build_loss(loss_cls) + self.loss_bboxes = build_loss(loss_bbox) + self.loss_predictions = nn.SmoothL1Loss(reduction="none", beta=1.0) + self.register_buffer("code_weights", + torch.tensor(code_weights, requires_grad=False)) + + self.weight_dict = weight_dict + self.loss_past_traj_weight = loss_past_traj_weight + # self.losses = ['labels', 'boxes', 'cardinality'] + self.losses = ["labels", "boxes", "past_trajs"] + self.focal_loss = True + self.losses_dict = {} + self._current_frame_idx = 0 + + def _get_src_permutation_idx(self, indices): + # permute predictions following indices + batch_idx = torch.cat( + [torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) + src_idx = torch.cat([src for (src, _) in indices]) + return batch_idx, src_idx + + def _get_tgt_permutation_idx(self, indices): + # permute targets following indices + batch_idx = torch.cat( + [torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) + tgt_idx = torch.cat([tgt for (_, tgt) in indices]) + return batch_idx, tgt_idx + + def initialize_for_single_clip(self, gt_instances: List[Instances]): + self.gt_instances = gt_instances + self.num_samples = 0 + self.sample_device = None + self._current_frame_idx = 0 + self.losses_dict = {} + + def _step(self): + self._current_frame_idx += 1 + + def calc_loss_for_track_scores(self, track_instances: Instances): + frame_id = self._current_frame_idx - 1 + gt_instances = self.gt_instances[frame_id] + outputs = { + "pred_logits": track_instances.track_scores[None], + } + device = track_instances.track_scores.device + + num_tracks = len(track_instances) + src_idx = torch.arange(num_tracks, dtype=torch.long, device=device) + tgt_idx = (track_instances.matched_gt_idxes + ) # -1 for FP tracks and disappeared tracks + + track_losses = self.get_loss( + "labels", + outputs=outputs, + gt_instances=[gt_instances], + indices=[(src_idx, tgt_idx)], + num_boxes=1, + ) + self.losses_dict.update({ + "frame_{}_track_{}".format(frame_id, key): value + for key, value in track_losses.items() + }) + + def get_num_boxes(self, num_samples): + num_boxes = torch.as_tensor(num_samples, + dtype=torch.float, + device=self.sample_device) + if is_dist_avail_and_initialized(): + torch.distributed.all_reduce(num_boxes) + num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item() + return num_boxes + + @torch.no_grad() + def loss_cardinality(self, outputs, targets, indices): + """Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes + This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients + """ + pred_logits = outputs["pred_logits"] + device = pred_logits.device + tgt_lengths = torch.as_tensor([len(v.labels) for v in targets], + device=device) + # Count the number of predictions that are NOT "no-object" (which is the last class) + card_pred = (pred_logits.argmax(-1) != + pred_logits.shape[-1] - 1).sum(1) + card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) + losses = {"cardinality_error": card_err} + return losses + + def get_loss(self, loss, outputs, gt_instances, indices, **kwargs): + loss_map = { + "labels": self.loss_labels, + "cardinality": self.loss_cardinality, + "boxes": self.loss_boxes, + "past_trajs": self.loss_past_trajs, + } + assert loss in loss_map, f"do you really want to compute {loss} loss?" 
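+        # Dispatch by name to the task-specific term, e.g. "labels" ->
+        # self.loss_labels, "boxes" -> self.loss_boxes, "past_trajs" ->
+        # self.loss_past_trajs.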
+ return loss_map[loss](outputs, gt_instances, indices, **kwargs)
+
+ def loss_past_trajs(self, outputs, gt_instances: List[Instances],
+ indices: List[tuple]):
+ # We ignore the regression loss of the track-disappear slots.
+ # TODO: Make this filter process more elegant.
+ filtered_idx = []
+ for src_per_img, tgt_per_img in indices:
+ keep = tgt_per_img != -1
+ filtered_idx.append((src_per_img[keep], tgt_per_img[keep]))
+ indices = filtered_idx
+ idx = self._get_src_permutation_idx(indices)
+ src_trajs = outputs["pred_past_trajs"][idx]
+ target_trajs = torch.cat(
+ [
+ gt_per_img.past_traj[i]
+ for gt_per_img, (_, i) in zip(gt_instances, indices)
+ ],
+ dim=0,
+ )
+ target_trajs_mask = torch.cat(
+ [
+ gt_per_img.past_traj_mask[i]
+ for gt_per_img, (_, i) in zip(gt_instances, indices)
+ ],
+ dim=0,
+ )
+
+ # Padded targets are marked by obj_id == -1; they get no regression loss.
+ target_obj_ids = torch.cat(
+ [
+ gt_per_img.obj_ids[i]
+ for gt_per_img, (_, i) in zip(gt_instances, indices)
+ ],
+ dim=0,
+ )
+ # [num_matched]
+ mask = target_obj_ids != -1
+ loss_trajs = self.compute_past_traj_loss(src_trajs[mask], target_trajs[mask], target_trajs_mask[mask])
+ losses = {}
+ losses["loss_past_trajs"] = loss_trajs * self.loss_past_traj_weight
+ return losses
+
+ def compute_past_traj_loss(self, src, tgt, tgt_mask):
+ # Masked L1 loss, normalized by the number of valid (unmasked) entries.
+ loss = torch.abs(src - tgt) * tgt_mask
+ return torch.sum(loss) / (torch.sum(tgt_mask > 0) + 1e-5)
+
+ def loss_boxes(self, outputs, gt_instances: List[Instances],
+ indices: List[tuple]):
+ """Compute the bounding-box regression loss.
+ Each entry of `gt_instances` must provide a `boxes` tensor of shape
+ [num_target_boxes, box_dim]. Unlike the DETR loss this code was adapted
+ from, the boxes here are normalized 3D boxes, not 4-dim
+ (center_x, center_y, h, w) image-plane boxes.
+ """
+ # We ignore the regression loss of the track-disappear slots.
+ # TODO: Make this filter process more elegant.
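+ # Sketch of the filtering below (hypothetical values): with
+ #   indices = [(tensor([0, 3, 5]), tensor([2, -1, 1]))]
+ # the tgt == -1 entry marks a disappeared track, so the kept pair is
+ #   (tensor([0, 5]), tensor([2, 1]))
+ # and only those slots contribute to the box regression loss.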
+ filtered_idx = []
+ for src_per_img, tgt_per_img in indices:
+ keep = tgt_per_img != -1
+ filtered_idx.append((src_per_img[keep], tgt_per_img[keep]))
+ indices = filtered_idx
+ idx = self._get_src_permutation_idx(indices)
+ src_boxes = outputs["pred_boxes"][idx]
+ sdc_boxes = outputs["pred_sdc_boxes"][0, -1:]
+ target_sdc_boxes = gt_instances[0].sdc_boxes[:1]
+ target_boxes = torch.cat(
+ [
+ gt_per_img.boxes[i]
+ for gt_per_img, (_, i) in zip(gt_instances, indices)
+ ],
+ dim=0,
+ )
+
+ src_boxes = torch.cat([src_boxes, sdc_boxes], dim=0)
+ target_boxes = torch.cat([target_boxes, target_sdc_boxes], dim=0)
+
+ # Padded targets are marked by obj_id == -1; they get no regression loss.
+ target_obj_ids = torch.cat(
+ [
+ gt_per_img.obj_ids[i]
+ for gt_per_img, (_, i) in zip(gt_instances, indices)
+ ],
+ dim=0,
+ )
+ # [num_matched]
+
+ target_obj_ids = torch.cat([target_obj_ids, torch.zeros(1).to(target_obj_ids.device)], dim=0)
+ mask = target_obj_ids != -1
+ bbox_weights = torch.ones_like(target_boxes) * self.code_weights
+ avg_factor = src_boxes[mask].size(0)
+ avg_factor = reduce_mean(target_boxes.new_tensor([avg_factor]))
+ loss_bbox = self.loss_bboxes(
+ src_boxes[mask],
+ target_boxes[mask],
+ bbox_weights[mask],
+ avg_factor=avg_factor.item(),
+ )
+
+ losses = {}
+ losses["loss_bbox"] = loss_bbox
+
+ return losses
+
+ def loss_labels(self,
+ outputs,
+ gt_instances: List[Instances],
+ indices,
+ log=False):
+ """Classification loss (computed with the configured ``loss_cls``).
+ Each entry of ``gt_instances`` must provide a ``labels`` tensor of shape
+ [num_target_boxes].
+
+ indices: [(src_idx, tgt_idx)]
+ """
+ # [bs=1, num_query, num_classes]
+ src_logits = outputs["pred_logits"]
+ sdc_logits = outputs["pred_sdc_logits"]
+ # batch_idx, src_idx
+ idx = self._get_src_permutation_idx(indices)
+ # [bs, num_query]
+ target_classes = torch.full(
+ src_logits.shape[:2],
+ self.num_classes,
+ dtype=torch.int64,
+ device=src_logits.device,
+ )
+ # The matched gt index of a disappeared track query is set to -1.
+ labels = []
+ for gt_per_img, (_, J) in zip(gt_instances, indices):
+ labels_per_img = torch.ones_like(J) * self.num_classes
+ # set labels of track-appear slots to num_classes
+ if len(gt_per_img) > 0:
+ labels_per_img[J != -1] = gt_per_img.labels[J[J != -1]]
+ labels.append(labels_per_img)
+ # [num_matched]
+ target_classes_o = torch.cat(labels)
+ # [bs, num_query]
+ target_classes[idx] = target_classes_o
+ target_sdc_classes = gt_instances[0].sdc_labels[0:1].unsqueeze(0)
+ if sdc_logits is not None:
+ src_logits = torch.cat([src_logits, sdc_logits], dim=1)
+ target_classes = torch.cat([target_classes, target_sdc_classes], dim=1)
+ label_weights = torch.ones_like(target_classes)
+ # float tensor
+ avg_factor = target_classes_o.numel() # pos + matched gt of disappeared tracks
+ avg_factor += 1 # sdc
+
+ avg_factor = reduce_mean(src_logits.new_tensor([avg_factor]))
+ loss_ce = self.loss_cls(
+ src_logits.flatten(0, 1),
+ target_classes.flatten(0),
+ label_weights.flatten(0),
+ avg_factor,
+ )
+
+ losses = {"loss_cls": loss_ce}
+
+ if log:
+ # TODO this should probably be a separate loss, not hacked in this one here
+ losses["class_error"] = 100 - accuracy(src_logits[idx],
+ target_classes_o)[0]
+
+ return losses
+
+ def match_for_single_frame(self,
+ outputs: dict,
+ dec_lvl: int,
+ if_step=False,
+ ):
+ outputs_without_aux = {
+ k: v
+ for k, v in outputs.items() if k != "aux_outputs"
+ }
+
+ gt_instances_i = self.gt_instances[
+ self._current_frame_idx] # gt instances of i-th image.
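+ # Overview of this matcher (summary comment, not upstream documentation):
+ # step1 inherits matches from earlier frames via persistent obj_idxes,
+ # steps 2-4 run the assigner between still-unmatched queries and untracked
+ # GTs, steps 5-7 merge the two match sets, and step8 accumulates the
+ # per-frame losses into self.losses_dict.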
+ track_instances: Instances = outputs_without_aux["track_instances"] + pred_logits_i = track_instances.pred_logits + pred_boxes_i = track_instances.pred_boxes + # modified the hard code, 900:901, sdc query + pred_sdc_logits_i = track_instances.pred_logits[900:901].unsqueeze(0) + pred_sdc_boxes_i = track_instances.pred_boxes[900:901].unsqueeze(0) + # -2 means the sdc query in this code + track_instances.obj_idxes[900]=-2 + pred_past_trajs_i = track_instances.pred_past_trajs # predicted past trajs of i-th image. + + obj_idxes = gt_instances_i.obj_ids + obj_idxes_list = obj_idxes.detach().cpu().numpy().tolist() + obj_idx_to_gt_idx = { + obj_idx: gt_idx + for gt_idx, obj_idx in enumerate(obj_idxes_list) + } + outputs_i = { + "pred_logits": pred_logits_i.unsqueeze(0), + "pred_sdc_logits": pred_sdc_logits_i, + "pred_boxes": pred_boxes_i.unsqueeze(0), + "pred_sdc_boxes": pred_sdc_boxes_i, + "pred_past_trajs": pred_past_trajs_i.unsqueeze(0), + } + # step1. inherit and update the previous tracks. + num_disappear_track = 0 + for j in range(len(track_instances)): + obj_id = track_instances.obj_idxes[j].item() + # set new target idx. + if obj_id >= 0: + if obj_id in obj_idx_to_gt_idx: + track_instances.matched_gt_idxes[j] = obj_idx_to_gt_idx[ + obj_id] + else: + num_disappear_track += 1 + track_instances.matched_gt_idxes[ + j] = -1 # track-disappear case. + else: + track_instances.matched_gt_idxes[j] = -1 + + full_track_idxes = torch.arange( + len(track_instances), dtype=torch.long).to(pred_logits_i.device) + # previsouly tracked, which is matched by rule + matched_track_idxes = track_instances.obj_idxes >= 0 + prev_matched_indices = torch.stack( + [ + full_track_idxes[matched_track_idxes], + track_instances.matched_gt_idxes[matched_track_idxes], + ], + dim=1, + ).to(pred_logits_i.device) + + # step2. select the unmatched slots. + # note that the FP tracks whose obj_idxes are -2 will not be selected here. + unmatched_track_idxes = full_track_idxes[track_instances.obj_idxes == + -1] + + # step3. select the untracked gt instances (new tracks). + tgt_indexes = track_instances.matched_gt_idxes + tgt_indexes = tgt_indexes[tgt_indexes != -1] + + tgt_state = torch.zeros(len(gt_instances_i)).to(pred_logits_i.device) + tgt_state[tgt_indexes] = 1 + # new tgt indexes + untracked_tgt_indexes = torch.arange(len(gt_instances_i)).to( + pred_logits_i.device)[tgt_state == 0] + # untracked_tgt_indexes = select_unmatched_indexes(tgt_indexes, len(gt_instances_i)) + # [num_untracked] + untracked_gt_instances = gt_instances_i[untracked_tgt_indexes] + + def match_for_single_decoder_layer(unmatched_outputs, matcher): + bbox_preds, cls_preds = ( + unmatched_outputs["pred_boxes"], + unmatched_outputs["pred_logits"], + ) + bs, num_querys = bbox_preds.shape[:2] + # Also concat the target labels and boxes + targets = [untracked_gt_instances] + if isinstance(targets[0], Instances): + # [num_box], [num_box, 9] (un-normalized bboxes) + gt_labels = torch.cat( + [gt_per_img.labels for gt_per_img in targets]) + gt_bboxes = torch.cat( + [gt_per_img.boxes for gt_per_img in targets]) + else: + gt_labels = torch.cat([v["labels"] for v in targets]) + gt_bboxes = torch.cat([v["boxes"] for v in targets]) + + bbox_pred = bbox_preds[0] + cls_pred = cls_preds[0] + + src_idx, tgt_idx = matcher.assign(bbox_pred, cls_pred, gt_bboxes, + gt_labels) + if src_idx is None: + return None + # concat src and tgt. 
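+ # Note (added for clarity): src_idx / tgt_idx index into the *unmatched*
+ # subsets only, so they are mapped back to global query and GT positions
+ # via unmatched_track_idxes / untracked_tgt_indexes before being stacked
+ # into a [num_new_matched, 2] tensor of (query_idx, gt_idx) pairs.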
+ new_matched_indices = torch.stack([
+ unmatched_track_idxes[src_idx], untracked_tgt_indexes[tgt_idx]
+ ],
+ dim=1).to(pred_logits_i.device)
+ return new_matched_indices
+
+ # step4. do matching between the unmatched slots and GTs.
+ unmatched_outputs = {
+ # [bs, num_pred, num_classes]
+ "pred_logits":
+ track_instances.pred_logits[unmatched_track_idxes].unsqueeze(0),
+ # [bs, num_pred, box_dim]
+ "pred_boxes":
+ track_instances.pred_boxes[unmatched_track_idxes].unsqueeze(0),
+ }
+ # [num_new_matched, 2]
+ new_matched_indices = match_for_single_decoder_layer(
+ unmatched_outputs, self.matcher)
+
+ # step5. update obj_idxes according to the new matching result.
+ if new_matched_indices is not None:
+ track_instances.obj_idxes[
+ new_matched_indices[:, 0]] = gt_instances_i.obj_ids[
+ new_matched_indices[:, 1]].long()
+ track_instances.matched_gt_idxes[
+ new_matched_indices[:, 0]] = new_matched_indices[:, 1]
+
+ # step6. calculate iou3d.
+ active_idxes = (track_instances.obj_idxes >=
+ 0) & (track_instances.matched_gt_idxes >= 0)
+ active_track_boxes = track_instances.pred_boxes[active_idxes]
+ with torch.no_grad():
+ if len(active_track_boxes) > 0:
+ gt_boxes = gt_instances_i.boxes[
+ track_instances.matched_gt_idxes[active_idxes]]
+ iou_3ds = iou_3d(
+ denormalize_bbox(gt_boxes, None)[..., :7],
+ denormalize_bbox(active_track_boxes, None)[..., :7],
+ )
+ track_instances.iou[active_idxes] = torch.tensor([
+ iou_3ds[i, i] for i in range(gt_boxes.shape[0])
+ ]).to(gt_boxes.device)
+
+ # step7. merge the newly matched pairs and the previously matched pairs.
+ # [num_new_matched + num_prev_matched, 2]
+ matched_indices = torch.cat(
+ [new_matched_indices, prev_matched_indices], dim=0)
+ else:
+ matched_indices = prev_matched_indices
+ # step8. calculate losses.
+ self.num_samples += len(gt_instances_i) + num_disappear_track
+ self.sample_device = pred_logits_i.device
+
+ for loss in self.losses:
+ new_track_loss = self.get_loss(
+ loss,
+ outputs=outputs_i,
+ gt_instances=[gt_instances_i],
+ indices=[(matched_indices[:, 0], matched_indices[:, 1])],
+ )
+ self.losses_dict.update({
+ "frame_{}_{}_{}".format(self._current_frame_idx, key, dec_lvl):
+ value
+ for key, value in new_track_loss.items()
+ })
+ if "aux_outputs" in outputs:
+ for i, aux_outputs in enumerate(outputs["aux_outputs"]):
+ unmatched_outputs_layer = {
+ "pred_logits":
+ aux_outputs["pred_logits"][
+ 0, unmatched_track_idxes].unsqueeze(0),
+ "pred_boxes":
+ aux_outputs["pred_boxes"][
+ 0, unmatched_track_idxes].unsqueeze(0),
+ }
+ # NOTE: the code below assumes the assigner finds at least one
+ # match here; match_for_single_decoder_layer may return None.
+ new_matched_indices_layer = match_for_single_decoder_layer(
+ unmatched_outputs_layer, self.matcher)
+ matched_indices_layer = torch.cat(
+ [new_matched_indices_layer, prev_matched_indices], dim=0)
+ for loss in self.losses:
+ if loss == "masks":
+ # Intermediate mask losses are too costly to compute, so we skip them.
+ continue
+ l_dict = self.get_loss(
+ loss,
+ aux_outputs,
+ gt_instances=[gt_instances_i],
+ indices=[(matched_indices_layer[:, 0],
+ matched_indices_layer[:, 1])],
+ )
+ self.losses_dict.update({
+ "frame_{}_aux{}_{}".format(self._current_frame_idx, i,
+ key): value
+ for key, value in l_dict.items()
+ })
+ if if_step:
+ self._step()
+ return track_instances, matched_indices
+
+ def forward(self, outputs, input_data: dict):
+ # Losses for each frame are computed during the model's forward pass
+ # and returned by the model as outputs['losses_dict'].
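+ # Hedged usage sketch (key names follow the format strings used above):
+ # after a clip of T frames the dict holds entries like
+ # "frame_0_loss_cls_0"; each is normalized below by the
+ # distributed-averaged number of GT boxes accumulated over the clip,
+ # so the loss scale does not depend on how many objects a clip contains.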
+ losses = outputs.pop("losses_dict") + num_samples = self.get_num_boxes(self.num_samples) + for loss_name, loss in losses.items(): + losses[loss_name] /= num_samples + return losses + + def prediction_loss(self, track_instances, predictions): + + decay_ratio = 1.0 + for i in range(self._current_frame_idx, len(self.gt_instances)): + gt_instances_i = self.gt_instances[ + i] # gt instances of i-th image. + + pred_boxes_i = predictions[i - self._current_frame_idx] + + obj_idxes = gt_instances_i.obj_ids + obj_idxes_list = obj_idxes.detach().cpu().numpy().tolist() + obj_idx_to_gt_idx = { + obj_idx: gt_idx + for gt_idx, obj_idx in enumerate(obj_idxes_list) + } + + num_paired = 0 + for j in range(len(track_instances)): + obj_id = track_instances.obj_idxes[j].item() + # set new target idx. + if obj_id >= 0: + if obj_id in obj_idx_to_gt_idx: + track_instances.matched_gt_idxes[ + j] = obj_idx_to_gt_idx[obj_id] + num_paired += 1 + else: + track_instances.matched_gt_idxes[ + j] = -1 # track-disappear case. + else: + track_instances.matched_gt_idxes[j] = -1 + + if num_paired > 0: + if_paired_i = track_instances.matched_gt_idxes >= 0 + + paired_pred_boxes_i = pred_boxes_i[if_paired_i] + + paired_gt_instances = gt_instances_i[ + track_instances.matched_gt_idxes[if_paired_i]] + normalized_bboxes = paired_gt_instances.boxes + cx = normalized_bboxes[..., 0:1] + cy = normalized_bboxes[..., 1:2] + cz = normalized_bboxes[..., 4:5] + + gt_boxes_i = torch.cat([cx, cy, cz], dim=-1) + + pred_loss_i = (0.2 * decay_ratio * self.loss_predictions( + paired_pred_boxes_i, gt_boxes_i).sum(dim=-1).mean()) + + self.losses_dict["pred_loss_{}".format(i)] = pred_loss_i + else: + self.losses_dict["pred_loss_{}".format(i)] = torch.tensor( + [0.0]).cuda() + + decay_ratio = decay_ratio * 0.5 \ No newline at end of file diff --git a/mmcv/losses/traj_loss.py b/mmcv/losses/traj_loss.py new file mode 100644 index 0000000..727d291 --- /dev/null +++ b/mmcv/losses/traj_loss.py @@ -0,0 +1,233 @@ +#---------------------------------------------------------------------------------# +# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156) # +# Source code: https://github.com/OpenDriveLab/UniAD # +# Copyright (c) OpenDriveLab. All rights reserved. # +#---------------------------------------------------------------------------------# + +import torch +import math +import torch.nn as nn +import torch.nn.functional as F +from typing import Tuple + +from mmcv.models import LOSSES + +@LOSSES.register_module() +class TrajLoss(nn.Module): + """ + MTP loss modified to include variances. Uses MSE for mode selection. + Can also be used with + Multipath outputs, with residuals added to anchors. 
+ """ + + def __init__(self, use_variance=False, cls_loss_weight=1., nll_loss_weight=1., loss_weight_minade=0., loss_weight_minfde=1., loss_weight_mr=1.): + """ + Initialize MTP loss + :param args: Dictionary with the following (optional) keys + use_variance: bool, whether or not to use variances for computing + regression component of loss, + default: False + alpha: float, relative weight assigned to classification component, + compared to regression component + of loss, default: 1 + """ + super(TrajLoss, self).__init__() + self.use_variance = use_variance + self.cls_loss_weight = cls_loss_weight + self.nll_loss_weight = nll_loss_weight + self.loss_weight_minade = loss_weight_minade + self.loss_weight_minfde = loss_weight_minfde + + def forward(self, + traj_prob, + traj_preds, + gt_future_traj, + gt_future_traj_valid_mask): + """ + Compute MTP loss + :param predictions: Dictionary with 'traj': predicted trajectories + and 'probs': mode (log) probabilities + :param ground_truth: Either a tensor with ground truth trajectories + or a dictionary + :return: + """ + # Unpack arguments + traj = traj_preds # (b, nmodes, seq, 5) + log_probs = traj_prob + traj_gt = gt_future_traj + + # Useful variables + batch_size = traj.shape[0] + sequence_length = traj.shape[2] + pred_params = 5 if self.use_variance else 2 + + # Masks for variable length ground truth trajectories + masks = 1 - gt_future_traj_valid_mask.to(traj.dtype) + + l_minfde, inds = min_fde(traj, traj_gt, masks) + try: + l_mr = miss_rate(traj, traj_gt, masks) + except: + l_mr = torch.zeros_like(l_minfde) + l_minade, inds = min_ade(traj, traj_gt, masks) + inds_rep = inds.repeat( + sequence_length, + pred_params, 1, 1).permute(3, 2, 0, 1) + + # Calculate MSE or NLL loss for trajectories corresponding to selected + # outputs: + traj_best = traj.gather(1, inds_rep).squeeze(dim=1) + + if self.use_variance: + l_reg = traj_nll(traj_best, traj_gt, masks) + else: + l_reg = l_minade + + # Compute classification loss + l_class = - torch.squeeze(log_probs.gather(1, inds.unsqueeze(1))) + + l_reg = torch.sum(l_reg)/(batch_size + 1e-5) + l_class = torch.sum(l_class)/(batch_size + 1e-5) + l_minade = torch.sum(l_minade)/(batch_size + 1e-5) + l_minfde = torch.sum(l_minfde)/(batch_size + 1e-5) + + loss = l_class * self.cls_loss_weight + l_reg * self.nll_loss_weight + l_minade * self.loss_weight_minade + l_minfde * self.loss_weight_minfde + return loss, l_class, l_reg, l_minade, l_minfde, l_mr + +def min_ade(traj: torch.Tensor, traj_gt: torch.Tensor, + masks: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Computes average displacement error for the best trajectory is a set, + with respect to ground truth + :param traj: predictions, shape [batch_size, num_modes, sequence_length, 2] + :param traj_gt: ground truth trajectory, shape + [batch_size, sequence_length, 2] + :param masks: masks for varying length ground truth, shape + [batch_size, sequence_length] + :return errs, inds: errors and indices for modes with min error, shape + [batch_size] + """ + num_modes = traj.shape[1] + traj_gt_rpt = traj_gt.unsqueeze(1).repeat(1, num_modes, 1, 1) + masks_rpt = masks.unsqueeze(1).repeat(1, num_modes, 1) + err = traj_gt_rpt - traj[:, :, :, 0:2] + err = torch.pow(err, exponent=2) + err = torch.sum(err, dim=3) + err = torch.pow(err, exponent=0.5) + err = torch.sum(err * (1 - masks_rpt), dim=2) / \ + torch.clip(torch.sum((1 - masks_rpt), dim=2), min=1) + err, inds = torch.min(err, dim=1) + + return err, inds + +def traj_nll( + pred_dist: torch.Tensor, + traj_gt: 
torch.Tensor, + masks: torch.Tensor): + """ + Computes negative log likelihood of ground truth trajectory under a + predictive distribution with a single mode, + with a bivariate Gaussian distribution predicted at each time in the + prediction horizon + + :param pred_dist: parameters of a bivariate Gaussian distribution, + shape [batch_size, sequence_length, 5] + :param traj_gt: ground truth trajectory, + shape [batch_size, sequence_length, 2] + :param masks: masks for varying length ground truth, + shape [batch_size, sequence_length] + :return: + """ + mu_x = pred_dist[:, :, 0] + mu_y = pred_dist[:, :, 1] + x = traj_gt[:, :, 0] + y = traj_gt[:, :, 1] + + sig_x = pred_dist[:, :, 2] + sig_y = pred_dist[:, :, 3] + rho = pred_dist[:, :, 4] + ohr = torch.pow(1 - torch.pow(rho, 2), -0.5) + + nll = 0.5 * torch.pow(ohr, 2) * \ + (torch.pow(sig_x, 2) * torch.pow(x - mu_x, 2) + torch.pow(sig_y, 2) * + torch.pow(y - mu_y, 2) - 2 * rho * torch.pow(sig_x, 1) * + torch.pow(sig_y, 1) * (x - mu_x) * (y - mu_y)) - \ + torch.log(sig_x * sig_y * ohr) + 1.8379 + + nll[nll.isnan()] = 0 + nll[nll.isinf()] = 0 + + nll = torch.sum(nll * (1 - masks), dim=1) / (torch.sum((1 - masks), dim=1) + 1e-5) + # Note: Normalizing with torch.sum((1 - masks), dim=1) makes values + # somewhat comparable for trajectories of + # different lengths + + return nll + +def min_fde(traj: torch.Tensor, traj_gt: torch.Tensor, + masks: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Computes final displacement error for the best trajectory is a set, + with respect to ground truth + :param traj: predictions, shape [batch_size, num_modes, sequence_length, 2] + :param traj_gt: ground truth trajectory, shape + [batch_size, sequence_length, 2] + :param masks: masks for varying length ground truth, shape + [batch_size, sequence_length] + :return errs, inds: errors and indices for modes with min error, + shape [batch_size] + """ + num_modes = traj.shape[1] + lengths = torch.sum(1 - masks, dim=1).long() + valid_mask = lengths > 0 + traj = traj[valid_mask] + traj_gt = traj_gt[valid_mask] + masks = masks[valid_mask] + traj_gt_rpt = traj_gt.unsqueeze(1).repeat(1, num_modes, 1, 1) + lengths = torch.sum(1 - masks, dim=1).long() + inds = lengths.unsqueeze(1).unsqueeze( + 2).unsqueeze(3).repeat(1, num_modes, 1, 2) - 1 + + traj_last = torch.gather(traj[..., :2], dim=2, index=inds).squeeze(2) + traj_gt_last = torch.gather(traj_gt_rpt, dim=2, index=inds).squeeze(2) + + err = traj_gt_last - traj_last[..., 0:2] + err = torch.pow(err, exponent=2) + err = torch.sum(err, dim=2) + err = torch.pow(err, exponent=0.5) + err, inds = torch.min(err, dim=1) + + return err, inds + + +def miss_rate( + traj: torch.Tensor, + traj_gt: torch.Tensor, + masks: torch.Tensor, + dist_thresh: float = 2) -> torch.Tensor: + """ + Computes miss rate for mini batch of trajectories, + with respect to ground truth and given distance threshold + :param traj: predictions, shape [batch_size, num_modes, sequence_length, 2] + :param traj_gt: ground truth trajectory, + shape [batch_size, sequence_length, 2] + :param masks: masks for varying length ground truth, + shape [batch_size, sequence_length] + :param dist_thresh: distance threshold for computing miss rate. 
+ :return errs, inds: errors and indices for modes with min error, + shape [batch_size] + """ + num_modes = traj.shape[1] + + traj_gt_rpt = traj_gt.unsqueeze(1).repeat(1, num_modes, 1, 1) + masks_rpt = masks.unsqueeze(1).repeat(1, num_modes, 1) + dist = traj_gt_rpt - traj[:, :, :, 0:2] + dist = torch.pow(dist, exponent=2) + dist = torch.sum(dist, dim=3) + dist = torch.pow(dist, exponent=0.5) + dist[masks_rpt.bool()] = -math.inf + dist, _ = torch.max(dist, dim=2) + dist, _ = torch.min(dist, dim=1) + m_r = torch.sum(torch.as_tensor(dist > dist_thresh)) / len(dist) + + return m_r diff --git a/mmcv/metrics/classification.py b/mmcv/metrics/classification.py new file mode 100644 index 0000000..9818c2a --- /dev/null +++ b/mmcv/metrics/classification.py @@ -0,0 +1,178 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from functools import wraps +from typing import Callable, Optional, Sequence, Tuple + +import torch +from .utils import get_num_classes as __gnc +from .utils import to_categorical as __tc +from .distributed import rank_zero_warn + + +def to_categorical(tensor: torch.Tensor, argmax_dim: int = 1) -> torch.Tensor: + """ + Converts a tensor of probabilities to a dense label tensor + + .. warning :: Deprecated in favor of :func:`~mmcv.pytorch_lightning.metrics.utils.to_categorical` + + """ + rank_zero_warn( + "This `to_categorical` was deprecated in v1.1.0 in favor of" + " `from mmcv.pytorch_lightning.metrics.utils import to_categorical`." + " It will be removed in v1.3.0", DeprecationWarning + ) + return __tc(tensor) + + +def get_num_classes( + pred: torch.Tensor, + target: torch.Tensor, + num_classes: Optional[int] = None, +) -> int: + """ + Calculates the number of classes for a given prediction and target tensor. + + .. warning :: Deprecated in favor of :func:`~mmcv.pytorch_lightning.metrics.utils.get_num_classes` + + """ + rank_zero_warn( + "This `get_num_classes` was deprecated in v1.1.0 in favor of" + " `from mmcv.pytorch_lightning.metrics.utils import get_num_classes`." 
+ " It will be removed in v1.3.0", DeprecationWarning + ) + return __gnc(pred, target, num_classes) + + +def stat_scores( + pred: torch.Tensor, + target: torch.Tensor, + class_index: int, + argmax_dim: int = 1, +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Calculates the number of true positive, false positive, true negative + and false negative for a specific class + + Args: + pred: prediction tensor + target: target tensor + class_index: class to calculate over + argmax_dim: if pred is a tensor of probabilities, this indicates the + axis the argmax transformation will be applied over + + Return: + True Positive, False Positive, True Negative, False Negative, Support + + Example: + + >>> x = torch.tensor([1, 2, 3]) + >>> y = torch.tensor([0, 2, 3]) + >>> tp, fp, tn, fn, sup = stat_scores(x, y, class_index=1) + >>> tp, fp, tn, fn, sup + (tensor(0), tensor(1), tensor(2), tensor(0), tensor(0)) + + """ + if pred.ndim == target.ndim + 1: + pred = to_categorical(pred, argmax_dim=argmax_dim) + + tp = ((pred == class_index) * (target == class_index)).to(torch.long).sum() + fp = ((pred == class_index) * (target != class_index)).to(torch.long).sum() + tn = ((pred != class_index) * (target != class_index)).to(torch.long).sum() + fn = ((pred != class_index) * (target == class_index)).to(torch.long).sum() + sup = (target == class_index).to(torch.long).sum() + + return tp, fp, tn, fn, sup + + +# todo: remove in 1.4 +def stat_scores_multiple_classes( + pred: torch.Tensor, + target: torch.Tensor, + num_classes: Optional[int] = None, + argmax_dim: int = 1, + reduction: str = 'none', +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Calculates the number of true positive, false positive, true negative + and false negative for each class + + .. warning :: Deprecated in favor of :func:`~mmcv.pytorch_lightning.metrics.functional.stat_scores` + + Raises: + ValueError: + If ``reduction`` is not one of ``"none"``, ``"sum"`` or ``"elementwise_mean"``. + """ + + rank_zero_warn( + "This `stat_scores_multiple_classes` was deprecated in v1.2.0 in favor of" + " `from mmcv.pytorch_lightning.metrics.functional import stat_scores`." 
+ " It will be removed in v1.4.0", DeprecationWarning + ) + if pred.ndim == target.ndim + 1: + pred = to_categorical(pred, argmax_dim=argmax_dim) + + num_classes = get_num_classes(pred=pred, target=target, num_classes=num_classes) + + if pred.dtype != torch.bool: + pred = pred.clamp_max(max=num_classes) + if target.dtype != torch.bool: + target = target.clamp_max(max=num_classes) + + possible_reductions = ('none', 'sum', 'elementwise_mean') + if reduction not in possible_reductions: + raise ValueError("reduction type %s not supported" % reduction) + + if reduction == 'none': + pred = pred.view((-1, )).long() + target = target.view((-1, )).long() + + tps = torch.zeros((num_classes + 1, ), device=pred.device) + fps = torch.zeros((num_classes + 1, ), device=pred.device) + fns = torch.zeros((num_classes + 1, ), device=pred.device) + sups = torch.zeros((num_classes + 1, ), device=pred.device) + + match_true = (pred == target).float() + match_false = 1 - match_true + + tps.scatter_add_(0, pred, match_true) + fps.scatter_add_(0, pred, match_false) + fns.scatter_add_(0, target, match_false) + tns = pred.size(0) - (tps + fps + fns) + sups.scatter_add_(0, target, torch.ones_like(match_true)) + + tps = tps[:num_classes] + fps = fps[:num_classes] + tns = tns[:num_classes] + fns = fns[:num_classes] + sups = sups[:num_classes] + + elif reduction == 'sum' or reduction == 'elementwise_mean': + count_match_true = (pred == target).sum().float() + oob_tp, oob_fp, oob_tn, oob_fn, oob_sup = stat_scores(pred, target, num_classes, argmax_dim) + + tps = count_match_true - oob_tp + fps = pred.nelement() - count_match_true - oob_fp + fns = pred.nelement() - count_match_true - oob_fn + tns = pred.nelement() * (num_classes + 1) - (tps + fps + fns + oob_tn) + sups = pred.nelement() - oob_sup.float() + + if reduction == 'elementwise_mean': + tps /= num_classes + fps /= num_classes + fns /= num_classes + tns /= num_classes + sups /= num_classes + + return tps.float(), fps.float(), tns.float(), fns.float(), sups.float() + diff --git a/mmcv/metrics/compositional.py b/mmcv/metrics/compositional.py new file mode 100644 index 0000000..124e6a5 --- /dev/null +++ b/mmcv/metrics/compositional.py @@ -0,0 +1,40 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Callable, Union + +import torch +from torchmetrics.metric import CompositionalMetric as _CompositionalMetric + +from .metric import Metric +from .distributed import rank_zero_warn + + +class CompositionalMetric(_CompositionalMetric): + r""" + This implementation refers to :class:`~torchmetrics.metric.CompositionalMetric`. + + .. warning:: This metric is deprecated, use ``torchmetrics.metric.CompositionalMetric``. Will be removed in v1.5.0. + """ + + def __init__( + self, + operator: Callable, + metric_a: Union[Metric, int, float, torch.Tensor], + metric_b: Union[Metric, int, float, torch.Tensor, None], + ): + rank_zero_warn( + "This `Metric` was deprecated since v1.3.0 in favor of `torchmetrics.Metric`." 
+ " It will be removed in v1.5.0", DeprecationWarning + ) + super().__init__(operator=operator, metric_a=metric_a, metric_b=metric_b) diff --git a/mmcv/metrics/distributed.py b/mmcv/metrics/distributed.py new file mode 100644 index 0000000..9e47af2 --- /dev/null +++ b/mmcv/metrics/distributed.py @@ -0,0 +1,214 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import os +import warnings +from functools import wraps +from typing import Any, Optional, Union + +import torch + +log = logging.getLogger(__name__) + +if torch.distributed.is_available(): + from torch.distributed import group, ReduceOp + +else: + + class ReduceOp: + SUM = None + + class group: + WORLD = None + + +def rank_zero_only(fn): + + @wraps(fn) + def wrapped_fn(*args, **kwargs): + if rank_zero_only.rank == 0: + return fn(*args, **kwargs) + + return wrapped_fn + + +# add the attribute to the function but don't overwrite in case Trainer has already set it +rank_zero_only.rank = getattr(rank_zero_only, 'rank', int(os.environ.get('LOCAL_RANK', 0))) + + +def _warn(*args, **kwargs): + warnings.warn(*args, **kwargs) + + +def _info(*args, **kwargs): + log.info(*args, **kwargs) + + +def _debug(*args, **kwargs): + log.debug(*args, **kwargs) + + +rank_zero_debug = rank_zero_only(_debug) +rank_zero_info = rank_zero_only(_info) +rank_zero_warn = rank_zero_only(_warn) + + +def find_free_network_port() -> int: + """ + Finds a free port on localhost. + It is useful in single-node training when we don't want to connect to a real master node but + have to set the `MASTER_PORT` environment variable. + """ + import socket + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.bind(("", 0)) + s.listen(1) + port = s.getsockname()[1] + s.close() + return port + + +def gather_all_tensors(result: Union[torch.Tensor], group: Optional[Any] = None): + """ + Function to gather all tensors from several ddp processes onto a list that + is broadcasted to all processes + + Args: + result: the value to sync + group: the process group to gather results from. 
Defaults to all processes (world) + + Return: + gathered_result: list with size equal to the process group where + gathered_result[i] corresponds to result tensor from process i + """ + if group is None: + group = torch.distributed.group.WORLD + + # convert tensors to contiguous format + result = result.contiguous() + + world_size = torch.distributed.get_world_size(group) + + gathered_result = [torch.zeros_like(result) for _ in range(world_size)] + + # sync and broadcast all + torch.distributed.barrier(group=group) + torch.distributed.all_gather(gathered_result, result, group) + + return gathered_result + + +def sync_ddp_if_available( + result: Union[torch.Tensor], + group: Optional[Any] = None, + reduce_op: Optional[Union[ReduceOp, str]] = None +) -> torch.Tensor: + """ + Function to reduce a tensor across worker processes during distributed training + Args: + result: the value to sync and reduce (typically tensor or number) + group: the process group to gather results from. Defaults to all processes (world) + reduce_op: the reduction operation. Defaults to sum. + Can also be a string of 'avg', 'mean' to calculate the mean during reduction. + + Return: + reduced value + """ + if torch.distributed.is_available() and torch.distributed.is_initialized(): + return sync_ddp(result, group=group, reduce_op=reduce_op) + return result + + +def sync_ddp( + result: Union[torch.Tensor], + group: Optional[Any] = None, + reduce_op: Optional[Union[ReduceOp, str]] = None +) -> torch.Tensor: + """ + Function to reduce the tensors from several ddp processes to one master process + + Args: + result: the value to sync and reduce (typically tensor or number) + group: the process group to gather results from. Defaults to all processes (world) + reduce_op: the reduction operation. Defaults to sum. + Can also be a string of 'avg', 'mean' to calculate the mean during reduction. + + Return: + reduced value + """ + divide_by_world_size = False + + if group is None: + group = torch.distributed.group.WORLD + + op = reduce_op if isinstance(reduce_op, ReduceOp) else ReduceOp.SUM + + if isinstance(reduce_op, str) and reduce_op.lower() in ("avg", "mean"): + divide_by_world_size = True + + # sync all processes before reduction + torch.distributed.barrier(group=group) + torch.distributed.all_reduce(result, op=op, group=group, async_op=False) + + if divide_by_world_size: + result = result / torch.distributed.get_world_size(group) + + return result + + +class AllGatherGrad(torch.autograd.Function): + + @staticmethod + def forward(ctx, tensor, group=group.WORLD): + ctx.group = group + + gathered_tensor = [torch.zeros_like(tensor) for _ in range(torch.distributed.get_world_size())] + + torch.distributed.all_gather(gathered_tensor, tensor, group=group) + gathered_tensor = torch.stack(gathered_tensor, dim=0) + + return gathered_tensor + + @staticmethod + def backward(ctx, *grad_output): + grad_output = torch.cat(grad_output) + + torch.distributed.all_reduce(grad_output, op=torch.distributed.ReduceOp.SUM, async_op=False, group=ctx.group) + + return grad_output[torch.distributed.get_rank()] + + +def all_gather_ddp_if_available( + tensor: Union[torch.Tensor], group: Optional[Any] = None, sync_grads: bool = False +) -> torch.Tensor: + """ + Function to gather a tensor from several distributed processes + + Args: + tensor: tensor of shape (batch, ...) + group: the process group to gather results from. 
Defaults to all processes (world) + sync_grads: flag that allows users to synchronize gradients for all_gather op + + Return: + A tensor of shape (world_size, batch, ...) + """ + group = group if group is not None else torch.distributed.group.WORLD + if torch.distributed.is_available() and torch.distributed.is_initialized(): + if sync_grads: + return AllGatherGrad.apply(tensor, group) + else: + with torch.no_grad(): + return AllGatherGrad.apply(tensor, group) + return tensor diff --git a/mmcv/metrics/metric.py b/mmcv/metrics/metric.py new file mode 100644 index 0000000..c306504 --- /dev/null +++ b/mmcv/metrics/metric.py @@ -0,0 +1,199 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +from torchmetrics import Metric as _Metric +from torchmetrics import MetricCollection as _MetricCollection + +from mmcv.metrics.distributed import rank_zero_warn + + +class Metric(_Metric): + r""" + This implementation refers to :class:`~torchmetrics.Metric`. + + .. warning:: This metric is deprecated, use ``torchmetrics.Metric``. Will be removed in v1.5.0. + """ + + def __init__( + self, + dist_sync_on_step: bool = False, + process_group: Optional[Any] = None, + dist_sync_fn: Callable = None, + ): + rank_zero_warn( + "This `Metric` was deprecated since v1.3.0 in favor of `torchmetrics.Metric`." 
+ " It will be removed in v1.5.0", DeprecationWarning + ) + super().__init__( + dist_sync_on_step=dist_sync_on_step, + process_group=process_group, + dist_sync_fn=dist_sync_fn, + ) + + def __hash__(self): + return super().__hash__() + + def __add__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.add, self, other) + + def __and__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.bitwise_and, self, other) + + def __eq__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.eq, self, other) + + def __floordiv__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.floor_divide, self, other) + + def __ge__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.ge, self, other) + + def __gt__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.gt, self, other) + + def __le__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.le, self, other) + + def __lt__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.lt, self, other) + + def __matmul__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.matmul, self, other) + + def __mod__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.fmod, self, other) + + def __mul__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.mul, self, other) + + def __ne__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.ne, self, other) + + def __or__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.bitwise_or, self, other) + + def __pow__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.pow, self, other) + + def __radd__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.add, other, self) + + def __rand__(self, other: Any): + from .compositional import CompositionalMetric + + # swap them since bitwise_and only supports that way and it's commutative + return CompositionalMetric(torch.bitwise_and, self, other) + + def __rfloordiv__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.floor_divide, other, self) + + def __rmatmul__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.matmul, other, self) + + def __rmod__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.fmod, other, self) + + def __rmul__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.mul, other, self) + + def __ror__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.bitwise_or, other, self) + + def __rpow__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.pow, other, self) + + def __rsub__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.sub, other, self) + + def 
__rtruediv__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.true_divide, other, self) + + def __rxor__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.bitwise_xor, other, self) + + def __sub__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.sub, self, other) + + def __truediv__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.true_divide, self, other) + + def __xor__(self, other: Any): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.bitwise_xor, self, other) + + def __abs__(self): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.abs, self, None) + + def __inv__(self): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.bitwise_not, self, None) + + def __invert__(self): + return self.__inv__() + + def __neg__(self): + from .compositional import CompositionalMetric + return CompositionalMetric(_neg, self, None) + + def __pos__(self): + from .compositional import CompositionalMetric + return CompositionalMetric(torch.abs, self, None) + + +def _neg(tensor: torch.Tensor): + return -torch.abs(tensor) + + +class MetricCollection(_MetricCollection): + r""" + This implementation refers to :class:`~torchmetrics.MetricCollection`. + + .. warning:: This metric is deprecated, use ``torchmetrics.MetricCollection``. Will be removed in v1.5.0. + """ + + def __init__(self, metrics: Union[List[Metric], Tuple[Metric], Dict[str, Metric]]): + rank_zero_warn( + "This `MetricCollection` was deprecated since v1.3.0 in favor of `torchmetrics.MetricCollection`." + " It will be removed in v1.5.0", DeprecationWarning + ) + super().__init__(metrics=metrics) diff --git a/mmcv/metrics/reduction.py b/mmcv/metrics/reduction.py new file mode 100644 index 0000000..f0ab4c2 --- /dev/null +++ b/mmcv/metrics/reduction.py @@ -0,0 +1,26 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import torch + +from .utils import reduce as __reduce +from .distributed import rank_zero_warn + +def reduce(to_reduce: torch.Tensor, reduction: str) -> torch.Tensor: + rank_zero_warn( + "This `reduce` was deprecated in v1.1.0 in favor of" + " `mmcv.pytorch_lightning.metrics.utils import reduce`." + " It will be removed in v1.3.0", DeprecationWarning + ) + return __reduce(to_reduce=to_reduce, reduction=reduction) + diff --git a/mmcv/metrics/utils.py b/mmcv/metrics/utils.py new file mode 100644 index 0000000..7d1bf40 --- /dev/null +++ b/mmcv/metrics/utils.py @@ -0,0 +1,292 @@ +# Copyright The PyTorch Lightning team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional, Tuple + +import torch + +from .distributed import rank_zero_warn + +METRIC_EPS = 1e-6 + + +def dim_zero_cat(x): + x = x if isinstance(x, (list, tuple)) else [x] + return torch.cat(x, dim=0) + + +def dim_zero_sum(x): + return torch.sum(x, dim=0) + + +def dim_zero_mean(x): + return torch.mean(x, dim=0) + + +def _flatten(x): + return [item for sublist in x for item in sublist] + + +def _check_same_shape(pred: torch.Tensor, target: torch.Tensor): + """ Check that predictions and target have the same shape, else raise error """ + if pred.shape != target.shape: + raise RuntimeError("Predictions and targets are expected to have the same shape") + + +def _input_format_classification_one_hot( + num_classes: int, + preds: torch.Tensor, + target: torch.Tensor, + threshold: float = 0.5, + multilabel: bool = False +) -> Tuple[torch.Tensor, torch.Tensor]: + """Convert preds and target tensors into one hot spare label tensors + + Args: + num_classes: number of classes + preds: either tensor with labels, tensor with probabilities/logits or + multilabel tensor + target: tensor with ground true labels + threshold: float used for thresholding multilabel input + multilabel: boolean flag indicating if input is multilabel + + Returns: + preds: one hot tensor of shape [num_classes, -1] with predicted labels + target: one hot tensors of shape [num_classes, -1] with true labels + """ + if not (preds.ndim == target.ndim or preds.ndim == target.ndim + 1): + raise ValueError("preds and target must have same number of dimensions, or one additional dimension for preds") + + if preds.ndim == target.ndim + 1: + # multi class probabilites + preds = torch.argmax(preds, dim=1) + + if preds.ndim == target.ndim and preds.dtype in (torch.long, torch.int) and num_classes > 1 and not multilabel: + # multi-class + preds = to_onehot(preds, num_classes=num_classes) + target = to_onehot(target, num_classes=num_classes) + + elif preds.ndim == target.ndim and preds.is_floating_point(): + # binary or multilabel probablities + preds = (preds >= threshold).long() + + # transpose class as first dim and reshape + if preds.ndim > 1: + preds = preds.transpose(1, 0) + target = target.transpose(1, 0) + + return preds.reshape(num_classes, -1), target.reshape(num_classes, -1) + + +def to_onehot( + label_tensor: torch.Tensor, + num_classes: Optional[int] = None, +) -> torch.Tensor: + """ + Converts a dense label tensor to one-hot format + + Args: + label_tensor: dense label tensor, with shape [N, d1, d2, ...] + num_classes: number of classes C + + Output: + A sparse label tensor with shape [N, C, d1, d2, ...] 
+ + Example: + + >>> x = torch.tensor([1, 2, 3]) + >>> to_onehot(x) + tensor([[0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]]) + + """ + if num_classes is None: + num_classes = int(label_tensor.max().detach().item() + 1) + + tensor_onehot = torch.zeros( + label_tensor.shape[0], + num_classes, + *label_tensor.shape[1:], + dtype=label_tensor.dtype, + device=label_tensor.device, + ) + index = label_tensor.long().unsqueeze(1).expand_as(tensor_onehot) + return tensor_onehot.scatter_(1, index, 1.0) + + +def select_topk(prob_tensor: torch.Tensor, topk: int = 1, dim: int = 1) -> torch.Tensor: + """ + Convert a probability tensor to binary by selecting top-k highest entries. + + Args: + prob_tensor: dense tensor of shape ``[..., C, ...]``, where ``C`` is in the + position defined by the ``dim`` argument + topk: number of highest entries to turn into 1s + dim: dimension on which to compare entries + + Output: + A binary tensor of the same shape as the input tensor of type torch.int32 + + Example: + >>> x = torch.tensor([[1.1, 2.0, 3.0], [2.0, 1.0, 0.5]]) + >>> select_topk(x, topk=2) + tensor([[0, 1, 1], + [1, 1, 0]], dtype=torch.int32) + """ + zeros = torch.zeros_like(prob_tensor) + topk_tensor = zeros.scatter(dim, prob_tensor.topk(k=topk, dim=dim).indices, 1.0) + return topk_tensor.int() + + +def to_categorical(tensor: torch.Tensor, argmax_dim: int = 1) -> torch.Tensor: + """ + Converts a tensor of probabilities to a dense label tensor + + Args: + tensor: probabilities to get the categorical label [N, d1, d2, ...] + argmax_dim: dimension to apply + + Return: + A tensor with categorical labels [N, d2, ...] + + Example: + + >>> x = torch.tensor([[0.2, 0.5], [0.9, 0.1]]) + >>> to_categorical(x) + tensor([1, 0]) + + """ + return torch.argmax(tensor, dim=argmax_dim) + + +def get_num_classes( + pred: torch.Tensor, + target: torch.Tensor, + num_classes: Optional[int] = None, +) -> int: + """ + Calculates the number of classes for a given prediction and target tensor. + + Args: + pred: predicted values + target: true labels + num_classes: number of classes if known + + Return: + An integer that represents the number of classes. + """ + num_target_classes = int(target.max().detach().item() + 1) + num_pred_classes = int(pred.max().detach().item() + 1) + num_all_classes = max(num_target_classes, num_pred_classes) + + if num_classes is None: + num_classes = num_all_classes + elif num_classes != num_all_classes: + rank_zero_warn( + f"You have set {num_classes} number of classes which is" + f" different from predicted ({num_pred_classes}) and" + f" target ({num_target_classes}) number of classes", + RuntimeWarning, + ) + return num_classes + + +def reduce(to_reduce: torch.Tensor, reduction: str) -> torch.Tensor: + """ + Reduces a given tensor by a given reduction method + + Args: + to_reduce : the tensor, which shall be reduced + reduction : a string specifying the reduction method ('elementwise_mean', 'none', 'sum') + + Return: + reduced Tensor + + Raise: + ValueError if an invalid reduction parameter was given + """ + if reduction == "elementwise_mean": + return torch.mean(to_reduce) + if reduction == "none": + return to_reduce + if reduction == "sum": + return torch.sum(to_reduce) + raise ValueError("Reduction parameter unknown.") + + +def class_reduce( + num: torch.Tensor, denom: torch.Tensor, weights: torch.Tensor, class_reduction: str = "none" +) -> torch.Tensor: + """ + Function used to reduce classification metrics of the form `num / denom * weights`. 
+ For example for calculating standard accuracy the num would be number of + true positives per class, denom would be the support per class, and weights + would be a tensor of 1s + + Args: + num: numerator tensor + denom: denominator tensor + weights: weights for each class + class_reduction: reduction method for multiclass problems + + - ``'micro'``: calculate metrics globally (default) + - ``'macro'``: calculate metrics for each label, and find their unweighted mean. + - ``'weighted'``: calculate metrics for each label, and find their weighted mean. + - ``'none'`` or ``None``: returns calculated metric per class + + Raises: + ValueError: + If ``class_reduction`` is none of ``"micro"``, ``"macro"``, ``"weighted"``, ``"none"`` or ``None``. + """ + valid_reduction = ("micro", "macro", "weighted", "none", None) + if class_reduction == "micro": + fraction = torch.sum(num) / torch.sum(denom) + else: + fraction = num / denom + + # We need to take care of instances where the denom can be 0 + # for some (or all) classes which will produce nans + fraction[fraction != fraction] = 0 + + if class_reduction == "micro": + return fraction + elif class_reduction == "macro": + return torch.mean(fraction) + elif class_reduction == "weighted": + return torch.sum(fraction * (weights.float() / torch.sum(weights))) + elif class_reduction == "none" or class_reduction is None: + return fraction + + raise ValueError( + f"Reduction parameter {class_reduction} unknown." + f" Choose between one of these: {valid_reduction}" + ) + + +def _stable_1d_sort(x: torch, N: int = 2049): + """ + Stable sort of 1d tensors. Pytorch defaults to a stable sorting algorithm + if number of elements are larger than 2048. This function pads the tensors, + makes the sort and returns the sorted array (with the padding removed) + See this discussion: https://discuss.pytorch.org/t/is-torch-sort-stable/20714 + """ + if x.ndim > 1: + raise ValueError('Stable sort only works on 1d tensors') + n = x.numel() + if N - n > 0: + x_max = x.max() + x = torch.cat([x, (x_max + 1) * torch.ones(N - n, dtype=x.dtype, device=x.device)], 0) + x_sort = x.sort() + i = min(N, n) + return x_sort.values[:i], x_sort.indices[:i] diff --git a/mmcv/modeling/postprocessing.py b/mmcv/modeling/postprocessing.py new file mode 100644 index 0000000..b893d37 --- /dev/null +++ b/mmcv/modeling/postprocessing.py @@ -0,0 +1,100 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import torch +from torch.nn import functional as F + +from mmcv.structures import Instances, ROIMasks + + +# perhaps should rename to "resize_instance" +def detector_postprocess( + results: Instances, output_height: int, output_width: int, mask_threshold: float = 0.5 +): + """ + Resize the output instances. + The input images are often resized when entering an object detector. + As a result, we often need the outputs of the detector in a different + resolution from its inputs. + + This function will resize the raw outputs of an R-CNN detector + to produce outputs according to the desired output resolution. + + Args: + results (Instances): the raw outputs from the detector. + `results.image_size` contains the input image resolution the detector sees. + This object might be modified in-place. + output_height, output_width: the desired output resolution. + Returns: + Instances: the resized output from the model, based on the output resolution + """ + if isinstance(output_width, torch.Tensor): + # This shape might (but not necessarily) be tensors during tracing. 
+ # Converts integer tensors to float temporaries to ensure true + # division is performed when computing scale_x and scale_y. + output_width_tmp = output_width.float() + output_height_tmp = output_height.float() + new_size = torch.stack([output_height, output_width]) + else: + new_size = (output_height, output_width) + output_width_tmp = output_width + output_height_tmp = output_height + + scale_x, scale_y = ( + output_width_tmp / results.image_size[1], + output_height_tmp / results.image_size[0], + ) + results = Instances(new_size, **results.get_fields()) + + if results.has("pred_boxes"): + output_boxes = results.pred_boxes + elif results.has("proposal_boxes"): + output_boxes = results.proposal_boxes + else: + output_boxes = None + assert output_boxes is not None, "Predictions must contain boxes!" + + output_boxes.scale(scale_x, scale_y) + output_boxes.clip(results.image_size) + + results = results[output_boxes.nonempty()] + + if results.has("pred_masks"): + if isinstance(results.pred_masks, ROIMasks): + roi_masks = results.pred_masks + else: + # pred_masks is a tensor of shape (N, 1, M, M) + roi_masks = ROIMasks(results.pred_masks[:, 0, :, :]) + results.pred_masks = roi_masks.to_bitmasks( + results.pred_boxes, output_height, output_width, mask_threshold + ).tensor # TODO return ROIMasks/BitMask object in the future + + if results.has("pred_keypoints"): + results.pred_keypoints[:, :, 0] *= scale_x + results.pred_keypoints[:, :, 1] *= scale_y + + return results + + +def sem_seg_postprocess(result, img_size, output_height, output_width): + """ + Return semantic segmentation predictions in the original resolution. + + The input images are often resized when entering semantic segmentor. Moreover, in same + cases, they also padded inside segmentor to be divisible by maximum network stride. + As a result, we often need the predictions of the segmentor in a different + resolution from its inputs. + + Args: + result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W), + where C is the number of classes, and H, W are the height and width of the prediction. + img_size (tuple): image size that segmentor is taking as input. + output_height, output_width: the desired output resolution. + + Returns: + semantic segmentation prediction (Tensor): A tensor of the shape + (C, output_height, output_width) that contains per-pixel soft predictions. 
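+ For example (illustrative numbers): with result of shape (19, 512, 1024),
+ img_size (500, 1000) and a desired output of 1000 x 2000, the logits are
+ first cropped to the un-padded (500, 1000) region and then bilinearly
+ resized to (19, 1000, 2000).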
+ """ + result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1) + result = F.interpolate( + result, size=(output_height, output_width), mode="bilinear", align_corners=False + )[0] + return result \ No newline at end of file diff --git a/mmcv/models/__init__.py b/mmcv/models/__init__.py new file mode 100644 index 0000000..b493852 --- /dev/null +++ b/mmcv/models/__init__.py @@ -0,0 +1,14 @@ +from .backbones import * # noqa: F401,F403 +from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS, + ROI_EXTRACTORS, SHARED_HEADS, FUSION_LAYERS, + MIDDLE_ENCODERS, VOXEL_ENCODERS, SEGMENTORS, + build_backbone, build_detector, build_fusion_layer, + build_head, build_loss, build_middle_encoder, + build_model, build_neck, build_roi_extractor, + build_shared_head, build_voxel_encoder, build_segmentor) +from .dense_heads import * # noqa: F401,F403 +from .detectors import * # noqa: F401,F403 +from .losses import * # noqa: F401,F403 +from .necks import * # noqa: F401,F403 +from .bricks import * +from .utils import * \ No newline at end of file diff --git a/mmcv/models/backbones/__init__.py b/mmcv/models/backbones/__init__.py new file mode 100644 index 0000000..22bb120 --- /dev/null +++ b/mmcv/models/backbones/__init__.py @@ -0,0 +1,3 @@ +from .resnet import ResNet, ResNetV1d +from .vgg import VGG +from .base_module import BaseModule, ModuleList, Sequential diff --git a/mmcv/models/backbones/base_module.py b/mmcv/models/backbones/base_module.py new file mode 100644 index 0000000..94a8d04 --- /dev/null +++ b/mmcv/models/backbones/base_module.py @@ -0,0 +1,195 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import warnings +from abc import ABCMeta +from collections import defaultdict +from logging import FileHandler + +import torch.nn as nn + +from mmcv.utils import master_only +from mmcv.utils.logging import get_logger, logger_initialized, print_log + + +class BaseModule(nn.Module, metaclass=ABCMeta): + """Base module for all modules in openmmlab. + + ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional + functionality of parameter initialization. Compared with + ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes. + + - ``init_cfg``: the config to control the initialization. + - ``init_weights``: The function of parameter + initialization and recording initialization + information. + - ``_params_init_info``: Used to track the parameter + initialization information. This attribute only + exists during executing the ``init_weights``. + + Args: + init_cfg (dict, optional): Initialization config dict. + """ + + def __init__(self, init_cfg=None): + """Initialize BaseModule, inherited from `torch.nn.Module`""" + + # NOTE init_cfg can be defined in different levels, but init_cfg + # in low levels has a higher priority. 
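+        # (Added example, illustrative: a config may set
+        # init_cfg=dict(type='Pretrained', ...) on a whole backbone while one
+        # submodule carries its own init_cfg; the submodule's lower-level
+        # setting takes priority.)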
+ + super(BaseModule, self).__init__() + # define default value of init_cfg instead of hard code + # in init_weights() function + self._is_init = False + + self.init_cfg = copy.deepcopy(init_cfg) + + # Backward compatibility in derived classes + # if pretrained is not None: + # warnings.warn('DeprecationWarning: pretrained is a deprecated \ + # key, please consider using init_cfg') + # self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + + @property + def is_init(self): + return self._is_init + + def init_weights(self): + """Initialize the weights.""" + + is_top_level_module = False + # check if it is top-level module + if not hasattr(self, '_params_init_info'): + # The `_params_init_info` is used to record the initialization + # information of the parameters + # the key should be the obj:`nn.Parameter` of model and the value + # should be a dict containing + # - init_info (str): The string that describes the initialization. + # - tmp_mean_value (FloatTensor): The mean of the parameter, + # which indicates whether the parameter has been modified. + # this attribute would be deleted after all parameters + # is initialized. + self._params_init_info = defaultdict(dict) + is_top_level_module = True + + # Initialize the `_params_init_info`, + # When detecting the `tmp_mean_value` of + # the corresponding parameter is changed, update related + # initialization information + for name, param in self.named_parameters(): + self._params_init_info[param][ + 'init_info'] = f'The value is the same before and ' \ + f'after calling `init_weights` ' \ + f'of {self.__class__.__name__} ' + self._params_init_info[param][ + 'tmp_mean_value'] = param.data.mean() + + # pass `params_init_info` to all submodules + # All submodules share the same `params_init_info`, + # so it will be updated when parameters are + # modified at any level of the model. + for sub_module in self.modules(): + sub_module._params_init_info = self._params_init_info + + # Get the initialized logger, if not exist, + # create a logger named `mmcv` + logger_names = list(logger_initialized.keys()) + logger_name = logger_names[0] if logger_names else 'mmcv' + + from ..utils import initialize + from ..utils.weight_init import update_init_info + module_name = self.__class__.__name__ + if not self._is_init: + if self.init_cfg: + print_log( + f'initialize {module_name} with init_cfg {self.init_cfg}', + logger=logger_name) + initialize(self, self.init_cfg) + if isinstance(self.init_cfg, dict): + # prevent the parameters of + # the pre-trained model + # from being overwritten by + # the `init_weights` + if self.init_cfg['type'] == 'Pretrained': + return + + for m in self.children(): + if hasattr(m, 'init_weights'): + m.init_weights() + # users may overload the `init_weights` + update_init_info( + m, + init_info=f'Initialized by ' + f'user-defined `init_weights`' + f' in {m.__class__.__name__} ') + + self._is_init = True + else: + warnings.warn(f'init_weights of {self.__class__.__name__} has ' + f'been called more than once.') + + if is_top_level_module: + self._dump_init_info(logger_name) + + for sub_module in self.modules(): + del sub_module._params_init_info + + @master_only + def _dump_init_info(self, logger_name): + """Dump the initialization information to a file named + `initialization.log.json` in workdir. + + Args: + logger_name (str): The name of logger. 
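+
+        A dumped entry looks roughly like this (illustrative)::
+
+            backbone.conv1.weight - torch.Size([64, 3, 7, 7]):
+            Initialized by user-defined `init_weights` in ResNet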
+ """ + + logger = get_logger(logger_name) + + with_file_handler = False + # dump the information to the logger file if there is a `FileHandler` + for handler in logger.handlers: + if isinstance(handler, FileHandler): + handler.stream.write( + 'Name of parameter - Initialization information\n') + for name, param in self.named_parameters(): + handler.stream.write( + f'\n{name} - {param.shape}: ' + f"\n{self._params_init_info[param]['init_info']} \n") + handler.stream.flush() + with_file_handler = True + if not with_file_handler: + for name, param in self.named_parameters(): + print_log( + f'\n{name} - {param.shape}: ' + f"\n{self._params_init_info[param]['init_info']} \n ", + logger=logger_name) + + def __repr__(self): + s = super().__repr__() + if self.init_cfg: + s += f'\ninit_cfg={self.init_cfg}' + return s + + +class Sequential(BaseModule, nn.Sequential): + """Sequential module in openmmlab. + + Args: + init_cfg (dict, optional): Initialization config dict. + """ + + def __init__(self, *args, init_cfg=None): + BaseModule.__init__(self, init_cfg) + nn.Sequential.__init__(self, *args) + + +class ModuleList(BaseModule, nn.ModuleList): + """ModuleList in openmmlab. + + Args: + modules (iterable, optional): an iterable of modules to add. + init_cfg (dict, optional): Initialization config dict. + """ + + def __init__(self, modules=None, init_cfg=None): + BaseModule.__init__(self, init_cfg) + nn.ModuleList.__init__(self, modules) diff --git a/mmcv/models/backbones/resnet.py b/mmcv/models/backbones/resnet.py new file mode 100644 index 0000000..0b21416 --- /dev/null +++ b/mmcv/models/backbones/resnet.py @@ -0,0 +1,671 @@ +import warnings + +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.models.bricks import build_conv_layer, build_norm_layer, build_plugin_layer +from mmcv.models.backbones.base_module import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from ..builder import BACKBONES +from ..utils import ResLayer + + +class BasicBlock(BaseModule): + expansion = 1 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + plugins=None, + init_cfg=None): + super(BasicBlock, self).__init__(init_cfg) + assert dcn is None, 'Not implemented yet.' + assert plugins is None, 'Not implemented yet.' 
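+        # (Added note: DCN and plugins are only implemented for Bottleneck
+        # below; BasicBlock keeps the plain conv3x3-conv3x3 structure, hence
+        # the asserts above.)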
+ + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, planes, planes, 3, padding=1, bias=False) + self.add_module(self.norm2_name, norm2) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.with_cp = with_cp + + @property + def norm1(self): + """nn.Module: normalization layer after the first convolution layer""" + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: normalization layer after the second convolution layer""" + return getattr(self, self.norm2_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +class Bottleneck(BaseModule): + expansion = 4 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + plugins=None, + init_cfg=None): + """Bottleneck block for ResNet. + + If style is "pytorch", the stride-two layer is the 3x3 conv layer, if + it is "caffe", the stride-two layer is the first 1x1 conv layer. 
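+
+        Example (illustrative)::
+
+            >>> b = Bottleneck(64, 16, stride=2, style='pytorch')
+            >>> (b.conv1.stride, b.conv2.stride)
+            ((1, 1), (2, 2))
+            >>> b = Bottleneck(64, 16, stride=2, style='caffe')
+            >>> (b.conv1.stride, b.conv2.stride)
+            ((2, 2), (1, 1))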
+ """ + super(Bottleneck, self).__init__(init_cfg) + assert style in ['pytorch', 'caffe'] + assert dcn is None or isinstance(dcn, dict) + assert plugins is None or isinstance(plugins, list) + if plugins is not None: + allowed_position = ['after_conv1', 'after_conv2', 'after_conv3'] + assert all(p['position'] in allowed_position for p in plugins) + + self.inplanes = inplanes + self.planes = planes + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.dcn = dcn + self.with_dcn = dcn is not None + self.plugins = plugins + self.with_plugins = plugins is not None + + if self.with_plugins: + # collect plugins for conv1/conv2/conv3 + self.after_conv1_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv1' + ] + self.after_conv2_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv2' + ] + self.after_conv3_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv3' + ] + + if self.style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + norm_cfg, planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + fallback_on_stride = False + if self.with_dcn: + fallback_on_stride = dcn.pop('fallback_on_stride', False) + if not self.with_dcn or fallback_on_stride: + self.conv2 = build_conv_layer( + conv_cfg, + planes, + planes, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + else: + assert self.conv_cfg is None, 'conv_cfg must be None for DCN' + self.conv2 = build_conv_layer( + dcn, + planes, + planes, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + conv_cfg, + planes, + planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + if self.with_plugins: + self.after_conv1_plugin_names = self.make_block_plugins( + planes, self.after_conv1_plugins) + self.after_conv2_plugin_names = self.make_block_plugins( + planes, self.after_conv2_plugins) + self.after_conv3_plugin_names = self.make_block_plugins( + planes * self.expansion, self.after_conv3_plugins) + + def make_block_plugins(self, in_channels, plugins): + """make plugins for block. + + Args: + in_channels (int): Input channels of plugin. + plugins (list[dict]): List of plugins cfg to build. + + Returns: + list[str]: List of the names of plugin. 
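+
+        Note (added for clarity): every created layer is registered on the
+        block via ``add_module`` under a unique name, so plugin parameters
+        show up in ``state_dict()`` like any other submodule.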
+ """ + assert isinstance(plugins, list) + plugin_names = [] + for plugin in plugins: + plugin = plugin.copy() + name, layer = build_plugin_layer( + plugin, + in_channels=in_channels, + postfix=plugin.pop('postfix', '')) + assert not hasattr(self, name), f'duplicate plugin {name}' + self.add_module(name, layer) + plugin_names.append(name) + return plugin_names + + def forward_plugin(self, x, plugin_names): + out = x + for name in plugin_names: + out = getattr(self, name)(x) + return out + + @property + def norm1(self): + """nn.Module: normalization layer after the first convolution layer""" + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: normalization layer after the second convolution layer""" + return getattr(self, self.norm2_name) + + @property + def norm3(self): + """nn.Module: normalization layer after the third convolution layer""" + return getattr(self, self.norm3_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv1_plugin_names) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv2_plugin_names) + + out = self.conv3(out) + out = self.norm3(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv3_plugin_names) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@BACKBONES.register_module() +class ResNet(BaseModule): + """ResNet backbone. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + stem_channels (int | None): Number of stem channels. If not specified, + it will be the same as `base_channels`. Default: None. + base_channels (int): Number of base channels of res layer. Default: 64. + in_channels (int): Number of input image channels. Default: 3. + num_stages (int): Resnet stages. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. + norm_cfg (dict): Dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + plugins (list[dict]): List of plugins for stages, each dict contains: + + - cfg (dict, required): Cfg dict to build plugin. + - position (str, required): Position inside block to insert + plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'. + - stages (tuple[bool], optional): Stages to apply plugin, length + should be same as 'num_stages'. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. 
+ zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + + Example: + >>> from mmcv.models import ResNet + >>> import torch + >>> self = ResNet(depth=18) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 64, 8, 8) + (1, 128, 4, 4) + (1, 256, 2, 2) + (1, 512, 1, 1) + """ + + arch_settings = { + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + in_channels=3, + stem_channels=None, + base_channels=64, + num_stages=4, + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(0, 1, 2, 3), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + dcn=None, + stage_with_dcn=(False, False, False, False), + plugins=None, + with_cp=False, + zero_init_residual=True, + pretrained=None, + init_cfg=None): + super(ResNet, self).__init__(init_cfg) + self.zero_init_residual = zero_init_residual + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + + block_init_cfg = None + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be setting at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + block = self.arch_settings[depth][0] + if self.zero_init_residual: + if block is BasicBlock: + block_init_cfg = dict( + type='Constant', + val=0, + override=dict(name='norm2')) + elif block is Bottleneck: + block_init_cfg = dict( + type='Constant', + val=0, + override=dict(name='norm3')) + else: + raise TypeError('pretrained must be a str or None') + + self.depth = depth + if stem_channels is None: + stem_channels = base_channels + self.stem_channels = stem_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert num_stages >= 1 and num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.dcn = dcn + self.stage_with_dcn = stage_with_dcn + if dcn is not None: + assert len(stage_with_dcn) == num_stages + self.plugins = plugins + self.block, stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + self.inplanes = stem_channels + + self._make_stem_layer(in_channels, stem_channels) + + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = strides[i] + dilation = dilations[i] + dcn = self.dcn if self.stage_with_dcn[i] else None + if plugins is not None: + 
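+                # keep only the plugins enabled for this stage
+                # (see ``make_stage_plugins`` below)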
stage_plugins = self.make_stage_plugins(plugins, i)
+            else:
+                stage_plugins = None
+            planes = base_channels * 2**i
+            res_layer = self.make_res_layer(
+                block=self.block,
+                inplanes=self.inplanes,
+                planes=planes,
+                num_blocks=num_blocks,
+                stride=stride,
+                dilation=dilation,
+                style=self.style,
+                avg_down=self.avg_down,
+                with_cp=with_cp,
+                conv_cfg=conv_cfg,
+                norm_cfg=norm_cfg,
+                dcn=dcn,
+                plugins=stage_plugins,
+                init_cfg=block_init_cfg)
+            self.inplanes = planes * self.block.expansion
+            layer_name = f'layer{i + 1}'
+            self.add_module(layer_name, res_layer)
+            self.res_layers.append(layer_name)
+
+        self._freeze_stages()
+
+        self.feat_dim = self.block.expansion * base_channels * 2**(
+            len(self.stage_blocks) - 1)
+
+    def make_stage_plugins(self, plugins, stage_idx):
+        """Make plugins for the ``stage_idx``-th ResNet stage.
+
+        Currently we support inserting ``context_block``,
+        ``empirical_attention_block`` and ``nonlocal_block`` into backbones
+        like ResNet/ResNeXt. They can be inserted after conv1/conv2/conv3 of
+        Bottleneck.
+
+        An example of the plugins format:
+
+        Examples:
+            >>> plugins=[
+            ...     dict(cfg=dict(type='xxx', arg1='xxx'),
+            ...          stages=(False, True, True, True),
+            ...          position='after_conv2'),
+            ...     dict(cfg=dict(type='yyy'),
+            ...          stages=(True, True, True, True),
+            ...          position='after_conv3'),
+            ...     dict(cfg=dict(type='zzz', postfix='1'),
+            ...          stages=(True, True, True, True),
+            ...          position='after_conv3'),
+            ...     dict(cfg=dict(type='zzz', postfix='2'),
+            ...          stages=(True, True, True, True),
+            ...          position='after_conv3')
+            ... ]
+            >>> self = ResNet(depth=18)
+            >>> stage_plugins = self.make_stage_plugins(plugins, 0)
+            >>> assert len(stage_plugins) == 3
+
+        Suppose ``stage_idx=0``, the structure of blocks in the stage would be:
+
+        .. code-block:: none
+
+            conv1->conv2->conv3->yyy->zzz1->zzz2
+
+        Suppose ``stage_idx=1``, the structure of blocks in the stage would be:
+
+        .. code-block:: none
+
+            conv1->conv2->xxx->conv3->yyy->zzz1->zzz2
+
+        If ``stages`` is missing, the plugin is applied to all stages.
+
+        Args:
+            plugins (list[dict]): List of plugins cfg to build. The postfix is
+                required if multiple plugins of the same type are inserted.
+            stage_idx (int): Index of stage to build
+
+        Returns:
+            list[dict]: Plugins for current stage
+        """
+        stage_plugins = []
+        for plugin in plugins:
+            plugin = plugin.copy()
+            stages = plugin.pop('stages', None)
+            assert stages is None or len(stages) == self.num_stages
+            # whether to insert plugin into current stage
+            if stages is None or stages[stage_idx]:
+                stage_plugins.append(plugin)
+
+        return stage_plugins
+
+    def make_res_layer(self, **kwargs):
+        """Pack all blocks in a stage into a ``ResLayer``."""
+        return ResLayer(**kwargs)
+
+    @property
+    def norm1(self):
+        """nn.Module: the normalization layer named "norm1" """
+        return getattr(self, self.norm1_name)
+
+    def _make_stem_layer(self, in_channels, stem_channels):
+        if self.deep_stem:
+            self.stem = nn.Sequential(
+                build_conv_layer(
+                    self.conv_cfg,
+                    in_channels,
+                    stem_channels // 2,
+                    kernel_size=3,
+                    stride=2,
+                    padding=1,
+                    bias=False),
+                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
+                nn.ReLU(inplace=True),
+                build_conv_layer(
+                    self.conv_cfg,
+                    stem_channels // 2,
+                    stem_channels // 2,
+                    kernel_size=3,
+                    stride=1,
+                    padding=1,
+                    bias=False),
+                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
+                nn.ReLU(inplace=True),
+                build_conv_layer(
+                    self.conv_cfg,
+                    stem_channels // 2,
+                    stem_channels,
+                    kernel_size=3,
+                    stride=1,
+                    padding=1,
+                    bias=False),
+                build_norm_layer(self.norm_cfg, stem_channels)[1],
+                nn.ReLU(inplace=True))
+        else:
+            self.conv1 = build_conv_layer(
+                self.conv_cfg,
+                in_channels,
+                stem_channels,
+                kernel_size=7,
+                stride=2,
+                padding=3,
+                bias=False)
+            self.norm1_name, norm1 = build_norm_layer(
+                self.norm_cfg, stem_channels, postfix=1)
+            self.add_module(self.norm1_name, norm1)
+            self.relu = nn.ReLU(inplace=True)
+        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+
+    def _freeze_stages(self):
+        if self.frozen_stages >= 0:
+            if self.deep_stem:
+                self.stem.eval()
+                for param in self.stem.parameters():
+                    param.requires_grad = False
+            else:
+                self.norm1.eval()
+                for m in [self.conv1, self.norm1]:
+                    for param in m.parameters():
+                        param.requires_grad = False
+
+            for i in range(1, self.frozen_stages + 1):
+                m = getattr(self, f'layer{i}')
+                m.eval()
+                for param in m.parameters():
+                    param.requires_grad = False
+
+    def forward(self, x):
+        """Forward function."""
+        if self.deep_stem:
+            x = self.stem(x)
+        else:
+            x = self.conv1(x)
+            x = self.norm1(x)
+            x = self.relu(x)
+        x = self.maxpool(x)
+        outs = []
+        for i, layer_name in enumerate(self.res_layers):
+            res_layer = getattr(self, layer_name)
+            x = res_layer(x)
+            if i in self.out_indices:
+                outs.append(x)
+        return tuple(outs)
+
+    def train(self, mode=True):
+        """Convert the model into training mode while keeping the
+        normalization layers frozen."""
+        super(ResNet, self).train(mode)
+        self._freeze_stages()
+        if mode and self.norm_eval:
+            for m in self.modules():
+                # trick: eval() affects BatchNorm (and its variants) only
+                if isinstance(m, _BatchNorm):
+                    m.eval()
+
+
+@BACKBONES.register_module()
+class ResNetV1d(ResNet):
+    r"""ResNetV1d variant described in `Bag of Tricks
+    <https://arxiv.org/abs/1812.01187>`_.
+
+    Compared with default ResNet (ResNetV1b), ResNetV1d replaces the 7x7 conv
+    in the input stem with three 3x3 convs. And in the downsampling block, a
+    2x2 avg_pool with stride 2 is added before conv, whose stride is changed
+    to 1.
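+
+    Example (illustrative; mirrors the ResNet example above)::
+
+        >>> import torch
+        >>> self = ResNetV1d(depth=50)
+        >>> self.eval()
+        >>> inputs = torch.rand(1, 3, 64, 64)
+        >>> level_outputs = self.forward(inputs)
+        >>> for level_out in level_outputs:
+        ...     print(tuple(level_out.shape))
+        (1, 256, 16, 16)
+        (1, 512, 8, 8)
+        (1, 1024, 4, 4)
+        (1, 2048, 2, 2)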
+ """ + + def __init__(self, **kwargs): + super(ResNetV1d, self).__init__( + deep_stem=True, avg_down=True, **kwargs) diff --git a/mmcv/models/backbones/vgg.py b/mmcv/models/backbones/vgg.py new file mode 100644 index 0000000..dcda6f1 --- /dev/null +++ b/mmcv/models/backbones/vgg.py @@ -0,0 +1,175 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging + +import torch.nn as nn + +from ..utils.weight_init import constant_init, kaiming_init, normal_init + + +def conv3x3(in_planes, out_planes, dilation=1): + """3x3 convolution with padding.""" + return nn.Conv2d( + in_planes, + out_planes, + kernel_size=3, + padding=dilation, + dilation=dilation) + + +def make_vgg_layer(inplanes, + planes, + num_blocks, + dilation=1, + with_bn=False, + ceil_mode=False): + layers = [] + for _ in range(num_blocks): + layers.append(conv3x3(inplanes, planes, dilation)) + if with_bn: + layers.append(nn.BatchNorm2d(planes)) + layers.append(nn.ReLU(inplace=True)) + inplanes = planes + layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode)) + + return layers + + +class VGG(nn.Module): + """VGG backbone. + + Args: + depth (int): Depth of vgg, from {11, 13, 16, 19}. + with_bn (bool): Use BatchNorm or not. + num_classes (int): number of classes for classification. + num_stages (int): VGG stages, normally 5. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. + bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze + running stats (mean and var). + bn_frozen (bool): Whether to freeze weight and bias of BN layers. + """ + + arch_settings = { + 11: (1, 1, 2, 2, 2), + 13: (2, 2, 2, 2, 2), + 16: (2, 2, 3, 3, 3), + 19: (2, 2, 4, 4, 4) + } + + def __init__(self, + depth, + with_bn=False, + num_classes=-1, + num_stages=5, + dilations=(1, 1, 1, 1, 1), + out_indices=(0, 1, 2, 3, 4), + frozen_stages=-1, + bn_eval=True, + bn_frozen=False, + ceil_mode=False, + with_last_pool=True): + super(VGG, self).__init__() + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for vgg') + assert num_stages >= 1 and num_stages <= 5 + stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + assert len(dilations) == num_stages + assert max(out_indices) <= num_stages + + self.num_classes = num_classes + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.bn_eval = bn_eval + self.bn_frozen = bn_frozen + + self.inplanes = 3 + start_idx = 0 + vgg_layers = [] + self.range_sub_modules = [] + for i, num_blocks in enumerate(self.stage_blocks): + num_modules = num_blocks * (2 + with_bn) + 1 + end_idx = start_idx + num_modules + dilation = dilations[i] + planes = 64 * 2**i if i < 4 else 512 + vgg_layer = make_vgg_layer( + self.inplanes, + planes, + num_blocks, + dilation=dilation, + with_bn=with_bn, + ceil_mode=ceil_mode) + vgg_layers.extend(vgg_layer) + self.inplanes = planes + self.range_sub_modules.append([start_idx, end_idx]) + start_idx = end_idx + if not with_last_pool: + vgg_layers.pop(-1) + self.range_sub_modules[-1][1] -= 1 + self.module_name = 'features' + self.add_module(self.module_name, nn.Sequential(*vgg_layers)) + + if self.num_classes > 0: + self.classifier = nn.Sequential( + nn.Linear(512 * 7 * 7, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, num_classes), + ) + + def init_weights(self, 
pretrained=None):
+        if isinstance(pretrained, str):
+            logger = logging.getLogger()
+            from ...runner import load_checkpoint
+            load_checkpoint(self, pretrained, strict=False, logger=logger)
+        elif pretrained is None:
+            for m in self.modules():
+                if isinstance(m, nn.Conv2d):
+                    kaiming_init(m)
+                elif isinstance(m, nn.BatchNorm2d):
+                    constant_init(m, 1)
+                elif isinstance(m, nn.Linear):
+                    normal_init(m, std=0.01)
+        else:
+            raise TypeError('pretrained must be a str or None')
+
+    def forward(self, x):
+        outs = []
+        vgg_layers = getattr(self, self.module_name)
+        for i in range(len(self.stage_blocks)):
+            for j in range(*self.range_sub_modules[i]):
+                vgg_layer = vgg_layers[j]
+                x = vgg_layer(x)
+            if i in self.out_indices:
+                outs.append(x)
+        if self.num_classes > 0:
+            x = x.view(x.size(0), -1)
+            x = self.classifier(x)
+            outs.append(x)
+        if len(outs) == 1:
+            return outs[0]
+        else:
+            return tuple(outs)
+
+    def train(self, mode=True):
+        super(VGG, self).train(mode)
+        if self.bn_eval:
+            for m in self.modules():
+                if isinstance(m, nn.BatchNorm2d):
+                    m.eval()
+                    if self.bn_frozen:
+                        for params in m.parameters():
+                            params.requires_grad = False
+        vgg_layers = getattr(self, self.module_name)
+        if mode and self.frozen_stages >= 0:
+            for i in range(self.frozen_stages):
+                for j in range(*self.range_sub_modules[i]):
+                    mod = vgg_layers[j]
+                    mod.eval()
+                    for param in mod.parameters():
+                        param.requires_grad = False
diff --git a/mmcv/models/backbones/vovnet.py b/mmcv/models/backbones/vovnet.py
new file mode 100755
index 0000000..879d186
--- /dev/null
+++ b/mmcv/models/backbones/vovnet.py
@@ -0,0 +1,375 @@
+
+import warnings  # needed by VoVNet.__init__ for the pretrained deprecation warning
+from collections import OrderedDict
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.nn.modules.batchnorm import _BatchNorm
+
+# Use the BaseModule and BACKBONES registry vendored in this repo, keeping
+# this file consistent with resnet.py/vgg.py in the same package.
+from mmcv.models.backbones.base_module import BaseModule
+from ..builder import BACKBONES
+
+
+VoVNet19_slim_dw_eSE = {
+    'stem': [64, 64, 64],
+    'stage_conv_ch': [64, 80, 96, 112],
+    'stage_out_ch': [112, 256, 384, 512],
+    "layer_per_block": 3,
+    "block_per_stage": [1, 1, 1, 1],
+    "eSE": True,
+    "dw": True
+}
+
+VoVNet19_dw_eSE = {
+    'stem': [64, 64, 64],
+    "stage_conv_ch": [128, 160, 192, 224],
+    "stage_out_ch": [256, 512, 768, 1024],
+    "layer_per_block": 3,
+    "block_per_stage": [1, 1, 1, 1],
+    "eSE": True,
+    "dw": True
+}
+
+VoVNet19_slim_eSE = {
+    'stem': [64, 64, 128],
+    'stage_conv_ch': [64, 80, 96, 112],
+    'stage_out_ch': [112, 256, 384, 512],
+    'layer_per_block': 3,
+    'block_per_stage': [1, 1, 1, 1],
+    'eSE': True,
+    "dw": False
+}
+
+VoVNet19_eSE = {
+    'stem': [64, 64, 128],
+    "stage_conv_ch": [128, 160, 192, 224],
+    "stage_out_ch": [256, 512, 768, 1024],
+    "layer_per_block": 3,
+    "block_per_stage": [1, 1, 1, 1],
+    "eSE": True,
+    "dw": False
+}
+
+VoVNet39_eSE = {
+    'stem': [64, 64, 128],
+    "stage_conv_ch": [128, 160, 192, 224],
+    "stage_out_ch": [256, 512, 768, 1024],
+    "layer_per_block": 5,
+    "block_per_stage": [1, 1, 2, 2],
+    "eSE": True,
+    "dw": False
+}
+
+VoVNet57_eSE = {
+    'stem': [64, 64, 128],
+    "stage_conv_ch": [128, 160, 192, 224],
+    "stage_out_ch": [256, 512, 768, 1024],
+    "layer_per_block": 5,
+    "block_per_stage": [1, 1, 4, 3],
+    "eSE": True,
+    "dw": False
+}
+
+VoVNet99_eSE = {
+    'stem': [64, 64, 128],
+    "stage_conv_ch": [128, 160, 192, 224],
+    "stage_out_ch": [256, 512, 768, 1024],
+    "layer_per_block": 5,
+    "block_per_stage": [1, 3, 9, 3],
+    "eSE": True,
+    "dw": False
+}
+
+_STAGE_SPECS = {
+    "V-19-slim-dw-eSE": VoVNet19_slim_dw_eSE,
+    "V-19-dw-eSE": VoVNet19_dw_eSE,
+    "V-19-slim-eSE": VoVNet19_slim_eSE,
+    "V-19-eSE": VoVNet19_eSE,
+    "V-39-eSE":
VoVNet39_eSE, + "V-57-eSE": VoVNet57_eSE, + "V-99-eSE": VoVNet99_eSE, +} + + +def dw_conv3x3(in_channels, out_channels, module_name, postfix, stride=1, kernel_size=3, padding=1): + """3x3 convolution with padding""" + return [ + ( + '{}_{}/dw_conv3x3'.format(module_name, postfix), + nn.Conv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=out_channels, + bias=False + ) + ), + ( + '{}_{}/pw_conv1x1'.format(module_name, postfix), + nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, groups=1, bias=False) + ), + ('{}_{}/pw_norm'.format(module_name, postfix), nn.BatchNorm2d(out_channels)), + ('{}_{}/pw_relu'.format(module_name, postfix), nn.ReLU(inplace=True)), + ] + + +def conv3x3(in_channels, out_channels, module_name, postfix, stride=1, groups=1, kernel_size=3, padding=1): + """3x3 convolution with padding""" + return [ + ( + f"{module_name}_{postfix}/conv", + nn.Conv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + bias=False, + ), + ), + (f"{module_name}_{postfix}/norm", nn.BatchNorm2d(out_channels)), + (f"{module_name}_{postfix}/relu", nn.ReLU(inplace=True)), + ] + + +def conv1x1(in_channels, out_channels, module_name, postfix, stride=1, groups=1, kernel_size=1, padding=0): + """1x1 convolution with padding""" + return [ + ( + f"{module_name}_{postfix}/conv", + nn.Conv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + bias=False, + ), + ), + (f"{module_name}_{postfix}/norm", nn.BatchNorm2d(out_channels)), + (f"{module_name}_{postfix}/relu", nn.ReLU(inplace=True)), + ] + + +class Hsigmoid(nn.Module): + def __init__(self, inplace=True): + super(Hsigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return F.relu6(x + 3.0, inplace=self.inplace) / 6.0 + + +class eSEModule(nn.Module): + def __init__(self, channel, reduction=4): + super(eSEModule, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Conv2d(channel, channel, kernel_size=1, padding=0) + self.hsigmoid = Hsigmoid() + + def forward(self, x): + input = x + x = self.avg_pool(x) + x = self.fc(x) + x = self.hsigmoid(x) + return input * x + + +class _OSA_module(nn.Module): + def __init__( + self, in_ch, stage_ch, concat_ch, layer_per_block, module_name, SE=False, identity=False, depthwise=False + ): + + super(_OSA_module, self).__init__() + + self.identity = identity + self.depthwise = depthwise + self.isReduced = False + self.layers = nn.ModuleList() + in_channel = in_ch + if self.depthwise and in_channel != stage_ch: + self.isReduced = True + self.conv_reduction = nn.Sequential( + OrderedDict(conv1x1(in_channel, stage_ch, "{}_reduction".format(module_name), "0")) + ) + for i in range(layer_per_block): + if self.depthwise: + self.layers.append(nn.Sequential(OrderedDict(dw_conv3x3(stage_ch, stage_ch, module_name, i)))) + else: + self.layers.append(nn.Sequential(OrderedDict(conv3x3(in_channel, stage_ch, module_name, i)))) + in_channel = stage_ch + + # feature aggregation + in_channel = in_ch + layer_per_block * stage_ch + self.concat = nn.Sequential(OrderedDict(conv1x1(in_channel, concat_ch, module_name, "concat"))) + + self.ese = eSEModule(concat_ch) + + def forward(self, x): + + identity_feat = x + + output = [] + output.append(x) + if self.depthwise and self.isReduced: + x = self.conv_reduction(x) + for layer in self.layers: + x = layer(x) + output.append(x) + + x = 
torch.cat(output, dim=1) + xt = self.concat(x) + + xt = self.ese(xt) + + if self.identity: + xt = xt + identity_feat + + return xt + + +class _OSA_stage(nn.Sequential): + def __init__( + self, in_ch, stage_ch, concat_ch, block_per_stage, layer_per_block, stage_num, SE=False, depthwise=False + ): + + super(_OSA_stage, self).__init__() + + if not stage_num == 2: + self.add_module("Pooling", nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)) + + if block_per_stage != 1: + SE = False + module_name = f"OSA{stage_num}_1" + self.add_module( + module_name, _OSA_module(in_ch, stage_ch, concat_ch, layer_per_block, module_name, SE, depthwise=depthwise) + ) + for i in range(block_per_stage - 1): + if i != block_per_stage - 2: # last block + SE = False + module_name = f"OSA{stage_num}_{i + 2}" + self.add_module( + module_name, + _OSA_module( + concat_ch, + stage_ch, + concat_ch, + layer_per_block, + module_name, + SE, + identity=True, + depthwise=depthwise + ), + ) + + +@BACKBONES.register_module() +class VoVNet(BaseModule): + def __init__(self, spec_name, input_ch=3, out_features=None, + frozen_stages=-1, norm_eval=True, pretrained=None, init_cfg=None): + """ + Args: + input_ch(int) : the number of input channel + out_features (list[str]): name of the layers whose outputs should + be returned in forward. Can be anything in "stem", "stage2" ... + """ + super(VoVNet, self).__init__(init_cfg) + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + stage_specs = _STAGE_SPECS[spec_name] + + stem_ch = stage_specs["stem"] + config_stage_ch = stage_specs["stage_conv_ch"] + config_concat_ch = stage_specs["stage_out_ch"] + block_per_stage = stage_specs["block_per_stage"] + layer_per_block = stage_specs["layer_per_block"] + SE = stage_specs["eSE"] + depthwise = stage_specs["dw"] + + self._out_features = out_features + + # Stem module + conv_type = dw_conv3x3 if depthwise else conv3x3 + stem = conv3x3(input_ch, stem_ch[0], "stem", "1", 2) + stem += conv_type(stem_ch[0], stem_ch[1], "stem", "2", 1) + stem += conv_type(stem_ch[1], stem_ch[2], "stem", "3", 2) + self.add_module("stem", nn.Sequential((OrderedDict(stem)))) + current_stirde = 4 + self._out_feature_strides = {"stem": current_stirde, "stage2": current_stirde} + self._out_feature_channels = {"stem": stem_ch[2]} + + stem_out_ch = [stem_ch[2]] + in_ch_list = stem_out_ch + config_concat_ch[:-1] + # OSA stages + self.stage_names = [] + for i in range(4): # num_stages + name = "stage%d" % (i + 2) # stage 2 ... 
stage 5 + self.stage_names.append(name) + self.add_module( + name, + _OSA_stage( + in_ch_list[i], + config_stage_ch[i], + config_concat_ch[i], + block_per_stage[i], + layer_per_block, + i + 2, + SE, + depthwise, + ), + ) + + self._out_feature_channels[name] = config_concat_ch[i] + if not i == 0: + self._out_feature_strides[name] = current_stirde = int(current_stirde * 2) + + # initialize weights + # self._initialize_weights() + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + + def forward(self, x): + outputs = {} + x = self.stem(x) + if "stem" in self._out_features: + outputs["stem"] = x + for name in self.stage_names: + x = getattr(self, name)(x) + if name in self._out_features: + outputs[name] = x + + return outputs + + def _freeze_stages(self): + if self.frozen_stages >= 0: + m = getattr(self, 'stem') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'stage{i+1}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + """Convert the model into training mode while keep normalization layer + freezed.""" + super(VoVNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() \ No newline at end of file diff --git a/mmcv/models/bricks/__init__.py b/mmcv/models/bricks/__init__.py new file mode 100644 index 0000000..c492806 --- /dev/null +++ b/mmcv/models/bricks/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .activation import build_activation_layer +from .conv import build_conv_layer +from .plugin import build_plugin_layer +from .conv_module import ConvModule +from .drop import Dropout, DropPath +from .norm import build_norm_layer, is_norm +from .wrappers import (Conv2d, Conv3d, ConvTranspose2d, ConvTranspose3d, + Linear, MaxPool2d, MaxPool3d) +from .registry import (ACTIVATION_LAYERS, CONV_LAYERS, NORM_LAYERS, + PADDING_LAYERS, PLUGIN_LAYERS, UPSAMPLE_LAYERS) +from .transformer import build_positional_encoding \ No newline at end of file diff --git a/mmcv/models/bricks/activation.py b/mmcv/models/bricks/activation.py new file mode 100644 index 0000000..5606745 --- /dev/null +++ b/mmcv/models/bricks/activation.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmcv.utils import build_from_cfg, digit_version, TORCH_VERSION +from .registry import ACTIVATION_LAYERS + +for module in [ + nn.ReLU, nn.LeakyReLU, nn.PReLU, nn.RReLU, nn.ReLU6, nn.ELU, + nn.Sigmoid, nn.Tanh +]: + ACTIVATION_LAYERS.register_module(module=module) + + +@ACTIVATION_LAYERS.register_module(name='Clip') +@ACTIVATION_LAYERS.register_module() +class Clamp(nn.Module): + """Clamp activation layer. + + This activation function is to clamp the feature map value within + :math:`[min, max]`. More details can be found in ``torch.clamp()``. + + Args: + min (Number | optional): Lower-bound of the range to be clamped to. + Default to -1. + max (Number | optional): Upper-bound of the range to be clamped to. + Default to 1. + """ + + def __init__(self, min=-1., max=1.): + super(Clamp, self).__init__() + self.min = min + self.max = max + + def forward(self, x): + """Forward function. + + Args: + x (torch.Tensor): The input tensor. 
+
+        Returns:
+            torch.Tensor: Clamped tensor.
+        """
+        return torch.clamp(x, min=self.min, max=self.max)
+
+
+class GELU(nn.Module):
+    r"""Applies the Gaussian Error Linear Units function:
+
+    .. math::
+        \text{GELU}(x) = x * \Phi(x)
+    where :math:`\Phi(x)` is the Cumulative Distribution Function for
+    Gaussian Distribution.
+
+    Shape:
+        - Input: :math:`(N, *)` where `*` means any number of additional
+          dimensions
+        - Output: :math:`(N, *)`, same shape as the input
+
+    .. image:: scripts/activation_images/GELU.png
+
+    Examples::
+
+        >>> m = nn.GELU()
+        >>> input = torch.randn(2)
+        >>> output = m(input)
+    """
+
+    def forward(self, input):
+        return F.gelu(input)
+
+
+if (digit_version(TORCH_VERSION) < digit_version('1.4')):
+    ACTIVATION_LAYERS.register_module(module=GELU)
+else:
+    ACTIVATION_LAYERS.register_module(module=nn.GELU)
+
+
+def build_activation_layer(cfg):
+    """Build activation layer.
+
+    Args:
+        cfg (dict): The activation layer config, which should contain:
+            - type (str): Layer type.
+            - layer args: Args needed to instantiate an activation layer.
+
+    Returns:
+        nn.Module: Created activation layer.
+    """
+    return build_from_cfg(cfg, ACTIVATION_LAYERS)
diff --git a/mmcv/models/bricks/conv.py b/mmcv/models/bricks/conv.py
new file mode 100644
index 0000000..cf54491
--- /dev/null
+++ b/mmcv/models/bricks/conv.py
@@ -0,0 +1,44 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from torch import nn
+
+from .registry import CONV_LAYERS
+
+CONV_LAYERS.register_module('Conv1d', module=nn.Conv1d)
+CONV_LAYERS.register_module('Conv2d', module=nn.Conv2d)
+CONV_LAYERS.register_module('Conv3d', module=nn.Conv3d)
+CONV_LAYERS.register_module('Conv', module=nn.Conv2d)
+
+
+def build_conv_layer(cfg, *args, **kwargs):
+    """Build convolution layer.
+
+    Args:
+        cfg (None or dict): The conv layer config, which should contain:
+            - type (str): Layer type.
+            - layer args: Args needed to instantiate a conv layer.
+        args (argument list): Arguments passed to the `__init__`
+            method of the corresponding conv layer.
+        kwargs (keyword arguments): Keyword arguments passed to the `__init__`
+            method of the corresponding conv layer.
+
+    Returns:
+        nn.Module: Created conv layer.
+    """
+    if cfg is None:
+        cfg_ = dict(type='Conv2d')
+    else:
+        if not isinstance(cfg, dict):
+            raise TypeError('cfg must be a dict')
+        if 'type' not in cfg:
+            raise KeyError('the cfg dict must contain the key "type"')
+        cfg_ = cfg.copy()
+
+    layer_type = cfg_.pop('type')
+    if layer_type not in CONV_LAYERS:
+        raise KeyError(f'Unrecognized conv type {layer_type}')
+    else:
+        conv_layer = CONV_LAYERS.get(layer_type)
+
+    layer = conv_layer(*args, **kwargs, **cfg_)
+
+    return layer
diff --git a/mmcv/models/bricks/conv_module.py b/mmcv/models/bricks/conv_module.py
new file mode 100644
index 0000000..bbbc616
--- /dev/null
+++ b/mmcv/models/bricks/conv_module.py
@@ -0,0 +1,207 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import warnings
+
+import torch.nn as nn
+
+from torch.nn.modules.instancenorm import _InstanceNorm
+from torch.nn.modules.batchnorm import _BatchNorm
+from ..utils import constant_init, kaiming_init
+from .activation import build_activation_layer
+from .conv import build_conv_layer
+from .norm import build_norm_layer
+from .padding import build_padding_layer
+from .registry import PLUGIN_LAYERS
+
+
+@PLUGIN_LAYERS.register_module()
+class ConvModule(nn.Module):
+    """A conv block that bundles conv/norm/activation layers.
+ + This block simplifies the usage of convolution layers, which are commonly + used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU). + It is based upon three build methods: `build_conv_layer()`, + `build_norm_layer()` and `build_activation_layer()`. + + Besides, we add some additional features in this module. + 1. Automatically set `bias` of the conv layer. + 2. Spectral norm is supported. + 3. More padding modes are supported. Before PyTorch 1.5, nn.Conv2d only + supports zero and circular padding, and we add "reflect" padding mode. + + Args: + in_channels (int): Number of channels in the input feature map. + Same as that in ``nn._ConvNd``. + out_channels (int): Number of channels produced by the convolution. + Same as that in ``nn._ConvNd``. + kernel_size (int | tuple[int]): Size of the convolving kernel. + Same as that in ``nn._ConvNd``. + stride (int | tuple[int]): Stride of the convolution. + Same as that in ``nn._ConvNd``. + padding (int | tuple[int]): Zero-padding added to both sides of + the input. Same as that in ``nn._ConvNd``. + dilation (int | tuple[int]): Spacing between kernel elements. + Same as that in ``nn._ConvNd``. + groups (int): Number of blocked connections from input channels to + output channels. Same as that in ``nn._ConvNd``. + bias (bool | str): If specified as `auto`, it will be decided by the + norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise + False. Default: "auto". + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. Default: None. + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + inplace (bool): Whether to use inplace mode for activation. + Default: True. + with_spectral_norm (bool): Whether use spectral norm in conv module. + Default: False. + padding_mode (str): If the `padding_mode` has not been supported by + current `Conv2d` in PyTorch, we will use our own padding layer + instead. Currently, we support ['zeros', 'circular'] with official + implementation and ['reflect'] with our own implementation. + Default: 'zeros'. + order (tuple[str]): The order of conv/norm/activation layers. It is a + sequence of "conv", "norm" and "act". Common examples are + ("conv", "norm", "act") and ("act", "conv", "norm"). + Default: ('conv', 'norm', 'act'). + """ + + _abbr_ = 'conv_block' + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias='auto', + conv_cfg=None, + norm_cfg=None, + act_cfg=dict(type='ReLU'), + inplace=True, + with_spectral_norm=False, + padding_mode='zeros', + order=('conv', 'norm', 'act')): + super(ConvModule, self).__init__() + assert conv_cfg is None or isinstance(conv_cfg, dict) + assert norm_cfg is None or isinstance(norm_cfg, dict) + assert act_cfg is None or isinstance(act_cfg, dict) + official_padding_mode = ['zeros', 'circular'] + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.inplace = inplace + self.with_spectral_norm = with_spectral_norm + self.with_explicit_padding = padding_mode not in official_padding_mode + self.order = order + assert isinstance(self.order, tuple) and len(self.order) == 3 + assert set(order) == set(['conv', 'norm', 'act']) + + self.with_norm = norm_cfg is not None + self.with_activation = act_cfg is not None + # if the conv layer is before a norm layer, bias is unnecessary. 
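+        # (Added example: with order=('conv', 'norm', 'act') and
+        # norm_cfg=dict(type='BN'), bias='auto' resolves to False, because
+        # BN's per-channel shift makes a conv bias redundant.)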
+ if bias == 'auto': + bias = not self.with_norm + self.with_bias = bias + + if self.with_explicit_padding: + pad_cfg = dict(type=padding_mode) + self.padding_layer = build_padding_layer(pad_cfg, padding) + + # reset padding to 0 for conv module + conv_padding = 0 if self.with_explicit_padding else padding + # build convolution layer + self.conv = build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=conv_padding, + dilation=dilation, + groups=groups, + bias=bias) + # export the attributes of self.conv to a higher level for convenience + self.in_channels = self.conv.in_channels + self.out_channels = self.conv.out_channels + self.kernel_size = self.conv.kernel_size + self.stride = self.conv.stride + self.padding = padding + self.dilation = self.conv.dilation + self.transposed = self.conv.transposed + self.output_padding = self.conv.output_padding + self.groups = self.conv.groups + + if self.with_spectral_norm: + self.conv = nn.utils.spectral_norm(self.conv) + + # build normalization layers + if self.with_norm: + # norm layer is after conv layer + if order.index('norm') > order.index('conv'): + norm_channels = out_channels + else: + norm_channels = in_channels + self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels) + self.add_module(self.norm_name, norm) + if self.with_bias: + if isinstance(norm, (_BatchNorm, _InstanceNorm)): + warnings.warn( + 'Unnecessary conv bias before batch/instance norm') + else: + self.norm_name = None + + # build activation layer + if self.with_activation: + act_cfg_ = act_cfg.copy() + # nn.Tanh has no 'inplace' argument + if act_cfg_['type'] not in [ + 'Tanh', 'PReLU', 'Sigmoid', 'HSigmoid', 'Swish' + ]: + act_cfg_.setdefault('inplace', inplace) + self.activate = build_activation_layer(act_cfg_) + + # Use msra init by default + self.init_weights() + + @property + def norm(self): + if self.norm_name: + return getattr(self, self.norm_name) + else: + return None + + def init_weights(self): + # 1. It is mainly for customized conv layers with their own + # initialization manners by calling their own ``init_weights()``, + # and we do not want ConvModule to override the initialization. + # 2. For customized conv layers without their own initialization + # manners (that is, they don't have their own ``init_weights()``) + # and PyTorch's conv layers, they will be initialized by + # this method with default ``kaiming_init``. + # Note: For PyTorch's conv layers, they will be overwritten by our + # initialization implementation using default ``kaiming_init``. + if not hasattr(self.conv, 'init_weights'): + if self.with_activation and self.act_cfg['type'] == 'LeakyReLU': + nonlinearity = 'leaky_relu' + a = self.act_cfg.get('negative_slope', 0.01) + else: + nonlinearity = 'relu' + a = 0 + kaiming_init(self.conv, a=a, nonlinearity=nonlinearity) + if self.with_norm: + constant_init(self.norm, 1, bias=0) + + def forward(self, x, activate=True, norm=True): + for layer in self.order: + if layer == 'conv': + if self.with_explicit_padding: + x = self.padding_layer(x) + x = self.conv(x) + elif layer == 'norm' and norm and self.with_norm: + x = self.norm(x) + elif layer == 'act' and activate and self.with_activation: + x = self.activate(x) + return x diff --git a/mmcv/models/bricks/drop.py b/mmcv/models/bricks/drop.py new file mode 100644 index 0000000..b0a0266 --- /dev/null +++ b/mmcv/models/bricks/drop.py @@ -0,0 +1,65 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
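+# Usage sketch (added note, illustrative): DropPath is typically applied to
+# the residual branch of a transformer block, e.g.
+#   x = x + DropPath(drop_prob=0.1)(self.attn(self.norm1(x)))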
+import torch +import torch.nn as nn + +from mmcv import build_from_cfg +from .registry import DROPOUT_LAYERS + + +def drop_path(x, drop_prob=0., training=False): + """Drop paths (Stochastic Depth) per sample (when applied in main path of + residual blocks). + + We follow the implementation + https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py # noqa: E501 + """ + if drop_prob == 0. or not training: + return x + keep_prob = 1 - drop_prob + # handle tensors with different dimensions, not just 4D tensors. + shape = (x.shape[0], ) + (1, ) * (x.ndim - 1) + random_tensor = keep_prob + torch.rand( + shape, dtype=x.dtype, device=x.device) + output = x.div(keep_prob) * random_tensor.floor() + return output + + +@DROPOUT_LAYERS.register_module() +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of + residual blocks). + + We follow the implementation + https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py # noqa: E501 + + Args: + drop_prob (float): Probability of the path to be zeroed. Default: 0.1 + """ + + def __init__(self, drop_prob=0.1): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + +@DROPOUT_LAYERS.register_module() +class Dropout(nn.Dropout): + """A wrapper for ``torch.nn.Dropout``, We rename the ``p`` of + ``torch.nn.Dropout`` to ``drop_prob`` so as to be consistent with + ``DropPath`` + + Args: + drop_prob (float): Probability of the elements to be + zeroed. Default: 0.5. + inplace (bool): Do the operation inplace or not. Default: False. + """ + + def __init__(self, drop_prob=0.5, inplace=False): + super().__init__(p=drop_prob, inplace=inplace) + + +def build_dropout(cfg, default_args=None): + """Builder for drop out layers.""" + return build_from_cfg(cfg, DROPOUT_LAYERS, default_args) diff --git a/mmcv/models/bricks/norm.py b/mmcv/models/bricks/norm.py new file mode 100644 index 0000000..7c40c99 --- /dev/null +++ b/mmcv/models/bricks/norm.py @@ -0,0 +1,145 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import inspect + +import torch.nn as nn + +from mmcv.utils import is_tuple_of +from torch.nn.modules.instancenorm import _InstanceNorm +from torch.nn.modules.batchnorm import _BatchNorm +from .registry import NORM_LAYERS + +NORM_LAYERS.register_module('BN', module=nn.BatchNorm2d) +NORM_LAYERS.register_module('BN1d', module=nn.BatchNorm1d) +NORM_LAYERS.register_module('BN2d', module=nn.BatchNorm2d) +NORM_LAYERS.register_module('BN3d', module=nn.BatchNorm3d) +NORM_LAYERS.register_module('SyncBN', module=nn.SyncBatchNorm) +NORM_LAYERS.register_module('GN', module=nn.GroupNorm) +NORM_LAYERS.register_module('LN', module=nn.LayerNorm) +NORM_LAYERS.register_module('IN', module=nn.InstanceNorm2d) +NORM_LAYERS.register_module('IN1d', module=nn.InstanceNorm1d) +NORM_LAYERS.register_module('IN2d', module=nn.InstanceNorm2d) +NORM_LAYERS.register_module('IN3d', module=nn.InstanceNorm3d) + + +def infer_abbr(class_type): + """Infer abbreviation from the class name. + + When we build a norm layer with `build_norm_layer()`, we want to preserve + the norm type in variable names, e.g, self.bn1, self.gn. This method will + infer the abbreviation to map class types to abbreviations. + + Rule 1: If the class has the property "_abbr_", return the property. 
+    Rule 2: If the parent class is _BatchNorm, GroupNorm, LayerNorm or
+    InstanceNorm, the abbreviation of this layer will be "bn", "gn", "ln" and
+    "in" respectively.
+    Rule 3: If the class name contains "batch", "group", "layer" or "instance",
+    the abbreviation of this layer will be "bn", "gn", "ln" and "in"
+    respectively.
+    Rule 4: Otherwise, the abbreviation falls back to ``"norm_layer"``.
+
+    Args:
+        class_type (type): The norm layer type.
+
+    Returns:
+        str: The inferred abbreviation.
+    """
+    if not inspect.isclass(class_type):
+        raise TypeError(
+            f'class_type must be a type, but got {type(class_type)}')
+    if hasattr(class_type, '_abbr_'):
+        return class_type._abbr_
+    if issubclass(class_type, _InstanceNorm):  # _InstanceNorm subclasses _BatchNorm, so check it first
+        return 'in'
+    elif issubclass(class_type, _BatchNorm):
+        return 'bn'
+    elif issubclass(class_type, nn.GroupNorm):
+        return 'gn'
+    elif issubclass(class_type, nn.LayerNorm):
+        return 'ln'
+    else:
+        class_name = class_type.__name__.lower()
+        if 'batch' in class_name:
+            return 'bn'
+        elif 'group' in class_name:
+            return 'gn'
+        elif 'layer' in class_name:
+            return 'ln'
+        elif 'instance' in class_name:
+            return 'in'
+        else:
+            return 'norm_layer'
+
+
+def build_norm_layer(cfg, num_features, postfix=''):
+    """Build normalization layer.
+
+    Args:
+        cfg (dict): The norm layer config, which should contain:
+
+            - type (str): Layer type.
+            - layer args: Args needed to instantiate a norm layer.
+            - requires_grad (bool, optional): Whether to stop gradient updates.
+        num_features (int): Number of input channels.
+        postfix (int | str): The postfix to be appended into norm abbreviation
+            to create named layer.
+
+    Returns:
+        (str, nn.Module): The first element is the layer name consisting of
+            abbreviation and postfix, e.g., bn1, gn. The second element is the
+            created norm layer.
+    """
+    if not isinstance(cfg, dict):
+        raise TypeError('cfg must be a dict')
+    if 'type' not in cfg:
+        raise KeyError('the cfg dict must contain the key "type"')
+    cfg_ = cfg.copy()
+
+    layer_type = cfg_.pop('type')
+    if layer_type not in NORM_LAYERS:
+        raise KeyError(f'Unrecognized norm type {layer_type}')
+
+    norm_layer = NORM_LAYERS.get(layer_type)
+    abbr = infer_abbr(norm_layer)
+
+    assert isinstance(postfix, (int, str))
+    name = abbr + str(postfix)
+
+    requires_grad = cfg_.pop('requires_grad', True)
+    cfg_.setdefault('eps', 1e-5)
+    if layer_type != 'GN':
+        layer = norm_layer(num_features, **cfg_)
+        if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'):
+            layer._specify_ddp_gpu_num(1)
+    else:
+        assert 'num_groups' in cfg_
+        layer = norm_layer(num_channels=num_features, **cfg_)
+
+    for param in layer.parameters():
+        param.requires_grad = requires_grad
+
+    return name, layer
+
+
+def is_norm(layer, exclude=None):
+    """Check if a layer is a normalization layer.
+
+    Args:
+        layer (nn.Module): The layer to be checked.
+        exclude (type | tuple[type]): Types to be excluded.
+
+    Returns:
+        bool: Whether the layer is a norm layer.
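+
+    Example (illustrative)::
+
+        >>> import torch.nn as nn
+        >>> is_norm(nn.BatchNorm2d(4))
+        True
+        >>> is_norm(nn.BatchNorm2d(4), exclude=nn.BatchNorm2d)
+        False
+        >>> is_norm(nn.Conv2d(1, 1, 1))
+        False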
+ """ + if exclude is not None: + if not isinstance(exclude, tuple): + exclude = (exclude, ) + if not is_tuple_of(exclude, type): + raise TypeError( + f'"exclude" must be either None or type or a tuple of types, ' + f'but got {type(exclude)}: {exclude}') + + if exclude and isinstance(layer, exclude): + return False + + all_norm_bases = (_BatchNorm, _InstanceNorm, nn.GroupNorm, nn.LayerNorm) + return isinstance(layer, all_norm_bases) diff --git a/mmcv/models/bricks/padding.py b/mmcv/models/bricks/padding.py new file mode 100644 index 0000000..e4ac6b2 --- /dev/null +++ b/mmcv/models/bricks/padding.py @@ -0,0 +1,36 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn + +from .registry import PADDING_LAYERS + +PADDING_LAYERS.register_module('zero', module=nn.ZeroPad2d) +PADDING_LAYERS.register_module('reflect', module=nn.ReflectionPad2d) +PADDING_LAYERS.register_module('replicate', module=nn.ReplicationPad2d) + + +def build_padding_layer(cfg, *args, **kwargs): + """Build padding layer. + + Args: + cfg (None or dict): The padding layer config, which should contain: + - type (str): Layer type. + - layer args: Args needed to instantiate a padding layer. + + Returns: + nn.Module: Created padding layer. + """ + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + + cfg_ = cfg.copy() + padding_type = cfg_.pop('type') + if padding_type not in PADDING_LAYERS: + raise KeyError(f'Unrecognized padding type {padding_type}.') + else: + padding_layer = PADDING_LAYERS.get(padding_type) + + layer = padding_layer(*args, **kwargs, **cfg_) + + return layer diff --git a/mmcv/models/bricks/plugin.py b/mmcv/models/bricks/plugin.py new file mode 100644 index 0000000..07c010d --- /dev/null +++ b/mmcv/models/bricks/plugin.py @@ -0,0 +1,88 @@ +import inspect +import platform + +from .registry import PLUGIN_LAYERS + +if platform.system() == 'Windows': + import regex as re +else: + import re + + +def infer_abbr(class_type): + """Infer abbreviation from the class name. + + This method will infer the abbreviation to map class types to + abbreviations. + + Rule 1: If the class has the property "abbr", return the property. + Rule 2: Otherwise, the abbreviation falls back to snake case of class + name, e.g. the abbreviation of ``FancyBlock`` will be ``fancy_block``. + + Args: + class_type (type): The norm layer type. + + Returns: + str: The inferred abbreviation. + """ + + def camel2snack(word): + """Convert camel case word into snack case. + + Modified from `inflection lib + `_. + + Example:: + + >>> camel2snack("FancyBlock") + 'fancy_block' + """ + + word = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', word) + word = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', word) + word = word.replace('-', '_') + return word.lower() + + if not inspect.isclass(class_type): + raise TypeError( + f'class_type must be a type, but got {type(class_type)}') + if hasattr(class_type, '_abbr_'): + return class_type._abbr_ + else: + return camel2snack(class_type.__name__) + + +def build_plugin_layer(cfg, postfix='', **kwargs): + """Build plugin layer. + + Args: + cfg (None or dict): cfg should contain: + type (str): identify plugin layer type. + layer args: args needed to instantiate a plugin layer. + postfix (int, str): appended into norm abbreviation to + create named layer. Default: ''. 
+ + Returns: + tuple[str, nn.Module]: + name (str): abbreviation + postfix + layer (nn.Module): created plugin layer + """ + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if layer_type not in PLUGIN_LAYERS: + raise KeyError(f'Unrecognized plugin type {layer_type}') + + plugin_layer = PLUGIN_LAYERS.get(layer_type) + abbr = infer_abbr(plugin_layer) + + assert isinstance(postfix, (int, str)) + name = abbr + str(postfix) + + layer = plugin_layer(**kwargs, **cfg_) + + return name, layer diff --git a/mmcv/models/bricks/registry.py b/mmcv/models/bricks/registry.py new file mode 100644 index 0000000..c292797 --- /dev/null +++ b/mmcv/models/bricks/registry.py @@ -0,0 +1,16 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.utils import Registry + +CONV_LAYERS = Registry('conv layer') +NORM_LAYERS = Registry('norm layer') +ACTIVATION_LAYERS = Registry('activation layer') +PADDING_LAYERS = Registry('padding layer') +UPSAMPLE_LAYERS = Registry('upsample layer') +PLUGIN_LAYERS = Registry('plugin layer') + +DROPOUT_LAYERS = Registry('drop out layers') +POSITIONAL_ENCODING = Registry('position encoding') +ATTENTION = Registry('attention') +FEEDFORWARD_NETWORK = Registry('feed-forward Network') +TRANSFORMER_LAYER = Registry('transformerLayer') +TRANSFORMER_LAYER_SEQUENCE = Registry('transformer-layers sequence') diff --git a/mmcv/models/bricks/transformer.py b/mmcv/models/bricks/transformer.py new file mode 100644 index 0000000..df6e532 --- /dev/null +++ b/mmcv/models/bricks/transformer.py @@ -0,0 +1,611 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import warnings + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv import ConfigDict, deprecated_api_warning +from .wrappers import Linear +from .activation import build_activation_layer +from .norm import build_norm_layer +# from mmcv.models.bricks import Linear, build_activation_layer, build_norm_layer +from ..backbones.base_module import BaseModule, ModuleList, Sequential +from mmcv.utils import build_from_cfg +from .drop import build_dropout +from .registry import (ATTENTION, FEEDFORWARD_NETWORK, POSITIONAL_ENCODING, + TRANSFORMER_LAYER, TRANSFORMER_LAYER_SEQUENCE) + +# Avoid BC-breaking of importing MultiScaleDeformableAttention from this file +try: + from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention # noqa F401 + warnings.warn( + ImportWarning( + '``MultiScaleDeformableAttention`` has been moved to ' + '``mmcv.ops.multi_scale_deform_attn``, please change original path ' # noqa E501 + '``from mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention`` ' # noqa E501 + 'to ``from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention`` ' # noqa E501 + )) + +except ImportError: + warnings.warn('Fail to import ``MultiScaleDeformableAttention`` from ' + '``mmcv.ops.multi_scale_deform_attn``, ' + 'You should install ``mmcv-full`` if you need this module. 
') + + +def build_positional_encoding(cfg, default_args=None): + """Builder for Position Encoding.""" + return build_from_cfg(cfg, POSITIONAL_ENCODING, default_args) + + +def build_attention(cfg, default_args=None): + """Builder for attention.""" + return build_from_cfg(cfg, ATTENTION, default_args) + + +def build_feedforward_network(cfg, default_args=None): + """Builder for feed-forward network (FFN).""" + return build_from_cfg(cfg, FEEDFORWARD_NETWORK, default_args) + + +def build_transformer_layer(cfg, default_args=None): + """Builder for transformer layer.""" + return build_from_cfg(cfg, TRANSFORMER_LAYER, default_args) + + +def build_transformer_layer_sequence(cfg, default_args=None): + """Builder for transformer encoder and transformer decoder.""" + return build_from_cfg(cfg, TRANSFORMER_LAYER_SEQUENCE, default_args) + + +@ATTENTION.register_module() +class MultiheadAttention(BaseModule): + """A wrapper for ``torch.nn.MultiheadAttention``. + + This module implements MultiheadAttention with identity connection, + and positional encoding is also passed as input. + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + attn_drop (float): A Dropout layer on attn_output_weights. + Default: 0.0. + proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. + Default: 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + batch_first (bool): When it is True, Key, Query and Value are shape of + (batch, n, embed_dim), otherwise (n, batch, embed_dim). + Default to False. + """ + + def __init__(self, + embed_dims, + num_heads, + attn_drop=0., + proj_drop=0., + dropout_layer=dict(type='Dropout', drop_prob=0.), + init_cfg=None, + batch_first=False, + with_cp=False, + **kwargs): + super(MultiheadAttention, self).__init__(init_cfg) + if 'dropout' in kwargs: + warnings.warn('The arguments `dropout` in MultiheadAttention ' + 'has been deprecated, now you can separately ' + 'set `attn_drop`(float), proj_drop(float), ' + 'and `dropout_layer`(dict) ') + attn_drop = kwargs['dropout'] + dropout_layer['drop_prob'] = kwargs.pop('dropout') + + self.embed_dims = embed_dims + self.num_heads = num_heads + self.batch_first = batch_first + + self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop, + **kwargs) + self.proj_drop = nn.Dropout(proj_drop) + self.dropout_layer = build_dropout( + dropout_layer) if dropout_layer else nn.Identity() + self.with_cp = with_cp + + @deprecated_api_warning({'residual': 'identity'}, + cls_name='MultiheadAttention') + def forward(self, + query, + key=None, + value=None, + identity=None, + query_pos=None, + key_pos=None, + attn_mask=None, + key_padding_mask=None, + **kwargs): + """Forward function for `MultiheadAttention`. + + **kwargs allow passing a more general data flow when combining + with other operations in `transformerlayer`. + + Args: + query (Tensor): The input query with shape [num_queries, bs, + embed_dims] if self.batch_first is False, else + [bs, num_queries embed_dims]. + key (Tensor): The key tensor with shape [num_keys, bs, + embed_dims] if self.batch_first is False, else + [bs, num_keys, embed_dims] . + If None, the ``query`` will be used. Defaults to None. + value (Tensor): The value tensor with same shape as `key`. + Same in `nn.MultiheadAttention.forward`. Defaults to None. + If None, the `key` will be used. 
+ identity (Tensor): This tensor, with the same shape as x, + will be used for the identity link. + If None, `x` will be used. Defaults to None. + query_pos (Tensor): The positional encoding for query, with + the same shape as `x`. If not None, it will + be added to `x` before forward function. Defaults to None. + key_pos (Tensor): The positional encoding for `key`, with the + same shape as `key`. Defaults to None. If not None, it will + be added to `key` before forward function. If None, and + `query_pos` has the same shape as `key`, then `query_pos` + will be used for `key_pos`. Defaults to None. + attn_mask (Tensor): ByteTensor mask with shape [num_queries, + num_keys]. Same in `nn.MultiheadAttention.forward`. + Defaults to None. + key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys]. + Defaults to None. + + Returns: + Tensor: forwarded results with shape + [num_queries, bs, embed_dims] + if self.batch_first is False, else + [bs, num_queries embed_dims]. + """ + + if key is None: + key = query + if value is None: + value = key + if identity is None: + identity = query + if key_pos is None: + if query_pos is not None: + # use query_pos if key_pos is not available + if query_pos.shape == key.shape: + key_pos = query_pos + else: + warnings.warn(f'position encoding of key is' + f'missing in {self.__class__.__name__}.') + if query_pos is not None: + query = query + query_pos + if key_pos is not None: + key = key + key_pos + + # Because the dataflow('key', 'query', 'value') of + # ``torch.nn.MultiheadAttention`` is (num_query, batch, + # embed_dims), We should adjust the shape of dataflow from + # batch_first (batch, num_query, embed_dims) to num_query_first + # (num_query ,batch, embed_dims), and recover ``attn_output`` + # from num_query_first to batch_first. + if self.batch_first: + query = query.transpose(0, 1) + key = key.transpose(0, 1) + value = value.transpose(0, 1) + + if self.with_cp: + out = cp.checkpoint(self.attn, use_reentrant=False, query=query, + key=key, + value=value, + attn_mask=attn_mask, + key_padding_mask=key_padding_mask)[0] + else: + out = self.attn( + query=query, + key=key, + value=value, + attn_mask=attn_mask, + key_padding_mask=key_padding_mask)[0] + + if self.batch_first: + out = out.transpose(0, 1) + + return identity + self.dropout_layer(self.proj_drop(out)) + + +@FEEDFORWARD_NETWORK.register_module() +class FFN(BaseModule): + """Implements feed-forward networks (FFNs) with identity connection. + + Args: + embed_dims (int): The feature dimension. Same as + `MultiheadAttention`. Defaults: 256. + feedforward_channels (int): The hidden dimension of FFNs. + Defaults: 1024. + num_fcs (int, optional): The number of fully-connected layers in + FFNs. Default: 2. + act_cfg (dict, optional): The activation config for FFNs. + Default: dict(type='ReLU') + ffn_drop (float, optional): Probability of an element to be + zeroed in FFN. Default 0.0. + add_identity (bool, optional): Whether to add the + identity connection. Default: `True`. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. 
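+
+    Example (a minimal sketch; the shapes below are illustrative)::
+
+        >>> ffn = FFN(embed_dims=256, feedforward_channels=1024)
+        >>> x = torch.rand(2, 100, 256)
+        >>> out = ffn(x)  # identity connection added back; same shape as x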
+ """ + + @deprecated_api_warning( + { + 'dropout': 'ffn_drop', + 'add_residual': 'add_identity' + }, + cls_name='FFN') + def __init__(self, + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0., + dropout_layer=None, + add_identity=True, + init_cfg=None, + **kwargs): + super(FFN, self).__init__(init_cfg) + assert num_fcs >= 2, 'num_fcs should be no less ' \ + f'than 2. got {num_fcs}.' + self.embed_dims = embed_dims + self.feedforward_channels = feedforward_channels + self.num_fcs = num_fcs + self.act_cfg = act_cfg + self.activate = build_activation_layer(act_cfg) + + layers = [] + in_channels = embed_dims + for _ in range(num_fcs - 1): + layers.append( + Sequential( + Linear(in_channels, feedforward_channels), self.activate, + nn.Dropout(ffn_drop))) + in_channels = feedforward_channels + layers.append(Linear(feedforward_channels, embed_dims)) + layers.append(nn.Dropout(ffn_drop)) + self.layers = Sequential(*layers) + self.dropout_layer = build_dropout( + dropout_layer) if dropout_layer else torch.nn.Identity() + self.add_identity = add_identity + + @deprecated_api_warning({'residual': 'identity'}, cls_name='FFN') + def forward(self, x, identity=None): + """Forward function for `FFN`. + + The function would add x to the output tensor if residue is None. + """ + out = self.layers(x) + if not self.add_identity: + return self.dropout_layer(out) + if identity is None: + identity = x + return identity + self.dropout_layer(out) + + +@TRANSFORMER_LAYER.register_module() +class BaseTransformerLayer(BaseModule): + """Base `TransformerLayer` for vision transformer. + + It can be built from `mmcv.ConfigDict` and support more flexible + customization, for example, using any number of `FFN or LN ` and + use different kinds of `attention` by specifying a list of `ConfigDict` + named `attn_cfgs`. It is worth mentioning that it supports `prenorm` + when you specifying `norm` as the first element of `operation_order`. + More details about the `prenorm`: `On Layer Normalization in the + Transformer Architecture `_ . + + Args: + attn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )): + Configs for `self_attention` or `cross_attention` modules, + The order of the configs in the list should be consistent with + corresponding attentions in operation_order. + If it is a dict, all of the attention modules in operation_order + will be built with this config. Default: None. + ffn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )): + Configs for FFN, The order of the configs in the list should be + consistent with corresponding ffn in operation_order. + If it is a dict, all of the attention modules in operation_order + will be built with this config. + operation_order (tuple[str]): The execution order of operation + in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). + Support `prenorm` when you specifying first element as `norm`. + Default:None. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + batch_first (bool): Key, Query and Value are shape + of (batch, n, embed_dim) + or (n, batch, embed_dim). Default to False. 
+ """ + + def __init__(self, + attn_cfgs=None, + with_cp=False, + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0., + act_cfg=dict(type='ReLU', inplace=True), + ), + operation_order=None, + norm_cfg=dict(type='LN'), + init_cfg=None, + batch_first=False, + **kwargs): + + deprecated_args = dict( + feedforward_channels='feedforward_channels', + ffn_dropout='ffn_drop', + ffn_num_fcs='num_fcs') + for ori_name, new_name in deprecated_args.items(): + if ori_name in kwargs: + warnings.warn( + f'The arguments `{ori_name}` in BaseTransformerLayer ' + f'has been deprecated, now you should set `{new_name}` ' + f'and other FFN related arguments ' + f'to a dict named `ffn_cfgs`. ') + ffn_cfgs[new_name] = kwargs[ori_name] + + super(BaseTransformerLayer, self).__init__(init_cfg) + + self.batch_first = batch_first + + assert set(operation_order) & set( + ['self_attn', 'norm', 'ffn', 'cross_attn']) == \ + set(operation_order), f'The operation_order of' \ + f' {self.__class__.__name__} should ' \ + f'contains all four operation type ' \ + f"{['self_attn', 'norm', 'ffn', 'cross_attn']}" + + num_attn = operation_order.count('self_attn') + operation_order.count( + 'cross_attn') + if isinstance(attn_cfgs, dict): + attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)] + else: + assert num_attn == len(attn_cfgs), f'The length ' \ + f'of attn_cfg {num_attn} is ' \ + f'not consistent with the number of attention' \ + f'in operation_order {operation_order}.' + + self.num_attn = num_attn + self.operation_order = operation_order + self.norm_cfg = norm_cfg + self.pre_norm = operation_order[0] == 'norm' + self.attentions = ModuleList() + + index = 0 + for operation_name in operation_order: + if operation_name in ['self_attn', 'cross_attn']: + if 'batch_first' in attn_cfgs[index]: + assert self.batch_first == attn_cfgs[index]['batch_first'] + else: + attn_cfgs[index]['batch_first'] = self.batch_first + attention = build_attention(attn_cfgs[index]) + # Some custom attentions used as `self_attn` + # or `cross_attn` can have different behavior. + attention.operation_name = operation_name + self.attentions.append(attention) + index += 1 + + self.embed_dims = self.attentions[0].embed_dims + + self.ffns = ModuleList() + num_ffns = operation_order.count('ffn') + if isinstance(ffn_cfgs, dict): + ffn_cfgs = ConfigDict(ffn_cfgs) + if isinstance(ffn_cfgs, dict): + ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)] + assert len(ffn_cfgs) == num_ffns + for ffn_index in range(num_ffns): + if 'embed_dims' not in ffn_cfgs[ffn_index]: + ffn_cfgs['embed_dims'] = self.embed_dims + else: + assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims + self.ffns.append( + build_feedforward_network(ffn_cfgs[ffn_index], + dict(type='FFN'))) + + self.norms = ModuleList() + num_norms = operation_order.count('norm') + for _ in range(num_norms): + self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1]) + self.with_cp = with_cp + + def forward(self, + query, + key=None, + value=None, + query_pos=None, + key_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + **kwargs): + """Forward function for `TransformerDecoderLayer`. + + **kwargs contains some specific arguments of attentions. + + Args: + query (Tensor): The input query with shape + [num_queries, bs, embed_dims] if + self.batch_first is False, else + [bs, num_queries embed_dims]. 
+ key (Tensor): The key tensor with shape [num_keys, bs, + embed_dims] if self.batch_first is False, else + [bs, num_keys, embed_dims] . + value (Tensor): The value tensor with same shape as `key`. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + key_pos (Tensor): The positional encoding for `key`. + Default: None. + attn_masks (List[Tensor] | None): 2D Tensor used in + calculation of corresponding attention. The length of + it should equal to the number of `attention` in + `operation_order`. Default: None. + query_key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_queries]. Only used in `self_attn` layer. + Defaults to None. + key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_keys]. Default: None. + + Returns: + Tensor: forwarded results with shape [num_queries, bs, embed_dims]. + """ + + norm_index = 0 + attn_index = 0 + ffn_index = 0 + identity = query + if attn_masks is None: + attn_masks = [None for _ in range(self.num_attn)] + elif isinstance(attn_masks, torch.Tensor): + attn_masks = [ + copy.deepcopy(attn_masks) for _ in range(self.num_attn) + ] + warnings.warn(f'Use same attn_mask in all attentions in ' + f'{self.__class__.__name__} ') + else: + assert len(attn_masks) == self.num_attn, f'The length of ' \ + f'attn_masks {len(attn_masks)} must be equal ' \ + f'to the number of attention in ' \ + f'operation_order {self.num_attn}' + + for layer in self.operation_order: + if layer == 'self_attn': + temp_key = temp_value = query + query = self.attentions[attn_index]( + query, + temp_key, + temp_value, + identity if self.pre_norm else None, + query_pos=query_pos, + key_pos=query_pos, + attn_mask=attn_masks[attn_index], + key_padding_mask=query_key_padding_mask, + **kwargs) + attn_index += 1 + identity = query + + elif layer == 'norm': + query = self.norms[norm_index](query) + norm_index += 1 + + elif layer == 'cross_attn': + query = self.attentions[attn_index]( + query, + key, + value, + identity if self.pre_norm else None, + query_pos=query_pos, + key_pos=key_pos, + attn_mask=attn_masks[attn_index], + key_padding_mask=key_padding_mask, + **kwargs) + attn_index += 1 + identity = query + + elif layer == 'ffn': + if self.with_cp: + query = cp.checkpoint(self.ffns[ffn_index], query) + else: + query = self.ffns[ffn_index]( + query, identity if self.pre_norm else None) + ffn_index += 1 + + return query + + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class TransformerLayerSequence(BaseModule): + """Base class for TransformerEncoder and TransformerDecoder in vision + transformer. + + As base-class of Encoder and Decoder in vision transformer. + Support customization such as specifying different kind + of `transformer_layer` in `transformer_coder`. + + Args: + transformerlayer (list[obj:`mmcv.ConfigDict`] | + obj:`mmcv.ConfigDict`): Config of transformerlayer + in TransformerCoder. If it is obj:`mmcv.ConfigDict`, + it would be repeated `num_layer` times to a + list[`mmcv.ConfigDict`]. Default: None. + num_layers (int): The number of `TransformerLayer`. Default: None. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. 
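+
+    Example (illustrative sketch; a single layer config is repeated
+    ``num_layers`` times)::
+
+        >>> decoder = TransformerLayerSequence(
+        ...     transformerlayers=dict(
+        ...         type='BaseTransformerLayer',
+        ...         attn_cfgs=dict(
+        ...             type='MultiheadAttention', embed_dims=256,
+        ...             num_heads=8),
+        ...         operation_order=('self_attn', 'norm', 'ffn', 'norm')),
+        ...     num_layers=6)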
+ """ + + def __init__(self, transformerlayers=None, num_layers=None, init_cfg=None): + super(TransformerLayerSequence, self).__init__(init_cfg) + if isinstance(transformerlayers, dict): + transformerlayers = [ + copy.deepcopy(transformerlayers) for _ in range(num_layers) + ] + else: + assert isinstance(transformerlayers, list) and \ + len(transformerlayers) == num_layers + self.num_layers = num_layers + self.layers = ModuleList() + for i in range(num_layers): + self.layers.append(build_transformer_layer(transformerlayers[i])) + self.embed_dims = self.layers[0].embed_dims + self.pre_norm = self.layers[0].pre_norm + + def forward(self, + query, + key, + value, + query_pos=None, + key_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + **kwargs): + """Forward function for `TransformerCoder`. + + Args: + query (Tensor): Input query with shape + `(num_queries, bs, embed_dims)`. + key (Tensor): The key tensor with shape + `(num_keys, bs, embed_dims)`. + value (Tensor): The value tensor with shape + `(num_keys, bs, embed_dims)`. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + key_pos (Tensor): The positional encoding for `key`. + Default: None. + attn_masks (List[Tensor], optional): Each element is 2D Tensor + which is used in calculation of corresponding attention in + operation_order. Default: None. + query_key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_queries]. Only used in self-attention + Default: None. + key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_keys]. Default: None. + + Returns: + Tensor: results with shape [num_queries, bs, embed_dims]. + """ + for layer in self.layers: + query = layer( + query, + key, + value, + query_pos=query_pos, + key_pos=key_pos, + attn_masks=attn_masks, + query_key_padding_mask=query_key_padding_mask, + key_padding_mask=key_padding_mask, + **kwargs) + return query diff --git a/mmcv/models/bricks/wrappers.py b/mmcv/models/bricks/wrappers.py new file mode 100644 index 0000000..e4bed07 --- /dev/null +++ b/mmcv/models/bricks/wrappers.py @@ -0,0 +1,175 @@ +# Copyright (c) OpenMMLab. All rights reserved. +r"""Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/wrappers.py # noqa: E501 + +Wrap some nn modules to support empty tensor input. Currently, these wrappers +are mainly used in mask heads like fcn_mask_head and maskiou_heads since mask +heads are trained on only positive RoIs. 
+""" +import math + +import torch +import torch.nn as nn +from torch.nn.modules.utils import _pair, _triple + +from .registry import CONV_LAYERS, UPSAMPLE_LAYERS + +TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2]) + + +def obsolete_torch_version(torch_version, version_threshold): + return torch_version <= version_threshold + + +class NewEmptyTensorOp(torch.autograd.Function): + + @staticmethod + def forward(ctx, x, new_shape): + ctx.shape = x.shape + return x.new_empty(new_shape) + + @staticmethod + def backward(ctx, grad): + shape = ctx.shape + return NewEmptyTensorOp.apply(grad, shape), None + + +@CONV_LAYERS.register_module('Conv', force=True) +class Conv2d(nn.Conv2d): + + def forward(self, x): + if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)): + out_shape = [x.shape[0], self.out_channels] + for i, k, p, s, d in zip(x.shape[-2:], self.kernel_size, + self.padding, self.stride, self.dilation): + o = (i + 2 * p - (d * (k - 1) + 1)) // s + 1 + out_shape.append(o) + empty = NewEmptyTensorOp.apply(x, out_shape) + if self.training: + # produce dummy gradient to avoid DDP warning. + dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 + return empty + dummy + else: + return empty + + return super().forward(x) + + +@CONV_LAYERS.register_module('Conv3d', force=True) +class Conv3d(nn.Conv3d): + + def forward(self, x): + if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)): + out_shape = [x.shape[0], self.out_channels] + for i, k, p, s, d in zip(x.shape[-3:], self.kernel_size, + self.padding, self.stride, self.dilation): + o = (i + 2 * p - (d * (k - 1) + 1)) // s + 1 + out_shape.append(o) + empty = NewEmptyTensorOp.apply(x, out_shape) + if self.training: + # produce dummy gradient to avoid DDP warning. + dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 + return empty + dummy + else: + return empty + + return super().forward(x) + + +@CONV_LAYERS.register_module() +@CONV_LAYERS.register_module('deconv') +@UPSAMPLE_LAYERS.register_module('deconv', force=True) +class ConvTranspose2d(nn.ConvTranspose2d): + + def forward(self, x): + if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)): + out_shape = [x.shape[0], self.out_channels] + for i, k, p, s, d, op in zip(x.shape[-2:], self.kernel_size, + self.padding, self.stride, + self.dilation, self.output_padding): + out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op) + empty = NewEmptyTensorOp.apply(x, out_shape) + if self.training: + # produce dummy gradient to avoid DDP warning. + dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 + return empty + dummy + else: + return empty + + return super().forward(x) + + +@CONV_LAYERS.register_module() +@CONV_LAYERS.register_module('deconv3d') +@UPSAMPLE_LAYERS.register_module('deconv3d', force=True) +class ConvTranspose3d(nn.ConvTranspose3d): + + def forward(self, x): + if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)): + out_shape = [x.shape[0], self.out_channels] + for i, k, p, s, d, op in zip(x.shape[-3:], self.kernel_size, + self.padding, self.stride, + self.dilation, self.output_padding): + out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op) + empty = NewEmptyTensorOp.apply(x, out_shape) + if self.training: + # produce dummy gradient to avoid DDP warning. 
+                dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
+                return empty + dummy
+            else:
+                return empty
+
+        return super().forward(x)
+
+
+class MaxPool2d(nn.MaxPool2d):
+
+    def forward(self, x):
+        # PyTorch 1.9 does not support empty tensor inference yet
+        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
+            out_shape = list(x.shape[:2])
+            for i, k, p, s, d in zip(x.shape[-2:], _pair(self.kernel_size),
+                                     _pair(self.padding), _pair(self.stride),
+                                     _pair(self.dilation)):
+                o = (i + 2 * p - (d * (k - 1) + 1)) / s + 1
+                o = math.ceil(o) if self.ceil_mode else math.floor(o)
+                out_shape.append(o)
+            empty = NewEmptyTensorOp.apply(x, out_shape)
+            return empty
+
+        return super().forward(x)
+
+
+class MaxPool3d(nn.MaxPool3d):
+
+    def forward(self, x):
+        # PyTorch 1.9 does not support empty tensor inference yet
+        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
+            out_shape = list(x.shape[:2])
+            for i, k, p, s, d in zip(x.shape[-3:], _triple(self.kernel_size),
+                                     _triple(self.padding),
+                                     _triple(self.stride),
+                                     _triple(self.dilation)):
+                o = (i + 2 * p - (d * (k - 1) + 1)) / s + 1
+                o = math.ceil(o) if self.ceil_mode else math.floor(o)
+                out_shape.append(o)
+            empty = NewEmptyTensorOp.apply(x, out_shape)
+            return empty
+
+        return super().forward(x)
+
+
+class Linear(torch.nn.Linear):
+
+    def forward(self, x):
+        # empty tensor forward of Linear layer is supported in PyTorch 1.6
+        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 5)):
+            out_shape = [x.shape[0], self.out_features]
+            empty = NewEmptyTensorOp.apply(x, out_shape)
+            if self.training:
+                # produce dummy gradient to avoid DDP warning.
+                dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
+                return empty + dummy
+            else:
+                return empty
+
+        return super().forward(x)
diff --git a/mmcv/models/builder.py b/mmcv/models/builder.py
new file mode 100644
index 0000000..798e70d
--- /dev/null
+++ b/mmcv/models/builder.py
@@ -0,0 +1,137 @@
+import warnings
+
+# from mmcv.cnn import MODELS as MMCV_MODELS
+from .backbones.base_module import Sequential
+from ..utils import Registry, build_from_cfg
+
+######### from mmcv.cnn
+def build_model_from_cfg(cfg, registry, default_args=None):
+    """Build a PyTorch model from config dict(s). Different from
+    ``build_from_cfg``, if cfg is a list, a ``nn.Sequential`` will be built.
+
+    Args:
+        cfg (dict, list[dict]): The config of modules; it is either a config
+            dict or a list of config dicts. If cfg is a list,
+            the built modules will be wrapped with ``nn.Sequential``.
+        registry (:obj:`Registry`): A registry the module belongs to.
+        default_args (dict, optional): Default arguments to build the module.
+            Defaults to None.
+
+    Returns:
+        nn.Module: A built nn module.
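+
+    Example (illustrative; assumes ``ResNet`` is registered in the given
+    registry)::
+
+        >>> backbone = build_model_from_cfg(
+        ...     dict(type='ResNet', depth=50), MODELS)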
+ """ + if isinstance(cfg, list): + modules = [ + build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg + ] + return Sequential(*modules) + else: + return build_from_cfg(cfg, registry, default_args) + + +CNN_MODELS = Registry('model', build_func=build_model_from_cfg) + + +MODELS = Registry('models', parent=CNN_MODELS) + +BACKBONES = MODELS +NECKS = MODELS +ROI_EXTRACTORS = MODELS +SHARED_HEADS = MODELS +HEADS = MODELS +LOSSES = MODELS +DETECTORS = MODELS +SEGMENTORS = MODELS + +VOXEL_ENCODERS = MODELS +MIDDLE_ENCODERS = MODELS +FUSION_LAYERS = MODELS + + +def build_backbone(cfg): + """Build backbone.""" + return BACKBONES.build(cfg) + + +def build_neck(cfg): + """Build neck.""" + return NECKS.build(cfg) + + +def build_roi_extractor(cfg): + """Build roi extractor.""" + return ROI_EXTRACTORS.build(cfg) + + +def build_shared_head(cfg): + """Build shared head.""" + return SHARED_HEADS.build(cfg) + + +def build_head(cfg): + """Build head.""" + return HEADS.build(cfg) + + +def build_loss(cfg): + """Build loss.""" + return LOSSES.build(cfg) + + +def build_detector(cfg, train_cfg=None, test_cfg=None): + """Build detector.""" + if train_cfg is not None or test_cfg is not None: + warnings.warn( + 'train_cfg and test_cfg is deprecated, ' + 'please specify them in model', UserWarning) + assert cfg.get('train_cfg') is None or train_cfg is None, \ + 'train_cfg specified in both outer field and model field ' + assert cfg.get('test_cfg') is None or test_cfg is None, \ + 'test_cfg specified in both outer field and model field ' + return DETECTORS.build( + cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) + + +def build_segmentor(cfg, train_cfg=None, test_cfg=None): + """Build segmentor.""" + if train_cfg is not None or test_cfg is not None: + warnings.warn( + 'train_cfg and test_cfg is deprecated, ' + 'please specify them in model', UserWarning) + assert cfg.get('train_cfg') is None or train_cfg is None, \ + 'train_cfg specified in both outer field and model field ' + assert cfg.get('test_cfg') is None or test_cfg is None, \ + 'test_cfg specified in both outer field and model field ' + return SEGMENTORS.build( + cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg)) + + +def build_model(cfg, train_cfg=None, test_cfg=None): + """A function warpper for building 3D detector or segmentor according to + cfg. + + Should be deprecated in the future. 
+ """ + if cfg.type in ['EncoderDecoder3D']: + return build_segmentor(cfg, train_cfg=train_cfg, test_cfg=test_cfg) + else: + return build_detector(cfg, train_cfg=train_cfg, test_cfg=test_cfg) + + +def build_voxel_encoder(cfg): + """Build voxel encoder.""" + return VOXEL_ENCODERS.build(cfg) + + +def build_middle_encoder(cfg): + """Build middle level encoder.""" + return MIDDLE_ENCODERS.build(cfg) + + +def build_fusion_layer(cfg): + """Build fusion layer.""" + return FUSION_LAYERS.build(cfg) + + + diff --git a/mmcv/models/dense_heads/VAD_head.py b/mmcv/models/dense_heads/VAD_head.py new file mode 100644 index 0000000..8dc29b3 --- /dev/null +++ b/mmcv/models/dense_heads/VAD_head.py @@ -0,0 +1,1898 @@ +import copy +from math import pi, cos, sin + +import torch +import numpy as np +import torch.nn as nn +import matplotlib.pyplot as plt +import torch.nn.functional as F +from mmcv.models.builder import HEADS, build_loss +from mmcv.models.dense_heads import DETRHead +from mmcv.utils import force_fp32, auto_fp16 +from mmcv.utils import TORCH_VERSION, digit_version +from mmcv.core.bbox.builder import build_assigner, build_sampler +from mmcv.core.bbox.coder import build_bbox_coder +from mmcv.models.utils.transformer import inverse_sigmoid +from mmcv.core.bbox.transforms import bbox_xyxy_to_cxcywh +from mmcv.models.bricks import Linear +from mmcv.models.utils import bias_init_with_prob, xavier_init +from mmcv.core.utils import (multi_apply, multi_apply, reduce_mean) +from mmcv.models.bricks.transformer import build_transformer_layer_sequence + +from mmcv.core.bbox.util import normalize_bbox +from mmcv.models.vad_utils.traj_lr_warmup import get_traj_warmup_loss_weight +from mmcv.models.vad_utils.map_utils import ( + normalize_2d_pts, normalize_2d_bbox, denormalize_2d_pts, denormalize_2d_bbox +) + +class MLP(nn.Module): + def __init__(self, in_channels, hidden_unit, verbose=False): + super(MLP, self).__init__() + self.mlp = nn.Sequential( + nn.Linear(in_channels, hidden_unit), + nn.LayerNorm(hidden_unit), + nn.ReLU() + ) + + def forward(self, x): + x = self.mlp(x) + return x + +class LaneNet(nn.Module): + def __init__(self, in_channels, hidden_unit, num_subgraph_layers): + super(LaneNet, self).__init__() + self.num_subgraph_layers = num_subgraph_layers + self.layer_seq = nn.Sequential() + for i in range(num_subgraph_layers): + self.layer_seq.add_module( + f'lmlp_{i}', MLP(in_channels, hidden_unit)) + in_channels = hidden_unit*2 + + def forward(self, pts_lane_feats): + ''' + Extract lane_feature from vectorized lane representation + + Args: + pts_lane_feats: [batch size, max_pnum, pts, D] + + Returns: + inst_lane_feats: [batch size, max_pnum, D] + ''' + x = pts_lane_feats + for name, layer in self.layer_seq.named_modules(): + if isinstance(layer, MLP): + # x [bs,max_lane_num,9,dim] + x = layer(x) + x_max = torch.max(x, -2)[0] + x_max = x_max.unsqueeze(2).repeat(1, 1, x.shape[2], 1) + x = torch.cat([x, x_max], dim=-1) + x_max = torch.max(x, -2)[0] + return x_max + + +@HEADS.register_module() +class VADHead(DETRHead): + """Head of VAD model. + Args: + with_box_refine (bool): Whether to refine the reference points + in the decoder. Defaults to False. + as_two_stage (bool) : Whether to generate the proposal from + the outputs of encoder. + transformer (obj:`ConfigDict`): ConfigDict is used for building + the Encoder and Decoder. + bev_h, bev_w (int): spatial shape of BEV queries. 
+ """ + def __init__(self, + *args, + with_box_refine=False, + as_two_stage=False, + transformer=None, + bbox_coder=None, + num_cls_fcs=2, + code_weights=None, + bev_h=30, + bev_w=30, + fut_ts=6, + fut_mode=6, + loss_traj=dict(type='L1Loss', loss_weight=0.25), + loss_traj_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=0.8), + map_bbox_coder=None, + map_num_query=900, + map_num_classes=3, + map_num_vec=20, + map_num_pts_per_vec=2, + map_num_pts_per_gt_vec=2, + map_query_embed_type='all_pts', + map_transform_method='minmax', + map_gt_shift_pts_pattern='v0', + map_dir_interval=1, + map_code_size=None, + map_code_weights=None, + loss_map_cls=dict( + type='CrossEntropyLoss', + bg_cls_weight=0.1, + use_sigmoid=False, + loss_weight=1.0, + class_weight=1.0), + loss_map_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_map_iou=dict(type='GIoULoss', loss_weight=2.0), + loss_map_pts=dict( + type='ChamferDistance',loss_src_weight=1.0,loss_dst_weight=1.0 + ), + loss_map_dir=dict(type='PtsDirCosLoss', loss_weight=2.0), + tot_epoch=None, + use_traj_lr_warmup=False, + motion_decoder=None, + motion_map_decoder=None, + use_pe=False, + motion_det_score=None, + map_thresh=0.5, + dis_thresh=0.2, + pe_normalization=True, + ego_his_encoder=None, + ego_fut_mode=3, + loss_plan_reg=dict(type='L1Loss', loss_weight=0.25), + loss_plan_bound=dict(type='PlanMapBoundLoss', loss_weight=0.1), + loss_plan_col=dict(type='PlanAgentDisLoss', loss_weight=0.1), + loss_plan_dir=dict(type='PlanMapThetaLoss', loss_weight=0.1), + ego_agent_decoder=None, + ego_map_decoder=None, + query_thresh=None, + query_use_fix_pad=None, + ego_lcf_feat_idx=None, + valid_fut_ts=6, + **kwargs): + + self.bev_h = bev_h + self.bev_w = bev_w + self.fp16_enabled = False + self.fut_ts = fut_ts + self.fut_mode = fut_mode + self.tot_epoch = tot_epoch + self.use_traj_lr_warmup = use_traj_lr_warmup + self.motion_decoder = motion_decoder + self.motion_map_decoder = motion_map_decoder + self.use_pe = use_pe + self.motion_det_score = motion_det_score + self.map_thresh = map_thresh + self.dis_thresh = dis_thresh + self.pe_normalization = pe_normalization + self.ego_his_encoder = ego_his_encoder + self.ego_fut_mode = ego_fut_mode + self.ego_agent_decoder = ego_agent_decoder + self.ego_map_decoder = ego_map_decoder + self.query_thresh = query_thresh + self.query_use_fix_pad = query_use_fix_pad + self.ego_lcf_feat_idx = ego_lcf_feat_idx + self.valid_fut_ts = valid_fut_ts + + if loss_traj_cls['use_sigmoid'] == True: + self.traj_num_cls = 1 + else: + self.traj_num_cls = 2 + + self.with_box_refine = with_box_refine + self.as_two_stage = as_two_stage + if self.as_two_stage: + transformer['as_two_stage'] = self.as_two_stage + if 'code_size' in kwargs: + self.code_size = kwargs['code_size'] + else: + self.code_size = 10 + if code_weights is not None: + self.code_weights = code_weights + else: + self.code_weights = [1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2] + if map_code_size is not None: + self.map_code_size = map_code_size + else: + self.map_code_size = 10 + if map_code_weights is not None: + self.map_code_weights = map_code_weights + else: + self.map_code_weights = [1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2] + + self.bbox_coder = build_bbox_coder(bbox_coder) + self.pc_range = self.bbox_coder.pc_range + self.real_w = self.pc_range[3] - self.pc_range[0] + self.real_h = self.pc_range[4] - self.pc_range[1] + self.num_cls_fcs = num_cls_fcs - 1 + + self.map_bbox_coder = build_bbox_coder(map_bbox_coder) + 
self.map_query_embed_type = map_query_embed_type
+        self.map_transform_method = map_transform_method
+        self.map_gt_shift_pts_pattern = map_gt_shift_pts_pattern
+        map_num_query = map_num_vec * map_num_pts_per_vec
+        self.map_num_query = map_num_query
+        self.map_num_classes = map_num_classes
+        self.map_num_vec = map_num_vec
+        self.map_num_pts_per_vec = map_num_pts_per_vec
+        self.map_num_pts_per_gt_vec = map_num_pts_per_gt_vec
+        self.map_dir_interval = map_dir_interval
+
+        if loss_map_cls['use_sigmoid']:
+            self.map_cls_out_channels = map_num_classes
+        else:
+            self.map_cls_out_channels = map_num_classes + 1
+
+        self.map_bg_cls_weight = 0
+        map_class_weight = loss_map_cls.get('class_weight', None)
+        if map_class_weight is not None and (self.__class__ is VADHead):
+            assert isinstance(map_class_weight, float), 'Expected ' \
+                'class_weight to have type float. Found ' \
+                f'{type(map_class_weight)}.'
+            # NOTE following the official DETR repo, bg_cls_weight means
+            # relative classification weight of the no-object class.
+            map_bg_cls_weight = loss_map_cls.get('bg_cls_weight', map_class_weight)
+            assert isinstance(map_bg_cls_weight, float), 'Expected ' \
+                'bg_cls_weight to have type float. Found ' \
+                f'{type(map_bg_cls_weight)}.'
+            map_class_weight = torch.ones(map_num_classes + 1) * map_class_weight
+            # set background class as the last index
+            map_class_weight[map_num_classes] = map_bg_cls_weight
+            loss_map_cls.update({'class_weight': map_class_weight})
+            if 'bg_cls_weight' in loss_map_cls:
+                loss_map_cls.pop('bg_cls_weight')
+            self.map_bg_cls_weight = map_bg_cls_weight
+
+        self.traj_bg_cls_weight = 0
+
+        super(VADHead, self).__init__(*args, transformer=transformer, **kwargs)
+        self.code_weights = nn.Parameter(torch.tensor(
+            self.code_weights, requires_grad=False), requires_grad=False)
+        self.map_code_weights = nn.Parameter(torch.tensor(
+            self.map_code_weights, requires_grad=False), requires_grad=False)
+
+        if kwargs['train_cfg'] is not None:
+            assert 'map_assigner' in kwargs['train_cfg'], 'map assigner should be provided '\
+                'when train_cfg is set.'
+            map_assigner = kwargs['train_cfg']['map_assigner']
+            assert loss_map_cls['loss_weight'] == map_assigner['cls_cost']['weight'], \
+                'The classification weight for loss and matcher should be ' \
+                'exactly the same.'
+            assert loss_map_bbox['loss_weight'] == map_assigner['reg_cost'][
+                'weight'], 'The regression L1 weight for loss and matcher ' \
+                'should be exactly the same.'
+            assert loss_map_iou['loss_weight'] == map_assigner['iou_cost']['weight'], \
+                'The regression IoU weight for loss and matcher should be ' \
+                'exactly the same.'
+            assert loss_map_pts['loss_weight'] == map_assigner['pts_cost']['weight'], \
+                'The regression L1 weight for map pts loss and matcher should be ' \
+                'exactly the same.'
+ + self.map_assigner = build_assigner(map_assigner) + # DETR sampling=False, so use PseudoSampler + sampler_cfg = dict(type='PseudoSampler') + self.map_sampler = build_sampler(sampler_cfg, context=self) + + self.loss_traj = build_loss(loss_traj) + self.loss_traj_cls = build_loss(loss_traj_cls) + self.loss_map_bbox = build_loss(loss_map_bbox) + self.loss_map_cls = build_loss(loss_map_cls) + self.loss_map_iou = build_loss(loss_map_iou) + self.loss_map_pts = build_loss(loss_map_pts) + self.loss_map_dir = build_loss(loss_map_dir) + self.loss_plan_reg = build_loss(loss_plan_reg) + self.loss_plan_bound = build_loss(loss_plan_bound) + self.loss_plan_col = build_loss(loss_plan_col) + self.loss_plan_dir = build_loss(loss_plan_dir) + + def _init_layers(self): + """Initialize classification branch and regression branch of head.""" + cls_branch = [] + for _ in range(self.num_reg_fcs): + cls_branch.append(Linear(self.embed_dims, self.embed_dims)) + cls_branch.append(nn.LayerNorm(self.embed_dims)) + cls_branch.append(nn.ReLU(inplace=True)) + cls_branch.append(Linear(self.embed_dims, self.cls_out_channels)) + cls_branch = nn.Sequential(*cls_branch) + + reg_branch = [] + for _ in range(self.num_reg_fcs): + reg_branch.append(Linear(self.embed_dims, self.embed_dims)) + reg_branch.append(nn.ReLU()) + reg_branch.append(Linear(self.embed_dims, self.code_size)) + reg_branch = nn.Sequential(*reg_branch) + + traj_branch = [] + for _ in range(self.num_reg_fcs): + traj_branch.append(Linear(self.embed_dims*2, self.embed_dims*2)) + traj_branch.append(nn.ReLU()) + traj_branch.append(Linear(self.embed_dims*2, self.fut_ts*2)) + traj_branch = nn.Sequential(*traj_branch) + + traj_cls_branch = [] + for _ in range(self.num_reg_fcs): + traj_cls_branch.append(Linear(self.embed_dims*2, self.embed_dims*2)) + traj_cls_branch.append(nn.LayerNorm(self.embed_dims*2)) + traj_cls_branch.append(nn.ReLU(inplace=True)) + traj_cls_branch.append(Linear(self.embed_dims*2, self.traj_num_cls)) + traj_cls_branch = nn.Sequential(*traj_cls_branch) + + map_cls_branch = [] + for _ in range(self.num_reg_fcs): + map_cls_branch.append(Linear(self.embed_dims, self.embed_dims)) + map_cls_branch.append(nn.LayerNorm(self.embed_dims)) + map_cls_branch.append(nn.ReLU(inplace=True)) + map_cls_branch.append(Linear(self.embed_dims, self.map_cls_out_channels)) + map_cls_branch = nn.Sequential(*map_cls_branch) + + map_reg_branch = [] + for _ in range(self.num_reg_fcs): + map_reg_branch.append(Linear(self.embed_dims, self.embed_dims)) + map_reg_branch.append(nn.ReLU()) + map_reg_branch.append(Linear(self.embed_dims, self.map_code_size)) + map_reg_branch = nn.Sequential(*map_reg_branch) + + + def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + # last reg_branch is used to generate proposal from + # encode feature map when as_two_stage is True. 
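+        # Each prediction stage corresponds to one decoder layer. With
+        # `with_box_refine` every stage gets its own deep-copied branch;
+        # otherwise a single branch module is shared across all stages.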
+ num_decoder_layers = 1 + num_map_decoder_layers = 1 + if self.transformer.decoder is not None: + num_decoder_layers = self.transformer.decoder.num_layers + if self.transformer.map_decoder is not None: + num_map_decoder_layers = self.transformer.map_decoder.num_layers + num_motion_decoder_layers = 1 + num_pred = (num_decoder_layers + 1) if \ + self.as_two_stage else num_decoder_layers + motion_num_pred = (num_motion_decoder_layers + 1) if \ + self.as_two_stage else num_motion_decoder_layers + map_num_pred = (num_map_decoder_layers + 1) if \ + self.as_two_stage else num_map_decoder_layers + + if self.with_box_refine: + self.cls_branches = _get_clones(cls_branch, num_pred) + self.reg_branches = _get_clones(reg_branch, num_pred) + self.traj_branches = _get_clones(traj_branch, motion_num_pred) + self.traj_cls_branches = _get_clones(traj_cls_branch, motion_num_pred) + self.map_cls_branches = _get_clones(map_cls_branch, map_num_pred) + self.map_reg_branches = _get_clones(map_reg_branch, map_num_pred) + else: + self.cls_branches = nn.ModuleList( + [cls_branch for _ in range(num_pred)]) + self.reg_branches = nn.ModuleList( + [reg_branch for _ in range(num_pred)]) + self.traj_branches = nn.ModuleList( + [traj_branch for _ in range(motion_num_pred)]) + self.traj_cls_branches = nn.ModuleList( + [traj_cls_branch for _ in range(motion_num_pred)]) + self.map_cls_branches = nn.ModuleList( + [map_cls_branch for _ in range(map_num_pred)]) + self.map_reg_branches = nn.ModuleList( + [map_reg_branch for _ in range(map_num_pred)]) + + if not self.as_two_stage: + self.bev_embedding = nn.Embedding( + self.bev_h * self.bev_w, self.embed_dims) + self.query_embedding = nn.Embedding(self.num_query, + self.embed_dims * 2) + if self.map_query_embed_type == 'all_pts': + self.map_query_embedding = nn.Embedding(self.map_num_query, + self.embed_dims * 2) + elif self.map_query_embed_type == 'instance_pts': + self.map_query_embedding = None + self.map_instance_embedding = nn.Embedding(self.map_num_vec, self.embed_dims * 2) + self.map_pts_embedding = nn.Embedding(self.map_num_pts_per_vec, self.embed_dims * 2) + + if self.motion_decoder is not None: + self.motion_decoder = build_transformer_layer_sequence(self.motion_decoder) + self.motion_mode_query = nn.Embedding(self.fut_mode, self.embed_dims) + self.motion_mode_query.weight.requires_grad = True + if self.use_pe: + self.pos_mlp_sa = nn.Linear(2, self.embed_dims) + else: + raise NotImplementedError('Not implement yet') + + if self.motion_map_decoder is not None: + self.lane_encoder = LaneNet(256, 128, 3) + self.motion_map_decoder = build_transformer_layer_sequence(self.motion_map_decoder) + if self.use_pe: + self.pos_mlp = nn.Linear(2, self.embed_dims) + + if self.ego_his_encoder is not None: + self.ego_his_encoder = LaneNet(2, self.embed_dims//2, 3) + else: + self.ego_query = nn.Embedding(1, self.embed_dims) + + if self.ego_agent_decoder is not None: + self.ego_agent_decoder = build_transformer_layer_sequence(self.ego_agent_decoder) + if self.use_pe: + self.ego_agent_pos_mlp = nn.Linear(2, self.embed_dims) + + if self.ego_map_decoder is not None: + self.ego_map_decoder = build_transformer_layer_sequence(self.ego_map_decoder) + if self.use_pe: + self.ego_map_pos_mlp = nn.Linear(2, self.embed_dims) + + ego_fut_decoder = [] + ego_fut_dec_in_dim = self.embed_dims*2 + len(self.ego_lcf_feat_idx) \ + if self.ego_lcf_feat_idx is not None else self.embed_dims*2 + for _ in range(self.num_reg_fcs): + ego_fut_decoder.append(Linear(ego_fut_dec_in_dim, ego_fut_dec_in_dim)) + 
ego_fut_decoder.append(nn.ReLU()) + ego_fut_decoder.append(Linear(ego_fut_dec_in_dim, self.ego_fut_mode*self.fut_ts*2)) + self.ego_fut_decoder = nn.Sequential(*ego_fut_decoder) + + self.agent_fus_mlp = nn.Sequential( + nn.Linear(self.fut_mode*2*self.embed_dims, self.embed_dims, bias=True), + nn.LayerNorm(self.embed_dims), + nn.ReLU(), + nn.Linear(self.embed_dims, self.embed_dims, bias=True)) + + def init_weights(self): + """Initialize weights of the DeformDETR head.""" + self.transformer.init_weights() + if self.loss_cls.use_sigmoid: + bias_init = bias_init_with_prob(0.01) + for m in self.cls_branches: + nn.init.constant_(m[-1].bias, bias_init) + if self.loss_map_cls.use_sigmoid: + bias_init = bias_init_with_prob(0.01) + for m in self.map_cls_branches: + nn.init.constant_(m[-1].bias, bias_init) + if self.loss_traj_cls.use_sigmoid: + bias_init = bias_init_with_prob(0.01) + for m in self.traj_cls_branches: + nn.init.constant_(m[-1].bias, bias_init) + # for m in self.map_reg_branches: + # constant_init(m[-1], 0, bias=0) + # nn.init.constant_(self.map_reg_branches[0][-1].bias.data[2:], 0.) + if self.motion_decoder is not None: + for p in self.motion_decoder.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + nn.init.orthogonal_(self.motion_mode_query.weight) + if self.use_pe: + xavier_init(self.pos_mlp_sa, distribution='uniform', bias=0.) + if self.motion_map_decoder is not None: + for p in self.motion_map_decoder.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + for p in self.lane_encoder.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + if self.use_pe: + xavier_init(self.pos_mlp, distribution='uniform', bias=0.) + if self.ego_his_encoder is not None: + for p in self.ego_his_encoder.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + if self.ego_agent_decoder is not None: + for p in self.ego_agent_decoder.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + if self.ego_map_decoder is not None: + for p in self.ego_map_decoder.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + # @auto_fp16(apply_to=('mlvl_feats')) + @force_fp32(apply_to=('mlvl_feats', 'prev_bev')) + def forward(self, + mlvl_feats, + img_metas, + prev_bev=None, + only_bev=False, + ego_his_trajs=None, + ego_lcf_feat=None, + ): + """Forward function. + Args: + mlvl_feats (tuple[Tensor]): Features from the upstream + network, each is a 5D-tensor with shape + (B, N, C, H, W). + prev_bev: previous bev featues + only_bev: only compute BEV features with encoder. + Returns: + all_cls_scores (Tensor): Outputs from the classification head, \ + shape [nb_dec, bs, num_query, cls_out_channels]. Note \ + cls_out_channels should includes background. + all_bbox_preds (Tensor): Sigmoid outputs from the regression \ + head with normalized coordinate format (cx, cy, w, l, cz, h, theta, vx, vy). \ + Shape [nb_dec, bs, num_query, 9]. 
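+            ego_fut_preds (Tensor): Planned ego trajectories decoded from
+                the ego features, with shape
+                [bs, ego_fut_mode, fut_ts, 2]; the returned dict also
+                carries the map and motion branch outputs.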
+ """ + + bs, num_cam, _, _, _ = mlvl_feats[0].shape + dtype = mlvl_feats[0].dtype + object_query_embeds = self.query_embedding.weight.to(dtype) + + if self.map_query_embed_type == 'all_pts': + map_query_embeds = self.map_query_embedding.weight.to(dtype) + elif self.map_query_embed_type == 'instance_pts': + map_pts_embeds = self.map_pts_embedding.weight.unsqueeze(0) + map_instance_embeds = self.map_instance_embedding.weight.unsqueeze(1) + map_query_embeds = (map_pts_embeds + map_instance_embeds).flatten(0, 1).to(dtype) + + bev_queries = self.bev_embedding.weight.to(dtype) + + bev_mask = torch.zeros((bs, self.bev_h, self.bev_w), + device=bev_queries.device).to(dtype) + bev_pos = self.positional_encoding(bev_mask).to(dtype) + + if only_bev: # only use encoder to obtain BEV features, TODO: refine the workaround + return self.transformer.get_bev_features( + mlvl_feats, + bev_queries, + self.bev_h, + self.bev_w, + grid_length=(self.real_h / self.bev_h, + self.real_w / self.bev_w), + bev_pos=bev_pos, + img_metas=img_metas, + prev_bev=prev_bev, + ) + else: + outputs = self.transformer( + mlvl_feats, + bev_queries, + object_query_embeds, + map_query_embeds, + self.bev_h, + self.bev_w, + grid_length=(self.real_h / self.bev_h, + self.real_w / self.bev_w), + bev_pos=bev_pos, + reg_branches=self.reg_branches if self.with_box_refine else None, # noqa:E501 + cls_branches=self.cls_branches if self.as_two_stage else None, + map_reg_branches=self.map_reg_branches if self.with_box_refine else None, # noqa:E501 + map_cls_branches=self.map_cls_branches if self.as_two_stage else None, + img_metas=img_metas, + prev_bev=prev_bev + ) + + bev_embed, hs, init_reference, inter_references, \ + map_hs, map_init_reference, map_inter_references = outputs + + hs = hs.permute(0, 2, 1, 3) + outputs_classes = [] + outputs_coords = [] + outputs_coords_bev = [] + outputs_trajs = [] + outputs_trajs_classes = [] + + map_hs = map_hs.permute(0, 2, 1, 3) + map_outputs_classes = [] + map_outputs_coords = [] + map_outputs_pts_coords = [] + map_outputs_coords_bev = [] + + for lvl in range(hs.shape[0]): + if lvl == 0: + reference = init_reference + else: + reference = inter_references[lvl - 1] + reference = inverse_sigmoid(reference) + outputs_class = self.cls_branches[lvl](hs[lvl]) + tmp = self.reg_branches[lvl](hs[lvl]) + + # TODO: check the shape of reference + assert reference.shape[-1] == 3 + tmp[..., 0:2] = tmp[..., 0:2] + reference[..., 0:2] + tmp[..., 0:2] = tmp[..., 0:2].sigmoid() + outputs_coords_bev.append(tmp[..., 0:2].clone().detach()) + tmp[..., 4:5] = tmp[..., 4:5] + reference[..., 2:3] + tmp[..., 4:5] = tmp[..., 4:5].sigmoid() + tmp[..., 0:1] = (tmp[..., 0:1] * (self.pc_range[3] - + self.pc_range[0]) + self.pc_range[0]) + tmp[..., 1:2] = (tmp[..., 1:2] * (self.pc_range[4] - + self.pc_range[1]) + self.pc_range[1]) + tmp[..., 4:5] = (tmp[..., 4:5] * (self.pc_range[5] - + self.pc_range[2]) + self.pc_range[2]) + + # TODO: check if using sigmoid + outputs_coord = tmp + outputs_classes.append(outputs_class) + outputs_coords.append(outputs_coord) + + for lvl in range(map_hs.shape[0]): + if lvl == 0: + reference = map_init_reference + else: + reference = map_inter_references[lvl - 1] + reference = inverse_sigmoid(reference) + map_outputs_class = self.map_cls_branches[lvl]( + map_hs[lvl].view(bs,self.map_num_vec, self.map_num_pts_per_vec,-1).mean(2) + ) + tmp = self.map_reg_branches[lvl](map_hs[lvl]) + # TODO: check the shape of reference + assert reference.shape[-1] == 2 + tmp[..., 0:2] += reference[..., 0:2] + tmp = 
tmp.sigmoid() # cx,cy,w,h + map_outputs_coord, map_outputs_pts_coord = self.map_transform_box(tmp) + map_outputs_coords_bev.append(map_outputs_pts_coord.clone().detach()) + map_outputs_classes.append(map_outputs_class) + map_outputs_coords.append(map_outputs_coord) + map_outputs_pts_coords.append(map_outputs_pts_coord) + + if self.motion_decoder is not None: + batch_size, num_agent = outputs_coords_bev[-1].shape[:2] + # motion_query + motion_query = hs[-1].permute(1, 0, 2) # [A, B, D] + mode_query = self.motion_mode_query.weight # [fut_mode, D] + # [M, B, D], M=A*fut_mode + motion_query = (motion_query[:, None, :, :] + mode_query[None, :, None, :]).flatten(0, 1) + if self.use_pe: + motion_coords = outputs_coords_bev[-1] # [B, A, 2] + motion_pos = self.pos_mlp_sa(motion_coords) # [B, A, D] + motion_pos = motion_pos.unsqueeze(2).repeat(1, 1, self.fut_mode, 1).flatten(1, 2) + motion_pos = motion_pos.permute(1, 0, 2) # [M, B, D] + else: + motion_pos = None + + if self.motion_det_score is not None: + motion_score = outputs_classes[-1] + max_motion_score = motion_score.max(dim=-1)[0] + invalid_motion_idx = max_motion_score < self.motion_det_score # [B, A] + invalid_motion_idx = invalid_motion_idx.unsqueeze(2).repeat(1, 1, self.fut_mode).flatten(1, 2) + else: + invalid_motion_idx = None + + motion_hs = self.motion_decoder( + query=motion_query, + key=motion_query, + value=motion_query, + query_pos=motion_pos, + key_pos=motion_pos, + key_padding_mask=invalid_motion_idx) + + if self.motion_map_decoder is not None: + # map preprocess + motion_coords = outputs_coords_bev[-1] # [B, A, 2] + motion_coords = motion_coords.unsqueeze(2).repeat(1, 1, self.fut_mode, 1).flatten(1, 2) + map_query = map_hs[-1].view(batch_size, self.map_num_vec, self.map_num_pts_per_vec, -1) + map_query = self.lane_encoder(map_query) # [B, P, pts, D] -> [B, P, D] + map_score = map_outputs_classes[-1] + map_pos = map_outputs_coords_bev[-1] + map_query, map_pos, key_padding_mask = self.select_and_pad_pred_map( + motion_coords, map_query, map_score, map_pos, + map_thresh=self.map_thresh, dis_thresh=self.dis_thresh, + pe_normalization=self.pe_normalization, use_fix_pad=True) + map_query = map_query.permute(1, 0, 2) # [P, B*M, D] + ca_motion_query = motion_hs.permute(1, 0, 2).flatten(0, 1).unsqueeze(0) + + # position encoding + if self.use_pe: + (num_query, batch) = ca_motion_query.shape[:2] + motion_pos = torch.zeros((num_query, batch, 2), device=motion_hs.device) + motion_pos = self.pos_mlp(motion_pos) + map_pos = map_pos.permute(1, 0, 2) + map_pos = self.pos_mlp(map_pos) + else: + motion_pos, map_pos = None, None + + ca_motion_query = self.motion_map_decoder( + query=ca_motion_query, + key=map_query, + value=map_query, + query_pos=motion_pos, + key_pos=map_pos, + key_padding_mask=key_padding_mask) + else: + ca_motion_query = motion_hs.permute(1, 0, 2).flatten(0, 1).unsqueeze(0) + + batch_size = outputs_coords_bev[-1].shape[0] + motion_hs = motion_hs.permute(1, 0, 2).unflatten( + dim=1, sizes=(num_agent, self.fut_mode) + ) + ca_motion_query = ca_motion_query.squeeze(0).unflatten( + dim=0, sizes=(batch_size, num_agent, self.fut_mode) + ) + motion_hs = torch.cat([motion_hs, ca_motion_query], dim=-1) # [B, A, fut_mode, 2D] + else: + raise NotImplementedError('Not implement yet') + + outputs_traj = self.traj_branches[0](motion_hs) + outputs_trajs.append(outputs_traj) + outputs_traj_class = self.traj_cls_branches[0](motion_hs) + outputs_trajs_classes.append(outputs_traj_class.squeeze(-1)) + (batch, num_agent) = motion_hs.shape[:2] + + 
map_outputs_classes = torch.stack(map_outputs_classes) + map_outputs_coords = torch.stack(map_outputs_coords) + map_outputs_pts_coords = torch.stack(map_outputs_pts_coords) + + outputs_classes = torch.stack(outputs_classes) + outputs_coords = torch.stack(outputs_coords) + outputs_trajs = torch.stack(outputs_trajs) + outputs_trajs_classes = torch.stack(outputs_trajs_classes) + + # planning + (batch, num_agent) = motion_hs.shape[:2] + if self.ego_his_encoder is not None: + ego_his_feats = self.ego_his_encoder(ego_his_trajs) # [B, 1, dim] + else: + ego_his_feats = self.ego_query.weight.unsqueeze(0).repeat(batch, 1, 1) + # Interaction + ego_query = ego_his_feats + ego_pos = torch.zeros((batch, 1, 2), device=ego_query.device) + ego_pos_emb = self.ego_agent_pos_mlp(ego_pos) + agent_conf = outputs_classes[-1] + agent_query = motion_hs.reshape(batch, num_agent, -1) + agent_query = self.agent_fus_mlp(agent_query) # [B, A, fut_mode, 2*D] -> [B, A, D] + agent_pos = outputs_coords_bev[-1] + agent_query, agent_pos, agent_mask = self.select_and_pad_query( + agent_query, agent_pos, agent_conf, + score_thresh=self.query_thresh, use_fix_pad=self.query_use_fix_pad + ) + agent_pos_emb = self.ego_agent_pos_mlp(agent_pos) + # ego <-> agent interaction + ego_agent_query = self.ego_agent_decoder( + query=ego_query.permute(1, 0, 2), + key=agent_query.permute(1, 0, 2), + value=agent_query.permute(1, 0, 2), + query_pos=ego_pos_emb.permute(1, 0, 2), + key_pos=agent_pos_emb.permute(1, 0, 2), + key_padding_mask=agent_mask) + + # ego <-> map interaction + ego_pos = torch.zeros((batch, 1, 2), device=agent_query.device) + ego_pos_emb = self.ego_map_pos_mlp(ego_pos) + map_query = map_hs[-1].view(batch_size, self.map_num_vec, self.map_num_pts_per_vec, -1) + map_query = self.lane_encoder(map_query) # [B, P, pts, D] -> [B, P, D] + map_conf = map_outputs_classes[-1] + map_pos = map_outputs_coords_bev[-1] + # use the most close pts pos in each map inst as the inst's pos + batch, num_map = map_pos.shape[:2] + map_dis = torch.sqrt(map_pos[..., 0]**2 + map_pos[..., 1]**2) + min_map_pos_idx = map_dis.argmin(dim=-1).flatten() # [B*P] + min_map_pos = map_pos.flatten(0, 1) # [B*P, pts, 2] + min_map_pos = min_map_pos[range(min_map_pos.shape[0]), min_map_pos_idx] # [B*P, 2] + min_map_pos = min_map_pos.view(batch, num_map, 2) # [B, P, 2] + map_query, map_pos, map_mask = self.select_and_pad_query( + map_query, min_map_pos, map_conf, + score_thresh=self.query_thresh, use_fix_pad=self.query_use_fix_pad + ) + map_pos_emb = self.ego_map_pos_mlp(map_pos) + ego_map_query = self.ego_map_decoder( + query=ego_agent_query, + key=map_query.permute(1, 0, 2), + value=map_query.permute(1, 0, 2), + query_pos=ego_pos_emb.permute(1, 0, 2), + key_pos=map_pos_emb.permute(1, 0, 2), + key_padding_mask=map_mask) + + if self.ego_his_encoder is not None and self.ego_lcf_feat_idx is not None: + ego_feats = torch.cat( + [ego_his_feats, + ego_map_query.permute(1, 0, 2), + ego_lcf_feat.squeeze(1)[..., self.ego_lcf_feat_idx]], + dim=-1 + ) # [B, 1, 2D+2] + elif self.ego_his_encoder is not None and self.ego_lcf_feat_idx is None: + ego_feats = torch.cat( + [ego_his_feats, + ego_map_query.permute(1, 0, 2)], + dim=-1 + ) # [B, 1, 2D] + elif self.ego_his_encoder is None and self.ego_lcf_feat_idx is not None: + ego_feats = torch.cat( + [ego_agent_query.permute(1, 0, 2), + ego_map_query.permute(1, 0, 2), + ego_lcf_feat.squeeze(1)[..., self.ego_lcf_feat_idx]], + dim=-1 + ) # [B, 1, 2D+2] + elif self.ego_his_encoder is None and self.ego_lcf_feat_idx is None: + ego_feats = 
torch.cat(
+                [ego_agent_query.permute(1, 0, 2),
+                 ego_map_query.permute(1, 0, 2)],
+                dim=-1
+            )  # [B, 1, 2D]
+
+        # Ego prediction
+        outputs_ego_trajs = self.ego_fut_decoder(ego_feats)
+        outputs_ego_trajs = outputs_ego_trajs.reshape(outputs_ego_trajs.shape[0],
+                                                      self.ego_fut_mode, self.fut_ts, 2)
+
+        outs = {
+            'bev_embed': bev_embed,
+            'all_cls_scores': outputs_classes,
+            'all_bbox_preds': outputs_coords,
+            'all_traj_preds': outputs_trajs.repeat(outputs_coords.shape[0], 1, 1, 1, 1),
+            'all_traj_cls_scores': outputs_trajs_classes.repeat(outputs_coords.shape[0], 1, 1, 1),
+            'map_all_cls_scores': map_outputs_classes,
+            'map_all_bbox_preds': map_outputs_coords,
+            'map_all_pts_preds': map_outputs_pts_coords,
+            'enc_cls_scores': None,
+            'enc_bbox_preds': None,
+            'map_enc_cls_scores': None,
+            'map_enc_bbox_preds': None,
+            'map_enc_pts_preds': None,
+            'ego_fut_preds': outputs_ego_trajs,
+        }
+
+        return outs
+
+    def map_transform_box(self, pts, y_first=False):
+        """
+        Convert a point set into a bounding box.
+
+        Args:
+            pts: the input point sets (fields), each point
+                set (field) is represented by 2n scalars.
+            y_first: if y_first=True, the point set is represented as
+                [y1, x1, y2, x2 ... yn, xn], otherwise the point set is
+                represented as [x1, y1, x2, y2 ... xn, yn].
+        Returns:
+            The bbox [cx, cy, w, h] transformed from points.
+        """
+        pts_reshape = pts.view(pts.shape[0], self.map_num_vec,
+                               self.map_num_pts_per_vec, 2)
+        pts_y = pts_reshape[:, :, :, 0] if y_first else pts_reshape[:, :, :, 1]
+        pts_x = pts_reshape[:, :, :, 1] if y_first else pts_reshape[:, :, :, 0]
+        if self.map_transform_method == 'minmax':
+            xmin = pts_x.min(dim=2, keepdim=True)[0]
+            xmax = pts_x.max(dim=2, keepdim=True)[0]
+            ymin = pts_y.min(dim=2, keepdim=True)[0]
+            ymax = pts_y.max(dim=2, keepdim=True)[0]
+            bbox = torch.cat([xmin, ymin, xmax, ymax], dim=2)
+            bbox = bbox_xyxy_to_cxcywh(bbox)
+        else:
+            raise NotImplementedError
+        return bbox, pts_reshape
+
+    def _get_target_single(self,
+                           cls_score,
+                           bbox_pred,
+                           gt_labels,
+                           gt_bboxes,
+                           gt_attr_labels,
+                           gt_bboxes_ignore=None):
+        """Compute regression and classification targets for one image.
+        Outputs from a single decoder layer of a single feature level are used.
+        Args:
+            cls_score (Tensor): Box score logits from a single decoder layer
+                for one image. Shape [num_query, cls_out_channels].
+            bbox_pred (Tensor): Sigmoid outputs from a single decoder layer
+                for one image, with normalized coordinate (cx, cy, w, h) and
+                shape [num_query, 10].
+            gt_bboxes (Tensor): Ground truth bboxes for one image with
+                shape (num_gts, 9) in [x,y,z,w,l,h,yaw,vx,vy] format.
+            gt_labels (Tensor): Ground truth class indices for one image
+                with shape (num_gts, ).
+            gt_bboxes_ignore (Tensor, optional): Bounding boxes
+                which can be ignored. Default None.
+        Returns:
+            tuple[Tensor]: a tuple containing the following for one image.
+                - labels (Tensor): Labels of each image.
+                - label_weights (Tensor): Label weights of each image.
+                - bbox_targets (Tensor): BBox targets of each image.
+                - bbox_weights (Tensor): BBox weights of each image.
+                - pos_inds (Tensor): Sampled positive indices for each image.
+                - neg_inds (Tensor): Sampled negative indices for each image.
+ """ + + num_bboxes = bbox_pred.size(0) + # assigner and sampler + gt_fut_trajs = gt_attr_labels[:, :self.fut_ts*2] + gt_fut_masks = gt_attr_labels[:, self.fut_ts*2:self.fut_ts*3] + gt_bbox_c = gt_bboxes.shape[-1] + num_gt_bbox, gt_traj_c = gt_fut_trajs.shape + + assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes, + gt_labels, gt_bboxes_ignore) + + sampling_result = self.sampler.sample(assign_result, bbox_pred, + gt_bboxes) + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + + # label targets + labels = gt_bboxes.new_full((num_bboxes,), + self.num_classes, + dtype=torch.long) + labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] + label_weights = gt_bboxes.new_ones(num_bboxes) + + # bbox targets + bbox_targets = torch.zeros_like(bbox_pred)[..., :gt_bbox_c] + bbox_weights = torch.zeros_like(bbox_pred) + bbox_weights[pos_inds] = 1.0 + + # trajs targets + traj_targets = torch.zeros((num_bboxes, gt_traj_c), dtype=torch.float32, device=bbox_pred.device) + traj_weights = torch.zeros_like(traj_targets) + traj_targets[pos_inds] = gt_fut_trajs[sampling_result.pos_assigned_gt_inds] + traj_weights[pos_inds] = 1.0 + + # Filter out invalid fut trajs + traj_masks = torch.zeros_like(traj_targets) # [num_bboxes, fut_ts*2] + gt_fut_masks = gt_fut_masks.unsqueeze(-1).repeat(1, 1, 2).view(num_gt_bbox, -1) # [num_gt_bbox, fut_ts*2] + traj_masks[pos_inds] = gt_fut_masks[sampling_result.pos_assigned_gt_inds] + traj_weights = traj_weights * traj_masks + + # Extra future timestamp mask for controlling pred horizon + fut_ts_mask = torch.zeros((num_bboxes, self.fut_ts, 2), + dtype=torch.float32, device=bbox_pred.device) + fut_ts_mask[:, :self.valid_fut_ts, :] = 1.0 + fut_ts_mask = fut_ts_mask.view(num_bboxes, -1) + traj_weights = traj_weights * fut_ts_mask + + # DETR + bbox_targets[pos_inds] = sampling_result.pos_gt_bboxes + + return ( + labels, label_weights, bbox_targets, bbox_weights, traj_targets, + traj_weights, traj_masks.view(-1, self.fut_ts, 2)[..., 0], + pos_inds, neg_inds + ) + + def _map_get_target_single(self, + cls_score, + bbox_pred, + pts_pred, + gt_labels, + gt_bboxes, + gt_shifts_pts, + gt_bboxes_ignore=None): + """"Compute regression and classification targets for one image. + Outputs from a single decoder layer of a single feature level are used. + Args: + cls_score (Tensor): Box score logits from a single decoder layer + for one image. Shape [num_query, cls_out_channels]. + bbox_pred (Tensor): Sigmoid outputs from a single decoder layer + for one image, with normalized coordinate (cx, cy, w, h) and + shape [num_query, 4]. + gt_bboxes (Tensor): Ground truth bboxes for one image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (Tensor): Ground truth class indices for one image + with shape (num_gts, ). + gt_bboxes_ignore (Tensor, optional): Bounding boxes + which can be ignored. Default None. + Returns: + tuple[Tensor]: a tuple containing the following for one image. + - labels (Tensor): Labels of each image. + - label_weights (Tensor]): Label weights of each image. + - bbox_targets (Tensor): BBox targets of each image. + - bbox_weights (Tensor): BBox weights of each image. + - pos_inds (Tensor): Sampled positive indices for each image. + - neg_inds (Tensor): Sampled negative indices for each image. 
+ """ + num_bboxes = bbox_pred.size(0) + # assigner and sampler + gt_c = gt_bboxes.shape[-1] + assign_result, order_index = self.map_assigner.assign(bbox_pred, cls_score, pts_pred, + gt_bboxes, gt_labels, gt_shifts_pts, + gt_bboxes_ignore) + + sampling_result = self.map_sampler.sample(assign_result, bbox_pred, + gt_bboxes) + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + # label targets + labels = gt_bboxes.new_full((num_bboxes,), + self.map_num_classes, + dtype=torch.long) + labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] + label_weights = gt_bboxes.new_ones(num_bboxes) + # bbox targets + bbox_targets = torch.zeros_like(bbox_pred)[..., :gt_c] + bbox_weights = torch.zeros_like(bbox_pred) + bbox_weights[pos_inds] = 1.0 + # pts targets + if order_index is None: + assigned_shift = gt_labels[sampling_result.pos_assigned_gt_inds] + else: + assigned_shift = order_index[sampling_result.pos_inds, sampling_result.pos_assigned_gt_inds] + pts_targets = pts_pred.new_zeros((pts_pred.size(0), + pts_pred.size(1), pts_pred.size(2))) + pts_weights = torch.zeros_like(pts_targets) + pts_weights[pos_inds] = 1.0 + # DETR + bbox_targets[pos_inds] = sampling_result.pos_gt_bboxes + pts_targets[pos_inds] = gt_shifts_pts[sampling_result.pos_assigned_gt_inds,assigned_shift,:,:] + return (labels, label_weights, bbox_targets, bbox_weights, + pts_targets, pts_weights, + pos_inds, neg_inds) + + def get_targets(self, + cls_scores_list, + bbox_preds_list, + gt_bboxes_list, + gt_labels_list, + gt_attr_labels_list, + gt_bboxes_ignore_list=None): + """"Compute regression and classification targets for a batch image. + Outputs from a single decoder layer of a single feature level are used. + Args: + cls_scores_list (list[Tensor]): Box score logits from a single + decoder layer for each image with shape [num_query, + cls_out_channels]. + bbox_preds_list (list[Tensor]): Sigmoid outputs from a single + decoder layer for each image, with normalized coordinate + (cx, cy, w, h) and shape [num_query, 4]. + gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image + with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (num_gts, ). + gt_bboxes_ignore_list (list[Tensor], optional): Bounding + boxes which can be ignored for each image. Default None. + Returns: + tuple: a tuple containing the following targets. + - labels_list (list[Tensor]): Labels for all images. + - label_weights_list (list[Tensor]): Label weights for all \ + images. + - bbox_targets_list (list[Tensor]): BBox targets for all \ + images. + - bbox_weights_list (list[Tensor]): BBox weights for all \ + images. + - num_total_pos (int): Number of positive samples in all \ + images. + - num_total_neg (int): Number of negative samples in all \ + images. + """ + assert gt_bboxes_ignore_list is None, \ + 'Only supports for gt_bboxes_ignore setting to None.' 
+ num_imgs = len(cls_scores_list) + gt_bboxes_ignore_list = [ + gt_bboxes_ignore_list for _ in range(num_imgs) + ] + + (labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, traj_targets_list, traj_weights_list, + gt_fut_masks_list, pos_inds_list, neg_inds_list) = multi_apply( + self._get_target_single, cls_scores_list, bbox_preds_list, + gt_labels_list, gt_bboxes_list, gt_attr_labels_list, gt_bboxes_ignore_list + ) + num_total_pos = sum((inds.numel() for inds in pos_inds_list)) + num_total_neg = sum((inds.numel() for inds in neg_inds_list)) + return (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + traj_targets_list, traj_weights_list, gt_fut_masks_list, num_total_pos, num_total_neg) + + def map_get_targets(self, + cls_scores_list, + bbox_preds_list, + pts_preds_list, + gt_bboxes_list, + gt_labels_list, + gt_shifts_pts_list, + gt_bboxes_ignore_list=None): + """"Compute regression and classification targets for a batch image. + Outputs from a single decoder layer of a single feature level are used. + Args: + cls_scores_list (list[Tensor]): Box score logits from a single + decoder layer for each image with shape [num_query, + cls_out_channels]. + bbox_preds_list (list[Tensor]): Sigmoid outputs from a single + decoder layer for each image, with normalized coordinate + (cx, cy, w, h) and shape [num_query, 4]. + gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image + with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (num_gts, ). + gt_bboxes_ignore_list (list[Tensor], optional): Bounding + boxes which can be ignored for each image. Default None. + Returns: + tuple: a tuple containing the following targets. + - labels_list (list[Tensor]): Labels for all images. + - label_weights_list (list[Tensor]): Label weights for all \ + images. + - bbox_targets_list (list[Tensor]): BBox targets for all \ + images. + - bbox_weights_list (list[Tensor]): BBox weights for all \ + images. + - num_total_pos (int): Number of positive samples in all \ + images. + - num_total_neg (int): Number of negative samples in all \ + images. + """ + assert gt_bboxes_ignore_list is None, \ + 'Only supports for gt_bboxes_ignore setting to None.' + num_imgs = len(cls_scores_list) + gt_bboxes_ignore_list = [ + gt_bboxes_ignore_list for _ in range(num_imgs) + ] + + (labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, pts_targets_list, pts_weights_list, + pos_inds_list, neg_inds_list) = multi_apply( + self._map_get_target_single, cls_scores_list, bbox_preds_list,pts_preds_list, + gt_labels_list, gt_bboxes_list, gt_shifts_pts_list, gt_bboxes_ignore_list) + num_total_pos = sum((inds.numel() for inds in pos_inds_list)) + num_total_neg = sum((inds.numel() for inds in neg_inds_list)) + return (labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, pts_targets_list, pts_weights_list, + num_total_pos, num_total_neg) + + def loss_planning(self, + ego_fut_preds, + ego_fut_gt, + ego_fut_masks, + ego_fut_cmd, + lane_preds, + lane_score_preds, + agent_preds, + agent_fut_preds, + agent_score_preds, + agent_fut_cls_preds): + """"Loss function for ego vehicle planning. 
+        Args:
+            ego_fut_preds (Tensor): [B, ego_fut_mode, fut_ts, 2]
+            ego_fut_gt (Tensor): [B, fut_ts, 2]
+            ego_fut_masks (Tensor): [B, fut_ts]
+            ego_fut_cmd (Tensor): [B, ego_fut_mode]
+            lane_preds (Tensor): [B, num_vec, num_pts, 2]
+            lane_score_preds (Tensor): [B, num_vec, 3]
+            agent_preds (Tensor): [B, num_agent, 2]
+            agent_fut_preds (Tensor): [B, num_agent, fut_mode, fut_ts, 2]
+            agent_score_preds (Tensor): [B, num_agent, 10]
+            agent_fut_cls_preds (Tensor): [B, num_agent, fut_mode]
+        Returns:
+            loss_plan_reg (Tensor): planning regression loss.
+            loss_plan_bound (Tensor): planning map boundary constraint loss.
+            loss_plan_col (Tensor): planning collision constraint loss.
+            loss_plan_dir (Tensor): planning directional constraint loss.
+        """
+
+        ego_fut_gt = ego_fut_gt.unsqueeze(1).repeat(1, self.ego_fut_mode, 1, 1)
+        loss_plan_l1_weight = ego_fut_cmd[..., None, None] * ego_fut_masks[:, None, :, None]
+        loss_plan_l1_weight = loss_plan_l1_weight.repeat(1, 1, 1, 2)
+
+        loss_plan_l1 = self.loss_plan_reg(
+            ego_fut_preds,
+            ego_fut_gt,
+            loss_plan_l1_weight
+        )
+
+        loss_plan_bound = self.loss_plan_bound(
+            ego_fut_preds[ego_fut_cmd == 1],
+            lane_preds,
+            lane_score_preds,
+            weight=ego_fut_masks
+        )
+
+        loss_plan_col = self.loss_plan_col(
+            ego_fut_preds[ego_fut_cmd == 1],
+            agent_preds,
+            agent_fut_preds,
+            agent_score_preds,
+            agent_fut_cls_preds,
+            weight=ego_fut_masks[:, :, None].repeat(1, 1, 2)
+        )
+
+        loss_plan_dir = self.loss_plan_dir(
+            ego_fut_preds[ego_fut_cmd == 1],
+            lane_preds,
+            lane_score_preds,
+            weight=ego_fut_masks
+        )
+
+        if digit_version(TORCH_VERSION) >= digit_version('1.8'):
+            loss_plan_l1 = torch.nan_to_num(loss_plan_l1)
+            loss_plan_bound = torch.nan_to_num(loss_plan_bound)
+            loss_plan_col = torch.nan_to_num(loss_plan_col)
+            loss_plan_dir = torch.nan_to_num(loss_plan_dir)
+
+        loss_plan_dict = dict()
+        loss_plan_dict['loss_plan_reg'] = loss_plan_l1
+        loss_plan_dict['loss_plan_bound'] = loss_plan_bound
+        loss_plan_dict['loss_plan_col'] = loss_plan_col
+        loss_plan_dict['loss_plan_dir'] = loss_plan_dir
+
+        return loss_plan_dict
+
+    def loss_single(self,
+                    cls_scores,
+                    bbox_preds,
+                    traj_preds,
+                    traj_cls_preds,
+                    gt_bboxes_list,
+                    gt_labels_list,
+                    gt_attr_labels_list,
+                    gt_bboxes_ignore_list=None):
+        """Loss function for outputs from a single decoder layer of a single
+        feature level.
+        Args:
+            cls_scores (Tensor): Box score logits from a single decoder layer
+                for all images. Shape [bs, num_query, cls_out_channels].
+            bbox_preds (Tensor): Sigmoid outputs from a single decoder layer
+                for all images, with normalized coordinate (cx, cy, w, h) and
+                shape [bs, num_query, 4].
+            gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
+                with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels_list (list[Tensor]): Ground truth class indices for each
+                image with shape (num_gts, ).
+            gt_bboxes_ignore_list (list[Tensor], optional): Bounding
+                boxes which can be ignored for each image. Default None.
+        Returns:
+            dict[str, Tensor]: A dictionary of loss components for outputs from
+                a single decoder layer.
+ """ + num_imgs = cls_scores.size(0) + cls_scores_list = [cls_scores[i] for i in range(num_imgs)] + bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)] + cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list, + gt_bboxes_list, gt_labels_list, + gt_attr_labels_list, gt_bboxes_ignore_list) + + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + traj_targets_list, traj_weights_list, gt_fut_masks_list, + num_total_pos, num_total_neg) = cls_reg_targets + + labels = torch.cat(labels_list, 0) + label_weights = torch.cat(label_weights_list, 0) + bbox_targets = torch.cat(bbox_targets_list, 0) + bbox_weights = torch.cat(bbox_weights_list, 0) + traj_targets = torch.cat(traj_targets_list, 0) + traj_weights = torch.cat(traj_weights_list, 0) + gt_fut_masks = torch.cat(gt_fut_masks_list, 0) + + # classification loss + cls_scores = cls_scores.reshape(-1, self.cls_out_channels) + # construct weighted avg_factor to match with the official DETR repo + cls_avg_factor = num_total_pos * 1.0 + \ + num_total_neg * self.bg_cls_weight + if self.sync_cls_avg_factor: + cls_avg_factor = reduce_mean( + cls_scores.new_tensor([cls_avg_factor])) + + cls_avg_factor = max(cls_avg_factor, 1) + loss_cls = self.loss_cls(cls_scores, labels, label_weights, avg_factor=cls_avg_factor) + + # Compute the average number of gt boxes accross all gpus, for + # normalization purposes + num_total_pos = loss_cls.new_tensor([num_total_pos]) + num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() + + # regression L1 loss + bbox_preds = bbox_preds.reshape(-1, bbox_preds.size(-1)) + normalized_bbox_targets = normalize_bbox(bbox_targets, self.pc_range) + isnotnan = torch.isfinite(normalized_bbox_targets).all(dim=-1) + bbox_weights = bbox_weights * self.code_weights + loss_bbox = self.loss_bbox( + bbox_preds[isnotnan, :10], + normalized_bbox_targets[isnotnan, :10], + bbox_weights[isnotnan, :10], + avg_factor=num_total_pos) + + # traj regression loss + best_traj_preds = self.get_best_fut_preds( + traj_preds.reshape(-1, self.fut_mode, self.fut_ts, 2), + traj_targets.reshape(-1, self.fut_ts, 2), gt_fut_masks) + + neg_inds = (bbox_weights[:, 0] == 0) + traj_labels = self.get_traj_cls_target( + traj_preds.reshape(-1, self.fut_mode, self.fut_ts, 2), + traj_targets.reshape(-1, self.fut_ts, 2), + gt_fut_masks, neg_inds) + + loss_traj = self.loss_traj( + best_traj_preds[isnotnan], + traj_targets[isnotnan], + traj_weights[isnotnan], + avg_factor=num_total_pos) + + if self.use_traj_lr_warmup: + loss_scale_factor = get_traj_warmup_loss_weight(self.epoch, self.tot_epoch) + loss_traj = loss_scale_factor * loss_traj + + # traj classification loss + traj_cls_scores = traj_cls_preds.reshape(-1, self.fut_mode) + # construct weighted avg_factor to match with the official DETR repo + traj_cls_avg_factor = num_total_pos * 1.0 + \ + num_total_neg * self.traj_bg_cls_weight + if self.sync_cls_avg_factor: + traj_cls_avg_factor = reduce_mean( + traj_cls_scores.new_tensor([traj_cls_avg_factor])) + + traj_cls_avg_factor = max(traj_cls_avg_factor, 1) + loss_traj_cls = self.loss_traj_cls( + traj_cls_scores, traj_labels, label_weights, avg_factor=traj_cls_avg_factor + ) + + if digit_version(TORCH_VERSION) >= digit_version('1.8'): + loss_cls = torch.nan_to_num(loss_cls) + loss_bbox = torch.nan_to_num(loss_bbox) + loss_traj = torch.nan_to_num(loss_traj) + loss_traj_cls = torch.nan_to_num(loss_traj_cls) + + return loss_cls, loss_bbox, loss_traj, loss_traj_cls + + def get_best_fut_preds(self, + traj_preds, + 
traj_targets,
+                           gt_fut_masks):
+        """Choose best preds among all modes.
+        Args:
+            traj_preds (Tensor): MultiModal traj preds with shape (num_box_preds, fut_mode, fut_ts, 2).
+            traj_targets (Tensor): Ground truth traj for each pred box with shape (num_box_preds, fut_ts, 2).
+            gt_fut_masks (Tensor): Ground truth traj mask with shape (num_box_preds, fut_ts).
+
+        Returns:
+            best_traj_preds (Tensor): best traj preds (min displacement error with gt)
+                with shape (num_box_preds, fut_ts*2).
+        """
+
+        cum_traj_preds = traj_preds.cumsum(dim=-2)
+        cum_traj_targets = traj_targets.cumsum(dim=-2)
+
+        # Get min pred mode indices.
+        # (num_box_preds, fut_mode, fut_ts)
+        dist = torch.linalg.norm(cum_traj_targets[:, None, :, :] - cum_traj_preds, dim=-1)
+        dist = dist * gt_fut_masks[:, None, :]
+        dist = dist[..., -1]
+        # nan * 0 is still nan, so zero out invalid distances directly
+        dist[torch.isnan(dist)] = 0
+        min_mode_idxs = torch.argmin(dist, dim=-1).tolist()
+        box_idxs = torch.arange(traj_preds.shape[0]).tolist()
+        best_traj_preds = traj_preds[box_idxs, min_mode_idxs, :, :].reshape(-1, self.fut_ts*2)
+
+        return best_traj_preds
+
+    def get_traj_cls_target(self,
+                            traj_preds,
+                            traj_targets,
+                            gt_fut_masks,
+                            neg_inds):
+        """Get trajectory mode classification target.
+        Args:
+            traj_preds (Tensor): MultiModal traj preds with shape (num_box_preds, fut_mode, fut_ts, 2).
+            traj_targets (Tensor): Ground truth traj for each pred box with shape (num_box_preds, fut_ts, 2).
+            gt_fut_masks (Tensor): Ground truth traj mask with shape (num_box_preds, fut_ts).
+            neg_inds (Tensor): Negative indices with shape (num_box_preds,)
+
+        Returns:
+            traj_labels (Tensor): traj cls labels (num_box_preds,).
+        """
+
+        cum_traj_preds = traj_preds.cumsum(dim=-2)
+        cum_traj_targets = traj_targets.cumsum(dim=-2)
+
+        # Get min pred mode indices.
+        # (num_box_preds, fut_mode, fut_ts)
+        dist = torch.linalg.norm(cum_traj_targets[:, None, :, :] - cum_traj_preds, dim=-1)
+        dist = dist * gt_fut_masks[:, None, :]
+        dist = dist[..., -1]
+        # nan * 0 is still nan, so zero out invalid distances directly
+        dist[torch.isnan(dist)] = 0
+        traj_labels = torch.argmin(dist, dim=-1)
+        traj_labels[neg_inds] = self.fut_mode
+
+        return traj_labels
+
+    def map_loss_single(self,
+                        cls_scores,
+                        bbox_preds,
+                        pts_preds,
+                        gt_bboxes_list,
+                        gt_labels_list,
+                        gt_shifts_pts_list,
+                        gt_bboxes_ignore_list=None):
+        """Loss function for outputs from a single decoder layer of a single
+        feature level.
+        Args:
+            cls_scores (Tensor): Box score logits from a single decoder layer
+                for all images. Shape [bs, num_query, cls_out_channels].
+            bbox_preds (Tensor): Sigmoid outputs from a single decoder layer
+                for all images, with normalized coordinate (cx, cy, w, h) and
+                shape [bs, num_query, 4].
+            gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
+                with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels_list (list[Tensor]): Ground truth class indices for each
+                image with shape (num_gts, ).
+            gt_pts_list (list[Tensor]): Ground truth pts for each image
+                with shape (num_gts, fixed_num, 2) in [x,y] format.
+            gt_bboxes_ignore_list (list[Tensor], optional): Bounding
+                boxes which can be ignored for each image. Default None.
+        Returns:
+            dict[str, Tensor]: A dictionary of loss components for outputs from
+                a single decoder layer.
+ """ + num_imgs = cls_scores.size(0) + cls_scores_list = [cls_scores[i] for i in range(num_imgs)] + bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)] + pts_preds_list = [pts_preds[i] for i in range(num_imgs)] + + cls_reg_targets = self.map_get_targets(cls_scores_list, bbox_preds_list,pts_preds_list, + gt_bboxes_list, gt_labels_list,gt_shifts_pts_list, + gt_bboxes_ignore_list) + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + pts_targets_list, pts_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + + labels = torch.cat(labels_list, 0) + label_weights = torch.cat(label_weights_list, 0) + bbox_targets = torch.cat(bbox_targets_list, 0) + bbox_weights = torch.cat(bbox_weights_list, 0) + pts_targets = torch.cat(pts_targets_list, 0) + pts_weights = torch.cat(pts_weights_list, 0) + + # classification loss + cls_scores = cls_scores.reshape(-1, self.map_cls_out_channels) + # construct weighted avg_factor to match with the official DETR repo + cls_avg_factor = num_total_pos * 1.0 + \ + num_total_neg * self.map_bg_cls_weight + if self.sync_cls_avg_factor: + cls_avg_factor = reduce_mean( + cls_scores.new_tensor([cls_avg_factor])) + + cls_avg_factor = max(cls_avg_factor, 1) + loss_cls = self.loss_map_cls( + cls_scores, labels, label_weights, avg_factor=cls_avg_factor) + + # Compute the average number of gt boxes accross all gpus, for + # normalization purposes + num_total_pos = loss_cls.new_tensor([num_total_pos]) + num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() + + # regression L1 loss + bbox_preds = bbox_preds.reshape(-1, bbox_preds.size(-1)) + normalized_bbox_targets = normalize_2d_bbox(bbox_targets, self.pc_range) + # normalized_bbox_targets = bbox_targets + isnotnan = torch.isfinite(normalized_bbox_targets).all(dim=-1) + bbox_weights = bbox_weights * self.map_code_weights + + loss_bbox = self.loss_map_bbox( + bbox_preds[isnotnan, :4], + normalized_bbox_targets[isnotnan,:4], + bbox_weights[isnotnan, :4], + avg_factor=num_total_pos) + + # regression pts CD loss + # num_samples, num_order, num_pts, num_coords + normalized_pts_targets = normalize_2d_pts(pts_targets, self.pc_range) + + # num_samples, num_pts, num_coords + pts_preds = pts_preds.reshape(-1, pts_preds.size(-2), pts_preds.size(-1)) + if self.map_num_pts_per_vec != self.map_num_pts_per_gt_vec: + pts_preds = pts_preds.permute(0,2,1) + pts_preds = F.interpolate(pts_preds, size=(self.map_num_pts_per_gt_vec), mode='linear', + align_corners=True) + pts_preds = pts_preds.permute(0,2,1).contiguous() + + loss_pts = self.loss_map_pts( + pts_preds[isnotnan,:,:], + normalized_pts_targets[isnotnan,:,:], + pts_weights[isnotnan,:,:], + avg_factor=num_total_pos) + + dir_weights = pts_weights[:, :-self.map_dir_interval,0] + denormed_pts_preds = denormalize_2d_pts(pts_preds, self.pc_range) + denormed_pts_preds_dir = denormed_pts_preds[:,self.map_dir_interval:,:] - \ + denormed_pts_preds[:,:-self.map_dir_interval,:] + pts_targets_dir = pts_targets[:, self.map_dir_interval:,:] - pts_targets[:,:-self.map_dir_interval,:] + + loss_dir = self.loss_map_dir( + denormed_pts_preds_dir[isnotnan,:,:], + pts_targets_dir[isnotnan,:,:], + dir_weights[isnotnan,:], + avg_factor=num_total_pos) + + bboxes = denormalize_2d_bbox(bbox_preds, self.pc_range) + # regression IoU loss, defaultly GIoU loss + loss_iou = self.loss_map_iou( + bboxes[isnotnan, :4], + bbox_targets[isnotnan, :4], + bbox_weights[isnotnan, :4], + avg_factor=num_total_pos) + + if digit_version(TORCH_VERSION) >= digit_version('1.8'): + 
loss_cls = torch.nan_to_num(loss_cls) + loss_bbox = torch.nan_to_num(loss_bbox) + loss_iou = torch.nan_to_num(loss_iou) + loss_pts = torch.nan_to_num(loss_pts) + loss_dir = torch.nan_to_num(loss_dir) + + return loss_cls, loss_bbox, loss_iou, loss_pts, loss_dir + + @force_fp32(apply_to=('preds_dicts')) + def loss(self, + gt_bboxes_list, + gt_labels_list, + map_gt_bboxes_list, + map_gt_labels_list, + preds_dicts, + ego_fut_gt, + ego_fut_masks, + ego_fut_cmd, + gt_attr_labels, + gt_bboxes_ignore=None, + map_gt_bboxes_ignore=None, + img_metas=None): + """"Loss function. + Args: + + gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image + with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (num_gts, ). + preds_dicts: + all_cls_scores (Tensor): Classification score of all + decoder layers, has shape + [nb_dec, bs, num_query, cls_out_channels]. + all_bbox_preds (Tensor): Sigmoid regression + outputs of all decode layers. Each is a 4D-tensor with + normalized coordinate format (cx, cy, w, h) and shape + [nb_dec, bs, num_query, 4]. + enc_cls_scores (Tensor): Classification scores of + points on encode feature map , has shape + (N, h*w, num_classes). Only be passed when as_two_stage is + True, otherwise is None. + enc_bbox_preds (Tensor): Regression results of each points + on the encode feature map, has shape (N, h*w, 4). Only be + passed when as_two_stage is True, otherwise is None. + gt_bboxes_ignore (list[Tensor], optional): Bounding boxes + which can be ignored for each image. Default None. + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + assert gt_bboxes_ignore is None, \ + f'{self.__class__.__name__} only supports ' \ + f'for gt_bboxes_ignore setting to None.' 
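Right below, the ground-truth `LiDARInstance3DBoxes` are re-encoded so that their first three values hold the gravity (volumetric) center instead of the bottom-face center stored in the raw box tensor; `get_bboxes` applies the inverse shift (`bboxes[:, 2] -= bboxes[:, 5] * 0.5`) at decode time. A sketch of the equivalent manual conversion, assuming the usual [x, y, z_bottom, w, l, h, yaw, vx, vy] layout:

```python
import torch

def to_gravity_center_format(boxes: torch.Tensor) -> torch.Tensor:
    # boxes: (N, 9) with z stored at the bottom face and the height h at index 5
    out = boxes.clone()
    out[:, 2] = boxes[:, 2] + 0.5 * boxes[:, 5]  # lift z to the volumetric center
    return out
```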
+ + map_gt_vecs_list = copy.deepcopy(map_gt_bboxes_list) + + all_cls_scores = preds_dicts['all_cls_scores'] + all_bbox_preds = preds_dicts['all_bbox_preds'] + all_traj_preds = preds_dicts['all_traj_preds'] + all_traj_cls_scores = preds_dicts['all_traj_cls_scores'] + enc_cls_scores = preds_dicts['enc_cls_scores'] + enc_bbox_preds = preds_dicts['enc_bbox_preds'] + map_all_cls_scores = preds_dicts['map_all_cls_scores'] + map_all_bbox_preds = preds_dicts['map_all_bbox_preds'] + map_all_pts_preds = preds_dicts['map_all_pts_preds'] + map_enc_cls_scores = preds_dicts['map_enc_cls_scores'] + map_enc_bbox_preds = preds_dicts['map_enc_bbox_preds'] + map_enc_pts_preds = preds_dicts['map_enc_pts_preds'] + ego_fut_preds = preds_dicts['ego_fut_preds'] + + num_dec_layers = len(all_cls_scores) + device = gt_labels_list[0].device + + gt_bboxes_list = [torch.cat( + (gt_bboxes.gravity_center, gt_bboxes.tensor[:, 3:]), + dim=1).to(device) for gt_bboxes in gt_bboxes_list] + + all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)] + all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] + all_gt_attr_labels_list = [gt_attr_labels for _ in range(num_dec_layers)] + all_gt_bboxes_ignore_list = [ + gt_bboxes_ignore for _ in range(num_dec_layers) + ] + + losses_cls, losses_bbox, loss_traj, loss_traj_cls = multi_apply( + self.loss_single, all_cls_scores, all_bbox_preds, all_traj_preds, + all_traj_cls_scores, all_gt_bboxes_list, all_gt_labels_list, + all_gt_attr_labels_list, all_gt_bboxes_ignore_list) + + + num_dec_layers = len(map_all_cls_scores) + device = map_gt_labels_list[0].device + + map_gt_bboxes_list = [ + map_gt_bboxes.bbox.to(device) for map_gt_bboxes in map_gt_vecs_list] + map_gt_pts_list = [ + map_gt_bboxes.fixed_num_sampled_points.to(device) for map_gt_bboxes in map_gt_vecs_list] + if self.map_gt_shift_pts_pattern == 'v0': + map_gt_shifts_pts_list = [ + gt_bboxes.shift_fixed_num_sampled_points.to(device) for gt_bboxes in map_gt_vecs_list] + elif self.map_gt_shift_pts_pattern == 'v1': + map_gt_shifts_pts_list = [ + gt_bboxes.shift_fixed_num_sampled_points_v1.to(device) for gt_bboxes in map_gt_vecs_list] + elif self.map_gt_shift_pts_pattern == 'v2': + map_gt_shifts_pts_list = [ + gt_bboxes.shift_fixed_num_sampled_points_v2.to(device) for gt_bboxes in map_gt_vecs_list] + elif self.map_gt_shift_pts_pattern == 'v3': + map_gt_shifts_pts_list = [ + gt_bboxes.shift_fixed_num_sampled_points_v3.to(device) for gt_bboxes in map_gt_vecs_list] + elif self.map_gt_shift_pts_pattern == 'v4': + map_gt_shifts_pts_list = [ + gt_bboxes.shift_fixed_num_sampled_points_v4.to(device) for gt_bboxes in map_gt_vecs_list] + else: + raise NotImplementedError + map_all_gt_bboxes_list = [map_gt_bboxes_list for _ in range(num_dec_layers)] + map_all_gt_labels_list = [map_gt_labels_list for _ in range(num_dec_layers)] + map_all_gt_pts_list = [map_gt_pts_list for _ in range(num_dec_layers)] + map_all_gt_shifts_pts_list = [map_gt_shifts_pts_list for _ in range(num_dec_layers)] + map_all_gt_bboxes_ignore_list = [ + map_gt_bboxes_ignore for _ in range(num_dec_layers) + ] + + map_losses_cls, map_losses_bbox, map_losses_iou, \ + map_losses_pts, map_losses_dir = multi_apply( + self.map_loss_single, map_all_cls_scores, map_all_bbox_preds, + map_all_pts_preds, map_all_gt_bboxes_list, map_all_gt_labels_list, + map_all_gt_shifts_pts_list, map_all_gt_bboxes_ignore_list) + + loss_dict = dict() + # loss from the last decoder layer + loss_dict['loss_cls'] = losses_cls[-1] + loss_dict['loss_bbox'] = losses_bbox[-1] + 
loss_dict['loss_traj'] = loss_traj[-1] + loss_dict['loss_traj_cls'] = loss_traj_cls[-1] + # loss from the last decoder layer + loss_dict['loss_map_cls'] = map_losses_cls[-1] + loss_dict['loss_map_bbox'] = map_losses_bbox[-1] + loss_dict['loss_map_iou'] = map_losses_iou[-1] + loss_dict['loss_map_pts'] = map_losses_pts[-1] + loss_dict['loss_map_dir'] = map_losses_dir[-1] + + # Planning Loss + ego_fut_gt = ego_fut_gt.squeeze(1) + ego_fut_masks = ego_fut_masks.squeeze(1).squeeze(1) + ego_fut_cmd = ego_fut_cmd.squeeze(1).squeeze(1) + + batch, num_agent = all_traj_preds[-1].shape[:2] + agent_fut_preds = all_traj_preds[-1].view(batch, num_agent, self.fut_mode, self.fut_ts, 2) + agent_fut_cls_preds = all_traj_cls_scores[-1].view(batch, num_agent, self.fut_mode) + loss_plan_input = [ego_fut_preds, ego_fut_gt, ego_fut_masks, ego_fut_cmd, + map_all_pts_preds[-1], map_all_cls_scores[-1].sigmoid(), + all_bbox_preds[-1][..., 0:2], agent_fut_preds, + all_cls_scores[-1].sigmoid(), agent_fut_cls_preds.sigmoid()] + + loss_planning_dict = self.loss_planning(*loss_plan_input) + loss_dict['loss_plan_reg'] = loss_planning_dict['loss_plan_reg'] + loss_dict['loss_plan_bound'] = loss_planning_dict['loss_plan_bound'] + loss_dict['loss_plan_col'] = loss_planning_dict['loss_plan_col'] + loss_dict['loss_plan_dir'] = loss_planning_dict['loss_plan_dir'] + + # loss from other decoder layers + num_dec_layer = 0 + for loss_cls_i, loss_bbox_i in zip(losses_cls[:-1], losses_bbox[:-1]): + loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i + loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i + num_dec_layer += 1 + # loss from other decoder layers + num_dec_layer = 0 + for map_loss_cls_i, map_loss_bbox_i, map_loss_iou_i, map_loss_pts_i, map_loss_dir_i in zip( + map_losses_cls[:-1], + map_losses_bbox[:-1], + map_losses_iou[:-1], + map_losses_pts[:-1], + map_losses_dir[:-1] + ): + loss_dict[f'd{num_dec_layer}.loss_map_cls'] = map_loss_cls_i + loss_dict[f'd{num_dec_layer}.loss_map_bbox'] = map_loss_bbox_i + loss_dict[f'd{num_dec_layer}.loss_map_iou'] = map_loss_iou_i + loss_dict[f'd{num_dec_layer}.loss_map_pts'] = map_loss_pts_i + loss_dict[f'd{num_dec_layer}.loss_map_dir'] = map_loss_dir_i + num_dec_layer += 1 + + # loss of proposal generated from encode feature map. 
+ if enc_cls_scores is not None: + binary_labels_list = [ + torch.zeros_like(gt_labels_list[i]) + for i in range(len(all_gt_labels_list)) + ] + enc_loss_cls, enc_losses_bbox = \ + self.loss_single(enc_cls_scores, enc_bbox_preds, + gt_bboxes_list, binary_labels_list, + gt_bboxes_ignore) + loss_dict['enc_loss_cls'] = enc_loss_cls + loss_dict['enc_loss_bbox'] = enc_losses_bbox + + if map_enc_cls_scores is not None: + map_binary_labels_list = [ + torch.zeros_like(map_gt_labels_list[i]) + for i in range(len(map_all_gt_labels_list)) + ] + # TODO bug here, but we dont care enc_loss now + map_enc_loss_cls, map_enc_loss_bbox, map_enc_loss_iou, \ + map_enc_loss_pts, map_enc_loss_dir = \ + self.map_loss_single( + map_enc_cls_scores, map_enc_bbox_preds, + map_enc_pts_preds, map_gt_bboxes_list, + map_binary_labels_list, map_gt_pts_list, + map_gt_bboxes_ignore + ) + loss_dict['enc_loss_map_cls'] = map_enc_loss_cls + loss_dict['enc_loss_map_bbox'] = map_enc_loss_bbox + loss_dict['enc_loss_map_iou'] = map_enc_loss_iou + loss_dict['enc_loss_map_pts'] = map_enc_loss_pts + loss_dict['enc_loss_map_dir'] = map_enc_loss_dir + + return loss_dict + + @force_fp32(apply_to=('preds_dicts')) + def get_bboxes(self, preds_dicts, img_metas, rescale=False): + """Generate bboxes from bbox head predictions. + Args: + preds_dicts (tuple[list[dict]]): Prediction results. + img_metas (list[dict]): Point cloud and image's meta info. + Returns: + list[dict]: Decoded bbox, scores and labels after nms. + """ + + det_preds_dicts = self.bbox_coder.decode(preds_dicts) + # map_bboxes: xmin, ymin, xmax, ymax + map_preds_dicts = self.map_bbox_coder.decode(preds_dicts) + + num_samples = len(det_preds_dicts) + assert len(det_preds_dicts) == len(map_preds_dicts), \ + 'len(preds_dict) should be equal to len(map_preds_dicts)' + ret_list = [] + for i in range(num_samples): + preds = det_preds_dicts[i] + bboxes = preds['bboxes'] + bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 5] * 0.5 + code_size = bboxes.shape[-1] + bboxes = img_metas[i]['box_type_3d'](bboxes, code_size) + scores = preds['scores'] + labels = preds['labels'] + trajs = preds['trajs'] + + map_preds = map_preds_dicts[i] + map_bboxes = map_preds['map_bboxes'] + map_scores = map_preds['map_scores'] + map_labels = map_preds['map_labels'] + map_pts = map_preds['map_pts'] + + ret_list.append([bboxes, scores, labels, trajs, map_bboxes, + map_scores, map_labels, map_pts]) + + return ret_list + + def select_and_pad_pred_map( + self, + motion_pos, + map_query, + map_score, + map_pos, + map_thresh=0.5, + dis_thresh=None, + pe_normalization=True, + use_fix_pad=False + ): + """select_and_pad_pred_map. + Args: + motion_pos: [B, A, 2] + map_query: [B, P, D]. + map_score: [B, P, 3]. + map_pos: [B, P, pts, 2]. + map_thresh: map confidence threshold for filtering low-confidence preds + dis_thresh: distance threshold for masking far maps for each agent in cross-attn + use_fix_pad: always pad one lane instance for each batch + Returns: + selected_map_query: [B*A, P1(+1), D], P1 is the max inst num after filter and pad. 
+            selected_map_pos: [B*A, P1(+1), 2]
+            selected_padding_mask: [B*A, P1(+1)]
+        """
+
+        if dis_thresh is None:
+            raise NotImplementedError('Not implemented yet')
+
+        # use the closest point in each map instance as the instance's position
+        batch, num_map = map_pos.shape[:2]
+        map_dis = torch.sqrt(map_pos[..., 0]**2 + map_pos[..., 1]**2)
+        min_map_pos_idx = map_dis.argmin(dim=-1).flatten()  # [B*P]
+        min_map_pos = map_pos.flatten(0, 1)  # [B*P, pts, 2]
+        min_map_pos = min_map_pos[range(min_map_pos.shape[0]), min_map_pos_idx]  # [B*P, 2]
+        min_map_pos = min_map_pos.view(batch, num_map, 2)  # [B, P, 2]
+
+        # select & pad map vectors for each batch element using map_thresh
+        map_score = map_score.sigmoid()
+        map_max_score = map_score.max(dim=-1)[0]
+        map_idx = map_max_score > map_thresh
+        batch_max_pnum = 0
+        for i in range(map_score.shape[0]):
+            pnum = map_idx[i].sum()
+            if pnum > batch_max_pnum:
+                batch_max_pnum = pnum
+
+        selected_map_query, selected_map_pos, selected_padding_mask = [], [], []
+        for i in range(map_score.shape[0]):
+            dim = map_query.shape[-1]
+            valid_pnum = map_idx[i].sum()
+            valid_map_query = map_query[i, map_idx[i]]
+            valid_map_pos = min_map_pos[i, map_idx[i]]
+            pad_pnum = batch_max_pnum - valid_pnum
+            padding_mask = torch.tensor([False], device=map_score.device).repeat(batch_max_pnum)
+            if pad_pnum != 0:
+                valid_map_query = torch.cat([valid_map_query, torch.zeros((pad_pnum, dim), device=map_score.device)], dim=0)
+                valid_map_pos = torch.cat([valid_map_pos, torch.zeros((pad_pnum, 2), device=map_score.device)], dim=0)
+                padding_mask[valid_pnum:] = True
+            selected_map_query.append(valid_map_query)
+            selected_map_pos.append(valid_map_pos)
+            selected_padding_mask.append(padding_mask)
+
+        selected_map_query = torch.stack(selected_map_query, dim=0)
+        selected_map_pos = torch.stack(selected_map_pos, dim=0)
+        selected_padding_mask = torch.stack(selected_padding_mask, dim=0)
+
+        # generate a different PE for the map vectors of each agent
+        num_agent = motion_pos.shape[1]
+        selected_map_query = selected_map_query.unsqueeze(1).repeat(1, num_agent, 1, 1)  # [B, A, max_P, D]
+        selected_map_pos = selected_map_pos.unsqueeze(1).repeat(1, num_agent, 1, 1)  # [B, A, max_P, 2]
+        selected_padding_mask = selected_padding_mask.unsqueeze(1).repeat(1, num_agent, 1)  # [B, A, max_P]
+        # move lanes into each agent's local coordinate system
+        selected_map_dist = selected_map_pos - motion_pos[:, :, None, :]  # [B, A, max_P, 2]
+        if pe_normalization:
+            selected_map_pos = selected_map_pos - motion_pos[:, :, None, :]  # [B, A, max_P, 2]
+
+        # filter out far map instances for each agent
+        map_dis = torch.sqrt(selected_map_dist[..., 0]**2 + selected_map_dist[..., 1]**2)
+        valid_map_inst = (map_dis <= dis_thresh)  # [B, A, max_P]
+        invalid_map_inst = ~valid_map_inst
+        # boolean addition acts as a logical OR of the two masks
+        selected_padding_mask = selected_padding_mask + invalid_map_inst
+
+        selected_map_query = selected_map_query.flatten(0, 1)
+        selected_map_pos = selected_map_pos.flatten(0, 1)
+        selected_padding_mask = selected_padding_mask.flatten(0, 1)
+
+        num_batch = selected_padding_mask.shape[0]
+        feat_dim = selected_map_query.shape[-1]
+        if use_fix_pad:
+            pad_map_query = torch.zeros((num_batch, 1, feat_dim), device=selected_map_query.device)
+            pad_map_pos = torch.ones((num_batch, 1, 2), device=selected_map_pos.device)
+            pad_lane_mask = torch.tensor([False], device=selected_padding_mask.device).unsqueeze(0).repeat(num_batch, 1)
+            selected_map_query = torch.cat([selected_map_query, pad_map_query], dim=1)
+            selected_map_pos = torch.cat([selected_map_pos, pad_map_pos], dim=1)
+            selected_padding_mask = 
torch.cat([selected_padding_mask, pad_lane_mask], dim=1) + + return selected_map_query, selected_map_pos, selected_padding_mask + + + def select_and_pad_query( + self, + query, + query_pos, + query_score, + score_thresh=0.5, + use_fix_pad=True + ): + """select_and_pad_query. + Args: + query: [B, Q, D]. + query_pos: [B, Q, 2] + query_score: [B, Q, C]. + score_thresh: confidence threshold for filtering low-confidence query + use_fix_pad: always pad one query instance for each batch + Returns: + selected_query: [B, Q', D] + selected_query_pos: [B, Q', 2] + selected_padding_mask: [B, Q'] + """ + + # select & pad query for different batch using score_thresh + query_score = query_score.sigmoid() + query_score = query_score.max(dim=-1)[0] + query_idx = query_score > score_thresh + batch_max_qnum = 0 + for i in range(query_score.shape[0]): + qnum = query_idx[i].sum() + if qnum > batch_max_qnum: + batch_max_qnum = qnum + + selected_query, selected_query_pos, selected_padding_mask = [], [], [] + for i in range(query_score.shape[0]): + dim = query.shape[-1] + valid_qnum = query_idx[i].sum() + valid_query = query[i, query_idx[i]] + valid_query_pos = query_pos[i, query_idx[i]] + pad_qnum = batch_max_qnum - valid_qnum + padding_mask = torch.tensor([False], device=query_score.device).repeat(batch_max_qnum) + if pad_qnum != 0: + valid_query = torch.cat([valid_query, torch.zeros((pad_qnum, dim), device=query_score.device)], dim=0) + valid_query_pos = torch.cat([valid_query_pos, torch.zeros((pad_qnum, 2), device=query_score.device)], dim=0) + padding_mask[valid_qnum:] = True + selected_query.append(valid_query) + selected_query_pos.append(valid_query_pos) + selected_padding_mask.append(padding_mask) + + selected_query = torch.stack(selected_query, dim=0) + selected_query_pos = torch.stack(selected_query_pos, dim=0) + selected_padding_mask = torch.stack(selected_padding_mask, dim=0) + + num_batch = selected_padding_mask.shape[0] + feat_dim = selected_query.shape[-1] + if use_fix_pad: + pad_query = torch.zeros((num_batch, 1, feat_dim), device=selected_query.device) + pad_query_pos = torch.ones((num_batch, 1, 2), device=selected_query_pos.device) + pad_mask = torch.tensor([False], device=selected_padding_mask.device).unsqueeze(0).repeat(num_batch, 1) + selected_query = torch.cat([selected_query, pad_query], dim=1) + selected_query_pos = torch.cat([selected_query_pos, pad_query_pos], dim=1) + selected_padding_mask = torch.cat([selected_padding_mask, pad_mask], dim=1) + + return selected_query, selected_query_pos, selected_padding_mask diff --git a/mmcv/models/dense_heads/__init__.py b/mmcv/models/dense_heads/__init__.py new file mode 100644 index 0000000..6d6e404 --- /dev/null +++ b/mmcv/models/dense_heads/__init__.py @@ -0,0 +1,10 @@ +from .detr_head import DETRHead +from .rpn_head import RPNHead +from .ga_rpn_head import GARPNHead +from .track_head import BEVFormerTrackHead +from .panseg_head import PansegformerHead +from .occ_head import OccHead +from .motion_head import MotionHead +from .planning_head import PlanningHeadSingleMode +from .bevformer_head import BEVFormerHead +from .VAD_head import VADHead \ No newline at end of file diff --git a/mmcv/models/dense_heads/anchor3d_head.py b/mmcv/models/dense_heads/anchor3d_head.py new file mode 100644 index 0000000..4e566fa --- /dev/null +++ b/mmcv/models/dense_heads/anchor3d_head.py @@ -0,0 +1,513 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
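Each head in this file, like the ones exported from the `__init__.py` just above, is exposed through mmcv's `HEADS` registry via `@HEADS.register_module()`, so configs can instantiate a head purely from its string `type`. A toy sketch of that registry mechanism (heavily simplified relative to mmcv's real `Registry`; `DummyHead` is a hypothetical example class):

```python
class Registry:
    """Minimal name -> class registry with config-driven construction."""

    def __init__(self):
        self._modules = {}

    def register_module(self):
        def _register(cls):
            self._modules[cls.__name__] = cls  # key the class by its name
            return cls
        return _register

    def build(self, cfg):
        cfg = dict(cfg)                        # don't mutate the caller's config
        cls = self._modules[cfg.pop('type')]   # look up the class by 'type'
        return cls(**cfg)                      # remaining keys become kwargs

HEADS = Registry()

@HEADS.register_module()
class DummyHead:
    def __init__(self, num_classes=10):
        self.num_classes = num_classes

head = HEADS.build(dict(type='DummyHead', num_classes=3))
assert head.num_classes == 3
```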
+import numpy as np +import torch +from mmcv.utils import force_fp32 +from mmcv.models import BaseModule +from torch import nn as nn + +from mmcv.core import (PseudoSampler, box3d_multiclass_nms, limit_period, + xywhr2xyxyr) +from mmcv.core import (build_assigner, build_bbox_coder, + build_prior_generator, build_sampler, multi_apply) +from mmcv.models import HEADS +from ..builder import build_loss +from .train_mixins import AnchorTrainMixin + +@HEADS.register_module() +class Anchor3DHead(BaseModule, AnchorTrainMixin): + """Anchor head for SECOND/PointPillars/MVXNet/PartA2. + + Args: + num_classes (int): Number of classes. + in_channels (int): Number of channels in the input feature map. + train_cfg (dict): Train configs. + test_cfg (dict): Test configs. + feat_channels (int): Number of channels of the feature map. + use_direction_classifier (bool): Whether to add a direction classifier. + anchor_generator(dict): Config dict of anchor generator. + assigner_per_size (bool): Whether to do assignment for each separate + anchor size. + assign_per_class (bool): Whether to do assignment for each class. + diff_rad_by_sin (bool): Whether to change the difference into sin + difference for box regression loss. + dir_offset (float | int): The offset of BEV rotation angles. + (TODO: may be moved into box coder) + dir_limit_offset (float | int): The limited range of BEV + rotation angles. (TODO: may be moved into box coder) + bbox_coder (dict): Config dict of box coders. + loss_cls (dict): Config of classification loss. + loss_bbox (dict): Config of localization loss. + loss_dir (dict): Config of direction classifier loss. + """ + + def __init__(self, + num_classes, + in_channels, + train_cfg, + test_cfg, + feat_channels=256, + use_direction_classifier=True, + anchor_generator=dict( + type='Anchor3DRangeGenerator', + range=[0, -39.68, -1.78, 69.12, 39.68, -1.78], + strides=[2], + sizes=[[1.6, 3.9, 1.56]], + rotations=[0, 1.57], + custom_values=[], + reshape_out=False), + assigner_per_size=False, + assign_per_class=False, + diff_rad_by_sin=True, + dir_offset=0, + dir_limit_offset=1, + bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0), + loss_bbox=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict(type='CrossEntropyLoss', loss_weight=0.2), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.num_classes = num_classes + self.feat_channels = feat_channels + self.diff_rad_by_sin = diff_rad_by_sin + self.use_direction_classifier = use_direction_classifier + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.assigner_per_size = assigner_per_size + self.assign_per_class = assign_per_class + self.dir_offset = dir_offset + self.dir_limit_offset = dir_limit_offset + self.fp16_enabled = False + + # build anchor generator + self.anchor_generator = build_prior_generator(anchor_generator) + # In 3D detection, the anchor stride is connected with anchor size + self.num_anchors = self.anchor_generator.num_base_anchors + # build box coder + self.bbox_coder = build_bbox_coder(bbox_coder) + self.box_code_size = self.bbox_coder.code_size + + # build loss function + self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) + self.sampling = loss_cls['type'] not in ['FocalLoss', 'GHMC'] + if not self.use_sigmoid_cls: + self.num_classes += 1 + self.loss_cls = build_loss(loss_cls) + self.loss_bbox = build_loss(loss_bbox) + self.loss_dir = build_loss(loss_dir) + 
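Since `diff_rad_by_sin` defaults to `True`, the box regression loss built just above is applied to a sin-encoded rotation residual (see `add_sin_difference` further below): by the identity sin(a - b) = sin(a)cos(b) - cos(a)sin(b), the residual stays small and smooth across the +/-pi wrap-around, where a naive difference jumps by about 2*pi. A quick numeric check with illustrative values only:

```python
import torch

a = torch.tensor([3.1, -3.1])   # predicted yaws near the +/-pi wrap-around
b = torch.tensor([-3.1, 3.1])   # target yaws

direct = a - b                  # jumps by ~2*pi: tensor([ 6.2000, -6.2000])
encoded = torch.sin(a) * torch.cos(b) - torch.cos(a) * torch.sin(b)  # sin(a - b)
print(encoded)                  # ~tensor([-0.0831,  0.0831]): small, smooth residual
```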
self.fp16_enabled = False + + self._init_layers() + self._init_assigner_sampler() + + if init_cfg is None: + self.init_cfg = dict( + type='Normal', + layer='Conv2d', + std=0.01, + override=dict( + type='Normal', name='conv_cls', std=0.01, bias_prob=0.01)) + + def _init_assigner_sampler(self): + """Initialize the target assigner and sampler of the head.""" + if self.train_cfg is None: + return + + if self.sampling: + self.bbox_sampler = build_sampler(self.train_cfg.sampler) + else: + self.bbox_sampler = PseudoSampler() + if isinstance(self.train_cfg.assigner, dict): + self.bbox_assigner = build_assigner(self.train_cfg.assigner) + elif isinstance(self.train_cfg.assigner, list): + self.bbox_assigner = [ + build_assigner(res) for res in self.train_cfg.assigner + ] + + def _init_layers(self): + """Initialize neural network layers of the head.""" + self.cls_out_channels = self.num_anchors * self.num_classes + self.conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1) + self.conv_reg = nn.Conv2d(self.feat_channels, + self.num_anchors * self.box_code_size, 1) + if self.use_direction_classifier: + self.conv_dir_cls = nn.Conv2d(self.feat_channels, + self.num_anchors * 2, 1) + + def forward_single(self, x): + """Forward function on a single-scale feature map. + + Args: + x (torch.Tensor): Input features. + + Returns: + tuple[torch.Tensor]: Contain score of each class, bbox \ + regression and direction classification predictions. + """ + cls_score = self.conv_cls(x) + bbox_pred = self.conv_reg(x) + dir_cls_preds = None + if self.use_direction_classifier: + dir_cls_preds = self.conv_dir_cls(x) + return cls_score, bbox_pred, dir_cls_preds + + def forward(self, feats): + """Forward pass. + + Args: + feats (list[torch.Tensor]): Multi-level features, e.g., + features produced by FPN. + + Returns: + tuple[list[torch.Tensor]]: Multi-level class score, bbox \ + and direction predictions. + """ + return multi_apply(self.forward_single, feats) + + def get_anchors(self, featmap_sizes, input_metas, device='cuda'): + """Get anchors according to feature map sizes. + + Args: + featmap_sizes (list[tuple]): Multi-level feature map sizes. + input_metas (list[dict]): contain pcd and img's meta info. + device (str): device of current module. + + Returns: + list[list[torch.Tensor]]: Anchors of each image, valid flags \ + of each image. + """ + num_imgs = len(input_metas) + # since feature map sizes of all images are the same, we only compute + # anchors for one time + multi_level_anchors = self.anchor_generator.grid_anchors( + featmap_sizes, device=device) + anchor_list = [multi_level_anchors for _ in range(num_imgs)] + return anchor_list + + def loss_single(self, cls_score, bbox_pred, dir_cls_preds, labels, + label_weights, bbox_targets, bbox_weights, dir_targets, + dir_weights, num_total_samples): + """Calculate loss of Single-level results. + + Args: + cls_score (torch.Tensor): Class score in single-level. + bbox_pred (torch.Tensor): Bbox prediction in single-level. + dir_cls_preds (torch.Tensor): Predictions of direction class + in single-level. + labels (torch.Tensor): Labels of class. + label_weights (torch.Tensor): Weights of class loss. + bbox_targets (torch.Tensor): Targets of bbox predictions. + bbox_weights (torch.Tensor): Weights of bbox loss. + dir_targets (torch.Tensor): Targets of direction predictions. + dir_weights (torch.Tensor): Weights of direction loss. + num_total_samples (int): The number of valid samples. 
+ + Returns: + tuple[torch.Tensor]: Losses of class, bbox \ + and direction, respectively. + """ + # classification loss + if num_total_samples is None: + num_total_samples = int(cls_score.shape[0]) + labels = labels.reshape(-1) + label_weights = label_weights.reshape(-1) + cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.num_classes) + assert labels.max().item() <= self.num_classes + loss_cls = self.loss_cls( + cls_score, labels, label_weights, avg_factor=num_total_samples) + + # regression loss + bbox_pred = bbox_pred.permute(0, 2, 3, + 1).reshape(-1, self.box_code_size) + bbox_targets = bbox_targets.reshape(-1, self.box_code_size) + bbox_weights = bbox_weights.reshape(-1, self.box_code_size) + + bg_class_ind = self.num_classes + pos_inds = ((labels >= 0) + & (labels < bg_class_ind)).nonzero( + as_tuple=False).reshape(-1) + num_pos = len(pos_inds) + + pos_bbox_pred = bbox_pred[pos_inds] + pos_bbox_targets = bbox_targets[pos_inds] + pos_bbox_weights = bbox_weights[pos_inds] + + # dir loss + if self.use_direction_classifier: + dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).reshape(-1, 2) + dir_targets = dir_targets.reshape(-1) + dir_weights = dir_weights.reshape(-1) + pos_dir_cls_preds = dir_cls_preds[pos_inds] + pos_dir_targets = dir_targets[pos_inds] + pos_dir_weights = dir_weights[pos_inds] + + if num_pos > 0: + code_weight = self.train_cfg.get('code_weight', None) + if code_weight: + pos_bbox_weights = pos_bbox_weights * bbox_weights.new_tensor( + code_weight) + if self.diff_rad_by_sin: + pos_bbox_pred, pos_bbox_targets = self.add_sin_difference( + pos_bbox_pred, pos_bbox_targets) + loss_bbox = self.loss_bbox( + pos_bbox_pred, + pos_bbox_targets, + pos_bbox_weights, + avg_factor=num_total_samples) + + # direction classification loss + loss_dir = None + if self.use_direction_classifier: + loss_dir = self.loss_dir( + pos_dir_cls_preds, + pos_dir_targets, + pos_dir_weights, + avg_factor=num_total_samples) + else: + loss_bbox = pos_bbox_pred.sum() + if self.use_direction_classifier: + loss_dir = pos_dir_cls_preds.sum() + + return loss_cls, loss_bbox, loss_dir + + @staticmethod + def add_sin_difference(boxes1, boxes2): + """Convert the rotation difference to difference in sine function. + + Args: + boxes1 (torch.Tensor): Original Boxes in shape (NxC), where C>=7 + and the 7th dimension is rotation dimension. + boxes2 (torch.Tensor): Target boxes in shape (NxC), where C>=7 and + the 7th dimension is rotation dimension. + + Returns: + tuple[torch.Tensor]: ``boxes1`` and ``boxes2`` whose 7th \ + dimensions are changed. + """ + rad_pred_encoding = torch.sin(boxes1[..., 6:7]) * torch.cos( + boxes2[..., 6:7]) + rad_tg_encoding = torch.cos(boxes1[..., 6:7]) * torch.sin(boxes2[..., + 6:7]) + boxes1 = torch.cat( + [boxes1[..., :6], rad_pred_encoding, boxes1[..., 7:]], dim=-1) + boxes2 = torch.cat([boxes2[..., :6], rad_tg_encoding, boxes2[..., 7:]], + dim=-1) + return boxes1, boxes2 + + @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'dir_cls_preds')) + def loss(self, + cls_scores, + bbox_preds, + dir_cls_preds, + gt_bboxes, + gt_labels, + input_metas, + gt_bboxes_ignore=None): + """Calculate losses. + + Args: + cls_scores (list[torch.Tensor]): Multi-level class scores. + bbox_preds (list[torch.Tensor]): Multi-level bbox predictions. + dir_cls_preds (list[torch.Tensor]): Multi-level direction + class predictions. + gt_bboxes (list[:obj:`BaseInstance3DBoxes`]): Gt bboxes + of each sample. + gt_labels (list[torch.Tensor]): Gt labels of each sample. 
+            input_metas (list[dict]): Contain pcd and img's meta info.
+            gt_bboxes_ignore (None | list[torch.Tensor]): Specify
+                which bounding boxes can be ignored when computing the loss.
+
+        Returns:
+            dict[str, list[torch.Tensor]]: Classification, bbox, and \
+                direction losses of each level.
+
+                - loss_cls (list[torch.Tensor]): Classification losses.
+                - loss_bbox (list[torch.Tensor]): Box regression losses.
+                - loss_dir (list[torch.Tensor]): Direction classification \
+                    losses.
+        """
+        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+        assert len(featmap_sizes) == self.anchor_generator.num_levels
+        device = cls_scores[0].device
+        anchor_list = self.get_anchors(
+            featmap_sizes, input_metas, device=device)
+        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+        cls_reg_targets = self.anchor_target_3d(
+            anchor_list,
+            gt_bboxes,
+            input_metas,
+            gt_bboxes_ignore_list=gt_bboxes_ignore,
+            gt_labels_list=gt_labels,
+            num_classes=self.num_classes,
+            label_channels=label_channels,
+            sampling=self.sampling)
+
+        if cls_reg_targets is None:
+            return None
+        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+         dir_targets_list, dir_weights_list, num_total_pos,
+         num_total_neg) = cls_reg_targets
+        num_total_samples = (
+            num_total_pos + num_total_neg if self.sampling else num_total_pos)
+
+        losses_cls, losses_bbox, losses_dir = multi_apply(
+            self.loss_single,
+            cls_scores,
+            bbox_preds,
+            dir_cls_preds,
+            labels_list,
+            label_weights_list,
+            bbox_targets_list,
+            bbox_weights_list,
+            dir_targets_list,
+            dir_weights_list,
+            num_total_samples=num_total_samples)
+        return dict(
+            loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dir=losses_dir)
+
+    def get_bboxes(self,
+                   cls_scores,
+                   bbox_preds,
+                   dir_cls_preds,
+                   input_metas,
+                   cfg=None,
+                   rescale=False):
+        """Get bboxes of anchor head.
+
+        Args:
+            cls_scores (list[torch.Tensor]): Multi-level class scores.
+            bbox_preds (list[torch.Tensor]): Multi-level bbox predictions.
+            dir_cls_preds (list[torch.Tensor]): Multi-level direction
+                class predictions.
+            input_metas (list[dict]): Contain pcd and img's meta info.
+            cfg (None | :obj:`ConfigDict`): Training or testing config.
+            rescale (bool): Whether to rescale bboxes.
+
+        Returns:
+            list[tuple]: Prediction results of batches.
+        """
+        assert len(cls_scores) == len(bbox_preds)
+        assert len(cls_scores) == len(dir_cls_preds)
+        num_levels = len(cls_scores)
+        featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
+        device = cls_scores[0].device
+        mlvl_anchors = self.anchor_generator.grid_anchors(
+            featmap_sizes, device=device)
+        mlvl_anchors = [
+            anchor.reshape(-1, self.box_code_size) for anchor in mlvl_anchors
+        ]
+
+        result_list = []
+        for img_id in range(len(input_metas)):
+            cls_score_list = [
+                cls_scores[i][img_id].detach() for i in range(num_levels)
+            ]
+            bbox_pred_list = [
+                bbox_preds[i][img_id].detach() for i in range(num_levels)
+            ]
+            dir_cls_pred_list = [
+                dir_cls_preds[i][img_id].detach() for i in range(num_levels)
+            ]
+
+            input_meta = input_metas[img_id]
+            proposals = self.get_bboxes_single(cls_score_list, bbox_pred_list,
+                                               dir_cls_pred_list, mlvl_anchors,
+                                               input_meta, cfg, rescale)
+            result_list.append(proposals)
+        return result_list
+
+    def get_bboxes_single(self,
+                          cls_scores,
+                          bbox_preds,
+                          dir_cls_preds,
+                          mlvl_anchors,
+                          input_meta,
+                          cfg=None,
+                          rescale=False):
+        """Get bboxes of a single sample.
+
+        Args:
+            cls_scores (list[torch.Tensor]): Multi-level class scores of a
+                single sample.
+            bbox_preds (list[torch.Tensor]): Multi-level bbox predictions of
+                a single sample.
+            dir_cls_preds (list[torch.Tensor]): Multi-level direction class
+                predictions of a single sample.
+            mlvl_anchors (list[torch.Tensor]): Multi-level anchors of a
+                single sample.
+            input_meta (dict): Contain pcd and img's meta info.
+            cfg (None | :obj:`ConfigDict`): Training or testing config.
+            rescale (bool): Whether to rescale bboxes.
+
+        Returns:
+            tuple: Contain predictions of a single sample.
+
+                - bboxes (:obj:`BaseInstance3DBoxes`): Predicted 3d bboxes.
+                - scores (torch.Tensor): Class score of each bbox.
+                - labels (torch.Tensor): Label of each bbox.
+        """
+        cfg = self.test_cfg if cfg is None else cfg
+        assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
+        mlvl_bboxes = []
+        mlvl_scores = []
+        mlvl_dir_scores = []
+        for cls_score, bbox_pred, dir_cls_pred, anchors in zip(
+                cls_scores, bbox_preds, dir_cls_preds, mlvl_anchors):
+            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+            assert cls_score.size()[-2:] == dir_cls_pred.size()[-2:]
+            dir_cls_pred = dir_cls_pred.permute(1, 2, 0).reshape(-1, 2)
+            dir_cls_score = torch.max(dir_cls_pred, dim=-1)[1]
+
+            cls_score = cls_score.permute(1, 2,
+                                          0).reshape(-1, self.num_classes)
+            if self.use_sigmoid_cls:
+                scores = cls_score.sigmoid()
+            else:
+                scores = cls_score.softmax(-1)
+            bbox_pred = bbox_pred.permute(1, 2,
+                                          0).reshape(-1, self.box_code_size)
+
+            nms_pre = cfg.get('nms_pre', -1)
+            if nms_pre > 0 and scores.shape[0] > nms_pre:
+                if self.use_sigmoid_cls:
+                    max_scores, _ = scores.max(dim=1)
+                else:
+                    max_scores, _ = scores[:, :-1].max(dim=1)
+                _, topk_inds = max_scores.topk(nms_pre)
+                anchors = anchors[topk_inds, :]
+                bbox_pred = bbox_pred[topk_inds, :]
+                scores = scores[topk_inds, :]
+                dir_cls_score = dir_cls_score[topk_inds]
+
+            bboxes = self.bbox_coder.decode(anchors, bbox_pred)
+            mlvl_bboxes.append(bboxes)
+            mlvl_scores.append(scores)
+            mlvl_dir_scores.append(dir_cls_score)
+
+        mlvl_bboxes = torch.cat(mlvl_bboxes)
+        mlvl_bboxes_for_nms = xywhr2xyxyr(input_meta['box_type_3d'](
+            mlvl_bboxes, box_dim=self.box_code_size).bev)
+        mlvl_scores = torch.cat(mlvl_scores)
+        mlvl_dir_scores = torch.cat(mlvl_dir_scores)
+
+        if self.use_sigmoid_cls:
+            # Add a dummy background class to the front when using sigmoid
+            padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
+            mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
+
+        score_thr = cfg.get('score_thr', 0)
+        results = box3d_multiclass_nms(mlvl_bboxes, mlvl_bboxes_for_nms,
+                                       mlvl_scores, score_thr, cfg.max_num,
+                                       cfg, mlvl_dir_scores)
+        bboxes, scores, labels, dir_scores = results
+        if bboxes.shape[0] > 0:
+            dir_rot = limit_period(bboxes[..., 6] - self.dir_offset,
+                                   self.dir_limit_offset, np.pi)
+            bboxes[..., 6] = (
+                dir_rot + self.dir_offset +
+                np.pi * dir_scores.to(bboxes.dtype))
+        bboxes = input_meta['box_type_3d'](bboxes, box_dim=self.box_code_size)
+        return bboxes, scores, labels
\ No newline at end of file
diff --git a/mmcv/models/dense_heads/anchor_free_head.py b/mmcv/models/dense_heads/anchor_free_head.py
new file mode 100644
index 0000000..7df38d6
--- /dev/null
+++ b/mmcv/models/dense_heads/anchor_free_head.py
@@ -0,0 +1,340 @@
+from abc import abstractmethod
+
+import torch
+import torch.nn as nn
+from mmcv.models.bricks import ConvModule
+from mmcv.utils import force_fp32
+
+from ...core.utils import multi_apply
+from ..builder import HEADS, build_loss
+from .base_dense_head import BaseDenseHead
+from .dense_test_mixins import BBoxTestMixin
+
+
+@HEADS.register_module()
+class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
+    """Anchor-free head (FCOS, Fovea,
RepPoints, etc.). + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + feat_channels (int): Number of hidden channels. Used in child classes. + stacked_convs (int): Number of stacking convs of the head. + strides (tuple): Downsample factor of each feature map. + dcn_on_last_conv (bool): If true, use dcn in the last layer of + towers. Default: False. + conv_bias (bool | str): If specified as `auto`, it will be decided by + the norm_cfg. Bias of conv will be set as True if `norm_cfg` is + None, otherwise False. Default: "auto". + loss_cls (dict): Config of classification loss. + loss_bbox (dict): Config of localization loss. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Config dict for normalization layer. Default: None. + train_cfg (dict): Training config of anchor head. + test_cfg (dict): Testing config of anchor head. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ # noqa: W605 + + _version = 1 + + def __init__(self, + num_classes, + in_channels, + feat_channels=256, + stacked_convs=4, + strides=(4, 8, 16, 32, 64), + dcn_on_last_conv=False, + conv_bias='auto', + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='IoULoss', loss_weight=1.0), + conv_cfg=None, + norm_cfg=None, + train_cfg=None, + test_cfg=None, + init_cfg=dict( + type='Normal', + layer='Conv2d', + std=0.01, + override=dict( + type='Normal', + name='conv_cls', + std=0.01, + bias_prob=0.01))): + super(AnchorFreeHead, self).__init__(init_cfg) + self.num_classes = num_classes + self.cls_out_channels = num_classes + self.in_channels = in_channels + self.feat_channels = feat_channels + self.stacked_convs = stacked_convs + self.strides = strides + self.dcn_on_last_conv = dcn_on_last_conv + assert conv_bias == 'auto' or isinstance(conv_bias, bool) + self.conv_bias = conv_bias + self.loss_cls = build_loss(loss_cls) + self.loss_bbox = build_loss(loss_bbox) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.fp16_enabled = False + + self._init_layers() + + def _init_layers(self): + """Initialize layers of the head.""" + self._init_cls_convs() + self._init_reg_convs() + self._init_predictor() + + def _init_cls_convs(self): + """Initialize classification conv layers of the head.""" + self.cls_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + if self.dcn_on_last_conv and i == self.stacked_convs - 1: + conv_cfg = dict(type='DCNv2') + else: + conv_cfg = self.conv_cfg + self.cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=self.norm_cfg, + bias=self.conv_bias)) + + def _init_reg_convs(self): + """Initialize bbox regression conv layers of the head.""" + self.reg_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + if self.dcn_on_last_conv and i == self.stacked_convs - 1: + conv_cfg = dict(type='DCNv2') + else: + conv_cfg = self.conv_cfg + self.reg_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=self.norm_cfg, + bias=self.conv_bias)) + + def _init_predictor(self): + """Initialize predictor layers of the head.""" + self.conv_cls = nn.Conv2d( + 
self.feat_channels, self.cls_out_channels, 3, padding=1) + self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + """Hack some keys of the model state dict so that can load checkpoints + of previous version.""" + version = local_metadata.get('version', None) + if version is None: + # the key is different in early versions + # for example, 'fcos_cls' become 'conv_cls' now + bbox_head_keys = [ + k for k in state_dict.keys() if k.startswith(prefix) + ] + ori_predictor_keys = [] + new_predictor_keys = [] + # e.g. 'fcos_cls' or 'fcos_reg' + for key in bbox_head_keys: + ori_predictor_keys.append(key) + key = key.split('.') + conv_name = None + if key[1].endswith('cls'): + conv_name = 'conv_cls' + elif key[1].endswith('reg'): + conv_name = 'conv_reg' + elif key[1].endswith('centerness'): + conv_name = 'conv_centerness' + else: + assert NotImplementedError + if conv_name is not None: + key[1] = conv_name + new_predictor_keys.append('.'.join(key)) + else: + ori_predictor_keys.pop(-1) + for i in range(len(new_predictor_keys)): + state_dict[new_predictor_keys[i]] = state_dict.pop( + ori_predictor_keys[i]) + super()._load_from_state_dict(state_dict, prefix, local_metadata, + strict, missing_keys, unexpected_keys, + error_msgs) + + def forward(self, feats): + """Forward features from the upstream network. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: Usually contain classification scores and bbox predictions. + cls_scores (list[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_points * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_points * 4. + """ + return multi_apply(self.forward_single, feats)[:2] + + def forward_single(self, x): + """Forward features of a single scale level. + + Args: + x (Tensor): FPN feature maps of the specified stride. + + Returns: + tuple: Scores for each class, bbox predictions, features + after classification and regression conv layers, some + models needs these features like FCOS. + """ + cls_feat = x + reg_feat = x + + for cls_layer in self.cls_convs: + cls_feat = cls_layer(cls_feat) + cls_score = self.conv_cls(cls_feat) + + for reg_layer in self.reg_convs: + reg_feat = reg_layer(reg_feat) + bbox_pred = self.conv_reg(reg_feat) + return cls_score, bbox_pred, cls_feat, reg_feat + + @abstractmethod + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + """Compute loss of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level, + each is a 4D-tensor, the channel number is + num_points * num_classes. + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level, each is a 4D-tensor, the channel number is + num_points * 4. + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): class indices corresponding to each box + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. 
+ """ + + raise NotImplementedError + + @abstractmethod + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def get_bboxes(self, + cls_scores, + bbox_preds, + img_metas, + cfg=None, + rescale=None): + """Transform network output for a batch into bbox predictions. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_points * num_classes, H, W) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_points * 4, H, W) + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used + rescale (bool): If True, return boxes in original image space + """ + + raise NotImplementedError + + @abstractmethod + def get_targets(self, points, gt_bboxes_list, gt_labels_list): + """Compute regression, classification and centerness targets for points + in multiple images. + + Args: + points (list[Tensor]): Points of each fpn level, each has shape + (num_points, 2). + gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, + each has shape (num_gt, 4). + gt_labels_list (list[Tensor]): Ground truth labels of each box, + each has shape (num_gt,). + """ + raise NotImplementedError + + def _get_points_single(self, + featmap_size, + stride, + dtype, + device, + flatten=False): + """Get points of a single scale level.""" + h, w = featmap_size + # First create Range with the default dtype, than convert to + # target `dtype` for onnx exporting. + x_range = torch.arange(w, device=device).to(dtype) + y_range = torch.arange(h, device=device).to(dtype) + y, x = torch.meshgrid(y_range, x_range) + if flatten: + y = y.flatten() + x = x.flatten() + return y, x + + def get_points(self, featmap_sizes, dtype, device, flatten=False): + """Get points according to feature map sizes. + + Args: + featmap_sizes (list[tuple]): Multi-level feature map sizes. + dtype (torch.dtype): Type of points. + device (torch.device): Device of points. + + Returns: + tuple: points of each image. + """ + mlvl_points = [] + for i in range(len(featmap_sizes)): + mlvl_points.append( + self._get_points_single(featmap_sizes[i], self.strides[i], + dtype, device, flatten)) + return mlvl_points + + def aug_test(self, feats, img_metas, rescale=False): + """Test function with test time augmentation. + + Args: + feats (list[Tensor]): the outer list indicates test-time + augmentations and inner Tensor should have a shape NxCxHxW, + which contains features for all images in the batch. + img_metas (list[list[dict]]): the outer list indicates test-time + augs (multiscale, flip, etc.) and the inner list indicates + images in a batch. each dict has image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. 
+ + Returns: + list[ndarray]: bbox results of each class + """ + return self.aug_test_bboxes(feats, img_metas, rescale=rescale) diff --git a/mmcv/models/dense_heads/anchor_head.py b/mmcv/models/dense_heads/anchor_head.py new file mode 100644 index 0000000..fd06528 --- /dev/null +++ b/mmcv/models/dense_heads/anchor_head.py @@ -0,0 +1,746 @@ +import torch +import torch.nn as nn +from mmcv.utils import force_fp32 + +from mmcv.core.anchor import (anchor_inside_flags, build_anchor_generator, images_to_levels) +from mmcv.core.bbox.builder import (build_assigner, build_bbox_coder, build_sampler) +from mmcv.core.utils import (multi_apply, unmap) +from mmcv.core.post_processing.bbox_nms import (multiclass_nms) +from ..builder import HEADS, build_loss +from .base_dense_head import BaseDenseHead +from .dense_test_mixins import BBoxTestMixin + + +@HEADS.register_module() +class AnchorHead(BaseDenseHead, BBoxTestMixin): + """Anchor-based head (RPN, RetinaNet, SSD, etc.). + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + feat_channels (int): Number of hidden channels. Used in child classes. + anchor_generator (dict): Config dict for anchor generator + bbox_coder (dict): Config of bounding box coder. + reg_decoded_bbox (bool): If true, the regression loss would be + applied directly on decoded bounding boxes, converting both + the predicted boxes and regression targets to absolute + coordinates format. Default False. It should be `True` when + using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. + loss_cls (dict): Config of classification loss. + loss_bbox (dict): Config of localization loss. + train_cfg (dict): Training config of anchor head. + test_cfg (dict): Testing config of anchor head. + init_cfg (dict or list[dict], optional): Initialization config dict. 
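+
+    Example (an illustrative sketch; the shapes follow the default
+    anchor generator of 3 scales x 3 ratios = 9 anchors per location):
+        >>> import torch
+        >>> self = AnchorHead(num_classes=2, in_channels=8)
+        >>> feats = [torch.rand(1, 8, s, s) for s in [16, 8]]
+        >>> cls_scores, bbox_preds = self.forward(feats)
+        >>> # sigmoid classification: 9 anchors * 2 classes = 18 channels
+        >>> assert cls_scores[0].shape == (1, 18, 16, 16)
+        >>> assert bbox_preds[0].shape == (1, 36, 16, 16)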
+ """ # noqa: W605 + + def __init__(self, + num_classes, + in_channels, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8, 16, 32], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + clip_border=True, + target_means=(.0, .0, .0, .0), + target_stds=(1.0, 1.0, 1.0, 1.0)), + reg_decoded_bbox=False, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0), + loss_bbox=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), + train_cfg=None, + test_cfg=None, + init_cfg=dict(type='Normal', layers='Conv2d', std=0.01)): + super(AnchorHead, self).__init__(init_cfg) + self.in_channels = in_channels + self.num_classes = num_classes + self.feat_channels = feat_channels + self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) + # TODO better way to determine whether sample or not + self.sampling = loss_cls['type'] not in [ + 'FocalLoss', 'GHMC', 'QualityFocalLoss' + ] + if self.use_sigmoid_cls: + self.cls_out_channels = num_classes + else: + self.cls_out_channels = num_classes + 1 + + if self.cls_out_channels <= 0: + raise ValueError(f'num_classes={num_classes} is too small') + self.reg_decoded_bbox = reg_decoded_bbox + + self.bbox_coder = build_bbox_coder(bbox_coder) + self.loss_cls = build_loss(loss_cls) + self.loss_bbox = build_loss(loss_bbox) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + if self.train_cfg: + self.assigner = build_assigner(self.train_cfg.assigner) + # use PseudoSampler when sampling is False + if self.sampling and hasattr(self.train_cfg, 'sampler'): + sampler_cfg = self.train_cfg.sampler + else: + sampler_cfg = dict(type='PseudoSampler') + self.sampler = build_sampler(sampler_cfg, context=self) + self.fp16_enabled = False + + self.anchor_generator = build_anchor_generator(anchor_generator) + # usually the numbers of anchors for each level are the same + # except SSD detectors + self.num_anchors = self.anchor_generator.num_base_anchors[0] + self._init_layers() + + def _init_layers(self): + """Initialize layers of the head.""" + self.conv_cls = nn.Conv2d(self.in_channels, + self.num_anchors * self.cls_out_channels, 1) + self.conv_reg = nn.Conv2d(self.in_channels, self.num_anchors * 4, 1) + + def forward_single(self, x): + """Forward feature of a single scale level. + + Args: + x (Tensor): Features of a single scale level. + + Returns: + tuple: + cls_score (Tensor): Cls scores for a single scale level \ + the channels number is num_anchors * num_classes. + bbox_pred (Tensor): Box energies / deltas for a single scale \ + level, the channels number is num_anchors * 4. + """ + cls_score = self.conv_cls(x) + bbox_pred = self.conv_reg(x) + return cls_score, bbox_pred + + def forward(self, feats): + """Forward features from the upstream network. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + + Returns: + tuple: A tuple of classification scores and bbox prediction. + + - cls_scores (list[Tensor]): Classification scores for all \ + scale levels, each is a 4D-tensor, the channels number \ + is num_anchors * num_classes. + - bbox_preds (list[Tensor]): Box energies / deltas for all \ + scale levels, each is a 4D-tensor, the channels number \ + is num_anchors * 4. + """ + return multi_apply(self.forward_single, feats) + + def get_anchors(self, featmap_sizes, img_metas, device='cuda'): + """Get anchors according to feature map sizes. + + Args: + featmap_sizes (list[tuple]): Multi-level feature map sizes. 
+ img_metas (list[dict]): Image meta info. + device (torch.device | str): Device for returned tensors + + Returns: + tuple: + anchor_list (list[Tensor]): Anchors of each image. + valid_flag_list (list[Tensor]): Valid flags of each image. + """ + num_imgs = len(img_metas) + + # since feature map sizes of all images are the same, we only compute + # anchors for one time + multi_level_anchors = self.anchor_generator.grid_anchors( + featmap_sizes, device) + anchor_list = [multi_level_anchors for _ in range(num_imgs)] + + # for each image, we compute valid flags of multi level anchors + valid_flag_list = [] + for img_id, img_meta in enumerate(img_metas): + multi_level_flags = self.anchor_generator.valid_flags( + featmap_sizes, img_meta['pad_shape'], device) + valid_flag_list.append(multi_level_flags) + + return anchor_list, valid_flag_list + + def _get_targets_single(self, + flat_anchors, + valid_flags, + gt_bboxes, + gt_bboxes_ignore, + gt_labels, + img_meta, + label_channels=1, + unmap_outputs=True): + """Compute regression and classification targets for anchors in a + single image. + + Args: + flat_anchors (Tensor): Multi-level anchors of the image, which are + concatenated into a single tensor of shape (num_anchors ,4) + valid_flags (Tensor): Multi level valid flags of the image, + which are concatenated into a single tensor of + shape (num_anchors,). + gt_bboxes (Tensor): Ground truth bboxes of the image, + shape (num_gts, 4). + gt_bboxes_ignore (Tensor): Ground truth bboxes to be + ignored, shape (num_ignored_gts, 4). + img_meta (dict): Meta info of the image. + gt_labels (Tensor): Ground truth labels of each box, + shape (num_gts,). + label_channels (int): Channel of label. + unmap_outputs (bool): Whether to map outputs back to the original + set of anchors. 
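+
+        Note:
+            If none of the anchors lie inside the border-clipped image
+            region, a tuple of seven ``None`` values is returned.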
+ + Returns: + tuple: + labels_list (list[Tensor]): Labels of each level + label_weights_list (list[Tensor]): Label weights of each level + bbox_targets_list (list[Tensor]): BBox targets of each level + bbox_weights_list (list[Tensor]): BBox weights of each level + num_total_pos (int): Number of positive samples in all images + num_total_neg (int): Number of negative samples in all images + """ + inside_flags = anchor_inside_flags(flat_anchors, valid_flags, + img_meta['img_shape'][:2], + self.train_cfg.allowed_border) + if not inside_flags.any(): + return (None, ) * 7 + # assign gt and sample anchors + anchors = flat_anchors[inside_flags, :] + + assign_result = self.assigner.assign( + anchors, gt_bboxes, gt_bboxes_ignore, + None if self.sampling else gt_labels) + sampling_result = self.sampler.sample(assign_result, anchors, + gt_bboxes) + + num_valid_anchors = anchors.shape[0] + bbox_targets = torch.zeros_like(anchors) + bbox_weights = torch.zeros_like(anchors) + labels = anchors.new_full((num_valid_anchors, ), + self.num_classes, + dtype=torch.long) + label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) + + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + if len(pos_inds) > 0: + if not self.reg_decoded_bbox: + pos_bbox_targets = self.bbox_coder.encode( + sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) + else: + pos_bbox_targets = sampling_result.pos_gt_bboxes + bbox_targets[pos_inds, :] = pos_bbox_targets + bbox_weights[pos_inds, :] = 1.0 + if gt_labels is None: + # Only rpn gives gt_labels as None + # Foreground is the first class since v2.5.0 + labels[pos_inds] = 0 + else: + labels[pos_inds] = gt_labels[ + sampling_result.pos_assigned_gt_inds] + if self.train_cfg.pos_weight <= 0: + label_weights[pos_inds] = 1.0 + else: + label_weights[pos_inds] = self.train_cfg.pos_weight + if len(neg_inds) > 0: + label_weights[neg_inds] = 1.0 + + # map up to original set of anchors + if unmap_outputs: + num_total_anchors = flat_anchors.size(0) + labels = unmap( + labels, num_total_anchors, inside_flags, + fill=self.num_classes) # fill bg label + label_weights = unmap(label_weights, num_total_anchors, + inside_flags) + bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) + bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) + + return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, + neg_inds, sampling_result) + + def get_targets(self, + anchor_list, + valid_flag_list, + gt_bboxes_list, + img_metas, + gt_bboxes_ignore_list=None, + gt_labels_list=None, + label_channels=1, + unmap_outputs=True, + return_sampling_results=False): + """Compute regression and classification targets for anchors in + multiple images. + + Args: + anchor_list (list[list[Tensor]]): Multi level anchors of each + image. The outer list indicates images, and the inner list + corresponds to feature levels of the image. Each element of + the inner list is a tensor of shape (num_anchors, 4). + valid_flag_list (list[list[Tensor]]): Multi level valid flags of + each image. The outer list indicates images, and the inner list + corresponds to feature levels of the image. Each element of + the inner list is a tensor of shape (num_anchors, ) + gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. + img_metas (list[dict]): Meta info of each image. + gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be + ignored. + gt_labels_list (list[Tensor]): Ground truth labels of each box. + label_channels (int): Channel of label. 
+ unmap_outputs (bool): Whether to map outputs back to the original + set of anchors. + + Returns: + tuple: Usually returns a tuple containing learning targets. + + - labels_list (list[Tensor]): Labels of each level. + - label_weights_list (list[Tensor]): Label weights of each \ + level. + - bbox_targets_list (list[Tensor]): BBox targets of each level. + - bbox_weights_list (list[Tensor]): BBox weights of each level. + - num_total_pos (int): Number of positive samples in all \ + images. + - num_total_neg (int): Number of negative samples in all \ + images. + additional_returns: This function enables user-defined returns from + `self._get_targets_single`. These returns are currently refined + to properties at each feature map (i.e. having HxW dimension). + The results will be concatenated after the end + """ + num_imgs = len(img_metas) + assert len(anchor_list) == len(valid_flag_list) == num_imgs + + # anchor number of multi levels + num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] + # concat all level anchors to a single tensor + concat_anchor_list = [] + concat_valid_flag_list = [] + for i in range(num_imgs): + assert len(anchor_list[i]) == len(valid_flag_list[i]) + concat_anchor_list.append(torch.cat(anchor_list[i])) + concat_valid_flag_list.append(torch.cat(valid_flag_list[i])) + + # compute targets for each image + if gt_bboxes_ignore_list is None: + gt_bboxes_ignore_list = [None for _ in range(num_imgs)] + if gt_labels_list is None: + gt_labels_list = [None for _ in range(num_imgs)] + results = multi_apply( + self._get_targets_single, + concat_anchor_list, + concat_valid_flag_list, + gt_bboxes_list, + gt_bboxes_ignore_list, + gt_labels_list, + img_metas, + label_channels=label_channels, + unmap_outputs=unmap_outputs) + (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, + pos_inds_list, neg_inds_list, sampling_results_list) = results[:7] + rest_results = list(results[7:]) # user-added return values + # no valid anchors + if any([labels is None for labels in all_labels]): + return None + # sampled anchors of all images + num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) + num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) + # split targets to a list w.r.t. multiple levels + labels_list = images_to_levels(all_labels, num_level_anchors) + label_weights_list = images_to_levels(all_label_weights, + num_level_anchors) + bbox_targets_list = images_to_levels(all_bbox_targets, + num_level_anchors) + bbox_weights_list = images_to_levels(all_bbox_weights, + num_level_anchors) + res = (labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, num_total_pos, num_total_neg) + if return_sampling_results: + res = res + (sampling_results_list, ) + for i, r in enumerate(rest_results): # user-added return values + rest_results[i] = images_to_levels(r, num_level_anchors) + + return res + tuple(rest_results) + + def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights, + bbox_targets, bbox_weights, num_total_samples): + """Compute loss of a single scale level. + + Args: + cls_score (Tensor): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W). + bbox_pred (Tensor): Box energies / deltas for each scale + level with shape (N, num_anchors * 4, H, W). + anchors (Tensor): Box reference for each scale level with shape + (N, num_total_anchors, 4). + labels (Tensor): Labels of each anchors with shape + (N, num_total_anchors). 
+            label_weights (Tensor): Label weights of each anchor with shape
+                (N, num_total_anchors)
+            bbox_targets (Tensor): BBox regression targets of each anchor with
+                shape (N, num_total_anchors, 4).
+            bbox_weights (Tensor): BBox regression loss weights of each anchor
+                with shape (N, num_total_anchors, 4).
+            num_total_samples (int): If sampling, the total number of samples
+                equals the number of total anchors; otherwise, it is the
+                number of positive anchors.
+
+        Returns:
+            dict[str, Tensor]: A dictionary of loss components.
+        """
+        # classification loss
+        labels = labels.reshape(-1)
+        label_weights = label_weights.reshape(-1)
+        cls_score = cls_score.permute(0, 2, 3,
+                                      1).reshape(-1, self.cls_out_channels)
+        loss_cls = self.loss_cls(
+            cls_score, labels, label_weights, avg_factor=num_total_samples)
+        # regression loss
+        bbox_targets = bbox_targets.reshape(-1, 4)
+        bbox_weights = bbox_weights.reshape(-1, 4)
+        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
+        if self.reg_decoded_bbox:
+            # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
+            # is applied directly on the decoded bounding boxes, it
+            # decodes the already encoded coordinates to absolute format.
+            anchors = anchors.reshape(-1, 4)
+            bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
+        loss_bbox = self.loss_bbox(
+            bbox_pred,
+            bbox_targets,
+            bbox_weights,
+            avg_factor=num_total_samples)
+        return loss_cls, loss_bbox
+
+    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+    def loss(self,
+             cls_scores,
+             bbox_preds,
+             gt_bboxes,
+             gt_labels,
+             img_metas,
+             gt_bboxes_ignore=None):
+        """Compute losses of the head.
+
+        Args:
+            cls_scores (list[Tensor]): Box scores for each scale level
+                Has shape (N, num_anchors * num_classes, H, W)
+            bbox_preds (list[Tensor]): Box energies / deltas for each scale
+                level with shape (N, num_anchors * 4, H, W)
+            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels (list[Tensor]): Class indices corresponding to each box.
+            img_metas (list[dict]): Meta information of each image, e.g.,
+                image size, scaling factor, etc.
+            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
+                boxes can be ignored when computing the loss. Default: None
+
+        Returns:
+            dict[str, Tensor]: A dictionary of loss components.
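+
+        Note:
+            The ``num_total_samples`` used for loss normalization equals
+            ``num_total_pos + num_total_neg`` when random sampling is used,
+            and ``num_total_pos`` otherwise (e.g. with focal loss), following
+            the ``self.sampling`` flag set in ``__init__``.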
+ """ + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.anchor_generator.num_levels + + device = cls_scores[0].device + + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas, device=device) + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + cls_reg_targets = self.get_targets( + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=label_channels) + if cls_reg_targets is None: + return None + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + num_total_samples = ( + num_total_pos + num_total_neg if self.sampling else num_total_pos) + + # anchor number of multi levels + num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] + # concat all level anchors and flags to a single tensor + concat_anchor_list = [] + for i in range(len(anchor_list)): + concat_anchor_list.append(torch.cat(anchor_list[i])) + all_anchor_list = images_to_levels(concat_anchor_list, + num_level_anchors) + + losses_cls, losses_bbox = multi_apply( + self.loss_single, + cls_scores, + bbox_preds, + all_anchor_list, + labels_list, + label_weights_list, + bbox_targets_list, + bbox_weights_list, + num_total_samples=num_total_samples) + return dict(loss_cls=losses_cls, loss_bbox=losses_bbox) + + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def get_bboxes(self, + cls_scores, + bbox_preds, + img_metas, + cfg=None, + rescale=False, + with_nms=True): + """Transform network output for a batch into bbox predictions. + + Args: + cls_scores (list[Tensor]): Box scores for each level in the + feature pyramid, has shape + (N, num_anchors * num_classes, H, W). + bbox_preds (list[Tensor]): Box energies / deltas for each + level in the feature pyramid, has shape + (N, num_anchors * 4, H, W). + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + cfg (mmcv.Config | None): Test / postprocessing configuration, + if None, test_cfg would be used + rescale (bool): If True, return boxes in original image space. + Default: False. + with_nms (bool): If True, do nms before return boxes. + Default: True. + + Returns: + list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. + The first item is an (n, 5) tensor, where 5 represent + (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. + The shape of the second tensor in the tuple is (n,), and + each element represents the class label of the corresponding + box. 
+ + Example: + >>> import mmcv + >>> self = AnchorHead( + >>> num_classes=9, + >>> in_channels=1, + >>> anchor_generator=dict( + >>> type='AnchorGenerator', + >>> scales=[8], + >>> ratios=[0.5, 1.0, 2.0], + >>> strides=[4,])) + >>> img_metas = [{'img_shape': (32, 32, 3), 'scale_factor': 1}] + >>> cfg = mmcv.Config(dict( + >>> score_thr=0.00, + >>> nms=dict(type='nms', iou_thr=1.0), + >>> max_per_img=10)) + >>> feat = torch.rand(1, 1, 3, 3) + >>> cls_score, bbox_pred = self.forward_single(feat) + >>> # note the input lists are over different levels, not images + >>> cls_scores, bbox_preds = [cls_score], [bbox_pred] + >>> result_list = self.get_bboxes(cls_scores, bbox_preds, + >>> img_metas, cfg) + >>> det_bboxes, det_labels = result_list[0] + >>> assert len(result_list) == 1 + >>> assert det_bboxes.shape[1] == 5 + >>> assert len(det_bboxes) == len(det_labels) == cfg.max_per_img + """ + assert len(cls_scores) == len(bbox_preds) + num_levels = len(cls_scores) + + device = cls_scores[0].device + featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] + mlvl_anchors = self.anchor_generator.grid_anchors( + featmap_sizes, device=device) + + mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)] + mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)] + + if torch.onnx.is_in_onnx_export(): + assert len( + img_metas + ) == 1, 'Only support one input image while in exporting to ONNX' + img_shapes = img_metas[0]['img_shape_for_onnx'] + else: + img_shapes = [ + img_metas[i]['img_shape'] + for i in range(cls_scores[0].shape[0]) + ] + scale_factors = [ + img_metas[i]['scale_factor'] for i in range(cls_scores[0].shape[0]) + ] + + if with_nms: + # some heads don't support with_nms argument + result_list = self._get_bboxes(mlvl_cls_scores, mlvl_bbox_preds, + mlvl_anchors, img_shapes, + scale_factors, cfg, rescale) + else: + result_list = self._get_bboxes(mlvl_cls_scores, mlvl_bbox_preds, + mlvl_anchors, img_shapes, + scale_factors, cfg, rescale, + with_nms) + return result_list + + def _get_bboxes(self, + mlvl_cls_scores, + mlvl_bbox_preds, + mlvl_anchors, + img_shapes, + scale_factors, + cfg, + rescale=False, + with_nms=True): + """Transform outputs for a batch item into bbox predictions. + + Args: + mlvl_cls_scores (list[Tensor]): Each element in the list is + the scores of bboxes of single level in the feature pyramid, + has shape (N, num_anchors * num_classes, H, W). + mlvl_bbox_preds (list[Tensor]): Each element in the list is the + bboxes predictions of single level in the feature pyramid, + has shape (N, num_anchors * 4, H, W). + mlvl_anchors (list[Tensor]): Each element in the list is + the anchors of single level in feature pyramid, has shape + (num_anchors, 4). + img_shapes (list[tuple[int]]): Each tuple in the list represent + the shape(height, width, 3) of single image in the batch. + scale_factors (list[ndarray]): Scale factor of the batch + image arange as list[(w_scale, h_scale, w_scale, h_scale)]. + cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used. + rescale (bool): If True, return boxes in original image space. + Default: False. + with_nms (bool): If True, do nms before return boxes. + Default: True. + + Returns: + list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. + The first item is an (n, 5) tensor, where 5 represent + (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. 
+ The shape of the second tensor in the tuple is (n,), and + each element represents the class label of the corresponding + box. + """ + cfg = self.test_cfg if cfg is None else cfg + assert len(mlvl_cls_scores) == len(mlvl_bbox_preds) == len( + mlvl_anchors) + batch_size = mlvl_cls_scores[0].shape[0] + # convert to tensor to keep tracing + nms_pre_tensor = torch.tensor( + cfg.get('nms_pre', -1), + device=mlvl_cls_scores[0].device, + dtype=torch.long) + + mlvl_bboxes = [] + mlvl_scores = [] + for cls_score, bbox_pred, anchors in zip(mlvl_cls_scores, + mlvl_bbox_preds, + mlvl_anchors): + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + cls_score = cls_score.permute(0, 2, 3, + 1).reshape(batch_size, -1, + self.cls_out_channels) + if self.use_sigmoid_cls: + scores = cls_score.sigmoid() + else: + scores = cls_score.softmax(-1) + bbox_pred = bbox_pred.permute(0, 2, 3, + 1).reshape(batch_size, -1, 4) + anchors = anchors.expand_as(bbox_pred) + # Always keep topk op for dynamic input in onnx + from mmcv.core.export import get_k_for_topk + nms_pre = get_k_for_topk(nms_pre_tensor, bbox_pred.shape[1]) + if nms_pre > 0: + # Get maximum scores for foreground classes. + if self.use_sigmoid_cls: + max_scores, _ = scores.max(-1) + else: + # remind that we set FG labels to [0, num_class-1] + # since mmcv v2.0 + # BG cat_id: num_class + max_scores, _ = scores[..., :-1].max(-1) + + _, topk_inds = max_scores.topk(nms_pre) + batch_inds = torch.arange(batch_size).view( + -1, 1).expand_as(topk_inds) + anchors = anchors[batch_inds, topk_inds, :] + bbox_pred = bbox_pred[batch_inds, topk_inds, :] + scores = scores[batch_inds, topk_inds, :] + + bboxes = self.bbox_coder.decode( + anchors, bbox_pred, max_shape=img_shapes) + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + + batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1) + if rescale: + batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor( + scale_factors).unsqueeze(1) + batch_mlvl_scores = torch.cat(mlvl_scores, dim=1) + + # Replace multiclass_nms with ONNX::NonMaxSuppression in deployment + if torch.onnx.is_in_onnx_export() and with_nms: + from mmcv.core.export import add_dummy_nms_for_onnx + # ignore background class + if not self.use_sigmoid_cls: + num_classes = batch_mlvl_scores.shape[2] - 1 + batch_mlvl_scores = batch_mlvl_scores[..., :num_classes] + max_output_boxes_per_class = cfg.nms.get( + 'max_output_boxes_per_class', 200) + iou_threshold = cfg.nms.get('iou_threshold', 0.5) + score_threshold = cfg.score_thr + nms_pre = cfg.get('deploy_nms_pre', -1) + return add_dummy_nms_for_onnx(batch_mlvl_bboxes, batch_mlvl_scores, + max_output_boxes_per_class, + iou_threshold, score_threshold, + nms_pre, cfg.max_per_img) + if self.use_sigmoid_cls: + # Add a dummy background class to the backend when using sigmoid + # remind that we set FG labels to [0, num_class-1] since mmcv v2.0 + # BG cat_id: num_class + padding = batch_mlvl_scores.new_zeros(batch_size, + batch_mlvl_scores.shape[1], + 1) + batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1) + + if with_nms: + det_results = [] + for (mlvl_bboxes, mlvl_scores) in zip(batch_mlvl_bboxes, + batch_mlvl_scores): + det_bbox, det_label = multiclass_nms(mlvl_bboxes, mlvl_scores, + cfg.score_thr, cfg.nms, + cfg.max_per_img) + det_results.append(tuple([det_bbox, det_label])) + else: + det_results = [ + tuple(mlvl_bs) + for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores) + ] + return det_results + + def aug_test(self, feats, img_metas, rescale=False): + """Test function with test time 
augmentation. + + Args: + feats (list[Tensor]): the outer list indicates test-time + augmentations and inner Tensor should have a shape NxCxHxW, + which contains features for all images in the batch. + img_metas (list[list[dict]]): the outer list indicates test-time + augs (multiscale, flip, etc.) and the inner list indicates + images in a batch. each dict has image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. + The first item is ``bboxes`` with shape (n, 5), where + 5 represent (tl_x, tl_y, br_x, br_y, score). + The shape of the second tensor in the tuple is ``labels`` + with shape (n,), The length of list should always be 1. + """ + return self.aug_test_bboxes(feats, img_metas, rescale=rescale) diff --git a/mmcv/models/dense_heads/base_dense_head.py b/mmcv/models/dense_heads/base_dense_head.py new file mode 100644 index 0000000..e2a422d --- /dev/null +++ b/mmcv/models/dense_heads/base_dense_head.py @@ -0,0 +1,78 @@ +from abc import ABCMeta, abstractmethod + +from mmcv.models.backbones import BaseModule + + +class BaseDenseHead(BaseModule, metaclass=ABCMeta): + """Base class for DenseHeads.""" + + def __init__(self, init_cfg=None): + super(BaseDenseHead, self).__init__(init_cfg) + + @abstractmethod + def loss(self, **kwargs): + """Compute losses of the head.""" + pass + + @abstractmethod + def get_bboxes(self, **kwargs): + """Transform network output for a batch into bbox predictions.""" + pass + + def forward_train(self, + x, + img_metas, + gt_bboxes, + gt_labels=None, + gt_bboxes_ignore=None, + proposal_cfg=None, + **kwargs): + """ + Args: + x (list[Tensor]): Features from FPN. + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes (Tensor): Ground truth bboxes of the image, + shape (num_gts, 4). + gt_labels (Tensor): Ground truth labels of each box, + shape (num_gts,). + gt_bboxes_ignore (Tensor): Ground truth bboxes to be + ignored, shape (num_ignored_gts, 4). + proposal_cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used + + Returns: + tuple: + losses: (dict[str, Tensor]): A dictionary of loss components. + proposal_list (list[Tensor]): Proposals of each image. + """ + outs = self(x) + if gt_labels is None: + loss_inputs = outs + (gt_bboxes, img_metas) + else: + loss_inputs = outs + (gt_bboxes, gt_labels, img_metas) + losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + if proposal_cfg is None: + return losses + else: + proposal_list = self.get_bboxes(*outs, img_metas, cfg=proposal_cfg) + return losses, proposal_list + + def simple_test(self, feats, img_metas, rescale=False): + """Test function without test-time augmentation. + + Args: + feats (tuple[torch.Tensor]): Multi-level features from the + upstream network, each is a 4D-tensor. + img_metas (list[dict]): List of image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. + The first item is ``bboxes`` with shape (n, 5), + where 5 represent (tl_x, tl_y, br_x, br_y, score). 
+                The shape of the second tensor in the tuple is ``labels``
+                with shape (n,).
+        """
+        return self.simple_test_bboxes(feats, img_metas, rescale=rescale)
diff --git a/mmcv/models/dense_heads/bev_head.py b/mmcv/models/dense_heads/bev_head.py
new file mode 100644
index 0000000..2227a0c
--- /dev/null
+++ b/mmcv/models/dense_heads/bev_head.py
@@ -0,0 +1,130 @@
+import copy
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.models import Linear, bias_init_with_prob
+from mmcv.utils import TORCH_VERSION, digit_version
+
+from mmcv.core import (multi_apply, reduce_mean)
+from mmcv.models.utils.transformer import inverse_sigmoid
+from mmcv.models import HEADS, BaseModule
+from mmcv.models.dense_heads import DETRHead
+from mmcv.core.bbox.coder import build_bbox_coder
+from mmcv.core.bbox.util import normalize_bbox
+from mmcv.models.bricks import build_positional_encoding
+from mmcv.utils import force_fp32
+import numpy as np
+import cv2 as cv
+from mmcv.models.modules.transformerV2 import PerceptionTransformerBEVEncoder
+from mmcv.models.utils import build_transformer
+from mmcv.models.builder import build_head
+from mmcv.models.dense_heads.free_anchor3d_head import FreeAnchor3DHead
+
+@HEADS.register_module()
+class BEVHead(BaseModule):
+    def __init__(self,
+                 bev_h,
+                 bev_w,
+                 pc_range,
+                 embed_dims,
+                 transformer,
+                 positional_encoding: dict,
+                 pts_bbox_head_3d: dict,
+                 init_cfg=None,
+                 **kwargs,
+                 ):
+        super(BEVHead, self).__init__(init_cfg=init_cfg)
+        self.bev_h = bev_h
+        self.bev_w = bev_w
+        self.embed_dims = embed_dims
+        self.pc_range = pc_range
+        self.fp16_enabled = False
+        self.transformer: PerceptionTransformerBEVEncoder = build_transformer(transformer)
+        self.positional_encoding = build_positional_encoding(positional_encoding)
+
+        pts_bbox_head_3d.update(kwargs)
+        self.pts_bbox_head_3d = build_head(pts_bbox_head_3d)
+        self.real_w = self.pc_range[3] - self.pc_range[0]
+        self.real_h = self.pc_range[4] - self.pc_range[1]
+
+        self._init_layers()
+
+    def init_weights(self):
+        """Initialize weights of the multi-view BEV encoder."""
+        self.transformer.init_weights()
+
+    def _init_layers(self):
+        """Initialize the BEV query embedding of the head."""
+
+        self.bev_embedding = nn.Embedding(self.bev_h * self.bev_w, self.embed_dims)
+
+    @force_fp32(apply_to=('mlvl_feats', 'prev_bev'))
+    def forward(self, mlvl_feats, img_metas, prev_bev=None, only_bev=False):
+        bs, num_cam, _, _, _ = mlvl_feats[0].shape
+        dtype = mlvl_feats[0].dtype
+        bev_queries = self.bev_embedding.weight.to(dtype)
+
+        bev_mask = torch.zeros((bs, self.bev_h, self.bev_w),
+                               device=bev_queries.device).to(dtype)
+        bev_pos = self.positional_encoding(bev_mask).to(dtype)
+
+        bev_embed = self.transformer(
+            mlvl_feats,
+            bev_queries,
+            self.bev_h,
+            self.bev_w,
+            grid_length=(self.real_h / self.bev_h,
+                         self.real_w / self.bev_w),
+            bev_pos=bev_pos,
+            img_metas=img_metas,
+            prev_bev=prev_bev,
+        )
+
+        if only_bev:
+            return bev_embed
+
+        bev_feature = bev_embed.permute(0, 2, 1).reshape(bs, self.embed_dims, self.bev_h, self.bev_w)
+        ret = {}
+        ret['pred'] = self.pts_bbox_head_3d([bev_feature, ])
+        if not self.training:
+            ret['bev_embed'] = bev_embed
+        return ret
+
+    @force_fp32(apply_to=('ret', ))
+    def loss(self,
+             gt_bboxes_list,
+             gt_labels_list,
+             ret,
+             gt_bboxes_ignore=None,
+             img_metas=None):
+        assert gt_bboxes_ignore is None
+        return self.pts_bbox_head_3d.loss(gt_bboxes_list, gt_labels_list, ret['pred'],
+                                          gt_bboxes_ignore=gt_bboxes_ignore, img_metas=img_metas)
+
+    @force_fp32(apply_to=('ret', ))
+    def get_bboxes(self, ret, img_metas, rescale=False):
+        return self.pts_bbox_head_3d.get_bboxes(ret['pred'], img_metas)
+
+@HEADS.register_module()
+class FreeAnchor3DHeadV2(FreeAnchor3DHead):
+    @force_fp32(apply_to=('pred', ))
+    def loss(self,
+             gt_bboxes_list,
+             gt_labels_list,
+             pred,
+             gt_bboxes_ignore=None,
+             img_metas=None):
+        cls_scores, bbox_preds, dir_cls_preds = pred
+
+        return super().loss(cls_scores, bbox_preds, dir_cls_preds, gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore)
+
+    @force_fp32(apply_to=('pred', ))
+    def get_bboxes(self, pred, img_metas, rescale=False):
+        cls_scores, bbox_preds, dir_cls_preds = pred
+        return super().get_bboxes(
+            cls_scores,
+            bbox_preds,
+            dir_cls_preds,
+            img_metas,
+            cfg=None,
+            rescale=rescale)
\ No newline at end of file
diff --git a/mmcv/models/dense_heads/bevformer_head.py b/mmcv/models/dense_heads/bevformer_head.py
new file mode 100644
index 0000000..c1852db
--- /dev/null
+++ b/mmcv/models/dense_heads/bevformer_head.py
@@ -0,0 +1,686 @@
+import copy
+import torch
+import torch.nn as nn
+
+from mmcv.models.bricks import Linear
+from mmcv.models.utils.weight_init import bias_init_with_prob
+
+from mmcv.utils import TORCH_VERSION, digit_version
+from mmcv.core.utils.misc import multi_apply
+from mmcv.core.utils.dist_utils import reduce_mean
+from mmcv.models.utils.transformer import inverse_sigmoid
+from mmcv.models import HEADS
+from mmcv.models.dense_heads import DETRHead
+from mmcv.core.bbox.coder import build_bbox_coder
+from mmcv.core.bbox.util import normalize_bbox
+from mmcv.utils import force_fp32, auto_fp16
+
+
+@HEADS.register_module()
+class BEVFormerHead(DETRHead):
+    """Head of BEVFormer.
+    Args:
+        with_box_refine (bool): Whether to refine the reference points
+            in the decoder. Defaults to False.
+        as_two_stage (bool): Whether to generate the proposal from
+            the outputs of encoder.
+        transformer (obj:`ConfigDict`): ConfigDict is used for building
+            the Encoder and Decoder.
+        bev_h, bev_w (int): spatial shape of BEV queries.
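+        bbox_coder (obj:`ConfigDict`): Config of the bbox coder; its
+            ``pc_range`` is also used as the perception range of the head.
+        num_cls_fcs (int): Number of fc layers in the classification and
+            regression branches. Defaults to 2.
+        code_weights (list[float], optional): Per-dimension weights of the
+            box code. Defaults to ``[1.0] * 8 + [0.2, 0.2]``, which
+            down-weights the velocity terms.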
+ """ + + def __init__(self, + *args, + with_box_refine=False, + as_two_stage=False, + transformer=None, + bbox_coder=None, + num_cls_fcs=2, + code_weights=None, + bev_h=30, + bev_w=30, + **kwargs): + + self.bev_h = bev_h + self.bev_w = bev_w + self.fp16_enabled = False + + self.with_box_refine = with_box_refine + self.as_two_stage = as_two_stage + if self.as_two_stage: + transformer['as_two_stage'] = self.as_two_stage + if 'code_size' in kwargs: + self.code_size = kwargs['code_size'] + else: + self.code_size = 10 + if code_weights is not None: + self.code_weights = code_weights + else: + self.code_weights = [1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2] + + self.bbox_coder = build_bbox_coder(bbox_coder) + self.pc_range = self.bbox_coder.pc_range + self.real_w = self.pc_range[3] - self.pc_range[0] + self.real_h = self.pc_range[4] - self.pc_range[1] + self.num_cls_fcs = num_cls_fcs - 1 + super(BEVFormerHead, self).__init__( + *args, transformer=transformer, **kwargs) + self.code_weights = nn.Parameter(torch.tensor( + self.code_weights, requires_grad=False), requires_grad=False) + + def _init_layers(self): + """Initialize classification branch and regression branch of head.""" + cls_branch = [] + for _ in range(self.num_reg_fcs): + cls_branch.append(Linear(self.embed_dims, self.embed_dims)) + cls_branch.append(nn.LayerNorm(self.embed_dims)) + cls_branch.append(nn.ReLU(inplace=True)) + cls_branch.append(Linear(self.embed_dims, self.cls_out_channels)) + fc_cls = nn.Sequential(*cls_branch) + + reg_branch = [] + for _ in range(self.num_reg_fcs): + reg_branch.append(Linear(self.embed_dims, self.embed_dims)) + reg_branch.append(nn.ReLU()) + reg_branch.append(Linear(self.embed_dims, self.code_size)) + reg_branch = nn.Sequential(*reg_branch) + + def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + # last reg_branch is used to generate proposal from + # encode feature map when as_two_stage is True. + num_pred = (self.transformer.decoder.num_layers + 1) if \ + self.as_two_stage else self.transformer.decoder.num_layers + + if self.with_box_refine: + self.cls_branches = _get_clones(fc_cls, num_pred) + self.reg_branches = _get_clones(reg_branch, num_pred) + else: + self.cls_branches = nn.ModuleList( + [fc_cls for _ in range(num_pred)]) + self.reg_branches = nn.ModuleList( + [reg_branch for _ in range(num_pred)]) + + if not self.as_two_stage: + self.bev_embedding = nn.Embedding( + self.bev_h * self.bev_w, self.embed_dims) + self.query_embedding = nn.Embedding(self.num_query, + self.embed_dims * 2) + + def init_weights(self): + """Initialize weights of the DeformDETR head.""" + self.transformer.init_weights() + if self.loss_cls.use_sigmoid: + bias_init = bias_init_with_prob(0.01) + for m in self.cls_branches: + nn.init.constant_(m[-1].bias, bias_init) + + @auto_fp16(apply_to=('mlvl_feats')) + def forward(self, mlvl_feats, img_metas, prev_bev=None, only_bev=False): + """Forward function. + Args: + mlvl_feats (tuple[Tensor]): Features from the upstream + network, each is a 5D-tensor with shape + (B, N, C, H, W). + prev_bev: previous bev featues + only_bev: only compute BEV features with encoder. + Returns: + all_cls_scores (Tensor): Outputs from the classification head, \ + shape [nb_dec, bs, num_query, cls_out_channels]. Note \ + cls_out_channels should includes background. + all_bbox_preds (Tensor): Sigmoid outputs from the regression \ + head with normalized coordinate format (cx, cy, w, l, cz, h, theta, vx, vy). \ + Shape [nb_dec, bs, num_query, 9]. 
+ """ + bs, num_cam, _, _, _ = mlvl_feats[0].shape + dtype = mlvl_feats[0].dtype + object_query_embeds = self.query_embedding.weight.to(dtype) + bev_queries = self.bev_embedding.weight.to(dtype) + + bev_mask = torch.zeros((bs, self.bev_h, self.bev_w), + device=bev_queries.device).to(dtype) + bev_pos = self.positional_encoding(bev_mask).to(dtype) + + if only_bev: # only use encoder to obtain BEV features, TODO: refine the workaround + return self.transformer.get_bev_features( + mlvl_feats, + bev_queries, + self.bev_h, + self.bev_w, + grid_length=(self.real_h / self.bev_h, + self.real_w / self.bev_w), + bev_pos=bev_pos, + img_metas=img_metas, + prev_bev=prev_bev, + ) + else: + outputs = self.transformer( + mlvl_feats, + bev_queries, + object_query_embeds, + self.bev_h, + self.bev_w, + grid_length=(self.real_h / self.bev_h, + self.real_w / self.bev_w), + bev_pos=bev_pos, + reg_branches=self.reg_branches if self.with_box_refine else None, # noqa:E501 + cls_branches=self.cls_branches if self.as_two_stage else None, + img_metas=img_metas, + prev_bev=prev_bev + ) + + bev_embed, hs, init_reference, inter_references = outputs + hs = hs.permute(0, 2, 1, 3) + outputs_classes = [] + outputs_coords = [] + for lvl in range(hs.shape[0]): + if lvl == 0: + reference = init_reference + else: + reference = inter_references[lvl - 1] + reference = inverse_sigmoid(reference) + outputs_class = self.cls_branches[lvl](hs[lvl]) + tmp = self.reg_branches[lvl](hs[lvl]) + + # TODO: check the shape of reference + assert reference.shape[-1] == 3 + tmp[..., 0:2] += reference[..., 0:2] + tmp[..., 0:2] = tmp[..., 0:2].sigmoid() + tmp[..., 4:5] += reference[..., 2:3] + tmp[..., 4:5] = tmp[..., 4:5].sigmoid() + tmp[..., 0:1] = (tmp[..., 0:1] * (self.pc_range[3] - + self.pc_range[0]) + self.pc_range[0]) + tmp[..., 1:2] = (tmp[..., 1:2] * (self.pc_range[4] - + self.pc_range[1]) + self.pc_range[1]) + tmp[..., 4:5] = (tmp[..., 4:5] * (self.pc_range[5] - + self.pc_range[2]) + self.pc_range[2]) + + # TODO: check if using sigmoid + outputs_coord = tmp + outputs_classes.append(outputs_class) + outputs_coords.append(outputs_coord) + + outputs_classes = torch.stack(outputs_classes) + outputs_coords = torch.stack(outputs_coords) + + outs = { + 'bev_embed': bev_embed, + 'all_cls_scores': outputs_classes, + 'all_bbox_preds': outputs_coords, + 'enc_cls_scores': None, + 'enc_bbox_preds': None, + } + + return outs + + def _get_target_single(self, + cls_score, + bbox_pred, + gt_labels, + gt_bboxes, + gt_bboxes_ignore=None): + """"Compute regression and classification targets for one image. + Outputs from a single decoder layer of a single feature level are used. + Args: + cls_score (Tensor): Box score logits from a single decoder layer + for one image. Shape [num_query, cls_out_channels]. + bbox_pred (Tensor): Sigmoid outputs from a single decoder layer + for one image, with normalized coordinate (cx, cy, w, h) and + shape [num_query, 4]. + gt_bboxes (Tensor): Ground truth bboxes for one image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (Tensor): Ground truth class indices for one image + with shape (num_gts, ). + gt_bboxes_ignore (Tensor, optional): Bounding boxes + which can be ignored. Default None. + Returns: + tuple[Tensor]: a tuple containing the following for one image. + - labels (Tensor): Labels of each image. + - label_weights (Tensor]): Label weights of each image. + - bbox_targets (Tensor): BBox targets of each image. + - bbox_weights (Tensor): BBox weights of each image. 
+ - pos_inds (Tensor): Sampled positive indices for each image. + - neg_inds (Tensor): Sampled negative indices for each image. + """ + + num_bboxes = bbox_pred.size(0) + # assigner and sampler + gt_c = gt_bboxes.shape[-1] + + assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes, + gt_labels, gt_bboxes_ignore) + + sampling_result = self.sampler.sample(assign_result, bbox_pred, + gt_bboxes) + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + + # label targets + labels = gt_bboxes.new_full((num_bboxes,), + self.num_classes, + dtype=torch.long) + labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] + label_weights = gt_bboxes.new_ones(num_bboxes) + + # bbox targets + bbox_targets = torch.zeros_like(bbox_pred)[..., :gt_c] + bbox_weights = torch.zeros_like(bbox_pred) + bbox_weights[pos_inds] = 1.0 + + # DETR + bbox_targets[pos_inds] = sampling_result.pos_gt_bboxes + return (labels, label_weights, bbox_targets, bbox_weights, + pos_inds, neg_inds) + + def get_targets(self, + cls_scores_list, + bbox_preds_list, + gt_bboxes_list, + gt_labels_list, + gt_bboxes_ignore_list=None): + """"Compute regression and classification targets for a batch image. + Outputs from a single decoder layer of a single feature level are used. + Args: + cls_scores_list (list[Tensor]): Box score logits from a single + decoder layer for each image with shape [num_query, + cls_out_channels]. + bbox_preds_list (list[Tensor]): Sigmoid outputs from a single + decoder layer for each image, with normalized coordinate + (cx, cy, w, h) and shape [num_query, 4]. + gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image + with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (num_gts, ). + gt_bboxes_ignore_list (list[Tensor], optional): Bounding + boxes which can be ignored for each image. Default None. + Returns: + tuple: a tuple containing the following targets. + - labels_list (list[Tensor]): Labels for all images. + - label_weights_list (list[Tensor]): Label weights for all \ + images. + - bbox_targets_list (list[Tensor]): BBox targets for all \ + images. + - bbox_weights_list (list[Tensor]): BBox weights for all \ + images. + - num_total_pos (int): Number of positive samples in all \ + images. + - num_total_neg (int): Number of negative samples in all \ + images. + """ + assert gt_bboxes_ignore_list is None, \ + 'Only supports for gt_bboxes_ignore setting to None.' + num_imgs = len(cls_scores_list) + gt_bboxes_ignore_list = [ + gt_bboxes_ignore_list for _ in range(num_imgs) + ] + + (labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply( + self._get_target_single, cls_scores_list, bbox_preds_list, + gt_labels_list, gt_bboxes_list, gt_bboxes_ignore_list) + num_total_pos = sum((inds.numel() for inds in pos_inds_list)) + num_total_neg = sum((inds.numel() for inds in neg_inds_list)) + return (labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, num_total_pos, num_total_neg) + + def loss_single(self, + cls_scores, + bbox_preds, + gt_bboxes_list, + gt_labels_list, + gt_bboxes_ignore_list=None): + """"Loss function for outputs from a single decoder layer of a single + feature level. + Args: + cls_scores (Tensor): Box score logits from a single decoder layer + for all images. Shape [bs, num_query, cls_out_channels]. 
+            bbox_preds (Tensor): Sigmoid outputs from a single decoder layer
+                for all images, with normalized coordinate (cx, cy, w, h) and
+                shape [bs, num_query, 4].
+            gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
+                with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels_list (list[Tensor]): Ground truth class indices for each
+                image with shape (num_gts, ).
+            gt_bboxes_ignore_list (list[Tensor], optional): Bounding
+                boxes which can be ignored for each image. Default None.
+        Returns:
+            dict[str, Tensor]: A dictionary of loss components for outputs from
+                a single decoder layer.
+        """
+        num_imgs = cls_scores.size(0)
+        cls_scores_list = [cls_scores[i] for i in range(num_imgs)]
+        bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)]
+        cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list,
+                                           gt_bboxes_list, gt_labels_list,
+                                           gt_bboxes_ignore_list)
+        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+         num_total_pos, num_total_neg) = cls_reg_targets
+        labels = torch.cat(labels_list, 0)
+        label_weights = torch.cat(label_weights_list, 0)
+        bbox_targets = torch.cat(bbox_targets_list, 0)
+        bbox_weights = torch.cat(bbox_weights_list, 0)
+
+        # classification loss
+        cls_scores = cls_scores.reshape(-1, self.cls_out_channels)
+        # construct weighted avg_factor to match with the official DETR repo
+        cls_avg_factor = num_total_pos * 1.0 + \
+            num_total_neg * self.bg_cls_weight
+        if self.sync_cls_avg_factor:
+            cls_avg_factor = reduce_mean(
+                cls_scores.new_tensor([cls_avg_factor]))
+
+        cls_avg_factor = max(cls_avg_factor, 1)
+        loss_cls = self.loss_cls(
+            cls_scores, labels, label_weights, avg_factor=cls_avg_factor)
+
+        # Compute the average number of gt boxes across all GPUs, for
+        # normalization purposes
+        num_total_pos = loss_cls.new_tensor([num_total_pos])
+        num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item()
+
+        # regression L1 loss
+        bbox_preds = bbox_preds.reshape(-1, bbox_preds.size(-1))
+        normalized_bbox_targets = normalize_bbox(bbox_targets, self.pc_range)
+        isnotnan = torch.isfinite(normalized_bbox_targets).all(dim=-1)
+        bbox_weights = bbox_weights * self.code_weights
+
+        loss_bbox = self.loss_bbox(
+            bbox_preds[isnotnan, :10],
+            normalized_bbox_targets[isnotnan, :10],
+            bbox_weights[isnotnan, :10],
+            avg_factor=num_total_pos)
+        if digit_version(TORCH_VERSION) >= digit_version('1.8'):
+            loss_cls = torch.nan_to_num(loss_cls)
+            loss_bbox = torch.nan_to_num(loss_bbox)
+        return loss_cls, loss_bbox
+
+    @force_fp32(apply_to=('preds_dicts',))
+    def loss(self,
+             gt_bboxes_list,
+             gt_labels_list,
+             preds_dicts,
+             gt_bboxes_ignore=None,
+             img_metas=None):
+        """Loss function.
+        Args:
+            gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
+                with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels_list (list[Tensor]): Ground truth class indices for each
+                image with shape (num_gts, ).
+            preds_dicts:
+                all_cls_scores (Tensor): Classification score of all
+                    decoder layers, has shape
+                    [nb_dec, bs, num_query, cls_out_channels].
+                all_bbox_preds (Tensor): Sigmoid regression
+                    outputs of all decoder layers. Each is a 4D-tensor with
+                    normalized coordinate format (cx, cy, w, h) and shape
+                    [nb_dec, bs, num_query, 4].
+                enc_cls_scores (Tensor): Classification scores of
+                    points on the encoded feature map, has shape
+                    (N, h*w, num_classes). Only passed when as_two_stage is
+                    True; otherwise it is None.
+                enc_bbox_preds (Tensor): Regression results of each point
+                    on the encoded feature map, has shape (N, h*w, 4). Only
+                    passed when as_two_stage is True; otherwise it is None.
+            gt_bboxes_ignore (list[Tensor], optional): Bounding boxes
+                which can be ignored for each image. Default None.
+        Returns:
+            dict[str, Tensor]: A dictionary of loss components.
+        """
+        assert gt_bboxes_ignore is None, \
+            f'{self.__class__.__name__} only supports ' \
+            f'gt_bboxes_ignore set to None.'
+
+        all_cls_scores = preds_dicts['all_cls_scores']
+        all_bbox_preds = preds_dicts['all_bbox_preds']
+        enc_cls_scores = preds_dicts['enc_cls_scores']
+        enc_bbox_preds = preds_dicts['enc_bbox_preds']
+
+        num_dec_layers = len(all_cls_scores)
+        device = gt_labels_list[0].device
+
+        gt_bboxes_list = [torch.cat(
+            (gt_bboxes.gravity_center, gt_bboxes.tensor[:, 3:]),
+            dim=1).to(device) for gt_bboxes in gt_bboxes_list]
+
+        all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)]
+        all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)]
+        all_gt_bboxes_ignore_list = [
+            gt_bboxes_ignore for _ in range(num_dec_layers)
+        ]
+
+        losses_cls, losses_bbox = multi_apply(
+            self.loss_single, all_cls_scores, all_bbox_preds,
+            all_gt_bboxes_list, all_gt_labels_list,
+            all_gt_bboxes_ignore_list)
+
+        loss_dict = dict()
+        # loss of proposals generated from the encoded feature map.
+        if enc_cls_scores is not None:
+            # one binary label tensor per image
+            binary_labels_list = [
+                torch.zeros_like(gt_labels_list[i])
+                for i in range(len(gt_labels_list))
+            ]
+            enc_loss_cls, enc_losses_bbox = \
+                self.loss_single(enc_cls_scores, enc_bbox_preds,
+                                 gt_bboxes_list, binary_labels_list, gt_bboxes_ignore)
+            loss_dict['enc_loss_cls'] = enc_loss_cls
+            loss_dict['enc_loss_bbox'] = enc_losses_bbox
+
+        # loss from the last decoder layer
+        loss_dict['loss_cls'] = losses_cls[-1]
+        loss_dict['loss_bbox'] = losses_bbox[-1]
+
+        # loss from other decoder layers
+        num_dec_layer = 0
+        for loss_cls_i, loss_bbox_i in zip(losses_cls[:-1],
+                                           losses_bbox[:-1]):
+            loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i
+            loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i
+            num_dec_layer += 1
+        return loss_dict
+
+    @force_fp32(apply_to=('preds_dicts',))
+    def get_bboxes(self, preds_dicts, img_metas, rescale=False):
+        """Generate bboxes from bbox head predictions.
+        Args:
+            preds_dicts (tuple[list[dict]]): Prediction results.
+            img_metas (list[dict]): Point cloud and image's meta info.
+        Returns:
+            list[dict]: Decoded bbox, scores and labels after nms.
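+
+        Example::
+
+            # Sketch of the per-sample output (names and shapes assumed):
+            #   bboxes, scores, labels = head.get_bboxes(outs, img_metas)[0]
+            #   bboxes: img_metas[0]['box_type_3d'] boxes, [num_det, code_size]
+            #   scores: [num_det]; labels: [num_det]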
+ """ + + preds_dicts = self.bbox_coder.decode(preds_dicts) + + num_samples = len(preds_dicts) + ret_list = [] + for i in range(num_samples): + preds = preds_dicts[i] + bboxes = preds['bboxes'] + + bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 5] * 0.5 + + code_size = bboxes.shape[-1] + bboxes = img_metas[i]['box_type_3d'](bboxes, code_size) + scores = preds['scores'] + labels = preds['labels'] + + ret_list.append([bboxes, scores, labels]) + + return ret_list + + +@HEADS.register_module() +class BEVFormerHead_GroupDETR(BEVFormerHead): + def __init__(self, + *args, + group_detr=1, + **kwargs): + self.group_detr = group_detr + assert 'num_query' in kwargs + kwargs['num_query'] = group_detr * kwargs['num_query'] + super().__init__(*args, **kwargs) + + def forward(self, mlvl_feats, img_metas, prev_bev=None, only_bev=False): + bs, num_cam, _, _, _ = mlvl_feats[0].shape + dtype = mlvl_feats[0].dtype + object_query_embeds = self.query_embedding.weight.to(dtype) + if not self.training: # NOTE: Only difference to bevformer head + object_query_embeds = object_query_embeds[:self.num_query // self.group_detr] + bev_queries = self.bev_embedding.weight.to(dtype) + + bev_mask = torch.zeros((bs, self.bev_h, self.bev_w), + device=bev_queries.device).to(dtype) + bev_pos = self.positional_encoding(bev_mask).to(dtype) + + if only_bev: + return self.transformer.get_bev_features( + mlvl_feats, + bev_queries, + self.bev_h, + self.bev_w, + grid_length=(self.real_h / self.bev_h, + self.real_w / self.bev_w), + bev_pos=bev_pos, + img_metas=img_metas, + prev_bev=prev_bev, + ) + else: + outputs = self.transformer( + mlvl_feats, + bev_queries, + object_query_embeds, + self.bev_h, + self.bev_w, + grid_length=(self.real_h / self.bev_h, + self.real_w / self.bev_w), + bev_pos=bev_pos, + reg_branches=self.reg_branches if self.with_box_refine else None, # noqa:E501 + cls_branches=self.cls_branches if self.as_two_stage else None, + img_metas=img_metas, + prev_bev=prev_bev + ) + + bev_embed, hs, init_reference, inter_references = outputs + hs = hs.permute(0, 2, 1, 3) + outputs_classes = [] + outputs_coords = [] + for lvl in range(hs.shape[0]): + if lvl == 0: + reference = init_reference + else: + reference = inter_references[lvl - 1] + reference = inverse_sigmoid(reference) + outputs_class = self.cls_branches[lvl](hs[lvl]) + tmp = self.reg_branches[lvl](hs[lvl]) + assert reference.shape[-1] == 3 + tmp[..., 0:2] += reference[..., 0:2] + tmp[..., 0:2] = tmp[..., 0:2].sigmoid() + tmp[..., 4:5] += reference[..., 2:3] + tmp[..., 4:5] = tmp[..., 4:5].sigmoid() + tmp[..., 0:1] = (tmp[..., 0:1] * (self.pc_range[3] - + self.pc_range[0]) + self.pc_range[0]) + tmp[..., 1:2] = (tmp[..., 1:2] * (self.pc_range[4] - + self.pc_range[1]) + self.pc_range[1]) + tmp[..., 4:5] = (tmp[..., 4:5] * (self.pc_range[5] - + self.pc_range[2]) + self.pc_range[2]) + outputs_coord = tmp + outputs_classes.append(outputs_class) + outputs_coords.append(outputs_coord) + + outputs_classes = torch.stack(outputs_classes) + outputs_coords = torch.stack(outputs_coords) + + outs = { + 'bev_embed': bev_embed, + 'all_cls_scores': outputs_classes, + 'all_bbox_preds': outputs_coords, + 'enc_cls_scores': None, + 'enc_bbox_preds': None, + } + + return outs + + def loss(self, + gt_bboxes_list, + gt_labels_list, + preds_dicts, + gt_bboxes_ignore=None, + img_metas=None): + """"Loss function. + Args: + + gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image + with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. 
+ gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (num_gts, ). + preds_dicts: + all_cls_scores (Tensor): Classification score of all + decoder layers, has shape + [nb_dec, bs, num_query, cls_out_channels]. + all_bbox_preds (Tensor): Sigmoid regression + outputs of all decode layers. Each is a 4D-tensor with + normalized coordinate format (cx, cy, w, h) and shape + [nb_dec, bs, num_query, 4]. + enc_cls_scores (Tensor): Classification scores of + points on encode feature map , has shape + (N, h*w, num_classes). Only be passed when as_two_stage is + True, otherwise is None. + enc_bbox_preds (Tensor): Regression results of each points + on the encode feature map, has shape (N, h*w, 4). Only be + passed when as_two_stage is True, otherwise is None. + gt_bboxes_ignore (list[Tensor], optional): Bounding boxes + which can be ignored for each image. Default None. + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + assert gt_bboxes_ignore is None, \ + f'{self.__class__.__name__} only supports ' \ + f'for gt_bboxes_ignore setting to None.' + + all_cls_scores = preds_dicts['all_cls_scores'] + all_bbox_preds = preds_dicts['all_bbox_preds'] + enc_cls_scores = preds_dicts['enc_cls_scores'] + enc_bbox_preds = preds_dicts['enc_bbox_preds'] + assert enc_cls_scores is None and enc_bbox_preds is None + + num_dec_layers = len(all_cls_scores) + device = gt_labels_list[0].device + + gt_bboxes_list = [torch.cat( + (gt_bboxes.gravity_center, gt_bboxes.tensor[:, 3:]), + dim=1).to(device) for gt_bboxes in gt_bboxes_list] + + all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)] + all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] + all_gt_bboxes_ignore_list = [ + gt_bboxes_ignore for _ in range(num_dec_layers) + ] + + loss_dict = dict() + loss_dict['loss_cls'] = 0 + loss_dict['loss_bbox'] = 0 + for num_dec_layer in range(all_cls_scores.shape[0] - 1): + loss_dict[f'd{num_dec_layer}.loss_cls'] = 0 + loss_dict[f'd{num_dec_layer}.loss_bbox'] = 0 + num_query_per_group = self.num_query // self.group_detr + for group_index in range(self.group_detr): + group_query_start = group_index * num_query_per_group + group_query_end = (group_index+1) * num_query_per_group + group_cls_scores = all_cls_scores[:, :,group_query_start:group_query_end, :] + group_bbox_preds = all_bbox_preds[:, :,group_query_start:group_query_end, :] + losses_cls, losses_bbox = multi_apply( + self.loss_single, group_cls_scores, group_bbox_preds, + all_gt_bboxes_list, all_gt_labels_list, + all_gt_bboxes_ignore_list) + loss_dict['loss_cls'] += losses_cls[-1] / self.group_detr + loss_dict['loss_bbox'] += losses_bbox[-1] / self.group_detr + # loss from other decoder layers + num_dec_layer = 0 + for loss_cls_i, loss_bbox_i in zip(losses_cls[:-1], losses_bbox[:-1]): + loss_dict[f'd{num_dec_layer}.loss_cls'] += loss_cls_i / self.group_detr + loss_dict[f'd{num_dec_layer}.loss_bbox'] += loss_bbox_i / self.group_detr + num_dec_layer += 1 + return loss_dict \ No newline at end of file diff --git a/mmcv/models/dense_heads/dense_test_mixins.py b/mmcv/models/dense_heads/dense_test_mixins.py new file mode 100644 index 0000000..a548e40 --- /dev/null +++ b/mmcv/models/dense_heads/dense_test_mixins.py @@ -0,0 +1,202 @@ +import sys +from inspect import signature + +import torch + +from mmcv.core.post_processing.merge_augs import merge_aug_proposals +from mmcv.core.post_processing.bbox_nms import multiclass_nms +# from ...core.post_processing import merge_aug_proposals, multiclass_nms + 
+# ``bbox_mapping_back`` is used by ``merge_aug_bboxes`` below; the import
+# path assumed here mirrors the other bbox transform imports in this repo.
+from mmcv.core.bbox.transforms import bbox_mapping_back
+
+if sys.version_info >= (3, 7): + from mmcv.utils.contextmanagers import completed + + +class BBoxTestMixin(object): + """Mixin class for testing det bboxes via DenseHead.""" + + def simple_test_bboxes(self, feats, img_metas, rescale=False): + """Test det bboxes without test-time augmentation, can be applied in + DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``, + etc. + + Args: + feats (tuple[torch.Tensor]): Multi-level features from the + upstream network, each is a 4D-tensor. + img_metas (list[dict]): List of image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. + The first item is ``bboxes`` with shape (n, 5), + where 5 represent (tl_x, tl_y, br_x, br_y, score). + The shape of the second tensor in the tuple is ``labels`` + with shape (n,) + """ + outs = self.forward(feats) + results_list = self.get_bboxes(*outs, img_metas, rescale=rescale) + return results_list + + def aug_test_bboxes(self, feats, img_metas, rescale=False): + """Test det bboxes with test time augmentation, can be applied in + DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``, + etc. + + Args: + feats (list[Tensor]): the outer list indicates test-time + augmentations and inner Tensor should have a shape NxCxHxW, + which contains features for all images in the batch. + img_metas (list[list[dict]]): the outer list indicates test-time + augs (multiscale, flip, etc.) and the inner list indicates + images in a batch. each dict has image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. + The first item is ``bboxes`` with shape (n, 5), + where 5 represent (tl_x, tl_y, br_x, br_y, score). + The shape of the second tensor in the tuple is ``labels`` + with shape (n,). The length of list should always be 1. 
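+
+        Example::
+
+            # Illustrative TTA call; ``aug_feats``/``aug_img_metas`` are the
+            # per-augmentation features and metas (names assumed):
+            #   results = head.aug_test_bboxes(aug_feats, aug_img_metas,
+            #                                  rescale=True)
+            #   det_bboxes, det_labels = results[0]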
+ """ + # check with_nms argument + gb_sig = signature(self.get_bboxes) + gb_args = [p.name for p in gb_sig.parameters.values()] + if hasattr(self, '_get_bboxes'): + gbs_sig = signature(self._get_bboxes) + else: + gbs_sig = signature(self._get_bboxes_single) + gbs_args = [p.name for p in gbs_sig.parameters.values()] + assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \ + f'{self.__class__.__name__}' \ + ' does not support test-time augmentation' + + aug_bboxes = [] + aug_scores = [] + aug_factors = [] # score_factors for NMS + for x, img_meta in zip(feats, img_metas): + # only one image in the batch + outs = self.forward(x) + bbox_inputs = outs + (img_meta, self.test_cfg, False, False) + bbox_outputs = self.get_bboxes(*bbox_inputs)[0] + aug_bboxes.append(bbox_outputs[0]) + aug_scores.append(bbox_outputs[1]) + # bbox_outputs of some detectors (e.g., ATSS, FCOS, YOLOv3) + # contains additional element to adjust scores before NMS + if len(bbox_outputs) >= 3: + aug_factors.append(bbox_outputs[2]) + + # after merging, bboxes will be rescaled to the original image size + merged_bboxes, merged_scores = self.merge_aug_bboxes( + aug_bboxes, aug_scores, img_metas) + merged_factors = torch.cat(aug_factors, dim=0) if aug_factors else None + det_bboxes, det_labels = multiclass_nms( + merged_bboxes, + merged_scores, + self.test_cfg.score_thr, + self.test_cfg.nms, + self.test_cfg.max_per_img, + score_factors=merged_factors) + + if rescale: + _det_bboxes = det_bboxes + else: + _det_bboxes = det_bboxes.clone() + _det_bboxes[:, :4] *= det_bboxes.new_tensor( + img_metas[0][0]['scale_factor']) + + return [ + (_det_bboxes, det_labels), + ] + + def simple_test_rpn(self, x, img_metas): + """Test without augmentation, only for ``RPNHead`` and its variants, + e.g., ``GARPNHead``, etc. + + Args: + x (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + img_metas (list[dict]): Meta info of each image. + + Returns: + list[Tensor]: Proposals of each image, each item has shape (n, 5), + where 5 represent (tl_x, tl_y, br_x, br_y, score). + """ + rpn_outs = self(x) + proposal_list = self.get_bboxes(*rpn_outs, img_metas) + return proposal_list + + def aug_test_rpn(self, feats, img_metas): + """Test with augmentation for only for ``RPNHead`` and its variants, + e.g., ``GARPNHead``, etc. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + img_metas (list[dict]): Meta info of each image. + + Returns: + list[Tensor]: Proposals of each image, each item has shape (n, 5), + where 5 represent (tl_x, tl_y, br_x, br_y, score). 
+ """ + samples_per_gpu = len(img_metas[0]) + aug_proposals = [[] for _ in range(samples_per_gpu)] + for x, img_meta in zip(feats, img_metas): + proposal_list = self.simple_test_rpn(x, img_meta) + for i, proposals in enumerate(proposal_list): + aug_proposals[i].append(proposals) + # reorganize the order of 'img_metas' to match the dimensions + # of 'aug_proposals' + aug_img_metas = [] + for i in range(samples_per_gpu): + aug_img_meta = [] + for j in range(len(img_metas)): + aug_img_meta.append(img_metas[j][i]) + aug_img_metas.append(aug_img_meta) + # after merging, proposals will be rescaled to the original image size + merged_proposals = [ + merge_aug_proposals(proposals, aug_img_meta, self.test_cfg) + for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas) + ] + return merged_proposals + + if sys.version_info >= (3, 7): + + async def async_simple_test_rpn(self, x, img_metas): + sleep_interval = self.test_cfg.pop('async_sleep_interval', 0.025) + async with completed( + __name__, 'rpn_head_forward', + sleep_interval=sleep_interval): + rpn_outs = self(x) + + proposal_list = self.get_bboxes(*rpn_outs, img_metas) + return proposal_list + + def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas): + """Merge augmented detection bboxes and scores. + + Args: + aug_bboxes (list[Tensor]): shape (n, 4*#class) + aug_scores (list[Tensor] or None): shape (n, #class) + img_shapes (list[Tensor]): shape (3, ). + + Returns: + tuple[Tensor]: ``bboxes`` with shape (n,4), where + 4 represent (tl_x, tl_y, br_x, br_y) + and ``scores`` with shape (n,). + """ + recovered_bboxes = [] + for bboxes, img_info in zip(aug_bboxes, img_metas): + img_shape = img_info[0]['img_shape'] + scale_factor = img_info[0]['scale_factor'] + flip = img_info[0]['flip'] + flip_direction = img_info[0]['flip_direction'] + bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip, + flip_direction) + recovered_bboxes.append(bboxes) + bboxes = torch.cat(recovered_bboxes, dim=0) + if aug_scores is None: + return bboxes + else: + scores = torch.cat(aug_scores, dim=0) + return bboxes, scores diff --git a/mmcv/models/dense_heads/detr_head.py b/mmcv/models/dense_heads/detr_head.py new file mode 100644 index 0000000..f19a3f6 --- /dev/null +++ b/mmcv/models/dense_heads/detr_head.py @@ -0,0 +1,843 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.models.bricks import Conv2d, Linear, build_activation_layer +from mmcv.models.bricks.transformer import FFN, build_positional_encoding +from mmcv.utils import force_fp32 + +from mmcv.core.utils import multi_apply, reduce_mean +from mmcv.core.bbox.builder import (build_assigner, build_sampler) +from mmcv.core.bbox.transforms import (bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh) +from mmcv.models.utils import build_transformer +from ..builder import HEADS, build_loss +from .anchor_free_head import AnchorFreeHead + + +@HEADS.register_module() +class DETRHead(AnchorFreeHead): + """Implements the DETR transformer head. + + See `paper: End-to-End Object Detection with Transformers + `_ for details. + + Args: + num_classes (int): Number of categories excluding the background. + in_channels (int): Number of channels in the input feature map. + num_query (int): Number of query in Transformer. + num_reg_fcs (int, optional): Number of fully-connected layers used in + `FFN`, which is then used for the regression head. Default 2. + transformer (obj:`mmcv.ConfigDict`|dict): Config for transformer. + Default: None. 
+ sync_cls_avg_factor (bool): Whether to sync the avg_factor of + all ranks. Default to False. + positional_encoding (obj:`mmcv.ConfigDict`|dict): + Config for position encoding. + loss_cls (obj:`mmcv.ConfigDict`|dict): Config of the + classification loss. Default `CrossEntropyLoss`. + loss_bbox (obj:`mmcv.ConfigDict`|dict): Config of the + regression loss. Default `L1Loss`. + loss_iou (obj:`mmcv.ConfigDict`|dict): Config of the + regression iou loss. Default `GIoULoss`. + tran_cfg (obj:`mmcv.ConfigDict`|dict): Training config of + transformer head. + test_cfg (obj:`mmcv.ConfigDict`|dict): Testing config of + transformer head. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + _version = 2 + + def __init__(self, + num_classes, + in_channels, + num_query=100, + num_reg_fcs=2, + transformer=None, + sync_cls_avg_factor=False, + positional_encoding=dict( + type='SinePositionalEncoding', + num_feats=128, + normalize=True), + loss_cls=dict( + type='CrossEntropyLoss', + bg_cls_weight=0.1, + use_sigmoid=False, + loss_weight=1.0, + class_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0), + train_cfg=dict( + assigner=dict( + type='HungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=1.), + reg_cost=dict(type='BBoxL1Cost', weight=5.0), + iou_cost=dict( + type='IoUCost', iou_mode='giou', weight=2.0))), + test_cfg=dict(max_per_img=100), + init_cfg=None, + **kwargs): + # NOTE here use `AnchorFreeHead` instead of `TransformerHead`, + # since it brings inconvenience when the initialization of + # `AnchorFreeHead` is called. + super(AnchorFreeHead, self).__init__(init_cfg) + self.bg_cls_weight = 0 + self.sync_cls_avg_factor = sync_cls_avg_factor + class_weight = loss_cls.get('class_weight', None) + if class_weight is not None and (self.__class__ is DETRHead): + assert isinstance(class_weight, float), 'Expected ' \ + 'class_weight to have type float. Found ' \ + f'{type(class_weight)}.' + # NOTE following the official DETR rep0, bg_cls_weight means + # relative classification weight of the no-object class. + bg_cls_weight = loss_cls.get('bg_cls_weight', class_weight) + assert isinstance(bg_cls_weight, float), 'Expected ' \ + 'bg_cls_weight to have type float. Found ' \ + f'{type(bg_cls_weight)}.' + class_weight = torch.ones(num_classes + 1) * class_weight + # set background class as the last indice + class_weight[num_classes] = bg_cls_weight + loss_cls.update({'class_weight': class_weight}) + if 'bg_cls_weight' in loss_cls: + loss_cls.pop('bg_cls_weight') + self.bg_cls_weight = bg_cls_weight + + if train_cfg: + assert 'assigner' in train_cfg, 'assigner should be provided '\ + 'when train_cfg is set.' + assigner = train_cfg['assigner'] + assert loss_cls['loss_weight'] == assigner['cls_cost']['weight'], \ + 'The classification weight for loss and matcher should be' \ + 'exactly the same.' + assert loss_bbox['loss_weight'] == assigner['reg_cost'][ + 'weight'], 'The regression L1 weight for loss and matcher ' \ + 'should be exactly the same.' + assert loss_iou['loss_weight'] == assigner['iou_cost']['weight'], \ + 'The regression iou weight for loss and matcher should be' \ + 'exactly the same.' 
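+            # Illustrative pairing that satisfies the checks above (values
+            # are examples, not requirements): loss_cls.loss_weight == 1.0
+            # with cls_cost.weight == 1.0, loss_bbox.loss_weight == 5.0 with
+            # reg_cost.weight == 5.0, and loss_iou.loss_weight == 2.0 with
+            # iou_cost.weight == 2.0.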
+ self.assigner = build_assigner(assigner) + # DETR sampling=False, so use PseudoSampler + sampler_cfg = dict(type='PseudoSampler') + self.sampler = build_sampler(sampler_cfg, context=self) + self.num_query = num_query + self.num_classes = num_classes + self.in_channels = in_channels + self.num_reg_fcs = num_reg_fcs + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.fp16_enabled = False + self.loss_cls = build_loss(loss_cls) + self.loss_bbox = build_loss(loss_bbox) + self.loss_iou = build_loss(loss_iou) + + if self.loss_cls.use_sigmoid: + self.cls_out_channels = num_classes + else: + self.cls_out_channels = num_classes + 1 + self.act_cfg = transformer.get('act_cfg', + dict(type='ReLU', inplace=True)) + self.activate = build_activation_layer(self.act_cfg) + self.positional_encoding = build_positional_encoding( + positional_encoding) + self.transformer = build_transformer(transformer) + self.embed_dims = self.transformer.embed_dims + assert 'num_feats' in positional_encoding + num_feats = positional_encoding['num_feats'] + assert num_feats * 2 == self.embed_dims, 'embed_dims should' \ + f' be exactly 2 times of num_feats. Found {self.embed_dims}' \ + f' and {num_feats}.' + self._init_layers() + + def _init_layers(self): + """Initialize layers of the transformer head.""" + self.input_proj = Conv2d( + self.in_channels, self.embed_dims, kernel_size=1) + self.fc_cls = Linear(self.embed_dims, self.cls_out_channels) + self.reg_ffn = FFN( + self.embed_dims, + self.embed_dims, + self.num_reg_fcs, + self.act_cfg, + dropout=0.0, + add_residual=False) + self.fc_reg = Linear(self.embed_dims, 4) + self.query_embedding = nn.Embedding(self.num_query, self.embed_dims) + + def init_weights(self): + """Initialize weights of the transformer head.""" + # The initialization for transformer is important + self.transformer.init_weights() + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + """load checkpoints.""" + # NOTE here use `AnchorFreeHead` instead of `TransformerHead`, + # since `AnchorFreeHead._load_from_state_dict` should not be + # called here. Invoking the default `Module._load_from_state_dict` + # is enough. + + # Names of some parameters in has been changed. + version = local_metadata.get('version', None) + if (version is None or version < 2) and self.__class__ is DETRHead: + convert_dict = { + '.self_attn.': '.attentions.0.', + '.ffn.': '.ffns.0.', + '.multihead_attn.': '.attentions.1.', + '.decoder.norm.': '.decoder.post_norm.' + } + state_dict_keys = list(state_dict.keys()) + for k in state_dict_keys: + for ori_key, convert_key in convert_dict.items(): + if ori_key in k: + convert_key = k.replace(ori_key, convert_key) + state_dict[convert_key] = state_dict[k] + del state_dict[k] + + super(AnchorFreeHead, + self)._load_from_state_dict(state_dict, prefix, local_metadata, + strict, missing_keys, + unexpected_keys, error_msgs) + + def forward(self, feats, img_metas): + """Forward function. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + img_metas (list[dict]): List of image information. + + Returns: + tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels. + + - all_cls_scores_list (list[Tensor]): Classification scores \ + for each scale level. Each is a 4D-tensor with shape \ + [nb_dec, bs, num_query, cls_out_channels]. Note \ + `cls_out_channels` should includes background. 
+ - all_bbox_preds_list (list[Tensor]): Sigmoid regression \ + outputs for each scale level. Each is a 4D-tensor with \ + normalized coordinate format (cx, cy, w, h) and shape \ + [nb_dec, bs, num_query, 4]. + """ + num_levels = len(feats) + img_metas_list = [img_metas for _ in range(num_levels)] + return multi_apply(self.forward_single, feats, img_metas_list) + + def forward_single(self, x, img_metas): + """"Forward function for a single feature level. + + Args: + x (Tensor): Input feature from backbone's single stage, shape + [bs, c, h, w]. + img_metas (list[dict]): List of image information. + + Returns: + all_cls_scores (Tensor): Outputs from the classification head, + shape [nb_dec, bs, num_query, cls_out_channels]. Note + cls_out_channels should includes background. + all_bbox_preds (Tensor): Sigmoid outputs from the regression + head with normalized coordinate format (cx, cy, w, h). + Shape [nb_dec, bs, num_query, 4]. + """ + # construct binary masks which used for the transformer. + # NOTE following the official DETR repo, non-zero values representing + # ignored positions, while zero values means valid positions. + batch_size = x.size(0) + input_img_h, input_img_w = img_metas[0]['batch_input_shape'] + masks = x.new_ones((batch_size, input_img_h, input_img_w)) + for img_id in range(batch_size): + img_h, img_w, _ = img_metas[img_id]['img_shape'] + masks[img_id, :img_h, :img_w] = 0 + + x = self.input_proj(x) + # interpolate masks to have the same spatial shape with x + masks = F.interpolate( + masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1) + # position encoding + pos_embed = self.positional_encoding(masks) # [bs, embed_dim, h, w] + # outs_dec: [nb_dec, bs, num_query, embed_dim] + outs_dec, _ = self.transformer(x, masks, self.query_embedding.weight, + pos_embed) + + all_cls_scores = self.fc_cls(outs_dec) + all_bbox_preds = self.fc_reg(self.activate( + self.reg_ffn(outs_dec))).sigmoid() + return all_cls_scores, all_bbox_preds + + @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) + def loss(self, + all_cls_scores_list, + all_bbox_preds_list, + gt_bboxes_list, + gt_labels_list, + img_metas, + gt_bboxes_ignore=None): + """"Loss function. + + Only outputs from the last feature level are used for computing + losses by default. + + Args: + all_cls_scores_list (list[Tensor]): Classification outputs + for each feature level. Each is a 4D-tensor with shape + [nb_dec, bs, num_query, cls_out_channels]. + all_bbox_preds_list (list[Tensor]): Sigmoid regression + outputs for each feature level. Each is a 4D-tensor with + normalized coordinate format (cx, cy, w, h) and shape + [nb_dec, bs, num_query, 4]. + gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image + with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (num_gts, ). + img_metas (list[dict]): List of image meta information. + gt_bboxes_ignore (list[Tensor], optional): Bounding boxes + which can be ignored for each image. Default None. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + # NOTE defaultly only the outputs from the last feature scale is used. + all_cls_scores = all_cls_scores_list[-1] + all_bbox_preds = all_bbox_preds_list[-1] + assert gt_bboxes_ignore is None, \ + 'Only supports for gt_bboxes_ignore setting to None.' 
+ + num_dec_layers = len(all_cls_scores) + all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)] + all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] + all_gt_bboxes_ignore_list = [ + gt_bboxes_ignore for _ in range(num_dec_layers) + ] + img_metas_list = [img_metas for _ in range(num_dec_layers)] + + losses_cls, losses_bbox, losses_iou = multi_apply( + self.loss_single, all_cls_scores, all_bbox_preds, + all_gt_bboxes_list, all_gt_labels_list, img_metas_list, + all_gt_bboxes_ignore_list) + + loss_dict = dict() + # loss from the last decoder layer + loss_dict['loss_cls'] = losses_cls[-1] + loss_dict['loss_bbox'] = losses_bbox[-1] + loss_dict['loss_iou'] = losses_iou[-1] + # loss from other decoder layers + num_dec_layer = 0 + for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1], + losses_bbox[:-1], + losses_iou[:-1]): + loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i + loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i + loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i + num_dec_layer += 1 + return loss_dict + + def loss_single(self, + cls_scores, + bbox_preds, + gt_bboxes_list, + gt_labels_list, + img_metas, + gt_bboxes_ignore_list=None): + """"Loss function for outputs from a single decoder layer of a single + feature level. + + Args: + cls_scores (Tensor): Box score logits from a single decoder layer + for all images. Shape [bs, num_query, cls_out_channels]. + bbox_preds (Tensor): Sigmoid outputs from a single decoder layer + for all images, with normalized coordinate (cx, cy, w, h) and + shape [bs, num_query, 4]. + gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image + with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (num_gts, ). + img_metas (list[dict]): List of image meta information. + gt_bboxes_ignore_list (list[Tensor], optional): Bounding + boxes which can be ignored for each image. Default None. + + Returns: + dict[str, Tensor]: A dictionary of loss components for outputs from + a single decoder layer. 
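+
+        Note:
+            Following the official DETR repo, the classification
+            ``avg_factor`` is ``num_total_pos + num_total_neg *
+            bg_cls_weight`` and is optionally synced across ranks when
+            ``sync_cls_avg_factor`` is set.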
+ """ + num_imgs = cls_scores.size(0) + cls_scores_list = [cls_scores[i] for i in range(num_imgs)] + bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)] + cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list, + gt_bboxes_list, gt_labels_list, + img_metas, gt_bboxes_ignore_list) + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + labels = torch.cat(labels_list, 0) + label_weights = torch.cat(label_weights_list, 0) + bbox_targets = torch.cat(bbox_targets_list, 0) + bbox_weights = torch.cat(bbox_weights_list, 0) + + # classification loss + cls_scores = cls_scores.reshape(-1, self.cls_out_channels) + # construct weighted avg_factor to match with the official DETR repo + cls_avg_factor = num_total_pos * 1.0 + \ + num_total_neg * self.bg_cls_weight + if self.sync_cls_avg_factor: + cls_avg_factor = reduce_mean( + cls_scores.new_tensor([cls_avg_factor])) + cls_avg_factor = max(cls_avg_factor, 1) + + loss_cls = self.loss_cls( + cls_scores, labels, label_weights, avg_factor=cls_avg_factor) + + # Compute the average number of gt boxes accross all gpus, for + # normalization purposes + num_total_pos = loss_cls.new_tensor([num_total_pos]) + num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() + + # construct factors used for rescale bboxes + factors = [] + for img_meta, bbox_pred in zip(img_metas, bbox_preds): + img_h, img_w, _ = img_meta['img_shape'] + factor = bbox_pred.new_tensor([img_w, img_h, img_w, + img_h]).unsqueeze(0).repeat( + bbox_pred.size(0), 1) + factors.append(factor) + factors = torch.cat(factors, 0) + + # DETR regress the relative position of boxes (cxcywh) in the image, + # thus the learning target is normalized by the image size. So here + # we need to re-scale them for calculating IoU loss + bbox_preds = bbox_preds.reshape(-1, 4) + bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors + bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors + + # regression IoU loss, defaultly GIoU loss + loss_iou = self.loss_iou( + bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos) + + # regression L1 loss + loss_bbox = self.loss_bbox( + bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos) + return loss_cls, loss_bbox, loss_iou + + def get_targets(self, + cls_scores_list, + bbox_preds_list, + gt_bboxes_list, + gt_labels_list, + img_metas, + gt_bboxes_ignore_list=None): + """"Compute regression and classification targets for a batch image. + + Outputs from a single decoder layer of a single feature level are used. + + Args: + cls_scores_list (list[Tensor]): Box score logits from a single + decoder layer for each image with shape [num_query, + cls_out_channels]. + bbox_preds_list (list[Tensor]): Sigmoid outputs from a single + decoder layer for each image, with normalized coordinate + (cx, cy, w, h) and shape [num_query, 4]. + gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image + with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (num_gts, ). + img_metas (list[dict]): List of image meta information. + gt_bboxes_ignore_list (list[Tensor], optional): Bounding + boxes which can be ignored for each image. Default None. + + Returns: + tuple: a tuple containing the following targets. + + - labels_list (list[Tensor]): Labels for all images. + - label_weights_list (list[Tensor]): Label weights for all \ + images. 
+ - bbox_targets_list (list[Tensor]): BBox targets for all \ + images. + - bbox_weights_list (list[Tensor]): BBox weights for all \ + images. + - num_total_pos (int): Number of positive samples in all \ + images. + - num_total_neg (int): Number of negative samples in all \ + images. + """ + assert gt_bboxes_ignore_list is None, \ + 'Only supports for gt_bboxes_ignore setting to None.' + num_imgs = len(cls_scores_list) + gt_bboxes_ignore_list = [ + gt_bboxes_ignore_list for _ in range(num_imgs) + ] + + (labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply( + self._get_target_single, cls_scores_list, bbox_preds_list, + gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list) + num_total_pos = sum((inds.numel() for inds in pos_inds_list)) + num_total_neg = sum((inds.numel() for inds in neg_inds_list)) + return (labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, num_total_pos, num_total_neg) + + def _get_target_single(self, + cls_score, + bbox_pred, + gt_bboxes, + gt_labels, + img_meta, + gt_bboxes_ignore=None): + """"Compute regression and classification targets for one image. + + Outputs from a single decoder layer of a single feature level are used. + + Args: + cls_score (Tensor): Box score logits from a single decoder layer + for one image. Shape [num_query, cls_out_channels]. + bbox_pred (Tensor): Sigmoid outputs from a single decoder layer + for one image, with normalized coordinate (cx, cy, w, h) and + shape [num_query, 4]. + gt_bboxes (Tensor): Ground truth bboxes for one image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (Tensor): Ground truth class indices for one image + with shape (num_gts, ). + img_meta (dict): Meta information for one image. + gt_bboxes_ignore (Tensor, optional): Bounding boxes + which can be ignored. Default None. + + Returns: + tuple[Tensor]: a tuple containing the following for one image. + + - labels (Tensor): Labels of each image. + - label_weights (Tensor]): Label weights of each image. + - bbox_targets (Tensor): BBox targets of each image. + - bbox_weights (Tensor): BBox weights of each image. + - pos_inds (Tensor): Sampled positive indices for each image. + - neg_inds (Tensor): Sampled negative indices for each image. + """ + + num_bboxes = bbox_pred.size(0) + # assigner and sampler + assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes, + gt_labels, img_meta, + gt_bboxes_ignore) + sampling_result = self.sampler.sample(assign_result, bbox_pred, + gt_bboxes) + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + + # label targets + labels = gt_bboxes.new_full((num_bboxes, ), + self.num_classes, + dtype=torch.long) + labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] + label_weights = gt_bboxes.new_ones(num_bboxes) + + # bbox targets + bbox_targets = torch.zeros_like(bbox_pred) + bbox_weights = torch.zeros_like(bbox_pred) + bbox_weights[pos_inds] = 1.0 + img_h, img_w, _ = img_meta['img_shape'] + + # DETR regress the relative position of boxes (cxcywh) in the image. + # Thus the learning target should be normalized by the image size, also + # the box format should be converted from defaultly x1y1x2y2 to cxcywh. 
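+        # Worked example with assumed numbers: in an 800x600 image, the GT box
+        # [100, 150, 300, 350] (x1y1x2y2) divided by [800, 600, 800, 600] and
+        # converted to cxcywh gives [0.25, 0.4167, 0.25, 0.3333].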
+ factor = bbox_pred.new_tensor([img_w, img_h, img_w, + img_h]).unsqueeze(0) + pos_gt_bboxes_normalized = sampling_result.pos_gt_bboxes / factor + pos_gt_bboxes_targets = bbox_xyxy_to_cxcywh(pos_gt_bboxes_normalized) + bbox_targets[pos_inds] = pos_gt_bboxes_targets + return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, + neg_inds) + + # over-write because img_metas are needed as inputs for bbox_head. + def forward_train(self, + x, + img_metas, + gt_bboxes, + gt_labels=None, + gt_bboxes_ignore=None, + proposal_cfg=None, + **kwargs): + """Forward function for training mode. + + Args: + x (list[Tensor]): Features from backbone. + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes (Tensor): Ground truth bboxes of the image, + shape (num_gts, 4). + gt_labels (Tensor): Ground truth labels of each box, + shape (num_gts,). + gt_bboxes_ignore (Tensor): Ground truth bboxes to be + ignored, shape (num_ignored_gts, 4). + proposal_cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + assert proposal_cfg is None, '"proposal_cfg" must be None' + outs = self(x, img_metas) + if gt_labels is None: + loss_inputs = outs + (gt_bboxes, img_metas) + else: + loss_inputs = outs + (gt_bboxes, gt_labels, img_metas) + losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + return losses + + @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) + def get_bboxes(self, + all_cls_scores_list, + all_bbox_preds_list, + img_metas, + rescale=False): + """Transform network outputs for a batch into bbox predictions. + + Args: + all_cls_scores_list (list[Tensor]): Classification outputs + for each feature level. Each is a 4D-tensor with shape + [nb_dec, bs, num_query, cls_out_channels]. + all_bbox_preds_list (list[Tensor]): Sigmoid regression + outputs for each feature level. Each is a 4D-tensor with + normalized coordinate format (cx, cy, w, h) and shape + [nb_dec, bs, num_query, 4]. + img_metas (list[dict]): Meta information of each image. + rescale (bool, optional): If True, return boxes in original + image space. Default False. + + Returns: + list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. \ + The first item is an (n, 5) tensor, where the first 4 columns \ + are bounding box positions (tl_x, tl_y, br_x, br_y) and the \ + 5-th column is a score between 0 and 1. The second item is a \ + (n,) tensor where each item is the predicted class label of \ + the corresponding box. + """ + # NOTE defaultly only using outputs from the last feature level, + # and only the outputs from the last decoder layer is used. + cls_scores = all_cls_scores_list[-1][-1] + bbox_preds = all_bbox_preds_list[-1][-1] + + result_list = [] + for img_id in range(len(img_metas)): + cls_score = cls_scores[img_id] + bbox_pred = bbox_preds[img_id] + img_shape = img_metas[img_id]['img_shape'] + scale_factor = img_metas[img_id]['scale_factor'] + proposals = self._get_bboxes_single(cls_score, bbox_pred, + img_shape, scale_factor, + rescale) + result_list.append(proposals) + + return result_list + + def _get_bboxes_single(self, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=False): + """Transform outputs from the last decoder layer into bbox predictions + for each image. + + Args: + cls_score (Tensor): Box score logits from the last decoder layer + for each image. Shape [num_query, cls_out_channels]. 
+ bbox_pred (Tensor): Sigmoid outputs from the last decoder layer + for each image, with coordinate format (cx, cy, w, h) and + shape [num_query, 4]. + img_shape (tuple[int]): Shape of input image, (height, width, 3). + scale_factor (ndarray, optional): Scale factor of the image arange + as (w_scale, h_scale, w_scale, h_scale). + rescale (bool, optional): If True, return boxes in original image + space. Default False. + + Returns: + tuple[Tensor]: Results of detected bboxes and labels. + + - det_bboxes: Predicted bboxes with shape [num_query, 5], \ + where the first 4 columns are bounding box positions \ + (tl_x, tl_y, br_x, br_y) and the 5-th column are scores \ + between 0 and 1. + - det_labels: Predicted labels of the corresponding box with \ + shape [num_query]. + """ + assert len(cls_score) == len(bbox_pred) + max_per_img = self.test_cfg.get('max_per_img', self.num_query) + # exclude background + if self.loss_cls.use_sigmoid: + cls_score = cls_score.sigmoid() + scores, indexes = cls_score.view(-1).topk(max_per_img) + det_labels = indexes % self.num_classes + bbox_index = indexes // self.num_classes + bbox_pred = bbox_pred[bbox_index] + else: + scores, det_labels = F.softmax(cls_score, dim=-1)[..., :-1].max(-1) + scores, bbox_index = scores.topk(max_per_img) + bbox_pred = bbox_pred[bbox_index] + det_labels = det_labels[bbox_index] + + det_bboxes = bbox_cxcywh_to_xyxy(bbox_pred) + det_bboxes[:, 0::2] = det_bboxes[:, 0::2] * img_shape[1] + det_bboxes[:, 1::2] = det_bboxes[:, 1::2] * img_shape[0] + det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1]) + det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0]) + if rescale: + det_bboxes /= det_bboxes.new_tensor(scale_factor) + det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(1)), -1) + + return det_bboxes, det_labels + + def simple_test_bboxes(self, feats, img_metas, rescale=False): + """Test det bboxes without test-time augmentation. + + Args: + feats (tuple[torch.Tensor]): Multi-level features from the + upstream network, each is a 4D-tensor. + img_metas (list[dict]): List of image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. + The first item is ``bboxes`` with shape (n, 5), + where 5 represent (tl_x, tl_y, br_x, br_y, score). + The shape of the second tensor in the tuple is ``labels`` + with shape (n,) + """ + # forward of this head requires img_metas + outs = self.forward(feats, img_metas) + results_list = self.get_bboxes(*outs, img_metas, rescale=rescale) + return results_list + + def forward_onnx(self, feats, img_metas): + """Forward function for exporting to ONNX. + + Over-write `forward` because: `masks` is directly created with + zero (valid position tag) and has the same spatial size as `x`. + Thus the construction of `masks` is different from that in `forward`. + + Args: + feats (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + img_metas (list[dict]): List of image information. + + Returns: + tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels. + + - all_cls_scores_list (list[Tensor]): Classification scores \ + for each scale level. Each is a 4D-tensor with shape \ + [nb_dec, bs, num_query, cls_out_channels]. Note \ + `cls_out_channels` should includes background. + - all_bbox_preds_list (list[Tensor]): Sigmoid regression \ + outputs for each scale level. 
Each is a 4D-tensor with \ + normalized coordinate format (cx, cy, w, h) and shape \ + [nb_dec, bs, num_query, 4]. + """ + num_levels = len(feats) + img_metas_list = [img_metas for _ in range(num_levels)] + return multi_apply(self.forward_single_onnx, feats, img_metas_list) + + def forward_single_onnx(self, x, img_metas): + """"Forward function for a single feature level with ONNX exportation. + + Args: + x (Tensor): Input feature from backbone's single stage, shape + [bs, c, h, w]. + img_metas (list[dict]): List of image information. + + Returns: + all_cls_scores (Tensor): Outputs from the classification head, + shape [nb_dec, bs, num_query, cls_out_channels]. Note + cls_out_channels should includes background. + all_bbox_preds (Tensor): Sigmoid outputs from the regression + head with normalized coordinate format (cx, cy, w, h). + Shape [nb_dec, bs, num_query, 4]. + """ + # Note `img_shape` is not dynamically traceable to ONNX, + # since the related augmentation was done with numpy under + # CPU. Thus `masks` is directly created with zeros (valid tag) + # and the same spatial shape as `x`. + # The difference between torch and exported ONNX model may be + # ignored, since the same performance is achieved (e.g. + # 40.1 vs 40.1 for DETR) + batch_size = x.size(0) + h, w = x.size()[-2:] + masks = x.new_zeros((batch_size, h, w)) # [B,h,w] + + x = self.input_proj(x) + # interpolate masks to have the same spatial shape with x + masks = F.interpolate( + masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1) + pos_embed = self.positional_encoding(masks) + outs_dec, _ = self.transformer(x, masks, self.query_embedding.weight, + pos_embed) + + all_cls_scores = self.fc_cls(outs_dec) + all_bbox_preds = self.fc_reg(self.activate( + self.reg_ffn(outs_dec))).sigmoid() + return all_cls_scores, all_bbox_preds + + def onnx_export(self, all_cls_scores_list, all_bbox_preds_list, img_metas): + """Transform network outputs into bbox predictions, with ONNX + exportation. + + Args: + all_cls_scores_list (list[Tensor]): Classification outputs + for each feature level. Each is a 4D-tensor with shape + [nb_dec, bs, num_query, cls_out_channels]. + all_bbox_preds_list (list[Tensor]): Sigmoid regression + outputs for each feature level. Each is a 4D-tensor with + normalized coordinate format (cx, cy, w, h) and shape + [nb_dec, bs, num_query, 4]. + img_metas (list[dict]): Meta information of each image. + + Returns: + tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] + and class labels of shape [N, num_det]. + """ + assert len(img_metas) == 1, \ + 'Only support one input image while in exporting to ONNX' + + cls_scores = all_cls_scores_list[-1][-1] + bbox_preds = all_bbox_preds_list[-1][-1] + + # Note `img_shape` is not dynamically traceable to ONNX, + # here `img_shape_for_onnx` (padded shape of image tensor) + # is used. 
+        img_shape = img_metas[0]['img_shape_for_onnx']
+        max_per_img = self.test_cfg.get('max_per_img', self.num_query)
+        batch_size = cls_scores.size(0)
+        # `batch_index_offset` is used to gather from the concatenated tensor
+        batch_index_offset = torch.arange(batch_size).to(
+            cls_scores.device) * max_per_img
+        batch_index_offset = batch_index_offset.unsqueeze(1).expand(
+            batch_size, max_per_img)
+
+        # supports dynamic batch inference
+        if self.loss_cls.use_sigmoid:
+            cls_scores = cls_scores.sigmoid()
+            scores, indexes = cls_scores.view(batch_size, -1).topk(
+                max_per_img, dim=1)
+            det_labels = indexes % self.num_classes
+            bbox_index = indexes // self.num_classes
+            bbox_index = (bbox_index + batch_index_offset).view(-1)
+            bbox_preds = bbox_preds.view(-1, 4)[bbox_index]
+            bbox_preds = bbox_preds.view(batch_size, -1, 4)
+        else:
+            scores, det_labels = F.softmax(
+                cls_scores, dim=-1)[..., :-1].max(-1)
+            scores, bbox_index = scores.topk(max_per_img, dim=1)
+            bbox_index = (bbox_index + batch_index_offset).view(-1)
+            bbox_preds = bbox_preds.view(-1, 4)[bbox_index]
+            det_labels = det_labels.view(-1)[bbox_index]
+            bbox_preds = bbox_preds.view(batch_size, -1, 4)
+            det_labels = det_labels.view(batch_size, -1)
+
+        det_bboxes = bbox_cxcywh_to_xyxy(bbox_preds)
+        # use `img_shape_tensor` for dynamically exporting to ONNX
+        img_shape_tensor = img_shape.flip(0).repeat(2)  # [w,h,w,h]
+        img_shape_tensor = img_shape_tensor.unsqueeze(0).unsqueeze(0).expand(
+            batch_size, det_bboxes.size(1), 4)
+        det_bboxes = det_bboxes * img_shape_tensor
+        # dynamically clip bboxes
+        x1, y1, x2, y2 = det_bboxes.split((1, 1, 1, 1), dim=-1)
+        from mmcv.core.export import dynamic_clip_for_onnx
+        x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, img_shape)
+        det_bboxes = torch.cat([x1, y1, x2, y2], dim=-1)
+        det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(-1)), -1)
+
+        return det_bboxes, det_labels
diff --git a/mmcv/models/dense_heads/free_anchor3d_head.py b/mmcv/models/dense_heads/free_anchor3d_head.py
new file mode 100644
index 0000000..6e9797c
--- /dev/null
+++ b/mmcv/models/dense_heads/free_anchor3d_head.py
@@ -0,0 +1,284 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from mmcv.runner import force_fp32
+from torch.nn import functional as F
+
+from mmcv.core.bbox import bbox_overlaps_nearest_3d
+from mmcv.models import HEADS
+from .anchor3d_head import Anchor3DHead
+from .train_mixins import get_direction_target
+
+
+@HEADS.register_module()
+class FreeAnchor3DHead(Anchor3DHead):
+    r"""`FreeAnchor <https://arxiv.org/abs/1909.02466>`_ head for 3D detection.
+
+    Note:
+        This implementation is directly modified from the `mmdet implementation
+        <https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/free_anchor_retina_head.py>`_.
+        We find it also works on 3D detection with minor modifications, i.e.,
+        different hyper-parameters and an additional direction classifier.
+
+    Args:
+        pre_anchor_topk (int): Number of boxes taken in each bag.
+        bbox_thr (float): The threshold of the saturated linear function. It is
+            usually the same as the IoU threshold used in NMS.
+        gamma (float): Gamma parameter in focal loss.
+        alpha (float): Alpha parameter in focal loss.
+        kwargs (dict): Other arguments are the same as those in :class:`Anchor3DHead`.
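+
+    Example:
+        A minimal config sketch for this head (field values are the
+        illustrative defaults from the signature, not from any released
+        model)::
+
+            dict(
+                type='FreeAnchor3DHead',
+                pre_anchor_topk=50,
+                bbox_thr=0.6,
+                gamma=2.0,
+                alpha=0.5)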
+ """ # noqa: E501 + + def __init__(self, + pre_anchor_topk=50, + bbox_thr=0.6, + gamma=2.0, + alpha=0.5, + init_cfg=None, + **kwargs): + super().__init__(init_cfg=init_cfg, **kwargs) + self.pre_anchor_topk = pre_anchor_topk + self.bbox_thr = bbox_thr + self.gamma = gamma + self.alpha = alpha + + @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'dir_cls_preds')) + def loss(self, + cls_scores, + bbox_preds, + dir_cls_preds, + gt_bboxes, + gt_labels, + input_metas, + gt_bboxes_ignore=None): + """Calculate loss of FreeAnchor head. + + Args: + cls_scores (list[torch.Tensor]): Classification scores of + different samples. + bbox_preds (list[torch.Tensor]): Box predictions of + different samples + dir_cls_preds (list[torch.Tensor]): Direction predictions of + different samples + gt_bboxes (list[:obj:`BaseInstance3DBoxes`]): Ground truth boxes. + gt_labels (list[torch.Tensor]): Ground truth labels. + input_metas (list[dict]): List of input meta information. + gt_bboxes_ignore (list[:obj:`BaseInstance3DBoxes`], optional): + Ground truth boxes that should be ignored. Defaults to None. + + Returns: + dict[str, torch.Tensor]: Loss items. + + - positive_bag_loss (torch.Tensor): Loss of positive samples. + - negative_bag_loss (torch.Tensor): Loss of negative samples. + """ + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.anchor_generator.num_levels + + anchor_list = self.get_anchors(featmap_sizes, input_metas) + anchors = [torch.cat(anchor) for anchor in anchor_list] + + # concatenate each level + cls_scores = [ + cls_score.permute(0, 2, 3, 1).reshape( + cls_score.size(0), -1, self.num_classes) + for cls_score in cls_scores + ] + bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape( + bbox_pred.size(0), -1, self.box_code_size) + for bbox_pred in bbox_preds + ] + dir_cls_preds = [ + dir_cls_pred.permute(0, 2, 3, + 1).reshape(dir_cls_pred.size(0), -1, 2) + for dir_cls_pred in dir_cls_preds + ] + + cls_scores = torch.cat(cls_scores, dim=1) + bbox_preds = torch.cat(bbox_preds, dim=1) + dir_cls_preds = torch.cat(dir_cls_preds, dim=1) + + cls_prob = torch.sigmoid(cls_scores) + box_prob = [] + num_pos = 0 + positive_losses = [] + for _, (anchors_, gt_labels_, gt_bboxes_, cls_prob_, bbox_preds_, + dir_cls_preds_) in enumerate( + zip(anchors, gt_labels, gt_bboxes, cls_prob, bbox_preds, + dir_cls_preds)): + + gt_bboxes_ = gt_bboxes_.tensor.to(anchors_.device) + + with torch.no_grad(): + # box_localization: a_{j}^{loc}, shape: [j, 4] + pred_boxes = self.bbox_coder.decode(anchors_, bbox_preds_) + + # object_box_iou: IoU_{ij}^{loc}, shape: [i, j] + object_box_iou = bbox_overlaps_nearest_3d( + gt_bboxes_, pred_boxes) + + # object_box_prob: P{a_{j} -> b_{i}}, shape: [i, j] + t1 = self.bbox_thr + t2 = object_box_iou.max( + dim=1, keepdim=True).values.clamp(min=t1 + 1e-6) + object_box_prob = ((object_box_iou - t1) / (t2 - t1)).clamp( + min=0, max=1) + + # object_cls_box_prob: P{a_{j} -> b_{i}}, shape: [i, c, j] + num_obj = gt_labels_.size(0) + indices = torch.stack( + [torch.arange(num_obj).type_as(gt_labels_), gt_labels_], + dim=0) + + object_cls_box_prob = torch.sparse_coo_tensor( + indices, object_box_prob) + + # image_box_iou: P{a_{j} \in A_{+}}, shape: [c, j] + """ + from "start" to "end" implement: + image_box_iou = torch.sparse.max(object_cls_box_prob, + dim=0).t() + + """ + # start + box_cls_prob = torch.sparse.sum( + object_cls_box_prob, dim=0).to_dense() + + indices = torch.nonzero(box_cls_prob, as_tuple=False).t_() + if indices.numel() == 0: + 
image_box_prob = torch.zeros( + anchors_.size(0), + self.num_classes).type_as(object_box_prob) + else: + nonzero_box_prob = torch.where( + (gt_labels_.unsqueeze(dim=-1) == indices[0]), + object_box_prob[:, indices[1]], + torch.tensor( + [0]).type_as(object_box_prob)).max(dim=0).values + + # upmap to shape [j, c] + image_box_prob = torch.sparse_coo_tensor( + indices.flip([0]), + nonzero_box_prob, + size=(anchors_.size(0), self.num_classes)).to_dense() + # end + + box_prob.append(image_box_prob) + + # construct bags for objects + match_quality_matrix = bbox_overlaps_nearest_3d( + gt_bboxes_, anchors_) + _, matched = torch.topk( + match_quality_matrix, + self.pre_anchor_topk, + dim=1, + sorted=False) + del match_quality_matrix + + # matched_cls_prob: P_{ij}^{cls} + matched_cls_prob = torch.gather( + cls_prob_[matched], 2, + gt_labels_.view(-1, 1, 1).repeat(1, self.pre_anchor_topk, + 1)).squeeze(2) + + # matched_box_prob: P_{ij}^{loc} + matched_anchors = anchors_[matched] + matched_object_targets = self.bbox_coder.encode( + matched_anchors, + gt_bboxes_.unsqueeze(dim=1).expand_as(matched_anchors)) + + # direction classification loss + loss_dir = None + if self.use_direction_classifier: + # also calculate direction prob: P_{ij}^{dir} + matched_dir_targets = get_direction_target( + matched_anchors, + matched_object_targets, + self.dir_offset, + one_hot=False) + loss_dir = self.loss_dir( + dir_cls_preds_[matched].transpose(-2, -1), + matched_dir_targets, + reduction_override='none') + + # generate bbox weights + if self.diff_rad_by_sin: + bbox_preds_[matched], matched_object_targets = \ + self.add_sin_difference( + bbox_preds_[matched], matched_object_targets) + bbox_weights = matched_anchors.new_ones(matched_anchors.size()) + # Use pop is not right, check performance + code_weight = self.train_cfg.get('code_weight', None) + if code_weight: + bbox_weights = bbox_weights * bbox_weights.new_tensor( + code_weight) + loss_bbox = self.loss_bbox( + bbox_preds_[matched], + matched_object_targets, + bbox_weights, + reduction_override='none').sum(-1) + + if loss_dir is not None: + loss_bbox += loss_dir + matched_box_prob = torch.exp(-loss_bbox) + + # positive_losses: {-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )} + num_pos += len(gt_bboxes_) + positive_losses.append( + self.positive_bag_loss(matched_cls_prob, matched_box_prob)) + + positive_loss = torch.cat(positive_losses).sum() / max(1, num_pos) + + # box_prob: P{a_{j} \in A_{+}} + box_prob = torch.stack(box_prob, dim=0) + + # negative_loss: + # \sum_{j}{ FL((1 - P{a_{j} \in A_{+}}) * (1 - P_{j}^{bg})) } / n||B|| + negative_loss = self.negative_bag_loss(cls_prob, box_prob).sum() / max( + 1, num_pos * self.pre_anchor_topk) + + losses = { + 'positive_bag_loss': positive_loss, + 'negative_bag_loss': negative_loss + } + return losses + + def positive_bag_loss(self, matched_cls_prob, matched_box_prob): + """Generate positive bag loss. + + Args: + matched_cls_prob (torch.Tensor): Classification probability + of matched positive samples. + matched_box_prob (torch.Tensor): Bounding box probability + of matched positive samples. + + Returns: + torch.Tensor: Loss of positive samples. 
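The `-log(Mean-max(...))` objective noted above is realized in `positive_bag_loss` below; here is a standalone sketch of the Mean-max reduction (toy probabilities; `alpha=0.5` as in this head's defaults). With one confident candidate the weighted mean approaches the max; with uniformly weak candidates it falls back to the mean:

import torch
import torch.nn.functional as F

def mean_max(p):                            # p: (num_gt, topk) matched probs
    w = 1 / torch.clamp(1 - p, 1e-12, None)
    w = w / w.sum(dim=1, keepdim=True)
    return (w * p).sum(dim=1)

p = torch.tensor([[0.05, 0.10, 0.95],       # one strong candidate -> ~max
                  [0.20, 0.25, 0.30]])      # all weak -> ~mean
bag_prob = mean_max(p)                      # ~[0.87, 0.25]
loss = 0.5 * F.binary_cross_entropy(        # alpha = 0.5
    bag_prob.clamp(0, 1), torch.ones_like(bag_prob), reduction='none')
print(bag_prob, loss)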
+ """ + # bag_prob = Mean-max(matched_prob) + matched_prob = matched_cls_prob * matched_box_prob + weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None) + weight /= weight.sum(dim=1).unsqueeze(dim=-1) + bag_prob = (weight * matched_prob).sum(dim=1) + # positive_bag_loss = -self.alpha * log(bag_prob) + bag_prob = bag_prob.clamp(0, 1) # to avoid bug of BCE, check + return self.alpha * F.binary_cross_entropy( + bag_prob, torch.ones_like(bag_prob), reduction='none') + + def negative_bag_loss(self, cls_prob, box_prob): + """Generate negative bag loss. + + Args: + cls_prob (torch.Tensor): Classification probability + of negative samples. + box_prob (torch.Tensor): Bounding box probability + of negative samples. + + Returns: + torch.Tensor: Loss of negative samples. + """ + prob = cls_prob * (1 - box_prob) + prob = prob.clamp(0, 1) # to avoid bug of BCE, check + negative_bag_loss = prob**self.gamma * F.binary_cross_entropy( + prob, torch.zeros_like(prob), reduction='none') + return (1 - self.alpha) * negative_bag_loss \ No newline at end of file diff --git a/mmcv/models/dense_heads/ga_rpn_head.py b/mmcv/models/dense_heads/ga_rpn_head.py new file mode 100644 index 0000000..7c739de --- /dev/null +++ b/mmcv/models/dense_heads/ga_rpn_head.py @@ -0,0 +1,176 @@ +import copy +import warnings + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv import ConfigDict +from mmcv.ops import nms + +from ..builder import HEADS +from .guided_anchor_head import GuidedAnchorHead + + +@HEADS.register_module() +class GARPNHead(GuidedAnchorHead): + """Guided-Anchor-based RPN head.""" + + def __init__(self, + in_channels, + init_cfg=dict( + type='Normal', + layer='Conv2d', + std=0.01, + override=dict( + type='Normal', + name='conv_loc', + std=0.01, + bias_prob=0.01)), + **kwargs): + super(GARPNHead, self).__init__( + 1, in_channels, init_cfg=init_cfg, **kwargs) + + def _init_layers(self): + """Initialize layers of the head.""" + self.rpn_conv = nn.Conv2d( + self.in_channels, self.feat_channels, 3, padding=1) + super(GARPNHead, self)._init_layers() + + def forward_single(self, x): + """Forward feature of a single scale level.""" + + x = self.rpn_conv(x) + x = F.relu(x, inplace=True) + (cls_score, bbox_pred, shape_pred, + loc_pred) = super(GARPNHead, self).forward_single(x) + return cls_score, bbox_pred, shape_pred, loc_pred + + def loss(self, + cls_scores, + bbox_preds, + shape_preds, + loc_preds, + gt_bboxes, + img_metas, + gt_bboxes_ignore=None): + losses = super(GARPNHead, self).loss( + cls_scores, + bbox_preds, + shape_preds, + loc_preds, + gt_bboxes, + None, + img_metas, + gt_bboxes_ignore=gt_bboxes_ignore) + return dict( + loss_rpn_cls=losses['loss_cls'], + loss_rpn_bbox=losses['loss_bbox'], + loss_anchor_shape=losses['loss_shape'], + loss_anchor_loc=losses['loss_loc']) + + def _get_bboxes_single(self, + cls_scores, + bbox_preds, + mlvl_anchors, + mlvl_masks, + img_shape, + scale_factor, + cfg, + rescale=False): + cfg = self.test_cfg if cfg is None else cfg + + cfg = copy.deepcopy(cfg) + + # deprecate arguments warning + if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: + warnings.warn( + 'In rpn_proposal or test_cfg, ' + 'nms_thr has been moved to a dict named nms as ' + 'iou_threshold, max_num has been renamed as max_per_img, ' + 'name of original arguments and the way to specify ' + 'iou_threshold of NMS will be deprecated.') + if 'nms' not in cfg: + cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) + if 'max_num' in cfg: + if 'max_per_img' in cfg: + 
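The deprecation handling that starts above (and continues below) rewrites legacy `nms_thr`/`max_num` test configs into the newer `nms` dict plus `max_per_img`; a sketch with hypothetical config values:

from mmcv import ConfigDict

legacy = ConfigDict(dict(nms_pre=2000, nms_post=1000, max_num=300,
                         nms_thr=0.7, min_bbox_size=0))
if 'nms' not in legacy:
    legacy.nms = ConfigDict(dict(type='nms', iou_threshold=legacy.nms_thr))
if 'max_num' in legacy and 'max_per_img' not in legacy:
    legacy.max_per_img = legacy.max_num
print(legacy.nms.iou_threshold, legacy.max_per_img)  # 0.7 300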
+                assert cfg.max_num == cfg.max_per_img, f'You ' \
+                    f'set max_num and max_per_img at the same time, ' \
+                    f'but get {cfg.max_num} ' \
+                    f'and {cfg.max_per_img} respectively. ' \
+                    'Please delete max_num which will be deprecated.'
+            else:
+                cfg.max_per_img = cfg.max_num
+        if 'nms_thr' in cfg:
+            assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \
+                f'iou_threshold in nms and ' \
+                f'nms_thr at the same time, but get ' \
+                f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \
+                f' respectively. Please delete the ' \
+                f'nms_thr which will be deprecated.'
+
+        assert cfg.nms.get('type', 'nms') == 'nms', 'GARPNHead only supports ' \
+            'naive nms.'
+
+        mlvl_proposals = []
+        for idx in range(len(cls_scores)):
+            rpn_cls_score = cls_scores[idx]
+            rpn_bbox_pred = bbox_preds[idx]
+            anchors = mlvl_anchors[idx]
+            mask = mlvl_masks[idx]
+            assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
+            # if no location is kept, end.
+            if mask.sum() == 0:
+                continue
+            rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
+            if self.use_sigmoid_cls:
+                rpn_cls_score = rpn_cls_score.reshape(-1)
+                scores = rpn_cls_score.sigmoid()
+            else:
+                rpn_cls_score = rpn_cls_score.reshape(-1, 2)
+                # remind that we set FG labels to [0, num_class-1]
+                # since mmdet v2.0
+                # BG cat_id: num_class
+                scores = rpn_cls_score.softmax(dim=1)[:, :-1]
+            # filter scores, bbox_pred w.r.t. mask.
+            # anchors are filtered in get_anchors() beforehand.
+            scores = scores[mask]
+            rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1,
+                                                                   4)[mask, :]
+            if scores.dim() == 0:
+                rpn_bbox_pred = rpn_bbox_pred.unsqueeze(0)
+                anchors = anchors.unsqueeze(0)
+                scores = scores.unsqueeze(0)
+            # filter anchors, bbox_pred, scores w.r.t. scores
+            if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
+                _, topk_inds = scores.topk(cfg.nms_pre)
+                rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
+                anchors = anchors[topk_inds, :]
+                scores = scores[topk_inds]
+            # get proposals w.r.t. anchors and rpn_bbox_pred
+            proposals = self.bbox_coder.decode(
+                anchors, rpn_bbox_pred, max_shape=img_shape)
+            # filter out too small bboxes
+            if cfg.min_bbox_size >= 0:
+                w = proposals[:, 2] - proposals[:, 0]
+                h = proposals[:, 3] - proposals[:, 1]
+                valid_inds = torch.nonzero(
+                    (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size),
+                    as_tuple=False).squeeze()
+                proposals = proposals[valid_inds, :]
+                scores = scores[valid_inds]
+            # NMS in current level
+            proposals, _ = nms(proposals, scores, cfg.nms.iou_threshold)
+            proposals = proposals[:cfg.nms_post, :]
+            mlvl_proposals.append(proposals)
+        proposals = torch.cat(mlvl_proposals, 0)
+        if cfg.get('nms_across_levels', False):
+            # NMS across multi levels
+            proposals, _ = nms(proposals[:, :4], proposals[:, -1],
+                               cfg.nms.iou_threshold)
+            proposals = proposals[:cfg.max_per_img, :]
+        else:
+            scores = proposals[:, 4]
+            num = min(cfg.max_per_img, proposals.shape[0])
+            _, topk_inds = scores.topk(num)
+            proposals = proposals[topk_inds, :]
+        return proposals
diff --git a/mmcv/models/dense_heads/guided_anchor_head.py b/mmcv/models/dense_heads/guided_anchor_head.py
new file mode 100644
index 0000000..b36b957
--- /dev/null
+++ b/mmcv/models/dense_heads/guided_anchor_head.py
@@ -0,0 +1,862 @@
+import torch
+import torch.nn as nn
+# from mmcv.ops import DeformConv2d, MaskedConv2d
+from mmcv.ops.deform_conv import DeformConv2d
+from mmcv.ops.masked_conv import MaskedConv2d
+from mmcv.models.backbones import BaseModule
+from mmcv.utils import force_fp32
+
+
+from mmcv.core.anchor import anchor_inside_flags, build_anchor_generator, images_to_levels, calc_region
+from mmcv.core.utils import multi_apply, reduce_mean, unmap
+# `build_bbox_coder` is required below for the anchor and bbox coders
+from mmcv.core.bbox.builder import build_assigner, build_bbox_coder, build_sampler
+from mmcv.core.post_processing.bbox_nms import multiclass_nms
+from ..builder import HEADS, build_loss
+from .anchor_head import AnchorHead
+
+
+class FeatureAdaption(BaseModule):
+    """Feature Adaption Module.
+
+    Feature Adaption Module is implemented based on DCN v1.
+    It uses anchor shape prediction rather than feature map to
+    predict offsets of deform conv layer.
+
+    Args:
+        in_channels (int): Number of channels in the input feature map.
+        out_channels (int): Number of channels in the output feature map.
+        kernel_size (int): Deformable conv kernel size.
+        deform_groups (int): Deformable conv group size.
+        init_cfg (dict or list[dict], optional): Initialization config dict.
+    """
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size=3,
+                 deform_groups=4,
+                 init_cfg=dict(
+                     type='Normal',
+                     layer='Conv2d',
+                     std=0.1,
+                     override=dict(
+                         type='Normal', name='conv_adaption', std=0.01))):
+        super(FeatureAdaption, self).__init__(init_cfg)
+        offset_channels = kernel_size * kernel_size * 2
+        self.conv_offset = nn.Conv2d(
+            2, deform_groups * offset_channels, 1, bias=False)
+        self.conv_adaption = DeformConv2d(
+            in_channels,
+            out_channels,
+            kernel_size=kernel_size,
+            padding=(kernel_size - 1) // 2,
+            deform_groups=deform_groups)
+        self.relu = nn.ReLU(inplace=True)
+
+    def forward(self, x, shape):
+        offset = self.conv_offset(shape.detach())
+        x = self.relu(self.conv_adaption(x, offset))
+        return x
+
+
+@HEADS.register_module()
+class GuidedAnchorHead(AnchorHead):
+    """Guided-Anchor-based head (GA-RPN, GA-RetinaNet, etc.).
+
+    This GuidedAnchorHead will predict high-quality feature guided
+    anchors and locations where anchors will be kept in inference.
+    There are mainly 3 categories of bounding-boxes.
+
+    - Sampled 9 pairs for target assignment.
(approxes) + - The square boxes where the predicted anchors are based on. (squares) + - Guided anchors. + + Please refer to https://arxiv.org/abs/1901.03278 for more details. + + Args: + num_classes (int): Number of classes. + in_channels (int): Number of channels in the input feature map. + feat_channels (int): Number of hidden channels. + approx_anchor_generator (dict): Config dict for approx generator + square_anchor_generator (dict): Config dict for square generator + anchor_coder (dict): Config dict for anchor coder + bbox_coder (dict): Config dict for bbox coder + reg_decoded_bbox (bool): If true, the regression loss would be + applied directly on decoded bounding boxes, converting both + the predicted boxes and regression targets to absolute + coordinates format. Default False. It should be `True` when + using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. + deform_groups: (int): Group number of DCN in + FeatureAdaption module. + loc_filter_thr (float): Threshold to filter out unconcerned regions. + loss_loc (dict): Config of location loss. + loss_shape (dict): Config of anchor shape loss. + loss_cls (dict): Config of classification loss. + loss_bbox (dict): Config of bbox regression loss. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__( + self, + num_classes, + in_channels, + feat_channels=256, + approx_anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=8, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + square_anchor_generator=dict( + type='AnchorGenerator', + ratios=[1.0], + scales=[8], + strides=[4, 8, 16, 32, 64]), + anchor_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0] + ), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0] + ), + reg_decoded_bbox=False, + deform_groups=4, + loc_filter_thr=0.01, + train_cfg=None, + test_cfg=None, + loss_loc=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0), + init_cfg=dict(type='Normal', layer='Conv2d', std=0.01, + override=dict(type='Normal', + name='conv_loc', + std=0.01, + bias_prob=0.01))): # yapf: disable + super(AnchorHead, self).__init__(init_cfg) + self.in_channels = in_channels + self.num_classes = num_classes + self.feat_channels = feat_channels + self.deform_groups = deform_groups + self.loc_filter_thr = loc_filter_thr + + # build approx_anchor_generator and square_anchor_generator + assert (approx_anchor_generator['octave_base_scale'] == + square_anchor_generator['scales'][0]) + assert (approx_anchor_generator['strides'] == + square_anchor_generator['strides']) + self.approx_anchor_generator = build_anchor_generator( + approx_anchor_generator) + self.square_anchor_generator = build_anchor_generator( + square_anchor_generator) + self.approxs_per_octave = self.approx_anchor_generator \ + .num_base_anchors[0] + + self.reg_decoded_bbox = reg_decoded_bbox + + # one anchor per location + self.num_anchors = 1 + self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) + self.loc_focal_loss = loss_loc['type'] in ['FocalLoss'] + self.sampling = loss_cls['type'] not in ['FocalLoss'] + self.ga_sampling = train_cfg is not None and hasattr( + train_cfg, 
'ga_sampler') + if self.use_sigmoid_cls: + self.cls_out_channels = self.num_classes + else: + self.cls_out_channels = self.num_classes + 1 + + # build bbox_coder + self.anchor_coder = build_bbox_coder(anchor_coder) + self.bbox_coder = build_bbox_coder(bbox_coder) + + # build losses + self.loss_loc = build_loss(loss_loc) + self.loss_shape = build_loss(loss_shape) + self.loss_cls = build_loss(loss_cls) + self.loss_bbox = build_loss(loss_bbox) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + if self.train_cfg: + self.assigner = build_assigner(self.train_cfg.assigner) + # use PseudoSampler when sampling is False + if self.sampling and hasattr(self.train_cfg, 'sampler'): + sampler_cfg = self.train_cfg.sampler + else: + sampler_cfg = dict(type='PseudoSampler') + self.sampler = build_sampler(sampler_cfg, context=self) + + self.ga_assigner = build_assigner(self.train_cfg.ga_assigner) + if self.ga_sampling: + ga_sampler_cfg = self.train_cfg.ga_sampler + else: + ga_sampler_cfg = dict(type='PseudoSampler') + self.ga_sampler = build_sampler(ga_sampler_cfg, context=self) + + self.fp16_enabled = False + + self._init_layers() + + def _init_layers(self): + self.relu = nn.ReLU(inplace=True) + self.conv_loc = nn.Conv2d(self.in_channels, 1, 1) + self.conv_shape = nn.Conv2d(self.in_channels, self.num_anchors * 2, 1) + self.feature_adaption = FeatureAdaption( + self.in_channels, + self.feat_channels, + kernel_size=3, + deform_groups=self.deform_groups) + self.conv_cls = MaskedConv2d(self.feat_channels, + self.num_anchors * self.cls_out_channels, + 1) + self.conv_reg = MaskedConv2d(self.feat_channels, self.num_anchors * 4, + 1) + + def forward_single(self, x): + loc_pred = self.conv_loc(x) + shape_pred = self.conv_shape(x) + x = self.feature_adaption(x, shape_pred) + # masked conv is only used during inference for speed-up + if not self.training: + mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr + else: + mask = None + cls_score = self.conv_cls(x, mask) + bbox_pred = self.conv_reg(x, mask) + return cls_score, bbox_pred, shape_pred, loc_pred + + def forward(self, feats): + return multi_apply(self.forward_single, feats) + + def get_sampled_approxs(self, featmap_sizes, img_metas, device='cuda'): + """Get sampled approxs and inside flags according to feature map sizes. + + Args: + featmap_sizes (list[tuple]): Multi-level feature map sizes. + img_metas (list[dict]): Image meta info. 
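The `anchor_coder` built above is later used in `_get_guided_anchors_single` to decode guided anchors from the square base boxes plus predicted `(dw, dh)` deltas only, with `dx = dy = 0`. A standalone sketch of that decode, mirroring `DeltaXYWHBBoxCoder` with unit means/stds (illustrative numbers, not the exact coder):

import torch

def decode_wh(squares, dwh):
    # squares: (n, 4) x1, y1, x2, y2; dwh: (n, 2) log-scale factors
    cx = (squares[:, 0] + squares[:, 2]) * 0.5
    cy = (squares[:, 1] + squares[:, 3]) * 0.5
    w = squares[:, 2] - squares[:, 0]
    h = squares[:, 3] - squares[:, 1]
    nw = w * dwh[:, 0].exp()                 # centers stay fixed, only
    nh = h * dwh[:, 1].exp()                 # width/height are adapted
    return torch.stack([cx - nw / 2, cy - nh / 2,
                        cx + nw / 2, cy + nh / 2], dim=-1)

squares = torch.tensor([[0., 0., 8., 8.]])
print(decode_wh(squares, torch.tensor([[0.5, -0.5]])))  # wider, shorter box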
+ device (torch.device | str): device for returned tensors + + Returns: + tuple: approxes of each image, inside flags of each image + """ + num_imgs = len(img_metas) + + # since feature map sizes of all images are the same, we only compute + # approxes for one time + multi_level_approxs = self.approx_anchor_generator.grid_anchors( + featmap_sizes, device=device) + approxs_list = [multi_level_approxs for _ in range(num_imgs)] + + # for each image, we compute inside flags of multi level approxes + inside_flag_list = [] + for img_id, img_meta in enumerate(img_metas): + multi_level_flags = [] + multi_level_approxs = approxs_list[img_id] + + # obtain valid flags for each approx first + multi_level_approx_flags = self.approx_anchor_generator \ + .valid_flags(featmap_sizes, + img_meta['pad_shape'], + device=device) + + for i, flags in enumerate(multi_level_approx_flags): + approxs = multi_level_approxs[i] + inside_flags_list = [] + for i in range(self.approxs_per_octave): + split_valid_flags = flags[i::self.approxs_per_octave] + split_approxs = approxs[i::self.approxs_per_octave, :] + inside_flags = anchor_inside_flags( + split_approxs, split_valid_flags, + img_meta['img_shape'][:2], + self.train_cfg.allowed_border) + inside_flags_list.append(inside_flags) + # inside_flag for a position is true if any anchor in this + # position is true + inside_flags = ( + torch.stack(inside_flags_list, 0).sum(dim=0) > 0) + multi_level_flags.append(inside_flags) + inside_flag_list.append(multi_level_flags) + return approxs_list, inside_flag_list + + def get_anchors(self, + featmap_sizes, + shape_preds, + loc_preds, + img_metas, + use_loc_filter=False, + device='cuda'): + """Get squares according to feature map sizes and guided anchors. + + Args: + featmap_sizes (list[tuple]): Multi-level feature map sizes. + shape_preds (list[tensor]): Multi-level shape predictions. + loc_preds (list[tensor]): Multi-level location predictions. + img_metas (list[dict]): Image meta info. + use_loc_filter (bool): Use loc filter or not. + device (torch.device | str): device for returned tensors + + Returns: + tuple: square approxs of each image, guided anchors of each image, + loc masks of each image + """ + num_imgs = len(img_metas) + num_levels = len(featmap_sizes) + + # since feature map sizes of all images are the same, we only compute + # squares for one time + multi_level_squares = self.square_anchor_generator.grid_anchors( + featmap_sizes, device=device) + squares_list = [multi_level_squares for _ in range(num_imgs)] + + # for each image, we compute multi level guided anchors + guided_anchors_list = [] + loc_mask_list = [] + for img_id, img_meta in enumerate(img_metas): + multi_level_guided_anchors = [] + multi_level_loc_mask = [] + for i in range(num_levels): + squares = squares_list[img_id][i] + shape_pred = shape_preds[i][img_id] + loc_pred = loc_preds[i][img_id] + guided_anchors, loc_mask = self._get_guided_anchors_single( + squares, + shape_pred, + loc_pred, + use_loc_filter=use_loc_filter) + multi_level_guided_anchors.append(guided_anchors) + multi_level_loc_mask.append(loc_mask) + guided_anchors_list.append(multi_level_guided_anchors) + loc_mask_list.append(multi_level_loc_mask) + return squares_list, guided_anchors_list, loc_mask_list + + def _get_guided_anchors_single(self, + squares, + shape_pred, + loc_pred, + use_loc_filter=False): + """Get guided anchors and loc masks for a single level. + + Args: + square (tensor): Squares of a single level. + shape_pred (tensor): Shape predictions of a single level. 
+ loc_pred (tensor): Loc predictions of a single level. + use_loc_filter (list[tensor]): Use loc filter or not. + + Returns: + tuple: guided anchors, location masks + """ + # calculate location filtering mask + loc_pred = loc_pred.sigmoid().detach() + if use_loc_filter: + loc_mask = loc_pred >= self.loc_filter_thr + else: + loc_mask = loc_pred >= 0.0 + mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_anchors) + mask = mask.contiguous().view(-1) + # calculate guided anchors + squares = squares[mask] + anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view( + -1, 2).detach()[mask] + bbox_deltas = anchor_deltas.new_full(squares.size(), 0) + bbox_deltas[:, 2:] = anchor_deltas + guided_anchors = self.anchor_coder.decode( + squares, bbox_deltas, wh_ratio_clip=1e-6) + return guided_anchors, mask + + def ga_loc_targets(self, gt_bboxes_list, featmap_sizes): + """Compute location targets for guided anchoring. + + Each feature map is divided into positive, negative and ignore regions. + - positive regions: target 1, weight 1 + - ignore regions: target 0, weight 0 + - negative regions: target 0, weight 0.1 + + Args: + gt_bboxes_list (list[Tensor]): Gt bboxes of each image. + featmap_sizes (list[tuple]): Multi level sizes of each feature + maps. + + Returns: + tuple + """ + anchor_scale = self.approx_anchor_generator.octave_base_scale + anchor_strides = self.approx_anchor_generator.strides + # Currently only supports same stride in x and y direction. + for stride in anchor_strides: + assert (stride[0] == stride[1]) + anchor_strides = [stride[0] for stride in anchor_strides] + + center_ratio = self.train_cfg.center_ratio + ignore_ratio = self.train_cfg.ignore_ratio + img_per_gpu = len(gt_bboxes_list) + num_lvls = len(featmap_sizes) + r1 = (1 - center_ratio) / 2 + r2 = (1 - ignore_ratio) / 2 + all_loc_targets = [] + all_loc_weights = [] + all_ignore_map = [] + for lvl_id in range(num_lvls): + h, w = featmap_sizes[lvl_id] + loc_targets = torch.zeros( + img_per_gpu, + 1, + h, + w, + device=gt_bboxes_list[0].device, + dtype=torch.float32) + loc_weights = torch.full_like(loc_targets, -1) + ignore_map = torch.zeros_like(loc_targets) + all_loc_targets.append(loc_targets) + all_loc_weights.append(loc_weights) + all_ignore_map.append(ignore_map) + for img_id in range(img_per_gpu): + gt_bboxes = gt_bboxes_list[img_id] + scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * + (gt_bboxes[:, 3] - gt_bboxes[:, 1])) + min_anchor_size = scale.new_full( + (1, ), float(anchor_scale * anchor_strides[0])) + # assign gt bboxes to different feature levels w.r.t. 
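In `ga_loc_targets` below, each ground-truth box mapped onto the feature map is shrunk around its center to form the positive and ignore regions described above; passing r1 = (1 - center_ratio) / 2 to `calc_region` keeps the central (1 - 2*r1) = center_ratio fraction. A simplified standalone sketch; the real helper is `calc_region` from `mmcv.core.anchor`, and the ratio values below are assumed typical GA config values:

import torch

def central_region(box, keep_ratio):
    # box: (4,) x1, y1, x2, y2 on the feature map; keep the central fraction
    x1, y1, x2, y2 = box
    cx, cy = (x1 + x2) / 2, (y1 + y2) / 2
    w, h = (x2 - x1) * keep_ratio, (y2 - y1) * keep_ratio
    return torch.tensor([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2])

gt_on_feat = torch.tensor([4., 4., 12., 12.])   # gt box divided by the stride
center_ratio, ignore_ratio = 0.2, 0.5           # assumed typical GA values
print(central_region(gt_on_feat, center_ratio))  # positive: central 20%
print(central_region(gt_on_feat, ignore_ratio))  # ignore: central 50%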
their scales + target_lvls = torch.floor( + torch.log2(scale) - torch.log2(min_anchor_size) + 0.5) + target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long() + for gt_id in range(gt_bboxes.size(0)): + lvl = target_lvls[gt_id].item() + # rescaled to corresponding feature map + gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl] + # calculate ignore regions + ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( + gt_, r2, featmap_sizes[lvl]) + # calculate positive (center) regions + ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region( + gt_, r1, featmap_sizes[lvl]) + all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, + ctr_x1:ctr_x2 + 1] = 1 + all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + 1, + ignore_x1:ignore_x2 + 1] = 0 + all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, + ctr_x1:ctr_x2 + 1] = 1 + # calculate ignore map on nearby low level feature + if lvl > 0: + d_lvl = lvl - 1 + # rescaled to corresponding feature map + gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl] + ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( + gt_, r2, featmap_sizes[d_lvl]) + all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, + ignore_x1:ignore_x2 + 1] = 1 + # calculate ignore map on nearby high level feature + if lvl < num_lvls - 1: + u_lvl = lvl + 1 + # rescaled to corresponding feature map + gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl] + ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( + gt_, r2, featmap_sizes[u_lvl]) + all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, + ignore_x1:ignore_x2 + 1] = 1 + for lvl_id in range(num_lvls): + # ignore negative regions w.r.t. ignore map + all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0) + & (all_ignore_map[lvl_id] > 0)] = 0 + # set negative regions with weight 0.1 + all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1 + # loc average factor to balance loss + loc_avg_factor = sum( + [t.size(0) * t.size(-1) * t.size(-2) + for t in all_loc_targets]) / 200 + return all_loc_targets, all_loc_weights, loc_avg_factor + + def _ga_shape_target_single(self, + flat_approxs, + inside_flags, + flat_squares, + gt_bboxes, + gt_bboxes_ignore, + img_meta, + unmap_outputs=True): + """Compute guided anchoring targets. + + This function returns sampled anchors and gt bboxes directly + rather than calculates regression targets. + + Args: + flat_approxs (Tensor): flat approxs of a single image, + shape (n, 4) + inside_flags (Tensor): inside flags of a single image, + shape (n, ). + flat_squares (Tensor): flat squares of a single image, + shape (approxs_per_octave * n, 4) + gt_bboxes (Tensor): Ground truth bboxes of a single image. + img_meta (dict): Meta info of a single image. + approxs_per_octave (int): number of approxs per octave + cfg (dict): RPN train configs. + unmap_outputs (bool): unmap outputs or not. 
+ + Returns: + tuple + """ + if not inside_flags.any(): + return (None, ) * 5 + # assign gt and sample anchors + expand_inside_flags = inside_flags[:, None].expand( + -1, self.approxs_per_octave).reshape(-1) + approxs = flat_approxs[expand_inside_flags, :] + squares = flat_squares[inside_flags, :] + + assign_result = self.ga_assigner.assign(approxs, squares, + self.approxs_per_octave, + gt_bboxes, gt_bboxes_ignore) + sampling_result = self.ga_sampler.sample(assign_result, squares, + gt_bboxes) + + bbox_anchors = torch.zeros_like(squares) + bbox_gts = torch.zeros_like(squares) + bbox_weights = torch.zeros_like(squares) + + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + if len(pos_inds) > 0: + bbox_anchors[pos_inds, :] = sampling_result.pos_bboxes + bbox_gts[pos_inds, :] = sampling_result.pos_gt_bboxes + bbox_weights[pos_inds, :] = 1.0 + + # map up to original set of anchors + if unmap_outputs: + num_total_anchors = flat_squares.size(0) + bbox_anchors = unmap(bbox_anchors, num_total_anchors, inside_flags) + bbox_gts = unmap(bbox_gts, num_total_anchors, inside_flags) + bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) + + return (bbox_anchors, bbox_gts, bbox_weights, pos_inds, neg_inds) + + def ga_shape_targets(self, + approx_list, + inside_flag_list, + square_list, + gt_bboxes_list, + img_metas, + gt_bboxes_ignore_list=None, + unmap_outputs=True): + """Compute guided anchoring targets. + + Args: + approx_list (list[list]): Multi level approxs of each image. + inside_flag_list (list[list]): Multi level inside flags of each + image. + square_list (list[list]): Multi level squares of each image. + gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. + img_metas (list[dict]): Meta info of each image. + gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes. + unmap_outputs (bool): unmap outputs or not. + + Returns: + tuple + """ + num_imgs = len(img_metas) + assert len(approx_list) == len(inside_flag_list) == len( + square_list) == num_imgs + # anchor number of multi levels + num_level_squares = [squares.size(0) for squares in square_list[0]] + # concat all level anchors and flags to a single tensor + inside_flag_flat_list = [] + approx_flat_list = [] + square_flat_list = [] + for i in range(num_imgs): + assert len(square_list[i]) == len(inside_flag_list[i]) + inside_flag_flat_list.append(torch.cat(inside_flag_list[i])) + approx_flat_list.append(torch.cat(approx_list[i])) + square_flat_list.append(torch.cat(square_list[i])) + + # compute targets for each image + if gt_bboxes_ignore_list is None: + gt_bboxes_ignore_list = [None for _ in range(num_imgs)] + (all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list, + neg_inds_list) = multi_apply( + self._ga_shape_target_single, + approx_flat_list, + inside_flag_flat_list, + square_flat_list, + gt_bboxes_list, + gt_bboxes_ignore_list, + img_metas, + unmap_outputs=unmap_outputs) + # no valid anchors + if any([bbox_anchors is None for bbox_anchors in all_bbox_anchors]): + return None + # sampled anchors of all images + num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) + num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) + # split targets to a list w.r.t. 
multiple levels + bbox_anchors_list = images_to_levels(all_bbox_anchors, + num_level_squares) + bbox_gts_list = images_to_levels(all_bbox_gts, num_level_squares) + bbox_weights_list = images_to_levels(all_bbox_weights, + num_level_squares) + return (bbox_anchors_list, bbox_gts_list, bbox_weights_list, + num_total_pos, num_total_neg) + + def loss_shape_single(self, shape_pred, bbox_anchors, bbox_gts, + anchor_weights, anchor_total_num): + shape_pred = shape_pred.permute(0, 2, 3, 1).contiguous().view(-1, 2) + bbox_anchors = bbox_anchors.contiguous().view(-1, 4) + bbox_gts = bbox_gts.contiguous().view(-1, 4) + anchor_weights = anchor_weights.contiguous().view(-1, 4) + bbox_deltas = bbox_anchors.new_full(bbox_anchors.size(), 0) + bbox_deltas[:, 2:] += shape_pred + # filter out negative samples to speed-up weighted_bounded_iou_loss + inds = torch.nonzero( + anchor_weights[:, 0] > 0, as_tuple=False).squeeze(1) + bbox_deltas_ = bbox_deltas[inds] + bbox_anchors_ = bbox_anchors[inds] + bbox_gts_ = bbox_gts[inds] + anchor_weights_ = anchor_weights[inds] + pred_anchors_ = self.anchor_coder.decode( + bbox_anchors_, bbox_deltas_, wh_ratio_clip=1e-6) + loss_shape = self.loss_shape( + pred_anchors_, + bbox_gts_, + anchor_weights_, + avg_factor=anchor_total_num) + return loss_shape + + def loss_loc_single(self, loc_pred, loc_target, loc_weight, + loc_avg_factor): + loss_loc = self.loss_loc( + loc_pred.reshape(-1, 1), + loc_target.reshape(-1).long(), + loc_weight.reshape(-1), + avg_factor=loc_avg_factor) + return loss_loc + + @force_fp32( + apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds')) + def loss(self, + cls_scores, + bbox_preds, + shape_preds, + loc_preds, + gt_bboxes, + gt_labels, + img_metas, + gt_bboxes_ignore=None): + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == self.approx_anchor_generator.num_levels + + device = cls_scores[0].device + + # get loc targets + loc_targets, loc_weights, loc_avg_factor = self.ga_loc_targets( + gt_bboxes, featmap_sizes) + + # get sampled approxes + approxs_list, inside_flag_list = self.get_sampled_approxs( + featmap_sizes, img_metas, device=device) + # get squares and guided anchors + squares_list, guided_anchors_list, _ = self.get_anchors( + featmap_sizes, shape_preds, loc_preds, img_metas, device=device) + + # get shape targets + shape_targets = self.ga_shape_targets(approxs_list, inside_flag_list, + squares_list, gt_bboxes, + img_metas) + if shape_targets is None: + return None + (bbox_anchors_list, bbox_gts_list, anchor_weights_list, anchor_fg_num, + anchor_bg_num) = shape_targets + anchor_total_num = ( + anchor_fg_num if not self.ga_sampling else anchor_fg_num + + anchor_bg_num) + + # get anchor targets + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + cls_reg_targets = self.get_targets( + guided_anchors_list, + inside_flag_list, + gt_bboxes, + img_metas, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=label_channels) + if cls_reg_targets is None: + return None + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + num_total_samples = ( + num_total_pos + num_total_neg if self.sampling else num_total_pos) + + # anchor number of multi levels + num_level_anchors = [ + anchors.size(0) for anchors in guided_anchors_list[0] + ] + # concat all level anchors to a single tensor + concat_anchor_list = [] + for i in range(len(guided_anchors_list)): + 
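`images_to_levels` above regroups per-image target tensors into per-level tensors using the per-level anchor counts; a minimal standalone reimplementation for illustration (toy sizes, not the library helper itself):

import torch

def to_levels(per_image, num_level_anchors):
    stacked = torch.stack(per_image, dim=0)        # (num_imgs, total, ...)
    out, start = [], 0
    for n in num_level_anchors:
        out.append(stacked[:, start:start + n])
        start += n
    return out

targets = [torch.arange(6.), torch.arange(6., 12.)]  # two images, 6 anchors
levels = to_levels(targets, [4, 2])                  # level 0: 4, level 1: 2
print([lv.shape for lv in levels])  # [torch.Size([2, 4]), torch.Size([2, 2])]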
concat_anchor_list.append(torch.cat(guided_anchors_list[i])) + all_anchor_list = images_to_levels(concat_anchor_list, + num_level_anchors) + + # get classification and bbox regression losses + losses_cls, losses_bbox = multi_apply( + self.loss_single, + cls_scores, + bbox_preds, + all_anchor_list, + labels_list, + label_weights_list, + bbox_targets_list, + bbox_weights_list, + num_total_samples=num_total_samples) + + # get anchor location loss + losses_loc = [] + for i in range(len(loc_preds)): + loss_loc = self.loss_loc_single( + loc_preds[i], + loc_targets[i], + loc_weights[i], + loc_avg_factor=loc_avg_factor) + losses_loc.append(loss_loc) + + # get anchor shape loss + losses_shape = [] + for i in range(len(shape_preds)): + loss_shape = self.loss_shape_single( + shape_preds[i], + bbox_anchors_list[i], + bbox_gts_list[i], + anchor_weights_list[i], + anchor_total_num=anchor_total_num) + losses_shape.append(loss_shape) + + return dict( + loss_cls=losses_cls, + loss_bbox=losses_bbox, + loss_shape=losses_shape, + loss_loc=losses_loc) + + @force_fp32( + apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds')) + def get_bboxes(self, + cls_scores, + bbox_preds, + shape_preds, + loc_preds, + img_metas, + cfg=None, + rescale=False): + assert len(cls_scores) == len(bbox_preds) == len(shape_preds) == len( + loc_preds) + num_levels = len(cls_scores) + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + device = cls_scores[0].device + # get guided anchors + _, guided_anchors, loc_masks = self.get_anchors( + featmap_sizes, + shape_preds, + loc_preds, + img_metas, + use_loc_filter=not self.training, + device=device) + result_list = [] + for img_id in range(len(img_metas)): + cls_score_list = [ + cls_scores[i][img_id].detach() for i in range(num_levels) + ] + bbox_pred_list = [ + bbox_preds[i][img_id].detach() for i in range(num_levels) + ] + guided_anchor_list = [ + guided_anchors[img_id][i].detach() for i in range(num_levels) + ] + loc_mask_list = [ + loc_masks[img_id][i].detach() for i in range(num_levels) + ] + img_shape = img_metas[img_id]['img_shape'] + scale_factor = img_metas[img_id]['scale_factor'] + proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list, + guided_anchor_list, + loc_mask_list, img_shape, + scale_factor, cfg, rescale) + result_list.append(proposals) + return result_list + + def _get_bboxes_single(self, + cls_scores, + bbox_preds, + mlvl_anchors, + mlvl_masks, + img_shape, + scale_factor, + cfg, + rescale=False): + cfg = self.test_cfg if cfg is None else cfg + assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors) + mlvl_bboxes = [] + mlvl_scores = [] + for cls_score, bbox_pred, anchors, mask in zip(cls_scores, bbox_preds, + mlvl_anchors, + mlvl_masks): + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + # if no location is kept, end. + if mask.sum() == 0: + continue + # reshape scores and bbox_pred + cls_score = cls_score.permute(1, 2, + 0).reshape(-1, self.cls_out_channels) + if self.use_sigmoid_cls: + scores = cls_score.sigmoid() + else: + scores = cls_score.softmax(-1) + bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) + # filter scores, bbox_pred w.r.t. mask. + # anchors are filtered in get_anchors() beforehand. + scores = scores[mask, :] + bbox_pred = bbox_pred[mask, :] + if scores.dim() == 0: + anchors = anchors.unsqueeze(0) + scores = scores.unsqueeze(0) + bbox_pred = bbox_pred.unsqueeze(0) + # filter anchors, bbox_pred, scores w.r.t. 
scores + nms_pre = cfg.get('nms_pre', -1) + if nms_pre > 0 and scores.shape[0] > nms_pre: + if self.use_sigmoid_cls: + max_scores, _ = scores.max(dim=1) + else: + # remind that we set FG labels to [0, num_class-1] + # since mmcv v2.0 + # BG cat_id: num_class + max_scores, _ = scores[:, :-1].max(dim=1) + _, topk_inds = max_scores.topk(nms_pre) + anchors = anchors[topk_inds, :] + bbox_pred = bbox_pred[topk_inds, :] + scores = scores[topk_inds, :] + bboxes = self.bbox_coder.decode( + anchors, bbox_pred, max_shape=img_shape) + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + mlvl_bboxes = torch.cat(mlvl_bboxes) + if rescale: + mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) + mlvl_scores = torch.cat(mlvl_scores) + if self.use_sigmoid_cls: + # Add a dummy background class to the backend when using sigmoid + # remind that we set FG labels to [0, num_class-1] since mmcv v2.0 + # BG cat_id: num_class + padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) + mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) + # multi class NMS + det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores, + cfg.score_thr, cfg.nms, + cfg.max_per_img) + return det_bboxes, det_labels diff --git a/mmcv/models/dense_heads/motion_head.py b/mmcv/models/dense_heads/motion_head.py new file mode 100644 index 0000000..859c3ff --- /dev/null +++ b/mmcv/models/dense_heads/motion_head.py @@ -0,0 +1,560 @@ +#---------------------------------------------------------------------------------# +# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156) # +# Source code: https://github.com/OpenDriveLab/UniAD # +# Copyright (c) OpenDriveLab. All rights reserved. # +#---------------------------------------------------------------------------------# + +import torch +import copy +from mmcv.models import HEADS +from mmcv.utils import force_fp32, auto_fp16 +from mmcv.models.utils.functional import ( + bivariate_gaussian_activation, + norm_points, + pos2posemb2d, + anchor_coordinate_transform +) +from .motion_head_plugin.motion_utils import nonlinear_smoother +from .motion_head_plugin.base_motion_head import BaseMotionHead + + +@HEADS.register_module() +class MotionHead(BaseMotionHead): + """ + MotionHead module for a neural network, which predicts motion trajectories and is used in an autonomous driving task. + + Args: + *args: Variable length argument list. + predict_steps (int): The number of steps to predict motion trajectories. + transformerlayers (dict): A dictionary defining the configuration of transformer layers. + bbox_coder: An instance of a bbox coder to be used for encoding/decoding boxes. + num_cls_fcs (int): The number of fully-connected layers in the classification branch. + bev_h (int): The height of the bird's-eye-view map. + bev_w (int): The width of the bird's-eye-view map. + embed_dims (int): The number of dimensions to use for the query and key vectors in transformer layers. + num_anchor (int): The number of anchor points. + det_layer_num (int): The number of layers in the transformer model. + group_id_list (list): A list of group IDs to use for grouping the classes. + pc_range: The range of the point cloud. + use_nonlinear_optimizer (bool): A boolean indicating whether to use a non-linear optimizer for training. + anchor_info_path (str): The path to the file containing the anchor information. 
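As the comment above notes, sigmoid-based heads emit foreground scores only, while `multiclass_nms` expects the background as a trailing column, hence the zero padding. A tiny standalone sketch (toy shapes):

import torch

num_classes = 3
scores = torch.rand(5, num_classes)              # sigmoid scores, FG only
padding = scores.new_zeros(scores.shape[0], 1)   # BG cat_id: num_classes
scores = torch.cat([scores, padding], dim=1)     # (5, num_classes + 1)
print(scores.shape)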
+        vehicle_id_list (list[int]): Class ids of vehicle classes, used for filtering out non-vehicle objects.
+    """
+    def __init__(self,
+                 *args,
+                 predict_steps=12,
+                 transformerlayers=None,
+                 bbox_coder=None,
+                 num_cls_fcs=2,
+                 bev_h=30,
+                 bev_w=30,
+                 embed_dims=256,
+                 num_anchor=6,
+                 det_layer_num=6,
+                 group_id_list=[],
+                 pc_range=None,
+                 use_nonlinear_optimizer=False,
+                 anchor_info_path=None,
+                 loss_traj=dict(),
+                 num_classes=0,
+                 vehicle_id_list=[0, 1, 2, 3, 4, 6, 7],
+                 **kwargs):
+        super(MotionHead, self).__init__()
+
+        self.bev_h = bev_h
+        self.bev_w = bev_w
+        self.num_cls_fcs = num_cls_fcs - 1
+        self.num_reg_fcs = num_cls_fcs - 1
+        self.embed_dims = embed_dims
+        self.num_anchor = num_anchor
+        self.num_anchor_group = len(group_id_list)
+
+        # we merge the classes into groups for anchor assignment
+        self.cls2group = [0 for i in range(num_classes)]
+        for i, grouped_ids in enumerate(group_id_list):
+            for gid in grouped_ids:
+                self.cls2group[gid] = i
+        self.cls2group = torch.tensor(self.cls2group)
+        self.pc_range = pc_range
+        self.predict_steps = predict_steps
+        self.vehicle_id_list = vehicle_id_list
+
+        self.use_nonlinear_optimizer = use_nonlinear_optimizer
+        self._load_anchors(anchor_info_path)
+        self._build_loss(loss_traj)
+        self._build_layers(transformerlayers, det_layer_num)
+        self._init_layers()
+
+    def forward_train(self,
+                      bev_embed,
+                      gt_bboxes_3d,
+                      gt_labels_3d,
+                      gt_fut_traj=None,
+                      gt_fut_traj_mask=None,
+                      gt_sdc_fut_traj=None,
+                      gt_sdc_fut_traj_mask=None,
+                      outs_track={},
+                      outs_seg={}
+                      ):
+        """Forward function of the motion head in training.
+
+        Args:
+            bev_embed (Tensor): BEV feature map with the shape of [B, C, H, W].
+            gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`]): Ground truth \
+                bboxes of each sample.
+            gt_labels_3d (list[torch.Tensor]): Labels of each sample.
+            gt_fut_traj (list[torch.Tensor]): Ground truth future trajectory of each sample.
+            gt_fut_traj_mask (list[torch.Tensor]): Ground truth future trajectory mask of each sample.
+            gt_sdc_fut_traj (list[torch.Tensor]): Ground truth future trajectory of the ego (SDC) of each sample.
+            gt_sdc_fut_traj_mask (list[torch.Tensor]): Ground truth future trajectory mask of the ego (SDC) of each sample.
+            outs_track (dict): Outputs of the track head.
+            outs_seg (dict): Outputs of the seg head.
+        Returns:
+            dict: Losses of each branch.
+        """
+        track_query = outs_track['track_query_embeddings'][None, None, ...]
# num_dec, B, A_track, D + all_matched_idxes = [outs_track['track_query_matched_idxes']] #BxN + track_boxes = outs_track['track_bbox_results'] + + # cat sdc query/gt to the last + sdc_match_index = torch.zeros((1,), dtype=all_matched_idxes[0].dtype, device=all_matched_idxes[0].device) + sdc_match_index[0] = gt_fut_traj[0].shape[0] + all_matched_idxes = [torch.cat([all_matched_idxes[0], sdc_match_index], dim=0)] + gt_fut_traj[0] = torch.cat([gt_fut_traj[0], gt_sdc_fut_traj[0]], dim=0) + gt_fut_traj_mask[0] = torch.cat([gt_fut_traj_mask[0], gt_sdc_fut_traj_mask[0]], dim=0) + track_query = torch.cat([track_query, outs_track['sdc_embedding'][None, None, None, :]], dim=2) + sdc_track_boxes = outs_track['sdc_track_bbox_results'] + track_boxes[0][0].tensor = torch.cat([track_boxes[0][0].tensor, sdc_track_boxes[0][0].tensor], dim=0) + track_boxes[0][1] = torch.cat([track_boxes[0][1], sdc_track_boxes[0][1]], dim=0) + track_boxes[0][2] = torch.cat([track_boxes[0][2], sdc_track_boxes[0][2]], dim=0) + track_boxes[0][3] = torch.cat([track_boxes[0][3], sdc_track_boxes[0][3]], dim=0) + + memory, memory_mask, memory_pos, lane_query, _, lane_query_pos, hw_lvl = outs_seg['args_tuple'] + + outs_motion = self(bev_embed, track_query, lane_query, lane_query_pos, track_boxes) + loss_inputs = [gt_bboxes_3d, gt_fut_traj, gt_fut_traj_mask, outs_motion, all_matched_idxes, track_boxes] + losses = self.loss(*loss_inputs) + + def filter_vehicle_query(outs_motion, all_matched_idxes, gt_labels_3d, vehicle_id_list): + query_label = gt_labels_3d[0][-1][all_matched_idxes[0]] + # select vehicle query according to vehicle_id_list + vehicle_mask = torch.zeros_like(query_label) + for veh_id in vehicle_id_list: + vehicle_mask |= query_label == veh_id + outs_motion['traj_query'] = outs_motion['traj_query'][:, :, vehicle_mask>0] + outs_motion['track_query'] = outs_motion['track_query'][:, vehicle_mask>0] + outs_motion['track_query_pos'] = outs_motion['track_query_pos'][:, vehicle_mask>0] + all_matched_idxes[0] = all_matched_idxes[0][vehicle_mask>0] + return outs_motion, all_matched_idxes + + all_matched_idxes[0] = all_matched_idxes[0][:-1] + outs_motion['sdc_traj_query'] = outs_motion['traj_query'][:, :, -1] # [3, 1, 6, 256] [n_dec, b, n_mode, d] + outs_motion['sdc_track_query'] = outs_motion['track_query'][:, -1] # [1, 256] [b, d] + outs_motion['sdc_track_query_pos'] = outs_motion['track_query_pos'][:, -1] # [1, 256] [b, d] + outs_motion['traj_query'] = outs_motion['traj_query'][:, :, :-1] # [3, 1, 3, 6, 256] [n_dec, b, nq, n_mode, d] + outs_motion['track_query'] = outs_motion['track_query'][:, :-1] # [1, 3, 256] [b, nq, d] + outs_motion['track_query_pos'] = outs_motion['track_query_pos'][:, :-1] # [1, 3, 256] [b, nq, d] + + + outs_motion, all_matched_idxes = filter_vehicle_query(outs_motion, all_matched_idxes, gt_labels_3d, self.vehicle_id_list) + outs_motion['all_matched_idxes'] = all_matched_idxes + + ret_dict = dict(losses=losses, outs_motion=outs_motion, track_boxes=track_boxes) + return ret_dict + + def forward_test(self, bev_embed, outs_track={}, outs_seg={}): + """Test function""" + track_query = outs_track['track_query_embeddings'][None, None, ...] 
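Both `filter_vehicle_query` helpers (in `forward_train` above and `forward_test` below) build the vehicle selector by OR-ing per-class equality tests against `vehicle_id_list`; a standalone sketch with toy labels and this head's default id list:

import torch

vehicle_id_list = [0, 1, 2, 3, 4, 6, 7]          # this head's default
labels = torch.tensor([0, 5, 7, 8, 2])           # toy query labels
vehicle_mask = torch.zeros_like(labels)
for veh_id in vehicle_id_list:
    vehicle_mask |= labels == veh_id
print(vehicle_mask > 0)  # tensor([ True, False,  True, False,  True])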
+ track_boxes = outs_track['track_bbox_results'] + + track_query = torch.cat([track_query, outs_track['sdc_embedding'][None, None, None, :]], dim=2) + sdc_track_boxes = outs_track['sdc_track_bbox_results'] + + track_boxes[0][0].tensor = torch.cat([track_boxes[0][0].tensor, sdc_track_boxes[0][0].tensor], dim=0) + track_boxes[0][1] = torch.cat([track_boxes[0][1], sdc_track_boxes[0][1]], dim=0) + track_boxes[0][2] = torch.cat([track_boxes[0][2], sdc_track_boxes[0][2]], dim=0) + track_boxes[0][3] = torch.cat([track_boxes[0][3], sdc_track_boxes[0][3]], dim=0) + memory, memory_mask, memory_pos, lane_query, _, lane_query_pos, hw_lvl = outs_seg['args_tuple'] + outs_motion = self(bev_embed, track_query, lane_query, lane_query_pos, track_boxes) + traj_results = self.get_trajs(outs_motion, track_boxes) + bboxes, scores, labels, bbox_index, mask = track_boxes[0] + outs_motion['track_scores'] = scores[None, :] + labels[-1] = 0 + def filter_vehicle_query(outs_motion, labels, vehicle_id_list): + if len(labels) < 1: # No other obj query except sdc query. + return None + + # select vehicle query according to vehicle_id_list + vehicle_mask = torch.zeros_like(labels) + for veh_id in vehicle_id_list: + vehicle_mask |= labels == veh_id + outs_motion['traj_query'] = outs_motion['traj_query'][:, :, vehicle_mask>0] + outs_motion['track_query'] = outs_motion['track_query'][:, vehicle_mask>0] + outs_motion['track_query_pos'] = outs_motion['track_query_pos'][:, vehicle_mask>0] + outs_motion['track_scores'] = outs_motion['track_scores'][:, vehicle_mask>0] + return outs_motion + + outs_motion = filter_vehicle_query(outs_motion, labels, self.vehicle_id_list) + + # filter sdc query + outs_motion['sdc_traj_query'] = outs_motion['traj_query'][:, :, -1] + outs_motion['sdc_track_query'] = outs_motion['track_query'][:, -1] + outs_motion['sdc_track_query_pos'] = outs_motion['track_query_pos'][:, -1] + outs_motion['traj_query'] = outs_motion['traj_query'][:, :, :-1] + outs_motion['track_query'] = outs_motion['track_query'][:, :-1] + outs_motion['track_query_pos'] = outs_motion['track_query_pos'][:, :-1] + outs_motion['track_scores'] = outs_motion['track_scores'][:, :-1] + + return traj_results, outs_motion + + @auto_fp16(apply_to=('bev_embed', 'track_query', 'lane_query', 'lane_query_pos', 'lane_query_embed', 'prev_bev')) + def forward(self, + bev_embed, + track_query, + lane_query, + lane_query_pos, + track_bbox_results): + """ + Applies forward pass on the model for motion prediction using bird's eye view (BEV) embedding, track query, lane query, and track bounding box results. + + Args: + bev_embed (torch.Tensor): A tensor of shape (h*w, B, D) representing the bird's eye view embedding. + track_query (torch.Tensor): A tensor of shape (B, num_dec, A_track, D) representing the track query. + lane_query (torch.Tensor): A tensor of shape (N, M_thing, D) representing the lane query. + lane_query_pos (torch.Tensor): A tensor of shape (N, M_thing, D) representing the position of the lane query. + track_bbox_results (List[torch.Tensor]): A list of tensors containing the tracking bounding box results for each image in the batch. + + Returns: + dict: A dictionary containing the following keys and values: + - 'all_traj_scores': A tensor of shape (num_levels, B, A_track, num_points) with trajectory scores for each level. + - 'all_traj_preds': A tensor of shape (num_levels, B, A_track, num_points, num_future_steps, 2) with predicted trajectories for each level. 
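Query and anchor positions in the forward pass below are embedded with `pos2posemb2d` from `mmcv.models.utils.functional`; as a reference, here is a standalone sketch in the spirit of that helper, a DETR-style 2-D sinusoidal embedding (treat the exact details as an approximation of the actual implementation; with `num_feats=128` the output dim matches the default `embed_dims=256`):

import math
import torch

def posemb2d(points, num_feats=128, temperature=10000):
    # points: (..., 2), normalized to [0, 1]; returns (..., 2 * num_feats)
    scale = 2 * math.pi
    dim_t = torch.arange(num_feats, dtype=torch.float32)
    dim_t = temperature ** (2 * (dim_t // 2) / num_feats)
    pos = points * scale
    pos_x = pos[..., 0, None] / dim_t
    pos_y = pos[..., 1, None] / dim_t
    pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2)
    pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=-1).flatten(-2)
    return torch.cat((pos_y, pos_x), dim=-1)

print(posemb2d(torch.rand(4, 2)).shape)  # torch.Size([4, 256])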
+                - 'valid_traj_masks': A tensor of shape (B, A_track) indicating the validity of trajectory masks.
+                - 'traj_query': A tensor containing intermediate states of the trajectory queries.
+                - 'track_query': A tensor containing the input track queries.
+                - 'track_query_pos': A tensor containing the positional embeddings of the track queries.
+        """
+
+        dtype = track_query.dtype
+        device = track_query.device
+        num_groups = self.kmeans_anchors.shape[0]
+
+        # extract the last frame of the track query
+        track_query = track_query[:, -1]
+
+        # encode the center point of the track query
+        reference_points_track = self._extract_tracking_centers(
+            track_bbox_results, self.pc_range)
+        track_query_pos = self.boxes_query_embedding_layer(pos2posemb2d(reference_points_track.to(device)))  # B, A, D
+
+        # construct the learnable query positional embedding
+        # split and stack according to groups
+        learnable_query_pos = self.learnable_motion_query_embedding.weight.to(dtype)  # latent anchor (P*G, D)
+        learnable_query_pos = torch.stack(torch.split(learnable_query_pos, self.num_anchor, dim=0))
+
+        # construct the agent-level/scene-level query positional embedding
+        # (num_groups, num_anchor, 12, 2)
+        # to incorporate the information of different groups and coordinates, and embed the heading and location information
+        agent_level_anchors = self.kmeans_anchors.to(dtype).to(device).view(num_groups, self.num_anchor, self.predict_steps, 2).detach()
+        scene_level_ego_anchors = anchor_coordinate_transform(agent_level_anchors, track_bbox_results, with_translation_transform=True)  # B, A, G, P, 12, 2
+        scene_level_offset_anchors = anchor_coordinate_transform(agent_level_anchors, track_bbox_results, with_translation_transform=False)  # B, A, G, P, 12, 2
+
+        agent_level_norm = norm_points(agent_level_anchors, self.pc_range)
+        scene_level_ego_norm = norm_points(scene_level_ego_anchors, self.pc_range)
+        scene_level_offset_norm = norm_points(scene_level_offset_anchors, self.pc_range)
+
+        # we only use the last point of the anchor
+        agent_level_embedding = self.agent_level_embedding_layer(
+            pos2posemb2d(agent_level_norm[..., -1, :]))  # G, P, D
+        scene_level_ego_embedding = self.scene_level_ego_embedding_layer(
+            pos2posemb2d(scene_level_ego_norm[..., -1, :]))  # B, A, G, P, D
+        scene_level_offset_embedding = self.scene_level_offset_embedding_layer(
+            pos2posemb2d(scene_level_offset_norm[..., -1, :]))  # B, A, G, P, D
+
+        batch_size, num_agents = scene_level_ego_embedding.shape[:2]
+        agent_level_embedding = agent_level_embedding[None, None, ...].expand(batch_size, num_agents, -1, -1, -1)
+        learnable_embed = learnable_query_pos[None, None, ...].expand(batch_size, num_agents, -1, -1, -1)
+
+
+        # save for later: anchors
+        # B, A, G, P, 12, 2 -> B, A, P, 12, 2
+        scene_level_offset_anchors = self.group_mode_query_pos(track_bbox_results, scene_level_offset_anchors)
+
+        # select class embedding
+        # B, A, G, P, D -> B, A, P, D
+        agent_level_embedding = self.group_mode_query_pos(
+            track_bbox_results, agent_level_embedding)
+        scene_level_ego_embedding = self.group_mode_query_pos(
+            track_bbox_results, scene_level_ego_embedding)  # B, A, G, P, D -> B, A, P, D
+
+        # B, A, G, P, D -> B, A, P, D
+        scene_level_offset_embedding = self.group_mode_query_pos(
+            track_bbox_results, scene_level_offset_embedding)
+        learnable_embed = self.group_mode_query_pos(
+            track_bbox_results, learnable_embed)
+
+        init_reference = scene_level_offset_anchors.detach()
+
+        outputs_traj_scores = []
+        outputs_trajs = []
+
+        inter_states, inter_references =
self.motionformer( + track_query, # B, A_track, D + lane_query, # B, M, D + track_query_pos=track_query_pos, + lane_query_pos=lane_query_pos, + track_bbox_results=track_bbox_results, + bev_embed=bev_embed, + reference_trajs=init_reference, + traj_reg_branches=self.traj_reg_branches, + traj_cls_branches=self.traj_cls_branches, + # anchor embeddings + agent_level_embedding=agent_level_embedding, + scene_level_ego_embedding=scene_level_ego_embedding, + scene_level_offset_embedding=scene_level_offset_embedding, + learnable_embed=learnable_embed, + # anchor positional embeddings layers + agent_level_embedding_layer=self.agent_level_embedding_layer, + scene_level_ego_embedding_layer=self.scene_level_ego_embedding_layer, + scene_level_offset_embedding_layer=self.scene_level_offset_embedding_layer, + spatial_shapes=torch.tensor( + [[self.bev_h, self.bev_w]], device=device), + level_start_index=torch.tensor([0], device=device)) + + for lvl in range(inter_states.shape[0]): + outputs_class = self.traj_cls_branches[lvl](inter_states[lvl]) + tmp = self.traj_reg_branches[lvl](inter_states[lvl]) + tmp = self.unflatten_traj(tmp) + + # we use cumsum trick here to get the trajectory + tmp[..., :2] = torch.cumsum(tmp[..., :2], dim=3) + + outputs_class = self.log_softmax(outputs_class.squeeze(3)) + outputs_traj_scores.append(outputs_class) + + for bs in range(tmp.shape[0]): + tmp[bs] = bivariate_gaussian_activation(tmp[bs]) + outputs_trajs.append(tmp) + outputs_traj_scores = torch.stack(outputs_traj_scores) + outputs_trajs = torch.stack(outputs_trajs) + + B, A_track, D = track_query.shape + valid_traj_masks = track_query.new_ones((B, A_track)) > 0 + outs = { + 'all_traj_scores': outputs_traj_scores, + 'all_traj_preds': outputs_trajs, + 'valid_traj_masks': valid_traj_masks, + 'traj_query': inter_states, + 'track_query': track_query, + 'track_query_pos': track_query_pos, + } + + return outs + + def group_mode_query_pos(self, bbox_results, mode_query_pos): + """ + Group mode query positions based on the input bounding box results. + + Args: + bbox_results (List[Tuple[torch.Tensor]]): A list of tuples containing the bounding box results for each image in the batch. + mode_query_pos (torch.Tensor): A tensor of shape (B, A, G, P, D) representing the mode query positions. + + Returns: + torch.Tensor: A tensor of shape (B, A, P, D) representing the classified mode query positions. + """ + batch_size = len(bbox_results) + agent_num = mode_query_pos.shape[1] + batched_mode_query_pos = [] + self.cls2group = self.cls2group.to(mode_query_pos.device) + # TODO: vectorize this + # group the embeddings based on the class + for i in range(batch_size): + bboxes, scores, labels, bbox_index, mask = bbox_results[i] + label = labels.to(mode_query_pos.device) + grouped_label = self.cls2group[label] + grouped_mode_query_pos = [] + for j in range(agent_num): + grouped_mode_query_pos.append( + mode_query_pos[i, j, grouped_label[j]]) + batched_mode_query_pos.append(torch.stack(grouped_mode_query_pos)) + return torch.stack(batched_mode_query_pos) + + @force_fp32(apply_to=('preds_dicts_motion')) + def loss(self, + gt_bboxes_3d, + gt_fut_traj, + gt_fut_traj_mask, + preds_dicts_motion, + all_matched_idxes, + track_bbox_results): + """ + Computes the loss function for the given ground truth and prediction dictionaries. + + Args: + gt_bboxes_3d (List[torch.Tensor]): A list of tensors representing ground truth 3D bounding boxes for each image. + gt_fut_traj (torch.Tensor): A tensor representing the ground truth future trajectories. 
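The cumsum trick above converts the regression branch's per-step displacements into absolute waypoints by accumulating along the time axis; a standalone illustration with toy offsets:

import torch

offsets = torch.tensor([[[1.0, 0.0],    # one agent/mode, 4 future steps
                         [1.0, 0.5],
                         [1.0, 0.5],
                         [0.5, 1.0]]])
waypoints = torch.cumsum(offsets, dim=1)
print(waypoints)  # [[1, 0], [2, 0.5], [3, 1], [3.5, 2]]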
+            gt_fut_traj_mask (torch.Tensor): A tensor representing the ground truth future trajectory masks.
+            preds_dicts_motion (Dict[str, torch.Tensor]): A dictionary containing motion-related prediction tensors.
+            all_matched_idxes (List[torch.Tensor]): A list of tensors containing the matched ground truth indices for each image in the batch.
+            track_bbox_results (List[Tuple[torch.Tensor]]): A list of tuples containing the tracking bounding box results for each image in the batch.
+
+        Returns:
+            dict[str, torch.Tensor]: A dictionary of loss components.
+        """
+
+        # motion-related predictions
+        all_traj_scores = preds_dicts_motion['all_traj_scores']
+        all_traj_preds = preds_dicts_motion['all_traj_preds']
+
+        num_dec_layers = len(all_traj_scores)
+
+        all_gt_fut_traj = [gt_fut_traj for _ in range(num_dec_layers)]
+        all_gt_fut_traj_mask = [
+            gt_fut_traj_mask for _ in range(num_dec_layers)]
+
+        losses_traj = []
+        gt_fut_traj_all, gt_fut_traj_mask_all = self.compute_matched_gt_traj(
+            all_gt_fut_traj[0], all_gt_fut_traj_mask[0], all_matched_idxes, track_bbox_results, gt_bboxes_3d)
+        for i in range(num_dec_layers):
+            loss_traj, l_class, l_reg, l_minade, l_minfde, l_mr = self.compute_loss_traj(all_traj_scores[i], all_traj_preds[i],
+                                                                                         gt_fut_traj_all, gt_fut_traj_mask_all, all_matched_idxes)
+            losses_traj.append(
+                (loss_traj, l_class, l_reg, l_minade, l_minfde, l_mr))
+
+        loss_dict = dict()
+        loss_dict['loss_traj'] = losses_traj[-1][0]
+        loss_dict['l_class'] = losses_traj[-1][1]
+        loss_dict['l_reg'] = losses_traj[-1][2]
+        loss_dict['min_ade'] = losses_traj[-1][3]
+        loss_dict['min_fde'] = losses_traj[-1][4]
+        loss_dict['mr'] = losses_traj[-1][5]
+
+        # losses from the other decoder layers
+        num_dec_layer = 0
+        for loss_traj_i in losses_traj[:-1]:
+            loss_dict[f'd{num_dec_layer}.loss_traj'] = loss_traj_i[0]
+            loss_dict[f'd{num_dec_layer}.l_class'] = loss_traj_i[1]
+            loss_dict[f'd{num_dec_layer}.l_reg'] = loss_traj_i[2]
+            loss_dict[f'd{num_dec_layer}.min_ade'] = loss_traj_i[3]
+            loss_dict[f'd{num_dec_layer}.min_fde'] = loss_traj_i[4]
+            loss_dict[f'd{num_dec_layer}.mr'] = loss_traj_i[5]
+            num_dec_layer += 1
+
+        return loss_dict
+
+    def compute_matched_gt_traj(self,
+                                gt_fut_traj,
+                                gt_fut_traj_mask,
+                                all_matched_idxes,
+                                track_bbox_results,
+                                gt_bboxes_3d):
+        """
+        Computes the matched ground truth trajectories for a batch of images based on matched indexes.
+
+        Args:
+            gt_fut_traj (torch.Tensor): Ground truth future trajectories of shape (num_imgs, num_objects, num_future_steps, 2).
+            gt_fut_traj_mask (torch.Tensor): Ground truth future trajectory masks of shape (num_imgs, num_objects, num_future_steps, 2).
+            all_matched_idxes (List[torch.Tensor]): A list of tensors containing the matched indexes for each image in the batch.
+            track_bbox_results (List[torch.Tensor]): A list of tensors containing the tracking bounding box results for each image in the batch.
+            gt_bboxes_3d (List[torch.Tensor]): A list of tensors containing the ground truth 3D bounding boxes for each image in the batch.
+
+        Returns:
+            torch.Tensor: A concatenated tensor of the matched ground truth future trajectories.
+            torch.Tensor: A concatenated tensor of the matched ground truth future trajectory masks.
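+
+        Example (illustrative sketch; the index values are assumptions, not
+        taken from this repository):
+            >>> # an entry >= 0 selects the matched GT object; -1 marks unmatched
+            >>> matched_gt_idx = torch.tensor([2, -1, 0])
+            >>> valid_traj_masks = matched_gt_idx >= 0
+            >>> valid_traj_masks
+            tensor([ True, False,  True])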
+ """ + num_imgs = len(all_matched_idxes) + gt_fut_traj_all = [] + gt_fut_traj_mask_all = [] + for i in range(num_imgs): + matched_gt_idx = all_matched_idxes[i] + valid_traj_masks = matched_gt_idx >= 0 + matched_gt_fut_traj = gt_fut_traj[i][matched_gt_idx][valid_traj_masks] + matched_gt_fut_traj_mask = gt_fut_traj_mask[i][matched_gt_idx][valid_traj_masks] + if self.use_nonlinear_optimizer: + # TODO: sdc query is not supported non-linear optimizer + bboxes = track_bbox_results[i][0].to(matched_gt_idx.device).tensor[valid_traj_masks] + matched_gt_bboxes_3d = gt_bboxes_3d[i][-1].to(matched_gt_idx.device).tensor[matched_gt_idx[:-1] + ][valid_traj_masks[:-1]] + sdc_gt_fut_traj = matched_gt_fut_traj[-1:] + sdc_gt_fut_traj_mask = matched_gt_fut_traj_mask[-1:] + matched_gt_fut_traj = matched_gt_fut_traj[:-1] + matched_gt_fut_traj_mask = matched_gt_fut_traj_mask[:-1] + bboxes = bboxes[:-1] + matched_gt_fut_traj, matched_gt_fut_traj_mask = nonlinear_smoother( + matched_gt_bboxes_3d, matched_gt_fut_traj, matched_gt_fut_traj_mask, bboxes) + matched_gt_fut_traj = torch.cat( + [matched_gt_fut_traj, sdc_gt_fut_traj], dim=0) + matched_gt_fut_traj_mask = torch.cat( + [matched_gt_fut_traj_mask, sdc_gt_fut_traj_mask], dim=0) + matched_gt_fut_traj_mask = torch.all( + matched_gt_fut_traj_mask > 0, dim=-1) + gt_fut_traj_all.append(matched_gt_fut_traj) + gt_fut_traj_mask_all.append(matched_gt_fut_traj_mask) + gt_fut_traj_all = torch.cat(gt_fut_traj_all, dim=0) + gt_fut_traj_mask_all = torch.cat(gt_fut_traj_mask_all, dim=0) + return gt_fut_traj_all, gt_fut_traj_mask_all + + def compute_loss_traj(self, + traj_scores, + traj_preds, + gt_fut_traj_all, + gt_fut_traj_mask_all, + all_matched_idxes): + """ + Computes the trajectory loss given the predicted trajectories, ground truth trajectories, and other relevant information. + + Args: + traj_scores (torch.Tensor): A tensor representing the trajectory scores. + traj_preds (torch.Tensor): A tensor representing the predicted trajectories. + gt_fut_traj_all (torch.Tensor): A tensor representing the ground truth future trajectories. + gt_fut_traj_mask_all (torch.Tensor): A tensor representing the ground truth future trajectory masks. + all_matched_idxes (List[torch.Tensor]): A list of tensors containing the matched ground truth indices for each image in the batch. + + Returns: + tuple: A tuple containing the total trajectory loss, classification loss, regression loss, minimum average displacement error, minimum final displacement error, and miss rate. + """ + num_imgs = traj_scores.size(0) + traj_prob_all = [] + traj_preds_all = [] + for i in range(num_imgs): + matched_gt_idx = all_matched_idxes[i] + valid_traj_masks = matched_gt_idx >= 0 + # select valid and matched + batch_traj_prob = traj_scores[i, valid_traj_masks, :] + # (n_objs, n_modes, step, 5) + batch_traj_preds = traj_preds[i, valid_traj_masks, ...] + traj_prob_all.append(batch_traj_prob) + traj_preds_all.append(batch_traj_preds) + traj_prob_all = torch.cat(traj_prob_all, dim=0) + traj_preds_all = torch.cat(traj_preds_all, dim=0) + traj_loss, l_class, l_reg, l_minade, l_minfde, l_mr = self.loss_traj( + traj_prob_all, traj_preds_all, gt_fut_traj_all, gt_fut_traj_mask_all) + return traj_loss, l_class, l_reg, l_minade, l_minfde, l_mr + + @force_fp32(apply_to=('preds_dicts')) + def get_trajs(self, preds_dicts, bbox_results): + """ + Generates trajectories from the prediction results, bounding box results. + + Args: + preds_dicts (tuple[list[dict]]): A tuple containing lists of dictionaries with prediction results. 
+            bbox_results (List[Tuple[torch.Tensor]]): A list of tuples containing the bounding box results for each image in the batch.
+
+        Returns:
+            List[dict]: A list of per-sample dictionaries containing the predicted trajectories and their mode scores for each decoder layer.
+        """
+        num_samples = len(bbox_results)
+        num_layers = preds_dicts['all_traj_preds'].shape[0]
+        ret_list = []
+        for i in range(num_samples):
+            preds = dict()
+            for j in range(num_layers):
+                suffix = '_' + str(j) if j < (num_layers - 1) else ''
+                traj = preds_dicts['all_traj_preds'][j, i]
+                traj_scores = preds_dicts['all_traj_scores'][j, i]
+
+                traj_scores, traj = traj_scores.cpu(), traj.cpu()
+                preds['traj' + suffix] = traj
+                preds['traj_scores' + suffix] = traj_scores
+            ret_list.append(preds)
+        return ret_list
diff --git a/mmcv/models/dense_heads/motion_head_plugin/__init__.py b/mmcv/models/dense_heads/motion_head_plugin/__init__.py
new file mode 100644
index 0000000..79b6df5
--- /dev/null
+++ b/mmcv/models/dense_heads/motion_head_plugin/__init__.py
@@ -0,0 +1,4 @@
+from .motion_optimization import MotionNonlinearSmoother
+from .modules import MotionTransformerDecoder
+from .motion_deformable_attn import MotionTransformerAttentionLayer, MotionDeformableAttention
+from .motion_utils import *
\ No newline at end of file
diff --git a/mmcv/models/dense_heads/motion_head_plugin/base_motion_head.py b/mmcv/models/dense_heads/motion_head_plugin/base_motion_head.py
new file mode 100644
index 0000000..d71c0ee
--- /dev/null
+++ b/mmcv/models/dense_heads/motion_head_plugin/base_motion_head.py
@@ -0,0 +1,140 @@
+#---------------------------------------------------------------------------------#
+# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156)   #
+# Source code: https://github.com/OpenDriveLab/UniAD                               #
+# Copyright (c) OpenDriveLab. All rights reserved.                                 #
+#---------------------------------------------------------------------------------#
+
+import torch
+import copy
+import pickle
+import torch.nn as nn
+from mmcv.models import build_loss
+from mmcv.models.bricks.transformer import build_transformer_layer_sequence
+
+class BaseMotionHead(nn.Module):
+    def __init__(self, *args, **kwargs):
+        super(BaseMotionHead, self).__init__()
+        pass
+
+    def _build_loss(self, loss_traj):
+        """
+        Build the loss function for the motion prediction task.
+
+        Args:
+            loss_traj (dict): A dictionary containing the parameters for the loss function.
+
+        Returns:
+            None
+        """
+        self.loss_traj = build_loss(loss_traj)
+        self.unflatten_traj = nn.Unflatten(3, (self.predict_steps, 5))
+        self.log_softmax = nn.LogSoftmax(dim=2)
+
+    def _load_anchors(self, anchor_info_path):
+        """
+        Load the anchor information from a file.
+
+        Args:
+            anchor_info_path (str): The path to the file containing the anchor information.
+
+        Returns:
+            None
+        """
+        anchor_infos = pickle.load(open(anchor_info_path, 'rb'))
+        self.kmeans_anchors = torch.stack(
+            [torch.from_numpy(a) for a in anchor_infos["anchors_all"]])  # Nc, Pc, steps, 2
+
+    def _build_layers(self, transformerlayers, det_layer_num):
+        """
+        Build the layers of the motion prediction module.
+
+        Args:
+            transformerlayers (dict): A dictionary containing the parameters for the transformer layers.
+            det_layer_num (int): The number of detection layers.
+
+        Returns:
+            None
+        """
+        self.learnable_motion_query_embedding = nn.Embedding(
+            self.num_anchor * self.num_anchor_group, self.embed_dims)
+        self.motionformer = build_transformer_layer_sequence(
+            transformerlayers)
+        self.layer_track_query_fuser = nn.Sequential(
+            nn.Linear(self.embed_dims * det_layer_num, self.embed_dims),
+            nn.LayerNorm(self.embed_dims),
+            nn.ReLU(inplace=True)
+        )
+
+        self.agent_level_embedding_layer = nn.Sequential(
+            nn.Linear(self.embed_dims, self.embed_dims*2),
+            nn.ReLU(),
+            nn.Linear(self.embed_dims*2, self.embed_dims),
+        )
+        self.scene_level_ego_embedding_layer = nn.Sequential(
+            nn.Linear(self.embed_dims, self.embed_dims*2),
+            nn.ReLU(),
+            nn.Linear(self.embed_dims*2, self.embed_dims),
+        )
+        self.scene_level_offset_embedding_layer = nn.Sequential(
+            nn.Linear(self.embed_dims, self.embed_dims*2),
+            nn.ReLU(),
+            nn.Linear(self.embed_dims*2, self.embed_dims),
+        )
+        self.boxes_query_embedding_layer = nn.Sequential(
+            nn.Linear(self.embed_dims, self.embed_dims*2),
+            nn.ReLU(),
+            nn.Linear(self.embed_dims*2, self.embed_dims),
+        )
+
+    def _init_layers(self):
+        """Initialize the classification branch and regression branch of the head."""
+        traj_cls_branch = []
+        traj_cls_branch.append(nn.Linear(self.embed_dims, self.embed_dims))
+        traj_cls_branch.append(nn.LayerNorm(self.embed_dims))
+        traj_cls_branch.append(nn.ReLU(inplace=True))
+        for _ in range(self.num_reg_fcs-1):
+            traj_cls_branch.append(nn.Linear(self.embed_dims, self.embed_dims))
+            traj_cls_branch.append(nn.LayerNorm(self.embed_dims))
+            traj_cls_branch.append(nn.ReLU(inplace=True))
+        traj_cls_branch.append(nn.Linear(self.embed_dims, 1))
+        traj_cls_branch = nn.Sequential(*traj_cls_branch)
+
+        traj_reg_branch = []
+        traj_reg_branch.append(nn.Linear(self.embed_dims, self.embed_dims))
+        traj_reg_branch.append(nn.ReLU())
+        for _ in range(self.num_reg_fcs-1):
+            traj_reg_branch.append(nn.Linear(self.embed_dims, self.embed_dims))
+            traj_reg_branch.append(nn.ReLU())
+        traj_reg_branch.append(nn.Linear(self.embed_dims, self.predict_steps * 5))
+        traj_reg_branch = nn.Sequential(*traj_reg_branch)
+
+        def _get_clones(module, N):
+            return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
+
+        num_pred = self.motionformer.num_layers
+        self.traj_cls_branches = _get_clones(traj_cls_branch, num_pred)
+        self.traj_reg_branches = _get_clones(traj_reg_branch, num_pred)
+
+    def _extract_tracking_centers(self, bbox_results, bev_range):
+        """
+        Extract the bounding box centers and normalize them according to the BEV range.
+
+        Args:
+            bbox_results (List[Tuple[torch.Tensor]]): A list of tuples containing the bounding box results for each image in the batch.
+            bev_range (List[float]): A list of float values representing the bird's eye view range.
+
+        Returns:
+            torch.Tensor: A tensor representing the normalized centers of the detection bounding boxes.
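+
+        Example (numeric sketch; the BEV range is an assumed value matching
+        the default ``bev_range`` used elsewhere in this plugin):
+            With bev_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0], a box
+            center at x = 0.0 is normalized to
+            x_norm = (0.0 - (-51.2)) / (51.2 - (-51.2)) = 0.5.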
+ """ + batch_size = len(bbox_results) + det_bbox_posembed = [] + for i in range(batch_size): + bboxes, scores, labels, bbox_index, mask = bbox_results[i] + xy = bboxes.gravity_center[:, :2] + x_norm = (xy[:, 0] - bev_range[0]) / \ + (bev_range[3] - bev_range[0]) + y_norm = (xy[:, 1] - bev_range[1]) / \ + (bev_range[4] - bev_range[1]) + det_bbox_posembed.append( + torch.cat([x_norm[:, None], y_norm[:, None]], dim=-1)) + return torch.stack(det_bbox_posembed) \ No newline at end of file diff --git a/mmcv/models/dense_heads/motion_head_plugin/modules.py b/mmcv/models/dense_heads/motion_head_plugin/modules.py new file mode 100644 index 0000000..372528c --- /dev/null +++ b/mmcv/models/dense_heads/motion_head_plugin/modules.py @@ -0,0 +1,280 @@ +#---------------------------------------------------------------------------------# +# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156) # +# Source code: https://github.com/OpenDriveLab/UniAD # +# Copyright (c) OpenDriveLab. All rights reserved. # +#---------------------------------------------------------------------------------# + +import torch +import torch.nn as nn +from mmcv.models.bricks.registry import TRANSFORMER_LAYER_SEQUENCE +from mmcv.models.bricks.transformer import build_transformer_layer +from mmcv.models.backbones.base_module import BaseModule +from mmcv.models.utils.functional import ( + norm_points, + pos2posemb2d, + trajectory_coordinate_transform +) + + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class MotionTransformerDecoder(BaseModule): + """Implements the decoder in DETR3D transformer. + Args: + return_intermediate (bool): Whether to return intermediate outputs. + coder_norm_cfg (dict): Config of last normalization layer. Default: + `LN`. + """ + + def __init__(self, pc_range=None, embed_dims=256, transformerlayers=None, num_layers=3, **kwargs): + super(MotionTransformerDecoder, self).__init__() + self.pc_range = pc_range + self.embed_dims = embed_dims + self.num_layers = num_layers + self.intention_interaction_layers = IntentionInteraction() + self.track_agent_interaction_layers = nn.ModuleList( + [TrackAgentInteraction() for i in range(self.num_layers)]) + self.map_interaction_layers = nn.ModuleList( + [MapInteraction() for i in range(self.num_layers)]) + self.bev_interaction_layers = nn.ModuleList( + [build_transformer_layer(transformerlayers) for i in range(self.num_layers)]) + + self.static_dynamic_fuser = nn.Sequential( + nn.Linear(self.embed_dims*2, self.embed_dims*2), + nn.ReLU(), + nn.Linear(self.embed_dims*2, self.embed_dims), + ) + self.dynamic_embed_fuser = nn.Sequential( + nn.Linear(self.embed_dims*3, self.embed_dims*2), + nn.ReLU(), + nn.Linear(self.embed_dims*2, self.embed_dims), + ) + self.in_query_fuser = nn.Sequential( + nn.Linear(self.embed_dims*2, self.embed_dims*2), + nn.ReLU(), + nn.Linear(self.embed_dims*2, self.embed_dims), + ) + self.out_query_fuser = nn.Sequential( + nn.Linear(self.embed_dims*4, self.embed_dims*2), + nn.ReLU(), + nn.Linear(self.embed_dims*2, self.embed_dims), + ) + + def forward(self, + track_query, + lane_query, + track_query_pos=None, + lane_query_pos=None, + track_bbox_results=None, + bev_embed=None, + reference_trajs=None, + traj_reg_branches=None, + agent_level_embedding=None, + scene_level_ego_embedding=None, + scene_level_offset_embedding=None, + learnable_embed=None, + agent_level_embedding_layer=None, + scene_level_ego_embedding_layer=None, + scene_level_offset_embedding_layer=None, + **kwargs): + """Forward function for 
`MotionTransformerDecoder`.
+        Args:
+            track_query (Tensor): agent queries of shape (B, A, D).
+            lane_query (Tensor): map queries of shape (B, M, D).
+            track_query_pos (Tensor): positional embedding of the agent queries, (B, A, D).
+            lane_query_pos (Tensor): positional embedding of the map queries, (B, M, D).
+            agent_level_embedding (Tensor): agent-level intention embedding, (B, A, P, D).
+            scene_level_ego_embedding (Tensor): scene-level (ego-centric) intention embedding, (B, A, P, D).
+            scene_level_offset_embedding (Tensor): scene-level offset intention embedding, (B, A, P, D).
+            learnable_embed (Tensor): learnable intention embedding, (B, A, P, D).
+        Returns:
+            tuple[Tensor, Tensor]: the stacked intermediate query embeddings,
+                (num_layers, B, A, P, D), and the stacked intermediate
+                reference trajectories from each decoder layer.
+        """
+        intermediate = []
+        intermediate_reference_trajs = []
+
+        B, _, P, D = agent_level_embedding.shape
+        track_query_bc = track_query.unsqueeze(2).expand(-1, -1, P, -1)  # (B, A, P, D)
+        track_query_pos_bc = track_query_pos.unsqueeze(2).expand(-1, -1, P, -1)  # (B, A, P, D)
+
+        # static intention embedding, which is immutable throughout all layers
+        agent_level_embedding = self.intention_interaction_layers(agent_level_embedding)
+        static_intention_embed = agent_level_embedding + scene_level_offset_embedding + learnable_embed
+        reference_trajs_input = reference_trajs.unsqueeze(4).detach()
+
+        query_embed = torch.zeros_like(static_intention_embed)
+        for lid in range(self.num_layers):
+            # compute the dynamic intention embedding;
+            # it is the output of the previous layer, initialized with the anchor embedding
+            dynamic_query_embed = self.dynamic_embed_fuser(torch.cat(
+                [agent_level_embedding, scene_level_offset_embedding, scene_level_ego_embedding], dim=-1))
+
+            # fuse static and dynamic intention embedding
+            query_embed_intention = self.static_dynamic_fuser(torch.cat(
+                [static_intention_embed, dynamic_query_embed], dim=-1))  # (B, A, P, D)
+
+            # fuse intention embedding with query embedding
+            query_embed = self.in_query_fuser(torch.cat([query_embed, query_embed_intention], dim=-1))
+
+            # interaction between agents
+            track_query_embed = self.track_agent_interaction_layers[lid](
+                query_embed, track_query, query_pos=track_query_pos_bc, key_pos=track_query_pos)
+
+            # interaction between agents and map
+            map_query_embed = self.map_interaction_layers[lid](
+                query_embed, lane_query, query_pos=track_query_pos_bc, key_pos=lane_query_pos)
+
+            # interaction between agents and bev, i.e. interaction between agents and goals,
+            # implemented with a deformable transformer
+            bev_query_embed = self.bev_interaction_layers[lid](
+                query_embed,
+                value=bev_embed,
+                query_pos=track_query_pos_bc,
+                bbox_results=track_bbox_results,
+                reference_trajs=reference_trajs_input,
+                **kwargs)
+
+            # fuse the embeddings from the different interaction layers
+            query_embed = [track_query_embed, map_query_embed, bev_query_embed, track_query_bc + track_query_pos_bc]
+            query_embed = torch.cat(query_embed, dim=-1)
+            query_embed = self.out_query_fuser(query_embed)
+
+            if traj_reg_branches is not None:
+                # update the reference trajectory
+                tmp = traj_reg_branches[lid](query_embed)
+                bs, n_agent, n_modes, n_steps, _ = reference_trajs.shape
+                tmp = tmp.view(bs, n_agent, n_modes, n_steps, -1)
+
+                # we predict the speed (per-step offset) of the trajectory and use the cumsum trick to recover the trajectory
+                tmp[..., :2] = torch.cumsum(tmp[..., :2], dim=3)
+                new_reference_trajs = torch.zeros_like(reference_trajs)
+                new_reference_trajs = tmp[..., :2]
+                reference_trajs = new_reference_trajs.detach()
+                reference_trajs_input = reference_trajs.unsqueeze(4)  # BS NUM_AGENT NUM_MODE 12 NUM_LEVEL 2
+
+                # update the embeddings used by the next layer;
+                # only the embedding of the last step, i.e.
the goal + ep_offset_embed = reference_trajs.detach() + ep_ego_embed = trajectory_coordinate_transform(reference_trajs.unsqueeze( + 2), track_bbox_results, with_translation_transform=True, with_rotation_transform=False).squeeze(2).detach() + ep_agent_embed = trajectory_coordinate_transform(reference_trajs.unsqueeze( + 2), track_bbox_results, with_translation_transform=False, with_rotation_transform=True).squeeze(2).detach() + + agent_level_embedding = agent_level_embedding_layer(pos2posemb2d( + norm_points(ep_agent_embed[..., -1, :], self.pc_range))) + scene_level_ego_embedding = scene_level_ego_embedding_layer(pos2posemb2d( + norm_points(ep_ego_embed[..., -1, :], self.pc_range))) + scene_level_offset_embedding = scene_level_offset_embedding_layer(pos2posemb2d( + norm_points(ep_offset_embed[..., -1, :], self.pc_range))) + + intermediate.append(query_embed) + intermediate_reference_trajs.append(reference_trajs) + + return torch.stack(intermediate), torch.stack(intermediate_reference_trajs) + + +class TrackAgentInteraction(BaseModule): + """ + Modeling the interaction between the agents + """ + def __init__(self, + embed_dims=256, + num_heads=8, + dropout=0.1, + batch_first=True, + norm_cfg=None, + init_cfg=None): + super().__init__(init_cfg) + + self.batch_first = batch_first + self.interaction_transformer = nn.TransformerDecoderLayer(d_model=embed_dims, + nhead=num_heads, + dropout=dropout, + dim_feedforward=embed_dims*2, + batch_first=batch_first) + + def forward(self, query, key, query_pos=None, key_pos=None): + ''' + query: context query (B, A, P, D) + query_pos: mode pos embedding (B, A, P, D) + key: (B, A, D) + key_pos: (B, A, D) + ''' + B, A, P, D = query.shape + if query_pos is not None: + query = query + query_pos + if key_pos is not None: + key = key + key_pos + mem = key.expand(B*A, -1, -1) + # N, A, P, D -> N*A, P, D + query = torch.flatten(query, start_dim=0, end_dim=1) + query = self.interaction_transformer(query, mem) + query = query.view(B, A, P, D) + return query + + +class MapInteraction(BaseModule): + """ + Modeling the interaction between the agent and the map + """ + def __init__(self, + embed_dims=256, + num_heads=8, + dropout=0.1, + batch_first=True, + norm_cfg=None, + init_cfg=None): + super().__init__(init_cfg) + + self.batch_first = batch_first + self.interaction_transformer = nn.TransformerDecoderLayer(d_model=embed_dims, + nhead=num_heads, + dropout=dropout, + dim_feedforward=embed_dims*2, + batch_first=batch_first) + + def forward(self, query, key, query_pos=None, key_pos=None): + ''' + x: context query (B, A, P, D) + query_pos: mode pos embedding (B, A, P, D) + ''' + B, A, P, D = query.shape + if query_pos is not None: + query = query + query_pos + if key_pos is not None: + key = key + key_pos + + # N, A, P, D -> N*A, P, D + query = torch.flatten(query, start_dim=0, end_dim=1) + mem = key.expand(B*A, -1, -1) + query = self.interaction_transformer(query, mem) + query = query.view(B, A, P, D) + return query + + +class IntentionInteraction(BaseModule): + """ + Modeling the interaction between anchors + """ + def __init__(self, + embed_dims=256, + num_heads=8, + dropout=0.1, + batch_first=True, + norm_cfg=None, + init_cfg=None): + super().__init__(init_cfg) + + self.batch_first = batch_first + self.interaction_transformer = nn.TransformerEncoderLayer(d_model=embed_dims, + nhead=num_heads, + dropout=dropout, + dim_feedforward=embed_dims*2, + batch_first=batch_first) + + def forward(self, query): + B, A, P, D = query.shape + # B, A, P, D -> B*A,P, D + rebatch_x = 
torch.flatten(query, start_dim=0, end_dim=1)
+        rebatch_x = self.interaction_transformer(rebatch_x)
+        out = rebatch_x.view(B, A, P, D)
+        return out
diff --git a/mmcv/models/dense_heads/motion_head_plugin/motion_deformable_attn.py b/mmcv/models/dense_heads/motion_head_plugin/motion_deformable_attn.py
new file mode 100644
index 0000000..c91a7c1
--- /dev/null
+++ b/mmcv/models/dense_heads/motion_head_plugin/motion_deformable_attn.py
@@ -0,0 +1,632 @@
+#---------------------------------------------------------------------------------#
+# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156)   #
+# Source code: https://github.com/OpenDriveLab/UniAD                               #
+# Copyright (c) OpenDriveLab. All rights reserved.                                 #
+#---------------------------------------------------------------------------------#
+
+import copy
+import warnings
+import torch
+import math
+import torch.nn as nn
+
+from einops import rearrange, repeat
+from mmcv.ops.multi_scale_deform_attn import multi_scale_deformable_attn_pytorch
+from mmcv.models.utils import xavier_init, constant_init
+from mmcv.models.bricks.registry import ATTENTION, TRANSFORMER_LAYER
+from mmcv.models.bricks.transformer import build_attention, build_feedforward_network, build_norm_layer
+from mmcv.models.bricks.drop import build_dropout
+from mmcv.models.backbones.base_module import BaseModule, ModuleList, Sequential
+from mmcv.utils import ConfigDict, deprecated_api_warning
+from mmcv.models.modules.multi_scale_deformable_attn_function import MultiScaleDeformableAttnFunction_fp32
+
+
+@TRANSFORMER_LAYER.register_module()
+class MotionTransformerAttentionLayer(BaseModule):
+    """Base `TransformerLayer` for vision transformers.
+    It can be built from `mmcv.ConfigDict` and supports more flexible
+    customization, for example, using any number of `FFN` or `LN` layers, and
+    using different kinds of `attention` by specifying a list of `ConfigDict`
+    named `attn_cfgs`. It is worth mentioning that it supports `prenorm`
+    when you specify `norm` as the first element of `operation_order`.
+    More details about the `prenorm`: `On Layer Normalization in the
+    Transformer Architecture <https://arxiv.org/abs/2002.04745>`_ .
+    Args:
+        attn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None):
+            Configs for `self_attention` or `cross_attention` modules.
+            The order of the configs in the list should be consistent with
+            the corresponding attentions in operation_order.
+            If it is a dict, all of the attention modules in operation_order
+            will be built with this config. Default: None.
+        ffn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None):
+            Configs for FFN. The order of the configs in the list should be
+            consistent with the corresponding ffn in operation_order.
+            If it is a dict, all of the FFN modules in operation_order
+            will be built with this config.
+        operation_order (tuple[str]): The execution order of operations
+            in the transformer, such as ('self_attn', 'norm', 'ffn', 'norm').
+            Supports `prenorm` when you specify `norm` as the first element.
+            Default: None.
+        norm_cfg (dict): Config dict for normalization layer.
+            Default: dict(type='LN').
+        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
+            Default: None.
+        batch_first (bool): Key, Query and Value are shape
+            of (batch, n, embed_dim)
+            or (n, batch, embed_dim). Default to False.
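+
+    Example (hedged sketch; the attention/FFN settings below are assumptions
+    chosen for illustration, not a configuration taken from this repository):
+        >>> layer = MotionTransformerAttentionLayer(
+        ...     attn_cfgs=dict(type='MotionDeformableAttention',
+        ...                    embed_dims=256, num_heads=8, num_levels=1),
+        ...     ffn_cfgs=dict(type='FFN', embed_dims=256,
+        ...                   feedforward_channels=512, num_fcs=2),
+        ...     operation_order=('cross_attn', 'norm', 'ffn', 'norm'),
+        ...     batch_first=True)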
+ """ + + def __init__(self, + attn_cfgs=None, + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0., + act_cfg=dict(type='ReLU', inplace=True), + ), + operation_order=None, + norm_cfg=dict(type='LN'), + init_cfg=None, + batch_first=False, + **kwargs): + + deprecated_args = dict( + feedforward_channels='feedforward_channels', + ffn_dropout='ffn_drop', + ffn_num_fcs='num_fcs') + for ori_name, new_name in deprecated_args.items(): + if ori_name in kwargs: + warnings.warn( + f'The arguments `{ori_name}` in BaseTransformerLayer ' + f'has been deprecated, now you should set `{new_name}` ' + f'and other FFN related arguments ' + f'to a dict named `ffn_cfgs`. ', DeprecationWarning) + ffn_cfgs[new_name] = kwargs[ori_name] + + super().__init__(init_cfg) + + self.batch_first = batch_first + + num_attn = operation_order.count('self_attn') + operation_order.count( + 'cross_attn') + if isinstance(attn_cfgs, dict): + attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)] + else: + assert num_attn == len(attn_cfgs), f'The length ' \ + f'of attn_cfg {num_attn} is ' \ + f'not consistent with the number of attention' \ + f'in operation_order {operation_order}.' + + self.num_attn = num_attn + self.operation_order = operation_order + self.norm_cfg = norm_cfg + self.pre_norm = operation_order[0] == 'norm' + self.attentions = ModuleList() + + index = 0 + for operation_name in operation_order: + if operation_name in ['self_attn', 'cross_attn']: + if 'batch_first' in attn_cfgs[index]: + assert self.batch_first == attn_cfgs[index]['batch_first'] + else: + attn_cfgs[index]['batch_first'] = self.batch_first + attention = build_attention(attn_cfgs[index]) + # Some custom attentions used as `self_attn` + # or `cross_attn` can have different behavior. + attention.operation_name = operation_name + self.attentions.append(attention) + index += 1 + + self.embed_dims = self.attentions[0].embed_dims + + self.ffns = ModuleList() + num_ffns = operation_order.count('ffn') + if isinstance(ffn_cfgs, dict): + ffn_cfgs = ConfigDict(ffn_cfgs) + if isinstance(ffn_cfgs, dict): + ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)] + assert len(ffn_cfgs) == num_ffns + for ffn_index in range(num_ffns): + if 'embed_dims' not in ffn_cfgs[ffn_index]: + ffn_cfgs[ffn_index]['embed_dims'] = self.embed_dims + else: + assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims + self.ffns.append( + build_feedforward_network(ffn_cfgs[ffn_index], + dict(type='FFN'))) + + self.norms = ModuleList() + num_norms = operation_order.count('norm') + for _ in range(num_norms): + self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1]) + + def forward(self, + query, + key=None, + value=None, + query_pos=None, + key_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + **kwargs): + """Forward function for `TransformerDecoderLayer`. + **kwargs contains some specific arguments of attentions. + Args: + query (Tensor): The input query with shape + [num_queries, bs, embed_dims] if + self.batch_first is False, else + [bs, num_queries embed_dims]. + key (Tensor): The key tensor with shape [num_keys, bs, + embed_dims] if self.batch_first is False, else + [bs, num_keys, embed_dims] . + value (Tensor): The value tensor with same shape as `key`. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + key_pos (Tensor): The positional encoding for `key`. + Default: None. 
+            attn_masks (List[Tensor] | None): 2D Tensor used in
+                calculation of corresponding attention. Its length should
+                equal the number of `attention` entries in
+                `operation_order`. Default: None.
+            query_key_padding_mask (Tensor): ByteTensor for `query`, with
+                shape [bs, num_queries]. Only used in `self_attn` layer.
+                Defaults to None.
+            key_padding_mask (Tensor): ByteTensor for `key`, with
+                shape [bs, num_keys]. Default: None.
+        Returns:
+            Tensor: forwarded results with shape [num_queries, bs, embed_dims].
+        """
+
+        norm_index = 0
+        attn_index = 0
+        ffn_index = 0
+        identity = query
+        if attn_masks is None:
+            attn_masks = [None for _ in range(self.num_attn)]
+        elif isinstance(attn_masks, torch.Tensor):
+            attn_masks = [
+                copy.deepcopy(attn_masks) for _ in range(self.num_attn)
+            ]
+            warnings.warn(f'Use same attn_mask in all attentions in '
+                          f'{self.__class__.__name__}')
+        else:
+            assert len(attn_masks) == self.num_attn, f'The length of ' \
+                f'attn_masks {len(attn_masks)} must be equal ' \
+                f'to the number of attention in ' \
+                f'operation_order {self.num_attn}'
+
+        for layer in self.operation_order:
+            if layer == 'self_attn':
+                temp_key = temp_value = query
+                query = self.attentions[attn_index](
+                    query,
+                    temp_key,
+                    temp_value,
+                    identity if self.pre_norm else None,
+                    query_pos=query_pos,
+                    key_pos=query_pos,
+                    attn_mask=attn_masks[attn_index],
+                    key_padding_mask=query_key_padding_mask,
+                    **kwargs)
+                attn_index += 1
+                identity = query
+
+            elif layer == 'norm':
+                query = self.norms[norm_index](query)
+                norm_index += 1
+
+            elif layer == 'cross_attn':
+                query = self.attentions[attn_index](
+                    query,
+                    key,
+                    value,
+                    identity if self.pre_norm else None,
+                    query_pos=query_pos,
+                    key_pos=key_pos,
+                    attn_mask=attn_masks[attn_index],
+                    key_padding_mask=key_padding_mask,
+                    **kwargs)
+                attn_index += 1
+                identity = query
+
+            elif layer == 'ffn':
+                query = self.ffns[ffn_index](
+                    query, identity if self.pre_norm else None)
+                ffn_index += 1
+
+        return query
+
+@ATTENTION.register_module()
+class MotionDeformableAttention(BaseModule):
+    """An attention module used in Deformable-DETR.
+
+    `Deformable DETR: Deformable Transformers for End-to-End Object Detection.
+    <https://arxiv.org/abs/2010.04159>`_.
+
+    Args:
+        embed_dims (int): The embedding dimension of Attention.
+            Default: 256.
+        num_heads (int): Parallel attention heads. Default: 8.
+        num_levels (int): The number of feature maps used in
+            Attention. Default: 4.
+        num_points (int): The number of sampling points for
+            each query in each head. Default: 4.
+        num_steps (int): The number of trajectory steps sampled per
+            query. Default: 1.
+        sample_index (int): Index of the trajectory step used as the
+            sampling reference. Default: -1 (the last step).
+        im2col_step (int): The step used in image_to_column.
+            Default: 64.
+        dropout (float): A Dropout layer on `inp_identity`.
+            Default: 0.1.
+        bev_range (List[float]): The BEV perception range as
+            [x_min, y_min, z_min, x_max, y_max, z_max].
+            Default: [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0].
+        batch_first (bool): Key, Query and Value are shape of
+            (batch, n, embed_dim)
+            or (n, batch, embed_dim). Default to True.
+        norm_cfg (dict): Config dict for normalization layer.
+            Default: None.
+        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
+            Default: None.
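+
+    Example (illustrative sketch; the argument values are assumptions, not a
+    verified configuration from this repository):
+        >>> attn = MotionDeformableAttention(embed_dims=256, num_heads=8,
+        ...                                  num_levels=1, num_points=4,
+        ...                                  num_steps=1)
+        >>> # each query yields num_heads * num_steps * num_levels * num_points
+        >>> # sampling offsets with 2 coordinates each: 8 * 1 * 1 * 4 * 2 = 64
+        >>> attn.sampling_offsets.out_features
+        64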
+ """ + + def __init__(self, + embed_dims=256, + num_heads=8, + num_levels=4, + num_points=4, + num_steps=1, + sample_index=-1, + im2col_step=64, + dropout=0.1, + bev_range=[-51.2, -51.2, -5.0, 51.2, 51.2, 3.0], + voxel_size=[0.2, 0.2, 8], + batch_first=True, + norm_cfg=None, + init_cfg=None): + super().__init__(init_cfg) + if embed_dims % num_heads != 0: + raise ValueError(f'embed_dims must be divisible by num_heads, ' + f'but got {embed_dims} and {num_heads}') + dim_per_head = embed_dims // num_heads + self.norm_cfg = norm_cfg + self.dropout = nn.Dropout(dropout) + self.batch_first = batch_first + self.fp16_enabled = False + self.bev_range = bev_range + + # you'd better set dim_per_head to a power of 2 + # which is more efficient in the CUDA implementation + def _is_power_of_2(n): + if (not isinstance(n, int)) or (n < 0): + raise ValueError( + 'invalid input for _is_power_of_2: {} (type: {})'.format( + n, type(n))) + return (n & (n - 1) == 0) and n != 0 + + if not _is_power_of_2(dim_per_head): + warnings.warn( + "You'd better set embed_dims in " + 'MultiScaleDeformAttention to make ' + 'the dimension of each attention head a power of 2 ' + 'which is more efficient in our CUDA implementation.') + + self.im2col_step = im2col_step + self.embed_dims = embed_dims + self.num_levels = num_levels + self.num_heads = num_heads + self.num_points = num_points + self.num_steps = num_steps + self.sample_index = sample_index + self.sampling_offsets = nn.Linear( + embed_dims, num_heads * num_steps * num_levels * num_points * 2) + self.attention_weights = nn.Linear(embed_dims, + num_heads * num_steps * num_levels * num_points) + self.value_proj = nn.Linear(embed_dims, embed_dims) + self.output_proj = Sequential(nn.Linear(num_steps*embed_dims, embed_dims), + nn.LayerNorm(embed_dims), + nn.ReLU(inplace=True) + ) + self.init_weights() + + def init_weights(self): + """Default initialization for Parameters of Module.""" + constant_init(self.sampling_offsets, 0.) + thetas = torch.arange( + self.num_heads, + dtype=torch.float32) * (2.0 * math.pi / self.num_heads) + grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) + grid_init = (grid_init / + grid_init.abs().max(-1, keepdim=True)[0]).view( + self.num_heads, 1, 1, 1, + 2).repeat(1, self.num_steps, self.num_levels, self.num_points, 1) + for i in range(self.num_points): + grid_init[:, :, :, i, :] *= i + 1 + + self.sampling_offsets.bias.data = grid_init.view(-1) + constant_init(self.attention_weights, val=0., bias=0.) + xavier_init(self.value_proj, distribution='uniform', bias=0.) + xavier_init(self.output_proj, distribution='uniform', bias=0.) + self._is_init = True + + @deprecated_api_warning({'residual': 'identity'}, + cls_name='MultiScaleDeformableAttention') + def forward(self, + query, + key=None, + value=None, + identity=None, + query_pos=None, + key_padding_mask=None, + spatial_shapes=None, + level_start_index=None, + bbox_results=None, + reference_trajs=None, + flag='decoder', + **kwargs): + """Forward Function of MultiScaleDeformAttention. + + Args: + query (Tensor): Query of Transformer with shape + (num_query, bs, embed_dims). + key (Tensor): The key tensor with shape + `(num_key, bs, embed_dims)`. + value (Tensor): The value tensor with shape + `(num_key, bs, embed_dims)`. + identity (Tensor): The tensor used for addition, with the + same shape as `query`. Default None. If None, + `query` will be used. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + key_pos (Tensor): The positional encoding for `key`. 
Default + None. + reference_points (Tensor): The normalized reference + points with shape (bs, num_query, num_levels, 2), + all elements is range in [0, 1], top-left (0,0), + bottom-right (1, 1), including padding area. + or (N, Length_{query}, num_levels, 4), add + additional two dimensions is (w, h) to + form reference boxes. + key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_key]. + spatial_shapes (Tensor): Spatial shape of features in + different levels. With shape (num_levels, 2), + last dimension represents (h, w). + level_start_index (Tensor): The start index of each level. + A tensor has shape ``(num_levels, )`` and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + + Returns: + Tensor: forwarded results with shape [num_query, bs, embed_dims]. + """ + bs, num_agent, num_mode, _ = query.shape + num_query = num_agent * num_mode + if value is None: + value = query + if identity is None: + identity = query + if query_pos is not None: + query = query + query_pos + query = torch.flatten(query, start_dim=1, end_dim=2) + + value = value.permute(1, 0, 2) + bs, num_value, _ = value.shape + assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value + + value = self.value_proj(value) + if key_padding_mask is not None: + value = value.masked_fill(key_padding_mask[..., None], 0.0) + value = value.view(bs, num_value, self.num_heads, -1) + sampling_offsets = self.sampling_offsets(query).view( + bs, num_query, self.num_heads, self.num_steps, self.num_levels, self.num_points, 2) + attention_weights = self.attention_weights(query).view( + bs, num_query, self.num_heads, self.num_steps, self.num_levels * self.num_points) + attention_weights = attention_weights.softmax(-1) + + attention_weights = attention_weights.view(bs, num_query, + self.num_heads, + self.num_steps, + self.num_levels, + self.num_points) + # bs, n_query, n_head, n_steps, N_level, N_points, 2 + # BS NUM_AGENT NUM_MODE 12 NUM_LEVEL 2 + if reference_trajs.shape[-1] == 2: + reference_trajs = reference_trajs[:, :, :, [self.sample_index], :, :] + reference_trajs_ego = self.agent_coords_to_ego_coords(copy.deepcopy(reference_trajs), bbox_results).detach() + reference_trajs_ego = torch.flatten(reference_trajs_ego, start_dim=1, end_dim=2) + reference_trajs_ego = reference_trajs_ego[:, :, None, :, :, None, :] + reference_trajs_ego[..., 0] -= self.bev_range[0] + reference_trajs_ego[..., 1] -= self.bev_range[1] + reference_trajs_ego[..., 0] /= (self.bev_range[3] - self.bev_range[0]) + reference_trajs_ego[..., 1] /= (self.bev_range[4] - self.bev_range[1]) + offset_normalizer = torch.stack( + [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) + sampling_locations = reference_trajs_ego \ + + sampling_offsets \ + / offset_normalizer[None, None, None, None, :, None, :] + + sampling_locations = rearrange(sampling_locations, 'bs nq nh ns nl np c -> bs nq ns nh nl np c') # permute([0,1,3,2,4,5,6]) + attention_weights = rearrange(attention_weights, 'bs nq nh ns nl np -> bs nq ns nh nl np') #.permute([0,1,3,2,4,5]) + sampling_locations = sampling_locations.reshape(bs, num_query*self.num_steps, self.num_heads, self.num_levels, self.num_points, 2) + attention_weights = attention_weights.reshape(bs, num_query*self.num_steps, self.num_heads, self.num_levels, self.num_points) + + else: + raise ValueError( + f'Last dim of reference_trajs must be' + f' 2 or 4, but get {reference_trajs.shape[-1]} instead.') + if torch.cuda.is_available() and value.is_cuda: + + # using fp16 deformable attention is unstable because 
it performs many sum operations + if value.dtype == torch.float16: + MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32 + else: + MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32 + output = MultiScaleDeformableAttnFunction.apply( + value, spatial_shapes, level_start_index, sampling_locations, + attention_weights, self.im2col_step) + else: + output = multi_scale_deformable_attn_pytorch( + value, spatial_shapes, sampling_locations, attention_weights) + output = output.view(bs, num_query, self.num_steps, -1) + output = torch.flatten(output, start_dim=2, end_dim=3) + output = self.output_proj(output) + output = output.view(bs, num_agent, num_mode, -1) + return self.dropout(output) + identity + + def agent_coords_to_ego_coords(self, reference_trajs, bbox_results): + batch_size = len(bbox_results) + reference_trajs_ego = [] + for i in range(batch_size): + boxes_3d, scores, labels, bbox_index, mask = bbox_results[i] + det_centers = boxes_3d.gravity_center.to(reference_trajs.device) + batch_reference_trajs = reference_trajs[i] + batch_reference_trajs += det_centers[:, None, None, None, :2] + reference_trajs_ego.append(batch_reference_trajs) + return torch.stack(reference_trajs_ego) + + def rot_2d(self, yaw): + sy, cy = torch.sin(yaw), torch.cos(yaw) + out = torch.stack([torch.stack([cy, -sy]), torch.stack([sy, cy])]).permute([2,0,1]) + return out + +@ATTENTION.register_module() +class CustomModeMultiheadAttention(BaseModule): + """A wrapper for ``torch.nn.MultiheadAttention``. + This module implements MultiheadAttention with identity connection, + and positional encoding is also passed as input. + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + attn_drop (float): A Dropout layer on attn_output_weights. + Default: 0.0. + proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. + Default: 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + batch_first (bool): When it is True, Key, Query and Value are shape of + (batch, n, embed_dim), otherwise (n, batch, embed_dim). + Default to False. + """ + + def __init__(self, + embed_dims, + num_heads, + attn_drop=0., + proj_drop=0., + dropout_layer=dict(type='Dropout', drop_prob=0.), + init_cfg=None, + **kwargs): + super().__init__(init_cfg) + if 'dropout' in kwargs: + warnings.warn( + 'The arguments `dropout` in MultiheadAttention ' + 'has been deprecated, now you can separately ' + 'set `attn_drop`(float), proj_drop(float), ' + 'and `dropout_layer`(dict) ', DeprecationWarning) + attn_drop = kwargs['dropout'] + dropout_layer['drop_prob'] = kwargs.pop('dropout') + + self.embed_dims = embed_dims + self.num_heads = num_heads + + self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop, **kwargs) + + self.proj_drop = nn.Dropout(proj_drop) + self.dropout_layer = build_dropout( + dropout_layer) if dropout_layer else nn.Identity() + + @deprecated_api_warning({'residual': 'identity'}, + cls_name='MultiheadAttention') + def forward(self, + query, + key=None, + value=None, + identity=None, + query_pos=None, + key_pos=None, + attn_mask=None, + key_padding_mask=None, + **kwargs): + """Forward function for `MultiheadAttention`. + **kwargs allow passing a more general data flow when combining + with other operations in `transformerlayer`. 
+ Args: + query (Tensor): The input query with shape [num_queries, bs, + embed_dims] if self.batch_first is False, else + [bs, num_queries embed_dims]. + key (Tensor): The key tensor with shape [num_keys, bs, + embed_dims] if self.batch_first is False, else + [bs, num_keys, embed_dims] . + If None, the ``query`` will be used. Defaults to None. + value (Tensor): The value tensor with same shape as `key`. + Same in `nn.MultiheadAttention.forward`. Defaults to None. + If None, the `key` will be used. + identity (Tensor): This tensor, with the same shape as x, + will be used for the identity link. + If None, `x` will be used. Defaults to None. + query_pos (Tensor): The positional encoding for query, with + the same shape as `x`. If not None, it will + be added to `x` before forward function. Defaults to None. + key_pos (Tensor): The positional encoding for `key`, with the + same shape as `key`. Defaults to None. If not None, it will + be added to `key` before forward function. If None, and + `query_pos` has the same shape as `key`, then `query_pos` + will be used for `key_pos`. Defaults to None. + attn_mask (Tensor): ByteTensor mask with shape [num_queries, + num_keys]. Same in `nn.MultiheadAttention.forward`. + Defaults to None. + key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys]. + Defaults to None. + Returns: + Tensor: forwarded results with shape + [num_queries, bs, embed_dims] + if self.batch_first is False, else + [bs, num_queries embed_dims]. + """ + query_pos = query_pos.unsqueeze(1) + key_pos = key_pos.unsqueeze(1) + bs, n_agent, n_query, D = query.shape + if key is None: + key = query + if value is None: + value = key + if identity is None: + identity = query + if key_pos is None: + if query_pos is not None: + # use query_pos if key_pos is not available + if query_pos.shape == key.shape: + key_pos = query_pos + else: + warnings.warn(f'position encoding of key is' + f'missing in {self.__class__.__name__}.') + if query_pos is not None: + query = query + query_pos + if key_pos is not None: + key = key + key_pos + + # Because the dataflow('key', 'query', 'value') of + # ``torch.nn.MultiheadAttention`` is (num_query, batch, + # embed_dims), We should adjust the shape of dataflow from + # batch_first (batch, num_query, embed_dims) to num_query_first + # (num_query ,batch, embed_dims), and recover ``attn_output`` + # from num_query_first to batch_first. 
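+        # Illustrative shape walk-through (values assumed for this sketch):
+        # with bs=2, n_agent=5, n_query=6, D=256, flattening merges the batch
+        # and agent axes, (2, 5, 6, 256) -> (10, 6, 256), and the transpose
+        # yields (6, 10, 256), the (num_query, batch, embed_dims) layout that
+        # nn.MultiheadAttention expects by default.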
+ query = torch.flatten(query, start_dim=0, end_dim=1) + key = torch.flatten(key, start_dim=0, end_dim=1) + value = torch.flatten(value, start_dim=0, end_dim=1) + identity = torch.flatten(identity, start_dim=0, end_dim=1) + + query = query.transpose(0, 1) + key = key.transpose(0, 1) + value = value.transpose(0, 1) + + out = self.attn( + query=query, + key=key, + value=value, + attn_mask=attn_mask, + key_padding_mask=key_padding_mask)[0] + + out = out.transpose(0, 1) + out = identity + self.dropout_layer(self.proj_drop(out)) + + return out.view(bs, n_agent, n_query, D) \ No newline at end of file diff --git a/mmcv/models/dense_heads/motion_head_plugin/motion_optimization.py b/mmcv/models/dense_heads/motion_head_plugin/motion_optimization.py new file mode 100644 index 0000000..0dced26 --- /dev/null +++ b/mmcv/models/dense_heads/motion_head_plugin/motion_optimization.py @@ -0,0 +1,218 @@ +#---------------------------------------------------------------------------------# +# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156) # +# Source code: https://github.com/OpenDriveLab/UniAD # +# Copyright (c) OpenDriveLab. All rights reserved. # +#---------------------------------------------------------------------------------# + +from dataclasses import dataclass +from enum import Enum, auto +from typing import Any, Dict, List, Optional, Sequence, Tuple, Union + +import numpy as np +import numpy.typing as npt +from casadi import DM, Opti, OptiSol, cos, diff, sin, sumsqr, vertcat +Pose = Tuple[float, float, float] # (x, y, yaw) + + +class MotionNonlinearSmoother: + """ + Smoothing a set of xy observations with a vehicle dynamics model. + Solved with direct multiple-shooting. + modified from https://github.com/motional/nuplan-devkit + :param trajectory_len: trajectory length + :param dt: timestep (sec) + """ + + def __init__(self, trajectory_len: int, dt: float): + """ + :param trajectory_len: the length of trajectory to be optimized. + :param dt: the time interval between trajectory points. + """ + self.dt = dt + self.trajectory_len = trajectory_len + self.current_index = 0 + # Use a array of dts to make it compatible to situations with varying dts across different time steps. + self._dts: npt.NDArray[np.float32] = np.asarray( + [[dt] * trajectory_len]) + self._init_optimization() + + def _init_optimization(self) -> None: + """ + Initialize related variables and constraints for optimization. + """ + self.nx = 4 # state dim + self.nu = 2 # control dim + + self._optimizer = Opti() # Optimization problem + self._create_decision_variables() + self._create_parameters() + self._set_dynamic_constraints() + self._set_state_constraints() + self._set_control_constraints() + self._set_objective() + + # Set default solver options (quiet) + self._optimizer.solver( + "ipopt", {"ipopt.print_level": 0, "print_time": 0, "ipopt.sb": "yes"}) + + def set_reference_trajectory(self, x_curr: Sequence[float], reference_trajectory: Sequence[Pose]) -> None: + """ + Set the reference trajectory that the smoother is trying to loosely track. 
+        :param x_curr: current state of size nx (x, y, yaw, speed)
+        :param reference_trajectory: N+1 x 3 reference, where the second dim is for (x, y, yaw)
+        """
+        self._check_inputs(x_curr, reference_trajectory)
+
+        self._optimizer.set_value(self.x_curr, DM(x_curr))
+        self._optimizer.set_value(self.ref_traj, DM(reference_trajectory).T)
+        self._set_initial_guess(x_curr, reference_trajectory)
+
+    def set_solver_optimizerons(self, options: Dict[str, Any]) -> None:
+        """
+        Control solver options including verbosity.
+        :param options: Dictionary containing optimization criteria
+        """
+        self._optimizer.solver("ipopt", options)
+
+    def solve(self) -> OptiSol:
+        """
+        Solve the optimization problem. Assumes the reference trajectory was already set.
+        :return: Casadi optimization class
+        """
+        return self._optimizer.solve()
+
+    def _create_decision_variables(self) -> None:
+        """
+        Define the decision variables for the trajectory optimization.
+        """
+        # State trajectory (x, y, yaw, speed)
+        self.state = self._optimizer.variable(self.nx, self.trajectory_len + 1)
+        self.position_x = self.state[0, :]
+        self.position_y = self.state[1, :]
+        self.yaw = self.state[2, :]
+        self.speed = self.state[3, :]
+
+        # Control trajectory (curvature, accel)
+        self.control = self._optimizer.variable(self.nu, self.trajectory_len)
+        self.curvature = self.control[0, :]
+        self.accel = self.control[1, :]
+
+        # Derived control and state variables; dt[:, 1:] because the state vector is one step longer than the control vector.
+        self.curvature_rate = diff(self.curvature) / self._dts[:, 1:]
+        self.jerk = diff(self.accel) / self._dts[:, 1:]
+        self.lateral_accel = self.speed[: self.trajectory_len] ** 2 * \
+            self.curvature
+
+    def _create_parameters(self) -> None:
+        """
+        Define the expert trajectory and current position parameters for the trajectory optimization.
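+        Both are CasADi ``Opti`` parameters whose values are filled in by
+        ``set_reference_trajectory``: ``ref_traj`` has shape
+        (3, trajectory_len + 1) for (x, y, yaw) and ``x_curr`` has shape (nx, 1).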
+ """ + self.ref_traj = self._optimizer.parameter( + 3, self.trajectory_len + 1) # (x, y, yaw) + self.x_curr = self._optimizer.parameter(self.nx, 1) + + def _set_dynamic_constraints(self) -> None: + r""" + Set the system dynamics constraints as following: + dx/dt = f(x,u) + \dot{x} = speed * cos(yaw) + \dot{y} = speed * sin(yaw) + \dot{yaw} = speed * curvature + \dot{speed} = accel + """ + state = self.state + control = self.control + dt = self.dt + + def process(x: Sequence[float], u: Sequence[float]) -> Any: + """Process for state propagation.""" + return vertcat(x[3] * cos(x[2]), x[3] * sin(x[2]), x[3] * u[0], u[1]) + + for k in range(self.trajectory_len): # loop over control intervals + # Runge-Kutta 4 integration + k1 = process(state[:, k], control[:, k]) + k2 = process(state[:, k] + dt / 2 * k1, control[:, k]) + k3 = process(state[:, k] + dt / 2 * k2, control[:, k]) + k4 = process(state[:, k] + dt * k3, control[:, k]) + next_state = state[:, k] + dt / 6 * (k1 + 2 * k2 + 2 * k3 + k4) + self._optimizer.subject_to( + state[:, k + 1] == next_state) # close the gaps + + def _set_control_constraints(self) -> None: + """Set the hard control constraints.""" + curvature_limit = 1.0 / 5.0 # 1/m + self._optimizer.subject_to( + self._optimizer.bounded(-curvature_limit, self.curvature, curvature_limit)) + accel_limit = 4.0 # m/s^2 + self._optimizer.subject_to( + self._optimizer.bounded(-accel_limit, self.accel, accel_limit)) + + def _set_state_constraints(self) -> None: + """Set the hard state constraints.""" + # Constrain the current time -- NOT start of history + # initial boundary condition + self._optimizer.subject_to( + self.state[:, self.current_index] == self.x_curr) + + max_speed = 35.0 # m/s + self._optimizer.subject_to(self._optimizer.bounded( + 0.0, self.speed, max_speed)) # only forward + max_yaw_rate = 1.75 # rad/s + self._optimizer.subject_to( + self._optimizer.bounded(-max_yaw_rate, diff(self.yaw) / self._dts, max_yaw_rate)) + max_lateral_accel = 4.0 # m/s^2, assumes circular motion acc_lat = speed^2 * curvature + self._optimizer.subject_to( + self._optimizer.bounded( + -max_lateral_accel, self.speed[:, : self.trajectory_len] ** 2 * + self.curvature, max_lateral_accel + ) + ) + + def _set_objective(self) -> None: + """Set the objective function. 
Use care when modifying these weights.""" + # Follow reference, minimize control rates and absolute inputs + alpha_xy = 1.0 + alpha_yaw = 0.1 + alpha_rate = 0.08 + alpha_abs = 0.08 + alpha_lat_accel = 0.06 + cost_stage = ( + alpha_xy * + sumsqr(self.ref_traj[:2, :] - + vertcat(self.position_x, self.position_y)) + + alpha_yaw * sumsqr(self.ref_traj[2, :] - self.yaw) + + alpha_rate * (sumsqr(self.curvature_rate) + sumsqr(self.jerk)) + + alpha_abs * (sumsqr(self.curvature) + sumsqr(self.accel)) + + alpha_lat_accel * sumsqr(self.lateral_accel) + ) + + # Take special care with the final state + alpha_terminal_xy = 1.0 + alpha_terminal_yaw = 40.0 # really care about final heading to help with lane changes + cost_terminal = alpha_terminal_xy * sumsqr( + self.ref_traj[:2, -1] - + vertcat(self.position_x[-1], self.position_y[-1]) + ) + alpha_terminal_yaw * sumsqr(self.ref_traj[2, -1] - self.yaw[-1]) + + self._optimizer.minimize( + cost_stage + self.trajectory_len / 4.0 * cost_terminal) + + def _set_initial_guess(self, x_curr: Sequence[float], reference_trajectory: Sequence[Pose]) -> None: + """Set a warm-start for the solver based on the reference trajectory.""" + self._check_inputs(x_curr, reference_trajectory) + + # Initialize state guess based on reference + self._optimizer.set_initial(self.state[:3, :], DM( + reference_trajectory).T) # (x, y, yaw) + self._optimizer.set_initial(self.state[3, :], DM(x_curr[3])) # speed + + def _check_inputs(self, x_curr: Sequence[float], reference_trajectory: Sequence[Pose]) -> None: + """Raise ValueError if inputs are not of proper size.""" + if len(x_curr) != self.nx: + raise ValueError( + f"x_curr length {len(x_curr)} must be equal to state dim {self.nx}") + + if len(reference_trajectory) != self.trajectory_len + 1: + raise ValueError( + f"reference traj length {len(reference_trajectory)} must be equal to {self.trajectory_len + 1}" + ) diff --git a/mmcv/models/dense_heads/motion_head_plugin/motion_utils.py b/mmcv/models/dense_heads/motion_head_plugin/motion_utils.py new file mode 100644 index 0000000..48ef857 --- /dev/null +++ b/mmcv/models/dense_heads/motion_head_plugin/motion_utils.py @@ -0,0 +1,99 @@ +#---------------------------------------------------------------------------------# +# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156) # +# Source code: https://github.com/OpenDriveLab/UniAD # +# Copyright (c) OpenDriveLab. All rights reserved. # +#---------------------------------------------------------------------------------# + +import torch +import random +import numpy as np +from .motion_optimization import MotionNonlinearSmoother + + +def nonlinear_smoother(gt_bboxes_3d, gt_fut_traj, gt_fut_traj_mask, bbox_tensor): + """ + This function applies a nonlinear smoother to the ground truth future trajectories of 3D bounding boxes. + It takes into account the vehicle's yaw and velocity to generate smooth, realistic trajectories. + + Args: + gt_bboxes_3d (torch.Tensor): Ground truth 3D bounding boxes of shape (batch_size, 7). + gt_fut_traj (torch.Tensor): Ground truth future trajectories of shape (batch_size, 12, 2). + gt_fut_traj_mask (torch.Tensor): A mask indicating valid timesteps in the ground truth future trajectories of shape (batch_size, 12). + bbox_tensor (torch.Tensor): A tensor representing the bounding box properties of shape (batch_size, 9). + + Returns: + torch.Tensor: The perturbed trajectories of shape (batch_size, 12, 2). 
+ torch.Tensor: The updated mask indicating valid timesteps in the perturbed trajectories of the same shape as gt_fut_traj_mask. + """ + device = gt_fut_traj.device + dtype = gt_fut_traj.dtype + gt_bboxes_3d = gt_bboxes_3d.cpu().detach().numpy() + gt_fut_traj = gt_fut_traj.cpu().detach().numpy() + gt_fut_traj_xy_diff = np.zeros((gt_fut_traj.shape[0], 13, 2)) + gt_fut_traj_xy_diff[:, 1:, :] = gt_fut_traj + gt_fut_traj_xy_diff = np.diff(gt_fut_traj_xy_diff, axis=1) + gt_fut_traj_yaw = np.arctan2( + gt_fut_traj_xy_diff[:, :, 1], gt_fut_traj_xy_diff[:, :, 0]) + gt_fut_traj_yaw = np.concatenate( + [-np.pi/2 - gt_bboxes_3d[:, None, 6:7], gt_fut_traj_yaw[:, :, None]], axis=1) + gt_fut_traj = np.concatenate( + [gt_bboxes_3d[:, None, :2], gt_fut_traj], axis=1) + + gt_fut_traj_mask = gt_fut_traj_mask.cpu().detach().numpy() + bbox_tensor = bbox_tensor.cpu().detach().numpy() + ts_limit = gt_fut_traj_mask.sum(1)[:, 0] + yaw_preds = bbox_tensor[:, 6] + vel_preds = bbox_tensor[:, -2:] + speed_preds = np.sqrt(np.sum(vel_preds**2, axis=-1)) + traj_perturb_all = [] + + # we set some constraints here to avoid perturbing the trajectories that are not dynamic, + # or have large differences with the ground truth + def _is_dynamic(traj, ts, dist_thres): + return np.sqrt(np.sum((traj[ts, :2] - traj[0, :2])**2)) > dist_thres + + def _check_diff(x_curr, ref_traj): + if np.sqrt((x_curr[0] - ref_traj[0, 0]) ** 2 + (x_curr[1] - ref_traj[0, 1])**2) > 2: + return False + a = np.array([np.cos(x_curr[2]), np.sin(x_curr[2])]) + b = np.array([np.cos(ref_traj[0, 2]), np.sin(ref_traj[0, 2])]) + diff_theta = np.arccos( + np.sum(a*b)/(np.sqrt(np.sum(a**2)) * np.sqrt(np.sum(b**2)))) + if diff_theta > np.pi/180 * 30: + return False + return True + + def _check_ade(traj_pert, traj_ref, thres): + return np.mean(np.sqrt(np.sum((traj_pert[:, :2] - traj_ref[:, :2])**2, axis=-1))) < thres + + perturb_count = 0 + perturb_used_count = 0 + for i in range(gt_fut_traj.shape[0]): + ts = ts_limit[i] + x_curr = [bbox_tensor[i, 0], bbox_tensor[i, 1], - + np.pi/2 - yaw_preds[i], speed_preds[i]] + reference_trajectory = np.concatenate( + [gt_fut_traj[i], gt_fut_traj_yaw[i]], axis=-1) + if ts > 1 and _is_dynamic(gt_fut_traj[i], int(ts), 2) and _check_diff(x_curr, reference_trajectory): + smoother = MotionNonlinearSmoother( + trajectory_len=int(ts), dt=0.5) + reference_trajectory = reference_trajectory[:int(ts)+1, :] + smoother.set_reference_trajectory(x_curr, reference_trajectory) + sol = smoother.solve() + traj_perturb = np.stack( + [sol.value(smoother.position_x), sol.value(smoother.position_y)], axis=-1) + perturb_used_count += 1 + if not _check_ade(traj_perturb, reference_trajectory, thres=1.5): + traj_perturb = gt_fut_traj[i, 1:, + :2] - gt_fut_traj[i, 0:1, :2] + else: + traj_perturb_tmp = traj_perturb[1:, + :2] - traj_perturb[0:1, :2] + traj_perturb = np.zeros((12, 2)) + traj_perturb[:traj_perturb_tmp.shape[0], + :] = traj_perturb_tmp[:, :2] + perturb_count += 1 + else: + traj_perturb = gt_fut_traj[i, 1:, :2] - gt_fut_traj[i, 0:1, :2] + traj_perturb_all.append(traj_perturb) + return torch.tensor(traj_perturb_all, device=device, dtype=dtype), torch.tensor(gt_fut_traj_mask > 0, device=device) diff --git a/mmcv/models/dense_heads/occ_head.py b/mmcv/models/dense_heads/occ_head.py new file mode 100644 index 0000000..5ad8a9a --- /dev/null +++ b/mmcv/models/dense_heads/occ_head.py @@ -0,0 +1,482 @@ +#---------------------------------------------------------------------------------# +# UniAD: Planning-oriented Autonomous Driving 
(https://arxiv.org/abs/2212.10156) # +# Source code: https://github.com/OpenDriveLab/UniAD # +# Copyright (c) OpenDriveLab. All rights reserved. # +#---------------------------------------------------------------------------------# + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.models.builder import HEADS, build_loss +from mmcv.models.backbones.base_module import BaseModule +from einops import rearrange +from mmcv.core.utils import reduce_mean +from mmcv.models.bricks.transformer import build_transformer_layer_sequence +import copy +from .occ_head_plugin import MLP, BevFeatureSlicer, SimpleConv2d, CVT_Decoder, Bottleneck, UpsamplingAdd, \ + predict_instance_segmentation_and_trajectories + +def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + +@HEADS.register_module() +class OccHead(BaseModule): + def __init__(self, + # General + receptive_field=3, + n_future=4, + spatial_extent=(50, 50), + ignore_index=255, + + # BEV + grid_conf = None, + + bev_size=(200, 200), + bev_emb_dim=256, + bev_proj_dim=64, + bev_proj_nlayers=1, + + # Query + query_dim=256, + query_mlp_layers=3, + detach_query_pos=True, + temporal_mlp_layer=2, + + # Transformer + transformer_decoder=None, + + attn_mask_thresh=0.5, + # Loss + sample_ignore_mode='all_valid', + aux_loss_weight=1., + + loss_mask=None, + loss_dice=None, + + # Cfgs + init_cfg=None, + + # Eval + pan_eval=False, + test_seg_thresh:float=0.5, + test_with_track_score=False, + ): + assert init_cfg is None, 'To prevent abnormal initialization ' \ + 'behavior, init_cfg is not allowed to be set' + super().__init__(init_cfg) + self.receptive_field = receptive_field # NOTE: Used by prepare_future_labels in E2EPredTransformer + self.n_future = n_future + self.spatial_extent = spatial_extent + self.ignore_index = ignore_index + + bevformer_bev_conf = { + 'xbound': [-51.2, 51.2, 0.512], + 'ybound': [-51.2, 51.2, 0.512], + 'zbound': [-10.0, 10.0, 20.0], + } + self.bev_sampler = BevFeatureSlicer(bevformer_bev_conf, grid_conf) + + self.bev_size = bev_size + self.bev_proj_dim = bev_proj_dim + + if bev_proj_nlayers == 0: + self.bev_light_proj = nn.Sequential() + else: + self.bev_light_proj = SimpleConv2d( + in_channels=bev_emb_dim, + conv_channels=bev_emb_dim, + out_channels=bev_proj_dim, + num_conv=bev_proj_nlayers, + ) + + # Downscale bev_feat -> /4 + self.base_downscale = nn.Sequential( + Bottleneck(in_channels=bev_proj_dim, downsample=True), + Bottleneck(in_channels=bev_proj_dim, downsample=True) + ) + + # Future blocks with transformer + self.n_future_blocks = self.n_future + 1 + + # - transformer + self.attn_mask_thresh = attn_mask_thresh + + self.num_trans_layers = transformer_decoder.num_layers + assert self.num_trans_layers % self.n_future_blocks == 0 + + self.num_heads = transformer_decoder.transformerlayers.\ + attn_cfgs.num_heads + self.transformer_decoder = build_transformer_layer_sequence( + transformer_decoder) + + # - temporal-mlps + # query_out_dim = bev_proj_dim + + temporal_mlp = MLP(query_dim, query_dim, bev_proj_dim, num_layers=temporal_mlp_layer) + self.temporal_mlps = _get_clones(temporal_mlp, self.n_future_blocks) + + # - downscale-convs + downscale_conv = Bottleneck(in_channels=bev_proj_dim, downsample=True) + self.downscale_convs = _get_clones(downscale_conv, self.n_future_blocks) + + # - upsampleAdds + upsample_add = UpsamplingAdd(in_channels=bev_proj_dim, out_channels=bev_proj_dim) + self.upsample_adds = _get_clones(upsample_add, self.n_future_blocks) + + # Decoder + 
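# The dense decoder below is a small CVT-style decoder: with the default
+        # bev_size=(200, 200) its two x2 upsampling blocks map the fused future
+        # states from 1/4 scale back to the full BEV grid, i.e.
+        # (b, t, C, 50, 50) -> (b, t, C, 200, 200).
+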
self.dense_decoder = CVT_Decoder( + dim=bev_proj_dim, + blocks=[bev_proj_dim, bev_proj_dim], + ) + + # Query + self.mode_fuser = nn.Sequential( + nn.Linear(query_dim, bev_proj_dim), + nn.LayerNorm(bev_proj_dim), + nn.ReLU(inplace=True) + ) + self.multi_query_fuser = nn.Sequential( + nn.Linear(query_dim * 3, query_dim * 2), + nn.LayerNorm(query_dim * 2), + nn.ReLU(inplace=True), + nn.Linear(query_dim * 2, bev_proj_dim), + ) + + self.detach_query_pos = detach_query_pos + + self.query_to_occ_feat = MLP( + query_dim, query_dim, bev_proj_dim, num_layers=query_mlp_layers + ) + self.temporal_mlp_for_mask = copy.deepcopy(self.query_to_occ_feat) + + # Loss + # For matching + self.sample_ignore_mode = sample_ignore_mode + assert self.sample_ignore_mode in ['all_valid', 'past_valid', 'none'] + + self.aux_loss_weight = aux_loss_weight + + self.loss_dice = build_loss(loss_dice) + self.loss_mask = build_loss(loss_mask) + + self.pan_eval = pan_eval + self.test_seg_thresh = test_seg_thresh + + self.test_with_track_score = test_with_track_score + self.init_weights() + + def init_weights(self): + for p in self.transformer_decoder.parameters(): + if p.dim() > 1: + nn.init.xavier_normal_(p) + + def get_attn_mask(self, state, ins_query): + # state: b, c, h, w + # ins_query: b, q, c + ins_embed = self.temporal_mlp_for_mask( + ins_query + ) + mask_pred = torch.einsum("bqc,bchw->bqhw", ins_embed, state) + attn_mask = mask_pred.sigmoid() < self.attn_mask_thresh + attn_mask = rearrange(attn_mask, 'b q h w -> b (h w) q').unsqueeze(1).repeat( + 1, self.num_heads, 1, 1).flatten(0, 1) + attn_mask = attn_mask.detach() + + # if a mask is all True(all background), then set it all False. + attn_mask[torch.where( + attn_mask.sum(-1) == attn_mask.shape[-1])] = False + + upsampled_mask_pred = F.interpolate( + mask_pred, + self.bev_size, + mode='bilinear', + align_corners=False + ) # Supervised by gt + + return attn_mask, upsampled_mask_pred, ins_embed + + def forward(self, x, ins_query): + base_state = rearrange(x, '(h w) b d -> b d h w', h=self.bev_size[0]) + + base_state = self.bev_sampler(base_state) + base_state = self.bev_light_proj(base_state) + base_state = self.base_downscale(base_state) + base_ins_query = ins_query + + last_state = base_state + last_ins_query = base_ins_query + future_states = [] + mask_preds = [] + temporal_query = [] + temporal_embed_for_mask_attn = [] + n_trans_layer_each_block = self.num_trans_layers // self.n_future_blocks + assert n_trans_layer_each_block >= 1 + + for i in range(self.n_future_blocks): + # Downscale + cur_state = self.downscale_convs[i](last_state) # /4 -> /8 + + # Attention + # temporal_aware ins_query + cur_ins_query = self.temporal_mlps[i](last_ins_query) # [b, q, d] + temporal_query.append(cur_ins_query) + + # Generate attn mask + attn_mask, mask_pred, cur_ins_emb_for_mask_attn = self.get_attn_mask(cur_state, cur_ins_query) + attn_masks = [None, attn_mask] + + mask_preds.append(mask_pred) # /1 + temporal_embed_for_mask_attn.append(cur_ins_emb_for_mask_attn) + + cur_state = rearrange(cur_state, 'b c h w -> (h w) b c') + cur_ins_query = rearrange(cur_ins_query, 'b q c -> q b c') + + for j in range(n_trans_layer_each_block): + trans_layer_ind = i * n_trans_layer_each_block + j + trans_layer = self.transformer_decoder.layers[trans_layer_ind] + cur_state = trans_layer( + query=cur_state, # [h'*w', b, c] + key=cur_ins_query, # [nq, b, c] + value=cur_ins_query, # [nq, b, c] + query_pos=None, + key_pos=None, + attn_masks=attn_masks, + query_key_padding_mask=None, + 
                    key_padding_mask=None
+                )  # out size: [h'*w', b, c]
+            cur_state = rearrange(cur_state, '(h w) b c -> b c h w', h=self.bev_size[0]//8)
+
+            # Upscale to /4
+            cur_state = self.upsample_adds[i](cur_state, last_state)
+
+            # Out
+            future_states.append(cur_state)  # [b, d, h/4, w/4]
+            last_state = cur_state
+
+        future_states = torch.stack(future_states, dim=1)  # [b, t, d, h/4, w/4]
+        temporal_query = torch.stack(temporal_query, dim=1)  # [b, t, q, d]
+        mask_preds = torch.stack(mask_preds, dim=2)  # [b, q, t, h, w]
+        ins_query = torch.stack(temporal_embed_for_mask_attn, dim=1)  # [b, t, q, d]
+
+        # Decode future states to larger resolution
+        future_states = self.dense_decoder(future_states)
+        ins_occ_query = self.query_to_occ_feat(ins_query)  # [b, t, q, query_out_dim]
+
+        # Generate final outputs
+        ins_occ_logits = torch.einsum("btqc,btchw->bqthw", ins_occ_query, future_states)
+
+        return mask_preds, ins_occ_logits
+
+    def merge_queries(self, outs_dict, detach_query_pos=True):
+        ins_query = outs_dict.get('traj_query', None)  # [n_dec, b, nq, n_modes, dim]
+        track_query = outs_dict['track_query']  # [b, nq, d]
+        track_query_pos = outs_dict['track_query_pos']  # [b, nq, d]
+
+        if detach_query_pos:
+            track_query_pos = track_query_pos.detach()
+
+        ins_query = ins_query[-1]
+        ins_query = self.mode_fuser(ins_query).max(2)[0]
+        ins_query = self.multi_query_fuser(torch.cat([ins_query, track_query, track_query_pos], dim=-1))
+
+        return ins_query
+
+    # With matched queries [a small part of all queries] and matched_gt results
+    def forward_train(
+        self,
+        bev_feat,
+        outs_dict,
+        gt_inds_list=None,
+        gt_segmentation=None,
+        gt_instance=None,
+        gt_img_is_valid=None,
+    ):
+        # Generate warped gt and related inputs
+        gt_segmentation, gt_instance, gt_img_is_valid = self.get_occ_labels(gt_segmentation, gt_instance, gt_img_is_valid)
+
+        all_matched_gt_ids = outs_dict['all_matched_idxes']  # list of tensor, length bs
+
+        ins_query = self.merge_queries(outs_dict, self.detach_query_pos)
+
+        # Forward the occ-flow model
+        mask_preds_batch, ins_seg_preds_batch = self(bev_feat, ins_query=ins_query)
+
+        # Get pred and gt
+        ins_seg_targets_batch = gt_instance  # [b, t, h, w], e.g. [1, 5, 200, 200]; instance targets of a batch
+
+        # img_valid flag, for filtering out invalid samples in the sequence when calculating loss
+        img_is_valid = gt_img_is_valid  # [1, 7]
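+        # Frame layout (derived from the code below): with receptive_field=3 and
+        # n_future=4 there are 7 validity flags; frames 0..2 form the past window
+        # (frame 2 = current), and frames 2..6 (current + 4 future) are the ones
+        # supervised by the occupancy loss.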
Don't change it" + frame_valid_mask = img_is_valid.bool() + past_valid_mask = frame_valid_mask[:, :self.receptive_field] + future_frame_mask = frame_valid_mask[:, (self.receptive_field-1):] # [1, 5] including current frame + + # only supervise when all 3 past frames are valid + past_valid = past_valid_mask.all(dim=1) + future_frame_mask[~past_valid] = False + + # Calculate loss in the batch + loss_dict = dict() + loss_dice = ins_seg_preds_batch.new_zeros(1)[0].float() + loss_mask = ins_seg_preds_batch.new_zeros(1)[0].float() + loss_aux_dice = ins_seg_preds_batch.new_zeros(1)[0].float() + loss_aux_mask = ins_seg_preds_batch.new_zeros(1)[0].float() + + bs = ins_query.size(0) + assert bs == 1 + for ind in range(bs): + # Each gt_bboxes contains 3 frames, we only use the last one + cur_gt_inds = gt_inds_list[ind][-1] + + cur_matched_gt = all_matched_gt_ids[ind] # [n_gt] + + # Re-order gt according to matched_gt_inds + cur_gt_inds = cur_gt_inds[cur_matched_gt] + + # Deal matched_gt: -1, its actually background(unmatched) + cur_gt_inds[cur_matched_gt == -1] = -1 # Bugfixed + cur_gt_inds[cur_matched_gt == -2] = -2 + + frame_mask = future_frame_mask[ind] # [t] + + # Prediction + ins_seg_preds = ins_seg_preds_batch[ind] # [q(n_gt for matched), t, h, w] + ins_seg_targets = ins_seg_targets_batch[ind] # [t, h, w] + mask_preds = mask_preds_batch[ind] + + # Assigned-gt + ins_seg_targets_ordered = [] + for ins_id in cur_gt_inds: + # -1 for unmatched query + # If ins_seg_targets is all 255, ignore (directly append occ-and-flow gt to list) + # 255 for special object --> change to -20 (same as in occ_label.py) + # -2 for no_query situation + if (ins_seg_targets == self.ignore_index).all().item() is True: + ins_tgt = ins_seg_targets.long() + elif ins_id.item() in [-1, -2] : # false positive query (unmatched) + ins_tgt = torch.ones_like(ins_seg_targets).long() * self.ignore_index + else: + SPECIAL_INDEX = -20 + if ins_id.item() == self.ignore_index: + ins_id = torch.ones_like(ins_id) * SPECIAL_INDEX + ins_tgt = (ins_seg_targets == ins_id).long() # [t, h, w], 0 or 1 + + ins_seg_targets_ordered.append(ins_tgt) + + ins_seg_targets_ordered = torch.stack(ins_seg_targets_ordered, dim=0) # [n_gt, t, h, w] + + # Sanity check + t, h, w = ins_seg_preds.shape[-3:] + assert t == 1+self.n_future, f"{ins_seg_preds.size()}" + assert ins_seg_preds.size() == ins_seg_targets_ordered.size(), \ + f"{ins_seg_preds.size()}, {ins_seg_targets_ordered.size()}" + + num_total_pos = ins_seg_preds.size(0) # Check this line + + # loss for a sample in batch + num_total_pos = ins_seg_preds.new_tensor([num_total_pos]) + num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() + + cur_dice_loss = self.loss_dice( + ins_seg_preds, ins_seg_targets_ordered, avg_factor=num_total_pos, frame_mask=frame_mask) + + cur_mask_loss = self.loss_mask( + ins_seg_preds, ins_seg_targets_ordered, frame_mask=frame_mask + ) + + cur_aux_dice_loss = self.loss_dice( + mask_preds, ins_seg_targets_ordered, avg_factor=num_total_pos, frame_mask=frame_mask + ) + cur_aux_mask_loss = self.loss_mask( + mask_preds, ins_seg_targets_ordered, frame_mask=frame_mask + ) + + loss_dice += cur_dice_loss + loss_mask += cur_mask_loss + loss_aux_dice += cur_aux_dice_loss * self.aux_loss_weight + loss_aux_mask += cur_aux_mask_loss * self.aux_loss_weight + + loss_dict['loss_dice'] = loss_dice / bs + loss_dict['loss_mask'] = loss_mask / bs + loss_dict['loss_aux_dice'] = loss_aux_dice / bs + loss_dict['loss_aux_mask'] = loss_aux_mask / bs + + return loss_dict + + def 
+    def forward_test(
+        self,
+        bev_feat,
+        outs_dict,
+        no_query=False,
+        gt_segmentation=None,
+        gt_instance=None,
+        gt_img_is_valid=None,
+    ):
+        out_dict = dict()
+        if gt_segmentation is not None and gt_instance is not None:
+            gt_segmentation, gt_instance, gt_img_is_valid = self.get_occ_labels(gt_segmentation, gt_instance, gt_img_is_valid)
+            out_dict['seg_gt'] = gt_segmentation[:, :1+self.n_future]  # [1, 5, 1, 200, 200]
+            out_dict['ins_seg_gt'] = self.get_ins_seg_gt(gt_instance[:, :1+self.n_future])  # [1, 5, 200, 200]
+        if no_query:
+            # output all-zero results
+            out_dict['seg_out'] = torch.zeros((1, 5, 1, 200, 200), device=bev_feat.device).long()  # [1, 5, 1, 200, 200]
+            out_dict['ins_seg_out'] = torch.zeros((1, 5, 200, 200), device=bev_feat.device).long()  # [1, 5, 200, 200]
+            return out_dict
+
+        ins_query = self.merge_queries(outs_dict, self.detach_query_pos)
+
+        _, pred_ins_logits = self(bev_feat, ins_query=ins_query)
+
+        out_dict['pred_ins_logits'] = pred_ins_logits
+
+        pred_ins_logits = pred_ins_logits[:, :, :1+self.n_future]  # [b, q, t, h, w]
+        pred_ins_sigmoid = pred_ins_logits.sigmoid()  # [b, q, t, h, w]
+
+        if self.test_with_track_score:
+            track_scores = outs_dict['track_scores'].to(pred_ins_sigmoid)  # [b, q]
+            track_scores = track_scores[:, :, None, None, None]
+            pred_ins_sigmoid = pred_ins_sigmoid * track_scores  # [b, q, t, h, w]
+
+        out_dict['pred_ins_sigmoid'] = pred_ins_sigmoid
+        pred_seg_scores = pred_ins_sigmoid.max(1)[0]
+        seg_out = (pred_seg_scores > self.test_seg_thresh).long().unsqueeze(2)  # [b, t, 1, h, w]
+        out_dict['seg_out'] = seg_out
+        if self.pan_eval:
+            # ins_pred
+            pred_consistent_instance_seg = \
+                predict_instance_segmentation_and_trajectories(seg_out, pred_ins_sigmoid)  # bg is 0, fg starts with 1, consecutive
+
+            out_dict['ins_seg_out'] = pred_consistent_instance_seg  # [1, 5, 200, 200]
+
+        return out_dict
+
+    def get_ins_seg_gt(self, gt_instance):
+        ins_gt_old = gt_instance  # Not consecutive; 0 for bg, otherwise ins_ind (starting from 1)
+        ins_gt_new = torch.zeros_like(ins_gt_old).to(ins_gt_old)  # Make it consecutive
+        ins_inds_unique = torch.unique(ins_gt_old)
+        new_id = 1
+        for uni_id in ins_inds_unique:
+            if uni_id.item() in [0, self.ignore_index]:  # ignore background_id
+                continue
+            ins_gt_new[ins_gt_old == uni_id] = new_id
+            new_id += 1
+        return ins_gt_new  # Consecutive
+
+    def get_occ_labels(self, gt_segmentation, gt_instance, gt_img_is_valid):
+        if not self.training:
+            gt_segmentation = gt_segmentation[0]
+            gt_instance = gt_instance[0]
+            gt_img_is_valid = gt_img_is_valid[0]
+
+        gt_segmentation = gt_segmentation[:, :self.n_future+1].long().unsqueeze(2)
+        gt_instance = gt_instance[:, :self.n_future+1].long()
+        gt_img_is_valid = gt_img_is_valid[:, :self.receptive_field + self.n_future]
+        return gt_segmentation, gt_instance, gt_img_is_valid
diff --git a/mmcv/models/dense_heads/occ_head_plugin/__init__.py b/mmcv/models/dense_heads/occ_head_plugin/__init__.py
new file mode 100644
index 0000000..00abc5d
--- /dev/null
+++ b/mmcv/models/dense_heads/occ_head_plugin/__init__.py
@@ -0,0 +1,3 @@
+from .utils import *
+from .metrics import *
+from .modules import *
\ No newline at end of file
diff --git a/mmcv/models/dense_heads/occ_head_plugin/metrics.py b/mmcv/models/dense_heads/occ_head_plugin/metrics.py
new file mode 100644
index 0000000..680a45f
--- /dev/null
+++ b/mmcv/models/dense_heads/occ_head_plugin/metrics.py
@@ -0,0 +1,258 @@
+#---------------------------------------------------------------------------------#
+# UniAD:
Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156) # +# Source code: https://github.com/OpenDriveLab/UniAD # +# Copyright (c) OpenDriveLab. All rights reserved. # +#---------------------------------------------------------------------------------# + +from typing import Optional + +import torch +from mmcv.metrics.metric import Metric +from mmcv.metrics.classification import stat_scores_multiple_classes +from mmcv.metrics.reduction import reduce + +class IntersectionOverUnion(Metric): + """Computes intersection-over-union.""" + def __init__( + self, + n_classes: int, + ignore_index: Optional[int] = None, + absent_score: float = 0.0, + reduction: str = 'none', + ): + super().__init__() + + self.n_classes = n_classes + self.ignore_index = ignore_index + self.absent_score = absent_score + self.reduction = reduction + + self.add_state('true_positive', default=torch.zeros(n_classes), dist_reduce_fx='sum') + self.add_state('false_positive', default=torch.zeros(n_classes), dist_reduce_fx='sum') + self.add_state('false_negative', default=torch.zeros(n_classes), dist_reduce_fx='sum') + self.add_state('support', default=torch.zeros(n_classes), dist_reduce_fx='sum') + + def update(self, prediction: torch.Tensor, target: torch.Tensor): + tps, fps, _, fns, sups = stat_scores_multiple_classes(prediction, target, self.n_classes) + + self.true_positive += tps + self.false_positive += fps + self.false_negative += fns + self.support += sups + + def compute(self): + scores = torch.zeros(self.n_classes, device=self.true_positive.device, dtype=torch.float32) + + for class_idx in range(self.n_classes): + if class_idx == self.ignore_index: + continue + + tp = self.true_positive[class_idx] + fp = self.false_positive[class_idx] + fn = self.false_negative[class_idx] + sup = self.support[class_idx] + + # If this class is absent in the target (no support) AND absent in the pred (no true or false + # positives), then use the absent_score for this class. + if sup + tp + fp == 0: + scores[class_idx] = self.absent_score + continue + + denominator = tp + fp + fn + score = tp.to(torch.float) / denominator + scores[class_idx] = score + + # Remove the ignored class index from the scores. + if (self.ignore_index is not None) and (0 <= self.ignore_index < self.n_classes): + scores = torch.cat([scores[:self.ignore_index], scores[self.ignore_index+1:]]) + + return reduce(scores, reduction=self.reduction) + + +class PanopticMetric(Metric): + def __init__( + self, + n_classes: int, + temporally_consistent: bool = True, + vehicles_id: int = 1, + ): + super().__init__() + + self.n_classes = n_classes + self.temporally_consistent = temporally_consistent + self.vehicles_id = vehicles_id + self.keys = ['iou', 'true_positive', 'false_positive', 'false_negative'] + + self.add_state('iou', default=torch.zeros(n_classes), dist_reduce_fx='sum') + self.add_state('true_positive', default=torch.zeros(n_classes), dist_reduce_fx='sum') + self.add_state('false_positive', default=torch.zeros(n_classes), dist_reduce_fx='sum') + self.add_state('false_negative', default=torch.zeros(n_classes), dist_reduce_fx='sum') + + def update(self, pred_instance, gt_instance): + """ + Update state with predictions and targets. + + Parameters + ---------- + pred_instance: (b, s, h, w) + Temporally consistent instance segmentation prediction. + gt_instance: (b, s, h, w) + Ground truth instance segmentation. 
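+
+        Example
+        -------
+        A minimal sketch (assumes the torchmetrics-style ``update`` /
+        ``compute`` API of the ``Metric`` base class; shapes follow the
+        parameter description above):
+
+        >>> metric = PanopticMetric(n_classes=2)
+        >>> pred = torch.zeros(1, 1, 4, 4, dtype=torch.long)
+        >>> pred[0, 0, :2, :2] = 1        # one predicted instance
+        >>> gt = pred.clone()             # perfectly matching ground truth
+        >>> metric.update(pred, gt)
+        >>> out = metric.compute()        # dict with 'pq', 'sq', 'rq'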
+ """ + batch_size, sequence_length = gt_instance.shape[:2] + # Process labels + assert gt_instance.min() == 0, 'ID 0 of gt_instance must be background' + pred_segmentation = (pred_instance > 0).long() + gt_segmentation = (gt_instance > 0).long() + + for b in range(batch_size): + unique_id_mapping = {} + for t in range(sequence_length): + result = self.panoptic_metrics( + pred_segmentation[b, t].detach(), + pred_instance[b, t].detach(), + gt_segmentation[b, t], + gt_instance[b, t], + unique_id_mapping, + ) + + self.iou += result['iou'] + self.true_positive += result['true_positive'] + self.false_positive += result['false_positive'] + self.false_negative += result['false_negative'] + + def compute(self): + denominator = torch.maximum( + (self.true_positive + self.false_positive / 2 + self.false_negative / 2), + torch.ones_like(self.true_positive) + ) + pq = self.iou / denominator + sq = self.iou / torch.maximum(self.true_positive, torch.ones_like(self.true_positive)) + rq = self.true_positive / denominator + + return {'pq': pq, + 'sq': sq, + 'rq': rq, + # If 0, it means there wasn't any detection. + 'denominator': (self.true_positive + self.false_positive / 2 + self.false_negative / 2), + } + + def panoptic_metrics(self, pred_segmentation, pred_instance, gt_segmentation, gt_instance, unique_id_mapping): + """ + Computes panoptic quality metric components. + + Parameters + ---------- + pred_segmentation: [H, W] range {0, ..., n_classes-1} (>= n_classes is void) + pred_instance: [H, W] range {0, ..., n_instances} (zero means background) + gt_segmentation: [H, W] range {0, ..., n_classes-1} (>= n_classes is void) + gt_instance: [H, W] range {0, ..., n_instances} (zero means background) + unique_id_mapping: instance id mapping to check consistency + """ + n_classes = self.n_classes + + result = {key: torch.zeros(n_classes, dtype=torch.float32, device=gt_instance.device) for key in self.keys} + + assert pred_segmentation.dim() == 2 + assert pred_segmentation.shape == pred_instance.shape == gt_segmentation.shape == gt_instance.shape + + n_instances = int(torch.cat([pred_instance, gt_instance]).max().item()) + n_all_things = n_instances + n_classes # Classes + instances. + n_things_and_void = n_all_things + 1 + + # Now 1 is background; 0 is void (not used). 2 is vehicle semantic class but since it overlaps with + # instances, it is not present. + # and the rest are instance ids starting from 3 + prediction, pred_to_cls = self.combine_mask(pred_segmentation, pred_instance, n_classes, n_all_things) + target, target_to_cls = self.combine_mask(gt_segmentation, gt_instance, n_classes, n_all_things) + + # Compute ious between all stuff and things + # hack for bincounting 2 arrays together + x = prediction + n_things_and_void * target + bincount_2d = torch.bincount(x.long(), minlength=n_things_and_void ** 2) + if bincount_2d.shape[0] != n_things_and_void ** 2: + raise ValueError('Incorrect bincount size.') + conf = bincount_2d.reshape((n_things_and_void, n_things_and_void)) + # Drop void class + conf = conf[1:, 1:] + + # Confusion matrix contains intersections between all combinations of classes + union = conf.sum(0).unsqueeze(0) + conf.sum(1).unsqueeze(1) - conf + iou = torch.where(union > 0, (conf.float() + 1e-9) / (union.float() + 1e-9), torch.zeros_like(union).float()) + + # In the iou matrix, first dimension is target idx, second dimension is pred idx. + # Mapping will contain a tuple that maps prediction idx to target idx for segments matched by iou. 
+ mapping = (iou > 0.5).nonzero(as_tuple=False) + + # Check that classes match. + is_matching = pred_to_cls[mapping[:, 1]] == target_to_cls[mapping[:, 0]] + mapping = mapping[is_matching] + tp_mask = torch.zeros_like(conf, dtype=torch.bool) + tp_mask[mapping[:, 0], mapping[:, 1]] = True + + # First ids correspond to "stuff" i.e. semantic seg. + # Instance ids are offset accordingly + for target_id, pred_id in mapping: + cls_id = pred_to_cls[pred_id] + + if self.temporally_consistent and cls_id == self.vehicles_id: + if target_id.item() in unique_id_mapping and unique_id_mapping[target_id.item()] != pred_id.item(): + # Not temporally consistent + result['false_negative'][target_to_cls[target_id]] += 1 + result['false_positive'][pred_to_cls[pred_id]] += 1 + unique_id_mapping[target_id.item()] = pred_id.item() + continue + + result['true_positive'][cls_id] += 1 + result['iou'][cls_id] += iou[target_id][pred_id] + unique_id_mapping[target_id.item()] = pred_id.item() + + for target_id in range(n_classes, n_all_things): + # If this is a true positive do nothing. + if tp_mask[target_id, n_classes:].any(): + continue + # If this target instance didn't match with any predictions and was present set it as false negative. + if target_to_cls[target_id] != -1: + result['false_negative'][target_to_cls[target_id]] += 1 + + for pred_id in range(n_classes, n_all_things): + # If this is a true positive do nothing. + if tp_mask[n_classes:, pred_id].any(): + continue + # If this predicted instance didn't match with any prediction, set that predictions as false positive. + if pred_to_cls[pred_id] != -1 and (conf[:, pred_id] > 0).any(): + result['false_positive'][pred_to_cls[pred_id]] += 1 + + return result + + def combine_mask(self, segmentation: torch.Tensor, instance: torch.Tensor, n_classes: int, n_all_things: int): + """Shifts all things ids by num_classes and combines things and stuff into a single mask + + Returns a combined mask + a mapping from id to segmentation class. + """ + instance = instance.view(-1) + instance_mask = instance > 0 + instance = instance - 1 + n_classes + + segmentation = segmentation.clone().view(-1) + segmentation_mask = segmentation < n_classes # Remove void pixels. + + # Build an index from instance id to class id. + instance_id_to_class_tuples = torch.cat( + ( + instance[instance_mask & segmentation_mask].unsqueeze(1), + segmentation[instance_mask & segmentation_mask].unsqueeze(1), + ), + dim=1, + ) + instance_id_to_class = -instance_id_to_class_tuples.new_ones((n_all_things,)) + instance_id_to_class[instance_id_to_class_tuples[:, 0]] = instance_id_to_class_tuples[:, 1] + instance_id_to_class[torch.arange(n_classes, device=segmentation.device)] = torch.arange( + n_classes, device=segmentation.device + ) + + segmentation[instance_mask] = instance[instance_mask] + segmentation += 1 # Shift all legit classes by 1. + segmentation[~segmentation_mask] = 0 # Shift void class to zero. + + return segmentation, instance_id_to_class \ No newline at end of file diff --git a/mmcv/models/dense_heads/occ_head_plugin/modules.py b/mmcv/models/dense_heads/occ_head_plugin/modules.py new file mode 100644 index 0000000..0942210 --- /dev/null +++ b/mmcv/models/dense_heads/occ_head_plugin/modules.py @@ -0,0 +1,342 @@ +#---------------------------------------------------------------------------------# +# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156) # +# Source code: https://github.com/OpenDriveLab/UniAD # +# Copyright (c) OpenDriveLab. All rights reserved. 
# +#---------------------------------------------------------------------------------# + +import torch +from torch import nn +import torch.utils.checkpoint as checkpoint +from .utils import calculate_birds_eye_view_parameters +import torch.nn.functional as F +from mmcv.models.backbones.base_module import BaseModule +from mmcv.models.bricks import ConvModule, build_conv_layer +from einops import rearrange +from collections import OrderedDict + +# Grid sampler +# Sample a smaller receptive-field bev from larger one +class BevFeatureSlicer(nn.Module): + def __init__(self, grid_conf, map_grid_conf): + super().__init__() + if grid_conf == map_grid_conf: + self.identity_mapping = True + else: + self.identity_mapping = False + + bev_resolution, bev_start_position, bev_dimension= calculate_birds_eye_view_parameters( + grid_conf['xbound'], grid_conf['ybound'], grid_conf['zbound'] + ) + + map_bev_resolution, map_bev_start_position, map_bev_dimension = calculate_birds_eye_view_parameters( + map_grid_conf['xbound'], map_grid_conf['ybound'], map_grid_conf['zbound'] + ) + + self.map_x = torch.arange( + map_bev_start_position[0], map_grid_conf['xbound'][1], map_bev_resolution[0]) + + self.map_y = torch.arange( + map_bev_start_position[1], map_grid_conf['ybound'][1], map_bev_resolution[1]) + + # convert to normalized coords + self.norm_map_x = self.map_x / (- bev_start_position[0]) + self.norm_map_y = self.map_y / (- bev_start_position[1]) + + tmp_m, tmp_n = torch.meshgrid( + self.norm_map_x, self.norm_map_y) # indexing 'ij' + tmp_m, tmp_n = tmp_m.T, tmp_n.T # change it to the 'xy' mode results + + self.map_grid = torch.stack([tmp_m, tmp_n], dim=2) + + def forward(self, x): + # x: bev feature map tensor of shape (b, c, h, w) + if self.identity_mapping: + return x + else: + grid = self.map_grid.unsqueeze(0).type_as( + x).repeat(x.shape[0], 1, 1, 1) # (b, h, w, 2) + + return F.grid_sample(x, grid=grid, mode='bilinear', align_corners=True) + +# General layers +class MLP(nn.Module): + """Very simple multi-layer perceptron (also called FFN)""" + + def __init__(self, input_dim, hidden_dim, output_dim, num_layers): + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList( + nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]) + ) + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) + return x + +class SimpleConv2d(BaseModule): + def __init__(self, in_channels, + out_channels, + + conv_channels=64, + num_conv=1, + conv_cfg=dict(type='Conv2d'), + norm_cfg=dict(type='BN2d'), + bias='auto', + init_cfg=None, + ): + assert init_cfg is None, 'To prevent abnormal initialization ' \ + 'behavior, init_cfg is not allowed to be set' + super().__init__(init_cfg=init_cfg) + self.out_channels = out_channels + if num_conv == 1: + conv_channels = in_channels + + conv_layers = [] + c_in = in_channels + for i in range(num_conv-1): + conv_layers.append( + ConvModule( + c_in, + conv_channels, + kernel_size=3, + stride=1, + padding=1, + bias=bias, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + ) + ) + c_in = conv_channels + # No norm and relu in last conv + conv_layers.append( + build_conv_layer( + conv_cfg, + conv_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + bias=True + ) + ) + self.conv_layers = nn.Sequential(*conv_layers) + + if init_cfg is None: + self.init_cfg = dict(type='Kaiming', layer='Conv2d') + + def forward(self, x): + b, c_in, h_in, w_in = 
x.size() + out = self.conv_layers(x) + assert out.size() == (b, self.out_channels, h_in, w_in) # sanity check + return out + +# Decoder +class CVT_DecoderBlock(nn.Module): + def __init__(self, in_channels, out_channels, skip_dim, residual, factor, upsample, with_relu=True): + super().__init__() + + dim = out_channels // factor + + if upsample: + self.conv = nn.Sequential( + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True), + nn.Conv2d(in_channels, dim, 3, padding=1, bias=False), + nn.BatchNorm2d(dim), + nn.ReLU(inplace=True), + nn.Conv2d(dim, out_channels, 1, padding=0, bias=False), + nn.BatchNorm2d(out_channels)) + else: + self.conv = nn.Sequential( + nn.Conv2d(in_channels, dim, 3, padding=1, bias=False), + nn.BatchNorm2d(dim), + nn.ReLU(inplace=True), + nn.Conv2d(dim, out_channels, 1, padding=0, bias=False), + nn.BatchNorm2d(out_channels)) + + if residual: + self.up = nn.Conv2d(skip_dim, out_channels, 1) + else: + self.up = None + + self.with_relu = with_relu + if self.with_relu: + self.relu = nn.ReLU(inplace=True) + + def forward(self, x, skip): + x = self.conv(x) + + if self.up is not None: + up = self.up(skip) + up = F.interpolate(up, x.shape[-2:]) + + x = x + up + if self.with_relu: + return self.relu(x) + return x + +class CVT_Decoder(BaseModule): + def __init__(self, dim, blocks, residual=True, factor=2, upsample=True, use_checkpoint=False, init_cfg=None): + assert init_cfg is None, 'To prevent abnormal initialization ' \ + 'behavior, init_cfg is not allowed to be set' + super().__init__(init_cfg=init_cfg) + + layers = [] + channels = dim + + for i, out_channels in enumerate(blocks): + with_relu = i < len(blocks) - 1 # if not last block, with relu + layer = CVT_DecoderBlock(channels, out_channels, dim, residual, factor, upsample, with_relu=with_relu) + layers.append(layer) + + channels = out_channels + + self.layers = nn.Sequential(*layers) + self.out_channels = channels + self.use_checkpoint = use_checkpoint + + if init_cfg is None: + self.init_cfg = dict(type='Kaiming', layer='Conv2d') + + def forward(self, x): + b, t = x.size(0), x.size(1) + x = rearrange(x, 'b t c h w -> (b t) c h w') + y = x + for layer in self.layers: + if self.use_checkpoint: + y = checkpoint(layer, y, x) + else: + y = layer(y, x) + + y = rearrange(y, '(b t) c h w -> b t c h w', b=b, t=t) + return y + + +# Conv modules +class UpsamplingAdd(nn.Module): + def __init__(self, in_channels, out_channels, scale_factor=2): + super().__init__() + self.upsample_layer = nn.Sequential( + nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False), + nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0, bias=False), + nn.BatchNorm2d(out_channels), + ) + + def forward(self, x, x_skip): + x = self.upsample_layer(x) + return x + x_skip + +class Interpolate(nn.Module): + def __init__(self, scale_factor: int = 2): + super().__init__() + self._interpolate = nn.functional.interpolate + self._scale_factor = scale_factor + + def forward(self, x): + return self._interpolate(x, scale_factor=self._scale_factor, mode='bilinear', align_corners=False) + +class Bottleneck(nn.Module): + """ + Defines a bottleneck module with a residual connection + """ + + def __init__( + self, + in_channels, + out_channels=None, + kernel_size=3, + dilation=1, + groups=1, + upsample=False, + downsample=False, + dropout=0.0, + ): + super().__init__() + self._downsample = downsample + bottleneck_channels = int(in_channels / 2) + out_channels = out_channels or in_channels + padding_size = ((kernel_size - 1) * 
dilation + 1) // 2 + + # Define the main conv operation + assert dilation == 1 + if upsample: + assert not downsample, 'downsample and upsample not possible simultaneously.' + bottleneck_conv = nn.ConvTranspose2d( + bottleneck_channels, + bottleneck_channels, + kernel_size=kernel_size, + bias=False, + dilation=1, + stride=2, + output_padding=padding_size, + padding=padding_size, + groups=groups, + ) + elif downsample: + bottleneck_conv = nn.Conv2d( + bottleneck_channels, + bottleneck_channels, + kernel_size=kernel_size, + bias=False, + dilation=dilation, + stride=2, + padding=padding_size, + groups=groups, + ) + else: + bottleneck_conv = nn.Conv2d( + bottleneck_channels, + bottleneck_channels, + kernel_size=kernel_size, + bias=False, + dilation=dilation, + padding=padding_size, + groups=groups, + ) + + self.layers = nn.Sequential( + OrderedDict( + [ + # First projection with 1x1 kernel + ('conv_down_project', nn.Conv2d(in_channels, bottleneck_channels, kernel_size=1, bias=False)), + ('abn_down_project', nn.Sequential(nn.BatchNorm2d(bottleneck_channels), + nn.ReLU(inplace=True))), + # Second conv block + ('conv', bottleneck_conv), + ('abn', nn.Sequential(nn.BatchNorm2d(bottleneck_channels), nn.ReLU(inplace=True))), + # Final projection with 1x1 kernel + ('conv_up_project', nn.Conv2d(bottleneck_channels, out_channels, kernel_size=1, bias=False)), + ('abn_up_project', nn.Sequential(nn.BatchNorm2d(out_channels), + nn.ReLU(inplace=True))), + # Regulariser + ('dropout', nn.Dropout2d(p=dropout)), + ] + ) + ) + + if out_channels == in_channels and not downsample and not upsample: + self.projection = None + else: + projection = OrderedDict() + if upsample: + projection.update({'upsample_skip_proj': Interpolate(scale_factor=2)}) + elif downsample: + projection.update({'upsample_skip_proj': nn.MaxPool2d(kernel_size=2, stride=2)}) + projection.update( + { + 'conv_skip_proj': nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False), + 'bn_skip_proj': nn.BatchNorm2d(out_channels), + } + ) + self.projection = nn.Sequential(projection) + + def forward(self, *args): + (x,) = args + x_residual = self.layers(x) + if self.projection is not None: + if self._downsample: + # pad h/w dimensions if they are odd to prevent shape mismatch with residual layer + x = nn.functional.pad(x, (0, x.shape[-1] % 2, 0, x.shape[-2] % 2), value=0) + return x_residual + self.projection(x) + return x_residual + x diff --git a/mmcv/models/dense_heads/occ_head_plugin/utils.py b/mmcv/models/dense_heads/occ_head_plugin/utils.py new file mode 100644 index 0000000..da71cb4 --- /dev/null +++ b/mmcv/models/dense_heads/occ_head_plugin/utils.py @@ -0,0 +1,87 @@ +#---------------------------------------------------------------------------------# +# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156) # +# Source code: https://github.com/OpenDriveLab/UniAD # +# Copyright (c) OpenDriveLab. All rights reserved. # +#---------------------------------------------------------------------------------# + +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np + + +def calculate_birds_eye_view_parameters(x_bounds, y_bounds, z_bounds): + """ + Parameters + ---------- + x_bounds: Forward direction in the ego-car. 
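+        (min, max, step); e.g. [-51.2, 51.2, 0.512] gives 200 cells of
+        0.512 m whose centres start at -50.944.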
+ y_bounds: Sides + z_bounds: Height + + Returns + ------- + bev_resolution: Bird's-eye view bev_resolution + bev_start_position Bird's-eye view first element + bev_dimension Bird's-eye view tensor spatial dimension + """ + bev_resolution = torch.tensor( + [row[2] for row in [x_bounds, y_bounds, z_bounds]]) + bev_start_position = torch.tensor( + [row[0] + row[2] / 2.0 for row in [x_bounds, y_bounds, z_bounds]]) + bev_dimension = torch.tensor([(row[1] - row[0]) / row[2] + for row in [x_bounds, y_bounds, z_bounds]], dtype=torch.long) + + return bev_resolution, bev_start_position, bev_dimension + + +def gen_dx_bx(xbound, ybound, zbound): + dx = torch.Tensor([row[2] for row in [xbound, ybound, zbound]]) + bx = torch.Tensor([row[0] + row[2]/2.0 for row in [xbound, ybound, zbound]]) + nx = torch.LongTensor([(row[1] - row[0]) / row[2] for row in [xbound, ybound, zbound]]) + + return dx, bx, nx + +# Instance utils +def update_instance_ids(instance_seg, old_ids, new_ids): + """ + Parameters + ---------- + instance_seg: torch.Tensor arbitrary shape + old_ids: 1D tensor containing the list of old ids, must be all present in instance_seg. + new_ids: 1D tensor with the new ids, aligned with old_ids + + Returns + new_instance_seg: torch.Tensor same shape as instance_seg with new ids + """ + indices = torch.arange(old_ids.max() + 1, device=instance_seg.device) + for old_id, new_id in zip(old_ids, new_ids): + indices[old_id] = new_id + + return indices[instance_seg].long() + + +def make_instance_seg_consecutive(instance_seg): + # Make the indices of instance_seg consecutive + unique_ids = torch.unique(instance_seg) # include background + new_ids = torch.arange(len(unique_ids), device=instance_seg.device) + instance_seg = update_instance_ids(instance_seg, unique_ids, new_ids) + return instance_seg + + +def predict_instance_segmentation_and_trajectories( + foreground_masks, + ins_sigmoid, + vehicles_id=1, + ): + if foreground_masks.dim() == 5 and foreground_masks.shape[2] == 1: + foreground_masks = foreground_masks.squeeze(2) # [b, t, h, w] + foreground_masks = foreground_masks == vehicles_id # [b, t, h, w] Only these places have foreground id + + argmax_ins = ins_sigmoid.argmax(dim=1) # long, [b, t, h, w], ins_id starts from 0 + argmax_ins = argmax_ins + 1 # [b, t, h, w], ins_id starts from 1 + instance_seg = (argmax_ins * foreground_masks.float()).long() # bg is 0, fg starts with 1 + + # Make the indices of instance_seg consecutive + instance_seg = make_instance_seg_consecutive(instance_seg).long() + + return instance_seg diff --git a/mmcv/models/dense_heads/panseg_head.py b/mmcv/models/dense_heads/panseg_head.py new file mode 100644 index 0000000..8210e59 --- /dev/null +++ b/mmcv/models/dense_heads/panseg_head.py @@ -0,0 +1,1327 @@ +#----------------------------------------------------------------------------------# +# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156) # +# Source code: https://github.com/OpenDriveLab/UniAD # +# Copyright (c) OpenDriveLab. All rights reserved. 
#
+# Modified from panoptic_segformer (https://github.com/zhiqi-li/Panoptic-SegFormer)#
+#----------------------------------------------------------------------------------#
+
+import copy
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.models.bricks import Linear
+from mmcv.models.utils import bias_init_with_prob, constant_init
+from mmcv.utils import force_fp32, auto_fp16
+from mmcv.models.utils.transformer import inverse_sigmoid
+from mmcv.models.builder import HEADS, build_loss
+from mmcv.core.bbox.transforms import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh
+from mmcv.core.bbox.builder import build_assigner, build_sampler
+from mmcv.core.utils import multi_apply, reduce_mean
+from mmcv.models.utils import build_transformer
+from .seg_head_plugin import SegDETRHead, IOU
+
+@HEADS.register_module()
+class PansegformerHead(SegDETRHead):
+    """
+    Head of Panoptic SegFormer.
+
+    Code is modified from the `official github repo
+    <https://github.com/zhiqi-li/Panoptic-SegFormer>`_.
+
+    Args:
+        with_box_refine (bool): Whether to refine the reference points
+            in the decoder. Defaults to False.
+        as_two_stage (bool): Whether to generate the proposal from
+            the outputs of encoder.
+        transformer (obj:`ConfigDict`): ConfigDict is used for building
+            the Encoder and Decoder.
+    """
+
+    def __init__(
+            self,
+            *args,
+            bev_h,
+            bev_w,
+            canvas_size,
+            pc_range,
+            with_box_refine=False,
+            as_two_stage=False,
+            transformer=None,
+            quality_threshold_things=0.25,
+            quality_threshold_stuff=0.25,
+            overlap_threshold_things=0.4,
+            overlap_threshold_stuff=0.2,
+            thing_transformer_head=dict(
+                type='TransformerHead',  # mask decoder for things
+                d_model=256,
+                nhead=8,
+                num_decoder_layers=6),
+            stuff_transformer_head=dict(
+                type='TransformerHead',  # mask decoder for stuff
+                d_model=256,
+                nhead=8,
+                num_decoder_layers=6),
+            loss_mask=dict(type='DiceLoss', weight=2.0),
+            train_cfg=dict(
+                assigner=dict(type='HungarianAssigner',
+                              cls_cost=dict(type='ClassificationCost',
+                                            weight=1.),
+                              reg_cost=dict(type='BBoxL1Cost', weight=5.0),
+                              iou_cost=dict(type='IoUCost',
+                                            iou_mode='giou',
+                                            weight=2.0)),
+                sampler=dict(type='PseudoSampler'),
+            ),
+            **kwargs):
+        self.bev_h = bev_h
+        self.bev_w = bev_w
+        self.canvas_size = canvas_size
+        self.pc_range = pc_range
+        self.real_w = self.pc_range[3] - self.pc_range[0]
+        self.real_h = self.pc_range[4] - self.pc_range[1]
+
+        self.with_box_refine = with_box_refine
+        self.as_two_stage = as_two_stage
+        # NOTE: hard-coded to 0.1; the quality_threshold_things argument above is ignored.
+        self.quality_threshold_things = 0.1
+        self.quality_threshold_stuff = quality_threshold_stuff
+        self.overlap_threshold_things = overlap_threshold_things
+        self.overlap_threshold_stuff = overlap_threshold_stuff
+        self.fp16_enabled = False
+
+        if self.as_two_stage:
+            transformer['as_two_stage'] = self.as_two_stage
+        self.num_dec_things = thing_transformer_head['num_decoder_layers']
+        self.num_dec_stuff = stuff_transformer_head['num_decoder_layers']
+        super(PansegformerHead, self).__init__(*args,
+                                               transformer=transformer,
+                                               train_cfg=train_cfg,
+                                               **kwargs)
+        if train_cfg:
+            sampler_cfg = train_cfg['sampler_with_mask']
+            self.sampler_with_mask = build_sampler(sampler_cfg, context=self)
+            assigner_cfg = train_cfg['assigner_with_mask']
+            self.assigner_with_mask = build_assigner(assigner_cfg)
+            self.assigner_filter = build_assigner(
+                dict(
+                    type='HungarianAssigner_filter',
+                    cls_cost=dict(type='FocalLossCost', weight=2.0),
+                    reg_cost=dict(type='BBoxL1Cost',
+                                  weight=5.0,
+                                  box_format='xywh'),
+                    iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0),
+                    max_pos=3  # depends on GPU memory; with max_pos=1 the model can be trained on a 1080 Ti
+                ), )
+
+        self.loss_mask = build_loss(loss_mask)
+        self.things_mask_head = build_transformer(thing_transformer_head)
+        self.stuff_mask_head = build_transformer(stuff_transformer_head)
+        self.count = 0
+
+    def _init_layers(self):
+        """Initialize classification branch and regression branch of head."""
+        if not self.as_two_stage:
+            self.bev_embedding = nn.Embedding(self.bev_h * self.bev_w, self.embed_dims)
+
+        fc_cls = Linear(self.embed_dims, self.cls_out_channels)
+        fc_cls_stuff = Linear(self.embed_dims, 1)
+        reg_branch = []
+        for _ in range(self.num_reg_fcs):
+            reg_branch.append(Linear(self.embed_dims, self.embed_dims))
+            reg_branch.append(nn.ReLU())
+        reg_branch.append(Linear(self.embed_dims, 4))
+        reg_branch = nn.Sequential(*reg_branch)
+
+        def _get_clones(module, N):
+            return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
+
+        # the last reg_branch is used to generate proposals from the
+        # encoded feature map when as_two_stage is True.
+        num_pred = (self.transformer.decoder.num_layers + 1) if \
+            self.as_two_stage else self.transformer.decoder.num_layers
+
+        if self.with_box_refine:
+            self.cls_branches = _get_clones(fc_cls, num_pred)
+            self.reg_branches = _get_clones(reg_branch, num_pred)
+        else:
+            self.cls_branches = nn.ModuleList(
+                [fc_cls for _ in range(num_pred)])
+            self.reg_branches = nn.ModuleList(
+                [reg_branch for _ in range(num_pred)])
+        if not self.as_two_stage:
+            self.query_embedding = nn.Embedding(self.num_query,
+                                                self.embed_dims * 2)
+            self.stuff_query = nn.Embedding(self.num_stuff_classes,
+                                            self.embed_dims * 2)
+        self.reg_branches2 = _get_clones(reg_branch, self.num_dec_things)  # used in mask decoder
+        self.cls_thing_branches = _get_clones(fc_cls, self.num_dec_things)  # used in mask decoder
+        self.cls_stuff_branches = _get_clones(fc_cls_stuff, self.num_dec_stuff)  # used in mask decoder
+
+    def init_weights(self):
+        """Initialize weights of the DeformDETR head."""
+        self.transformer.init_weights()
+        if self.loss_cls.use_sigmoid:
+            bias_init = bias_init_with_prob(0.01)
+            for m in self.cls_branches:
+                nn.init.constant_(m.bias, bias_init)
+            for m in self.cls_thing_branches:
+                nn.init.constant_(m.bias, bias_init)
+            for m in self.cls_stuff_branches:
+                nn.init.constant_(m.bias, bias_init)
+        for m in self.reg_branches:
+            constant_init(m[-1], 0, bias=0)
+        for m in self.reg_branches2:
+            constant_init(m[-1], 0, bias=0)
+        nn.init.constant_(self.reg_branches[0][-1].bias.data[2:], -2.0)
+
+        if self.as_two_stage:
+            for m in self.reg_branches:
+                nn.init.constant_(m[-1].bias.data[2:], 0.0)
+
+    @force_fp32(apply_to=('bev_embed', ))
+    def forward(self, bev_embed):
+        """Forward function.
+
+        Args:
+            bev_embed (Tensor): BEV features from the upstream network,
+                shape (bev_h*bev_w, bs, embed_dims).
+
+        Returns:
+            all_cls_scores (Tensor): Outputs from the classification head, \
+                shape [nb_dec, bs, num_query, cls_out_channels]. Note \
+                cls_out_channels should include background.
+            all_bbox_preds (Tensor): Sigmoid outputs from the regression \
+                head with normalized coordinate format (cx, cy, w, h). \
+                Shape [nb_dec, bs, num_query, 4].
+            enc_outputs_class (Tensor): The score of each point on the encoded \
+                feature map, has shape (N, h*w, num_class). Only when \
+                as_two_stage is True it would be returned, otherwise \
+                `None` would be returned.
+            enc_outputs_coord (Tensor): The proposals generated from the \
+                encoded feature map, has shape (N, h*w, 4). Only when \
+                as_two_stage is True it would be returned, otherwise \
+                `None` would be returned.
+        """
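+        # Shape walk-through: bev_embed (bev_h*bev_w, bs, C) is reshaped into a
+        # single-level feature pyramid [(bs, C, bev_h, bev_w)] with an all-zero
+        # (i.e. nothing masked) padding mask, then decoded by the DETR-style
+        # transformer below.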
+        _, bs, _ = bev_embed.shape
+
+        mlvl_feats = [torch.reshape(bev_embed, (bs, self.bev_h, self.bev_w, -1)).permute(0, 3, 1, 2)]
+        img_masks = mlvl_feats[0].new_zeros((bs, self.bev_h, self.bev_w))
+
+        hw_lvl = [feat_lvl.shape[-2:] for feat_lvl in mlvl_feats]
+        mlvl_masks = []
+        mlvl_positional_encodings = []
+        for feat in mlvl_feats:
+            mlvl_masks.append(
+                F.interpolate(img_masks[None],
+                              size=feat.shape[-2:]).to(torch.bool).squeeze(0))
+            mlvl_positional_encodings.append(
+                self.positional_encoding(mlvl_masks[-1]))
+
+        query_embeds = None
+        if not self.as_two_stage:
+            query_embeds = self.query_embedding.weight
+        (memory, memory_pos, memory_mask, query_pos), hs, init_reference, inter_references, \
+            enc_outputs_class, enc_outputs_coord = self.transformer(
+                mlvl_feats,
+                mlvl_masks,
+                query_embeds,
+                mlvl_positional_encodings,
+                reg_branches=self.reg_branches if self.with_box_refine else None,  # noqa:E501
+                cls_branches=self.cls_branches if self.as_two_stage else None  # noqa:E501
+            )
+
+        memory = memory.permute(1, 0, 2)
+        query = hs[-1].permute(1, 0, 2)
+        query_pos = query_pos.permute(1, 0, 2)
+        memory_pos = memory_pos.permute(1, 0, 2)
+
+        # we will feed these to the mask decoder.
+        args_tuple = [memory, memory_mask, memory_pos, query, None, query_pos, hw_lvl]
+
+        hs = hs.permute(0, 2, 1, 3)
+        outputs_classes = []
+        outputs_coords = []
+        for lvl in range(hs.shape[0]):
+            if lvl == 0:
+                reference = init_reference
+            else:
+                reference = inter_references[lvl - 1]
+            reference = inverse_sigmoid(reference)
+            outputs_class = self.cls_branches[lvl](hs[lvl])
+            tmp = self.reg_branches[lvl](hs[lvl])
+
+            if reference.shape[-1] == 4:
+                tmp += reference
+            else:
+                assert reference.shape[-1] == 2
+                tmp[..., :2] += reference
+            outputs_coord = tmp.sigmoid()
+            outputs_classes.append(outputs_class)
+            outputs_coords.append(outputs_coord)
+
+        outputs_classes = torch.stack(outputs_classes)
+        outputs_coords = torch.stack(outputs_coords)
+
+        outs = {
+            'bev_embed': None if self.as_two_stage else bev_embed,
+            'outputs_classes': outputs_classes,
+            'outputs_coords': outputs_coords,
+            'enc_outputs_class': enc_outputs_class if self.as_two_stage else None,
+            'enc_outputs_coord': enc_outputs_coord.sigmoid() if self.as_two_stage else None,
+            'args_tuple': args_tuple,
+            'reference': reference,
+        }
+
+        return outs
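+    # Usage sketch (illustrative only; `head` is a hypothetical instance and
+    # the shapes assume bev_h = bev_w = 200, embed_dims = 256):
+    #
+    #   bev_embed = torch.randn(200 * 200, 1, 256)   # (bev_h*bev_w, bs, C)
+    #   outs = head(bev_embed)
+    #   cls_logits = outs['outputs_classes'][-1]     # (bs, num_query, cls_out_channels)
+    #   boxes = outs['outputs_coords'][-1]           # (bs, num_query, 4), normalized cxcywh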
+
+    @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list',
+                          'args_tuple', 'reference'))
+    def loss(
+        self,
+        all_cls_scores,
+        all_bbox_preds,
+        enc_cls_scores,
+        enc_bbox_preds,
+        args_tuple,
+        reference,
+        gt_labels_list,
+        gt_bboxes_list,
+        gt_masks_list,
+        img_metas=None,
+        gt_bboxes_ignore=None,
+    ):
+        """Loss function.
+
+        Args:
+            all_cls_scores (Tensor): Classification score of all
+                decoder layers, has shape
+                [nb_dec, bs, num_query, cls_out_channels].
+            all_bbox_preds (Tensor): Sigmoid regression
+                outputs of all decode layers. Each is a 4D-tensor with
+                normalized coordinate format (cx, cy, w, h) and shape
+                [nb_dec, bs, num_query, 4].
+            enc_cls_scores (Tensor): Classification scores of
+                points on the encoded feature map, has shape
+                (N, h*w, num_classes). Only passed when as_two_stage is
+                True, otherwise None.
+            enc_bbox_preds (Tensor): Regression results of each point
+                on the encoded feature map, has shape (N, h*w, 4). Only
+                passed when as_two_stage is True, otherwise None.
+            args_tuple (Tuple): several args passed on to the mask decoder.
+            reference (Tensor): reference from the location decoder.
+            gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
+                with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels_list (list[Tensor]): Ground truth class indices for each
+                image with shape (num_gts, ).
+            img_metas (list[dict]): List of image meta information.
+            gt_bboxes_ignore (list[Tensor], optional): Bounding boxes
+                which can be ignored for each image. Default None.
+
+        Returns:
+            dict[str, Tensor]: A dictionary of loss components.
+        """
+        img_metas[0]['img_shape'] = (self.canvas_size[0], self.canvas_size[1], 3)
+
+        assert gt_bboxes_ignore is None, \
+            f'{self.__class__.__name__} only supports ' \
+            f'gt_bboxes_ignore set to None.'
+
+        ### separate things and stuff
+        gt_things_labels_list = []
+        gt_things_bboxes_list = []
+        gt_things_masks_list = []
+        gt_stuff_labels_list = []
+        gt_stuff_masks_list = []
+        for i, each in enumerate(gt_labels_list):
+            # NOTE: for COCO, (contiguous) ids < 80 are things; this does not
+            # hold for other datasets.
+            things_selected = each < self.num_things_classes
+            stuff_selected = ~things_selected
+
+            gt_things_labels_list.append(gt_labels_list[i][things_selected])
+            gt_things_bboxes_list.append(gt_bboxes_list[i][things_selected])
+            gt_things_masks_list.append(gt_masks_list[i][things_selected])
+
+            gt_stuff_labels_list.append(gt_labels_list[i][stuff_selected])
+            gt_stuff_masks_list.append(gt_masks_list[i][stuff_selected])
+
+        num_dec_layers = len(all_cls_scores)
+        all_gt_bboxes_list = [
+            gt_things_bboxes_list for _ in range(num_dec_layers - 1)
+        ]
+        all_gt_labels_list = [
+            gt_things_labels_list for _ in range(num_dec_layers - 1)
+        ]
+        # all_gt_masks_list = [gt_masks_list for _ in range(num_dec_layers-1)]
+        all_gt_bboxes_ignore_list = [
+            gt_bboxes_ignore for _ in range(num_dec_layers - 1)
+        ]
+        img_metas_list = [img_metas for _ in range(num_dec_layers - 1)]
+
+        # if the location decoder contains L layers, we compute the losses of
+        # the first L-1 layers here
+        losses_cls, losses_bbox, losses_iou = multi_apply(
+            self.loss_single, all_cls_scores[:-1], all_bbox_preds[:-1],
+            all_gt_bboxes_list, all_gt_labels_list, img_metas_list,
+            all_gt_bboxes_ignore_list)
+
+        losses_cls_f, losses_bbox_f, losses_iou_f, losses_masks_things_f, losses_masks_stuff_f, loss_mask_things_list_f, loss_mask_stuff_list_f, loss_iou_list_f, loss_bbox_list_f, loss_cls_list_f, loss_cls_stuff_list_f, things_ratio, stuff_ratio = self.loss_single_panoptic(
+            all_cls_scores[-1], all_bbox_preds[-1], args_tuple, reference,
+            gt_things_bboxes_list, gt_things_labels_list, gt_things_masks_list,
+            (gt_stuff_labels_list, gt_stuff_masks_list), img_metas,
+            gt_bboxes_ignore)
+
+        loss_dict = dict()
+        # loss of proposals generated from the encoded feature map.
+        if enc_cls_scores is not None:
+            binary_labels_list = [
+                torch.zeros_like(gt_things_labels_list[i])
+                for i in range(len(img_metas))
+            ]
+            enc_loss_cls, enc_losses_bbox, enc_losses_iou = \
+                self.loss_single(enc_cls_scores, enc_bbox_preds,
+                                 gt_things_bboxes_list, binary_labels_list,
+                                 img_metas, gt_bboxes_ignore)
+            loss_dict['enc_loss_cls'] = enc_loss_cls * things_ratio
+            loss_dict['enc_loss_bbox'] = enc_losses_bbox * things_ratio
+            loss_dict['enc_loss_iou'] = enc_losses_iou * things_ratio
+        # loss from the last decoder layer
+        loss_dict['loss_cls'] = losses_cls_f * things_ratio
+        loss_dict['loss_bbox'] = losses_bbox_f * things_ratio
+        loss_dict['loss_iou'] = losses_iou_f * things_ratio
+        loss_dict['loss_mask_things'] = losses_masks_things_f * things_ratio
+        loss_dict['loss_mask_stuff'] = losses_masks_stuff_f * stuff_ratio
+        # losses from the other decoder layers
+        num_dec_layer = 0
+        for i in range(len(loss_mask_things_list_f)):
+            loss_dict[f'd{i}.loss_mask_things_f'] = loss_mask_things_list_f[i] * things_ratio
+            loss_dict[f'd{i}.loss_iou_f'] = loss_iou_list_f[i] * things_ratio
+            loss_dict[f'd{i}.loss_bbox_f'] = loss_bbox_list_f[i] * things_ratio
+            loss_dict[f'd{i}.loss_cls_f'] = loss_cls_list_f[i] * things_ratio
+        for i in range(len(loss_mask_stuff_list_f)):
+            loss_dict[f'd{i}.loss_mask_stuff_f'] = loss_mask_stuff_list_f[i] * stuff_ratio
+            loss_dict[f'd{i}.loss_cls_stuff_f'] = loss_cls_stuff_list_f[i] * stuff_ratio
+        for loss_cls_i, loss_bbox_i, loss_iou_i in zip(
+                losses_cls,
+                losses_bbox,
+                losses_iou,
+        ):
+            loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i * things_ratio
+            loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i * things_ratio
+            loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i * things_ratio
+
+            num_dec_layer += 1
+        return loss_dict
+
+    def filter_query(self,
+                     cls_scores_list,
+                     bbox_preds_list,
+                     gt_bboxes_list,
+                     gt_labels_list,
+                     img_metas,
+                     gt_bboxes_ignore_list=None):
+        '''
+        Use the cost from the location decoder to filter out low-quality
+        queries.
+        '''
+        assert gt_bboxes_ignore_list is None, \
+            'Only supports gt_bboxes_ignore set to None.'
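+        # Note: besides a standard assignment, the assigner below returns
+        # boolean masks over all queries (pos/neg); the max_pos cap set in
+        # __init__ bounds how many queries are kept for the memory-heavy
+        # mask decoding stage.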
+        num_imgs = len(cls_scores_list)
+        gt_bboxes_ignore_list = [
+            gt_bboxes_ignore_list for _ in range(num_imgs)
+        ]
+
+        (pos_inds_mask_list, neg_inds_mask_list, labels_list,
+         label_weights_list, bbox_targets_list,
+         bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply(
+             self._filter_query_single, cls_scores_list, bbox_preds_list,
+             gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list)
+        num_total_pos = sum((inds.numel() for inds in pos_inds_list))
+        num_total_neg = sum((inds.numel() for inds in neg_inds_list))
+
+        return pos_inds_mask_list, neg_inds_mask_list, labels_list, label_weights_list, bbox_targets_list, \
+            bbox_weights_list, num_total_pos, num_total_neg, pos_inds_list, neg_inds_list
+
+    def _filter_query_single(self,
+                             cls_score,
+                             bbox_pred,
+                             gt_bboxes,
+                             gt_labels,
+                             img_meta,
+                             gt_bboxes_ignore=None):
+        num_bboxes = bbox_pred.size(0)
+        pos_ind_mask, neg_ind_mask, assign_result = self.assigner_filter.assign(
+            bbox_pred, cls_score, gt_bboxes, gt_labels, img_meta,
+            gt_bboxes_ignore)
+        sampling_result = self.sampler.sample(assign_result, bbox_pred,
+                                              gt_bboxes)
+        pos_inds = sampling_result.pos_inds
+        neg_inds = sampling_result.neg_inds
+        # label targets
+        labels = gt_bboxes.new_full((num_bboxes, ),
+                                    self.num_things_classes,
+                                    dtype=torch.long)
+        labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
+        label_weights = gt_bboxes.new_ones(num_bboxes)
+
+        # bbox targets
+        bbox_targets = torch.zeros_like(bbox_pred)
+        bbox_weights = torch.zeros_like(bbox_pred)
+        bbox_weights[pos_inds] = 1.0
+        img_h, img_w, _ = img_meta['img_shape']
+
+        # DETR regresses the relative position of boxes (cxcywh) in the image.
+        # Thus the learning target should be normalized by the image size, and
+        # the box format should be converted from the default x1y1x2y2 to cxcywh.
+        factor = bbox_pred.new_tensor([img_w, img_h, img_w,
+                                       img_h]).unsqueeze(0)
+        pos_gt_bboxes_normalized = sampling_result.pos_gt_bboxes / factor
+        pos_gt_bboxes_targets = bbox_xyxy_to_cxcywh(pos_gt_bboxes_normalized)
+        bbox_targets[pos_inds] = pos_gt_bboxes_targets
+
+        return (pos_ind_mask, neg_ind_mask, labels, label_weights,
+                bbox_targets, bbox_weights, pos_inds, neg_inds)
+
+    def get_targets_with_mask(self,
+                              cls_scores_list,
+                              bbox_preds_list,
+                              masks_preds_list_thing,
+                              gt_bboxes_list,
+                              gt_labels_list,
+                              gt_masks_list,
+                              img_metas,
+                              gt_bboxes_ignore_list=None):
+        """Compute regression and classification targets for a batch image.
+
+        Outputs from a single decoder layer of a single feature level are used.
+
+        Args:
+            cls_scores_list (list[Tensor]): Box score logits from a single
+                decoder layer for each image with shape [num_query,
+                cls_out_channels].
+            bbox_preds_list (list[Tensor]): Sigmoid outputs from a single
+                decoder layer for each image, with normalized coordinate
+                (cx, cy, w, h) and shape [num_query, 4].
+            masks_preds_list_thing (list[Tensor]): Mask predictions for
+                things, one tensor per image.
+            gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
+                with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels_list (list[Tensor]): Ground truth class indices for each
+                image with shape (num_gts, ).
+            img_metas (list[dict]): List of image meta information.
+            gt_bboxes_ignore_list (list[Tensor], optional): Bounding
+                boxes which can be ignored for each image. Default None.
+        """
+        assert gt_bboxes_ignore_list is None, \
+            'Only supports gt_bboxes_ignore set to None.'
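+        # multi_apply maps _get_target_single_with_mask over the images in the
+        # batch and regroups the per-image tuples into per-field lists.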
+        num_imgs = len(cls_scores_list)
+        gt_bboxes_ignore_list = [
+            gt_bboxes_ignore_list for _ in range(num_imgs)
+        ]
+
+        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+         mask_targets_list, mask_weights_list, pos_inds_list,
+         neg_inds_list) = multi_apply(self._get_target_single_with_mask,
+                                      cls_scores_list, bbox_preds_list,
+                                      masks_preds_list_thing, gt_bboxes_list,
+                                      gt_labels_list, gt_masks_list, img_metas,
+                                      gt_bboxes_ignore_list)
+        num_total_pos_thing = sum((inds.numel() for inds in pos_inds_list))
+        num_total_neg_thing = sum((inds.numel() for inds in neg_inds_list))
+        return (labels_list, label_weights_list, bbox_targets_list,
+                bbox_weights_list, mask_targets_list, mask_weights_list,
+                num_total_pos_thing, num_total_neg_thing, pos_inds_list)
+
+    def _get_target_single_with_mask(self,
+                                     cls_score,
+                                     bbox_pred,
+                                     masks_preds_things,
+                                     gt_bboxes,
+                                     gt_labels,
+                                     gt_masks,
+                                     img_meta,
+                                     gt_bboxes_ignore=None):
+        """Compute classification, box, and mask targets for a single image."""
+
+        num_bboxes = bbox_pred.size(0)
+        # assigner and sampler
+        gt_masks = gt_masks.float()
+
+        assign_result = self.assigner_with_mask.assign(bbox_pred, cls_score,
+                                                       masks_preds_things,
+                                                       gt_bboxes, gt_labels,
+                                                       gt_masks, img_meta,
+                                                       gt_bboxes_ignore)
+        sampling_result = self.sampler_with_mask.sample(
+            assign_result, bbox_pred, gt_bboxes, gt_masks)
+        pos_inds = sampling_result.pos_inds
+        neg_inds = sampling_result.neg_inds
+
+        # label targets
+        labels = gt_bboxes.new_full((num_bboxes, ),
+                                    self.num_things_classes,
+                                    dtype=torch.long)
+        labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
+        label_weights = gt_bboxes.new_ones(num_bboxes)
+
+        # bbox targets
+        bbox_targets = torch.zeros_like(bbox_pred)
+        bbox_weights = torch.zeros_like(bbox_pred)
+        bbox_weights[pos_inds] = 1.0
+        img_h, img_w, _ = img_meta['img_shape']
+
+        # DETR regresses the relative position of boxes (cxcywh) in the image.
+        # The learning target is therefore normalized by the image size, and
+        # the box format is converted from the default x1y1x2y2 to cxcywh.
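+        # Worked example of the normalization below (illustrative values): with
+        # img_shape (900, 1600, 3), a GT box [x1, y1, x2, y2] = [800, 450, 960, 540]
+        # becomes cxcywh [880, 495, 160, 90]; dividing by [1600, 900, 1600, 900]
+        # gives the target [0.55, 0.55, 0.1, 0.1].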
+        factor = bbox_pred.new_tensor([img_w, img_h, img_w,
+                                       img_h]).unsqueeze(0)
+        pos_gt_bboxes_normalized = sampling_result.pos_gt_bboxes / factor
+        pos_gt_bboxes_targets = bbox_xyxy_to_cxcywh(pos_gt_bboxes_normalized)
+        bbox_targets[pos_inds] = pos_gt_bboxes_targets
+
+        mask_weights = masks_preds_things.new_zeros(num_bboxes)
+        mask_weights[pos_inds] = 1.0
+        pos_gt_masks = sampling_result.pos_gt_masks
+        _, w, h = pos_gt_masks.shape
+        mask_target = masks_preds_things.new_zeros([num_bboxes, w, h])
+        mask_target[pos_inds] = pos_gt_masks
+
+        return (labels, label_weights, bbox_targets, bbox_weights, mask_target,
+                mask_weights, pos_inds, neg_inds)
+
+    def get_filter_results_and_loss(self, cls_scores, bbox_preds,
+                                    cls_scores_list, bbox_preds_list,
+                                    gt_bboxes_list, gt_labels_list, img_metas,
+                                    gt_bboxes_ignore_list):
+
+        pos_inds_mask_list, neg_inds_mask_list, labels_list, label_weights_list, bbox_targets_list, \
+            bbox_weights_list, num_total_pos_thing, num_total_neg_thing, pos_inds_list, neg_inds_list = self.filter_query(
+                cls_scores_list, bbox_preds_list,
+                gt_bboxes_list, gt_labels_list,
+                img_metas, gt_bboxes_ignore_list)
+        labels = torch.cat(labels_list, 0)
+        label_weights = torch.cat(label_weights_list, 0)
+        bbox_targets = torch.cat(bbox_targets_list, 0)
+        bbox_weights = torch.cat(bbox_weights_list, 0)
+
+        # classification loss
+        cls_scores = cls_scores.reshape(-1, self.cls_out_channels)
+        # construct weighted avg_factor to match with the official DETR repo
+        cls_avg_factor = num_total_pos_thing * 1.0 + \
+            num_total_neg_thing * self.bg_cls_weight
+        if self.sync_cls_avg_factor:
+            cls_avg_factor = reduce_mean(
+                cls_scores.new_tensor([cls_avg_factor]))
+        cls_avg_factor = max(cls_avg_factor, 1)
+
+        loss_cls = self.loss_cls(cls_scores,
+                                 labels,
+                                 label_weights,
+                                 avg_factor=cls_avg_factor)
+
+        # Compute the average number of gt boxes across all GPUs, for
+        # normalization purposes
+        num_total_pos_thing = loss_cls.new_tensor([num_total_pos_thing])
+        num_total_pos_thing = torch.clamp(reduce_mean(num_total_pos_thing),
+                                          min=1).item()
+
+        # construct factors used to rescale the bboxes
+        factors = []
+        for img_meta, bbox_pred in zip(img_metas, bbox_preds):
+            img_h, img_w, _ = img_meta['img_shape']
+            factor = bbox_pred.new_tensor([img_w, img_h, img_w,
+                                           img_h]).unsqueeze(0).repeat(
+                                               bbox_pred.size(0), 1)
+            factors.append(factor)
+        factors = torch.cat(factors, 0)
+
+        # DETR regresses the relative position of boxes (cxcywh) in the image,
+        # so the learning target is normalized by the image size and has to be
+        # rescaled back here before computing the IoU loss
+        bbox_preds = bbox_preds.reshape(-1, 4)
+        bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors
+        bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors
+
+        # regression IoU loss, GIoU loss by default
+        loss_iou = self.loss_iou(bboxes,
+                                 bboxes_gt,
+                                 bbox_weights,
+                                 avg_factor=num_total_pos_thing)
+
+        # regression L1 loss
+        loss_bbox = self.loss_bbox(bbox_preds,
+                                   bbox_targets,
+                                   bbox_weights,
+                                   avg_factor=num_total_pos_thing)
+        return loss_cls, loss_iou, loss_bbox,\
+            pos_inds_mask_list, num_total_pos_thing
+
+    def loss_single_panoptic(self,
+                             cls_scores,
+                             bbox_preds,
+                             args_tuple,
+                             reference,
+                             gt_bboxes_list,
+                             gt_labels_list,
+                             gt_masks_list,
+                             gt_panoptic_list,
+                             img_metas,
+                             gt_bboxes_ignore_list=None):
+        """Loss function for outputs from a single decoder layer of a single
+        feature level.
+
+        Args:
+            cls_scores (Tensor): Box score logits from a single decoder layer
+                for all images. Shape [bs, num_query, cls_out_channels].
+            bbox_preds (Tensor): Sigmoid outputs from a single decoder layer
+                for all images, with normalized coordinate (cx, cy, w, h) and
+                shape [bs, num_query, 4].
+            args_tuple (tuple): Intermediate transformer outputs
+                (memory, memory_mask, memory_pos, query, _, query_pos,
+                hw_lvl) produced by the location decoder.
+            reference (Tensor): Reference points of the queries, used to
+                offset the auxiliary box predictions of the mask decoder.
+            gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
+                with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels_list (list[Tensor]): Ground truth class indices for each
+                image with shape (num_gts, ).
+            img_metas (list[dict]): List of image meta information.
+            gt_bboxes_ignore_list (list[Tensor], optional): Bounding
+                boxes which can be ignored for each image. Default None.
+
+        Returns:
+            dict[str, Tensor]: A dictionary of loss components for outputs from
+                a single decoder layer.
+        """
+        num_imgs = cls_scores.size(0)
+        gt_stuff_labels_list, gt_stuff_masks_list = gt_panoptic_list
+        cls_scores_list = [cls_scores[i] for i in range(num_imgs)]
+        bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)]
+        loss_cls, loss_iou, loss_bbox, pos_inds_mask_list, num_total_pos_thing = self.get_filter_results_and_loss(
+            cls_scores, bbox_preds, cls_scores_list, bbox_preds_list, gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list)
+
+        memory, memory_mask, memory_pos, query, _, query_pos, hw_lvl = args_tuple
+
+        BS, _, dim_query = query.shape[0], query.shape[1], query.shape[-1]
+
+        len_query = max([len(pos_ind) for pos_ind in pos_inds_mask_list])
+        thing_query = torch.zeros([BS, len_query, dim_query],
+                                  device=query.device)
+
+        stuff_query, stuff_query_pos = torch.split(self.stuff_query.weight,
+                                                   self.embed_dims,
+                                                   dim=1)
+        stuff_query_pos = stuff_query_pos.unsqueeze(0).expand(BS, -1, -1)
+        stuff_query = stuff_query.unsqueeze(0).expand(BS, -1, -1)
+
+        for i in range(BS):
+            thing_query[i, :len(pos_inds_mask_list[i])] = query[
+                i, pos_inds_mask_list[i]]
+
+        mask_preds_things = []
+        mask_preds_stuff = []
+        mask_preds_inter_things = [[] for _ in range(self.num_dec_things)]
+        mask_preds_inter_stuff = [[] for _ in range(self.num_dec_stuff)]
+        cls_thing_preds = [[] for _ in range(self.num_dec_things)]
+        cls_stuff_preds = [[] for _ in range(self.num_dec_stuff)]
+        BS, NQ, L = bbox_preds.shape
+        new_bbox_preds = [
+            torch.zeros([BS, len_query, L]).to(bbox_preds.device)
+            for _ in range(self.num_dec_things)
+        ]
+
+        mask_things, mask_inter_things, query_inter_things = self.things_mask_head(
+            memory, memory_mask, None, thing_query, None, None, hw_lvl=hw_lvl)
+
+        mask_stuff, mask_inter_stuff, query_inter_stuff = self.stuff_mask_head(
+            memory,
+            memory_mask,
+            None,
+            stuff_query,
+            None,
+            stuff_query_pos,
+            hw_lvl=hw_lvl)
+
+        mask_things = mask_things.squeeze(-1)
+        mask_inter_things = torch.stack(mask_inter_things, 0).squeeze(-1)
+
+        mask_stuff = mask_stuff.squeeze(-1)
+        mask_inter_stuff = torch.stack(mask_inter_stuff, 0).squeeze(-1)
+
+        for i in range(BS):
+            tmp_i = mask_things[i][:len(pos_inds_mask_list[i])].reshape(
+                -1, *hw_lvl[0])
+            mask_preds_things.append(tmp_i)
+            pos_ind = pos_inds_mask_list[i]
+            reference_i = reference[i:i + 1, pos_ind, :]
+
+            for j in range(self.num_dec_things):
+                tmp_i_j = mask_inter_things[j][i][:len(pos_inds_mask_list[i]
+                                                       )].reshape(
+                                                           -1, *hw_lvl[0])
+                mask_preds_inter_things[j].append(tmp_i_j)
+
+                query_things = query_inter_things[j]
+                t1, t2, t3 = query_things.shape
+                tmp = self.reg_branches2[j](query_things.reshape(t1 * t2, t3)).reshape(t1, t2, 4)
+                if len(pos_ind) == 0:
+                    tmp = tmp.sum(
+                    ) + reference_i  # guards empty pos_ind against a zero-size PyTorch
broadcast + elif reference_i.shape[-1] == 4: + tmp += reference_i + else: + assert reference_i.shape[-1] == 2 + tmp[..., :2] += reference_i + + outputs_coord = tmp.sigmoid() + + new_bbox_preds[j][i][:len(pos_inds_mask_list[i])] = outputs_coord + cls_thing_preds[j].append(self.cls_thing_branches[j]( + query_things.reshape(t1 * t2, t3))) + + # stuff + tmp_i = mask_stuff[i].reshape(-1, *hw_lvl[0]) + mask_preds_stuff.append(tmp_i) + for j in range(self.num_dec_stuff): + tmp_i_j = mask_inter_stuff[j][i].reshape(-1, *hw_lvl[0]) + mask_preds_inter_stuff[j].append(tmp_i_j) + + query_stuff = query_inter_stuff[j] + s1, s2, s3 = query_stuff.shape + cls_stuff_preds[j].append(self.cls_stuff_branches[j]( + query_stuff.reshape(s1 * s2, s3))) + + masks_preds_list_thing = [ + mask_preds_things[i] for i in range(num_imgs) + ] + mask_preds_things = torch.cat(mask_preds_things, 0) + mask_preds_inter_things = [ + torch.cat(each, 0) for each in mask_preds_inter_things + ] + cls_thing_preds = [torch.cat(each, 0) for each in cls_thing_preds] + cls_stuff_preds = [torch.cat(each, 0) for each in cls_stuff_preds] + mask_preds_stuff = torch.cat(mask_preds_stuff, 0) + mask_preds_inter_stuff = [ + torch.cat(each, 0) for each in mask_preds_inter_stuff + ] + cls_scores_list = [ + cls_scores_list[i][pos_inds_mask_list[i]] for i in range(num_imgs) + ] + + bbox_preds_list = [ + bbox_preds_list[i][pos_inds_mask_list[i]] for i in range(num_imgs) + ] + + gt_targets = self.get_targets_with_mask(cls_scores_list, + bbox_preds_list, + masks_preds_list_thing, + gt_bboxes_list, gt_labels_list, + gt_masks_list, img_metas, + gt_bboxes_ignore_list) + + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + mask_targets_list, mask_weights_list, _, _, + pos_inds_list) = gt_targets + + thing_labels = torch.cat(labels_list, 0) + things_weights = torch.cat(label_weights_list, 0) + + bboxes_taget = torch.cat(bbox_targets_list) + bboxes_weights = torch.cat(bbox_weights_list) + + factors = [] + for img_meta, bbox_pred in zip(img_metas, bbox_preds_list): + img_h, img_w, _ = img_meta['img_shape'] + factor = bbox_pred.new_tensor([img_w, img_h, img_w, + img_h]).unsqueeze(0).repeat( + bbox_pred.size(0), 1) + factors.append(factor) + factors = torch.cat(factors, 0) + + bboxes_gt = bbox_cxcywh_to_xyxy(bboxes_taget) * factors + + mask_things_gt = torch.cat(mask_targets_list, 0).to(torch.float) + + mask_weight_things = torch.cat(mask_weights_list, + 0).to(thing_labels.device) + + mask_stuff_gt = [] + mask_weight_stuff = [] + stuff_labels = [] + num_total_pos_stuff = 0 + for i in range(BS): + num_total_pos_stuff += len(gt_stuff_labels_list[i]) ## all stuff + + select_stuff_index = gt_stuff_labels_list[ + i] - self.num_things_classes + mask_weight_i_stuff = torch.zeros([self.num_stuff_classes]) + mask_weight_i_stuff[select_stuff_index] = 1 + stuff_masks = torch.zeros( + (self.num_stuff_classes, *mask_targets_list[i].shape[-2:]), + device=mask_targets_list[i].device).to(torch.bool) + stuff_masks[select_stuff_index] = gt_stuff_masks_list[i].to( + torch.bool) + mask_stuff_gt.append(stuff_masks) + select_stuff_index = torch.cat([ + select_stuff_index, + torch.tensor([self.num_stuff_classes], + device=select_stuff_index.device) + ]) + + stuff_labels.append(1 - mask_weight_i_stuff) + mask_weight_stuff.append(mask_weight_i_stuff) + + mask_weight_stuff = torch.cat(mask_weight_stuff, + 0).to(thing_labels.device) + stuff_labels = torch.cat(stuff_labels, 0).to(thing_labels.device) + mask_stuff_gt = torch.cat(mask_stuff_gt, 0).to(torch.float) + + 
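# reduce_mean averages the stuff count across GPUs so every rank
+        # normalizes by the same factor; clamping at 1 guards against samples
+        # with no stuff annotations. E.g. with counts 4 and 6 on two ranks,
+        # both use an avg_factor of 5 (illustrative values).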
+        num_total_pos_stuff = loss_cls.new_tensor([num_total_pos_stuff])
+        num_total_pos_stuff = torch.clamp(reduce_mean(num_total_pos_stuff),
+                                          min=1).item()
+        if mask_preds_things.shape[0] == 0:
+            loss_mask_things = (0 * mask_preds_things).sum()
+        else:
+            mask_preds = F.interpolate(mask_preds_things.unsqueeze(0),
+                                       scale_factor=2.0,
+                                       mode='bilinear').squeeze(0)
+            mask_targets_things = F.interpolate(mask_things_gt.unsqueeze(0),
+                                                size=mask_preds.shape[-2:],
+                                                mode='bilinear').squeeze(0)
+            loss_mask_things = self.loss_mask(mask_preds,
+                                              mask_targets_things,
+                                              mask_weight_things,
+                                              avg_factor=num_total_pos_thing)
+        if mask_preds_stuff.shape[0] == 0:
+            loss_mask_stuff = (0 * mask_preds_stuff).sum()
+        else:
+            mask_preds = F.interpolate(mask_preds_stuff.unsqueeze(0),
+                                       scale_factor=2.0,
+                                       mode='bilinear').squeeze(0)
+            mask_targets_stuff = F.interpolate(mask_stuff_gt.unsqueeze(0),
+                                               size=mask_preds.shape[-2:],
+                                               mode='bilinear').squeeze(0)
+            loss_mask_stuff = self.loss_mask(mask_preds,
+                                             mask_targets_stuff,
+                                             mask_weight_stuff,
+                                             avg_factor=num_total_pos_stuff)
+
+        loss_mask_things_list = []
+        loss_mask_stuff_list = []
+        loss_iou_list = []
+        loss_bbox_list = []
+        for j in range(len(mask_preds_inter_things)):
+            mask_preds_this_level = mask_preds_inter_things[j]
+            if mask_preds_this_level.shape[0] == 0:
+                loss_mask_j = (0 * mask_preds_this_level).sum()
+            else:
+                mask_preds_this_level = F.interpolate(
+                    mask_preds_this_level.unsqueeze(0),
+                    scale_factor=2.0,
+                    mode='bilinear').squeeze(0)
+                loss_mask_j = self.loss_mask(mask_preds_this_level,
+                                             mask_targets_things,
+                                             mask_weight_things,
+                                             avg_factor=num_total_pos_thing)
+            loss_mask_things_list.append(loss_mask_j)
+            bbox_preds_this_level = new_bbox_preds[j].reshape(-1, 4)
+            bboxes_this_level = bbox_cxcywh_to_xyxy(
+                bbox_preds_this_level) * factors
+            # This loss is intentionally zeroed: the mask decoder does not
+            # predict boxes, so supervising box outputs here is not useful.
+            loss_iou_j = self.loss_iou(bboxes_this_level,
+                                       bboxes_gt,
+                                       bboxes_weights,
+                                       avg_factor=num_total_pos_thing) * 0
+            if bboxes_taget.shape[0] != 0:
+                loss_bbox_j = self.loss_bbox(
+                    bbox_preds_this_level,
+                    bboxes_taget,
+                    bboxes_weights,
+                    avg_factor=num_total_pos_thing) * 0
+            else:
+                loss_bbox_j = bbox_preds_this_level.sum() * 0
+            loss_iou_list.append(loss_iou_j)
+            loss_bbox_list.append(loss_bbox_j)
+        for j in range(len(mask_preds_inter_stuff)):
+            mask_preds_this_level = mask_preds_inter_stuff[j]
+            if mask_preds_this_level.shape[0] == 0:
+                loss_mask_j = (0 * mask_preds_this_level).sum()
+            else:
+                mask_preds_this_level = F.interpolate(
+                    mask_preds_this_level.unsqueeze(0),
+                    scale_factor=2.0,
+                    mode='bilinear').squeeze(0)
+                loss_mask_j = self.loss_mask(mask_preds_this_level,
+                                             mask_targets_stuff,
+                                             mask_weight_stuff,
+                                             avg_factor=num_total_pos_stuff)
+            loss_mask_stuff_list.append(loss_mask_j)
+
+        loss_cls_thing_list = []
+        loss_cls_stuff_list = []
+        thing_labels = thing_labels.reshape(-1)
+        for j in range(len(mask_preds_inter_things)):
+            # This loss is also zeroed: with the query filter, only a subset of
+            # thing queries is fed to the mask decoder, so supervising their
+            # classification here would be imbalanced.
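+            # Multiplying the loss by zero (rather than skipping the call)
+            # keeps the involved parameters in the autograd graph; a sketch of
+            # the pattern (explanatory, not part of the original comments):
+            #   loss = criterion(pred, target) * 0  # gradients flow, value is 0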
+ cls_scores = cls_thing_preds[j] + + if cls_scores.shape[0] == 0: + loss_cls_thing_j = cls_scores.sum() * 0 + else: + loss_cls_thing_j = self.loss_cls( + cls_scores, + thing_labels, + things_weights, + avg_factor=num_total_pos_thing) * 2 * 0 + loss_cls_thing_list.append(loss_cls_thing_j) + + for j in range(len(mask_preds_inter_stuff)): + cls_scores = cls_stuff_preds[j] + if cls_scores.shape[0] == 0: + loss_cls_stuff_j = cls_stuff_preds[j].sum() * 0 + else: + loss_cls_stuff_j = self.loss_cls( + cls_stuff_preds[j], + stuff_labels.to(torch.long), + avg_factor=num_total_pos_stuff) * 2 + loss_cls_stuff_list.append(loss_cls_stuff_j) + + ## dynamic adjusting the weights + things_ratio, stuff_ratio = num_total_pos_thing / ( + num_total_pos_stuff + num_total_pos_thing), num_total_pos_stuff / ( + num_total_pos_stuff + num_total_pos_thing) + + return loss_cls, loss_bbox, loss_iou, loss_mask_things, loss_mask_stuff, loss_mask_things_list, loss_mask_stuff_list, loss_iou_list, loss_bbox_list, loss_cls_thing_list, loss_cls_stuff_list, things_ratio, stuff_ratio + + def forward_test(self, + pts_feats=None, + gt_lane_labels=None, + gt_lane_masks=None, + img_metas=None, + rescale=False): + bbox_list = [dict() for i in range(len(img_metas))] + + pred_seg_dict = self(pts_feats) + results = self.get_bboxes(pred_seg_dict['outputs_classes'], + pred_seg_dict['outputs_coords'], + pred_seg_dict['enc_outputs_class'], + pred_seg_dict['enc_outputs_coord'], + pred_seg_dict['args_tuple'], + pred_seg_dict['reference'], + img_metas, + rescale=rescale) + + if gt_lane_labels is None or gt_lane_masks is None: + for result_dict, pts_bbox in zip(bbox_list, results): + result_dict['pts_bbox'] = pts_bbox + result_dict['args_tuple'] = pred_seg_dict['args_tuple'] + return bbox_list + + with torch.no_grad(): + drivable_pred = results[0]['drivable'] + drivable_gt = gt_lane_masks[0][0, -1] + drivable_iou, drivable_intersection, drivable_union = IOU(drivable_pred.view(1, -1), drivable_gt.view(1, -1)) + + lane_pred = results[0]['lane'] + lanes_pred = (results[0]['lane'].sum(0) > 0).int() + lanes_gt = (gt_lane_masks[0][0][:-1].sum(0) > 0).int() + lanes_iou, lanes_intersection, lanes_union = IOU(lanes_pred.view(1, -1), lanes_gt.view(1, -1)) + + divider_gt = (gt_lane_masks[0][0][gt_lane_labels[0][0] == 0].sum(0) > 0).int() + crossing_gt = (gt_lane_masks[0][0][gt_lane_labels[0][0] == 1].sum(0) > 0).int() + contour_gt = (gt_lane_masks[0][0][gt_lane_labels[0][0] == 2].sum(0) > 0).int() + divider_iou, divider_intersection, divider_union = IOU(lane_pred[0].view(1, -1), divider_gt.view(1, -1)) + crossing_iou, crossing_intersection, crossing_union = IOU(lane_pred[1].view(1, -1), crossing_gt.view(1, -1)) + contour_iou, contour_intersection, contour_union = IOU(lane_pred[2].view(1, -1), contour_gt.view(1, -1)) + + + ret_iou = {'drivable_intersection': drivable_intersection, + 'drivable_union': drivable_union, + 'lanes_intersection': lanes_intersection, + 'lanes_union': lanes_union, + 'divider_intersection': divider_intersection, + 'divider_union': divider_union, + 'crossing_intersection': crossing_intersection, + 'crossing_union': crossing_union, + 'contour_intersection': contour_intersection, + 'contour_union': contour_union, + 'drivable_iou': drivable_iou, + 'lanes_iou': lanes_iou, + 'divider_iou': divider_iou, + 'crossing_iou': crossing_iou, + 'contour_iou': contour_iou} + for result_dict, pts_bbox in zip(bbox_list, results): + result_dict['pts_bbox'] = pts_bbox + result_dict['ret_iou'] = ret_iou + result_dict['args_tuple'] = 
pred_seg_dict['args_tuple'] + return bbox_list + + + @auto_fp16(apply_to=("bev_feat", "prev_bev")) + def forward_train(self, + bev_feat=None, + img_metas=None, + gt_lane_labels=None, + gt_lane_bboxes=None, + gt_lane_masks=None, + ): + """ + Forward pass of the segmentation model during training. + + Args: + bev_feat (torch.Tensor): Bird's eye view feature maps. Shape [batch_size, channels, height, width]. + img_metas (list[dict]): List of image meta information dictionaries. + gt_lane_labels (list[torch.Tensor]): Ground-truth lane class labels. Shape [batch_size, num_lanes, max_lanes]. + gt_lane_bboxes (list[torch.Tensor]): Ground-truth lane bounding boxes. Shape [batch_size, num_lanes, 4]. + gt_lane_masks (list[torch.Tensor]): Ground-truth lane masks. Shape [batch_size, num_lanes, height, width]. + prev_bev (torch.Tensor): Previous bird's eye view feature map. Shape [batch_size, channels, height, width]. + + Returns: + tuple: + - losses_seg (torch.Tensor): Total segmentation loss. + - pred_seg_dict (dict): Dictionary of predicted segmentation outputs. + """ + pred_seg_dict = self(bev_feat) + loss_inputs = [ + pred_seg_dict['outputs_classes'], + pred_seg_dict['outputs_coords'], + pred_seg_dict['enc_outputs_class'], + pred_seg_dict['enc_outputs_coord'], + pred_seg_dict['args_tuple'], + pred_seg_dict['reference'], + gt_lane_labels, + gt_lane_bboxes, + gt_lane_masks + ] + losses_seg = self.loss(*loss_inputs, img_metas=img_metas) + return losses_seg, pred_seg_dict + + def _get_bboxes_single(self, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=False): + """ + """ + assert len(cls_score) == len(bbox_pred) + max_per_img = self.test_cfg.get('max_per_img', self.num_query) + + # exclude background + if self.loss_cls.use_sigmoid: + cls_score = cls_score.sigmoid() + scores, indexes = cls_score.view(-1).topk(max_per_img) + det_labels = indexes % self.num_things_classes + bbox_index = indexes // self.num_things_classes + bbox_pred = bbox_pred[bbox_index] + else: + scores, det_labels = F.softmax(cls_score, dim=-1)[..., :-1].max(-1) + scores, bbox_index = scores.topk(max_per_img) + bbox_pred = bbox_pred[bbox_index] + det_labels = det_labels[bbox_index] + + det_bboxes = bbox_cxcywh_to_xyxy(bbox_pred) + det_bboxes[:, 0::2] = det_bboxes[:, 0::2] * img_shape[1] + det_bboxes[:, 1::2] = det_bboxes[:, 1::2] * img_shape[0] + det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1]) + det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0]) + if rescale: + det_bboxes /= det_bboxes.new_tensor(scale_factor) + det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(1)), -1) + + return bbox_index, det_bboxes, det_labels + + @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list', + 'args_tuple')) + def get_bboxes( + self, + all_cls_scores, + all_bbox_preds, + enc_cls_scores, + enc_bbox_preds, + args_tuple, + reference, + img_metas, + rescale=False, + ): + """ + """ + cls_scores = all_cls_scores[-1] + bbox_preds = all_bbox_preds[-1] + memory, memory_mask, memory_pos, query, _, query_pos, hw_lvl = args_tuple + + seg_list = [] + stuff_score_list = [] + panoptic_list = [] + bbox_list = [] + labels_list = [] + drivable_list = [] + lane_list = [] + lane_score_list = [] + score_list = [] + for img_id in range(len(img_metas)): + cls_score = cls_scores[img_id] + bbox_pred = bbox_preds[img_id] + # img_shape = img_metas[img_id]['img_shape'] + # ori_shape = img_metas[img_id]['ori_shape'] + # scale_factor = img_metas[img_id]['scale_factor'] + img_shape = (self.canvas_size[0], self.canvas_size[1], 3) + ori_shape = 
(self.canvas_size[0], self.canvas_size[1], 3)
+            scale_factor = 1
+
+            index, bbox, labels = self._get_bboxes_single(
+                cls_score, bbox_pred, img_shape, scale_factor, rescale)
+
+            i = img_id
+            thing_query = query[i:i + 1, index, :]
+            thing_query_pos = query_pos[i:i + 1, index, :]
+            joint_query = torch.cat([
+                thing_query, self.stuff_query.weight[None, :, :self.embed_dims]
+            ], 1)
+
+            stuff_query_pos = self.stuff_query.weight[None, :,
+                                                      self.embed_dims:]
+
+            if self.num_stuff_classes > 0:
+                joint_query = joint_query[:, :-self.num_stuff_classes]
+
+            mask_things, mask_inter_things, query_inter_things = self.things_mask_head(
+                memory[i:i + 1],
+                memory_mask[i:i + 1],
+                None,
+                joint_query,
+                None,
+                None,
+                hw_lvl=hw_lvl)
+            # mask_stuff, mask_inter_stuff, query_inter_stuff = self.stuff_mask_head(
+            #     memory[i:i + 1],
+            #     memory_mask[i:i + 1],
+            #     None,
+            #     joint_query,
+            #     None,
+            #     stuff_query_pos,
+            #     hw_lvl=hw_lvl)
+
+            # attn_map = torch.cat([mask_things, mask_stuff], 1)
+            attn_map = mask_things
+
+            attn_map = attn_map.squeeze(-1)  # BS, NQ, N_head, LEN
+
+            # stuff_query = query_inter_stuff[-1]
+            # scores_stuff = self.cls_stuff_branches[-1](
+            #     stuff_query).sigmoid().reshape(-1)
+
+            mask_pred = attn_map.reshape(-1, *hw_lvl[0])
+
+            mask_pred = F.interpolate(mask_pred.unsqueeze(0),
+                                      size=ori_shape[:2],
+                                      mode='bilinear').squeeze(0)
+
+            masks_all = mask_pred
+            score_list.append(masks_all)
+            drivable_list.append(masks_all[-1] > 0.5)
+            if self.num_stuff_classes > 0:
+                masks_all = masks_all[:-self.num_stuff_classes]
+            seg_all = masks_all > 0.5
+            sum_seg_all = seg_all.sum((1, 2)).float() + 1
+            # scores_all = torch.cat([bbox[:, -1], scores_stuff], 0)
+            # bboxes_all = torch.cat([bbox, torch.zeros([self.num_stuff_classes, 5], device=labels.device)], 0)
+            # labels_all = torch.cat([labels, torch.arange(self.num_things_classes, self.num_things_classes+self.num_stuff_classes).to(labels.device)], 0)
+            scores_all = bbox[:, -1]
+            bboxes_all = bbox
+            labels_all = labels
+
+            # mask-wise merging
+            seg_scores = (masks_all * seg_all.float()).sum(
+                (1, 2)) / sum_seg_all
+            scores_all *= (seg_scores**2)
+
+            scores_all, index = torch.sort(scores_all, descending=True)
+
+            masks_all = masks_all[index]
+            labels_all = labels_all[index]
+            bboxes_all = bboxes_all[index]
+            seg_all = seg_all[index]
+
+            bboxes_all[:, -1] = scores_all
+
+            # MDS: select things for instance segmentation
+            things_selected = labels_all < self.num_things_classes
+            stuff_selected = labels_all >= self.num_things_classes
+            bbox_th = bboxes_all[things_selected][:100]
+            labels_th = labels_all[things_selected][:100]
+            seg_th = seg_all[things_selected][:100]
+            labels_st = labels_all[stuff_selected]
+            scores_st = scores_all[stuff_selected]
+            masks_st = masks_all[stuff_selected]
+
+            stuff_score_list.append(scores_st)
+
+            results = torch.zeros((2, *mask_pred.shape[-2:]),
+                                  device=mask_pred.device).to(torch.long)
+            id_unique = 1
+            lane = torch.zeros((self.num_things_classes, *mask_pred.shape[-2:]), device=mask_pred.device).to(torch.long)
+            lane_score = torch.zeros((self.num_things_classes, *mask_pred.shape[-2:]), device=mask_pred.device).to(mask_pred.dtype)
+            for i, scores in enumerate(scores_all):
+                # MDS: using separate thresholds for things and stuff may
+                # perform slightly better
+                if labels_all[i] < self.num_things_classes and scores < self.quality_threshold_things:
+                    continue
+                elif labels_all[i] >= self.num_things_classes and scores < self.quality_threshold_stuff:
+                    continue
+                _mask = masks_all[i] > 0.5
+                mask_area =
_mask.sum().item() + intersect = _mask & (results[0] > 0) + intersect_area = intersect.sum().item() + if labels_all[i] < self.num_things_classes: + if mask_area == 0 or (intersect_area * 1.0 / mask_area + ) > self.overlap_threshold_things: + continue + else: + if mask_area == 0 or (intersect_area * 1.0 / mask_area + ) > self.overlap_threshold_stuff: + continue + if intersect_area > 0: + _mask = _mask & (results[0] == 0) + results[0, _mask] = labels_all[i] + if labels_all[i] < self.num_things_classes: + lane[labels_all[i], _mask] = 1 + lane_score[labels_all[i], _mask] = masks_all[i][_mask] + results[1, _mask] = id_unique + id_unique += 1 + + # file_name = img_metas[img_id]['pts_filename'].split('/')[-1].split('.')[0] + # panoptic_list.append( + # (results.permute(1, 2, 0).cpu().numpy(), file_name, ori_shape)) + panoptic_list.append((results.permute(1, 2, 0).cpu().numpy(),ori_shape)) + + + bbox_list.append(bbox_th) + labels_list.append(labels_th) + seg_list.append(seg_th) + lane_list.append(lane) + lane_score_list.append(lane_score) + results = [] + for i in range(len(img_metas)): + results.append({ + 'bbox': bbox_list[i], + 'segm': seg_list[i], + 'labels': labels_list[i], + 'panoptic': panoptic_list[i], + 'drivable': drivable_list[i], + 'score_list': score_list[i], + 'lane': lane_list[i], + 'lane_score': lane_score_list[i], + 'stuff_score_list' : stuff_score_list[i], + }) + return results diff --git a/mmcv/models/dense_heads/planning_head.py b/mmcv/models/dense_heads/planning_head.py new file mode 100644 index 0000000..6df7afc --- /dev/null +++ b/mmcv/models/dense_heads/planning_head.py @@ -0,0 +1,251 @@ +#---------------------------------------------------------------------------------# +# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156) # +# Source code: https://github.com/OpenDriveLab/UniAD # +# Copyright (c) OpenDriveLab. All rights reserved. # +#---------------------------------------------------------------------------------# + +import torch +import torch.nn as nn +from mmcv.models.builder import HEADS, build_loss +from einops import rearrange +from mmcv.models.utils.functional import bivariate_gaussian_activation +from .planning_head_plugin import CollisionNonlinearOptimizer +import numpy as np +import copy + +@HEADS.register_module() +class PlanningHeadSingleMode(nn.Module): + def __init__(self, + bev_h=200, + bev_w=200, + embed_dims=256, + planning_steps=6, + command_dim=3, + loss_planning=None, + loss_collision=None, + planning_eval=False, + use_col_optim=False, + col_optim_args=dict( + occ_filter_range=5.0, + sigma=1.0, + alpha_collision=5.0, + ), + with_adapter=False, + ): + """ + Single Mode Planning Head for Autonomous Driving. + + Args: + embed_dims (int): Embedding dimensions. Default: 256. + planning_steps (int): Number of steps for motion planning. Default: 6. + loss_planning (dict): Configuration for planning loss. Default: None. + loss_collision (dict): Configuration for collision loss. Default: None. + planning_eval (bool): Whether to use planning for evaluation. Default: False. + use_col_optim (bool): Whether to use collision optimization. Default: False. + col_optim_args (dict): Collision optimization arguments. Default: dict(occ_filter_range=5.0, sigma=1.0, alpha_collision=5.0). 
+ """ + super(PlanningHeadSingleMode, self).__init__() + + # Nuscenes + self.bev_h = bev_h + self.bev_w = bev_w + self.navi_embed = nn.Embedding(command_dim, embed_dims) + self.reg_branch = nn.Sequential( + nn.Linear(embed_dims, embed_dims), + nn.ReLU(), + nn.Linear(embed_dims, planning_steps * 2), + ) + self.loss_planning = build_loss(loss_planning) + self.planning_steps = planning_steps + self.planning_eval = planning_eval + + #### planning head + fuser_dim = 3 + attn_module_layer = nn.TransformerDecoderLayer(embed_dims, 8, dim_feedforward=embed_dims*2, dropout=0.1, batch_first=False) + self.attn_module = nn.TransformerDecoder(attn_module_layer, 3) + + self.mlp_fuser = nn.Sequential( + nn.Linear(embed_dims*fuser_dim, embed_dims), + nn.LayerNorm(embed_dims), + nn.ReLU(inplace=True), + ) + + self.pos_embed = nn.Embedding(1, embed_dims) + self.loss_collision = [] + for cfg in loss_collision: + self.loss_collision.append(build_loss(cfg)) + self.loss_collision = nn.ModuleList(self.loss_collision) + + self.use_col_optim = use_col_optim + self.occ_filter_range = col_optim_args['occ_filter_range'] + self.sigma = col_optim_args['sigma'] + self.alpha_collision = col_optim_args['alpha_collision'] + + # TODO: reimplement it with down-scaled feature_map + self.with_adapter = with_adapter + if with_adapter: + bev_adapter_block = nn.Sequential( + nn.Conv2d(embed_dims, embed_dims // 2, kernel_size=3, padding=1), + nn.ReLU(), + nn.Conv2d(embed_dims // 2, embed_dims, kernel_size=1), + ) + N_Blocks = 3 + bev_adapter = [copy.deepcopy(bev_adapter_block) for _ in range(N_Blocks)] + self.bev_adapter = nn.Sequential(*bev_adapter) + + def forward_train(self, + bev_embed, + outs_motion={}, + sdc_planning=None, + sdc_planning_mask=None, + command=None, + gt_future_boxes=None, + ): + """ + Perform forward planning training with the given inputs. + Args: + bev_embed (torch.Tensor): The input bird's eye view feature map. + outs_motion (dict): A dictionary containing the motion outputs. + outs_occflow (dict): A dictionary containing the occupancy flow outputs. + sdc_planning (torch.Tensor, optional): The self-driving car's planned trajectory. + sdc_planning_mask (torch.Tensor, optional): The mask for the self-driving car's planning. + command (torch.Tensor, optional): The driving command issued to the self-driving car. + gt_future_boxes (torch.Tensor, optional): The ground truth future bounding boxes. + img_metas (list[dict], optional): A list of metadata information about the input images. + + Returns: + ret_dict (dict): A dictionary containing the losses and planning outputs. 
+ """ + sdc_traj_query = outs_motion['sdc_traj_query'] + sdc_track_query = outs_motion['sdc_track_query'] + bev_pos = outs_motion['bev_pos'] + + occ_mask = None + + outs_planning = self(bev_embed, occ_mask, bev_pos, sdc_traj_query, sdc_track_query, command) + loss_inputs = [sdc_planning, sdc_planning_mask, outs_planning, gt_future_boxes] + losses = self.loss(*loss_inputs) + ret_dict = dict(losses=losses, outs_motion=outs_planning) + return ret_dict + + def forward_test(self, bev_embed, outs_motion={}, outs_occflow={}, command=None): + sdc_traj_query = outs_motion['sdc_traj_query'] + sdc_track_query = outs_motion['sdc_track_query'] + bev_pos = outs_motion['bev_pos'] + occ_mask = outs_occflow['seg_out'] + + outs_planning = self(bev_embed, occ_mask, bev_pos, sdc_traj_query, sdc_track_query, command) + return outs_planning + + def forward(self, + bev_embed, + occ_mask, + bev_pos, + sdc_traj_query, + sdc_track_query, + command): + """ + Forward pass for PlanningHeadSingleMode. + + Args: + bev_embed (torch.Tensor): Bird's eye view feature embedding. + occ_mask (torch.Tensor): Instance mask for occupancy. + bev_pos (torch.Tensor): BEV position. + sdc_traj_query (torch.Tensor): SDC trajectory query. + sdc_track_query (torch.Tensor): SDC track query. + command (int): Driving command. + + Returns: + dict: A dictionary containing SDC trajectory and all SDC trajectories. + """ + sdc_track_query = sdc_track_query.detach() + sdc_traj_query = sdc_traj_query[-1] + P = sdc_traj_query.shape[1] + sdc_track_query = sdc_track_query[:, None].expand(-1,P,-1) + + #import pdb;pdb.set_trace() + navi_embed = self.navi_embed.weight[command] + navi_embed = navi_embed[None].expand(-1,P,-1) + plan_query = torch.cat([sdc_traj_query, sdc_track_query, navi_embed], dim=-1) + + plan_query = self.mlp_fuser(plan_query).max(1, keepdim=True)[0] # expand, then fuse # [1, 6, 768] -> [1, 1, 256] + plan_query = rearrange(plan_query, 'b p c -> p b c') + + bev_pos = rearrange(bev_pos, 'b c h w -> (h w) b c') + bev_feat = bev_embed + bev_pos + + ##### Plugin adapter ##### + if self.with_adapter: + bev_feat = rearrange(bev_feat, '(h w) b c -> b c h w', h=self.bev_h, w=self.bev_w) + + bev_feat = bev_feat + self.bev_adapter(bev_feat) # residual connection + bev_feat = rearrange(bev_feat, 'b c h w -> (h w) b c') + ########################## + + pos_embed = self.pos_embed.weight + plan_query = plan_query + pos_embed[None] # [1, 1, 256] + + # plan_query: [1, 1, 256] + # bev_feat: [40000, 1, 256] + plan_query = self.attn_module(plan_query, bev_feat) # [1, 1, 256] + + sdc_traj_all = self.reg_branch(plan_query).view((-1, self.planning_steps, 2)) + sdc_traj_all[...,:2] = torch.cumsum(sdc_traj_all[...,:2], dim=1) + sdc_traj_all[0] = bivariate_gaussian_activation(sdc_traj_all[0]) + if self.use_col_optim and not self.training: + # post process, only used when testing + assert occ_mask is not None + sdc_traj_all = self.collision_optimization(sdc_traj_all, occ_mask) + + return dict( + sdc_traj=sdc_traj_all, + sdc_traj_all=sdc_traj_all, + ) + + def collision_optimization(self, sdc_traj_all, occ_mask): + """ + Optimize SDC trajectory with occupancy instance mask. + + Args: + sdc_traj_all (torch.Tensor): SDC trajectory tensor. + occ_mask (torch.Tensor): Occupancy flow instance mask. + Returns: + torch.Tensor: Optimized SDC trajectory tensor. 
+ """ + pos_xy_t = [] + valid_occupancy_num = 0 + + if occ_mask.shape[2] == 1: + occ_mask = occ_mask.squeeze(2) + occ_horizon = occ_mask.shape[1] + assert occ_horizon == 5 + + for t in range(self.planning_steps): + cur_t = min(t+1, occ_horizon-1) + pos_xy = torch.nonzero(occ_mask[0][cur_t], as_tuple=False) + pos_xy = pos_xy[:, [1, 0]] + pos_xy[:, 0] = (pos_xy[:, 0] - self.bev_h//2) * 0.5 + 0.25 + pos_xy[:, 1] = (pos_xy[:, 1] - self.bev_w//2) * 0.5 + 0.25 + + # filter the occupancy in range + keep_index = torch.sum((sdc_traj_all[0, t, :2][None, :] - pos_xy[:, :2])**2, axis=-1) < self.occ_filter_range**2 + pos_xy_t.append(pos_xy[keep_index].cpu().detach().numpy()) + valid_occupancy_num += torch.sum(keep_index>0) + if valid_occupancy_num == 0: + return sdc_traj_all + + col_optimizer = CollisionNonlinearOptimizer(self.planning_steps, 0.5, self.sigma, self.alpha_collision, pos_xy_t) + col_optimizer.set_reference_trajectory(sdc_traj_all[0].cpu().detach().numpy()) + sol = col_optimizer.solve() + sdc_traj_optim = np.stack([sol.value(col_optimizer.position_x), sol.value(col_optimizer.position_y)], axis=-1) + return torch.tensor(sdc_traj_optim[None], device=sdc_traj_all.device, dtype=sdc_traj_all.dtype) + + def loss(self, sdc_planning, sdc_planning_mask, outs_planning, future_gt_bbox=None): + sdc_traj_all = outs_planning['sdc_traj_all'] # b, p, t, 5 + loss_dict = dict() + for i in range(len(self.loss_collision)): + loss_collision = self.loss_collision[i](sdc_traj_all, sdc_planning[0, :, :self.planning_steps, :3], torch.any(sdc_planning_mask[0, :, :self.planning_steps], dim=-1), future_gt_bbox[0][1:self.planning_steps+1]) + loss_dict[f'loss_collision_{i}'] = loss_collision + loss_ade = self.loss_planning(sdc_traj_all, sdc_planning[0, :, :self.planning_steps, :2], torch.any(sdc_planning_mask[0, :, :self.planning_steps], dim=-1)) + loss_dict.update(dict(loss_ade=loss_ade)) + return loss_dict \ No newline at end of file diff --git a/mmcv/models/dense_heads/planning_head_plugin/__init__.py b/mmcv/models/dense_heads/planning_head_plugin/__init__.py new file mode 100644 index 0000000..57e8627 --- /dev/null +++ b/mmcv/models/dense_heads/planning_head_plugin/__init__.py @@ -0,0 +1,4 @@ +# from .collision_optimization import * +from .planning_metrics import * +from .collision_optimization import * +from .metric_stp3 import * \ No newline at end of file diff --git a/mmcv/models/dense_heads/planning_head_plugin/collision_optimization.py b/mmcv/models/dense_heads/planning_head_plugin/collision_optimization.py new file mode 100644 index 0000000..bf01e49 --- /dev/null +++ b/mmcv/models/dense_heads/planning_head_plugin/collision_optimization.py @@ -0,0 +1,116 @@ +#---------------------------------------------------------------------------------# +# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156) # +# Source code: https://github.com/OpenDriveLab/UniAD # +# Copyright (c) OpenDriveLab. All rights reserved. # +#---------------------------------------------------------------------------------# + +from typing import Any, Dict, List, Optional, Sequence, Tuple, Union + +import numpy as np +import numpy.typing as npt +from casadi import DM, Opti, OptiSol, cos, diff, sin, sumsqr, vertcat, exp + +Pose = Tuple[float, float, float] # (x, y, yaw) + + +class CollisionNonlinearOptimizer: + """ + Optimize planned trajectory with predicted occupancy + Solved with direct multiple-shooting. 
+    modified from https://github.com/motional/nuplan-devkit
+    :param trajectory_len: trajectory length
+    :param dt: timestep (sec)
+    """
+
+    def __init__(self, trajectory_len: int, dt: float, sigma, alpha_collision, obj_pixel_pos):
+        """
+        :param trajectory_len: the length of the trajectory to be optimized.
+        :param dt: the time interval between trajectory points.
+        """
+        self.dt = dt
+        self.trajectory_len = trajectory_len
+        self.current_index = 0
+        self.sigma = sigma
+        self.alpha_collision = alpha_collision
+        self.obj_pixel_pos = obj_pixel_pos
+        # Use an array of dts to stay compatible with situations where dt
+        # varies across time steps.
+        self._dts: npt.NDArray[np.float32] = np.asarray([[dt] * trajectory_len])
+        self._init_optimization()
+
+    def _init_optimization(self) -> None:
+        """
+        Initialize related variables and constraints for optimization.
+        """
+        self.nx = 2  # state dim
+
+        self._optimizer = Opti()  # Optimization problem
+        self._create_decision_variables()
+        self._create_parameters()
+        self._set_objective()
+
+        # Set default solver options (quiet)
+        self._optimizer.solver("ipopt", {"ipopt.print_level": 0, "print_time": 0, "ipopt.sb": "yes"})
+
+    def set_reference_trajectory(self, reference_trajectory: Sequence[Pose]) -> None:
+        """
+        Set the reference trajectory that the smoother is trying to loosely track.
+        :param reference_trajectory: N x 2 reference, where the second dim is for (x, y)
+        """
+        self._optimizer.set_value(self.ref_traj, DM(reference_trajectory).T)
+        self._set_initial_guess(reference_trajectory)
+
+    def set_solver_optimizerons(self, options: Dict[str, Any]) -> None:
+        """
+        Control solver options, including verbosity.
+        :param options: Dictionary containing optimization criteria
+        """
+        self._optimizer.solver("ipopt", options)
+
+    def solve(self) -> OptiSol:
+        """
+        Solve the optimization problem. Assumes the reference trajectory was already set.
+        :return: Casadi optimization result
+        """
+        return self._optimizer.solve()
+
+    def _create_decision_variables(self) -> None:
+        """
+        Define the decision variables for the trajectory optimization.
+        """
+        # State trajectory (x, y)
+        self.state = self._optimizer.variable(self.nx, self.trajectory_len)
+        self.position_x = self.state[0, :]
+        self.position_y = self.state[1, :]
+
+    def _create_parameters(self) -> None:
+        """
+        Define the expert trajectory parameter for the trajectory optimization.
+        """
+        self.ref_traj = self._optimizer.parameter(2, self.trajectory_len)  # (x, y)
+
+    def _set_objective(self) -> None:
+        """Set the objective function.
Use care when modifying these weights.""" + # Follow reference, minimize control rates and absolute inputs + alpha_xy = 1.0 + cost_stage = ( + alpha_xy * sumsqr(self.ref_traj[:2, :] - vertcat(self.position_x, self.position_y)) + ) + + alpha_collision = self.alpha_collision + + cost_collision = 0 + normalizer = 1/(2.507*self.sigma) + # TODO: vectorize this + for t in range(len(self.obj_pixel_pos)): + x, y = self.position_x[t], self.position_y[t] + for i in range(len(self.obj_pixel_pos[t])): + col_x, col_y = self.obj_pixel_pos[t][i] + cost_collision += alpha_collision * normalizer * exp(-((x - col_x)**2 + (y - col_y)**2)/2/self.sigma**2) + self._optimizer.minimize(cost_stage + cost_collision) + + def _set_initial_guess(self, reference_trajectory: Sequence[Pose]) -> None: + """Set a warm-start for the solver based on the reference trajectory.""" + # Initialize state guess based on reference + self._optimizer.set_initial(self.state[:2, :], DM(reference_trajectory).T) # (x, y, yaw) + diff --git a/mmcv/models/dense_heads/planning_head_plugin/metric_stp3.py b/mmcv/models/dense_heads/planning_head_plugin/metric_stp3.py new file mode 100644 index 0000000..e70f809 --- /dev/null +++ b/mmcv/models/dense_heads/planning_head_plugin/metric_stp3.py @@ -0,0 +1,337 @@ +''' +calculate planner metric same as stp3 +''' +import numpy as np +import torch +import cv2 +import copy +import matplotlib.pyplot as plt +from skimage.draw import polygon +from nuscenes.utils.data_classes import Box +from scipy.spatial.transform import Rotation as R + +ego_width, ego_length = 1.85, 4.084 + +class PlanningMetric(): + def __init__(self): + super().__init__() + self.X_BOUND = [-50.0, 50.0, 0.5] # Forward + self.Y_BOUND = [-50.0, 50.0, 0.5] # Sides + self.Z_BOUND = [-10.0, 10.0, 20.0] # Height + dx, bx, _ = self.gen_dx_bx(self.X_BOUND, self.Y_BOUND, self.Z_BOUND) + self.dx, self.bx = dx[:2], bx[:2] + + bev_resolution, bev_start_position, bev_dimension = self.calculate_birds_eye_view_parameters( + self.X_BOUND, self.Y_BOUND, self.Z_BOUND + ) + self.bev_resolution = bev_resolution.numpy() + self.bev_start_position = bev_start_position.numpy() + self.bev_dimension = bev_dimension.numpy() + + self.W = ego_width + self.H = ego_length + + self.category_index = { + 'human':[2,3,4,5,6,7,8], + 'vehicle':[14,15,16,17,18,19,20,21,22,23] + } + + # self.n_future = n_future + + # self.add_state("obj_col", default=torch.zeros(self.n_future), dist_reduce_fx="sum") + # self.add_state("obj_box_col", default=torch.zeros(self.n_future), dist_reduce_fx="sum") + # self.add_state("L2", default=torch.zeros(self.n_future),dist_reduce_fx="sum") + # self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum") + + def gen_dx_bx(self, xbound, ybound, zbound): + dx = torch.Tensor([row[2] for row in [xbound, ybound, zbound]]) + bx = torch.Tensor([row[0] + row[2]/2.0 for row in [xbound, ybound, zbound]]) + nx = torch.LongTensor([(row[1] - row[0]) / row[2] for row in [xbound, ybound, zbound]]) + + return dx, bx, nx + + def calculate_birds_eye_view_parameters(self, x_bounds, y_bounds, z_bounds): + """ + Parameters + ---------- + x_bounds: Forward direction in the ego-car. 
+ y_bounds: Sides + z_bounds: Height + + Returns + ------- + bev_resolution: Bird's-eye view bev_resolution + bev_start_position Bird's-eye view first element + bev_dimension Bird's-eye view tensor spatial dimension + """ + bev_resolution = torch.tensor([row[2] for row in [x_bounds, y_bounds, z_bounds]]) + bev_start_position = torch.tensor([row[0] + row[2] / 2.0 for row in [x_bounds, y_bounds, z_bounds]]) + bev_dimension = torch.tensor([(row[1] - row[0]) / row[2] for row in [x_bounds, y_bounds, z_bounds]], + dtype=torch.long) + + return bev_resolution, bev_start_position, bev_dimension + + def get_label( + self, + gt_agent_boxes, + gt_agent_feats + ): + segmentation_np, pedestrian_np = self.get_birds_eye_view_label(gt_agent_boxes,gt_agent_feats) + segmentation = torch.from_numpy(segmentation_np).long().unsqueeze(0) + pedestrian = torch.from_numpy(pedestrian_np).long().unsqueeze(0) + + return segmentation, pedestrian + + def get_birds_eye_view_label( + self, + gt_agent_boxes, + gt_agent_feats + ): + ''' + gt_agent_boxes (LiDARInstance3DBoxes): list of GT Bboxs. + dim 9 = (x,y,z)+(w,l,h)+yaw+(vx,vy) + gt_agent_feats: (B, A, 34) + dim 34 = fut_traj(6*2) + fut_mask(6) + goal(1) + lcf_feat(9) + fut_yaw(6) + lcf_feat (x, y, yaw, vx, vy, width, length, height, type) + ego_lcf_feats: (B, 9) + dim 8 = (vx, vy, ax, ay, w, length, width, vel, steer) + ''' + T = 6 + segmentation = np.zeros((T,self.bev_dimension[0], self.bev_dimension[1])) + pedestrian = np.zeros((T,self.bev_dimension[0], self.bev_dimension[1])) + agent_num = gt_agent_feats.shape[1] + + gt_agent_boxes = gt_agent_boxes.tensor.cpu().numpy() #(N, 9) + gt_agent_feats = gt_agent_feats.cpu().numpy() + + gt_agent_fut_trajs = gt_agent_feats[..., :T*2].reshape(-1, 6, 2) + gt_agent_fut_mask = gt_agent_feats[..., T*2:T*3].reshape(-1, 6) + # gt_agent_lcf_feat = gt_agent_feats[..., T*3+1:T*3+10].reshape(-1, 9) + gt_agent_fut_yaw = gt_agent_feats[..., T*3+10:T*4+10].reshape(-1, 6, 1) + gt_agent_fut_trajs = np.cumsum(gt_agent_fut_trajs, axis=1) + gt_agent_fut_yaw = np.cumsum(gt_agent_fut_yaw, axis=1) + + gt_agent_boxes[:,6:7] = -1*(gt_agent_boxes[:,6:7] + np.pi/2) # NOTE: convert yaw to lidar frame + gt_agent_fut_trajs = gt_agent_fut_trajs + gt_agent_boxes[:, np.newaxis, 0:2] + gt_agent_fut_yaw = gt_agent_fut_yaw + gt_agent_boxes[:, np.newaxis, 6:7] + + for t in range(T): + for i in range(agent_num): + if gt_agent_fut_mask[i][t] == 1: + # Filter out all non vehicle instances + category_index = int(gt_agent_feats[0,i][27]) + agent_length, agent_width = gt_agent_boxes[i][4], gt_agent_boxes[i][3] + x_a = gt_agent_fut_trajs[i, t, 0] + y_a = gt_agent_fut_trajs[i, t, 1] + yaw_a = gt_agent_fut_yaw[i, t, 0] + param = [x_a,y_a,yaw_a,agent_length, agent_width] + if (category_index in self.category_index['vehicle']): + poly_region = self._get_poly_region_in_image(param) + cv2.fillPoly(segmentation[t], [poly_region], 1.0) + if (category_index in self.category_index['human']): + poly_region = self._get_poly_region_in_image(param) + cv2.fillPoly(pedestrian[t], [poly_region], 1.0) + + # vis for debug + # plt.figure('debug') + # for i in range(T): + # plt.subplot(2,T,i+1) + # plt.imshow(segmentation[i]) + # plt.subplot(2,T,i+1+T) + # plt.imshow(pedestrian[i]) + # plt.savefig('/home/users/qing01.xu/bevformer/debug_figs/car_ped_occ.jpg') + # plt.close() + + return segmentation, pedestrian + + def _get_poly_region_in_image(self,param): + lidar2cv_rot = np.array([[1,0], [0,-1]]) + x_a,y_a,yaw_a,agent_length, agent_width = param + trans_a = np.array([[x_a,y_a]]).T + 
rot_mat_a = np.array([[np.cos(yaw_a), -np.sin(yaw_a)],
+                              [np.sin(yaw_a), np.cos(yaw_a)]])
+        agent_corner = np.array([
+            [agent_length/2, -agent_length/2, -agent_length/2, agent_length/2],
+            [agent_width/2, agent_width/2, -agent_width/2, -agent_width/2]])  # (2,4)
+        agent_corner_lidar = np.matmul(rot_mat_a, agent_corner) + trans_a  # (2,4)
+        # convert to cv frame
+        agent_corner_cv2 = (np.matmul(lidar2cv_rot, agent_corner_lidar) \
+            - self.bev_start_position[:2,None] + self.bev_resolution[:2,None] / 2.0).T / self.bev_resolution[:2]  # (4,2)
+        agent_corner_cv2 = np.round(agent_corner_cv2).astype(np.int32)
+
+        return agent_corner_cv2
+
+    def evaluate_single_coll(self, traj, segmentation, input_gt):
+        '''
+        traj: torch.Tensor (n_future, 2)
+            trajectories are expressed in the ego-vehicle lidar frame:
+            ^ y
+            |
+            |
+            0-------> x
+        segmentation: torch.Tensor (n_future, 200, 200)
+        '''
+        pts = np.array([
+            [-self.H / 2. + 0.5, self.W / 2.],
+            [self.H / 2. + 0.5, self.W / 2.],
+            [self.H / 2. + 0.5, -self.W / 2.],
+            [-self.H / 2. + 0.5, -self.W / 2.],
+        ])
+        pts = (pts - self.bx.cpu().numpy()) / (self.dx.cpu().numpy())
+        pts[:, [0, 1]] = pts[:, [1, 0]]
+        rr, cc = polygon(pts[:, 1], pts[:, 0])
+        rc = np.concatenate([rr[:, None], cc[:, None]], axis=-1)
+
+        n_future, _ = traj.shape
+        trajs = traj.view(n_future, 1, 2)
+        # switch the trajectory to the frame:
+        # ^ x
+        # |
+        # |
+        # 0-------> y
+        trajs_ = copy.deepcopy(trajs)
+        trajs_[:, :, [0, 1]] = trajs_[:, :, [1, 0]]  # in-place swap; deepcopy above protects the original tensor
+        trajs_ = trajs_ / self.dx.to(trajs.device)
+        trajs_ = trajs_.cpu().numpy() + rc  # (n_future, 32, 2)
+
+        r = (self.bev_dimension[0] - trajs_[:, :, 0]).astype(np.int32)
+        r = np.clip(r, 0, self.bev_dimension[0] - 1)
+
+        c = trajs_[:, :, 1].astype(np.int32)
+        c = np.clip(c, 0, self.bev_dimension[1] - 1)
+
+        collision = np.full(n_future, False)
+        for t in range(n_future):
+            rr = r[t]
+            cc = c[t]
+            I = np.logical_and(
+                np.logical_and(rr >= 0, rr < self.bev_dimension[0]),
+                np.logical_and(cc >= 0, cc < self.bev_dimension[1]),
+            )
+            collision[t] = np.any(segmentation[t, rr[I], cc[I]].cpu().numpy())
+
+        return torch.from_numpy(collision).to(device=traj.device)
+
+    def evaluate_coll(
+        self,
+        trajs,
+        gt_trajs,
+        segmentation
+    ):
+        '''
+        trajs: torch.Tensor (B, n_future, 2)
+            trajectories are expressed in the ego-vehicle lidar frame:
+            ^ y
+            |
+            |
+            0-------> x
+        gt_trajs: torch.Tensor (B, n_future, 2)
+        segmentation: torch.Tensor (B, n_future, 200, 200)
+        '''
+        gt_trajs = gt_trajs.to(device=trajs.device)
+        B, n_future, _ = trajs.shape
+        # trajs = trajs * torch.tensor([-1, 1], device=trajs.device)
+        # gt_trajs = gt_trajs * torch.tensor([-1, 1], device=gt_trajs.device)
+
+        obj_coll_sum = torch.zeros(n_future, device=segmentation.device)
+        obj_box_coll_sum = torch.zeros(n_future, device=segmentation.device)
+
+        for i in range(B):
+            gt_box_coll = self.evaluate_single_coll(gt_trajs[i], segmentation[i], input_gt=True)
+
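+            # The lines below map ego-frame metres to BEV grid indices using
+            # bx (centre of the first cell, -49.75 m with this grid) and dx
+            # (cell size, 0.5 m); indices outside the 200x200 grid are masked
+            # out before the segmentation is indexed.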
+            xx, yy = trajs[i, :, 0], trajs[i, :, 1]
+            # convert the trajectory from the lidar frame to image coordinates
+            xi = ((-self.bx[0]/2 - yy) / self.dx[0]).long()
+            yi = ((-self.bx[1]/2 + xx) / self.dx[1]).long()
+
+            m1 = torch.logical_and(
+                torch.logical_and(xi >= 0, xi < self.bev_dimension[0]),
+                torch.logical_and(yi >= 0, yi < self.bev_dimension[1]),
+            ).to(gt_box_coll.device)
+            m1 = torch.logical_and(m1, torch.logical_not(gt_box_coll))
+
+            ti = torch.arange(n_future)
+            obj_coll_sum[ti[m1]] += segmentation[i, ti[m1], xi[m1], yi[m1]].long()
+
+            m2 = torch.logical_not(gt_box_coll)
+            box_coll = self.evaluate_single_coll(trajs[i], segmentation[i], input_gt=False).to(ti.device)
+            obj_box_coll_sum[ti[m2]] += (box_coll[ti[m2]]).long()
+
+        return obj_coll_sum, obj_box_coll_sum
+
+    def compute_L2(self, trajs, gt_trajs):
+        '''
+        trajs: torch.Tensor (n_future, 2)
+        gt_trajs: torch.Tensor (n_future, 2)
+        '''
+        # vectorized alternative:
+        # return torch.sqrt(((trajs[:, :, :2] - gt_trajs[:, :, :2]) ** 2).sum(dim=-1))
+        pred_len = trajs.shape[0]
+        ade = float(
+            sum(
+                torch.sqrt(
+                    (trajs[i, 0] - gt_trajs[i, 0]) ** 2
+                    + (trajs[i, 1] - gt_trajs[i, 1]) ** 2
+                )
+                for i in range(pred_len)
+            )
+            / pred_len
+        )
+
+        return ade
\ No newline at end of file
diff --git a/mmcv/models/dense_heads/planning_head_plugin/planning_metrics.py b/mmcv/models/dense_heads/planning_head_plugin/planning_metrics.py
new file mode 100644
index 0000000..d598bb2
--- /dev/null
+++ b/mmcv/models/dense_heads/planning_head_plugin/planning_metrics.py
@@ -0,0 +1,147 @@
+#---------------------------------------------------------------------------------#
+# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156)  #
+# Source code: https://github.com/OpenDriveLab/UniAD                              #
+# Copyright (c) OpenDriveLab. All rights reserved.
# +#---------------------------------------------------------------------------------# + +import torch +import torch.nn as nn +import numpy as np +from skimage.draw import polygon +from mmcv.metrics.metric import Metric +from ..occ_head_plugin import calculate_birds_eye_view_parameters, gen_dx_bx + + +class UniADPlanningMetric(Metric): + def __init__( + self, + n_future=6, + ): + super().__init__() + dx, bx, _ = gen_dx_bx([-50.0, 50.0, 0.5], [-50.0, 50.0, 0.5], [-10.0, 10.0, 20.0]) + dx, bx = dx[:2], bx[:2] + self.dx = nn.Parameter(dx, requires_grad=False) + self.bx = nn.Parameter(bx, requires_grad=False) + + _, _, self.bev_dimension = calculate_birds_eye_view_parameters( + [-50.0, 50.0, 0.5], [-50.0, 50.0, 0.5], [-10.0, 10.0, 20.0] + ) + self.bev_dimension = self.bev_dimension.numpy() + + self.W = 1.85 + self.H = 4.084 + + self.n_future = n_future + + self.add_state("obj_col", default=torch.zeros(self.n_future), dist_reduce_fx="sum") + self.add_state("obj_box_col", default=torch.zeros(self.n_future), dist_reduce_fx="sum") + self.add_state("L2", default=torch.zeros(self.n_future),dist_reduce_fx="sum") + self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum") + + + def evaluate_single_coll(self, traj, segmentation): + ''' + gt_segmentation + traj: torch.Tensor (n_future, 2) + segmentation: torch.Tensor (n_future, 200, 200) + ''' + pts = np.array([ + [-self.H / 2. + 0.5, self.W / 2.], + [self.H / 2. + 0.5, self.W / 2.], + [self.H / 2. + 0.5, -self.W / 2.], + [-self.H / 2. + 0.5, -self.W / 2.], + ]) + pts = (pts - self.bx.cpu().numpy()) / (self.dx.cpu().numpy()) + pts[:, [0, 1]] = pts[:, [1, 0]] + rr, cc = polygon(pts[:,1], pts[:,0]) + rc = np.concatenate([rr[:,None], cc[:,None]], axis=-1) + + n_future, _ = traj.shape + trajs = traj.view(n_future, 1, 2) + trajs[:,:,[0,1]] = trajs[:,:,[1,0]] # can also change original tensor + trajs = trajs / self.dx + trajs = trajs.cpu().numpy() + rc # (n_future, 32, 2) + + r = trajs[:,:,0].astype(np.int32) + r = np.clip(r, 0, self.bev_dimension[0] - 1) + + c = trajs[:,:,1].astype(np.int32) + c = np.clip(c, 0, self.bev_dimension[1] - 1) + + collision = np.full(n_future, False) + for t in range(n_future): + rr = r[t] + cc = c[t] + I = np.logical_and( + np.logical_and(rr >= 0, rr < self.bev_dimension[0]), + np.logical_and(cc >= 0, cc < self.bev_dimension[1]), + ) + collision[t] = np.any(segmentation[t, rr[I], cc[I]].cpu().numpy()) + + return torch.from_numpy(collision).to(device=traj.device) + + def evaluate_coll(self, trajs, gt_trajs, segmentation): + ''' + trajs: torch.Tensor (B, n_future, 2) + gt_trajs: torch.Tensor (B, n_future, 2) + segmentation: torch.Tensor (B, n_future, 200, 200) + ''' + B, n_future, _ = trajs.shape + trajs = trajs * torch.tensor([-1, 1], device=trajs.device) + gt_trajs = gt_trajs * torch.tensor([-1, 1], device=gt_trajs.device) + + obj_coll_sum = torch.zeros(n_future, device=segmentation.device) + obj_box_coll_sum = torch.zeros(n_future, device=segmentation.device) + + for i in range(B): + gt_box_coll = self.evaluate_single_coll(gt_trajs[i], segmentation[i]) + + xx, yy = trajs[i,:,0], trajs[i, :, 1] + yi = ((yy - self.bx[0]) / self.dx[0]).long() + xi = ((xx - self.bx[1]) / self.dx[1]).long() + + m1 = torch.logical_and( + torch.logical_and(yi >= 0, yi < self.bev_dimension[0]), + torch.logical_and(xi >= 0, xi < self.bev_dimension[1]), + ) + m1 = torch.logical_and(m1, torch.logical_not(gt_box_coll)) + + ti = torch.arange(n_future, device=m1.device) + obj_coll_sum[ti[m1]] += segmentation[i, ti[m1], yi[m1], xi[m1]].long() + + 
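# m2 keeps only the timesteps where the GT trajectory itself is
+            # collision free; box-level collisions are counted just for those
+            # steps, so an annotated GT collision does not penalise the
+            # prediction.
+            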
m2 = torch.logical_not(gt_box_coll) + box_coll = self.evaluate_single_coll(trajs[i], segmentation[i]) + obj_box_coll_sum[ti[m2]] += (box_coll[ti[m2]]).long() + + return obj_coll_sum, obj_box_coll_sum + + def compute_L2(self, trajs, gt_trajs, gt_trajs_mask): + ''' + trajs: torch.Tensor (B, n_future, 3) + gt_trajs: torch.Tensor (B, n_future, 3) + ''' + return torch.sqrt((((trajs[:, :, :2] - gt_trajs[:, :, :2]) ** 2) * gt_trajs_mask).sum(dim=-1)) + + def update(self, trajs, gt_trajs, gt_trajs_mask, segmentation): + ''' + trajs: torch.Tensor (B, n_future, 3) + gt_trajs: torch.Tensor (B, n_future, 3) + segmentation: torch.Tensor (B, n_future, 200, 200) + ''' + assert trajs.shape == gt_trajs.shape + trajs[..., 0] = - trajs[..., 0] + gt_trajs[..., 0] = - gt_trajs[..., 0] + L2 = self.compute_L2(trajs, gt_trajs, gt_trajs_mask) + obj_coll_sum, obj_box_coll_sum = self.evaluate_coll(trajs[:,:,:2], gt_trajs[:,:,:2], segmentation) + + self.obj_col += obj_coll_sum + self.obj_box_col += obj_box_coll_sum + self.L2 += L2.sum(dim=0) + self.total +=len(trajs) + + def compute(self): + return { + 'obj_col': self.obj_col / self.total, + 'obj_box_col': self.obj_box_col / self.total, + 'L2' : self.L2 / self.total + } \ No newline at end of file diff --git a/mmcv/models/dense_heads/rpn_head.py b/mmcv/models/dense_heads/rpn_head.py new file mode 100644 index 0000000..aa3f56b --- /dev/null +++ b/mmcv/models/dense_heads/rpn_head.py @@ -0,0 +1,319 @@ +import copy + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.ops.nms import batched_nms +from mmcv.utils import force_fp32 + +from ..builder import HEADS +from .anchor_head import AnchorHead + + +@HEADS.register_module() +class RPNHead(AnchorHead): + """RPN head. + + Args: + in_channels (int): Number of channels in the input feature map. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ # noqa: W605 + + def __init__(self, + in_channels, + init_cfg=dict(type='Normal', layer='Conv2d', std=0.01), + **kwargs): + super(RPNHead, self).__init__( + 1, in_channels, init_cfg=init_cfg, **kwargs) + + def _init_layers(self): + """Initialize layers of the head.""" + self.rpn_conv = nn.Conv2d( + self.in_channels, self.feat_channels, 3, padding=1) + self.rpn_cls = nn.Conv2d(self.feat_channels, + self.num_anchors * self.cls_out_channels, 1) + self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1) + + def forward_single(self, x): + """Forward feature map of a single scale level.""" + x = self.rpn_conv(x) + x = F.relu(x, inplace=True) + rpn_cls_score = self.rpn_cls(x) + rpn_bbox_pred = self.rpn_reg(x) + return rpn_cls_score, rpn_bbox_pred + + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + img_metas, + gt_bboxes_ignore=None): + """Compute losses of the head. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_anchors * 4, H, W) + gt_bboxes (list[Tensor]): Ground truth bboxes for each image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes_ignore (None | list[Tensor]): specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. 
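+
+        Note:
+            ``AnchorHead.loss`` is called with ``gt_labels=None``: every GT
+            box is treated as the single RPN foreground class, and the
+            returned losses are renamed to ``loss_rpn_cls`` / ``loss_rpn_bbox``
+            so they stay distinguishable from the R-CNN head's losses.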
+ """ + losses = super(RPNHead, self).loss( + cls_scores, + bbox_preds, + gt_bboxes, + None, + img_metas, + gt_bboxes_ignore=gt_bboxes_ignore) + return dict( + loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox']) + + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def get_bboxes(self, + cls_scores, + bbox_preds, + img_metas, + cfg=None, + rescale=False, + with_nms=True): + """Transform network output for a batch into bbox predictions. + + Args: + cls_scores (list[Tensor]): Box scores for each scale level + Has shape (N, num_anchors * num_classes, H, W) + bbox_preds (list[Tensor]): Box energies / deltas for each scale + level with shape (N, num_anchors * 4, H, W) + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + cfg (mmcv.Config | None): Test / postprocessing configuration, + if None, test_cfg would be used + rescale (bool): If True, return boxes in original image space. + Default: False. + with_nms (bool): If True, do nms before return boxes. + Default: True. + + Returns: + list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. + The first item is an (n, 5) tensor, where the first 4 columns + are bounding box positions (tl_x, tl_y, br_x, br_y) and the + 5-th column is a score between 0 and 1. The second item is a + (n,) tensor where each item is the predicted class label of the + corresponding box. + """ + assert with_nms, '``with_nms`` in RPNHead should always True' + assert len(cls_scores) == len(bbox_preds) + num_levels = len(cls_scores) + device = cls_scores[0].device + featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] + mlvl_anchors = self.anchor_generator.grid_anchors( + featmap_sizes, device=device) + + result_list = [] + for img_id in range(len(img_metas)): + cls_score_list = [ + cls_scores[i][img_id].detach() for i in range(num_levels) + ] + bbox_pred_list = [ + bbox_preds[i][img_id].detach() for i in range(num_levels) + ] + img_shape = img_metas[img_id]['img_shape'] + scale_factor = img_metas[img_id]['scale_factor'] + proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list, + mlvl_anchors, img_shape, + scale_factor, cfg, rescale) + result_list.append(proposals) + return result_list + + def _get_bboxes_single(self, + cls_scores, + bbox_preds, + mlvl_anchors, + img_shape, + scale_factor, + cfg, + rescale=False): + """Transform outputs for a single batch item into bbox predictions. + + Args: + cls_scores (list[Tensor]): Box scores of all scale level + each item has shape (num_anchors * num_classes, H, W). + bbox_preds (list[Tensor]): Box energies / deltas of all + scale level, each item has shape (num_anchors * 4, H, W). + mlvl_anchors (list[Tensor]): Anchors of all scale level + each item has shape (num_total_anchors, 4). + img_shape (tuple[int]): Shape of the input image, + (height, width, 3). + scale_factor (ndarray): Scale factor of the image arrange as + (w_scale, h_scale, w_scale, h_scale). + cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used. + rescale (bool): If True, return boxes in original image space. + Default: False. + + Returns: + Tensor: Labeled boxes in shape (n, 5), where the first 4 columns + are bounding box positions (tl_x, tl_y, br_x, br_y) and the + 5-th column is a score between 0 and 1. 
+ """ + cfg = self.test_cfg if cfg is None else cfg + cfg = copy.deepcopy(cfg) + # bboxes from different level should be independent during NMS, + # level_ids are used as labels for batched NMS to separate them + level_ids = [] + mlvl_scores = [] + mlvl_bbox_preds = [] + mlvl_valid_anchors = [] + for idx in range(len(cls_scores)): + rpn_cls_score = cls_scores[idx] + rpn_bbox_pred = bbox_preds[idx] + assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] + rpn_cls_score = rpn_cls_score.permute(1, 2, 0) + if self.use_sigmoid_cls: + rpn_cls_score = rpn_cls_score.reshape(-1) + scores = rpn_cls_score.sigmoid() + else: + rpn_cls_score = rpn_cls_score.reshape(-1, 2) + # We set FG labels to [0, num_class-1] and BG label to + # num_class in RPN head since mmdet v2.5, which is unified to + # be consistent with other head since mmdet v2.0. In mmdet v2.0 + # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head. + scores = rpn_cls_score.softmax(dim=1)[:, 0] + rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4) + anchors = mlvl_anchors[idx] + if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre: + # sort is faster than topk + # _, topk_inds = scores.topk(cfg.nms_pre) + ranked_scores, rank_inds = scores.sort(descending=True) + topk_inds = rank_inds[:cfg.nms_pre] + scores = ranked_scores[:cfg.nms_pre] + rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] + anchors = anchors[topk_inds, :] + mlvl_scores.append(scores) + mlvl_bbox_preds.append(rpn_bbox_pred) + mlvl_valid_anchors.append(anchors) + level_ids.append( + scores.new_full((scores.size(0), ), idx, dtype=torch.long)) + + scores = torch.cat(mlvl_scores) + anchors = torch.cat(mlvl_valid_anchors) + rpn_bbox_pred = torch.cat(mlvl_bbox_preds) + proposals = self.bbox_coder.decode( + anchors, rpn_bbox_pred, max_shape=img_shape) + ids = torch.cat(level_ids) + + if cfg.min_bbox_size > 0: + w = proposals[:, 2] - proposals[:, 0] + h = proposals[:, 3] - proposals[:, 1] + valid_mask = (w >= cfg.min_bbox_size) & (h >= cfg.min_bbox_size) + if not valid_mask.all(): + proposals = proposals[valid_mask] + scores = scores[valid_mask] + ids = ids[valid_mask] + if proposals.numel() > 0: + dets, keep = batched_nms(proposals, scores, ids, cfg.nms) + else: + return proposals.new_zeros(0, 5) + + return dets[:cfg.max_per_img] + + def onnx_export(self, x, img_metas): + """Test without augmentation. + + Args: + x (tuple[Tensor]): Features from the upstream network, each is + a 4D-tensor. + img_metas (list[dict]): Meta info of each image. + + Returns: + tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] + and class labels of shape [N, num_det]. 
+ """ + cls_scores, bbox_preds = self(x) + + assert len(cls_scores) == len(bbox_preds) + num_levels = len(cls_scores) + + device = cls_scores[0].device + featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] + mlvl_anchors = self.anchor_generator.grid_anchors( + featmap_sizes, device=device) + + cls_scores = [cls_scores[i].detach() for i in range(num_levels)] + bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)] + + assert len( + img_metas + ) == 1, 'Only support one input image while in exporting to ONNX' + img_shapes = img_metas[0]['img_shape_for_onnx'] + + cfg = copy.deepcopy(self.test_cfg) + + mlvl_scores = [] + mlvl_bbox_preds = [] + mlvl_valid_anchors = [] + batch_size = cls_scores[0].shape[0] + nms_pre_tensor = torch.tensor( + cfg.nms_pre, device=cls_scores[0].device, dtype=torch.long) + for idx in range(len(cls_scores)): + rpn_cls_score = cls_scores[idx] + rpn_bbox_pred = bbox_preds[idx] + assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] + rpn_cls_score = rpn_cls_score.permute(0, 2, 3, 1) + if self.use_sigmoid_cls: + rpn_cls_score = rpn_cls_score.reshape(batch_size, -1) + scores = rpn_cls_score.sigmoid() + else: + rpn_cls_score = rpn_cls_score.reshape(batch_size, -1, 2) + # We set FG labels to [0, num_class-1] and BG label to + # num_class in RPN head since mmdet v2.5, which is unified to + # be consistent with other head since mmdet v2.0. In mmdet v2.0 + # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head. + scores = rpn_cls_score.softmax(-1)[..., 0] + rpn_bbox_pred = rpn_bbox_pred.permute(0, 2, 3, 1).reshape( + batch_size, -1, 4) + anchors = mlvl_anchors[idx] + anchors = anchors.expand_as(rpn_bbox_pred) + # Get top-k prediction + from mmcv.core.export import get_k_for_topk + nms_pre = get_k_for_topk(nms_pre_tensor, rpn_bbox_pred.shape[1]) + if nms_pre > 0: + _, topk_inds = scores.topk(nms_pre) + batch_inds = torch.arange(batch_size).view( + -1, 1).expand_as(topk_inds) + # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 + # Mind k<=3480 in TensorRT for TopK + transformed_inds = scores.shape[1] * batch_inds + topk_inds + scores = scores.reshape(-1, 1)[transformed_inds].reshape( + batch_size, -1) + rpn_bbox_pred = rpn_bbox_pred.reshape( + -1, 4)[transformed_inds, :].reshape(batch_size, -1, 4) + anchors = anchors.reshape(-1, 4)[transformed_inds, :].reshape( + batch_size, -1, 4) + mlvl_scores.append(scores) + mlvl_bbox_preds.append(rpn_bbox_pred) + mlvl_valid_anchors.append(anchors) + + batch_mlvl_scores = torch.cat(mlvl_scores, dim=1) + batch_mlvl_anchors = torch.cat(mlvl_valid_anchors, dim=1) + batch_mlvl_rpn_bbox_pred = torch.cat(mlvl_bbox_preds, dim=1) + batch_mlvl_proposals = self.bbox_coder.decode( + batch_mlvl_anchors, batch_mlvl_rpn_bbox_pred, max_shape=img_shapes) + + # Use ONNX::NonMaxSuppression in deployment + from mmcv.core.export import add_dummy_nms_for_onnx + batch_mlvl_scores = batch_mlvl_scores.unsqueeze(2) + score_threshold = cfg.nms.get('score_thr', 0.0) + nms_pre = cfg.get('deploy_nms_pre', -1) + dets, _ = add_dummy_nms_for_onnx(batch_mlvl_proposals, + batch_mlvl_scores, cfg.max_per_img, + cfg.nms.iou_threshold, + score_threshold, nms_pre, + cfg.max_per_img) + return dets diff --git a/mmcv/models/dense_heads/seg_head_plugin/__init__.py b/mmcv/models/dense_heads/seg_head_plugin/__init__.py new file mode 100644 index 0000000..4e36f7d --- /dev/null +++ b/mmcv/models/dense_heads/seg_head_plugin/__init__.py @@ -0,0 +1,5 @@ +from .seg_detr_head import SegDETRHead +from 
.seg_mask_head import SegMaskHead +from .seg_deformable_transformer import SegDeformableTransformer +from .seg_assigner import * +from .seg_utils import * \ No newline at end of file diff --git a/mmcv/models/dense_heads/seg_head_plugin/seg_assigner.py b/mmcv/models/dense_heads/seg_head_plugin/seg_assigner.py new file mode 100644 index 0000000..ee25dc2 --- /dev/null +++ b/mmcv/models/dense_heads/seg_head_plugin/seg_assigner.py @@ -0,0 +1,446 @@ +from mmcv.core import mask +import torch +from mmcv.core.bbox.assigners.base_assigner import BaseAssigner + +from mmcv.core.bbox.assigners.assign_result import AssignResult +from mmcv.core.bbox.transforms import bbox_cxcywh_to_xyxy +from mmcv.core.bbox.match_costs import build_match_cost +from mmcv.core.bbox.builder import BBOX_ASSIGNERS +try: + from scipy.optimize import linear_sum_assignment +except ImportError: + linear_sum_assignment = None + +from mmcv.core.bbox.samplers.base_sampler import BaseSampler +from mmcv.core.bbox.builder import BBOX_SAMPLERS +from mmcv.core import mask +import torch + +from mmcv.utils import util_mixins + + +INF = 10000000 + + +class SamplingResult_segformer(util_mixins.NiceRepr): + """ + """ + + def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, gt_masks,assign_result, + gt_flags): + self.pos_inds = pos_inds + self.neg_inds = neg_inds + self.pos_bboxes = bboxes[pos_inds] + self.neg_bboxes = bboxes[neg_inds] + self.pos_is_gt = gt_flags[pos_inds] + + self.num_gts = gt_bboxes.shape[0] + self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 + + + if gt_bboxes.numel() == 0: + # hack for index error case + assert self.pos_assigned_gt_inds.numel() == 0 + self.pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4) + + #print('pos_gt_bboxes',self.pos_gt_bboxes.shape) + #print('gt_mask',gt_masks.shape) + n,h,w = gt_masks.shape + #n = self.pos_gt_bboxes.shape[0] + self.pos_gt_masks = torch.empty_like(gt_masks).view(-1, h,w) + else: + if len(gt_bboxes.shape) < 2: + gt_bboxes = gt_bboxes.view(-1, 4) + + self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :] + self.pos_gt_masks = gt_masks[self.pos_assigned_gt_inds, :] + + if assign_result.labels is not None: + self.pos_gt_labels = assign_result.labels[pos_inds] + else: + self.pos_gt_labels = None + + @property + def bboxes(self): + """torch.Tensor: concatenated positive and negative boxes""" + return torch.cat([self.pos_bboxes, self.neg_bboxes]) + + + def to(self, device): + """Change the device of the data inplace. + + Example: + >>> self = SamplingResult.random() + >>> print(f'self = {self.to(None)}') + >>> # xdoctest: +REQUIRES(--gpu) + >>> print(f'self = {self.to(0)}') + """ + _dict = self.__dict__ + for key, value in _dict.items(): + if isinstance(value, torch.Tensor): + _dict[key] = value.to(device) + return self + + def __nice__(self): + data = self.info.copy() + data['pos_bboxes'] = data.pop('pos_bboxes').shape + data['neg_bboxes'] = data.pop('neg_bboxes').shape + parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())] + body = ' ' + ',\n '.join(parts) + return '{\n' + body + '\n}' + + @property + def info(self): + """Returns a dictionary of info about the object.""" + return { + 'pos_inds': self.pos_inds, + 'neg_inds': self.neg_inds, + 'pos_bboxes': self.pos_bboxes, + 'neg_bboxes': self.neg_bboxes, + 'pos_is_gt': self.pos_is_gt, + 'num_gts': self.num_gts, + 'pos_assigned_gt_inds': self.pos_assigned_gt_inds, + } + + @classmethod + def random(cls, rng=None, **kwargs): + """ + Args: + rng (None | int | numpy.random.RandomState): seed or state. 
+ kwargs (keyword arguments): + - num_preds: number of predicted boxes + - num_gts: number of true boxes + - p_ignore (float): probability of a predicted box assigned to \ + an ignored truth. + - p_assigned (float): probability of a predicted box not being \ + assigned. + - p_use_label (float | bool): with labels or not. + + Returns: + :obj:`SamplingResult`: Randomly generated sampling result. + + Example: + >>> from mmcv.core.bbox.samplers.sampling_result import * # NOQA + >>> self = SamplingResult.random() + >>> print(self.__dict__) + """ + from mmcv.core.bbox.samplers.random_sampler import RandomSampler + from mmcv.core.bbox.assigners.assign_result import AssignResult + from mmcv.core.bbox import demodata + rng = demodata.ensure_rng(rng) + + # make probabalistic? + num = 32 + pos_fraction = 0.5 + neg_pos_ub = -1 + + assign_result = AssignResult.random(rng=rng, **kwargs) + + # Note we could just compute an assignment + bboxes = demodata.random_boxes(assign_result.num_preds, rng=rng) + gt_bboxes = demodata.random_boxes(assign_result.num_gts, rng=rng) + + if rng.rand() > 0.2: + # sometimes algorithms squeeze their data, be robust to that + gt_bboxes = gt_bboxes.squeeze() + bboxes = bboxes.squeeze() + + if assign_result.labels is None: + gt_labels = None + else: + gt_labels = None # todo + + if gt_labels is None: + add_gt_as_proposals = False + else: + add_gt_as_proposals = True # make probabalistic? + + sampler = RandomSampler( + num, + pos_fraction, + neg_pos_ub=neg_pos_ub, + add_gt_as_proposals=add_gt_as_proposals, + rng=rng) + self = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels) + return self + + +@BBOX_SAMPLERS.register_module() +class PseudoSampler_segformer(BaseSampler): + """A pseudo sampler that does not do sampling actually.""" + + def __init__(self, **kwargs): + pass + + def _sample_pos(self, **kwargs): + """Sample positive samples.""" + raise NotImplementedError + + def _sample_neg(self, **kwargs): + """Sample negative samples.""" + raise NotImplementedError + + def sample(self, assign_result, bboxes, gt_bboxes,gt_masks, **kwargs): + """Directly returns the positive and negative indices of samples. + + Args: + assign_result (:obj:`AssignResult`): Assigned results + bboxes (torch.Tensor): Bounding boxes + gt_bboxes (torch.Tensor): Ground truth boxes + + Returns: + :obj:`SamplingResult`: sampler results + """ + pos_inds = torch.nonzero( + assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique() + neg_inds = torch.nonzero( + assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique() + gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8) + sampling_result = SamplingResult_segformer(pos_inds, neg_inds, bboxes, gt_bboxes,gt_masks, + assign_result, gt_flags,**kwargs) + return sampling_result + + +@BBOX_ASSIGNERS.register_module() +class HungarianAssigner_filter(BaseAssigner): + """ + """ + + def __init__(self, + cls_cost=dict(type='ClassificationCost', weight=1.), + reg_cost=dict(type='BBoxL1Cost', weight=1.0), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0), + max_pos = 3 + ): + self.cls_cost = build_match_cost(cls_cost) + self.reg_cost = build_match_cost(reg_cost) + self.iou_cost = build_match_cost(iou_cost) + self.max_pos = max_pos + def assign(self, + bbox_pred, + cls_pred, + gt_bboxes, + gt_labels, + img_meta, + gt_bboxes_ignore=None, + eps=1e-7): + """ + """ + assert gt_bboxes_ignore is None, \ + 'Only case when gt_bboxes_ignore is None is supported.' + num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) + + # 1. 
assign -1 by default + assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), + -1, + dtype=torch.long) + + assigned_labels = bbox_pred.new_full((num_bboxes, ),-1,dtype=torch.long) + + if num_gts == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + if num_gts == 0: + assigned_gt_inds[:] = 0 + # No ground truth, assign all to background + pos_ind = assigned_gt_inds.gt(0).nonzero().squeeze(1) + neg_ind = assigned_gt_inds.eq(0).nonzero().squeeze(1) + # No ground truth, assign all to background + return pos_ind, neg_ind, AssignResult( + num_gts, assigned_gt_inds, None, labels=assigned_labels) + img_h, img_w, _ = img_meta['img_shape'] + factor = gt_bboxes.new_tensor([img_w, img_h, img_w, + img_h]).unsqueeze(0) + + # 2. compute the weighted costs + # classification and bboxcost. + + cls_cost = self.cls_cost(cls_pred, gt_labels) + # regression L1 cost + normalize_gt_bboxes = gt_bboxes / factor + reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes) + # regression iou cost, defaultly giou is used in official DETR. + bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor + iou_cost = self.iou_cost(bboxes, gt_bboxes) + # weighted sum of above three cost + + cost = cls_cost + reg_cost + iou_cost + + # 3. do Hungarian matching on CPU using linear_sum_assignment + cost = cost.detach().cpu() + + assigned_gt_inds[:] = 0 + #index_set = [] + + if linear_sum_assignment is None: + raise ImportError('Please run "pip install scipy" ' + 'to install scipy first.') + result=None + for i in range(max(min(self.max_pos, 300//num_gts),1)): + matched_row_inds, matched_col_inds = linear_sum_assignment(cost) + + matched_row_inds = torch.from_numpy(matched_row_inds).to( + bbox_pred.device) + matched_col_inds = torch.from_numpy(matched_col_inds).to( + bbox_pred.device) + #print(matched_row_inds) + + cost[matched_row_inds,:] = INF + #index_set.(matched_row_inds) + #print('this mathed row inds ', len(matched_row_inds), i) + assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 + assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] + if i == 0: + result = AssignResult(num_gts, assigned_gt_inds.clone(), None, labels=assigned_labels.clone()) + if cost[matched_row_inds.cpu(), matched_col_inds.cpu()].max()>=INF: + break + pos_ind = assigned_gt_inds.gt(0).nonzero().squeeze(1) + neg_ind = assigned_gt_inds.eq(0).nonzero().squeeze(1) + + return pos_ind, neg_ind, result + + + +@BBOX_ASSIGNERS.register_module() +class HungarianAssigner_multi_info(BaseAssigner): + """Computes one-to-one matching between predictions and ground truth. + + This class computes an assignment between the targets and the predictions + based on the costs. The costs are weighted sum of three components: + classification cost, regression L1 cost and regression iou cost. The + targets don't include the no_object, so generally there are more + predictions than targets. After the one-to-one matching, the un-matched + are treated as backgrounds. Thus each query prediction will be assigned + with `0` or a positive integer indicating the ground truth index: + + - 0: negative sample, no assigned gt + - positive integer: positive sample, index (1-based) of assigned gt + + Args: + cls_weight (int | float, optional): The scale factor for classification + cost. Default 1.0. + bbox_weight (int | float, optional): The scale factor for regression + L1 cost. Default 1.0. + iou_weight (int | float, optional): The scale factor for regression + iou cost. Default 1.0. + iou_calculator (dict | optional): The config for the iou calculation. 
+ Default type `BboxOverlaps2D`. + iou_mode (str | optional): "iou" (intersection over union), "iof" + (intersection over foreground), or "giou" (generalized + intersection over union). Default "giou". + """ + + def __init__(self, + cls_cost=dict(type='ClassificationCost', weight=1.), + reg_cost=dict(type='BBoxL1Cost', weight=1.0), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0), + mask_cost=dict(type='DiceCost', weight=1.0) + + ): + cls_cost['weight'] *= 2 + self.cls_cost = build_match_cost(cls_cost) + self.reg_cost = build_match_cost(reg_cost) + self.iou_cost = build_match_cost(iou_cost) + self.mask_cost = build_match_cost(mask_cost) + + + def assign(self, + bbox_pred, + cls_pred, + mask_pred, + gt_bboxes, + gt_labels, + gt_mask, + img_meta, + gt_bboxes_ignore=None, + eps=1e-7): + """Computes one-to-one matching based on the weighted costs. + + This method assign each query prediction to a ground truth or + background. The `assigned_gt_inds` with -1 means don't care, + 0 means negative sample, and positive number is the index (1-based) + of assigned gt. + The assignment is done in the following steps, the order matters. + + 1. assign every prediction to -1 + 2. compute the weighted costs + 3. do Hungarian matching on CPU based on the costs + 4. assign all to 0 (background) first, then for each matched pair + between predictions and gts, treat this prediction as foreground + and assign the corresponding gt index (plus 1) to it. + + Args: + bbox_pred (Tensor): Predicted boxes with normalized coordinates + (cx, cy, w, h), which are all in range [0, 1]. Shape + [num_query, 4]. + cls_pred (Tensor): Predicted classification logits, shape + [num_query, num_class]. + gt_bboxes (Tensor): Ground truth boxes with unnormalized + coordinates (x1, y1, x2, y2). Shape [num_gt, 4]. + gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). + img_meta (dict): Meta information for current image. + gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are + labelled as `ignored`. Default None. + eps (int | float, optional): A value added to the denominator for + numerical stability. Default 1e-7. + + Returns: + :obj:`AssignResult`: The assigned result. + """ + assert gt_bboxes_ignore is None, \ + 'Only case when gt_bboxes_ignore is None is supported.' + #print(bbox_pred.shape, cls_pred.shape,mask_pred.shape,gt_bboxes.shape,gt_labels.shape,gt_mask.shape) + num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) + + # 1. assign -1 by default + assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), + -1, + dtype=torch.long) + + assigned_labels = bbox_pred.new_full((num_bboxes, ), + -1, + dtype=torch.long) + + if num_gts == 0 or num_bboxes == 0: + # No ground truth or boxes, return empty assignment + if num_gts == 0: + # No ground truth, assign all to background + assigned_gt_inds[:] = 0 + return AssignResult( + num_gts, assigned_gt_inds, None, labels=assigned_labels) + img_h, img_w, _ = img_meta['img_shape'] + + factor = bbox_pred.new_tensor([img_w, img_h, img_w,img_h]).unsqueeze(0) + + + # classification and bboxcost. + cls_cost = self.cls_cost(cls_pred, gt_labels) + # regression L1 cost + normalize_gt_bboxes = gt_bboxes / factor + reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes) + # regression iou cost, defaultly giou is used in official DETR. 
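+        # The total cost is the sum of four terms (classification, L1 box,
+        # GIoU and mask Dice); the Hungarian solver then selects the
+        # query-GT pairing with minimum overall cost.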
+        bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor
+        iou_cost = self.iou_cost(bboxes, gt_bboxes)
+        # mask Dice cost
+        mask_cost = self.mask_cost(mask_pred, gt_mask)
+        # weighted sum of the four costs above
+        cost = cls_cost + reg_cost + iou_cost + mask_cost
+
+        # 3. do Hungarian matching on CPU using linear_sum_assignment
+        cost = cost.detach().cpu()
+        if linear_sum_assignment is None:
+            raise ImportError('Please run "pip install scipy" '
+                              'to install scipy first.')
+        matched_row_inds, matched_col_inds = linear_sum_assignment(cost)
+        matched_row_inds = torch.from_numpy(matched_row_inds).to(
+            bbox_pred.device)
+        matched_col_inds = torch.from_numpy(matched_col_inds).to(
+            bbox_pred.device)
+        # 4. assign backgrounds and foregrounds
+        # assign all indices to backgrounds first
+        assigned_gt_inds[:] = 0
+        # assign foregrounds based on matching results
+        assigned_gt_inds[matched_row_inds] = matched_col_inds + 1
+        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
+        return AssignResult(
+            num_gts, assigned_gt_inds, None, labels=assigned_labels)
\ No newline at end of file
diff --git a/mmcv/models/dense_heads/seg_head_plugin/seg_deformable_transformer.py b/mmcv/models/dense_heads/seg_head_plugin/seg_deformable_transformer.py
new file mode 100644
index 0000000..83c06af
--- /dev/null
+++ b/mmcv/models/dense_heads/seg_head_plugin/seg_deformable_transformer.py
@@ -0,0 +1,385 @@
+from mmcv.utils import force_fp32
+from mmcv.models.utils.builder import TRANSFORMER
+from mmcv.models.utils import Transformer
+import warnings
+import math
+import copy
+import torch
+import torch.nn as nn
+from mmcv.models.bricks import build_activation_layer, build_norm_layer
+from mmcv.models.utils import xavier_init
+from mmcv.models.bricks.registry import (TRANSFORMER_LAYER,
+                                         TRANSFORMER_LAYER_SEQUENCE)
+from mmcv.models.bricks.transformer import (BaseTransformerLayer,
+                                            MultiScaleDeformableAttention,
+                                            TransformerLayerSequence,
+                                            build_transformer_layer_sequence)
+from mmcv.models.backbones.base_module import BaseModule
+from torch.nn.init import normal_
+
+from mmcv.models.bricks.registry import ATTENTION
+from torch import einsum
+
+from einops import rearrange, repeat
+from einops.layers.torch import Rearrange
+
+# Copy-paste from deformable DETR in mmcv.
+@TRANSFORMER.register_module()
+class SegDeformableTransformer(Transformer):
+    """Implements the DeformableDETR transformer.
+
+    Args:
+        as_two_stage (bool): Generate query from encoder features.
+            Default: False.
+        num_feature_levels (int): Number of feature maps from FPN.
+            Default: 4.
+        two_stage_num_proposals (int): Number of proposals when
+            `as_two_stage` is True. Default: 300.
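+
+    Example:
+        A minimal config sketch (illustrative only; the encoder/decoder
+        sub-configs below follow the usual deformable-DETR layer types and
+        values, which are assumptions rather than requirements)::
+
+            transformer = dict(
+                type='SegDeformableTransformer',
+                as_two_stage=False,
+                num_feature_levels=4,
+                encoder=dict(
+                    type='DetrTransformerEncoder',
+                    num_layers=6,
+                    transformerlayers=dict(
+                        type='BaseTransformerLayer',
+                        attn_cfgs=dict(
+                            type='MultiScaleDeformableAttention',
+                            embed_dims=256),
+                        feedforward_channels=512,
+                        operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
+                decoder=dict(
+                    type='DeformableDetrTransformerDecoder',
+                    num_layers=6,
+                    return_intermediate=True,
+                    transformerlayers=dict(
+                        type='DetrTransformerDecoderLayer',
+                        attn_cfgs=[
+                            dict(type='MultiheadAttention',
+                                 embed_dims=256,
+                                 num_heads=8),
+                            dict(type='MultiScaleDeformableAttention',
+                                 embed_dims=256)
+                        ],
+                        feedforward_channels=512,
+                        operation_order=('self_attn', 'norm', 'cross_attn',
+                                         'norm', 'ffn', 'norm'))))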
+ """ + def __init__(self, + as_two_stage=False, + num_feature_levels=4, + two_stage_num_proposals=300, + **kwargs): + super(SegDeformableTransformer, self).__init__(**kwargs) + self.fp16_enabled = False + self.as_two_stage = as_two_stage + self.num_feature_levels = num_feature_levels + self.two_stage_num_proposals = two_stage_num_proposals + self.embed_dims = self.encoder.embed_dims + self.init_layers() + + def init_layers(self): + """Initialize layers of the DeformableDetrTransformer.""" + self.level_embeds = nn.Parameter( + torch.Tensor(self.num_feature_levels, self.embed_dims)) + + if self.as_two_stage: + self.enc_output = nn.Linear(self.embed_dims, self.embed_dims) + self.enc_output_norm = nn.LayerNorm(self.embed_dims) + self.pos_trans = nn.Linear(self.embed_dims * 2, + self.embed_dims * 2) + self.pos_trans_norm = nn.LayerNorm(self.embed_dims * 2) + else: + self.reference_points = nn.Linear(self.embed_dims, 2) + + def init_weights(self): + """Initialize the transformer weights.""" + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + for m in self.modules(): + if isinstance(m, MultiScaleDeformableAttention): + try: + m.init_weight() + except: + m.init_weights() + if not self.as_two_stage: + xavier_init(self.reference_points, distribution='uniform', bias=0.) + normal_(self.level_embeds) + + def gen_encoder_output_proposals(self, memory, memory_padding_mask, + spatial_shapes): + """Generate proposals from encoded memory. + + Args: + memory (Tensor) : The output of encoder, + has shape (bs, num_key, embed_dim). num_key is + equal the number of points on feature map from + all level. + memory_padding_mask (Tensor): Padding mask for memory. + has shape (bs, num_key). + spatial_shapes (Tensor): The shape of all feature maps. + has shape (num_level, 2). + + Returns: + tuple: A tuple of feature map and bbox prediction. + + - output_memory (Tensor): The input of decoder, \ + has shape (bs, num_key, embed_dim). num_key is \ + equal the number of points on feature map from \ + all levels. + - output_proposals (Tensor): The normalized proposal \ + after a inverse sigmoid, has shape \ + (bs, num_keys, 4). 
+ """ + + N, S, C = memory.shape + proposals = [] + _cur = 0 + for lvl, (H, W) in enumerate(spatial_shapes): + mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H * W)].view( + N, H, W, 1) + valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1) + valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1) + + grid_y, grid_x = torch.meshgrid( + torch.linspace(0, + H - 1, + H, + dtype=torch.float32, + device=memory.device), + torch.linspace(0, + W - 1, + W, + dtype=torch.float32, + device=memory.device)) + grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) + + scale = torch.cat([valid_W.unsqueeze(-1), + valid_H.unsqueeze(-1)], 1).view(N, 1, 1, 2) + grid = (grid.unsqueeze(0).expand(N, -1, -1, -1) + 0.5) / scale + wh = torch.ones_like(grid) * 0.05 * (2.0**lvl) + proposal = torch.cat((grid, wh), -1).view(N, -1, 4) + proposals.append(proposal) + _cur += (H * W) + output_proposals = torch.cat(proposals, 1) + output_proposals_valid = ((output_proposals > 0.01) & + (output_proposals < 0.99)).all(-1, + keepdim=True) + output_proposals = torch.log(output_proposals / (1 - output_proposals)) + output_proposals = output_proposals.masked_fill( + memory_padding_mask.unsqueeze(-1), float('inf')) + output_proposals = output_proposals.masked_fill( + ~output_proposals_valid, float('inf')) + + output_memory = memory + output_memory = output_memory.masked_fill( + memory_padding_mask.unsqueeze(-1), float(0)) + output_memory = output_memory.masked_fill(~output_proposals_valid, + float(0)) + output_memory = self.enc_output_norm(self.enc_output(output_memory)) + return output_memory, output_proposals + + @staticmethod + def get_reference_points(spatial_shapes, valid_ratios, device): + """Get the reference points used in decoder. + + Args: + spatial_shapes (Tensor): The shape of all + feature maps, has shape (num_level, 2). + valid_ratios (Tensor): The radios of valid + points on the feature map, has shape + (bs, num_levels, 2) + device (obj:`device`): The device where + reference_points should be. + + Returns: + Tensor: reference points used in decoder, has \ + shape (bs, num_keys, num_levels, 2). 
+ """ + reference_points_list = [] + for lvl, (H, W) in enumerate(spatial_shapes): + # TODO check this 0.5 + ref_y, ref_x = torch.meshgrid( + torch.linspace(0.5, + H - 0.5, + H, + dtype=torch.float32, + device=device), + torch.linspace(0.5, + W - 0.5, + W, + dtype=torch.float32, + device=device)) + ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * + H) + ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * + W) + ref = torch.stack((ref_x, ref_y), -1) + reference_points_list.append(ref) + reference_points = torch.cat(reference_points_list, 1) + reference_points = reference_points[:, :, None] * valid_ratios[:, None] + return reference_points + + def get_valid_ratio(self, mask): + """Get the valid radios of feature maps of all level.""" + _, H, W = mask.shape + valid_H = torch.sum(~mask[:, :, 0], 1) + valid_W = torch.sum(~mask[:, 0, :], 1) + valid_ratio_h = valid_H.float() / H + valid_ratio_w = valid_W.float() / W + valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1) + return valid_ratio + + def get_proposal_pos_embed(self, + proposals, + num_pos_feats=128, + temperature=10000): + """Get the position embedding of proposal.""" + scale = 2 * math.pi + dim_t = torch.arange(num_pos_feats, + dtype=torch.float32, + device=proposals.device) + dim_t = temperature**(2 * (dim_t // 2) / num_pos_feats) + # N, L, 4 + proposals = proposals.sigmoid() * scale + # N, L, 4, 128 + pos = proposals[:, :, :, None] / dim_t + # N, L, 4, 64, 2 + pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), + dim=4).flatten(2) + return pos + + @force_fp32(apply_to=('mlvl_feats', 'query_embed', 'mlvl_pos_embeds')) + def forward(self, + mlvl_feats, + mlvl_masks, + query_embed, + mlvl_pos_embeds, + reg_branches=None, + cls_branches=None, + **kwargs): + """Forward function for `Transformer`. + + Args: + mlvl_feats (list(Tensor)): Input queries from + different level. Each element has shape + [bs, embed_dims, h, w]. + mlvl_masks (list(Tensor)): The key_padding_mask from + different level used for encoder and decoder, + each element has shape [bs, h, w]. + query_embed (Tensor): The query embedding for decoder, + with shape [num_query, c]. + mlvl_pos_embeds (list(Tensor)): The positional encoding + of feats from different level, has the shape + [bs, embed_dims, h, w]. + reg_branches (obj:`nn.ModuleList`): Regression heads for + feature maps from each decoder layer. Only would + be passed when + `with_box_refine` is True. Default to None. + cls_branches (obj:`nn.ModuleList`): Classification heads + for feature maps from each decoder layer. Only would + be passed when `as_two_stage` + is True. Default to None. + + + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + + - inter_states: Outputs from decoder. If + return_intermediate_dec is True output has shape \ + (num_dec_layers, bs, num_query, embed_dims), else has \ + shape (1, bs, num_query, embed_dims). + - init_reference_out: The initial value of reference \ + points, has shape (bs, num_queries, 4). + - inter_references_out: The internal value of reference \ + points in decoder, has shape \ + (num_dec_layers, bs,num_query, embed_dims) + - enc_outputs_class: The classification score of \ + proposals generated from \ + encoder's feature maps, has shape \ + (batch, h*w, num_classes). \ + Only would be returned when `as_two_stage` is True, \ + otherwise None. + - enc_outputs_coord_unact: The regression results \ + generated from encoder's feature maps., has shape \ + (batch, h*w, 4). 
Only would \ + be returned when `as_two_stage` is True, \ + otherwise None. + """ + + assert self.as_two_stage or query_embed is not None + feat_flatten = [] + mask_flatten = [] + lvl_pos_embed_flatten = [] + spatial_shapes = [] + for lvl, (feat, mask, pos_embed) in enumerate( + zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)): + bs, c, h, w = feat.shape + spatial_shape = (h, w) + spatial_shapes.append(spatial_shape) + feat = feat.flatten(2).transpose(1, 2) + mask = mask.flatten(1) + pos_embed = pos_embed.flatten(2).transpose(1, 2) + lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1) + lvl_pos_embed_flatten.append(lvl_pos_embed) + feat_flatten.append(feat) + mask_flatten.append(mask) + feat_flatten = torch.cat(feat_flatten, 1) + mask_flatten = torch.cat(mask_flatten, 1) + lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) + spatial_shapes = torch.as_tensor(spatial_shapes, + dtype=torch.long, + device=feat_flatten.device) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) + valid_ratios = torch.stack( + [self.get_valid_ratio(m) for m in mlvl_masks], 1) + + reference_points = \ + self.get_reference_points(spatial_shapes, + valid_ratios, + device=feat.device) + + feat_flatten = feat_flatten.permute(1, 0, 2) # (H*W, bs, embed_dims) + lvl_pos_embed_flatten = lvl_pos_embed_flatten.permute( + 1, 0, 2) # (H*W, bs, embed_dims) + memory = self.encoder(query=feat_flatten, + key=None, + value=None, + query_pos=lvl_pos_embed_flatten, + query_key_padding_mask=mask_flatten, + spatial_shapes=spatial_shapes, + reference_points=reference_points, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + **kwargs) + + memory = memory.permute(1, 0, 2) + bs, _, c = memory.shape + if self.as_two_stage: + output_memory, output_proposals = \ + self.gen_encoder_output_proposals( + memory, mask_flatten, spatial_shapes) + enc_outputs_class = cls_branches[self.decoder.num_layers]( + output_memory) + enc_outputs_coord_unact = \ + reg_branches[ + self.decoder.num_layers](output_memory) + output_proposals + + topk = self.two_stage_num_proposals + topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, + dim=1)[1] + topk_coords_unact = torch.gather( + enc_outputs_coord_unact, 1, + topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) + topk_coords_unact = topk_coords_unact.detach() + reference_points = topk_coords_unact.sigmoid() + init_reference_out = reference_points + pos_trans_out = self.pos_trans_norm( + self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact))) + query_pos, query = torch.split(pos_trans_out, c, dim=2) + else: + #print('query_embd',query_embed.shape, c) + # query_embed N *(2C) + query_pos, query = torch.split(query_embed, c, dim=1) + query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1) + query = query.unsqueeze(0).expand(bs, -1, -1) + reference_points = self.reference_points(query_pos).sigmoid() + init_reference_out = reference_points + + # decoder + query = query.permute(1, 0, 2) + memory = memory.permute(1, 0, 2) + query_pos = query_pos.permute(1, 0, 2) + inter_states, inter_references = self.decoder( + query=query, + key=None, + value=memory, + query_pos=query_pos, + key_padding_mask=mask_flatten, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + reg_branches=reg_branches, + **kwargs) + inter_references_out = inter_references + if self.as_two_stage: + return (memory,lvl_pos_embed_flatten,mask_flatten,query_pos), 
inter_states, init_reference_out,\ + inter_references_out, enc_outputs_class,\ + enc_outputs_coord_unact + return (memory,lvl_pos_embed_flatten,mask_flatten,query_pos), inter_states, init_reference_out, \ + inter_references_out, None, None \ No newline at end of file diff --git a/mmcv/models/dense_heads/seg_head_plugin/seg_detr_head.py b/mmcv/models/dense_heads/seg_head_plugin/seg_detr_head.py new file mode 100644 index 0000000..06d0352 --- /dev/null +++ b/mmcv/models/dense_heads/seg_head_plugin/seg_detr_head.py @@ -0,0 +1,689 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.models.bricks import Conv2d, Linear, build_activation_layer +from mmcv.models.bricks.transformer import FFN, build_positional_encoding +from mmcv.utils import force_fp32 + +from mmcv.core.bbox.transforms import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh +from mmcv.core.bbox.builder import build_assigner, build_sampler +from mmcv.core.utils import multi_apply, reduce_mean +from mmcv.models.utils import build_transformer + +from mmcv.models.dense_heads.anchor_free_head import AnchorFreeHead +from mmcv.models.builder import HEADS, build_loss + + +@HEADS.register_module() +class SegDETRHead( + AnchorFreeHead +): # I modify DETRHead to make it to support panoptic segmentation + """Implements the DETR transformer head. + + See `paper: End-to-End Object Detection with Transformers + `_ for details. + + Args: + num_classes (int): Number of categories excluding the background. + in_channels (int): Number of channels in the input feature map. + num_query (int): Number of query in Transformer. + num_reg_fcs (int, optional): Number of fully-connected layers used in + `FFN`, which is then used for the regression head. Default 2. + transformer (obj:`mmcv.ConfigDict`|dict): Config for transformer. + Default: None. + sync_cls_avg_factor (bool): Whether to sync the avg_factor of + all ranks. Default to False. + positional_encoding (obj:`mmcv.ConfigDict`|dict): + Config for position encoding. + loss_cls (obj:`mmcv.ConfigDict`|dict): Config of the + classification loss. Default `CrossEntropyLoss`. + loss_bbox (obj:`mmcv.ConfigDict`|dict): Config of the + regression loss. Default `L1Loss`. + loss_iou (obj:`mmcv.ConfigDict`|dict): Config of the + regression iou loss. Default `GIoULoss`. + tran_cfg (obj:`mmcv.ConfigDict`|dict): Training config of + transformer head. + test_cfg (obj:`mmcv.ConfigDict`|dict): Testing config of + transformer head. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + _version = 2 + + def __init__( + self, + num_classes, + num_things_classes, + num_stuff_classes, + in_channels, + num_query=100, + num_reg_fcs=2, + transformer=None, + sync_cls_avg_factor=False, + positional_encoding=dict(type='SinePositionalEncoding', + num_feats=128, + normalize=True), + loss_cls=dict(type='CrossEntropyLoss', + bg_cls_weight=0.1, + use_sigmoid=False, + loss_weight=1.0, + class_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=5.0), + loss_iou=dict(type='GIoULoss', loss_weight=2.0), + train_cfg=dict(assigner=dict( + type='HungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=1.), + reg_cost=dict(type='BBoxL1Cost', weight=5.0), + iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))), + test_cfg=dict(max_per_img=100), + init_cfg=None, + **kwargs): + # NOTE here use `AnchorFreeHead` instead of `TransformerHead`, + # since it brings inconvenience when the initialization of + # `AnchorFreeHead` is called. 
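+        # Calling ``super(AnchorFreeHead, self).__init__`` skips
+        # ``AnchorFreeHead.__init__`` on purpose: all layers are built
+        # manually in ``_init_layers`` below, while the underlying
+        # ``BaseModule`` initialization still runs.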
+ super(AnchorFreeHead, self).__init__(init_cfg) + self.bg_cls_weight = 0 + self.sync_cls_avg_factor = sync_cls_avg_factor + class_weight = loss_cls.get('class_weight', None) + if class_weight is not None and (self.__class__ is SegDETRHead): + assert isinstance(class_weight, float), 'Expected ' \ + 'class_weight to have type float. Found ' \ + f'{type(class_weight)}.' + # NOTE following the official DETR rep0, bg_cls_weight means + # relative classification weight of the no-object class. + bg_cls_weight = loss_cls.get('bg_cls_weight', class_weight) + assert isinstance(bg_cls_weight, float), 'Expected ' \ + 'bg_cls_weight to have type float. Found ' \ + f'{type(bg_cls_weight)}.' + class_weight = torch.ones(num_things_classes + 1) * class_weight + # set background class as the last indice + class_weight[num_things_classes] = bg_cls_weight + loss_cls.update({'class_weight': class_weight}) + if 'bg_cls_weight' in loss_cls: + loss_cls.pop('bg_cls_weight') + self.bg_cls_weight = bg_cls_weight + + if train_cfg: + assert 'assigner' in train_cfg, 'assigner should be provided '\ + 'when train_cfg is set.' + assigner = train_cfg['assigner'] + # assert loss_cls['loss_weight'] == assigner['cls_cost']['weight'], \ + # 'The classification weight for loss and matcher should be' \ + # 'exactly the same.' + # assert loss_bbox['loss_weight'] == assigner['reg_cost'][ + # 'weight'], 'The regression L1 weight for loss and matcher ' \ + # 'should be exactly the same.' + # assert loss_iou['loss_weight'] == assigner['iou_cost']['weight'], \ + # 'The regression iou weight for loss and matcher should be' \ + # 'exactly the same.' + self.assigner = build_assigner(assigner) + # DETR sampling=False, so use PseudoSampler + sampler_cfg = dict(type='PseudoSampler') + self.sampler = build_sampler(sampler_cfg, context=self) + self.num_query = num_query + self.num_classes = num_classes + self.num_things_classes = num_things_classes + self.num_stuff_classes = num_stuff_classes + self.in_channels = in_channels + self.num_reg_fcs = num_reg_fcs + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.fp16_enabled = False + self.loss_cls = build_loss(loss_cls) + self.loss_bbox = build_loss(loss_bbox) + self.loss_iou = build_loss(loss_iou) + + if self.loss_cls.use_sigmoid: + self.cls_out_channels = num_things_classes + else: + self.cls_out_channels = num_things_classes + 1 + self.act_cfg = transformer.get('act_cfg', + dict(type='ReLU', inplace=True)) + self.activate = build_activation_layer(self.act_cfg) + self.positional_encoding = build_positional_encoding( + positional_encoding) + self.transformer = build_transformer(transformer) + self.embed_dims = self.transformer.embed_dims + assert 'num_feats' in positional_encoding + num_feats = positional_encoding['num_feats'] + assert num_feats * 2 == self.embed_dims, 'embed_dims should' \ + f' be exactly 2 times of num_feats. Found {self.embed_dims}' \ + f' and {num_feats}.' 
+        self._init_layers()
+
+    def _init_layers(self):
+        """Initialize layers of the transformer head."""
+        self.input_proj = Conv2d(self.in_channels,
+                                 self.embed_dims,
+                                 kernel_size=1)
+        self.fc_cls = Linear(self.embed_dims, self.cls_out_channels)
+        self.reg_ffn = FFN(self.embed_dims,
+                           self.embed_dims,
+                           self.num_reg_fcs,
+                           self.act_cfg,
+                           dropout=0.0,
+                           add_residual=False)
+        self.fc_reg = Linear(self.embed_dims, 4)
+        self.query_embedding = nn.Embedding(self.num_query, self.embed_dims)
+
+    def init_weights(self):
+        """Initialize weights of the transformer head."""
+        # The initialization of the transformer is important
+        self.transformer.init_weights()
+
+    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
+                              missing_keys, unexpected_keys, error_msgs):
+        """Load checkpoints."""
+        # NOTE here use `AnchorFreeHead` instead of `TransformerHead`,
+        # since `AnchorFreeHead._load_from_state_dict` should not be
+        # called here. Invoking the default `Module._load_from_state_dict`
+        # is enough.
+
+        # Names of some parameters have been changed.
+        version = local_metadata.get('version', None)
+        if (version is None or version < 2) and self.__class__ is SegDETRHead:
+            convert_dict = {
+                '.self_attn.': '.attentions.0.',
+                '.ffn.': '.ffns.0.',
+                '.multihead_attn.': '.attentions.1.',
+                '.decoder.norm.': '.decoder.post_norm.'
+            }
+            # iterate over a snapshot of the keys: entries are inserted and
+            # deleted inside the loop, which would otherwise raise a
+            # RuntimeError for mutating the dict during iteration
+            for k in list(state_dict.keys()):
+                for ori_key, convert_key in convert_dict.items():
+                    if ori_key in k:
+                        convert_key = k.replace(ori_key, convert_key)
+                        state_dict[convert_key] = state_dict[k]
+                        del state_dict[k]
+
+        super(AnchorFreeHead,
+              self)._load_from_state_dict(state_dict, prefix, local_metadata,
+                                          strict, missing_keys,
+                                          unexpected_keys, error_msgs)
+
+    def forward(self, feats, img_metas):
+        """Forward function.
+
+        Args:
+            feats (tuple[Tensor]): Features from the upstream network, each is
+                a 4D-tensor.
+            img_metas (list[dict]): List of image information.
+
+        Returns:
+            tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels.
+
+                - all_cls_scores_list (list[Tensor]): Classification scores \
+                    for each scale level. Each is a 4D-tensor with shape \
+                    [nb_dec, bs, num_query, cls_out_channels]. Note \
+                    `cls_out_channels` should include background.
+                - all_bbox_preds_list (list[Tensor]): Sigmoid regression \
+                    outputs for each scale level. Each is a 4D-tensor with \
+                    normalized coordinate format (cx, cy, w, h) and shape \
+                    [nb_dec, bs, num_query, 4].
+        """
+        num_levels = len(feats)
+        img_metas_list = [img_metas for _ in range(num_levels)]
+        return multi_apply(self.forward_single, feats, img_metas_list)
+
+    def forward_single(self, x, img_metas):
+        """Forward function for a single feature level.
+
+        Args:
+            x (Tensor): Input feature from backbone's single stage, shape
+                [bs, c, h, w].
+            img_metas (list[dict]): List of image information.
+
+        Returns:
+            all_cls_scores (Tensor): Outputs from the classification head,
+                shape [nb_dec, bs, num_query, cls_out_channels]. Note
+                cls_out_channels should include background.
+            all_bbox_preds (Tensor): Sigmoid outputs from the regression
+                head with normalized coordinate format (cx, cy, w, h).
+                Shape [nb_dec, bs, num_query, 4].
+        """
+        # construct binary masks which are used for the transformer.
+        # NOTE following the official DETR repo, non-zero values represent
+        # ignored positions, while zero values mean valid positions.
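+        # e.g. a 128x96 image padded to 128x128 yields a mask whose last 32
+        # columns are 1 (padding, ignored) and all other entries are 0 (valid).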
+ batch_size = x.size(0) + input_img_h, input_img_w = img_metas[0]['batch_input_shape'] + masks = x.new_ones((batch_size, input_img_h, input_img_w)) + for img_id in range(batch_size): + img_h, img_w, _ = img_metas[img_id]['img_shape'] + masks[img_id, :img_h, :img_w] = 0 + + x = self.input_proj(x) + # interpolate masks to have the same spatial shape with x + masks = F.interpolate(masks.unsqueeze(1), + size=x.shape[-2:]).to(torch.bool).squeeze(1) + # position encoding + pos_embed = self.positional_encoding(masks) # [bs, embed_dim, h, w] + # outs_dec: [nb_dec, bs, num_query, embed_dim] + outs_dec, _ = self.transformer(x, masks, self.query_embedding.weight, + pos_embed) + + all_cls_scores = self.fc_cls(outs_dec) + all_bbox_preds = self.fc_reg(self.activate( + self.reg_ffn(outs_dec))).sigmoid() + return all_cls_scores, all_bbox_preds + + @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) + def loss(self, + all_cls_scores_list, + all_bbox_preds_list, + gt_bboxes_list, + gt_labels_list, + img_metas, + gt_bboxes_ignore=None): + """"Loss function. + + Only outputs from the last feature level are used for computing + losses by default. + + Args: + all_cls_scores_list (list[Tensor]): Classification outputs + for each feature level. Each is a 4D-tensor with shape + [nb_dec, bs, num_query, cls_out_channels]. + all_bbox_preds_list (list[Tensor]): Sigmoid regression + outputs for each feature level. Each is a 4D-tensor with + normalized coordinate format (cx, cy, w, h) and shape + [nb_dec, bs, num_query, 4]. + gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image + with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (num_gts, ). + img_metas (list[dict]): List of image meta information. + gt_bboxes_ignore (list[Tensor], optional): Bounding boxes + which can be ignored for each image. Default None. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + # NOTE defaultly only the outputs from the last feature scale is used. + all_cls_scores = all_cls_scores_list[-1] + all_bbox_preds = all_bbox_preds_list[-1] + assert gt_bboxes_ignore is None, \ + 'Only supports for gt_bboxes_ignore setting to None.' 
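+        # Replicate the GT lists once per decoder layer so that ``multi_apply``
+        # computes an auxiliary (deep-supervision) loss for every layer.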
+ + num_dec_layers = len(all_cls_scores) + all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)] + all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] + all_gt_bboxes_ignore_list = [ + gt_bboxes_ignore for _ in range(num_dec_layers) + ] + img_metas_list = [img_metas for _ in range(num_dec_layers)] + + losses_cls, losses_bbox, losses_iou = multi_apply( + self.loss_single, all_cls_scores, all_bbox_preds, + all_gt_bboxes_list, all_gt_labels_list, img_metas_list, + all_gt_bboxes_ignore_list) + + loss_dict = dict() + # loss from the last decoder layer + loss_dict['loss_cls'] = losses_cls[-1] + loss_dict['loss_bbox'] = losses_bbox[-1] + loss_dict['loss_iou'] = losses_iou[-1] + # loss from other decoder layers + num_dec_layer = 0 + for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1], + losses_bbox[:-1], + losses_iou[:-1]): + loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i + loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i + loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i + num_dec_layer += 1 + return loss_dict + + def loss_single(self, + cls_scores, + bbox_preds, + gt_bboxes_list, + gt_labels_list, + img_metas, + gt_bboxes_ignore_list=None): + """"Loss function for outputs from a single decoder layer of a single + feature level. + + Args: + cls_scores (Tensor): Box score logits from a single decoder layer + for all images. Shape [bs, num_query, cls_out_channels]. + bbox_preds (Tensor): Sigmoid outputs from a single decoder layer + for all images, with normalized coordinate (cx, cy, w, h) and + shape [bs, num_query, 4]. + gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image + with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (num_gts, ). + img_metas (list[dict]): List of image meta information. + gt_bboxes_ignore_list (list[Tensor], optional): Bounding + boxes which can be ignored for each image. Default None. + + Returns: + dict[str, Tensor]: A dictionary of loss components for outputs from + a single decoder layer. 
+ """ + num_imgs = cls_scores.size(0) + cls_scores_list = [cls_scores[i] for i in range(num_imgs)] + bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)] + cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list, + gt_bboxes_list, gt_labels_list, + img_metas, gt_bboxes_ignore_list) + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + labels = torch.cat(labels_list, 0) + label_weights = torch.cat(label_weights_list, 0) + bbox_targets = torch.cat(bbox_targets_list, 0) + bbox_weights = torch.cat(bbox_weights_list, 0) + + # classification loss + cls_scores = cls_scores.reshape(-1, self.cls_out_channels) + # construct weighted avg_factor to match with the official DETR repo + cls_avg_factor = num_total_pos * 1.0 + \ + num_total_neg * self.bg_cls_weight + if self.sync_cls_avg_factor: + cls_avg_factor = reduce_mean( + cls_scores.new_tensor([cls_avg_factor])) + cls_avg_factor = max(cls_avg_factor, 1) + loss_cls = self.loss_cls(cls_scores, + labels, + label_weights, + avg_factor=cls_avg_factor) + + # Compute the average number of gt boxes accross all gpus, for + # normalization purposes + num_total_pos = loss_cls.new_tensor([num_total_pos]) + num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() + + # construct factors used for rescale bboxes + factors = [] + for img_meta, bbox_pred in zip(img_metas, bbox_preds): + img_h, img_w, _ = img_meta['img_shape'] + factor = bbox_pred.new_tensor([img_w, img_h, img_w, + img_h]).unsqueeze(0).repeat( + bbox_pred.size(0), 1) + factors.append(factor) + factors = torch.cat(factors, 0) + + # DETR regress the relative position of boxes (cxcywh) in the image, + # thus the learning target is normalized by the image size. So here + # we need to re-scale them for calculating IoU loss + bbox_preds = bbox_preds.reshape(-1, 4) + bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors + bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors + + # regression IoU loss, defaultly GIoU loss + loss_iou = self.loss_iou(bboxes, + bboxes_gt, + bbox_weights, + avg_factor=num_total_pos) + + # regression L1 loss + loss_bbox = self.loss_bbox(bbox_preds, + bbox_targets, + bbox_weights, + avg_factor=num_total_pos) + return loss_cls, loss_bbox, loss_iou + + def get_targets(self, + cls_scores_list, + bbox_preds_list, + gt_bboxes_list, + gt_labels_list, + img_metas, + gt_bboxes_ignore_list=None): + """"Compute regression and classification targets for a batch image. + + Outputs from a single decoder layer of a single feature level are used. + + Args: + cls_scores_list (list[Tensor]): Box score logits from a single + decoder layer for each image with shape [num_query, + cls_out_channels]. + bbox_preds_list (list[Tensor]): Sigmoid outputs from a single + decoder layer for each image, with normalized coordinate + (cx, cy, w, h) and shape [num_query, 4]. + gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image + with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (num_gts, ). + img_metas (list[dict]): List of image meta information. + gt_bboxes_ignore_list (list[Tensor], optional): Bounding + boxes which can be ignored for each image. Default None. + + Returns: + tuple: a tuple containing the following targets. + + - labels_list (list[Tensor]): Labels for all images. + - label_weights_list (list[Tensor]): Label weights for all \ + images. 
+ - bbox_targets_list (list[Tensor]): BBox targets for all \ + images. + - bbox_weights_list (list[Tensor]): BBox weights for all \ + images. + - num_total_pos (int): Number of positive samples in all \ + images. + - num_total_neg (int): Number of negative samples in all \ + images. + """ + assert gt_bboxes_ignore_list is None, \ + 'Only supports for gt_bboxes_ignore setting to None.' + num_imgs = len(cls_scores_list) + gt_bboxes_ignore_list = [ + gt_bboxes_ignore_list for _ in range(num_imgs) + ] + + (labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply( + self._get_target_single, cls_scores_list, bbox_preds_list, + gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list) + num_total_pos = sum((inds.numel() for inds in pos_inds_list)) + num_total_neg = sum((inds.numel() for inds in neg_inds_list)) + return (labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, num_total_pos, num_total_neg) + + def _get_target_single(self, + cls_score, + bbox_pred, + gt_bboxes, + gt_labels, + img_meta, + gt_bboxes_ignore=None): + """"Compute regression and classification targets for one image. + + Outputs from a single decoder layer of a single feature level are used. + + Args: + cls_score (Tensor): Box score logits from a single decoder layer + for one image. Shape [num_query, cls_out_channels]. + bbox_pred (Tensor): Sigmoid outputs from a single decoder layer + for one image, with normalized coordinate (cx, cy, w, h) and + shape [num_query, 4]. + gt_bboxes (Tensor): Ground truth bboxes for one image with + shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels (Tensor): Ground truth class indices for one image + with shape (num_gts, ). + img_meta (dict): Meta information for one image. + gt_bboxes_ignore (Tensor, optional): Bounding boxes + which can be ignored. Default None. + + Returns: + tuple[Tensor]: a tuple containing the following for one image. + + - labels (Tensor): Labels of each image. + - label_weights (Tensor]): Label weights of each image. + - bbox_targets (Tensor): BBox targets of each image. + - bbox_weights (Tensor): BBox weights of each image. + - pos_inds (Tensor): Sampled positive indices for each image. + - neg_inds (Tensor): Sampled negative indices for each image. + """ + num_bboxes = bbox_pred.size(0) + # assigner and sampler + assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes, + gt_labels, img_meta, + gt_bboxes_ignore) + sampling_result = self.sampler.sample(assign_result, bbox_pred, + gt_bboxes) + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + + # label targets + labels = gt_bboxes.new_full((num_bboxes, ), + self.num_things_classes, + dtype=torch.long) + labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] + label_weights = gt_bboxes.new_ones(num_bboxes) + + # bbox targets + bbox_targets = torch.zeros_like(bbox_pred) + bbox_weights = torch.zeros_like(bbox_pred) + bbox_weights[pos_inds] = 1.0 + img_h, img_w, _ = img_meta['img_shape'] + + # DETR regress the relative position of boxes (cxcywh) in the image. + # Thus the learning target should be normalized by the image size, also + # the box format should be converted from defaultly x1y1x2y2 to cxcywh. 
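+        # Illustrative example (values assumed, not from the codebase): in a
+        # 640x480 image, a GT box (x1, y1, x2, y2) = (64, 48, 320, 240)
+        # divided by the factor (640, 480, 640, 480) and converted to cxcywh
+        # becomes (0.3, 0.3, 0.4, 0.4), matching the sigmoid-normalized
+        # predictions.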
+ factor = bbox_pred.new_tensor([img_w, img_h, img_w, + img_h]).unsqueeze(0) + pos_gt_bboxes_normalized = sampling_result.pos_gt_bboxes / factor + pos_gt_bboxes_targets = bbox_xyxy_to_cxcywh(pos_gt_bboxes_normalized) + bbox_targets[pos_inds] = pos_gt_bboxes_targets + return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, + neg_inds) + + # over-write because img_metas are needed as inputs for bbox_head. + def forward_train(self, + x, + img_metas, + gt_bboxes, + gt_labels=None, + gt_bboxes_ignore=None, + proposal_cfg=None, + **kwargs): + """Forward function for training mode. + + Args: + x (list[Tensor]): Features from backbone. + img_metas (list[dict]): Meta information of each image, e.g., + image size, scaling factor, etc. + gt_bboxes (Tensor): Ground truth bboxes of the image, + shape (num_gts, 4). + gt_labels (Tensor): Ground truth labels of each box, + shape (num_gts,). + gt_bboxes_ignore (Tensor): Ground truth bboxes to be + ignored, shape (num_ignored_gts, 4). + proposal_cfg (mmcv.Config): Test / postprocessing configuration, + if None, test_cfg would be used. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + assert proposal_cfg is None, '"proposal_cfg" must be None' + outs = self(x, img_metas) + if gt_labels is None: + loss_inputs = outs + (gt_bboxes, img_metas) + else: + loss_inputs = outs + (gt_bboxes, gt_labels, img_metas) + losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + return losses + + @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list')) + def get_bboxes(self, + all_cls_scores_list, + all_bbox_preds_list, + img_metas, + rescale=False): + """Transform network outputs for a batch into bbox predictions. + + Args: + all_cls_scores_list (list[Tensor]): Classification outputs + for each feature level. Each is a 4D-tensor with shape + [nb_dec, bs, num_query, cls_out_channels]. + all_bbox_preds_list (list[Tensor]): Sigmoid regression + outputs for each feature level. Each is a 4D-tensor with + normalized coordinate format (cx, cy, w, h) and shape + [nb_dec, bs, num_query, 4]. + img_metas (list[dict]): Meta information of each image. + rescale (bool, optional): If True, return boxes in original + image space. Default False. + + Returns: + list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. \ + The first item is an (n, 5) tensor, where the first 4 columns \ + are bounding box positions (tl_x, tl_y, br_x, br_y) and the \ + 5-th column is a score between 0 and 1. The second item is a \ + (n,) tensor where each item is the predicted class label of \ + the corresponding box. + """ + # NOTE defaultly only using outputs from the last feature level, + # and only the outputs from the last decoder layer is used. + cls_scores = all_cls_scores_list[-1][-1] + bbox_preds = all_bbox_preds_list[-1][-1] + + result_list = [] + for img_id in range(len(img_metas)): + cls_score = cls_scores[img_id] + bbox_pred = bbox_preds[img_id] + img_shape = img_metas[img_id]['img_shape'] + scale_factor = img_metas[img_id]['scale_factor'] + proposals = self._get_bboxes_single(cls_score, bbox_pred, + img_shape, scale_factor, + rescale) + result_list.append(proposals) + + return result_list + + def _get_bboxes_single(self, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=False): + """Transform outputs from the last decoder layer into bbox predictions + for each image. + + Args: + cls_score (Tensor): Box score logits from the last decoder layer + for each image. Shape [num_query, cls_out_channels]. 
+ bbox_pred (Tensor): Sigmoid outputs from the last decoder layer + for each image, with coordinate format (cx, cy, w, h) and + shape [num_query, 4]. + img_shape (tuple[int]): Shape of input image, (height, width, 3). + scale_factor (ndarray, optional): Scale factor of the image arange + as (w_scale, h_scale, w_scale, h_scale). + rescale (bool, optional): If True, return boxes in original image + space. Default False. + + Returns: + tuple[Tensor]: Results of detected bboxes and labels. + + - det_bboxes: Predicted bboxes with shape [num_query, 5], \ + where the first 4 columns are bounding box positions \ + (tl_x, tl_y, br_x, br_y) and the 5-th column are scores \ + between 0 and 1. + - det_labels: Predicted labels of the corresponding box with \ + shape [num_query]. + """ + assert len(cls_score) == len(bbox_pred) + max_per_img = self.test_cfg.get('max_per_img', self.num_query) + # exclude background + if self.loss_cls.use_sigmoid: + cls_score = cls_score.sigmoid() + scores, indexes = cls_score.view(-1).topk(max_per_img) + det_labels = indexes % self.num_things_classes + bbox_index = indexes // self.num_things_classes + bbox_pred = bbox_pred[bbox_index] + else: + scores, det_labels = F.softmax(cls_score, dim=-1)[..., :-1].max(-1) + scores, bbox_index = scores.topk(max_per_img) + bbox_pred = bbox_pred[bbox_index] + det_labels = det_labels[bbox_index] + + det_bboxes = bbox_cxcywh_to_xyxy(bbox_pred) + det_bboxes[:, 0::2] = det_bboxes[:, 0::2] * img_shape[1] + det_bboxes[:, 1::2] = det_bboxes[:, 1::2] * img_shape[0] + det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1]) + det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0]) + if rescale: + det_bboxes /= det_bboxes.new_tensor(scale_factor) + det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(1)), -1) + + return det_bboxes, det_labels diff --git a/mmcv/models/dense_heads/seg_head_plugin/seg_mask_head.py b/mmcv/models/dense_heads/seg_head_plugin/seg_mask_head.py new file mode 100644 index 0000000..3a3bdbd --- /dev/null +++ b/mmcv/models/dense_heads/seg_head_plugin/seg_mask_head.py @@ -0,0 +1,393 @@ +""" +Copy-paste from torch.nn.Transformer, timm, with modifications: +""" +import copy +from typing import Optional, List + +import torch +import torch.nn.functional as F +from torch import nn, Tensor +from functools import partial +from mmcv.models.utils.builder import TRANSFORMER +import math +from mmcv.utils import force_fp32 + +count = 0 + + +class Mlp(nn.Module): + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + self.fp16_enabled = False + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + @force_fp32(apply_to=('x', )) + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class SelfAttention(nn.Module): + def __init__(self, + cfg, + dim, + num_heads=2, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.fp16_enabled = False + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + @force_fp32(apply_to=('x', )) + def 
forward(self, x): + B, N, C = x.shape + + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + C // self.num_heads).permute(2, 0, 3, 1, + 4).contiguous() + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class Attention(nn.Module): + def __init__(self, + cfg, + dim, + num_heads=2, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0.): + super().__init__() + self.fp16_enabled = False + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + self.q = nn.Linear(dim, dim, bias=qkv_bias) + self.k = nn.Linear(dim, dim, bias=qkv_bias) + self.v = nn.Linear(dim, dim, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.linear_l1 = nn.Sequential( + nn.Linear(self.num_heads, self.num_heads), + nn.ReLU(), + ) + self.linear = nn.Sequential( + nn.Linear(self.num_heads, 1), + nn.ReLU(), + ) + self._reset_parameters() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + @force_fp32(apply_to=('query', 'key', 'value')) + def forward(self, query, key, value, key_padding_mask, hw_lvl): + B, N, C = query.shape + _, L, _ = key.shape + #print('query, key, value', query.shape, value.shape, key.shape) + q = self.q(query).reshape(B, N, + self.num_heads, C // self.num_heads).permute( + 0, 2, 1, + 3).contiguous() #.permute(2, 0, 3, 1, 4) + k = self.k(key).reshape(B, L, + self.num_heads, C // self.num_heads).permute( + 0, 2, 1, + 3).contiguous() #.permute(2, 0, 3, 1, 4) + + v = self.v(value).reshape(B, L, + self.num_heads, C // self.num_heads).permute( + 0, 2, 1, + 3).contiguous() #.permute(2, 0, 3, 1, 4) + + attn = (q @ k.transpose(-2, -1).contiguous()) * self.scale + + attn = attn.permute(0, 2, 3, 1) + + new_feats = self.linear_l1(attn) + mask = self.linear(new_feats) + + attn = attn.permute(0, 3, 1, 2) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).contiguous().reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + + return x, mask + +# AttentionTail is a cheap implementation that can make mask decoder 1 layer deeper. 
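+# Shape sketch for Attention above and AttentionTail below (illustrative):
+# after q @ k^T, `attn` is (B, num_heads, N, L); permuting it to
+# (B, N, L, num_heads) lets `linear_l1` and `linear` fuse the per-head
+# attention logits into one (B, N, L, 1) mask logit per query-pixel pair,
+# returned alongside the usual softmax-attention feature update.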
+class AttentionTail(nn.Module):
+    def __init__(self,
+                 cfg,
+                 dim,
+                 num_heads=2,
+                 qkv_bias=False,
+                 qk_scale=None,
+                 attn_drop=0.,
+                 proj_drop=0.):
+        super().__init__()
+        self.fp16_enabled = False
+        self.num_heads = num_heads
+        head_dim = dim // num_heads
+        self.scale = qk_scale or head_dim**-0.5
+        self.q = nn.Linear(dim, dim, bias=qkv_bias)
+        self.k = nn.Linear(dim, dim, bias=qkv_bias)
+
+        self.linear_l1 = nn.Sequential(
+            nn.Linear(self.num_heads, self.num_heads),
+            nn.ReLU(),
+        )
+
+        self.linear = nn.Sequential(
+            nn.Linear(self.num_heads, 1),
+            nn.ReLU(),
+        )
+        self._reset_parameters()
+
+    def _reset_parameters(self):
+        for p in self.parameters():
+            if p.dim() > 1:
+                nn.init.xavier_uniform_(p)
+
+    @force_fp32(apply_to=('query', 'key'))
+    def forward(self, query, key, key_padding_mask, hw_lvl=None):
+        B, N, C = query.shape
+        _, L, _ = key.shape
+        q = self.q(query).reshape(B, N, self.num_heads,
+                                  C // self.num_heads).permute(0, 2, 1,
+                                                               3).contiguous()
+        k = self.k(key).reshape(B, L, self.num_heads,
+                                C // self.num_heads).permute(0, 2, 1,
+                                                             3).contiguous()
+        attn = (q @ k.transpose(-2, -1).contiguous()) * self.scale
+
+        # (B, num_heads, N, L) -> (B, N, L, num_heads)
+        attn = attn.permute(0, 2, 3, 1)
+
+        new_feats = self.linear_l1(attn)
+        mask = self.linear(new_feats)
+
+        return mask
+
+
+class Block(nn.Module):
+    def __init__(self,
+                 cfg,
+                 dim,
+                 num_heads,
+                 mlp_ratio=4.,
+                 qkv_bias=False,
+                 qk_scale=None,
+                 drop=0.,
+                 attn_drop=0.,
+                 drop_path=0.,
+                 act_layer=nn.GELU,
+                 norm_layer=nn.LayerNorm,
+                 self_attn=False):
+        super().__init__()
+        self.fp16_enabled = False
+        self.head_norm1 = norm_layer(dim)
+        self.self_attn = self_attn
+        self.attn = Attention(cfg,
+                              dim,
+                              num_heads=num_heads,
+                              qkv_bias=qkv_bias,
+                              qk_scale=qk_scale,
+                              attn_drop=attn_drop,
+                              proj_drop=drop)
+        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
+        self.drop_path = DropPath(
+            drop_path) if drop_path > 0. else nn.Identity()
+        self.head_norm2 = norm_layer(dim)
+        mlp_hidden_dim = int(dim * mlp_ratio)
+        self.mlp = Mlp(in_features=dim,
+                       hidden_features=mlp_hidden_dim,
+                       act_layer=act_layer,
+                       drop=drop)
+        if self.self_attn:
+            self.self_attention = SelfAttention(cfg,
+                                                dim,
+                                                num_heads=num_heads,
+                                                qkv_bias=qkv_bias,
+                                                qk_scale=qk_scale,
+                                                attn_drop=attn_drop,
+                                                proj_drop=drop)
+            self.norm3 = norm_layer(dim)
+
+    @force_fp32(apply_to=('query', 'key', 'value'))
+    def forward(self, query, key, value, key_padding_mask=None, hw_lvl=None):
+        if self.self_attn:
+            query = query + self.drop_path(self.self_attention(query))
+            query = self.norm3(query)
+        x, mask = self.attn(query, key, value, key_padding_mask, hw_lvl=hw_lvl)
+        query = query + self.drop_path(x)
+        query = self.head_norm1(query)
+
+        query = query + self.drop_path(self.mlp(query))
+        query = self.head_norm2(query)
+        return query, mask
+
+
+def drop_path(x, drop_prob: float = 0., training: bool = False):
+    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
+    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
+    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
+    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
+    'survival rate' as the argument.
+ """ + if drop_prob == 0. or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0], ) + (1, ) * ( + x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand( + shape, dtype=x.dtype, device=x.device) + random_tensor.floor_() # binarize + output = x.div(keep_prob) * random_tensor + return output + + +def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + """ + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + @force_fp32(apply_to=('x', )) + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + +@TRANSFORMER.register_module() +class SegMaskHead(nn.Module): + def __init__(self, + cfg=None, + d_model=16, + nhead=2, + num_encoder_layers=6, + num_decoder_layers=1, + dim_feedforward=64, + dropout=0.1, + activation="relu", + normalize_before=False, + return_intermediate_dec=False, + self_attn=False): + super().__init__() + + self.fp16_enabled = False + mlp_ratio = 4 + qkv_bias = True + qk_scale = None + drop_rate = 0 + attn_drop_rate = 0 + + norm_layer = None + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = None + act_layer = act_layer or nn.GELU + block = Block(cfg, + dim=d_model, + num_heads=nhead, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=0, + norm_layer=norm_layer, + act_layer=act_layer, + self_attn=self_attn) + self.blocks = _get_clones(block, num_decoder_layers) + self.attnen = AttentionTail(cfg, + d_model, + num_heads=nhead, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop_rate, + proj_drop=0) + + self._reset_parameters() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + if pos is None: + return tensor + else: + return tensor + pos + #return tensor if pos is None else tensor + pos + @force_fp32(apply_to=('memory', 'mask_memory', 'pos_memory', 'query_embed', + 'mask_query', 'pos_query')) + def forward(self, memory, mask_memory, pos_memory, query_embed, mask_query, + pos_query, hw_lvl): + if mask_memory is not None and isinstance(mask_memory, torch.Tensor): + mask_memory = mask_memory.to(torch.bool) + masks = [] + inter_query = [] + for i, block in enumerate(self.blocks): + query_embed, mask = block(self.with_pos_embed( + query_embed, pos_query), + self.with_pos_embed(memory, pos_memory), + memory, + key_padding_mask=mask_memory, + hw_lvl=hw_lvl) + masks.append(mask) + inter_query.append(query_embed) + #if i == 1: + # return mask, masks, inter_query + attn = self.attnen(self.with_pos_embed(query_embed, pos_query), + self.with_pos_embed(memory, pos_memory), + key_padding_mask=mask_memory, + hw_lvl=hw_lvl) + return attn, masks, inter_query diff --git a/mmcv/models/dense_heads/seg_head_plugin/seg_utils.py b/mmcv/models/dense_heads/seg_head_plugin/seg_utils.py new file mode 100644 index 0000000..dd1e61d --- /dev/null +++ b/mmcv/models/dense_heads/seg_head_plugin/seg_utils.py @@ -0,0 +1,7 @@ + + +def IOU(intputs, targets): + numerator = (intputs * targets).sum(dim=1) + denominator = intputs.sum(dim=1) + targets.sum(dim=1) - numerator + loss = numerator / (denominator + 0.0000000000001) + return loss.cpu(), numerator.cpu(), denominator.cpu() diff 
--git a/mmcv/models/dense_heads/track_head.py b/mmcv/models/dense_heads/track_head.py new file mode 100644 index 0000000..0233b32 --- /dev/null +++ b/mmcv/models/dense_heads/track_head.py @@ -0,0 +1,533 @@ +#---------------------------------------------------------------------------------# +# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156) # +# Source code: https://github.com/OpenDriveLab/UniAD # +# Copyright (c) OpenDriveLab. All rights reserved. # +# Modified from bevformer (https://github.com/fundamentalvision/BEVFormer) # +#---------------------------------------------------------------------------------# + +import copy +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.models.bricks import Linear +from mmcv.models.utils import bias_init_with_prob +from mmcv.utils import TORCH_VERSION, digit_version + +from mmcv.core.utils import (multi_apply, reduce_mean) +from mmcv.models.utils.transformer import inverse_sigmoid +from mmcv.models import HEADS +from mmcv.models.dense_heads import DETRHead +from mmcv.core.bbox.coder import build_bbox_coder +from mmcv.core.bbox.util import normalize_bbox +from mmcv.utils import force_fp32, auto_fp16 + + +@HEADS.register_module() +class BEVFormerTrackHead(DETRHead): + """Head of Detr3D. + Args: + with_box_refine (bool): Whether to refine the reference points + in the decoder. Defaults to False. + as_two_stage (bool) : Whether to generate the proposal from + the outputs of encoder. + transformer (obj:`ConfigDict`): ConfigDict is used for building + the Encoder and Decoder. + bev_h, bev_w (int): spatial shape of BEV queries. + """ + + def __init__(self, + *args, + with_box_refine=False, + as_two_stage=False, + transformer=None, + bbox_coder=None, + num_cls_fcs=2, + code_weights=None, + bev_h=30, + bev_w=30, + past_steps=4, + fut_steps=4, + **kwargs): + + self.bev_h = bev_h + self.bev_w = bev_w + self.fp16_enabled = False + + self.with_box_refine = with_box_refine + + assert as_two_stage is False, 'as_two_stage is not supported yet.' 
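+        # NOTE: the 10-dim box code handled below is assumed to follow the
+        # usual nuScenes DETR3D layout (cx, cy, w, l, cz, h, rot_sin, rot_cos,
+        # vx, vy); the trailing 0.2 entries in the default code_weights then
+        # down-weight the two velocity terms.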
+ self.as_two_stage = as_two_stage + if self.as_two_stage: + transformer['as_two_stage'] = self.as_two_stage + if 'code_size' in kwargs: + self.code_size = kwargs['code_size'] + else: + self.code_size = 10 + if code_weights is not None: + self.code_weights = code_weights + else: + self.code_weights = [1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2] + + self.bbox_coder = build_bbox_coder(bbox_coder) + self.pc_range = self.bbox_coder.pc_range + self.real_w = self.pc_range[3] - self.pc_range[0] + self.real_h = self.pc_range[4] - self.pc_range[1] + self.num_cls_fcs = num_cls_fcs - 1 + self.past_steps = past_steps + self.fut_steps = fut_steps + super(BEVFormerTrackHead, self).__init__( + *args, transformer=transformer, **kwargs) + self.code_weights = nn.Parameter(torch.tensor( + self.code_weights, requires_grad=False), requires_grad=False) + + def _init_layers(self): + """Initialize classification branch and regression branch of head.""" + cls_branch = [] + for _ in range(self.num_reg_fcs): + cls_branch.append(Linear(self.embed_dims, self.embed_dims)) + cls_branch.append(nn.LayerNorm(self.embed_dims)) + cls_branch.append(nn.ReLU(inplace=True)) + cls_branch.append(Linear(self.embed_dims, self.cls_out_channels)) + fc_cls = nn.Sequential(*cls_branch) + + reg_branch = [] + for _ in range(self.num_reg_fcs): + reg_branch.append(Linear(self.embed_dims, self.embed_dims)) + reg_branch.append(nn.ReLU()) + reg_branch.append(Linear(self.embed_dims, self.code_size)) + reg_branch = nn.Sequential(*reg_branch) + + past_traj_reg_branch = [] + for _ in range(self.num_reg_fcs): + past_traj_reg_branch.append( + Linear(self.embed_dims, self.embed_dims)) + past_traj_reg_branch.append(nn.ReLU()) + past_traj_reg_branch.append( + Linear(self.embed_dims, (self.past_steps + self.fut_steps)*2)) + past_traj_reg_branch = nn.Sequential(*past_traj_reg_branch) + + def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + # last reg_branch is used to generate proposal from + # encode feature map when as_two_stage is True. 
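+        # e.g. a 6-layer decoder yields num_pred = 6 prediction branches here,
+        # or 7 with the extra encoder proposal branch when as_two_stage is
+        # True.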
+        num_pred = (self.transformer.decoder.num_layers + 1) if \
+            self.as_two_stage else self.transformer.decoder.num_layers
+
+        if self.with_box_refine:
+            self.cls_branches = _get_clones(fc_cls, num_pred)
+            self.reg_branches = _get_clones(reg_branch, num_pred)
+            self.past_traj_reg_branches = _get_clones(
+                past_traj_reg_branch, num_pred)
+        else:
+            self.cls_branches = nn.ModuleList(
+                [fc_cls for _ in range(num_pred)])
+            self.reg_branches = nn.ModuleList(
+                [reg_branch for _ in range(num_pred)])
+            self.past_traj_reg_branches = nn.ModuleList(
+                [past_traj_reg_branch for _ in range(num_pred)])
+        if not self.as_two_stage:
+            self.bev_embedding = nn.Embedding(
+                self.bev_h * self.bev_w, self.embed_dims)
+
+    def init_weights(self):
+        """Initialize weights of the DeformDETR head."""
+        self.transformer.init_weights()
+        if self.loss_cls.use_sigmoid:
+            bias_init = bias_init_with_prob(0.01)
+            for m in self.cls_branches:
+                nn.init.constant_(m[-1].bias, bias_init)
+
+    def get_bev_features(self, mlvl_feats, img_metas, prev_bev=None):
+        bs, num_cam, _, _, _ = mlvl_feats[0].shape
+        dtype = mlvl_feats[0].dtype
+        bev_queries = self.bev_embedding.weight.to(dtype)
+
+        bev_mask = torch.zeros((bs, self.bev_h, self.bev_w),
+                               device=bev_queries.device).to(dtype)
+        bev_pos = self.positional_encoding(bev_mask).to(dtype)
+        bev_embed = self.transformer.get_bev_features(
+            mlvl_feats,
+            bev_queries,
+            self.bev_h,
+            self.bev_w,
+            grid_length=(self.real_h / self.bev_h,
+                         self.real_w / self.bev_w),
+            bev_pos=bev_pos,
+            prev_bev=prev_bev,
+            img_metas=img_metas,
+        )
+        return bev_embed, bev_pos
+
+    def get_detections(
+        self,
+        bev_embed,
+        object_query_embeds=None,
+        ref_points=None,
+        img_metas=None,
+    ):
+        assert bev_embed.shape[0] == self.bev_h * self.bev_w
+        hs, init_reference, inter_references = self.transformer.get_states_and_refs(
+            bev_embed,
+            object_query_embeds,
+            self.bev_h,
+            self.bev_w,
+            reference_points=ref_points,
+            reg_branches=self.reg_branches if self.with_box_refine else None,
+            cls_branches=self.cls_branches if self.as_two_stage else None,
+            img_metas=img_metas,
+        )
+        hs = hs.permute(0, 2, 1, 3)
+        outputs_classes = []
+        outputs_coords = []
+        outputs_trajs = []
+        for lvl in range(hs.shape[0]):
+            if lvl == 0:
+                # reference = init_reference
+                reference = ref_points.sigmoid()
+            else:
+                reference = inter_references[lvl - 1]
+                # ref_size_base = inter_box_sizes[lvl - 1]
+            reference = inverse_sigmoid(reference)
+            outputs_class = self.cls_branches[lvl](hs[lvl])
+            # box code: (cx, cy, w, l, cz, h, rot_sin, rot_cos, vx, vy)
+            tmp = self.reg_branches[lvl](hs[lvl])
+            outputs_past_traj = self.past_traj_reg_branches[lvl](hs[lvl]).view(
+                tmp.shape[0], -1, self.past_steps + self.fut_steps, 2)
+            # TODO: check the shape of reference
+            assert reference.shape[-1] == 3
+            tmp[..., 0:2] += reference[..., 0:2]
+            tmp[..., 0:2] = tmp[..., 0:2].sigmoid()
+            tmp[..., 4:5] += reference[..., 2:3]
+            tmp[..., 4:5] = tmp[..., 4:5].sigmoid()
+
+            last_ref_points = torch.cat(
+                [tmp[..., 0:2], tmp[..., 4:5]], dim=-1,
+            )
+
+            tmp[..., 0:1] = (tmp[..., 0:1] * (self.pc_range[3] -
+                             self.pc_range[0]) + self.pc_range[0])
+            tmp[..., 1:2] = (tmp[..., 1:2] * (self.pc_range[4] -
+                             self.pc_range[1]) + self.pc_range[1])
+            tmp[..., 4:5] = (tmp[..., 4:5] * (self.pc_range[5] -
+                             self.pc_range[2]) + self.pc_range[2])
+
+            # tmp[..., 2:4] = tmp[..., 2:4] + ref_size_base[..., 0:2]
+            # tmp[..., 5:6] = tmp[..., 5:6] + ref_size_base[..., 2:3]
+
+            # TODO: check if using sigmoid
+            outputs_coord = tmp
+            outputs_classes.append(outputs_class)
+            outputs_coords.append(outputs_coord)
+            outputs_trajs.append(outputs_past_traj)
+        outputs_classes = torch.stack(outputs_classes)
+        outputs_coords = torch.stack(outputs_coords)
+        outputs_trajs = torch.stack(outputs_trajs)
+        last_ref_points = inverse_sigmoid(last_ref_points)
+        outs = {
+            'all_cls_scores': outputs_classes,
+            'all_bbox_preds': outputs_coords,
+            'all_past_traj_preds': outputs_trajs,
+            'enc_cls_scores': None,
+            'enc_bbox_preds': None,
+            'last_ref_points': last_ref_points,
+            'query_feats': hs,
+        }
+        return outs
+
+    def _get_target_single(self,
+                           cls_score,
+                           bbox_pred,
+                           gt_labels,
+                           gt_bboxes,
+                           gt_bboxes_ignore=None):
+        """Compute regression and classification targets for one image.
+        Outputs from a single decoder layer of a single feature level are used.
+        Args:
+            cls_score (Tensor): Box score logits from a single decoder layer
+                for one image. Shape [num_query, cls_out_channels].
+            bbox_pred (Tensor): Sigmoid outputs from a single decoder layer
+                for one image, with normalized coordinate (cx, cy, w, h) and
+                shape [num_query, 4].
+            gt_bboxes (Tensor): Ground truth bboxes for one image with
+                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels (Tensor): Ground truth class indices for one image
+                with shape (num_gts, ).
+            gt_bboxes_ignore (Tensor, optional): Bounding boxes
+                which can be ignored. Default None.
+        Returns:
+            tuple[Tensor]: A tuple containing the following for one image.
+            - labels (Tensor): Labels of each image.
+            - label_weights (Tensor): Label weights of each image.
+            - bbox_targets (Tensor): BBox targets of each image.
+            - bbox_weights (Tensor): BBox weights of each image.
+            - pos_inds (Tensor): Sampled positive indices for each image.
+            - neg_inds (Tensor): Sampled negative indices for each image.
+        """
+
+        num_bboxes = bbox_pred.size(0)
+        # assigner and sampler
+        gt_c = gt_bboxes.shape[-1]
+
+        assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes,
+                                             gt_labels, gt_bboxes_ignore)
+
+        sampling_result = self.sampler.sample(assign_result, bbox_pred,
+                                              gt_bboxes)
+        pos_inds = sampling_result.pos_inds
+        neg_inds = sampling_result.neg_inds
+
+        # label targets
+        labels = gt_bboxes.new_full((num_bboxes,),
+                                    self.num_classes,
+                                    dtype=torch.long)
+        labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
+        label_weights = gt_bboxes.new_ones(num_bboxes)
+
+        # bbox targets
+        bbox_targets = torch.zeros_like(bbox_pred)[..., :gt_c]
+        bbox_weights = torch.zeros_like(bbox_pred)
+        bbox_weights[pos_inds] = 1.0
+
+        # DETR
+        bbox_targets[pos_inds] = sampling_result.pos_gt_bboxes
+        return (labels, label_weights, bbox_targets, bbox_weights,
+                pos_inds, neg_inds)
+
+    def get_targets(self,
+                    cls_scores_list,
+                    bbox_preds_list,
+                    gt_bboxes_list,
+                    gt_labels_list,
+                    gt_bboxes_ignore_list=None):
+        """Compute regression and classification targets for a batch of
+        images.
+        Outputs from a single decoder layer of a single feature level are used.
+        Args:
+            cls_scores_list (list[Tensor]): Box score logits from a single
+                decoder layer for each image with shape [num_query,
+                cls_out_channels].
+            bbox_preds_list (list[Tensor]): Sigmoid outputs from a single
+                decoder layer for each image, with normalized coordinate
+                (cx, cy, w, h) and shape [num_query, 4].
+            gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
+                with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels_list (list[Tensor]): Ground truth class indices for each
+                image with shape (num_gts, ).
+            gt_bboxes_ignore_list (list[Tensor], optional): Bounding
+                boxes which can be ignored for each image. Default None.
+        Returns:
+            tuple: A tuple containing the following targets.
+            - labels_list (list[Tensor]): Labels for all images.
+            - label_weights_list (list[Tensor]): Label weights for all \
+                images.
+            - bbox_targets_list (list[Tensor]): BBox targets for all \
+                images.
+            - bbox_weights_list (list[Tensor]): BBox weights for all \
+                images.
+            - num_total_pos (int): Number of positive samples in all \
+                images.
+            - num_total_neg (int): Number of negative samples in all \
+                images.
+        """
+        assert gt_bboxes_ignore_list is None, \
+            'Only supports gt_bboxes_ignore set to None.'
+        num_imgs = len(cls_scores_list)
+        gt_bboxes_ignore_list = [
+            gt_bboxes_ignore_list for _ in range(num_imgs)
+        ]
+
+        (labels_list, label_weights_list, bbox_targets_list,
+         bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply(
+             self._get_target_single, cls_scores_list, bbox_preds_list,
+             gt_labels_list, gt_bboxes_list, gt_bboxes_ignore_list)
+        num_total_pos = sum(inds.numel() for inds in pos_inds_list)
+        num_total_neg = sum(inds.numel() for inds in neg_inds_list)
+        return (labels_list, label_weights_list, bbox_targets_list,
+                bbox_weights_list, num_total_pos, num_total_neg)
+
+    def loss_single(self,
+                    cls_scores,
+                    bbox_preds,
+                    gt_bboxes_list,
+                    gt_labels_list,
+                    gt_bboxes_ignore_list=None):
+        """Loss function for outputs from a single decoder layer of a single
+        feature level.
+        Args:
+            cls_scores (Tensor): Box score logits from a single decoder layer
+                for all images. Shape [bs, num_query, cls_out_channels].
+            bbox_preds (Tensor): Sigmoid outputs from a single decoder layer
+                for all images, with normalized coordinate (cx, cy, w, h) and
+                shape [bs, num_query, 4].
+            gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
+                with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels_list (list[Tensor]): Ground truth class indices for each
+                image with shape (num_gts, ).
+            gt_bboxes_ignore_list (list[Tensor], optional): Bounding
+                boxes which can be ignored for each image. Default None.
+        Returns:
+            dict[str, Tensor]: A dictionary of loss components for outputs from
+                a single decoder layer.
+ """ + num_imgs = cls_scores.size(0) + cls_scores_list = [cls_scores[i] for i in range(num_imgs)] + bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)] + cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list, + gt_bboxes_list, gt_labels_list, + gt_bboxes_ignore_list) + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + labels = torch.cat(labels_list, 0) + label_weights = torch.cat(label_weights_list, 0) + bbox_targets = torch.cat(bbox_targets_list, 0) + bbox_weights = torch.cat(bbox_weights_list, 0) + + # classification loss + cls_scores = cls_scores.reshape(-1, self.cls_out_channels) + # construct weighted avg_factor to match with the official DETR repo + cls_avg_factor = num_total_pos * 1.0 + \ + num_total_neg * self.bg_cls_weight + if self.sync_cls_avg_factor: + cls_avg_factor = reduce_mean( + cls_scores.new_tensor([cls_avg_factor])) + + cls_avg_factor = max(cls_avg_factor, 1) + loss_cls = self.loss_cls( + cls_scores, labels, label_weights, avg_factor=cls_avg_factor) + + # Compute the average number of gt boxes accross all gpus, for + # normalization purposes + num_total_pos = loss_cls.new_tensor([num_total_pos]) + num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() + + # regression L1 loss + bbox_preds = bbox_preds.reshape(-1, bbox_preds.size(-1)) + normalized_bbox_targets = normalize_bbox(bbox_targets, self.pc_range) + isnotnan = torch.isfinite(normalized_bbox_targets).all(dim=-1) + bbox_weights = bbox_weights * self.code_weights + + loss_bbox = self.loss_bbox( + bbox_preds[isnotnan, :10], normalized_bbox_targets[isnotnan, + :10], bbox_weights[isnotnan, :10], + avg_factor=num_total_pos) + loss_cls = torch.nan_to_num(loss_cls) + loss_bbox = torch.nan_to_num(loss_bbox) + return loss_cls, loss_bbox + + @force_fp32(apply_to=('preds_dicts')) + def loss(self, + gt_bboxes_list, + gt_labels_list, + preds_dicts, + gt_bboxes_ignore=None, + img_metas=None): + """"Loss function. + Args: + + gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image + with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (num_gts, ). + preds_dicts: + all_cls_scores (Tensor): Classification score of all + decoder layers, has shape + [nb_dec, bs, num_query, cls_out_channels]. + all_bbox_preds (Tensor): Sigmoid regression + outputs of all decode layers. Each is a 4D-tensor with + normalized coordinate format (cx, cy, w, h) and shape + [nb_dec, bs, num_query, 4]. + enc_cls_scores (Tensor): Classification scores of + points on encode feature map , has shape + (N, h*w, num_classes). Only be passed when as_two_stage is + True, otherwise is None. + enc_bbox_preds (Tensor): Regression results of each points + on the encode feature map, has shape (N, h*w, 4). Only be + passed when as_two_stage is True, otherwise is None. + gt_bboxes_ignore (list[Tensor], optional): Bounding boxes + which can be ignored for each image. Default None. + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + assert gt_bboxes_ignore is None, \ + f'{self.__class__.__name__} only supports ' \ + f'for gt_bboxes_ignore setting to None.' 
+ + all_cls_scores = preds_dicts['all_cls_scores'] + all_bbox_preds = preds_dicts['all_bbox_preds'] + enc_cls_scores = preds_dicts['enc_cls_scores'] + enc_bbox_preds = preds_dicts['enc_bbox_preds'] + + num_dec_layers = len(all_cls_scores) + device = gt_labels_list[0].device + + gt_bboxes_list = [torch.cat( + (gt_bboxes.gravity_center, gt_bboxes.tensor[:, 3:]), + dim=1).to(device) for gt_bboxes in gt_bboxes_list] + + all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)] + all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] + all_gt_bboxes_ignore_list = [ + gt_bboxes_ignore for _ in range(num_dec_layers) + ] + + losses_cls, losses_bbox = multi_apply( + self.loss_single, all_cls_scores, all_bbox_preds, + all_gt_bboxes_list, all_gt_labels_list, + all_gt_bboxes_ignore_list) + + loss_dict = dict() + # loss of proposal generated from encode feature map. + if enc_cls_scores is not None: + binary_labels_list = [ + torch.zeros_like(gt_labels_list[i]) + for i in range(len(all_gt_labels_list)) + ] + enc_loss_cls, enc_losses_bbox = \ + self.loss_single(enc_cls_scores, enc_bbox_preds, + gt_bboxes_list, binary_labels_list, gt_bboxes_ignore) + loss_dict['enc_loss_cls'] = enc_loss_cls + loss_dict['enc_loss_bbox'] = enc_losses_bbox + + # loss from the last decoder layer + loss_dict['loss_cls'] = losses_cls[-1] + loss_dict['loss_bbox'] = losses_bbox[-1] + + # loss from other decoder layers + num_dec_layer = 0 + for loss_cls_i, loss_bbox_i in zip(losses_cls[:-1], + losses_bbox[:-1]): + loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i + loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i + num_dec_layer += 1 + return loss_dict + + @force_fp32(apply_to=('preds_dicts')) + def get_bboxes(self, preds_dicts, img_metas, rescale=False): + """Generate bboxes from bbox head predictions. + Args: + preds_dicts (tuple[list[dict]]): Prediction results. + img_metas (list[dict]): Point cloud and image's meta info. + Returns: + list[dict]: Decoded bbox, scores and labels after nms. 
+ """ + + preds_dicts = self.bbox_coder.decode(preds_dicts) + + num_samples = len(preds_dicts) + ret_list = [] + for i in range(num_samples): + preds = preds_dicts[i] + bboxes = preds['bboxes'] + + bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 5] * 0.5 + + code_size = bboxes.shape[-1] + bboxes = img_metas[i]['box_type_3d'](bboxes, code_size) + scores = preds['scores'] + labels = preds['labels'] + bbox_index = preds['bbox_index'] + mask = preds['mask'] + + ret_list.append([bboxes, scores, labels, bbox_index, mask]) + + return ret_list diff --git a/mmcv/models/dense_heads/track_head_plugin/__init__.py b/mmcv/models/dense_heads/track_head_plugin/__init__.py new file mode 100644 index 0000000..f7933ab --- /dev/null +++ b/mmcv/models/dense_heads/track_head_plugin/__init__.py @@ -0,0 +1,3 @@ +from .modules import MemoryBank, QueryInteractionModule +from .track_instance import Instances +from .tracker import RuntimeTrackerBase \ No newline at end of file diff --git a/mmcv/models/dense_heads/track_head_plugin/modules.py b/mmcv/models/dense_heads/track_head_plugin/modules.py new file mode 100644 index 0000000..db80e6b --- /dev/null +++ b/mmcv/models/dense_heads/track_head_plugin/modules.py @@ -0,0 +1,254 @@ +import torch +import torch.nn.functional as F +from torch import nn +from .track_instance import Instances + +# MemoryBank +class MemoryBank(nn.Module): + + def __init__(self, + args, + dim_in, hidden_dim, dim_out, + ): + super().__init__() + self._build_layers(args, dim_in, hidden_dim, dim_out) + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def _build_layers(self, args, dim_in, hidden_dim, dim_out): + self.save_thresh = args['memory_bank_score_thresh'] + self.save_period = 3 + self.max_his_length = args['memory_bank_len'] + + self.save_proj = nn.Linear(dim_in, dim_in) + + self.temporal_attn = nn.MultiheadAttention(dim_in, 8, dropout=0) + self.temporal_fc1 = nn.Linear(dim_in, hidden_dim) + self.temporal_fc2 = nn.Linear(hidden_dim, dim_in) + self.temporal_norm1 = nn.LayerNorm(dim_in) + self.temporal_norm2 = nn.LayerNorm(dim_in) + + def update(self, track_instances): + embed = track_instances.output_embedding[:, None] #( N, 1, 256) + scores = track_instances.scores + mem_padding_mask = track_instances.mem_padding_mask + device = embed.device + + save_period = track_instances.save_period + if self.training: + saved_idxes = scores > 0 + else: + saved_idxes = (save_period == 0) & (scores > self.save_thresh) + # saved_idxes = (save_period == 0) + save_period[save_period > 0] -= 1 + save_period[saved_idxes] = self.save_period + + saved_embed = embed[saved_idxes] + if len(saved_embed) > 0: + prev_embed = track_instances.mem_bank[saved_idxes] + save_embed = self.save_proj(saved_embed) + mem_padding_mask[saved_idxes] = torch.cat([mem_padding_mask[saved_idxes, 1:], torch.zeros((len(saved_embed), 1), dtype=torch.bool, device=device)], dim=1) + track_instances.mem_bank = track_instances.mem_bank.clone() + track_instances.mem_bank[saved_idxes] = torch.cat([prev_embed[:, 1:], save_embed], dim=1) + + def _forward_temporal_attn(self, track_instances): + if len(track_instances) == 0: + return track_instances + + key_padding_mask = track_instances.mem_padding_mask # [n_, memory_bank_len] + + valid_idxes = key_padding_mask[:, -1] == 0 + embed = track_instances.output_embedding[valid_idxes] # (n, 256) + + if len(embed) > 0: + prev_embed = track_instances.mem_bank[valid_idxes] + key_padding_mask = key_padding_mask[valid_idxes] + embed2 = self.temporal_attn( + embed[None], # (num_track, 
dim) to (1, num_track, dim) + prev_embed.transpose(0, 1), # (num_track, mem_len, dim) to (mem_len, num_track, dim) + prev_embed.transpose(0, 1), + key_padding_mask=key_padding_mask, + )[0][0] + + embed = self.temporal_norm1(embed + embed2) + embed2 = self.temporal_fc2(F.relu(self.temporal_fc1(embed))) + embed = self.temporal_norm2(embed + embed2) + track_instances.output_embedding = track_instances.output_embedding.clone() + track_instances.output_embedding[valid_idxes] = embed + + return track_instances + + def forward_temporal_attn(self, track_instances): + return self._forward_temporal_attn(track_instances) + + def forward(self, track_instances: Instances, update_bank=True) -> Instances: + track_instances = self._forward_temporal_attn(track_instances) + if update_bank: + self.update(track_instances) + return track_instances + + +# QIM +class QueryInteractionBase(nn.Module): + + def __init__(self, args, dim_in, hidden_dim, dim_out): + super().__init__() + self.args = args + self._build_layers(args, dim_in, hidden_dim, dim_out) + self._reset_parameters() + + def _build_layers(self, args, dim_in, hidden_dim, dim_out): + raise NotImplementedError() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def _select_active_tracks(self, data: dict) -> Instances: + raise NotImplementedError() + + def _update_track_embedding(self, track_instances): + raise NotImplementedError() + +class QueryInteractionModule(QueryInteractionBase): + + def __init__(self, args, dim_in, hidden_dim, dim_out): + super().__init__(args, dim_in, hidden_dim, dim_out) + self.random_drop = args["random_drop"] + self.fp_ratio = args["fp_ratio"] + self.update_query_pos = args["update_query_pos"] + + def _build_layers(self, args, dim_in, hidden_dim, dim_out): + dropout = args["merger_dropout"] + + self.self_attn = nn.MultiheadAttention(dim_in, 8, dropout) + self.linear1 = nn.Linear(dim_in, hidden_dim) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(hidden_dim, dim_in) + + if args["update_query_pos"]: + self.linear_pos1 = nn.Linear(dim_in, hidden_dim) + self.linear_pos2 = nn.Linear(hidden_dim, dim_in) + self.dropout_pos1 = nn.Dropout(dropout) + self.dropout_pos2 = nn.Dropout(dropout) + self.norm_pos = nn.LayerNorm(dim_in) + + self.linear_feat1 = nn.Linear(dim_in, hidden_dim) + self.linear_feat2 = nn.Linear(hidden_dim, dim_in) + self.dropout_feat1 = nn.Dropout(dropout) + self.dropout_feat2 = nn.Dropout(dropout) + self.norm_feat = nn.LayerNorm(dim_in) + + self.norm1 = nn.LayerNorm(dim_in) + self.norm2 = nn.LayerNorm(dim_in) + + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.activation = F.relu + + def _update_track_embedding(self, track_instances: Instances) -> Instances: + if len(track_instances) == 0: + return track_instances + dim = track_instances.query.shape[1] + out_embed = track_instances.output_embedding + query_pos = track_instances.query[:, :dim // 2] + query_feat = track_instances.query[:, dim // 2:] + q = k = query_pos + out_embed + + # attention + tgt = out_embed + tgt2 = self.self_attn(q[:, None], k[:, None], value=tgt[:, None])[0][:, + 0] + tgt = tgt + self.dropout1(tgt2) + tgt = self.norm1(tgt) + + # ffn + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout2(tgt2) + tgt = self.norm2(tgt) + + if self.update_query_pos: + # ffn: linear_pos2 + query_pos2 = self.linear_pos2( + self.dropout_pos1(self.activation(self.linear_pos1(tgt)))) + query_pos = query_pos + 
self.dropout_pos2(query_pos2) + query_pos = self.norm_pos(query_pos) + track_instances.query[:, :dim // 2] = query_pos + + query_feat2 = self.linear_feat2( + self.dropout_feat1(self.activation(self.linear_feat1(tgt)))) + query_feat = query_feat + self.dropout_feat2(query_feat2) + query_feat = self.norm_feat(query_feat) + track_instances.query[:, dim // 2:] = query_feat + # track_instances.ref_pts = inverse_sigmoid(track_instances.pred_boxes[:, :2].detach().clone()) + # update ref_pts using track_instances.pred_boxes + return track_instances + + def _random_drop_tracks(self, track_instances: Instances) -> Instances: + drop_probability = self.random_drop + if drop_probability > 0 and len(track_instances) > 0: + keep_idxes = torch.rand_like(track_instances.scores) > drop_probability + track_instances = track_instances[keep_idxes] + return track_instances + + def _add_fp_tracks(self, track_instances: Instances, + active_track_instances: Instances) -> Instances: + """ + self.fp_ratio is used to control num(add_fp) / num(active) + """ + inactive_instances = track_instances[track_instances.obj_idxes < 0] + + # add fp for each active track in a specific probability. + fp_prob = torch.ones_like( + active_track_instances.scores) * self.fp_ratio + selected_active_track_instances = active_track_instances[ + torch.bernoulli(fp_prob).bool()] + num_fp = len(selected_active_track_instances) + + if len(inactive_instances) > 0 and num_fp > 0: + if num_fp >= len(inactive_instances): + fp_track_instances = inactive_instances + else: + # randomly select num_fp from inactive_instances + # fp_indexes = np.random.permutation(len(inactive_instances)) + # fp_indexes = fp_indexes[:num_fp] + # fp_track_instances = inactive_instances[fp_indexes] + + # v2: select the fps with top scores rather than random selection + fp_indexes = torch.argsort(inactive_instances.scores)[-num_fp:] + fp_track_instances = inactive_instances[fp_indexes] + + merged_track_instances = Instances.cat( + [active_track_instances, fp_track_instances]) + return merged_track_instances + + return active_track_instances + + def _select_active_tracks(self, data: dict) -> Instances: + track_instances: Instances = data["track_instances"] + if self.training: + active_idxes = (track_instances.obj_idxes >= + 0) & (track_instances.iou > 0.5) + active_track_instances = track_instances[active_idxes] + # set -2 instead of -1 to ensure that these tracks will not be selected in matching. 
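+            # MOTR-style training augmentation: active tracks are randomly
+            # dropped and high-scoring inactive ones are re-injected as false
+            # positives, so the interaction module sees both missed and
+            # spurious tracks.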
+ active_track_instances = self._random_drop_tracks( + active_track_instances) + if self.fp_ratio > 0: + active_track_instances = self._add_fp_tracks( + track_instances, active_track_instances) + else: + active_track_instances = track_instances[ + track_instances.obj_idxes >= 0] + + return active_track_instances + + def forward(self, data) -> Instances: + active_track_instances = self._select_active_tracks(data) + active_track_instances = self._update_track_embedding( + active_track_instances) + init_track_instances: Instances = data["init_track_instances"] + merged_track_instances = Instances.cat( + [init_track_instances, active_track_instances]) + return merged_track_instances diff --git a/mmcv/models/dense_heads/track_head_plugin/track_instance.py b/mmcv/models/dense_heads/track_head_plugin/track_instance.py new file mode 100644 index 0000000..bfc7864 --- /dev/null +++ b/mmcv/models/dense_heads/track_head_plugin/track_instance.py @@ -0,0 +1,198 @@ +import itertools +from typing import Any, Dict, List, Tuple, Union +import torch + + +class Instances: + """ + This class represents a list of instances in an image. + It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields". + All fields must have the same ``__len__`` which is the number of instances. + All other (non-field) attributes of this class are considered private: + they must start with '_' and are not modifiable by a user. + Some basic usage: + 1. Set/get/check a field: + .. code-block:: python + instances.gt_boxes = Boxes(...) + print(instances.pred_masks) # a tensor of shape (N, H, W) + print('gt_masks' in instances) + 2. ``len(instances)`` returns the number of instances + 3. Indexing: ``instances[indices]`` will apply the indexing on all the fields + and returns a new :class:`Instances`. + Typically, ``indices`` is a integer vector of indices, + or a binary mask of length ``num_instances`` + .. code-block:: python + category_3_detections = instances[instances.pred_classes == 3] + confident_detections = instances[instances.scores > 0.9] + """ + + def __init__(self, image_size: Tuple[int, int], **kwargs: Any): + """ + Args: + image_size (height, width): the spatial size of the image. + kwargs: fields to add to this `Instances`. + """ + self._image_size = image_size + self._fields: Dict[str, Any] = {} + for k, v in kwargs.items(): + self.set(k, v) + + @property + def image_size(self) -> Tuple[int, int]: + """ + Returns: + tuple: height, width + """ + return self._image_size + + def __setattr__(self, name: str, val: Any) -> None: + if name.startswith("_"): + super().__setattr__(name, val) + else: + self.set(name, val) + + def __getattr__(self, name: str) -> Any: + if name == "_fields" or name not in self._fields: + raise AttributeError("Cannot find field '{}' in the given Instances!".format(name)) + return self._fields[name] + + def set(self, name: str, value: Any) -> None: + """ + Set the field named `name` to `value`. + The length of `value` must be the number of instances, + and must agree with other existing fields in this object. + """ + data_len = len(value) + if len(self._fields): + assert ( + len(self) == data_len + ), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self)) + self._fields[name] = value + + def has(self, name: str) -> bool: + """ + Returns: + bool: whether the field called `name` exists. + """ + return name in self._fields + + def remove(self, name: str) -> None: + """ + Remove the field called `name`. 
+ """ + del self._fields[name] + + def get(self, name: str) -> Any: + """ + Returns the field called `name`. + """ + return self._fields[name] + + def get_fields(self) -> Dict[str, Any]: + """ + Returns: + dict: a dict which maps names (str) to data of the fields + Modifying the returned dict will modify this instance. + """ + return self._fields + + # Tensor-like methods + def to(self, *args: Any, **kwargs: Any) -> "Instances": + """ + Returns: + Instances: all fields are called with a `to(device)`, if the field has this method. + """ + ret = Instances(self._image_size) + for k, v in self._fields.items(): + if hasattr(v, "to"): + v = v.to(*args, **kwargs) + ret.set(k, v) + return ret + + def numpy(self): + ret = Instances(self._image_size) + for k, v in self._fields.items(): + if hasattr(v, "numpy"): + v = v.numpy() + ret.set(k, v) + return ret + + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances": + """ + Args: + item: an index-like object and will be used to index all the fields. + Returns: + If `item` is a string, return the data in the corresponding field. + Otherwise, returns an `Instances` where all fields are indexed by `item`. + """ + if type(item) == int: + if item >= len(self) or item < -len(self): + raise IndexError("Instances index out of range!") + else: + item = slice(item, None, len(self)) + + ret = Instances(self._image_size) + for k, v in self._fields.items(): + # print(k, type(item), 'getitem', item.type(), item.dtype) + # if index by torch.BoolTensor + if k == 'kalman_models' and isinstance(item, torch.Tensor): + # print(item.shape, 'in get item') + ret_list = [] + for i, if_true in enumerate(item): + if if_true: + ret_list.append(self.kalman_models[i]) + ret.set(k, ret_list) + + else: + ret.set(k, v[item]) + return ret + + def __len__(self) -> int: + for v in self._fields.values(): + # use __len__ because len() has to be int and is not friendly to tracing + return v.__len__() + raise NotImplementedError("Empty Instances does not support __len__!") + + def __iter__(self): + raise NotImplementedError("`Instances` object is not iterable!") + + @staticmethod + def cat(instance_lists: List["Instances"]) -> "Instances": + """ + Args: + instance_lists (list[Instances]) + Returns: + Instances + """ + assert all(isinstance(i, Instances) for i in instance_lists) + assert len(instance_lists) > 0 + if len(instance_lists) == 1: + return instance_lists[0] + + image_size = instance_lists[0].image_size + for i in instance_lists[1:]: + assert i.image_size == image_size + ret = Instances(image_size) + for k in instance_lists[0]._fields.keys(): + values = [i.get(k) for i in instance_lists] + v0 = values[0] + if isinstance(v0, torch.Tensor): + values = torch.cat(values, dim=0) + elif isinstance(v0, list): + values = list(itertools.chain(*values)) + elif hasattr(type(v0), "cat"): + values = type(v0).cat(values) + else: + raise ValueError("Unsupported type {} for concatenation".format(type(v0))) + ret.set(k, values) + return ret + + def __str__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={}, ".format(len(self)) + s += "image_height={}, ".format(self._image_size[0]) + s += "image_width={}, ".format(self._image_size[1]) + s += "fields=[{}])".format(", ".join((f"{k}: {v}" for k, v in self._fields.items()))) + return s + + __repr__ = __str__ \ No newline at end of file diff --git a/mmcv/models/dense_heads/track_head_plugin/tracker.py b/mmcv/models/dense_heads/track_head_plugin/tracker.py new file mode 100644 index 0000000..1355436 --- 
/dev/null
+++ b/mmcv/models/dense_heads/track_head_plugin/tracker.py
@@ -0,0 +1,42 @@
+from .track_instance import Instances
+from mmcv.core.bbox.iou_calculators.iou3d_calculator import (
+    bbox_overlaps_nearest_3d as iou_3d, )
+from mmcv.core.bbox.util import denormalize_bbox
+
+class RuntimeTrackerBase(object):
+    def __init__(self, score_thresh=0.5, filter_score_thresh=0.4, miss_tolerance=5):
+        self.score_thresh = score_thresh
+        self.filter_score_thresh = filter_score_thresh
+        self.miss_tolerance = miss_tolerance
+        self.max_obj_id = 0
+
+    def clear(self):
+        self.max_obj_id = 0
+
+    def update(self, track_instances: Instances, iou_thre=None):
+        track_instances.disappear_time[track_instances.scores >= self.score_thresh] = 0
+        for i in range(len(track_instances)):
+            if (
+                track_instances.obj_idxes[i] == -1
+                and track_instances.scores[i] >= self.score_thresh
+            ):
+                if iou_thre is not None and track_instances.pred_boxes[track_instances.obj_idxes >= 0].shape[0] != 0:
+                    iou3ds = iou_3d(denormalize_bbox(track_instances.pred_boxes[i].unsqueeze(0), None)[..., :7], denormalize_bbox(track_instances.pred_boxes[track_instances.obj_idxes >= 0], None)[..., :7])
+                    if iou3ds.max() > iou_thre:
+                        continue
+                # new track
+                # print("track {} has score {}, assign obj_id {}".format(i, track_instances.scores[i], self.max_obj_id))
+                track_instances.obj_idxes[i] = self.max_obj_id
+                self.max_obj_id += 1
+            elif (
+                track_instances.obj_idxes[i] >= 0
+                and track_instances.scores[i] < self.filter_score_thresh
+            ):
+                # increment the miss ("sleep") counter
+                track_instances.disappear_time[i] += 1
+                if track_instances.disappear_time[i] >= self.miss_tolerance:
+                    # mark dead tracklets: set the obj_id to -1.
+                    # TODO: remove it by following functions
+                    # Then this track will be removed by TrackEmbeddingLayer.
+                    track_instances.obj_idxes[i] = -1
+
\ No newline at end of file
diff --git a/mmcv/models/dense_heads/train_mixins.py b/mmcv/models/dense_heads/train_mixins.py
new file mode 100644
index 0000000..3d387f1
--- /dev/null
+++ b/mmcv/models/dense_heads/train_mixins.py
@@ -0,0 +1,347 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import numpy as np
+import torch
+
+from mmcv.core import limit_period
+from mmcv.core import images_to_levels, multi_apply
+
+
+class AnchorTrainMixin(object):
+    """Mixin class for target assigning of dense heads."""
+
+    def anchor_target_3d(self,
+                         anchor_list,
+                         gt_bboxes_list,
+                         input_metas,
+                         gt_bboxes_ignore_list=None,
+                         gt_labels_list=None,
+                         label_channels=1,
+                         num_classes=1,
+                         sampling=True):
+        """Compute regression and classification targets for anchors.
+
+        Args:
+            anchor_list (list[list]): Multi level anchors of each image.
+            gt_bboxes_list (list[:obj:`BaseInstance3DBoxes`]): Ground truth
+                bboxes of each image.
+            input_metas (list[dict]): Meta info of each image.
+            gt_bboxes_ignore_list (None | list): Ignore list of gt bboxes.
+            gt_labels_list (list[torch.Tensor]): Gt labels of batches.
+            label_channels (int): The channel of labels.
+            num_classes (int): The number of classes.
+            sampling (bool): Whether to sample anchors.
+
+        Returns:
+            tuple (list, list, list, list, list, list, int, int):
+                Anchor targets, including labels, label weights,
+                bbox targets, bbox weights, direction targets,
+                direction weights, number of positive anchors and
+                number of negative anchors.
+ """ + num_imgs = len(input_metas) + assert len(anchor_list) == num_imgs + + if isinstance(anchor_list[0][0], list): + # sizes of anchors are different + # anchor number of a single level + num_level_anchors = [ + sum([anchor.size(0) for anchor in anchors]) + for anchors in anchor_list[0] + ] + for i in range(num_imgs): + anchor_list[i] = anchor_list[i][0] + else: + # anchor number of multi levels + num_level_anchors = [ + anchors.view(-1, self.box_code_size).size(0) + for anchors in anchor_list[0] + ] + # concat all level anchors and flags to a single tensor + for i in range(num_imgs): + anchor_list[i] = torch.cat(anchor_list[i]) + + # compute targets for each image + if gt_bboxes_ignore_list is None: + gt_bboxes_ignore_list = [None for _ in range(num_imgs)] + if gt_labels_list is None: + gt_labels_list = [None for _ in range(num_imgs)] + + (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, + all_dir_targets, all_dir_weights, pos_inds_list, + neg_inds_list) = multi_apply( + self.anchor_target_3d_single, + anchor_list, + gt_bboxes_list, + gt_bboxes_ignore_list, + gt_labels_list, + input_metas, + label_channels=label_channels, + num_classes=num_classes, + sampling=sampling) + + # no valid anchors + if any([labels is None for labels in all_labels]): + return None + # sampled anchors of all images + num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) + num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) + # split targets to a list w.r.t. multiple levels + labels_list = images_to_levels(all_labels, num_level_anchors) + label_weights_list = images_to_levels(all_label_weights, + num_level_anchors) + bbox_targets_list = images_to_levels(all_bbox_targets, + num_level_anchors) + bbox_weights_list = images_to_levels(all_bbox_weights, + num_level_anchors) + dir_targets_list = images_to_levels(all_dir_targets, num_level_anchors) + dir_weights_list = images_to_levels(all_dir_weights, num_level_anchors) + return (labels_list, label_weights_list, bbox_targets_list, + bbox_weights_list, dir_targets_list, dir_weights_list, + num_total_pos, num_total_neg) + + def anchor_target_3d_single(self, + anchors, + gt_bboxes, + gt_bboxes_ignore, + gt_labels, + input_meta, + label_channels=1, + num_classes=1, + sampling=True): + """Compute targets of anchors in single batch. + + Args: + anchors (torch.Tensor): Concatenated multi-level anchor. + gt_bboxes (:obj:`BaseInstance3DBoxes`): Gt bboxes. + gt_bboxes_ignore (torch.Tensor): Ignored gt bboxes. + gt_labels (torch.Tensor): Gt class labels. + input_meta (dict): Meta info of each image. + label_channels (int): The channel of labels. + num_classes (int): The number of classes. + sampling (bool): Whether to sample anchors. + + Returns: + tuple[torch.Tensor]: Anchor targets. 
+ """ + if isinstance(self.bbox_assigner, + list) and (not isinstance(anchors, list)): + feat_size = anchors.size(0) * anchors.size(1) * anchors.size(2) + rot_angles = anchors.size(-2) + assert len(self.bbox_assigner) == anchors.size(-3) + (total_labels, total_label_weights, total_bbox_targets, + total_bbox_weights, total_dir_targets, total_dir_weights, + total_pos_inds, total_neg_inds) = [], [], [], [], [], [], [], [] + current_anchor_num = 0 + for i, assigner in enumerate(self.bbox_assigner): + current_anchors = anchors[..., i, :, :].reshape( + -1, self.box_code_size) + current_anchor_num += current_anchors.size(0) + if self.assign_per_class: + gt_per_cls = (gt_labels == i) + anchor_targets = self.anchor_target_single_assigner( + assigner, current_anchors, gt_bboxes[gt_per_cls, :], + gt_bboxes_ignore, gt_labels[gt_per_cls], input_meta, + num_classes, sampling) + else: + anchor_targets = self.anchor_target_single_assigner( + assigner, current_anchors, gt_bboxes, gt_bboxes_ignore, + gt_labels, input_meta, num_classes, sampling) + + (labels, label_weights, bbox_targets, bbox_weights, + dir_targets, dir_weights, pos_inds, neg_inds) = anchor_targets + total_labels.append(labels.reshape(feat_size, 1, rot_angles)) + total_label_weights.append( + label_weights.reshape(feat_size, 1, rot_angles)) + total_bbox_targets.append( + bbox_targets.reshape(feat_size, 1, rot_angles, + anchors.size(-1))) + total_bbox_weights.append( + bbox_weights.reshape(feat_size, 1, rot_angles, + anchors.size(-1))) + total_dir_targets.append( + dir_targets.reshape(feat_size, 1, rot_angles)) + total_dir_weights.append( + dir_weights.reshape(feat_size, 1, rot_angles)) + total_pos_inds.append(pos_inds) + total_neg_inds.append(neg_inds) + + total_labels = torch.cat(total_labels, dim=-2).reshape(-1) + total_label_weights = torch.cat( + total_label_weights, dim=-2).reshape(-1) + total_bbox_targets = torch.cat( + total_bbox_targets, dim=-3).reshape(-1, anchors.size(-1)) + total_bbox_weights = torch.cat( + total_bbox_weights, dim=-3).reshape(-1, anchors.size(-1)) + total_dir_targets = torch.cat( + total_dir_targets, dim=-2).reshape(-1) + total_dir_weights = torch.cat( + total_dir_weights, dim=-2).reshape(-1) + total_pos_inds = torch.cat(total_pos_inds, dim=0).reshape(-1) + total_neg_inds = torch.cat(total_neg_inds, dim=0).reshape(-1) + return (total_labels, total_label_weights, total_bbox_targets, + total_bbox_weights, total_dir_targets, total_dir_weights, + total_pos_inds, total_neg_inds) + elif isinstance(self.bbox_assigner, list) and isinstance( + anchors, list): + # class-aware anchors with different feature map sizes + assert len(self.bbox_assigner) == len(anchors), \ + 'The number of bbox assigners and anchors should be the same.' 
+ (total_labels, total_label_weights, total_bbox_targets, + total_bbox_weights, total_dir_targets, total_dir_weights, + total_pos_inds, total_neg_inds) = [], [], [], [], [], [], [], [] + current_anchor_num = 0 + for i, assigner in enumerate(self.bbox_assigner): + current_anchors = anchors[i] + current_anchor_num += current_anchors.size(0) + if self.assign_per_class: + gt_per_cls = (gt_labels == i) + anchor_targets = self.anchor_target_single_assigner( + assigner, current_anchors, gt_bboxes[gt_per_cls, :], + gt_bboxes_ignore, gt_labels[gt_per_cls], input_meta, + num_classes, sampling) + else: + anchor_targets = self.anchor_target_single_assigner( + assigner, current_anchors, gt_bboxes, gt_bboxes_ignore, + gt_labels, input_meta, num_classes, sampling) + + (labels, label_weights, bbox_targets, bbox_weights, + dir_targets, dir_weights, pos_inds, neg_inds) = anchor_targets + total_labels.append(labels) + total_label_weights.append(label_weights) + total_bbox_targets.append( + bbox_targets.reshape(-1, anchors[i].size(-1))) + total_bbox_weights.append( + bbox_weights.reshape(-1, anchors[i].size(-1))) + total_dir_targets.append(dir_targets) + total_dir_weights.append(dir_weights) + total_pos_inds.append(pos_inds) + total_neg_inds.append(neg_inds) + + total_labels = torch.cat(total_labels, dim=0) + total_label_weights = torch.cat(total_label_weights, dim=0) + total_bbox_targets = torch.cat(total_bbox_targets, dim=0) + total_bbox_weights = torch.cat(total_bbox_weights, dim=0) + total_dir_targets = torch.cat(total_dir_targets, dim=0) + total_dir_weights = torch.cat(total_dir_weights, dim=0) + total_pos_inds = torch.cat(total_pos_inds, dim=0) + total_neg_inds = torch.cat(total_neg_inds, dim=0) + return (total_labels, total_label_weights, total_bbox_targets, + total_bbox_weights, total_dir_targets, total_dir_weights, + total_pos_inds, total_neg_inds) + else: + return self.anchor_target_single_assigner(self.bbox_assigner, + anchors, gt_bboxes, + gt_bboxes_ignore, + gt_labels, input_meta, + num_classes, sampling) + + def anchor_target_single_assigner(self, + bbox_assigner, + anchors, + gt_bboxes, + gt_bboxes_ignore, + gt_labels, + input_meta, + num_classes=1, + sampling=True): + """Assign anchors and encode positive anchors. + + Args: + bbox_assigner (BaseAssigner): assign positive and negative boxes. + anchors (torch.Tensor): Concatenated multi-level anchor. + gt_bboxes (:obj:`BaseInstance3DBoxes`): Gt bboxes. + gt_bboxes_ignore (torch.Tensor): Ignored gt bboxes. + gt_labels (torch.Tensor): Gt class labels. + input_meta (dict): Meta info of each image. + num_classes (int): The number of classes. + sampling (bool): Whether to sample anchors. + + Returns: + tuple[torch.Tensor]: Anchor targets. 
+ """ + anchors = anchors.reshape(-1, anchors.size(-1)) + num_valid_anchors = anchors.shape[0] + bbox_targets = torch.zeros_like(anchors) + bbox_weights = torch.zeros_like(anchors) + dir_targets = anchors.new_zeros((anchors.shape[0]), dtype=torch.long) + dir_weights = anchors.new_zeros((anchors.shape[0]), dtype=torch.float) + labels = anchors.new_zeros(num_valid_anchors, dtype=torch.long) + label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) + if len(gt_bboxes) > 0: + if not isinstance(gt_bboxes, torch.Tensor): + gt_bboxes = gt_bboxes.tensor.to(anchors.device) + assign_result = bbox_assigner.assign(anchors, gt_bboxes, + gt_bboxes_ignore, gt_labels) + sampling_result = self.bbox_sampler.sample(assign_result, anchors, + gt_bboxes) + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + else: + pos_inds = torch.nonzero( + anchors.new_zeros((anchors.shape[0], ), dtype=torch.bool) > 0, + as_tuple=False).squeeze(-1).unique() + neg_inds = torch.nonzero( + anchors.new_zeros((anchors.shape[0], ), dtype=torch.bool) == 0, + as_tuple=False).squeeze(-1).unique() + + if gt_labels is not None: + labels += num_classes + if len(pos_inds) > 0: + pos_bbox_targets = self.bbox_coder.encode( + sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) + pos_dir_targets = get_direction_target( + sampling_result.pos_bboxes, + pos_bbox_targets, + self.dir_offset, + one_hot=False) + bbox_targets[pos_inds, :] = pos_bbox_targets + bbox_weights[pos_inds, :] = 1.0 + dir_targets[pos_inds] = pos_dir_targets + dir_weights[pos_inds] = 1.0 + + if gt_labels is None: + labels[pos_inds] = 1 + else: + labels[pos_inds] = gt_labels[ + sampling_result.pos_assigned_gt_inds] + if self.train_cfg.pos_weight <= 0: + label_weights[pos_inds] = 1.0 + else: + label_weights[pos_inds] = self.train_cfg.pos_weight + + if len(neg_inds) > 0: + label_weights[neg_inds] = 1.0 + return (labels, label_weights, bbox_targets, bbox_weights, dir_targets, + dir_weights, pos_inds, neg_inds) + + +def get_direction_target(anchors, + reg_targets, + dir_offset=0, + num_bins=2, + one_hot=True): + """Encode direction to 0 ~ num_bins-1. + + Args: + anchors (torch.Tensor): Concatenated multi-level anchor. + reg_targets (torch.Tensor): Bbox regression targets. + dir_offset (int): Direction offset. + num_bins (int): Number of bins to divide 2*PI. + one_hot (bool): Whether to encode as one hot. + + Returns: + torch.Tensor: Encoded direction targets. 
+ """ + rot_gt = reg_targets[..., 6] + anchors[..., 6] + offset_rot = limit_period(rot_gt - dir_offset, 0, 2 * np.pi) + dir_cls_targets = torch.floor(offset_rot / (2 * np.pi / num_bins)).long() + dir_cls_targets = torch.clamp(dir_cls_targets, min=0, max=num_bins - 1) + if one_hot: + dir_targets = torch.zeros( + *list(dir_cls_targets.shape), + num_bins, + dtype=anchors.dtype, + device=dir_cls_targets.device) + dir_targets.scatter_(dir_cls_targets.unsqueeze(dim=-1).long(), 1.0) + dir_cls_targets = dir_targets + return dir_cls_targets \ No newline at end of file diff --git a/mmcv/models/detectors/VAD.py b/mmcv/models/detectors/VAD.py new file mode 100644 index 0000000..34c6f5f --- /dev/null +++ b/mmcv/models/detectors/VAD.py @@ -0,0 +1,684 @@ +import time +import copy + +import torch +from mmcv.models import DETECTORS +from mmcv.core.bbox.transforms import bbox3d2result +from mmcv.utils import force_fp32, auto_fp16 +from scipy.optimize import linear_sum_assignment +from mmcv.models.detectors.mvx_two_stage import MVXTwoStageDetector + +from mmcv.models.utils.grid_mask import GridMask +from mmcv.models.dense_heads.planning_head_plugin.metric_stp3 import PlanningMetric + + +@DETECTORS.register_module() +class VAD(MVXTwoStageDetector): + """VAD model. + """ + def __init__(self, + use_grid_mask=False, + pts_voxel_layer=None, + pts_voxel_encoder=None, + pts_middle_encoder=None, + pts_fusion_layer=None, + img_backbone=None, + pts_backbone=None, + img_neck=None, + pts_neck=None, + pts_bbox_head=None, + img_roi_head=None, + img_rpn_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + video_test_mode=False, + prev_frame_num=0, + fut_ts=6, + fut_mode=6 + ): + + super(VAD, + self).__init__(pts_voxel_layer, pts_voxel_encoder, + pts_middle_encoder, pts_fusion_layer, + img_backbone, pts_backbone, img_neck, pts_neck, + pts_bbox_head, img_roi_head, img_rpn_head, + train_cfg, test_cfg, pretrained) + self.grid_mask = GridMask( + True, True, rotate=1, offset=False, ratio=0.5, mode=1, prob=0.7) + self.use_grid_mask = use_grid_mask + self.fp16_enabled = False + self.fut_ts = fut_ts + self.fut_mode = fut_mode + self.valid_fut_ts = pts_bbox_head['valid_fut_ts'] + self.prev_frame_num = prev_frame_num + self.prev_frame_infos = [] + + # temporal + self.video_test_mode = video_test_mode + self.prev_frame_info = { + 'prev_bev': None, + 'scene_token': None, + 'prev_pos': 0, + 'prev_angle': 0, + } + + self.planning_metric = None + + def extract_img_feat(self, img, img_metas, len_queue=None): + """Extract features of images.""" + B = img.size(0) + if img is not None: + + # input_shape = img.shape[-2:] + # # update real input shape of each single img + # for img_meta in img_metas: + # img_meta.update(input_shape=input_shape) + + if img.dim() == 5 and img.size(0) == 1: + img.squeeze_() + elif img.dim() == 5 and img.size(0) > 1: + B, N, C, H, W = img.size() + img = img.reshape(B * N, C, H, W) + if self.use_grid_mask: + img = self.grid_mask(img) + + img_feats = self.img_backbone(img) + if isinstance(img_feats, dict): + img_feats = list(img_feats.values()) + else: + return None + if self.with_img_neck: + img_feats = self.img_neck(img_feats) + + img_feats_reshaped = [] + for img_feat in img_feats: + BN, C, H, W = img_feat.size() + if len_queue is not None: + img_feats_reshaped.append(img_feat.view(int(B/len_queue), len_queue, int(BN / B), C, H, W)) + else: + img_feats_reshaped.append(img_feat.view(B, int(BN / B), C, H, W)) + return img_feats_reshaped + + @auto_fp16(apply_to=('img'), out_fp32=True) + def 
+ def extract_feat(self, img, img_metas=None, len_queue=None): + """Extract features from images and points.""" + + img_feats = self.extract_img_feat(img, img_metas, len_queue=len_queue) + + return img_feats + + def forward_pts_train(self, + pts_feats, + gt_bboxes_3d, + gt_labels_3d, + map_gt_bboxes_3d, + map_gt_labels_3d, + img_metas, + gt_bboxes_ignore=None, + map_gt_bboxes_ignore=None, + prev_bev=None, + ego_his_trajs=None, + ego_fut_trajs=None, + ego_fut_masks=None, + ego_fut_cmd=None, + ego_lcf_feat=None, + gt_attr_labels=None): + """Forward function. + Args: + pts_feats (list[torch.Tensor]): Features of point cloud branch + gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`]): Ground truth + boxes for each sample. + gt_labels_3d (list[torch.Tensor]): Ground truth labels for + boxes of each sample. + img_metas (list[dict]): Meta information of samples. + gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth + boxes to be ignored. Defaults to None. + prev_bev (torch.Tensor, optional): BEV features of previous frame. + Returns: + dict: Losses of each branch. + """ + + outs = self.pts_bbox_head(pts_feats, img_metas, prev_bev, + ego_his_trajs=ego_his_trajs, ego_lcf_feat=ego_lcf_feat) + loss_inputs = [ + gt_bboxes_3d, gt_labels_3d, map_gt_bboxes_3d, map_gt_labels_3d, + outs, ego_fut_trajs, ego_fut_masks, ego_fut_cmd, gt_attr_labels + ] + losses = self.pts_bbox_head.loss(*loss_inputs, img_metas=img_metas) + return losses + + def forward_dummy(self, img): + dummy_metas = None + return self.forward_test(img=img, img_metas=[[dummy_metas]]) + + def forward(self, inputs, return_loss=True, rescale=False): + """Calls either forward_train or forward_test depending on whether + return_loss=True. + Note this setting will change the expected inputs. When + `return_loss=True`, img and img_metas are single-nested (i.e. + torch.Tensor and list[dict]), and when `return_loss=False`, img and + img_metas should be double nested (i.e. list[torch.Tensor], + list[list[dict]]), with the outer list indicating test time + augmentations. + """ + if return_loss: + losses = self.forward_train(**inputs) + loss, log_vars = self._parse_losses(losses) + outputs = dict( + loss=loss, log_vars=log_vars, num_samples=len(inputs['img_metas'])) + return outputs + else: + outputs = self.forward_test(**inputs, rescale=rescale) + return outputs + + def obtain_history_bev(self, imgs_queue, img_metas_list): + """Obtain history BEV features iteratively. To save GPU memory, gradients are not calculated.
+ """ + self.eval() + + with torch.no_grad(): + prev_bev = None + bs, len_queue, num_cams, C, H, W = imgs_queue.shape + imgs_queue = imgs_queue.reshape(bs*len_queue, num_cams, C, H, W) + img_feats_list = self.extract_feat(img=imgs_queue, len_queue=len_queue) + for i in range(len_queue): + img_metas = [each[i] for each in img_metas_list] + # img_feats = self.extract_feat(img=img, img_metas=img_metas) + img_feats = [each_scale[:, i] for each_scale in img_feats_list] + prev_bev = self.pts_bbox_head( + img_feats, img_metas, prev_bev, only_bev=True) + self.train() + return prev_bev + + # @auto_fp16(apply_to=('img', 'points')) + @force_fp32(apply_to=('img','points','prev_bev')) + def forward_train(self, + points=None, + img_metas=None, + gt_bboxes_3d=None, + gt_labels_3d=None, + map_gt_bboxes_3d=None, + map_gt_labels_3d=None, + gt_labels=None, + gt_bboxes=None, + img=None, + proposals=None, + gt_bboxes_ignore=None, + map_gt_bboxes_ignore=None, + img_depth=None, + img_mask=None, + ego_his_trajs=None, + ego_fut_trajs=None, + ego_fut_masks=None, + ego_fut_cmd=None, + ego_lcf_feat=None, + gt_attr_labels=None, + **kwargs + ): + """Forward training function. + Args: + points (list[torch.Tensor], optional): Points of each sample. + Defaults to None. + img_metas (list[dict], optional): Meta information of each sample. + Defaults to None. + gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`], optional): + Ground truth 3D boxes. Defaults to None. + gt_labels_3d (list[torch.Tensor], optional): Ground truth labels + of 3D boxes. Defaults to None. + gt_labels (list[torch.Tensor], optional): Ground truth labels + of 2D boxes in images. Defaults to None. + gt_bboxes (list[torch.Tensor], optional): Ground truth 2D boxes in + images. Defaults to None. + img (torch.Tensor optional): Images of each sample with shape + (N, C, H, W). Defaults to None. + proposals ([list[torch.Tensor], optional): Predicted proposals + used for training Fast RCNN. Defaults to None. + gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth + 2D boxes in images to be ignored. Defaults to None. + Returns: + dict: Losses of different branches. + """ + + len_queue = img.size(1) + prev_img = img[:, :-1, ...] + img = img[:, -1, ...] 
+ + prev_img_metas = copy.deepcopy(img_metas) + prev_bev = self.obtain_history_bev(prev_img, prev_img_metas) if len_queue > 1 else None + + img_metas = [each[len_queue-1] for each in img_metas] + img_feats = self.extract_feat(img=img, img_metas=img_metas) + losses = dict() + losses_pts = self.forward_pts_train(img_feats, gt_bboxes_3d, gt_labels_3d, + map_gt_bboxes_3d, map_gt_labels_3d, img_metas, + gt_bboxes_ignore, map_gt_bboxes_ignore, prev_bev, + ego_his_trajs=ego_his_trajs, ego_fut_trajs=ego_fut_trajs, + ego_fut_masks=ego_fut_masks, ego_fut_cmd=ego_fut_cmd, + ego_lcf_feat=ego_lcf_feat, gt_attr_labels=gt_attr_labels) + + losses.update(losses_pts) + return losses + + def forward_test( + self, + img_metas, + gt_bboxes_3d=None, + gt_labels_3d=None, + img=None, + ego_his_trajs=None, + ego_fut_trajs=None, + ego_fut_cmd=None, + ego_lcf_feat=None, + gt_attr_labels=None, + **kwargs + ): + for var, name in [(img_metas, 'img_metas')]: + if not isinstance(var, list): + raise TypeError('{} must be a list, but got {}'.format( + name, type(var))) + img = [img] if img is None else img + + if self.prev_frame_num > 0: + if len(self.prev_frame_infos) < self.prev_frame_num: + self.prev_frame_info = { + "prev_bev": None, + "scene_token": None, + "prev_pos": 0, + "prev_angle": 0, + } + else: + self.prev_frame_info = self.prev_frame_infos.pop(0) + + if img_metas[0][0]['scene_token'] != self.prev_frame_info['scene_token']: + # the first sample of each scene is truncated + self.prev_frame_info['prev_bev'] = None + # update idx + self.prev_frame_info['scene_token'] = img_metas[0][0]['scene_token'] + + # do not use temporal information + if not self.video_test_mode: + self.prev_frame_info['prev_bev'] = None + + # Get the delta of ego position and angle between two timestamps. + tmp_pos = copy.deepcopy(img_metas[0][0]['can_bus'][:3]) + tmp_angle = copy.deepcopy(img_metas[0][0]['can_bus'][-1]) + if self.prev_frame_info['prev_bev'] is not None: + img_metas[0][0]['can_bus'][:3] -= self.prev_frame_info['prev_pos'] + img_metas[0][0]['can_bus'][-1] -= self.prev_frame_info['prev_angle'] + else: + img_metas[0][0]['can_bus'][-1] = 0 + img_metas[0][0]['can_bus'][:3] = 0 + + if ego_his_trajs is not None: + ego_his_trajs = ego_his_trajs[0] + if ego_fut_trajs is not None: + ego_fut_trajs = ego_fut_trajs[0] + if ego_fut_cmd is not None: + ego_fut_cmd = ego_fut_cmd[0] + if ego_lcf_feat is not None: + ego_lcf_feat = ego_lcf_feat[0] + + new_prev_bev, bbox_results = self.simple_test( + img_metas=img_metas[0], + img=img[0], + prev_bev=self.prev_frame_info['prev_bev'], + gt_bboxes_3d=gt_bboxes_3d, + gt_labels_3d=gt_labels_3d, + ego_his_trajs=ego_his_trajs, + ego_fut_trajs=ego_fut_trajs, + ego_fut_cmd=ego_fut_cmd, + ego_lcf_feat=ego_lcf_feat, + gt_attr_labels=gt_attr_labels, + **kwargs + ) + # During inference, we save the BEV features and ego motion of each timestamp.
+ self.prev_frame_info['prev_pos'] = tmp_pos + self.prev_frame_info['prev_angle'] = tmp_angle + self.prev_frame_info['prev_bev'] = new_prev_bev + if self.prev_frame_num > 0: + self.prev_frame_infos.append(self.prev_frame_info) + + return bbox_results + + def simple_test( + self, + img_metas, + gt_bboxes_3d, + gt_labels_3d, + img=None, + prev_bev=None, + points=None, + fut_valid_flag=None, + rescale=False, + ego_his_trajs=None, + ego_fut_trajs=None, + ego_fut_cmd=None, + ego_lcf_feat=None, + gt_attr_labels=None, + **kwargs + ): + """Test function without augmentation.""" + img_feats = self.extract_feat(img=img, img_metas=img_metas) + bbox_list = [dict() for _ in range(len(img_metas))] + new_prev_bev, bbox_pts, metric_dict = self.simple_test_pts( + img_feats, + img_metas, + gt_bboxes_3d, + gt_labels_3d, + prev_bev, + fut_valid_flag=fut_valid_flag, + rescale=rescale, + start=None, + ego_his_trajs=ego_his_trajs, + ego_fut_trajs=ego_fut_trajs, + ego_fut_cmd=ego_fut_cmd, + ego_lcf_feat=ego_lcf_feat, + gt_attr_labels=gt_attr_labels, + ) + for result_dict, pts_bbox in zip(bbox_list, bbox_pts): + result_dict['pts_bbox'] = pts_bbox + result_dict['metric_results'] = metric_dict + + return new_prev_bev, bbox_list + + def simple_test_pts( + self, + x, + img_metas, + gt_bboxes_3d, + gt_labels_3d, + prev_bev=None, + fut_valid_flag=None, + rescale=False, + start=None, + ego_his_trajs=None, + ego_fut_trajs=None, + ego_fut_cmd=None, + ego_lcf_feat=None, + gt_attr_labels=None, + ): + """Test function.""" + mapped_class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', + 'trailer', 'barrier', 'motorcycle', 'bicycle', + 'pedestrian', 'traffic_cone' + ] + + outs = self.pts_bbox_head(x, img_metas, prev_bev=prev_bev, + ego_his_trajs=ego_his_trajs, ego_lcf_feat=ego_lcf_feat) + bbox_list = self.pts_bbox_head.get_bboxes(outs, img_metas, rescale=rescale) + + bbox_results = [] + for i, (bboxes, scores, labels, trajs, map_bboxes, \ + map_scores, map_labels, map_pts) in enumerate(bbox_list): + bbox_result = bbox3d2result(bboxes, scores, labels) + bbox_result['trajs_3d'] = trajs.cpu() + map_bbox_result = self.map_pred2result(map_bboxes, map_scores, map_labels, map_pts) + bbox_result.update(map_bbox_result) + bbox_result['ego_fut_preds'] = outs['ego_fut_preds'][i].cpu() + bbox_result['ego_fut_cmd'] = ego_fut_cmd.cpu() + bbox_results.append(bbox_result) + + metric_dict = None + + if gt_attr_labels is not None: + assert len(bbox_results) == 1, 'only support batch_size=1 now' + score_threshold = 0.6 + with torch.no_grad(): + c_bbox_results = copy.deepcopy(bbox_results) + + bbox_result = c_bbox_results[0] + gt_bbox = gt_bboxes_3d[0][0] + gt_label = gt_labels_3d[0][0].to('cpu') + gt_attr_label = gt_attr_labels[0][0].to('cpu') + fut_valid_flag = bool(fut_valid_flag[0][0]) + # filter pred bbox by score_threshold + mask = bbox_result['scores_3d'] > score_threshold + bbox_result['boxes_3d'] = bbox_result['boxes_3d'][mask] + bbox_result['scores_3d'] = bbox_result['scores_3d'][mask] + bbox_result['labels_3d'] = bbox_result['labels_3d'][mask] + bbox_result['trajs_3d'] = bbox_result['trajs_3d'][mask] + + matched_bbox_result = self.assign_pred_to_gt_vip3d( + bbox_result, gt_bbox, gt_label) + + metric_dict = self.compute_motion_metric_vip3d( + gt_bbox, gt_label, gt_attr_label, bbox_result, + matched_bbox_result, mapped_class_names) + + # ego planning metric + assert ego_fut_trajs.shape[0] == 1, 'only support batch_size=1 for testing' + ego_fut_preds = bbox_result['ego_fut_preds'] + ego_fut_trajs = ego_fut_trajs[0, 0] +
ego_fut_cmd = ego_fut_cmd[0, 0, 0] + ego_fut_cmd_idx = torch.nonzero(ego_fut_cmd)[0, 0] + ego_fut_pred = ego_fut_preds[ego_fut_cmd_idx] + ego_fut_pred = ego_fut_pred.cumsum(dim=-2) + ego_fut_trajs = ego_fut_trajs.cumsum(dim=-2) + + metric_dict_planner_stp3 = self.compute_planner_metric_stp3( + pred_ego_fut_trajs=ego_fut_pred[None], + gt_ego_fut_trajs=ego_fut_trajs[None], + gt_agent_boxes=gt_bbox, + gt_agent_feats=gt_attr_label.unsqueeze(0), + fut_valid_flag=fut_valid_flag + ) + metric_dict.update(metric_dict_planner_stp3) + + return outs['bev_embed'], bbox_results, metric_dict + + def map_pred2result(self, bboxes, scores, labels, pts, attrs=None): + """Convert map detection results to a dict of tensors on CPU. + + Args: + bboxes (torch.Tensor): Bounding boxes with shape of (n, 5). + labels (torch.Tensor): Labels with shape of (n, ). + scores (torch.Tensor): Scores with shape of (n, ). + pts (torch.Tensor): Predicted map points. + attrs (torch.Tensor, optional): Attributes with shape of (n, ). \ + Defaults to None. + + Returns: + dict[str, torch.Tensor]: Bounding box results in cpu mode. + + - boxes_3d (torch.Tensor): 3D boxes. + - scores (torch.Tensor): Prediction scores. + - labels_3d (torch.Tensor): Box labels. + - attrs_3d (torch.Tensor, optional): Box attributes. + """ + result_dict = dict( + map_boxes_3d=bboxes.to('cpu'), + map_scores_3d=scores.cpu(), + map_labels_3d=labels.cpu(), + map_pts_3d=pts.to('cpu')) + + if attrs is not None: + result_dict['map_attrs_3d'] = attrs.cpu() + + return result_dict + + def assign_pred_to_gt_vip3d( + self, + bbox_result, + gt_bbox, + gt_label, + match_dis_thresh=2.0 + ): + """Assign predicted boxes to GT boxes according to object center predictions in the LiDAR coordinate frame. + Args: + bbox_result (dict): Predictions. + 'boxes_3d': (LiDARInstance3DBoxes) + 'scores_3d': (Tensor), [num_pred_bbox] + 'labels_3d': (Tensor), [num_pred_bbox] + 'trajs_3d': (Tensor), [fut_ts*2] + gt_bbox (LiDARInstance3DBoxes): GT boxes. + gt_label (Tensor): GT labels for gt_bbox, [num_gt_bbox]. + match_dis_thresh (float): Distance threshold for determining a positive sample for a GT box. + + Returns: + matched_bbox_result (torch.Tensor): Assigned pred index for each GT box, [num_gt_bbox]. + """ + dynamic_list = [0,1,3,4,6,7,8] + matched_bbox_result = torch.ones( + (len(gt_bbox)), dtype=torch.long) * -1 # -1: not assigned + gt_centers = gt_bbox.center[:, :2] + pred_centers = bbox_result['boxes_3d'].center[:, :2] + dist = torch.linalg.norm(pred_centers[:, None, :] - gt_centers[None, :, :], dim=-1) + pred_not_dyn = [label not in dynamic_list for label in bbox_result['labels_3d']] + gt_not_dyn = [label not in dynamic_list for label in gt_label] + dist[pred_not_dyn] = 1e6 + dist[:, gt_not_dyn] = 1e6 + dist[dist > match_dis_thresh] = 1e6 + + r_list, c_list = linear_sum_assignment(dist) + + for i in range(len(r_list)): + if dist[r_list[i], c_list[i]] <= match_dis_thresh: + matched_bbox_result[c_list[i]] = r_list[i] + + return matched_bbox_result + + def compute_motion_metric_vip3d( + self, + gt_bbox, + gt_label, + gt_attr_label, + pred_bbox, + matched_bbox_result, + mapped_class_names, + match_dis_thresh=2.0, + ): + """Compute EPA metric for one sample. + Args: + gt_bbox (LiDARInstance3DBoxes): GT boxes. + gt_label (Tensor): GT labels for gt_bbox, [num_gt_bbox]. + pred_bbox (dict): Predictions. + 'boxes_3d': (LiDARInstance3DBoxes) + 'scores_3d': (Tensor), [num_pred_bbox] + 'labels_3d': (Tensor), [num_pred_bbox] + 'trajs_3d': (Tensor), [fut_ts*2] + matched_bbox_result (torch.Tensor): Assigned pred index for each GT box, [num_gt_bbox].
+ match_dis_thresh (float): Distance threshold for determining a positive sample for a GT box. + + Returns: + EPA_dict (dict): EPA metric dict for each evaluated class. + """ + motion_cls_names = ['car', 'pedestrian'] + motion_metric_names = ['gt', 'cnt_ade', 'cnt_fde', 'hit', + 'fp', 'ADE', 'FDE', 'MR'] + + metric_dict = {} + for met in motion_metric_names: + for cls in motion_cls_names: + metric_dict[met+'_'+cls] = 0.0 + + veh_list = [0,1,3,4] + ignore_list = ['construction_vehicle', 'barrier', + 'traffic_cone', 'motorcycle', 'bicycle'] + + for i in range(pred_bbox['labels_3d'].shape[0]): + pred_bbox['labels_3d'][i] = 0 if pred_bbox['labels_3d'][i] in veh_list else pred_bbox['labels_3d'][i] + box_name = mapped_class_names[pred_bbox['labels_3d'][i]] + if box_name in ignore_list: + continue + if i not in matched_bbox_result: + metric_dict['fp_'+box_name] += 1 + + for i in range(gt_label.shape[0]): + gt_label[i] = 0 if gt_label[i] in veh_list else gt_label[i] + box_name = mapped_class_names[gt_label[i]] + if box_name in ignore_list: + continue + gt_fut_masks = gt_attr_label[i][self.fut_ts*2:self.fut_ts*3] + num_valid_ts = sum(gt_fut_masks==1) + if num_valid_ts == self.fut_ts: + metric_dict['gt_'+box_name] += 1 + if matched_bbox_result[i] >= 0 and num_valid_ts > 0: + metric_dict['cnt_ade_'+box_name] += 1 + m_pred_idx = matched_bbox_result[i] + gt_fut_trajs = gt_attr_label[i][:self.fut_ts*2].reshape(-1, 2) + gt_fut_trajs = gt_fut_trajs[:num_valid_ts] + pred_fut_trajs = pred_bbox['trajs_3d'][m_pred_idx].reshape(self.fut_mode, self.fut_ts, 2) + pred_fut_trajs = pred_fut_trajs[:, :num_valid_ts, :] + gt_fut_trajs = gt_fut_trajs.cumsum(dim=-2) + pred_fut_trajs = pred_fut_trajs.cumsum(dim=-2) + gt_fut_trajs = gt_fut_trajs + gt_bbox[i].center[0, :2] + pred_fut_trajs = pred_fut_trajs + pred_bbox['boxes_3d'][int(m_pred_idx)].center[0, :2] + + dist = torch.linalg.norm(gt_fut_trajs[None, :, :] - pred_fut_trajs, dim=-1) + ade = dist.sum(-1) / num_valid_ts + ade = ade.min() + + metric_dict['ADE_'+box_name] += ade + if num_valid_ts == self.fut_ts: + fde = dist[:, -1].min() + metric_dict['cnt_fde_'+box_name] += 1 + metric_dict['FDE_'+box_name] += fde + if fde <= match_dis_thresh: + metric_dict['hit_'+box_name] += 1 + else: + metric_dict['MR_'+box_name] += 1 + + return metric_dict + + # Same planning metric as ST-P3. + def compute_planner_metric_stp3( + self, + pred_ego_fut_trajs, + gt_ego_fut_trajs, + gt_agent_boxes, + gt_agent_feats, + fut_valid_flag + ): + """Compute planner metrics for one sample, following ST-P3.""" + metric_dict = { + 'plan_L2_1s':0, + 'plan_L2_2s':0, + 'plan_L2_3s':0, + 'plan_obj_col_1s':0, + 'plan_obj_col_2s':0, + 'plan_obj_col_3s':0, + 'plan_obj_box_col_1s':0, + 'plan_obj_box_col_2s':0, + 'plan_obj_box_col_3s':0, + } + metric_dict['fut_valid_flag'] = fut_valid_flag + future_second = 3 + assert pred_ego_fut_trajs.shape[0] == 1, 'only support bs=1' + if self.planning_metric is None: + self.planning_metric = PlanningMetric() + segmentation, pedestrian = self.planning_metric.get_label( + gt_agent_boxes, gt_agent_feats) + occupancy = torch.logical_or(segmentation, pedestrian) + + for i in range(future_second): + if fut_valid_flag: + cur_time = (i+1)*2 + traj_L2 = self.planning_metric.compute_L2( + pred_ego_fut_trajs[0, :cur_time].detach().to(gt_ego_fut_trajs.device), + gt_ego_fut_trajs[0, :cur_time] + ) + obj_coll, obj_box_coll = self.planning_metric.evaluate_coll( + pred_ego_fut_trajs[:, :cur_time].detach(), + gt_ego_fut_trajs[:, :cur_time], + occupancy) + metric_dict['plan_L2_{}s'.format(i+1)] = traj_L2
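+ # obj_coll checks the predicted ego trajectory points against the rasterized occupancy, while + # obj_box_coll sweeps the full ego box footprint along the trajectory (see + # PlanningMetric.evaluate_coll); cur_time = (i+1)*2 appears to assume future annotations at + # 2 Hz, i.e. two timesteps per evaluated second.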
+ metric_dict['plan_obj_col_{}s'.format(i+1)] = obj_coll.mean().item() + metric_dict['plan_obj_box_col_{}s'.format(i+1)] = obj_box_coll.mean().item() + else: + metric_dict['plan_L2_{}s'.format(i+1)] = 0.0 + metric_dict['plan_obj_col_{}s'.format(i+1)] = 0.0 + metric_dict['plan_obj_box_col_{}s'.format(i+1)] = 0.0 + + return metric_dict + + def set_epoch(self, epoch): + self.pts_bbox_head.epoch = epoch \ No newline at end of file diff --git a/mmcv/models/detectors/__init__.py b/mmcv/models/detectors/__init__.py new file mode 100644 index 0000000..d3fb4b0 --- /dev/null +++ b/mmcv/models/detectors/__init__.py @@ -0,0 +1,5 @@ +from .base import BaseDetector, Base3DDetector +from .single_stage_mono3d import SingleStageMono3DDetector +from .uniad_e2e import UniAD +from .bevformer import BEVFormer +from .VAD import VAD \ No newline at end of file diff --git a/mmcv/models/detectors/base.py b/mmcv/models/detectors/base.py new file mode 100644 index 0000000..9856b10 --- /dev/null +++ b/mmcv/models/detectors/base.py @@ -0,0 +1,407 @@ +from abc import ABCMeta, abstractmethod +from collections import OrderedDict + +from os import path as osp +import numpy as np +import torch +import torch.distributed as dist +from mmcv.parallel import DataContainer as DC +from mmcv.models.backbones import BaseModule +from mmcv.utils import auto_fp16 + +from mmcv.core.bbox.structures.box_3d_mode import Box3DMode +from mmcv.core.bbox.structures.coord_3d_mode import Coord3DMode +from mmcv.core.visualizer import show_result +from mmcv.core.visualization import imshow_det_bboxes +from mmcv.utils import concat_list, is_list_of +from mmcv.image import imread + + +class BaseDetector(BaseModule, metaclass=ABCMeta): + """Base class for detectors.""" + + def __init__(self, init_cfg=None): + super(BaseDetector, self).__init__(init_cfg) + self.fp16_enabled = False + + @property + def with_neck(self): + """bool: whether the detector has a neck""" + return hasattr(self, 'neck') and self.neck is not None + + # TODO: these properties need to be carefully handled + # for both single stage & two stage detectors + @property + def with_shared_head(self): + """bool: whether the detector has a shared head in the RoI Head""" + return hasattr(self, 'roi_head') and self.roi_head.with_shared_head + + @property + def with_bbox(self): + """bool: whether the detector has a bbox head""" + return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox) + or (hasattr(self, 'bbox_head') and self.bbox_head is not None)) + + @property + def with_mask(self): + """bool: whether the detector has a mask head""" + return ((hasattr(self, 'roi_head') and self.roi_head.with_mask) + or (hasattr(self, 'mask_head') and self.mask_head is not None)) + + @abstractmethod + def extract_feat(self, imgs): + """Extract features from images.""" + pass + + def extract_feats(self, imgs): + """Extract features from multiple images. + + Args: + imgs (list[torch.Tensor]): A list of images. The images are + augmented from the same image but in different ways. + + Returns: + list[torch.Tensor]: Features of different images + """ + assert isinstance(imgs, list) + return [self.extract_feat(img) for img in imgs] + + def forward_train(self, imgs, img_metas, **kwargs): + """ + Args: + img (list[Tensor]): List of tensors of shape (1, C, H, W). + Typically these should be mean centered and std scaled. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. 
+ For details on the values of these keys, see + :class:`mmcv.datasets.pipelines.Collect`. + kwargs (keyword arguments): Specific to concrete implementation. + """ + # NOTE the batched image size information may be useful, e.g. + # in DETR, this is needed for the construction of masks, which is + # then used for the transformer_head. + batch_input_shape = tuple(imgs[0].size()[-2:]) + for img_meta in img_metas: + img_meta['batch_input_shape'] = batch_input_shape + + async def async_simple_test(self, img, img_metas, **kwargs): + raise NotImplementedError + + @abstractmethod + def simple_test(self, img, img_metas, **kwargs): + pass + + @abstractmethod + def aug_test(self, imgs, img_metas, **kwargs): + """Test function with test time augmentation.""" + pass + + async def aforward_test(self, *, img, img_metas, **kwargs): + for var, name in [(img, 'img'), (img_metas, 'img_metas')]: + if not isinstance(var, list): + raise TypeError(f'{name} must be a list, but got {type(var)}') + + num_augs = len(img) + if num_augs != len(img_metas): + raise ValueError(f'num of augmentations ({len(img)}) ' + f'!= num of image metas ({len(img_metas)})') + # TODO: remove the restriction of samples_per_gpu == 1 when prepared + samples_per_gpu = img[0].size(0) + assert samples_per_gpu == 1 + + if num_augs == 1: + return await self.async_simple_test(img[0], img_metas[0], **kwargs) + else: + raise NotImplementedError + + def forward_test(self, imgs, img_metas, **kwargs): + """ + Args: + imgs (List[Tensor]): the outer list indicates test-time + augmentations and inner Tensor should have a shape NxCxHxW, + which contains all images in the batch. + img_metas (List[List[dict]]): the outer list indicates test-time + augs (multiscale, flip, etc.) and the inner list indicates + images in a batch. + """ + for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: + if not isinstance(var, list): + raise TypeError(f'{name} must be a list, but got {type(var)}') + + num_augs = len(imgs) + if num_augs != len(img_metas): + raise ValueError(f'num of augmentations ({len(imgs)}) ' + f'!= num of image meta ({len(img_metas)})') + + # NOTE the batched image size information may be useful, e.g. + # in DETR, this is needed for the construction of masks, which is + # then used for the transformer_head. + for img, img_meta in zip(imgs, img_metas): + batch_size = len(img_meta) + for img_id in range(batch_size): + img_meta[img_id]['batch_input_shape'] = tuple(img.size()[-2:]) + + if num_augs == 1: + # proposals (List[List[Tensor]]): the outer list indicates + # test-time augs (multiscale, flip, etc.) and the inner list + # indicates images in a batch. + # The Tensor should have a shape Px4, where P is the number of + # proposals. + if 'proposals' in kwargs: + kwargs['proposals'] = kwargs['proposals'][0] + return self.simple_test(imgs[0], img_metas[0], **kwargs) + else: + assert imgs[0].size(0) == 1, 'aug test does not support ' \ + 'inference with batch size ' \ + f'{imgs[0].size(0)}' + # TODO: support test augmentation for predefined proposals + assert 'proposals' not in kwargs + return self.aug_test(imgs, img_metas, **kwargs) + + @auto_fp16(apply_to=('img', )) + def forward(self, img, img_metas, return_loss=True, **kwargs): + """Calls either :func:`forward_train` or :func:`forward_test` depending + on whether ``return_loss`` is ``True``. + + Note this setting will change the expected inputs. When + ``return_loss=True``, img and img_meta are single-nested (i.e. 
Tensor + and List[dict]), and when ``return_loss=False``, img and img_meta + should be double nested (i.e. List[Tensor], List[List[dict]]), with + the outer list indicating test time augmentations. + """ + if torch.onnx.is_in_onnx_export(): + assert len(img_metas) == 1 + return self.onnx_export(img[0], img_metas[0]) + + if return_loss: + return self.forward_train(img, img_metas, **kwargs) + else: + return self.forward_test(img, img_metas, **kwargs) + + def _parse_losses(self, losses): + """Parse the raw outputs (losses) of the network. + + Args: + losses (dict): Raw output of the network, which usually contains + losses and other necessary information. + + Returns: + tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \ + which may be a weighted sum of all losses, log_vars contains \ + all the variables to be sent to the logger. + """ + log_vars = OrderedDict() + for loss_name, loss_value in losses.items(): + if isinstance(loss_value, torch.Tensor): + log_vars[loss_name] = loss_value.mean() + elif isinstance(loss_value, list): + log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) + else: + raise TypeError( + f'{loss_name} is not a tensor or list of tensors') + + loss = sum(_value for _key, _value in log_vars.items() + if 'loss' in _key) + + log_vars['loss'] = loss + for loss_name, loss_value in log_vars.items(): + # reduce loss when distributed training + if dist.is_available() and dist.is_initialized(): + loss_value = loss_value.data.clone() + dist.all_reduce(loss_value.div_(dist.get_world_size())) + log_vars[loss_name] = loss_value.item() + + return loss, log_vars + + def show_result(self, + img, + result, + score_thr=0.3, + bbox_color=(72, 101, 241), + text_color=(72, 101, 241), + mask_color=None, + thickness=2, + font_size=13, + win_name='', + show=False, + wait_time=0, + out_file=None): + """Draw `result` over `img`. + + Args: + img (str or Tensor): The image to be displayed. + result (Tensor or tuple): The results to draw over `img` + bbox_result or (bbox_result, segm_result). + score_thr (float, optional): Minimum score of bboxes to be shown. + Default: 0.3. + bbox_color (str or tuple(int) or :obj:`Color`): Color of bbox lines. + The tuple of color should be in BGR order. Default: (72, 101, 241) + text_color (str or tuple(int) or :obj:`Color`): Color of texts. + The tuple of color should be in BGR order. Default: (72, 101, 241) + mask_color (None or str or tuple(int) or :obj:`Color`): + Color of masks. The tuple of color should be in BGR order. + Default: None + thickness (int): Thickness of lines. Default: 2 + font_size (int): Font size of texts. Default: 13 + win_name (str): The window name. Default: '' + wait_time (float): Value of waitKey param. + Default: 0. + show (bool): Whether to show the image. + Default: False. + out_file (str or None): The filename to write the image. + Default: None.
+ + Returns: + img (Tensor): The image with results drawn on it. Only returned + when `show` is False and `out_file` is None. + """ + img = imread(img) + img = img.copy() + if isinstance(result, tuple): + bbox_result, segm_result = result + if isinstance(segm_result, tuple): + segm_result = segm_result[0] # ms rcnn + else: + bbox_result, segm_result = result, None + bboxes = np.vstack(bbox_result) + labels = [ + np.full(bbox.shape[0], i, dtype=np.int32) + for i, bbox in enumerate(bbox_result) + ] + labels = np.concatenate(labels) + # draw segmentation masks + segms = None + if segm_result is not None and len(labels) > 0: # non empty + segms = concat_list(segm_result) + if isinstance(segms[0], torch.Tensor): + segms = torch.stack(segms, dim=0).detach().cpu().numpy() + else: + segms = np.stack(segms, axis=0) + # if out_file specified, do not show image in window + if out_file is not None: + show = False + # draw bounding boxes + img = imshow_det_bboxes( + img, + bboxes, + labels, + segms, + class_names=self.CLASSES, + score_thr=score_thr, + bbox_color=bbox_color, + text_color=text_color, + mask_color=mask_color, + thickness=thickness, + font_size=font_size, + win_name=win_name, + show=show, + wait_time=wait_time, + out_file=out_file) + + if not (show or out_file): + return img + + def onnx_export(self, img, img_metas): + raise NotImplementedError(f'{self.__class__.__name__} does ' + f'not support ONNX EXPORT') + + +class Base3DDetector(BaseDetector): + """Base class for detectors.""" + + def forward_test(self, points, img_metas, img=None, **kwargs): + """ + Args: + points (list[torch.Tensor]): the outer list indicates test-time + augmentations and inner torch.Tensor should have a shape NxC, + which contains all points in the batch. + img_metas (list[list[dict]]): the outer list indicates test-time + augs (multiscale, flip, etc.) and the inner list indicates + images in a batch + img (list[torch.Tensor], optional): the outer + list indicates test-time augmentations and inner + torch.Tensor should have a shape NxCxHxW, which contains + all images in the batch. Defaults to None. + """ + for var, name in [(points, 'points'), (img_metas, 'img_metas')]: + if not isinstance(var, list): + raise TypeError('{} must be a list, but got {}'.format( + name, type(var))) + + num_augs = len(points) + if num_augs != len(img_metas): + raise ValueError( + 'num of augmentations ({}) != num of image meta ({})'.format( + len(points), len(img_metas))) + + if num_augs == 1: + img = [img] if img is None else img + return self.simple_test(points[0], img_metas[0], img[0], **kwargs) + else: + return self.aug_test(points, img_metas, img, **kwargs) + + @auto_fp16(apply_to=('img', 'points')) + def forward(self, return_loss=True, **kwargs): + """Calls either forward_train or forward_test depending on whether + return_loss=True. + + Note this setting will change the expected inputs. When + `return_loss=True`, img and img_metas are single-nested (i.e. + torch.Tensor and list[dict]), and when `return_loss=False`, img and + img_metas should be double nested (i.e. list[torch.Tensor], + list[list[dict]]), with the outer list indicating test time + augmentations. + """ + if return_loss: + return self.forward_train(**kwargs) + else: + return self.forward_test(**kwargs) + + def show_results(self, data, result, out_dir): + """Results visualization. + + Args: + data (list[dict]): Input points and the information of the sample. + result (list[dict]): Prediction results. + out_dir (str): Output directory of visualization result.
+ """ + for batch_id in range(len(result)): + if isinstance(data['points'][0], DC): + points = data['points'][0]._data[0][batch_id].numpy() + elif is_list_of(data['points'][0], torch.Tensor): + points = data['points'][0][batch_id] + else: + ValueError(f"Unsupported data type {type(data['points'][0])} " + f'for visualization!') + if isinstance(data['img_metas'][0], DC): + pts_filename = data['img_metas'][0]._data[0][batch_id][ + 'pts_filename'] + box_mode_3d = data['img_metas'][0]._data[0][batch_id][ + 'box_mode_3d'] + elif is_list_of(data['img_metas'][0], dict): + pts_filename = data['img_metas'][0][batch_id]['pts_filename'] + box_mode_3d = data['img_metas'][0][batch_id]['box_mode_3d'] + else: + ValueError( + f"Unsupported data type {type(data['img_metas'][0])} " + f'for visualization!') + file_name = osp.split(pts_filename)[-1].split('.')[0] + + assert out_dir is not None, 'Expect out_dir, got none.' + + pred_bboxes = result[batch_id]['boxes_3d'] + + # for now we convert points and bbox into depth mode + if (box_mode_3d == Box3DMode.CAM) or (box_mode_3d + == Box3DMode.LIDAR): + points = Coord3DMode.convert_point(points, Coord3DMode.LIDAR, + Coord3DMode.DEPTH) + pred_bboxes = Box3DMode.convert(pred_bboxes, box_mode_3d, + Box3DMode.DEPTH) + elif box_mode_3d != Box3DMode.DEPTH: + ValueError( + f'Unsupported box_mode_3d {box_mode_3d} for convertion!') + pred_bboxes = pred_bboxes.tensor.cpu().numpy() + show_result(points, None, pred_bboxes, out_dir, file_name) + diff --git a/mmcv/models/detectors/bevformer.py b/mmcv/models/detectors/bevformer.py new file mode 100644 index 0000000..a51778c --- /dev/null +++ b/mmcv/models/detectors/bevformer.py @@ -0,0 +1,295 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. +# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +import torch +from mmcv.utils import force_fp32, auto_fp16 +from mmcv.models import DETECTORS +from mmcv.core import bbox3d2result +from mmcv.models.detectors.mvx_two_stage import MVXTwoStageDetector +from mmcv.models.utils.grid_mask import GridMask +import time +import copy +import numpy as np +from mmcv.utils.bricks import run_time + + +@DETECTORS.register_module() +class BEVFormer(MVXTwoStageDetector): + """BEVFormer. + Args: + video_test_mode (bool): Decide whether to use temporal information during inference. 
+ """ + + def __init__(self, + use_grid_mask=False, + pts_voxel_layer=None, + pts_voxel_encoder=None, + pts_middle_encoder=None, + pts_fusion_layer=None, + img_backbone=None, + pts_backbone=None, + img_neck=None, + pts_neck=None, + pts_bbox_head=None, + img_roi_head=None, + img_rpn_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + video_test_mode=False + ): + + super(BEVFormer, + self).__init__(pts_voxel_layer, pts_voxel_encoder, + pts_middle_encoder, pts_fusion_layer, + img_backbone, pts_backbone, img_neck, pts_neck, + pts_bbox_head, img_roi_head, img_rpn_head, + train_cfg, test_cfg, pretrained) + self.grid_mask = GridMask( + True, True, rotate=1, offset=False, ratio=0.5, mode=1, prob=0.7) + self.use_grid_mask = use_grid_mask + self.fp16_enabled = False + + # temporal + self.video_test_mode = video_test_mode + self.prev_frame_info = { + 'prev_bev': None, + 'scene_token': None, + 'prev_pos': 0, + 'prev_angle': 0, + } + + + def extract_img_feat(self, img, img_metas, len_queue=None): + """Extract features of images.""" + B = img.size(0) + if img is not None: + + # input_shape = img.shape[-2:] + # # update real input shape of each single img + # for img_meta in img_metas: + # img_meta.update(input_shape=input_shape) + + if img.dim() == 5 and img.size(0) == 1: + img.squeeze_() + elif img.dim() == 5 and img.size(0) > 1: + B, N, C, H, W = img.size() + img = img.reshape(B * N, C, H, W) + if self.use_grid_mask: + img = self.grid_mask(img) + + img_feats = self.img_backbone(img) + if isinstance(img_feats, dict): + img_feats = list(img_feats.values()) + else: + return None + if self.with_img_neck: + img_feats = self.img_neck(img_feats) + + img_feats_reshaped = [] + for img_feat in img_feats: + BN, C, H, W = img_feat.size() + if len_queue is not None: + img_feats_reshaped.append(img_feat.view(int(B/len_queue), len_queue, int(BN / B), C, H, W)) + else: + img_feats_reshaped.append(img_feat.view(B, int(BN / B), C, H, W)) + return img_feats_reshaped + + @auto_fp16(apply_to=('img')) + def extract_feat(self, img, img_metas=None, len_queue=None): + """Extract features from images and points.""" + + img_feats = self.extract_img_feat(img, img_metas, len_queue=len_queue) + + return img_feats + + + def forward_pts_train(self, + pts_feats, + gt_bboxes_3d, + gt_labels_3d, + img_metas, + gt_bboxes_ignore=None, + prev_bev=None): + """Forward function' + Args: + pts_feats (list[torch.Tensor]): Features of point cloud branch + gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`]): Ground truth + boxes for each sample. + gt_labels_3d (list[torch.Tensor]): Ground truth labels for + boxes of each sampole + img_metas (list[dict]): Meta information of samples. + gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth + boxes to be ignored. Defaults to None. + prev_bev (torch.Tensor, optional): BEV features of previous frame. + Returns: + dict: Losses of each branch. + """ + + outs = self.pts_bbox_head( + pts_feats, img_metas, prev_bev) + loss_inputs = [gt_bboxes_3d, gt_labels_3d, outs] + losses = self.pts_bbox_head.loss(*loss_inputs, img_metas=img_metas) + return losses + + def forward_dummy(self, img): + dummy_metas = None + return self.forward_test(img=img, img_metas=[[dummy_metas]]) + + def forward(self, inputs, return_loss=True, rescale=False): + """Calls either forward_train or forward_test depending on whether + return_loss=True. + Note this setting will change the expected inputs. When + `return_loss=True`, img and img_metas are single-nested (i.e. 
+ torch.Tensor and list[dict]), and when `return_loss=False`, img and + img_metas should be double nested (i.e. list[torch.Tensor], + list[list[dict]]), with the outer list indicating test time + augmentations. + """ + if return_loss: + losses = self.forward_train(**inputs) + loss, log_vars = self._parse_losses(losses) + outputs = dict( + loss=loss, log_vars=log_vars, num_samples=len(inputs['img_metas'])) + return outputs + else: + outputs = self.forward_test(**inputs, rescale=rescale) + return outputs + + def obtain_history_bev(self, imgs_queue, img_metas_list): + """Obtain history BEV features iteratively. To save GPU memory, gradients are not calculated. + """ + self.eval() + + with torch.no_grad(): + prev_bev = None + bs, len_queue, num_cams, C, H, W = imgs_queue.shape + imgs_queue = imgs_queue.reshape(bs*len_queue, num_cams, C, H, W) + img_feats_list = self.extract_feat(img=imgs_queue, len_queue=len_queue) + for i in range(len_queue): + img_metas = [each[i] for each in img_metas_list] + if not img_metas[0]['prev_bev_exists']: + prev_bev = None + img_feats = [each_scale[:, i] for each_scale in img_feats_list] + prev_bev = self.pts_bbox_head( + img_feats, img_metas, prev_bev, only_bev=True) + self.train() + return prev_bev + + @auto_fp16(apply_to=('img', 'points')) + def forward_train(self, + points=None, + img_metas=None, + gt_bboxes_3d=None, + gt_labels_3d=None, + gt_labels=None, + gt_bboxes=None, + img=None, + proposals=None, + gt_bboxes_ignore=None, + img_depth=None, + img_mask=None, + ): + """Forward training function. + Args: + points (list[torch.Tensor], optional): Points of each sample. + Defaults to None. + img_metas (list[dict], optional): Meta information of each sample. + Defaults to None. + gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`], optional): + Ground truth 3D boxes. Defaults to None. + gt_labels_3d (list[torch.Tensor], optional): Ground truth labels + of 3D boxes. Defaults to None. + gt_labels (list[torch.Tensor], optional): Ground truth labels + of 2D boxes in images. Defaults to None. + gt_bboxes (list[torch.Tensor], optional): Ground truth 2D boxes in + images. Defaults to None. + img (torch.Tensor, optional): Images of each sample with shape + (N, C, H, W). Defaults to None. + proposals ([list[torch.Tensor], optional): Predicted proposals + used for training Fast RCNN. Defaults to None. + gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth + 2D boxes in images to be ignored. Defaults to None. + Returns: + dict: Losses of different branches. + """ + len_queue = img.size(1) + prev_img = img[:, :-1, ...] + img = img[:, -1, ...]
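+ # Same temporal-queue layout as in VAD.forward_train: all frames but the last only build history + # BEV features under torch.no_grad(); the `prev_bev_exists` flag checked below resets the history + # at scene boundaries so that BEV features never propagate across scenes.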
+ + prev_img_metas = copy.deepcopy(img_metas) + prev_bev = self.obtain_history_bev(prev_img, prev_img_metas) + + img_metas = [each[len_queue-1] for each in img_metas] + if not img_metas[0]['prev_bev_exists']: + prev_bev = None + img_feats = self.extract_feat(img=img, img_metas=img_metas) + losses = dict() + losses_pts = self.forward_pts_train(img_feats, gt_bboxes_3d, + gt_labels_3d, img_metas, + gt_bboxes_ignore, prev_bev) + + losses.update(losses_pts) + return losses + + def forward_test(self, img_metas, img=None, rescale=None): + for var, name in [(img_metas, 'img_metas')]: + if not isinstance(var, list): + raise TypeError('{} must be a list, but got {}'.format( + name, type(var))) + img = [img] if img is None else img + + if img_metas[0][0]['scene_token'] != self.prev_frame_info['scene_token']: + # the first sample of each scene is truncated + self.prev_frame_info['prev_bev'] = None + # update idx + self.prev_frame_info['scene_token'] = img_metas[0][0]['scene_token'] + + # do not use temporal information + if not self.video_test_mode: + self.prev_frame_info['prev_bev'] = None + + # Get the delta of ego position and angle between two timestamps. + tmp_pos = copy.deepcopy(img_metas[0][0]['can_bus'][:3]) + tmp_angle = copy.deepcopy(img_metas[0][0]['can_bus'][-1]) + if self.prev_frame_info['prev_bev'] is not None: + img_metas[0][0]['can_bus'][:3] -= self.prev_frame_info['prev_pos'] + img_metas[0][0]['can_bus'][-1] -= self.prev_frame_info['prev_angle'] + else: + img_metas[0][0]['can_bus'][-1] = 0 + img_metas[0][0]['can_bus'][:3] = 0 + + new_prev_bev, bbox_results = self.simple_test( + img_metas[0], img[0], prev_bev=self.prev_frame_info['prev_bev'], rescale=rescale) + # During inference, we save the BEV features and ego motion of each timestamp. + self.prev_frame_info['prev_pos'] = tmp_pos + self.prev_frame_info['prev_angle'] = tmp_angle + self.prev_frame_info['prev_bev'] = new_prev_bev + return bbox_results + + def simple_test_pts(self, x, img_metas, prev_bev=None, rescale=False): + """Test function.""" + outs = self.pts_bbox_head(x, img_metas, prev_bev=prev_bev) + + bbox_list = self.pts_bbox_head.get_bboxes( + outs, img_metas, rescale=rescale) + bbox_results = [ + bbox3d2result(bboxes, scores, labels) + for bboxes, scores, labels in bbox_list + ] + return outs['bev_embed'], bbox_results + + def simple_test(self, img_metas, img=None, prev_bev=None, rescale=False): + """Test function without augmentation.""" + img_feats = self.extract_feat(img=img, img_metas=img_metas) + + bbox_list = [dict() for _ in range(len(img_metas))] + new_prev_bev, bbox_pts = self.simple_test_pts( + img_feats, img_metas, prev_bev, rescale=rescale) + for result_dict, pts_bbox in zip(bbox_list, bbox_pts): + result_dict['pts_bbox'] = pts_bbox + return new_prev_bev, bbox_list diff --git a/mmcv/models/detectors/bevformerV2.py b/mmcv/models/detectors/bevformerV2.py new file mode 100644 index 0000000..79efa12 --- /dev/null +++ b/mmcv/models/detectors/bevformerV2.py @@ -0,0 +1,269 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved.
+# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +import copy +from collections import OrderedDict +import torch +from mmdet.models import DETECTORS +from mmdet3d.core import bbox3d2result +from mmdet3d.models.detectors.mvx_two_stage import MVXTwoStageDetector +from mmdet3d.models.builder import build_head +from projects.mmdet3d_plugin.models.utils.grid_mask import GridMask + + +@DETECTORS.register_module() +class BEVFormerV2(MVXTwoStageDetector): + """BEVFormer. + Args: + video_test_mode (bool): Decide whether to use temporal information during inference. + """ + + def __init__(self, + use_grid_mask=False, + pts_voxel_layer=None, + pts_voxel_encoder=None, + pts_middle_encoder=None, + pts_fusion_layer=None, + img_backbone=None, + pts_backbone=None, + img_neck=None, + pts_neck=None, + pts_bbox_head=None, + fcos3d_bbox_head=None, + img_roi_head=None, + img_rpn_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + video_test_mode=False, + num_levels=None, + num_mono_levels=None, + mono_loss_weight=1.0, + frames=(0,), + ): + + super(BEVFormerV2, + self).__init__(pts_voxel_layer, pts_voxel_encoder, + pts_middle_encoder, pts_fusion_layer, + img_backbone, pts_backbone, img_neck, pts_neck, + pts_bbox_head, img_roi_head, img_rpn_head, + train_cfg, test_cfg, pretrained) + self.grid_mask = GridMask( + True, True, rotate=1, offset=False, ratio=0.5, mode=1, prob=0.7) + self.use_grid_mask = use_grid_mask + self.fp16_enabled = False + assert not self.fp16_enabled # not support fp16 yet + # temporal + self.video_test_mode = video_test_mode + assert not self.video_test_mode # not support video_test_mode yet + + # fcos3d head + self.fcos3d_bbox_head = build_head(fcos3d_bbox_head) if fcos3d_bbox_head else None + # loss weight + self.mono_loss_weight = mono_loss_weight + + # levels of features + self.num_levels = num_levels + self.num_mono_levels = num_mono_levels + self.frames = frames + def extract_img_feat(self, img): + """Extract features of images.""" + B = img.size(0) + if img is not None: + if img.dim() == 5 and img.size(0) == 1: + img.squeeze_() + elif img.dim() == 5 and img.size(0) > 1: + B, N, C, H, W = img.size() + img = img.reshape(B * N, C, H, W) + if self.use_grid_mask: + img = self.grid_mask(img) + + img_feats = self.img_backbone(img) + if isinstance(img_feats, dict): + img_feats = list(img_feats.values()) + else: + return None + if self.with_img_neck: + img_feats = self.img_neck(img_feats) + + img_feats_reshaped = [] + for img_feat in img_feats: + BN, C, H, W = img_feat.size() + img_feats_reshaped.append(img_feat.view(B, int(BN / B), C, H, W)) + return img_feats_reshaped + + def extract_feat(self, img, img_metas, len_queue=None): + """Extract features from images and points.""" + + img_feats = self.extract_img_feat(img) + if 'aug_param' in img_metas[0] and img_metas[0]['aug_param']['CropResizeFlipImage_param'][-1] is True: + # flip feature + img_feats = [torch.flip(x, dims=[-1, ]) for x in img_feats] + return img_feats + + def forward_pts_train(self, + pts_feats, + gt_bboxes_3d, + gt_labels_3d, + img_metas, + gt_bboxes_ignore=None, + prev_bev=None): + outs = self.pts_bbox_head( + pts_feats, img_metas, prev_bev) + loss_inputs = [gt_bboxes_3d, gt_labels_3d, outs] + losses = self.pts_bbox_head.loss(*loss_inputs, img_metas=img_metas) + return losses + + def forward_mono_train(self, img_feats, mono_input_dict): + """ + img_feats (list[Tensor]): 5-D tensor for each level, (B, N, C, H, W) + gt_bboxes 
(list[list[Tensor]]): Ground truth bboxes for each image with
+                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels (list[list[Tensor]]): Class indices corresponding to each box.
+            gt_bboxes_3d (list[list[Tensor]]): 3D boxes ground truth with shape of
+                (num_gts, code_size).
+            gt_labels_3d (list[list[Tensor]]): Same as gt_labels.
+            centers2d (list[list[Tensor]]): 2D centers on the image with shape of
+                (num_gts, 2).
+            depths (list[list[Tensor]]): Depth ground truth with shape of
+                (num_gts, ).
+            attr_labels (list[list[Tensor]]): Attributes indices of each box.
+            img_metas (list[list[dict]]): Meta information of each image, e.g.,
+                image size, scaling factor, etc.
+            ann_idx (list[list[int]]): Indices indicating which images have mono
+                annotations.
+        """
+        bsz = img_feats[0].shape[0]
+        num_lvls = len(img_feats)
+
+        img_feats_select = [[] for lvl in range(num_lvls)]
+        for lvl, img_feat in enumerate(img_feats):
+            for i in range(bsz):
+                img_feats_select[lvl].append(img_feat[i, mono_input_dict['mono_ann_idx'][i]])
+            img_feats_select[lvl] = torch.cat(img_feats_select[lvl], dim=0)
+        bsz_new = img_feats_select[0].shape[0]
+        assert bsz == len(mono_input_dict['mono_input_dict'])
+        input_dict = []
+        for i in range(bsz):
+            input_dict.extend(mono_input_dict['mono_input_dict'][i])
+        assert bsz_new == len(input_dict)
+        losses = self.fcos3d_bbox_head.forward_train(img_feats_select, input_dict)
+        return losses
+
+    def forward_dummy(self, img):
+        dummy_metas = None
+        return self.forward_test(img=img, img_metas=[[dummy_metas]])
+
+    def forward(self, return_loss=True, **kwargs):
+        if return_loss:
+            return self.forward_train(**kwargs)
+        else:
+            return self.forward_test(**kwargs)
+
+    def obtain_history_bev(self, img_dict, img_metas_dict):
+        """Obtain history BEV features iteratively. To save GPU memory, gradients are not calculated.
+        """
+        # Modify: roll back to previous version for single frame
+        is_training = self.training
+        self.eval()
+        prev_bev = OrderedDict({i: None for i in self.frames})
+        with torch.no_grad():
+            for t in img_dict.keys():
+                img = img_dict[t]
+                img_metas = [img_metas_dict[t], ]
+                img_feats = self.extract_feat(img=img, img_metas=img_metas)
+                if self.num_levels:
+                    img_feats = img_feats[:self.num_levels]
+                bev = self.pts_bbox_head(
+                    img_feats, img_metas, None, only_bev=True)
+                prev_bev[t] = bev.detach()
+        if is_training:
+            self.train()
+        return list(prev_bev.values())
+
+    def forward_train(self,
+                      points=None,
+                      img_metas=None,
+                      gt_bboxes_3d=None,
+                      gt_labels_3d=None,
+                      img=None,
+                      gt_bboxes_ignore=None,
+                      **mono_input_dict,
+                      ):
+        img_metas = OrderedDict(sorted(img_metas[0].items()))
+        img_dict = {}
+        for ind, t in enumerate(img_metas.keys()):
+            img_dict[t] = img[:, ind, ...]
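+        # Illustrative note (assuming self.frames is something like (-1, 0),
+        # where 0 denotes the current frame): after the loop above,
+        #     img_dict = {-1: img[:, 0, ...], 0: img[:, 1, ...]}
+        # Index 0 is split off just below; the remaining offsets feed
+        # obtain_history_bev(), which returns detached history BEV features.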
+
+        img = img_dict[0]
+        img_dict.pop(0)
+
+        prev_img_metas = copy.deepcopy(img_metas)
+        prev_img_metas.pop(0)
+        prev_bev = self.obtain_history_bev(img_dict, prev_img_metas)
+
+        img_metas = [img_metas[0], ]
+
+        img_feats = self.extract_feat(img=img, img_metas=img_metas)
+        losses = dict()
+        losses_pts = self.forward_pts_train(img_feats if self.num_levels is None
+                                            else img_feats[:self.num_levels], gt_bboxes_3d,
+                                            gt_labels_3d, img_metas,
+                                            gt_bboxes_ignore, prev_bev)
+        losses.update(losses_pts)
+
+        if self.fcos3d_bbox_head:
+            losses_mono = self.forward_mono_train(img_feats=img_feats if self.num_mono_levels is None
+                                                  else img_feats[:self.num_mono_levels],
+                                                  mono_input_dict=mono_input_dict)
+            for k, v in losses_mono.items():
+                losses[f'{k}_mono'] = v * self.mono_loss_weight
+
+        return losses
+
+    def forward_test(self, img_metas, img=None, **kwargs):
+        for var, name in [(img_metas, 'img_metas')]:
+            if not isinstance(var, list):
+                raise TypeError('{} must be a list, but got {}'.format(
+                    name, type(var)))
+        img = [img] if img is None else img
+        new_prev_bev, bbox_results = self.simple_test(img_metas[0], img[0], prev_bev=None, **kwargs)
+        return bbox_results
+
+    def simple_test_pts(self, x, img_metas, prev_bev=None, rescale=False):
+        """Test function"""
+        outs = self.pts_bbox_head(x, img_metas, prev_bev=prev_bev)
+
+        bbox_list = self.pts_bbox_head.get_bboxes(
+            outs, img_metas, rescale=rescale)
+        bbox_results = [
+            bbox3d2result(bboxes, scores, labels)
+            for bboxes, scores, labels in bbox_list
+        ]
+        return outs['bev_embed'], bbox_results
+
+    def simple_test(self, img_metas, img=None, prev_bev=None, rescale=False, **kwargs):
+        """Test function without augmentation."""
+        img_metas = OrderedDict(sorted(img_metas[0].items()))
+        img_dict = {}
+        for ind, t in enumerate(img_metas.keys()):
+            img_dict[t] = img[:, ind, ...]
+        img = img_dict[0]
+        img_dict.pop(0)
+
+        prev_img_metas = copy.deepcopy(img_metas)
+        prev_bev = self.obtain_history_bev(img_dict, prev_img_metas)
+
+        img_metas = [img_metas[0], ]
+        img_feats = self.extract_feat(img=img, img_metas=img_metas)
+        if self.num_levels:
+            img_feats = img_feats[:self.num_levels]
+
+        bbox_list = [dict() for i in range(len(img_metas))]
+        new_prev_bev, bbox_pts = self.simple_test_pts(
+            img_feats, img_metas, prev_bev, rescale=rescale)
+        for result_dict, pts_bbox in zip(bbox_list, bbox_pts):
+            result_dict['pts_bbox'] = pts_bbox
+        return new_prev_bev, bbox_list
diff --git a/mmcv/models/detectors/bevformer_fp16.py b/mmcv/models/detectors/bevformer_fp16.py
new file mode 100644
index 0000000..5325e3c
--- /dev/null
+++ b/mmcv/models/detectors/bevformer_fp16.py
@@ -0,0 +1,89 @@
+# ---------------------------------------------
+# Copyright (c) OpenMMLab. All rights reserved.
+# ---------------------------------------------
+# Modified by Zhiqi Li
+# ---------------------------------------------
+
+import torch
+from mmcv.runner import force_fp32, auto_fp16
+from mmdet.models import DETECTORS
+from mmdet3d.core import bbox3d2result
+from mmdet3d.models.detectors.mvx_two_stage import MVXTwoStageDetector
+from projects.mmdet3d_plugin.models.utils.grid_mask import GridMask
+from projects.mmdet3d_plugin.bevformer.detectors.bevformer import BEVFormer
+import time
+import copy
+import numpy as np
+import mmdet3d
+from projects.mmdet3d_plugin.models.utils.bricks import run_time
+
+
+@DETECTORS.register_module()
+class BEVFormer_fp16(BEVFormer):
+    """
+    The default version of BEVFormer currently cannot support FP16.
+    We provide this version to resolve this issue.
+    """
+
+    @auto_fp16(apply_to=('img', 'prev_bev', 'points'))
+    def forward_train(self,
+                      points=None,
+                      img_metas=None,
+                      gt_bboxes_3d=None,
+                      gt_labels_3d=None,
+                      gt_labels=None,
+                      gt_bboxes=None,
+                      img=None,
+                      proposals=None,
+                      gt_bboxes_ignore=None,
+                      img_depth=None,
+                      img_mask=None,
+                      prev_bev=None,
+                      ):
+        """Forward training function.
+        Args:
+            points (list[torch.Tensor], optional): Points of each sample.
+                Defaults to None.
+            img_metas (list[dict], optional): Meta information of each sample.
+                Defaults to None.
+            gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`], optional):
+                Ground truth 3D boxes. Defaults to None.
+            gt_labels_3d (list[torch.Tensor], optional): Ground truth labels
+                of 3D boxes. Defaults to None.
+            gt_labels (list[torch.Tensor], optional): Ground truth labels
+                of 2D boxes in images. Defaults to None.
+            gt_bboxes (list[torch.Tensor], optional): Ground truth 2D boxes in
+                images. Defaults to None.
+            img (torch.Tensor, optional): Images of each sample with shape
+                (N, C, H, W). Defaults to None.
+            proposals (list[torch.Tensor], optional): Predicted proposals
+                used for training Fast RCNN. Defaults to None.
+            gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth
+                2D boxes in images to be ignored. Defaults to None.
+            prev_bev (torch.Tensor, optional): BEV features of the previous
+                frame. Defaults to None.
+        Returns:
+            dict: Losses of different branches.
+        """
+
+        img_feats = self.extract_feat(img=img, img_metas=img_metas)
+
+        losses = dict()
+        losses_pts = self.forward_pts_train(img_feats, gt_bboxes_3d,
+                                            gt_labels_3d, img_metas,
+                                            gt_bboxes_ignore, prev_bev=prev_bev)
+        losses.update(losses_pts)
+        return losses
+
+
+    def val_step(self, data, optimizer):
+        """
+        In BEVFormer_fp16, we use this `val_step` function to infer the `prev_bev`.
+        This is not the standard function of `val_step`.
+        """
+
+        img = data['img']
+        img_metas = data['img_metas']
+        img_feats = self.extract_feat(img=img, img_metas=img_metas)
+        prev_bev = data.get('prev_bev', None)
+        prev_bev = self.pts_bbox_head(img_feats, img_metas, prev_bev=prev_bev, only_bev=True)
+        return prev_bev
\ No newline at end of file
diff --git a/mmcv/models/detectors/mvx_two_stage.py b/mmcv/models/detectors/mvx_two_stage.py
new file mode 100644
index 0000000..dc99ac3
--- /dev/null
+++ b/mmcv/models/detectors/mvx_two_stage.py
@@ -0,0 +1,506 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import warnings
+from mmcv.parallel import DataContainer as DC
+from mmcv.utils import force_fp32
+from os import path as osp
+from torch.nn import functional as F
+
+from mmcv.core.bbox.structures.box_3d_mode import Box3DMode
+from mmcv.core.bbox.structures.coord_3d_mode import Coord3DMode
+from mmcv.core.bbox.transforms import bbox3d2result
+from mmcv.core.post_processing.merge_augs import merge_aug_bboxes_3d
+from mmcv.core.visualizer import show_result
+from mmcv.ops.voxelize import Voxelization
+from mmcv.core.utils import multi_apply
+from mmcv.models import DETECTORS
+from mmcv.utils import is_list_of
+from ..
import builder +from .base import Base3DDetector + + +@DETECTORS.register_module() +class MVXTwoStageDetector(Base3DDetector): + """Base class of Multi-modality VoxelNet.""" + + def __init__(self, + pts_voxel_layer=None, + pts_voxel_encoder=None, + pts_middle_encoder=None, + pts_fusion_layer=None, + img_backbone=None, + pts_backbone=None, + img_neck=None, + pts_neck=None, + pts_bbox_head=None, + img_roi_head=None, + img_rpn_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(MVXTwoStageDetector, self).__init__(init_cfg=init_cfg) + + if pts_voxel_layer: + self.pts_voxel_layer = Voxelization(**pts_voxel_layer) + if pts_voxel_encoder: + self.pts_voxel_encoder = builder.build_voxel_encoder( + pts_voxel_encoder) + if pts_middle_encoder: + self.pts_middle_encoder = builder.build_middle_encoder( + pts_middle_encoder) + if pts_backbone: + self.pts_backbone = builder.build_backbone(pts_backbone) + if pts_fusion_layer: + self.pts_fusion_layer = builder.build_fusion_layer( + pts_fusion_layer) + if pts_neck is not None: + self.pts_neck = builder.build_neck(pts_neck) + if pts_bbox_head: + pts_train_cfg = train_cfg.pts if train_cfg else None + pts_bbox_head.update(train_cfg=pts_train_cfg) + pts_test_cfg = test_cfg.pts if test_cfg else None + pts_bbox_head.update(test_cfg=pts_test_cfg) + self.pts_bbox_head = builder.build_head(pts_bbox_head) + + if img_backbone: + self.img_backbone = builder.build_backbone(img_backbone) + if img_neck is not None: + self.img_neck = builder.build_neck(img_neck) + if img_rpn_head is not None: + self.img_rpn_head = builder.build_head(img_rpn_head) + if img_roi_head is not None: + self.img_roi_head = builder.build_head(img_roi_head) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + if pretrained is None: + img_pretrained = None + pts_pretrained = None + elif isinstance(pretrained, dict): + img_pretrained = pretrained.get('img', None) + pts_pretrained = pretrained.get('pts', None) + else: + raise ValueError( + f'pretrained should be a dict, got {type(pretrained)}') + + if self.with_img_backbone: + if img_pretrained is not None: + warnings.warn('DeprecationWarning: pretrained is a deprecated \ + key, please consider using init_cfg') + self.img_backbone.init_cfg = dict( + type='Pretrained', checkpoint=img_pretrained) + if self.with_img_roi_head: + if img_pretrained is not None: + warnings.warn('DeprecationWarning: pretrained is a deprecated \ + key, please consider using init_cfg') + self.img_roi_head.init_cfg = dict( + type='Pretrained', checkpoint=img_pretrained) + + if self.with_pts_backbone: + if pts_pretrained is not None: + warnings.warn('DeprecationWarning: pretrained is a deprecated \ + key, please consider using init_cfg') + self.pts_backbone.init_cfg = dict( + type='Pretrained', checkpoint=pts_pretrained) + + @property + def with_img_shared_head(self): + """bool: Whether the detector has a shared head in image branch.""" + return hasattr(self, + 'img_shared_head') and self.img_shared_head is not None + + @property + def with_pts_bbox(self): + """bool: Whether the detector has a 3D box head.""" + return hasattr(self, + 'pts_bbox_head') and self.pts_bbox_head is not None + + @property + def with_img_bbox(self): + """bool: Whether the detector has a 2D image box head.""" + return hasattr(self, + 'img_bbox_head') and self.img_bbox_head is not None + + @property + def with_img_backbone(self): + """bool: Whether the detector has a 2D image backbone.""" + return hasattr(self, 'img_backbone') and self.img_backbone is not None 
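+    # Note: these with_* helpers follow the usual mmdet convention: an optional
+    # submodule "exists" only if it was built in __init__ and is not None.
+    # A hedged usage sketch (mirroring extract_img_feat below):
+    #     if self.with_img_neck:
+    #         img_feats = self.img_neck(img_feats)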
+
+    @property
+    def with_pts_backbone(self):
+        """bool: Whether the detector has a 3D backbone."""
+        return hasattr(self, 'pts_backbone') and self.pts_backbone is not None
+
+    @property
+    def with_fusion(self):
+        """bool: Whether the detector has a fusion layer."""
+        return hasattr(self,
+                       'pts_fusion_layer') and self.pts_fusion_layer is not None
+
+    @property
+    def with_img_neck(self):
+        """bool: Whether the detector has a neck in image branch."""
+        return hasattr(self, 'img_neck') and self.img_neck is not None
+
+    @property
+    def with_pts_neck(self):
+        """bool: Whether the detector has a neck in 3D detector branch."""
+        return hasattr(self, 'pts_neck') and self.pts_neck is not None
+
+    @property
+    def with_img_rpn(self):
+        """bool: Whether the detector has a 2D RPN in image detector branch."""
+        return hasattr(self, 'img_rpn_head') and self.img_rpn_head is not None
+
+    @property
+    def with_img_roi_head(self):
+        """bool: Whether the detector has a RoI Head in image branch."""
+        return hasattr(self, 'img_roi_head') and self.img_roi_head is not None
+
+    @property
+    def with_voxel_encoder(self):
+        """bool: Whether the detector has a voxel encoder."""
+        return hasattr(self,
+                       'voxel_encoder') and self.voxel_encoder is not None
+
+    @property
+    def with_middle_encoder(self):
+        """bool: Whether the detector has a middle encoder."""
+        return hasattr(self,
+                       'middle_encoder') and self.middle_encoder is not None
+
+    def extract_img_feat(self, img, img_metas):
+        """Extract features of images."""
+        if self.with_img_backbone and img is not None:
+            input_shape = img.shape[-2:]
+            # update real input shape of each single img
+            for img_meta in img_metas:
+                img_meta.update(input_shape=input_shape)
+
+            if img.dim() == 5 and img.size(0) == 1:
+                img.squeeze_()
+            elif img.dim() == 5 and img.size(0) > 1:
+                B, N, C, H, W = img.size()
+                img = img.view(B * N, C, H, W)
+            img_feats = self.img_backbone(img)
+        else:
+            return None
+        if self.with_img_neck:
+            img_feats = self.img_neck(img_feats)
+        return img_feats
+
+    def extract_pts_feat(self, pts, img_feats, img_metas):
+        """Extract features of points."""
+        if not self.with_pts_bbox:
+            return None
+        voxels, num_points, coors = self.voxelize(pts)
+        voxel_features = self.pts_voxel_encoder(voxels, num_points, coors,
+                                                img_feats, img_metas)
+        batch_size = coors[-1, 0] + 1
+        x = self.pts_middle_encoder(voxel_features, coors, batch_size)
+        x = self.pts_backbone(x)
+        if self.with_pts_neck:
+            x = self.pts_neck(x)
+        return x
+
+    def extract_feat(self, points, img, img_metas):
+        """Extract features from images and points."""
+        img_feats = self.extract_img_feat(img, img_metas)
+        pts_feats = self.extract_pts_feat(points, img_feats, img_metas)
+        return (img_feats, pts_feats)
+
+    @torch.no_grad()
+    @force_fp32()
+    def voxelize(self, points):
+        """Apply dynamic voxelization to points.
+
+        Args:
+            points (list[torch.Tensor]): Points of each sample.
+
+        Returns:
+            tuple[torch.Tensor]: Concatenated points, number of points
+                per voxel, and coordinates.
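+
+        Example (illustrative sketch; tensor names and point counts assumed):
+            >>> # two samples, each an (N_i, point_dim) tensor
+            >>> voxels, num_points, coors = self.voxelize([pts_a, pts_b])
+            >>> # coors has the batch index padded into column 0, so
+            >>> # coors[:, 0] == 0 selects the voxels that came from pts_a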
+ """ + voxels, coors, num_points = [], [], [] + for res in points: + res_voxels, res_coors, res_num_points = self.pts_voxel_layer(res) + voxels.append(res_voxels) + coors.append(res_coors) + num_points.append(res_num_points) + voxels = torch.cat(voxels, dim=0) + num_points = torch.cat(num_points, dim=0) + coors_batch = [] + for i, coor in enumerate(coors): + coor_pad = F.pad(coor, (1, 0), mode='constant', value=i) + coors_batch.append(coor_pad) + coors_batch = torch.cat(coors_batch, dim=0) + return voxels, num_points, coors_batch + + def forward_train(self, + points=None, + img_metas=None, + gt_bboxes_3d=None, + gt_labels_3d=None, + gt_labels=None, + gt_bboxes=None, + img=None, + proposals=None, + gt_bboxes_ignore=None): + """Forward training function. + + Args: + points (list[torch.Tensor], optional): Points of each sample. + Defaults to None. + img_metas (list[dict], optional): Meta information of each sample. + Defaults to None. + gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`], optional): + Ground truth 3D boxes. Defaults to None. + gt_labels_3d (list[torch.Tensor], optional): Ground truth labels + of 3D boxes. Defaults to None. + gt_labels (list[torch.Tensor], optional): Ground truth labels + of 2D boxes in images. Defaults to None. + gt_bboxes (list[torch.Tensor], optional): Ground truth 2D boxes in + images. Defaults to None. + img (torch.Tensor optional): Images of each sample with shape + (N, C, H, W). Defaults to None. + proposals ([list[torch.Tensor], optional): Predicted proposals + used for training Fast RCNN. Defaults to None. + gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth + 2D boxes in images to be ignored. Defaults to None. + + Returns: + dict: Losses of different branches. + """ + img_feats, pts_feats = self.extract_feat( + points, img=img, img_metas=img_metas) + losses = dict() + if pts_feats: + losses_pts = self.forward_pts_train(pts_feats, gt_bboxes_3d, + gt_labels_3d, img_metas, + gt_bboxes_ignore) + losses.update(losses_pts) + if img_feats: + losses_img = self.forward_img_train( + img_feats, + img_metas=img_metas, + gt_bboxes=gt_bboxes, + gt_labels=gt_labels, + gt_bboxes_ignore=gt_bboxes_ignore, + proposals=proposals) + losses.update(losses_img) + return losses + + def forward_pts_train(self, + pts_feats, + gt_bboxes_3d, + gt_labels_3d, + img_metas, + gt_bboxes_ignore=None): + """Forward function for point cloud branch. + + Args: + pts_feats (list[torch.Tensor]): Features of point cloud branch + gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`]): Ground truth + boxes for each sample. + gt_labels_3d (list[torch.Tensor]): Ground truth labels for + boxes of each sampole + img_metas (list[dict]): Meta information of samples. + gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth + boxes to be ignored. Defaults to None. + + Returns: + dict: Losses of each branch. + """ + outs = self.pts_bbox_head(pts_feats) + loss_inputs = outs + (gt_bboxes_3d, gt_labels_3d, img_metas) + losses = self.pts_bbox_head.loss( + *loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + return losses + + def forward_img_train(self, + x, + img_metas, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + proposals=None, + **kwargs): + """Forward function for image branch. + + This function works similar to the forward function of Faster R-CNN. + + Args: + x (list[torch.Tensor]): Image features of shape (B, C, H, W) + of multiple levels. + img_metas (list[dict]): Meta information of images. + gt_bboxes (list[torch.Tensor]): Ground truth boxes of each image + sample. 
+ gt_labels (list[torch.Tensor]): Ground truth labels of boxes. + gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth + boxes to be ignored. Defaults to None. + proposals (list[torch.Tensor], optional): Proposals of each sample. + Defaults to None. + + Returns: + dict: Losses of each branch. + """ + losses = dict() + # RPN forward and loss + if self.with_img_rpn: + rpn_outs = self.img_rpn_head(x) + rpn_loss_inputs = rpn_outs + (gt_bboxes, img_metas, + self.train_cfg.img_rpn) + rpn_losses = self.img_rpn_head.loss( + *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + losses.update(rpn_losses) + + proposal_cfg = self.train_cfg.get('img_rpn_proposal', + self.test_cfg.img_rpn) + proposal_inputs = rpn_outs + (img_metas, proposal_cfg) + proposal_list = self.img_rpn_head.get_bboxes(*proposal_inputs) + else: + proposal_list = proposals + + # bbox head forward and loss + if self.with_img_bbox: + # bbox head forward and loss + img_roi_losses = self.img_roi_head.forward_train( + x, img_metas, proposal_list, gt_bboxes, gt_labels, + gt_bboxes_ignore, **kwargs) + losses.update(img_roi_losses) + + return losses + + def simple_test_img(self, x, img_metas, proposals=None, rescale=False): + """Test without augmentation.""" + if proposals is None: + proposal_list = self.simple_test_rpn(x, img_metas, + self.test_cfg.img_rpn) + else: + proposal_list = proposals + + return self.img_roi_head.simple_test( + x, proposal_list, img_metas, rescale=rescale) + + def simple_test_rpn(self, x, img_metas, rpn_test_cfg): + """RPN test function.""" + rpn_outs = self.img_rpn_head(x) + proposal_inputs = rpn_outs + (img_metas, rpn_test_cfg) + proposal_list = self.img_rpn_head.get_bboxes(*proposal_inputs) + return proposal_list + + def simple_test_pts(self, x, img_metas, rescale=False): + """Test function of point cloud branch.""" + outs = self.pts_bbox_head(x) + bbox_list = self.pts_bbox_head.get_bboxes( + *outs, img_metas, rescale=rescale) + bbox_results = [ + bbox3d2result(bboxes, scores, labels) + for bboxes, scores, labels in bbox_list + ] + return bbox_results + + def simple_test(self, points, img_metas, img=None, rescale=False): + """Test function without augmentaiton.""" + img_feats, pts_feats = self.extract_feat( + points, img=img, img_metas=img_metas) + + bbox_list = [dict() for i in range(len(img_metas))] + if pts_feats and self.with_pts_bbox: + bbox_pts = self.simple_test_pts( + pts_feats, img_metas, rescale=rescale) + for result_dict, pts_bbox in zip(bbox_list, bbox_pts): + result_dict['pts_bbox'] = pts_bbox + if img_feats and self.with_img_bbox: + bbox_img = self.simple_test_img( + img_feats, img_metas, rescale=rescale) + for result_dict, img_bbox in zip(bbox_list, bbox_img): + result_dict['img_bbox'] = img_bbox + return bbox_list + + def aug_test(self, points, img_metas, imgs=None, rescale=False): + """Test function with augmentaiton.""" + img_feats, pts_feats = self.extract_feats(points, img_metas, imgs) + + bbox_list = dict() + if pts_feats and self.with_pts_bbox: + bbox_pts = self.aug_test_pts(pts_feats, img_metas, rescale) + bbox_list.update(pts_bbox=bbox_pts) + return [bbox_list] + + def extract_feats(self, points, img_metas, imgs=None): + """Extract point and image features of multiple samples.""" + if imgs is None: + imgs = [None] * len(img_metas) + img_feats, pts_feats = multi_apply(self.extract_feat, points, imgs, + img_metas) + return img_feats, pts_feats + + def aug_test_pts(self, feats, img_metas, rescale=False): + """Test function of point cloud branch with augmentaiton.""" + # only 
support aug_test for one sample
+        aug_bboxes = []
+        for x, img_meta in zip(feats, img_metas):
+            outs = self.pts_bbox_head(x)
+            bbox_list = self.pts_bbox_head.get_bboxes(
+                *outs, img_meta, rescale=rescale)
+            bbox_list = [
+                dict(boxes_3d=bboxes, scores_3d=scores, labels_3d=labels)
+                for bboxes, scores, labels in bbox_list
+            ]
+            aug_bboxes.append(bbox_list[0])
+
+        # after merging, bboxes will be rescaled to the original image size
+        merged_bboxes = merge_aug_bboxes_3d(aug_bboxes, img_metas,
+                                            self.pts_bbox_head.test_cfg)
+        return merged_bboxes
+
+    def show_results(self, data, result, out_dir):
+        """Results visualization.
+
+        Args:
+            data (dict): Input points and the information of the sample.
+            result (dict): Prediction results.
+            out_dir (str): Output directory of visualization result.
+        """
+        for batch_id in range(len(result)):
+            if isinstance(data['points'][0], DC):
+                points = data['points'][0]._data[0][batch_id].numpy()
+            elif is_list_of(data['points'][0], torch.Tensor):
+                points = data['points'][0][batch_id]
+            else:
+                raise ValueError(f"Unsupported data type {type(data['points'][0])} "
+                                 f'for visualization!')
+            if isinstance(data['img_metas'][0], DC):
+                pts_filename = data['img_metas'][0]._data[0][batch_id][
+                    'pts_filename']
+                box_mode_3d = data['img_metas'][0]._data[0][batch_id][
+                    'box_mode_3d']
+            elif is_list_of(data['img_metas'][0], dict):
+                pts_filename = data['img_metas'][0][batch_id]['pts_filename']
+                box_mode_3d = data['img_metas'][0][batch_id]['box_mode_3d']
+            else:
+                raise ValueError(
+                    f"Unsupported data type {type(data['img_metas'][0])} "
+                    f'for visualization!')
+            file_name = osp.split(pts_filename)[-1].split('.')[0]
+
+            assert out_dir is not None, 'Expect out_dir, got none.'
+            inds = result[batch_id]['pts_bbox']['scores_3d'] > 0.1
+            pred_bboxes = result[batch_id]['pts_bbox']['boxes_3d'][inds]
+
+            # for now we convert points and bbox into depth mode
+            if (box_mode_3d == Box3DMode.CAM) or (box_mode_3d
+                                                  == Box3DMode.LIDAR):
+                points = Coord3DMode.convert_point(points, Coord3DMode.LIDAR,
+                                                   Coord3DMode.DEPTH)
+                pred_bboxes = Box3DMode.convert(pred_bboxes, box_mode_3d,
+                                                Box3DMode.DEPTH)
+            elif box_mode_3d != Box3DMode.DEPTH:
+                raise ValueError(
+                    f'Unsupported box_mode_3d {box_mode_3d} for conversion!')
+
+            pred_bboxes = pred_bboxes.tensor.cpu().numpy()
+            show_result(points, None, pred_bboxes, out_dir, file_name)
diff --git a/mmcv/models/detectors/single_stage.py b/mmcv/models/detectors/single_stage.py
new file mode 100644
index 0000000..4e0748b
--- /dev/null
+++ b/mmcv/models/detectors/single_stage.py
@@ -0,0 +1,234 @@
+import warnings
+
+import torch
+
+from mmcv.core.bbox.transforms import bbox2result
+from ..builder import DETECTORS, build_backbone, build_head, build_neck
+from .base import BaseDetector, Base3DDetector
+
+
+@DETECTORS.register_module()
+class SingleStageDetector(BaseDetector):
+    """Base class for single-stage detectors.
+
+    Single-stage detectors directly and densely predict bounding boxes on the
+    output features of the backbone+neck.
+ """ + + def __init__(self, + backbone, + neck=None, + bbox_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(SingleStageDetector, self).__init__(init_cfg) + if pretrained: + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + backbone.pretrained = pretrained + self.backbone = build_backbone(backbone) + if neck is not None: + self.neck = build_neck(neck) + bbox_head.update(train_cfg=train_cfg) + bbox_head.update(test_cfg=test_cfg) + self.bbox_head = build_head(bbox_head) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + def extract_feat(self, img): + """Directly extract features from the backbone+neck.""" + x = self.backbone(img) + if self.with_neck: + x = self.neck(x) + return x + + def forward_dummy(self, img): + """Used for computing network flops. + + See `mmcvection/tools/analysis_tools/get_flops.py` + """ + x = self.extract_feat(img) + outs = self.bbox_head(x) + return outs + + def forward_train(self, + img, + img_metas, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None): + """ + Args: + img (Tensor): Input images of shape (N, C, H, W). + Typically these should be mean centered and std scaled. + img_metas (list[dict]): A List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + :class:`mmcv.datasets.pipelines.Collect`. + gt_bboxes (list[Tensor]): Each item are the truth boxes for each + image in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): Class indices corresponding to each box + gt_bboxes_ignore (None | list[Tensor]): Specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + super(SingleStageDetector, self).forward_train(img, img_metas) + x = self.extract_feat(img) + losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes, + gt_labels, gt_bboxes_ignore) + return losses + + def simple_test(self, img, img_metas, rescale=False): + """Test function without test-time augmentation. + + Args: + img (torch.Tensor): Images with shape (N, C, H, W). + img_metas (list[dict]): List of image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[list[np.ndarray]]: BBox results of each image and classes. + The outer list corresponds to each image. The inner list + corresponds to each class. + """ + feat = self.extract_feat(img) + results_list = self.bbox_head.simple_test( + feat, img_metas, rescale=rescale) + bbox_results = [ + bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) + for det_bboxes, det_labels in results_list + ] + return bbox_results + + def aug_test(self, imgs, img_metas, rescale=False): + """Test function with test time augmentation. + + Args: + imgs (list[Tensor]): the outer list indicates test-time + augmentations and inner Tensor should have a shape NxCxHxW, + which contains all images in the batch. + img_metas (list[list[dict]]): the outer list indicates test-time + augs (multiscale, flip, etc.) and the inner list indicates + images in a batch. each dict has image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[list[np.ndarray]]: BBox results of each image and classes. + The outer list corresponds to each image. The inner list + corresponds to each class. 
+ """ + assert hasattr(self.bbox_head, 'aug_test'), \ + f'{self.bbox_head.__class__.__name__}' \ + ' does not support test-time augmentation' + + feats = self.extract_feats(imgs) + results_list = self.bbox_head.aug_test( + feats, img_metas, rescale=rescale) + bbox_results = [ + bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) + for det_bboxes, det_labels in results_list + ] + return bbox_results + + def onnx_export(self, img, img_metas): + """Test function without test time augmentation. + + Args: + img (torch.Tensor): input images. + img_metas (list[dict]): List of image information. + + Returns: + tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] + and class labels of shape [N, num_det]. + """ + x = self.extract_feat(img) + outs = self.bbox_head(x) + # get origin input shape to support onnx dynamic shape + + # get shape as tensor + img_shape = torch._shape_as_tensor(img)[2:] + img_metas[0]['img_shape_for_onnx'] = img_shape + # get pad input shape to support onnx dynamic shape for exporting + # `CornerNet` and `CentripetalNet`, which 'pad_shape' is used + # for inference + img_metas[0]['pad_shape_for_onnx'] = img_shape + # TODO:move all onnx related code in bbox_head to onnx_export function + det_bboxes, det_labels = self.bbox_head.get_bboxes(*outs, img_metas) + + return det_bboxes, det_labels + +@DETECTORS.register_module() +class SingleStage3DDetector(Base3DDetector): + """SingleStage3DDetector. + + This class serves as a base class for single-stage 3D detectors. + + Args: + backbone (dict): Config dict of detector's backbone. + neck (dict, optional): Config dict of neck. Defaults to None. + bbox_head (dict, optional): Config dict of box head. Defaults to None. + train_cfg (dict, optional): Config dict of training hyper-parameters. + Defaults to None. + test_cfg (dict, optional): Config dict of test hyper-parameters. + Defaults to None. + pretrained (str, optional): Path of pretrained models. + Defaults to None. + """ + + def __init__(self, + backbone, + neck=None, + bbox_head=None, + train_cfg=None, + test_cfg=None, + init_cfg=None, + pretrained=None): + super(SingleStage3DDetector, self).__init__(init_cfg) + self.backbone = build_backbone(backbone) + if neck is not None: + self.neck = build_neck(neck) + bbox_head.update(train_cfg=train_cfg) + bbox_head.update(test_cfg=test_cfg) + self.bbox_head = build_head(bbox_head) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + def forward_dummy(self, points): + """Used for computing network flops. + + See `mmcvection/tools/analysis_tools/get_flops.py` + """ + x = self.extract_feat(points) + try: + sample_mod = self.train_cfg.sample_mod + outs = self.bbox_head(x, sample_mod) + except AttributeError: + outs = self.bbox_head(x) + return outs + + def extract_feat(self, points, img_metas=None): + """Directly extract features from the backbone+neck. + + Args: + points (torch.Tensor): Input points. + """ + x = self.backbone(points) + if self.with_neck: + x = self.neck(x) + return x + + def extract_feats(self, points, img_metas): + """Extract features of multiple samples.""" + return [ + self.extract_feat(pts, img_meta) + for pts, img_meta in zip(points, img_metas) + ] + diff --git a/mmcv/models/detectors/single_stage_mono3d.py b/mmcv/models/detectors/single_stage_mono3d.py new file mode 100644 index 0000000..2b42072 --- /dev/null +++ b/mmcv/models/detectors/single_stage_mono3d.py @@ -0,0 +1,224 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import numpy as np +import torch +from mmcv.parallel import DataContainer as DC +from os import path as osp + +from mmcv.core.bbox.structures.cam_box3d import CameraInstance3DBoxes +from mmcv.core.bbox.transforms import bbox3d2result +from mmcv.core.visualizer import show_multi_modality_result + + +# from mmcv.core import (CameraInstance3DBoxes, bbox3d2result, +# show_multi_modality_result) +from mmcv.models.builder import DETECTORS +from mmcv.models.detectors.single_stage import SingleStageDetector +from mmcv.utils import is_list_of +from mmcv.image import imread + + +@DETECTORS.register_module() +class SingleStageMono3DDetector(SingleStageDetector): + """Base class for monocular 3D single-stage detectors. + + Single-stage detectors directly and densely predict bounding boxes on the + output features of the backbone+neck. + """ + + def extract_feats(self, imgs): + """Directly extract features from the backbone+neck.""" + assert isinstance(imgs, list) + return [self.extract_feat(img) for img in imgs] + + def forward_train(self, + img, + img_metas, + gt_bboxes, + gt_labels, + gt_bboxes_3d, + gt_labels_3d, + centers2d, + depths, + attr_labels=None, + gt_bboxes_ignore=None): + """ + Args: + img (Tensor): Input images of shape (N, C, H, W). + Typically these should be mean centered and std scaled. + img_metas (list[dict]): A List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + :class:`mmcv.datasets.pipelines.Collect`. + gt_bboxes (list[Tensor]): Each item are the truth boxes for each + image in [tl_x, tl_y, br_x, br_y] format. + gt_labels (list[Tensor]): Class indices corresponding to each box + gt_bboxes_3d (list[Tensor]): Each item are the 3D truth boxes for + each image in [x, y, z, w, l, h, theta, vx, vy] format. + gt_labels_3d (list[Tensor]): 3D class indices corresponding to + each box. + centers2d (list[Tensor]): Projected 3D centers onto 2D images. + depths (list[Tensor]): Depth of projected centers on 2D images. + attr_labels (list[Tensor], optional): Attribute indices + corresponding to each box + gt_bboxes_ignore (None | list[Tensor]): Specify which bounding + boxes can be ignored when computing the loss. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + x = self.extract_feat(img) + losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes, + gt_labels, gt_bboxes_3d, + gt_labels_3d, centers2d, depths, + attr_labels, gt_bboxes_ignore) + return losses + + def simple_test(self, img, img_metas, rescale=False): + """Test function without test time augmentation. + + Args: + imgs (list[torch.Tensor]): List of multiple images + img_metas (list[dict]): List of image information. + rescale (bool, optional): Whether to rescale the results. + Defaults to False. + + Returns: + list[list[np.ndarray]]: BBox results of each image and classes. + The outer list corresponds to each image. The inner list + corresponds to each class. 
+ """ + x = self.extract_feat(img) + outs = self.bbox_head(x) + bbox_outputs = self.bbox_head.get_bboxes( + *outs, img_metas, rescale=rescale) + + if self.bbox_head.pred_bbox2d: + from mmcv.core import bbox2result + bbox2d_img = [ + bbox2result(bboxes2d, labels, self.bbox_head.num_classes) + for bboxes, scores, labels, attrs, bboxes2d in bbox_outputs + ] + bbox_outputs = [bbox_outputs[0][:-1]] + + bbox_img = [ + bbox3d2result(bboxes, scores, labels, attrs) + for bboxes, scores, labels, attrs in bbox_outputs + ] + + bbox_list = [dict() for i in range(len(img_metas))] + for result_dict, img_bbox in zip(bbox_list, bbox_img): + result_dict['img_bbox'] = img_bbox + if self.bbox_head.pred_bbox2d: + for result_dict, img_bbox2d in zip(bbox_list, bbox2d_img): + result_dict['img_bbox2d'] = img_bbox2d + return bbox_list + + def aug_test(self, imgs, img_metas, rescale=False): + """Test function with test time augmentation.""" + feats = self.extract_feats(imgs) + + # only support aug_test for one sample + outs_list = [self.bbox_head(x) for x in feats] + for i, img_meta in enumerate(img_metas): + if img_meta[0]['pcd_horizontal_flip']: + for j in range(len(outs_list[i])): # for each prediction + if outs_list[i][j][0] is None: + continue + for k in range(len(outs_list[i][j])): + # every stride of featmap + outs_list[i][j][k] = torch.flip( + outs_list[i][j][k], dims=[3]) + reg = outs_list[i][1] + for reg_feat in reg: + # offset_x + reg_feat[:, 0, :, :] = 1 - reg_feat[:, 0, :, :] + # velo_x + if self.bbox_head.pred_velo: + reg_feat[:, 7, :, :] = -reg_feat[:, 7, :, :] + # rotation + reg_feat[:, 6, :, :] = -reg_feat[:, 6, :, :] + np.pi + + merged_outs = [] + for i in range(len(outs_list[0])): # for each prediction + merged_feats = [] + for j in range(len(outs_list[0][i])): + if outs_list[0][i][0] is None: + merged_feats.append(None) + continue + # for each stride of featmap + avg_feats = torch.mean( + torch.cat([x[i][j] for x in outs_list]), + dim=0, + keepdim=True) + if i == 1: # regression predictions + # rot/velo/2d det keeps the original + avg_feats[:, 6:, :, :] = \ + outs_list[0][i][j][:, 6:, :, :] + if i == 2: + # dir_cls keeps the original + avg_feats = outs_list[0][i][j] + merged_feats.append(avg_feats) + merged_outs.append(merged_feats) + merged_outs = tuple(merged_outs) + + bbox_outputs = self.bbox_head.get_bboxes( + *merged_outs, img_metas[0], rescale=rescale) + if self.bbox_head.pred_bbox2d: + from mmcv.core import bbox2result + bbox2d_img = [ + bbox2result(bboxes2d, labels, self.bbox_head.num_classes) + for bboxes, scores, labels, attrs, bboxes2d in bbox_outputs + ] + bbox_outputs = [bbox_outputs[0][:-1]] + + bbox_img = [ + bbox3d2result(bboxes, scores, labels, attrs) + for bboxes, scores, labels, attrs in bbox_outputs + ] + + bbox_list = dict() + bbox_list.update(img_bbox=bbox_img[0]) + if self.bbox_head.pred_bbox2d: + bbox_list.update(img_bbox2d=bbox2d_img[0]) + + return [bbox_list] + + def show_results(self, data, result, out_dir): + """Results visualization. + + Args: + data (list[dict]): Input images and the information of the sample. + result (list[dict]): Prediction results. + out_dir (str): Output directory of visualization result. 
+ """ + for batch_id in range(len(result)): + if isinstance(data['img_metas'][0], DC): + img_filename = data['img_metas'][0]._data[0][batch_id][ + 'filename'] + cam2img = data['img_metas'][0]._data[0][batch_id]['cam2img'] + elif is_list_of(data['img_metas'][0], dict): + img_filename = data['img_metas'][0][batch_id]['filename'] + cam2img = data['img_metas'][0][batch_id]['cam2img'] + else: + ValueError( + f"Unsupported data type {type(data['img_metas'][0])} " + f'for visualization!') + img = imread(img_filename) + file_name = osp.split(img_filename)[-1].split('.')[0] + + assert out_dir is not None, 'Expect out_dir, got none.' + + pred_bboxes = result[batch_id]['img_bbox']['boxes_3d'] + assert isinstance(pred_bboxes, CameraInstance3DBoxes), \ + f'unsupported predicted bbox type {type(pred_bboxes)}' + + show_multi_modality_result( + img, + None, + pred_bboxes, + cam2img, + out_dir, + file_name, + 'camera', + show=True) diff --git a/mmcv/models/detectors/uniad_e2e.py b/mmcv/models/detectors/uniad_e2e.py new file mode 100644 index 0000000..1e0bfc8 --- /dev/null +++ b/mmcv/models/detectors/uniad_e2e.py @@ -0,0 +1,385 @@ +#---------------------------------------------------------------------------------# +# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156) # +# Source code: https://github.com/OpenDriveLab/UniAD # +# Copyright (c) OpenDriveLab. All rights reserved. # +#---------------------------------------------------------------------------------# + +import torch +from mmcv.utils import auto_fp16 +from mmcv.models import DETECTORS +import copy +import os +from ..dense_heads.seg_head_plugin import IOU +from .uniad_track import UniADTrack +from mmcv.models.builder import build_head + +@DETECTORS.register_module() +class UniAD(UniADTrack): + """ + UniAD: Unifying Detection, Tracking, Segmentation, Motion Forecasting, Occupancy Prediction and Planning for Autonomous Driving + """ + def __init__( + self, + seg_head=None, + motion_head=None, + occ_head=None, + planning_head=None, + task_loss_weight=dict( + track=1.0, + map=1.0, + motion=1.0, + occ=1.0, + planning=1.0 + ), + **kwargs, + ): + super(UniAD, self).__init__(**kwargs) + if seg_head: + self.seg_head = build_head(seg_head) + if occ_head: + self.occ_head = build_head(occ_head) + if motion_head: + self.motion_head = build_head(motion_head) + if planning_head: + self.planning_head = build_head(planning_head) + + self.task_loss_weight = task_loss_weight + assert set(task_loss_weight.keys()) == \ + {'track', 'occ', 'motion', 'map', 'planning'} + + @property + def with_planning_head(self): + return hasattr(self, 'planning_head') and self.planning_head is not None + + @property + def with_occ_head(self): + return hasattr(self, 'occ_head') and self.occ_head is not None + + @property + def with_motion_head(self): + return hasattr(self, 'motion_head') and self.motion_head is not None + + @property + def with_seg_head(self): + return hasattr(self, 'seg_head') and self.seg_head is not None + + def forward_dummy(self, img): + dummy_metas = None + return self.forward_test(img=img, img_metas=[[dummy_metas]]) + + def forward(self, inputs, return_loss=True, rescale=False): + """Calls either forward_train or forward_test depending on whether + return_loss=True. + Note this setting will change the expected inputs. When + `return_loss=True`, img and img_metas are single-nested (i.e. + torch.Tensor and list[dict]), and when `resturn_loss=False`, img and + img_metas should be double nested (i.e. 
list[torch.Tensor], + list[list[dict]]), with the outer list indicating test time + augmentations. + """ + if return_loss: + losses = self.forward_train(**inputs) + loss, log_vars = self._parse_losses(losses) + outputs = dict( + loss=loss, log_vars=log_vars, num_samples=len(inputs['img_metas'])) + return outputs + else: + outputs = self.forward_test(**inputs, rescale=rescale) + return outputs + + # Add the subtask loss to the whole model loss + @auto_fp16(apply_to=('img', 'points')) + def forward_train(self, + img=None, + img_metas=None, + gt_bboxes_3d=None, + gt_labels_3d=None, + gt_inds=None, + l2g_t=None, + l2g_r_mat=None, + timestamp=None, + gt_lane_labels=None, + gt_lane_bboxes=None, + gt_lane_masks=None, + gt_fut_traj=None, + gt_fut_traj_mask=None, + gt_past_traj=None, + gt_past_traj_mask=None, + gt_sdc_bbox=None, + gt_sdc_label=None, + gt_sdc_fut_traj=None, + gt_sdc_fut_traj_mask=None, + + # Occ_gt + gt_segmentation=None, + gt_instance=None, + gt_occ_img_is_valid=None, + + #planning + sdc_planning=None, + sdc_planning_mask=None, + command=None, + + # fut gt for planning + gt_future_boxes=None, + **kwargs, # [1, 9] + ): + """Forward training function for the model that includes multiple tasks, such as tracking, segmentation, motion prediction, occupancy prediction, and planning. + + Args: + img (torch.Tensor, optional): Tensor containing images of each sample with shape (N, C, H, W). Defaults to None. + img_metas (list[dict], optional): List of dictionaries containing meta information for each sample. Defaults to None. + gt_bboxes_3d (list[:obj:BaseInstance3DBoxes], optional): List of ground truth 3D bounding boxes for each sample. Defaults to None. + gt_labels_3d (list[torch.Tensor], optional): List of tensors containing ground truth labels for 3D bounding boxes. Defaults to None. + gt_inds (list[torch.Tensor], optional): List of tensors containing indices of ground truth objects. Defaults to None. + l2g_t (list[torch.Tensor], optional): List of tensors containing translation vectors from local to global coordinates. Defaults to None. + l2g_r_mat (list[torch.Tensor], optional): List of tensors containing rotation matrices from local to global coordinates. Defaults to None. + timestamp (list[float], optional): List of timestamps for each sample. Defaults to None. + gt_bboxes_ignore (list[torch.Tensor], optional): List of tensors containing ground truth 2D bounding boxes in images to be ignored. Defaults to None. + gt_lane_labels (list[torch.Tensor], optional): List of tensors containing ground truth lane labels. Defaults to None. + gt_lane_bboxes (list[torch.Tensor], optional): List of tensors containing ground truth lane bounding boxes. Defaults to None. + gt_lane_masks (list[torch.Tensor], optional): List of tensors containing ground truth lane masks. Defaults to None. + gt_fut_traj (list[torch.Tensor], optional): List of tensors containing ground truth future trajectories. Defaults to None. + gt_fut_traj_mask (list[torch.Tensor], optional): List of tensors containing ground truth future trajectory masks. Defaults to None. + gt_past_traj (list[torch.Tensor], optional): List of tensors containing ground truth past trajectories. Defaults to None. + gt_past_traj_mask (list[torch.Tensor], optional): List of tensors containing ground truth past trajectory masks. Defaults to None. + gt_sdc_bbox (list[torch.Tensor], optional): List of tensors containing ground truth self-driving car bounding boxes. Defaults to None. 
+            gt_sdc_label (list[torch.Tensor], optional): List of tensors containing ground truth self-driving car labels. Defaults to None.
+            gt_sdc_fut_traj (list[torch.Tensor], optional): List of tensors containing ground truth self-driving car future trajectories. Defaults to None.
+            gt_sdc_fut_traj_mask (list[torch.Tensor], optional): List of tensors containing ground truth self-driving car future trajectory masks. Defaults to None.
+            gt_segmentation (list[torch.Tensor], optional): List of tensors containing ground truth segmentation masks. Defaults to None.
+            gt_instance (list[torch.Tensor], optional): List of tensors containing ground truth instance segmentation masks. Defaults to None.
+            gt_occ_img_is_valid (list[torch.Tensor], optional): List of tensors containing binary flags indicating whether an image is valid for occupancy prediction. Defaults to None.
+            sdc_planning (list[torch.Tensor], optional): List of tensors containing self-driving car planning information. Defaults to None.
+            sdc_planning_mask (list[torch.Tensor], optional): List of tensors containing self-driving car planning masks. Defaults to None.
+            command (list[torch.Tensor], optional): List of tensors containing high-level command information for planning. Defaults to None.
+            gt_future_boxes (list[torch.Tensor], optional): List of tensors containing ground truth future bounding boxes for planning. Defaults to None.
+            gt_future_labels (list[torch.Tensor], optional): List of tensors containing ground truth future labels for planning. Defaults to None.
+
+        Returns:
+            dict: Dictionary containing losses of different tasks, such as tracking, segmentation, motion prediction, occupancy prediction, and planning. Each key in the dictionary
+                is prefixed with the corresponding task name, e.g., 'track', 'map', 'motion', 'occ', and 'planning'. The values are the calculated losses for each task.
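+            Each task's raw losses are scaled by ``self.task_loss_weight[<task>]``
+            and key-prefixed (e.g. ``track.loss_cls``, an illustrative key) by
+            ``loss_weighted_and_prefixed`` before being merged into this dict.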
+ """ + losses = dict() + len_queue = img.size(1) + + + losses_track, outs_track = self.forward_track_train(img, gt_bboxes_3d, gt_labels_3d, gt_past_traj, gt_past_traj_mask, gt_inds, gt_sdc_bbox, gt_sdc_label, + l2g_t, l2g_r_mat, img_metas, timestamp) + losses_track = self.loss_weighted_and_prefixed(losses_track, prefix='track') + losses.update(losses_track) + + # Upsample bev for tiny version + outs_track = self.upsample_bev_if_tiny(outs_track) + + bev_embed = outs_track["bev_embed"] + bev_pos = outs_track["bev_pos"] + + img_metas = [each[len_queue-1] for each in img_metas] + + outs_seg = dict() + if self.with_seg_head: + losses_seg, outs_seg = self.seg_head.forward_train(bev_embed, img_metas, + gt_lane_labels, gt_lane_bboxes, gt_lane_masks) + + losses_seg = self.loss_weighted_and_prefixed(losses_seg, prefix='map') + losses.update(losses_seg) + + outs_motion = dict() + # Forward Motion Head + if self.with_motion_head: + ret_dict_motion = self.motion_head.forward_train(bev_embed, + gt_bboxes_3d, gt_labels_3d, + gt_fut_traj, gt_fut_traj_mask, + gt_sdc_fut_traj, gt_sdc_fut_traj_mask, + outs_track=outs_track, outs_seg=outs_seg + ) + losses_motion = ret_dict_motion["losses"] + outs_motion = ret_dict_motion["outs_motion"] + outs_motion['bev_pos'] = bev_pos + losses_motion = self.loss_weighted_and_prefixed(losses_motion, prefix='motion') + losses.update(losses_motion) + + # Forward Occ Head + if self.with_occ_head: + if outs_motion['track_query'].shape[1] == 0: + # TODO: rm hard code + outs_motion['track_query'] = torch.zeros((1, 1, 256)).to(bev_embed) + outs_motion['track_query_pos'] = torch.zeros((1,1, 256)).to(bev_embed) + outs_motion['traj_query'] = torch.zeros((3, 1, 1, 6, 256)).to(bev_embed) + outs_motion['all_matched_idxes'] = [[-1]] + losses_occ = self.occ_head.forward_train( + bev_embed, + outs_motion, + gt_inds_list=gt_inds, + gt_segmentation=gt_segmentation, + gt_instance=gt_instance, + gt_img_is_valid=gt_occ_img_is_valid, + ) + losses_occ = self.loss_weighted_and_prefixed(losses_occ, prefix='occ') + losses.update(losses_occ) + + + # Forward Plan Head + if self.with_planning_head: + outs_planning = self.planning_head.forward_train(bev_embed, outs_motion, sdc_planning, sdc_planning_mask, command, gt_future_boxes) + losses_planning = outs_planning['losses'] + losses_planning = self.loss_weighted_and_prefixed(losses_planning, prefix='planning') + losses.update(losses_planning) + + for k,v in losses.items(): + losses[k] = torch.nan_to_num(v) + return losses + + def loss_weighted_and_prefixed(self, loss_dict, prefix=''): + loss_factor = self.task_loss_weight[prefix] + loss_dict = {f"{prefix}.{k}" : v*loss_factor for k, v in loss_dict.items()} + return loss_dict + + def forward_test(self, + img=None, + img_metas=None, + l2g_t=None, + l2g_r_mat=None, + timestamp=None, + gt_lane_labels=None, + gt_lane_masks=None, + rescale=False, + # planning gt(for evaluation only) + sdc_planning=None, + sdc_planning_mask=None, + command=None, + + # Occ_gt (for evaluation only) + gt_segmentation=None, + gt_instance=None, + gt_occ_img_is_valid=None, + **kwargs, + ): + """Test function + """ + for var, name in [(img_metas, 'img_metas')]: + if not isinstance(var, list): + raise TypeError('{} must be a list, but got {}'.format( + name, type(var))) + img = [img] if img is None else img + + if self.prev_frame_num > 0: + if len(self.prev_frame_infos) < self.prev_frame_num: + self.prev_frame_info = { + "prev_bev": None, + "scene_token": None, + "prev_pos": 0, + "prev_angle": 0, + } + else: + self.prev_frame_info = 
self.prev_frame_infos.pop(0) + + if img_metas[0][0]['scene_token'] != self.prev_frame_info['scene_token']: + # the first sample of each scene is truncated + self.prev_frame_info['prev_bev'] = None + # update idx + self.prev_frame_info['scene_token'] = img_metas[0][0]['scene_token'] + + # do not use temporal information + if not self.video_test_mode: + self.prev_frame_info['prev_bev'] = None + + # Get the delta of ego position and angle between two timestamps. + tmp_pos = copy.deepcopy(img_metas[0][0]['can_bus'][:3]) + tmp_angle = copy.deepcopy(img_metas[0][0]['can_bus'][-1]) + # first frame + if self.prev_frame_info['scene_token'] is None: + img_metas[0][0]['can_bus'][:3] = 0 + img_metas[0][0]['can_bus'][-1] = 0 + # following frames + else: + img_metas[0][0]['can_bus'][:3] -= self.prev_frame_info['prev_pos'] + img_metas[0][0]['can_bus'][-1] -= self.prev_frame_info['prev_angle'] + self.prev_frame_info['prev_pos'] = tmp_pos + self.prev_frame_info['prev_angle'] = tmp_angle + + + + img = img[0] + img_metas = img_metas[0] + timestamp = timestamp[0] if timestamp is not None else None + + result = [dict() for i in range(len(img_metas))] + result_track = self.simple_test_track(img, l2g_t, l2g_r_mat, img_metas, timestamp) + + # Upsample bev for tiny model + result_track[0] = self.upsample_bev_if_tiny(result_track[0]) + + bev_embed = result_track[0]["bev_embed"] + + if self.prev_frame_num > 0: + self.prev_frame_infos.append(self.prev_frame_info) + + + + if self.with_seg_head: + result_seg = self.seg_head.forward_test(bev_embed, gt_lane_labels, gt_lane_masks, img_metas, rescale) + + if self.with_motion_head: + result_motion, outs_motion = self.motion_head.forward_test(bev_embed, outs_track=result_track[0], outs_seg=result_seg[0]) + outs_motion['bev_pos'] = result_track[0]['bev_pos'] + + outs_occ = dict() + if self.with_occ_head: + occ_no_query = outs_motion['track_query'].shape[1] == 0 + outs_occ = self.occ_head.forward_test( + bev_embed, + outs_motion, + no_query = occ_no_query, + gt_segmentation=gt_segmentation, + gt_instance=gt_instance, + gt_img_is_valid=gt_occ_img_is_valid, + ) + result[0]['occ'] = outs_occ + + if self.with_planning_head: + planning_gt=dict( + segmentation=gt_segmentation, + sdc_planning=sdc_planning, + sdc_planning_mask=sdc_planning_mask, + command=command + ) + result_planning = self.planning_head.forward_test(bev_embed, outs_motion, outs_occ, command) + result[0]['planning'] = dict( + planning_gt=planning_gt, + result_planning=result_planning, + ) + + pop_track_list = ['prev_bev', 'bev_pos', 'bev_embed', 'track_query_embeddings', 'sdc_embedding'] + result_track[0] = pop_elem_in_result(result_track[0], pop_track_list) + + if self.with_seg_head: + result_seg[0] = pop_elem_in_result(result_seg[0], pop_list=['pts_bbox', 'args_tuple']) + if self.with_motion_head: + result_motion[0] = pop_elem_in_result(result_motion[0]) + if self.with_occ_head: + result[0]['occ'] = pop_elem_in_result(result[0]['occ'], \ + pop_list=['seg_out_mask', 'flow_out', 'future_states_occ', 'pred_ins_masks', 'pred_raw_occ', 'pred_ins_logits', 'pred_ins_sigmoid']) + + for i, res in enumerate(result): + #res['token'] = img_metas[i]['sample_idx'] + res.update(result_track[i]) + if self.with_motion_head: + res.update(result_motion[i]) + if self.with_seg_head: + res.update(result_seg[i]) + + return result + + +def pop_elem_in_result(task_result:dict, pop_list:list=None): + all_keys = list(task_result.keys()) + for k in all_keys: + if k.endswith('query') or k.endswith('query_pos') or k.endswith('embedding'): + 
task_result.pop(k) + + if pop_list is not None: + for pop_k in pop_list: + task_result.pop(pop_k, None) + return task_result diff --git a/mmcv/models/detectors/uniad_track.py b/mmcv/models/detectors/uniad_track.py new file mode 100644 index 0000000..1989974 --- /dev/null +++ b/mmcv/models/detectors/uniad_track.py @@ -0,0 +1,869 @@ +#---------------------------------------------------------------------------------# +# UniAD: Planning-oriented Autonomous Driving (https://arxiv.org/abs/2212.10156) # +# Source code: https://github.com/OpenDriveLab/UniAD # +# Copyright (c) OpenDriveLab. All rights reserved. # +#---------------------------------------------------------------------------------# + +import torch +import torch.nn as nn +from mmcv.utils import auto_fp16 +from mmcv.models import DETECTORS +from mmcv.core.bbox.coder import build_bbox_coder +from mmcv.models.detectors.mvx_two_stage import MVXTwoStageDetector +from mmcv.models.utils.grid_mask import GridMask +import copy +import math +from mmcv.core.bbox.util import normalize_bbox +from mmcv.models import build_loss +from einops import rearrange +from mmcv.models.utils.transformer import inverse_sigmoid +from ..dense_heads.track_head_plugin import MemoryBank, QueryInteractionModule, Instances, RuntimeTrackerBase + +@DETECTORS.register_module() +class UniADTrack(MVXTwoStageDetector): + """UniAD tracking part + """ + def __init__( + self, + use_grid_mask=False, + img_backbone=None, + img_neck=None, + pts_bbox_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + video_test_mode=False, + loss_cfg=None, + prev_frame_num=0, + qim_args=dict( + qim_type="QIMBase", + merger_dropout=0, + update_query_pos=False, + fp_ratio=0.3, + random_drop=0.1, + ), + mem_args=dict( + memory_bank_type="MemoryBank", + memory_bank_score_thresh=0.0, + memory_bank_len=4, + ), + bbox_coder=dict( + type="DETRTrack3DCoder", + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=[-51.2, -51.2, -5.0, 51.2, 51.2, 3.0], + max_num=300, + num_classes=10, + score_threshold=0.0, + with_nms=False, + iou_thres=0.3, + ), + pc_range=None, + embed_dims=256, + num_query=900, + num_classes=10, + vehicle_id_list=None, + score_thresh=0.2, + filter_score_thresh=0.1, + miss_tolerance=5, + gt_iou_threshold=0.0, + freeze_img_backbone=False, + freeze_img_neck=False, + freeze_bn=False, + freeze_bev_encoder=False, + queue_length=3, + ): + super(UniADTrack, self).__init__( + img_backbone=img_backbone, + img_neck=img_neck, + pts_bbox_head=pts_bbox_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + pretrained=pretrained, + ) + + self.grid_mask = GridMask( + True, True, rotate=1, offset=False, ratio=0.5, mode=1, prob=0.7 + ) + self.use_grid_mask = use_grid_mask + self.fp16_enabled = False + self.embed_dims = embed_dims + self.num_query = num_query + self.num_classes = num_classes + self.vehicle_id_list = vehicle_id_list + self.pc_range = pc_range + self.queue_length = queue_length + if freeze_img_backbone: + if freeze_bn: + self.img_backbone.eval() + for param in self.img_backbone.parameters(): + param.requires_grad = False + + if freeze_img_neck: + if freeze_bn: + self.img_neck.eval() + for param in self.img_neck.parameters(): + param.requires_grad = False + + # temporal + self.video_test_mode = video_test_mode + assert self.video_test_mode + self.prev_frame_num = prev_frame_num + self.prev_frame_infos = [] + self.prev_frame_info = { + "prev_bev": None, + "scene_token": None, + "prev_pos": 0, + "prev_angle": 0, + } + self.query_embedding = 
nn.Embedding(self.num_query+1, self.embed_dims * 2) # the final one is ego query, which constantly models ego-vehicle + self.reference_points = nn.Linear(self.embed_dims, 3) + + self.mem_bank_len = mem_args["memory_bank_len"] + self.track_base = RuntimeTrackerBase( + score_thresh=score_thresh, + filter_score_thresh=filter_score_thresh, + miss_tolerance=miss_tolerance, + ) # hyper-param for removing inactive queries + + self.query_interact = QueryInteractionModule( + qim_args, + dim_in=embed_dims, + hidden_dim=embed_dims, + dim_out=embed_dims, + ) + + self.bbox_coder = build_bbox_coder(bbox_coder) + + self.memory_bank = MemoryBank( + mem_args, + dim_in=embed_dims, + hidden_dim=embed_dims, + dim_out=embed_dims, + ) + self.mem_bank_len = ( + 0 if self.memory_bank is None else self.memory_bank.max_his_length + ) + self.criterion = build_loss(loss_cfg) + self.test_track_instances = None + self.l2g_r_mat = None + self.l2g_t = None + self.gt_iou_threshold = gt_iou_threshold + self.bev_h, self.bev_w = self.pts_bbox_head.bev_h, self.pts_bbox_head.bev_w + self.freeze_bev_encoder = freeze_bev_encoder + + def extract_img_feat(self, img, len_queue=None): + """Extract features of images.""" + if img is None: + return None + assert img.dim() == 5 + B, N, C, H, W = img.size() + img = img.reshape(B * N, C, H, W) + if self.use_grid_mask: + img = self.grid_mask(img) + img_feats = self.img_backbone(img) + if isinstance(img_feats, dict): + img_feats = list(img_feats.values()) + if self.with_img_neck: + img_feats = self.img_neck(img_feats) + + img_feats_reshaped = [] + for img_feat in img_feats: + _, c, h, w = img_feat.size() + if len_queue is not None: + img_feat_reshaped = img_feat.view(B//len_queue, len_queue, N, c, h, w) + else: + img_feat_reshaped = img_feat.view(B, N, c, h, w) + img_feats_reshaped.append(img_feat_reshaped) + return img_feats_reshaped + + def _generate_empty_tracks(self): + track_instances = Instances((1, 1)) + num_queries, dim = self.query_embedding.weight.shape # (300, 256 * 2) + device = self.query_embedding.weight.device + query = self.query_embedding.weight + track_instances.ref_pts = self.reference_points(query[..., : dim // 2]) + + # init boxes: xy, wl, z, h, sin, cos, vx, vy, vz + pred_boxes_init = torch.zeros( + (len(track_instances), 10), dtype=torch.float, device=device + ) + track_instances.query = query + + track_instances.output_embedding = torch.zeros( + (num_queries, dim >> 1), device=device + ) + + track_instances.obj_idxes = torch.full( + (len(track_instances),), -1, dtype=torch.long, device=device + ) + track_instances.matched_gt_idxes = torch.full( + (len(track_instances),), -1, dtype=torch.long, device=device + ) + track_instances.disappear_time = torch.zeros( + (len(track_instances),), dtype=torch.long, device=device + ) + + track_instances.iou = torch.zeros( + (len(track_instances),), dtype=torch.float, device=device + ) + track_instances.scores = torch.zeros( + (len(track_instances),), dtype=torch.float, device=device + ) + track_instances.track_scores = torch.zeros( + (len(track_instances),), dtype=torch.float, device=device + ) + # xy, wl, z, h, sin, cos, vx, vy, vz + track_instances.pred_boxes = pred_boxes_init + + track_instances.pred_logits = torch.zeros( + (len(track_instances), self.num_classes), dtype=torch.float, device=device + ) + + mem_bank_len = self.mem_bank_len + track_instances.mem_bank = torch.zeros( + (len(track_instances), mem_bank_len, dim // 2), + dtype=torch.float32, + device=device, + ) + track_instances.mem_padding_mask = torch.ones( + 
(len(track_instances), mem_bank_len), dtype=torch.bool, device=device
+        )
+        track_instances.save_period = torch.zeros(
+            (len(track_instances),), dtype=torch.float32, device=device
+        )
+
+        return track_instances.to(self.query_embedding.weight.device)
+
+    def velo_update(
+        self, ref_pts, velocity, l2g_r1, l2g_t1, l2g_r2, l2g_t2, time_delta
+    ):
+        """Advance reference points by per-agent velocity between two frames.
+        Args:
+            ref_pts (Tensor): (num_query, 3), in inverse-sigmoid space.
+            velocity (Tensor): (num_query, 2), (vx, vy) in m/s in the lidar frame.
+            l2g_r1, l2g_t1: lidar-to-global rotation/translation of the current frame.
+            l2g_r2, l2g_t2: lidar-to-global rotation/translation of the next frame.
+            time_delta (Tensor): time gap between the two frames in seconds.
+        Returns:
+            ref_pts (Tensor): (num_query, 3), in inverse-sigmoid space.
+        """
+        if isinstance(l2g_r1, list):
+            l2g_r1 = l2g_r1[0]
+        if isinstance(l2g_t1, list):
+            l2g_t1 = l2g_t1[0]
+        if isinstance(l2g_r2, list):
+            l2g_r2 = l2g_r2[0]
+        if isinstance(l2g_t2, list):
+            l2g_t2 = l2g_t2[0]
+
+        l2g_r1 = l2g_r1.type(torch.float)
+        l2g_t1 = l2g_t1.type(torch.float)
+        l2g_t2 = l2g_t2.type(torch.float)
+        time_delta = time_delta.type(torch.float)
+
+        num_query = ref_pts.size(0)
+        # pad (vx, vy) with vz = 0 so the velocity matches the 3D points
+        velo_pad_ = velocity.new_zeros((num_query, 1))
+        velo_pad = torch.cat((velocity, velo_pad_), dim=-1)
+
+        # denormalize from sigmoid space to metric lidar coordinates
+        reference_points = ref_pts.sigmoid().clone()
+        pc_range = self.pc_range
+        reference_points[..., 0:1] = (
+            reference_points[..., 0:1] * (pc_range[3] - pc_range[0]) + pc_range[0]
+        )
+        reference_points[..., 1:2] = (
+            reference_points[..., 1:2] * (pc_range[4] - pc_range[1]) + pc_range[1]
+        )
+        reference_points[..., 2:3] = (
+            reference_points[..., 2:3] * (pc_range[5] - pc_range[2]) + pc_range[2]
+        )
+
+        # propagate by velocity, then map lidar(t1) -> global -> lidar(t2)
+        reference_points = reference_points + velo_pad * time_delta
+
+        ref_pts = reference_points @ l2g_r1 + l2g_t1 - l2g_t2
+
+        g2l_r = torch.linalg.inv(l2g_r2).type(torch.float)
+
+        ref_pts = ref_pts @ g2l_r
+
+        # renormalize back to sigmoid space
+        ref_pts[..., 0:1] = (ref_pts[..., 0:1] - pc_range[0]) / (
+            pc_range[3] - pc_range[0]
+        )
+        ref_pts[..., 1:2] = (ref_pts[..., 1:2] - pc_range[1]) / (
+            pc_range[4] - pc_range[1]
+        )
+        ref_pts[..., 2:3] = (ref_pts[..., 2:3] - pc_range[2]) / (
+            pc_range[5] - pc_range[2]
+        )
+
+        ref_pts = inverse_sigmoid(ref_pts)
+
+        return ref_pts
+
+    def _copy_tracks_for_loss(self, tgt_instances):
+        device = self.query_embedding.weight.device
+        track_instances = Instances((1, 1))
+
+        track_instances.obj_idxes = copy.deepcopy(tgt_instances.obj_idxes)
+
+        track_instances.matched_gt_idxes = copy.deepcopy(tgt_instances.matched_gt_idxes)
+        track_instances.disappear_time = copy.deepcopy(tgt_instances.disappear_time)
+
+        track_instances.scores = torch.zeros(
+            (len(track_instances),), dtype=torch.float, device=device
+        )
+        track_instances.track_scores = torch.zeros(
+            (len(track_instances),), dtype=torch.float, device=device
+        )
+        track_instances.pred_boxes = torch.zeros(
+            (len(track_instances), 10), dtype=torch.float, device=device
+        )
+        track_instances.iou = torch.zeros(
+            (len(track_instances),), dtype=torch.float, device=device
+        )
+        track_instances.pred_logits = torch.zeros(
+            (len(track_instances), self.num_classes), dtype=torch.float, device=device
+        )
+
+        track_instances.save_period = copy.deepcopy(tgt_instances.save_period)
+        return track_instances.to(device)
+
+    def get_history_bev(self, imgs_queue, img_metas_list):
+        """
+        Get history BEV features iteratively. To save GPU memory, gradients are not calculated.
+ """ + self.eval() + with torch.no_grad(): + prev_bev = None + bs, len_queue, num_cams, C, H, W = imgs_queue.shape + imgs_queue = imgs_queue.reshape(bs * len_queue, num_cams, C, H, W) + img_feats_list = self.extract_img_feat(img=imgs_queue, len_queue=len_queue) + for i in range(len_queue): + img_metas = [each[i] for each in img_metas_list] + img_feats = [each_scale[:, i] for each_scale in img_feats_list] + prev_bev, _ = self.pts_bbox_head.get_bev_features( + mlvl_feats=img_feats, + img_metas=img_metas, + prev_bev=prev_bev) + self.train() + return prev_bev + + # Generate bev using bev_encoder in BEVFormer + def get_bevs(self, imgs, img_metas, prev_img=None, prev_img_metas=None, prev_bev=None): + if prev_img is not None and prev_img_metas is not None: + assert prev_bev is None + prev_bev = self.get_history_bev(prev_img, prev_img_metas) + + img_feats = self.extract_img_feat(img=imgs) + if self.freeze_bev_encoder: + with torch.no_grad(): + bev_embed, bev_pos = self.pts_bbox_head.get_bev_features( + mlvl_feats=img_feats, img_metas=img_metas, prev_bev=prev_bev) + else: + bev_embed, bev_pos = self.pts_bbox_head.get_bev_features( + mlvl_feats=img_feats, img_metas=img_metas, prev_bev=prev_bev) + + if bev_embed.shape[1] == self.bev_h * self.bev_w: + bev_embed = bev_embed.permute(1, 0, 2) + + assert bev_embed.shape[0] == self.bev_h * self.bev_w + return bev_embed, bev_pos + + @auto_fp16(apply_to=("img", "prev_bev")) + def _forward_single_frame_train( + self, + img, + img_metas, + track_instances, + prev_img, + prev_img_metas, + l2g_r1=None, + l2g_t1=None, + l2g_r2=None, + l2g_t2=None, + time_delta=None, + all_query_embeddings=None, + all_matched_indices=None, + all_instances_pred_logits=None, + all_instances_pred_boxes=None, + ): + """ + Perform forward only on one frame. Called in forward_train + Warnning: Only Support BS=1 + Args: + img: shape [B, num_cam, 3, H, W] + if l2g_r2 is None or l2g_t2 is None: + it means this frame is the end of the training clip, + so no need to call velocity update + """ + # NOTE: You can replace BEVFormer with other BEV encoder and provide bev_embed here + bev_embed, bev_pos = self.get_bevs( + img, img_metas, + prev_img=prev_img, prev_img_metas=prev_img_metas, + ) + + det_output = self.pts_bbox_head.get_detections( + bev_embed, + object_query_embeds=track_instances.query, + ref_points=track_instances.ref_pts, + img_metas=img_metas, + ) + + output_classes = det_output["all_cls_scores"] + output_coords = det_output["all_bbox_preds"] + output_past_trajs = det_output["all_past_traj_preds"] + last_ref_pts = det_output["last_ref_points"] + query_feats = det_output["query_feats"] + + out = { + "pred_logits": output_classes[-1], + "pred_boxes": output_coords[-1], + "pred_past_trajs": output_past_trajs[-1], + "ref_pts": last_ref_pts, + "bev_embed": bev_embed, + "bev_pos": bev_pos + } + with torch.no_grad(): + track_scores = output_classes[-1, 0, :].sigmoid().max(dim=-1).values + + # Step-1 Update track instances with current prediction + # [nb_dec, bs, num_query, xxx] + nb_dec = output_classes.size(0) + + # the track id will be assigned by the matcher. 
+        track_instances_list = [
+            self._copy_tracks_for_loss(track_instances) for i in range(nb_dec - 1)
+        ]
+        track_instances.output_embedding = query_feats[-1][0]  # [300, feat_dim]
+        velo = output_coords[-1, 0, :, -2:]  # [num_query, 2]
+        if l2g_r2 is not None:
+            # Update ref_pts for the next frame considering each agent's velocity
+            ref_pts = self.velo_update(
+                last_ref_pts[0],
+                velo,
+                l2g_r1,
+                l2g_t1,
+                l2g_r2,
+                l2g_t2,
+                time_delta=time_delta,
+            )
+        else:
+            ref_pts = last_ref_pts[0]
+
+        dim = track_instances.query.shape[-1]
+        track_instances.ref_pts = self.reference_points(track_instances.query[..., :dim//2])
+        track_instances.ref_pts[..., :2] = ref_pts[..., :2]
+
+        track_instances_list.append(track_instances)
+
+        for i in range(nb_dec):
+            track_instances = track_instances_list[i]
+
+            track_instances.scores = track_scores
+            track_instances.pred_logits = output_classes[i, 0]  # [300, num_cls]
+            track_instances.pred_boxes = output_coords[i, 0]  # [300, box_dim]
+            track_instances.pred_past_trajs = output_past_trajs[i, 0]  # [300, past_steps, 2]
+
+            out["track_instances"] = track_instances
+            track_instances, matched_indices = self.criterion.match_for_single_frame(
+                out, i, if_step=(i == (nb_dec - 1))
+            )
+            all_query_embeddings.append(query_feats[i][0])
+            all_matched_indices.append(matched_indices)
+            all_instances_pred_logits.append(output_classes[i, 0])
+            all_instances_pred_boxes.append(output_coords[i, 0])  # Not used
+
+        active_index = (track_instances.obj_idxes >= 0) & (track_instances.iou >= self.gt_iou_threshold) & (track_instances.matched_gt_idxes >= 0)
+        out.update(self.select_active_track_query(track_instances, active_index, img_metas))
+        # query index 900 is the ego (SDC) query appended after the object queries
+        out.update(self.select_sdc_track_query(track_instances[900], img_metas))
+
+        # memory bank
+        if self.memory_bank is not None:
+            track_instances = self.memory_bank(track_instances)
+        # Step-2 Update track instances using matcher
+
+        tmp = {}
+        tmp["init_track_instances"] = self._generate_empty_tracks()
+        tmp["track_instances"] = track_instances
+        out_track_instances = self.query_interact(tmp)
+        out["track_instances"] = out_track_instances
+        return out
+
+    def select_active_track_query(self, track_instances, active_index, img_metas, with_mask=True):
+        result_dict = self._track_instances2results(track_instances[active_index], img_metas, with_mask=with_mask)
+        result_dict["track_query_embeddings"] = track_instances.output_embedding[active_index][result_dict['bbox_index']][result_dict['mask']]
+        result_dict["track_query_matched_idxes"] = track_instances.matched_gt_idxes[active_index][result_dict['bbox_index']][result_dict['mask']]
+        return result_dict
+
+    def select_sdc_track_query(self, sdc_instance, img_metas):
+        out = dict()
+        result_dict = self._track_instances2results(sdc_instance, img_metas, with_mask=False)
+        out["sdc_boxes_3d"] = result_dict['boxes_3d']
+        out["sdc_scores_3d"] = result_dict['scores_3d']
+        out["sdc_track_scores"] = result_dict['track_scores']
+        out["sdc_track_bbox_results"] = result_dict['track_bbox_results']
+        out["sdc_embedding"] = sdc_instance.output_embedding[0]
+        return out
+
+    @auto_fp16(apply_to=("img", "points"))
+    def forward_track_train(self,
+                            img,
+                            gt_bboxes_3d,
+                            gt_labels_3d,
+                            gt_past_traj,
+                            gt_past_traj_mask,
+                            gt_inds,
+                            gt_sdc_bbox,
+                            gt_sdc_label,
+                            l2g_t,
+                            l2g_r_mat,
+                            img_metas,
+                            timestamp):
+        """Forward function for training the tracking part over a clip.
+        Args:
+            img (Tensor): images of the whole clip, shape [B, T, num_cam, 3, H, W].
+        Returns:
+            tuple: losses dict of the track criterion and the per-frame outputs.
+        """
+        track_instances = self._generate_empty_tracks()
+        num_frame = img.size(1)
+        # init gt instances!
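+        # normalize_bbox (imported above; assuming it follows the DETR3D
+        # convention) converts a 9-DoF box (cx, cy, cz, w, l, h, yaw, vx, vy)
+        # into the 10-dim regression target used for pred_boxes in this file:
+        #
+        #     (cx, cy, log w, log l, cz, log h, sin yaw, cos yaw, vx, vy)
+        #
+        # which is why _generate_empty_tracks zero-initialises pred_boxes with
+        # shape (num_query, 10).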
+ gt_instances_list = [] + + for i in range(num_frame): + gt_instances = Instances((1, 1)) + boxes = gt_bboxes_3d[0][i].tensor.to(img.device) + # normalize gt bboxes here! + boxes = normalize_bbox(boxes, self.pc_range) + sd_boxes = gt_sdc_bbox[0][i].tensor.to(img.device) + sd_boxes = normalize_bbox(sd_boxes, self.pc_range) + gt_instances.boxes = boxes + gt_instances.labels = gt_labels_3d[0][i] + gt_instances.obj_ids = gt_inds[0][i] + gt_instances.past_traj = gt_past_traj[0][i].float() + gt_instances.past_traj_mask = gt_past_traj_mask[0][i].float() + gt_instances.sdc_boxes = torch.cat([sd_boxes for _ in range(boxes.shape[0])], dim=0) # boxes.shape[0] sometimes 0 + gt_instances.sdc_labels = torch.cat([gt_sdc_label[0][i] for _ in range(gt_labels_3d[0][i].shape[0])], dim=0) + gt_instances_list.append(gt_instances) + + self.criterion.initialize_for_single_clip(gt_instances_list) + + out = dict() + + for i in range(num_frame): + prev_img = img[:, :i, ...] if i != 0 else img[:, :1, ...] + prev_img_metas = copy.deepcopy(img_metas) + # TODO: Generate prev_bev in an RNN way. + + img_single = torch.stack([img_[i] for img_ in img], dim=0) + img_metas_single = [copy.deepcopy(img_metas[0][i])] + if i == num_frame - 1: + l2g_r2 = None + l2g_t2 = None + time_delta = None + else: + l2g_r2 = l2g_r_mat[0][i + 1] + l2g_t2 = l2g_t[0][i + 1] + time_delta = timestamp[0][i + 1] - timestamp[0][i] + all_query_embeddings = [] + all_matched_idxes = [] + all_instances_pred_logits = [] + all_instances_pred_boxes = [] + frame_res = self._forward_single_frame_train( + img_single, + img_metas_single, + track_instances, + prev_img, + prev_img_metas, + l2g_r_mat[0][i], + l2g_t[0][i], + l2g_r2, + l2g_t2, + time_delta, + all_query_embeddings, + all_matched_idxes, + all_instances_pred_logits, + all_instances_pred_boxes, + ) + # all_query_embeddings: len=dec nums, N*256 + # all_matched_idxes: len=dec nums, N*2 + track_instances = frame_res["track_instances"] + + get_keys = ["bev_embed", "bev_pos", + "track_query_embeddings", "track_query_matched_idxes", "track_bbox_results", + "sdc_boxes_3d", "sdc_scores_3d", "sdc_track_scores", "sdc_track_bbox_results", "sdc_embedding"] + out.update({k: frame_res[k] for k in get_keys}) + + losses = self.criterion.losses_dict + return losses, out + + def upsample_bev_if_tiny(self, outs_track): + if outs_track["bev_embed"].size(0) == 100 * 100: + # For tiny model + # bev_emb + bev_embed = outs_track["bev_embed"] # [10000, 1, 256] + dim, _, _ = bev_embed.size() + w = h = int(math.sqrt(dim)) + assert h == w == 100 + + bev_embed = rearrange(bev_embed, '(h w) b c -> b c h w', h=h, w=w) # [1, 256, 100, 100] + bev_embed = nn.Upsample(scale_factor=2)(bev_embed) # [1, 256, 200, 200] + bev_embed = rearrange(bev_embed, 'b c h w -> (h w) b c') + outs_track["bev_embed"] = bev_embed + + # prev_bev + prev_bev = outs_track.get("prev_bev", None) + if prev_bev is not None: + if self.training: + # [1, 10000, 256] + prev_bev = rearrange(prev_bev, 'b (h w) c -> b c h w', h=h, w=w) + prev_bev = nn.Upsample(scale_factor=2)(prev_bev) # [1, 256, 200, 200] + prev_bev = rearrange(prev_bev, 'b c h w -> b (h w) c') + outs_track["prev_bev"] = prev_bev + else: + # [10000, 1, 256] + prev_bev = rearrange(prev_bev, '(h w) b c -> b c h w', h=h, w=w) + prev_bev = nn.Upsample(scale_factor=2)(prev_bev) # [1, 256, 200, 200] + prev_bev = rearrange(prev_bev, 'b c h w -> (h w) b c') + outs_track["prev_bev"] = prev_bev + + # bev_pos + bev_pos = outs_track["bev_pos"] # [1, 256, 100, 100] + bev_pos = nn.Upsample(scale_factor=2)(bev_pos) # 
[1, 256, 200, 200] + outs_track["bev_pos"] = bev_pos + return outs_track + + + def _forward_single_frame_inference( + self, + img, + img_metas, + track_instances, + prev_bev=None, + l2g_r1=None, + l2g_t1=None, + l2g_r2=None, + l2g_t2=None, + time_delta=None, + ): + """ + img: B, num_cam, C, H, W = img.shape + """ + + """ velo update """ + active_inst = track_instances[track_instances.obj_idxes >= 0] + other_inst = track_instances[track_instances.obj_idxes < 0] + + if l2g_r2 is not None and len(active_inst) > 0 and l2g_r1 is not None: + ref_pts = active_inst.ref_pts + velo = active_inst.pred_boxes[:, -2:] + ref_pts = self.velo_update( + ref_pts, velo, l2g_r1, l2g_t1, l2g_r2, l2g_t2, time_delta=time_delta + ) + ref_pts = ref_pts.squeeze(0) + dim = active_inst.query.shape[-1] + active_inst.ref_pts = self.reference_points(active_inst.query[..., :dim//2]) + active_inst.ref_pts[...,:2] = ref_pts[...,:2] + + track_instances = Instances.cat([other_inst, active_inst]) + + # NOTE: You can replace BEVFormer with other BEV encoder and provide bev_embed here + bev_embed, bev_pos = self.get_bevs(img, img_metas, prev_bev=prev_bev) + det_output = self.pts_bbox_head.get_detections( + bev_embed, + object_query_embeds=track_instances.query, + ref_points=track_instances.ref_pts, + img_metas=img_metas, + ) + output_classes = det_output["all_cls_scores"] + output_coords = det_output["all_bbox_preds"] + last_ref_pts = det_output["last_ref_points"] + query_feats = det_output["query_feats"] + + out = { + "pred_logits": output_classes, + "pred_boxes": output_coords, + "ref_pts": last_ref_pts, + "bev_embed": bev_embed, + "query_embeddings": query_feats, + "all_past_traj_preds": det_output["all_past_traj_preds"], + "bev_pos": bev_pos, + } + + """ update track instances with predict results """ + track_scores = output_classes[-1, 0, :].sigmoid().max(dim=-1).values + # each track will be assigned an unique global id by the track base. 
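+        # Simplified sketch of the lifecycle that self.track_base.update()
+        # applies further below (see RuntimeTrackerBase in track_head_plugin
+        # for the actual code):
+        #
+        #     if inst.scores[i] >= score_thresh and inst.obj_idxes[i] < 0:
+        #         inst.obj_idxes[i] = next_global_id    # birth of a new track
+        #     elif inst.scores[i] < filter_score_thresh:
+        #         inst.disappear_time[i] += 1           # track lost this frame
+        #         if inst.disappear_time[i] >= miss_tolerance:
+        #             inst.obj_idxes[i] = -1            # remove inactive track
+        #
+        # with the thresholds (0.2 / 0.1 / 5 frames) set in __init__.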
+ track_instances.scores = track_scores + # track_instances.track_scores = track_scores # [300] + track_instances.pred_logits = output_classes[-1, 0] # [300, num_cls] + track_instances.pred_boxes = output_coords[-1, 0] # [300, box_dim] + track_instances.output_embedding = query_feats[-1][0] # [300, feat_dim] + track_instances.ref_pts = last_ref_pts[0] + # hard_code: assume the 901 query is sdc query + track_instances.obj_idxes[900] = -2 + """ update track base """ + self.track_base.update(track_instances, None) + + active_index = (track_instances.obj_idxes>=0) & (track_instances.scores >= self.track_base.filter_score_thresh) # filter out sleep objects + out.update(self.select_active_track_query(track_instances, active_index, img_metas)) + out.update(self.select_sdc_track_query(track_instances[track_instances.obj_idxes==-2], img_metas)) + + """ update with memory_bank """ + if self.memory_bank is not None: + track_instances = self.memory_bank(track_instances) + + """ Update track instances using matcher """ + tmp = {} + tmp["init_track_instances"] = self._generate_empty_tracks() + tmp["track_instances"] = track_instances + out_track_instances = self.query_interact(tmp) + out["track_instances_fordet"] = track_instances + out["track_instances"] = out_track_instances + out["track_obj_idxes"] = track_instances.obj_idxes + return out + + def simple_test_track( + self, + img=None, + l2g_t=None, + l2g_r_mat=None, + img_metas=None, + timestamp=None, + ): + """only support bs=1 and sequential input""" + + bs = img.size(0) + # img_metas = img_metas[0] + + """ init track instances for first frame """ + if ( + self.test_track_instances is None + or img_metas[0]["scene_token"] != self.scene_token + ): + self.timestamp = timestamp + self.scene_token = img_metas[0]["scene_token"] + self.prev_bev = None + track_instances = self._generate_empty_tracks() + time_delta, l2g_r1, l2g_t1, l2g_r2, l2g_t2 = None, None, None, None, None + + else: + track_instances = self.test_track_instances + time_delta = timestamp - self.timestamp + l2g_r1 = self.l2g_r_mat + l2g_t1 = self.l2g_t + l2g_r2 = l2g_r_mat + l2g_t2 = l2g_t + self.prev_bev = self.prev_frame_info['prev_bev'] + + """ get time_delta and l2g r/t infos """ + """ update frame info for next frame""" + self.timestamp = timestamp + self.l2g_t = l2g_t + self.l2g_r_mat = l2g_r_mat + + """ predict and update """ + + prev_bev = self.prev_bev + frame_res = self._forward_single_frame_inference( + img, + img_metas, + track_instances, + prev_bev, + l2g_r1, + l2g_t1, + l2g_r2, + l2g_t2, + time_delta, + ) + + self.prev_bev = frame_res["bev_embed"] + self.prev_frame_info['prev_bev'] = self.prev_bev + track_instances = frame_res["track_instances"] + track_instances_fordet = frame_res["track_instances_fordet"] + + self.test_track_instances = track_instances + results = [dict()] + get_keys = ["bev_embed", "bev_pos", + "track_query_embeddings", "track_bbox_results", + "boxes_3d", "scores_3d", "labels_3d", "track_scores", "track_ids"] + if self.with_motion_head: + get_keys += ["sdc_boxes_3d", "sdc_scores_3d", "sdc_track_scores", "sdc_track_bbox_results", "sdc_embedding"] + results[0].update({k: frame_res[k] for k in get_keys}) + results = self._det_instances2results(track_instances_fordet, results, img_metas) + return results + + def _track_instances2results(self, track_instances, img_metas, with_mask=True): + bbox_dict = dict( + cls_scores=track_instances.pred_logits, + bbox_preds=track_instances.pred_boxes, + track_scores=track_instances.scores, + 
obj_idxes=track_instances.obj_idxes, + ) + # bboxes_dict = self.bbox_coder.decode(bbox_dict, with_mask=with_mask)[0] + bboxes_dict = self.bbox_coder.decode(bbox_dict, with_mask=with_mask, img_metas=img_metas)[0] + bboxes = bboxes_dict["bboxes"] + # bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 5] * 0.5 + bboxes = img_metas[0]["box_type_3d"](bboxes, 9) + labels = bboxes_dict["labels"] + scores = bboxes_dict["scores"] + bbox_index = bboxes_dict["bbox_index"] + + track_scores = bboxes_dict["track_scores"] + obj_idxes = bboxes_dict["obj_idxes"] + result_dict = dict( + boxes_3d=bboxes.to("cpu"), + scores_3d=scores.cpu(), + labels_3d=labels.cpu(), + track_scores=track_scores.cpu(), + bbox_index=bbox_index.cpu(), + track_ids=obj_idxes.cpu(), + mask=bboxes_dict["mask"].cpu(), + track_bbox_results=[[bboxes.to("cpu"), scores.cpu(), labels.cpu(), bbox_index.cpu(), bboxes_dict["mask"].cpu()]] + ) + return result_dict + + def _det_instances2results(self, instances, results, img_metas): + """ + Outs: + active_instances. keys: + - 'pred_logits': + - 'pred_boxes': normalized bboxes + - 'scores' + - 'obj_idxes' + out_dict. keys: + - boxes_3d (torch.Tensor): 3D boxes. + - scores (torch.Tensor): Prediction scores. + - labels_3d (torch.Tensor): Box labels. + - attrs_3d (torch.Tensor, optional): Box attributes. + - track_ids + - tracking_score + """ + # filter out sleep querys + if instances.pred_logits.numel() == 0: + return [None] + bbox_dict = dict( + cls_scores=instances.pred_logits, + bbox_preds=instances.pred_boxes, + track_scores=instances.scores, + obj_idxes=instances.obj_idxes, + ) + bboxes_dict = self.bbox_coder.decode(bbox_dict, img_metas=img_metas)[0] + bboxes = bboxes_dict["bboxes"] + # import pdb;pdb.set_trace() + bboxes = img_metas[0]["box_type_3d"](bboxes, 9) + labels = bboxes_dict["labels"] + scores = bboxes_dict["scores"] + + track_scores = bboxes_dict["track_scores"] + obj_idxes = bboxes_dict["obj_idxes"] + result_dict = results[0] + result_dict_det = dict( + boxes_3d_det=bboxes.to("cpu"), + scores_3d_det=scores.cpu(), + labels_3d_det=labels.cpu(), + ) + if result_dict is not None: + result_dict.update(result_dict_det) + else: + result_dict = None + + return [result_dict] + diff --git a/mmcv/models/losses/__init__.py b/mmcv/models/losses/__init__.py new file mode 100644 index 0000000..cb85d01 --- /dev/null +++ b/mmcv/models/losses/__init__.py @@ -0,0 +1,20 @@ +from .focal_loss import FocalLoss, sigmoid_focal_loss +from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, GIoULoss, IoULoss, + bounded_iou_loss, iou_loss) +from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss + + +# __all__ = [ +# 'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy', +# 'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss', +# 'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss', +# 'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss', +# 'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss', 'GHMC', +# 'GHMR', 'reduce_loss', 'weight_reduce_loss', 'weighted_loss', 'L1Loss', +# 'l1_loss', 'isr_p', 'carl_loss', 'AssociativeEmbeddingLoss', +# 'GaussianFocalLoss', 'QualityFocalLoss', 'DistributionFocalLoss', +# 'VarifocalLoss', 'KnowledgeDistillationKLDivLoss', 'SeesawLoss', +# 'ChamferDistance', 'chamfer_distance', 'axis_aligned_iou_loss', +# 'AxisAlignedIoULoss', 'PAConvRegularizationLoss', +# 'LovaszLoss' +# ] diff --git a/mmcv/models/losses/focal_loss.py b/mmcv/models/losses/focal_loss.py new file mode 100644 index 0000000..1212e42 --- 
/dev/null +++ b/mmcv/models/losses/focal_loss.py @@ -0,0 +1,181 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.ops.focal_loss import sigmoid_focal_loss as _sigmoid_focal_loss + +from ..builder import LOSSES +from .utils import weight_reduce_loss + + +# This method is only for debugging +def py_sigmoid_focal_loss(pred, + target, + weight=None, + gamma=2.0, + alpha=0.25, + reduction='mean', + avg_factor=None): + """PyTorch version of `Focal Loss `_. + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the + number of classes + target (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + gamma (float, optional): The gamma for calculating the modulating + factor. Defaults to 2.0. + alpha (float, optional): A balanced form for Focal Loss. + Defaults to 0.25. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + """ + pred_sigmoid = pred.sigmoid() + target = target.type_as(pred) + pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) + focal_weight = (alpha * target + (1 - alpha) * + (1 - target)) * pt.pow(gamma) + loss = F.binary_cross_entropy_with_logits( + pred, target, reduction='none') * focal_weight + if weight is not None: + if weight.shape != loss.shape: + if weight.size(0) == loss.size(0): + # For most cases, weight is of shape (num_priors, ), + # which means it does not have the second axis num_class + weight = weight.view(-1, 1) + else: + # Sometimes, weight per anchor per class is also needed. e.g. + # in FSAF. But it may be flattened of shape + # (num_priors x num_class, ), while loss is still of shape + # (num_priors, num_class). + assert weight.numel() == loss.numel() + weight = weight.view(loss.size(0), -1) + assert weight.ndim == loss.ndim + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +def sigmoid_focal_loss(pred, + target, + weight=None, + gamma=2.0, + alpha=0.25, + reduction='mean', + avg_factor=None): + r"""A warpper of cuda version `Focal Loss + `_. + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the number + of classes. + target (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + gamma (float, optional): The gamma for calculating the modulating + factor. Defaults to 2.0. + alpha (float, optional): A balanced form for Focal Loss. + Defaults to 0.25. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + """ + # Function.apply does not accept keyword arguments, so the decorator + # "weighted_loss" is not applicable + loss = _sigmoid_focal_loss(pred.contiguous(), target, gamma, alpha, None, + 'none') + if weight is not None: + if weight.shape != loss.shape: + if weight.size(0) == loss.size(0): + # For most cases, weight is of shape (num_priors, ), + # which means it does not have the second axis num_class + weight = weight.view(-1, 1) + else: + # Sometimes, weight per anchor per class is also needed. e.g. + # in FSAF. But it may be flattened of shape + # (num_priors x num_class, ), while loss is still of shape + # (num_priors, num_class). 
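+                # e.g. weight of shape (num_priors * num_class,) is reshaped
+                # to (num_priors, num_class) here, while the (num_priors,)
+                # case was handled above by weight.view(-1, 1).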
+ assert weight.numel() == loss.numel() + weight = weight.view(loss.size(0), -1) + assert weight.ndim == loss.ndim + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +@LOSSES.register_module() +class FocalLoss(nn.Module): + + def __init__(self, + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=1.0): + """`Focal Loss `_ + + Args: + use_sigmoid (bool, optional): Whether to the prediction is + used for sigmoid or softmax. Defaults to True. + gamma (float, optional): The gamma for calculating the modulating + factor. Defaults to 2.0. + alpha (float, optional): A balanced form for Focal Loss. + Defaults to 0.25. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. Options are "none", "mean" and + "sum". + loss_weight (float, optional): Weight of loss. Defaults to 1.0. + """ + super(FocalLoss, self).__init__() + assert use_sigmoid is True, 'Only sigmoid focal loss supported now.' + self.use_sigmoid = use_sigmoid + self.gamma = gamma + self.alpha = alpha + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + """Forward function. + + Args: + pred (torch.Tensor): The prediction. + target (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): The weight of loss for each + prediction. Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Options are "none", "mean" and "sum". + + Returns: + torch.Tensor: The calculated loss + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if self.use_sigmoid: + if torch.cuda.is_available() and pred.is_cuda: + calculate_loss_func = sigmoid_focal_loss + else: + num_classes = pred.size(1) + target = F.one_hot(target, num_classes=num_classes + 1) + target = target[:, :num_classes] + calculate_loss_func = py_sigmoid_focal_loss + + loss_cls = self.loss_weight * calculate_loss_func( + pred, + target, + weight, + gamma=self.gamma, + alpha=self.alpha, + reduction=reduction, + avg_factor=avg_factor) + + else: + raise NotImplementedError + return loss_cls diff --git a/mmcv/models/losses/iou_loss.py b/mmcv/models/losses/iou_loss.py new file mode 100644 index 0000000..466e299 --- /dev/null +++ b/mmcv/models/losses/iou_loss.py @@ -0,0 +1,440 @@ +import math +import torch +import torch.nn as nn + +from mmcv.core.bbox.iou_calculators.iou3d_calculator import bbox_overlaps +from ..builder import LOSSES +from .utils import weighted_loss + + + +@weighted_loss +def iou_loss(pred, target, linear=False, eps=1e-6): + """IoU loss. + + Computing the IoU loss between a set of predicted bboxes and target bboxes. + The loss is calculated as negative log of IoU. + + Args: + pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2), + shape (n, 4). + target (torch.Tensor): Corresponding gt bboxes, shape (n, 4). + linear (bool, optional): If True, use linear scale of loss instead of + log scale. Default: False. + eps (float): Eps to avoid log(0). + + Return: + torch.Tensor: Loss tensor. 
+ """ + ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps) + if linear: + loss = 1 - ious + else: + loss = -ious.log() + return loss + + +@weighted_loss +def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3): + """BIoULoss. + + This is an implementation of paper + `Improving Object Localization with Fitness NMS and Bounded IoU Loss. + `_. + + Args: + pred (torch.Tensor): Predicted bboxes. + target (torch.Tensor): Target bboxes. + beta (float): beta parameter in smoothl1. + eps (float): eps to avoid NaN. + """ + pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5 + pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5 + pred_w = pred[:, 2] - pred[:, 0] + pred_h = pred[:, 3] - pred[:, 1] + with torch.no_grad(): + target_ctrx = (target[:, 0] + target[:, 2]) * 0.5 + target_ctry = (target[:, 1] + target[:, 3]) * 0.5 + target_w = target[:, 2] - target[:, 0] + target_h = target[:, 3] - target[:, 1] + + dx = target_ctrx - pred_ctrx + dy = target_ctry - pred_ctry + + loss_dx = 1 - torch.max( + (target_w - 2 * dx.abs()) / + (target_w + 2 * dx.abs() + eps), torch.zeros_like(dx)) + loss_dy = 1 - torch.max( + (target_h - 2 * dy.abs()) / + (target_h + 2 * dy.abs() + eps), torch.zeros_like(dy)) + loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w / + (target_w + eps)) + loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h / + (target_h + eps)) + loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh], + dim=-1).view(loss_dx.size(0), -1) + + loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta, + loss_comb - 0.5 * beta) + return loss + + + +@weighted_loss +def giou_loss(pred, target, eps=1e-7): + r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding + Box Regression `_. + + Args: + pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2), + shape (n, 4). + target (torch.Tensor): Corresponding gt bboxes, shape (n, 4). + eps (float): Eps to avoid log(0). + + Return: + Tensor: Loss tensor. + """ + gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps) + loss = 1 - gious + return loss + + +@weighted_loss +def diou_loss(pred, target, eps=1e-7): + r"""`Implementation of Distance-IoU Loss: Faster and Better + Learning for Bounding Box Regression, https://arxiv.org/abs/1911.08287`_. + + Code is modified from https://github.com/Zzh-tju/DIoU. + + Args: + pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2), + shape (n, 4). + target (Tensor): Corresponding gt bboxes, shape (n, 4). + eps (float): Eps to avoid log(0). + Return: + Tensor: Loss tensor. 
+ """ + # overlap + lt = torch.max(pred[:, :2], target[:, :2]) + rb = torch.min(pred[:, 2:], target[:, 2:]) + wh = (rb - lt).clamp(min=0) + overlap = wh[:, 0] * wh[:, 1] + + # union + ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1]) + ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1]) + union = ap + ag - overlap + eps + + # IoU + ious = overlap / union + + # enclose area + enclose_x1y1 = torch.min(pred[:, :2], target[:, :2]) + enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:]) + enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0) + + cw = enclose_wh[:, 0] + ch = enclose_wh[:, 1] + + c2 = cw**2 + ch**2 + eps + + b1_x1, b1_y1 = pred[:, 0], pred[:, 1] + b1_x2, b1_y2 = pred[:, 2], pred[:, 3] + b2_x1, b2_y1 = target[:, 0], target[:, 1] + b2_x2, b2_y2 = target[:, 2], target[:, 3] + + left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4 + right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4 + rho2 = left + right + + # DIoU + dious = ious - rho2 / c2 + loss = 1 - dious + return loss + + +@weighted_loss +def ciou_loss(pred, target, eps=1e-7): + r"""`Implementation of paper `Enhancing Geometric Factors into + Model Learning and Inference for Object Detection and Instance + Segmentation `_. + + Code is modified from https://github.com/Zzh-tju/CIoU. + + Args: + pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2), + shape (n, 4). + target (Tensor): Corresponding gt bboxes, shape (n, 4). + eps (float): Eps to avoid log(0). + Return: + Tensor: Loss tensor. + """ + # overlap + lt = torch.max(pred[:, :2], target[:, :2]) + rb = torch.min(pred[:, 2:], target[:, 2:]) + wh = (rb - lt).clamp(min=0) + overlap = wh[:, 0] * wh[:, 1] + + # union + ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1]) + ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1]) + union = ap + ag - overlap + eps + + # IoU + ious = overlap / union + + # enclose area + enclose_x1y1 = torch.min(pred[:, :2], target[:, :2]) + enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:]) + enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0) + + cw = enclose_wh[:, 0] + ch = enclose_wh[:, 1] + + c2 = cw**2 + ch**2 + eps + + b1_x1, b1_y1 = pred[:, 0], pred[:, 1] + b1_x2, b1_y2 = pred[:, 2], pred[:, 3] + b2_x1, b2_y1 = target[:, 0], target[:, 1] + b2_x2, b2_y2 = target[:, 2], target[:, 3] + + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + + left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4 + right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4 + rho2 = left + right + + factor = 4 / math.pi**2 + v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + + # CIoU + cious = ious - (rho2 / c2 + v**2 / (1 - ious + v)) + loss = 1 - cious + return loss + + +class IoULoss(nn.Module): + """IoULoss. + + Computing the IoU loss between a set of predicted bboxes and target bboxes. + + Args: + linear (bool): If True, use linear scale of loss instead of log scale. + Default: False. + eps (float): Eps to avoid log(0). + reduction (str): Options are "none", "mean" and "sum". + loss_weight (float): Weight of loss. + """ + + def __init__(self, + linear=False, + eps=1e-6, + reduction='mean', + loss_weight=1.0): + super(IoULoss, self).__init__() + self.linear = linear + self.eps = eps + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + """Forward function. + + Args: + pred (torch.Tensor): The prediction. 
+ target (torch.Tensor): The learning target of the prediction. + weight (torch.Tensor, optional): The weight of loss for each + prediction. Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Defaults to None. Options are "none", "mean" and "sum". + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if (weight is not None) and (not torch.any(weight > 0)) and ( + reduction != 'none'): + if pred.dim() == weight.dim() + 1: + weight = weight.unsqueeze(1) + return (pred * weight).sum() # 0 + if weight is not None and weight.dim() > 1: + # TODO: remove this in the future + # reduce the weight of shape (n, 4) to (n,) to match the + # iou_loss of shape (n,) + assert weight.shape == pred.shape + weight = weight.mean(-1) + loss = self.loss_weight * iou_loss( + pred, + target, + weight, + linear=self.linear, + eps=self.eps, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss + + +@LOSSES.register_module() +class BoundedIoULoss(nn.Module): + + def __init__(self, beta=0.2, eps=1e-3, reduction='mean', loss_weight=1.0): + super(BoundedIoULoss, self).__init__() + self.beta = beta + self.eps = eps + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + if weight is not None and not torch.any(weight > 0): + if pred.dim() == weight.dim() + 1: + weight = weight.unsqueeze(1) + return (pred * weight).sum() # 0 + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss = self.loss_weight * bounded_iou_loss( + pred, + target, + weight, + beta=self.beta, + eps=self.eps, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss + + +@LOSSES.register_module() +class GIoULoss(nn.Module): + + def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): + super(GIoULoss, self).__init__() + self.eps = eps + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + if weight is not None and not torch.any(weight > 0): + if pred.dim() == weight.dim() + 1: + weight = weight.unsqueeze(1) + return (pred * weight).sum() # 0 + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if weight is not None and weight.dim() > 1: + # TODO: remove this in the future + # reduce the weight of shape (n, 4) to (n,) to match the + # giou_loss of shape (n,) + assert weight.shape == pred.shape + weight = weight.mean(-1) + loss = self.loss_weight * giou_loss( + pred, + target, + weight, + eps=self.eps, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss + + +@LOSSES.register_module() +class DIoULoss(nn.Module): + + def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): + super(DIoULoss, self).__init__() + self.eps = eps + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + if weight is not None and not torch.any(weight > 0): + if pred.dim() == weight.dim() + 1: 
+ weight = weight.unsqueeze(1) + return (pred * weight).sum() # 0 + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if weight is not None and weight.dim() > 1: + # TODO: remove this in the future + # reduce the weight of shape (n, 4) to (n,) to match the + # giou_loss of shape (n,) + assert weight.shape == pred.shape + weight = weight.mean(-1) + loss = self.loss_weight * diou_loss( + pred, + target, + weight, + eps=self.eps, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss + + +@LOSSES.register_module() +class CIoULoss(nn.Module): + + def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): + super(CIoULoss, self).__init__() + self.eps = eps + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + if weight is not None and not torch.any(weight > 0): + if pred.dim() == weight.dim() + 1: + weight = weight.unsqueeze(1) + return (pred * weight).sum() # 0 + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if weight is not None and weight.dim() > 1: + # TODO: remove this in the future + # reduce the weight of shape (n, 4) to (n,) to match the + # giou_loss of shape (n,) + assert weight.shape == pred.shape + weight = weight.mean(-1) + loss = self.loss_weight * ciou_loss( + pred, + target, + weight, + eps=self.eps, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss diff --git a/mmcv/models/losses/smooth_l1_loss.py b/mmcv/models/losses/smooth_l1_loss.py new file mode 100644 index 0000000..ad5e8a4 --- /dev/null +++ b/mmcv/models/losses/smooth_l1_loss.py @@ -0,0 +1,136 @@ +import torch +import torch.nn as nn + +from ..builder import LOSSES +from .utils import weighted_loss + + +@weighted_loss +def smooth_l1_loss(pred, target, beta=1.0): + """Smooth L1 loss. + + Args: + pred (torch.Tensor): The prediction. + target (torch.Tensor): The learning target of the prediction. + beta (float, optional): The threshold in the piecewise function. + Defaults to 1.0. + + Returns: + torch.Tensor: Calculated loss + """ + assert beta > 0 + assert pred.size() == target.size() and target.numel() > 0 + diff = torch.abs(pred - target) + loss = torch.where(diff < beta, 0.5 * diff * diff / beta, + diff - 0.5 * beta) + return loss + + +@weighted_loss +def l1_loss(pred, target): + """L1 loss. + + Args: + pred (torch.Tensor): The prediction. + target (torch.Tensor): The learning target of the prediction. + + Returns: + torch.Tensor: Calculated loss + """ + assert pred.size() == target.size() and target.numel() > 0 + loss = torch.abs(pred - target) + return loss + + +@LOSSES.register_module() +class SmoothL1Loss(nn.Module): + """Smooth L1 loss. + + Args: + beta (float, optional): The threshold in the piecewise function. + Defaults to 1.0. + reduction (str, optional): The method to reduce the loss. + Options are "none", "mean" and "sum". Defaults to "mean". + loss_weight (float, optional): The weight of loss. + """ + + def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0): + super(SmoothL1Loss, self).__init__() + self.beta = beta + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + """Forward function. 
+ + Args: + pred (torch.Tensor): The prediction. + target (torch.Tensor): The learning target of the prediction. + weight (torch.Tensor, optional): The weight of loss for each + prediction. Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Defaults to None. + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss_bbox = self.loss_weight * smooth_l1_loss( + pred, + target, + weight, + beta=self.beta, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss_bbox + + +@LOSSES.register_module() +class L1Loss(nn.Module): + """L1 loss. + + Args: + reduction (str, optional): The method to reduce the loss. + Options are "none", "mean" and "sum". + loss_weight (float, optional): The weight of loss. + """ + + def __init__(self, reduction='mean', loss_weight=1.0): + super(L1Loss, self).__init__() + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + """Forward function. + + Args: + pred (torch.Tensor): The prediction. + target (torch.Tensor): The learning target of the prediction. + weight (torch.Tensor, optional): The weight of loss for each + prediction. Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Defaults to None. + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss_bbox = self.loss_weight * l1_loss( + pred, target, weight, reduction=reduction, avg_factor=avg_factor) + return loss_bbox diff --git a/mmcv/models/losses/utils.py b/mmcv/models/losses/utils.py new file mode 100644 index 0000000..0496fb7 --- /dev/null +++ b/mmcv/models/losses/utils.py @@ -0,0 +1,115 @@ +import functools + +from mmcv.fileio.io import load +import numpy as np +import torch.nn.functional as F + +def get_class_weight(class_weight): + """Get class weight for loss function. + + Args: + class_weight (list[float] | str | None): If class_weight is a str, + take it as a file name and read from it. + """ + if isinstance(class_weight, str): + # take it as a file path + if class_weight.endswith('.npy'): + class_weight = np.load(class_weight) + else: + # pkl, json or yaml + class_weight = load(class_weight) + + return class_weight + +def reduce_loss(loss, reduction): + """Reduce loss as specified. + + Args: + loss (Tensor): Elementwise loss tensor. + reduction (str): Options are "none", "mean" and "sum". + + Return: + Tensor: Reduced loss tensor. + """ + reduction_enum = F._Reduction.get_enum(reduction) + # none: 0, elementwise_mean:1, sum: 2 + if reduction_enum == 0: + return loss + elif reduction_enum == 1: + return loss.mean() + elif reduction_enum == 2: + return loss.sum() + +def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): + """Apply element-wise weight and reduce loss. + + Args: + loss (Tensor): Element-wise loss. + weight (Tensor): Element-wise weights. + reduction (str): Same as built-in losses of PyTorch. + avg_factor (float): Avarage factor when computing the mean of losses. 
+ + Returns: + Tensor: Processed loss values. + """ + # if weight is specified, apply element-wise weight + if weight is not None: + loss = loss * weight + + # if avg_factor is not specified, just reduce the loss + if avg_factor is None: + loss = reduce_loss(loss, reduction) + else: + # if reduction is mean, then average the loss by avg_factor + if reduction == 'mean': + loss = loss.sum() / avg_factor + # if reduction is 'none', then do nothing, otherwise raise an error + elif reduction != 'none': + raise ValueError('avg_factor can not be used with reduction="sum"') + return loss + + +def weighted_loss(loss_func): + """Create a weighted version of a given loss function. + + To use this decorator, the loss function must have the signature like + `loss_func(pred, target, **kwargs)`. The function only needs to compute + element-wise loss without any reduction. This decorator will add weight + and reduction arguments to the function. The decorated function will have + the signature like `loss_func(pred, target, weight=None, reduction='mean', + avg_factor=None, **kwargs)`. + + :Example: + + >>> import torch + >>> @weighted_loss + >>> def l1_loss(pred, target): + >>> return (pred - target).abs() + + >>> pred = torch.Tensor([0, 2, 3]) + >>> target = torch.Tensor([1, 1, 1]) + >>> weight = torch.Tensor([1, 0, 1]) + + >>> l1_loss(pred, target) + tensor(1.3333) + >>> l1_loss(pred, target, weight) + tensor(1.) + >>> l1_loss(pred, target, reduction='none') + tensor([1., 1., 2.]) + >>> l1_loss(pred, target, weight, avg_factor=2) + tensor(1.5000) + """ + + @functools.wraps(loss_func) + def wrapper(pred, + target, + weight=None, + reduction='mean', + avg_factor=None, + **kwargs): + # get element-wise loss + loss = loss_func(pred, target, **kwargs) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + return wrapper diff --git a/mmcv/models/modules/VAD_transformer.py b/mmcv/models/modules/VAD_transformer.py new file mode 100644 index 0000000..6e2ec0b --- /dev/null +++ b/mmcv/models/modules/VAD_transformer.py @@ -0,0 +1,489 @@ +import torch +import numpy as np +import torch.nn as nn +from mmcv.models.utils import xavier_init +from mmcv.utils import ext_loader +from torch.nn.init import normal_ +from mmcv.models.backbones.base_module import BaseModule +from mmcv.models.utils.builder import TRANSFORMER +from torchvision.transforms.functional import rotate +from mmcv.models.bricks.registry import TRANSFORMER_LAYER_SEQUENCE +from mmcv.models.bricks.transformer import TransformerLayerSequence +from mmcv.models.bricks.transformer import build_transformer_layer_sequence + +from mmcv.models.modules.decoder import CustomMSDeformableAttention +from mmcv.models.modules.temporal_self_attention import TemporalSelfAttention +from mmcv.models.modules.spatial_cross_attention import MSDeformableAttention3D + + +ext_module = ext_loader.load_ext( + '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) + +def inverse_sigmoid(x, eps=1e-5): + """Inverse function of sigmoid. + Args: + x (Tensor): The tensor to do the + inverse. + eps (float): EPS avoid numerical + overflow. Defaults 1e-5. + Returns: + Tensor: The x has passed the inverse + function of sigmoid, has same + shape with input. + """ + x = x.clamp(min=0, max=1) + x1 = x.clamp(min=eps) + x2 = (1 - x).clamp(min=eps) + return torch.log(x1 / x2) + + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class MapDetectionTransformerDecoder(TransformerLayerSequence): + """Implements the decoder in DETR3D transformer. 
+ Args: + return_intermediate (bool): Whether to return intermediate outputs. + coder_norm_cfg (dict): Config of last normalization layer. Default: + `LN`. + """ + + def __init__(self, *args, return_intermediate=False, **kwargs): + super(MapDetectionTransformerDecoder, self).__init__(*args, **kwargs) + self.return_intermediate = return_intermediate + self.fp16_enabled = False + + def forward(self, + query, + *args, + reference_points=None, + reg_branches=None, + key_padding_mask=None, + **kwargs): + """Forward function for `Detr3DTransformerDecoder`. + Args: + query (Tensor): Input query with shape + `(num_query, bs, embed_dims)`. + reference_points (Tensor): The reference + points of offset. has shape + (bs, num_query, 4) when as_two_stage, + otherwise has shape ((bs, num_query, 2). + reg_branch: (obj:`nn.ModuleList`): Used for + refining the regression results. Only would + be passed when with_box_refine is True, + otherwise would be passed a `None`. + Returns: + Tensor: Results with shape [1, num_query, bs, embed_dims] when + return_intermediate is `False`, otherwise it has shape + [num_layers, num_query, bs, embed_dims]. + """ + output = query + intermediate = [] + intermediate_reference_points = [] + for lid, layer in enumerate(self.layers): + + reference_points_input = reference_points[..., :2].unsqueeze( + 2) # BS NUM_QUERY NUM_LEVEL 2 + output = layer( + output, + *args, + reference_points=reference_points_input, + key_padding_mask=key_padding_mask, + **kwargs) + output = output.permute(1, 0, 2) + + if reg_branches is not None: + tmp = reg_branches[lid](output) + + assert reference_points.shape[-1] == 2 + + new_reference_points = torch.zeros_like(reference_points) + new_reference_points[..., :2] = tmp[ + ..., :2] + inverse_sigmoid(reference_points[..., :2]) + # new_reference_points[..., 2:3] = tmp[ + # ..., 4:5] + inverse_sigmoid(reference_points[..., 2:3]) + + new_reference_points = new_reference_points.sigmoid() + + reference_points = new_reference_points.detach() + + output = output.permute(1, 0, 2) + if self.return_intermediate: + intermediate.append(output) + intermediate_reference_points.append(reference_points) + + if self.return_intermediate: + return torch.stack(intermediate), torch.stack( + intermediate_reference_points) + + return output, reference_points + + +@TRANSFORMER.register_module() +class VADPerceptionTransformer(BaseModule): + """Implements the Detr3D transformer. + Args: + as_two_stage (bool): Generate query from encoder features. + Default: False. + num_feature_levels (int): Number of feature maps from FPN: + Default: 4. + two_stage_num_proposals (int): Number of proposals when set + `as_two_stage` as True. Default: 300. 
+ """ + + def __init__(self, + num_feature_levels=4, + num_cams=6, + two_stage_num_proposals=300, + encoder=None, + decoder=None, + map_decoder=None, + embed_dims=256, + rotate_prev_bev=True, + use_shift=True, + use_can_bus=True, + can_bus_norm=True, + use_cams_embeds=True, + rotate_center=[100, 100], + map_num_vec=50, + map_num_pts_per_vec=10, + **kwargs): + super(VADPerceptionTransformer, self).__init__(**kwargs) + self.encoder = build_transformer_layer_sequence(encoder) + if decoder is not None: + self.decoder = build_transformer_layer_sequence(decoder) + else: + self.decoder = None + if map_decoder is not None: + self.map_decoder = build_transformer_layer_sequence(map_decoder) + else: + self.map_decoder = None + + self.embed_dims = embed_dims + self.num_feature_levels = num_feature_levels + self.num_cams = num_cams + self.fp16_enabled = False + self.rotate_prev_bev = rotate_prev_bev + self.use_shift = use_shift + self.use_can_bus = use_can_bus + self.can_bus_norm = can_bus_norm + self.use_cams_embeds = use_cams_embeds + self.two_stage_num_proposals = two_stage_num_proposals + self.rotate_center = rotate_center + self.map_num_vec = map_num_vec + self.map_num_pts_per_vec = map_num_pts_per_vec + self.init_layers() + + def init_layers(self): + """Initialize layers of the Detr3DTransformer.""" + self.level_embeds = nn.Parameter(torch.Tensor( + self.num_feature_levels, self.embed_dims)) + self.cams_embeds = nn.Parameter( + torch.Tensor(self.num_cams, self.embed_dims)) + self.reference_points = nn.Linear(self.embed_dims, 3) + self.map_reference_points = nn.Linear(self.embed_dims, 2) + self.can_bus_mlp = nn.Sequential( + nn.Linear(18, self.embed_dims // 2), + nn.ReLU(inplace=True), + nn.Linear(self.embed_dims // 2, self.embed_dims), + nn.ReLU(inplace=True), + ) + if self.can_bus_norm: + self.can_bus_mlp.add_module('norm', nn.LayerNorm(self.embed_dims)) + + def init_weights(self): + """Initialize the transformer weights.""" + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + for m in self.modules(): + if isinstance(m, MSDeformableAttention3D) or isinstance(m, TemporalSelfAttention) \ + or isinstance(m, CustomMSDeformableAttention): + try: + m.init_weight() + except AttributeError: + m.init_weights() + normal_(self.level_embeds) + normal_(self.cams_embeds) + xavier_init(self.reference_points, distribution='uniform', bias=0.) + xavier_init(self.map_reference_points, distribution='uniform', bias=0.) + xavier_init(self.can_bus_mlp, distribution='uniform', bias=0.) + + # TODO apply fp16 to this module cause grad_norm NAN + # @auto_fp16(apply_to=('mlvl_feats', 'bev_queries', 'prev_bev', 'bev_pos')) + def get_bev_features( + self, + mlvl_feats, + bev_queries, + bev_h, + bev_w, + grid_length=[0.512, 0.512], + bev_pos=None, + prev_bev=None, + **kwargs): + """ + obtain bev features. 
+ """ + + bs = mlvl_feats[0].size(0) + bev_queries = bev_queries.unsqueeze(1).repeat(1, bs, 1) + bev_pos = bev_pos.flatten(2).permute(2, 0, 1) + + # obtain rotation angle and shift with ego motion + delta_x = np.array([each['can_bus'][0] + for each in kwargs['img_metas']]) + delta_y = np.array([each['can_bus'][1] + for each in kwargs['img_metas']]) + ego_angle = np.array( + [each['can_bus'][-2] / np.pi * 180 for each in kwargs['img_metas']]) + grid_length_y = grid_length[0] + grid_length_x = grid_length[1] + translation_length = np.sqrt(delta_x ** 2 + delta_y ** 2) + translation_angle = np.arctan2(delta_y, delta_x) / np.pi * 180 + bev_angle = ego_angle - translation_angle + shift_y = translation_length * \ + np.cos(bev_angle / 180 * np.pi) / grid_length_y / bev_h + shift_x = translation_length * \ + np.sin(bev_angle / 180 * np.pi) / grid_length_x / bev_w + shift_y = shift_y * self.use_shift + shift_x = shift_x * self.use_shift + shift = bev_queries.new_tensor( + [shift_x, shift_y]).permute(1, 0) # xy, bs -> bs, xy + + if prev_bev is not None: + if prev_bev.shape[1] == bev_h * bev_w: + prev_bev = prev_bev.permute(1, 0, 2) + if self.rotate_prev_bev: + for i in range(bs): + # num_prev_bev = prev_bev.size(1) + rotation_angle = kwargs['img_metas'][i]['can_bus'][-1] + tmp_prev_bev = prev_bev[:, i].reshape( + bev_h, bev_w, -1).permute(2, 0, 1) + tmp_prev_bev = rotate(tmp_prev_bev, rotation_angle, + center=self.rotate_center) + tmp_prev_bev = tmp_prev_bev.permute(1, 2, 0).reshape( + bev_h * bev_w, 1, -1) + prev_bev[:, i] = tmp_prev_bev[:, 0] + + # add can bus signals + can_bus = bev_queries.new_tensor( + [each['can_bus'] for each in kwargs['img_metas']]) # [:, :] + can_bus = self.can_bus_mlp(can_bus)[None, :, :] + bev_queries = bev_queries + can_bus * self.use_can_bus + + feat_flatten = [] + spatial_shapes = [] + for lvl, feat in enumerate(mlvl_feats): + bs, num_cam, c, h, w = feat.shape + spatial_shape = (h, w) + feat = feat.flatten(3).permute(1, 0, 3, 2) + if self.use_cams_embeds: + feat = feat + self.cams_embeds[:, None, None, :].to(feat.dtype) + feat = feat + self.level_embeds[None, + None, lvl:lvl + 1, :].to(feat.dtype) + spatial_shapes.append(spatial_shape) + feat_flatten.append(feat) + + feat_flatten = torch.cat(feat_flatten, 2) + spatial_shapes = torch.as_tensor( + spatial_shapes, dtype=torch.long, device=bev_pos.device) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) + + feat_flatten = feat_flatten.permute( + 0, 2, 1, 3) # (num_cam, H*W, bs, embed_dims) + + bev_embed = self.encoder( + bev_queries, + feat_flatten, + feat_flatten, + bev_h=bev_h, + bev_w=bev_w, + bev_pos=bev_pos, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + prev_bev=prev_bev, + shift=shift, + **kwargs + ) + + return bev_embed + + # TODO apply fp16 to this module cause grad_norm NAN + # @auto_fp16(apply_to=('mlvl_feats', 'bev_queries', 'object_query_embed', 'prev_bev', 'bev_pos')) + def forward(self, + mlvl_feats, + bev_queries, + object_query_embed, + map_query_embed, + bev_h, + bev_w, + grid_length=[0.512, 0.512], + bev_pos=None, + reg_branches=None, + cls_branches=None, + map_reg_branches=None, + map_cls_branches=None, + prev_bev=None, + **kwargs): + """Forward function for `Detr3DTransformer`. + Args: + mlvl_feats (list(Tensor)): Input queries from + different level. Each element has shape + [bs, num_cams, embed_dims, h, w]. 
+ bev_queries (Tensor): (bev_h*bev_w, c) + bev_pos (Tensor): (bs, embed_dims, bev_h, bev_w) + object_query_embed (Tensor): The query embedding for decoder, + with shape [num_query, c]. + reg_branches (obj:`nn.ModuleList`): Regression heads for + feature maps from each decoder layer. Only would + be passed when `with_box_refine` is True. Default to None. + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + - bev_embed: BEV features + - inter_states: Outputs from decoder. If + return_intermediate_dec is True output has shape \ + (num_dec_layers, bs, num_query, embed_dims), else has \ + shape (1, bs, num_query, embed_dims). + - init_reference_out: The initial value of reference \ + points, has shape (bs, num_queries, 4). + - inter_references_out: The internal value of reference \ + points in decoder, has shape \ + (num_dec_layers, bs,num_query, embed_dims) + - enc_outputs_class: The classification score of \ + proposals generated from \ + encoder's feature maps, has shape \ + (batch, h*w, num_classes). \ + Only would be returned when `as_two_stage` is True, \ + otherwise None. + - enc_outputs_coord_unact: The regression results \ + generated from encoder's feature maps., has shape \ + (batch, h*w, 4). Only would \ + be returned when `as_two_stage` is True, \ + otherwise None. + """ + + bev_embed = self.get_bev_features( + mlvl_feats, + bev_queries, + bev_h, + bev_w, + grid_length=grid_length, + bev_pos=bev_pos, + prev_bev=prev_bev, + **kwargs) # bev_embed shape: bs, bev_h*bev_w, embed_dims + + bs = mlvl_feats[0].size(0) + query_pos, query = torch.split( + object_query_embed, self.embed_dims, dim=1) + query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1) + query = query.unsqueeze(0).expand(bs, -1, -1) + reference_points = self.reference_points(query_pos) + reference_points = reference_points.sigmoid() + init_reference_out = reference_points + + map_query_pos, map_query = torch.split( + map_query_embed, self.embed_dims, dim=1) + map_query_pos = map_query_pos.unsqueeze(0).expand(bs, -1, -1) + map_query = map_query.unsqueeze(0).expand(bs, -1, -1) + map_reference_points = self.map_reference_points(map_query_pos) + map_reference_points = map_reference_points.sigmoid() + map_init_reference_out = map_reference_points + + query = query.permute(1, 0, 2) + query_pos = query_pos.permute(1, 0, 2) + map_query = map_query.permute(1, 0, 2) + map_query_pos = map_query_pos.permute(1, 0, 2) + bev_embed = bev_embed.permute(1, 0, 2) + + if self.decoder is not None: + # [L, Q, B, D], [L, B, Q, D] + inter_states, inter_references = self.decoder( + query=query, + key=None, + value=bev_embed, + query_pos=query_pos, + reference_points=reference_points, + reg_branches=reg_branches, + cls_branches=cls_branches, + spatial_shapes=torch.tensor([[bev_h, bev_w]], device=query.device), + level_start_index=torch.tensor([0], device=query.device), + **kwargs) + inter_references_out = inter_references + else: + inter_states = query.unsqueeze(0) + inter_references_out = reference_points.unsqueeze(0) + + if self.map_decoder is not None: + # [L, Q, B, D], [L, B, Q, D] + map_inter_states, map_inter_references = self.map_decoder( + query=map_query, + key=None, + value=bev_embed, + query_pos=map_query_pos, + reference_points=map_reference_points, + reg_branches=map_reg_branches, + cls_branches=map_cls_branches, + spatial_shapes=torch.tensor([[bev_h, bev_w]], device=map_query.device), + level_start_index=torch.tensor([0], device=map_query.device), + **kwargs) + map_inter_references_out = map_inter_references 
+ else: + map_inter_states = map_query.unsqueeze(0) + map_inter_references_out = map_reference_points.unsqueeze(0) + + return ( + bev_embed, inter_states, init_reference_out, inter_references_out, + map_inter_states, map_init_reference_out, map_inter_references_out) + + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class CustomTransformerDecoder(TransformerLayerSequence): + """Implements the decoder in DETR3D transformer. + Args: + return_intermediate (bool): Whether to return intermediate outputs. + coder_norm_cfg (dict): Config of last normalization layer. Default: `LN`. + """ + + def __init__(self, *args, return_intermediate=False, **kwargs): + super(CustomTransformerDecoder, self).__init__(*args, **kwargs) + self.return_intermediate = return_intermediate + self.fp16_enabled = False + + def forward(self, + query, + key=None, + value=None, + query_pos=None, + key_pos=None, + attn_masks=None, + key_padding_mask=None, + *args, + **kwargs): + """Forward function for `Detr3DTransformerDecoder`. + Args: + query (Tensor): Input query with shape + `(num_query, bs, embed_dims)`. + Returns: + Tensor: Results with shape [1, num_query, bs, embed_dims] when + return_intermediate is `False`, otherwise it has shape + [num_layers, num_query, bs, embed_dims]. + """ + intermediate = [] + for lid, layer in enumerate(self.layers): + query = layer( + query=query, + key=key, + value=value, + query_pos=query_pos, + key_pos=key_pos, + attn_masks=attn_masks, + key_padding_mask=key_padding_mask, + *args, + **kwargs) + + if self.return_intermediate: + intermediate.append(query) + + if self.return_intermediate: + return torch.stack(intermediate) + + return query \ No newline at end of file diff --git a/mmcv/models/modules/__init__.py b/mmcv/models/modules/__init__.py new file mode 100644 index 0000000..da1e029 --- /dev/null +++ b/mmcv/models/modules/__init__.py @@ -0,0 +1,8 @@ +from .transformer import BEVFormerPerceptionTransformer, UniADPerceptionTransformer, GroupFree3DMHA +from .spatial_cross_attention import SpatialCrossAttention, MSDeformableAttention3D +from .temporal_self_attention import TemporalSelfAttention +from .encoder import BEVFormerEncoder, BEVFormerLayer +from .decoder import DetectionTransformerDecoder +from .vote_module import VoteModule +from .VAD_transformer import VADPerceptionTransformer + diff --git a/mmcv/models/modules/custom_base_transformer_layer.py b/mmcv/models/modules/custom_base_transformer_layer.py new file mode 100644 index 0000000..c877db2 --- /dev/null +++ b/mmcv/models/modules/custom_base_transformer_layer.py @@ -0,0 +1,243 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. +# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +import copy +import warnings + +import torch + +from mmcv import ConfigDict +from mmcv.models.bricks import build_norm_layer +from mmcv.models.backbones.base_module import BaseModule, ModuleList + +from mmcv.models.bricks.registry import TRANSFORMER_LAYER +from mmcv.models.bricks.transformer import build_feedforward_network, build_attention + + +@TRANSFORMER_LAYER.register_module() +class MyCustomBaseTransformerLayer(BaseModule): + """Base `TransformerLayer` for vision transformer. + It can be built from `mmcv.ConfigDict` and support more flexible + customization, for example, using any number of `FFN or LN ` and + use different kinds of `attention` by specifying a list of `ConfigDict` + named `attn_cfgs`. 
It is worth mentioning that it supports `prenorm` + when you specifying `norm` as the first element of `operation_order`. + More details about the `prenorm`: `On Layer Normalization in the + Transformer Architecture `_ . + Args: + attn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )): + Configs for `self_attention` or `cross_attention` modules, + The order of the configs in the list should be consistent with + corresponding attentions in operation_order. + If it is a dict, all of the attention modules in operation_order + will be built with this config. Default: None. + ffn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None )): + Configs for FFN, The order of the configs in the list should be + consistent with corresponding ffn in operation_order. + If it is a dict, all of the attention modules in operation_order + will be built with this config. + operation_order (tuple[str]): The execution order of operation + in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). + Support `prenorm` when you specifying first element as `norm`. + Default:None. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + batch_first (bool): Key, Query and Value are shape + of (batch, n, embed_dim) + or (n, batch, embed_dim). Default to False. + """ + + def __init__(self, + attn_cfgs=None, + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0., + act_cfg=dict(type='ReLU', inplace=True), + ), + operation_order=None, + norm_cfg=dict(type='LN'), + init_cfg=None, + batch_first=True, + **kwargs): + + deprecated_args = dict( + feedforward_channels='feedforward_channels', + ffn_dropout='ffn_drop', + ffn_num_fcs='num_fcs') + for ori_name, new_name in deprecated_args.items(): + if ori_name in kwargs: + warnings.warn( + f'The arguments `{ori_name}` in BaseTransformerLayer ' + f'has been deprecated, now you should set `{new_name}` ' + f'and other FFN related arguments ' + f'to a dict named `ffn_cfgs`. ') + ffn_cfgs[new_name] = kwargs[ori_name] + + super(MyCustomBaseTransformerLayer, self).__init__(init_cfg) + + self.batch_first = batch_first + + assert set(operation_order) & set( + ['self_attn', 'norm', 'ffn', 'cross_attn']) == \ + set(operation_order), f'The operation_order of' \ + f' {self.__class__.__name__} should ' \ + f'contains all four operation type ' \ + f"{['self_attn', 'norm', 'ffn', 'cross_attn']}" + + num_attn = operation_order.count('self_attn') + operation_order.count( + 'cross_attn') + if isinstance(attn_cfgs, dict): + attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)] + else: + assert num_attn == len(attn_cfgs), f'The length ' \ + f'of attn_cfg {num_attn} is ' \ + f'not consistent with the number of attention' \ + f'in operation_order {operation_order}.' + + self.num_attn = num_attn + self.operation_order = operation_order + self.norm_cfg = norm_cfg + self.pre_norm = operation_order[0] == 'norm' + self.attentions = ModuleList() + + index = 0 + for operation_name in operation_order: + if operation_name in ['self_attn', 'cross_attn']: + if 'batch_first' in attn_cfgs[index]: + assert self.batch_first == attn_cfgs[index]['batch_first'] + else: + attn_cfgs[index]['batch_first'] = self.batch_first + attention = build_attention(attn_cfgs[index]) + # Some custom attentions used as `self_attn` + # or `cross_attn` can have different behavior. 
+ attention.operation_name = operation_name + self.attentions.append(attention) + index += 1 + + self.embed_dims = self.attentions[0].embed_dims + + self.ffns = ModuleList() + num_ffns = operation_order.count('ffn') + if isinstance(ffn_cfgs, dict): + ffn_cfgs = ConfigDict(ffn_cfgs) + if isinstance(ffn_cfgs, dict): + ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)] + assert len(ffn_cfgs) == num_ffns + for ffn_index in range(num_ffns): + if 'embed_dims' not in ffn_cfgs[ffn_index]: + ffn_cfgs['embed_dims'] = self.embed_dims + else: + assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims + + self.ffns.append( + build_feedforward_network(ffn_cfgs[ffn_index])) + + self.norms = ModuleList() + num_norms = operation_order.count('norm') + for _ in range(num_norms): + self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1]) + + def forward(self, + query, + key=None, + value=None, + query_pos=None, + key_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + **kwargs): + """Forward function for `TransformerDecoderLayer`. + **kwargs contains some specific arguments of attentions. + Args: + query (Tensor): The input query with shape + [num_queries, bs, embed_dims] if + self.batch_first is False, else + [bs, num_queries embed_dims]. + key (Tensor): The key tensor with shape [num_keys, bs, + embed_dims] if self.batch_first is False, else + [bs, num_keys, embed_dims] . + value (Tensor): The value tensor with same shape as `key`. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + key_pos (Tensor): The positional encoding for `key`. + Default: None. + attn_masks (List[Tensor] | None): 2D Tensor used in + calculation of corresponding attention. The length of + it should equal to the number of `attention` in + `operation_order`. Default: None. + query_key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_queries]. Only used in `self_attn` layer. + Defaults to None. + key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_keys]. Default: None. + Returns: + Tensor: forwarded results with shape [num_queries, bs, embed_dims]. 
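+        Note:
+            When ``operation_order`` starts with ``'norm'`` (pre-norm), the
+            un-normalized ``identity`` is passed to each attention / FFN as
+            the residual input; in post-norm mode the residual is taken from
+            the attention input itself.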
+ """ + + norm_index = 0 + attn_index = 0 + ffn_index = 0 + identity = query + if attn_masks is None: + attn_masks = [None for _ in range(self.num_attn)] + elif isinstance(attn_masks, torch.Tensor): + attn_masks = [ + copy.deepcopy(attn_masks) for _ in range(self.num_attn) + ] + warnings.warn(f'Use same attn_mask in all attentions in ' + f'{self.__class__.__name__} ') + else: + assert len(attn_masks) == self.num_attn, f'The length of ' \ + f'attn_masks {len(attn_masks)} must be equal ' \ + f'to the number of attention in ' \ + f'operation_order {self.num_attn}' + + for layer in self.operation_order: + if layer == 'self_attn': + temp_key = temp_value = query + query = self.attentions[attn_index]( + query, + temp_key, + temp_value, + identity if self.pre_norm else None, + query_pos=query_pos, + key_pos=query_pos, + attn_mask=attn_masks[attn_index], + key_padding_mask=query_key_padding_mask, + **kwargs) + attn_index += 1 + identity = query + + elif layer == 'norm': + query = self.norms[norm_index](query) + norm_index += 1 + + elif layer == 'cross_attn': + query = self.attentions[attn_index]( + query, + key, + value, + identity if self.pre_norm else None, + query_pos=query_pos, + key_pos=key_pos, + attn_mask=attn_masks[attn_index], + key_padding_mask=key_padding_mask, + **kwargs) + attn_index += 1 + identity = query + + elif layer == 'ffn': + query = self.ffns[ffn_index]( + query, identity if self.pre_norm else None) + ffn_index += 1 + + return query diff --git a/mmcv/models/modules/decoder.py b/mmcv/models/modules/decoder.py new file mode 100644 index 0000000..e320dbf --- /dev/null +++ b/mmcv/models/modules/decoder.py @@ -0,0 +1,344 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. +# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +from mmcv.ops.multi_scale_deform_attn import multi_scale_deformable_attn_pytorch +import cv2 as cv +import copy +import warnings +from matplotlib import pyplot as plt +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.models.utils import xavier_init, constant_init +from mmcv.models.bricks.registry import (ATTENTION, + TRANSFORMER_LAYER_SEQUENCE) +from mmcv.models.bricks.transformer import TransformerLayerSequence +import math +from mmcv.models.backbones.base_module import BaseModule, ModuleList, Sequential +from mmcv.utils import (ConfigDict, build_from_cfg, deprecated_api_warning, + to_2tuple) + +from mmcv.utils import ext_loader +from .multi_scale_deformable_attn_function import MultiScaleDeformableAttnFunction_fp32, \ + MultiScaleDeformableAttnFunction_fp16 + +ext_module = ext_loader.load_ext( + '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) + + +def inverse_sigmoid(x, eps=1e-5): + """Inverse function of sigmoid. + Args: + x (Tensor): The tensor to do the + inverse. + eps (float): EPS avoid numerical + overflow. Defaults 1e-5. + Returns: + Tensor: The x has passed the inverse + function of sigmoid, has same + shape with input. + """ + x = x.clamp(min=0, max=1) + x1 = x.clamp(min=eps) + x2 = (1 - x).clamp(min=eps) + return torch.log(x1 / x2) + + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class DetectionTransformerDecoder(TransformerLayerSequence): + """Implements the decoder in DETR3D transformer. + Args: + return_intermediate (bool): Whether to return intermediate outputs. + coder_norm_cfg (dict): Config of last normalization layer. Default: + `LN`. 
+ """ + + def __init__(self, *args, return_intermediate=False, **kwargs): + super(DetectionTransformerDecoder, self).__init__(*args, **kwargs) + self.return_intermediate = return_intermediate + self.fp16_enabled = False + + def forward(self, + query, + *args, + reference_points=None, + reg_branches=None, + key_padding_mask=None, + **kwargs): + """Forward function for `Detr3DTransformerDecoder`. + Args: + query (Tensor): Input query with shape + `(num_query, bs, embed_dims)`. + reference_points (Tensor): The reference + points of offset. has shape + (bs, num_query, 4) when as_two_stage, + otherwise has shape ((bs, num_query, 2). + reg_branch: (obj:`nn.ModuleList`): Used for + refining the regression results. Only would + be passed when with_box_refine is True, + otherwise would be passed a `None`. + Returns: + Tensor: Results with shape [1, num_query, bs, embed_dims] when + return_intermediate is `False`, otherwise it has shape + [num_layers, num_query, bs, embed_dims]. + """ + output = query + intermediate = [] + intermediate_reference_points = [] + for lid, layer in enumerate(self.layers): + + reference_points_input = reference_points[..., :2].unsqueeze( + 2) # BS NUM_QUERY NUM_LEVEL 2 + output = layer( + output, + *args, + reference_points=reference_points_input, + key_padding_mask=key_padding_mask, + **kwargs) + output = output.permute(1, 0, 2) + + if reg_branches is not None: + tmp = reg_branches[lid](output) + + assert reference_points.shape[-1] == 3 + + new_reference_points = torch.zeros_like(reference_points) + new_reference_points[..., :2] = tmp[ + ..., :2] + inverse_sigmoid(reference_points[..., :2]) + new_reference_points[..., 2:3] = tmp[ + ..., 4:5] + inverse_sigmoid(reference_points[..., 2:3]) + + new_reference_points = new_reference_points.sigmoid() + + reference_points = new_reference_points.detach() + + output = output.permute(1, 0, 2) + if self.return_intermediate: + intermediate.append(output) + intermediate_reference_points.append(reference_points) + + if self.return_intermediate: + return torch.stack(intermediate), torch.stack( + intermediate_reference_points) + + return output, reference_points + + +@ATTENTION.register_module() +class CustomMSDeformableAttention(BaseModule): + """An attention module used in Deformable-Detr. + + `Deformable DETR: Deformable Transformers for End-to-End Object Detection. + `_. + + Args: + embed_dims (int): The embedding dimension of Attention. + Default: 256. + num_heads (int): Parallel attention heads. Default: 64. + num_levels (int): The number of feature map used in + Attention. Default: 4. + num_points (int): The number of sampling points for + each query in each head. Default: 4. + im2col_step (int): The step used in image_to_column. + Default: 64. + dropout (float): A Dropout layer on `inp_identity`. + Default: 0.1. + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dim) + or (n, batch, embed_dim). Default to False. + norm_cfg (dict): Config dict for normalization layer. + Default: None. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. 
+ """ + + def __init__(self, + embed_dims=256, + num_heads=8, + num_levels=4, + num_points=4, + im2col_step=64, + dropout=0.1, + batch_first=False, + norm_cfg=None, + init_cfg=None): + super().__init__(init_cfg) + if embed_dims % num_heads != 0: + raise ValueError(f'embed_dims must be divisible by num_heads, ' + f'but got {embed_dims} and {num_heads}') + dim_per_head = embed_dims // num_heads + self.norm_cfg = norm_cfg + self.dropout = nn.Dropout(dropout) + self.batch_first = batch_first + self.fp16_enabled = False + + # you'd better set dim_per_head to a power of 2 + # which is more efficient in the CUDA implementation + def _is_power_of_2(n): + if (not isinstance(n, int)) or (n < 0): + raise ValueError( + 'invalid input for _is_power_of_2: {} (type: {})'.format( + n, type(n))) + return (n & (n - 1) == 0) and n != 0 + + if not _is_power_of_2(dim_per_head): + warnings.warn( + "You'd better set embed_dims in " + 'MultiScaleDeformAttention to make ' + 'the dimension of each attention head a power of 2 ' + 'which is more efficient in our CUDA implementation.') + + self.im2col_step = im2col_step + self.embed_dims = embed_dims + self.num_levels = num_levels + self.num_heads = num_heads + self.num_points = num_points + self.sampling_offsets = nn.Linear( + embed_dims, num_heads * num_levels * num_points * 2) + self.attention_weights = nn.Linear(embed_dims, + num_heads * num_levels * num_points) + self.value_proj = nn.Linear(embed_dims, embed_dims) + self.output_proj = nn.Linear(embed_dims, embed_dims) + self.init_weights() + + def init_weights(self): + """Default initialization for Parameters of Module.""" + constant_init(self.sampling_offsets, 0.) + thetas = torch.arange( + self.num_heads, + dtype=torch.float32) * (2.0 * math.pi / self.num_heads) + grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) + grid_init = (grid_init / + grid_init.abs().max(-1, keepdim=True)[0]).view( + self.num_heads, 1, 1, + 2).repeat(1, self.num_levels, self.num_points, 1) + for i in range(self.num_points): + grid_init[:, :, i, :] *= i + 1 + + self.sampling_offsets.bias.data = grid_init.view(-1) + constant_init(self.attention_weights, val=0., bias=0.) + xavier_init(self.value_proj, distribution='uniform', bias=0.) + xavier_init(self.output_proj, distribution='uniform', bias=0.) + self._is_init = True + + @deprecated_api_warning({'residual': 'identity'}, + cls_name='MultiScaleDeformableAttention') + def forward(self, + query, + key=None, + value=None, + identity=None, + query_pos=None, + key_padding_mask=None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + flag='decoder', + **kwargs): + """Forward Function of MultiScaleDeformAttention. + + Args: + query (Tensor): Query of Transformer with shape + (num_query, bs, embed_dims). + key (Tensor): The key tensor with shape + `(num_key, bs, embed_dims)`. + value (Tensor): The value tensor with shape + `(num_key, bs, embed_dims)`. + identity (Tensor): The tensor used for addition, with the + same shape as `query`. Default None. If None, + `query` will be used. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + key_pos (Tensor): The positional encoding for `key`. Default + None. + reference_points (Tensor): The normalized reference + points with shape (bs, num_query, num_levels, 2), + all elements is range in [0, 1], top-left (0,0), + bottom-right (1, 1), including padding area. + or (N, Length_{query}, num_levels, 4), add + additional two dimensions is (w, h) to + form reference boxes. 
+ key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_key]. + spatial_shapes (Tensor): Spatial shape of features in + different levels. With shape (num_levels, 2), + last dimension represents (h, w). + level_start_index (Tensor): The start index of each level. + A tensor has shape ``(num_levels, )`` and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + + Returns: + Tensor: forwarded results with shape [num_query, bs, embed_dims]. + """ + + if value is None: + value = query + + if identity is None: + identity = query + if query_pos is not None: + query = query + query_pos + if not self.batch_first: + # change to (bs, num_query ,embed_dims) + query = query.permute(1, 0, 2) + value = value.permute(1, 0, 2) + + bs, num_query, _ = query.shape + bs, num_value, _ = value.shape + assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value + + value = self.value_proj(value) + if key_padding_mask is not None: + value = value.masked_fill(key_padding_mask[..., None], 0.0) + value = value.view(bs, num_value, self.num_heads, -1) + + sampling_offsets = self.sampling_offsets(query).view( + bs, num_query, self.num_heads, self.num_levels, self.num_points, 2) + attention_weights = self.attention_weights(query).view( + bs, num_query, self.num_heads, self.num_levels * self.num_points) + attention_weights = attention_weights.softmax(-1) + + attention_weights = attention_weights.view(bs, num_query, + self.num_heads, + self.num_levels, + self.num_points) + if reference_points.shape[-1] == 2: + offset_normalizer = torch.stack( + [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) + sampling_locations = reference_points[:, :, None, :, None, :] \ + + sampling_offsets \ + / offset_normalizer[None, None, None, :, None, :] + elif reference_points.shape[-1] == 4: + sampling_locations = reference_points[:, :, None, :, None, :2] \ + + sampling_offsets / self.num_points \ + * reference_points[:, :, None, :, None, 2:] \ + * 0.5 + else: + raise ValueError( + f'Last dim of reference_points must be' + f' 2 or 4, but get {reference_points.shape[-1]} instead.') + if torch.cuda.is_available() and value.is_cuda: + + # using fp16 deformable attention is unstable because it performs many sum operations + if value.dtype == torch.float16: + MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32 + else: + MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32 + output = MultiScaleDeformableAttnFunction.apply( + value, spatial_shapes, level_start_index, sampling_locations, + attention_weights, self.im2col_step) + else: + output = multi_scale_deformable_attn_pytorch( + value, spatial_shapes, sampling_locations, attention_weights) + + output = self.output_proj(output) + + if not self.batch_first: + # (num_query, bs ,embed_dims) + output = output.permute(1, 0, 2) + + return self.dropout(output) + identity diff --git a/mmcv/models/modules/encoder.py b/mmcv/models/modules/encoder.py new file mode 100644 index 0000000..aa3c1b0 --- /dev/null +++ b/mmcv/models/modules/encoder.py @@ -0,0 +1,405 @@ + +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. 
+# ---------------------------------------------
+# Modified by Zhiqi Li
+# ---------------------------------------------
+
+from .custom_base_transformer_layer import MyCustomBaseTransformerLayer
+import copy
+import warnings
+from mmcv.models.bricks.registry import (ATTENTION,
+                                         TRANSFORMER_LAYER,
+                                         TRANSFORMER_LAYER_SEQUENCE)
+from mmcv.models.bricks.transformer import TransformerLayerSequence
+from mmcv.utils import force_fp32, auto_fp16
+import numpy as np
+import torch
+import cv2 as cv
+from mmcv.utils import TORCH_VERSION, digit_version
+from mmcv.utils import ext_loader
+ext_module = ext_loader.load_ext(
+    '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward'])
+
+
+@TRANSFORMER_LAYER_SEQUENCE.register_module()
+class BEVFormerEncoder(TransformerLayerSequence):
+
+    """
+    Attention with both self and cross.
+    Implements the encoder in BEVFormer, which interleaves temporal
+    self-attention and spatial cross-attention layers.
+    Args:
+        return_intermediate (bool): Whether to return intermediate outputs.
+        coder_norm_cfg (dict): Config of last normalization layer. Default:
+            `LN`.
+    """
+
+    def __init__(self, *args, pc_range=None, num_points_in_pillar=4, return_intermediate=False, dataset_type='nuscenes',
+                 **kwargs):
+
+        super(BEVFormerEncoder, self).__init__(*args, **kwargs)
+        self.return_intermediate = return_intermediate
+
+        self.num_points_in_pillar = num_points_in_pillar
+        self.pc_range = pc_range
+        self.fp16_enabled = False
+
+    @staticmethod
+    def get_reference_points(H, W, Z=8, num_points_in_pillar=4, dim='3d', bs=1, device='cuda', dtype=torch.float):
+        """Get the reference points used in SCA and TSA.
+        Args:
+            H, W: spatial shape of the BEV grid.
+            Z: height of the pillar.
+            num_points_in_pillar: number of points sampled uniformly from
+                each pillar.
+            device (obj:`device`): The device where
+                reference_points should be.
+        Returns:
+            Tensor: reference points with shape
+                (bs, num_points_in_pillar, H*W, 3) when `dim` == '3d', or
+                (bs, H*W, 1, 2) when `dim` == '2d'.
+        """
+
+        # reference points in 3D space, used in spatial cross-attention (SCA)
+        if dim == '3d':
+            zs = torch.linspace(0.5, Z - 0.5, num_points_in_pillar, dtype=dtype,
+                                device=device).view(-1, 1, 1).expand(num_points_in_pillar, H, W) / Z
+            xs = torch.linspace(0.5, W - 0.5, W, dtype=dtype,
+                                device=device).view(1, 1, W).expand(num_points_in_pillar, H, W) / W
+            ys = torch.linspace(0.5, H - 0.5, H, dtype=dtype,
+                                device=device).view(1, H, 1).expand(num_points_in_pillar, H, W) / H
+            ref_3d = torch.stack((xs, ys, zs), -1)
+            ref_3d = ref_3d.permute(0, 3, 1, 2).flatten(2).permute(0, 2, 1)
+            ref_3d = ref_3d[None].repeat(bs, 1, 1, 1)
+            return ref_3d
+
+        # reference points on 2D bev plane, used in temporal self-attention (TSA).
+        elif dim == '2d':
+            ref_y, ref_x = torch.meshgrid(
+                torch.linspace(
+                    0.5, H - 0.5, H, dtype=dtype, device=device),
+                torch.linspace(
+                    0.5, W - 0.5, W, dtype=dtype, device=device)
+            )
+            ref_y = ref_y.reshape(-1)[None] / H
+            ref_x = ref_x.reshape(-1)[None] / W
+            ref_2d = torch.stack((ref_x, ref_y), -1)
+            ref_2d = ref_2d.repeat(bs, 1, 1).unsqueeze(2)
+            return ref_2d
+
+    # This function must use fp32!!!
+    @force_fp32(apply_to=('reference_points', 'img_metas'))
+    def point_sampling(self, reference_points, pc_range, img_metas):
+        # NOTE: close tf32 here. TODO(yzj): used in bevformer
+        # allow_tf32 = torch.backends.cuda.matmul.allow_tf32
+        # torch.backends.cuda.matmul.allow_tf32 = False
+        # torch.backends.cudnn.allow_tf32 = False
+
+        lidar2img = []
+        for img_meta in img_metas:
+            lidar2img.append(img_meta['lidar2img'])
+        lidar2img = np.asarray(lidar2img)
+        lidar2img = reference_points.new_tensor(lidar2img)  # (B, N, 4, 4)
+        reference_points = reference_points.clone()
+
+        reference_points[..., 0:1] = reference_points[..., 0:1] * \
+            (pc_range[3] - pc_range[0]) + pc_range[0]
+        reference_points[..., 1:2] = reference_points[..., 1:2] * \
+            (pc_range[4] - pc_range[1]) + pc_range[1]
+        reference_points[..., 2:3] = reference_points[..., 2:3] * \
+            (pc_range[5] - pc_range[2]) + pc_range[2]
+
+        reference_points = torch.cat(
+            (reference_points, torch.ones_like(reference_points[..., :1])), -1)
+
+        reference_points = reference_points.permute(1, 0, 2, 3)
+        D, B, num_query = reference_points.size()[:3]
+        num_cam = lidar2img.size(1)
+
+        reference_points = reference_points.view(
+            D, B, 1, num_query, 4).repeat(1, 1, num_cam, 1, 1).unsqueeze(-1)
+
+        lidar2img = lidar2img.view(
+            1, B, num_cam, 1, 4, 4).repeat(D, 1, 1, num_query, 1, 1)
+
+        reference_points_cam = torch.matmul(lidar2img.to(torch.float32),
+                                            reference_points.to(torch.float32)).squeeze(-1)
+        eps = 1e-5
+
+        bev_mask = (reference_points_cam[..., 2:3] > eps)
+        reference_points_cam = reference_points_cam[..., 0:2] / torch.maximum(
+            reference_points_cam[..., 2:3], torch.ones_like(reference_points_cam[..., 2:3]) * eps)
+
+        reference_points_cam[..., 0] /= img_metas[0]['img_shape'][0][1]
+        reference_points_cam[..., 1] /= img_metas[0]['img_shape'][0][0]
+
+        bev_mask = (bev_mask & (reference_points_cam[..., 1:2] > 0.0)
+                    & (reference_points_cam[..., 1:2] < 1.0)
+                    & (reference_points_cam[..., 0:1] < 1.0)
+                    & (reference_points_cam[..., 0:1] > 0.0))
+        if digit_version(TORCH_VERSION) >= digit_version('1.8'):
+            bev_mask = torch.nan_to_num(bev_mask)
+        else:
+            bev_mask = bev_mask.new_tensor(
+                np.nan_to_num(bev_mask.cpu().numpy()))
+
+        reference_points_cam = reference_points_cam.permute(2, 1, 3, 0, 4)
+        bev_mask = bev_mask.permute(2, 1, 3, 0, 4).squeeze(-1)
+
+        return reference_points_cam, bev_mask
+
+    @auto_fp16()
+    def forward(self,
+                bev_query,
+                key,
+                value,
+                *args,
+                bev_h=None,
+                bev_w=None,
+                bev_pos=None,
+                spatial_shapes=None,
+                level_start_index=None,
+                valid_ratios=None,
+                prev_bev=None,
+                shift=0.,
+                img_metas=None,
+                **kwargs):
+        """Forward function for `BEVFormerEncoder`.
+        Args:
+            bev_query (Tensor): Input BEV query with shape
+                `(num_query, bs, embed_dims)`.
+            key & value (Tensor): Input multi-camera features with shape
+                (num_cam, num_value, bs, embed_dims)
+            prev_bev (Tensor | None): BEV features of the previous frame.
+            shift (Tensor): Normalized (x, y) BEV shift induced by ego
+                motion, computed by the caller (see `get_bev_features`).
+            valid_ratios (Tensor): The ratios of valid
+                points on the feature map, has shape
+                (bs, num_levels, 2)
+        Returns:
+            Tensor: Results with shape [1, num_query, bs, embed_dims] when
+                return_intermediate is `False`, otherwise it has shape
+                [num_layers, num_query, bs, embed_dims].
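+        Note:
+            When ``prev_bev`` is given, previous and current BEV queries are
+            stacked along the batch axis (``bs * 2``) so temporal
+            self-attention sees both in one pass, and the 2-d reference
+            points are stacked accordingly. The 2-d grid itself can be
+            inspected on CPU:
+
+            >>> BEVFormerEncoder.get_reference_points(
+            ...     2, 2, dim='2d', bs=1, device='cpu').shape
+            torch.Size([1, 4, 1, 2])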
+ """ + + output = bev_query + intermediate = [] + + ref_3d = self.get_reference_points( + bev_h, bev_w, self.pc_range[5]-self.pc_range[2], self.num_points_in_pillar, dim='3d', bs=bev_query.size(1), device=bev_query.device, dtype=bev_query.dtype) + ref_2d = self.get_reference_points( + bev_h, bev_w, dim='2d', bs=bev_query.size(1), device=bev_query.device, dtype=bev_query.dtype) + + reference_points_cam, bev_mask = self.point_sampling( + ref_3d, self.pc_range, img_metas) + + # bug: this code should be 'shift_ref_2d = ref_2d.clone()', we keep this bug for reproducing our results in paper. + shift_ref_2d = ref_2d # .clone() + shift_ref_2d += shift[:, None, None, :] + + # (num_query, bs, embed_dims) -> (bs, num_query, embed_dims) + bev_query = bev_query.permute(1, 0, 2) + bev_pos = bev_pos.permute(1, 0, 2) + bs, len_bev, num_bev_level, _ = ref_2d.shape + if prev_bev is not None: + prev_bev = prev_bev.permute(1, 0, 2) + prev_bev = torch.stack( + [prev_bev, bev_query], 1).reshape(bs*2, len_bev, -1) + hybird_ref_2d = torch.stack([shift_ref_2d, ref_2d], 1).reshape( + bs*2, len_bev, num_bev_level, 2) + else: + hybird_ref_2d = torch.stack([ref_2d, ref_2d], 1).reshape( + bs*2, len_bev, num_bev_level, 2) + + for lid, layer in enumerate(self.layers): + output = layer( + bev_query, + key, + value, + *args, + bev_pos=bev_pos, + ref_2d=hybird_ref_2d, + ref_3d=ref_3d, + bev_h=bev_h, + bev_w=bev_w, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + reference_points_cam=reference_points_cam, + bev_mask=bev_mask, + prev_bev=prev_bev, + **kwargs) + + bev_query = output + if self.return_intermediate: + intermediate.append(output) + + if self.return_intermediate: + return torch.stack(intermediate) + + return output + + +@TRANSFORMER_LAYER.register_module() +class BEVFormerLayer(MyCustomBaseTransformerLayer): + """Implements decoder layer in DETR transformer. + Args: + attn_cfgs (list[`mmcv.ConfigDict`] | list[dict] | dict )): + Configs for self_attention or cross_attention, the order + should be consistent with it in `operation_order`. If it is + a dict, it would be expand to the number of attention in + `operation_order`. + feedforward_channels (int): The hidden dimension for FFNs. + ffn_dropout (float): Probability of an element to be zeroed + in ffn. Default 0.0. + operation_order (tuple[str]): The execution order of operation + in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). + Default: None + act_cfg (dict): The activation config for FFNs. Default: `LN` + norm_cfg (dict): Config dict for normalization layer. + Default: `LN`. + ffn_num_fcs (int): The number of fully-connected layers in FFNs. + Default: 2. 
+ """ + + def __init__(self, + attn_cfgs, + feedforward_channels, + ffn_dropout=0.0, + operation_order=None, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'), + ffn_num_fcs=2, + **kwargs): + super(BEVFormerLayer, self).__init__( + attn_cfgs=attn_cfgs, + feedforward_channels=feedforward_channels, + ffn_dropout=ffn_dropout, + operation_order=operation_order, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + ffn_num_fcs=ffn_num_fcs, + **kwargs) + self.fp16_enabled = False + assert len(operation_order) == 6 + assert set(operation_order) == set( + ['self_attn', 'norm', 'cross_attn', 'ffn']) + + def forward(self, + query, + key=None, + value=None, + bev_pos=None, + query_pos=None, + key_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + ref_2d=None, + ref_3d=None, + bev_h=None, + bev_w=None, + reference_points_cam=None, + mask=None, + spatial_shapes=None, + level_start_index=None, + prev_bev=None, + **kwargs): + """Forward function for `TransformerDecoderLayer`. + + **kwargs contains some specific arguments of attentions. + + Args: + query (Tensor): The input query with shape + [num_queries, bs, embed_dims] if + self.batch_first is False, else + [bs, num_queries embed_dims]. + key (Tensor): The key tensor with shape [num_keys, bs, + embed_dims] if self.batch_first is False, else + [bs, num_keys, embed_dims] . + value (Tensor): The value tensor with same shape as `key`. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + key_pos (Tensor): The positional encoding for `key`. + Default: None. + attn_masks (List[Tensor] | None): 2D Tensor used in + calculation of corresponding attention. The length of + it should equal to the number of `attention` in + `operation_order`. Default: None. + query_key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_queries]. Only used in `self_attn` layer. + Defaults to None. + key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_keys]. Default: None. + + Returns: + Tensor: forwarded results with shape [num_queries, bs, embed_dims]. 
+ """ + + norm_index = 0 + attn_index = 0 + ffn_index = 0 + identity = query + if attn_masks is None: + attn_masks = [None for _ in range(self.num_attn)] + elif isinstance(attn_masks, torch.Tensor): + attn_masks = [ + copy.deepcopy(attn_masks) for _ in range(self.num_attn) + ] + warnings.warn(f'Use same attn_mask in all attentions in ' + f'{self.__class__.__name__} ') + else: + assert len(attn_masks) == self.num_attn, f'The length of ' \ + f'attn_masks {len(attn_masks)} must be equal ' \ + f'to the number of attention in ' \ + f'operation_order {self.num_attn}' + + for layer in self.operation_order: + # temporal self attention + if layer == 'self_attn': + + query = self.attentions[attn_index]( + query, + prev_bev, + prev_bev, + identity if self.pre_norm else None, + query_pos=bev_pos, + key_pos=bev_pos, + attn_mask=attn_masks[attn_index], + key_padding_mask=query_key_padding_mask, + reference_points=ref_2d, + spatial_shapes=torch.tensor( + [[bev_h, bev_w]], device=query.device), + level_start_index=torch.tensor([0], device=query.device), + **kwargs) + attn_index += 1 + identity = query + + elif layer == 'norm': + query = self.norms[norm_index](query) + norm_index += 1 + + # spaital cross attention + elif layer == 'cross_attn': + query = self.attentions[attn_index]( + query, + key, + value, + identity if self.pre_norm else None, + query_pos=query_pos, + key_pos=key_pos, + reference_points=ref_3d, + reference_points_cam=reference_points_cam, + mask=mask, + attn_mask=attn_masks[attn_index], + key_padding_mask=key_padding_mask, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + **kwargs) + attn_index += 1 + identity = query + + elif layer == 'ffn': + query = self.ffns[ffn_index]( + query, identity if self.pre_norm else None) + ffn_index += 1 + + return query diff --git a/mmcv/models/modules/group_attention.py b/mmcv/models/modules/group_attention.py new file mode 100644 index 0000000..907a576 --- /dev/null +++ b/mmcv/models/modules/group_attention.py @@ -0,0 +1,162 @@ +import copy +import math +import warnings +from typing import Sequence + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmcv.cnn import (Linear, build_activation_layer, build_conv_layer, build_norm_layer) +from mmcv.runner.base_module import BaseModule, ModuleList, Sequential +from mmcv.utils import (ConfigDict, build_from_cfg, deprecated_api_warning, to_2tuple) +from mmcv.cnn.bricks.drop import build_dropout +from mmcv.cnn.bricks.registry import (ATTENTION, FEEDFORWARD_NETWORK, POSITIONAL_ENCODING, TRANSFORMER_LAYER, + TRANSFORMER_LAYER_SEQUENCE) + + +@ATTENTION.register_module() +class GroupMultiheadAttention(BaseModule): + """A wrapper for ``torch.nn.MultiheadAttention``. + This module implements MultiheadAttention with identity connection, + and positional encoding is also passed as input. + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + attn_drop (float): A Dropout layer on attn_output_weights. + Default: 0.0. + proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. + Default: 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + batch_first (bool): When it is True, Key, Query and Value are shape of + (batch, n, embed_dim), otherwise (n, batch, embed_dim). + Default to False. 
+ """ + + def __init__(self, + embed_dims, + num_heads, + attn_drop=0., + proj_drop=0., + group=1, + dropout_layer=dict(type='Dropout', drop_prob=0.), + init_cfg=None, + batch_first=False, + **kwargs): + super().__init__(init_cfg) + if 'dropout' in kwargs: + warnings.warn( + 'The arguments `dropout` in MultiheadAttention ' + 'has been deprecated, now you can separately ' + 'set `attn_drop`(float), proj_drop(float), ' + 'and `dropout_layer`(dict) ', DeprecationWarning) + attn_drop = kwargs['dropout'] + dropout_layer['drop_prob'] = kwargs.pop('dropout') + + self.embed_dims = embed_dims + self.num_heads = num_heads + self.group = group + self.batch_first = batch_first + + self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop, **kwargs) + + self.proj_drop = nn.Dropout(proj_drop) + self.dropout_layer = build_dropout(dropout_layer) if dropout_layer else nn.Identity() + + @deprecated_api_warning({'residual': 'identity'}, cls_name='MultiheadAttention') + def forward(self, + query, + key=None, + value=None, + identity=None, + query_pos=None, + key_pos=None, + attn_mask=None, + key_padding_mask=None, + **kwargs): + """Forward function for `MultiheadAttention`. + **kwargs allow passing a more general data flow when combining + with other operations in `transformerlayer`. + Args: + query (Tensor): The input query with shape [num_queries, bs, + embed_dims] if self.batch_first is False, else + [bs, num_queries embed_dims]. + key (Tensor): The key tensor with shape [num_keys, bs, + embed_dims] if self.batch_first is False, else + [bs, num_keys, embed_dims] . + If None, the ``query`` will be used. Defaults to None. + value (Tensor): The value tensor with same shape as `key`. + Same in `nn.MultiheadAttention.forward`. Defaults to None. + If None, the `key` will be used. + identity (Tensor): This tensor, with the same shape as x, + will be used for the identity link. + If None, `x` will be used. Defaults to None. + query_pos (Tensor): The positional encoding for query, with + the same shape as `x`. If not None, it will + be added to `x` before forward function. Defaults to None. + key_pos (Tensor): The positional encoding for `key`, with the + same shape as `key`. Defaults to None. If not None, it will + be added to `key` before forward function. If None, and + `query_pos` has the same shape as `key`, then `query_pos` + will be used for `key_pos`. Defaults to None. + attn_mask (Tensor): ByteTensor mask with shape [num_queries, + num_keys]. Same in `nn.MultiheadAttention.forward`. + Defaults to None. + key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys]. + Defaults to None. + Returns: + Tensor: forwarded results with shape + [num_queries, bs, embed_dims] + if self.batch_first is False, else + [bs, num_queries embed_dims]. 
+ """ + + if key is None: + key = query + if value is None: + value = key + if identity is None: + identity = query + if key_pos is None: + if query_pos is not None: + # use query_pos if key_pos is not available + if query_pos.shape == key.shape: + key_pos = query_pos + else: + warnings.warn(f'position encoding of key is' + f'missing in {self.__class__.__name__}.') + if query_pos is not None: + query = query + query_pos + if key_pos is not None: + key = key + key_pos + + # Because the dataflow('key', 'query', 'value') of + # ``torch.nn.MultiheadAttention`` is (num_query, batch, + # embed_dims), We should adjust the shape of dataflow from + # batch_first (batch, num_query, embed_dims) to num_query_first + # (num_query ,batch, embed_dims), and recover ``attn_output`` + # from num_query_first to batch_first. + if self.batch_first: + query = query.transpose(0, 1) + key = key.transpose(0, 1) + value = value.transpose(0, 1) + + num_queries = query.shape[0] + bs = query.shape[1] + if self.training: + query = torch.cat(query.split(num_queries // self.group, dim=0), dim=1) + key = torch.cat(key.split(num_queries // self.group, dim=0), dim=1) + value = torch.cat(value.split(num_queries // self.group, dim=0), dim=1) + + out = self.attn(query=query, key=key, value=value, attn_mask=attn_mask, key_padding_mask=key_padding_mask)[0] + + if self.training: + out = torch.cat(out.split(bs, dim=1), dim=0) # shape + + if self.batch_first: + out = out.transpose(0, 1) + + return identity + self.dropout_layer(self.proj_drop(out)) diff --git a/mmcv/models/modules/multi_scale_deformable_attn_function.py b/mmcv/models/modules/multi_scale_deformable_attn_function.py new file mode 100644 index 0000000..77b0f31 --- /dev/null +++ b/mmcv/models/modules/multi_scale_deformable_attn_function.py @@ -0,0 +1,163 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. +# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +import torch +from torch.cuda.amp import custom_bwd, custom_fwd +from torch.autograd.function import Function, once_differentiable +from mmcv.utils import ext_loader +ext_module = ext_loader.load_ext( + '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) + + +class MultiScaleDeformableAttnFunction_fp16(Function): + + @staticmethod + @custom_fwd(cast_inputs=torch.float16) + def forward(ctx, value, value_spatial_shapes, value_level_start_index, + sampling_locations, attention_weights, im2col_step): + """GPU version of multi-scale deformable attention. + + Args: + value (Tensor): The value has shape + (bs, num_keys, mum_heads, embed_dims//num_heads) + value_spatial_shapes (Tensor): Spatial shape of + each feature map, has shape (num_levels, 2), + last dimension 2 represent (h, w) + sampling_locations (Tensor): The location of sampling points, + has shape + (bs ,num_queries, num_heads, num_levels, num_points, 2), + the last dimension 2 represent (x, y). + attention_weights (Tensor): The weight of sampling points used + when calculate the attention, has shape + (bs ,num_queries, num_heads, num_levels, num_points), + im2col_step (Tensor): The step used in image to column. 
+ + Returns: + Tensor: has shape (bs, num_queries, embed_dims) + """ + ctx.im2col_step = im2col_step + output = ext_module.ms_deform_attn_forward( + value, + value_spatial_shapes, + value_level_start_index, + sampling_locations, + attention_weights, + im2col_step=ctx.im2col_step) + ctx.save_for_backward(value, value_spatial_shapes, + value_level_start_index, sampling_locations, + attention_weights) + return output + + @staticmethod + @once_differentiable + @custom_bwd + def backward(ctx, grad_output): + """GPU version of backward function. + + Args: + grad_output (Tensor): Gradient + of output tensor of forward. + + Returns: + Tuple[Tensor]: Gradient + of input tensors in forward. + """ + value, value_spatial_shapes, value_level_start_index, \ + sampling_locations, attention_weights = ctx.saved_tensors + grad_value = torch.zeros_like(value) + grad_sampling_loc = torch.zeros_like(sampling_locations) + grad_attn_weight = torch.zeros_like(attention_weights) + + ext_module.ms_deform_attn_backward( + value, + value_spatial_shapes, + value_level_start_index, + sampling_locations, + attention_weights, + grad_output.contiguous(), + grad_value, + grad_sampling_loc, + grad_attn_weight, + im2col_step=ctx.im2col_step) + + return grad_value, None, None, \ + grad_sampling_loc, grad_attn_weight, None + + +class MultiScaleDeformableAttnFunction_fp32(Function): + + @staticmethod + @custom_fwd(cast_inputs=torch.float32) + def forward(ctx, value, value_spatial_shapes, value_level_start_index, + sampling_locations, attention_weights, im2col_step): + """GPU version of multi-scale deformable attention. + + Args: + value (Tensor): The value has shape + (bs, num_keys, mum_heads, embed_dims//num_heads) + value_spatial_shapes (Tensor): Spatial shape of + each feature map, has shape (num_levels, 2), + last dimension 2 represent (h, w) + sampling_locations (Tensor): The location of sampling points, + has shape + (bs ,num_queries, num_heads, num_levels, num_points, 2), + the last dimension 2 represent (x, y). + attention_weights (Tensor): The weight of sampling points used + when calculate the attention, has shape + (bs ,num_queries, num_heads, num_levels, num_points), + im2col_step (Tensor): The step used in image to column. + + Returns: + Tensor: has shape (bs, num_queries, embed_dims) + """ + + ctx.im2col_step = im2col_step + output = ext_module.ms_deform_attn_forward( + value, + value_spatial_shapes, + value_level_start_index, + sampling_locations, + attention_weights, + im2col_step=ctx.im2col_step) + ctx.save_for_backward(value, value_spatial_shapes, + value_level_start_index, sampling_locations, + attention_weights) + return output + + @staticmethod + @once_differentiable + @custom_bwd + def backward(ctx, grad_output): + """GPU version of backward function. + + Args: + grad_output (Tensor): Gradient + of output tensor of forward. + + Returns: + Tuple[Tensor]: Gradient + of input tensors in forward. 
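+        Note:
+            The fp16 and fp32 variants differ only in the dtype passed to
+            ``custom_fwd(cast_inputs=...)``; both dispatch to the same
+            ``ms_deform_attn_forward`` / ``ms_deform_attn_backward`` kernels.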
+ """ + value, value_spatial_shapes, value_level_start_index, \ + sampling_locations, attention_weights = ctx.saved_tensors + grad_value = torch.zeros_like(value) + grad_sampling_loc = torch.zeros_like(sampling_locations) + grad_attn_weight = torch.zeros_like(attention_weights) + + ext_module.ms_deform_attn_backward( + value, + value_spatial_shapes, + value_level_start_index, + sampling_locations, + attention_weights, + grad_output.contiguous(), + grad_value, + grad_sampling_loc, + grad_attn_weight, + im2col_step=ctx.im2col_step) + + return grad_value, None, None, \ + grad_sampling_loc, grad_attn_weight, None diff --git a/mmcv/models/modules/spatial_cross_attention.py b/mmcv/models/modules/spatial_cross_attention.py new file mode 100644 index 0000000..e0b7587 --- /dev/null +++ b/mmcv/models/modules/spatial_cross_attention.py @@ -0,0 +1,398 @@ + +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. +# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +from mmcv.ops.multi_scale_deform_attn import multi_scale_deformable_attn_pytorch +import warnings +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.models.utils import xavier_init, constant_init +from mmcv.models.bricks.registry import (ATTENTION, + TRANSFORMER_LAYER, + TRANSFORMER_LAYER_SEQUENCE) +from mmcv.models.bricks.transformer import build_attention +import math +from mmcv.utils import force_fp32, auto_fp16 + +from mmcv.models.backbones.base_module import BaseModule, ModuleList, Sequential + +from mmcv.utils import ext_loader +from .multi_scale_deformable_attn_function import MultiScaleDeformableAttnFunction_fp32, \ + MultiScaleDeformableAttnFunction_fp16 +ext_module = ext_loader.load_ext( + '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) + + +@ATTENTION.register_module() +class SpatialCrossAttention(BaseModule): + """An attention module used in BEVFormer. + Args: + embed_dims (int): The embedding dimension of Attention. + Default: 256. + num_cams (int): The number of cameras + dropout (float): A Dropout layer on `inp_residual`. + Default: 0.. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + deformable_attention: (dict): The config for the deformable attention used in SCA. + """ + + def __init__(self, + embed_dims=256, + num_cams=6, + pc_range=None, + dropout=0.1, + init_cfg=None, + batch_first=False, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=256, + num_levels=4), + **kwargs + ): + super(SpatialCrossAttention, self).__init__(init_cfg) + + self.init_cfg = init_cfg + self.dropout = nn.Dropout(dropout) + self.pc_range = pc_range + self.fp16_enabled = False + self.deformable_attention = build_attention(deformable_attention) + self.embed_dims = embed_dims + self.num_cams = num_cams + self.output_proj = nn.Linear(embed_dims, embed_dims) + self.batch_first = batch_first + self.init_weight() + + def init_weight(self): + """Default initialization for Parameters of Module.""" + xavier_init(self.output_proj, distribution='uniform', bias=0.) 
+ + @force_fp32(apply_to=('query', 'key', 'value', 'query_pos', 'reference_points_cam')) + def forward(self, + query, + key, + value, + residual=None, + query_pos=None, + key_padding_mask=None, + reference_points=None, + spatial_shapes=None, + reference_points_cam=None, + bev_mask=None, + level_start_index=None, + flag='encoder', + **kwargs): + """Forward Function of Detr3DCrossAtten. + Args: + query (Tensor): Query of Transformer with shape + (num_query, bs, embed_dims). + key (Tensor): The key tensor with shape + `(num_key, bs, embed_dims)`. + value (Tensor): The value tensor with shape + `(num_key, bs, embed_dims)`. (B, N, C, H, W) + residual (Tensor): The tensor used for addition, with the + same shape as `x`. Default None. If None, `x` will be used. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + key_pos (Tensor): The positional encoding for `key`. Default + None. + reference_points (Tensor): The normalized reference + points with shape (bs, num_query, 4), + all elements is range in [0, 1], top-left (0,0), + bottom-right (1, 1), including padding area. + or (N, Length_{query}, num_levels, 4), add + additional two dimensions is (w, h) to + form reference boxes. + key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_key]. + spatial_shapes (Tensor): Spatial shape of features in + different level. With shape (num_levels, 2), + last dimension represent (h, w). + level_start_index (Tensor): The start index of each level. + A tensor has shape (num_levels) and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + Returns: + Tensor: forwarded results with shape [num_query, bs, embed_dims]. + """ + + if key is None: + key = query + if value is None: + value = key + + if residual is None: + inp_residual = query + slots = torch.zeros_like(query) + if query_pos is not None: + query = query + query_pos + + bs, num_query, _ = query.size() + + D = reference_points_cam.size(3) + indexes = [] + for i, mask_per_img in enumerate(bev_mask): + index_query_per_img = mask_per_img[0].sum(-1).nonzero().squeeze(-1) + indexes.append(index_query_per_img) + max_len = max([len(each) for each in indexes]) + + # each camera only interacts with its corresponding BEV queries. This step can greatly save GPU memory. 
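+        # Rebatching sketch: `max_len` is the largest number of BEV queries
+        # any single camera sees. The padded buffers below have shape
+        # (bs, num_cams, max_len, embed_dims) for the queries and
+        # (bs, num_cams, max_len, D, 2) for their projected 2D reference
+        # points; slots a camera cannot see remain zero-padded.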
+ queries_rebatch = query.new_zeros( + [bs, self.num_cams, max_len, self.embed_dims]) + reference_points_rebatch = reference_points_cam.new_zeros( + [bs, self.num_cams, max_len, D, 2]) + + for j in range(bs): + for i, reference_points_per_img in enumerate(reference_points_cam): + index_query_per_img = indexes[i] + queries_rebatch[j, i, :len(index_query_per_img)] = query[j, index_query_per_img] + reference_points_rebatch[j, i, :len(index_query_per_img)] = reference_points_per_img[j, index_query_per_img] + + num_cams, l, bs, embed_dims = key.shape + + key = key.permute(2, 0, 1, 3).reshape( + bs * self.num_cams, l, self.embed_dims) + value = value.permute(2, 0, 1, 3).reshape( + bs * self.num_cams, l, self.embed_dims) + + queries = self.deformable_attention(query=queries_rebatch.view(bs*self.num_cams, max_len, self.embed_dims), key=key, value=value, + reference_points=reference_points_rebatch.view(bs*self.num_cams, max_len, D, 2), spatial_shapes=spatial_shapes, + level_start_index=level_start_index).view(bs, self.num_cams, max_len, self.embed_dims) + for j in range(bs): + for i, index_query_per_img in enumerate(indexes): + slots[j, index_query_per_img] += queries[j, i, :len(index_query_per_img)] + + count = bev_mask.sum(-1) > 0 + count = count.permute(1, 2, 0).sum(-1) + count = torch.clamp(count, min=1.0) + slots = slots / count[..., None] + slots = self.output_proj(slots) + + return self.dropout(slots) + inp_residual + + +@ATTENTION.register_module() +class MSDeformableAttention3D(BaseModule): + """An attention module used in BEVFormer based on Deformable-Detr. + `Deformable DETR: Deformable Transformers for End-to-End Object Detection. + `_. + Args: + embed_dims (int): The embedding dimension of Attention. + Default: 256. + num_heads (int): Parallel attention heads. Default: 64. + num_levels (int): The number of feature map used in + Attention. Default: 4. + num_points (int): The number of sampling points for + each query in each head. Default: 4. + im2col_step (int): The step used in image_to_column. + Default: 64. + dropout (float): A Dropout layer on `inp_identity`. + Default: 0.1. + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dim) + or (n, batch, embed_dim). Default to False. + norm_cfg (dict): Config dict for normalization layer. + Default: None. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. 
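+
+    Example::
+
+        >>> # A minimal sketch with assumed shapes (not from the paper):
+        >>> attn = MSDeformableAttention3D(embed_dims=256, num_levels=1)
+        >>> query = torch.rand(6, 900, 256)       # (bs, num_query, C)
+        >>> value = torch.rand(6, 64 * 64, 256)   # one 64x64 feature level
+        >>> shapes = torch.tensor([[64, 64]])
+        >>> ref = torch.rand(6, 900, 4, 2)        # 4 Z-anchors per query
+        >>> out = attn(query, value=value, reference_points=ref,
+        ...            spatial_shapes=shapes,
+        ...            level_start_index=torch.tensor([0]))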
+ """ + + def __init__(self, + embed_dims=256, + num_heads=8, + num_levels=4, + num_points=8, + im2col_step=64, + dropout=0.1, + batch_first=True, + norm_cfg=None, + init_cfg=None): + super().__init__(init_cfg) + if embed_dims % num_heads != 0: + raise ValueError(f'embed_dims must be divisible by num_heads, ' + f'but got {embed_dims} and {num_heads}') + dim_per_head = embed_dims // num_heads + self.norm_cfg = norm_cfg + self.batch_first = batch_first + self.output_proj = None + self.fp16_enabled = False + + # you'd better set dim_per_head to a power of 2 + # which is more efficient in the CUDA implementation + def _is_power_of_2(n): + if (not isinstance(n, int)) or (n < 0): + raise ValueError( + 'invalid input for _is_power_of_2: {} (type: {})'.format( + n, type(n))) + return (n & (n - 1) == 0) and n != 0 + + if not _is_power_of_2(dim_per_head): + warnings.warn( + "You'd better set embed_dims in " + 'MultiScaleDeformAttention to make ' + 'the dimension of each attention head a power of 2 ' + 'which is more efficient in our CUDA implementation.') + + self.im2col_step = im2col_step + self.embed_dims = embed_dims + self.num_levels = num_levels + self.num_heads = num_heads + self.num_points = num_points + self.sampling_offsets = nn.Linear( + embed_dims, num_heads * num_levels * num_points * 2) + self.attention_weights = nn.Linear(embed_dims, + num_heads * num_levels * num_points) + self.value_proj = nn.Linear(embed_dims, embed_dims) + + self.init_weights() + + def init_weights(self): + """Default initialization for Parameters of Module.""" + constant_init(self.sampling_offsets, 0.) + thetas = torch.arange( + self.num_heads, + dtype=torch.float32) * (2.0 * math.pi / self.num_heads) + grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) + grid_init = (grid_init / + grid_init.abs().max(-1, keepdim=True)[0]).view( + self.num_heads, 1, 1, + 2).repeat(1, self.num_levels, self.num_points, 1) + for i in range(self.num_points): + grid_init[:, :, i, :] *= i + 1 + + self.sampling_offsets.bias.data = grid_init.view(-1) + constant_init(self.attention_weights, val=0., bias=0.) + xavier_init(self.value_proj, distribution='uniform', bias=0.) + xavier_init(self.output_proj, distribution='uniform', bias=0.) + self._is_init = True + + def forward(self, + query, + key=None, + value=None, + identity=None, + query_pos=None, + key_padding_mask=None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + **kwargs): + """Forward Function of MultiScaleDeformAttention. + Args: + query (Tensor): Query of Transformer with shape + ( bs, num_query, embed_dims). + key (Tensor): The key tensor with shape + `(bs, num_key, embed_dims)`. + value (Tensor): The value tensor with shape + `(bs, num_key, embed_dims)`. + identity (Tensor): The tensor used for addition, with the + same shape as `query`. Default None. If None, + `query` will be used. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + key_pos (Tensor): The positional encoding for `key`. Default + None. + reference_points (Tensor): The normalized reference + points with shape (bs, num_query, num_levels, 2), + all elements is range in [0, 1], top-left (0,0), + bottom-right (1, 1), including padding area. + or (N, Length_{query}, num_levels, 4), add + additional two dimensions is (w, h) to + form reference boxes. + key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_key]. + spatial_shapes (Tensor): Spatial shape of features in + different levels. 
With shape (num_levels, 2),
+                last dimension represents (h, w).
+            level_start_index (Tensor): The start index of each level.
+                A tensor has shape ``(num_levels, )`` and can be represented
+                as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].
+        Returns:
+            Tensor: forwarded results with shape [bs, num_query, embed_dims]
+                when `batch_first` is True, otherwise
+                [num_query, bs, embed_dims].
+        """
+
+        if value is None:
+            value = query
+        if identity is None:
+            identity = query
+        if query_pos is not None:
+            query = query + query_pos
+
+        if not self.batch_first:
+            # change to (bs, num_query, embed_dims)
+            query = query.permute(1, 0, 2)
+            value = value.permute(1, 0, 2)
+
+        bs, num_query, _ = query.shape
+        bs, num_value, _ = value.shape
+        assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value
+
+        value = self.value_proj(value)
+        if key_padding_mask is not None:
+            value = value.masked_fill(key_padding_mask[..., None], 0.0)
+        value = value.view(bs, num_value, self.num_heads, -1)
+        sampling_offsets = self.sampling_offsets(query).view(
+            bs, num_query, self.num_heads, self.num_levels, self.num_points, 2)
+        attention_weights = self.attention_weights(query).view(
+            bs, num_query, self.num_heads, self.num_levels * self.num_points)
+
+        attention_weights = attention_weights.softmax(-1)
+
+        attention_weights = attention_weights.view(bs, num_query,
+                                                   self.num_heads,
+                                                   self.num_levels,
+                                                   self.num_points)
+
+        if reference_points.shape[-1] == 2:
+            """
+            Each BEV query owns `num_Z_anchors` anchors at different heights
+            in 3D space. After projecting, each BEV query has `num_Z_anchors`
+            reference points in each 2D image.
+            For each reference point, we sample `num_points` sampling points.
+            Overall, each query therefore has `num_points * num_Z_anchors`
+            sampling points.
+            """
+            offset_normalizer = torch.stack(
+                [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
+
+            bs, num_query, num_Z_anchors, xy = reference_points.shape
+            reference_points = reference_points[:, :, None, None, None, :, :]
+            sampling_offsets = sampling_offsets / \
+                offset_normalizer[None, None, None, :, None, :]
+            bs, num_query, num_heads, num_levels, num_all_points, xy = sampling_offsets.shape
+            sampling_offsets = sampling_offsets.view(
+                bs, num_query, num_heads, num_levels, num_all_points // num_Z_anchors, num_Z_anchors, xy)
+            sampling_locations = reference_points + sampling_offsets
+            bs, num_query, num_heads, num_levels, num_points, num_Z_anchors, xy = sampling_locations.shape
+            assert num_all_points == num_points * num_Z_anchors
+
+            sampling_locations = sampling_locations.view(
+                bs, num_query, num_heads, num_levels, num_all_points, xy)
+
+        elif reference_points.shape[-1] == 4:
+            # 4-dim reference boxes are not supported by this 3D variant.
+            assert False
+        else:
+            raise ValueError(
+                f'Last dim of reference_points must be'
+                f' 2 or 4, but get {reference_points.shape[-1]} instead.')
+
+        # sampling_locations.shape: bs, num_query, num_heads, num_levels, num_all_points, 2
+        # attention_weights.shape: bs, num_query, num_heads, num_levels, num_all_points
+
+        if torch.cuda.is_available() and value.is_cuda:
+            # fp16 deformable attention is numerically unstable, so the fp32
+            # CUDA kernel is used for both dtypes here.
+            if value.dtype == torch.float16:
+                MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32
+            else:
+                MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32
+            output = MultiScaleDeformableAttnFunction.apply(
+                value, spatial_shapes, level_start_index, sampling_locations,
+                attention_weights, self.im2col_step)
+        else:
+            output = multi_scale_deformable_attn_pytorch(
+                value, spatial_shapes, sampling_locations, attention_weights)
+        if not self.batch_first:
+            output = output.permute(1, 0, 2)
+
+        return output
diff --git a/mmcv/models/modules/temporal_self_attention.py b/mmcv/models/modules/temporal_self_attention.py
new file mode 100644
index 0000000..6de0020
--- /dev/null
+++ b/mmcv/models/modules/temporal_self_attention.py
@@ -0,0 +1,269 @@
+# ---------------------------------------------
+# Copyright (c) OpenMMLab. All rights reserved.
+# ---------------------------------------------
+# Modified by Zhiqi Li
+# ---------------------------------------------
+
+from .multi_scale_deformable_attn_function import MultiScaleDeformableAttnFunction_fp32
+from mmcv.ops.multi_scale_deform_attn import multi_scale_deformable_attn_pytorch
+import warnings
+import torch
+import torch.nn as nn
+from mmcv.models.utils import xavier_init, constant_init
+from mmcv.models.bricks.registry import ATTENTION
+import math
+from mmcv.models.backbones.base_module import BaseModule, ModuleList, Sequential
+from mmcv.utils import (ConfigDict, build_from_cfg, deprecated_api_warning,
+                        to_2tuple)
+
+from mmcv.utils import ext_loader
+ext_module = ext_loader.load_ext(
+    '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward'])
+
+
+@ATTENTION.register_module()
+class TemporalSelfAttention(BaseModule):
+    """An attention module used in BEVFormer based on Deformable-DETR.
+
+    `Deformable DETR: Deformable Transformers for End-to-End Object Detection.
+    <https://arxiv.org/abs/2010.04159>`_.
+
+    Args:
+        embed_dims (int): The embedding dimension of Attention.
+            Default: 256.
+        num_heads (int): Parallel attention heads. Default: 8.
+        num_levels (int): The number of feature maps used in
+            Attention. Default: 4.
+        num_points (int): The number of sampling points for
+            each query in each head. Default: 4.
+        im2col_step (int): The step used in image_to_column.
+            Default: 64.
+        dropout (float): A Dropout layer on `inp_identity`.
+            Default: 0.1.
+        batch_first (bool): Key, Query and Value are shape of
+            (batch, n, embed_dim)
+            or (n, batch, embed_dim). Default to True.
+        norm_cfg (dict): Config dict for normalization layer.
+            Default: None.
+        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
+            Default: None.
+        num_bev_queue (int): In this version, we only use one history BEV
+            and one current BEV, so the length of the BEV queue is 2.
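+
+    Example::
+
+        >>> # Minimal sketch with assumed shapes; when `value` is None the
+        >>> # current queries are stacked in place of a history BEV:
+        >>> attn = TemporalSelfAttention(embed_dims=256, num_levels=1)
+        >>> bev = torch.rand(2, 50 * 50, 256)       # (bs, bev_h*bev_w, C)
+        >>> ref = torch.rand(2 * 2, 50 * 50, 1, 2)  # (bs*num_bev_queue, ...)
+        >>> out = attn(bev, reference_points=ref,
+        ...            spatial_shapes=torch.tensor([[50, 50]]),
+        ...            level_start_index=torch.tensor([0]))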
+ """ + + def __init__(self, + embed_dims=256, + num_heads=8, + num_levels=4, + num_points=4, + num_bev_queue=2, + im2col_step=64, + dropout=0.1, + batch_first=True, + norm_cfg=None, + init_cfg=None): + + super().__init__(init_cfg) + if embed_dims % num_heads != 0: + raise ValueError(f'embed_dims must be divisible by num_heads, ' + f'but got {embed_dims} and {num_heads}') + dim_per_head = embed_dims // num_heads + self.norm_cfg = norm_cfg + self.dropout = nn.Dropout(dropout) + self.batch_first = batch_first + self.fp16_enabled = False + + # you'd better set dim_per_head to a power of 2 + # which is more efficient in the CUDA implementation + def _is_power_of_2(n): + if (not isinstance(n, int)) or (n < 0): + raise ValueError( + 'invalid input for _is_power_of_2: {} (type: {})'.format( + n, type(n))) + return (n & (n - 1) == 0) and n != 0 + + if not _is_power_of_2(dim_per_head): + warnings.warn( + "You'd better set embed_dims in " + 'MultiScaleDeformAttention to make ' + 'the dimension of each attention head a power of 2 ' + 'which is more efficient in our CUDA implementation.') + + self.im2col_step = im2col_step + self.embed_dims = embed_dims + self.num_levels = num_levels + self.num_heads = num_heads + self.num_points = num_points + self.num_bev_queue = num_bev_queue + self.sampling_offsets = nn.Linear( + embed_dims*self.num_bev_queue, num_bev_queue*num_heads * num_levels * num_points * 2) + self.attention_weights = nn.Linear(embed_dims*self.num_bev_queue, + num_bev_queue*num_heads * num_levels * num_points) + self.value_proj = nn.Linear(embed_dims, embed_dims) + self.output_proj = nn.Linear(embed_dims, embed_dims) + self.init_weights() + + def init_weights(self): + """Default initialization for Parameters of Module.""" + constant_init(self.sampling_offsets, 0.) + thetas = torch.arange( + self.num_heads, + dtype=torch.float32) * (2.0 * math.pi / self.num_heads) + grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) + grid_init = (grid_init / + grid_init.abs().max(-1, keepdim=True)[0]).view( + self.num_heads, 1, 1, + 2).repeat(1, self.num_levels*self.num_bev_queue, self.num_points, 1) + + for i in range(self.num_points): + grid_init[:, :, i, :] *= i + 1 + + self.sampling_offsets.bias.data = grid_init.view(-1) + constant_init(self.attention_weights, val=0., bias=0.) + xavier_init(self.value_proj, distribution='uniform', bias=0.) + xavier_init(self.output_proj, distribution='uniform', bias=0.) + self._is_init = True + + def forward(self, + query, + key=None, + value=None, + identity=None, + query_pos=None, + key_padding_mask=None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + flag='decoder', + + **kwargs): + """Forward Function of MultiScaleDeformAttention. + + Args: + query (Tensor): Query of Transformer with shape + (num_query, bs, embed_dims). + key (Tensor): The key tensor with shape + `(num_key, bs, embed_dims)`. + value (Tensor): The value tensor with shape + `(num_key, bs, embed_dims)`. + identity (Tensor): The tensor used for addition, with the + same shape as `query`. Default None. If None, + `query` will be used. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + key_pos (Tensor): The positional encoding for `key`. Default + None. + reference_points (Tensor): The normalized reference + points with shape (bs, num_query, num_levels, 2), + all elements is range in [0, 1], top-left (0,0), + bottom-right (1, 1), including padding area. 
+ or (N, Length_{query}, num_levels, 4), add + additional two dimensions is (w, h) to + form reference boxes. + key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_key]. + spatial_shapes (Tensor): Spatial shape of features in + different levels. With shape (num_levels, 2), + last dimension represents (h, w). + level_start_index (Tensor): The start index of each level. + A tensor has shape ``(num_levels, )`` and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + + Returns: + Tensor: forwarded results with shape [num_query, bs, embed_dims]. + """ + + if value is None: + assert self.batch_first + bs, len_bev, c = query.shape + value = torch.stack([query, query], 1).reshape(bs*2, len_bev, c) + + if identity is None: + identity = query + if query_pos is not None: + query = query + query_pos + if not self.batch_first: + # change to (bs, num_query ,embed_dims) + query = query.permute(1, 0, 2) + value = value.permute(1, 0, 2) + bs, num_query, embed_dims = query.shape + _, num_value, _ = value.shape + assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value + assert self.num_bev_queue == 2 + + query = torch.cat([value[:bs], query], -1) + value = self.value_proj(value) + + if key_padding_mask is not None: + value = value.masked_fill(key_padding_mask[..., None], 0.0) + + value = value.reshape(bs*self.num_bev_queue, + num_value, self.num_heads, -1) + + sampling_offsets = self.sampling_offsets(query) + sampling_offsets = sampling_offsets.view( + bs, num_query, self.num_heads, self.num_bev_queue, self.num_levels, self.num_points, 2) + attention_weights = self.attention_weights(query).view( + bs, num_query, self.num_heads, self.num_bev_queue, self.num_levels * self.num_points) + attention_weights = attention_weights.softmax(-1) + + attention_weights = attention_weights.view(bs, num_query, + self.num_heads, + self.num_bev_queue, + self.num_levels, + self.num_points) + + attention_weights = attention_weights.permute(0, 3, 1, 2, 4, 5)\ + .reshape(bs*self.num_bev_queue, num_query, self.num_heads, self.num_levels, self.num_points).contiguous() + sampling_offsets = sampling_offsets.permute(0, 3, 1, 2, 4, 5, 6)\ + .reshape(bs*self.num_bev_queue, num_query, self.num_heads, self.num_levels, self.num_points, 2) + + if reference_points.shape[-1] == 2: + offset_normalizer = torch.stack( + [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) + sampling_locations = reference_points[:, :, None, :, None, :] \ + + sampling_offsets \ + / offset_normalizer[None, None, None, :, None, :] + + elif reference_points.shape[-1] == 4: + sampling_locations = reference_points[:, :, None, :, None, :2] \ + + sampling_offsets / self.num_points \ + * reference_points[:, :, None, :, None, 2:] \ + * 0.5 + else: + raise ValueError( + f'Last dim of reference_points must be' + f' 2 or 4, but get {reference_points.shape[-1]} instead.') + if torch.cuda.is_available() and value.is_cuda: + + # using fp16 deformable attention is unstable because it performs many sum operations + if value.dtype == torch.float16: + MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32 + else: + MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32 + output = MultiScaleDeformableAttnFunction.apply( + value, spatial_shapes, level_start_index, sampling_locations, + attention_weights, self.im2col_step) + else: + + output = multi_scale_deformable_attn_pytorch( + value, spatial_shapes, sampling_locations, attention_weights) + + # output shape (bs*num_bev_queue, num_query, embed_dims) + 
# (bs*num_bev_queue, num_query, embed_dims)-> (num_query, embed_dims, bs*num_bev_queue) + output = output.permute(1, 2, 0) + + # fuse history value and current value + # (num_query, embed_dims, bs*num_bev_queue)-> (num_query, embed_dims, bs, num_bev_queue) + output = output.view(num_query, embed_dims, bs, self.num_bev_queue) + output = output.mean(-1) + + # (num_query, embed_dims, bs)-> (bs, num_query, embed_dims) + output = output.permute(2, 0, 1) + + output = self.output_proj(output) + + if not self.batch_first: + output = output.permute(1, 0, 2) + + return self.dropout(output) + identity diff --git a/mmcv/models/modules/transformer.py b/mmcv/models/modules/transformer.py new file mode 100644 index 0000000..8013048 --- /dev/null +++ b/mmcv/models/modules/transformer.py @@ -0,0 +1,632 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. +# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +import numpy as np +import torch +import torch.nn as nn +from mmcv.models.utils import xavier_init +from mmcv.models.bricks.transformer import build_transformer_layer_sequence +from mmcv.models.backbones.base_module import BaseModule + +from mmcv.models.utils.builder import TRANSFORMER +from torch.nn.init import normal_ +from torchvision.transforms.functional import rotate +from .temporal_self_attention import TemporalSelfAttention +from .spatial_cross_attention import MSDeformableAttention3D +from .decoder import CustomMSDeformableAttention +from mmcv.utils import force_fp32, auto_fp16 +from mmcv.models.bricks.registry import ATTENTION +from mmcv.models.bricks.transformer import POSITIONAL_ENCODING, MultiheadAttention + +@TRANSFORMER.register_module() +class UniADPerceptionTransformer(BaseModule): + """Implements the Detr3D transformer. + Args: + as_two_stage (bool): Generate query from encoder features. + Default: False. + num_feature_levels (int): Number of feature maps from FPN: + Default: 4. + two_stage_num_proposals (int): Number of proposals when set + `as_two_stage` as True. Default: 300. 
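+
+    Example::
+
+        >>> # A hedged config sketch; the encoder/decoder type names below
+        >>> # are illustrative assumptions, each needing its own full cfg:
+        >>> transformer_cfg = dict(
+        ...     type='UniADPerceptionTransformer',
+        ...     embed_dims=256,
+        ...     num_cams=6,
+        ...     rotate_prev_bev=True,
+        ...     use_shift=True,
+        ...     use_can_bus=True,
+        ...     encoder=dict(type='BEVFormerEncoder'),
+        ...     decoder=dict(type='DetectionTransformerDecoder'))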
+ """ + + def __init__(self, + num_feature_levels=4, + num_cams=6, + two_stage_num_proposals=300, + encoder=None, + decoder=None, + embed_dims=256, + rotate_prev_bev=True, + use_shift=True, + use_can_bus=True, + can_bus_norm=True, + use_cams_embeds=True, + rotate_center=[100, 100], + **kwargs): + super(UniADPerceptionTransformer, self).__init__(**kwargs) + self.encoder = build_transformer_layer_sequence(encoder) + self.decoder = build_transformer_layer_sequence(decoder) + self.embed_dims = embed_dims + self.num_feature_levels = num_feature_levels + self.num_cams = num_cams + self.fp16_enabled = False + + self.rotate_prev_bev = rotate_prev_bev + self.use_shift = use_shift + self.use_can_bus = use_can_bus + self.can_bus_norm = can_bus_norm + self.use_cams_embeds = use_cams_embeds + + self.two_stage_num_proposals = two_stage_num_proposals + self.init_layers() + self.rotate_center = rotate_center + + def init_layers(self): + """Initialize layers of the Detr3DTransformer.""" + self.level_embeds = nn.Parameter(torch.Tensor( + self.num_feature_levels, self.embed_dims)) + self.cams_embeds = nn.Parameter( + torch.Tensor(self.num_cams, self.embed_dims)) + self.can_bus_mlp = nn.Sequential( + nn.Linear(18, self.embed_dims // 2), + nn.ReLU(inplace=True), + nn.Linear(self.embed_dims // 2, self.embed_dims), + nn.ReLU(inplace=True), + ) + if self.can_bus_norm: + self.can_bus_mlp.add_module('norm', nn.LayerNorm(self.embed_dims)) + + def init_weights(self): + """Initialize the transformer weights.""" + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + for m in self.modules(): + if isinstance(m, MSDeformableAttention3D) or isinstance(m, TemporalSelfAttention) \ + or isinstance(m, CustomMSDeformableAttention): + try: + m.init_weight() + except AttributeError: + m.init_weights() + normal_(self.level_embeds) + normal_(self.cams_embeds) + xavier_init(self.can_bus_mlp, distribution='uniform', bias=0.) + + @auto_fp16(apply_to=('mlvl_feats', 'bev_queries', 'prev_bev', 'bev_pos')) + def get_bev_features( + self, + mlvl_feats, + bev_queries, + bev_h, + bev_w, + grid_length=[0.512, 0.512], + bev_pos=None, + prev_bev=None, + img_metas=None): + """ + obtain bev features. 
+ """ + + bs = mlvl_feats[0].size(0) + bev_queries = bev_queries.unsqueeze(1).repeat(1, bs, 1) + bev_pos = bev_pos.flatten(2).permute(2, 0, 1) + # obtain rotation angle and shift with ego motion + delta_x = np.array([each['can_bus'][0] + for each in img_metas]) + delta_y = np.array([each['can_bus'][1] + for each in img_metas]) + ego_angle = np.array( + [each['can_bus'][-2] / np.pi * 180 for each in img_metas]) + grid_length_y = grid_length[0] + grid_length_x = grid_length[1] + translation_length = np.sqrt(delta_x ** 2 + delta_y ** 2) + translation_angle = np.arctan2(delta_y, delta_x) / np.pi * 180 + bev_angle = ego_angle - translation_angle + shift_y = translation_length * \ + np.cos(bev_angle / 180 * np.pi) / grid_length_y / bev_h + shift_x = translation_length * \ + np.sin(bev_angle / 180 * np.pi) / grid_length_x / bev_w + shift_y = shift_y * self.use_shift + shift_x = shift_x * self.use_shift + shift = bev_queries.new_tensor( + [shift_x, shift_y]).permute(1, 0) # xy, bs -> bs, xy + + if prev_bev is not None: + if prev_bev.shape[1] == bev_h * bev_w: + prev_bev = prev_bev.permute(1, 0, 2) + if self.rotate_prev_bev: + for i in range(bs): + rotation_angle = img_metas[i]['can_bus'][-1] + tmp_prev_bev = prev_bev[:, i].reshape( + bev_h, bev_w, -1).permute(2, 0, 1) + tmp_prev_bev = rotate(tmp_prev_bev, rotation_angle, + center=self.rotate_center) + tmp_prev_bev = tmp_prev_bev.permute(1, 2, 0).reshape( + bev_h * bev_w, 1, -1) + prev_bev[:, i] = tmp_prev_bev[:, 0] + + # add can bus signals + can_bus = bev_queries.new_tensor( + [each['can_bus'] for each in img_metas]) # [:, :] + can_bus = self.can_bus_mlp(can_bus)[None, :, :] + bev_queries = bev_queries + can_bus * self.use_can_bus + + feat_flatten = [] + spatial_shapes = [] + for lvl, feat in enumerate(mlvl_feats): + bs, num_cam, c, h, w = feat.shape + spatial_shape = (h, w) + feat = feat.flatten(3).permute(1, 0, 3, 2) + if self.use_cams_embeds: + feat = feat + self.cams_embeds[:, None, None, :].to(feat.dtype) + feat = feat + self.level_embeds[None, + None, lvl:lvl + 1, :].to(feat.dtype) + spatial_shapes.append(spatial_shape) + feat_flatten.append(feat) + + feat_flatten = torch.cat(feat_flatten, 2) + spatial_shapes = torch.as_tensor( + spatial_shapes, dtype=torch.long, device=bev_pos.device) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) + + feat_flatten = feat_flatten.permute( + 0, 2, 1, 3) # (num_cam, H*W, bs, embed_dims) + + bev_embed = self.encoder( + bev_queries, + feat_flatten, + feat_flatten, + bev_h=bev_h, + bev_w=bev_w, + bev_pos=bev_pos, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + prev_bev=prev_bev, + shift=shift, + img_metas=img_metas, + ) + + return bev_embed + + def get_states_and_refs( + self, + bev_embed, + object_query_embed, + bev_h, + bev_w, + reference_points, + reg_branches=None, + cls_branches=None, + img_metas=None + ): + bs = bev_embed.shape[1] + query_pos, query = torch.split( + object_query_embed, self.embed_dims, dim=1) + query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1) + query = query.unsqueeze(0).expand(bs, -1, -1) + + reference_points = reference_points.unsqueeze(0).expand(bs, -1, -1) + reference_points = reference_points.sigmoid() + + init_reference_out = reference_points + query = query.permute(1, 0, 2) + query_pos = query_pos.permute(1, 0, 2) + inter_states, inter_references = self.decoder( + query=query, + key=None, + value=bev_embed, + query_pos=query_pos, + reference_points=reference_points, + 
reg_branches=reg_branches, + cls_branches=cls_branches, + spatial_shapes=torch.tensor([[bev_h, bev_w]], device=query.device), + level_start_index=torch.tensor([0], device=query.device), + img_metas=img_metas + ) + inter_references_out = inter_references + + return inter_states, init_reference_out, inter_references_out + + +@ATTENTION.register_module() +class GroupFree3DMHA(MultiheadAttention): + """A warpper for torch.nn.MultiheadAttention for GroupFree3D. + + This module implements MultiheadAttention with identity connection, + and positional encoding used in DETR is also passed as input. + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. Same as + `nn.MultiheadAttention`. + attn_drop (float): A Dropout layer on attn_output_weights. Default 0.0. + proj_drop (float): A Dropout layer. Default 0.0. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dim) + or (n, batch, embed_dim). Default to False. + """ + + def __init__(self, + embed_dims, + num_heads, + attn_drop=0., + proj_drop=0., + dropout_layer=dict(type='DropOut', drop_prob=0.), + init_cfg=None, + batch_first=False, + **kwargs): + super().__init__(embed_dims, num_heads, attn_drop, proj_drop, + dropout_layer, init_cfg, batch_first, **kwargs) + + def forward(self, + query, + key, + value, + identity, + query_pos=None, + key_pos=None, + attn_mask=None, + key_padding_mask=None, + **kwargs): + """Forward function for `GroupFree3DMHA`. + + **kwargs allow passing a more general data flow when combining + with other operations in `transformerlayer`. + + Args: + query (Tensor): The input query with shape [num_queries, bs, + embed_dims]. Same in `nn.MultiheadAttention.forward`. + key (Tensor): The key tensor with shape [num_keys, bs, + embed_dims]. Same in `nn.MultiheadAttention.forward`. + If None, the ``query`` will be used. Defaults to None. + value (Tensor): The value tensor with same shape as `key`. + Same in `nn.MultiheadAttention.forward`. Defaults to None. + If None, the `key` will be used. + identity (Tensor): This tensor, with the same shape as x, + will be used for the identity link. + If None, `x` will be used. Defaults to None. + query_pos (Tensor): The positional encoding for query, with + the same shape as `x`. If not None, it will + be added to `x` before forward function. Defaults to None. + key_pos (Tensor): The positional encoding for `key`, with the + same shape as `key`. Defaults to None. If not None, it will + be added to `key` before forward function. If None, and + `query_pos` has the same shape as `key`, then `query_pos` + will be used for `key_pos`. Defaults to None. + attn_mask (Tensor): ByteTensor mask with shape [num_queries, + num_keys]. Same in `nn.MultiheadAttention.forward`. + Defaults to None. + key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys]. + Same in `nn.MultiheadAttention.forward`. Defaults to None. + + Returns: + Tensor: forwarded results with shape [num_queries, bs, embed_dims]. 
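+
+        Note:
+            Which positional encoding is added to ``value`` depends on
+            ``self.operation_name`` set by the enclosing transformer layer:
+            ``query_pos`` for 'self_attn' and ``key_pos`` for 'cross_attn'.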
+ """ + + if hasattr(self, 'operation_name'): + if self.operation_name == 'self_attn': + value = value + query_pos + elif self.operation_name == 'cross_attn': + value = value + key_pos + else: + raise NotImplementedError( + f'{self.__class__.name} ' + f"can't be used as {self.operation_name}") + else: + value = value + query_pos + + return super(GroupFree3DMHA, self).forward( + query=query, + key=key, + value=value, + identity=identity, + query_pos=query_pos, + key_pos=key_pos, + attn_mask=attn_mask, + key_padding_mask=key_padding_mask, + **kwargs) + + +@POSITIONAL_ENCODING.register_module() +class ConvBNPositionalEncoding(nn.Module): + """Absolute position embedding with Conv learning. + + Args: + input_channel (int): input features dim. + num_pos_feats (int): output position features dim. + Defaults to 288 to be consistent with seed features dim. + """ + + def __init__(self, input_channel, num_pos_feats=288): + super().__init__() + self.position_embedding_head = nn.Sequential( + nn.Conv1d(input_channel, num_pos_feats, kernel_size=1), + nn.BatchNorm1d(num_pos_feats), nn.ReLU(inplace=True), + nn.Conv1d(num_pos_feats, num_pos_feats, kernel_size=1)) + + def forward(self, xyz): + """Forward pass. + + Args: + xyz (Tensor): (B, N, 3) the coordinates to embed. + + Returns: + Tensor: (B, num_pos_feats, N) the embeded position features. + """ + xyz = xyz.permute(0, 2, 1) + position_embedding = self.position_embedding_head(xyz) + return position_embedding + +@TRANSFORMER.register_module() +class BEVFormerPerceptionTransformer(BaseModule): + """Implements the Detr3D transformer. + Args: + as_two_stage (bool): Generate query from encoder features. + Default: False. + num_feature_levels (int): Number of feature maps from FPN: + Default: 4. + two_stage_num_proposals (int): Number of proposals when set + `as_two_stage` as True. Default: 300. 
+ """ + + def __init__(self, + num_feature_levels=4, + num_cams=6, + two_stage_num_proposals=300, + encoder=None, + decoder=None, + embed_dims=256, + rotate_prev_bev=True, + use_shift=True, + use_can_bus=True, + can_bus_norm=True, + use_cams_embeds=True, + rotate_center=[100, 100], + **kwargs): + super(BEVFormerPerceptionTransformer, self).__init__(**kwargs) + self.encoder = build_transformer_layer_sequence(encoder) + self.decoder = build_transformer_layer_sequence(decoder) + self.embed_dims = embed_dims + self.num_feature_levels = num_feature_levels + self.num_cams = num_cams + self.fp16_enabled = False + + self.rotate_prev_bev = rotate_prev_bev + self.use_shift = use_shift + self.use_can_bus = use_can_bus + self.can_bus_norm = can_bus_norm + self.use_cams_embeds = use_cams_embeds + + self.two_stage_num_proposals = two_stage_num_proposals + self.init_layers() + self.rotate_center = rotate_center + + def init_layers(self): + """Initialize layers of the Detr3DTransformer.""" + self.level_embeds = nn.Parameter(torch.Tensor( + self.num_feature_levels, self.embed_dims)) + self.cams_embeds = nn.Parameter( + torch.Tensor(self.num_cams, self.embed_dims)) + self.reference_points = nn.Linear(self.embed_dims, 3) + self.can_bus_mlp = nn.Sequential( + nn.Linear(18, self.embed_dims // 2), + nn.ReLU(inplace=True), + nn.Linear(self.embed_dims // 2, self.embed_dims), + nn.ReLU(inplace=True), + ) + if self.can_bus_norm: + self.can_bus_mlp.add_module('norm', nn.LayerNorm(self.embed_dims)) + + def init_weights(self): + """Initialize the transformer weights.""" + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + for m in self.modules(): + if isinstance(m, MSDeformableAttention3D) or isinstance(m, TemporalSelfAttention) \ + or isinstance(m, CustomMSDeformableAttention): + try: + m.init_weight() + except AttributeError: + m.init_weights() + normal_(self.level_embeds) + normal_(self.cams_embeds) + xavier_init(self.reference_points, distribution='uniform', bias=0.) + xavier_init(self.can_bus_mlp, distribution='uniform', bias=0.) + + @auto_fp16(apply_to=('mlvl_feats', 'bev_queries', 'prev_bev', 'bev_pos')) + def get_bev_features( + self, + mlvl_feats, + bev_queries, + bev_h, + bev_w, + grid_length=[0.512, 0.512], + bev_pos=None, + prev_bev=None, + **kwargs): + """ + obtain bev features. 
+ """ + + bs = mlvl_feats[0].size(0) + bev_queries = bev_queries.unsqueeze(1).repeat(1, bs, 1) + bev_pos = bev_pos.flatten(2).permute(2, 0, 1) + + # obtain rotation angle and shift with ego motion + delta_x = np.array([each['can_bus'][0] + for each in kwargs['img_metas']]) + delta_y = np.array([each['can_bus'][1] + for each in kwargs['img_metas']]) + ego_angle = np.array( + [each['can_bus'][-2] / np.pi * 180 for each in kwargs['img_metas']]) + grid_length_y = grid_length[0] + grid_length_x = grid_length[1] + translation_length = np.sqrt(delta_x ** 2 + delta_y ** 2) + translation_angle = np.arctan2(delta_y, delta_x) / np.pi * 180 + bev_angle = ego_angle - translation_angle + shift_y = translation_length * \ + np.cos(bev_angle / 180 * np.pi) / grid_length_y / bev_h + shift_x = translation_length * \ + np.sin(bev_angle / 180 * np.pi) / grid_length_x / bev_w + shift_y = shift_y * self.use_shift + shift_x = shift_x * self.use_shift + shift = bev_queries.new_tensor( + [shift_x, shift_y]).permute(1, 0) # xy, bs -> bs, xy + + if prev_bev is not None: + if prev_bev.shape[1] == bev_h * bev_w: + prev_bev = prev_bev.permute(1, 0, 2) + if self.rotate_prev_bev: + for i in range(bs): + # num_prev_bev = prev_bev.size(1) + rotation_angle = kwargs['img_metas'][i]['can_bus'][-1] + tmp_prev_bev = prev_bev[:, i].reshape( + bev_h, bev_w, -1).permute(2, 0, 1) + tmp_prev_bev = rotate(tmp_prev_bev, rotation_angle, + center=self.rotate_center) + tmp_prev_bev = tmp_prev_bev.permute(1, 2, 0).reshape( + bev_h * bev_w, 1, -1) + prev_bev[:, i] = tmp_prev_bev[:, 0] + + # add can bus signals + can_bus = bev_queries.new_tensor( + [each['can_bus'] for each in kwargs['img_metas']]) # [:, :] + can_bus = self.can_bus_mlp(can_bus)[None, :, :] + bev_queries = bev_queries + can_bus * self.use_can_bus + + feat_flatten = [] + spatial_shapes = [] + for lvl, feat in enumerate(mlvl_feats): + bs, num_cam, c, h, w = feat.shape + spatial_shape = (h, w) + feat = feat.flatten(3).permute(1, 0, 3, 2) + if self.use_cams_embeds: + feat = feat + self.cams_embeds[:, None, None, :].to(feat.dtype) + feat = feat + self.level_embeds[None, + None, lvl:lvl + 1, :].to(feat.dtype) + spatial_shapes.append(spatial_shape) + feat_flatten.append(feat) + + feat_flatten = torch.cat(feat_flatten, 2) + spatial_shapes = torch.as_tensor( + spatial_shapes, dtype=torch.long, device=bev_pos.device) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) + + feat_flatten = feat_flatten.permute( + 0, 2, 1, 3) # (num_cam, H*W, bs, embed_dims) + + bev_embed = self.encoder( + bev_queries, + feat_flatten, + feat_flatten, + bev_h=bev_h, + bev_w=bev_w, + bev_pos=bev_pos, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + prev_bev=prev_bev, + shift=shift, + **kwargs + ) + + return bev_embed + + @auto_fp16(apply_to=('mlvl_feats', 'bev_queries', 'object_query_embed', 'prev_bev', 'bev_pos')) + def forward(self, + mlvl_feats, + bev_queries, + object_query_embed, + bev_h, + bev_w, + grid_length=[0.512, 0.512], + bev_pos=None, + reg_branches=None, + cls_branches=None, + prev_bev=None, + **kwargs): + """Forward function for `Detr3DTransformer`. + Args: + mlvl_feats (list(Tensor)): Input queries from + different level. Each element has shape + [bs, num_cams, embed_dims, h, w]. + bev_queries (Tensor): (bev_h*bev_w, c) + bev_pos (Tensor): (bs, embed_dims, bev_h, bev_w) + object_query_embed (Tensor): The query embedding for decoder, + with shape [num_query, c]. 
+ reg_branches (obj:`nn.ModuleList`): Regression heads for + feature maps from each decoder layer. Only would + be passed when `with_box_refine` is True. Default to None. + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + - bev_embed: BEV features + - inter_states: Outputs from decoder. If + return_intermediate_dec is True output has shape \ + (num_dec_layers, bs, num_query, embed_dims), else has \ + shape (1, bs, num_query, embed_dims). + - init_reference_out: The initial value of reference \ + points, has shape (bs, num_queries, 4). + - inter_references_out: The internal value of reference \ + points in decoder, has shape \ + (num_dec_layers, bs,num_query, embed_dims) + - enc_outputs_class: The classification score of \ + proposals generated from \ + encoder's feature maps, has shape \ + (batch, h*w, num_classes). \ + Only would be returned when `as_two_stage` is True, \ + otherwise None. + - enc_outputs_coord_unact: The regression results \ + generated from encoder's feature maps., has shape \ + (batch, h*w, 4). Only would \ + be returned when `as_two_stage` is True, \ + otherwise None. + """ + + bev_embed = self.get_bev_features( + mlvl_feats, + bev_queries, + bev_h, + bev_w, + grid_length=grid_length, + bev_pos=bev_pos, + prev_bev=prev_bev, + **kwargs) # bev_embed shape: bs, bev_h*bev_w, embed_dims + + bs = mlvl_feats[0].size(0) + query_pos, query = torch.split( + object_query_embed, self.embed_dims, dim=1) + query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1) + query = query.unsqueeze(0).expand(bs, -1, -1) + reference_points = self.reference_points(query_pos) + reference_points = reference_points.sigmoid() + init_reference_out = reference_points + + query = query.permute(1, 0, 2) + query_pos = query_pos.permute(1, 0, 2) + bev_embed = bev_embed.permute(1, 0, 2) + + inter_states, inter_references = self.decoder( + query=query, + key=None, + value=bev_embed, + query_pos=query_pos, + reference_points=reference_points, + reg_branches=reg_branches, + cls_branches=cls_branches, + spatial_shapes=torch.tensor([[bev_h, bev_w]], device=query.device), + level_start_index=torch.tensor([0], device=query.device), + **kwargs) + + inter_references_out = inter_references + + return bev_embed, inter_states, init_reference_out, inter_references_out diff --git a/mmcv/models/modules/transformerV2.py b/mmcv/models/modules/transformerV2.py new file mode 100644 index 0000000..41587de --- /dev/null +++ b/mmcv/models/modules/transformerV2.py @@ -0,0 +1,353 @@ +import torch +import torch.nn as nn +from mmcv.cnn import xavier_init +from mmcv.cnn.bricks.transformer import build_transformer_layer_sequence +from mmdet.models.utils.builder import TRANSFORMER +from torch.nn.init import normal_ +from mmcv.runner.base_module import BaseModule +from .temporal_self_attention import TemporalSelfAttention +from .spatial_cross_attention import MSDeformableAttention3D +from .decoder import CustomMSDeformableAttention +from mmcv.cnn import build_norm_layer, build_conv_layer +import torch.utils.checkpoint as checkpoint +from mmdet.models.backbones.resnet import Bottleneck, BasicBlock + + +class ResNetFusion(BaseModule): + def __init__(self, in_channels, out_channels, inter_channels, num_layer, norm_cfg=dict(type='SyncBN'), + with_cp=False): + super(ResNetFusion, self).__init__() + layers = [] + self.inter_channels = inter_channels + for i in range(num_layer): + if i == 0: + if inter_channels == in_channels: + layers.append(BasicBlock(in_channels, inter_channels, stride=1, norm_cfg=norm_cfg)) + 
else: + downsample = nn.Sequential( + build_conv_layer(None, in_channels, inter_channels, 3, stride=1, padding=1, dilation=1, + bias=False), + build_norm_layer(norm_cfg, inter_channels)[1]) + layers.append( + BasicBlock(in_channels, inter_channels, stride=1, norm_cfg=norm_cfg, downsample=downsample)) + else: + layers.append(BasicBlock(inter_channels, inter_channels, stride=1, norm_cfg=norm_cfg)) + self.layers = nn.Sequential(*layers) + self.layer_norm = nn.Sequential( + nn.Linear(inter_channels, out_channels), + nn.LayerNorm(out_channels)) + self.with_cp = with_cp + + def forward(self, x): + x = torch.cat(x, 1).contiguous() + # x should be [1, in_channels, bev_h, bev_w] + for lid, layer in enumerate(self.layers): + if self.with_cp and x.requires_grad: + x = checkpoint.checkpoint(layer, x) + else: + x = layer(x) + x = x.reshape(x.shape[0], x.shape[1], -1).permute(0, 2, 1) # nchw -> n(hw)c + x = self.layer_norm(x) + return x + + +@TRANSFORMER.register_module() +class PerceptionTransformerBEVEncoder(BaseModule): + def __init__(self, + num_feature_levels=4, + num_cams=6, + two_stage_num_proposals=300, + encoder=None, + embed_dims=256, + use_cams_embeds=True, + rotate_center=[100, 100], + **kwargs): + super(PerceptionTransformerBEVEncoder, self).__init__(**kwargs) + self.encoder = build_transformer_layer_sequence(encoder) + self.embed_dims = embed_dims + self.num_feature_levels = num_feature_levels + self.num_cams = num_cams + self.fp16_enabled = False + + self.use_cams_embeds = use_cams_embeds + + self.two_stage_num_proposals = two_stage_num_proposals + self.rotate_center = rotate_center + """Initialize layers of the Detr3DTransformer.""" + self.level_embeds = nn.Parameter(torch.Tensor(self.num_feature_levels, self.embed_dims)) + if self.use_cams_embeds: + self.cams_embeds = nn.Parameter(torch.Tensor(self.num_cams, self.embed_dims)) + + def init_weights(self): + """Initialize the transformer weights.""" + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + for m in self.modules(): + if isinstance(m, MSDeformableAttention3D) or isinstance(m, TemporalSelfAttention) \ + or isinstance(m, CustomMSDeformableAttention): + try: + m.init_weight() + except AttributeError: + m.init_weights() + normal_(self.level_embeds) + if self.use_cams_embeds: + normal_(self.cams_embeds) + + def forward(self, + mlvl_feats, + bev_queries, + bev_h, + bev_w, + grid_length=[0.512, 0.512], + bev_pos=None, + prev_bev=None, + **kwargs): + """ + obtain bev features. 
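+
+        When 'GlobalRotScaleTransImage_param' is present in the augmentation
+        info of ``img_metas``, the encoded BEV is additionally re-sampled
+        with ``grid_sample`` below so that it stays aligned with the
+        augmented ground truth.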
+ """ + bs = mlvl_feats[0].size(0) + bev_queries = bev_queries.unsqueeze(1).repeat(1, bs, 1) + bev_pos = bev_pos.flatten(2).permute(2, 0, 1) + + feat_flatten = [] + spatial_shapes = [] + for lvl, feat in enumerate(mlvl_feats): + bs, num_cam, c, h, w = feat.shape + spatial_shape = (h, w) + feat = feat.flatten(3).permute(1, 0, 3, 2) + if self.use_cams_embeds: + feat = feat + self.cams_embeds[:, None, None, :].to(feat.dtype) + feat = feat + self.level_embeds[None, None, lvl:lvl + 1, :].to(feat.dtype) + spatial_shapes.append(spatial_shape) + feat_flatten.append(feat) + + feat_flatten = torch.cat(feat_flatten, 2) + spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=bev_pos.device) + level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) + + feat_flatten = feat_flatten.permute(0, 2, 1, 3) # (num_cam, H*W, bs, embed_dims) + + bev_embed = self.encoder(bev_queries, + feat_flatten, + feat_flatten, + bev_h=bev_h, + bev_w=bev_w, + bev_pos=bev_pos, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + prev_bev=None, + shift=bev_queries.new_tensor([0, 0]).unsqueeze(0), + **kwargs) + # rotate current bev to final aligned + prev_bev = bev_embed + if 'aug_param' in kwargs['img_metas'][0] and 'GlobalRotScaleTransImage_param' in kwargs['img_metas'][0][ + 'aug_param']: + rot_angle, scale_ratio, flip_dx, flip_dy, bda_mat, only_gt = kwargs['img_metas'][0]['aug_param'][ + 'GlobalRotScaleTransImage_param'] + prev_bev = prev_bev.reshape(bs, bev_h, bev_w, -1).permute(0, 3, 1, 2) # bchw + if only_gt: + # rot angle + # prev_bev = torchvision.transforms.functional.rotate(prev_bev, -30, InterpolationMode.BILINEAR) + ref_y, ref_x = torch.meshgrid( + torch.linspace(0.5, bev_h - 0.5, bev_h, dtype=bev_queries.dtype, device=bev_queries.device), + torch.linspace(0.5, bev_w - 0.5, bev_w, dtype=bev_queries.dtype, device=bev_queries.device)) + ref_y = (ref_y / bev_h) + ref_x = (ref_x / bev_w) + grid = torch.stack((ref_x, ref_y), -1) + grid_shift = grid * 2.0 - 1.0 + grid_shift = grid_shift.unsqueeze(0).unsqueeze(-1) + # bda_mat = ( bda_mat[:2, :2] / scale_ratio).to(grid_shift).view(1, 1, 1, 2,2).repeat(grid_shift.shape[0], grid_shift.shape[1], grid_shift.shape[2], 1, 1) + bda_mat = bda_mat[:2, :2].to(grid_shift).view(1, 1, 1, 2, 2).repeat(grid_shift.shape[0], + grid_shift.shape[1], + grid_shift.shape[2], 1, 1) + grid_shift = torch.matmul(bda_mat, grid_shift).squeeze(-1) + # grid_shift = grid_shift / scale_ratio + prev_bev = torch.nn.functional.grid_sample(prev_bev, grid_shift, align_corners=False) + # if flip_dx: + # prev_bev = torch.flip(prev_bev, dims=[-1]) + # if flip_dy: + # prev_bev = torch.flip(prev_bev, dims=[-2]) + prev_bev = prev_bev.reshape(bs, -1, bev_h * bev_w) + prev_bev = prev_bev.permute(0, 2, 1) + return prev_bev + + +@TRANSFORMER.register_module() +class PerceptionTransformerV2(PerceptionTransformerBEVEncoder): + """Implements the Detr3D transformer. + Args: + as_two_stage (bool): Generate query from encoder features. + Default: False. + num_feature_levels (int): Number of feature maps from FPN: + Default: 4. + two_stage_num_proposals (int): Number of proposals when set + `as_two_stage` as True. Default: 300. 
+ """ + + def __init__(self, + num_feature_levels=4, + num_cams=6, + two_stage_num_proposals=300, + encoder=None, + embed_dims=256, + use_cams_embeds=True, + rotate_center=[100, 100], + frames=(0,), + decoder=None, + num_fusion=3, + inter_channels=None, + **kwargs): + super(PerceptionTransformerV2, self).__init__(num_feature_levels, num_cams, two_stage_num_proposals, encoder, + embed_dims, use_cams_embeds, rotate_center, + **kwargs) + self.decoder = build_transformer_layer_sequence(decoder) + """Initialize layers of the Detr3DTransformer.""" + self.reference_points = nn.Linear(self.embed_dims, 3) + self.frames = frames + if len(self.frames) > 1: + self.fusion = ResNetFusion(len(self.frames) * self.embed_dims, self.embed_dims, + inter_channels if inter_channels is not None else len( + self.frames) * self.embed_dims, + num_fusion) + + def init_weights(self): + """Initialize the transformer weights.""" + super().init_weights() + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + for m in self.modules(): + if isinstance(m, MSDeformableAttention3D) or isinstance(m, TemporalSelfAttention) \ + or isinstance(m, CustomMSDeformableAttention): + try: + m.init_weight() + except AttributeError: + m.init_weights() + xavier_init(self.reference_points, distribution='uniform', bias=0.) + + def get_bev_features( + self, + mlvl_feats, + bev_queries, + bev_h, + bev_w, + grid_length=[0.512, 0.512], + bev_pos=None, + prev_bev=None, + **kwargs): + return super().forward( + mlvl_feats, + bev_queries, + bev_h, + bev_w, + grid_length, + bev_pos, + prev_bev, + **kwargs + ) + + def forward(self, + mlvl_feats, + bev_queries, + object_query_embed, + bev_h, + bev_w, + grid_length=[0.512, 0.512], + bev_pos=None, + reg_branches=None, + cls_branches=None, + prev_bev=None, + **kwargs): + """Forward function for `Detr3DTransformer`. + Args: + mlvl_feats (list(Tensor)): Input queries from + different level. Each element has shape + [bs, num_cams, embed_dims, h, w]. + bev_queries (Tensor): (bev_h*bev_w, c) + bev_pos (Tensor): (bs, embed_dims, bev_h, bev_w) + object_query_embed (Tensor): The query embedding for decoder, + with shape [num_query, c]. + reg_branches (obj:`nn.ModuleList`): Regression heads for + feature maps from each decoder layer. Only would + be passed when `with_box_refine` is True. Default to None. + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + - bev_embed: BEV features + - inter_states: Outputs from decoder. If + return_intermediate_dec is True output has shape \ + (num_dec_layers, bs, num_query, embed_dims), else has \ + shape (1, bs, num_query, embed_dims). + - init_reference_out: The initial value of reference \ + points, has shape (bs, num_queries, 4). + - inter_references_out: The internal value of reference \ + points in decoder, has shape \ + (num_dec_layers, bs,num_query, embed_dims) + - enc_outputs_class: The classification score of \ + proposals generated from \ + encoder's feature maps, has shape \ + (batch, h*w, num_classes). \ + Only would be returned when `as_two_stage` is True, \ + otherwise None. + - enc_outputs_coord_unact: The regression results \ + generated from encoder's feature maps., has shape \ + (batch, h*w, 4). Only would \ + be returned when `as_two_stage` is True, \ + otherwise None. 
+ """ + bev_embed = self.get_bev_features( + mlvl_feats, + bev_queries, + bev_h, + bev_w, + grid_length=grid_length, + bev_pos=bev_pos, + prev_bev=None, + **kwargs) # bev_embed shape: bs, bev_h*bev_w, embed_dims + + if len(self.frames) > 1: + cur_ind = list(self.frames).index(0) + assert prev_bev[cur_ind] is None and len(prev_bev) == len(self.frames) + prev_bev[cur_ind] = bev_embed + + # fill prev frame feature + for i in range(1, cur_ind + 1): + if prev_bev[cur_ind - i] is None: + prev_bev[cur_ind - i] = prev_bev[cur_ind - i + 1].detach() + + # fill next frame feature + for i in range(cur_ind + 1, len(self.frames)): + if prev_bev[i] is None: + prev_bev[i] = prev_bev[i - 1].detach() + bev_embed = [x.reshape(x.shape[0], bev_h, bev_w, x.shape[-1]).permute(0, 3, 1, 2).contiguous() for x in + prev_bev] + bev_embed = self.fusion(bev_embed) + + bs = mlvl_feats[0].size(0) + query_pos, query = torch.split( + object_query_embed, self.embed_dims, dim=1) + query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1) + query = query.unsqueeze(0).expand(bs, -1, -1) + reference_points = self.reference_points(query_pos) + reference_points = reference_points.sigmoid() + init_reference_out = reference_points + + query = query.permute(1, 0, 2) + query_pos = query_pos.permute(1, 0, 2) + bev_embed = bev_embed.permute(1, 0, 2) + + inter_states, inter_references = self.decoder( + query=query, + key=None, + value=bev_embed, + query_pos=query_pos, + reference_points=reference_points, + reg_branches=reg_branches, + cls_branches=cls_branches, + spatial_shapes=torch.tensor([[bev_h, bev_w]], device=query.device), + level_start_index=torch.tensor([0], device=query.device), + **kwargs) + + inter_references_out = inter_references + + return bev_embed, inter_states, init_reference_out, inter_references_out diff --git a/mmcv/models/modules/vote_module.py b/mmcv/models/modules/vote_module.py new file mode 100644 index 0000000..0f7b5d1 --- /dev/null +++ b/mmcv/models/modules/vote_module.py @@ -0,0 +1,181 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv import is_tuple_of +from mmcv.models.bricks import ConvModule +from torch import nn as nn + +from mmcv.models.builder import build_loss + + +class VoteModule(nn.Module): + """Vote module. + + Generate votes from seed point features. + + Args: + in_channels (int): Number of channels of seed point features. + vote_per_seed (int): Number of votes generated from each seed point. + gt_per_seed (int): Number of ground truth votes generated + from each seed point. + num_points (int): Number of points to be used for voting. + conv_channels (tuple[int]): Out channels of vote + generating convolution. + conv_cfg (dict): Config of convolution. + Default: dict(type='Conv1d'). + norm_cfg (dict): Config of normalization. + Default: dict(type='BN1d'). + norm_feats (bool): Whether to normalize features. + Default: True. + with_res_feat (bool): Whether to predict residual features. + Default: True. + vote_xyz_range (list[float], None): The range of points translation. + vote_loss (dict): Config of vote loss. 
+ """ + + def __init__(self, + in_channels, + vote_per_seed=1, + gt_per_seed=3, + num_points=-1, + conv_channels=(16, 16), + conv_cfg=dict(type='Conv1d'), + norm_cfg=dict(type='BN1d'), + act_cfg=dict(type='ReLU'), + norm_feats=True, + with_res_feat=True, + vote_xyz_range=None, + vote_loss=None): + super().__init__() + self.in_channels = in_channels + self.vote_per_seed = vote_per_seed + self.gt_per_seed = gt_per_seed + self.num_points = num_points + self.norm_feats = norm_feats + self.with_res_feat = with_res_feat + + assert vote_xyz_range is None or is_tuple_of(vote_xyz_range, float) + self.vote_xyz_range = vote_xyz_range + + if vote_loss is not None: + self.vote_loss = build_loss(vote_loss) + + prev_channels = in_channels + vote_conv_list = list() + for k in range(len(conv_channels)): + vote_conv_list.append( + ConvModule( + prev_channels, + conv_channels[k], + 1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + bias=True, + inplace=True)) + prev_channels = conv_channels[k] + self.vote_conv = nn.Sequential(*vote_conv_list) + + # conv_out predicts coordinate and residual features + if with_res_feat: + out_channel = (3 + in_channels) * self.vote_per_seed + else: + out_channel = 3 * self.vote_per_seed + self.conv_out = nn.Conv1d(prev_channels, out_channel, 1) + + def forward(self, seed_points, seed_feats): + """forward. + + Args: + seed_points (torch.Tensor): Coordinate of the seed + points in shape (B, N, 3). + seed_feats (torch.Tensor): Features of the seed points in shape + (B, C, N). + + Returns: + tuple[torch.Tensor]: + + - vote_points: Voted xyz based on the seed points \ + with shape (B, M, 3), ``M=num_seed*vote_per_seed``. + - vote_features: Voted features based on the seed points with \ + shape (B, C, M) where ``M=num_seed*vote_per_seed``, \ + ``C=vote_feature_dim``. 
+ """ + if self.num_points != -1: + assert self.num_points < seed_points.shape[1], \ + f'Number of vote points ({self.num_points}) should be '\ + f'smaller than seed points size ({seed_points.shape[1]})' + seed_points = seed_points[:, :self.num_points] + seed_feats = seed_feats[..., :self.num_points] + + batch_size, feat_channels, num_seed = seed_feats.shape + num_vote = num_seed * self.vote_per_seed + x = self.vote_conv(seed_feats) + # (batch_size, (3+out_dim)*vote_per_seed, num_seed) + votes = self.conv_out(x) + + votes = votes.transpose(2, 1).view(batch_size, num_seed, + self.vote_per_seed, -1) + + offset = votes[:, :, :, 0:3] + if self.vote_xyz_range is not None: + limited_offset_list = [] + for axis in range(len(self.vote_xyz_range)): + limited_offset_list.append(offset[..., axis].clamp( + min=-self.vote_xyz_range[axis], + max=self.vote_xyz_range[axis])) + limited_offset = torch.stack(limited_offset_list, -1) + vote_points = (seed_points.unsqueeze(2) + + limited_offset).contiguous() + else: + vote_points = (seed_points.unsqueeze(2) + offset).contiguous() + vote_points = vote_points.view(batch_size, num_vote, 3) + offset = offset.reshape(batch_size, num_vote, 3).transpose(2, 1) + + if self.with_res_feat: + res_feats = votes[:, :, :, 3:] + vote_feats = (seed_feats.transpose(2, 1).unsqueeze(2) + + res_feats).contiguous() + vote_feats = vote_feats.view(batch_size, + num_vote, feat_channels).transpose( + 2, 1).contiguous() + + if self.norm_feats: + features_norm = torch.norm(vote_feats, p=2, dim=1) + vote_feats = vote_feats.div(features_norm.unsqueeze(1)) + else: + vote_feats = seed_feats + return vote_points, vote_feats, offset + + def get_loss(self, seed_points, vote_points, seed_indices, + vote_targets_mask, vote_targets): + """Calculate loss of voting module. + + Args: + seed_points (torch.Tensor): Coordinate of the seed points. + vote_points (torch.Tensor): Coordinate of the vote points. + seed_indices (torch.Tensor): Indices of seed points in raw points. + vote_targets_mask (torch.Tensor): Mask of valid vote targets. + vote_targets (torch.Tensor): Targets of votes. + + Returns: + torch.Tensor: Weighted vote loss. 
+ """ + batch_size, num_seed = seed_points.shape[:2] + + seed_gt_votes_mask = torch.gather(vote_targets_mask, 1, + seed_indices).float() + + seed_indices_expand = seed_indices.unsqueeze(-1).repeat( + 1, 1, 3 * self.gt_per_seed) + seed_gt_votes = torch.gather(vote_targets, 1, seed_indices_expand) + seed_gt_votes += seed_points.repeat(1, 1, self.gt_per_seed) + + weight = seed_gt_votes_mask / (torch.sum(seed_gt_votes_mask) + 1e-6) + distance = self.vote_loss( + vote_points.view(batch_size * num_seed, -1, 3), + seed_gt_votes.view(batch_size * num_seed, -1, 3), + dst_weight=weight.view(batch_size * num_seed, 1))[1] + vote_loss = torch.sum(torch.min(distance, dim=1)[0]) + + return vote_loss diff --git a/mmcv/models/necks/__init__.py b/mmcv/models/necks/__init__.py new file mode 100644 index 0000000..614ac35 --- /dev/null +++ b/mmcv/models/necks/__init__.py @@ -0,0 +1,24 @@ +# from .bfp import BFP +# from .channel_mapper import ChannelMapper +# from .ct_resnet_neck import CTResNetNeck +# from .dilated_encoder import DilatedEncoder +# from .fpg import FPG +from .fpn import FPN +# from .fpn_carafe import FPN_CARAFE +# from .multilevel_neck import MultiLevelNeck +# from .hrfpn import HRFPN +# from .nas_fpn import NASFPN +# from .nasfcos_fpn import NASFCOS_FPN +# from .pafpn import PAFPN +# from .rfp import RFP +# from .ssd_neck import SSDNeck +# from .yolo_neck import YOLOV3Neck +# from .imvoxel_neck import OutdoorImVoxelNeck +# from .second_fpn import SECONDFPN + +# __all__ = [ +# 'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN', +# 'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder', +# 'CTResNetNeck', 'SSDNeck', 'SECONDFPN', 'OutdoorImVoxelNeck', +# 'MultiLevelNeck' +# ] diff --git a/mmcv/models/necks/fpn.py b/mmcv/models/necks/fpn.py new file mode 100644 index 0000000..5ef8b03 --- /dev/null +++ b/mmcv/models/necks/fpn.py @@ -0,0 +1,203 @@ +import torch.nn as nn +import torch.nn.functional as F +from mmcv.models.bricks import ConvModule +from mmcv.models.backbones import BaseModule +from mmcv.utils import auto_fp16 + +from ..builder import NECKS + + +@NECKS.register_module() +class FPN(BaseModule): + r"""Feature Pyramid Network. + + This is an implementation of paper `Feature Pyramid Networks for Object + Detection `_. + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale) + num_outs (int): Number of output scales. + start_level (int): Index of the start input backbone level used to + build the feature pyramid. Default: 0. + end_level (int): Index of the end input backbone level (exclusive) to + build the feature pyramid. Default: -1, which means the last level. + add_extra_convs (bool | str): If bool, it decides whether to add conv + layers on top of the original feature maps. Default to False. + If True, it is equivalent to `add_extra_convs='on_input'`. + If str, it specifies the source feature map of the extra convs. + Only the following options are allowed + + - 'on_input': Last feat map of neck inputs (i.e. backbone feature). + - 'on_lateral': Last feature map after lateral convs. + - 'on_output': The last output feature map after fpn convs. + relu_before_extra_convs (bool): Whether to apply relu before the extra + conv. Default: False. + no_norm_on_lateral (bool): Whether to apply norm on lateral. + Default: False. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Config dict for normalization layer. Default: None. 
+ act_cfg (str): Config dict for activation layer in ConvModule. + Default: None. + upsample_cfg (dict): Config dict for interpolate layer. + Default: `dict(mode='nearest')` + init_cfg (dict or list[dict], optional): Initialization config dict. + + Example: + >>> import torch + >>> in_channels = [2, 3, 5, 7] + >>> scales = [340, 170, 84, 43] + >>> inputs = [torch.rand(1, c, s, s) + ... for c, s in zip(in_channels, scales)] + >>> self = FPN(in_channels, 11, len(in_channels)).eval() + >>> outputs = self.forward(inputs) + >>> for i in range(len(outputs)): + ... print(f'outputs[{i}].shape = {outputs[i].shape}') + outputs[0].shape = torch.Size([1, 11, 340, 340]) + outputs[1].shape = torch.Size([1, 11, 170, 170]) + outputs[2].shape = torch.Size([1, 11, 84, 84]) + outputs[3].shape = torch.Size([1, 11, 43, 43]) + """ + + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + add_extra_convs=False, + relu_before_extra_convs=False, + no_norm_on_lateral=False, + conv_cfg=None, + norm_cfg=None, + act_cfg=None, + upsample_cfg=dict(mode='nearest'), + init_cfg=dict( + type='Xavier', layer='Conv2d', distribution='uniform')): + super(FPN, self).__init__(init_cfg) + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) + self.num_outs = num_outs + self.relu_before_extra_convs = relu_before_extra_convs + self.no_norm_on_lateral = no_norm_on_lateral + self.fp16_enabled = False + self.upsample_cfg = upsample_cfg.copy() + + if end_level == -1: + self.backbone_end_level = self.num_ins + assert num_outs >= self.num_ins - start_level + else: + # if end_level < inputs, no extra level is allowed + self.backbone_end_level = end_level + assert end_level <= len(in_channels) + assert num_outs == end_level - start_level + self.start_level = start_level + self.end_level = end_level + self.add_extra_convs = add_extra_convs + assert isinstance(add_extra_convs, (str, bool)) + if isinstance(add_extra_convs, str): + # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' + assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') + elif add_extra_convs: # True + self.add_extra_convs = 'on_input' + + self.lateral_convs = nn.ModuleList() + self.fpn_convs = nn.ModuleList() + + for i in range(self.start_level, self.backbone_end_level): + l_conv = ConvModule( + in_channels[i], + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, + act_cfg=act_cfg, + inplace=False) + fpn_conv = ConvModule( + out_channels, + out_channels, + 3, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + + self.lateral_convs.append(l_conv) + self.fpn_convs.append(fpn_conv) + + # add extra conv layers (e.g., RetinaNet) + extra_levels = num_outs - self.backbone_end_level + self.start_level + if self.add_extra_convs and extra_levels >= 1: + for i in range(extra_levels): + if i == 0 and self.add_extra_convs == 'on_input': + in_channels = self.in_channels[self.backbone_end_level - 1] + else: + in_channels = out_channels + extra_fpn_conv = ConvModule( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + self.fpn_convs.append(extra_fpn_conv) + + @auto_fp16() + def forward(self, inputs): + """Forward function.""" + assert len(inputs) == len(self.in_channels) + + # build laterals + laterals = [ + lateral_conv(inputs[i + self.start_level]) + for i, 
lateral_conv in enumerate(self.lateral_convs) + ] + + # build top-down path + used_backbone_levels = len(laterals) + for i in range(used_backbone_levels - 1, 0, -1): + # In some cases, fixing `scale factor` (e.g. 2) is preferred, but + # it cannot co-exist with `size` in `F.interpolate`. + if 'scale_factor' in self.upsample_cfg: + laterals[i - 1] += F.interpolate(laterals[i], + **self.upsample_cfg) + else: + prev_shape = laterals[i - 1].shape[2:] + laterals[i - 1] += F.interpolate( + laterals[i], size=prev_shape, **self.upsample_cfg) + + # build outputs + # part 1: from original levels + outs = [ + self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) + ] + # part 2: add extra levels + if self.num_outs > len(outs): + # use max pool to get more levels on top of outputs + # (e.g., Faster R-CNN, Mask R-CNN) + if not self.add_extra_convs: + for i in range(self.num_outs - used_backbone_levels): + outs.append(F.max_pool2d(outs[-1], 1, stride=2)) + # add conv layers on top of original feature maps (RetinaNet) + else: + if self.add_extra_convs == 'on_input': + extra_source = inputs[self.backbone_end_level - 1] + elif self.add_extra_convs == 'on_lateral': + extra_source = laterals[-1] + elif self.add_extra_convs == 'on_output': + extra_source = outs[-1] + else: + raise NotImplementedError + outs.append(self.fpn_convs[used_backbone_levels](extra_source)) + for i in range(used_backbone_levels + 1, self.num_outs): + if self.relu_before_extra_convs: + outs.append(self.fpn_convs[i](F.relu(outs[-1]))) + else: + outs.append(self.fpn_convs[i](outs[-1])) + return tuple(outs) diff --git a/mmcv/models/opt/__init__.py b/mmcv/models/opt/__init__.py new file mode 100644 index 0000000..c7dd426 --- /dev/null +++ b/mmcv/models/opt/__init__.py @@ -0,0 +1 @@ +from .adamw import AdamW2 \ No newline at end of file diff --git a/mmcv/models/opt/adamw.py b/mmcv/models/opt/adamw.py new file mode 100644 index 0000000..6b6f358 --- /dev/null +++ b/mmcv/models/opt/adamw.py @@ -0,0 +1,131 @@ +try: + from torch.optim import _functional as F +except: + print('WARNING!!!, I recommend using torch>=1.8') + +import torch +from torch.optim.optimizer import Optimizer +from mmcv.optims import OPTIMIZERS + +@OPTIMIZERS.register_module() +class AdamW2(Optimizer): + r"""Implements AdamW algorithm. Solve the bug of torch 1.8 + + The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_. + The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_. + + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay coefficient (default: 1e-2) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + + .. _Adam\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + .. _Decoupled Weight Decay Regularization: + https://arxiv.org/abs/1711.05101 + .. 
+    .. _On the Convergence of Adam and Beyond:
+        https://openreview.net/forum?id=ryQu7f-RZ
+    """
+
+    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
+                 weight_decay=1e-2, amsgrad=False):
+        if not 0.0 <= lr:
+            raise ValueError("Invalid learning rate: {}".format(lr))
+        if not 0.0 <= eps:
+            raise ValueError("Invalid epsilon value: {}".format(eps))
+        if not 0.0 <= betas[0] < 1.0:
+            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
+        if not 0.0 <= betas[1] < 1.0:
+            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
+        if not 0.0 <= weight_decay:
+            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
+        defaults = dict(lr=lr, betas=betas, eps=eps,
+                        weight_decay=weight_decay, amsgrad=amsgrad)
+        super(AdamW2, self).__init__(params, defaults)
+
+    def __setstate__(self, state):
+        super(AdamW2, self).__setstate__(state)
+        for group in self.param_groups:
+            group.setdefault('amsgrad', False)
+
+    @torch.no_grad()
+    def step(self, closure=None):
+        """Performs a single optimization step.
+
+        Args:
+            closure (callable, optional): A closure that reevaluates the model
+                and returns the loss.
+        """
+        loss = None
+        if closure is not None:
+            with torch.enable_grad():
+                loss = closure()
+
+        for group in self.param_groups:
+            params_with_grad = []
+            grads = []
+            exp_avgs = []
+            exp_avg_sqs = []
+            max_exp_avg_sqs = []
+            state_steps = []
+            amsgrad = group['amsgrad']
+
+            # read betas before the per-parameter loop; this ordering is what
+            # works around the torch 1.8 bug mentioned in the class docstring
+            beta1, beta2 = group['betas']
+
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+                params_with_grad.append(p)
+                if p.grad.is_sparse:
+                    raise RuntimeError('AdamW does not support sparse gradients')
+                grads.append(p.grad)
+
+                state = self.state[p]
+
+                # State initialization
+                if len(state) == 0:
+                    state['step'] = 0
+                    # Exponential moving average of gradient values
+                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
+                    # Exponential moving average of squared gradient values
+                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
+                    if amsgrad:
+                        # Maintains max of all exp. moving avg. of sq. grad.
values + state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) + + exp_avgs.append(state['exp_avg']) + exp_avg_sqs.append(state['exp_avg_sq']) + + if amsgrad: + max_exp_avg_sqs.append(state['max_exp_avg_sq']) + + + # update the steps for each param group update + state['step'] += 1 + # record the step after step update + state_steps.append(state['step']) + + F.adamw(params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad, + beta1, + beta2, + group['lr'], + group['weight_decay'], + group['eps']) + + return loss \ No newline at end of file diff --git a/mmcv/models/roi_heads/mask_heads/__init__.py b/mmcv/models/roi_heads/mask_heads/__init__.py new file mode 100644 index 0000000..89ed5bc --- /dev/null +++ b/mmcv/models/roi_heads/mask_heads/__init__.py @@ -0,0 +1 @@ +from .fused_semantic_head import FusedSemanticHead \ No newline at end of file diff --git a/mmcv/models/roi_heads/mask_heads/fused_semantic_head.py b/mmcv/models/roi_heads/mask_heads/fused_semantic_head.py new file mode 100644 index 0000000..deb6810 --- /dev/null +++ b/mmcv/models/roi_heads/mask_heads/fused_semantic_head.py @@ -0,0 +1,107 @@ +import torch.nn as nn +import torch.nn.functional as F +from mmcv.models import ConvModule +from mmcv.models.backbones import BaseModule +from mmcv.utils import auto_fp16, force_fp32 + +from mmcv.models.builder import HEADS + + +@HEADS.register_module() +class FusedSemanticHead(BaseModule): + r"""Multi-level fused semantic segmentation head. + + .. code-block:: none + + in_1 -> 1x1 conv --- + | + in_2 -> 1x1 conv -- | + || + in_3 -> 1x1 conv - || + ||| /-> 1x1 conv (mask prediction) + in_4 -> 1x1 conv -----> 3x3 convs (*4) + | \-> 1x1 conv (feature) + in_5 -> 1x1 conv --- + """ # noqa: W605 + + def __init__(self, + num_ins, + fusion_level, + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=183, + ignore_label=255, + loss_weight=0.2, + conv_cfg=None, + norm_cfg=None, + init_cfg=dict( + type='Kaiming', override=dict(name='conv_logits'))): + super(FusedSemanticHead, self).__init__(init_cfg) + self.num_ins = num_ins + self.fusion_level = fusion_level + self.num_convs = num_convs + self.in_channels = in_channels + self.conv_out_channels = conv_out_channels + self.num_classes = num_classes + self.ignore_label = ignore_label + self.loss_weight = loss_weight + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.fp16_enabled = False + + self.lateral_convs = nn.ModuleList() + for i in range(self.num_ins): + self.lateral_convs.append( + ConvModule( + self.in_channels, + self.in_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=False)) + + self.convs = nn.ModuleList() + for i in range(self.num_convs): + in_channels = self.in_channels if i == 0 else conv_out_channels + self.convs.append( + ConvModule( + in_channels, + conv_out_channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + self.conv_embedding = ConvModule( + conv_out_channels, + conv_out_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1) + + self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_label) + + @auto_fp16() + def forward(self, feats): + x = self.lateral_convs[self.fusion_level](feats[self.fusion_level]) + fused_size = tuple(x.shape[-2:]) + for i, feat in enumerate(feats): + if i != self.fusion_level: + feat = F.interpolate( + feat, size=fused_size, mode='bilinear', align_corners=True) + x += 
self.lateral_convs[i](feat) + + for i in range(self.num_convs): + x = self.convs[i](x) + + mask_pred = self.conv_logits(x) + x = self.conv_embedding(x) + return mask_pred, x + + @force_fp32(apply_to=('mask_pred', )) + def loss(self, mask_pred, labels): + labels = labels.squeeze(1).long() + loss_semantic_seg = self.criterion(mask_pred, labels) + loss_semantic_seg *= self.loss_weight + return loss_semantic_seg diff --git a/mmcv/models/segmentors/__init__.py b/mmcv/models/segmentors/__init__.py new file mode 100644 index 0000000..9358b86 --- /dev/null +++ b/mmcv/models/segmentors/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base import Base3DSegmentor, BaseSegmentor +# from .cascade_encoder_decoder import CascadeEncoderDecoder +# from .encoder_decoder import EncoderDecoder3D, EncoderDecoder + +# __all__ = ['Base3DSegmentor', 'EncoderDecoder3D', +# 'BaseSegmentor', 'EncoderDecoder', 'CascadeEncoderDecoder'] diff --git a/mmcv/models/segmentors/base.py b/mmcv/models/segmentors/base.py new file mode 100644 index 0000000..742e16b --- /dev/null +++ b/mmcv/models/segmentors/base.py @@ -0,0 +1,379 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +import numpy as np +import torch +import torch.distributed as dist +from mmcv.parallel import DataContainer as DC +from mmcv.models.backbones.base_module import BaseModule +from mmcv.utils import auto_fp16 +from os import path as osp +from abc import ABCMeta, abstractmethod +from collections import OrderedDict +from mmcv.image import imread, imwrite +from mmcv.utils import is_list_of +from mmcv.core.visualizer import show_seg_result +from mmcv.core.visualization import imshow + + +class BaseSegmentor(BaseModule, metaclass=ABCMeta): + """Base class for segmentors.""" + + def __init__(self, init_cfg=None): + super(BaseSegmentor, self).__init__(init_cfg) + self.fp16_enabled = False + + @property + def with_neck(self): + """bool: whether the segmentor has neck""" + return hasattr(self, 'neck') and self.neck is not None + + @property + def with_auxiliary_head(self): + """bool: whether the segmentor has auxiliary head""" + return hasattr(self, + 'auxiliary_head') and self.auxiliary_head is not None + + @property + def with_decode_head(self): + """bool: whether the segmentor has decode head""" + return hasattr(self, 'decode_head') and self.decode_head is not None + + @abstractmethod + def extract_feat(self, imgs): + """Placeholder for extract features from images.""" + pass + + @abstractmethod + def encode_decode(self, img, img_metas): + """Placeholder for encode images with backbone and decode into a + semantic segmentation map of the same size as input.""" + pass + + @abstractmethod + def forward_train(self, imgs, img_metas, **kwargs): + """Placeholder for Forward function for training.""" + pass + + @abstractmethod + def simple_test(self, img, img_meta, **kwargs): + """Placeholder for single image test.""" + pass + + @abstractmethod + def aug_test(self, imgs, img_metas, **kwargs): + """Placeholder for augmentation test.""" + pass + + def forward_test(self, imgs, img_metas, **kwargs): + """ + Args: + imgs (List[Tensor]): the outer list indicates test-time + augmentations and inner Tensor should have a shape NxCxHxW, + which contains all images in the batch. + img_metas (List[List[dict]]): the outer list indicates test-time + augs (multiscale, flip, etc.) and the inner list indicates + images in a batch. 
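+
+        Example:
+            A sketch of the expected nesting for one test-time augmentation
+            and a batch of two images (sizes are illustrative):
+
+            >>> imgs = [torch.rand(2, 3, 512, 512)]
+            >>> meta = dict(ori_shape=(512, 512, 3), img_shape=(512, 512, 3),
+            ...             pad_shape=(512, 512, 3))
+            >>> img_metas = [[meta, meta]]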
+ """ + for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: + if not isinstance(var, list): + raise TypeError(f'{name} must be a list, but got ' + f'{type(var)}') + + num_augs = len(imgs) + if num_augs != len(img_metas): + raise ValueError(f'num of augmentations ({len(imgs)}) != ' + f'num of image meta ({len(img_metas)})') + # all images in the same aug batch all of the same ori_shape and pad + # shape + for img_meta in img_metas: + ori_shapes = [_['ori_shape'] for _ in img_meta] + assert all(shape == ori_shapes[0] for shape in ori_shapes) + img_shapes = [_['img_shape'] for _ in img_meta] + assert all(shape == img_shapes[0] for shape in img_shapes) + pad_shapes = [_['pad_shape'] for _ in img_meta] + assert all(shape == pad_shapes[0] for shape in pad_shapes) + + if num_augs == 1: + return self.simple_test(imgs[0], img_metas[0], **kwargs) + else: + return self.aug_test(imgs, img_metas, **kwargs) + + @auto_fp16(apply_to=('img', )) + def forward(self, img, img_metas, return_loss=True, **kwargs): + """Calls either :func:`forward_train` or :func:`forward_test` depending + on whether ``return_loss`` is ``True``. + + Note this setting will change the expected inputs. When + ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor + and List[dict]), and when ``resturn_loss=False``, img and img_meta + should be double nested (i.e. List[Tensor], List[List[dict]]), with + the outer list indicating test time augmentations. + """ + if return_loss: + return self.forward_train(img, img_metas, **kwargs) + else: + return self.forward_test(img, img_metas, **kwargs) + + def train_step(self, data_batch, optimizer, **kwargs): + """The iteration step during training. + + This method defines an iteration step during training, except for the + back propagation and optimizer updating, which are done in an optimizer + hook. Note that in some complicated cases or models, the whole process + including back propagation and optimizer updating is also defined in + this method, such as GAN. + + Args: + data (dict): The output of dataloader. + optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of + runner is passed to ``train_step()``. This argument is unused + and reserved. + + Returns: + dict: It should contain at least 3 keys: ``loss``, ``log_vars``, + ``num_samples``. + ``loss`` is a tensor for back propagation, which can be a + weighted sum of multiple losses. + ``log_vars`` contains all the variables to be sent to the + logger. + ``num_samples`` indicates the batch size (when the model is + DDP, it means the batch size on each GPU), which is used for + averaging the logs. + """ + losses = self(**data_batch) + loss, log_vars = self._parse_losses(losses) + + outputs = dict( + loss=loss, + log_vars=log_vars, + num_samples=len(data_batch['img_metas'])) + + return outputs + + def val_step(self, data_batch, **kwargs): + """The iteration step during validation. + + This method shares the same signature as :func:`train_step`, but used + during val epochs. Note that the evaluation after training epochs is + not implemented with this method, but an evaluation hook. + """ + output = self(**data_batch, **kwargs) + return output + + @staticmethod + def _parse_losses(losses): + """Parse the raw outputs (losses) of the network. + + Args: + losses (dict): Raw output of the network, which usually contain + losses and other necessary information. 
+
+        Returns:
+            tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor
+                which may be a weighted sum of all losses, log_vars contains
+                all the variables to be sent to the logger.
+        """
+        log_vars = OrderedDict()
+        for loss_name, loss_value in losses.items():
+            if isinstance(loss_value, torch.Tensor):
+                log_vars[loss_name] = loss_value.mean()
+            elif isinstance(loss_value, list):
+                log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
+            else:
+                raise TypeError(
+                    f'{loss_name} is not a tensor or list of tensors')
+
+        loss = sum(_value for _key, _value in log_vars.items()
+                   if 'loss' in _key)
+
+        log_vars['loss'] = loss
+        for loss_name, loss_value in log_vars.items():
+            # reduce loss when distributed training
+            if dist.is_available() and dist.is_initialized():
+                loss_value = loss_value.data.clone()
+                dist.all_reduce(loss_value.div_(dist.get_world_size()))
+            log_vars[loss_name] = loss_value.item()
+
+        return loss, log_vars
+
+    def show_result(self,
+                    img,
+                    result,
+                    palette=None,
+                    win_name='',
+                    show=False,
+                    wait_time=0,
+                    out_file=None,
+                    opacity=0.5):
+        """Draw `result` over `img`.
+
+        Args:
+            img (str or Tensor): The image to be displayed.
+            result (Tensor): The semantic segmentation results to draw over
+                `img`.
+            palette (list[list[int]]] | np.ndarray | None): The palette of
+                segmentation map. If None is given, random palette will be
+                generated. Default: None
+            win_name (str): The window name.
+            wait_time (int): Value of waitKey param.
+                Default: 0.
+            show (bool): Whether to show the image.
+                Default: False.
+            out_file (str or None): The filename to write the image.
+                Default: None.
+            opacity(float): Opacity of painted segmentation map.
+                Default 0.5.
+                Must be in (0, 1] range.
+        Returns:
+            img (Tensor): The image with the segmentation overlaid; only
+                returned when neither `show` nor `out_file` is specified.
+        """
+        img = imread(img)
+        img = img.copy()
+        seg = result[0]
+        if palette is None:
+            if self.PALETTE is None:
+                palette = np.random.randint(
+                    0, 255, size=(len(self.CLASSES), 3))
+            else:
+                palette = self.PALETTE
+        palette = np.array(palette)
+        assert palette.shape[0] == len(self.CLASSES)
+        assert palette.shape[1] == 3
+        assert len(palette.shape) == 2
+        assert 0 < opacity <= 1.0
+        color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
+        for label, color in enumerate(palette):
+            color_seg[seg == label, :] = color
+        # convert to BGR
+        color_seg = color_seg[..., ::-1]
+
+        img = img * (1 - opacity) + color_seg * opacity
+        img = img.astype(np.uint8)
+        # if out_file specified, do not show image in window
+        if out_file is not None:
+            show = False
+
+        if show:
+            imshow(img, win_name, wait_time)
+        if out_file is not None:
+            imwrite(img, out_file)
+
+        if not (show or out_file):
+            warnings.warn('show==False and out_file is not specified, only '
+                          'result image will be returned')
+            return img
+
+class Base3DSegmentor(BaseSegmentor):
+    """Base class for 3D segmentors.
+
+    The main difference with `BaseSegmentor` is that we modify the keys in
+    data_dict and use a 3D seg specific visualization function.
+    """
+
+    @property
+    def with_regularization_loss(self):
+        """bool: whether the segmentor has regularization loss for weight"""
+        return hasattr(self, 'loss_regularization') and \
+            self.loss_regularization is not None
+
+    def forward_test(self, points, img_metas, **kwargs):
+        """Calls either simple_test or aug_test depending on the length of
+        the outer list of points. If len(points) == 1, call simple_test.
+        Otherwise call aug_test to aggregate the test results by e.g. voting.
+
+        Args:
+            points (list[list[torch.Tensor]]): the outer list indicates
+                test-time augmentations and the inner torch.Tensor should
+                have a shape (B, N, C), which contains all points in the
+                batch.
+            img_metas (list[list[dict]]): the outer list indicates test-time
+                augs (multiscale, flip, etc.) and the inner list indicates
+                images in a batch.
+        """
+        for var, name in [(points, 'points'), (img_metas, 'img_metas')]:
+            if not isinstance(var, list):
+                raise TypeError(f'{name} must be a list, but got {type(var)}')
+
+        num_augs = len(points)
+        if num_augs != len(img_metas):
+            raise ValueError(f'num of augmentations ({len(points)}) != '
+                             f'num of image meta ({len(img_metas)})')
+
+        if num_augs == 1:
+            return self.simple_test(points[0], img_metas[0], **kwargs)
+        else:
+            return self.aug_test(points, img_metas, **kwargs)
+
+    @auto_fp16(apply_to=('points', ))
+    def forward(self, return_loss=True, **kwargs):
+        """Calls either forward_train or forward_test depending on whether
+        return_loss=True.
+
+        Note this setting will change the expected inputs. When
+        `return_loss=True`, point and img_metas are single-nested (i.e.
+        torch.Tensor and list[dict]), and when `return_loss=False`, point and
+        img_metas should be double nested (i.e. list[torch.Tensor],
+        list[list[dict]]), with the outer list indicating test time
+        augmentations.
+        """
+        if return_loss:
+            return self.forward_train(**kwargs)
+        else:
+            return self.forward_test(**kwargs)
+
+    def show_results(self,
+                     data,
+                     result,
+                     palette=None,
+                     out_dir=None,
+                     ignore_index=None):
+        """Results visualization.
+
+        Args:
+            data (list[dict]): Input points and the information of the sample.
+            result (list[dict]): Prediction results.
+            palette (list[list[int]]] | np.ndarray | None): The palette of
+                segmentation map. If None is given, random palette will be
+                generated. Default: None
+            out_dir (str): Output directory of visualization result.
+            ignore_index (int, optional): The label index to be ignored, e.g.
+                unannotated points. If None is given, set to len(self.CLASSES).
+                Defaults to None.
+        """
+        assert out_dir is not None, 'Expect out_dir, got none.'
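+        # Resolve the palette: fall back to the dataset's PALETTE, or draw a
+        # random color per class when none is provided.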
+        if palette is None:
+            if self.PALETTE is None:
+                palette = np.random.randint(
+                    0, 255, size=(len(self.CLASSES), 3))
+            else:
+                palette = self.PALETTE
+        palette = np.array(palette)
+        for batch_id in range(len(result)):
+            if isinstance(data['points'][0], DC):
+                points = data['points'][0]._data[0][batch_id].numpy()
+            elif is_list_of(data['points'][0], torch.Tensor):
+                points = data['points'][0][batch_id]
+            else:
+                raise ValueError(
+                    f"Unsupported data type {type(data['points'][0])} "
+                    f'for visualization!')
+            if isinstance(data['img_metas'][0], DC):
+                pts_filename = data['img_metas'][0]._data[0][batch_id][
+                    'pts_filename']
+            elif is_list_of(data['img_metas'][0], dict):
+                pts_filename = data['img_metas'][0][batch_id]['pts_filename']
+            else:
+                raise ValueError(
+                    f"Unsupported data type {type(data['img_metas'][0])} "
+                    f'for visualization!')
+            file_name = osp.split(pts_filename)[-1].split('.')[0]
+
+            pred_sem_mask = result[batch_id]['semantic_mask'].cpu().numpy()
+
+            show_seg_result(
+                points,
+                None,
+                pred_sem_mask,
+                out_dir,
+                file_name,
+                palette,
+                ignore_index,
+                show=True)
\ No newline at end of file
diff --git a/mmcv/models/utils/__init__.py b/mmcv/models/utils/__init__.py
new file mode 100644
index 0000000..6b9c3c9
--- /dev/null
+++ b/mmcv/models/utils/__init__.py
@@ -0,0 +1,25 @@
+from .builder import build_linear_layer, build_transformer
+from .positional_encoding import (LearnedPositionalEncoding,
+                                  SinePositionalEncoding)
+from .res_layer import ResLayer, SimplifiedBasicBlock
+from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
+                          DynamicConv, Transformer)
+from .grid_mask import GridMask
+from .weight_init import (INITIALIZERS, Caffe2XavierInit, ConstantInit,
+                          KaimingInit, NormalInit, PretrainedInit,
+                          TruncNormalInit, UniformInit, XavierInit,
+                          bias_init_with_prob, caffe2_xavier_init,
+                          constant_init, initialize, kaiming_init, normal_init,
+                          trunc_normal_init, uniform_init, xavier_init)
+from .fuse_conv_bn import fuse_conv_bn
+
+
+# __all__ = [
+#     'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
+#     'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
+#     'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
+#     'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
+#     'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
+#     'SELayer', 'clip_sigmoid', 'MLP', 'run_time', 'GridMask',
+#     'SelfAttentionBlock', 'UpConvBlock', 'InvertedResidualV3', 'DropPath',
+#     'trunc_normal_'
+# ]
diff --git a/mmcv/models/utils/builder.py b/mmcv/models/utils/builder.py
new file mode 100644
index 0000000..fdcff09
--- /dev/null
+++ b/mmcv/models/utils/builder.py
@@ -0,0 +1,46 @@
+import torch.nn as nn
+from mmcv.utils import Registry, build_from_cfg
+
+TRANSFORMER = Registry('Transformer')
+LINEAR_LAYERS = Registry('linear layers')
+
+
+def build_transformer(cfg, default_args=None):
+    """Builder for Transformer."""
+    return build_from_cfg(cfg, TRANSFORMER, default_args)
+
+
+LINEAR_LAYERS.register_module('Linear', module=nn.Linear)
+
+
+def build_linear_layer(cfg, *args, **kwargs):
+    """Build linear layer.
+
+    Args:
+        cfg (None or dict): The linear layer config, which should contain:
+
+            - type (str): Layer type.
+            - layer args: Args needed to instantiate a linear layer.
+        args (argument list): Arguments passed to the `__init__`
+            method of the corresponding linear layer.
+        kwargs (keyword arguments): Keyword arguments passed to the `__init__`
+            method of the corresponding linear layer.
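+
+    Example:
+        Illustrative use with the default registry entry:
+
+        >>> layer = build_linear_layer(dict(type='Linear'), 256, 81)
+        >>> layer
+        Linear(in_features=256, out_features=81, bias=True)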
+
+    Returns:
+        nn.Module: Created linear layer.
+    """
+    if cfg is None:
+        cfg_ = dict(type='Linear')
+    else:
+        if not isinstance(cfg, dict):
+            raise TypeError('cfg must be a dict')
+        if 'type' not in cfg:
+            raise KeyError('the cfg dict must contain the key "type"')
+        cfg_ = cfg.copy()
+
+    layer_type = cfg_.pop('type')
+    if layer_type not in LINEAR_LAYERS:
+        raise KeyError(f'Unrecognized linear type {layer_type}')
+    else:
+        linear_layer = LINEAR_LAYERS.get(layer_type)
+
+    layer = linear_layer(*args, **kwargs, **cfg_)
+
+    return layer
diff --git a/mmcv/models/utils/functional.py b/mmcv/models/utils/functional.py
new file mode 100644
index 0000000..b4ae933
--- /dev/null
+++ b/mmcv/models/utils/functional.py
@@ -0,0 +1,141 @@
+import math
+import torch
+from einops import rearrange, repeat
+
+def bivariate_gaussian_activation(ip):
+    """
+    Activation function to output the parameters of a bivariate Gaussian
+    distribution.
+
+    Args:
+        ip (torch.Tensor): Input tensor.
+
+    Returns:
+        torch.Tensor: Output tensor containing the parameters of the
+            bivariate Gaussian distribution.
+    """
+    mu_x = ip[..., 0:1]
+    mu_y = ip[..., 1:2]
+    sig_x = ip[..., 2:3]
+    sig_y = ip[..., 3:4]
+    rho = ip[..., 4:5]
+    sig_x = torch.exp(sig_x)
+    sig_y = torch.exp(sig_y)
+    rho = torch.tanh(rho)
+    out = torch.cat([mu_x, mu_y, sig_x, sig_y, rho], dim=-1)
+    return out
+
+def norm_points(pos, pc_range):
+    """
+    Normalize the end points of a given position tensor.
+
+    Args:
+        pos (torch.Tensor): Input position tensor.
+        pc_range (List[float]): Point cloud range.
+
+    Returns:
+        torch.Tensor: Normalized end points tensor.
+    """
+    x_norm = (pos[..., 0] - pc_range[0]) / (pc_range[3] - pc_range[0])
+    y_norm = (pos[..., 1] - pc_range[1]) / (pc_range[4] - pc_range[1])
+    return torch.stack([x_norm, y_norm], dim=-1)
+
+def pos2posemb2d(pos, num_pos_feats=128, temperature=10000):
+    """
+    Convert a 2D position into positional embeddings.
+
+    Args:
+        pos (torch.Tensor): Input 2D position tensor.
+        num_pos_feats (int, optional): Number of positional features.
+            Default is 128.
+        temperature (int, optional): Temperature factor for positional
+            embeddings. Default is 10000.
+
+    Returns:
+        torch.Tensor: Positional embeddings tensor.
+    """
+    scale = 2 * math.pi
+    pos = pos * scale
+    dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)
+    dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)
+    pos_x = pos[..., 0, None] / dim_t
+    pos_y = pos[..., 1, None] / dim_t
+    pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2)
+    pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=-1).flatten(-2)
+    posemb = torch.cat((pos_y, pos_x), dim=-1)
+    return posemb
+
+def rot_2d(yaw):
+    """
+    Compute a batch of 2D rotation matrices for a given yaw angle tensor.
+
+    Args:
+        yaw (torch.Tensor): Input yaw angle tensor.
+
+    Returns:
+        torch.Tensor: 2D rotation matrix tensor.
+    """
+    sy, cy = torch.sin(yaw), torch.cos(yaw)
+    out = torch.stack([torch.stack([cy, -sy]), torch.stack([sy, cy])]).permute([2, 0, 1])
+    return out
+
+def anchor_coordinate_transform(anchors, bbox_results, with_translation_transform=True, with_rotation_transform=True):
+    """
+    Transform anchor coordinates with respect to detected bounding boxes in
+    the batch.
+
+    Args:
+        anchors (torch.Tensor): A tensor containing the k-means anchor values.
+        bbox_results (List[Tuple[torch.Tensor]]): A list of tuples containing
+            the bounding box results for each image in the batch.
+        with_translation_transform (bool, optional): Whether to perform the
+            translation transformation. Defaults to True.
+        with_rotation_transform (bool, optional): Whether to perform the
+            rotation transformation. Defaults to True.
+
+    Returns:
+        torch.Tensor: A tensor containing the transformed anchor coordinates.
+    """
+    batch_size = len(bbox_results)
+    batched_anchors = []
+    for i in range(batch_size):
+        # re-initialize from the raw anchors for every sample, so that one
+        # sample's transform does not leak into the next
+        transformed_anchors = anchors[None, ...]  # num_groups, num_modes, 12, 2 -> 1, ...
+        bboxes, scores, labels, bbox_index, mask = bbox_results[i]
+        yaw = bboxes.yaw.to(transformed_anchors.device)
+        bbox_centers = bboxes.gravity_center.to(transformed_anchors.device)
+        if with_rotation_transform:
+            angle = yaw - math.pi  # num_agents, 1
+            rot_yaw = rot_2d(angle)  # num_agents, 2, 2
+            rot_yaw = rot_yaw[:, None, None, :, :]  # num_agents, 1, 1, 2, 2
+            transformed_anchors = rearrange(transformed_anchors, 'b g m t c -> b g m c t')  # 1, num_groups, num_modes, 2, 12
+            transformed_anchors = torch.matmul(rot_yaw, transformed_anchors)  # num_agents, num_groups, num_modes, 2, 12
+            transformed_anchors = rearrange(transformed_anchors, 'b g m c t -> b g m t c')  # num_agents, num_groups, num_modes, 12, 2
+        if with_translation_transform:
+            transformed_anchors = bbox_centers[:, None, None, None, :2] + transformed_anchors
+        batched_anchors.append(transformed_anchors)
+    return torch.stack(batched_anchors)
+
+
+def trajectory_coordinate_transform(trajectory, bbox_results, with_translation_transform=True, with_rotation_transform=True):
+    """
+    Transform trajectory coordinates with respect to detected bounding boxes
+    in the batch.
+
+    Args:
+        trajectory (torch.Tensor): Predicted trajectory.
+        bbox_results (List[Tuple[torch.Tensor]]): A list of tuples containing
+            the bounding box results for each image in the batch.
+        with_translation_transform (bool, optional): Whether to perform the
+            translation transformation. Defaults to True.
+        with_rotation_transform (bool, optional): Whether to perform the
+            rotation transformation. Defaults to True.
+
+    Returns:
+        torch.Tensor: A tensor containing the transformed trajectory
+            coordinates.
+    """
+    batch_size = len(bbox_results)
+    batched_trajectories = []
+    for i in range(batch_size):
+        bboxes, scores, labels, bbox_index, mask = bbox_results[i]
+        yaw = bboxes.yaw.to(trajectory.device)
+        bbox_centers = bboxes.gravity_center.to(trajectory.device)
+        transformed_trajectory = trajectory[i, ...]
+        if with_rotation_transform:
+            # take the negative here to rotate the trajectory back into the
+            # ego-centric coordinate frame
+            angle = -(yaw - math.pi)
+            rot_yaw = rot_2d(angle)
+            rot_yaw = rot_yaw[:, None, None, :, :]  # A, 1, 1, 2, 2
+            transformed_trajectory = rearrange(transformed_trajectory, 'a g p t c -> a g p c t')  # A, G, P, 12, 2 -> A, G, P, 2, 12
+            transformed_trajectory = torch.matmul(rot_yaw, transformed_trajectory)  # A, G, P, 2, 12
+            transformed_trajectory = rearrange(transformed_trajectory, 'a g p c t -> a g p t c')  # A, G, P, 12, 2
+        if with_translation_transform:
+            transformed_trajectory = bbox_centers[:, None, None, None, :2] + transformed_trajectory
+        batched_trajectories.append(transformed_trajectory)
+    return torch.stack(batched_trajectories)
\ No newline at end of file
diff --git a/mmcv/models/utils/fuse_conv_bn.py b/mmcv/models/utils/fuse_conv_bn.py
new file mode 100644
index 0000000..cb7076f
--- /dev/null
+++ b/mmcv/models/utils/fuse_conv_bn.py
@@ -0,0 +1,59 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn as nn
+
+
+def _fuse_conv_bn(conv, bn):
+    """Fuse conv and bn into one module.
+
+    Args:
+        conv (nn.Module): Conv to be fused.
+        bn (nn.Module): BN to be fused.
+
+    Returns:
+        nn.Module: Fused module.
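+
+    Example:
+        A quick equivalence check; the modules below are illustrative:
+
+        >>> conv = nn.Conv2d(3, 8, 3, bias=False)
+        >>> bn = nn.BatchNorm2d(8).eval()
+        >>> x = torch.rand(1, 3, 16, 16)
+        >>> expected = bn(conv(x))
+        >>> torch.allclose(_fuse_conv_bn(conv, bn)(x), expected, atol=1e-6)
+        True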
+ """ + conv_w = conv.weight + conv_b = conv.bias if conv.bias is not None else torch.zeros_like( + bn.running_mean) + + factor = bn.weight / torch.sqrt(bn.running_var + bn.eps) + conv.weight = nn.Parameter(conv_w * + factor.reshape([conv.out_channels, 1, 1, 1])) + conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias) + return conv + + +def fuse_conv_bn(module): + """Recursively fuse conv and bn in a module. + + During inference, the functionary of batch norm layers is turned off + but only the mean and var alone channels are used, which exposes the + chance to fuse it with the preceding conv layers to save computations and + simplify network structures. + + Args: + module (nn.Module): Module to be fused. + + Returns: + nn.Module: Fused module. + """ + last_conv = None + last_conv_name = None + + for name, child in module.named_children(): + if isinstance(child, + (nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm)): + if last_conv is None: # only fuse BN that is after Conv + continue + fused_conv = _fuse_conv_bn(last_conv, child) + module._modules[last_conv_name] = fused_conv + # To reduce changes, set BN as Identity instead of deleting it. + module._modules[name] = nn.Identity() + last_conv = None + elif isinstance(child, nn.Conv2d): + last_conv = child + last_conv_name = name + else: + fuse_conv_bn(child) + return module diff --git a/mmcv/models/utils/grid_mask.py b/mmcv/models/utils/grid_mask.py new file mode 100755 index 0000000..2986e52 --- /dev/null +++ b/mmcv/models/utils/grid_mask.py @@ -0,0 +1,124 @@ +import torch +import torch.nn as nn +import numpy as np +from PIL import Image +from mmcv.utils import force_fp32, auto_fp16 + +class Grid(object): + def __init__(self, use_h, use_w, rotate = 1, offset=False, ratio = 0.5, mode=0, prob = 1.): + self.use_h = use_h + self.use_w = use_w + self.rotate = rotate + self.offset = offset + self.ratio = ratio + self.mode=mode + self.st_prob = prob + self.prob = prob + + def set_prob(self, epoch, max_epoch): + self.prob = self.st_prob * epoch / max_epoch + + def __call__(self, img, label): + if np.random.rand() > self.prob: + return img, label + h = img.size(1) + w = img.size(2) + self.d1 = 2 + self.d2 = min(h, w) + hh = int(1.5*h) + ww = int(1.5*w) + d = np.random.randint(self.d1, self.d2) + if self.ratio == 1: + self.l = np.random.randint(1, d) + else: + self.l = min(max(int(d*self.ratio+0.5),1),d-1) + mask = np.ones((hh, ww), np.float32) + st_h = np.random.randint(d) + st_w = np.random.randint(d) + if self.use_h: + for i in range(hh//d): + s = d*i + st_h + t = min(s+self.l, hh) + mask[s:t,:] *= 0 + if self.use_w: + for i in range(ww//d): + s = d*i + st_w + t = min(s+self.l, ww) + mask[:,s:t] *= 0 + + r = np.random.randint(self.rotate) + mask = Image.fromarray(np.uint8(mask)) + mask = mask.rotate(r) + mask = np.asarray(mask) + mask = mask[(hh-h)//2:(hh-h)//2+h, (ww-w)//2:(ww-w)//2+w] + + mask = torch.from_numpy(mask).float() + if self.mode == 1: + mask = 1-mask + + mask = mask.expand_as(img) + if self.offset: + offset = torch.from_numpy(2 * (np.random.rand(h,w) - 0.5)).float() + offset = (1 - mask) * offset + img = img * mask + offset + else: + img = img * mask + + return img, label + + +class GridMask(nn.Module): + def __init__(self, use_h, use_w, rotate = 1, offset=False, ratio = 0.5, mode=0, prob = 1.): + super(GridMask, self).__init__() + self.use_h = use_h + self.use_w = use_w + self.rotate = rotate + self.offset = offset + self.ratio = ratio + self.mode = mode + self.st_prob = prob + self.prob = prob + self.fp16_enable 
= False + def set_prob(self, epoch, max_epoch): + self.prob = self.st_prob * epoch / max_epoch #+ 1.#0.5 + @auto_fp16() + def forward(self, x): + if np.random.rand() > self.prob or not self.training: + return x + n,c,h,w = x.size() + x = x.view(-1,h,w) + hh = int(1.5*h) + ww = int(1.5*w) + d = np.random.randint(2, h) + self.l = min(max(int(d*self.ratio+0.5),1),d-1) + mask = np.ones((hh, ww), np.float32) + st_h = np.random.randint(d) + st_w = np.random.randint(d) + if self.use_h: + for i in range(hh//d): + s = d*i + st_h + t = min(s+self.l, hh) + mask[s:t,:] *= 0 + if self.use_w: + for i in range(ww//d): + s = d*i + st_w + t = min(s+self.l, ww) + mask[:,s:t] *= 0 + + r = np.random.randint(self.rotate) + mask = Image.fromarray(np.uint8(mask)) + mask = mask.rotate(r) + mask = np.asarray(mask) + mask = mask[(hh-h)//2:(hh-h)//2+h, (ww-w)//2:(ww-w)//2+w] + + mask = torch.from_numpy(mask).to(x.dtype).cuda() + if self.mode == 1: + mask = 1-mask + mask = mask.expand_as(x) + if self.offset: + offset = torch.from_numpy(2 * (np.random.rand(h,w) - 0.5)).to(x.dtype).cuda() + x = x * mask + offset * (1 - mask) + else: + x = x * mask + + return x.view(n,c,h,w) \ No newline at end of file diff --git a/mmcv/models/utils/positional_encoding.py b/mmcv/models/utils/positional_encoding.py new file mode 100644 index 0000000..785516f --- /dev/null +++ b/mmcv/models/utils/positional_encoding.py @@ -0,0 +1,162 @@ +import math + +import torch +import torch.nn as nn +from mmcv.models.bricks.transformer import POSITIONAL_ENCODING +from mmcv.models.backbones.base_module import BaseModule + + +@POSITIONAL_ENCODING.register_module() +class SinePositionalEncoding(BaseModule): + """Position encoding with sine and cosine functions. + + See `End-to-End Object Detection with Transformers + `_ for details. + + Args: + num_feats (int): The feature dimension for each position + along x-axis or y-axis. Note the final returned dimension + for each position is 2 times of this value. + temperature (int, optional): The temperature used for scaling + the position embedding. Defaults to 10000. + normalize (bool, optional): Whether to normalize the position + embedding. Defaults to False. + scale (float, optional): A scale factor that scales the position + embedding. The scale will be used only when `normalize` is True. + Defaults to 2*pi. + eps (float, optional): A value added to the denominator for + numerical stability. Defaults to 1e-6. + offset (float): offset add to embed when do the normalization. + Defaults to 0. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + num_feats, + temperature=10000, + normalize=False, + scale=2 * math.pi, + eps=1e-6, + offset=0., + init_cfg=None): + super(SinePositionalEncoding, self).__init__(init_cfg) + if normalize: + assert isinstance(scale, (float, int)), 'when normalize is set,' \ + 'scale should be provided and in float or int type, ' \ + f'found {type(scale)}' + self.num_feats = num_feats + self.temperature = temperature + self.normalize = normalize + self.scale = scale + self.eps = eps + self.offset = offset + + def forward(self, mask): + """Forward function for `SinePositionalEncoding`. + + Args: + mask (Tensor): ByteTensor mask. Non-zero values representing + ignored positions, while zero values means valid positions + for this image. Shape [bs, h, w]. + + Returns: + pos (Tensor): Returned position embedding with shape + [bs, num_feats*2, h, w]. 
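+
+        Example:
+            Shape sketch (sizes are illustrative):
+
+            >>> pos_enc = SinePositionalEncoding(num_feats=128, normalize=True)
+            >>> mask = torch.zeros(1, 32, 32, dtype=torch.uint8)
+            >>> pos_enc(mask).shape
+            torch.Size([1, 256, 32, 32])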
+ """ + # For convenience of exporting to ONNX, it's required to convert + # `masks` from bool to int. + mask = mask.to(torch.int) + not_mask = 1 - mask # logical_not + y_embed = not_mask.cumsum(1, dtype=torch.float32) + x_embed = not_mask.cumsum(2, dtype=torch.float32) + if self.normalize: + y_embed = (y_embed + self.offset) / \ + (y_embed[:, -1:, :] + self.eps) * self.scale + x_embed = (x_embed + self.offset) / \ + (x_embed[:, :, -1:] + self.eps) * self.scale + dim_t = torch.arange( + self.num_feats, dtype=torch.float32, device=mask.device) + dim_t = self.temperature**(2 * (dim_t // 2) / self.num_feats) + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + # use `view` instead of `flatten` for dynamically exporting to ONNX + B, H, W = mask.size() + pos_x = torch.stack( + (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), + dim=4).view(B, H, W, -1) + pos_y = torch.stack( + (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), + dim=4).view(B, H, W, -1) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + return pos + + def __repr__(self): + """str: a string that describes the module""" + repr_str = self.__class__.__name__ + repr_str += f'(num_feats={self.num_feats}, ' + repr_str += f'temperature={self.temperature}, ' + repr_str += f'normalize={self.normalize}, ' + repr_str += f'scale={self.scale}, ' + repr_str += f'eps={self.eps})' + return repr_str + + +@POSITIONAL_ENCODING.register_module() +class LearnedPositionalEncoding(BaseModule): + """Position embedding with learnable embedding weights. + + Args: + num_feats (int): The feature dimension for each position + along x-axis or y-axis. The final returned dimension for + each position is 2 times of this value. + row_num_embed (int, optional): The dictionary size of row embeddings. + Default 50. + col_num_embed (int, optional): The dictionary size of col embeddings. + Default 50. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__(self, + num_feats, + row_num_embed=50, + col_num_embed=50, + init_cfg=dict(type='Uniform', layer='Embedding')): + super(LearnedPositionalEncoding, self).__init__(init_cfg) + self.row_embed = nn.Embedding(row_num_embed, num_feats) + self.col_embed = nn.Embedding(col_num_embed, num_feats) + self.num_feats = num_feats + self.row_num_embed = row_num_embed + self.col_num_embed = col_num_embed + + def forward(self, mask): + """Forward function for `LearnedPositionalEncoding`. + + Args: + mask (Tensor): ByteTensor mask. Non-zero values representing + ignored positions, while zero values means valid positions + for this image. Shape [bs, h, w]. + + Returns: + pos (Tensor): Returned position embedding with shape + [bs, num_feats*2, h, w]. 
+ """ + h, w = mask.shape[-2:] + x = torch.arange(w, device=mask.device) + y = torch.arange(h, device=mask.device) + x_embed = self.col_embed(x) + y_embed = self.row_embed(y) + pos = torch.cat( + (x_embed.unsqueeze(0).repeat(h, 1, 1), y_embed.unsqueeze(1).repeat( + 1, w, 1)), + dim=-1).permute(2, 0, + 1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1) + return pos + + def __repr__(self): + """str: a string that describes the module""" + repr_str = self.__class__.__name__ + repr_str += f'(num_feats={self.num_feats}, ' + repr_str += f'row_num_embed={self.row_num_embed}, ' + repr_str += f'col_num_embed={self.col_num_embed})' + return repr_str diff --git a/mmcv/models/utils/res_layer.py b/mmcv/models/utils/res_layer.py new file mode 100644 index 0000000..03ca1f2 --- /dev/null +++ b/mmcv/models/utils/res_layer.py @@ -0,0 +1,191 @@ +from ..bricks.conv import build_conv_layer +from ..bricks.norm import build_norm_layer +# from ..bricks import build_conv_layer, build_norm_layer +from mmcv.models.backbones.base_module import BaseModule, Sequential +from torch import nn as nn + + +class ResLayer(Sequential): + """ResLayer to build ResNet style backbone. + + Args: + block (nn.Module): block used to build ResLayer. + inplanes (int): inplanes of block. + planes (int): planes of block. + num_blocks (int): number of blocks. + stride (int): stride of the first block. Default: 1 + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + downsample_first (bool): Downsample at the first block or last block. + False for Hourglass, True for ResNet. Default: True + """ + + def __init__(self, + block, + inplanes, + planes, + num_blocks, + stride=1, + avg_down=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + downsample_first=True, + **kwargs): + self.block = block + + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = [] + conv_stride = stride + if avg_down: + conv_stride = 1 + downsample.append( + nn.AvgPool2d( + kernel_size=stride, + stride=stride, + ceil_mode=True, + count_include_pad=False)) + downsample.extend([ + build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=conv_stride, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1] + ]) + downsample = nn.Sequential(*downsample) + + layers = [] + if downsample_first: + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + inplanes = planes * block.expansion + for _ in range(1, num_blocks): + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + + else: # downsample_first=False is for HourglassModule + for _ in range(num_blocks - 1): + layers.append( + block( + inplanes=inplanes, + planes=inplanes, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + super(ResLayer, self).__init__(*layers) + + +class SimplifiedBasicBlock(BaseModule): + """Simplified version of original basic residual block. This is used in + `SCNet `_. 
+
+    - Norm layer is now optional
+    - Last ReLU in forward function is removed
+    """
+    expansion = 1
+
+    def __init__(self,
+                 inplanes,
+                 planes,
+                 stride=1,
+                 dilation=1,
+                 downsample=None,
+                 style='pytorch',
+                 with_cp=False,
+                 conv_cfg=None,
+                 norm_cfg=dict(type='BN'),
+                 dcn=None,
+                 plugins=None,
+                 init_cfg=None):
+        super(SimplifiedBasicBlock, self).__init__(init_cfg)
+        assert dcn is None, 'Not implemented yet.'
+        assert plugins is None, 'Not implemented yet.'
+        assert not with_cp, 'Not implemented yet.'
+        self.with_norm = norm_cfg is not None
+        with_bias = True if norm_cfg is None else False
+        self.conv1 = build_conv_layer(
+            conv_cfg,
+            inplanes,
+            planes,
+            3,
+            stride=stride,
+            padding=dilation,
+            dilation=dilation,
+            bias=with_bias)
+        if self.with_norm:
+            self.norm1_name, norm1 = build_norm_layer(
+                norm_cfg, planes, postfix=1)
+            self.add_module(self.norm1_name, norm1)
+        self.conv2 = build_conv_layer(
+            conv_cfg, planes, planes, 3, padding=1, bias=with_bias)
+        if self.with_norm:
+            self.norm2_name, norm2 = build_norm_layer(
+                norm_cfg, planes, postfix=2)
+            self.add_module(self.norm2_name, norm2)
+
+        self.relu = nn.ReLU(inplace=True)
+        self.downsample = downsample
+        self.stride = stride
+        self.dilation = dilation
+        self.with_cp = with_cp
+
+    @property
+    def norm1(self):
+        """nn.Module: normalization layer after the first convolution layer"""
+        return getattr(self, self.norm1_name) if self.with_norm else None
+
+    @property
+    def norm2(self):
+        """nn.Module: normalization layer after the second convolution
+        layer"""
+        return getattr(self, self.norm2_name) if self.with_norm else None
+
+    def forward(self, x):
+        """Forward function."""
+
+        identity = x
+
+        out = self.conv1(x)
+        if self.with_norm:
+            out = self.norm1(out)
+        out = self.relu(out)
+
+        out = self.conv2(out)
+        if self.with_norm:
+            out = self.norm2(out)
+
+        if self.downsample is not None:
+            identity = self.downsample(x)
+
+        out += identity
+
+        return out
diff --git a/mmcv/models/utils/transformer.py b/mmcv/models/utils/transformer.py
new file mode 100644
index 0000000..7d6c0c3
--- /dev/null
+++ b/mmcv/models/utils/transformer.py
@@ -0,0 +1,800 @@
+import math
+import warnings
+
+import torch
+import torch.nn as nn
+from ..bricks.activation import build_activation_layer
+from ..bricks.norm import build_norm_layer
+from .weight_init import xavier_init
+# from mmcv.models import build_activation_layer, build_norm_layer, xavier_init
+from mmcv.models.bricks.registry import (TRANSFORMER_LAYER,
+                                         TRANSFORMER_LAYER_SEQUENCE)
+from mmcv.models.bricks.transformer import (BaseTransformerLayer,
+                                            TransformerLayerSequence,
+                                            build_transformer_layer_sequence)
+from mmcv.models.backbones.base_module import BaseModule
+from torch.nn.init import normal_
+
+from mmcv.models.utils.builder import TRANSFORMER
+
+try:
+    from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention
+
+except ImportError:
+    warnings.warn(
+        '`MultiScaleDeformableAttention` in MMCV has been moved to '
+        '`mmcv.ops.multi_scale_deform_attn`, please update your MMCV')
+    from ..bricks.transformer import MultiScaleDeformableAttention
+
+
+def inverse_sigmoid(x, eps=1e-5):
+    """Inverse function of sigmoid.
+
+    Args:
+        x (Tensor): The tensor to invert.
+        eps (float): EPS to avoid numerical
+            overflow. Defaults 1e-5.
+    Returns:
+        Tensor: The inverse of sigmoid applied to x, with the
+            same shape as the input.
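+
+    Example:
+        Round-trip sanity check:
+
+        >>> x = torch.tensor([0.1, 0.5, 0.9])
+        >>> torch.sigmoid(inverse_sigmoid(x))
+        tensor([0.1000, 0.5000, 0.9000])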
+    """
+    x = x.clamp(min=0, max=1)
+    x1 = x.clamp(min=eps)
+    x2 = (1 - x).clamp(min=eps)
+    return torch.log(x1 / x2)
+
+
+@TRANSFORMER_LAYER.register_module()
+class DetrTransformerDecoderLayer(BaseTransformerLayer):
+    """Implements decoder layer in DETR transformer.
+
+    Args:
+        attn_cfgs (list[`mmcv.ConfigDict`] | list[dict] | dict):
+            Configs for self_attention or cross_attention, the order
+            should be consistent with it in `operation_order`. If it is
+            a dict, it would be expanded to the number of attention in
+            `operation_order`.
+        feedforward_channels (int): The hidden dimension for FFNs.
+        ffn_dropout (float): Probability of an element to be zeroed
+            in ffn. Default: 0.0.
+        operation_order (tuple[str]): The execution order of operation
+            in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm').
+            Default: None.
+        act_cfg (dict): The activation config for FFNs.
+            Default: dict(type='ReLU', inplace=True).
+        norm_cfg (dict): Config dict for normalization layer.
+            Default: dict(type='LN').
+        ffn_num_fcs (int): The number of fully-connected layers in FFNs.
+            Default: 2.
+    """
+
+    def __init__(self,
+                 attn_cfgs,
+                 feedforward_channels,
+                 ffn_dropout=0.0,
+                 operation_order=None,
+                 act_cfg=dict(type='ReLU', inplace=True),
+                 norm_cfg=dict(type='LN'),
+                 ffn_num_fcs=2,
+                 with_cp=True,
+                 **kwargs):
+        super(DetrTransformerDecoderLayer, self).__init__(
+            attn_cfgs=attn_cfgs,
+            feedforward_channels=feedforward_channels,
+            ffn_dropout=ffn_dropout,
+            operation_order=operation_order,
+            act_cfg=act_cfg,
+            norm_cfg=norm_cfg,
+            ffn_num_fcs=ffn_num_fcs,
+            **kwargs)
+        assert len(operation_order) == 6
+        assert set(operation_order) == set(
+            ['self_attn', 'norm', 'cross_attn', 'ffn'])
+
+
+@TRANSFORMER_LAYER_SEQUENCE.register_module()
+class DetrTransformerEncoder(TransformerLayerSequence):
+    """TransformerEncoder of DETR.
+
+    Args:
+        post_norm_cfg (dict): Config of last normalization layer. Default:
+            `LN`. Only used when `self.pre_norm` is `True`.
+    """
+
+    def __init__(self, *args, post_norm_cfg=dict(type='LN'), **kwargs):
+        super(DetrTransformerEncoder, self).__init__(*args, **kwargs)
+        if post_norm_cfg is not None:
+            self.post_norm = build_norm_layer(
+                post_norm_cfg, self.embed_dims)[1] if self.pre_norm else None
+        else:
+            assert not self.pre_norm, \
+                f'Using prenorm in {self.__class__.__name__}, ' \
+                'please specify post_norm_cfg'
+            self.post_norm = None
+
+    def forward(self, *args, **kwargs):
+        """Forward function for `TransformerEncoder`.
+
+        Returns:
+            Tensor: forwarded results with shape [num_query, bs, embed_dims].
+        """
+        x = super(DetrTransformerEncoder, self).forward(*args, **kwargs)
+        if self.post_norm is not None:
+            x = self.post_norm(x)
+        return x
+
+
+@TRANSFORMER_LAYER_SEQUENCE.register_module()
+class DetrTransformerDecoder(TransformerLayerSequence):
+    """Implements the decoder in DETR transformer.
+
+    Args:
+        return_intermediate (bool): Whether to return intermediate outputs.
+        post_norm_cfg (dict): Config of last normalization layer. Default:
+            `LN`.
+    """
+
+    def __init__(self,
+                 *args,
+                 post_norm_cfg=dict(type='LN'),
+                 return_intermediate=False,
+                 **kwargs):
+
+        super(DetrTransformerDecoder, self).__init__(*args, **kwargs)
+        self.return_intermediate = return_intermediate
+        if post_norm_cfg is not None:
+            self.post_norm = build_norm_layer(post_norm_cfg,
+                                              self.embed_dims)[1]
+        else:
+            self.post_norm = None
+
+    def forward(self, query, *args, **kwargs):
+        """Forward function for `TransformerDecoder`.
+
+        Args:
+            query (Tensor): Input query with shape
+                `(num_query, bs, embed_dims)`.
+ + Returns: + Tensor: Results with shape [1, num_query, bs, embed_dims] when + return_intermediate is `False`, otherwise it has shape + [num_layers, num_query, bs, embed_dims]. + """ + if not self.return_intermediate: + x = super().forward(query, *args, **kwargs) + if self.post_norm: + x = self.post_norm(x)[None] + return x + + intermediate = [] + for layer in self.layers: + query = layer(query, *args, **kwargs) + if self.return_intermediate: + if self.post_norm is not None: + intermediate.append(self.post_norm(query)) + else: + intermediate.append(query) + return torch.stack(intermediate) + + +@TRANSFORMER.register_module() +class Transformer(BaseModule): + """Implements the DETR transformer. + + Following the official DETR implementation, this module copy-paste + from torch.nn.Transformer with modifications: + + * positional encodings are passed in MultiheadAttention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers + + See `paper: End-to-End Object Detection with Transformers + `_ for details. + + Args: + encoder (`mmcv.ConfigDict` | Dict): Config of + TransformerEncoder. Defaults to None. + decoder ((`mmcv.ConfigDict` | Dict)): Config of + TransformerDecoder. Defaults to None + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Defaults to None. + """ + + def __init__(self, encoder=None, decoder=None, init_cfg=None): + super(Transformer, self).__init__(init_cfg=init_cfg) + self.encoder = build_transformer_layer_sequence(encoder) + self.decoder = build_transformer_layer_sequence(decoder) + self.embed_dims = self.encoder.embed_dims + + def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + xavier_init(m, distribution='uniform') + self._is_init = True + + def forward(self, x, mask, query_embed, pos_embed): + """Forward function for `Transformer`. + + Args: + x (Tensor): Input query with shape [bs, c, h, w] where + c = embed_dims. + mask (Tensor): The key_padding_mask used for encoder and decoder, + with shape [bs, h, w]. + query_embed (Tensor): The query embedding for decoder, with shape + [num_query, c]. + pos_embed (Tensor): The positional encoding for encoder and + decoder, with the same shape as `x`. + + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + + - out_dec: Output from decoder. If return_intermediate_dec \ + is True output has shape [num_dec_layers, bs, + num_query, embed_dims], else has shape [1, bs, \ + num_query, embed_dims]. + - memory: Output results from encoder, with shape \ + [bs, embed_dims, h, w]. 
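+
+        Example (illustrative sketch; the `transformer` instance and all
+        shapes below are assumptions, not part of the original patch):
+            >>> bs, c, h, w, num_query = 2, 256, 8, 8, 100
+            >>> x = torch.rand(bs, c, h, w)
+            >>> mask = torch.zeros(bs, h, w, dtype=torch.bool)
+            >>> query_embed = torch.rand(num_query, c)
+            >>> pos_embed = torch.rand(bs, c, h, w)
+            >>> out_dec, memory = transformer(x, mask, query_embed, pos_embed)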
+ """ + bs, c, h, w = x.shape + # use `view` instead of `flatten` for dynamically exporting to ONNX + x = x.view(bs, c, -1).permute(2, 0, 1) # [bs, c, h, w] -> [h*w, bs, c] + pos_embed = pos_embed.view(bs, c, -1).permute(2, 0, 1) + query_embed = query_embed.unsqueeze(1).repeat( + 1, bs, 1) # [num_query, dim] -> [num_query, bs, dim] + mask = mask.view(bs, -1) # [bs, h, w] -> [bs, h*w] + memory = self.encoder( + query=x, + key=None, + value=None, + query_pos=pos_embed, + query_key_padding_mask=mask) + target = torch.zeros_like(query_embed) + # out_dec: [num_layers, num_query, bs, dim] + out_dec = self.decoder( + query=target, + key=memory, + value=memory, + key_pos=pos_embed, + query_pos=query_embed, + key_padding_mask=mask) + out_dec = out_dec.transpose(1, 2) + memory = memory.permute(1, 2, 0).reshape(bs, c, h, w) + return out_dec, memory + + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class DeformableDetrTransformerDecoder(TransformerLayerSequence): + """Implements the decoder in DETR transformer. + + Args: + return_intermediate (bool): Whether to return intermediate outputs. + coder_norm_cfg (dict): Config of last normalization layer. Default: + `LN`. + """ + + def __init__(self, *args, return_intermediate=False, **kwargs): + + super(DeformableDetrTransformerDecoder, self).__init__(*args, **kwargs) + self.return_intermediate = return_intermediate + + def forward(self, + query, + *args, + reference_points=None, + valid_ratios=None, + reg_branches=None, + **kwargs): + """Forward function for `TransformerDecoder`. + + Args: + query (Tensor): Input query with shape + `(num_query, bs, embed_dims)`. + reference_points (Tensor): The reference + points of offset. has shape + (bs, num_query, 4) when as_two_stage, + otherwise has shape ((bs, num_query, 2). + valid_ratios (Tensor): The radios of valid + points on the feature map, has shape + (bs, num_levels, 2) + reg_branch: (obj:`nn.ModuleList`): Used for + refining the regression results. Only would + be passed when with_box_refine is True, + otherwise would be passed a `None`. + + Returns: + Tensor: Results with shape [1, num_query, bs, embed_dims] when + return_intermediate is `False`, otherwise it has shape + [num_layers, num_query, bs, embed_dims]. 
+ """ + output = query + intermediate = [] + intermediate_reference_points = [] + for lid, layer in enumerate(self.layers): + if reference_points.shape[-1] == 4: + reference_points_input = reference_points[:, :, None] * \ + torch.cat([valid_ratios, valid_ratios], -1)[:, None] + else: + assert reference_points.shape[-1] == 2 + reference_points_input = reference_points[:, :, None] * \ + valid_ratios[:, None] + output = layer( + output, + *args, + reference_points=reference_points_input, + **kwargs) + output = output.permute(1, 0, 2) + + if reg_branches is not None: + tmp = reg_branches[lid](output) + if reference_points.shape[-1] == 4: + new_reference_points = tmp + inverse_sigmoid( + reference_points) + new_reference_points = new_reference_points.sigmoid() + else: + assert reference_points.shape[-1] == 2 + new_reference_points = tmp + new_reference_points[..., :2] = tmp[ + ..., :2] + inverse_sigmoid(reference_points) + new_reference_points = new_reference_points.sigmoid() + reference_points = new_reference_points.detach() + + output = output.permute(1, 0, 2) + if self.return_intermediate: + intermediate.append(output) + intermediate_reference_points.append(reference_points) + + if self.return_intermediate: + return torch.stack(intermediate), torch.stack( + intermediate_reference_points) + + return output, reference_points + + +@TRANSFORMER.register_module() +class DeformableDetrTransformer(Transformer): + """Implements the DeformableDETR transformer. + + Args: + as_two_stage (bool): Generate query from encoder features. + Default: False. + num_feature_levels (int): Number of feature maps from FPN: + Default: 4. + two_stage_num_proposals (int): Number of proposals when set + `as_two_stage` as True. Default: 300. + """ + + def __init__(self, + as_two_stage=False, + num_feature_levels=4, + two_stage_num_proposals=300, + **kwargs): + super(DeformableDetrTransformer, self).__init__(**kwargs) + self.as_two_stage = as_two_stage + self.num_feature_levels = num_feature_levels + self.two_stage_num_proposals = two_stage_num_proposals + self.embed_dims = self.encoder.embed_dims + self.init_layers() + + def init_layers(self): + """Initialize layers of the DeformableDetrTransformer.""" + self.level_embeds = nn.Parameter( + torch.Tensor(self.num_feature_levels, self.embed_dims)) + + if self.as_two_stage: + self.enc_output = nn.Linear(self.embed_dims, self.embed_dims) + self.enc_output_norm = nn.LayerNorm(self.embed_dims) + self.pos_trans = nn.Linear(self.embed_dims * 2, + self.embed_dims * 2) + self.pos_trans_norm = nn.LayerNorm(self.embed_dims * 2) + else: + self.reference_points = nn.Linear(self.embed_dims, 2) + + def init_weights(self): + """Initialize the transformer weights.""" + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + for m in self.modules(): + if isinstance(m, MultiScaleDeformableAttention): + m.init_weights() + if not self.as_two_stage: + xavier_init(self.reference_points, distribution='uniform', bias=0.) + normal_(self.level_embeds) + + def gen_encoder_output_proposals(self, memory, memory_padding_mask, + spatial_shapes): + """Generate proposals from encoded memory. + + Args: + memory (Tensor) : The output of encoder, + has shape (bs, num_key, embed_dim). num_key is + equal the number of points on feature map from + all level. + memory_padding_mask (Tensor): Padding mask for memory. + has shape (bs, num_key). + spatial_shapes (Tensor): The shape of all feature maps. + has shape (num_level, 2). + + Returns: + tuple: A tuple of feature map and bbox prediction. 
+ + - output_memory (Tensor): The input of decoder, \ + has shape (bs, num_key, embed_dim). num_key is \ + equal the number of points on feature map from \ + all levels. + - output_proposals (Tensor): The normalized proposal \ + after a inverse sigmoid, has shape \ + (bs, num_keys, 4). + """ + + N, S, C = memory.shape + proposals = [] + _cur = 0 + for lvl, (H, W) in enumerate(spatial_shapes): + mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H * W)].view( + N, H, W, 1) + valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1) + valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1) + + grid_y, grid_x = torch.meshgrid( + torch.linspace( + 0, H - 1, H, dtype=torch.float32, device=memory.device), + torch.linspace( + 0, W - 1, W, dtype=torch.float32, device=memory.device)) + grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) + + scale = torch.cat([valid_W.unsqueeze(-1), + valid_H.unsqueeze(-1)], 1).view(N, 1, 1, 2) + grid = (grid.unsqueeze(0).expand(N, -1, -1, -1) + 0.5) / scale + wh = torch.ones_like(grid) * 0.05 * (2.0**lvl) + proposal = torch.cat((grid, wh), -1).view(N, -1, 4) + proposals.append(proposal) + _cur += (H * W) + output_proposals = torch.cat(proposals, 1) + output_proposals_valid = ((output_proposals > 0.01) & + (output_proposals < 0.99)).all( + -1, keepdim=True) + output_proposals = torch.log(output_proposals / (1 - output_proposals)) + output_proposals = output_proposals.masked_fill( + memory_padding_mask.unsqueeze(-1), float('inf')) + output_proposals = output_proposals.masked_fill( + ~output_proposals_valid, float('inf')) + + output_memory = memory + output_memory = output_memory.masked_fill( + memory_padding_mask.unsqueeze(-1), float(0)) + output_memory = output_memory.masked_fill(~output_proposals_valid, + float(0)) + output_memory = self.enc_output_norm(self.enc_output(output_memory)) + return output_memory, output_proposals + + @staticmethod + def get_reference_points(spatial_shapes, valid_ratios, device): + """Get the reference points used in decoder. + + Args: + spatial_shapes (Tensor): The shape of all + feature maps, has shape (num_level, 2). + valid_ratios (Tensor): The radios of valid + points on the feature map, has shape + (bs, num_levels, 2) + device (obj:`device`): The device where + reference_points should be. + + Returns: + Tensor: reference points used in decoder, has \ + shape (bs, num_keys, num_levels, 2). 
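+
+        Example (illustrative sketch; values are assumptions):
+            >>> spatial_shapes = torch.tensor([[32, 32], [16, 16]])
+            >>> valid_ratios = torch.ones(2, 2, 2)  # (bs, num_levels, 2)
+            >>> ref = DeformableDetrTransformer.get_reference_points(
+            ...     spatial_shapes, valid_ratios, device='cpu')
+            >>> ref.shape  # (bs, 32*32 + 16*16, num_levels, 2)
+            torch.Size([2, 1280, 2, 2])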
+ """ + reference_points_list = [] + for lvl, (H, W) in enumerate(spatial_shapes): + # TODO check this 0.5 + ref_y, ref_x = torch.meshgrid( + torch.linspace( + 0.5, H - 0.5, H, dtype=torch.float32, device=device), + torch.linspace( + 0.5, W - 0.5, W, dtype=torch.float32, device=device)) + ref_y = ref_y.reshape(-1)[None] / ( + valid_ratios[:, None, lvl, 1] * H) + ref_x = ref_x.reshape(-1)[None] / ( + valid_ratios[:, None, lvl, 0] * W) + ref = torch.stack((ref_x, ref_y), -1) + reference_points_list.append(ref) + reference_points = torch.cat(reference_points_list, 1) + reference_points = reference_points[:, :, None] * valid_ratios[:, None] + return reference_points + + def get_valid_ratio(self, mask): + """Get the valid radios of feature maps of all level.""" + _, H, W = mask.shape + valid_H = torch.sum(~mask[:, :, 0], 1) + valid_W = torch.sum(~mask[:, 0, :], 1) + valid_ratio_h = valid_H.float() / H + valid_ratio_w = valid_W.float() / W + valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1) + return valid_ratio + + def get_proposal_pos_embed(self, + proposals, + num_pos_feats=128, + temperature=10000): + """Get the position embedding of proposal.""" + scale = 2 * math.pi + dim_t = torch.arange( + num_pos_feats, dtype=torch.float32, device=proposals.device) + dim_t = temperature**(2 * (dim_t // 2) / num_pos_feats) + # N, L, 4 + proposals = proposals.sigmoid() * scale + # N, L, 4, 128 + pos = proposals[:, :, :, None] / dim_t + # N, L, 4, 64, 2 + pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), + dim=4).flatten(2) + return pos + + def forward(self, + mlvl_feats, + mlvl_masks, + query_embed, + mlvl_pos_embeds, + reg_branches=None, + cls_branches=None, + **kwargs): + """Forward function for `Transformer`. + + Args: + mlvl_feats (list(Tensor)): Input queries from + different level. Each element has shape + [bs, embed_dims, h, w]. + mlvl_masks (list(Tensor)): The key_padding_mask from + different level used for encoder and decoder, + each element has shape [bs, h, w]. + query_embed (Tensor): The query embedding for decoder, + with shape [num_query, c]. + mlvl_pos_embeds (list(Tensor)): The positional encoding + of feats from different level, has the shape + [bs, embed_dims, h, w]. + reg_branches (obj:`nn.ModuleList`): Regression heads for + feature maps from each decoder layer. Only would + be passed when + `with_box_refine` is True. Default to None. + cls_branches (obj:`nn.ModuleList`): Classification heads + for feature maps from each decoder layer. Only would + be passed when `as_two_stage` + is True. Default to None. + + + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + + - inter_states: Outputs from decoder. If + return_intermediate_dec is True output has shape \ + (num_dec_layers, bs, num_query, embed_dims), else has \ + shape (1, bs, num_query, embed_dims). + - init_reference_out: The initial value of reference \ + points, has shape (bs, num_queries, 4). + - inter_references_out: The internal value of reference \ + points in decoder, has shape \ + (num_dec_layers, bs,num_query, embed_dims) + - enc_outputs_class: The classification score of \ + proposals generated from \ + encoder's feature maps, has shape \ + (batch, h*w, num_classes). \ + Only would be returned when `as_two_stage` is True, \ + otherwise None. + - enc_outputs_coord_unact: The regression results \ + generated from encoder's feature maps., has shape \ + (batch, h*w, 4). Only would \ + be returned when `as_two_stage` is True, \ + otherwise None. 
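+
+        Example (illustrative sketch; the `transformer` instance and all
+        shapes are assumptions; note `query_embed` needs 2 * embed_dims
+        channels because it is split into query and query_pos):
+            >>> mlvl_feats = [torch.rand(2, 256, 16, 16),
+            ...               torch.rand(2, 256, 8, 8)]
+            >>> mlvl_masks = [torch.zeros(2, 16, 16, dtype=torch.bool),
+            ...               torch.zeros(2, 8, 8, dtype=torch.bool)]
+            >>> mlvl_pos = [torch.rand_like(f) for f in mlvl_feats]
+            >>> query_embed = torch.rand(300, 512)
+            >>> outs = transformer(mlvl_feats, mlvl_masks, query_embed,
+            ...                    mlvl_pos)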
+ """ + assert self.as_two_stage or query_embed is not None + + feat_flatten = [] + mask_flatten = [] + lvl_pos_embed_flatten = [] + spatial_shapes = [] + for lvl, (feat, mask, pos_embed) in enumerate( + zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)): + bs, c, h, w = feat.shape + spatial_shape = (h, w) + spatial_shapes.append(spatial_shape) + feat = feat.flatten(2).transpose(1, 2) + mask = mask.flatten(1) + pos_embed = pos_embed.flatten(2).transpose(1, 2) + lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1) + lvl_pos_embed_flatten.append(lvl_pos_embed) + feat_flatten.append(feat) + mask_flatten.append(mask) + feat_flatten = torch.cat(feat_flatten, 1) + mask_flatten = torch.cat(mask_flatten, 1) + lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) + spatial_shapes = torch.as_tensor( + spatial_shapes, dtype=torch.long, device=feat_flatten.device) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) + valid_ratios = torch.stack( + [self.get_valid_ratio(m) for m in mlvl_masks], 1) + + reference_points = \ + self.get_reference_points(spatial_shapes, + valid_ratios, + device=feat.device) + + feat_flatten = feat_flatten.permute(1, 0, 2) # (H*W, bs, embed_dims) + lvl_pos_embed_flatten = lvl_pos_embed_flatten.permute( + 1, 0, 2) # (H*W, bs, embed_dims) + memory = self.encoder( + query=feat_flatten, + key=None, + value=None, + query_pos=lvl_pos_embed_flatten, + query_key_padding_mask=mask_flatten, + spatial_shapes=spatial_shapes, + reference_points=reference_points, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + **kwargs) + + memory = memory.permute(1, 0, 2) + bs, _, c = memory.shape + if self.as_two_stage: + output_memory, output_proposals = \ + self.gen_encoder_output_proposals( + memory, mask_flatten, spatial_shapes) + enc_outputs_class = cls_branches[self.decoder.num_layers]( + output_memory) + enc_outputs_coord_unact = \ + reg_branches[ + self.decoder.num_layers](output_memory) + output_proposals + + topk = self.two_stage_num_proposals + topk_proposals = torch.topk( + enc_outputs_class[..., 0], topk, dim=1)[1] + topk_coords_unact = torch.gather( + enc_outputs_coord_unact, 1, + topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) + topk_coords_unact = topk_coords_unact.detach() + reference_points = topk_coords_unact.sigmoid() + init_reference_out = reference_points + pos_trans_out = self.pos_trans_norm( + self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact))) + query_pos, query = torch.split(pos_trans_out, c, dim=2) + else: + query_pos, query = torch.split(query_embed, c, dim=1) + query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1) + query = query.unsqueeze(0).expand(bs, -1, -1) + reference_points = self.reference_points(query_pos).sigmoid() + init_reference_out = reference_points + + # decoder + query = query.permute(1, 0, 2) + memory = memory.permute(1, 0, 2) + query_pos = query_pos.permute(1, 0, 2) + inter_states, inter_references = self.decoder( + query=query, + key=None, + value=memory, + query_pos=query_pos, + key_padding_mask=mask_flatten, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + reg_branches=reg_branches, + **kwargs) + + inter_references_out = inter_references + if self.as_two_stage: + return inter_states, init_reference_out,\ + inter_references_out, enc_outputs_class,\ + enc_outputs_coord_unact + return inter_states, init_reference_out, \ + inter_references_out, None, None + + 
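+
+# Illustrative sketch (not part of the original patch): the box-refinement
+# update performed per decoder layer above. A regression branch predicts an
+# unnormalized delta that is added to the inverse-sigmoid of the current
+# normalized reference points, and gradients are stopped between layers.
+# The names `reg_branch`, `output` and `ref` are hypothetical.
+#
+#     delta = reg_branch(output)                     # (bs, num_query, 2 or 4)
+#     ref = (delta + inverse_sigmoid(ref)).sigmoid()
+#     ref = ref.detach()                             # per-layer stop-gradient
+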
+@TRANSFORMER.register_module() +class DynamicConv(BaseModule): + """Implements Dynamic Convolution. + + This module generate parameters for each sample and + use bmm to implement 1*1 convolution. Code is modified + from the `official github repo `_ . + + Args: + in_channels (int): The input feature channel. + Defaults to 256. + feat_channels (int): The inner feature channel. + Defaults to 64. + out_channels (int, optional): The output feature channel. + When not specified, it will be set to `in_channels` + by default + input_feat_shape (int): The shape of input feature. + Defaults to 7. + act_cfg (dict): The activation config for DynamicConv. + norm_cfg (dict): Config dict for normalization layer. Default + layer normalization. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + """ + + def __init__(self, + in_channels=256, + feat_channels=64, + out_channels=None, + input_feat_shape=7, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'), + init_cfg=None): + super(DynamicConv, self).__init__(init_cfg) + self.in_channels = in_channels + self.feat_channels = feat_channels + self.out_channels_raw = out_channels + self.input_feat_shape = input_feat_shape + self.act_cfg = act_cfg + self.norm_cfg = norm_cfg + self.out_channels = out_channels if out_channels else in_channels + + self.num_params_in = self.in_channels * self.feat_channels + self.num_params_out = self.out_channels * self.feat_channels + self.dynamic_layer = nn.Linear( + self.in_channels, self.num_params_in + self.num_params_out) + + self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1] + self.norm_out = build_norm_layer(norm_cfg, self.out_channels)[1] + + self.activation = build_activation_layer(act_cfg) + + num_output = self.out_channels * input_feat_shape**2 + self.fc_layer = nn.Linear(num_output, self.out_channels) + self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1] + + def forward(self, param_feature, input_feature): + """Forward function for `DynamicConv`. + + Args: + param_feature (Tensor): The feature can be used + to generate the parameter, has shape + (num_all_proposals, in_channels). + input_feature (Tensor): Feature that + interact with parameters, has shape + (num_all_proposals, in_channels, H, W). + + Returns: + Tensor: The output feature has shape + (num_all_proposals, out_channels). 
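+
+        Example (illustrative sketch; shapes follow the default config and
+        are otherwise assumptions):
+            >>> dynamic_conv = DynamicConv()  # 256 -> 64 -> 256, 7x7 input
+            >>> param_feature = torch.rand(100, 256)
+            >>> input_feature = torch.rand(100, 256, 7, 7)
+            >>> dynamic_conv(param_feature, input_feature).shape
+            torch.Size([100, 256])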
+ """ + num_proposals = param_feature.size(0) + input_feature = input_feature.view(num_proposals, self.in_channels, + -1).permute(2, 0, 1) + + input_feature = input_feature.permute(1, 0, 2) + parameters = self.dynamic_layer(param_feature) + + param_in = parameters[:, :self.num_params_in].view( + -1, self.in_channels, self.feat_channels) + param_out = parameters[:, -self.num_params_out:].view( + -1, self.feat_channels, self.out_channels) + + # input_feature has shape (num_all_proposals, H*W, in_channels) + # param_in has shape (num_all_proposals, in_channels, feat_channels) + # feature has shape (num_all_proposals, H*W, feat_channels) + features = torch.bmm(input_feature, param_in) + features = self.norm_in(features) + features = self.activation(features) + + # param_out has shape (batch_size, feat_channels, out_channels) + features = torch.bmm(features, param_out) + features = self.norm_out(features) + features = self.activation(features) + + features = features.flatten(1) + features = self.fc_layer(features) + features = self.fc_norm(features) + features = self.activation(features) + + return features diff --git a/mmcv/models/utils/weight_init.py b/mmcv/models/utils/weight_init.py new file mode 100644 index 0000000..c347f29 --- /dev/null +++ b/mmcv/models/utils/weight_init.py @@ -0,0 +1,683 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import math +import warnings + +import numpy as np +import torch +import torch.nn as nn +from torch import Tensor + +from mmcv.utils import Registry, build_from_cfg, get_logger, print_log + +INITIALIZERS = Registry('initializer') + + +def update_init_info(module, init_info): + """Update the `_params_init_info` in the module if the value of parameters + are changed. + + Args: + module (obj:`nn.Module`): The module of PyTorch with a user-defined + attribute `_params_init_info` which records the initialization + information. + init_info (str): The string that describes the initialization. + """ + assert hasattr( + module, + '_params_init_info'), f'Can not find `_params_init_info` in {module}' + for name, param in module.named_parameters(): + + assert param in module._params_init_info, ( + f'Find a new :obj:`Parameter` ' + f'named `{name}` during executing the ' + f'`init_weights` of ' + f'`{module.__class__.__name__}`. ' + f'Please do not add or ' + f'replace parameters during executing ' + f'the `init_weights`. 
') + + # The parameter has been changed during executing the + # `init_weights` of module + mean_value = param.data.mean() + if module._params_init_info[param]['tmp_mean_value'] != mean_value: + module._params_init_info[param]['init_info'] = init_info + module._params_init_info[param]['tmp_mean_value'] = mean_value + + +def constant_init(module, val, bias=0): + if hasattr(module, 'weight') and module.weight is not None: + nn.init.constant_(module.weight, val) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def xavier_init(module, gain=1, bias=0, distribution='normal'): + assert distribution in ['uniform', 'normal'] + if hasattr(module, 'weight') and module.weight is not None: + if distribution == 'uniform': + nn.init.xavier_uniform_(module.weight, gain=gain) + else: + nn.init.xavier_normal_(module.weight, gain=gain) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def normal_init(module, mean=0, std=1, bias=0): + if hasattr(module, 'weight') and module.weight is not None: + nn.init.normal_(module.weight, mean, std) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def trunc_normal_init(module: nn.Module, + mean: float = 0, + std: float = 1, + a: float = -2, + b: float = 2, + bias: float = 0) -> None: + if hasattr(module, 'weight') and module.weight is not None: + trunc_normal_(module.weight, mean, std, a, b) # type: ignore + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) # type: ignore + + +def uniform_init(module, a=0, b=1, bias=0): + if hasattr(module, 'weight') and module.weight is not None: + nn.init.uniform_(module.weight, a, b) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def kaiming_init(module, + a=0, + mode='fan_out', + nonlinearity='relu', + bias=0, + distribution='normal'): + assert distribution in ['uniform', 'normal'] + if hasattr(module, 'weight') and module.weight is not None: + if distribution == 'uniform': + nn.init.kaiming_uniform_( + module.weight, a=a, mode=mode, nonlinearity=nonlinearity) + else: + nn.init.kaiming_normal_( + module.weight, a=a, mode=mode, nonlinearity=nonlinearity) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def caffe2_xavier_init(module, bias=0): + # `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch + # Acknowledgment to FAIR's internal code + kaiming_init( + module, + a=1, + mode='fan_in', + nonlinearity='leaky_relu', + bias=bias, + distribution='uniform') + + +def bias_init_with_prob(prior_prob): + """initialize conv/fc bias value according to a given probability value.""" + bias_init = float(-np.log((1 - prior_prob) / prior_prob)) + return bias_init + + +def _get_bases_name(m): + return [b.__name__ for b in m.__class__.__bases__] + + +class BaseInit(object): + + def __init__(self, *, bias=0, bias_prob=None, layer=None): + self.wholemodule = False + if not isinstance(bias, (int, float)): + raise TypeError(f'bias must be a number, but got a {type(bias)}') + + if bias_prob is not None: + if not isinstance(bias_prob, float): + raise TypeError(f'bias_prob type must be float, \ + but got {type(bias_prob)}') + + if layer is not None: + if not isinstance(layer, (str, list)): + raise TypeError(f'layer must be a str or a list of str, \ + but got a {type(layer)}') + else: + layer = [] + + if bias_prob is not None: + self.bias 
= bias_init_with_prob(bias_prob) + else: + self.bias = bias + self.layer = [layer] if isinstance(layer, str) else layer + + def _get_init_info(self): + info = f'{self.__class__.__name__}, bias={self.bias}' + return info + + +@INITIALIZERS.register_module(name='Constant') +class ConstantInit(BaseInit): + """Initialize module parameters with constant values. + + Args: + val (int | float): the value to fill the weights in the module with + bias (int | float): the value to fill the bias. Defaults to 0. + bias_prob (float, optional): the probability for bias initialization. + Defaults to None. + layer (str | list[str], optional): the layer will be initialized. + Defaults to None. + """ + + def __init__(self, val, **kwargs): + super().__init__(**kwargs) + self.val = val + + def __call__(self, module): + + def init(m): + if self.wholemodule: + constant_init(m, self.val, self.bias) + else: + layername = m.__class__.__name__ + basesname = _get_bases_name(m) + if len(set(self.layer) & set([layername] + basesname)): + constant_init(m, self.val, self.bias) + + module.apply(init) + if hasattr(module, '_params_init_info'): + update_init_info(module, init_info=self._get_init_info()) + + def _get_init_info(self): + info = f'{self.__class__.__name__}: val={self.val}, bias={self.bias}' + return info + + +@INITIALIZERS.register_module(name='Xavier') +class XavierInit(BaseInit): + r"""Initialize module parameters with values according to the method + described in `Understanding the difficulty of training deep feedforward + neural networks - Glorot, X. & Bengio, Y. (2010). + `_ + + Args: + gain (int | float): an optional scaling factor. Defaults to 1. + bias (int | float): the value to fill the bias. Defaults to 0. + bias_prob (float, optional): the probability for bias initialization. + Defaults to None. + distribution (str): distribution either be ``'normal'`` + or ``'uniform'``. Defaults to ``'normal'``. + layer (str | list[str], optional): the layer will be initialized. + Defaults to None. + """ + + def __init__(self, gain=1, distribution='normal', **kwargs): + super().__init__(**kwargs) + self.gain = gain + self.distribution = distribution + + def __call__(self, module): + + def init(m): + if self.wholemodule: + xavier_init(m, self.gain, self.bias, self.distribution) + else: + layername = m.__class__.__name__ + basesname = _get_bases_name(m) + if len(set(self.layer) & set([layername] + basesname)): + xavier_init(m, self.gain, self.bias, self.distribution) + + module.apply(init) + if hasattr(module, '_params_init_info'): + update_init_info(module, init_info=self._get_init_info()) + + def _get_init_info(self): + info = f'{self.__class__.__name__}: gain={self.gain}, ' \ + f'distribution={self.distribution}, bias={self.bias}' + return info + + +@INITIALIZERS.register_module(name='Normal') +class NormalInit(BaseInit): + r"""Initialize module parameters with the values drawn from the normal + distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`. + + Args: + mean (int | float):the mean of the normal distribution. Defaults to 0. + std (int | float): the standard deviation of the normal distribution. + Defaults to 1. + bias (int | float): the value to fill the bias. Defaults to 0. + bias_prob (float, optional): the probability for bias initialization. + Defaults to None. + layer (str | list[str], optional): the layer will be initialized. + Defaults to None. 
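+
+    Example (illustrative sketch):
+        >>> init = NormalInit(mean=0, std=0.01, layer='Conv2d')
+        >>> init(nn.Conv2d(3, 16, 3))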
+ + """ + + def __init__(self, mean=0, std=1, **kwargs): + super().__init__(**kwargs) + self.mean = mean + self.std = std + + def __call__(self, module): + + def init(m): + if self.wholemodule: + normal_init(m, self.mean, self.std, self.bias) + else: + layername = m.__class__.__name__ + basesname = _get_bases_name(m) + if len(set(self.layer) & set([layername] + basesname)): + normal_init(m, self.mean, self.std, self.bias) + + module.apply(init) + if hasattr(module, '_params_init_info'): + update_init_info(module, init_info=self._get_init_info()) + + def _get_init_info(self): + info = f'{self.__class__.__name__}: mean={self.mean},' \ + f' std={self.std}, bias={self.bias}' + return info + + +@INITIALIZERS.register_module(name='TruncNormal') +class TruncNormalInit(BaseInit): + r"""Initialize module parameters with the values drawn from the normal + distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values + outside :math:`[a, b]`. + + Args: + mean (float): the mean of the normal distribution. Defaults to 0. + std (float): the standard deviation of the normal distribution. + Defaults to 1. + a (float): The minimum cutoff value. + b ( float): The maximum cutoff value. + bias (float): the value to fill the bias. Defaults to 0. + bias_prob (float, optional): the probability for bias initialization. + Defaults to None. + layer (str | list[str], optional): the layer will be initialized. + Defaults to None. + + """ + + def __init__(self, + mean: float = 0, + std: float = 1, + a: float = -2, + b: float = 2, + **kwargs) -> None: + super().__init__(**kwargs) + self.mean = mean + self.std = std + self.a = a + self.b = b + + def __call__(self, module: nn.Module) -> None: + + def init(m): + if self.wholemodule: + trunc_normal_init(m, self.mean, self.std, self.a, self.b, + self.bias) + else: + layername = m.__class__.__name__ + basesname = _get_bases_name(m) + if len(set(self.layer) & set([layername] + basesname)): + trunc_normal_init(m, self.mean, self.std, self.a, self.b, + self.bias) + + module.apply(init) + if hasattr(module, '_params_init_info'): + update_init_info(module, init_info=self._get_init_info()) + + def _get_init_info(self): + info = f'{self.__class__.__name__}: a={self.a}, b={self.b},' \ + f' mean={self.mean}, std={self.std}, bias={self.bias}' + return info + + +@INITIALIZERS.register_module(name='Uniform') +class UniformInit(BaseInit): + r"""Initialize module parameters with values drawn from the uniform + distribution :math:`\mathcal{U}(a, b)`. + + Args: + a (int | float): the lower bound of the uniform distribution. + Defaults to 0. + b (int | float): the upper bound of the uniform distribution. + Defaults to 1. + bias (int | float): the value to fill the bias. Defaults to 0. + bias_prob (float, optional): the probability for bias initialization. + Defaults to None. + layer (str | list[str], optional): the layer will be initialized. + Defaults to None. 
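+
+    Example (illustrative sketch):
+        >>> init = UniformInit(a=0, b=1, layer='Linear')
+        >>> init(nn.Linear(2, 3))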
+ """ + + def __init__(self, a=0, b=1, **kwargs): + super().__init__(**kwargs) + self.a = a + self.b = b + + def __call__(self, module): + + def init(m): + if self.wholemodule: + uniform_init(m, self.a, self.b, self.bias) + else: + layername = m.__class__.__name__ + basesname = _get_bases_name(m) + if len(set(self.layer) & set([layername] + basesname)): + uniform_init(m, self.a, self.b, self.bias) + + module.apply(init) + if hasattr(module, '_params_init_info'): + update_init_info(module, init_info=self._get_init_info()) + + def _get_init_info(self): + info = f'{self.__class__.__name__}: a={self.a},' \ + f' b={self.b}, bias={self.bias}' + return info + + +@INITIALIZERS.register_module(name='Kaiming') +class KaimingInit(BaseInit): + r"""Initialize module parameters with the values according to the method + described in `Delving deep into rectifiers: Surpassing human-level + performance on ImageNet classification - He, K. et al. (2015). + `_ + + Args: + a (int | float): the negative slope of the rectifier used after this + layer (only used with ``'leaky_relu'``). Defaults to 0. + mode (str): either ``'fan_in'`` or ``'fan_out'``. Choosing + ``'fan_in'`` preserves the magnitude of the variance of the weights + in the forward pass. Choosing ``'fan_out'`` preserves the + magnitudes in the backwards pass. Defaults to ``'fan_out'``. + nonlinearity (str): the non-linear function (`nn.functional` name), + recommended to use only with ``'relu'`` or ``'leaky_relu'`` . + Defaults to 'relu'. + bias (int | float): the value to fill the bias. Defaults to 0. + bias_prob (float, optional): the probability for bias initialization. + Defaults to None. + distribution (str): distribution either be ``'normal'`` or + ``'uniform'``. Defaults to ``'normal'``. + layer (str | list[str], optional): the layer will be initialized. + Defaults to None. + """ + + def __init__(self, + a=0, + mode='fan_out', + nonlinearity='relu', + distribution='normal', + **kwargs): + super().__init__(**kwargs) + self.a = a + self.mode = mode + self.nonlinearity = nonlinearity + self.distribution = distribution + + def __call__(self, module): + + def init(m): + if self.wholemodule: + kaiming_init(m, self.a, self.mode, self.nonlinearity, + self.bias, self.distribution) + else: + layername = m.__class__.__name__ + basesname = _get_bases_name(m) + if len(set(self.layer) & set([layername] + basesname)): + kaiming_init(m, self.a, self.mode, self.nonlinearity, + self.bias, self.distribution) + + module.apply(init) + if hasattr(module, '_params_init_info'): + update_init_info(module, init_info=self._get_init_info()) + + def _get_init_info(self): + info = f'{self.__class__.__name__}: a={self.a}, mode={self.mode}, ' \ + f'nonlinearity={self.nonlinearity}, ' \ + f'distribution ={self.distribution}, bias={self.bias}' + return info + + +@INITIALIZERS.register_module(name='Caffe2Xavier') +class Caffe2XavierInit(KaimingInit): + # `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch + # Acknowledgment to FAIR's internal code + def __init__(self, **kwargs): + super().__init__( + a=1, + mode='fan_in', + nonlinearity='leaky_relu', + distribution='uniform', + **kwargs) + + def __call__(self, module): + super().__call__(module) + + +@INITIALIZERS.register_module(name='Pretrained') +class PretrainedInit(object): + """Initialize module by loading a pretrained model. + + Args: + checkpoint (str): the checkpoint file of the pretrained model should + be load. + prefix (str, optional): the prefix of a sub-module in the pretrained + model. 
It is for loading a part of the pretrained model to
+            initialize. For example, if we would like to only load the
+            backbone of a detector model, we can set ``prefix='backbone.'``.
+            Defaults to None.
+        map_location (str): map tensors into proper locations.
+    """
+
+    def __init__(self, checkpoint, prefix=None, map_location=None):
+        self.checkpoint = checkpoint
+        self.prefix = prefix
+        self.map_location = map_location
+
+    def __call__(self, module):
+        # `_load_checkpoint_with_prefix` and `load_state_dict` are assumed to
+        # be exposed by `mmcv.utils` alongside `load_checkpoint` in this
+        # codebase; without them the prefix branch raises a NameError.
+        from mmcv.utils import (_load_checkpoint_with_prefix, load_checkpoint,
+                                load_state_dict)
+        logger = get_logger('mmcv')
+        if self.prefix is None:
+            print_log(f'load model from: {self.checkpoint}', logger=logger)
+            load_checkpoint(
+                module,
+                self.checkpoint,
+                map_location=self.map_location,
+                strict=False,
+                logger=logger)
+        else:
+            print_log(
+                f'load {self.prefix} in model from: {self.checkpoint}',
+                logger=logger)
+            state_dict = _load_checkpoint_with_prefix(
+                self.prefix, self.checkpoint, map_location=self.map_location)
+            load_state_dict(module, state_dict, strict=False, logger=logger)
+
+        if hasattr(module, '_params_init_info'):
+            update_init_info(module, init_info=self._get_init_info())
+
+    def _get_init_info(self):
+        info = f'{self.__class__.__name__}: load from {self.checkpoint}'
+        return info
+
+
+def _initialize(module, cfg, wholemodule=False):
+    func = build_from_cfg(cfg, INITIALIZERS)
+    # wholemodule flag is for override mode, there is no layer key in override
+    # and initializer will give init values for the whole module with the name
+    # in override.
+    func.wholemodule = wholemodule
+    func(module)
+
+
+def _initialize_override(module, override, cfg):
+    if not isinstance(override, (dict, list)):
+        raise TypeError(f'override must be a dict or a list of dict, \
+                but got {type(override)}')
+
+    override = [override] if isinstance(override, dict) else override
+
+    for override_ in override:
+
+        cp_override = copy.deepcopy(override_)
+        name = cp_override.pop('name', None)
+        if name is None:
+            raise ValueError('`override` must contain the key "name", '
+                             f'but got {cp_override}')
+        # if override only has the name key, it means to use args in init_cfg
+        if not cp_override:
+            cp_override.update(cfg)
+        # if override has the name key and other args except the type key,
+        # raise an error
+        elif 'type' not in cp_override.keys():
+            raise ValueError(
+                f'`override` need "type" key, but got {cp_override}')
+
+        if hasattr(module, name):
+            _initialize(getattr(module, name), cp_override, wholemodule=True)
+        else:
+            raise RuntimeError(f'module did not have attribute {name}, '
+                               f'but init_cfg is {cp_override}.')
+
+
+def initialize(module, init_cfg):
+    """Initialize a module.
+
+    Args:
+        module (``torch.nn.Module``): the module to be initialized.
+        init_cfg (dict | list[dict]): initialization configuration dict to
+            define initializer. OpenMMLab has implemented 6 initializers
+            including ``Constant``, ``Xavier``, ``Normal``, ``Uniform``,
+            ``Kaiming``, and ``Pretrained``.
+ Example: + >>> module = nn.Linear(2, 3, bias=True) + >>> init_cfg = dict(type='Constant', layer='Linear', val =1 , bias =2) + >>> initialize(module, init_cfg) + + >>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1,2)) + >>> # define key ``'layer'`` for initializing layer with different + >>> # configuration + >>> init_cfg = [dict(type='Constant', layer='Conv1d', val=1), + dict(type='Constant', layer='Linear', val=2)] + >>> initialize(module, init_cfg) + + >>> # define key``'override'`` to initialize some specific part in + >>> # module + >>> class FooNet(nn.Module): + >>> def __init__(self): + >>> super().__init__() + >>> self.feat = nn.Conv2d(3, 16, 3) + >>> self.reg = nn.Conv2d(16, 10, 3) + >>> self.cls = nn.Conv2d(16, 5, 3) + >>> model = FooNet() + >>> init_cfg = dict(type='Constant', val=1, bias=2, layer='Conv2d', + >>> override=dict(type='Constant', name='reg', val=3, bias=4)) + >>> initialize(model, init_cfg) + + >>> model = ResNet(depth=50) + >>> # Initialize weights with the pretrained model. + >>> init_cfg = dict(type='Pretrained', + checkpoint='torchvision://resnet50') + >>> initialize(model, init_cfg) + + >>> # Initialize weights of a sub-module with the specific part of + >>> # a pretrained model by using "prefix". + >>> url = 'http://download.openmmlab.com/mmdetection/v2.0/retinanet/'\ + >>> 'retinanet_r50_fpn_1x_coco/'\ + >>> 'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth' + >>> init_cfg = dict(type='Pretrained', + checkpoint=url, prefix='backbone.') + """ + if not isinstance(init_cfg, (dict, list)): + raise TypeError(f'init_cfg must be a dict or a list of dict, \ + but got {type(init_cfg)}') + + if isinstance(init_cfg, dict): + init_cfg = [init_cfg] + + for cfg in init_cfg: + # should deeply copy the original config because cfg may be used by + # other modules, e.g., one init_cfg shared by multiple bottleneck + # blocks, the expected cfg will be changed after pop and will change + # the initialization behavior of other modules + cp_cfg = copy.deepcopy(cfg) + override = cp_cfg.pop('override', None) + _initialize(module, cp_cfg) + + if override is not None: + cp_cfg.pop('layer', None) + _initialize_override(module, override, cp_cfg) + else: + # All attributes in module have same initialization. + pass + + +def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float, + b: float) -> Tensor: + # Method based on + # https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + # Modified from + # https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1. + math.erf(x / math.sqrt(2.))) / 2. + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn( + 'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. ' + 'The distribution of values may be incorrect.', + stacklevel=2) + + with torch.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + lower = norm_cdf((a - mean) / std) + upper = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [lower, upper], then translate + # to [2lower-1, 2upper-1]. 
+ tensor.uniform_(2 * lower - 1, 2 * upper - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +def trunc_normal_(tensor: Tensor, + mean: float = 0., + std: float = 1., + a: float = -2., + b: float = 2.) -> Tensor: + r"""Fills the input Tensor with values drawn from a truncated + normal distribution. The values are effectively drawn from the + normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` + with values outside :math:`[a, b]` redrawn until they are within + the bounds. The method used for generating the random values works + best when :math:`a \leq \text{mean} \leq b`. + + Modified from + https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py + + Args: + tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`. + mean (float): the mean of the normal distribution. + std (float): the standard deviation of the normal distribution. + a (float): the minimum cutoff value. + b (float): the maximum cutoff value. + """ + return _no_grad_trunc_normal_(tensor, mean, std, a, b) diff --git a/mmcv/models/vad_utils/CD_loss.py b/mmcv/models/vad_utils/CD_loss.py new file mode 100644 index 0000000..193b628 --- /dev/null +++ b/mmcv/models/vad_utils/CD_loss.py @@ -0,0 +1,710 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch import nn as nn +from torch.nn.functional import l1_loss, mse_loss, smooth_l1_loss + +from mmcv.models.builder import LOSSES +from mmcv.models.losses.utils import weighted_loss +import torch.nn.functional as F +from mmcv.core.bbox.match_costs.builder import MATCH_COST +import functools + + +def reduce_loss(loss, reduction): + """Reduce loss as specified. + + Args: + loss (Tensor): Elementwise loss tensor. + reduction (str): Options are "none", "mean" and "sum". + + Return: + Tensor: Reduced loss tensor. + """ + reduction_enum = F._Reduction.get_enum(reduction) + # none: 0, elementwise_mean:1, sum: 2 + if reduction_enum == 0: + return loss + elif reduction_enum == 1: + return loss.mean() + elif reduction_enum == 2: + return loss.sum() + +def custom_weight_dir_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): + """Apply element-wise weight and reduce loss. + + Args: + loss (Tensor): num_sample, num_dir + weight (Tensor): Element-wise weights. + reduction (str): Same as built-in losses of PyTorch. + avg_factor (float): Average factor when computing the mean of losses. + + Returns: + Tensor: Processed loss values. + """ + # if weight is specified, apply element-wise weight + if weight is not None: + loss = loss * weight + + # if avg_factor is not specified, just reduce the loss + if avg_factor is None: + raise ValueError('avg_factor should not be none for OrderedPtsL1Loss') + # loss = reduce_loss(loss, reduction) + else: + # if reduction is mean, then average the loss by avg_factor + if reduction == 'mean': + # import pdb;pdb.set_trace() + # loss = loss.permute(1,0,2,3).contiguous() + loss = loss.sum() + loss = loss / avg_factor + # if reduction is 'none', then do nothing, otherwise raise an error + elif reduction != 'none': + raise ValueError('avg_factor can not be used with reduction="sum"') + return loss + +def custom_weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): + """Apply element-wise weight and reduce loss. 
+ + Args: + loss (Tensor): num_sample, num_order, num_pts, num_coords + weight (Tensor): Element-wise weights. + reduction (str): Same as built-in losses of PyTorch. + avg_factor (float): Average factor when computing the mean of losses. + + Returns: + Tensor: Processed loss values. + """ + # if weight is specified, apply element-wise weight + if weight is not None: + loss = loss * weight + + # if avg_factor is not specified, just reduce the loss + if avg_factor is None: + raise ValueError('avg_factor should not be none for OrderedPtsL1Loss') + # loss = reduce_loss(loss, reduction) + else: + # if reduction is mean, then average the loss by avg_factor + if reduction == 'mean': + # import pdb;pdb.set_trace() + loss = loss.permute(1,0,2,3).contiguous() + loss = loss.sum((1,2,3)) + loss = loss / avg_factor + # if reduction is 'none', then do nothing, otherwise raise an error + elif reduction != 'none': + raise ValueError('avg_factor can not be used with reduction="sum"') + return loss + +def custom_weighted_loss(loss_func): + """Create a weighted version of a given loss function. + + To use this decorator, the loss function must have the signature like + `loss_func(pred, target, **kwargs)`. The function only needs to compute + element-wise loss without any reduction. This decorator will add weight + and reduction arguments to the function. The decorated function will have + the signature like `loss_func(pred, target, weight=None, reduction='mean', + avg_factor=None, **kwargs)`. + + :Example: + + >>> import torch + >>> @weighted_loss + >>> def l1_loss(pred, target): + >>> return (pred - target).abs() + + >>> pred = torch.Tensor([0, 2, 3]) + >>> target = torch.Tensor([1, 1, 1]) + >>> weight = torch.Tensor([1, 0, 1]) + + >>> l1_loss(pred, target) + tensor(1.3333) + >>> l1_loss(pred, target, weight) + tensor(1.) + >>> l1_loss(pred, target, reduction='none') + tensor([1., 1., 2.]) + >>> l1_loss(pred, target, weight, avg_factor=2) + tensor(1.5000) + """ + + @functools.wraps(loss_func) + def wrapper(pred, + target, + weight=None, + reduction='mean', + avg_factor=None, + **kwargs): + # get element-wise loss + loss = loss_func(pred, target, **kwargs) + loss = custom_weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + return wrapper + + +def custom_weighted_dir_loss(loss_func): + """Create a weighted version of a given loss function. + + To use this decorator, the loss function must have the signature like + `loss_func(pred, target, **kwargs)`. The function only needs to compute + element-wise loss without any reduction. This decorator will add weight + and reduction arguments to the function. The decorated function will have + the signature like `loss_func(pred, target, weight=None, reduction='mean', + avg_factor=None, **kwargs)`. + + :Example: + + >>> import torch + >>> @weighted_loss + >>> def l1_loss(pred, target): + >>> return (pred - target).abs() + + >>> pred = torch.Tensor([0, 2, 3]) + >>> target = torch.Tensor([1, 1, 1]) + >>> weight = torch.Tensor([1, 0, 1]) + + >>> l1_loss(pred, target) + tensor(1.3333) + >>> l1_loss(pred, target, weight) + tensor(1.) 
+    >>> l1_loss(pred, target, reduction='none')
+    tensor([1., 1., 2.])
+    >>> l1_loss(pred, target, weight, avg_factor=2)
+    tensor(1.5000)
+    """
+
+    @functools.wraps(loss_func)
+    def wrapper(pred,
+                target,
+                weight=None,
+                reduction='mean',
+                avg_factor=None,
+                **kwargs):
+        # get element-wise loss
+        loss = loss_func(pred, target, **kwargs)
+        loss = custom_weight_dir_reduce_loss(loss, weight, reduction,
+                                             avg_factor)
+        return loss
+
+    return wrapper
+
+
+@custom_weighted_loss
+def ordered_pts_smooth_l1_loss(pred, target):
+    """Smooth L1 loss between ordered point sets.
+
+    Args:
+        pred (torch.Tensor): shape [num_samples, num_pts, num_coords]
+        target (torch.Tensor): shape
+            [num_samples, num_order, num_pts, num_coords]
+
+    Returns:
+        torch.Tensor: Calculated loss
+    """
+    if target.numel() == 0:
+        return pred.sum() * 0
+    pred = pred.unsqueeze(1).repeat(1, target.size(1), 1, 1)
+    assert pred.size() == target.size()
+    loss = smooth_l1_loss(pred, target, reduction='none')
+    return loss
+
+
+@weighted_loss
+def pts_l1_loss(pred, target):
+    """L1 loss.
+
+    Args:
+        pred (torch.Tensor): shape [num_samples, num_pts, num_coords]
+        target (torch.Tensor): shape [num_samples, num_pts, num_coords]
+
+    Returns:
+        torch.Tensor: Calculated loss
+    """
+    if target.numel() == 0:
+        return pred.sum() * 0
+    assert pred.size() == target.size()
+    loss = torch.abs(pred - target)
+    return loss
+
+
+@custom_weighted_loss
+def ordered_pts_l1_loss(pred, target):
+    """L1 loss between ordered point sets.
+
+    Args:
+        pred (torch.Tensor): shape [num_samples, num_pts, num_coords]
+        target (torch.Tensor): shape
+            [num_samples, num_order, num_pts, num_coords]
+
+    Returns:
+        torch.Tensor: Calculated loss
+    """
+    if target.numel() == 0:
+        return pred.sum() * 0
+    pred = pred.unsqueeze(1).repeat(1, target.size(1), 1, 1)
+    assert pred.size() == target.size()
+    loss = torch.abs(pred - target)
+    return loss
+
+
+@custom_weighted_dir_loss
+def pts_dir_cos_loss(pred, target):
+    """Direction cosine similarity loss.
+
+    Args:
+        pred (torch.Tensor): shape [num_samples, num_dir, num_coords]
+        target (torch.Tensor): shape [num_samples, num_dir, num_coords]
+
+    Returns:
+        torch.Tensor: Calculated loss
+    """
+    if target.numel() == 0:
+        return pred.sum() * 0
+    num_samples, num_dir, num_coords = pred.shape
+    loss_func = torch.nn.CosineEmbeddingLoss(reduction='none')
+    tgt_param = target.new_ones((num_samples, num_dir))
+    tgt_param = tgt_param.flatten(0)
+    loss = loss_func(pred.flatten(0, 1), target.flatten(0, 1), tgt_param)
+    loss = loss.view(num_samples, num_dir)
+    return loss
+
+
+@LOSSES.register_module()
+class OrderedPtsSmoothL1Loss(nn.Module):
+    """Smooth L1 loss over all permitted orderings of a point set.
+
+    Args:
+        reduction (str, optional): The method to reduce the loss.
+            Options are "none", "mean" and "sum".
+        loss_weight (float, optional): The weight of loss.
+    """
+
+    def __init__(self, reduction='mean', loss_weight=1.0):
+        super(OrderedPtsSmoothL1Loss, self).__init__()
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+
+    def forward(self,
+                pred,
+                target,
+                weight=None,
+                avg_factor=None,
+                reduction_override=None):
+        """Forward function.
+
+        Args:
+            pred (torch.Tensor): The prediction.
+            target (torch.Tensor): The learning target of the prediction.
+            weight (torch.Tensor, optional): The weight of loss for each
+                prediction. Defaults to None.
+            avg_factor (int, optional): Average factor that is used to average
+                the loss. Defaults to None.
+            reduction_override (str, optional): The reduction method used to
+                override the original reduction method of the loss.
+                Defaults to None.
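+
+        Example (illustrative sketch; shapes are assumptions, and note that
+        `avg_factor` is required by the underlying custom reduction):
+            >>> crit = OrderedPtsSmoothL1Loss()
+            >>> pred = torch.rand(4, 20, 2)       # [num_samples, num_pts, 2]
+            >>> target = torch.rand(4, 8, 20, 2)  # [..., num_order, ...]
+            >>> loss = crit(pred, target, avg_factor=4.0)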
+        """
+        assert reduction_override in (None, 'none', 'mean', 'sum')
+        reduction = (
+            reduction_override if reduction_override else self.reduction)
+        loss_bbox = self.loss_weight * ordered_pts_smooth_l1_loss(
+            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
+        return loss_bbox
+
+
+@LOSSES.register_module()
+class PtsDirCosLoss(nn.Module):
+    """Direction cosine similarity loss between point-set directions.
+
+    Args:
+        reduction (str, optional): The method to reduce the loss.
+            Options are "none", "mean" and "sum".
+        loss_weight (float, optional): The weight of loss.
+    """
+
+    def __init__(self, reduction='mean', loss_weight=1.0):
+        super(PtsDirCosLoss, self).__init__()
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+
+    def forward(self,
+                pred,
+                target,
+                weight=None,
+                avg_factor=None,
+                reduction_override=None):
+        """Forward function.
+
+        Args:
+            pred (torch.Tensor): The prediction.
+            target (torch.Tensor): The learning target of the prediction.
+            weight (torch.Tensor, optional): The weight of loss for each
+                prediction. Defaults to None.
+            avg_factor (int, optional): Average factor that is used to average
+                the loss. Defaults to None.
+            reduction_override (str, optional): The reduction method used to
+                override the original reduction method of the loss.
+                Defaults to None.
+        """
+        assert reduction_override in (None, 'none', 'mean', 'sum')
+        reduction = (
+            reduction_override if reduction_override else self.reduction)
+        loss_dir = self.loss_weight * pts_dir_cos_loss(
+            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
+        return loss_dir
+
+
+@LOSSES.register_module()
+class PtsL1Loss(nn.Module):
+    """L1 loss on point sets.
+
+    Args:
+        reduction (str, optional): The method to reduce the loss.
+            Options are "none", "mean" and "sum".
+        loss_weight (float, optional): The weight of loss.
+    """
+
+    def __init__(self, reduction='mean', loss_weight=1.0):
+        super(PtsL1Loss, self).__init__()
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+
+    def forward(self,
+                pred,
+                target,
+                weight=None,
+                avg_factor=None,
+                reduction_override=None):
+        """Forward function.
+
+        Args:
+            pred (torch.Tensor): The prediction.
+            target (torch.Tensor): The learning target of the prediction.
+            weight (torch.Tensor, optional): The weight of loss for each
+                prediction. Defaults to None.
+            avg_factor (int, optional): Average factor that is used to average
+                the loss. Defaults to None.
+            reduction_override (str, optional): The reduction method used to
+                override the original reduction method of the loss.
+                Defaults to None.
+        """
+        assert reduction_override in (None, 'none', 'mean', 'sum')
+        reduction = (
+            reduction_override if reduction_override else self.reduction)
+        loss_bbox = self.loss_weight * pts_l1_loss(
+            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
+        return loss_bbox
+
+
+@LOSSES.register_module()
+class OrderedPtsL1Loss(nn.Module):
+    """L1 loss over all permitted orderings of a point set.
+
+    Args:
+        reduction (str, optional): The method to reduce the loss.
+            Options are "none", "mean" and "sum".
+        loss_weight (float, optional): The weight of loss.
+    """
+
+    def __init__(self, reduction='mean', loss_weight=1.0):
+        super(OrderedPtsL1Loss, self).__init__()
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+
+    def forward(self,
+                pred,
+                target,
+                weight=None,
+                avg_factor=None,
+                reduction_override=None):
+        """Forward function.
+
+        Args:
+            pred (torch.Tensor): The prediction.
+@LOSSES.register_module()
+class OrderedPtsL1Loss(nn.Module):
+    """L1 loss over all permutation orders of the target points.
+
+    Args:
+        reduction (str, optional): The method to reduce the loss.
+            Options are "none", "mean" and "sum".
+        loss_weight (float, optional): The weight of loss.
+    """
+
+    def __init__(self, reduction='mean', loss_weight=1.0):
+        super(OrderedPtsL1Loss, self).__init__()
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+
+    def forward(self,
+                pred,
+                target,
+                weight=None,
+                avg_factor=None,
+                reduction_override=None):
+        """Forward function.
+
+        Args:
+            pred (torch.Tensor): The prediction.
+            target (torch.Tensor): The learning target of the prediction.
+            weight (torch.Tensor, optional): The weight of loss for each
+                prediction. Defaults to None.
+            avg_factor (int, optional): Average factor that is used to average
+                the loss. Defaults to None.
+            reduction_override (str, optional): The reduction method used to
+                override the original reduction method of the loss.
+                Defaults to None.
+        """
+        assert reduction_override in (None, 'none', 'mean', 'sum')
+        reduction = (
+            reduction_override if reduction_override else self.reduction)
+        loss_bbox = self.loss_weight * ordered_pts_l1_loss(
+            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
+        return loss_bbox
+
+
+@MATCH_COST.register_module()
+class OrderedPtsSmoothL1Cost(object):
+    """OrderedPtsSmoothL1Cost.
+
+    Args:
+        weight (int | float, optional): loss_weight
+    """
+
+    def __init__(self, weight=1.):
+        self.weight = weight
+
+    def __call__(self, bbox_pred, gt_bboxes):
+        """
+        Args:
+            bbox_pred (Tensor): Predicted points with normalized coordinates
+                (x, y), which are all in range [0, 1]. Shape
+                [num_query, num_pts, 2].
+            gt_bboxes (Tensor): Ground truth points with normalized
+                coordinates (x, y).
+                Shape [num_gt, num_order, num_pts, 2].
+        Returns:
+            torch.Tensor: bbox_cost value with weight
+        """
+        num_gts, num_orders, num_pts, num_coords = gt_bboxes.shape
+        bbox_pred = bbox_pred.view(bbox_pred.size(0), -1).unsqueeze(1).repeat(
+            1, num_gts * num_orders, 1)
+        gt_bboxes = gt_bboxes.flatten(2).view(
+            num_gts * num_orders, -1).unsqueeze(0).repeat(bbox_pred.size(0), 1, 1)
+        bbox_cost = smooth_l1_loss(bbox_pred, gt_bboxes, reduction='none').sum(-1)
+        return bbox_cost * self.weight
+
+
+@MATCH_COST.register_module()
+class PtsL1Cost(object):
+    """PtsL1Cost.
+
+    Args:
+        weight (int | float, optional): loss_weight
+    """
+
+    def __init__(self, weight=1.):
+        self.weight = weight
+
+    def __call__(self, bbox_pred, gt_bboxes):
+        """
+        Args:
+            bbox_pred (Tensor): Predicted points with normalized coordinates
+                (x, y), which are all in range [0, 1]. Shape
+                [num_query, num_pts, 2].
+            gt_bboxes (Tensor): Ground truth points with normalized
+                coordinates (x, y). Shape [num_gt, num_pts, 2].
+        Returns:
+            torch.Tensor: bbox_cost value with weight
+        """
+        num_gts, num_pts, num_coords = gt_bboxes.shape
+        bbox_pred = bbox_pred.view(bbox_pred.size(0), -1)
+        gt_bboxes = gt_bboxes.view(num_gts, -1)
+        bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1)
+        return bbox_cost * self.weight
+
+
+@MATCH_COST.register_module()
+class OrderedPtsL1Cost(object):
+    """OrderedPtsL1Cost.
+
+    Args:
+        weight (int | float, optional): loss_weight
+    """
+
+    def __init__(self, weight=1.):
+        self.weight = weight
+
+    def __call__(self, bbox_pred, gt_bboxes):
+        """
+        Args:
+            bbox_pred (Tensor): Predicted points with normalized coordinates
+                (x, y), which are all in range [0, 1]. Shape
+                [num_query, num_pts, 2].
+            gt_bboxes (Tensor): Ground truth points with normalized
+                coordinates (x, y).
+                Shape [num_gt, num_order, num_pts, 2].
+        Returns:
+            torch.Tensor: bbox_cost value with weight
+        """
+        num_gts, num_orders, num_pts, num_coords = gt_bboxes.shape
+        bbox_pred = bbox_pred.view(bbox_pred.size(0), -1)
+        gt_bboxes = gt_bboxes.flatten(2).view(num_gts * num_orders, -1)
+        bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1)
+        return bbox_cost * self.weight
+
+
+@MATCH_COST.register_module()
+class MyChamferDistanceCost:
+    def __init__(self, loss_src_weight=1., loss_dst_weight=1.):
+        self.loss_src_weight = loss_src_weight
+        self.loss_dst_weight = loss_dst_weight
+
+    def __call__(self, src, dst, src_weight=1.0, dst_weight=1.0):
+        """
+        Args:
+            src (Tensor): normed coordinates (x, y), shape (num_q, num_pts_M, 2).
+            dst (Tensor): normed coordinates (x, y), shape (num_gt, num_pts_N, 2).
+        """
+        src_expand = src.unsqueeze(1).repeat(1, dst.shape[0], 1, 1)
+        dst_expand = dst.unsqueeze(0).repeat(src.shape[0], 1, 1, 1)
+        distance = torch.cdist(src_expand, dst_expand)
+        src2dst_distance = torch.min(distance, dim=3)[0]  # (num_q, num_gt, num_pts_M)
+        dst2src_distance = torch.min(distance, dim=2)[0]  # (num_q, num_gt, num_pts_N)
+        loss_src = (src2dst_distance * src_weight).mean(-1)
+        loss_dst = (dst2src_distance * dst_weight).mean(-1)
+        loss = loss_src * self.loss_src_weight + loss_dst * self.loss_dst_weight
+        return loss
+
+
+def chamfer_distance(src,
+                     dst,
+                     src_weight=1.0,
+                     dst_weight=1.0,
+                     reduction='mean',
+                     avg_factor=None):
+    """Calculate Chamfer Distance of two sets.
+
+    Args:
+        src (torch.Tensor): Source set with shape [B, N, C] to
+            calculate Chamfer Distance.
+        dst (torch.Tensor): Destination set with shape [B, M, C] to
+            calculate Chamfer Distance.
+        src_weight (torch.Tensor or float): Weight of source loss.
+        dst_weight (torch.Tensor or float): Weight of destination loss.
+        reduction (str): Method to reduce losses.
+            The valid reduction methods are 'none', 'sum' or 'mean'.
+        avg_factor (int, optional): Average factor used to normalize the
+            summed loss when ``reduction`` is 'mean'. Defaults to None.
+
+    Returns:
+        tuple: Source and Destination loss with the corresponding indices.
+
+            - loss_src (torch.Tensor): The min distance
+              from source to destination.
+            - loss_dst (torch.Tensor): The min distance
+              from destination to source.
+            - indices1 (torch.Tensor): Index of the min distance point
+              for each point in source to destination.
+            - indices2 (torch.Tensor): Index of the min distance point
+              for each point in destination to source.
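+
+    Example (an illustrative sketch; shapes are made up)::
+
+        >>> src = torch.rand(2, 10, 2)   # [B, N, C]
+        >>> dst = torch.rand(2, 15, 2)   # [B, M, C]
+        >>> loss_src, loss_dst, idx1, idx2 = chamfer_distance(src, dst)
+        >>> idx1.shape   # nearest dst index for each src point
+        torch.Size([2, 10])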
+ """ + + # if criterion_mode == 'smooth_l1': + # criterion = smooth_l1_loss + # elif criterion_mode == 'l1': + # criterion = l1_loss + # elif criterion_mode == 'l2': + # criterion = mse_loss + # else: + # raise NotImplementedError + + # src_expand = src.unsqueeze(2).repeat(1, 1, dst.shape[1], 1) + # dst_expand = dst.unsqueeze(1).repeat(1, src.shape[1], 1, 1) + # import pdb;pdb.set_trace() + distance = torch.cdist(src, dst) + src2dst_distance, indices1 = torch.min(distance, dim=2) # (B,N) + dst2src_distance, indices2 = torch.min(distance, dim=1) # (B,M) + # import pdb;pdb.set_trace() + #TODO this may be wrong for misaligned src_weight, now[N,fixed_num] + # should be [N], then view + loss_src = (src2dst_distance * src_weight) + loss_dst = (dst2src_distance * dst_weight) + if avg_factor is None: + reduction_enum = F._Reduction.get_enum(reduction) + if reduction_enum == 0: + raise ValueError('MyCDLoss can not be used with reduction=`none`') + elif reduction_enum == 1: + loss_src = loss_src.mean(-1).mean() + loss_dst = loss_dst.mean(-1).mean() + elif reduction_enum == 2: + loss_src = loss_src.mean(-1).sum() + loss_dst = loss_dst.mean(-1).sum() + else: + raise NotImplementedError + else: + if reduction == 'mean': + eps = torch.finfo(torch.float32).eps + loss_src = loss_src.mean(-1).sum() / (avg_factor + eps) + loss_dst = loss_dst.mean(-1).sum() / (avg_factor + eps) + elif reduction != 'none': + raise ValueError('avg_factor can not be used with reduction="sum"') + + return loss_src, loss_dst, indices1, indices2 + + +@LOSSES.register_module() +class MyChamferDistance(nn.Module): + """Calculate Chamfer Distance of two sets. + + Args: + mode (str): Criterion mode to calculate distance. + The valid modes are smooth_l1, l1 or l2. + reduction (str): Method to reduce losses. + The valid reduction method are none, sum or mean. + loss_src_weight (float): Weight of loss_source. + loss_dst_weight (float): Weight of loss_target. + """ + + def __init__(self, + # mode='l1', + reduction='mean', + loss_src_weight=1.0, + loss_dst_weight=1.0): + super(MyChamferDistance, self).__init__() + + # assert mode in ['smooth_l1', 'l1', 'l2'] + assert reduction in ['none', 'sum', 'mean'] + # self.mode = mode + self.reduction = reduction + self.loss_src_weight = loss_src_weight + self.loss_dst_weight = loss_dst_weight + + def forward(self, + source, + target, + src_weight=1.0, + dst_weight=1.0, + avg_factor=None, + reduction_override=None, + return_indices=False, + **kwargs): + """Forward function of loss calculation. + + Args: + source (torch.Tensor): Source set with shape [B, N, C] to + calculate Chamfer Distance. + target (torch.Tensor): Destination set with shape [B, M, C] to + calculate Chamfer Distance. + src_weight (torch.Tensor | float, optional): + Weight of source loss. Defaults to 1.0. + dst_weight (torch.Tensor | float, optional): + Weight of destination loss. Defaults to 1.0. + reduction_override (str, optional): Method to reduce losses. + The valid reduction method are 'none', 'sum' or 'mean'. + Defaults to None. + return_indices (bool, optional): Whether to return indices. + Defaults to False. + + Returns: + tuple[torch.Tensor]: If ``return_indices=True``, return losses of \ + source and target with their corresponding indices in the \ + order of ``(loss_source, loss_target, indices1, indices2)``. \ + If ``return_indices=False``, return \ + ``(loss_source, loss_target)``. 
+ """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + + loss_source, loss_target, indices1, indices2 = chamfer_distance( + source, target, src_weight, dst_weight, reduction, + avg_factor=avg_factor) + + loss_source *= self.loss_src_weight + loss_target *= self.loss_dst_weight + + loss_pts = loss_source + loss_target + + if return_indices: + return loss_pts, indices1, indices2 + else: + return loss_pts diff --git a/mmcv/models/vad_utils/__init__.py b/mmcv/models/vad_utils/__init__.py new file mode 100644 index 0000000..126bf07 --- /dev/null +++ b/mmcv/models/vad_utils/__init__.py @@ -0,0 +1,7 @@ +from .map_utils import normalize_2d_bbox, normalize_2d_pts, denormalize_2d_bbox, denormalize_2d_pts +from .CD_loss import ( + MyChamferDistance, MyChamferDistanceCost, + OrderedPtsL1Cost, PtsL1Cost, OrderedPtsSmoothL1Cost, + OrderedPtsL1Loss, PtsL1Loss, PtsDirCosLoss +) +from .plan_loss import PlanMapBoundLoss, PlanCollisionLoss, PlanMapDirectionLoss \ No newline at end of file diff --git a/mmcv/models/vad_utils/map_utils.py b/mmcv/models/vad_utils/map_utils.py new file mode 100644 index 0000000..0b3f6b3 --- /dev/null +++ b/mmcv/models/vad_utils/map_utils.py @@ -0,0 +1,41 @@ +from mmcv.core.bbox.transforms import bbox_xyxy_to_cxcywh, bbox_cxcywh_to_xyxy + +def normalize_2d_bbox(bboxes, pc_range): + + patch_h = pc_range[4]-pc_range[1] + patch_w = pc_range[3]-pc_range[0] + cxcywh_bboxes = bbox_xyxy_to_cxcywh(bboxes) + cxcywh_bboxes[...,0:1] = cxcywh_bboxes[..., 0:1] - pc_range[0] + cxcywh_bboxes[...,1:2] = cxcywh_bboxes[...,1:2] - pc_range[1] + factor = bboxes.new_tensor([patch_w, patch_h,patch_w,patch_h]) + + normalized_bboxes = cxcywh_bboxes / factor + return normalized_bboxes + +def normalize_2d_pts(pts, pc_range): + patch_h = pc_range[4]-pc_range[1] + patch_w = pc_range[3]-pc_range[0] + new_pts = pts.clone() + new_pts[...,0:1] = pts[..., 0:1] - pc_range[0] + new_pts[...,1:2] = pts[...,1:2] - pc_range[1] + factor = pts.new_tensor([patch_w, patch_h]) + normalized_pts = new_pts / factor + return normalized_pts + +def denormalize_2d_bbox(bboxes, pc_range): + + bboxes = bbox_cxcywh_to_xyxy(bboxes) + bboxes[..., 0::2] = (bboxes[..., 0::2]*(pc_range[3] - + pc_range[0]) + pc_range[0]) + bboxes[..., 1::2] = (bboxes[..., 1::2]*(pc_range[4] - + pc_range[1]) + pc_range[1]) + + return bboxes + +def denormalize_2d_pts(pts, pc_range): + new_pts = pts.clone() + new_pts[...,0:1] = (pts[..., 0:1]*(pc_range[3] - + pc_range[0]) + pc_range[0]) + new_pts[...,1:2] = (pts[...,1:2]*(pc_range[4] - + pc_range[1]) + pc_range[1]) + return new_pts \ No newline at end of file diff --git a/mmcv/models/vad_utils/plan_loss.py b/mmcv/models/vad_utils/plan_loss.py new file mode 100644 index 0000000..7792da8 --- /dev/null +++ b/mmcv/models/vad_utils/plan_loss.py @@ -0,0 +1,440 @@ +import math +import torch +from torch import nn as nn +from mmcv.models.losses.utils import weighted_loss +from mmcv.models.builder import LOSSES + + +@LOSSES.register_module() +class PlanMapBoundLoss(nn.Module): + """Planning constraint to push ego vehicle away from the lane boundary. + + Args: + reduction (str, optional): The method to reduce the loss. + Options are "none", "mean" and "sum". + loss_weight (float, optional): The weight of loss. + map_thresh (float, optional): confidence threshold to filter map predictions. + lane_bound_cls_idx (float, optional): lane_boundary class index. 
+ dis_thresh (float, optional): distance threshold between ego vehicle and lane bound. + point_cloud_range (list, optional): point cloud range. + """ + + def __init__( + self, + reduction='mean', + loss_weight=1.0, + map_thresh=0.5, + lane_bound_cls_idx=2, + dis_thresh=1.0, + point_cloud_range=[-15.0, -30.0, -2.0, 15.0, 30.0, 2.0], + perception_detach=False + ): + super(PlanMapBoundLoss, self).__init__() + self.reduction = reduction + self.loss_weight = loss_weight + self.map_thresh = map_thresh + self.lane_bound_cls_idx = lane_bound_cls_idx + self.dis_thresh = dis_thresh + self.pc_range = point_cloud_range + self.perception_detach = perception_detach + + def forward(self, + ego_fut_preds, + lane_preds, + lane_score_preds, + weight=None, + avg_factor=None, + reduction_override=None): + """Forward function. + + Args: + ego_fut_preds (Tensor): [B, fut_ts, 2] + lane_preds (Tensor): [B, num_vec, num_pts, 2] + lane_score_preds (Tensor): [B, num_vec, 3] + weight (torch.Tensor, optional): The weight of loss for each + prediction. Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Defaults to None. + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + + if self.perception_detach: + lane_preds = lane_preds.detach() + lane_score_preds = lane_score_preds.detach() + + # filter lane element according to confidence score and class + not_lane_bound_mask = lane_score_preds[..., self.lane_bound_cls_idx] < self.map_thresh + # denormalize map pts + lane_bound_preds = lane_preds.clone() + lane_bound_preds[...,0:1] = (lane_bound_preds[..., 0:1] * (self.pc_range[3] - + self.pc_range[0]) + self.pc_range[0]) + lane_bound_preds[...,1:2] = (lane_bound_preds[..., 1:2] * (self.pc_range[4] - + self.pc_range[1]) + self.pc_range[1]) + # pad not-lane-boundary cls and low confidence preds + lane_bound_preds[not_lane_bound_mask] = 1e6 + + loss_bbox = self.loss_weight * plan_map_bound_loss(ego_fut_preds, lane_bound_preds, + weight=weight, dis_thresh=self.dis_thresh, + reduction=reduction, avg_factor=avg_factor) + return loss_bbox + +@weighted_loss +def plan_map_bound_loss(pred, target, dis_thresh=1.0): + """Planning map bound constraint (L1 distance). + + Args: + pred (torch.Tensor): ego_fut_preds, [B, fut_ts, 2]. + target (torch.Tensor): lane_bound_preds, [B, num_vec, num_pts, 2]. 
+ weight (torch.Tensor): [B, fut_ts] + + Returns: + torch.Tensor: Calculated loss [B, fut_ts] + """ + pred = pred.cumsum(dim=-2) + ego_traj_starts = pred[:, :-1, :] + ego_traj_ends = pred + B, T, _ = ego_traj_ends.size() + padding_zeros = torch.zeros((B, 1, 2), dtype=pred.dtype, device=pred.device) # initial position + ego_traj_starts = torch.cat((padding_zeros, ego_traj_starts), dim=1) + _, V, P, _ = target.size() + ego_traj_expanded = ego_traj_ends.unsqueeze(2).unsqueeze(3) # [B, T, 1, 1, 2] + maps_expanded = target.unsqueeze(1) # [1, 1, M, P, 2] + dist = torch.linalg.norm(ego_traj_expanded - maps_expanded, dim=-1) # [B, T, M, P] + dist = dist.min(dim=-1, keepdim=False)[0] + min_inst_idxs = torch.argmin(dist, dim=-1).tolist() + batch_idxs = [[i] for i in range(dist.shape[0])] + ts_idxs = [[i for i in range(dist.shape[1])] for j in range(dist.shape[0])] + bd_target = target.unsqueeze(1).repeat(1, pred.shape[1], 1, 1, 1) + min_bd_insts = bd_target[batch_idxs, ts_idxs, min_inst_idxs] # [B, T, P, 2] + bd_inst_starts = min_bd_insts[:, :, :-1, :].flatten(0, 2) + bd_inst_ends = min_bd_insts[:, :, 1:, :].flatten(0, 2) + ego_traj_starts = ego_traj_starts.unsqueeze(2).repeat(1, 1, P-1, 1).flatten(0, 2) + ego_traj_ends = ego_traj_ends.unsqueeze(2).repeat(1, 1, P-1, 1).flatten(0, 2) + + intersect_mask = segments_intersect(ego_traj_starts, ego_traj_ends, + bd_inst_starts, bd_inst_ends) + intersect_mask = intersect_mask.reshape(B, T, P-1) + intersect_mask = intersect_mask.any(dim=-1) + intersect_idx = (intersect_mask == True).nonzero() + + target = target.view(target.shape[0], -1, target.shape[-1]) + # [B, fut_ts, num_vec*num_pts] + dist = torch.linalg.norm(pred[:, :, None, :] - target[:, None, :, :], dim=-1) + min_idxs = torch.argmin(dist, dim=-1).tolist() + batch_idxs = [[i] for i in range(dist.shape[0])] + ts_idxs = [[i for i in range(dist.shape[1])] for j in range(dist.shape[0])] + min_dist = dist[batch_idxs, ts_idxs, min_idxs] + loss = min_dist + safe_idx = loss > dis_thresh + unsafe_idx = loss <= dis_thresh + loss[safe_idx] = 0 + loss[unsafe_idx] = dis_thresh - loss[unsafe_idx] + + for i in range(len(intersect_idx)): + loss[intersect_idx[i, 0], intersect_idx[i, 1]:] = 0 + + return loss + + +def segments_intersect(line1_start, line1_end, line2_start, line2_end): + # Calculating the differences + dx1 = line1_end[:, 0] - line1_start[:, 0] + dy1 = line1_end[:, 1] - line1_start[:, 1] + dx2 = line2_end[:, 0] - line2_start[:, 0] + dy2 = line2_end[:, 1] - line2_start[:, 1] + + # Calculating determinants + det = dx1 * dy2 - dx2 * dy1 + det_mask = det != 0 + + # Checking if lines are parallel or coincident + parallel_mask = torch.logical_not(det_mask) + + # Calculating intersection parameters + t1 = ((line2_start[:, 0] - line1_start[:, 0]) * dy2 + - (line2_start[:, 1] - line1_start[:, 1]) * dx2) / det + t2 = ((line2_start[:, 0] - line1_start[:, 0]) * dy1 + - (line2_start[:, 1] - line1_start[:, 1]) * dx1) / det + + # Checking intersection conditions + intersect_mask = torch.logical_and( + torch.logical_and(t1 >= 0, t1 <= 1), + torch.logical_and(t2 >= 0, t2 <= 1) + ) + + # Handling parallel or coincident lines + intersect_mask[parallel_mask] = False + + return intersect_mask + + +@LOSSES.register_module() +class PlanCollisionLoss(nn.Module): + """Planning constraint to push ego vehicle away from other agents. + + Args: + reduction (str, optional): The method to reduce the loss. + Options are "none", "mean" and "sum". + loss_weight (float, optional): The weight of loss. 
+        agent_thresh (float, optional): confidence threshold to filter agent predictions.
+        x_dis_thresh (float, optional): distance threshold between ego and other agents in x-axis.
+        y_dis_thresh (float, optional): distance threshold between ego and other agents in y-axis.
+        point_cloud_range (list, optional): point cloud range.
+    """
+
+    def __init__(
+        self,
+        reduction='mean',
+        loss_weight=1.0,
+        agent_thresh=0.5,
+        x_dis_thresh=1.5,
+        y_dis_thresh=3.0,
+        point_cloud_range=[-15.0, -30.0, -2.0, 15.0, 30.0, 2.0]
+    ):
+        super(PlanCollisionLoss, self).__init__()
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+        self.agent_thresh = agent_thresh
+        self.x_dis_thresh = x_dis_thresh
+        self.y_dis_thresh = y_dis_thresh
+        self.pc_range = point_cloud_range
+
+    def forward(self,
+                ego_fut_preds,
+                agent_preds,
+                agent_fut_preds,
+                agent_score_preds,
+                agent_fut_cls_preds,
+                weight=None,
+                avg_factor=None,
+                reduction_override=None):
+        """Forward function.
+
+        Args:
+            ego_fut_preds (Tensor): [B, fut_ts, 2]
+            agent_preds (Tensor): [B, num_agent, 2]
+            agent_fut_preds (Tensor): [B, num_agent, fut_mode, fut_ts, 2]
+            agent_score_preds (Tensor): [B, num_agent, 10]
+            agent_fut_cls_preds (Tensor): [B, num_agent, fut_mode]
+            weight (torch.Tensor, optional): The weight of loss for each
+                prediction. Defaults to None.
+            avg_factor (int, optional): Average factor that is used to average
+                the loss. Defaults to None.
+            reduction_override (str, optional): The reduction method used to
+                override the original reduction method of the loss.
+                Defaults to None.
+        """
+        assert reduction_override in (None, 'none', 'mean', 'sum')
+        reduction = (
+            reduction_override if reduction_override else self.reduction)
+
+        # filter agent element according to confidence score
+        agent_max_score_preds, agent_max_score_idxs = agent_score_preds.max(dim=-1)
+        not_valid_agent_mask = agent_max_score_preds < self.agent_thresh
+        # filter low confidence preds
+        agent_fut_preds[not_valid_agent_mask] = 1e6
+        # filter not vehicle preds
+        not_veh_pred_mask = agent_max_score_idxs > 4  # veh idxs are 0-4
+        agent_fut_preds[not_veh_pred_mask] = 1e6
+        # only use best mode pred
+        best_mode_idxs = torch.argmax(agent_fut_cls_preds, dim=-1).tolist()
+        batch_idxs = [[i] for i in range(agent_fut_cls_preds.shape[0])]
+        agent_num_idxs = [[i for i in range(agent_fut_cls_preds.shape[1])]
+                          for j in range(agent_fut_cls_preds.shape[0])]
+        agent_fut_preds = agent_fut_preds[batch_idxs, agent_num_idxs, best_mode_idxs]
+
+        loss_bbox = self.loss_weight * plan_col_loss(
+            ego_fut_preds, agent_preds, agent_fut_preds=agent_fut_preds,
+            weight=weight, x_dis_thresh=self.x_dis_thresh,
+            y_dis_thresh=self.y_dis_thresh,
+            reduction=reduction, avg_factor=avg_factor)
+        return loss_bbox
+
+
+@weighted_loss
+def plan_col_loss(
+    pred,
+    target,
+    agent_fut_preds,
+    x_dis_thresh=1.5,
+    y_dis_thresh=3.0,
+    dis_thresh=3.0
+):
+    """Planning ego-agent collision constraint.
+
+    Args:
+        pred (torch.Tensor): ego_fut_preds, [B, fut_ts, 2].
+        target (torch.Tensor): agent_preds, [B, num_agent, 2].
+        agent_fut_preds (Tensor): [B, num_agent, fut_ts, 2].
+        weight (torch.Tensor): [B, fut_ts, 2].
+        x_dis_thresh (float, optional): distance threshold between ego and
+            other agents in x-axis.
+        y_dis_thresh (float, optional): distance threshold between ego and
+            other agents in y-axis.
+        dis_thresh (float, optional): distance threshold to filter distant agents.
+
+    Returns:
+        torch.Tensor: Calculated loss [B, fut_ts, 2]
+    """
+    pred = pred.cumsum(dim=-2)
+    agent_fut_preds = agent_fut_preds.cumsum(dim=-2)
+    target = target[:, :, None, :] + agent_fut_preds
+    # filter distant agents from ego vehicle
+    dist = torch.linalg.norm(pred[:, None, :, :] - target, dim=-1)
+    dist_mask = dist > dis_thresh
+    target[dist_mask] = 1e6
+
+    # [B, num_agent, fut_ts]
+    x_dist = torch.abs(pred[:, None, :, 0] - target[..., 0])
+    y_dist = torch.abs(pred[:, None, :, 1] - target[..., 1])
+    x_min_idxs = torch.argmin(x_dist, dim=1).tolist()
+    y_min_idxs = torch.argmin(y_dist, dim=1).tolist()
+    batch_idxs = [[i] for i in range(y_dist.shape[0])]
+    ts_idxs = [[i for i in range(y_dist.shape[-1])] for j in range(y_dist.shape[0])]
+
+    # [B, fut_ts]
+    x_min_dist = x_dist[batch_idxs, x_min_idxs, ts_idxs]
+    y_min_dist = y_dist[batch_idxs, y_min_idxs, ts_idxs]
+    x_loss = x_min_dist
+    safe_idx = x_loss > x_dis_thresh
+    unsafe_idx = x_loss <= x_dis_thresh
+    x_loss[safe_idx] = 0
+    x_loss[unsafe_idx] = x_dis_thresh - x_loss[unsafe_idx]
+    y_loss = y_min_dist
+    safe_idx = y_loss > y_dis_thresh
+    unsafe_idx = y_loss <= y_dis_thresh
+    y_loss[safe_idx] = 0
+    y_loss[unsafe_idx] = y_dis_thresh - y_loss[unsafe_idx]
+    loss = torch.cat([x_loss.unsqueeze(-1), y_loss.unsqueeze(-1)], dim=-1)
+
+    return loss
+
+
+@LOSSES.register_module()
+class PlanMapDirectionLoss(nn.Module):
+    """Planning loss to keep the ego heading consistent with the lane direction.
+
+    Args:
+        reduction (str, optional): The method to reduce the loss.
+            Options are "none", "mean" and "sum".
+        loss_weight (float, optional): The weight of loss.
+        map_thresh (float, optional): confidence threshold to filter map predictions.
+        dis_thresh (float, optional): distance threshold between ego and lane divider.
+        lane_div_cls_idx (int, optional): lane divider class index.
+        point_cloud_range (list, optional): point cloud range.
+    """
+
+    def __init__(
+        self,
+        reduction='mean',
+        loss_weight=1.0,
+        map_thresh=0.5,
+        dis_thresh=2.0,
+        lane_div_cls_idx=0,
+        point_cloud_range=[-15.0, -30.0, -2.0, 15.0, 30.0, 2.0]
+    ):
+        super(PlanMapDirectionLoss, self).__init__()
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+        self.map_thresh = map_thresh
+        self.dis_thresh = dis_thresh
+        self.lane_div_cls_idx = lane_div_cls_idx
+        self.pc_range = point_cloud_range
+
+    def forward(self,
+                ego_fut_preds,
+                lane_preds,
+                lane_score_preds,
+                weight=None,
+                avg_factor=None,
+                reduction_override=None):
+        """Forward function.
+
+        Args:
+            ego_fut_preds (Tensor): [B, fut_ts, 2]
+            lane_preds (Tensor): [B, num_vec, num_pts, 2]
+            lane_score_preds (Tensor): [B, num_vec, 3]
+            weight (torch.Tensor, optional): The weight of loss for each
+                prediction. Defaults to None.
+            avg_factor (int, optional): Average factor that is used to average
+                the loss. Defaults to None.
+            reduction_override (str, optional): The reduction method used to
+                override the original reduction method of the loss.
+                Defaults to None.
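+
+        Example (an illustrative sketch; shapes and scores are made up)::
+
+            >>> loss_fn = PlanMapDirectionLoss()
+            >>> ego_fut_preds = torch.rand(1, 6, 2)       # 6 future ego offsets
+            >>> lane_preds = torch.rand(1, 10, 20, 2)     # 10 map vectors, 20 pts each
+            >>> lane_score_preds = torch.rand(1, 10, 3)   # per-vector class scores
+            >>> loss = loss_fn(ego_fut_preds, lane_preds, lane_score_preds)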
+ """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + + # filter lane element according to confidence score and class + not_lane_div_mask = lane_score_preds[..., self.lane_div_cls_idx] < self.map_thresh + # denormalize map pts + lane_div_preds = lane_preds.clone() + lane_div_preds[...,0:1] = (lane_div_preds[..., 0:1] * (self.pc_range[3] - + self.pc_range[0]) + self.pc_range[0]) + lane_div_preds[...,1:2] = (lane_div_preds[..., 1:2] * (self.pc_range[4] - + self.pc_range[1]) + self.pc_range[1]) + # pad not-lane-divider cls and low confidence preds + lane_div_preds[not_lane_div_mask] = 1e6 + + loss_bbox = self.loss_weight * plan_map_dir_loss(ego_fut_preds, lane_div_preds, + weight=weight, dis_thresh=self.dis_thresh, + reduction=reduction, avg_factor=avg_factor) + return loss_bbox + +@weighted_loss +def plan_map_dir_loss(pred, target, dis_thresh=2.0): + """Planning ego-map directional loss. + + Args: + pred (torch.Tensor): ego_fut_preds, [B, fut_ts, 2]. + target (torch.Tensor): lane_div_preds, [B, num_vec, num_pts, 2]. + weight (torch.Tensor): [B, fut_ts] + + Returns: + torch.Tensor: Calculated loss [B, fut_ts] + """ + num_map_pts = target.shape[2] + pred = pred.cumsum(dim=-2) + traj_dis = torch.linalg.norm(pred[:, -1, :] - pred[:, 0, :], dim=-1) + static_mask = traj_dis < 1.0 + target = target.unsqueeze(1).repeat(1, pred.shape[1], 1, 1, 1) + + # find the closest map instance for ego at each timestamp + dist = torch.linalg.norm(pred[:, :, None, None, :] - target, dim=-1) + dist = dist.min(dim=-1, keepdim=False)[0] + min_inst_idxs = torch.argmin(dist, dim=-1).tolist() + batch_idxs = [[i] for i in range(dist.shape[0])] + ts_idxs = [[i for i in range(dist.shape[1])] for j in range(dist.shape[0])] + target_map_inst = target[batch_idxs, ts_idxs, min_inst_idxs] # [B, fut_ts, num_pts, 2] + + # calculate distance + dist = torch.linalg.norm(pred[:, :, None, :] - target_map_inst, dim=-1) + min_pts_idxs = torch.argmin(dist, dim=-1) + min_pts_next_idxs = min_pts_idxs.clone() + is_end_point = (min_pts_next_idxs == num_map_pts-1) + not_end_point = (min_pts_next_idxs != num_map_pts-1) + min_pts_next_idxs[is_end_point] = num_map_pts - 2 + min_pts_next_idxs[not_end_point] = min_pts_next_idxs[not_end_point] + 1 + min_pts_idxs = min_pts_idxs.tolist() + min_pts_next_idxs = min_pts_next_idxs.tolist() + traj_yaw = torch.atan2(torch.diff(pred[..., 1]), torch.diff(pred[..., 0])) # [B, fut_ts-1] + # last ts yaw assume same as previous + traj_yaw = torch.cat([traj_yaw, traj_yaw[:, [-1]]], dim=-1) # [B, fut_ts] + min_pts = target_map_inst[batch_idxs, ts_idxs, min_pts_idxs] + dist = torch.linalg.norm(min_pts - pred, dim=-1) + dist_mask = dist > dis_thresh + min_pts = min_pts.unsqueeze(2) + min_pts_next = target_map_inst[batch_idxs, ts_idxs, min_pts_next_idxs].unsqueeze(2) + map_pts = torch.cat([min_pts, min_pts_next], dim=2) + lane_yaw = torch.atan2(torch.diff(map_pts[..., 1]).squeeze(-1), torch.diff(map_pts[..., 0]).squeeze(-1)) # [B, fut_ts] + yaw_diff = traj_yaw - lane_yaw + yaw_diff[yaw_diff > math.pi] = yaw_diff[yaw_diff > math.pi] - math.pi + yaw_diff[yaw_diff > math.pi/2] = yaw_diff[yaw_diff > math.pi/2] - math.pi + yaw_diff[yaw_diff < -math.pi] = yaw_diff[yaw_diff < -math.pi] + math.pi + yaw_diff[yaw_diff < -math.pi/2] = yaw_diff[yaw_diff < -math.pi/2] + math.pi + yaw_diff[dist_mask] = 0 # loss = 0 if no lane around ego + yaw_diff[static_mask] = 0 # loss = 0 if ego is static + + loss = torch.abs(yaw_diff) + + return loss 
# [B, fut_ts] diff --git a/mmcv/models/vad_utils/traj_lr_warmup.py b/mmcv/models/vad_utils/traj_lr_warmup.py new file mode 100644 index 0000000..0b5ba1b --- /dev/null +++ b/mmcv/models/vad_utils/traj_lr_warmup.py @@ -0,0 +1,13 @@ +import torch + +def get_traj_warmup_loss_weight( + cur_epoch, + tot_epoch, + start_pos=0.3, + end_pos=0.35, + scale_weight=1.1 +): + epoch_percentage = cur_epoch / tot_epoch + sigmoid_input = 5 / (end_pos-start_pos) * epoch_percentage - 2.5 * (end_pos+start_pos) / (end_pos - start_pos) + + return scale_weight * torch.sigmoid(torch.tensor(sigmoid_input)) diff --git a/mmcv/ops/__init__.py b/mmcv/ops/__init__.py new file mode 100644 index 0000000..0e4638e --- /dev/null +++ b/mmcv/ops/__init__.py @@ -0,0 +1,57 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .modulated_deform_conv import (ModulatedDeformConv2d, + ModulatedDeformConv2dPack, + modulated_deform_conv2d) +from .multi_scale_deform_attn import MultiScaleDeformableAttention +from .roiaware_pool3d import (RoIAwarePool3d, points_in_boxes_batch, + points_in_boxes_cpu, points_in_boxes_gpu) +from .roi_align import RoIAlign, roi_align +from .iou3d import boxes_iou_bev, nms_bev, nms_normal_bev +from .focal_loss import (SigmoidFocalLoss, SoftmaxFocalLoss, + sigmoid_focal_loss, softmax_focal_loss) +from .voxelize import Voxelization, voxelization +from .nms import batched_nms, nms, nms_match, nms_rotated, soft_nms +from .masked_conv import MaskedConv2d, masked_conv2d +from .deform_conv import DeformConv2d, DeformConv2dPack, deform_conv2d + + +# __all__ = [ +# 'bbox_overlaps', 'CARAFE', 'CARAFENaive', 'CARAFEPack', 'carafe', +# 'carafe_naive', 'CornerPool', 'DeformConv2d', 'DeformConv2dPack', +# 'deform_conv2d', 'DeformRoIPool', 'DeformRoIPoolPack', +# 'ModulatedDeformRoIPoolPack', 'deform_roi_pool', 'SigmoidFocalLoss', +# 'SoftmaxFocalLoss', 'sigmoid_focal_loss', 'softmax_focal_loss', +# 'get_compiler_version', 'get_compiling_cuda_version', +# 'get_onnxruntime_op_path', 'MaskedConv2d', 'masked_conv2d', +# 'ModulatedDeformConv2d', 'ModulatedDeformConv2dPack', +# 'modulated_deform_conv2d', 'batched_nms', 'nms', 'soft_nms', 'nms_match', +# 'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool', 'SyncBatchNorm', 'Conv2d', +# 'ConvTranspose2d', 'Linear', 'MaxPool2d', 'CrissCrossAttention', 'PSAMask', +# 'point_sample', 'rel_roi_point_to_rel_img_point', 'SimpleRoIAlign', +# 'SAConv2d', 'TINShift', 'tin_shift', 'assign_score_withk', +# 'box_iou_rotated', 'RoIPointPool3d', 'nms_rotated', 'knn', 'ball_query', +# 'upfirdn2d', 'FusedBiasLeakyReLU', 'fused_bias_leakyrelu', +# 'RoIAlignRotated', 'roi_align_rotated', 'pixel_group', 'QueryAndGroup', +# 'GroupAll', 'grouping_operation', 'contour_expand', 'three_nn', +# 'three_interpolate', 'MultiScaleDeformableAttention', 'BorderAlign', +# 'border_align', 'gather_points', 'furthest_point_sample', +# 'furthest_point_sample_with_dist', 'PointsSampler', 'Correlation', +# 'boxes_iou_bev', 'nms_bev', 'nms_normal_bev', 'Voxelization', +# 'voxelization', 'dynamic_scatter', 'DynamicScatter', 'RoIAwarePool3d', +# 'points_in_boxes_part', 'points_in_boxes_cpu', 'points_in_boxes_all', +# 'soft_nms', 'get_compiler_version', +# 'get_compiling_cuda_version', 'NaiveSyncBatchNorm1d', +# 'NaiveSyncBatchNorm2d', 'batched_nms', 'Voxelization', 'voxelization', +# 'dynamic_scatter', 'DynamicScatter', +# 'SparseBasicBlock', 'SparseBottleneck', +# 'RoIAwarePool3d', 'points_in_boxes_gpu', 'points_in_boxes_cpu', +# 'make_sparse_convmodule', 'ball_query', 'knn', 'furthest_point_sample', +# 
'furthest_point_sample_with_dist', 'three_interpolate', 'three_nn', +# 'gather_points', 'grouping_operation', 'group_points', 'GroupAll', +# 'QueryAndGroup', 'PointSAModule', 'PointSAModuleMSG', 'PointFPModule', +# 'points_in_boxes_batch', 'assign_score_withk', +# 'Points_Sampler', 'build_sa_module', +# 'PAConv', 'PAConvCUDA', 'PAConvSAModuleMSG', 'PAConvSAModule', +# 'PAConvCUDASAModule', 'PAConvCUDASAModuleMSG', +# 'Upsample', 'resize', 'Encoding' +# ] diff --git a/mmcv/ops/csrc/common/box_iou_rotated_utils.hpp b/mmcv/ops/csrc/common/box_iou_rotated_utils.hpp new file mode 100644 index 0000000..67190dc --- /dev/null +++ b/mmcv/ops/csrc/common/box_iou_rotated_utils.hpp @@ -0,0 +1,343 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +// modified from +// https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h +#pragma once +#include +#include + +#ifdef __CUDACC__ +// Designates functions callable from the host (CPU) and the device (GPU) +#define HOST_DEVICE __host__ __device__ +#define HOST_DEVICE_INLINE HOST_DEVICE __forceinline__ +#else +#include +#define HOST_DEVICE +#define HOST_DEVICE_INLINE HOST_DEVICE inline +#endif + +namespace { + +template +struct RotatedBox { + T x_ctr, y_ctr, w, h, a; +}; + +template +struct Point { + T x, y; + HOST_DEVICE_INLINE Point(const T& px = 0, const T& py = 0) : x(px), y(py) {} + HOST_DEVICE_INLINE Point operator+(const Point& p) const { + return Point(x + p.x, y + p.y); + } + HOST_DEVICE_INLINE Point& operator+=(const Point& p) { + x += p.x; + y += p.y; + return *this; + } + HOST_DEVICE_INLINE Point operator-(const Point& p) const { + return Point(x - p.x, y - p.y); + } + HOST_DEVICE_INLINE Point operator*(const T coeff) const { + return Point(x * coeff, y * coeff); + } +}; + +template +HOST_DEVICE_INLINE T dot_2d(const Point& A, const Point& B) { + return A.x * B.x + A.y * B.y; +} + +template +HOST_DEVICE_INLINE T cross_2d(const Point& A, const Point& B) { + return A.x * B.y - B.x * A.y; +} + +template +HOST_DEVICE_INLINE void get_rotated_vertices(const RotatedBox& box, + Point (&pts)[4]) { + // M_PI / 180. 
== 0.01745329251 + // double theta = box.a * 0.01745329251; + // MODIFIED + double theta = box.a; + T cosTheta2 = (T)cos(theta) * 0.5f; + T sinTheta2 = (T)sin(theta) * 0.5f; + + // y: top --> down; x: left --> right + pts[0].x = box.x_ctr - sinTheta2 * box.h - cosTheta2 * box.w; + pts[0].y = box.y_ctr + cosTheta2 * box.h - sinTheta2 * box.w; + pts[1].x = box.x_ctr + sinTheta2 * box.h - cosTheta2 * box.w; + pts[1].y = box.y_ctr - cosTheta2 * box.h - sinTheta2 * box.w; + pts[2].x = 2 * box.x_ctr - pts[0].x; + pts[2].y = 2 * box.y_ctr - pts[0].y; + pts[3].x = 2 * box.x_ctr - pts[1].x; + pts[3].y = 2 * box.y_ctr - pts[1].y; +} + +template +HOST_DEVICE_INLINE int get_intersection_points(const Point (&pts1)[4], + const Point (&pts2)[4], + Point (&intersections)[24]) { + // Line vector + // A line from p1 to p2 is: p1 + (p2-p1)*t, t=[0,1] + Point vec1[4], vec2[4]; + for (int i = 0; i < 4; i++) { + vec1[i] = pts1[(i + 1) % 4] - pts1[i]; + vec2[i] = pts2[(i + 1) % 4] - pts2[i]; + } + + // Line test - test all line combos for intersection + int num = 0; // number of intersections + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + // Solve for 2x2 Ax=b + T det = cross_2d(vec2[j], vec1[i]); + + // This takes care of parallel lines + if (fabs(det) <= 1e-14) { + continue; + } + + auto vec12 = pts2[j] - pts1[i]; + + T t1 = cross_2d(vec2[j], vec12) / det; + T t2 = cross_2d(vec1[i], vec12) / det; + + if (t1 >= 0.0f && t1 <= 1.0f && t2 >= 0.0f && t2 <= 1.0f) { + intersections[num++] = pts1[i] + vec1[i] * t1; + } + } + } + + // Check for vertices of rect1 inside rect2 + { + const auto& AB = vec2[0]; + const auto& DA = vec2[3]; + auto ABdotAB = dot_2d(AB, AB); + auto ADdotAD = dot_2d(DA, DA); + for (int i = 0; i < 4; i++) { + // assume ABCD is the rectangle, and P is the point to be judged + // P is inside ABCD iff. P's projection on AB lies within AB + // and P's projection on AD lies within AD + + auto AP = pts1[i] - pts2[0]; + + auto APdotAB = dot_2d(AP, AB); + auto APdotAD = -dot_2d(AP, DA); + + if ((APdotAB >= 0) && (APdotAD >= 0) && (APdotAB <= ABdotAB) && + (APdotAD <= ADdotAD)) { + intersections[num++] = pts1[i]; + } + } + } + + // Reverse the check - check for vertices of rect2 inside rect1 + { + const auto& AB = vec1[0]; + const auto& DA = vec1[3]; + auto ABdotAB = dot_2d(AB, AB); + auto ADdotAD = dot_2d(DA, DA); + for (int i = 0; i < 4; i++) { + auto AP = pts2[i] - pts1[0]; + + auto APdotAB = dot_2d(AP, AB); + auto APdotAD = -dot_2d(AP, DA); + + if ((APdotAB >= 0) && (APdotAD >= 0) && (APdotAB <= ABdotAB) && + (APdotAD <= ADdotAD)) { + intersections[num++] = pts2[i]; + } + } + } + + return num; +} + +template +HOST_DEVICE_INLINE int convex_hull_graham(const Point (&p)[24], + const int& num_in, Point (&q)[24], + bool shift_to_zero = false) { + assert(num_in >= 2); + + // Step 1: + // Find point with minimum y + // if more than 1 points have the same minimum y, + // pick the one with the minimum x. 
+ int t = 0; + for (int i = 1; i < num_in; i++) { + if (p[i].y < p[t].y || (p[i].y == p[t].y && p[i].x < p[t].x)) { + t = i; + } + } + auto& start = p[t]; // starting point + + // Step 2: + // Subtract starting point from every points (for sorting in the next step) + for (int i = 0; i < num_in; i++) { + q[i] = p[i] - start; + } + + // Swap the starting point to position 0 + auto tmp = q[0]; + q[0] = q[t]; + q[t] = tmp; + + // Step 3: + // Sort point 1 ~ num_in according to their relative cross-product values + // (essentially sorting according to angles) + // If the angles are the same, sort according to their distance to origin + T dist[24]; + for (int i = 0; i < num_in; i++) { + dist[i] = dot_2d(q[i], q[i]); + } + +#ifdef __CUDACC__ + // CUDA version + // In the future, we can potentially use thrust + // for sorting here to improve speed (though not guaranteed) + for (int i = 1; i < num_in - 1; i++) { + for (int j = i + 1; j < num_in; j++) { + T crossProduct = cross_2d(q[i], q[j]); + if ((crossProduct < -1e-6) || + (fabs(crossProduct) < 1e-6 && dist[i] > dist[j])) { + auto q_tmp = q[i]; + q[i] = q[j]; + q[j] = q_tmp; + auto dist_tmp = dist[i]; + dist[i] = dist[j]; + dist[j] = dist_tmp; + } + } + } +#else + // CPU version + std::sort(q + 1, q + num_in, + [](const Point& A, const Point& B) -> bool { + T temp = cross_2d(A, B); + if (fabs(temp) < 1e-6) { + return dot_2d(A, A) < dot_2d(B, B); + } else { + return temp > 0; + } + }); +#endif + + // Step 4: + // Make sure there are at least 2 points (that don't overlap with each other) + // in the stack + int k; // index of the non-overlapped second point + for (k = 1; k < num_in; k++) { + if (dist[k] > 1e-8) { + break; + } + } + if (k == num_in) { + // We reach the end, which means the convex hull is just one point + q[0] = p[t]; + return 1; + } + q[1] = q[k]; + int m = 2; // 2 points in the stack + // Step 5: + // Finally we can start the scanning process. + // When a non-convex relationship between the 3 points is found + // (either concave shape or duplicated points), + // we pop the previous point from the stack + // until the 3-point relationship is convex again, or + // until the stack only contains two points + for (int i = k + 1; i < num_in; i++) { + while (m > 1 && cross_2d(q[i] - q[m - 2], q[m - 1] - q[m - 2]) >= 0) { + m--; + } + q[m++] = q[i]; + } + + // Step 6 (Optional): + // In general sense we need the original coordinates, so we + // need to shift the points back (reverting Step 2) + // But if we're only interested in getting the area/perimeter of the shape + // We can simply return. + if (!shift_to_zero) { + for (int i = 0; i < m; i++) { + q[i] += start; + } + } + + return m; +} + +template +HOST_DEVICE_INLINE T polygon_area(const Point (&q)[24], const int& m) { + if (m <= 2) { + return 0; + } + + T area = 0; + for (int i = 1; i < m - 1; i++) { + area += fabs(cross_2d(q[i] - q[0], q[i + 1] - q[0])); + } + + return area / 2.0; +} + +template +HOST_DEVICE_INLINE T rotated_boxes_intersection(const RotatedBox& box1, + const RotatedBox& box2) { + // There are up to 4 x 4 + 4 + 4 = 24 intersections (including dups) returned + // from rotated_rect_intersection_pts + Point intersectPts[24], orderedPts[24]; + + Point pts1[4]; + Point pts2[4]; + get_rotated_vertices(box1, pts1); + get_rotated_vertices(box2, pts2); + + int num = get_intersection_points(pts1, pts2, intersectPts); + + if (num <= 2) { + return 0.0; + } + + // Convex Hull to order the intersection points in clockwise order and find + // the contour area. 
+ int num_convex = convex_hull_graham(intersectPts, num, orderedPts, true); + return polygon_area(orderedPts, num_convex); +} + +} // namespace + +template +HOST_DEVICE_INLINE T single_box_iou_rotated(T const* const box1_raw, + T const* const box2_raw, + const int mode_flag) { + // shift center to the middle point to achieve higher precision in result + RotatedBox box1, box2; + auto center_shift_x = (box1_raw[0] + box2_raw[0]) / 2.0; + auto center_shift_y = (box1_raw[1] + box2_raw[1]) / 2.0; + box1.x_ctr = box1_raw[0] - center_shift_x; + box1.y_ctr = box1_raw[1] - center_shift_y; + box1.w = box1_raw[2]; + box1.h = box1_raw[3]; + box1.a = box1_raw[4]; + box2.x_ctr = box2_raw[0] - center_shift_x; + box2.y_ctr = box2_raw[1] - center_shift_y; + box2.w = box2_raw[2]; + box2.h = box2_raw[3]; + box2.a = box2_raw[4]; + + const T area1 = box1.w * box1.h; + const T area2 = box2.w * box2.h; + if (area1 < 1e-14 || area2 < 1e-14) { + return 0.f; + } + + const T intersection = rotated_boxes_intersection(box1, box2); + T baseS = 1.0; + if (mode_flag == 0) { + baseS = (area1 + area2 - intersection); + } else if (mode_flag == 1) { + baseS = area1; + } + const T iou = intersection / baseS; + return iou; +} diff --git a/mmcv/ops/csrc/common/cuda/assign_score_withk_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/assign_score_withk_cuda_kernel.cuh new file mode 100644 index 0000000..bf0abf7 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/assign_score_withk_cuda_kernel.cuh @@ -0,0 +1,112 @@ +// Copyright (c) OpenMMLab. All rights reserved +#ifndef ASSIGN_SCORE_WITHK_CUDA_KERNEL_CUH +#define ASSIGN_SCORE_WITHK_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +// input: points(B,N0,M,O), centers(B,N0,M,O), scores(B,N1,K,M), knn_idx(B,N1,K) +// output: fout(B,O,N) +// algo: fout(b,i,k,j) = s(b,i,k,m)*p(b,c(i),k,m,j) = s(b,i,k,m)*p(b,i(k),m,j) +// i(k) = idx(b,i,k) +// sum: fout(b,i,j) = fout(b,i,j) + s(b,i,k,m)*p(b,i,k,m,j) +// avg: fout(b,i,j) = sum(fout(b,i,k,j)) / k +// max: fout(b,i,j) = max(fout(b,i,k,j), sum(s(b,i,k,m)*p(b,i,k,m,j))) + +template +__global__ void assign_score_withk_forward_cuda_kernel( + const int B, const int N0, const int N1, const int M, const int K, + const int O, const int aggregate, const T* points, const T* centers, + const T* scores, const int64_t* knn_idx, T* output) { + // ----- parallel loop for B, N1, K and O --------- + long i = blockIdx.x * blockDim.x + threadIdx.x; + if (i >= B * N1 * K * O) return; + // ------- loop for M ---------- + const int b = (int)(i / (O * N1 * K)); + const int o = (int)(i % (O * N1 * K) / (N1 * K)); + const int n = (int)(i % (N1 * K) / K); + const int k = (int)(i % K); + const int cn = (int)knn_idx[b * K * N1 + n * K + + 0]; // The first neighbor is the center point + const int kn = (int)knn_idx[b * K * N1 + n * K + k]; + if (kn >= N0 || + kn < 0) { // if index overflows, it is out of the neighborhood range + return; + } + assert(b < B); + assert(kn < N0); + assert(cn < N0); + assert(o < O); + assert(n < N1); + const int out_idx = b * N1 * O * K + o * N1 * K + n * K + k; + T val = output[out_idx]; + for (int m = 0; m < M; m++) { + val += points[b * N0 * M * O + kn * M * O + m * O + o] * + scores[b * N1 * K * M + n * K * M + k * M + m] - + centers[b * N0 * M * O + cn * M * O + m * O + o] * + scores[b * N1 * K * M + n * K * M + k * M + m]; + } + output[out_idx] = val; +} + +template +__global__ void assign_score_withk_points_backward_cuda_kernel( + const int B, const int N0, const int N, const int M, const int K, + const int O, const int aggregate, 
const T* grad_out, const T* scores, + const int64_t* knn_idx, T* grad_points, T* grad_centers) { + // ----- parallel loop for B, M, O --------- + long i = blockIdx.x * blockDim.x + threadIdx.x; + if (i >= B * M * O) return; + int b = (int)(i / (M * O)); + int m = (int)(i % (M * O) / O); + int o = (int)(i % O); + + // ----- loop for N,K --------- + for (int n = 0; n < N; n++) { + for (int k = 0; k < K; k++) { + int kn = knn_idx[b * N * K + n * K + k]; + int cn = knn_idx[b * N * K + n * K + 0]; + if (kn >= N0 || + kn < 0) { // if index overflows, it is out of the neighborhood range + continue; + } + atomicAdd(grad_points + b * N0 * M * O + kn * M * O + m * O + o, + scores[b * N * K * M + n * K * M + k * M + m] * + grad_out[b * O * N * K + o * N * K + n * K + k]); + atomicAdd(grad_centers + b * N0 * M * O + cn * M * O + m * O + o, + -scores[b * N * K * M + n * K * M + k * M + m] * + grad_out[b * O * N * K + o * N * K + n * K + k]); + } + } +} + +template +__global__ void assign_score_withk_scores_backward_cuda_kernel( + const int B, const int N0, const int N, const int M, const int K, + const int O, const int aggregate, const T* grad_out, const T* points, + const T* centers, const int64_t* knn_idx, T* grad_scores) { + // ----- parallel loop for B, N, K, M --------- + long i = blockIdx.x * blockDim.x + threadIdx.x; + if (i >= B * N * K * M) return; + const int b = (int)(i / (N * M * K)); + const int n = (int)(i % (N * M * K) / M / K); + const int k = (int)(i % (M * K) / M); + const int m = (int)(i % M); + const int cn = knn_idx[b * N * K + n * K + 0]; + const int kn = knn_idx[b * N * K + n * K + k]; + if (kn >= N0 || + kn < 0) { // if index overflows, it is out of the neighborhood range + return; + } + + // -------------- loop for O ------------------------ + const int out_idx = b * N * K * M + n * K * M + k * M + m; + T val = grad_scores[out_idx]; + for (int o = 0; o < O; o++) { + val += (points[b * N0 * M * O + kn * M * O + m * O + o] - + centers[b * N0 * M * O + cn * M * O + m * O + o]) * + grad_out[b * O * N * K + o * N * K + n * K + k]; + } + grad_scores[out_idx] = val; +} + +#endif // ASSIGN_SCORE_WITHK_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/ball_query_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/ball_query_cuda_kernel.cuh new file mode 100644 index 0000000..2d88c63 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/ball_query_cuda_kernel.cuh @@ -0,0 +1,53 @@ +// Copyright (c) OpenMMLab. 
All rights reserved +// Modified from +// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/ball_query_gpu.cu +#ifndef BALL_QUERY_CUDA_KERNEL_CUH +#define BALL_QUERY_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +template +__global__ void ball_query_forward_cuda_kernel(int b, int n, int m, + float min_radius, + float max_radius, int nsample, + const T* new_xyz, const T* xyz, + int* idx) { + // new_xyz: (B, M, 3) + // xyz: (B, N, 3) + // output: + // idx: (B, M, nsample) + int bs_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= b || pt_idx >= m) return; + + new_xyz += bs_idx * m * 3 + pt_idx * 3; + xyz += bs_idx * n * 3; + idx += bs_idx * m * nsample + pt_idx * nsample; + + float max_radius2 = max_radius * max_radius; + float min_radius2 = min_radius * min_radius; + T new_x = new_xyz[0]; + T new_y = new_xyz[1]; + T new_z = new_xyz[2]; + + int cnt = 0; + for (int k = 0; k < n; ++k) { + T x = xyz[k * 3 + 0]; + T y = xyz[k * 3 + 1]; + T z = xyz[k * 3 + 2]; + T d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + + (new_z - z) * (new_z - z); + if (d2 == 0 || (d2 >= min_radius2 && d2 < max_radius2)) { + if (cnt == 0) { + for (int l = 0; l < nsample; ++l) { + idx[l] = k; + } + } + idx[cnt] = k; + ++cnt; + if (cnt >= nsample) break; + } + } +} + +#endif // BALL_QUERY_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/bbox_overlaps_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/bbox_overlaps_cuda_kernel.cuh new file mode 100644 index 0000000..27e2c70 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/bbox_overlaps_cuda_kernel.cuh @@ -0,0 +1,80 @@ +// Copyright (c) OpenMMLab. All rights reserved +#ifndef BBOX_OVERLAPS_CUDA_KERNEL_CUH +#define BBOX_OVERLAPS_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +template +__global__ void bbox_overlaps_cuda_kernel(const T* bbox1, const T* bbox2, + T* ious, const int num_bbox1, + const int num_bbox2, const int mode, + const bool aligned, + const int offset) { + if (aligned) { + CUDA_1D_KERNEL_LOOP(index, num_bbox1) { + int b1 = index; + int b2 = index; + + int base1 = b1 * 4; + T b1_x1 = bbox1[base1]; + T b1_y1 = bbox1[base1 + 1]; + T b1_x2 = bbox1[base1 + 2]; + T b1_y2 = bbox1[base1 + 3]; + T b1_area = (b1_x2 - b1_x1 + offset) * (b1_y2 - b1_y1 + offset); + + int base2 = b2 * 4; + T b2_x1 = bbox2[base2]; + T b2_y1 = bbox2[base2 + 1]; + T b2_x2 = bbox2[base2 + 2]; + T b2_y2 = bbox2[base2 + 3]; + T b2_area = (b2_x2 - b2_x1 + offset) * (b2_y2 - b2_y1 + offset); + + T left = fmaxf(b1_x1, b2_x1), right = fminf(b1_x2, b2_x2); + T top = fmaxf(b1_y1, b2_y1), bottom = fminf(b1_y2, b2_y2); + T width = fmaxf(right - left + offset, 0.f); + T height = fmaxf(bottom - top + offset, 0.f); + T interS = width * height; + T baseS = 1.0; + if (mode == 0) { + baseS = fmaxf(b1_area + b2_area - interS, T(offset)); + } else if (mode == 1) { + baseS = fmaxf(b1_area, T(offset)); + } + ious[index] = interS / baseS; + } + } else { + CUDA_1D_KERNEL_LOOP(index, num_bbox1 * num_bbox2) { + int b1 = index / num_bbox2; + int b2 = index % num_bbox2; + + int base1 = b1 * 4; + T b1_x1 = bbox1[base1]; + T b1_y1 = bbox1[base1 + 1]; + T b1_x2 = bbox1[base1 + 2]; + T b1_y2 = bbox1[base1 + 3]; + T b1_area = (b1_x2 - b1_x1 + offset) * (b1_y2 - b1_y1 + offset); + + int base2 = b2 * 4; + T b2_x1 = bbox2[base2]; + T b2_y1 = bbox2[base2 + 1]; + T b2_x2 = bbox2[base2 + 2]; + T b2_y2 = bbox2[base2 + 3]; + T b2_area = (b2_x2 - b2_x1 + offset) * (b2_y2 - b2_y1 + offset); + + T left = fmaxf(b1_x1, b2_x1), right = fminf(b1_x2, 
b2_x2); + T top = fmaxf(b1_y1, b2_y1), bottom = fminf(b1_y2, b2_y2); + T width = fmaxf(right - left + offset, 0.f); + T height = fmaxf(bottom - top + offset, 0.f); + T interS = width * height; + T baseS = 1.0; + if (mode == 0) { + baseS = fmaxf(b1_area + b2_area - interS, T(offset)); + } else if (mode == 1) { + baseS = fmaxf(b1_area, T(offset)); + } + ious[index] = interS / baseS; + } + } +} + +#endif // BBOX_OVERLAPS_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/border_align_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/border_align_cuda_kernel.cuh new file mode 100644 index 0000000..49c7877 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/border_align_cuda_kernel.cuh @@ -0,0 +1,196 @@ +// Copyright (c) OpenMMLab. All rights reserved +// modified from +// https://github.com/Megvii-BaseDetection/cvpods/blob/master/cvpods/layers/csrc/border_align/border_align_kernel.cu. +// the main difference: (1) use `argmax_idx` for fast computing of gradient +// during the backward. (2) `wh` is directly computed by `boxes`, rather than +// passing it as argument to forward or backward functions. + +#ifndef BORDER_ALIGN_CUDA_KERNEL_CUH +#define BORDER_ALIGN_CUDA_KERNEL_CUH + +#include +#ifdef MMCV_WITH_TRT +#include "common_cuda_helper.hpp" +#else // MMCV_WITH_TRT +#include "pytorch_cuda_helper.hpp" +#endif // MMCV_WITH_TRT + +enum BorderMode { Top = 0, Left = 1, Bottom = 2, Right = 3 }; + +/*** Forward ***/ +template +__global__ void border_align_forward_cuda_kernel( + const int nthreads, const T* input, const T* boxes, T* output, + int* argmax_idx, const int channels, const int box_size, const int height, + const int width, const int pool_size) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (batch_idx, c_idx, box_idx) is an element paralleled for computing + // output, and `extreme_idx` is in range [0,3] + int batch_idx, c_idx, box_idx, extreme_idx, maxidx, *offset_argmax_idx; + const T *offset_box, *offset_input, *offset_box_x; + T *offset_output, box_width, box_height, stride, x_stride, y_stride, x, y, + val, maxval; + + extreme_idx = threadIdx.y; + // shape (N, C, box_size, 4) for output + batch_idx = index / channels / box_size; + // shape (N, box_size, 4) for boxes + box_idx = index % box_size + batch_idx * box_size; + c_idx = (index / box_size) % channels; + + offset_box = boxes + box_idx * 4; + box_width = *(offset_box + 2) - *offset_box; + box_height = *(offset_box + 3) - *(offset_box + 1); + offset_output = output + index * 4 + extreme_idx; + offset_argmax_idx = argmax_idx + index * 4 + extreme_idx; + // shape (N, 4C, h, w) for input. 
+ // [0,C) for top feature, [C,2C) for left feature, + // [2C,3C) for bottom feature, [3C,4C) for right feature + offset_input = + input + (batch_idx * channels * 4 + extreme_idx * channels + c_idx) * + height * width; + + // extreme_idx in [0,1] -> offset_box_x indexed at x1 + // extreme_idx in [2,3] -> offset_box_x indexed at x2 + offset_box_x = offset_box + extreme_idx / 2 * 2; + + // (x1,y1) or (x2,y2) for (x,y) + x = *offset_box_x; + y = *(offset_box_x + 1); + + switch (extreme_idx) { + // top + case BorderMode::Top: + stride = box_width / pool_size; + x_stride = stride; + y_stride = 0; + break; + // left + case BorderMode::Left: + stride = box_height / pool_size; + x_stride = 0; + y_stride = stride; + break; + // bottom + case BorderMode::Bottom: + stride = box_width / pool_size; + x_stride = -stride; + y_stride = 0; + break; + // right + case BorderMode::Right: + stride = box_height / pool_size; + x_stride = 0; + y_stride = -stride; + break; + } + + // initialize maxval and maxidx with the start position (e.g. (x1,y1) or + // (x2,y2)) + maxval = bilinear_interpolate(offset_input, height, width, y, x, index); + maxidx = 0; + + // do max_pool along the border + for (int i = 1; i <= pool_size; i++) { + x += x_stride; + y += y_stride; + val = bilinear_interpolate(offset_input, height, width, y, x, index); + if (val > maxval) { + maxval = val; + maxidx = i; + } + } + + // update output and argmax_idx + *offset_output = maxval; + *offset_argmax_idx = maxidx; + } +} + +/*** Backward ***/ +template +__global__ void border_align_backward_cuda_kernel( + const int nthreads, const T* grad_output, const T* boxes, + const int* argmax_idx, T* grad_input, const int channels, + const int box_size, const int height, const int width, + const int pool_size) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (batch_idx, c_idx, box_idx) is an element paralleled for computing + // output, and `extreme_idx` is in range [0,3] + int batch_idx, c_idx, box_idx, extreme_idx; + const int* offset_argmax_idx; + const T *offset_grad_output, *offset_box, *offset_box_x; + T *offset_grad_input, box_width, box_height, stride, x_stride, y_stride, x, + y; + + extreme_idx = threadIdx.y; + batch_idx = index / channels / box_size; + box_idx = index % box_size + batch_idx * box_size; + c_idx = (index / box_size) % channels; + + offset_box = boxes + box_idx * 4; + box_width = *(offset_box + 2) - *offset_box; + box_height = *(offset_box + 3) - *(offset_box + 1); + offset_grad_output = grad_output + index * 4 + extreme_idx; + offset_argmax_idx = argmax_idx + index * 4 + extreme_idx; + // [0,C) for top feature grad, [C,2C) for left feature grad, + // [2C,3C) for bottom feature grad, [3C,4C) for right feature grad + offset_grad_input = grad_input + (batch_idx * channels * 4 + + extreme_idx * channels + c_idx) * + height * width; + + // extreme_idx in [0,1] -> offset_box_x indexed at x1 + // extreme_idx in [2,3] -> offset_box_x indexed at x2 + offset_box_x = offset_box + extreme_idx / 2 * 2; + + switch (extreme_idx) { + // top + case BorderMode::Top: + stride = box_width / pool_size; + x_stride = stride; + y_stride = 0; + break; + // left + case BorderMode::Left: + stride = box_height / pool_size; + x_stride = 0; + y_stride = stride; + break; + // bottom + case BorderMode::Bottom: + stride = box_width / pool_size; + x_stride = -stride; + y_stride = 0; + break; + // right + case BorderMode::Right: + stride = box_height / pool_size; + x_stride = 0; + y_stride = -stride; + break; + } + + // get position (x,y) which has maximum value 
during forward + x = *offset_box_x; + y = *(offset_box_x + 1); + x += x_stride * (T)(*offset_argmax_idx); + y += y_stride * (T)(*offset_argmax_idx); + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, x_low, + x_high, y_low, y_high, index); + + // update grad_output + atomicAdd(offset_grad_input + y_low * width + x_low, + *offset_grad_output * w1); + atomicAdd(offset_grad_input + y_low * width + x_high, + *offset_grad_output * w2); + atomicAdd(offset_grad_input + y_high * width + x_low, + *offset_grad_output * w3); + atomicAdd(offset_grad_input + y_high * width + x_high, + *offset_grad_output * w4); + } +} + +#endif // BORDER_ALIGN_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/box_iou_rotated_cuda.cuh b/mmcv/ops/csrc/common/cuda/box_iou_rotated_cuda.cuh new file mode 100644 index 0000000..e7171e0 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/box_iou_rotated_cuda.cuh @@ -0,0 +1,78 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +// modified from +// https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu +#ifndef BOX_IOU_ROTATED_CUDA_CUH +#define BOX_IOU_ROTATED_CUDA_CUH + + +#include "pytorch_cuda_helper.hpp" +#include "box_iou_rotated_utils.hpp" + +// 2D block with 32 * 16 = 512 threads per block +const int BLOCK_DIM_X = 32; +const int BLOCK_DIM_Y = 16; + +inline int divideUP(const int x, const int y) { return (((x) + (y)-1) / (y)); } + +template +__global__ void box_iou_rotated_cuda_kernel( + const int n_boxes1, const int n_boxes2, const T* dev_boxes1, + const T* dev_boxes2, T* dev_ious, const int mode_flag, const bool aligned) { + if (aligned) { + CUDA_1D_KERNEL_LOOP(index, n_boxes1) { + int b1 = index; + int b2 = index; + + int base1 = b1 * 5; + + float block_boxes1[5]; + float block_boxes2[5]; + + block_boxes1[0] = dev_boxes1[base1 + 0]; + block_boxes1[1] = dev_boxes1[base1 + 1]; + block_boxes1[2] = dev_boxes1[base1 + 2]; + block_boxes1[3] = dev_boxes1[base1 + 3]; + block_boxes1[4] = dev_boxes1[base1 + 4]; + + int base2 = b2 * 5; + + block_boxes2[0] = dev_boxes2[base2 + 0]; + block_boxes2[1] = dev_boxes2[base2 + 1]; + block_boxes2[2] = dev_boxes2[base2 + 2]; + block_boxes2[3] = dev_boxes2[base2 + 3]; + block_boxes2[4] = dev_boxes2[base2 + 4]; + + dev_ious[index] = + single_box_iou_rotated(block_boxes1, block_boxes2, mode_flag); + } + } else { + CUDA_1D_KERNEL_LOOP(index, n_boxes1 * n_boxes2) { + int b1 = index / n_boxes2; + int b2 = index % n_boxes2; + + int base1 = b1 * 5; + + float block_boxes1[5]; + float block_boxes2[5]; + + block_boxes1[0] = dev_boxes1[base1 + 0]; + block_boxes1[1] = dev_boxes1[base1 + 1]; + block_boxes1[2] = dev_boxes1[base1 + 2]; + block_boxes1[3] = dev_boxes1[base1 + 3]; + block_boxes1[4] = dev_boxes1[base1 + 4]; + + int base2 = b2 * 5; + + block_boxes2[0] = dev_boxes2[base2 + 0]; + block_boxes2[1] = dev_boxes2[base2 + 1]; + block_boxes2[2] = dev_boxes2[base2 + 2]; + block_boxes2[3] = dev_boxes2[base2 + 3]; + block_boxes2[4] = dev_boxes2[base2 + 4]; + + dev_ious[index] = + single_box_iou_rotated(block_boxes1, block_boxes2, mode_flag); + } + } +} + +#endif diff --git a/mmcv/ops/csrc/common/cuda/carafe_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/carafe_cuda_kernel.cuh new file mode 100644 index 0000000..d77f87c --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/carafe_cuda_kernel.cuh @@ -0,0 +1,328 @@ +// Copyright (c) OpenMMLab. 
All rights reserved +#ifndef CARAFE_CUDA_KERNEL_CUH +#define CARAFE_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +#ifdef HIP_DIFF +#define WARP_SIZE 64 +#else +#define WARP_SIZE 32 +#endif +#define THREADS_PER_PIXEL 32 +#define MAX_SHARED_MEMORY 49152 +#define MAX_SHARED_SCALAR_T 6144 // 49152 / 8 = 6144 +#define MAXIMIZE_KERNEL_SIZE true +#define kTileDim 32 +#define kBlockRows 8 +#define FULL_MASK 0xffffffff + +inline int divideUP(const int x, const int y) { return (((x) + (y)-1) / (y)); } + +__device__ inline int Loc2Index(const int n, const int c, const int h, + const int w, const int channel_num, + const int height, const int width) { + int index = w + (h + (c + n * channel_num) * height) * width; + return index; +} +#ifndef HIP_DIFF +/* TODO: move this to a common place */ +template +__device__ inline scalar_t min(scalar_t a, scalar_t b) { + return a < b ? a : b; +} + +template +__device__ inline scalar_t max(scalar_t a, scalar_t b) { + return a > b ? a : b; +} +#endif +template +__device__ __forceinline__ scalar_t warpReduceSum(scalar_t val) { + for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) +#ifdef HIP_DIFF + val += __shfl_down(val, offset); +#else + val += __shfl_down_sync(FULL_MASK, val, offset); +#endif + return val; +} + +template <> +__device__ __forceinline__ phalf warpReduceSum(phalf val) { + for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) +#ifdef HIP_DIFF + __PHALF(val) += __shfl_down(FULL_MASK, val, offset); +#else + __PHALF(val) += + __shfl_down_sync(FULL_MASK, static_cast<__half>(__PHALF(val)), offset); +#endif + return val; +} + +// Splits the original matrix into submatrices with size 32 * 32. +// Each block transposes one submatrix by loading it into shared memory. +// Reference https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/ +template +__global__ void BatchTranspose2DCUDAKernel(const int N, const int H, + const int W, const int dh, + const int dw, + const scalar_t *__restrict__ X, + scalar_t *__restrict__ Y) { + __shared__ scalar_t tile[kTileDim][kTileDim + 1]; + const int n = blockIdx.x / (dh * dw); + const int k = blockIdx.x % (dh * dw); + const int r = k / dw; + const int c = k % dw; + const int offset = n * H * W; + int x = c * kTileDim + threadIdx.x; + int y = r * kTileDim + threadIdx.y; + if (x < W) { + for (int i = 0; threadIdx.y + i < kTileDim && y + i < H; i += kBlockRows) { + tile[threadIdx.y + i][threadIdx.x] = X[offset + (y + i) * W + x]; + } + } + __syncthreads(); + x = r * kTileDim + threadIdx.x; + y = c * kTileDim + threadIdx.y; + if (x < H) { + for (int i = 0; threadIdx.y + i < kTileDim && y + i < W; i += kBlockRows) { + Y[offset + (y + i) * H + x] = tile[threadIdx.x][threadIdx.y + i]; + } + } +} +template +__global__ void CARAFEForward( + const int num_kernels, const scalar_t *__restrict__ bottom_data, + const scalar_t *__restrict__ bottom_masks, const int kernel_size, + const int group_size, const int scale_factor, const int channels, + const int down_height, const int down_width, const int height, + const int width, const int mask_channels, scalar_t *__restrict__ top_data) { +#if MAXIMIZE_KERNEL_SIZE + __shared__ float shared_mask[MAX_SHARED_SCALAR_T * 2]; +#else + __shared__ scalar_t shared_mask[MAX_SHARED_SCALAR_T]; +#endif + + int index = threadIdx.x + blockIdx.x * blockDim.x; + if (index > num_kernels - 1) { + return; + } + const int pixel_id = threadIdx.x / THREADS_PER_PIXEL; + const int split_id = threadIdx.x % THREADS_PER_PIXEL; + index = index / THREADS_PER_PIXEL; + const int pw = index % 
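+    // Thread layout, for intuition (figures illustrative, assuming the
+    // usual launch where blockDim.x is a multiple of THREADS_PER_PIXEL):
+    // each output pixel is served by 32 consecutive lanes. With a
+    // hypothetical blockDim.x = 128, threadIdx.x = 70 gives
+    //   pixel_id = 70 / 32 = 2  (which of this block's 4 pixels)
+    //   split_id = 70 % 32 = 6  (this lane's starting channel)
+    // split_id then strides over mask/feature channels in steps of 32,
+    // and shared_mask caches the pixel's reassembly kernel so all 32
+    // lanes reuse it after the __syncthreads() below.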
width; + const int ph = (index / width) % height; + const int n = index / width / height; + + const int down_pw = pw / scale_factor; + const int down_ph = ph / scale_factor; + + const int start_w = down_pw - (kernel_size - 1) / 2; + const int end_w = down_pw + (kernel_size - 1) / 2 + 1; + const int start_h = down_ph - (kernel_size - 1) / 2; + const int end_h = down_ph + (kernel_size - 1) / 2 + 1; + for (int c = split_id; c < mask_channels; c += THREADS_PER_PIXEL) { + int mask_index = Loc2Index(n, ph, pw, c, height, width, mask_channels); + shared_mask[c * WARP_SIZE + pixel_id] = bottom_masks[mask_index]; + } + __syncthreads(); + + const int channels_per_group = ceilf(channels / (float)group_size); +#pragma unroll + for (int c = split_id; c < channels; c += THREADS_PER_PIXEL) { + int mask_group = c / channels_per_group; + scalar_t output_val = 0; +#pragma unroll + for (int iy = start_h; iy < end_h; iy++) { +#pragma unroll + for (int ix = start_w; ix < end_w; ix++) { + if (iy < 0 || iy > down_height - 1 || ix < 0 || ix > down_width - 1) { + continue; + } + int mask_iy = iy - down_ph + (kernel_size - 1) / 2; + int mask_ix = ix - down_pw + (kernel_size - 1) / 2; + int mask_c = + (mask_group * kernel_size + mask_iy) * kernel_size + mask_ix; + int feat_index = + Loc2Index(n, iy, ix, c, down_height, down_width, channels); + + output_val += bottom_data[feat_index] * + shared_mask[mask_c * WARP_SIZE + pixel_id]; + } + } + + int top_index = Loc2Index(n, ph, pw, c, height, width, channels); + top_data[top_index] = output_val; + } +} + +template +__global__ void CARAFEBackward_Feature( + const int num_kernels, const scalar_t *__restrict__ top_diff, + const scalar_t *__restrict__ bottom_masks, const int kernel_size, + const int group_size, const int scale_factor, const int channels, + const int down_height, const int down_width, const int height, + const int width, const int mask_channels, + scalar_t *__restrict__ bottom_diff) { +#if MAXIMIZE_KERNEL_SIZE + __shared__ float shared_mask[MAX_SHARED_SCALAR_T * 2]; +#else + __shared__ scalar_t shared_mask[MAX_SHARED_SCALAR_T]; +#endif + + int index = threadIdx.x + blockIdx.x * blockDim.x; + if (index > num_kernels - 1) { + return; + } + + const int pixel_id = threadIdx.x / THREADS_PER_PIXEL; + const int split_id = threadIdx.x % THREADS_PER_PIXEL; + // (n, c, ph, pw) is an element in the bottom_data + index = index / THREADS_PER_PIXEL; + const int pw = index % width; + const int ph = (index / width) % height; + const int n = index / width / height; + + const int start_w = pw - (kernel_size - 1) * scale_factor / 2; + const int end_w = pw + (kernel_size - 1) * scale_factor / 2 + 1; + const int start_h = ph - (kernel_size - 1) * scale_factor / 2; + const int end_h = ph + (kernel_size - 1) * scale_factor / 2 + 1; + for (int c = split_id; c < mask_channels; c += THREADS_PER_PIXEL) { + const int mask_w = (c % kernel_size) * scale_factor; + const int mask_h = (c / kernel_size % kernel_size) * scale_factor; + const int mask_x = start_w + mask_w; + const int mask_y = start_h + mask_h; + if (mask_y < 0 || mask_y > height - 1 || mask_x < 0 || mask_x > width - 1) { + shared_mask[c * WARP_SIZE + pixel_id] = 0; + continue; + } + const int mask_group = c / (kernel_size * kernel_size); + const int mask_c = (2 * mask_group + 1) * kernel_size * kernel_size - c - 1; + int mask_index = + Loc2Index(n, mask_c, mask_y, mask_x, mask_channels, height, width); + shared_mask[c * WARP_SIZE + pixel_id] = bottom_masks[mask_index]; + } + __syncthreads(); + const int channels_per_group = 
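+    // Group bookkeeping, sketched with hypothetical sizes: channels =
+    // 256 and group_size = 2 give channels_per_group = 128, so feature
+    // channels [0,128) consume mask group 0 and [128,256) mask group 1.
+    // Note the loading loop above fetched the spatially *mirrored* tap,
+    //   mask_c = (2 * mask_group + 1) * K * K - c - 1
+    // (K = kernel_size; e.g. K = 5, mask_group = 0, c = 0 -> mask_c = 24):
+    // backprop through a reassembly window is correlation with the
+    // flipped kernel.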
ceilf(channels / (float)group_size); +#pragma unroll + for (int c = split_id; c < channels; c += THREADS_PER_PIXEL) { + int mask_group = c / channels_per_group; + int top_index = Loc2Index(n, ph, pw, c, height, width, channels); + scalar_t output_val = 0; +#pragma unroll + for (int iy = start_h; iy < end_h; iy += scale_factor) { +#pragma unroll + for (int ix = start_w; ix < end_w; ix += scale_factor) { + if (iy < 0 || iy > height - 1 || ix < 0 || ix > width - 1) { + continue; + } + int mask_iy = + (iy - ph + (kernel_size - 1) * scale_factor / 2) / scale_factor; + int mask_ix = + (ix - pw + (kernel_size - 1) * scale_factor / 2) / scale_factor; + int mask_c = + (mask_group * kernel_size + mask_iy) * kernel_size + mask_ix; + int feat_index = Loc2Index(n, iy, ix, c, height, width, channels); + output_val += + shared_mask[mask_c * WARP_SIZE + pixel_id] * top_diff[feat_index]; + } + } + bottom_diff[top_index] = output_val; + } +} + +template +__global__ void FeatureSum(const int num_kernels, + const scalar_t *__restrict__ input_data, + const int scale_factor, const int channels, + const int height, const int width, + scalar_t *__restrict__ output_data) { + int index = threadIdx.x + blockIdx.x * blockDim.x; + if (index > num_kernels - 1) { + return; + } + const int split_id = threadIdx.x % THREADS_PER_PIXEL; + index = index / THREADS_PER_PIXEL; + const int pw = index % width; + const int ph = (index / width) % height; + const int n = index / width / height; + for (int c = split_id; c < channels; c += THREADS_PER_PIXEL) { + scalar_t output_val = 0; + for (int iy = ph * scale_factor; iy < (ph + 1) * scale_factor; iy++) { + for (int ix = pw * scale_factor; ix < (pw + 1) * scale_factor; ix++) { + int input_id = Loc2Index(n, iy, ix, c, height * scale_factor, + width * scale_factor, channels); + output_val += input_data[input_id]; + } + } + const int output_id = Loc2Index(n, ph, pw, c, height, width, channels); + output_data[output_id] = output_val; + } +} + +template +__global__ void CARAFEBackward_Mask(const int num_kernels, + const scalar_t *__restrict__ top_diff, + const scalar_t *__restrict__ bottom_data, + const int kernel_size, const int group_size, + const int scale_factor, const int channels, + const int down_height, const int down_width, + const int height, const int width, + const int mask_channels, + scalar_t *__restrict__ mask_diff) { + int index = threadIdx.x + blockIdx.x * blockDim.x; + if (index > num_kernels - 1) { + return; + } + + const int lane_id = index % WARP_SIZE; + index = index / WARP_SIZE; + const int mask_c = index % mask_channels; + // (n, c, ph, pw) is an element in the bottom_data + index = index / mask_channels; + const int pw = index % width; + const int ph = (index / width) % height; + const int n = index / width / height; + + const int down_pw = pw / scale_factor; + const int down_ph = ph / scale_factor; + + const int mask_group = mask_c / (kernel_size * kernel_size); + const int mask_loc = mask_c % (kernel_size * kernel_size); + + const int offset_x = mask_loc % kernel_size - (kernel_size - 1) / 2; + const int offset_y = + mask_loc / kernel_size % kernel_size - (kernel_size - 1) / 2; + + const int down_x = down_pw + offset_x; + const int down_y = down_ph + offset_y; + + scalar_t output_val = 0; + + if (down_y >= 0 && down_y <= down_height - 1 && down_x >= 0 && + down_x <= down_width - 1) { + const int channels_per_mask = ceilf(channels / (float)group_size); + const int start = channels_per_mask * mask_group; + const int end = min(channels_per_mask * (mask_group + 1), 
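+    // Warp-cooperative reduction, assuming the CUDA path (WARP_SIZE =
+    // 32; 64 under HIP): each of the warp's lanes accumulates a partial
+    // dot-product over its stripe of the [start, end) channel range in
+    // the loop below, then warpReduceSum folds the partials with
+    // __shfl_down_sync at offsets 16, 8, 4, 2, 1, leaving the full sum
+    // in lane 0 -- the only lane that writes mask_diff.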
channels); + for (int c = start + lane_id; c < end; c += WARP_SIZE) { + int bottom_id = + Loc2Index(n, down_y, down_x, c, down_height, down_width, channels); + int top_id = Loc2Index(n, ph, pw, c, height, width, channels); + output_val += top_diff[top_id] * bottom_data[bottom_id]; + } + } +#ifdef HIP_DIFF + __syncthreads(); +#else + __syncwarp(); +#endif + output_val = warpReduceSum(output_val); + if (lane_id == 0) { + const int mask_id = + Loc2Index(n, ph, pw, mask_c, height, width, mask_channels); + mask_diff[mask_id] = output_val; + } +} + +#endif // CARAFE_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/carafe_naive_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/carafe_naive_cuda_kernel.cuh new file mode 100644 index 0000000..0a4ab87 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/carafe_naive_cuda_kernel.cuh @@ -0,0 +1,107 @@ +// Copyright (c) OpenMMLab. All rights reserved +#ifndef CARAFE_NAIVE_CUDA_KERNEL_CUH +#define CARAFE_NAIVE_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +__device__ inline int Loc2Index(const int n, const int c, const int h, + const int w, const int channel_num, + const int height, const int width) { + int index = w + (h + (c + n * channel_num) * height) * width; + return index; +} + +template +__global__ void carafe_naive_forward_cuda_kernel( + const int nthreads, const scalar_t *bottom_data, + const scalar_t *bottom_masks, scalar_t *top_data, const int kernel_size, + const int group_size, const int scale_factor, const int channels, + const int height, const int width) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the bottom_data + int pw = index % width; + int ph = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + + int mask_channels = kernel_size * kernel_size * group_size; + int mask_group = c / (channels / group_size); + + int down_pw = pw / scale_factor; + int down_ph = ph / scale_factor; + int down_width = width / scale_factor; + int down_height = height / scale_factor; + int start_w = down_pw - (kernel_size - 1) / 2; + int end_w = down_pw + (kernel_size - 1) / 2 + 1; + int start_h = down_ph - (kernel_size - 1) / 2; + int end_h = down_ph + (kernel_size - 1) / 2 + 1; + + scalar_t output_val = 0; + for (int iy = start_h; iy < end_h; iy++) { + for (int ix = start_w; ix < end_w; ix++) { + if (iy < 0 || iy > down_height - 1 || ix < 0 || ix > down_width - 1) { + continue; + } + int mask_iy = iy - down_ph + (kernel_size - 1) / 2; + int mask_ix = ix - down_pw + (kernel_size - 1) / 2; + int mask_c = + (mask_group * kernel_size + mask_iy) * kernel_size + mask_ix; + int feat_index = + Loc2Index(n, c, iy, ix, channels, down_height, down_width); + int mask_index = + Loc2Index(n, mask_c, ph, pw, mask_channels, height, width); + output_val += bottom_data[feat_index] * bottom_masks[mask_index]; + } + } + top_data[index] = output_val; + } +} + +template +__global__ void carafe_naive_backward_cuda_kernel( + const int nthreads, const scalar_t *top_diff, const scalar_t *bottom_data, + const scalar_t *bottom_masks, scalar_t *bottom_diff, scalar_t *mask_diff, + const int kernel_size, const int group_size, const int scale_factor, + const int channels, const int height, const int width) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the bottom_data + int pw = index % width; + int ph = (index / width) % height; + int c = (index / width / height) % channels; + int n = index / width / height / channels; + + int mask_channels = 
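+    // This naive variant trades speed for simplicity: every output
+    // element re-reads its mask window straight from global memory (no
+    // shared-memory cache, no warp reduction), and this backward kernel
+    // scatters into both bottom_diff and mask_diff with atomicAdd below,
+    // since neighbouring outputs touch overlapping inputs. Size check
+    // with hypothetical kernel_size = 5, group_size = 1:
+    // mask_channels = 5 * 5 * 1 = 25 mask maps per sample.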
kernel_size * kernel_size * group_size; + int mask_group = c / (channels / group_size); + + int down_pw = pw / scale_factor; + int down_ph = ph / scale_factor; + int down_width = width / scale_factor; + int down_height = height / scale_factor; + int start_w = down_pw - (kernel_size - 1) / 2; + int end_w = down_pw + (kernel_size - 1) / 2 + 1; + int start_h = down_ph - (kernel_size - 1) / 2; + int end_h = down_ph + (kernel_size - 1) / 2 + 1; + + for (int iy = start_h; iy < end_h; iy++) { + for (int ix = start_w; ix < end_w; ix++) { + if (iy < 0 || iy > down_height - 1 || ix < 0 || ix > down_width - 1) { + continue; + } + int mask_iy = iy - down_ph + (kernel_size - 1) / 2; + int mask_ix = ix - down_pw + (kernel_size - 1) / 2; + int mask_c = + (mask_group * kernel_size + mask_iy) * kernel_size + mask_ix; + int feat_index = + Loc2Index(n, c, iy, ix, channels, down_height, down_width); + int mask_index = + Loc2Index(n, mask_c, ph, pw, mask_channels, height, width); + atomicAdd(bottom_diff + feat_index, + bottom_masks[mask_index] * top_diff[index]); + atomicAdd(mask_diff + mask_index, + bottom_data[feat_index] * top_diff[index]); + } + } + } +} + +#endif // CARAFE_NAIVE_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/common_cuda_helper.hpp b/mmcv/ops/csrc/common/cuda/common_cuda_helper.hpp new file mode 100644 index 0000000..dc5df17 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/common_cuda_helper.hpp @@ -0,0 +1,112 @@ +#ifndef COMMON_CUDA_HELPER +#define COMMON_CUDA_HELPER + +#include + +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ + i += blockDim.x * gridDim.x) + +#define THREADS_PER_BLOCK 512 + +#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) + +inline int GET_BLOCKS(const int N) { + int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; + int max_block_num = 4096; + return min(optimal_block_num, max_block_num); +} + +template +__device__ T bilinear_interpolate(const T* input, const int height, + const int width, T y, T x, + const int index /* index for debug only*/) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) return 0; + + if (y <= 0) y = 0; + if (x <= 0) x = 0; + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. 
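+  // Worked example of the interpolation weights (illustrative numbers):
+  // x = 2.3, y = 4.6 -> x_low = 2, x_high = 3, y_low = 4, y_high = 5,
+  //   lx = 0.3, hx = 0.7, ly = 0.6, hy = 0.4
+  //   w1 = hy * hx = 0.28  (top-left,     v1)
+  //   w2 = hy * lx = 0.12  (top-right,    v2)
+  //   w3 = ly * hx = 0.42  (bottom-left,  v3)
+  //   w4 = ly * lx = 0.18  (bottom-right, v4)
+  // The four weights always sum to 1, so interpolation preserves scale.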
- lx; + // do bilinear interpolation + T v1 = input[y_low * width + x_low]; + T v2 = input[y_low * width + x_high]; + T v3 = input[y_high * width + x_low]; + T v4 = input[y_high * width + x_high]; + T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + return val; +} + +template +__device__ void bilinear_interpolate_gradient( + const int height, const int width, T y, T x, T& w1, T& w2, T& w3, T& w4, + int& x_low, int& x_high, int& y_low, int& y_high, + const int index /* index for debug only*/) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y <= 0) y = 0; + if (x <= 0) x = 0; + + y_low = (int)y; + x_low = (int)x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + + // reference in forward + // T v1 = input[y_low * width + x_low]; + // T v2 = input[y_low * width + x_high]; + // T v3 = input[y_high * width + x_low]; + // T v4 = input[y_high * width + x_high]; + // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + return; +} +#endif // COMMON_CUDA_HELPER diff --git a/mmcv/ops/csrc/common/cuda/correlation_cuda.cuh b/mmcv/ops/csrc/common/cuda/correlation_cuda.cuh new file mode 100644 index 0000000..0ef3fae --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/correlation_cuda.cuh @@ -0,0 +1,227 @@ +// Copyright (c) OpenMMLab. All rights reserved. +// Modified from +// https://github.com/ClementPinard/Pytorch-Correlation-extension/blob/master/Correlation_Module/correlation_cuda_kernel.cu +// Original licence: Under MIT License + +#ifndef CORRELATION_CUDA +#define CORRELATION_CUDA + +#include "pytorch_cuda_helper.hpp" + +#include +#include +// Using is recommended in the official documentation in +// https://pytorch.org/tutorials/advanced/cpp_extension.html#writing-the-c-op. +// However, we use for compatibility with CUDA 9.0 +// Read https://github.com/pytorch/extension-cpp/issues/35 for more details. 
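+// A hypothetical host-side dispatch for the forward kernel in this
+// header, for orientation only (accessor names such as `trInput1_acc`
+// and `output_acc` are assumptions, not part of mmcv's actual host
+// code):
+//
+//   const dim3 grid(batch_size, output_H, output_W);  // block per pixel
+//   const dim3 block(THREADS_FORWARD);                 // lanes split C
+//   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+//       input1.scalar_type(), "correlation_forward_cuda", [&] {
+//         correlation_forward_cuda_kernel<scalar_t>
+//             <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
+//                 trInput1_acc, trInput2_acc, output_acc, kH, kW,
+//                 patchH, patchW, padH, padW, dilationH, dilationW,
+//                 dilation_patchH, dilation_patchW, dH, dW);
+//       });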
+#include + +#include +#include + +using namespace torch; + +#define TensorAcc4R PackedTensorAccessor32 +#define TensorAcc5R PackedTensorAccessor32 +#define WITHIN_BOUNDS(x, y, H, W) (x >= 0 && x < H && y >= 0 && y < W) + +#define THREADS_FORWARD 32 +#define THREADS_BACKWARD 16 + +template +__global__ void correlation_forward_cuda_kernel( + const TensorAcc4R rInput1, const TensorAcc4R rInput2, TensorAcc5R output, + int kH, int kW, int patchH, int patchW, int padH, int padW, int dilationH, + int dilationW, int dilation_patchH, int dilation_patchW, int dH, int dW) { + const int iH = rInput1.size(1); + const int iW = rInput1.size(2); + const int C = rInput1.size(3); + + const int n = blockIdx.x; + const int h = blockIdx.y; + const int w = blockIdx.z; + const int thread = threadIdx.x; + + const int start_i = -padH + h * dH; + const int start_j = -padW + w * dW; + + const int patchRadH = dilation_patchH * (patchH - 1) / 2; + const int patchRadW = dilation_patchW * (patchW - 1) / 2; + + __shared__ scalar_t prod_sum[THREADS_FORWARD]; + + for (int ph = 0; ph < patchH; ++ph) { + int ph_dilated = ph * dilation_patchH - patchRadH; + for (int pw = 0; pw < patchW; ++pw) { + int pw_dilated = pw * dilation_patchW - patchRadW; + prod_sum[thread] = 0; + for (int i = 0; i < kH; ++i) { + int i1 = start_i + i * dilationH; + int i2 = i1 + ph_dilated; + if + WITHIN_BOUNDS(i1, i2, iH, iH) { + for (int j = 0; j < kW; ++j) { + int j1 = start_j + j * dilationW; + int j2 = j1 + pw_dilated; + if + WITHIN_BOUNDS(j1, j2, iW, iW) { + for (int c = thread; c < C; c += THREADS_FORWARD) { + scalar_t v1 = rInput1[n][i1][j1][c]; + scalar_t v2 = rInput2[n][i2][j2][c]; + prod_sum[thread] += v1 * v2; + } + } + } + } + } + // accumulate + __syncthreads(); + if (thread == 0) { + scalar_t reduce_sum = 0; + for (int index = 0; index < THREADS_FORWARD; ++index) { + reduce_sum += prod_sum[index]; + } + output[n][ph][pw][h][w] = reduce_sum; + } + } + } +} + +template +__global__ void correlation_backward_cuda_kernel_input1( + const TensorAcc5R grad_output, const TensorAcc4R input2, + TensorAcc4R grad_input1, const int kH, const int kW, const int patchH, + const int patchW, const int padH, const int padW, const int dilationH, + const int dilationW, const int dilation_patchH, const int dilation_patchW, + const int dH, const int dW, const int batch) { + const int iH = input2.size(2); + const int iW = input2.size(3); + + const int H = grad_output.size(3); + const int W = grad_output.size(4); + + const int patchRadH = (patchH - 1) / 2; + const int patchRadW = (patchW - 1) / 2; + + const int n = batch; + const int c = blockIdx.x; + const int h = blockIdx.y; + const int w = blockIdx.z; + const int ph_off = threadIdx.x; + const int pw_off = threadIdx.y; + + const int h_2 = h + padH; + const int w_2 = w + padW; + const int min_h = h_2 - kH * dilationH; + const int min_w = w_2 - kW * dilationW; + + __shared__ scalar_t prod_sum[THREADS_BACKWARD][THREADS_BACKWARD]; + prod_sum[ph_off][pw_off] = 0; + + for (int ph = ph_off; ph < patchH; ph += THREADS_BACKWARD) { + int i1 = h + dilation_patchH * (ph - patchRadH); + for (int pw = pw_off; pw < patchW; pw += THREADS_BACKWARD) { + int j1 = w + dilation_patchW * (pw - patchRadW); + if (WITHIN_BOUNDS(i1, j1, iH, iW)) { + scalar_t val = input2[n][c][i1][j1]; + for (int h_3 = h_2; h_3 > min_h; h_3 -= dilationH) { + int i2 = (h_3) / dH; + if (i2 * dH != h_3) continue; + for (int w_3 = w_2; w_3 > min_w; w_3 -= dilationW) { + int j2 = (w_3) / dW; + if (j2 * dW != w_3) continue; + if + WITHIN_BOUNDS(i2, j2, H, 
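+              // What these nested loops encode: input pixel (h, w) of
+              // input2 contributes to output row i2 only when i2 * dH
+              // lands exactly on h_3 = h_2 - i * dilationH for some
+              // kernel tap i, hence the descending scan from h_2 and
+              // the `i2 * dH != h_3` divisibility rejection (likewise
+              // for columns). Inverting the forward indexing this way
+              // avoids atomics in the input gradient.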
W) { + prod_sum[ph_off][pw_off] += + grad_output[n][ph][pw][i2][j2] * val; + } + } + } + } + } + } + + __syncthreads(); + + if (ph_off == 0 && pw_off == 0) { + scalar_t reduce_sum = 0; + for (int ph = 0; ph < THREADS_BACKWARD; ++ph) { + for (int pw = 0; pw < THREADS_BACKWARD; ++pw) { + reduce_sum += prod_sum[ph][pw]; + } + } + grad_input1[n][c][h][w] = reduce_sum; + } +} + +template +__global__ void correlation_backward_cuda_kernel_input2( + const TensorAcc5R grad_output, const TensorAcc4R input1, + TensorAcc4R grad_input2, int kH, int kW, int patchH, int patchW, int padH, + int padW, int dilationH, int dilationW, int dilation_patchH, + int dilation_patchW, int dH, int dW, int batch) { + const int iH = input1.size(2); + const int iW = input1.size(3); + + const int patchRadH = (patchH - 1) / 2; + const int patchRadW = (patchW - 1) / 2; + + const int H = grad_output.size(3); + const int W = grad_output.size(4); + + const int dilatedKH = kH * dilationH; + const int dilatedKW = kW * dilationW; + + const int n = batch; + const int c = blockIdx.x; + const int h = blockIdx.y; + const int w = blockIdx.z; + const int ph_off = threadIdx.x; + const int pw_off = threadIdx.y; + + __shared__ scalar_t prod_sum[THREADS_BACKWARD][THREADS_BACKWARD]; + prod_sum[ph_off][pw_off] = 0; + + for (int ph = ph_off; ph < patchH; ph += THREADS_BACKWARD) { + int i1 = h - dilation_patchH * (ph - patchRadH); + for (int pw = pw_off; pw < patchW; pw += THREADS_BACKWARD) { + int j1 = w - dilation_patchW * (pw - patchRadW); + if + WITHIN_BOUNDS(i1, j1, iH, iW) { + scalar_t val = input1[n][c][i1][j1]; + + const int h_2 = i1 + padH; + const int w_2 = j1 + padW; + const int min_h = h_2 - dilatedKH; + const int min_w = w_2 - dilatedKW; + + for (int h_3 = h_2; h_3 > min_h; h_3 -= dilationH) { + int i2 = (h_3) / dH; + if (i2 * dH != h_3) continue; + for (int w_3 = w_2; w_3 > min_w; w_3 -= dilationW) { + int j2 = (w_3) / dW; + if (j2 * dW != w_3) continue; + if + WITHIN_BOUNDS(i2, j2, H, W) { + prod_sum[ph_off][pw_off] += + grad_output[n][ph][pw][i2][j2] * val; + } + } + } + } + } + } + + __syncthreads(); + + if (ph_off == 0 && pw_off == 0) { + scalar_t reduce_sum = 0; + for (int ph = 0; ph < THREADS_BACKWARD; ++ph) { + for (int pw = 0; pw < THREADS_BACKWARD; ++pw) { + reduce_sum += prod_sum[ph][pw]; + } + } + grad_input2[n][c][h][w] = reduce_sum; + } +} +#endif diff --git a/mmcv/ops/csrc/common/cuda/deform_conv_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/deform_conv_cuda_kernel.cuh new file mode 100644 index 0000000..98e1e7a --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/deform_conv_cuda_kernel.cuh @@ -0,0 +1,363 @@ +/*! + ******************* BEGIN Caffe Copyright Notice and Disclaimer + ***************** + * + * COPYRIGHT + * + * All contributions by the University of California: + * Copyright (c) 2014-2017 The Regents of the University of California (Regents) + * All rights reserved. + * + * All other contributions: + * Copyright (c) 2014-2017, the respective contributors + * All rights reserved. + * + * Caffe uses a shared copyright model: each contributor holds copyright over + * their contributions to Caffe. The project versioning records all such + * contribution and copyright details. If a contributor wants to further mark + * their specific copyright on a particular contribution, they should indicate + * their copyright solely in the commit message of the change when it is + * committed. 
+ * + * LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + *this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + *DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + *OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + *OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * CONTRIBUTION AGREEMENT + * + * By contributing to the BVLC/caffe repository through pull-request, comment, + * or otherwise, the contributor releases their content to the + * license and copyright terms herein. + * + ***************** END Caffe Copyright Notice and Disclaimer + ********************* + * + * Copyright (c) 2018 Microsoft + * Licensed under The MIT License [see LICENSE for details] + * \file modulated_deformable_im2col.cuh + * \brief Function definitions of converting an image to + * column matrix based on kernel, padding, dilation, and offset. + * These functions are mainly used in deformable convolution operators. 
+ * \ref: https://arxiv.org/abs/1703.06211 + * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng + */ + +// modified from +// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu + +#ifndef DEFORM_CONV_CUDA_KERNEL_CUH +#define DEFORM_CONV_CUDA_KERNEL_CUH + +#include +#ifdef MMCV_WITH_TRT +#include "common_cuda_helper.hpp" +#else // MMCV_WITH_TRT +#include "pytorch_cuda_helper.hpp" +#endif // MMCV_WITH_TRT + +template +__device__ T deformable_im2col_bilinear(const T *input, const int data_width, + const int height, const int width, T h, + T w) { + if (h <= -1 || height <= h || w <= -1 || width <= w) { + return 0; + } + + int h_low = floorf(h); + int w_low = floorf(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + T lh = h - h_low; + T lw = w - w_low; + T hh = 1 - lh, hw = 1 - lw; + + T v1 = 0; + if (h_low >= 0 && w_low >= 0) v1 = input[h_low * data_width + w_low]; + T v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = input[h_low * data_width + w_high]; + T v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = input[h_high * data_width + w_low]; + T v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = input[h_high * data_width + w_high]; + + T w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ T get_gradient_weight(T argmax_h, T argmax_w, const int h, + const int w, const int height, + const int width) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floorf(argmax_h); + int argmax_w_low = floorf(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + T weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +__device__ T get_coordinate_weight(T argmax_h, T argmax_w, const int height, + const int width, const T *im_data, + const int data_width, const int bp_dir) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floorf(argmax_h); + int argmax_w_low = floorf(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + T weight = 0; + + if (bp_dir == 0) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_w - argmax_w_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } else if (bp_dir == 1) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if 
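+    // get_coordinate_weight returns d(bilinear sample)/d(coordinate):
+    // bp_dir == 0 differentiates w.r.t. the y (height) coordinate and
+    // bp_dir == 1 (this branch) w.r.t. x (width). Each corner tap
+    // enters weighted by the fractional distance along the *other*
+    // axis, with sign - for the low corner and + for the high one,
+    // i.e. the exact derivative of the 4-tap interpolation formula.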
(argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template +__global__ void deformable_im2col_gpu_kernel( + const int n, const T *data_im, const T *data_offset, const int height, + const int width, const int kernel_h, const int kernel_w, const int pad_h, + const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, const int batch_size, + const int num_channels, const int deformable_group, const int height_col, + const int width_col, T *data_col) { + CUDA_1D_KERNEL_LOOP(index, n) { + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + T *data_col_ptr = + data_col + + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + const T *data_im_ptr = + data_im + (b_col * num_channels + c_im) * height * width; + const T *data_offset_ptr = + data_offset + (b_col * deformable_group + deformable_group_index) * 2 * + kernel_h * kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + + w_col; + const T offset_h = data_offset_ptr[data_offset_h_ptr]; + const T offset_w = data_offset_ptr[data_offset_w_ptr]; + T val = static_cast(0); + const T h_im = h_in + i * dilation_h + offset_h; + const T w_im = w_in + j * dilation_w + offset_w; + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) + val = deformable_im2col_bilinear(data_im_ptr, width, height, width, + h_im, w_im); + *data_col_ptr = val; + data_col_ptr += batch_size * height_col * width_col; + } + } + } +} + +template +__global__ void deformable_col2im_gpu_kernel( + const int n, const T *data_col, const T *data_offset, const int channels, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, const int batch_size, + const int deformable_group, const int height_col, const int width_col, + T *grad_im) { + CUDA_1D_KERNEL_LOOP(index, n) { + const int j = (index / width_col / height_col / batch_size) % kernel_w; + const int i = + (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = + index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % 
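+    // Layout reminder: data_col is the flattened column matrix of shape
+    // (C * kH * kW, batch, H_col, W_col); the divisions above peel off
+    // the kernel tap (i, j) and channel c, and the lines below recover
+    // the output position (b, h_out, w_out). Each entry then scatters
+    // its gradient to the (at most four) integer neighbours of its
+    // fractional sampling location via atomicAdd, which is required
+    // because overlapping receptive fields hit the same input cell.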
width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const T *data_offset_ptr = + data_offset + (b * deformable_group + deformable_group_index) * 2 * + kernel_h * kernel_w * height_col * width_col; + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const T offset_h = data_offset_ptr[data_offset_h_ptr]; + const T offset_w = data_offset_ptr[data_offset_w_ptr]; + const T cur_inv_h_data = h_in + i * dilation_h + offset_h; + const T cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const T cur_top_grad = data_col[index]; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) { + for (int dx = -2; dx <= 2; dx++) { + if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && + cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && + abs(cur_inv_w_data - (cur_w + dx)) < 1) { + int cur_bottom_grad_pos = + ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; + T weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, + cur_h + dy, cur_w + dx, height, width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + +template +__global__ void deformable_col2im_coord_gpu_kernel( + const int n, const T *data_col, const T *data_im, const T *data_offset, + const int channels, const int height, const int width, const int kernel_h, + const int kernel_w, const int pad_h, const int pad_w, const int stride_h, + const int stride_w, const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, const int batch_size, + const int offset_channels, const int deformable_group, const int height_col, + const int width_col, T *grad_offset) { + CUDA_1D_KERNEL_LOOP(index, n) { + T val = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const T *data_col_ptr = data_col + deformable_group_index * + channel_per_deformable_group * + batch_size * width_col * height_col; + const T *data_im_ptr = + data_im + (b * deformable_group + deformable_group_index) * + channel_per_deformable_group / kernel_h / kernel_w * + height * width; + const T *data_offset_ptr = + data_offset + (b * deformable_group + deformable_group_index) * 2 * + kernel_h * kernel_w * height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; + col_c += col_step) { + const int col_pos = + (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = + (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = + (((2 * (i * kernel_w + j)) * height_col + h_out) 
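+      // Offset tensor layout, with a worked index (hypothetical 3x3
+      // kernel): each deformable group stores 2 * kH * kW offset maps,
+      // interleaved (dy, dx) per tap; offset_c / 2 picks the tap and
+      // offset_c % 2 the direction. E.g. offset_c = 7, kH = kW = 3 ->
+      // tap 3 (i = 1, j = 0) and bp_dir = 1: the x-offset of the
+      // middle-left tap.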
* width_col + w_out); + const int data_offset_w_ptr = + (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + + w_out); + const T offset_h = data_offset_ptr[data_offset_h_ptr]; + const T offset_w = data_offset_ptr[data_offset_w_ptr]; + T inv_h = h_in + i * dilation_h + offset_h; + T inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) + inv_h = inv_w = -2; + const T weight = get_coordinate_weight(inv_h, inv_w, height, width, + data_im_ptr + cnt * height * width, + width, bp_dir); + val += weight * data_col_ptr[col_pos]; + cnt += 1; + } + + grad_offset[index] = val; + } +} + +#endif // DEFORM_CONV_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/deform_roi_pool_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/deform_roi_pool_cuda_kernel.cuh new file mode 100644 index 0000000..ac95b35 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/deform_roi_pool_cuda_kernel.cuh @@ -0,0 +1,182 @@ +// Copyright (c) OpenMMLab. All rights reserved +#ifndef DEFORM_ROI_POOL_CUDA_KERNEL_CUH +#define DEFORM_ROI_POOL_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +template +__global__ void deform_roi_pool_forward_cuda_kernel( + const int nthreads, const T* input, const T* rois, const T* offset, + T* output, const int pooled_height, const int pooled_width, + const T spatial_scale, const int sampling_ratio, const T gamma, + const int channels, const int height, const int width) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + + // Do not using rounding; this implementation detail is critical + T roi_start_w = offset_rois[1] * spatial_scale - 0.5; + T roi_start_h = offset_rois[2] * spatial_scale - 0.5; + T roi_end_w = offset_rois[3] * spatial_scale - 0.5; + T roi_end_h = offset_rois[4] * spatial_scale - 0.5; + + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + const T* offset_input = + input + (roi_batch_ind * channels + c) * height * width; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = + (sampling_ratio > 0) + ? sampling_ratio + : static_cast(ceilf(roi_height / pooled_height)); + int roi_bin_grid_w = + (sampling_ratio > 0) + ? 
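+    // Adaptive sampling, RoIAlign-style (illustrative numbers): with
+    // sampling_ratio <= 0 the grid follows the RoI, e.g. roi_height =
+    // 20 and pooled_height = 7 give roi_bin_grid_h = ceil(20 / 7) = 3
+    // samples per bin; a positive sampling_ratio fixes the grid size
+    // instead. Combined with the -0.5 half-pixel shift above (no
+    // rounding of RoI coordinates), this is the standard RoIAlign
+    // recipe, extended below by the learned RoI offset.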
sampling_ratio + : static_cast(ceilf(roi_width / pooled_width)); + + // Compute roi offset + if (offset != NULL) { + const T* offset_cur_w = offset + n * pooled_width * pooled_height * 2 + + ph * pooled_width + pw; + T offset_roi_w = gamma * roi_width * offset_cur_w[0]; + T offset_roi_h = + gamma * roi_height * offset_cur_w[pooled_width * pooled_height]; + roi_start_w += offset_roi_w; + roi_start_h += offset_roi_h; + } + + // We do average pooling inside a bin + const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); + T output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + const T y = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T x = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + T val = bilinear_interpolate(offset_input, height, width, y, x, index); + output_val += val; + } + } + output[index] = output_val / count; + } +} + +template +__global__ void deform_roi_pool_backward_cuda_kernel( + const int nthreads, const T* grad_output, const T* input, const T* rois, + const T* offset, T* grad_input, T* grad_offset, const int pooled_height, + const int pooled_width, const T spatial_scale, const int sampling_ratio, + const T gamma, const int channels, const int height, const int width) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + const T* offset_input = + input + ((roi_batch_ind * channels + c) * height * width); + T* offset_grad_input = + grad_input + ((roi_batch_ind * channels + c) * height * width); + + // Do not using rounding; this implementation detail is critical + T roi_start_w = offset_rois[1] * spatial_scale - 0.5; + T roi_start_h = offset_rois[2] * spatial_scale - 0.5; + T roi_end_w = offset_rois[3] * spatial_scale - 0.5; + T roi_end_h = offset_rois[4] * spatial_scale - 0.5; + + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = + (sampling_ratio > 0) + ? sampling_ratio + : static_cast(ceilf(roi_height / pooled_height)); + int roi_bin_grid_w = + (sampling_ratio > 0) + ? sampling_ratio + : static_cast(ceilf(roi_width / pooled_width)); + + // Compute roi offset + if (offset != NULL) { + const T* offset_cur_w = offset + n * pooled_width * pooled_height * 2 + + ph * pooled_width + pw; + T offset_roi_w = gamma * roi_width * offset_cur_w[0]; + T offset_roi_h = + gamma * roi_height * offset_cur_w[pooled_width * pooled_height]; + roi_start_w += offset_roi_w; + roi_start_h += offset_roi_h; + } + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 + const T grad_output_this_bin = grad_output[index] / count; + + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + const T y = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T x = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, + x_low, x_high, y_low, y_high, index); + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + atomicAdd(offset_grad_input + y_low * width + x_low, + grad_output_this_bin * w1); + atomicAdd(offset_grad_input + y_low * width + x_high, + grad_output_this_bin * w2); + atomicAdd(offset_grad_input + y_high * width + x_low, + grad_output_this_bin * w3); + atomicAdd(offset_grad_input + y_high * width + x_high, + grad_output_this_bin * w4); + if (offset != NULL) { + T input_00 = offset_input[y_low * width + x_low]; + T input_10 = offset_input[y_low * width + x_high]; + T input_01 = offset_input[y_high * width + x_low]; + T input_11 = offset_input[y_high * width + x_high]; + T ogx = gamma * roi_width * grad_output_this_bin * + (input_11 * (y - y_low) + input_10 * (y_high - y) + + input_01 * (y_low - y) + input_00 * (y - y_high)); + T ogy = gamma * roi_height * grad_output_this_bin * + (input_11 * (x - x_low) + input_01 * (x_high - x) + + input_10 * (x_low - x) + input_00 * (x - x_high)); + atomicAdd(grad_offset + n * pooled_width * pooled_height * 2 + + ph * pooled_width + pw, + ogx); + atomicAdd(grad_offset + n * pooled_width * pooled_height * 2 + + pooled_width * pooled_height + ph * pooled_width + pw, + ogy); + } + } + } + } + } +} + +#endif // DEFORM_ROI_POOL_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/furthest_point_sample_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/furthest_point_sample_cuda_kernel.cuh new file mode 100644 index 0000000..c23278a --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/furthest_point_sample_cuda_kernel.cuh @@ -0,0 +1,148 @@ +// Copyright (c) OpenMMLab. All rights reserved +#ifndef FURTHEST_POINT_SAMPLE_CUDA_KERNEL_CUH +#define FURTHEST_POINT_SAMPLE_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, + int idx1, int idx2) { + const float v1 = dists[idx1], v2 = dists[idx2]; + const int i1 = dists_i[idx1], i2 = dists_i[idx2]; + dists[idx1] = max(v1, v2); + dists_i[idx1] = v2 > v1 ? 
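+  // __update merges two (distance, index) candidates in place, keeping
+  // whichever point lies farther from the already-selected set. The
+  // kernels below call it in a shared-memory tree reduction: after
+  // passes at offsets block_size / 2, ..., 2, 1 (each guarded by
+  // __syncthreads), dists[0] / dists_i[0] hold the block-wide farthest
+  // point, which becomes the next sampled index.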
i2 : i1; +} + +template +__global__ void furthest_point_sampling_forward_cuda_kernel( + int b, int n, int m, const float *__restrict__ dataset, + float *__restrict__ temp, int *__restrict__ idxs) { + // dataset: (B, N, 3) + // tmp: (B, N) + // output: + // idx: (B, M) + + if (m <= 0) return; + __shared__ float dists[block_size]; + __shared__ int dists_i[block_size]; + + int batch_index = blockIdx.x; + dataset += batch_index * n * 3; + temp += batch_index * n; + idxs += batch_index * m; + + int tid = threadIdx.x; + const int stride = block_size; + + int old = 0; + if (threadIdx.x == 0) idxs[0] = old; + + __syncthreads(); + for (int j = 1; j < m; j++) { + int besti = 0; + float best = -1; + float x1 = dataset[old * 3 + 0]; + float y1 = dataset[old * 3 + 1]; + float z1 = dataset[old * 3 + 2]; + for (int k = tid; k < n; k += stride) { + float x2, y2, z2; + x2 = dataset[k * 3 + 0]; + y2 = dataset[k * 3 + 1]; + z2 = dataset[k * 3 + 2]; + // float mag = (x2 * x2) + (y2 * y2) + (z2 * z2); + // if (mag <= 1e-3) + // continue; + + float d = + (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); + float d2 = min(d, temp[k]); + temp[k] = d2; + besti = d2 > best ? k : besti; + best = d2 > best ? d2 : best; + } + dists[tid] = best; + dists_i[tid] = besti; + __syncthreads(); + +#pragma unroll + for (int block_size_thres = 1024; block_size_thres >= 2; + block_size_thres >>= 1) { + const int tid_thres = block_size_thres / 2; + if (block_size >= block_size_thres && tid < tid_thres) { + __update(dists, dists_i, tid, tid + tid_thres); + } + __syncthreads(); + } + + old = dists_i[0]; + if (tid == 0) idxs[j] = old; + } +} + +// Modified from +// https://github.com/qiqihaer/3DSSD-pytorch/blob/master/lib/pointnet2/src/sampling_gpu.cu +template +__global__ void furthest_point_sampling_with_dist_forward_cuda_kernel( + int b, int n, int m, const float *__restrict__ dataset, + float *__restrict__ temp, int *__restrict__ idxs) { + // dataset: (B, N, N) + // tmp: (B, N) + // output: + // idx: (B, M) + + if (m <= 0) return; + __shared__ float dists[block_size]; + __shared__ int dists_i[block_size]; + + int batch_index = blockIdx.x; + dataset += batch_index * n * n; + temp += batch_index * n; + idxs += batch_index * m; + + int tid = threadIdx.x; + const int stride = block_size; + + int old = 0; + if (threadIdx.x == 0) idxs[0] = old; + + __syncthreads(); + for (int j = 1; j < m; j++) { + int besti = 0; + float best = -1; + // float x1 = dataset[old * 3 + 0]; + // float y1 = dataset[old * 3 + 1]; + // float z1 = dataset[old * 3 + 2]; + for (int k = tid; k < n; k += stride) { + // float x2, y2, z2; + // x2 = dataset[k * 3 + 0]; + // y2 = dataset[k * 3 + 1]; + // z2 = dataset[k * 3 + 2]; + + // float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * + // (z2 - z1); + float d = dataset[old * n + k]; + + float d2 = min(d, temp[k]); + temp[k] = d2; + besti = d2 > best ? k : besti; + best = d2 > best ? 
d2 : best; + } + dists[tid] = best; + dists_i[tid] = besti; + __syncthreads(); + +#pragma unroll + for (int block_size_thres = 1024; block_size_thres >= 2; + block_size_thres >>= 1) { + const int tid_thres = block_size_thres / 2; + if (block_size >= block_size_thres && tid < tid_thres) { + __update(dists, dists_i, tid, tid + tid_thres); + } + __syncthreads(); + } + + old = dists_i[0]; + if (tid == 0) idxs[j] = old; + } +} + +#endif // FURTHEST_POINT_SAMPLE_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/gather_points_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/gather_points_cuda_kernel.cuh new file mode 100644 index 0000000..0008453 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/gather_points_cuda_kernel.cuh @@ -0,0 +1,52 @@ +// Copyright (c) OpenMMLab. All rights reserved +#ifndef GATHER_POINTS_CUDA_KERNEL_CUH +#define GATHER_POINTS_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +#define TOTAL_THREADS 1024 + +template +__global__ void gather_points_forward_cuda_kernel(int b, int c, int n, int m, + const T *points, + const int *__restrict__ idx, + T *out) { + // points: (B, C, N) + // idx: (B, M) + // output: + // out: (B, C, M) + + int bs_idx = blockIdx.z; + int c_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; + + out += bs_idx * c * m + c_idx * m + pt_idx; + idx += bs_idx * m + pt_idx; + points += bs_idx * c * n + c_idx * n; + out[0] = points[idx[0]]; +} + +template +__global__ void gather_points_backward_cuda_kernel(int b, int c, int n, int m, + const T *grad_out, + const int *__restrict__ idx, + T *grad_points) { + // grad_out: (B, C, M) + // idx: (B, M) + // output: + // grad_points: (B, C, N) + + int bs_idx = blockIdx.z; + int c_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; + + grad_out += bs_idx * c * m + c_idx * m + pt_idx; + idx += bs_idx * m + pt_idx; + grad_points += bs_idx * c * n + c_idx * n; + + atomicAdd(grad_points + idx[0], grad_out[0]); +} + +#endif // GATHER_POINTS_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/group_points_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/group_points_cuda_kernel.cuh new file mode 100644 index 0000000..ffbc1f9 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/group_points_cuda_kernel.cuh @@ -0,0 +1,59 @@ +// Copyright (c) OpenMMLab. All rights reserved. 
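+// Shape contract for the two kernels in this header, for orientation:
+//   points : (B, C, N)              features of all N points
+//   idx    : (B, npoints, nsample)  neighbour indices per centre point
+//   out    : (B, C, npoints, nsample)
+// A plausible launch, mirroring how the kernels decode their indices
+// (the actual host code lives elsewhere in mmcv):
+//   dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b);
+//   dim3 threads(THREADS_PER_BLOCK);
+// blockIdx.z walks the batch, blockIdx.y the channels, and the x
+// dimension enumerates (centre, neighbour) pairs.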
+// Modified from +// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/group_points_gpu.cu +#ifndef GROUP_POINTS_CUDA_KERNEL_CUH +#define GROUP_POINTS_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +template +__global__ void group_points_forward_cuda_kernel(int b, int c, int n, + int npoints, int nsample, + const T *points, + const int *__restrict__ idx, + T *out) { + // points: (B, C, N) + // idx: (B, npoints, nsample) + // output: + // out: (B, C, npoints, nsample) + int bs_idx = blockIdx.z; + int c_idx = blockIdx.y; + int index = blockIdx.x * blockDim.x + threadIdx.x; + int pt_idx = index / nsample; + if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return; + + int sample_idx = index % nsample; + + idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx; + int in_idx = bs_idx * c * n + c_idx * n + idx[0]; + int out_idx = bs_idx * c * npoints * nsample + c_idx * npoints * nsample + + pt_idx * nsample + sample_idx; + + out[out_idx] = points[in_idx]; +} + +template +__global__ void group_points_backward_cuda_kernel(int b, int c, int n, + int npoints, int nsample, + const T *grad_out, + const int *__restrict__ idx, + T *grad_points) { + // grad_out: (B, C, npoints, nsample) + // idx: (B, npoints, nsample) + // output: + // grad_points: (B, C, N) + int bs_idx = blockIdx.z; + int c_idx = blockIdx.y; + int index = blockIdx.x * blockDim.x + threadIdx.x; + int pt_idx = index / nsample; + if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return; + + int sample_idx = index % nsample; + grad_out += bs_idx * c * npoints * nsample + c_idx * npoints * nsample + + pt_idx * nsample + sample_idx; + idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx; + + atomicAdd(grad_points + bs_idx * c * n + c_idx * n + idx[0], grad_out[0]); +} + +#endif // GROUP_POINTS_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/iou3d_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/iou3d_cuda_kernel.cuh new file mode 100644 index 0000000..c85ca64 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/iou3d_cuda_kernel.cuh @@ -0,0 +1,365 @@ +// Copyright (c) OpenMMLab. 
All rights reserved +#ifndef IOU3D_CUDA_KERNEL_CUH +#define IOU3D_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +const int THREADS_PER_BLOCK_IOU3D = 16; +const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8; +__device__ const float EPS = 1e-8; + +struct Point { + float x, y; + __device__ Point() {} + __device__ Point(double _x, double _y) { x = _x, y = _y; } + + __device__ void set(float _x, float _y) { + x = _x; + y = _y; + } + + __device__ Point operator+(const Point &b) const { + return Point(x + b.x, y + b.y); + } + + __device__ Point operator-(const Point &b) const { + return Point(x - b.x, y - b.y); + } +}; + +__device__ inline float cross(const Point &a, const Point &b) { + return a.x * b.y - a.y * b.x; +} + +__device__ inline float cross(const Point &p1, const Point &p2, + const Point &p0) { + return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); +} + +__device__ int check_rect_cross(const Point &p1, const Point &p2, + const Point &q1, const Point &q2) { + int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) && + min(q1.x, q2.x) <= max(p1.x, p2.x) && + min(p1.y, p2.y) <= max(q1.y, q2.y) && + min(q1.y, q2.y) <= max(p1.y, p2.y); + return ret; +} + +__device__ inline int check_in_box2d(const float *box, const Point &p) { + // params: box (5) [x1, y1, x2, y2, angle] + const float MARGIN = 1e-5; + + float center_x = (box[0] + box[2]) / 2; + float center_y = (box[1] + box[3]) / 2; + float angle_cos = cos(-box[4]), + angle_sin = + sin(-box[4]); // rotate the point in the opposite direction of box + float rot_x = + (p.x - center_x) * angle_cos - (p.y - center_y) * angle_sin + center_x; + float rot_y = + (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos + center_y; + + return (rot_x > box[0] - MARGIN && rot_x < box[2] + MARGIN && + rot_y > box[1] - MARGIN && rot_y < box[3] + MARGIN); +} + +__device__ inline int intersection(const Point &p1, const Point &p0, + const Point &q1, const Point &q0, + Point &ans_point) { + // fast exclusion + if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; + + // check cross standing + float s1 = cross(q0, p1, p0); + float s2 = cross(p1, q1, p0); + float s3 = cross(p0, q1, q0); + float s4 = cross(q1, p1, q0); + + if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; + + // calculate intersection of two lines + float s5 = cross(q1, p1, p0); + if (fabs(s5 - s1) > EPS) { + ans_point.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); + ans_point.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); + + } else { + float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; + float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; + float D = a0 * b1 - a1 * b0; + + ans_point.x = (b0 * c1 - b1 * c0) / D; + ans_point.y = (a1 * c0 - a0 * c1) / D; + } + + return 1; +} + +__device__ inline void rotate_around_center(const Point ¢er, + const float angle_cos, + const float angle_sin, Point &p) { + float new_x = + (p.x - center.x) * angle_cos - (p.y - center.y) * angle_sin + center.x; + float new_y = + (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; + p.set(new_x, new_y); +} + +__device__ inline int point_cmp(const Point &a, const Point &b, + const Point ¢er) { + return atan2(a.y - center.y, a.x - center.x) > + atan2(b.y - center.y, b.x - center.x); +} + +__device__ inline float box_overlap(const float *box_a, const float *box_b) { + // params: box_a (5) [x1, y1, x2, y2, angle] + // params: box_b (5) [x1, y1, x2, y2, angle] + + float a_x1 = box_a[0], a_y1 = box_a[1], a_x2 = box_a[2], a_y2 = box_a[3], + a_angle 
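+  // box_overlap computes the intersection area of two rotated
+  // rectangles by direct polygon clipping, in four steps:
+  //   1. rotate both boxes' corners around their centres by their
+  //      angles;
+  //   2. collect candidate vertices: pairwise edge intersections (up
+  //      to 16) plus corners of either box lying inside the other;
+  //   3. sort the vertices counter-clockwise around their centroid
+  //      (bubble sort with the atan2-based point_cmp);
+  //   4. accumulate the signed shoelace area from cross products.
+  // The vertex count stays small, so this runs entirely per thread.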
= box_a[4]; + float b_x1 = box_b[0], b_y1 = box_b[1], b_x2 = box_b[2], b_y2 = box_b[3], + b_angle = box_b[4]; + + Point center_a((a_x1 + a_x2) / 2, (a_y1 + a_y2) / 2); + Point center_b((b_x1 + b_x2) / 2, (b_y1 + b_y2) / 2); + + Point box_a_corners[5]; + box_a_corners[0].set(a_x1, a_y1); + box_a_corners[1].set(a_x2, a_y1); + box_a_corners[2].set(a_x2, a_y2); + box_a_corners[3].set(a_x1, a_y2); + + Point box_b_corners[5]; + box_b_corners[0].set(b_x1, b_y1); + box_b_corners[1].set(b_x2, b_y1); + box_b_corners[2].set(b_x2, b_y2); + box_b_corners[3].set(b_x1, b_y2); + + // get oriented corners + float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); + float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); + + for (int k = 0; k < 4; k++) { + rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); + rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); + } + + box_a_corners[4] = box_a_corners[0]; + box_b_corners[4] = box_b_corners[0]; + + // get intersection of lines + Point cross_points[16]; + Point poly_center; + int cnt = 0, flag = 0; + + poly_center.set(0, 0); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + flag = intersection(box_a_corners[i + 1], box_a_corners[i], + box_b_corners[j + 1], box_b_corners[j], + cross_points[cnt]); + if (flag) { + poly_center = poly_center + cross_points[cnt]; + cnt++; + } + } + } + + // check corners + for (int k = 0; k < 4; k++) { + if (check_in_box2d(box_a, box_b_corners[k])) { + poly_center = poly_center + box_b_corners[k]; + cross_points[cnt] = box_b_corners[k]; + cnt++; + } + if (check_in_box2d(box_b, box_a_corners[k])) { + poly_center = poly_center + box_a_corners[k]; + cross_points[cnt] = box_a_corners[k]; + cnt++; + } + } + + poly_center.x /= cnt; + poly_center.y /= cnt; + + // sort the points of polygon + Point temp; + for (int j = 0; j < cnt - 1; j++) { + for (int i = 0; i < cnt - j - 1; i++) { + if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) { + temp = cross_points[i]; + cross_points[i] = cross_points[i + 1]; + cross_points[i + 1] = temp; + } + } + } + + // get the overlap areas + float area = 0; + for (int k = 0; k < cnt - 1; k++) { + area += cross(cross_points[k] - cross_points[0], + cross_points[k + 1] - cross_points[0]); + } + + return fabs(area) / 2.0; +} + +__device__ inline float iou_bev(const float *box_a, const float *box_b) { + // params: box_a (5) [x1, y1, x2, y2, angle] + // params: box_b (5) [x1, y1, x2, y2, angle] + float sa = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1]); + float sb = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1]); + float s_overlap = box_overlap(box_a, box_b); + return s_overlap / fmaxf(sa + sb - s_overlap, EPS); +} + +__global__ void iou3d_boxes_overlap_bev_forward_cuda_kernel( + const int num_a, const float *boxes_a, const int num_b, + const float *boxes_b, float *ans_overlap) { + const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; + const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; + + if (a_idx >= num_a || b_idx >= num_b) { + return; + } + const float *cur_box_a = boxes_a + a_idx * 5; + const float *cur_box_b = boxes_b + b_idx * 5; + float s_overlap = box_overlap(cur_box_a, cur_box_b); + ans_overlap[a_idx * num_b + b_idx] = s_overlap; +} + +__global__ void iou3d_boxes_iou_bev_forward_cuda_kernel(const int num_a, + const float *boxes_a, + const int num_b, + const float *boxes_b, + float *ans_iou) { + const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; + const int b_idx = blockIdx.x * 
THREADS_PER_BLOCK + threadIdx.x; + + if (a_idx >= num_a || b_idx >= num_b) { + return; + } + + const float *cur_box_a = boxes_a + a_idx * 5; + const float *cur_box_b = boxes_b + b_idx * 5; + float cur_iou_bev = iou_bev(cur_box_a, cur_box_b); + ans_iou[a_idx * num_b + b_idx] = cur_iou_bev; +} + +__global__ void nms_forward_cuda_kernel(const int boxes_num, + const float nms_overlap_thresh, + const float *boxes, + unsigned long long *mask) { + // params: boxes (N, 5) [x1, y1, x2, y2, ry] + // params: mask (N, N/THREADS_PER_BLOCK_NMS) + + const int row_start = blockIdx.y; + const int col_start = blockIdx.x; + + // if (row_start > col_start) return; + + const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, + THREADS_PER_BLOCK_NMS); + const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, + THREADS_PER_BLOCK_NMS); + + __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 5]; + + if (threadIdx.x < col_size) { + block_boxes[threadIdx.x * 5 + 0] = + boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 0]; + block_boxes[threadIdx.x * 5 + 1] = + boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 1]; + block_boxes[threadIdx.x * 5 + 2] = + boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 2]; + block_boxes[threadIdx.x * 5 + 3] = + boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 3]; + block_boxes[threadIdx.x * 5 + 4] = + boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 4]; + } + __syncthreads(); + + if (threadIdx.x < row_size) { + const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; + const float *cur_box = boxes + cur_box_idx * 5; + + int i = 0; + unsigned long long t = 0; + int start = 0; + if (row_start == col_start) { + start = threadIdx.x + 1; + } + for (i = start; i < col_size; i++) { + if (iou_bev(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { + t |= 1ULL << i; + } + } + const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); + mask[cur_box_idx * col_blocks + col_start] = t; + } +} + +__device__ inline float iou_normal(float const *const a, float const *const b) { + float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]); + float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]); + float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f); + float interS = width * height; + float Sa = (a[2] - a[0]) * (a[3] - a[1]); + float Sb = (b[2] - b[0]) * (b[3] - b[1]); + return interS / fmaxf(Sa + Sb - interS, EPS); +} + +__global__ void nms_normal_forward_cuda_kernel(const int boxes_num, + const float nms_overlap_thresh, + const float *boxes, + unsigned long long *mask) { + // params: boxes (N, 5) [x1, y1, x2, y2, ry] + // params: mask (N, N/THREADS_PER_BLOCK_NMS) + + const int row_start = blockIdx.y; + const int col_start = blockIdx.x; + + // if (row_start > col_start) return; + + const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, + THREADS_PER_BLOCK_NMS); + const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, + THREADS_PER_BLOCK_NMS); + + __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 5]; + + if (threadIdx.x < col_size) { + block_boxes[threadIdx.x * 5 + 0] = + boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 0]; + block_boxes[threadIdx.x * 5 + 1] = + boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 1]; + block_boxes[threadIdx.x * 5 + 2] = + boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 2]; + block_boxes[threadIdx.x * 5 + 3] = + boxes[(THREADS_PER_BLOCK_NMS * 
col_start + threadIdx.x) * 5 + 3]; + block_boxes[threadIdx.x * 5 + 4] = + boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 4]; + } + __syncthreads(); + + if (threadIdx.x < row_size) { + const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; + const float *cur_box = boxes + cur_box_idx * 5; + + int i = 0; + unsigned long long t = 0; + int start = 0; + if (row_start == col_start) { + start = threadIdx.x + 1; + } + for (i = start; i < col_size; i++) { + if (iou_normal(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { + t |= 1ULL << i; + } + } + const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); + mask[cur_box_idx * col_blocks + col_start] = t; + } +} + +#endif // IOU3D_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/knn_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/knn_cuda_kernel.cuh new file mode 100644 index 0000000..9a48cb0 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/knn_cuda_kernel.cuh @@ -0,0 +1,87 @@ +// Copyright (c) OpenMMLab. All rights reserved +// Modified from +// https://github.com/CVMI-Lab/PAConv/tree/main/scene_seg/lib/pointops/src/knnquery_heap +#ifndef KNN_CUDA_KERNEL_CUH +#define KNN_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +inline __device__ void swap_float(float *x, float *y) { + float tmp = *x; + *x = *y; + *y = tmp; +} + +inline __device__ void swap_int(int *x, int *y) { + int tmp = *x; + *x = *y; + *y = tmp; +} + +__device__ void reheap(float *dist, int *idx, int k) { + int root = 0; + int child = root * 2 + 1; + while (child < k) { + if (child + 1 < k && dist[child + 1] > dist[child]) child++; + if (dist[root] > dist[child]) return; + swap_float(&dist[root], &dist[child]); + swap_int(&idx[root], &idx[child]); + root = child; + child = root * 2 + 1; + } +} + +__device__ void heap_sort(float *dist, int *idx, int k) { + int i; + for (i = k - 1; i > 0; i--) { + swap_float(&dist[0], &dist[i]); + swap_int(&idx[0], &idx[i]); + reheap(dist, idx, i); + } +} + +// input: xyz (b, n, 3) new_xyz (b, m, 3) +// output: idx (b, m, nsample) dist2 (b, m, nsample) +template <typename T> +__global__ void knn_forward_cuda_kernel(int b, int n, int m, int nsample, + const T *xyz, const T *new_xyz, + int *__restrict__ idx, T *dist2) { + int bs_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= b || pt_idx >= m) return; + + new_xyz += bs_idx * m * 3 + pt_idx * 3; + xyz += bs_idx * n * 3; + idx += bs_idx * m * nsample + pt_idx * nsample; + dist2 += bs_idx * m * nsample + pt_idx * nsample; + + T new_x = new_xyz[0]; + T new_y = new_xyz[1]; + T new_z = new_xyz[2]; + + float best_dist[100]; + int best_idx[100]; + for (int i = 0; i < nsample; i++) { + best_dist[i] = 1e10; + best_idx[i] = 0; + } + for (int i = 0; i < n; i++) { + T x = xyz[i * 3 + 0]; + T y = xyz[i * 3 + 1]; + T z = xyz[i * 3 + 2]; + T d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + + (new_z - z) * (new_z - z); + if (d2 < best_dist[0]) { + best_dist[0] = d2; + best_idx[0] = i; + reheap(best_dist, best_idx, nsample); + } + } + heap_sort(best_dist, best_idx, nsample); + for (int i = 0; i < nsample; i++) { + idx[i] = best_idx[i]; + dist2[i] = best_dist[i]; + } +} + +#endif // KNN_CUDA_KERNEL_CUH
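The kNN kernel above keeps its k running candidates in a fixed-size max-heap: best_dist[0] is always the worst of the current k distances, so each of the n points costs one comparison against the root plus an O(log k) reheap when it improves on it, and the final heap_sort emits the neighbours in ascending order. A minimal host-side sketch of the same bounded top-k pattern (hypothetical topk_smallest helper, not part of the patch):

#include <queue>
#include <vector>

// Keep the k smallest values seen so far. The max-heap root plays the role
// of best_dist[0] in knn_forward_cuda_kernel: the worst of the current k.
std::vector<float> topk_smallest(const std::vector<float> &vals, size_t k) {
  std::priority_queue<float> heap;  // max-heap by default
  for (float v : vals) {
    if (heap.size() < k) {
      heap.push(v);
    } else if (v < heap.top()) {  // closer than the current worst candidate
      heap.pop();
      heap.push(v);
    }
  }
  std::vector<float> out;
  while (!heap.empty()) {
    out.push_back(heap.top());  // descending; cf. heap_sort's final pass
    heap.pop();
  }
  return out;
}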
diff --git a/mmcv/ops/csrc/common/cuda/masked_conv2d_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/masked_conv2d_cuda_kernel.cuh new file mode 100644 index 0000000..b11b3cd --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/masked_conv2d_cuda_kernel.cuh @@ -0,0 +1,58 @@ +// Copyright (c) OpenMMLab. All rights reserved +#ifndef MASKED_CONV2D_CUDA_KERNEL_CUH +#define MASKED_CONV2D_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +template <typename scalar_t> +__global__ void MaskedIm2colForward(const int n, const scalar_t *data_im, + const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int64_t *mask_h_idx, + const int64_t *mask_w_idx, + const int mask_cnt, scalar_t *data_col) { + // mask_cnt * channels + CUDA_1D_KERNEL_LOOP(index, n) { + const int m_index = index % mask_cnt; + const int h_col = mask_h_idx[m_index]; + const int w_col = mask_w_idx[m_index]; + const int c_im = index / mask_cnt; + const int c_col = c_im * kernel_h * kernel_w; + const int h_offset = h_col - pad_h; + const int w_offset = w_col - pad_w; + scalar_t *data_col_ptr = data_col + c_col * mask_cnt + m_index; + for (int i = 0; i < kernel_h; ++i) { + int h_im = h_offset + i; + for (int j = 0; j < kernel_w; ++j) { + int w_im = w_offset + j; + if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { + *data_col_ptr = + (scalar_t)data_im[(c_im * height + h_im) * width + w_im]; + } else { + *data_col_ptr = 0.0; + } + data_col_ptr += mask_cnt; + } + } + } +} + +template <typename scalar_t> +__global__ void MaskedCol2imForward(const int n, const scalar_t *data_col, + const int height, const int width, + const int channels, + const int64_t *mask_h_idx, + const int64_t *mask_w_idx, + const int mask_cnt, scalar_t *data_im) { + CUDA_1D_KERNEL_LOOP(index, n) { + const int m_index = index % mask_cnt; + const int h_im = mask_h_idx[m_index]; + const int w_im = mask_w_idx[m_index]; + const int c_im = index / mask_cnt; + // compute the start and end of the output + data_im[(c_im * height + h_im) * width + w_im] = data_col[index]; + } +} + +#endif // MASKED_CONV2D_CUDA_KERNEL_CUH
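MaskedIm2colForward gathers, for every masked output location, the kernel_h x kernel_w input patch of each channel into one column of a (channels * kernel_h * kernel_w) x mask_cnt matrix, so the masked convolution reduces to a single GEMM over just those columns; MaskedCol2imForward then scatters the GEMM result back into the dense map. A CPU reference of the gather indexing, assuming the same zero padding and layout (illustrative sketch, not part of the patch):

#include <cstdint>
#include <vector>

// CPU mirror of MaskedIm2colForward: zero outside the image, column layout
// col[(c * kh * kw + i * kw + j) * mask_cnt + m].
void masked_im2col_cpu(const std::vector<float> &im, int channels, int height,
                       int width, int kh, int kw, int pad_h, int pad_w,
                       const std::vector<int64_t> &mask_h,
                       const std::vector<int64_t> &mask_w,
                       std::vector<float> &col) {
  const size_t mask_cnt = mask_h.size();
  col.assign(static_cast<size_t>(channels) * kh * kw * mask_cnt, 0.f);
  for (int c = 0; c < channels; ++c)
    for (size_t m = 0; m < mask_cnt; ++m)
      for (int i = 0; i < kh; ++i)
        for (int j = 0; j < kw; ++j) {
          const int h = static_cast<int>(mask_h[m]) - pad_h + i;
          const int w = static_cast<int>(mask_w[m]) - pad_w + j;
          if (h >= 0 && w >= 0 && h < height && w < width)
            col[(c * kh * kw + i * kw + j) * mask_cnt + m] =
                im[(c * height + h) * width + w];
        }
}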
diff --git a/mmcv/ops/csrc/common/cuda/modulated_deform_conv_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/modulated_deform_conv_cuda_kernel.cuh new file mode 100644 index 0000000..b29c74e --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/modulated_deform_conv_cuda_kernel.cuh @@ -0,0 +1,395 @@ +/*! + ******************* BEGIN Caffe Copyright Notice and Disclaimer + ***************** + * + * COPYRIGHT + * + * All contributions by the University of California: + * Copyright (c) 2014-2017 The Regents of the University of California (Regents) + * All rights reserved. + * + * All other contributions: + * Copyright (c) 2014-2017, the respective contributors + * All rights reserved. + * + * Caffe uses a shared copyright model: each contributor holds copyright over + * their contributions to Caffe. The project versioning records all such + * contribution and copyright details. If a contributor wants to further mark + * their specific copyright on a particular contribution, they should indicate + * their copyright solely in the commit message of the change when it is + * committed. + * + * LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + *this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + *DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + *OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + *OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * CONTRIBUTION AGREEMENT + * + * By contributing to the BVLC/caffe repository through pull-request, comment, + * or otherwise, the contributor releases their content to the + * license and copyright terms herein. + * + ***************** END Caffe Copyright Notice and Disclaimer + ********************* + * + * Copyright (c) 2018 Microsoft + * Licensed under The MIT License [see LICENSE for details] + * \file modulated_deformable_im2col.cuh + * \brief Function definitions of converting an image to + * column matrix based on kernel, padding, dilation, and offset. + * These functions are mainly used in deformable convolution operators. + * \ref: https://arxiv.org/abs/1703.06211 + * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng + */ + +// modified from +// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu + +#ifndef MODULATED_DEFORM_CONV_CUDA_KERNEL_CUH +#define MODULATED_DEFORM_CONV_CUDA_KERNEL_CUH + +#include <float.h> +#ifdef MMCV_WITH_TRT +#include "common_cuda_helper.hpp" +#else // MMCV_WITH_TRT +#include "pytorch_cuda_helper.hpp" +#endif // MMCV_WITH_TRT + +template <typename T> +__device__ T dmcn_im2col_bilinear(const T *input, const int data_width, + const int height, const int width, T h, T w) { + int h_low = floorf(h); + int w_low = floorf(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + T lh = h - h_low; + T lw = w - w_low; + T hh = 1 - lh, hw = 1 - lw; + + T v1 = 0; + if (h_low >= 0 && w_low >= 0) v1 = input[h_low * data_width + w_low]; + T v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = input[h_low * data_width + w_high]; + T v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = input[h_high * data_width + w_low]; + T v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = input[h_high * data_width + w_high]; + + T w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +}
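dmcn_im2col_bilinear is plain bilinear interpolation: the fractional sample point (h, w) is blended from its four integer neighbours with weights that sum to one, and neighbours falling outside the map contribute zero. A scalar CPU equivalent (illustrative sketch, not part of the patch):

#include <cmath>

// CPU mirror of dmcn_im2col_bilinear: out-of-range corners contribute 0.
float bilinear_cpu(const float *input, int data_width, int height, int width,
                   float h, float w) {
  const int h_low = static_cast<int>(std::floor(h));
  const int w_low = static_cast<int>(std::floor(w));
  const float lh = h - h_low, lw = w - w_low;  // fractional parts
  float val = 0.f;
  for (int dh = 0; dh <= 1; ++dh)
    for (int dw = 0; dw <= 1; ++dw) {
      const int hh = h_low + dh, ww = w_low + dw;
      if (hh < 0 || ww < 0 || hh > height - 1 || ww > width - 1) continue;
      // (1-lh)(1-lw), (1-lh)lw, lh(1-lw), lh*lw -- the w1..w4 of the kernel
      const float weight = (dh ? lh : 1.f - lh) * (dw ? lw : 1.f - lw);
      val += weight * input[hh * data_width + ww];
    }
  return val;
}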
+ +template <typename T> +__device__ T dmcn_get_gradient_weight(T argmax_h, T argmax_w, const int h, + const int w, const int height, + const int width) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floorf(argmax_h); + int argmax_w_low = floorf(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + T weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template <typename T> +__device__ T dmcn_get_coordinate_weight(T argmax_h, T argmax_w, + const int height, const int width, + const T *im_data, const int data_width, + const int bp_dir) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floorf(argmax_h); + int argmax_w_low = floorf(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + T weight = 0; + + if (bp_dir == 0) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_w - argmax_w_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } else if (bp_dir == 1) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template <typename T> +__global__ void modulated_deformable_im2col_gpu_kernel( + const int n, const T *data_im, const T *data_offset, const T *data_mask, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, const int batch_size, + const int num_channels, const int deformable_group, const int height_col, + const int width_col, T *data_col) { + CUDA_1D_KERNEL_LOOP(index, n) { + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + + T *data_col_ptr = + data_col + + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + const T *data_im_ptr = + data_im + (b_col * num_channels + c_im) * height * width; + const T *data_offset_ptr = + data_offset + (b_col * deformable_group + deformable_group_index) * 2 * + kernel_h * kernel_w * height_col * width_col; + + const T *data_mask_ptr = + data_mask + (b_col * deformable_group + deformable_group_index) * + kernel_h * kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + + w_col; + const int data_mask_hw_ptr = + ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; + const T offset_h = data_offset_ptr[data_offset_h_ptr]; + const T offset_w = data_offset_ptr[data_offset_w_ptr]; + const T mask = data_mask_ptr[data_mask_hw_ptr]; + T val = static_cast<T>(0); + const T h_im = h_in + i * dilation_h + offset_h; + const T w_im = w_in + j * dilation_w + offset_w; + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) + val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, + w_im); + *data_col_ptr = val * mask; + data_col_ptr += batch_size * height_col * width_col; + } + } + } +}
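The im2col kernel above is where the "deformable" part happens: for output cell (h_col, w_col) and kernel tap (i, j) it reads a learned offset pair and a modulation scalar, then samples the input off-grid, roughly

  h_im = h_col * stride_h - pad_h + i * dilation_h + offset_h(i, j)
  w_im = w_col * stride_w - pad_w + j * dilation_w + offset_w(i, j)
  column(c, i, j) = mask(i, j) * bilinear(data_im, h_im, w_im)

so modulated (v2) deformable convolution is an ordinary convolution evaluated at learned, per-location sampling points, each scaled by a learned per-tap weight.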
+ +template <typename T> +__global__ void modulated_deformable_col2im_gpu_kernel( + const int n, const T *data_col, const T *data_offset, const T *data_mask, + const int channels, const int height, const int width, const int kernel_h, + const int kernel_w, const int pad_h, const int pad_w, const int stride_h, + const int stride_w, const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, const int batch_size, + const int deformable_group, const int height_col, const int width_col, + T *grad_im) { + CUDA_1D_KERNEL_LOOP(index, n) { + const int j = (index / width_col / height_col / batch_size) % kernel_w; + const int i = + (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = + index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const T *data_offset_ptr = + data_offset + (b * deformable_group + deformable_group_index) * 2 * + kernel_h * kernel_w * height_col * width_col; + const T *data_mask_ptr = + data_mask + (b * deformable_group + deformable_group_index) * kernel_h * + kernel_w * height_col * width_col; + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const int data_mask_hw_ptr = + ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; + const T offset_h = data_offset_ptr[data_offset_h_ptr]; + const T offset_w = data_offset_ptr[data_offset_w_ptr]; + const T mask = data_mask_ptr[data_mask_hw_ptr]; + const T cur_inv_h_data = h_in + i * dilation_h + offset_h; + const T cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const T cur_top_grad = data_col[index] * mask; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) { + for (int dx = -2; dx <= 2; dx++) { + if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && + cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && + abs(cur_inv_w_data - (cur_w + dx)) < 1) { + int cur_bottom_grad_pos = + ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; + T weight = + dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, + cur_h + dy, cur_w + dx, height, width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + +template <typename T> +__global__ void 
modulated_deformable_col2im_coord_gpu_kernel( + const int n, const T *data_col, const T *data_im, const T *data_offset, + const T *data_mask, const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, const int dilation_h, + const int dilation_w, const int channel_per_deformable_group, + const int batch_size, const int offset_channels, const int deformable_group, + const int height_col, const int width_col, T *grad_offset, T *grad_mask) { + CUDA_1D_KERNEL_LOOP(index, n) { + T val = 0, mval = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const T *data_col_ptr = data_col + deformable_group_index * + channel_per_deformable_group * + batch_size * width_col * height_col; + const T *data_im_ptr = + data_im + (b * deformable_group + deformable_group_index) * + channel_per_deformable_group / kernel_h / kernel_w * + height * width; + const T *data_offset_ptr = + data_offset + (b * deformable_group + deformable_group_index) * 2 * + kernel_h * kernel_w * height_col * width_col; + const T *data_mask_ptr = + data_mask + (b * deformable_group + deformable_group_index) * kernel_h * + kernel_w * height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; + col_c += col_step) { + const int col_pos = + (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = + (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = + (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = + (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + + w_out); + const int data_mask_hw_ptr = + (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); + const T offset_h = data_offset_ptr[data_offset_h_ptr]; + const T offset_w = data_offset_ptr[data_offset_w_ptr]; + const T mask = data_mask_ptr[data_mask_hw_ptr]; + T inv_h = h_in + i * dilation_h + offset_h; + T inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) + inv_h = inv_w = -2; + else + mval += data_col_ptr[col_pos] * + dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, + height, width, inv_h, inv_w); + const T weight = dmcn_get_coordinate_weight( + inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, + width, bp_dir); + val += weight * data_col_ptr[col_pos] * mask; + cnt += 1; + } + // KERNEL_ASSIGN(grad_offset[index], offset_req, val); + grad_offset[index] = val; + if (offset_c % 2 == 0) + // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + + // deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * + // height_col + h) * width_col + w], mask_req, mval); + grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * + 
kernel_w + + offset_c / 2) * + height_col + + h) * + width_col + + w] = mval; + } +} + +#endif // MODULATED_DEFORM_CONV_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/ms_deform_attn_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/ms_deform_attn_cuda_kernel.cuh new file mode 100644 index 0000000..aff1ea2 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/ms_deform_attn_cuda_kernel.cuh @@ -0,0 +1,800 @@ +/*! +************************************************************************************************** +* Deformable DETR +* Copyright (c) 2020 SenseTime. All Rights Reserved. +* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************************************** +* Modified from +*https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ +#ifndef DEFORM_ATTN_CUDA_KERNEL +#define DEFORM_ATTN_CUDA_KERNEL + +#include "common_cuda_helper.hpp" +#include "pytorch_cuda_helper.hpp" + +const int CUDA_NUM_THREADS = 1024; +inline int GET_BLOCKS(const int N, const int num_threads) { + return (N + num_threads - 1) / num_threads; +} + +template <typename scalar_t> +__device__ scalar_t ms_deform_attn_im2col_bilinear( + const scalar_t *&bottom_data, const int &height, const int &width, + const int &nheads, const int &channels, const scalar_t &h, + const scalar_t &w, const int &m, const int &c) { + const int h_low = floorf(h); + const int w_low = floorf(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + } + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + } + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +}
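Note the sampling convention used throughout this file: sampling locations arrive normalized to [0, 1] per feature level and are mapped to pixel units with h_im = loc_h * spatial_h - 0.5 (the -0.5 aligns them with pixel centers). Per (batch, query, head, channel), the forward kernel further down then accumulates, roughly,

  col = sum over levels l and points p of
        attn_weight[l][p] * bilinear(value_l, loc_h[l][p] * H_l - 0.5,
                                     loc_w[l][p] * W_l - 0.5)

which is the multi-scale deformable attention aggregation of Deformable DETR.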
+ +template <typename scalar_t> +__device__ void ms_deform_attn_col2im_bilinear( + const scalar_t *&bottom_data, const int &height, const int &width, + const int &nheads, const int &channels, const scalar_t &h, + const scalar_t &w, const int &m, const int &c, const scalar_t &top_grad, + const scalar_t &attn_weight, scalar_t *&grad_value, + scalar_t *grad_sampling_loc, scalar_t *grad_attn_weight) { + const int h_low = floorf(h); + const int w_low = floorf(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + const scalar_t top_grad_value = top_grad * attn_weight; + scalar_t grad_h_weight = 0, grad_w_weight = 0; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + grad_h_weight -= hw * v1; + grad_w_weight -= hh * v1; + atomicAdd(grad_value + ptr1, w1 * top_grad_value); + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + grad_h_weight -= lw * v2; + grad_w_weight += hh * v2; + atomicAdd(grad_value + ptr2, w2 * top_grad_value); + } + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + grad_h_weight += hw * v3; + grad_w_weight -= lh * v3; + atomicAdd(grad_value + ptr3, w3 * top_grad_value); + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + grad_h_weight += lw * v4; + grad_w_weight += lh * v4; + atomicAdd(grad_value + ptr4, w4 * top_grad_value); + } + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + *grad_attn_weight = top_grad * val; + *grad_sampling_loc = width * grad_w_weight * top_grad_value; + *(grad_sampling_loc + 1) = height * grad_h_weight * top_grad_value; +}
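The three trailing stores encode the chain rule through the bilinear sample. With val = (1-lh)(1-lw)*v1 + (1-lh)*lw*v2 + lh*(1-lw)*v3 + lh*lw*v4, differentiating gives

  dval/dlh = -(1 - lw) * v1 - lw * v2 + (1 - lw) * v3 + lw * v4   // grad_h_weight
  dval/dlw = -(1 - lh) * v1 + (1 - lh) * v2 - lh * v3 + lh * v4   // grad_w_weight

and because h_im = loc_h * height - 0.5, the derivative with respect to the normalized location picks up a factor of height (likewise width), which is exactly the height * / width * scaling applied to grad_sampling_loc above.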
+ +template <typename scalar_t> +__device__ void ms_deform_attn_col2im_bilinear_gm( + const scalar_t *&bottom_data, const int &height, const int &width, + const int &nheads, const int &channels, const scalar_t &h, + const scalar_t &w, const int &m, const int &c, const scalar_t &top_grad, + const scalar_t &attn_weight, scalar_t *&grad_value, + scalar_t *grad_sampling_loc, scalar_t *grad_attn_weight) { + const int h_low = floorf(h); + const int w_low = floorf(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + const scalar_t top_grad_value = top_grad * attn_weight; + scalar_t grad_h_weight = 0, grad_w_weight = 0; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + grad_h_weight -= hw * v1; + grad_w_weight -= hh * v1; + atomicAdd(grad_value + ptr1, w1 * top_grad_value); + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + grad_h_weight -= lw * v2; + grad_w_weight += hh * v2; + atomicAdd(grad_value + ptr2, w2 * top_grad_value); + } + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + grad_h_weight += hw * v3; + grad_w_weight -= lh * v3; + atomicAdd(grad_value + ptr3, w3 * top_grad_value); + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + grad_h_weight += lw * v4; + grad_w_weight += lh * v4; + atomicAdd(grad_value + ptr4, w4 * top_grad_value); + } + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + atomicAdd(grad_attn_weight, top_grad * val); + atomicAdd(grad_sampling_loc, width * grad_w_weight * top_grad_value); + atomicAdd(grad_sampling_loc + 1, height * grad_h_weight * top_grad_value); +} + +template <typename scalar_t> +__global__ void ms_deformable_im2col_gpu_kernel( + const int n, const scalar_t *data_value, const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, const int batch_size, + const int spatial_size, const int num_heads, const int channels, + const int num_levels, const int num_query, const int num_point, + scalar_t *data_col) { + CUDA_1D_KERNEL_LOOP(index, n) { + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + _temp /= num_query; + const int b_col = _temp; + + scalar_t *data_col_ptr = data_col + index; + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + scalar_t col = 0; + + for (int l_col = 0; l_col < num_levels; ++l_col) { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const scalar_t *data_value_ptr = + data_value + + (data_value_ptr_init_offset + level_start_id * qid_stride); + for (int p_col = 0; p_col < num_point; ++p_col) { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) { + col += ms_deform_attn_im2col_bilinear(data_value_ptr, spatial_h, + spatial_w, num_heads, channels, + h_im, w_im, m_col, c_col) * + weight; + } + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + } + } + *data_col_ptr = col; + } +}
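All forward and backward kernels in this file decode their flat thread index with the same % / /= chain, which implies the layout index = ((b_col * num_query + query) * num_heads + m_col) * channels + c_col; sampling_index = index / channels is then the (batch, query, head) triple used to address the sampling-location and attention-weight arrays. A host-side decode mirroring it (illustrative sketch, not part of the patch):

// Inverse of index = ((b * num_query + q) * num_heads + m) * channels + c.
void decode_flat_index(int index, int channels, int num_heads, int num_query,
                       int *c, int *m, int *q, int *b) {
  *c = index % channels;
  index /= channels;  // == sampling_index in the kernels
  *m = index % num_heads;
  index /= num_heads;
  *q = index % num_query;
  index /= num_query;
  *b = index;
}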
channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col = 0; l_col < num_levels; ++l_col) { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = + data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col = 0; p_col < num_point; ++p_col) { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc + (threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc + ((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight + threadIdx.x) = 0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, + w_im, m_col, c_col, top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc + (threadIdx.x << 1), + cache_grad_attn_weight + threadIdx.x); + } + + __syncthreads(); + if (tid == 0) { + scalar_t _grad_w = cache_grad_sampling_loc[0], + _grad_h = cache_grad_sampling_loc[1], + _grad_a = cache_grad_attn_weight[0]; + int sid = 2; + for (unsigned int tid = 1; tid < blockSize; ++tid) { + _grad_w += cache_grad_sampling_loc[sid]; + _grad_h += cache_grad_sampling_loc[sid + 1]; + _grad_a += cache_grad_attn_weight[tid]; + sid += 2; + } + + *grad_sampling_loc = _grad_w; + *(grad_sampling_loc + 1) = _grad_h; + *grad_attn_weight = _grad_a; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +}
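The several col2im variants that follow differ only in how the per-thread partials cached in shared memory are reduced before being written out: _blocksize_aware_reduce_v1 lets thread 0 walk all blockSize partials serially (O(blockSize) steps); _v2 folds the array in half each step, the standard tree reduction,

  for (unsigned int s = blockSize / 2; s > 0; s >>= 1) {
    if (tid < s) buf[tid] += buf[tid + s];  // fold upper half into lower
    __syncthreads();
  }

giving O(log blockSize) steps; the _shm_reduce_ variants do the same with the runtime blockDim.x and dynamically sized shared memory (the spre bookkeeping handles non-power-of-two block sizes); and _multi_blocks finishes with atomicAdd because several thread blocks can own pieces of the same query's gradient. Taking blockSize as a template parameter lets the compiler fully unroll the reduction.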
+ +template <typename scalar_t, unsigned int blockSize> +__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2( + const int n, const scalar_t *grad_col, const scalar_t *data_value, + const int64_t *data_spatial_shapes, const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight, + const int batch_size, const int spatial_size, const int num_heads, + const int channels, const int num_levels, const int num_query, + const int num_point, scalar_t *grad_value, scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) { + CUDA_1D_KERNEL_LOOP(index, n) { + __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2]; + __shared__ scalar_t cache_grad_attn_weight[blockSize]; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col = 0; l_col < num_levels; ++l_col) { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = + data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col = 0; p_col < num_point; ++p_col) { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc + (threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc + ((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight + threadIdx.x) = 0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, + w_im, m_col, c_col, top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc + (threadIdx.x << 1), + cache_grad_attn_weight + threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s = blockSize / 2; s > 0; s >>= 1) { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2]; + cache_grad_sampling_loc[xid1 + 1] += + cache_grad_sampling_loc[xid2 + 1]; + } + __syncthreads(); + } + + if (tid == 0) { + *grad_sampling_loc = cache_grad_sampling_loc[0]; + *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1]; + *grad_attn_weight = cache_grad_attn_weight[0]; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +}
+ +template <typename scalar_t> +__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v1( + const int n, const scalar_t *grad_col, const scalar_t *data_value, + const int64_t *data_spatial_shapes, const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight, + const int batch_size, const int spatial_size, const int num_heads, + const int channels, const int num_levels, const int num_query, + const int num_point, scalar_t *grad_value, scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) { + CUDA_1D_KERNEL_LOOP(index, n) { + extern __shared__ int _s[]; + scalar_t *cache_grad_sampling_loc = reinterpret_cast<scalar_t *>(_s); + scalar_t *cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col = 0; l_col < num_levels; ++l_col) { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = + data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col = 0; p_col < num_point; ++p_col) { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc + (threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc + ((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight + threadIdx.x) = 0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, + w_im, m_col, c_col, top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc + (threadIdx.x << 1), + cache_grad_attn_weight + threadIdx.x); + } + + __syncthreads(); + if (tid == 0) { + scalar_t _grad_w = cache_grad_sampling_loc[0], + _grad_h = cache_grad_sampling_loc[1], + _grad_a = cache_grad_attn_weight[0]; + int sid = 2; + for (unsigned int tid = 1; tid < blockDim.x; ++tid) { + _grad_w += cache_grad_sampling_loc[sid]; + _grad_h += cache_grad_sampling_loc[sid + 1]; + _grad_a += cache_grad_attn_weight[tid]; + sid += 2; + } + + *grad_sampling_loc = _grad_w; + *(grad_sampling_loc + 1) = _grad_h; + *grad_attn_weight = _grad_a; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +}
+ +template <typename scalar_t> +__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2( + const int n, const scalar_t *grad_col, const scalar_t *data_value, + const int64_t *data_spatial_shapes, const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight, + const int batch_size, const int spatial_size, const int num_heads, + const int channels, const int num_levels, const int num_query, + const int num_point, scalar_t *grad_value, scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) { + CUDA_1D_KERNEL_LOOP(index, n) { + extern __shared__ int _s[]; + scalar_t *cache_grad_sampling_loc = reinterpret_cast<scalar_t *>(_s); + scalar_t *cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col = 0; l_col < num_levels; ++l_col) { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = + data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col = 0; p_col < num_point; ++p_col) { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc + (threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc + ((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight + threadIdx.x) = 0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, + w_im, m_col, c_col, top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc + (threadIdx.x << 1), + cache_grad_attn_weight + threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s = blockDim.x / 2, spre = blockDim.x; s > 0; + s >>= 1, spre >>= 1) { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2]; + cache_grad_sampling_loc[xid1 + 1] += + cache_grad_sampling_loc[xid2 + 1]; + if (tid + (s << 1) < spre) { + cache_grad_attn_weight[tid] += + cache_grad_attn_weight[tid + (s << 1)]; + cache_grad_sampling_loc[xid1] += + cache_grad_sampling_loc[xid2 + (s << 1)]; + cache_grad_sampling_loc[xid1 + 1] += + cache_grad_sampling_loc[xid2 + 1 + (s << 1)]; + } + } + __syncthreads(); + } + + if (tid == 0) { + *grad_sampling_loc = cache_grad_sampling_loc[0]; + *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1]; + *grad_attn_weight = cache_grad_attn_weight[0]; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +}
+ +template <typename scalar_t> +__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks( + const int n, const scalar_t *grad_col, const scalar_t *data_value, + const int64_t *data_spatial_shapes, const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight, + const int batch_size, const int spatial_size, const int num_heads, + const int channels, const int num_levels, const int num_query, + const int num_point, scalar_t *grad_value, scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) { + CUDA_1D_KERNEL_LOOP(index, n) { + extern __shared__ int _s[]; + scalar_t *cache_grad_sampling_loc = reinterpret_cast<scalar_t *>(_s); + scalar_t *cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col = 0; l_col < num_levels; ++l_col) { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = + data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col = 0; p_col < num_point; ++p_col) { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc + (threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc + ((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight + threadIdx.x) = 0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, + w_im, m_col, c_col, top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc + (threadIdx.x << 1), + cache_grad_attn_weight + threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s = blockDim.x / 2, spre = blockDim.x; s > 0; + s >>= 1, spre >>= 1) { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2]; + cache_grad_sampling_loc[xid1 + 1] += + cache_grad_sampling_loc[xid2 + 1]; + if (tid + (s << 1) < spre) { + cache_grad_attn_weight[tid] += + cache_grad_attn_weight[tid + (s << 1)]; + cache_grad_sampling_loc[xid1] += + cache_grad_sampling_loc[xid2 + (s << 1)]; + cache_grad_sampling_loc[xid1 + 1] += + cache_grad_sampling_loc[xid2 + 1 + (s << 1)]; + } + } + __syncthreads(); + } + + if (tid == 0) { + atomicAdd(grad_sampling_loc, cache_grad_sampling_loc[0]); + atomicAdd(grad_sampling_loc + 1, cache_grad_sampling_loc[1]); + atomicAdd(grad_attn_weight, cache_grad_attn_weight[0]); + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + +template <typename scalar_t> +__global__ void ms_deformable_col2im_gpu_kernel_gm( + const int n, const scalar_t *grad_col, const scalar_t *data_value, + const int64_t *data_spatial_shapes, const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight, + const int batch_size, const int spatial_size, const int 
num_heads, + const int channels, const int num_levels, const int num_query, + const int num_point, scalar_t *grad_value, scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) { + CUDA_1D_KERNEL_LOOP(index, n) { + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col = 0; l_col < num_levels; ++l_col) { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = + data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col = 0; p_col < num_point; ++p_col) { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) { + ms_deform_attn_col2im_bilinear_gm( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, + w_im, m_col, c_col, top_grad, weight, grad_value_ptr, + grad_sampling_loc, grad_attn_weight); + } + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} +#endif // DEFORM_ATTN_CUDA_KERNEL diff --git a/mmcv/ops/csrc/common/cuda/nms_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/nms_cuda_kernel.cuh new file mode 100644 index 0000000..2bd4ef8 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/nms_cuda_kernel.cuh @@ -0,0 +1,70 @@ +// Copyright (c) OpenMMLab. 
All rights reserved +#ifndef NMS_CUDA_KERNEL_CUH +#define NMS_CUDA_KERNEL_CUH + +#include +#ifdef MMCV_WITH_TRT +#include "common_cuda_helper.hpp" +#else // MMCV_WITH_TRT +#include "pytorch_cuda_helper.hpp" +#endif // MMCV_WITH_TRT + +int const threadsPerBlock = sizeof(unsigned long long int) * 8; + +__device__ inline bool devIoU(float const *const a, float const *const b, + const int offset, const float threshold) { + float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]); + float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]); + float width = fmaxf(right - left + offset, 0.f), + height = fmaxf(bottom - top + offset, 0.f); + float interS = width * height; + float Sa = (a[2] - a[0] + offset) * (a[3] - a[1] + offset); + float Sb = (b[2] - b[0] + offset) * (b[3] - b[1] + offset); + return interS > threshold * (Sa + Sb - interS); +} + +__global__ void nms_cuda(const int n_boxes, const float iou_threshold, + const int offset, const float *dev_boxes, + unsigned long long *dev_mask) { + const int row_start = blockIdx.y; + const int col_start = blockIdx.x; + const int tid = threadIdx.x; + + if (row_start > col_start) return; + + const int row_size = + fminf(n_boxes - row_start * threadsPerBlock, threadsPerBlock); + const int col_size = + fminf(n_boxes - col_start * threadsPerBlock, threadsPerBlock); + + __shared__ float block_boxes[threadsPerBlock * 4]; + if (tid < col_size) { + block_boxes[tid * 4 + 0] = + dev_boxes[(threadsPerBlock * col_start + tid) * 4 + 0]; + block_boxes[tid * 4 + 1] = + dev_boxes[(threadsPerBlock * col_start + tid) * 4 + 1]; + block_boxes[tid * 4 + 2] = + dev_boxes[(threadsPerBlock * col_start + tid) * 4 + 2]; + block_boxes[tid * 4 + 3] = + dev_boxes[(threadsPerBlock * col_start + tid) * 4 + 3]; + } + __syncthreads(); + + if (tid < row_size) { + const int cur_box_idx = threadsPerBlock * row_start + tid; + const float *cur_box = dev_boxes + cur_box_idx * 4; + int i = 0; + unsigned long long int t = 0; + int start = 0; + if (row_start == col_start) { + start = tid + 1; + } + for (i = start; i < col_size; i++) { + if (devIoU(cur_box, block_boxes + i * 4, offset, iou_threshold)) { + t |= 1ULL << i; + } + } + dev_mask[cur_box_idx * gridDim.y + col_start] = t; + } +} +#endif // NMS_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/nms_rotated_cuda.cuh b/mmcv/ops/csrc/common/cuda/nms_rotated_cuda.cuh new file mode 100644 index 0000000..cf4863e --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/nms_rotated_cuda.cuh @@ -0,0 +1,131 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +// modified from +// https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/nms_rotated/nms_rotated_cuda.cu +#ifndef NMS_ROTATED_CUDA_CUH +#define NMS_ROTATED_CUDA_CUH + +#include "pytorch_cuda_helper.hpp" +#include "box_iou_rotated_utils.hpp" + +__host__ __device__ inline int divideUP(const int x, const int y) { + return (((x) + (y)-1) / (y)); +} + +namespace { +int const threadsPerBlock = sizeof(unsigned long long) * 8; +} + +template <typename T> +__global__ void nms_rotated_cuda_kernel(const int n_boxes, + const float iou_threshold, + const T* dev_boxes, + unsigned long long* dev_mask, + const int multi_label) { + // nms_rotated_cuda_kernel is modified from torchvision's nms_cuda_kernel + + if (multi_label == 1) { + const int row_start = blockIdx.y; + const int col_start = blockIdx.x; + + // if (row_start > col_start) return; + + const int row_size = + min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); + const int col_size = + min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); + + // Compared to nms_cuda_kernel, where each box is represented with 4 values + // (x1, y1, x2, y2), each rotated box is represented with 6 values here: + // (x_center, y_center, width, height, angle_degrees) plus a class label, + // so the shared buffer must hold 6 values per box. + __shared__ T block_boxes[threadsPerBlock * 6]; + if (threadIdx.x < col_size) { + block_boxes[threadIdx.x * 6 + 0] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 0]; + block_boxes[threadIdx.x * 6 + 1] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 1]; + block_boxes[threadIdx.x * 6 + 2] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 2]; + block_boxes[threadIdx.x * 6 + 3] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 3]; + block_boxes[threadIdx.x * 6 + 4] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 4]; + block_boxes[threadIdx.x * 6 + 5] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 5]; + } + __syncthreads(); + + if (threadIdx.x < row_size) { + const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; + const T* cur_box = dev_boxes + cur_box_idx * 6; + int i = 0; + unsigned long long t = 0; + int start = 0; + if (row_start == col_start) { + start = threadIdx.x + 1; + } + for (i = start; i < col_size; i++) { + // Instead of devIoU used by original horizontal nms, here + // we use the single_box_iou_rotated function from + // box_iou_rotated_utils.h + if (single_box_iou_rotated<T>(cur_box, block_boxes + i * 6, 0) > + iou_threshold) { + t |= 1ULL << i; + } + } + const int col_blocks = divideUP(n_boxes, threadsPerBlock); + dev_mask[cur_box_idx * col_blocks + col_start] = t; + } + } else { + const int row_start = blockIdx.y; + const int col_start = blockIdx.x; + + // if (row_start > col_start) return; + + const int row_size = + min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); + const int col_size = + min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); + + // Compared to nms_cuda_kernel, where each box is represented with 4 values + // (x1, y1, x2, y2), each rotated box is represented with 5 values + // (x_center, y_center, width, height, angle_degrees) here.
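+ // Tiling note: threadsPerBlock == sizeof(unsigned long long) * 8 == 64, so + // boxes are processed in 64-wide tiles. Each block stages one column tile + // in shared memory below; every thread then tests its own row box against + // the tile and packs its suppression decisions into one 64-bit word of + // dev_mask.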
+ __shared__ T block_boxes[threadsPerBlock * 5]; + if (threadIdx.x < col_size) { + block_boxes[threadIdx.x * 5 + 0] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; + block_boxes[threadIdx.x * 5 + 1] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; + block_boxes[threadIdx.x * 5 + 2] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; + block_boxes[threadIdx.x * 5 + 3] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; + block_boxes[threadIdx.x * 5 + 4] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; + } + __syncthreads(); + + if (threadIdx.x < row_size) { + const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; + const T* cur_box = dev_boxes + cur_box_idx * 5; + int i = 0; + unsigned long long t = 0; + int start = 0; + if (row_start == col_start) { + start = threadIdx.x + 1; + } + for (i = start; i < col_size; i++) { + // Instead of devIoU used by original horizontal nms, here + // we use the single_box_iou_rotated function from + // box_iou_rotated_utils.h + if (single_box_iou_rotated(cur_box, block_boxes + i * 5, 0) > + iou_threshold) { + t |= 1ULL << i; + } + } + const int col_blocks = divideUP(n_boxes, threadsPerBlock); + dev_mask[cur_box_idx * col_blocks + col_start] = t; + } + } +} + +#endif diff --git a/mmcv/ops/csrc/common/cuda/points_in_boxes_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/points_in_boxes_cuda_kernel.cuh new file mode 100644 index 0000000..011f5f7 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/points_in_boxes_cuda_kernel.cuh @@ -0,0 +1,89 @@ +// Copyright (c) OpenMMLab. All rights reserved +#ifndef POINT_IN_BOXES_CUDA_KERNEL_CUH +#define POINT_IN_BOXES_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +template +__device__ inline void lidar_to_local_coords(T shift_x, T shift_y, T rz, + T &local_x, T &local_y) { + T cosa = cos(-rz), sina = sin(-rz); + local_x = shift_x * cosa + shift_y * (-sina); + local_y = shift_x * sina + shift_y * cosa; +} + +template +__device__ inline int check_pt_in_box3d(const T *pt, const T *box3d, T &local_x, + T &local_y) { + // param pt: (x, y, z) + // param box3d: (cx, cy, cz, x_size, y_size, z_size, rz) in LiDAR coordinate, + // cz in the bottom center + T x = pt[0], y = pt[1], z = pt[2]; + T cx = box3d[0], cy = box3d[1], cz = box3d[2]; + T x_size = box3d[3], y_size = box3d[4], z_size = box3d[5], rz = box3d[6]; + cz += z_size / + 2.0; // shift to the center since cz in box3d is the bottom center + + if (fabsf(z - cz) > z_size / 2.0) return 0; + lidar_to_local_coords(x - cx, y - cy, rz, local_x, local_y); + float in_flag = (local_x > -x_size / 2.0) & (local_x < x_size / 2.0) & + (local_y > -y_size / 2.0) & (local_y < y_size / 2.0); + return in_flag; +} + +template +__global__ void points_in_boxes_part_forward_cuda_kernel( + int batch_size, int boxes_num, int pts_num, const T *boxes, const T *pts, + int *box_idx_of_points) { + // params boxes: (B, N, 7) [x, y, z, x_size, y_size, z_size, rz] in LiDAR + // coordinate, z is the bottom center, each box DO NOT overlaps params pts: + // (B, npoints, 3) [x, y, z] in LiDAR coordinate params boxes_idx_of_points: + // (B, npoints), default -1 + + int bs_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= batch_size || pt_idx >= pts_num) return; + + boxes += bs_idx * boxes_num * 7; + pts += bs_idx * pts_num * 3 + pt_idx * 3; + box_idx_of_points += bs_idx * pts_num + pt_idx; + + T local_x = 0, local_y = 0; + int cur_in_flag = 0; + for (int k = 0; k < boxes_num; 
k++) { + cur_in_flag = check_pt_in_box3d(pts, boxes + k * 7, local_x, local_y); + if (cur_in_flag) { + box_idx_of_points[0] = k; + break; + } + } +} + +template +__global__ void points_in_boxes_all_forward_cuda_kernel( + int batch_size, int boxes_num, int pts_num, const T *boxes, const T *pts, + int *box_idx_of_points) { + // params boxes: (B, N, 7) [x, y, z, x_size, y_size, z_size, rz] in LiDAR + // coordinate, z is the bottom center, each box DO NOT overlaps params pts: + // (B, npoints, 3) [x, y, z] in LiDAR coordinate params boxes_idx_of_points: + // (B, npoints), default -1 + + int bs_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= batch_size || pt_idx >= pts_num) return; + + boxes += bs_idx * boxes_num * 7; + pts += bs_idx * pts_num * 3 + pt_idx * 3; + box_idx_of_points += bs_idx * pts_num * boxes_num + pt_idx * boxes_num; + + T local_x = 0, local_y = 0; + for (int k = 0; k < boxes_num; k++) { + const int cur_in_flag = + check_pt_in_box3d(pts, boxes + k * 7, local_x, local_y); + if (cur_in_flag) { + box_idx_of_points[k] = 1; + } + } +} + +#endif // POINT_IN_BOXES_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/psamask_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/psamask_cuda_kernel.cuh new file mode 100644 index 0000000..523d71a --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/psamask_cuda_kernel.cuh @@ -0,0 +1,137 @@ +// Copyright (c) OpenMMLab. All rights reserved +#ifndef PSAMASK_CUDA_KERNEL_CUH +#define PSAMASK_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +// CUDA: grid stride looping +#ifndef CUDA_KERNEL_LOOP +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ + i += blockDim.x * gridDim.x) +#endif + +template +__global__ void psamask_collect_forward_cuda( + const int nthreads, const int h_feature, const int w_feature, + const int h_mask, const int w_mask, const int half_h_mask, + const int half_w_mask, const T* mask_data, T* buffer_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int w = index % w_feature; + const int h = (index / w_feature) % h_feature; + const int n = index / w_feature / h_feature; + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_h_mask - h); + const int hend = min(h_mask, h_feature + half_h_mask - h); + const int wstart = max(0, half_w_mask - w); + const int wend = min(w_mask, w_feature + half_w_mask - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_h_mask, widx + w - half_w_mask) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + buffer_data[(n * h_feature * w_feature + + (hidx + h - half_h_mask) * w_feature + + (widx + w - half_w_mask)) * + h_feature * w_feature + + h * w_feature + w] = mask_data + [((n * h_mask * w_mask + hidx * w_mask + widx) * h_feature + h) * + w_feature + + w]; + } + } + } +} + +template +__global__ void psamask_distribute_forward_cuda( + const int nthreads, const int h_feature, const int w_feature, + const int h_mask, const int w_mask, const int half_h_mask, + const int half_w_mask, const T* mask_data, T* buffer_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int w = index % w_feature; + const int h = (index / w_feature) % h_feature; + const int n = index / w_feature / h_feature; + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_h_mask - h); + const int hend = min(h_mask, h_feature + half_h_mask - h); + const int 
wstart = max(0, half_w_mask - w); + const int wend = min(w_mask, w_feature + half_w_mask - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_h_mask, widx + w - half_w_mask) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + buffer_data[(n * h_feature * w_feature + h * w_feature + w) * + h_feature * w_feature + + (hidx + h - half_h_mask) * w_feature + + (widx + w - half_w_mask)] = mask_data + [((n * h_mask * w_mask + hidx * w_mask + widx) * h_feature + h) * + w_feature + + w]; + } + } + } +} + +template +__global__ void psamask_collect_backward_cuda( + const int nthreads, const int h_feature, const int w_feature, + const int h_mask, const int w_mask, const int half_h_mask, + const int half_w_mask, const T* buffer_diff, T* mask_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int w = index % w_feature; + const int h = (index / w_feature) % h_feature; + const int n = index / w_feature / h_feature; + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_h_mask - h); + const int hend = min(h_mask, h_feature + half_h_mask - h); + const int wstart = max(0, half_w_mask - w); + const int wend = min(w_mask, w_feature + half_w_mask - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_h_mask, widx + w - half_w_mask) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + mask_diff[((n * h_mask * w_mask + hidx * w_mask + widx) * h_feature + + h) * + w_feature + + w] = buffer_diff[(n * h_feature * w_feature + + (hidx + h - half_h_mask) * w_feature + + (widx + w - half_w_mask)) * + h_feature * w_feature + + h * w_feature + w]; + } + } + } +} + +template +__global__ void psamask_distribute_backward_cuda( + const int nthreads, const int h_feature, const int w_feature, + const int h_mask, const int w_mask, const int half_h_mask, + const int half_w_mask, const T* buffer_diff, T* mask_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int w = index % w_feature; + const int h = (index / w_feature) % h_feature; + const int n = index / w_feature / h_feature; + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_h_mask - h); + const int hend = min(h_mask, h_feature + half_h_mask - h); + const int wstart = max(0, half_w_mask - w); + const int wend = min(w_mask, w_feature + half_w_mask - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_h_mask, widx + w - half_w_mask) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + mask_diff[((n * h_mask * w_mask + hidx * w_mask + widx) * h_feature + + h) * + w_feature + + w] = + buffer_diff[(n * h_feature * w_feature + h * w_feature + w) * + h_feature * w_feature + + (hidx + h - half_h_mask) * w_feature + + (widx + w - half_w_mask)]; + } + } + } +} + +#endif // PSAMASK_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/roi_align_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/roi_align_cuda_kernel.cuh new file mode 100644 index 0000000..8b90ee6 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/roi_align_cuda_kernel.cuh @@ -0,0 +1,208 @@ +// Copyright (c) OpenMMLab. 
All rights reserved +#ifndef ROI_ALIGN_CUDA_KERNEL_CUH +#define ROI_ALIGN_CUDA_KERNEL_CUH + +#include <float.h> +#ifdef MMCV_WITH_TRT +#include "common_cuda_helper.hpp" +#else // MMCV_WITH_TRT +#include "pytorch_cuda_helper.hpp" +#endif // MMCV_WITH_TRT + +/*** Forward ***/ +template <typename T> +__global__ void roi_align_forward_cuda_kernel( + const int nthreads, const T* input, const T* rois, T* output, T* argmax_y, + T* argmax_x, const int pooled_height, const int pooled_width, + const T spatial_scale, const int sampling_ratio, + const int pool_mode, // 0 - max pool, 1 - avg pool + const bool aligned, const int channels, const int height, const int width) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + + // Do not use rounding; this implementation detail is critical + T offset = aligned ? (T)0.5 : (T)0.0; + T roi_start_w = offset_rois[1] * spatial_scale - offset; + T roi_start_h = offset_rois[2] * spatial_scale - offset; + T roi_end_w = offset_rois[3] * spatial_scale - offset; + T roi_end_h = offset_rois[4] * spatial_scale - offset; + + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + if (!aligned) { // for backward-compatibility only + roi_width = max(roi_width, (T)1.); + roi_height = max(roi_height, (T)1.); + } + + T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); + T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); + + const T* offset_input = + input + (roi_batch_ind * channels + c) * height * width; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = + (sampling_ratio > 0) + ? sampling_ratio + : static_cast<int>(ceilf(roi_height / pooled_height)); + int roi_bin_grid_w = + (sampling_ratio > 0) + ?
sampling_ratio + : static_cast(ceilf(roi_width / pooled_width)); + + if (pool_mode == 0) { + // We do max pooling inside a bin + T maxval = -FLT_MAX; + T maxidx_y = -1.f, maxidx_x = -1.f; + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + const T y = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T x = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + T val = + bilinear_interpolate(offset_input, height, width, y, x, index); + if (val > maxval) { + maxval = val; + maxidx_y = y; + maxidx_x = x; + } + } + } + output[index] = maxval; + argmax_y[index] = maxidx_y; + argmax_x[index] = maxidx_x; + } else if (pool_mode == 1) { + // We do average pooling inside a bin + const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); + T output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + const T y = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T x = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + T val = + bilinear_interpolate(offset_input, height, width, y, x, index); + output_val += val; + } + } + output[index] = output_val / count; + } + } +} + +/*** Backward ***/ +template +__global__ void roi_align_backward_cuda_kernel( + const int nthreads, const T* grad_output, const T* rois, const T* argmax_y, + const T* argmax_x, T* grad_input, const int pooled_height, + const int pooled_width, const T spatial_scale, const int sampling_ratio, + const int pool_mode, // 0 - max pool, 1 - avg pool + const bool aligned, const int channels, const int height, const int width) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T grad_output_this_bin = grad_output[index]; + + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + T* offset_grad_input = + grad_input + ((roi_batch_ind * channels + c) * height * width); + + if (pool_mode == 0) { + T y = argmax_y[index], x = argmax_x[index]; + if (y != -1.f) { + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, + x_low, x_high, y_low, y_high, index); + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + atomicAdd(offset_grad_input + y_low * width + x_low, + grad_output_this_bin * w1); + atomicAdd(offset_grad_input + y_low * width + x_high, + grad_output_this_bin * w2); + atomicAdd(offset_grad_input + y_high * width + x_low, + grad_output_this_bin * w3); + atomicAdd(offset_grad_input + y_high * width + x_high, + grad_output_this_bin * w4); + } + } + } else if (pool_mode == 1) { + // Do not using rounding; this implementation detail is critical + T offset = aligned ? 
(T)0.5 : (T)0.0; + T roi_start_w = offset_rois[1] * spatial_scale - offset; + T roi_start_h = offset_rois[2] * spatial_scale - offset; + T roi_end_w = offset_rois[3] * spatial_scale - offset; + T roi_end_h = offset_rois[4] * spatial_scale - offset; + + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + if (!aligned) { // for backward-compatibility only + roi_width = max(roi_width, (T)1.); + roi_height = max(roi_height, (T)1.); + } + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = + (sampling_ratio > 0) + ? sampling_ratio + : static_cast(ceilf(roi_height / pooled_height)); + int roi_bin_grid_w = + (sampling_ratio > 0) + ? sampling_ratio + : static_cast(ceilf(roi_width / pooled_width)); + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 + + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + const T y = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T x = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, + x_low, x_high, y_low, y_high, index); + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + atomicAdd(offset_grad_input + y_low * width + x_low, + grad_output_this_bin * w1 / count); + atomicAdd(offset_grad_input + y_low * width + x_high, + grad_output_this_bin * w2 / count); + atomicAdd(offset_grad_input + y_high * width + x_low, + grad_output_this_bin * w3 / count); + atomicAdd(offset_grad_input + y_high * width + x_high, + grad_output_this_bin * w4 / count); + } + } + } + } + } +} + +#endif // ROI_ALIGN_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/roi_align_rotated_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/roi_align_rotated_cuda_kernel.cuh new file mode 100644 index 0000000..0978f40 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/roi_align_rotated_cuda_kernel.cuh @@ -0,0 +1,198 @@ +// Modified from +// https://github.com/facebookresearch/detectron2/tree/master/detectron2/layers/csrc/ROIAlignRotated +// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +#ifndef ROI_ALIGN_ROTATED_CUDA_KERNEL_CUH +#define ROI_ALIGN_ROTATED_CUDA_KERNEL_CUH + +#include +#ifdef MMCV_WITH_TRT +#include "common_cuda_helper.hpp" +#else // MMCV_WITH_TRT +#include "pytorch_cuda_helper.hpp" +#endif // MMCV_WITH_TRT + +/*** Forward ***/ +template +__global__ void roi_align_rotated_forward_cuda_kernel( + const int nthreads, const scalar_t *bottom_data, + const scalar_t *bottom_rois, const scalar_t spatial_scale, + const int sample_num, const bool aligned, const bool clockwise, + const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, scalar_t *top_data) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const scalar_t *offset_bottom_rois = bottom_rois + n * 6; + int roi_batch_ind = offset_bottom_rois[0]; + + // Do not using rounding; this implementation detail is critical + scalar_t offset = aligned ? (scalar_t)0.5 : (scalar_t)0.0; + scalar_t roi_center_w = offset_bottom_rois[1] * spatial_scale - offset; + scalar_t roi_center_h = offset_bottom_rois[2] * spatial_scale - offset; + scalar_t roi_width = offset_bottom_rois[3] * spatial_scale; + scalar_t roi_height = offset_bottom_rois[4] * spatial_scale; + // scalar_t theta = offset_bottom_rois[5] * M_PI / 180.0; + scalar_t theta = offset_bottom_rois[5]; + if (clockwise) { + theta = -theta; // If clockwise, the angle needs to be reversed. + } + if (!aligned) { // for backward-compatibility only + // Force malformed ROIs to be 1x1 + roi_width = max(roi_width, (scalar_t)1.); + roi_height = max(roi_height, (scalar_t)1.); + } + scalar_t bin_size_h = static_cast(roi_height) / + static_cast(pooled_height); + scalar_t bin_size_w = + static_cast(roi_width) / static_cast(pooled_width); + + const scalar_t *offset_bottom_data = + bottom_data + (roi_batch_ind * channels + c) * height * width; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sample_num > 0) + ? sample_num + : ceilf(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sample_num > 0) ? sample_num : ceilf(roi_width / pooled_width); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. + scalar_t roi_start_h = -roi_height / 2.0; + scalar_t roi_start_w = -roi_width / 2.0; + scalar_t cosscalar_theta = cos(theta); + scalar_t sinscalar_theta = sin(theta); + + // We do average (integral) pooling inside a bin + const scalar_t count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. 
= 4 + + scalar_t output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 + const scalar_t yy = + roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const scalar_t xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta (counterclockwise) around the center and translate + scalar_t y = yy * cosscalar_theta - xx * sinscalar_theta + roi_center_h; + scalar_t x = yy * sinscalar_theta + xx * cosscalar_theta + roi_center_w; + + scalar_t val = bilinear_interpolate( + offset_bottom_data, height, width, y, x, index); + output_val += val; + } + } + output_val /= count; + + top_data[index] = output_val; + } +} + +/*** Backward ***/ +template +__global__ void roi_align_rotated_backward_cuda_kernel( + const int nthreads, const scalar_t *top_diff, const scalar_t *bottom_rois, + const scalar_t spatial_scale, const int sample_num, const bool aligned, + const bool clockwise, const int channels, const int height, const int width, + const int pooled_height, const int pooled_width, scalar_t *bottom_diff) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const scalar_t *offset_bottom_rois = bottom_rois + n * 6; + int roi_batch_ind = offset_bottom_rois[0]; + + // Do not round + scalar_t offset = aligned ? (scalar_t)0.5 : (scalar_t)0.0; + scalar_t roi_center_w = offset_bottom_rois[1] * spatial_scale - offset; + scalar_t roi_center_h = offset_bottom_rois[2] * spatial_scale - offset; + scalar_t roi_width = offset_bottom_rois[3] * spatial_scale; + scalar_t roi_height = offset_bottom_rois[4] * spatial_scale; + // scalar_t theta = offset_bottom_rois[5] * M_PI / 180.0; + scalar_t theta = offset_bottom_rois[5]; + if (clockwise) { + theta = -theta; // If clockwise, the angle needs to be reversed. + } + if (!aligned) { // for backward-compatibility only + // Force malformed ROIs to be 1x1 + roi_width = max(roi_width, (scalar_t)1.); + roi_height = max(roi_height, (scalar_t)1.); + } + scalar_t bin_size_h = static_cast(roi_height) / + static_cast(pooled_height); + scalar_t bin_size_w = + static_cast(roi_width) / static_cast(pooled_width); + + scalar_t *offset_bottom_diff = + bottom_diff + (roi_batch_ind * channels + c) * height * width; + + int top_offset = (n * channels + c) * pooled_height * pooled_width; + const scalar_t *offset_top_diff = top_diff + top_offset; + const scalar_t top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sample_num > 0) + ? sample_num + : ceilf(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sample_num > 0) ? sample_num : ceilf(roi_width / pooled_width); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. + scalar_t roi_start_h = -roi_height / 2.0; + scalar_t roi_start_w = -roi_width / 2.0; + scalar_t cosTheta = cos(theta); + scalar_t sinTheta = sin(theta); + + // We do average (integral) pooling inside a bin + const scalar_t count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 + + for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 + const scalar_t yy = + roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const scalar_t xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + scalar_t y = yy * cosTheta - xx * sinTheta + roi_center_h; + scalar_t x = yy * sinTheta + xx * cosTheta + roi_center_w; + + scalar_t w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, + w4, x_low, x_high, y_low, + y_high, index); + + scalar_t g1 = top_diff_this_bin * w1 / count; + scalar_t g2 = top_diff_this_bin * w2 / count; + scalar_t g3 = top_diff_this_bin * w3 / count; + scalar_t g4 = top_diff_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + atomicAdd(offset_bottom_diff + y_low * width + x_low, g1); + atomicAdd(offset_bottom_diff + y_low * width + x_high, g2); + atomicAdd(offset_bottom_diff + y_high * width + x_low, g3); + atomicAdd(offset_bottom_diff + y_high * width + x_high, g4); + } // if + } // ix + } // iy + } // CUDA_1D_KERNEL_LOOP +} // RoIAlignBackward + +#endif // ROI_ALIGN_ROTATED_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/roi_pool_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/roi_pool_cuda_kernel.cuh new file mode 100644 index 0000000..39c7cb1 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/roi_pool_cuda_kernel.cuh @@ -0,0 +1,89 @@ +// Copyright (c) OpenMMLab. All rights reserved +#ifndef ROI_POOL_CUDA_KERNEL_CUH +#define ROI_POOL_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +template +__global__ void roi_pool_forward_cuda_kernel( + const int nthreads, const T* input, const T* rois, T* output, int* argmax, + const int pooled_height, const int pooled_width, const T spatial_scale, + const int channels, const int height, const int width) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + // calculate the roi region on feature maps + T roi_x1 = offset_rois[1] * spatial_scale; + T roi_y1 = offset_rois[2] * spatial_scale; + T roi_x2 = (offset_rois[3] + 1) * spatial_scale; + T roi_y2 = (offset_rois[4] + 1) * spatial_scale; + + // force malformed rois to be 1x1 + T roi_w = roi_x2 - roi_x1; + T roi_h = roi_y2 - roi_y1; + if (roi_w <= 0 || roi_h <= 0) continue; + + T bin_size_w = roi_w / static_cast(pooled_width); + T bin_size_h = roi_h / static_cast(pooled_height); + + // the corresponding bin region + int bin_x1 = floorf(static_cast(pw) * bin_size_w + roi_x1); + int bin_y1 = floorf(static_cast(ph) * bin_size_h + roi_y1); + int bin_x2 = ceilf(static_cast(pw + 1) * bin_size_w + roi_x1); + int bin_y2 = ceilf(static_cast(ph + 1) * bin_size_h + roi_y1); + + // add roi offsets and clip to input boundaries + bin_x1 = min(max(bin_x1, 0), width); + bin_y1 = min(max(bin_y1, 0), height); + bin_x2 = min(max(bin_x2, 0), width); + bin_y2 = min(max(bin_y2, 0), height); + bool is_empty = (bin_y2 <= bin_y1) || (bin_x2 <= bin_x1); + + const T* offset_input = + input + (roi_batch_ind * channels + c) * height * 
width; + // Define an empty pooling region to be zero + // If nothing is pooled, argmax = -1 causes nothing to be backprop'd + T max_val = is_empty ? 0 : -FLT_MAX; + int max_idx = -1; + for (int h = bin_y1; h < bin_y2; ++h) { + for (int w = bin_x1; w < bin_x2; ++w) { + int offset = h * width + w; + if (offset_input[offset] > max_val) { + max_val = offset_input[offset]; + max_idx = offset; + } + } + } + output[index] = max_val; + if (argmax != NULL) argmax[index] = max_idx; + } +} + +template +__global__ void roi_pool_backward_cuda_kernel( + const int nthreads, const T* grad_output, const T* rois, const int* argmax, + T* grad_input, const int pooled_height, const int pooled_width, + const int channels, const int height, const int width) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c) is an element in the pooled output + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + int roi_batch_ind = rois[n * 5]; + T* grad_input_offset = + grad_input + ((roi_batch_ind * channels + c) * height * width); + int argmax_index = argmax[index]; + + if (argmax_index != -1) { + atomicAdd(grad_input_offset + argmax_index, grad_output[index]); + } + } +} + +#endif // ROI_POOL_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/roiaware_pool3d_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/roiaware_pool3d_cuda_kernel.cuh new file mode 100644 index 0000000..4d56943 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/roiaware_pool3d_cuda_kernel.cuh @@ -0,0 +1,264 @@ +// Copyright (c) OpenMMLab. All rights reserved +#ifndef ROIAWARE_POOL3D_CUDA_KERNEL_CUH +#define ROIAWARE_POOL3D_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +template +__device__ inline void lidar_to_local_coords(T shift_x, T shift_y, T rz, + T &local_x, T &local_y) { + T cosa = cos(-rz), sina = sin(-rz); + local_x = shift_x * cosa + shift_y * (-sina); + local_y = shift_x * sina + shift_y * cosa; +} + +template +__device__ inline int check_pt_in_box3d(const T *pt, const T *box3d, T &local_x, + T &local_y) { + // param pt: (x, y, z) + // param box3d: (cx, cy, cz, x_size, y_size, z_size, rz) in LiDAR coordinate, + // cz in the bottom center + T x = pt[0], y = pt[1], z = pt[2]; + T cx = box3d[0], cy = box3d[1], cz = box3d[2]; + T x_size = box3d[3], y_size = box3d[4], z_size = box3d[5], rz = box3d[6]; + cz += z_size / + 2.0; // shift to the center since cz in box3d is the bottom center + + if (fabsf(z - cz) > z_size / 2.0) return 0; + lidar_to_local_coords(x - cx, y - cy, rz, local_x, local_y); + float in_flag = (local_x > -x_size / 2.0) & (local_x < x_size / 2.0) & + (local_y > -y_size / 2.0) & (local_y < y_size / 2.0); + return in_flag; +} + +template +__global__ void generate_pts_mask_for_box3d(int boxes_num, int pts_num, + int out_x, int out_y, int out_z, + const T *rois, const T *pts, + int *pts_mask) { + // params rois: (N, 7) [x, y, z, x_size, y_size, z_size, rz] in LiDAR + // coordinate params pts: (npoints, 3) [x, y, z] params pts_mask: (N, + // npoints): -1 means point does not in this box, otherwise: encode (x_idxs, + // y_idxs, z_idxs) by binary bit + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + int box_idx = blockIdx.y; + if (pt_idx >= pts_num || box_idx >= boxes_num) return; + + pts += pt_idx * 3; + rois += box_idx * 7; + pts_mask += box_idx * pts_num + pt_idx; + + T local_x = 0, local_y = 0; + int cur_in_flag = check_pt_in_box3d(pts, rois, local_x, local_y); + + pts_mask[0] = -1; + if (cur_in_flag > 0) { + T local_z = pts[2] - rois[2]; + T x_size = 
rois[3], y_size = rois[4], z_size = rois[5]; + + T x_res = x_size / out_x; + T y_res = y_size / out_y; + T z_res = z_size / out_z; + + unsigned int x_idx = int((local_x + x_size / 2) / x_res); + unsigned int y_idx = int((local_y + y_size / 2) / y_res); + unsigned int z_idx = int(local_z / z_res); + + x_idx = min(max(x_idx, 0), out_x - 1); + y_idx = min(max(y_idx, 0), out_y - 1); + z_idx = min(max(z_idx, 0), out_z - 1); + + unsigned int idx_encoding = (x_idx << 16) + (y_idx << 8) + z_idx; + + pts_mask[0] = idx_encoding; + } +} + +template +__global__ void collect_inside_pts_for_box3d(int boxes_num, int pts_num, + int max_pts_each_voxel, int out_x, + int out_y, int out_z, + const int *pts_mask, + T *pts_idx_of_voxels) { + // params pts_mask: (N, npoints) 0 or 1 + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) + + int box_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (box_idx >= boxes_num) return; + + int max_num_pts = max_pts_each_voxel - 1; // index 0 is the counter + pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel; + + for (int k = 0; k < pts_num; k++) { + if (pts_mask[box_idx * pts_num + k] != -1) { + unsigned int idx_encoding = pts_mask[box_idx * pts_num + k]; + unsigned int x_idx = (idx_encoding >> 16) & 0xFF; + unsigned int y_idx = (idx_encoding >> 8) & 0xFF; + unsigned int z_idx = idx_encoding & 0xFF; + unsigned int base_offset = x_idx * out_y * out_z * max_pts_each_voxel + + y_idx * out_z * max_pts_each_voxel + + z_idx * max_pts_each_voxel; + unsigned int cnt = pts_idx_of_voxels[base_offset]; + if (cnt < max_num_pts) { + pts_idx_of_voxels[base_offset + cnt + 1] = k; + pts_idx_of_voxels[base_offset]++; + } + } + } +} + +template +__global__ void roiaware_maxpool3d(int boxes_num, int pts_num, int channels, + int max_pts_each_voxel, int out_x, int out_y, + int out_z, const T *pts_feature, + const int *pts_idx_of_voxels, + T *pooled_features, int *argmax) { + // params pts_feature: (npoints, C) + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel), + // index 0 is the counter params pooled_features: (N, out_x, out_y, out_z, C) + // params argmax: (N, out_x, out_y, out_z, C) + + int box_idx = blockIdx.z; + int channel_idx = blockIdx.y; + int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x; + + int x_idx = voxel_idx_flat / (out_y * out_z); + int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z; + int z_idx = voxel_idx_flat % out_z; + if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x || + y_idx >= out_y || z_idx >= out_z) + return; + + int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx; + pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel + + offset_base * max_pts_each_voxel; + pooled_features += box_idx * out_x * out_y * out_z * channels + + offset_base * channels + channel_idx; + argmax += box_idx * out_x * out_y * out_z * channels + + offset_base * channels + channel_idx; + + int argmax_idx = -1; + float max_val = -1e50; + + int total_pts = pts_idx_of_voxels[0]; + + for (int k = 1; k <= total_pts; k++) { + if (pts_feature[pts_idx_of_voxels[k] * channels + channel_idx] > max_val) { + max_val = pts_feature[pts_idx_of_voxels[k] * channels + channel_idx]; + argmax_idx = pts_idx_of_voxels[k]; + } + } + + if (argmax_idx != -1) { + pooled_features[0] = max_val; + } + argmax[0] = argmax_idx; +} + +template +__global__ void roiaware_avgpool3d(int boxes_num, int pts_num, int channels, + int max_pts_each_voxel, int out_x, int out_y, + int out_z, 
const T *pts_feature, + const int *pts_idx_of_voxels, + T *pooled_features) { + // params pts_feature: (npoints, C) + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel), + // index 0 is the counter params pooled_features: (N, out_x, out_y, out_z, C) + // params argmax: (N, out_x, out_y, out_z, C) + + int box_idx = blockIdx.z; + int channel_idx = blockIdx.y; + int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x; + + int x_idx = voxel_idx_flat / (out_y * out_z); + int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z; + int z_idx = voxel_idx_flat % out_z; + if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x || + y_idx >= out_y || z_idx >= out_z) + return; + + int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx; + pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel + + offset_base * max_pts_each_voxel; + pooled_features += box_idx * out_x * out_y * out_z * channels + + offset_base * channels + channel_idx; + + float sum_val = 0; + int total_pts = pts_idx_of_voxels[0]; + + for (int k = 1; k <= total_pts; k++) { + sum_val += pts_feature[pts_idx_of_voxels[k] * channels + channel_idx]; + } + + if (total_pts > 0) { + pooled_features[0] = sum_val / total_pts; + } +} + +template +__global__ void roiaware_maxpool3d_backward(int boxes_num, int channels, + int out_x, int out_y, int out_z, + const int *argmax, + const T *grad_out, T *grad_in) { + // params argmax: (N, out_x, out_y, out_z, C) + // params grad_out: (N, out_x, out_y, out_z, C) + // params grad_in: (npoints, C), return value + + int box_idx = blockIdx.z; + int channel_idx = blockIdx.y; + int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x; + + int x_idx = voxel_idx_flat / (out_y * out_z); + int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z; + int z_idx = voxel_idx_flat % out_z; + if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x || + y_idx >= out_y || z_idx >= out_z) + return; + + int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx; + argmax += box_idx * out_x * out_y * out_z * channels + + offset_base * channels + channel_idx; + grad_out += box_idx * out_x * out_y * out_z * channels + + offset_base * channels + channel_idx; + + if (argmax[0] == -1) return; + + atomicAdd(grad_in + argmax[0] * channels + channel_idx, grad_out[0] * 1); +} + +template +__global__ void roiaware_avgpool3d_backward(int boxes_num, int channels, + int out_x, int out_y, int out_z, + int max_pts_each_voxel, + const int *pts_idx_of_voxels, + const T *grad_out, T *grad_in) { + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) + // params grad_out: (N, out_x, out_y, out_z, C) + // params grad_in: (npoints, C), return value + + int box_idx = blockIdx.z; + int channel_idx = blockIdx.y; + int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x; + + int x_idx = voxel_idx_flat / (out_y * out_z); + int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z; + int z_idx = voxel_idx_flat % out_z; + if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x || + y_idx >= out_y || z_idx >= out_z) + return; + + int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx; + pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel + + offset_base * max_pts_each_voxel; + grad_out += box_idx * out_x * out_y * out_z * channels + + offset_base * channels + channel_idx; + + int total_pts = pts_idx_of_voxels[0]; + float cur_grad = 1 / fmaxf(float(total_pts), 1.0); + for (int k = 1; k 
<= total_pts; k++) { + atomicAdd(grad_in + pts_idx_of_voxels[k] * channels + channel_idx, + grad_out[0] * cur_grad); + } +} + +#endif // ROIAWARE_POOL3D_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/roipoint_pool3d_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/roipoint_pool3d_cuda_kernel.cuh new file mode 100644 index 0000000..bef665a --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/roipoint_pool3d_cuda_kernel.cuh @@ -0,0 +1,140 @@ +// Copyright (c) OpenMMLab. All rights reserved +#ifndef ROIPOINT_POOL3D_CUDA_KERNEL_CUH +#define ROIPOINT_POOL3D_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +template +__device__ inline void lidar_to_local_coords(T shift_x, T shift_y, T rz, + T &local_x, T &local_y) { + T cosa = cos(-rz), sina = sin(-rz); + local_x = shift_x * cosa + shift_y * (-sina); + local_y = shift_x * sina + shift_y * cosa; +} + +template +__device__ inline int check_pt_in_box3d(const T *pt, const T *box3d, T &local_x, + T &local_y) { + // param pt: (x, y, z) + // param box3d: (cx, cy, cz, dx, dy, dz, rz) in LiDAR coordinate, cz in the + // bottom center + T x = pt[0], y = pt[1], z = pt[2]; + T cx = box3d[0], cy = box3d[1], cz = box3d[2]; + T dx = box3d[3], dy = box3d[4], dz = box3d[5], rz = box3d[6]; + cz += dz / 2.0; // shift to the center since cz in box3d is the bottom center + + if (fabsf(z - cz) > dz / 2.0) return 0; + lidar_to_local_coords(x - cx, y - cy, rz, local_x, local_y); + T in_flag = (local_x > -dx / 2.0) & (local_x < dx / 2.0) & + (local_y > -dy / 2.0) & (local_y < dy / 2.0); + return in_flag; +} + +template +__global__ void assign_pts_to_box3d(int batch_size, int pts_num, int boxes_num, + const T *xyz, const T *boxes3d, + int *pts_assign) { + // params xyz: (B, N, 3) + // params boxes3d: (B, M, 7) + // params pts_assign: (B, N, M): idx of the corresponding box3d, -1 means + // background points + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + int box_idx = blockIdx.y; + int bs_idx = blockIdx.z; + + if (pt_idx >= pts_num || box_idx >= boxes_num || bs_idx >= batch_size) { + return; + } + int assign_idx = bs_idx * pts_num * boxes_num + pt_idx * boxes_num + box_idx; + pts_assign[assign_idx] = 0; + + int box_offset = bs_idx * boxes_num * 7 + box_idx * 7; + int pt_offset = bs_idx * pts_num * 3 + pt_idx * 3; + + T local_x = 0, local_y = 0; + int cur_in_flag = check_pt_in_box3d(xyz + pt_offset, boxes3d + box_offset, + local_x, local_y); + pts_assign[assign_idx] = cur_in_flag; +} + +__global__ void get_pooled_idx(int batch_size, int pts_num, int boxes_num, + int sampled_pts_num, const int *pts_assign, + int *pts_idx, int *pooled_empty_flag) { + // params xyz: (B, N, 3) + // params pts_feature: (B, N, C) + // params pts_assign: (B, N) + // params pts_idx: (B, M, 512) + // params pooled_empty_flag: (B, M) + + int boxes_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (boxes_idx >= boxes_num) { + return; + } + + int bs_idx = blockIdx.y; + + int cnt = 0; + for (int k = 0; k < pts_num; k++) { + if (pts_assign[bs_idx * pts_num * boxes_num + k * boxes_num + boxes_idx]) { + if (cnt < sampled_pts_num) { + pts_idx[bs_idx * boxes_num * sampled_pts_num + + boxes_idx * sampled_pts_num + cnt] = k; + cnt++; + } else + break; + } + } + + if (cnt == 0) { + pooled_empty_flag[bs_idx * boxes_num + boxes_idx] = 1; + } else if (cnt < sampled_pts_num) { + // duplicate same points for sampling + for (int k = cnt; k < sampled_pts_num; k++) { + int duplicate_idx = k % cnt; + int base_offset = + bs_idx * boxes_num * sampled_pts_num + boxes_idx * sampled_pts_num; + 
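// k % cnt wraps around the cnt indices already collected, so a box + // containing fewer than sampled_pts_num interior points is padded by + // repeating its own points instead of leaving slots uninitialized. +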
pts_idx[base_offset + k] = pts_idx[base_offset + duplicate_idx]; + } + } +} + +template +__global__ void roipoint_pool3d_forward( + int batch_size, int pts_num, int boxes_num, int feature_in_len, + int sampled_pts_num, const T *xyz, const int *pts_idx, const T *pts_feature, + T *pooled_features, int *pooled_empty_flag) { + // params xyz: (B, N, 3) + // params pts_idx: (B, M, 512) + // params pts_feature: (B, N, C) + // params pooled_features: (B, M, 512, 3+C) + // params pooled_empty_flag: (B, M) + + int sample_pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + int box_idx = blockIdx.y; + int bs_idx = blockIdx.z; + + if (sample_pt_idx >= sampled_pts_num || box_idx >= boxes_num || + bs_idx >= batch_size) { + return; + } + + if (pooled_empty_flag[bs_idx * boxes_num + box_idx]) { + return; + } + + int temp_idx = bs_idx * boxes_num * sampled_pts_num + + box_idx * sampled_pts_num + sample_pt_idx; + int src_pt_idx = pts_idx[temp_idx]; + int dst_feature_offset = temp_idx * (3 + feature_in_len); + + for (int j = 0; j < 3; j++) + pooled_features[dst_feature_offset + j] = + xyz[bs_idx * pts_num * 3 + src_pt_idx * 3 + j]; + + int src_feature_offset = + bs_idx * pts_num * feature_in_len + src_pt_idx * feature_in_len; + memcpy(pooled_features + dst_feature_offset + 3, + pts_feature + src_feature_offset, feature_in_len * sizeof(T)); +} + +#endif // ROIPOINT_POOL3D_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/scatter_points_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/scatter_points_cuda_kernel.cuh new file mode 100644 index 0000000..c375f24 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/scatter_points_cuda_kernel.cuh @@ -0,0 +1,183 @@ +// Copyright (c) OpenMMLab. All rights reserved +#ifndef SCATTER_POINTS_CUDA_KERNEL_CUH +#define SCATTER_POINTS_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +typedef enum { SUM = 0, MEAN = 1, MAX = 2 } reduce_t; +int const maxGridDim = 50000; + +__device__ __forceinline__ static void reduceMax(float *address, float val) { + int *address_as_i = reinterpret_cast(address); + int old = *address_as_i, assumed; + do { + assumed = old; + old = atomicCAS(address_as_i, assumed, + __float_as_int(fmaxf(val, __int_as_float(assumed)))); + } while (assumed != old || __int_as_float(old) < val); +} + +__device__ __forceinline__ static void reduceMax(double *address, double val) { + unsigned long long *address_as_ull = + reinterpret_cast(address); + unsigned long long old = *address_as_ull, assumed; + do { + assumed = old; + old = atomicCAS( + address_as_ull, assumed, + __double_as_longlong(fmax(val, __longlong_as_double(assumed)))); + } while (assumed != old || __longlong_as_double(old) < val); +} + +// get rid of meaningless warnings when compiling host code +#ifdef HIP_DIFF +__device__ __forceinline__ static void reduceAdd(float *address, float val) { + atomicAdd(address, val); +} +__device__ __forceinline__ static void reduceAdd(double *address, double val) { + atomicAdd(address, val); +} +#else +#ifdef __CUDA_ARCH__ +__device__ __forceinline__ static void reduceAdd(float *address, float val) { +#if (__CUDA_ARCH__ < 200) +#ifdef _MSC_VER +#pragma message( \ + "compute capability lower than 2.x. fall back to use CAS version of atomicAdd for float32") +#else +#warning \ + "compute capability lower than 2.x. 
fall back to use CAS version of atomicAdd for float32" +#endif + int *address_as_i = reinterpret_cast(address); + int old = *address_as_i, assumed; + do { + assumed = old; + old = atomicCAS(address_as_i, assumed, + __float_as_int(val + __int_as_float(assumed))); + } while (assumed != old); +#else + atomicAdd(address, val); +#endif +} + +__device__ __forceinline__ static void reduceAdd(double *address, double val) { +#if (__CUDA_ARCH__ < 600) +#ifdef _MSC_VER +#pragma message( \ + "compute capability lower than 6.x. fall back to use CAS version of atomicAdd for float64") +#else +#warning \ + "compute capability lower than 6.x. fall back to use CAS version of atomicAdd for float64" +#endif + unsigned long long *address_as_ull = + reinterpret_cast(address); + unsigned long long old = *address_as_ull, assumed; + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, + __double_as_longlong(val + __longlong_as_double(assumed))); + } while (assumed != old); +#else + atomicAdd(address, val); +#endif +} +#endif // __CUDA_ARCH__ +#endif // HIP_DIFF + +template +__global__ void feats_reduce_kernel( + const T *feats, const int32_t *coors_map, + T *reduced_feats, // shall be 0 at initialization + const int num_input, const int num_feats, const reduce_t reduce_type) { + CUDA_1D_KERNEL_LOOP(x, num_input) { + int32_t reduce_to = coors_map[x]; + if (reduce_to == -1) continue; + + const T *feats_offset = feats + x * num_feats; + T *reduced_feats_offset = reduced_feats + reduce_to * num_feats; + if (reduce_type == reduce_t::MAX) { + for (int i = 0; i < num_feats; i++) { + reduceMax(&reduced_feats_offset[i], feats_offset[i]); + } + } else { + for (int i = 0; i < num_feats; i++) { + reduceAdd(&reduced_feats_offset[i], feats_offset[i]); + } + } + } +} + +template +__global__ void add_reduce_traceback_grad_kernel( + T *grad_feats, const T *grad_reduced_feats, const int32_t *coors_map, + const int32_t *reduce_count, const int num_input, const int num_feats, + const reduce_t reduce_type) { + CUDA_1D_KERNEL_LOOP(x, num_input) { + int32_t reduce_to = coors_map[x]; + if (reduce_to == -1) { + continue; + } + + const int input_offset = x * num_feats; + T *grad_feats_offset = grad_feats + input_offset; + const int reduced_offset = reduce_to * num_feats; + const T *grad_reduced_feats_offset = grad_reduced_feats + reduced_offset; + + if (reduce_type == reduce_t::SUM) { + for (int i = 0; i < num_feats; i++) { + grad_feats_offset[i] = grad_reduced_feats_offset[i]; + } + } else if (reduce_type == reduce_t::MEAN) { + for (int i = 0; i < num_feats; i++) { + grad_feats_offset[i] = grad_reduced_feats_offset[i] / + static_cast(reduce_count[reduce_to]); + } + } + } +} + +template +__global__ void max_reduce_traceback_scatter_idx_kernel( + const T *feats, const T *reduced_feats, int32_t *reduce_from, + const int32_t *coors_map, const int num_input, const int num_feats) { + CUDA_1D_KERNEL_LOOP(x, num_input) { + int32_t reduce_to = coors_map[x]; + + const int input_offset = x * num_feats; + const T *feats_offset = feats + input_offset; + + if (reduce_to == -1) { + continue; + } + + const int reduced_offset = reduce_to * num_feats; + const T *reduced_feats_offset = reduced_feats + reduced_offset; + int32_t *reduce_from_offset = reduce_from + reduced_offset; + + for (int i = 0; i < num_feats; i++) { + if (feats_offset[i] == reduced_feats_offset[i]) { + atomicMin(&reduce_from_offset[i], static_cast(x)); + } + } + } +} + +template +__global__ void max_reduce_scatter_grad_kernel(T *grad_feats, + const T *grad_reduced_feats, + 
const int32_t *reduce_from, + const int num_reduced, + const int num_feats) { + CUDA_1D_KERNEL_LOOP(x, num_reduced) { + const int reduced_offset = x * num_feats; + const int32_t *scatter_to_offset = reduce_from + reduced_offset; + const T *grad_reduced_feats_offset = grad_reduced_feats + reduced_offset; + + for (int i = 0; i < num_feats; i++) { + grad_feats[scatter_to_offset[i] * num_feats + i] = + grad_reduced_feats_offset[i]; + } + } +} + +#endif // SCATTER_POINTS_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/sigmoid_focal_loss_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/sigmoid_focal_loss_cuda_kernel.cuh new file mode 100644 index 0000000..d133589 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/sigmoid_focal_loss_cuda_kernel.cuh @@ -0,0 +1,67 @@ +// Copyright (c) OpenMMLab. All rights reserved +#ifndef SIGMOID_FOCAL_LOSS_CUDA_KERNEL_CUH +#define SIGMOID_FOCAL_LOSS_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +template <typename T> +__global__ void sigmoid_focal_loss_forward_cuda_kernel( + const int nthreads, const T* input, const int64_t* target, const T* weight, + T* output, const T gamma, const T alpha, const int num_classes) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + int n = index / num_classes; + int c = index % num_classes; + + int64_t t = target[n]; + T flag_p = (t == c); + T flag_n = (t != c); + + // p = sigmoid(x) = 1. / (1. + expf(-x)) + T p = (T)1. / ((T)1. + expf(-input[index])); + + // (1 - p)**gamma * log(p) + T term_p = pow(((T)1. - p), gamma) * log(max(p, (T)FLT_MIN)); + // p**gamma * log(1 - p) + T term_n = pow(p, gamma) * log(max((T)1. - p, (T)FLT_MIN)); + + output[index] = (T)0.; + output[index] += -flag_p * alpha * term_p; + output[index] += -flag_n * ((T)1. - alpha) * term_n; + if (weight != NULL) { + output[index] *= weight[t]; + } + } +} + +template <typename T> +__global__ void sigmoid_focal_loss_backward_cuda_kernel( + const int nthreads, const T* input, const int64_t* target, const T* weight, + T* grad_input, const T gamma, const T alpha, const int num_classes) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + int n = index / num_classes; + int c = index % num_classes; + + int64_t t = target[n]; + T flag_p = (t == c); + T flag_n = (t != c); + + // p = sigmoid(x) = 1. / (1. + expf(-x)) + T p = (T)1. / ((T)1. + expf(-input[index])); + + // (1 - p)**gamma * (1 - p - gamma*p*log(p)) + T term_p = pow(((T)1. - p), gamma) * + ((T)1. - p - (gamma * p * log(max(p, (T)FLT_MIN)))); + // p**gamma * (gamma * (1 - p) * log(1 - p) - p) + T term_n = pow(p, gamma) * + (gamma * ((T)1. - p) * log(max((T)1. - p, (T)FLT_MIN)) - p); + + grad_input[index] = (T)0.; + grad_input[index] += -flag_p * alpha * term_p; + grad_input[index] += -flag_n * ((T)1. - alpha) * term_n; + if (weight != NULL) { + grad_input[index] *= weight[t]; + } + } +} + +#endif // SIGMOID_FOCAL_LOSS_CUDA_KERNEL_CUH
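The focal-loss algebra above is easy to misread in a flattened diff, so here is a minimal host-side sketch of the forward term that mirrors the kernel arithmetic for one (sample, class) cell. It is a sanity-check sketch only: the function name sigmoid_focal_loss_ref and the test values are illustrative, not part of this patch.

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <cstdint>
#include <cstdio>

// CPU reference for one (sample, class) cell of the forward kernel:
//   -alpha * (1 - p)^gamma * log(p)        if c is the target class
//   -(1 - alpha) * p^gamma * log(1 - p)    otherwise
float sigmoid_focal_loss_ref(float x, std::int64_t t, int c, float gamma,
                             float alpha) {
  const float p = 1.f / (1.f + std::exp(-x));  // sigmoid(x)
  const float term_p =
      std::pow(1.f - p, gamma) * std::log(std::max(p, FLT_MIN));
  const float term_n =
      std::pow(p, gamma) * std::log(std::max(1.f - p, FLT_MIN));
  const float flag_p = (t == c) ? 1.f : 0.f;
  return -flag_p * alpha * term_p - (1.f - flag_p) * (1.f - alpha) * term_n;
}

int main() {
  // A confident logit on the true class is nearly free ...
  std::printf("%.6f\n", sigmoid_focal_loss_ref(4.f, 3, 3, 2.f, 0.25f));
  // ... while the same confidence on a wrong class is heavily penalized.
  std::printf("%.6f\n", sigmoid_focal_loss_ref(4.f, 1, 3, 2.f, 0.25f));
}

Comparing the two prints shows the (1 - p)^gamma modulating factor at work: easy, well-classified cells contribute almost nothing, so the loss concentrates on hard examples.

diff --git a/mmcv/ops/csrc/common/cuda/softmax_focal_loss_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/softmax_focal_loss_cuda_kernel.cuh new file mode 100644 index 0000000..64299b9 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/softmax_focal_loss_cuda_kernel.cuh @@ -0,0 +1,68 @@ +// Copyright (c) OpenMMLab.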
+#ifndef SOFTMAX_FOCAL_LOSS_CUDA_KERNEL_CUH
+#define SOFTMAX_FOCAL_LOSS_CUDA_KERNEL_CUH
+
+#include "pytorch_cuda_helper.hpp"
+
+template <typename T>
+__global__ void softmax_focal_loss_forward_cuda_kernel(
+    const int nthreads, const T* softmax, const int64_t* target,
+    const T* weight, T* output, const T gamma, const T alpha,
+    const int num_classes) {
+  CUDA_1D_KERNEL_LOOP(index, nthreads) {
+    int64_t label = target[index];
+    T pred = softmax[index * num_classes + label];
+
+    if (label >= 0) {
+      output[index] =
+          -alpha * pow((T)1. - pred, gamma) * log(max(pred, (T)FLT_MIN));
+    } else {
+      output[index] = 0;
+    }
+    if (weight != NULL) {
+      output[index] *= weight[label];
+    }
+  }
+}
+
+template <typename T>
+__global__ void softmax_focal_loss_backward_cuda1_kernel(
+    const int nthreads, const T* softmax, const int64_t* target,
+    const T* weight, T* buff, const T gamma, const T alpha,
+    const int num_classes) {
+  CUDA_1D_KERNEL_LOOP(index, nthreads) {
+    int64_t label = target[index];
+    T pred = softmax[index * num_classes + label];
+
+    if (label >= 0) {
+      buff[index] = alpha * (-pow((T)1. - pred, gamma) +
+                             gamma * pow((T)1. - pred, gamma - 1) * pred *
+                                 log(max(pred, (T)FLT_MIN)));
+    } else {
+      buff[index] = 0;
+    }
+    if (weight != NULL) {
+      buff[index] *= weight[label];
+    }
+  }
+}
+
+template <typename T>
+__global__ void softmax_focal_loss_backward_cuda2_kernel(
+    const int nthreads, const T* softmax, const int64_t* target, const T* buff,
+    T* grad_input, const int num_classes) {
+  CUDA_1D_KERNEL_LOOP(index, nthreads) {
+    int n = index / num_classes;
+    int c = index % num_classes;
+    int64_t label = target[n];
+
+    if (label >= 0) {
+      T flag = (label == c ? (T)1. : (T)0.);
+      grad_input[index] = buff[n] * (flag - softmax[index]);
+    } else {
+      grad_input[index] = 0;
+    }
+  }
+}
+
+#endif  // SOFTMAX_FOCAL_LOSS_CUDA_KERNEL_CUH
diff --git a/mmcv/ops/csrc/common/cuda/sync_bn_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/sync_bn_cuda_kernel.cuh
new file mode 100644
index 0000000..e16e637
--- /dev/null
+++ b/mmcv/ops/csrc/common/cuda/sync_bn_cuda_kernel.cuh
@@ -0,0 +1,327 @@
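The softmax focal loss backward is split into two passes: cuda1 produces a per-sample scalar buff[n] (the derivative of the focal term with respect to the target-class probability), and cuda2 fans it out across classes through the softmax Jacobian, grad_input[n, c] = buff[n] * (1[c == label] - softmax[n, c]). A host-side sketch combining the two passes for one sample (illustrative, not part of the patch):

// buff_n is the per-sample coefficient the cuda1 pass would produce.
void softmax_focal_loss_backward_ref(const float* softmax, int64_t label,
                                     float buff_n, int num_classes,
                                     float* grad_input) {
  for (int c = 0; c < num_classes; ++c) {
    float flag = (c == label) ? 1.f : 0.f;
    grad_input[c] = buff_n * (flag - softmax[c]);  // softmax Jacobian row
  }
}

+// Copyright (c) OpenMMLab.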
All rights reserved +#ifndef SYNCBN_CUDA_KERNEL_CUH +#define SYNCBN_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +template +__global__ void sync_bn_forward_mean_cuda_kernel(const T *input, float *mean, + int num, int channels, + int spatial) { + __shared__ float buffer[THREADS_PER_BLOCK]; + int tid = threadIdx.x; + int c = blockIdx.x; + buffer[tid] = 0; + for (int i = tid; i < num * spatial; i += blockDim.x) { + int index = (i / spatial) * channels * spatial + c * spatial + i % spatial; + buffer[tid] += input[index]; + } + __syncthreads(); + + for (int s = blockDim.x / 2; s > 0; s >>= 1) { + if (tid < s) { + buffer[tid] += buffer[tid + s]; + } + __syncthreads(); + } + int total = num * spatial; + if (tid == 0) { + mean[c] = buffer[0] / total; + } +} + +template <> +__global__ void sync_bn_forward_mean_cuda_kernel(const phalf *input, + float *mean, int num, + int channels, int spatial) { + __shared__ float buffer[THREADS_PER_BLOCK]; + int tid = threadIdx.x; + int c = blockIdx.x; + buffer[tid] = 0; + for (int i = tid; i < num * spatial; i += blockDim.x) { + int index = (i / spatial) * channels * spatial + c * spatial + i % spatial; + buffer[tid] += static_cast(input[index]); + } + __syncthreads(); + + for (int s = blockDim.x / 2; s > 0; s >>= 1) { + if (tid < s) { + buffer[tid] += buffer[tid + s]; + } + __syncthreads(); + } + int total = num * spatial; + if (tid == 0) { + mean[c] = buffer[0] / total; + } +} + +template +__global__ void sync_bn_forward_var_cuda_kernel(const T *input, + const float *mean, float *var, + int num, int channels, + int spatial) { + __shared__ float buffer[THREADS_PER_BLOCK]; + int tid = threadIdx.x; + int c = blockIdx.x; + buffer[tid] = 0; + for (int i = tid; i < num * spatial; i += blockDim.x) { + int index = (i / spatial) * channels * spatial + c * spatial + i % spatial; + float td = input[index] - mean[c]; + buffer[tid] += td * td; + } + __syncthreads(); + for (int s = blockDim.x / 2; s > 0; s >>= 1) { + if (tid < s) { + buffer[tid] += buffer[tid + s]; + } + __syncthreads(); + } + int total = num * spatial; + if (tid == 0) { + var[c] = buffer[0] / total; + } +} + +template <> +__global__ void sync_bn_forward_var_cuda_kernel(const phalf *input, + const float *mean, float *var, + int num, int channels, + int spatial) { + __shared__ float buffer[THREADS_PER_BLOCK]; + int tid = threadIdx.x; + int c = blockIdx.x; + buffer[tid] = 0; + for (int i = tid; i < num * spatial; i += blockDim.x) { + int index = (i / spatial) * channels * spatial + c * spatial + i % spatial; + float td = static_cast(input[index]) - mean[c]; + buffer[tid] += td * td; + } + __syncthreads(); + for (int s = blockDim.x / 2; s > 0; s >>= 1) { + if (tid < s) { + buffer[tid] += buffer[tid + s]; + } + __syncthreads(); + } + int total = num * spatial; + if (tid == 0) { + var[c] = buffer[0] / total; + } +} + +template +__global__ void sync_bn_forward_output_cuda_kernel( + const T *input, const float *mean, const float *var, float *running_mean, + float *running_var, const float *weight, const float *bias, float *norm, + float *std, T *output, int num, int channels, int spatial, float eps, + float momentum, int group_size) { + int tid = threadIdx.x; + int c = blockIdx.x; + float mean_value = mean[c]; + float std_value = sqrt(var[c] + eps); + + if (weight != nullptr) { + float weight_value = weight[c]; + float bias_value = bias[c]; + if (norm != nullptr) { + for (int i = tid; i < num * spatial; i += blockDim.x) { + int index = + (i / spatial) * channels * spatial + c * spatial + i % spatial; + 
norm[index] = (input[index] - mean_value) / std_value; + output[index] = norm[index] * weight_value + bias_value; + } + } else { + for (int i = tid; i < num * spatial; i += blockDim.x) { + int index = + (i / spatial) * channels * spatial + c * spatial + i % spatial; + output[index] = + (input[index] - mean_value) / std_value * weight_value + bias_value; + } + } + } else { + if (norm != nullptr) { + for (int i = tid; i < num * spatial; i += blockDim.x) { + int index = + (i / spatial) * channels * spatial + c * spatial + i % spatial; + output[index] = norm[index] = (input[index] - mean_value) / std_value; + } + } else { + for (int i = tid; i < num * spatial; i += blockDim.x) { + int index = + (i / spatial) * channels * spatial + c * spatial + i % spatial; + output[index] = (input[index] - mean_value) / std_value; + } + } + } + if (tid == 0) { + if (std != nullptr) std[c] = std_value; + if (running_mean != nullptr) { + running_mean[c] = + momentum * mean_value + (1 - momentum) * running_mean[c]; + int count = num * spatial * group_size; + float var_unbias = count > 1 ? var[c] * count / (count - 1) : var[c]; + running_var[c] = momentum * var_unbias + (1 - momentum) * running_var[c]; + } + } +} + +template <> +__global__ void sync_bn_forward_output_cuda_kernel( + const phalf *input, const float *mean, const float *var, + float *running_mean, float *running_var, const float *weight, + const float *bias, float *norm, float *std, phalf *output, int num, + int channels, int spatial, float eps, float momentum, int group_size) { + int tid = threadIdx.x; + int c = blockIdx.x; + float mean_value = mean[c]; + float std_value = sqrt(var[c] + eps); + if (weight != nullptr) { + float weight_value = weight[c]; + float bias_value = bias[c]; + if (norm != nullptr) { + for (int i = tid; i < num * spatial; i += blockDim.x) { + int index = + (i / spatial) * channels * spatial + c * spatial + i % spatial; + norm[index] = + (static_cast(input[index]) - mean_value) / std_value; + output[index] = + static_cast(norm[index] * weight_value + bias_value); + } + } else { + for (int i = tid; i < num * spatial; i += blockDim.x) { + int index = + (i / spatial) * channels * spatial + c * spatial + i % spatial; + output[index] = + static_cast((static_cast(input[index]) - mean_value) / + std_value * weight_value + + bias_value); + } + } + } else { + if (norm != nullptr) { + for (int i = tid; i < num * spatial; i += blockDim.x) { + int index = + (i / spatial) * channels * spatial + c * spatial + i % spatial; + norm[index] = + (static_cast(input[index]) - mean_value) / std_value; + output[index] = static_cast(norm[index]); + } + } else { + for (int i = tid; i < num * spatial; i += blockDim.x) { + int index = + (i / spatial) * channels * spatial + c * spatial + i % spatial; + output[index] = static_cast( + (static_cast(input[index]) - mean_value) / std_value); + } + } + } + if (tid == 0) { + if (std != nullptr) std[c] = std_value; + if (running_mean != nullptr) { + running_mean[c] = + momentum * mean_value + (1 - momentum) * running_mean[c]; + int count = num * spatial * group_size; + float var_unbias = count > 1 ? 
var[c] * count / (count - 1) : var[c];
+      running_var[c] = momentum * var_unbias + (1 - momentum) * running_var[c];
+    }
+  }
+}
+
+template <typename T>
+__global__ void sync_bn_backward_param_cuda_kernel(const T *grad_output,
+                                                   const float *norm,
+                                                   float *grad_weight,
+                                                   float *grad_bias, int num,
+                                                   int channels, int spatial) {
+  __shared__ float buffer1[THREADS_PER_BLOCK];
+  __shared__ float buffer2[THREADS_PER_BLOCK];
+
+  int tid = threadIdx.x;
+  int c = blockIdx.x;
+  buffer1[tid] = buffer2[tid] = 0;
+  for (int i = tid; i < num * spatial; i += blockDim.x) {
+    int index = (i / spatial) * channels * spatial + c * spatial + i % spatial;
+    buffer1[tid] += grad_output[index] * norm[index];
+    buffer2[tid] += grad_output[index];
+  }
+  __syncthreads();
+
+  for (int s = blockDim.x / 2; s > 0; s >>= 1) {
+    if (tid < s) {
+      buffer1[tid] += buffer1[tid + s];
+      buffer2[tid] += buffer2[tid + s];
+    }
+    __syncthreads();
+  }
+  if (tid == 0) {
+    grad_weight[c] = buffer1[0];
+    grad_bias[c] = buffer2[0];
+  }
+}
+
+template <>
+__global__ void sync_bn_backward_param_cuda_kernel(const phalf *grad_output,
+                                                   const float *norm,
+                                                   float *grad_weight,
+                                                   float *grad_bias, int num,
+                                                   int channels, int spatial) {
+  __shared__ float buffer1[THREADS_PER_BLOCK];
+  __shared__ float buffer2[THREADS_PER_BLOCK];
+
+  int tid = threadIdx.x;
+  int c = blockIdx.x;
+  buffer1[tid] = buffer2[tid] = 0;
+  for (int i = tid; i < num * spatial; i += blockDim.x) {
+    int index = (i / spatial) * channels * spatial + c * spatial + i % spatial;
+    buffer1[tid] += static_cast<float>(grad_output[index]) * norm[index];
+    buffer2[tid] += static_cast<float>(grad_output[index]);
+  }
+  __syncthreads();
+
+  for (int s = blockDim.x / 2; s > 0; s >>= 1) {
+    if (tid < s) {
+      buffer1[tid] += buffer1[tid + s];
+      buffer2[tid] += buffer2[tid + s];
+    }
+    __syncthreads();
+  }
+  if (tid == 0) {
+    grad_weight[c] = buffer1[0];
+    grad_bias[c] = buffer2[0];
+  }
+}
+
+template <typename T>
+__global__ void sync_bn_backward_data_cuda_kernel(
+    int output_size, const T *grad_output, const float *weight,
+    const float *grad_weight, const float *grad_bias, const float *norm,
+    const float *std, T *grad_input, int num, int channels, int spatial) {
+  int factor = num * spatial;
+  CUDA_1D_KERNEL_LOOP(index, output_size) {
+    int c = (index / spatial) % channels;
+    grad_input[index] =
+        weight[c] *
+        (grad_output[index] -
+         (grad_weight[c] * norm[index] + grad_bias[c]) / factor) /
+        std[c];
+  }
+}
+
+template <>
+__global__ void sync_bn_backward_data_cuda_kernel(
+    int output_size, const phalf *grad_output, const float *weight,
+    const float *grad_weight, const float *grad_bias, const float *norm,
+    const float *std, phalf *grad_input, int num, int channels, int spatial) {
+  int factor = num * spatial;
+  CUDA_1D_KERNEL_LOOP(index, output_size) {
+    int c = (index / spatial) % channels;
+    grad_input[index] = static_cast<phalf>(
+        weight[c] *
+        (static_cast<float>(grad_output[index]) -
+         (grad_weight[c] * norm[index] + grad_bias[c]) / factor) /
+        std[c]);
+  }
+}
+
+#endif  // SYNCBN_CUDA_KERNEL_CUH
diff --git a/mmcv/ops/csrc/common/cuda/three_interpolate_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/three_interpolate_cuda_kernel.cuh
new file mode 100644
index 0000000..1346b40
--- /dev/null
+++ b/mmcv/ops/csrc/common/cuda/three_interpolate_cuda_kernel.cuh
@@ -0,0 +1,57 @@
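All sync_bn kernels above reduce one channel per block with the same shared-memory tree: each thread accumulates a strided slice of the channel, then the buffer is folded in halves until slot 0 holds the block total. This assumes blockDim.x (THREADS_PER_BLOCK) is a power of two. The pattern as a standalone sketch (illustrative, not part of the patch):

// Assumes blockDim.x is a power of two and at most 256.
__global__ void block_sum(const float *in, float *out, int n) {
  __shared__ float buf[256];
  int tid = threadIdx.x;
  float acc = 0.f;
  for (int i = tid; i < n; i += blockDim.x) acc += in[i];  // strided partials
  buf[tid] = acc;
  __syncthreads();
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {  // fold the buffer in halves
    if (tid < s) buf[tid] += buf[tid + s];
    __syncthreads();
  }
  if (tid == 0) out[0] = buf[0];
}

+// Copyright (c) OpenMMLab. All rights reserved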
+#ifndef THREE_INTERPOLATE_CUDA_KERNEL_CUH
+#define THREE_INTERPOLATE_CUDA_KERNEL_CUH
+
+#include "pytorch_cuda_helper.hpp"
+
+template <typename T>
+__global__ void three_interpolate_forward_cuda_kernel(
+    int b, int c, int m, int n, const T *points, const int *__restrict__ idx,
+    const T *weight, T *out) {
+  // points: (B, C, M)
+  // idx: (B, N, 3)
+  // weight: (B, N, 3)
+  // output:
+  //      out: (B, C, N)
+
+  int bs_idx = blockIdx.z;
+  int c_idx = blockIdx.y;
+  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
+
+  if (bs_idx >= b || c_idx >= c || pt_idx >= n) return;
+
+  weight += bs_idx * n * 3 + pt_idx * 3;
+  points += bs_idx * c * m + c_idx * m;
+  idx += bs_idx * n * 3 + pt_idx * 3;
+  out += bs_idx * c * n + c_idx * n;
+
+  out[pt_idx] = weight[0] * points[idx[0]] + weight[1] * points[idx[1]] +
+                weight[2] * points[idx[2]];
+}
+
+template <typename T>
+__global__ void three_interpolate_backward_cuda_kernel(
+    int b, int c, int n, int m, const T *grad_out, const int *__restrict__ idx,
+    const T *weight, T *grad_points) {
+  // grad_out: (B, C, N)
+  // weight: (B, N, 3)
+  // output:
+  //      grad_points: (B, C, M)
+
+  int bs_idx = blockIdx.z;
+  int c_idx = blockIdx.y;
+  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
+
+  if (bs_idx >= b || c_idx >= c || pt_idx >= n) return;
+
+  grad_out += bs_idx * c * n + c_idx * n + pt_idx;
+  weight += bs_idx * n * 3 + pt_idx * 3;
+  grad_points += bs_idx * c * m + c_idx * m;
+  idx += bs_idx * n * 3 + pt_idx * 3;
+
+  atomicAdd(grad_points + idx[0], grad_out[0] * weight[0]);
+  atomicAdd(grad_points + idx[1], grad_out[0] * weight[1]);
+  atomicAdd(grad_points + idx[2], grad_out[0] * weight[2]);
+}
+
+#endif  // THREE_INTERPOLATE_CUDA_KERNEL_CUH
diff --git a/mmcv/ops/csrc/common/cuda/three_nn_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/three_nn_cuda_kernel.cuh
new file mode 100644
index 0000000..23fa091
--- /dev/null
+++ b/mmcv/ops/csrc/common/cuda/three_nn_cuda_kernel.cuh
@@ -0,0 +1,62 @@
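Each output feature above is a weighted combination of the features of the three nearest known points (indices and weights produced by three_nn below); the backward pass scatters the incoming gradient through the same three indices with atomicAdd, since several unknown points may select the same known point. A host-side reference of the forward combination for one (batch, channel, point) triple (a sketch, not part of the patch):

// points_row holds the m features of one channel; idx/w come from three_nn.
float three_interpolate_ref(const float *points_row, const int idx[3],
                            const float w[3]) {
  return w[0] * points_row[idx[0]] + w[1] * points_row[idx[1]] +
         w[2] * points_row[idx[2]];
}

+// Copyright (c) OpenMMLab.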
All rights reserved +#ifndef THREE_NN_CUDA_KERNEL_CUH +#define THREE_NN_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +template +__global__ void three_nn_forward_cuda_kernel(int b, int n, int m, + const T *unknown, const T *known, + T *dist2, int *__restrict__ idx) { + // unknown: (B, N, 3) + // known: (B, M, 3) + // output: + // dist2: (B, N, 3) + // idx: (B, N, 3) + + int bs_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= b || pt_idx >= n) return; + + unknown += bs_idx * n * 3 + pt_idx * 3; + known += bs_idx * m * 3; + dist2 += bs_idx * n * 3 + pt_idx * 3; + idx += bs_idx * n * 3 + pt_idx * 3; + + T ux = unknown[0]; + T uy = unknown[1]; + T uz = unknown[2]; + + double best1 = 1e40, best2 = 1e40, best3 = 1e40; + int besti1 = 0, besti2 = 0, besti3 = 0; + for (int k = 0; k < m; ++k) { + T x = known[k * 3 + 0]; + T y = known[k * 3 + 1]; + T z = known[k * 3 + 2]; + T d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z); + if (d < best1) { + best3 = best2; + besti3 = besti2; + best2 = best1; + besti2 = besti1; + best1 = d; + besti1 = k; + } else if (d < best2) { + best3 = best2; + besti3 = besti2; + best2 = d; + besti2 = k; + } else if (d < best3) { + best3 = d; + besti3 = k; + } + } + dist2[0] = best1; + dist2[1] = best2; + dist2[2] = best3; + idx[0] = besti1; + idx[1] = besti2; + idx[2] = besti3; +} + +#endif // THREE_NN_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/tin_shift_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/tin_shift_cuda_kernel.cuh new file mode 100644 index 0000000..8b7112d --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/tin_shift_cuda_kernel.cuh @@ -0,0 +1,57 @@ +// Copyright (c) OpenMMLab. All rights reserved +#ifndef TIN_SHIFT_CUDA_KERNEL_CUH +#define TIN_SHIFT_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +template +__global__ void tin_shift_forward_cuda_kernel( + const int nthreads, const T* input, const int* shift, T* output, + const int batch_size, const int channels, const int t_size, + const int hw_size, const int group_size, const int group_channel) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + const int hw_index = index % hw_size; + const int j = (index / hw_size) % channels; + + const int n_index = (index / hw_size / channels) % batch_size; + int group_id = j / group_channel; + int t_shift = shift[n_index * group_size + group_id]; + int offset = n_index * t_size * hw_size * channels + hw_size * j + hw_index; + for (int i = 0; i < t_size; i++) { + int now_t = i + t_shift; + int data_id = i * hw_size * channels + offset; + if (now_t < 0 || now_t >= t_size) { + continue; + } + int out_id = now_t * hw_size * channels + offset; + output[out_id] = input[data_id]; + } + } +} + +template +__global__ void tin_shift_backward_cuda_kernel( + const int nthreads, const T* input, const int* shift, T* output, + const int batch_size, const int channels, const int t_size, + const int hw_size, const int group_size, const int group_channel) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + const int hw_index = index % hw_size; + const int j = (index / hw_size) % channels; + + const int n_index = (index / hw_size / channels) % batch_size; + int group_id = j / group_channel; + int t_shift = shift[n_index * group_size + group_id]; + int offset = n_index * t_size * hw_size * channels + hw_size * j + hw_index; + for (int i = 0; i < t_size; i++) { + int now_t = i + t_shift; + int data_id = i * hw_size * channels + offset; + if (now_t < 0 || now_t >= t_size) { + continue; + } + int out_id = now_t * hw_size * channels 
+ offset; + output[out_id] = input[data_id]; + } + } +} + +#endif // TIN_SHIFT_CUDA_KERNEL_CUH diff --git a/mmcv/ops/csrc/common/cuda/voxelization_cuda_kernel.cuh b/mmcv/ops/csrc/common/cuda/voxelization_cuda_kernel.cuh new file mode 100644 index 0000000..f817662 --- /dev/null +++ b/mmcv/ops/csrc/common/cuda/voxelization_cuda_kernel.cuh @@ -0,0 +1,165 @@ +// Copyright (c) OpenMMLab. All rights reserved. +#ifndef VOXELIZATION_CUDA_KERNEL_CUH +#define VOXELIZATION_CUDA_KERNEL_CUH + +#include "pytorch_cuda_helper.hpp" + +typedef enum { SUM = 0, MEAN = 1, MAX = 2 } reduce_t; + +template +__global__ void dynamic_voxelize_kernel( + const T* points, T_int* coors, const float voxel_x, const float voxel_y, + const float voxel_z, const float coors_x_min, const float coors_y_min, + const float coors_z_min, const float coors_x_max, const float coors_y_max, + const float coors_z_max, const int grid_x, const int grid_y, + const int grid_z, const int num_points, const int num_features, + const int NDim) { + // const int index = blockIdx.x * threadsPerBlock + threadIdx.x; + CUDA_1D_KERNEL_LOOP(index, num_points) { + // To save some computation + auto points_offset = points + index * num_features; + auto coors_offset = coors + index * NDim; + int c_x = floor((points_offset[0] - coors_x_min) / voxel_x); + if (c_x < 0 || c_x >= grid_x) { + coors_offset[0] = -1; + continue; + } + + int c_y = floor((points_offset[1] - coors_y_min) / voxel_y); + if (c_y < 0 || c_y >= grid_y) { + coors_offset[0] = -1; + coors_offset[1] = -1; + continue; + } + + int c_z = floor((points_offset[2] - coors_z_min) / voxel_z); + if (c_z < 0 || c_z >= grid_z) { + coors_offset[0] = -1; + coors_offset[1] = -1; + coors_offset[2] = -1; + } else { + coors_offset[0] = c_z; + coors_offset[1] = c_y; + coors_offset[2] = c_x; + } + } +} + +template +__global__ void assign_point_to_voxel(const int nthreads, const T* points, + T_int* point_to_voxelidx, + T_int* coor_to_voxelidx, T* voxels, + const int max_points, + const int num_features, + const int num_points, const int NDim) { + CUDA_1D_KERNEL_LOOP(thread_idx, nthreads) { + // const int index = blockIdx.x * threadsPerBlock + threadIdx.x; + int index = thread_idx / num_features; + + int num = point_to_voxelidx[index]; + int voxelidx = coor_to_voxelidx[index]; + if (num > -1 && voxelidx > -1) { + auto voxels_offset = + voxels + voxelidx * max_points * num_features + num * num_features; + + int k = thread_idx % num_features; + voxels_offset[k] = points[thread_idx]; + } + } +} + +template +__global__ void assign_voxel_coors(const int nthreads, T_int* coor, + T_int* point_to_voxelidx, + T_int* coor_to_voxelidx, T_int* voxel_coors, + const int num_points, const int NDim) { + CUDA_1D_KERNEL_LOOP(thread_idx, nthreads) { + // const int index = blockIdx.x * threadsPerBlock + threadIdx.x; + // if (index >= num_points) return; + int index = thread_idx / NDim; + int num = point_to_voxelidx[index]; + int voxelidx = coor_to_voxelidx[index]; + if (num == 0 && voxelidx > -1) { + auto coors_offset = voxel_coors + voxelidx * NDim; + int k = thread_idx % NDim; + coors_offset[k] = coor[thread_idx]; + } + } +} + +template +__global__ void point_to_voxelidx_kernel(const T_int* coor, + T_int* point_to_voxelidx, + T_int* point_to_pointidx, + const int max_points, + const int max_voxels, + const int num_points, const int NDim) { + CUDA_1D_KERNEL_LOOP(index, num_points) { + auto coor_offset = coor + index * NDim; + // skip invalid points + if ((index >= num_points) || (coor_offset[0] == -1)) return; + + int num = 0; + 
int coor_x = coor_offset[0];
+    int coor_y = coor_offset[1];
+    int coor_z = coor_offset[2];
+    // only calculate the coors before this coor[index]
+    for (int i = 0; i < index; ++i) {
+      auto prev_coor = coor + i * NDim;
+      if (prev_coor[0] == -1) continue;
+
+      // Find all previous points that have the same coors
+      // if find the same coor, record it
+      if ((prev_coor[0] == coor_x) && (prev_coor[1] == coor_y) &&
+          (prev_coor[2] == coor_z)) {
+        num++;
+        if (num == 1) {
+          // point to the same coor that first show up
+          point_to_pointidx[index] = i;
+        } else if (num >= max_points) {
+          // out of boundary
+          return;
+        }
+      }
+    }
+    if (num == 0) {
+      point_to_pointidx[index] = index;
+    }
+    if (num < max_points) {
+      point_to_voxelidx[index] = num;
+    }
+  }
+}
+
+template <typename T_int>
+__global__ void determin_voxel_num(
+    // const T_int* coor,
+    T_int* num_points_per_voxel, T_int* point_to_voxelidx,
+    T_int* point_to_pointidx, T_int* coor_to_voxelidx, T_int* voxel_num,
+    const int max_points, const int max_voxels, const int num_points) {
+  // only calculate the coors before this coor[index]
+  for (int i = 0; i < num_points; ++i) {
+    int point_pos_in_voxel = point_to_voxelidx[i];
+    // record voxel
+    if (point_pos_in_voxel == -1) {
+      // out of max_points or invalid point
+      continue;
+    } else if (point_pos_in_voxel == 0) {
+      // record new voxel
+      int voxelidx = voxel_num[0];
+      if (voxel_num[0] >= max_voxels) continue;
+      voxel_num[0] += 1;
+      coor_to_voxelidx[i] = voxelidx;
+      num_points_per_voxel[voxelidx] = 1;
+    } else {
+      int point_idx = point_to_pointidx[i];
+      int voxelidx = coor_to_voxelidx[point_idx];
+      if (voxelidx != -1) {
+        coor_to_voxelidx[i] = voxelidx;
+        num_points_per_voxel[voxelidx] += 1;
+      }
+    }
+  }
+}
+
+#endif  // VOXELIZATION_CUDA_KERNEL_CUH
diff --git a/mmcv/ops/csrc/common/pytorch_cpp_helper.hpp b/mmcv/ops/csrc/common/pytorch_cpp_helper.hpp
new file mode 100644
index 0000000..c7f9f35
--- /dev/null
+++ b/mmcv/ops/csrc/common/pytorch_cpp_helper.hpp
@@ -0,0 +1,24 @@
+#ifndef PYTORCH_CPP_HELPER
+#define PYTORCH_CPP_HELPER
+#include <torch/extension.h>
+
+#include <vector>
+
+using namespace at;
+
+#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
+
+#define CHECK_CUDA(x) \
+  TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor")
+#define CHECK_CPU(x) \
+  TORCH_CHECK(!x.device().is_cuda(), #x " must be a CPU tensor")
+#define CHECK_CONTIGUOUS(x) \
+  TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
+#define CHECK_CUDA_INPUT(x) \
+  CHECK_CUDA(x);            \
+  CHECK_CONTIGUOUS(x)
+#define CHECK_CPU_INPUT(x) \
+  CHECK_CPU(x);            \
+  CHECK_CONTIGUOUS(x)
+
+#endif  // PYTORCH_CPP_HELPER
diff --git a/mmcv/ops/csrc/common/pytorch_cuda_helper.hpp b/mmcv/ops/csrc/common/pytorch_cuda_helper.hpp
new file mode 100644
index 0000000..9869b53
--- /dev/null
+++ b/mmcv/ops/csrc/common/pytorch_cuda_helper.hpp
@@ -0,0 +1,19 @@
+#ifndef PYTORCH_CUDA_HELPER
+#define PYTORCH_CUDA_HELPER
+
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAContext.h>
+#include <c10/cuda/CUDAGuard.h>
+
+#include <ATen/cuda/CUDAApplyUtils.cuh>
+#include <THC/THCAtomics.cuh>
+
+#include "common_cuda_helper.hpp"
+
+using at::Half;
+using at::Tensor;
+using phalf = at::Half;
+
+#define __PHALF(x) (x)
+
+#endif  // PYTORCH_CUDA_HELPER
diff --git a/mmcv/ops/csrc/common/pytorch_device_registry.hpp b/mmcv/ops/csrc/common/pytorch_device_registry.hpp
new file mode 100644
index 0000000..2a32b72
--- /dev/null
+++ b/mmcv/ops/csrc/common/pytorch_device_registry.hpp
@@ -0,0 +1,141 @@
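The two helper headers above supply the boilerplate shared by every op binding in this patch: DIVUP for ceil-divide grid sizing and the CHECK_* macros for input validation. A minimal usage sketch, assuming pytorch_cpp_helper.hpp is included (the function name is hypothetical, not part of the patch):

// Hypothetical binding: guard inputs, then size a launch grid.
void example_op(Tensor input, Tensor output) {
  CHECK_CUDA_INPUT(input);   // must be a CUDA tensor and contiguous
  CHECK_CUDA_INPUT(output);
  int threads = 256;
  int blocks = DIVUP(input.numel(), threads);  // ceil(numel / threads)
  // ... launch a kernel with <<<blocks, threads>>> here ...
}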
+#ifndef PYTORCH_DEVICE_REGISTRY_H
+#define PYTORCH_DEVICE_REGISTRY_H
+
+// Using <torch/extension.h> is recommended in the official documentation in
+// https://pytorch.org/tutorials/advanced/cpp_extension.html#writing-the-c-op.
+// However, we use <torch/types.h> for compatibility with CUDA 9.0
+// Read https://github.com/pytorch/extension-cpp/issues/35 for more details.
+#include <torch/types.h>
+
+#include <cassert>
+#include <functional>
+#include <map>
+#include <type_traits>
+
+inline std::string GetDeviceStr(const at::Device& device) {
+  std::string str = DeviceTypeName(device.type(), true);
+  if (device.has_index()) {
+    str.push_back(':');
+    str.append(std::to_string(device.index()));
+  }
+  return str;
+}
+
+// Registry
+template <typename F, F f>
+class DeviceRegistry;
+
+template <typename Ret, typename... Args, Ret (*f)(Args...)>
+class DeviceRegistry<Ret (*)(Args...), f> {
+ public:
+  using FunctionType = Ret (*)(Args...);
+  static const int MAX_DEVICE_TYPES =
+      int8_t(at::DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES);
+
+  void Register(at::DeviceType device, FunctionType function) {
+    funcs_[int8_t(device)] = function;
+  }
+
+  FunctionType Find(at::DeviceType device) const {
+    return funcs_[int8_t(device)];
+  }
+
+  static DeviceRegistry& instance() {
+    static DeviceRegistry inst;
+    return inst;
+  }
+
+ private:
+  DeviceRegistry() {
+    for (size_t i = 0; i < MAX_DEVICE_TYPES; ++i) {
+      funcs_[i] = nullptr;
+    }
+  };
+  FunctionType funcs_[MAX_DEVICE_TYPES];
+};
+
+// get device of first tensor param
+
+template <typename T, typename... Args,
+          std::enable_if_t<std::is_same<std::decay_t<T>, at::Tensor>::value,
+                           bool> = true>
+at::Device GetFirstTensorDevice(T&& t, Args&&... args) {
+  return std::forward<T>(t).device();
+}
+template <typename T, typename... Args,
+          std::enable_if_t<!std::is_same<std::decay_t<T>, at::Tensor>::value,
+                           bool> = true>
+at::Device GetFirstTensorDevice(T&& t, Args&&... args) {
+  return GetFirstTensorDevice(std::forward<Args>(args)...);
+}
+
+// check device consistency
+
+inline std::pair<int, at::Device> CheckDeviceConsistency(
+    const at::Device& device, int index) {
+  return {index, device};
+}
+
+template <typename T, typename... Args,
+          std::enable_if_t<!std::is_same<std::decay_t<T>, at::Tensor>::value,
+                           bool> = true>
+std::pair<int, at::Device> CheckDeviceConsistency(const at::Device& device,
+                                                  int index, T&& t,
+                                                  Args&&... args);
+
+template <typename T, typename... Args,
+          std::enable_if_t<std::is_same<std::decay_t<T>, at::Tensor>::value,
+                           bool> = true>
+std::pair<int, at::Device> CheckDeviceConsistency(const at::Device& device,
+                                                  int index, T&& t,
+                                                  Args&&... args) {
+  auto new_device = std::forward<T>(t).device();
+  if (new_device.type() != device.type() ||
+      new_device.index() != device.index()) {
+    return {index, new_device};
+  }
+  return CheckDeviceConsistency(device, index + 1, std::forward<Args>(args)...);
+}
+
+template <
+    typename T, typename... Args,
+    std::enable_if_t<!std::is_same<std::decay_t<T>, at::Tensor>::value, bool>>
+std::pair<int, at::Device> CheckDeviceConsistency(const at::Device& device,
+                                                  int index, T&& t,
+                                                  Args&&... args) {
+  return CheckDeviceConsistency(device, index + 1, std::forward<Args>(args)...);
+}
+
+// dispatch
+
+template <typename R, typename... Args>
+auto Dispatch(const R& registry, const char* name, Args&&... args) {
+  auto device = GetFirstTensorDevice(std::forward<Args>(args)...);
+  auto inconsist =
+      CheckDeviceConsistency(device, 0, std::forward<Args>(args)...);
+  TORCH_CHECK(inconsist.first >= int(sizeof...(Args)), name, ": at param ",
+              inconsist.first,
+              ", inconsistent device: ", GetDeviceStr(inconsist.second).c_str(),
+              " vs ", GetDeviceStr(device).c_str(), "\n")
+  auto f_ptr = registry.Find(device.type());
+  TORCH_CHECK(f_ptr != nullptr, name, ": implementation for device ",
+              GetDeviceStr(device).c_str(), " not found.\n")
+  return f_ptr(std::forward<Args>(args)...);
+}
+
+// helper macro
+
+#define DEVICE_REGISTRY(key) DeviceRegistry<decltype(&(key)), key>::instance()
+
+#define REGISTER_DEVICE_IMPL(key, device, value)           \
+  struct key##_##device##_registerer {                     \
+    key##_##device##_registerer() {                        \
+      DEVICE_REGISTRY(key).Register(at::k##device, value); \
+    }                                                      \
+  };                                                       \
+  static key##_##device##_registerer _##key##_##device##_registerer;
+
+#define DISPATCH_DEVICE_IMPL(key, ...) \
+  Dispatch(DEVICE_REGISTRY(key), #key, __VA_ARGS__)
+
+#endif  // PYTORCH_DEVICE_REGISTRY
diff --git a/mmcv/ops/csrc/pytorch/assign_score_withk.cpp b/mmcv/ops/csrc/pytorch/assign_score_withk.cpp
new file mode 100644
index 0000000..9076277
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/assign_score_withk.cpp
@@ -0,0 +1,42 @@
+// Modified from
+// https://github.com/CVMI-Lab/PAConv/tree/main/scene_seg/lib/paconv_lib/src/gpu
+#include "pytorch_cpp_helper.hpp"
+#include "pytorch_device_registry.hpp"
+
+void assign_score_withk_forward_impl(int B, int N0, int N1, int M, int K, int O,
+                                     int aggregate, const Tensor& points,
+                                     const Tensor& centers,
+                                     const Tensor& scores,
+                                     const Tensor& knn_idx, Tensor& output) {
+  DISPATCH_DEVICE_IMPL(assign_score_withk_forward_impl, B, N0, N1, M, K, O,
+                       aggregate, points, centers, scores, knn_idx, output);
+}
+
+void assign_score_withk_backward_impl(
+    int B, int N0, int N1, int M, int K, int O, int aggregate,
+    const Tensor& grad_out, const Tensor& points, const Tensor& centers,
+    const Tensor& scores, const Tensor& knn_idx, Tensor& grad_points,
+    Tensor& grad_centers, Tensor& grad_scores) {
+  DISPATCH_DEVICE_IMPL(assign_score_withk_backward_impl, B, N0, N1, M, K, O,
+                       aggregate, grad_out, points, centers, scores, knn_idx,
+                       grad_points, grad_centers, grad_scores);
+}
+
+void assign_score_withk_forward(const Tensor& points, const Tensor& centers,
+                                const Tensor& scores, const Tensor& knn_idx,
+                                Tensor& output, int B, int N0, int N1, int M,
+                                int K, int O, int aggregate) {
+  assign_score_withk_forward_impl(B, N0, N1, M, K, O, aggregate, points,
+                                  centers, scores, knn_idx, output);
+}
+
+void assign_score_withk_backward(const Tensor& grad_out, const Tensor& points,
+                                 const Tensor& centers, const Tensor& scores,
+                                 const Tensor& knn_idx, Tensor& grad_points,
+                                 Tensor& grad_centers, Tensor& grad_scores,
+                                 int B, int N0, int N1, int M, int K, int O,
+                                 int aggregate) {
+  assign_score_withk_backward_impl(B, N0, N1, M, K, O, aggregate, grad_out,
+                                   points, centers, scores, knn_idx,
+                                   grad_points, grad_centers, grad_scores);
+}
diff --git a/mmcv/ops/csrc/pytorch/ball_query.cpp b/mmcv/ops/csrc/pytorch/ball_query.cpp
new file mode 100644
index 0000000..1c9e7a2
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/ball_query.cpp
@@ -0,0 +1,20 @@
+// Modified from
+// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/ball_query.cpp
+
+#include "pytorch_cpp_helper.hpp"
+#include "pytorch_device_registry.hpp"
+
+void ball_query_forward_impl(int b, int n, int m, float min_radius,
+                             float max_radius, int nsample,
+                             const Tensor new_xyz, const Tensor xyz,
+                             Tensor idx) {
+  DISPATCH_DEVICE_IMPL(ball_query_forward_impl, b, n, m, min_radius, max_radius,
+                       nsample, new_xyz, xyz, idx);
+}
+
+void ball_query_forward(Tensor new_xyz_tensor, Tensor xyz_tensor,
+                        Tensor idx_tensor, int b, int n, int m,
+                        float min_radius, float max_radius, int nsample) {
+  ball_query_forward_impl(b, n, m, min_radius, max_radius, nsample,
+                          new_xyz_tensor, xyz_tensor, idx_tensor);
+}
diff --git a/mmcv/ops/csrc/pytorch/bbox_overlaps.cpp b/mmcv/ops/csrc/pytorch/bbox_overlaps.cpp
new file mode 100644
index 0000000..187216f
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/bbox_overlaps.cpp
@@ -0,0 +1,14 @@
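assign_score_withk.cpp and ball_query.cpp above show the pattern every op in this patch follows: a *_impl function that forwards through DISPATCH_DEVICE_IMPL, which picks the backend from the device of the first Tensor argument, plus a public entry point that calls it. Backends are wired up with REGISTER_DEVICE_IMPL. A minimal sketch with a hypothetical op name (illustrative only, assuming the two helper headers are included):

// Dispatcher, declared once next to the op's public entry point.
void my_op_impl(Tensor input, Tensor output) {
  DISPATCH_DEVICE_IMPL(my_op_impl, input, output);
}
// CPU backend, registered from its own translation unit; a CUDA backend
// would be registered the same way with REGISTER_DEVICE_IMPL(my_op_impl, CUDA, ...).
void my_op_cpu(Tensor input, Tensor output) { output.copy_(input); }
REGISTER_DEVICE_IMPL(my_op_impl, CPU, my_op_cpu);

+// Copyright (c) OpenMMLab.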
All rights reserved +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void bbox_overlaps_impl(const Tensor bboxes1, const Tensor bboxes2, Tensor ious, + const int mode, const bool aligned, const int offset) { + DISPATCH_DEVICE_IMPL(bbox_overlaps_impl, bboxes1, bboxes2, ious, mode, + aligned, offset); +} + +void bbox_overlaps(const Tensor bboxes1, const Tensor bboxes2, Tensor ious, + const int mode, const bool aligned, const int offset) { + bbox_overlaps_impl(bboxes1, bboxes2, ious, mode, aligned, offset); +} diff --git a/mmcv/ops/csrc/pytorch/border_align.cpp b/mmcv/ops/csrc/pytorch/border_align.cpp new file mode 100644 index 0000000..565de68 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/border_align.cpp @@ -0,0 +1,30 @@ +// Copyright (c) OpenMMLab. All rights reserved +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void border_align_forward_impl(const Tensor &input, const Tensor &boxes, + Tensor output, Tensor argmax_idx, + const int pool_size) { + DISPATCH_DEVICE_IMPL(border_align_forward_impl, input, boxes, output, + argmax_idx, pool_size); +} + +void border_align_backward_impl(const Tensor &grad_output, const Tensor &boxes, + const Tensor &argmax_idx, Tensor grad_input, + const int pool_size) { + DISPATCH_DEVICE_IMPL(border_align_backward_impl, grad_output, boxes, + argmax_idx, grad_input, pool_size); +} + +void border_align_forward(const Tensor &input, const Tensor &boxes, + Tensor output, Tensor argmax_idx, + const int pool_size) { + border_align_forward_impl(input, boxes, output, argmax_idx, pool_size); +} + +void border_align_backward(const Tensor &grad_output, const Tensor &boxes, + const Tensor &argmax_idx, Tensor grad_input, + const int pool_size) { + border_align_backward_impl(grad_output, boxes, argmax_idx, grad_input, + pool_size); +} diff --git a/mmcv/ops/csrc/pytorch/box_iou_rotated.cpp b/mmcv/ops/csrc/pytorch/box_iou_rotated.cpp new file mode 100644 index 0000000..a2a4e09 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/box_iou_rotated.cpp @@ -0,0 +1,19 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +// modified from +// https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void box_iou_rotated_impl(const Tensor boxes1, const Tensor boxes2, Tensor ious, + const int mode_flag, const bool aligned) { + DISPATCH_DEVICE_IMPL(box_iou_rotated_impl, boxes1, boxes2, ious, mode_flag, + aligned); +} + +// Interface for Python +// inline is needed to prevent multiple function definitions when this header is +// included by different cpps +void box_iou_rotated(const Tensor boxes1, const Tensor boxes2, Tensor ious, + const int mode_flag, const bool aligned) { + box_iou_rotated_impl(boxes1, boxes2, ious, mode_flag, aligned); +} diff --git a/mmcv/ops/csrc/pytorch/carafe.cpp b/mmcv/ops/csrc/pytorch/carafe.cpp new file mode 100644 index 0000000..a563aed --- /dev/null +++ b/mmcv/ops/csrc/pytorch/carafe.cpp @@ -0,0 +1,38 @@ +// Copyright (c) OpenMMLab. 
All rights reserved +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void carafe_forward_impl(Tensor features, Tensor masks, Tensor rfeatures, + Tensor routput, Tensor rmasks, Tensor output, + int kernel_size, int group_size, int scale_factor) { + DISPATCH_DEVICE_IMPL(carafe_forward_impl, features, masks, rfeatures, routput, + rmasks, output, kernel_size, group_size, scale_factor); +} + +void carafe_backward_impl(Tensor top_grad, Tensor rfeatures, Tensor masks, + Tensor rtop_grad, Tensor rbottom_grad_hs, + Tensor rbottom_grad, Tensor rmask_grad, + Tensor bottom_grad, Tensor mask_grad, int kernel_size, + int group_size, int scale_factor) { + DISPATCH_DEVICE_IMPL(carafe_backward_impl, top_grad, rfeatures, masks, + rtop_grad, rbottom_grad_hs, rbottom_grad, rmask_grad, + bottom_grad, mask_grad, kernel_size, group_size, + scale_factor); +} + +void carafe_forward(Tensor features, Tensor masks, Tensor rfeatures, + Tensor routput, Tensor rmasks, Tensor output, + int kernel_size, int group_size, int scale_factor) { + carafe_forward_impl(features, masks, rfeatures, routput, rmasks, output, + kernel_size, group_size, scale_factor); +} + +void carafe_backward(Tensor top_grad, Tensor rfeatures, Tensor masks, + Tensor rtop_grad, Tensor rbottom_grad_hs, + Tensor rbottom_grad, Tensor rmask_grad, Tensor bottom_grad, + Tensor mask_grad, int kernel_size, int group_size, + int scale_factor) { + carafe_backward_impl(top_grad, rfeatures, masks, rtop_grad, rbottom_grad_hs, + rbottom_grad, rmask_grad, bottom_grad, mask_grad, + kernel_size, group_size, scale_factor); +} diff --git a/mmcv/ops/csrc/pytorch/carafe_naive.cpp b/mmcv/ops/csrc/pytorch/carafe_naive.cpp new file mode 100644 index 0000000..6e8917a --- /dev/null +++ b/mmcv/ops/csrc/pytorch/carafe_naive.cpp @@ -0,0 +1,32 @@ +// Copyright (c) OpenMMLab. All rights reserved +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void carafe_naive_forward_impl(Tensor features, Tensor masks, Tensor output, + int kernel_size, int group_size, + int scale_factor) { + DISPATCH_DEVICE_IMPL(carafe_naive_forward_impl, features, masks, output, + kernel_size, group_size, scale_factor); +} + +void carafe_naive_backward_impl(Tensor top_grad, Tensor features, Tensor masks, + Tensor bottom_grad, Tensor mask_grad, + int kernel_size, int group_size, + int scale_factor) { + DISPATCH_DEVICE_IMPL(carafe_naive_backward_impl, top_grad, features, masks, + bottom_grad, mask_grad, kernel_size, group_size, + scale_factor); +} + +void carafe_naive_forward(Tensor features, Tensor masks, Tensor output, + int kernel_size, int group_size, int scale_factor) { + carafe_naive_forward_impl(features, masks, output, kernel_size, group_size, + scale_factor); +} + +void carafe_naive_backward(Tensor top_grad, Tensor features, Tensor masks, + Tensor bottom_grad, Tensor mask_grad, + int kernel_size, int group_size, int scale_factor) { + carafe_naive_backward_impl(top_grad, features, masks, bottom_grad, mask_grad, + kernel_size, group_size, scale_factor); +} diff --git a/mmcv/ops/csrc/pytorch/contour_expand.cpp b/mmcv/ops/csrc/pytorch/contour_expand.cpp new file mode 100755 index 0000000..7639ae5 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/contour_expand.cpp @@ -0,0 +1,112 @@ +// Copyright (c) OpenMMLab. 
All rights reserved
+// It is modified from https://github.com/whai362/PSENet
+#include <iostream>
+#include <queue>
+
+#include "pytorch_cpp_helper.hpp"
+
+using namespace std;
+
+class Point2d {
+ public:
+  int x;
+  int y;
+
+  Point2d() : x(0), y(0) {}
+  Point2d(int _x, int _y) : x(_x), y(_y) {}
+};
+
+void kernel_dilate(const uint8_t *data, IntArrayRef data_shape,
+                   const int *label_map, int &label_num, int &min_area,
+                   vector<vector<int>> &text_line) {
+  std::vector<int> area(label_num + 1);
+  int kernel_num = data_shape[0];
+  int height = data_shape[1];
+  int width = data_shape[2];
+
+  for (int x = 0; x < height; ++x) {
+    for (int y = 0; y < width; ++y) {
+      int label = label_map[x * width + y];
+      if (label == 0) continue;
+      area[label] += 1;
+    }
+  }
+
+  queue<Point2d> queue, next_queue;
+  for (int x = 0; x < height; ++x) {
+    vector<int> row(width);
+    for (int y = 0; y < width; ++y) {
+      int label = label_map[x * width + y];
+      if (label == 0) continue;
+      if (area[label] < min_area) continue;
+
+      Point2d point(x, y);
+      queue.push(point);
+      row[y] = label;
+    }
+    text_line.emplace_back(row);
+  }
+
+  int dx[] = {-1, 1, 0, 0};
+  int dy[] = {0, 0, -1, 1};
+  vector<int> kernel_step(kernel_num);
+  std::for_each(kernel_step.begin(), kernel_step.end(),
+                [=](int &k) { return k * height * width; });
+
+  for (int kernel_id = kernel_num - 2; kernel_id >= 0; --kernel_id) {
+    while (!queue.empty()) {
+      Point2d point = queue.front();
+      queue.pop();
+      int x = point.x;
+      int y = point.y;
+      int label = text_line[x][y];
+
+      bool is_edge = true;
+      for (int d = 0; d < 4; ++d) {
+        int tmp_x = x + dx[d];
+        int tmp_y = y + dy[d];
+
+        if (tmp_x < 0 || tmp_x >= height) continue;
+        if (tmp_y < 0 || tmp_y >= width) continue;
+        int kernel_value = data[kernel_step[kernel_id] + tmp_x * width + tmp_y];
+        if (kernel_value == 0) continue;
+        if (text_line[tmp_x][tmp_y] > 0) continue;
+
+        Point2d point(tmp_x, tmp_y);
+        queue.push(point);
+        text_line[tmp_x][tmp_y] = label;
+        is_edge = false;
+      }
+
+      if (is_edge) {
+        next_queue.push(point);
+      }
+    }
+    swap(queue, next_queue);
+  }
+}
+
+std::vector<std::vector<int>> contour_expand(Tensor kernel_mask,
+                                             Tensor internal_kernel_label,
+                                             int min_kernel_area,
+                                             int kernel_num) {
+  kernel_mask = kernel_mask.contiguous();
+  internal_kernel_label = internal_kernel_label.contiguous();
+  assert(kernel_mask.dim() == 3);
+  assert(internal_kernel_label.dim() == 2);
+  assert(kernel_mask.size(1) == internal_kernel_label.size(0));
+  assert(kernel_mask.size(2) == internal_kernel_label.size(1));
+  CHECK_CPU_INPUT(kernel_mask);
+  CHECK_CPU_INPUT(internal_kernel_label);
+  auto ptr_data = kernel_mask.data_ptr<uint8_t>();
+  IntArrayRef data_shape = kernel_mask.sizes();
+
+  auto data_label_map = internal_kernel_label.data_ptr<int32_t>();
+  IntArrayRef label_map_shape = internal_kernel_label.sizes();
+  vector<vector<int>> text_line;
+
+  kernel_dilate(ptr_data, data_shape, data_label_map, kernel_num,
+                min_kernel_area, text_line);
+
+  return text_line;
+}
diff --git a/mmcv/ops/csrc/pytorch/corner_pool.cpp b/mmcv/ops/csrc/pytorch/corner_pool.cpp
new file mode 100755
index 0000000..732cdb0
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/corner_pool.cpp
@@ -0,0 +1,240 @@
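contour_expand above grows the labeled minimal text kernels outward with a breadth-first search over the 4-neighborhood, one kernel level at a time; the queue/next_queue swap advances the frontier to the next, larger kernel. A hypothetical call from C++ (shapes as documented by the asserts; the tensor variables and values below are made up for illustration):

// kernel_mask: (kernel_num, H, W) uint8 CPU tensor of binary kernels;
// internal_kernel_label: (H, W) int32 labels of the smallest kernel.
std::vector<std::vector<int>> text_lines =
    contour_expand(kernel_mask, internal_kernel_label,
                   /*min_kernel_area=*/16, /*kernel_num=*/7);

+// Copyright (c) OpenMMLab.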
All rights reserved +// Modified from +// https://github.com/princeton-vl/CornerNet-Lite/tree/master/core/models/py_utils/_cpools/src +#include "pytorch_cpp_helper.hpp" + +Tensor bottom_pool_forward(Tensor input) { + // Initialize output + Tensor output = at::zeros_like(input); + // Get height + int64_t height = input.size(2); + output.copy_(input); + + for (int64_t ind = 1; ind < height; ind <<= 1) { + Tensor max_temp = at::slice(output, 2, ind, height); + Tensor cur_temp = at::slice(output, 2, ind, height).clone(); + Tensor next_temp = at::slice(output, 2, 0, height - ind).clone(); + at::max_out(max_temp, cur_temp, next_temp); + } + + return output; +} + +Tensor bottom_pool_backward(Tensor input, Tensor grad_output) { + auto output = at::zeros_like(input); + + int32_t batch = input.size(0); + int32_t channel = input.size(1); + int32_t height = input.size(2); + int32_t width = input.size(3); + + auto max_val = torch::zeros({batch, channel, width}, + at::device(at::kCUDA).dtype(at::kFloat)); + auto max_ind = torch::zeros({batch, channel, width}, + at::device(at::kCUDA).dtype(at::kLong)); + + auto input_temp = input.select(2, 0); + max_val.copy_(input_temp); + + max_ind.fill_(0); + + auto output_temp = output.select(2, 0); + auto grad_output_temp = grad_output.select(2, 0); + output_temp.copy_(grad_output_temp); + + auto un_max_ind = max_ind.unsqueeze(2); + auto gt_mask = torch::zeros({batch, channel, width}, + at::device(at::kCUDA).dtype(at::kBool)); + auto max_temp = torch::zeros({batch, channel, width}, + at::device(at::kCUDA).dtype(at::kFloat)); + for (int32_t ind = 0; ind < height - 1; ++ind) { + input_temp = input.select(2, ind + 1); + at::gt_out(gt_mask, input_temp, max_val); + + at::masked_select_out(max_temp, input_temp, gt_mask); + max_val.masked_scatter_(gt_mask, max_temp); + max_ind.masked_fill_(gt_mask, ind + 1); + + grad_output_temp = grad_output.select(2, ind + 1).unsqueeze(2); + output.scatter_add_(2, un_max_ind, grad_output_temp); + } + + return output; +} + +Tensor left_pool_forward(Tensor input) { + // Initialize output + Tensor output = at::zeros_like(input); + // Get width + int64_t width = input.size(3); + output.copy_(input); + + for (int64_t ind = 1; ind < width; ind <<= 1) { + Tensor max_temp = at::slice(output, 3, 0, width - ind); + Tensor cur_temp = at::slice(output, 3, 0, width - ind).clone(); + Tensor next_temp = at::slice(output, 3, ind, width).clone(); + at::max_out(max_temp, cur_temp, next_temp); + } + + return output; +} + +Tensor left_pool_backward(Tensor input, Tensor grad_output) { + auto output = at::zeros_like(input); + + int32_t batch = input.size(0); + int32_t channel = input.size(1); + int32_t height = input.size(2); + int32_t width = input.size(3); + + auto max_val = torch::zeros({batch, channel, height}, + at::device(at::kCUDA).dtype(at::kFloat)); + auto max_ind = torch::zeros({batch, channel, height}, + at::device(at::kCUDA).dtype(at::kLong)); + + auto input_temp = input.select(3, width - 1); + max_val.copy_(input_temp); + + max_ind.fill_(width - 1); + + auto output_temp = output.select(3, width - 1); + auto grad_output_temp = grad_output.select(3, width - 1); + output_temp.copy_(grad_output_temp); + + auto un_max_ind = max_ind.unsqueeze(3); + auto gt_mask = torch::zeros({batch, channel, height}, + at::device(at::kCUDA).dtype(at::kBool)); + auto max_temp = torch::zeros({batch, channel, height}, + at::device(at::kCUDA).dtype(at::kFloat)); + for (int32_t ind = 1; ind < width; ++ind) { + input_temp = input.select(3, width - ind - 1); + 
at::gt_out(gt_mask, input_temp, max_val); + + at::masked_select_out(max_temp, input_temp, gt_mask); + max_val.masked_scatter_(gt_mask, max_temp); + max_ind.masked_fill_(gt_mask, width - ind - 1); + + grad_output_temp = grad_output.select(3, width - ind - 1).unsqueeze(3); + output.scatter_add_(3, un_max_ind, grad_output_temp); + } + + return output; +} + +Tensor right_pool_forward(Tensor input) { + // Initialize output + Tensor output = at::zeros_like(input); + // Get width + int64_t width = input.size(3); + output.copy_(input); + + for (int64_t ind = 1; ind < width; ind <<= 1) { + Tensor max_temp = at::slice(output, 3, ind, width); + Tensor cur_temp = at::slice(output, 3, ind, width).clone(); + Tensor next_temp = at::slice(output, 3, 0, width - ind).clone(); + at::max_out(max_temp, cur_temp, next_temp); + } + + return output; +} + +Tensor right_pool_backward(Tensor input, Tensor grad_output) { + Tensor output = at::zeros_like(input); + + int32_t batch = input.size(0); + int32_t channel = input.size(1); + int32_t height = input.size(2); + int32_t width = input.size(3); + + auto max_val = torch::zeros({batch, channel, height}, + at::device(at::kCUDA).dtype(at::kFloat)); + auto max_ind = torch::zeros({batch, channel, height}, + at::device(at::kCUDA).dtype(at::kLong)); + + auto input_temp = input.select(3, 0); + max_val.copy_(input_temp); + + max_ind.fill_(0); + + auto output_temp = output.select(3, 0); + auto grad_output_temp = grad_output.select(3, 0); + output_temp.copy_(grad_output_temp); + + auto un_max_ind = max_ind.unsqueeze(3); + auto gt_mask = torch::zeros({batch, channel, height}, + at::device(at::kCUDA).dtype(at::kBool)); + auto max_temp = torch::zeros({batch, channel, height}, + at::device(at::kCUDA).dtype(at::kFloat)); + for (int32_t ind = 0; ind < width - 1; ++ind) { + input_temp = input.select(3, ind + 1); + at::gt_out(gt_mask, input_temp, max_val); + + at::masked_select_out(max_temp, input_temp, gt_mask); + max_val.masked_scatter_(gt_mask, max_temp); + max_ind.masked_fill_(gt_mask, ind + 1); + + grad_output_temp = grad_output.select(3, ind + 1).unsqueeze(3); + output.scatter_add_(3, un_max_ind, grad_output_temp); + } + + return output; +} + +Tensor top_pool_forward(Tensor input) { + // Initialize output + Tensor output = at::zeros_like(input); + // Get height + int64_t height = input.size(2); + output.copy_(input); + + for (int64_t ind = 1; ind < height; ind <<= 1) { + Tensor max_temp = at::slice(output, 2, 0, height - ind); + Tensor cur_temp = at::slice(output, 2, 0, height - ind).clone(); + Tensor next_temp = at::slice(output, 2, ind, height).clone(); + at::max_out(max_temp, cur_temp, next_temp); + } + + return output; +} + +Tensor top_pool_backward(Tensor input, Tensor grad_output) { + auto output = at::zeros_like(input); + + int32_t batch = input.size(0); + int32_t channel = input.size(1); + int32_t height = input.size(2); + int32_t width = input.size(3); + + auto max_val = torch::zeros({batch, channel, width}, + at::device(at::kCUDA).dtype(at::kFloat)); + auto max_ind = torch::zeros({batch, channel, width}, + at::device(at::kCUDA).dtype(at::kLong)); + + auto input_temp = input.select(2, height - 1); + max_val.copy_(input_temp); + + max_ind.fill_(height - 1); + + auto output_temp = output.select(2, height - 1); + auto grad_output_temp = grad_output.select(2, height - 1); + output_temp.copy_(grad_output_temp); + + auto un_max_ind = max_ind.unsqueeze(2); + auto gt_mask = torch::zeros({batch, channel, width}, + at::device(at::kCUDA).dtype(at::kBool)); + auto max_temp = 
torch::zeros({batch, channel, width}, + at::device(at::kCUDA).dtype(at::kFloat)); + for (int32_t ind = 1; ind < height; ++ind) { + input_temp = input.select(2, height - ind - 1); + at::gt_out(gt_mask, input_temp, max_val); + + at::masked_select_out(max_temp, input_temp, gt_mask); + max_val.masked_scatter_(gt_mask, max_temp); + max_ind.masked_fill_(gt_mask, height - ind - 1); + + grad_output_temp = grad_output.select(2, height - ind - 1).unsqueeze(2); + output.scatter_add_(2, un_max_ind, grad_output_temp); + } + + return output; +} diff --git a/mmcv/ops/csrc/pytorch/correlation.cpp b/mmcv/ops/csrc/pytorch/correlation.cpp new file mode 100644 index 0000000..f4adba2 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/correlation.cpp @@ -0,0 +1,47 @@ +// Copyright (c) OpenMMLab. All rights reserved. +#include + +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void correlation_forward_impl(Tensor input1, Tensor input2, Tensor output, + int kH, int kW, int patchH, int patchW, int padH, + int padW, int dilationH, int dilationW, + int dilation_patchH, int dilation_patchW, int dH, + int dW) { + DISPATCH_DEVICE_IMPL(correlation_forward_impl, input1, input2, output, kH, kW, + patchH, patchW, padH, padW, dilationH, dilationW, + dilation_patchH, dilation_patchW, dH, dW); +} + +void correlation_backward_impl(Tensor grad_output, Tensor input1, Tensor input2, + Tensor grad_input1, Tensor grad_input2, int kH, + int kW, int patchH, int patchW, int padH, + int padW, int dilationH, int dilationW, + int dilation_patchH, int dilation_patchW, int dH, + int dW) { + DISPATCH_DEVICE_IMPL(correlation_backward_impl, grad_output, input1, input2, + grad_input1, grad_input2, kH, kW, patchH, patchW, padH, + padW, dilationH, dilationW, dilation_patchH, + dilation_patchW, dH, dW); +} + +void correlation_forward(Tensor input1, Tensor input2, Tensor output, int kH, + int kW, int patchH, int patchW, int padH, int padW, + int dilationH, int dilationW, int dilation_patchH, + int dilation_patchW, int dH, int dW) { + correlation_forward_impl(input1, input2, output, kH, kW, patchH, patchW, padH, + padW, dilationH, dilationW, dilation_patchH, + dilation_patchW, dH, dW); +} + +void correlation_backward(Tensor grad_output, Tensor input1, Tensor input2, + Tensor grad_input1, Tensor grad_input2, int kH, + int kW, int patchH, int patchW, int padH, int padW, + int dilationH, int dilationW, int dilation_patchH, + int dilation_patchW, int dH, int dW) { + correlation_backward_impl(grad_output, input1, input2, grad_input1, + grad_input2, kH, kW, patchH, patchW, padH, padW, + dilationH, dilationW, dilation_patchH, + dilation_patchW, dH, dW); +} diff --git a/mmcv/ops/csrc/pytorch/cpu/box_iou_rotated.cpp b/mmcv/ops/csrc/pytorch/cpu/box_iou_rotated.cpp new file mode 100644 index 0000000..585d2c9 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cpu/box_iou_rotated.cpp @@ -0,0 +1,38 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +// modified from +// https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp +#include "box_iou_rotated_utils.hpp" +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +template +void box_iou_rotated_cpu_kernel(const Tensor boxes1, const Tensor boxes2, + Tensor ious, const int mode_flag, + const bool aligned) { + int output_size = ious.numel(); + auto num_boxes1 = boxes1.size(0); + auto num_boxes2 = boxes2.size(0); + + if (aligned) { + for (int i = 0; i < output_size; i++) { + ious[i] = single_box_iou_rotated(boxes1[i].data_ptr(), + boxes2[i].data_ptr(), mode_flag); + } + } else { + for (int i = 0; i < num_boxes1; i++) { + for (int j = 0; j < num_boxes2; j++) { + ious[i * num_boxes2 + j] = single_box_iou_rotated( + boxes1[i].data_ptr(), boxes2[j].data_ptr(), mode_flag); + } + } + } +} + +void box_iou_rotated_cpu(const Tensor boxes1, const Tensor boxes2, Tensor ious, + const int mode_flag, const bool aligned) { + box_iou_rotated_cpu_kernel(boxes1, boxes2, ious, mode_flag, aligned); +} + +void box_iou_rotated_impl(const Tensor boxes1, const Tensor boxes2, Tensor ious, + const int mode_flag, const bool aligned); +REGISTER_DEVICE_IMPL(box_iou_rotated_impl, CPU, box_iou_rotated_cpu); diff --git a/mmcv/ops/csrc/pytorch/cpu/deform_conv.cpp b/mmcv/ops/csrc/pytorch/cpu/deform_conv.cpp new file mode 100644 index 0000000..7ab67e7 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cpu/deform_conv.cpp @@ -0,0 +1,408 @@ +// Copyright (c) OpenMMLab. All rights reserved +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +template +T deformable_im2col_bilinear_cpu(const T *input, const int data_width, + const int height, const int width, T h, T w) { + if (h <= -1 || height <= h || w <= -1 || width <= w) { + return 0; + } + + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + T lh = h - h_low; + T lw = w - w_low; + T hh = 1 - lh, hw = 1 - lw; + + T v1 = 0; + if (h_low >= 0 && w_low >= 0) v1 = input[h_low * data_width + w_low]; + T v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = input[h_low * data_width + w_high]; + T v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = input[h_high * data_width + w_low]; + T v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = input[h_high * data_width + w_high]; + + T w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +T get_gradient_weight_cpu(T argmax_h, T argmax_w, const int h, const int w, + const int height, const int width) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + T weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +T get_coordinate_weight_cpu(T argmax_h, T argmax_w, const int height, + const int width, const T *im_data, + const int data_width, const int bp_dir) { + 
+  if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 ||
+      argmax_w >= width) {
+    // empty
+    return 0;
+  }
+
+  int argmax_h_low = floor(argmax_h);
+  int argmax_w_low = floor(argmax_w);
+  int argmax_h_high = argmax_h_low + 1;
+  int argmax_w_high = argmax_w_low + 1;
+
+  T weight = 0;
+
+  if (bp_dir == 0) {
+    if (argmax_h_low >= 0 && argmax_w_low >= 0)
+      weight += -1 * (argmax_w_low + 1 - argmax_w) *
+                im_data[argmax_h_low * data_width + argmax_w_low];
+    if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
+      weight += -1 * (argmax_w - argmax_w_low) *
+                im_data[argmax_h_low * data_width + argmax_w_high];
+    if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
+      weight += (argmax_w_low + 1 - argmax_w) *
+                im_data[argmax_h_high * data_width + argmax_w_low];
+    if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
+      weight += (argmax_w - argmax_w_low) *
+                im_data[argmax_h_high * data_width + argmax_w_high];
+  } else if (bp_dir == 1) {
+    if (argmax_h_low >= 0 && argmax_w_low >= 0)
+      weight += -1 * (argmax_h_low + 1 - argmax_h) *
+                im_data[argmax_h_low * data_width + argmax_w_low];
+    if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
+      weight += (argmax_h_low + 1 - argmax_h) *
+                im_data[argmax_h_low * data_width + argmax_w_high];
+    if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
+      weight += -1 * (argmax_h - argmax_h_low) *
+                im_data[argmax_h_high * data_width + argmax_w_low];
+    if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
+      weight += (argmax_h - argmax_h_low) *
+                im_data[argmax_h_high * data_width + argmax_w_high];
+  }
+
+  return weight;
+}
+
+template <typename T>
+void deformable_im2col_cpu_kernel(
+    const int n, const T *data_im, const T *data_offset, const int height,
+    const int width, const int kernel_h, const int kernel_w, const int pad_h,
+    const int pad_w, const int stride_h, const int stride_w,
+    const int dilation_h, const int dilation_w,
+    const int channel_per_deformable_group, const int batch_size,
+    const int num_channels, const int deformable_group, const int height_col,
+    const int width_col, T *data_col) {
+  for (int index = 0; index < n; index++) {
+    // index index of output matrix
+    const int w_col = index % width_col;
+    const int h_col = (index / width_col) % height_col;
+    const int b_col = (index / width_col / height_col) % batch_size;
+    const int c_im = (index / width_col / height_col) / batch_size;
+    const int c_col = c_im * kernel_h * kernel_w;
+
+    // compute deformable group index
+    const int deformable_group_index = c_im / channel_per_deformable_group;
+
+    const int h_in = h_col * stride_h - pad_h;
+    const int w_in = w_col * stride_w - pad_w;
+    T *data_col_ptr =
+        data_col +
+        ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
+    const T *data_im_ptr =
+        data_im + (b_col * num_channels + c_im) * height * width;
+    const T *data_offset_ptr =
+        data_offset + (b_col * deformable_group + deformable_group_index) * 2 *
+                          kernel_h * kernel_w * height_col * width_col;
+
+    for (int i = 0; i < kernel_h; ++i) {
+      for (int j = 0; j < kernel_w; ++j) {
+        const int data_offset_h_ptr =
+            ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
+        const int data_offset_w_ptr =
+            ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col +
+            w_col;
+        const T offset_h = data_offset_ptr[data_offset_h_ptr];
+        const T offset_w = data_offset_ptr[data_offset_w_ptr];
+        T val = static_cast<T>(0);
+        const T h_im = h_in + i * dilation_h + offset_h;
+        const T w_im = w_in + j * dilation_w + offset_w;
+        if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
+          val = deformable_im2col_bilinear_cpu(data_im_ptr, width, height,
+                                               width, h_im, w_im);
+        *data_col_ptr = val;
+        data_col_ptr += batch_size * height_col * width_col;
+      }
+    }
+  }
+}
+
+template <typename T>
+void deformable_col2im_cpu_kernel(
+    const int n, const T *data_col, const T *data_offset, const int channels,
+    const int height, const int width, const int kernel_h, const int kernel_w,
+    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
+    const int dilation_h, const int dilation_w,
+    const int channel_per_deformable_group, const int batch_size,
+    const int deformable_group, const int height_col, const int width_col,
+    T *grad_im) {
+  for (int index = 0; index < n; index++) {
+    const int j = (index / width_col / height_col / batch_size) % kernel_w;
+    const int i =
+        (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
+    const int c =
+        index / width_col / height_col / batch_size / kernel_w / kernel_h;
+    // compute the start and end of the output
+
+    const int deformable_group_index = c / channel_per_deformable_group;
+
+    int w_out = index % width_col;
+    int h_out = (index / width_col) % height_col;
+    int b = (index / width_col / height_col) % batch_size;
+    int w_in = w_out * stride_w - pad_w;
+    int h_in = h_out * stride_h - pad_h;
+
+    const T *data_offset_ptr =
+        data_offset + (b * deformable_group + deformable_group_index) * 2 *
+                          kernel_h * kernel_w * height_col * width_col;
+    const int data_offset_h_ptr =
+        ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
+    const int data_offset_w_ptr =
+        ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
+    const T offset_h = data_offset_ptr[data_offset_h_ptr];
+    const T offset_w = data_offset_ptr[data_offset_w_ptr];
+    const T cur_inv_h_data = h_in + i * dilation_h + offset_h;
+    const T cur_inv_w_data = w_in + j * dilation_w + offset_w;
+
+    const T cur_top_grad = data_col[index];
+    const int cur_h = (int)cur_inv_h_data;
+    const int cur_w = (int)cur_inv_w_data;
+    for (int dy = -2; dy <= 2; dy++) {
+      for (int dx = -2; dx <= 2; dx++) {
+        if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 &&
+            cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
+            abs(cur_inv_w_data - (cur_w + dx)) < 1) {
+          int cur_bottom_grad_pos =
+              ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
+          T weight =
+              get_gradient_weight_cpu(cur_inv_h_data, cur_inv_w_data,
+                                      cur_h + dy, cur_w + dx, height, width);
+          *(grad_im + cur_bottom_grad_pos) += weight * cur_top_grad;
+        }
+      }
+    }
+  }
+}
+
+template <typename T>
+void deformable_col2im_coord_cpu_kernel(
+    const int n, const T *data_col, const T *data_im, const T *data_offset,
+    const int channels, const int height, const int width, const int kernel_h,
+    const int kernel_w, const int pad_h, const int pad_w, const int stride_h,
+    const int stride_w, const int dilation_h, const int dilation_w,
+    const int channel_per_deformable_group, const int batch_size,
+    const int offset_channels, const int deformable_group, const int height_col,
+    const int width_col, T *grad_offset) {
+  for (int index = 0; index < n; index++) {
+    T val = 0;
+    int w = index % width_col;
+    int h = (index / width_col) % height_col;
+    int c = (index / width_col / height_col) % offset_channels;
+    int b = (index / width_col / height_col) / offset_channels;
+    // compute the start and end of the output
+
+    const int deformable_group_index = c / (2 * kernel_h * kernel_w);
+    const int col_step = kernel_h * kernel_w;
+    int cnt = 0;
+    const T *data_col_ptr = data_col + deformable_group_index *
+                                           channel_per_deformable_group *
+                                           batch_size * width_col * height_col;
+    const T *data_im_ptr =
+        data_im + (b * deformable_group + deformable_group_index) *
+                      channel_per_deformable_group / kernel_h / kernel_w *
+                      height * width;
+    const T *data_offset_ptr =
+        data_offset + (b * deformable_group + deformable_group_index) * 2 *
+                          kernel_h * kernel_w * height_col * width_col;
+
+    const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
+
+    for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group;
+         col_c += col_step) {
+      const int col_pos =
+          (((col_c * batch_size + b) * height_col) + h) * width_col + w;
+      const int bp_dir = offset_c % 2;
+
+      int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
+      int i =
+          (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
+      int w_out = col_pos % width_col;
+      int h_out = (col_pos / width_col) % height_col;
+      int w_in = w_out * stride_w - pad_w;
+      int h_in = h_out * stride_h - pad_h;
+      const int data_offset_h_ptr =
+          (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
+      const int data_offset_w_ptr =
+          (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col +
+           w_out);
+      const T offset_h = data_offset_ptr[data_offset_h_ptr];
+      const T offset_w = data_offset_ptr[data_offset_w_ptr];
+      T inv_h = h_in + i * dilation_h + offset_h;
+      T inv_w = w_in + j * dilation_w + offset_w;
+      if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
+        inv_h = inv_w = -2;
+      const T weight = get_coordinate_weight_cpu(
+          inv_h, inv_w, height, width, data_im_ptr + cnt * height * width,
+          width, bp_dir);
+      val += weight * data_col_ptr[col_pos];
+      cnt += 1;
+    }
+
+    grad_offset[index] = val;
+  }
+}
+
+void deformable_im2col_cpu(Tensor data_im, Tensor data_offset,
+                           const int channels, const int height,
+                           const int width, const int ksize_h,
+                           const int ksize_w, const int pad_h, const int pad_w,
+                           const int stride_h, const int stride_w,
+                           const int dilation_h, const int dilation_w,
+                           const int parallel_imgs, const int deformable_group,
+                           Tensor data_col) {
+  int height_col =
+      (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
+  int width_col =
+      (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
+  int num_kernels = channels * height_col * width_col * parallel_imgs;
+  int channel_per_deformable_group = channels / deformable_group;
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      data_im.scalar_type(), "deformable_im2col_cpu", [&] {
+        deformable_im2col_cpu_kernel<scalar_t>(
+            num_kernels, data_im.data_ptr<scalar_t>(),
+            data_offset.data_ptr<scalar_t>(), height, width, ksize_h, ksize_w,
+            pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
+            channel_per_deformable_group, parallel_imgs, channels,
+            deformable_group, height_col, width_col,
+            data_col.data_ptr<scalar_t>());
+      });
+}
+
+void deformable_col2im_cpu(Tensor data_col, Tensor data_offset,
+                           const int channels, const int height,
+                           const int width, const int ksize_h,
+                           const int ksize_w, const int pad_h, const int pad_w,
+                           const int stride_h, const int stride_w,
+                           const int dilation_h, const int dilation_w,
+                           const int parallel_imgs, const int deformable_group,
+                           Tensor grad_im) {
+  // todo: make sure parallel_imgs is passed in correctly
+  int height_col =
+      (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
+  int width_col =
+      (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
+  int num_kernels =
+      channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
+  int channel_per_deformable_group = channels / deformable_group;
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      data_col.scalar_type(), "deformable_col2im_cpu", ([&] {
+        const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
+        const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
+        scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>();
+
+        deformable_col2im_cpu_kernel<scalar_t>(
+            num_kernels, data_col_, data_offset_, channels, height, width,
+            ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h,
+            dilation_w, channel_per_deformable_group, parallel_imgs,
+            deformable_group, height_col, width_col, grad_im_);
+      }));
+}
+
+void deformable_col2im_coord_cpu(
+    Tensor data_col, Tensor data_im, Tensor data_offset, const int channels,
+    const int height, const int width, const int ksize_h, const int ksize_w,
+    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
+    const int dilation_h, const int dilation_w, const int parallel_imgs,
+    const int deformable_group, Tensor grad_offset) {
+  int height_col =
+      (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
+  int width_col =
+      (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
+  int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w *
+                    deformable_group * parallel_imgs;
+  int channel_per_deformable_group =
+      channels * ksize_h * ksize_w / deformable_group;
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      data_col.scalar_type(), "deformable_col2im_coord_cpu", ([&] {
+        const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
+        const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
+        const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
+        scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>();
+
+        deformable_col2im_coord_cpu_kernel<scalar_t>(
+            num_kernels, data_col_, data_im_, data_offset_, channels, height,
+            width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
+            dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs,
+            2 * ksize_h * ksize_w * deformable_group, deformable_group,
+            height_col, width_col, grad_offset_);
+      }));
+}
+
+void deformable_im2col_impl(Tensor data_im, Tensor data_offset,
+                            const int channels, const int height,
+                            const int width, const int ksize_h,
+                            const int ksize_w, const int pad_h, const int pad_w,
+                            const int stride_h, const int stride_w,
+                            const int dilation_h, const int dilation_w,
+                            const int parallel_imgs, const int deformable_group,
+                            Tensor data_col);
+
+void deformable_col2im_impl(Tensor data_col, Tensor data_offset,
+                            const int channels, const int height,
+                            const int width, const int ksize_h,
+                            const int ksize_w, const int pad_h, const int pad_w,
+                            const int stride_h, const int stride_w,
+                            const int dilation_h, const int dilation_w,
+                            const int parallel_imgs, const int deformable_group,
+                            Tensor grad_im);
+
+void deformable_col2im_coord_impl(
+    Tensor data_col, Tensor data_im, Tensor data_offset, const int channels,
+    const int height, const int width, const int ksize_h, const int ksize_w,
+    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
+    const int dilation_h, const int dilation_w, const int parallel_imgs,
+    const int deformable_group, Tensor grad_offset);
+
+REGISTER_DEVICE_IMPL(deformable_im2col_impl, CPU, deformable_im2col_cpu);
+REGISTER_DEVICE_IMPL(deformable_col2im_impl, CPU, deformable_col2im_cpu);
+REGISTER_DEVICE_IMPL(deformable_col2im_coord_impl, CPU,
+                     deformable_col2im_coord_cpu);
diff --git a/mmcv/ops/csrc/pytorch/cpu/modulated_deform_conv.cpp b/mmcv/ops/csrc/pytorch/cpu/modulated_deform_conv.cpp
new file mode 100644
index 
0000000..9539095 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cpu/modulated_deform_conv.cpp @@ -0,0 +1,436 @@ +// Copyright (c) OpenMMLab. All rights reserved +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +template +T dmcn_im2col_bilinear_cpu(const T *input, const int data_width, + const int height, const int width, T h, T w) { + int h_low = floorf(h); + int w_low = floorf(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + T lh = h - h_low; + T lw = w - w_low; + T hh = 1 - lh, hw = 1 - lw; + + T v1 = 0; + if (h_low >= 0 && w_low >= 0) v1 = input[h_low * data_width + w_low]; + T v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = input[h_low * data_width + w_high]; + T v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = input[h_high * data_width + w_low]; + T v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = input[h_high * data_width + w_high]; + + T w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +T dmcn_get_gradient_weight_cpu(T argmax_h, T argmax_w, const int h, const int w, + const int height, const int width) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floorf(argmax_h); + int argmax_w_low = floorf(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + T weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +T dmcn_get_coordinate_weight_cpu(T argmax_h, T argmax_w, const int height, + const int width, const T *im_data, + const int data_width, const int bp_dir) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floorf(argmax_h); + int argmax_w_low = floorf(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + T weight = 0; + + if (bp_dir == 0) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_w - argmax_w_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } else if (bp_dir == 1) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += 
(argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template +void modulated_deformable_im2col_cpu_kernel( + const int n, const T *data_im, const T *data_offset, const T *data_mask, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, const int batch_size, + const int num_channels, const int deformable_group, const int height_col, + const int width_col, T *data_col) { + for (int index = 0; index < n; index++) { + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + + T *data_col_ptr = + data_col + + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + const T *data_im_ptr = + data_im + (b_col * num_channels + c_im) * height * width; + const T *data_offset_ptr = + data_offset + (b_col * deformable_group + deformable_group_index) * 2 * + kernel_h * kernel_w * height_col * width_col; + + const T *data_mask_ptr = + data_mask + (b_col * deformable_group + deformable_group_index) * + kernel_h * kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + + w_col; + const int data_mask_hw_ptr = + ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; + const T offset_h = data_offset_ptr[data_offset_h_ptr]; + const T offset_w = data_offset_ptr[data_offset_w_ptr]; + const T mask = data_mask_ptr[data_mask_hw_ptr]; + T val = static_cast(0); + const T h_im = h_in + i * dilation_h + offset_h; + const T w_im = w_in + j * dilation_w + offset_w; + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) + val = dmcn_im2col_bilinear_cpu(data_im_ptr, width, height, width, + h_im, w_im); + *data_col_ptr = val * mask; + data_col_ptr += batch_size * height_col * width_col; + } + } + } +} + +template +void modulated_deformable_col2im_cpu_kernel( + const int n, const T *data_col, const T *data_offset, const T *data_mask, + const int channels, const int height, const int width, const int kernel_h, + const int kernel_w, const int pad_h, const int pad_w, const int stride_h, + const int stride_w, const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, const int batch_size, + const int deformable_group, const int height_col, const int width_col, + T *grad_im) { + for (int index = 0; index < n; index++) { + const int j = (index / width_col / height_col / batch_size) % kernel_w; + const int i = + (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = + index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = 
(index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const T *data_offset_ptr = + data_offset + (b * deformable_group + deformable_group_index) * 2 * + kernel_h * kernel_w * height_col * width_col; + const T *data_mask_ptr = + data_mask + (b * deformable_group + deformable_group_index) * kernel_h * + kernel_w * height_col * width_col; + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const int data_mask_hw_ptr = + ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; + const T offset_h = data_offset_ptr[data_offset_h_ptr]; + const T offset_w = data_offset_ptr[data_offset_w_ptr]; + const T mask = data_mask_ptr[data_mask_hw_ptr]; + const T cur_inv_h_data = h_in + i * dilation_h + offset_h; + const T cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const T cur_top_grad = data_col[index] * mask; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) { + for (int dx = -2; dx <= 2; dx++) { + if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && + cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && + abs(cur_inv_w_data - (cur_w + dx)) < 1) { + int cur_bottom_grad_pos = + ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; + T weight = dmcn_get_gradient_weight_cpu(cur_inv_h_data, + cur_inv_w_data, cur_h + dy, + cur_w + dx, height, width); + *(grad_im + cur_bottom_grad_pos) += weight * cur_top_grad; + } + } + } + } +} + +template +void modulated_deformable_col2im_coord_cpu_kernel( + const int n, const T *data_col, const T *data_im, const T *data_offset, + const T *data_mask, const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, const int dilation_h, + const int dilation_w, const int channel_per_deformable_group, + const int batch_size, const int offset_channels, const int deformable_group, + const int height_col, const int width_col, T *grad_offset, T *grad_mask) { + for (int index = 0; index < n; index++) { + T val = 0, mval = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const T *data_col_ptr = data_col + deformable_group_index * + channel_per_deformable_group * + batch_size * width_col * height_col; + const T *data_im_ptr = + data_im + (b * deformable_group + deformable_group_index) * + channel_per_deformable_group / kernel_h / kernel_w * + height * width; + const T *data_offset_ptr = + data_offset + (b * deformable_group + deformable_group_index) * 2 * + kernel_h * kernel_w * height_col * width_col; + const T *data_mask_ptr = + data_mask + (b * deformable_group + deformable_group_index) * kernel_h * + kernel_w * height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; + col_c += col_step) { + const int col_pos = + (((col_c * batch_size + b) * 
height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = + (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = + (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = + (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + + w_out); + const int data_mask_hw_ptr = + (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); + const T offset_h = data_offset_ptr[data_offset_h_ptr]; + const T offset_w = data_offset_ptr[data_offset_w_ptr]; + const T mask = data_mask_ptr[data_mask_hw_ptr]; + T inv_h = h_in + i * dilation_h + offset_h; + T inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) + inv_h = inv_w = -2; + else + mval += data_col_ptr[col_pos] * + dmcn_im2col_bilinear_cpu(data_im_ptr + cnt * height * width, + width, height, width, inv_h, inv_w); + const T weight = dmcn_get_coordinate_weight_cpu( + inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, + width, bp_dir); + val += weight * data_col_ptr[col_pos] * mask; + cnt += 1; + } + // KERNEL_ASSIGN(grad_offset[index], offset_req, val); + grad_offset[index] = val; + if (offset_c % 2 == 0) + // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + + // deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * + // height_col + h) * width_col + w], mask_req, mval); + grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * + kernel_w + + offset_c / 2) * + height_col + + h) * + width_col + + w] = mval; + } +} + +void modulated_deformable_im2col_cpu( + const Tensor data_im, const Tensor data_offset, const Tensor data_mask, + const int batch_size, const int channels, const int height_im, + const int width_im, const int height_col, const int width_col, + const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, const int dilation_h, + const int dilation_w, const int deformable_group, Tensor data_col) { + // num_axes should be smaller than block size + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = channels * batch_size * height_col * width_col; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_im.scalar_type(), "modulated_deformable_im2col_cpu", ([&] { + const scalar_t *data_im_ = data_im.data_ptr(); + const scalar_t *data_offset_ = data_offset.data_ptr(); + const scalar_t *data_mask_ = data_mask.data_ptr(); + scalar_t *data_col_ = data_col.data_ptr(); + + modulated_deformable_im2col_cpu_kernel( + num_kernels, data_im_, data_offset_, data_mask_, height_im, + width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, channel_per_deformable_group, batch_size, + channels, deformable_group, height_col, width_col, data_col_); + })); +} + +void modulated_deformable_col2im_cpu( + const Tensor data_col, const Tensor data_offset, const Tensor data_mask, + const int batch_size, const int channels, const int height_im, + const int width_im, const int height_col, const int width_col, + const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, const int dilation_h, + const int dilation_w, const int 
deformable_group, Tensor grad_im) { + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = + channels * kernel_h * kernel_w * batch_size * height_col * width_col; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "modulated_deformable_col2im_cpu", ([&] { + const scalar_t *data_col_ = data_col.data_ptr(); + const scalar_t *data_offset_ = data_offset.data_ptr(); + const scalar_t *data_mask_ = data_mask.data_ptr(); + scalar_t *grad_im_ = grad_im.data_ptr(); + + modulated_deformable_col2im_cpu_kernel( + num_kernels, data_col_, data_offset_, data_mask_, channels, + height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, + stride_w, dilation_h, dilation_w, channel_per_deformable_group, + batch_size, deformable_group, height_col, width_col, grad_im_); + })); +} + +void modulated_deformable_col2im_coord_cpu( + const Tensor data_col, const Tensor data_im, const Tensor data_offset, + const Tensor data_mask, const int batch_size, const int channels, + const int height_im, const int width_im, const int height_col, + const int width_col, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, const int deformable_group, + Tensor grad_offset, Tensor grad_mask) { + const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * + kernel_w * deformable_group; + const int channel_per_deformable_group = + channels * kernel_h * kernel_w / deformable_group; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "modulated_deformable_col2im_coord_cpu", ([&] { + const scalar_t *data_col_ = data_col.data_ptr(); + const scalar_t *data_im_ = data_im.data_ptr(); + const scalar_t *data_offset_ = data_offset.data_ptr(); + const scalar_t *data_mask_ = data_mask.data_ptr(); + scalar_t *grad_offset_ = grad_offset.data_ptr(); + scalar_t *grad_mask_ = grad_mask.data_ptr(); + + modulated_deformable_col2im_coord_cpu_kernel( + num_kernels, data_col_, data_im_, data_offset_, data_mask_, + channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, + stride_h, stride_w, dilation_h, dilation_w, + channel_per_deformable_group, batch_size, + 2 * kernel_h * kernel_w * deformable_group, deformable_group, + height_col, width_col, grad_offset_, grad_mask_); + })); +} + +void modulated_deformable_im2col_impl( + const Tensor data_im, const Tensor data_offset, const Tensor data_mask, + const int batch_size, const int channels, const int height_im, + const int width_im, const int height_col, const int width_col, + const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, const int dilation_h, + const int dilation_w, const int deformable_group, Tensor data_col); + +void modulated_deformable_col2im_impl( + const Tensor data_col, const Tensor data_offset, const Tensor data_mask, + const int batch_size, const int channels, const int height_im, + const int width_im, const int height_col, const int width_col, + const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, const int dilation_h, + const int dilation_w, const int deformable_group, Tensor grad_im); + +void modulated_deformable_col2im_coord_impl( + const Tensor data_col, const Tensor data_im, const Tensor data_offset, + const Tensor data_mask, const int batch_size, const int channels, + const int height_im, const int width_im, const int height_col, + const int width_col, 
const int kernel_h, const int kernel_w,
+    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
+    const int dilation_h, const int dilation_w, const int deformable_group,
+    Tensor grad_offset, Tensor grad_mask);
+
+REGISTER_DEVICE_IMPL(modulated_deformable_im2col_impl, CPU,
+                     modulated_deformable_im2col_cpu);
+REGISTER_DEVICE_IMPL(modulated_deformable_col2im_impl, CPU,
+                     modulated_deformable_col2im_cpu);
+REGISTER_DEVICE_IMPL(modulated_deformable_col2im_coord_impl, CPU,
+                     modulated_deformable_col2im_coord_cpu);
diff --git a/mmcv/ops/csrc/pytorch/cpu/nms.cpp b/mmcv/ops/csrc/pytorch/cpu/nms.cpp
new file mode 100644
index 0000000..53e9b9a
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/cpu/nms.cpp
@@ -0,0 +1,230 @@
+// Copyright (c) OpenMMLab. All rights reserved
+#include "pytorch_cpp_helper.hpp"
+#include "pytorch_device_registry.hpp"
+
+Tensor nms_cpu(Tensor boxes, Tensor scores, float iou_threshold, int offset) {
+  if (boxes.numel() == 0) {
+    return at::empty({0}, boxes.options().dtype(at::kLong));
+  }
+  auto x1_t = boxes.select(1, 0).contiguous();
+  auto y1_t = boxes.select(1, 1).contiguous();
+  auto x2_t = boxes.select(1, 2).contiguous();
+  auto y2_t = boxes.select(1, 3).contiguous();
+
+  Tensor areas_t = (x2_t - x1_t + offset) * (y2_t - y1_t + offset);
+
+  auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
+
+  auto nboxes = boxes.size(0);
+  Tensor select_t = at::ones({nboxes}, boxes.options().dtype(at::kBool));
+
+  auto select = select_t.data_ptr<bool>();
+  auto order = order_t.data_ptr<int64_t>();
+  auto x1 = x1_t.data_ptr<float>();
+  auto y1 = y1_t.data_ptr<float>();
+  auto x2 = x2_t.data_ptr<float>();
+  auto y2 = y2_t.data_ptr<float>();
+  auto areas = areas_t.data_ptr<float>();
+
+  for (int64_t _i = 0; _i < nboxes; _i++) {
+    if (select[_i] == false) continue;
+    auto i = order[_i];
+    auto ix1 = x1[i];
+    auto iy1 = y1[i];
+    auto ix2 = x2[i];
+    auto iy2 = y2[i];
+    auto iarea = areas[i];
+
+    for (int64_t _j = _i + 1; _j < nboxes; _j++) {
+      if (select[_j] == false) continue;
+      auto j = order[_j];
+      auto xx1 = std::max(ix1, x1[j]);
+      auto yy1 = std::max(iy1, y1[j]);
+      auto xx2 = std::min(ix2, x2[j]);
+      auto yy2 = std::min(iy2, y2[j]);
+
+      auto w = std::max(0.f, xx2 - xx1 + offset);
+      auto h = std::max(0.f, yy2 - yy1 + offset);
+      auto inter = w * h;
+      auto ovr = inter / (iarea + areas[j] - inter);
+      if (ovr > iou_threshold) select[_j] = false;
+    }
+  }
+  return order_t.masked_select(select_t);
+}
+
+Tensor nms_impl(Tensor boxes, Tensor scores, float iou_threshold, int offset);
+REGISTER_DEVICE_IMPL(nms_impl, CPU, nms_cpu);
+
+Tensor softnms_cpu(Tensor boxes, Tensor scores, Tensor dets,
+                   float iou_threshold, float sigma, float min_score,
+                   int method, int offset) {
+  if (boxes.numel() == 0) {
+    return at::empty({0}, boxes.options().dtype(at::kLong));
+  }
+
+  auto x1_t = boxes.select(1, 0).contiguous();
+  auto y1_t = boxes.select(1, 1).contiguous();
+  auto x2_t = boxes.select(1, 2).contiguous();
+  auto y2_t = boxes.select(1, 3).contiguous();
+  auto scores_t = scores.clone();
+
+  Tensor areas_t = (x2_t - x1_t + offset) * (y2_t - y1_t + offset);
+
+  auto nboxes = boxes.size(0);
+  auto x1 = x1_t.data_ptr<float>();
+  auto y1 = y1_t.data_ptr<float>();
+  auto x2 = x2_t.data_ptr<float>();
+  auto y2 = y2_t.data_ptr<float>();
+  auto sc = scores_t.data_ptr<float>();
+  auto areas = areas_t.data_ptr<float>();
+  auto de = dets.data_ptr<float>();
+
+  int64_t pos = 0;
+  Tensor inds_t = at::arange(nboxes, boxes.options().dtype(at::kLong));
+  auto inds = inds_t.data_ptr<int64_t>();
+
+  for (int64_t i = 0; i < nboxes; i++) {
+    auto max_score = sc[i];
+    auto max_pos = i;
+
+    pos = i + 1;
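+    // Soft-NMS outer iteration: selection-sort the highest-scoring remaining
+    // box into slot i and emit it to `dets`; the second while-loop below then
+    // decays the scores of all boxes overlapping it (method 0: hard
+    // suppression, 1: linear decay, 2: Gaussian decay) and prunes boxes whose
+    // score drops below min_score.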
+    // get max box
+    while (pos < nboxes) {
+      if (max_score < sc[pos]) {
+        max_score = sc[pos];
+        max_pos = pos;
+      }
+      pos = pos + 1;
+    }
+    // swap
+    auto ix1 = de[i * 5 + 0] = x1[max_pos];
+    auto iy1 = de[i * 5 + 1] = y1[max_pos];
+    auto ix2 = de[i * 5 + 2] = x2[max_pos];
+    auto iy2 = de[i * 5 + 3] = y2[max_pos];
+    auto iscore = de[i * 5 + 4] = sc[max_pos];
+    auto iarea = areas[max_pos];
+    auto iind = inds[max_pos];
+    x1[max_pos] = x1[i];
+    y1[max_pos] = y1[i];
+    x2[max_pos] = x2[i];
+    y2[max_pos] = y2[i];
+    sc[max_pos] = sc[i];
+    areas[max_pos] = areas[i];
+    inds[max_pos] = inds[i];
+    x1[i] = ix1;
+    y1[i] = iy1;
+    x2[i] = ix2;
+    y2[i] = iy2;
+    sc[i] = iscore;
+    areas[i] = iarea;
+    inds[i] = iind;
+
+    pos = i + 1;
+    while (pos < nboxes) {
+      auto xx1 = std::max(ix1, x1[pos]);
+      auto yy1 = std::max(iy1, y1[pos]);
+      auto xx2 = std::min(ix2, x2[pos]);
+      auto yy2 = std::min(iy2, y2[pos]);
+
+      auto w = std::max(0.f, xx2 - xx1 + offset);
+      auto h = std::max(0.f, yy2 - yy1 + offset);
+      auto inter = w * h;
+      auto ovr = inter / (iarea + areas[pos] - inter);
+
+      float weight = 1.;
+      if (method == 0) {
+        if (ovr >= iou_threshold) weight = 0;
+      } else if (method == 1) {
+        if (ovr >= iou_threshold) weight = 1 - ovr;
+      } else if (method == 2) {
+        weight = std::exp(-(ovr * ovr) / sigma);
+      }
+      sc[pos] *= weight;
+      // if box score falls below threshold, discard the box by
+      // swapping with the last box, then update N
+      if (sc[pos] < min_score) {
+        x1[pos] = x1[nboxes - 1];
+        y1[pos] = y1[nboxes - 1];
+        x2[pos] = x2[nboxes - 1];
+        y2[pos] = y2[nboxes - 1];
+        sc[pos] = sc[nboxes - 1];
+        areas[pos] = areas[nboxes - 1];
+        inds[pos] = inds[nboxes - 1];
+        nboxes = nboxes - 1;
+        pos = pos - 1;
+      }
+      pos = pos + 1;
+    }
+  }
+  return inds_t.slice(0, 0, nboxes);
+}
+
+Tensor softnms_impl(Tensor boxes, Tensor scores, Tensor dets,
+                    float iou_threshold, float sigma, float min_score,
+                    int method, int offset);
+REGISTER_DEVICE_IMPL(softnms_impl, CPU, softnms_cpu);
+
+std::vector<std::vector<int>> nms_match_cpu(Tensor dets, float iou_threshold) {
+  auto x1_t = dets.select(1, 0).contiguous();
+  auto y1_t = dets.select(1, 1).contiguous();
+  auto x2_t = dets.select(1, 2).contiguous();
+  auto y2_t = dets.select(1, 3).contiguous();
+  auto scores = dets.select(1, 4).contiguous();
+
+  at::Tensor areas_t = (x2_t - x1_t) * (y2_t - y1_t);
+
+  auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
+
+  auto ndets = dets.size(0);
+  at::Tensor suppressed_t =
+      at::zeros({ndets}, dets.options().dtype(at::kByte).device(at::kCPU));
+
+  auto suppressed = suppressed_t.data_ptr<uint8_t>();
+  auto order = order_t.data_ptr<int64_t>();
+  auto x1 = x1_t.data_ptr<float>();
+  auto y1 = y1_t.data_ptr<float>();
+  auto x2 = x2_t.data_ptr<float>();
+  auto y2 = y2_t.data_ptr<float>();
+  auto areas = areas_t.data_ptr<float>();
+
+  std::vector<int> keep;
+  std::vector<std::vector<int>> matched;
+
+  for (int64_t _i = 0; _i < ndets; _i++) {
+    auto i = order[_i];
+    if (suppressed[i] == 1) continue;
+    keep.push_back(i);
+    std::vector<int> v_i;
+    auto ix1 = x1[i];
+    auto iy1 = y1[i];
+    auto ix2 = x2[i];
+    auto iy2 = y2[i];
+    auto iarea = areas[i];
+
+    for (int64_t _j = _i + 1; _j < ndets; _j++) {
+      auto j = order[_j];
+      if (suppressed[j] == 1) continue;
+      auto xx1 = std::max(ix1, x1[j]);
+      auto yy1 = std::max(iy1, y1[j]);
+      auto xx2 = std::min(ix2, x2[j]);
+      auto yy2 = std::min(iy2, y2[j]);
+
+      auto w = std::max(static_cast<float>(0), xx2 - xx1);
+      auto h = std::max(static_cast<float>(0), yy2 - yy1);
+      auto inter = w * h;
+      auto ovr = inter / (iarea + areas[j] - inter);
+      if (ovr >= iou_threshold) {
+        suppressed[j] = 1;
+        v_i.push_back(j);
+      }
+    }
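+    // v_i holds the indices suppressed by the box kept in this round; store
+    // them as one match group (the kept index itself is prepended to each
+    // group after the loop).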
+    matched.push_back(v_i);
+  }
+  for (size_t i = 0; i < keep.size(); i++)
+    matched[i].insert(matched[i].begin(), keep[i]);
+  return matched;
+}
+
+std::vector<std::vector<int>> nms_match_impl(Tensor dets, float iou_threshold);
+REGISTER_DEVICE_IMPL(nms_match_impl, CPU, nms_match_cpu);
diff --git a/mmcv/ops/csrc/pytorch/cpu/nms_rotated.cpp b/mmcv/ops/csrc/pytorch/cpu/nms_rotated.cpp
new file mode 100644
index 0000000..223ee1a
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/cpu/nms_rotated.cpp
@@ -0,0 +1,66 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+// modified from
+// https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp
+#include "box_iou_rotated_utils.hpp"
+#include "pytorch_cpp_helper.hpp"
+
+template <typename scalar_t>
+Tensor nms_rotated_cpu_kernel(const Tensor dets, const Tensor scores,
+                              const float iou_threshold) {
+  // nms_rotated_cpu_kernel is modified from torchvision's nms_cpu_kernel;
+  // however, the code in this function is much shorter because
+  // we delegate the IoU computation for rotated boxes to
+  // the single_box_iou_rotated function in box_iou_rotated_utils.h
+  AT_ASSERTM(!dets.is_cuda(), "dets must be a CPU tensor");
+  AT_ASSERTM(!scores.is_cuda(), "scores must be a CPU tensor");
+  AT_ASSERTM(dets.scalar_type() == scores.scalar_type(),
+             "dets should have the same type as scores");
+
+  if (dets.numel() == 0) {
+    return at::empty({0}, dets.options().dtype(at::kLong));
+  }
+
+  auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
+
+  auto ndets = dets.size(0);
+  Tensor suppressed_t = at::zeros({ndets}, dets.options().dtype(at::kByte));
+  Tensor keep_t = at::zeros({ndets}, dets.options().dtype(at::kLong));
+
+  auto suppressed = suppressed_t.data_ptr<uint8_t>();
+  auto keep = keep_t.data_ptr<int64_t>();
+  auto order = order_t.data_ptr<int64_t>();
+
+  int64_t num_to_keep = 0;
+
+  for (int64_t _i = 0; _i < ndets; _i++) {
+    auto i = order[_i];
+    if (suppressed[i] == 1) {
+      continue;
+    }
+
+    keep[num_to_keep++] = i;
+
+    for (int64_t _j = _i + 1; _j < ndets; _j++) {
+      auto j = order[_j];
+      if (suppressed[j] == 1) {
+        continue;
+      }
+
+      auto ovr = single_box_iou_rotated<scalar_t>(
+          dets[i].data_ptr<scalar_t>(), dets[j].data_ptr<scalar_t>(), 0);
+      if (ovr >= iou_threshold) {
+        suppressed[j] = 1;
+      }
+    }
+  }
+  return keep_t.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep);
+}
+
+Tensor nms_rotated_cpu(const Tensor dets, const Tensor scores,
+                       const float iou_threshold) {
+  auto result = at::empty({0}, dets.options());
+  AT_DISPATCH_FLOATING_TYPES(dets.type(), "nms_rotated", [&] {
+    result = nms_rotated_cpu_kernel<scalar_t>(dets, scores, iou_threshold);
+  });
+  return result;
+}
diff --git a/mmcv/ops/csrc/pytorch/cpu/pixel_group.cpp b/mmcv/ops/csrc/pytorch/cpu/pixel_group.cpp
new file mode 100755
index 0000000..9083281
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/cpu/pixel_group.cpp
@@ -0,0 +1,124 @@
+// Copyright (c) OpenMMLab. All rights reserved
+// It is modified from https://github.com/WenmuZhou/PAN.pytorch
+
+#include "pytorch_cpp_helper.hpp"
+#include "pytorch_device_registry.hpp"
+
+std::vector<std::vector<float>> estimate_confidence(int32_t* label,
+                                                    float* score, int label_num,
+                                                    int height, int width) {
+  std::vector<std::vector<float>> point_vector;
+  for (int i = 0; i < label_num; i++) {
+    std::vector<float> point;
+    point.push_back(0);
+    point.push_back(0);
+    point_vector.push_back(point);
+  }
+  for (int y = 0; y < height; y++) {
+    auto label_tmp = label + y * width;
+    auto score_tmp = score + y * width;
+    for (int x = 0; x < width; x++) {
+      auto l = label_tmp[x];
+      if (l > 0) {
+        float confidence = score_tmp[x];
+        point_vector[l].push_back(x);
+        point_vector[l].push_back(y);
+        point_vector[l][0] += confidence;
+        point_vector[l][1] += 1;
+      }
+    }
+  }
+  for (size_t l = 0; l < point_vector.size(); l++)
+    if (point_vector[l][1] > 0) {
+      point_vector[l][0] /= point_vector[l][1];
+    }
+  return point_vector;
+}
+std::vector<std::vector<float>> pixel_group_cpu(
+    Tensor score, Tensor mask, Tensor embedding, Tensor kernel_label,
+    Tensor kernel_contour, int kernel_region_num, float dis_threshold) {
+  assert(score.dim() == 2);
+  assert(mask.dim() == 2);
+  assert(embedding.dim() == 3);
+  int height = score.size(0);
+  int width = score.size(1);
+  assert(height == mask.size(0) == embedding.size(1) == kernel_label.size(1));
+  assert(width == mask.size(1) == embedding.size(2) == kernel_label.size(2));
+
+  auto threshold_square = dis_threshold * dis_threshold;
+  auto ptr_score = score.data_ptr<float>();
+  auto ptr_mask = mask.data_ptr<bool>();
+  auto ptr_kernel_contour = kernel_contour.data_ptr<uint8_t>();
+  auto ptr_embedding = embedding.data_ptr<float>();
+  auto ptr_kernel_label = kernel_label.data_ptr<int32_t>();
+  std::queue<std::tuple<int, int, int32_t>> contour_pixels;
+  auto embedding_dim = embedding.size(2);
+  std::vector<std::vector<float>> kernel_vector(
+      kernel_region_num, std::vector<float>(embedding_dim + 1, 0));
+
+  Tensor text_label;
+  text_label = kernel_label.clone();
+  auto ptr_text_label = text_label.data_ptr<int32_t>();
+
+  for (int i = 0; i < height; i++) {
+    auto ptr_embedding_tmp = ptr_embedding + i * width * embedding_dim;
+    auto ptr_kernel_label_tmp = ptr_kernel_label + i * width;
+    auto ptr_kernel_contour_tmp = ptr_kernel_contour + i * width;
+
+    for (int j = 0, k = 0; j < width && k < width * embedding_dim;
+         j++, k += embedding_dim) {
+      int32_t label = ptr_kernel_label_tmp[j];
+      if (label > 0) {
+        for (int d = 0; d < embedding_dim; d++)
+          kernel_vector[label][d] += ptr_embedding_tmp[k + d];
+        kernel_vector[label][embedding_dim] += 1;
+        // kernel pixel number
+        if (ptr_kernel_contour_tmp[j]) {
+          contour_pixels.push(std::make_tuple(i, j, label));
+        }
+      }
+    }
+  }
+  for (int i = 0; i < kernel_region_num; i++) {
+    for (int j = 0; j < embedding_dim; j++) {
+      kernel_vector[i][j] /= kernel_vector[i][embedding_dim];
+    }
+  }
+  int dx[4] = {-1, 1, 0, 0};
+  int dy[4] = {0, 0, -1, 1};
+  while (!contour_pixels.empty()) {
+    auto query_pixel = contour_pixels.front();
+    contour_pixels.pop();
+    int y = std::get<0>(query_pixel);
+    int x = std::get<1>(query_pixel);
+    int32_t l = std::get<2>(query_pixel);
+    auto kernel_cv = kernel_vector[l];
+    for (int idx = 0; idx < 4; idx++) {
+      int tmpy = y + dy[idx];
+      int tmpx = x + dx[idx];
+      auto ptr_text_label_tmp = ptr_text_label + tmpy * width;
+      if (tmpy < 0 || tmpy >= height || tmpx < 0 || tmpx >= width) continue;
+      if (!ptr_mask[tmpy * width + tmpx] || ptr_text_label_tmp[tmpx] > 0)
+        continue;
+
+      float dis = 0;
+      auto ptr_embedding_tmp = ptr_embedding + tmpy * width * embedding_dim;
+      for (size_t i = 0; i < embedding_dim; i++) {
+        dis +=
+            pow(kernel_cv[i] - ptr_embedding_tmp[tmpx * embedding_dim + i], 2);
+        // ignore further computing if dis is big enough
+        if (dis >= threshold_square) break;
+      }
+      if (dis >= threshold_square) continue;
+      contour_pixels.push(std::make_tuple(tmpy, tmpx, l));
+      ptr_text_label_tmp[tmpx] = l;
+    }
+  }
+
+  return estimate_confidence(ptr_text_label, ptr_score, kernel_region_num,
+                             height, width);
+}
+std::vector<std::vector<float>> pixel_group_impl(
+    Tensor score, Tensor mask, Tensor embedding, Tensor kernel_label,
+    Tensor kernel_contour, int kernel_region_num, float dis_threshold);
+REGISTER_DEVICE_IMPL(pixel_group_impl, CPU, pixel_group_cpu);
diff --git a/mmcv/ops/csrc/pytorch/cpu/points_in_boxes.cpp b/mmcv/ops/csrc/pytorch/cpu/points_in_boxes.cpp
new file mode 100644
index 0000000..c16baa4
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/cpu/points_in_boxes.cpp
@@ -0,0 +1,53 @@
+#include "pytorch_cpp_helper.hpp"
+
+inline void lidar_to_local_coords_cpu(float shift_x, float shift_y, float rz,
+                                      float &local_x, float &local_y) {
+  float cosa = cos(-rz), sina = sin(-rz);
+  local_x = shift_x * cosa + shift_y * (-sina);
+  local_y = shift_x * sina + shift_y * cosa;
+}
+
+inline int check_pt_in_box3d_cpu(const float *pt, const float *box3d,
+                                 float &local_x, float &local_y) {
+  // param pt: (x, y, z)
+  // param box3d: (cx, cy, cz, x_size, y_size, z_size, rz) in LiDAR coordinate,
+  // cz in the bottom center
+  float x = pt[0], y = pt[1], z = pt[2];
+  float cx = box3d[0], cy = box3d[1], cz = box3d[2];
+  float x_size = box3d[3], y_size = box3d[4], z_size = box3d[5], rz = box3d[6];
+  cz += z_size /
+        2.0;  // shift to the center since cz in box3d is the bottom center
+
+  if (fabsf(z - cz) > z_size / 2.0) return 0;
+  lidar_to_local_coords_cpu(x - cx, y - cy, rz, local_x, local_y);
+  float in_flag = (local_x > -x_size / 2.0) & (local_x < x_size / 2.0) &
+                  (local_y > -y_size / 2.0) & (local_y < y_size / 2.0);
+  return in_flag;
+}
+
+void points_in_boxes_cpu_forward(Tensor boxes_tensor, Tensor pts_tensor,
+                                 Tensor pts_indices_tensor) {
+  // params boxes: (N, 7) [x, y, z, x_size, y_size, z_size, rz] in LiDAR
+  // coordinate, z is the bottom center, each box DO NOT overlaps params pts:
+  // (npoints, 3) [x, y, z] in LiDAR coordinate params pts_indices: (N, npoints)
+
+  CHECK_CONTIGUOUS(boxes_tensor);
+  CHECK_CONTIGUOUS(pts_tensor);
+  CHECK_CONTIGUOUS(pts_indices_tensor);
+
+  int boxes_num = boxes_tensor.size(0);
+  int pts_num = pts_tensor.size(0);
+
+  const float *boxes = boxes_tensor.data_ptr<float>();
+  const float *pts = pts_tensor.data_ptr<float>();
+  int *pts_indices = pts_indices_tensor.data_ptr<int>();
+
+  float local_x = 0, local_y = 0;
+  for (int i = 0; i < boxes_num; i++) {
+    for (int j = 0; j < pts_num; j++) {
+      int cur_in_flag =
+          check_pt_in_box3d_cpu(pts + j * 3, boxes + i * 7, local_x, local_y);
+      pts_indices[i * pts_num + j] = cur_in_flag;
+    }
+  }
+}
diff --git a/mmcv/ops/csrc/pytorch/cpu/psamask.cpp b/mmcv/ops/csrc/pytorch/cpu/psamask.cpp
new file mode 100644
index 0000000..aa7fdcb
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/cpu/psamask.cpp
@@ -0,0 +1,199 @@
+// Copyright (c) OpenMMLab. All rights reserved
+// Modified from
+// https://github.com/hszhao/semseg/blob/master/lib/psa/src
+#include "pytorch_cpp_helper.hpp"
+#include "pytorch_device_registry.hpp"
+
+#ifndef min
+#define min(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+#ifndef max
+#define max(a, b) (((a) > (b)) ?
(a) : (b)) +#endif + +void psamask_collect_forward(const int num_, const int h_feature, + const int w_feature, const int h_mask, + const int w_mask, const int half_h_mask, + const int half_w_mask, const Tensor mask_data, + Tensor buffer_data) { + for (int n = 0; n < num_; n++) { + for (int h = 0; h < h_feature; h++) { + for (int w = 0; w < w_feature; w++) { + // effective mask region : [hstart, hend) x [wstart, wend) with + // mask-indexed + const int hstart = max(0, half_h_mask - h); + const int hend = min(h_mask, h_feature + half_h_mask - h); + const int wstart = max(0, half_w_mask - w); + const int wend = min(w_mask, w_feature + half_w_mask - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_h_mask, widx + w - half_w_mask) with + // feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + buffer_data.view({-1})[(n * h_feature * w_feature + + (hidx + h - half_h_mask) * w_feature + + (widx + w - half_w_mask)) * + h_feature * w_feature + + h * w_feature + w] = + mask_data.view( + {-1})[((n * h_mask * w_mask + hidx * w_mask + widx) * + h_feature + + h) * + w_feature + + w]; + } + } + } + } + } +} + +void psamask_distribute_forward(const int num_, const int h_feature, + const int w_feature, const int h_mask, + const int w_mask, const int half_h_mask, + const int half_w_mask, const Tensor mask_data, + Tensor buffer_data) { + for (int n = 0; n < num_; n++) { + for (int h = 0; h < h_feature; h++) { + for (int w = 0; w < w_feature; w++) { + // effective mask region : [hstart, hend) x [wstart, wend) with + // mask-indexed + const int hstart = max(0, half_h_mask - h); + const int hend = min(h_mask, h_feature + half_h_mask - h); + const int wstart = max(0, half_w_mask - w); + const int wend = min(w_mask, w_feature + half_w_mask - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_h_mask, widx + w - half_w_mask) with + // feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + buffer_data.view( + {-1})[(n * h_feature * w_feature + h * w_feature + w) * + h_feature * w_feature + + (hidx + h - half_h_mask) * w_feature + + (widx + w - half_w_mask)] = + mask_data.view( + {-1})[((n * h_mask * w_mask + hidx * w_mask + widx) * + h_feature + + h) * + w_feature + + w]; + } + } + } + } + } +} + +void psamask_collect_backward(const int num_, const int h_feature, + const int w_feature, const int h_mask, + const int w_mask, const int half_h_mask, + const int half_w_mask, const Tensor buffer_diff, + Tensor mask_diff) { + for (int n = 0; n < num_; n++) { + for (int h = 0; h < h_feature; h++) { + for (int w = 0; w < w_feature; w++) { + // effective mask region : [hstart, hend) x [wstart, wend) with + // mask-indexed + const int hstart = max(0, half_h_mask - h); + const int hend = min(h_mask, h_feature + half_h_mask - h); + const int wstart = max(0, half_w_mask - w); + const int wend = min(w_mask, w_feature + half_w_mask - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_h_mask, widx + w - half_w_mask) with + // feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + mask_diff.view({-1})[((n * h_mask * w_mask + hidx * w_mask + widx) * + h_feature + + h) * + w_feature + + w] = + buffer_diff.view({-1})[(n * h_feature * w_feature + + (hidx + h - half_h_mask) * w_feature + + (widx + w - half_w_mask)) * + h_feature * w_feature + + h * w_feature + w]; + } + } + } + } + } +} + +void 
psamask_distribute_backward(const int num_, const int h_feature, + const int w_feature, const int h_mask, + const int w_mask, const int half_h_mask, + const int half_w_mask, + const Tensor buffer_diff, Tensor mask_diff) { + for (int n = 0; n < num_; n++) { + for (int h = 0; h < h_feature; h++) { + for (int w = 0; w < w_feature; w++) { + // effective mask region : [hstart, hend) x [wstart, wend) with + // mask-indexed + const int hstart = max(0, half_h_mask - h); + const int hend = min(h_mask, h_feature + half_h_mask - h); + const int wstart = max(0, half_w_mask - w); + const int wend = min(w_mask, w_feature + half_w_mask - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_h_mask, widx + w - half_w_mask) with + // feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + mask_diff.view({-1})[((n * h_mask * w_mask + hidx * w_mask + widx) * + h_feature + + h) * + w_feature + + w] = + buffer_diff.view( + {-1})[(n * h_feature * w_feature + h * w_feature + w) * + h_feature * w_feature + + (hidx + h - half_h_mask) * w_feature + + (widx + w - half_w_mask)]; + } + } + } + } + } +} + +void psamask_forward_cpu(const int psa_type, const Tensor input, Tensor output, + const int num_, const int h_feature, + const int w_feature, const int h_mask, + const int w_mask, const int half_h_mask, + const int half_w_mask) { + if (psa_type == 0) + psamask_collect_forward(num_, h_feature, w_feature, h_mask, w_mask, + half_h_mask, half_w_mask, input, output); + else + psamask_distribute_forward(num_, h_feature, w_feature, h_mask, w_mask, + half_h_mask, half_w_mask, input, output); +} + +void psamask_backward_cpu(const int psa_type, const Tensor grad_output, + Tensor grad_input, const int num_, + const int h_feature, const int w_feature, + const int h_mask, const int w_mask, + const int half_h_mask, const int half_w_mask) { + if (psa_type == 0) + psamask_collect_backward(num_, h_feature, w_feature, h_mask, w_mask, + half_h_mask, half_w_mask, grad_output, grad_input); + else + psamask_distribute_backward(num_, h_feature, w_feature, h_mask, w_mask, + half_h_mask, half_w_mask, grad_output, + grad_input); +} + +void psamask_forward_impl(const int psa_type, const Tensor input, Tensor output, + const int num_, const int h_feature, + const int w_feature, const int h_mask, + const int w_mask, const int half_h_mask, + const int half_w_mask); + +void psamask_backward_impl(const int psa_type, const Tensor grad_output, + Tensor grad_input, const int num_, + const int h_feature, const int w_feature, + const int h_mask, const int w_mask, + const int half_h_mask, const int half_w_mask); +REGISTER_DEVICE_IMPL(psamask_forward_impl, CPU, psamask_forward_cpu); +REGISTER_DEVICE_IMPL(psamask_backward_impl, CPU, psamask_backward_cpu); diff --git a/mmcv/ops/csrc/pytorch/cpu/roi_align.cpp b/mmcv/ops/csrc/pytorch/cpu/roi_align.cpp new file mode 100644 index 0000000..d545390 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cpu/roi_align.cpp @@ -0,0 +1,466 @@ +// Modified from +// https://github.com/facebookresearch/detectron2/tree/master/detectron2/layers/csrc/ROIAlign +// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved
+#include
+#include
+
+#include "pytorch_cpp_helper.hpp"
+#include "pytorch_device_registry.hpp"
+
+// implementation taken from Caffe2
+template <typename T>
+struct PreCalc {
+  int pos1;
+  int pos2;
+  int pos3;
+  int pos4;
+  T w1;
+  T w2;
+  T w3;
+  T w4;
+};
+
+template <typename T>
+void pre_calc_for_bilinear_interpolate(
+    const int height, const int width, const int pooled_height,
+    const int pooled_width, const int iy_upper, const int ix_upper,
+    T roi_start_h, T roi_start_w, T bin_size_h, T bin_size_w,
+    int roi_bin_grid_h, int roi_bin_grid_w,
+    std::vector<PreCalc<T>>& pre_calc) {
+  int pre_calc_index = 0;
+  for (int ph = 0; ph < pooled_height; ph++) {
+    for (int pw = 0; pw < pooled_width; pw++) {
+      for (int iy = 0; iy < iy_upper; iy++) {
+        const T yy = roi_start_h + ph * bin_size_h +
+                     static_cast<T>(iy + .5f) * bin_size_h /
+                         static_cast<T>(roi_bin_grid_h);  // e.g., 0.5, 1.5
+        for (int ix = 0; ix < ix_upper; ix++) {
+          const T xx = roi_start_w + pw * bin_size_w +
+                       static_cast<T>(ix + .5f) * bin_size_w /
+                           static_cast<T>(roi_bin_grid_w);
+
+          T x = xx;
+          T y = yy;
+          // deal with: inverse elements are out of feature map boundary
+          if (y < -1.0 || y > height || x < -1.0 || x > width) {
+            // empty
+            PreCalc<T> pc;
+            pc.pos1 = 0;
+            pc.pos2 = 0;
+            pc.pos3 = 0;
+            pc.pos4 = 0;
+            pc.w1 = 0;
+            pc.w2 = 0;
+            pc.w3 = 0;
+            pc.w4 = 0;
+            pre_calc[pre_calc_index] = pc;
+            pre_calc_index += 1;
+            continue;
+          }
+
+          if (y <= 0) {
+            y = 0;
+          }
+          if (x <= 0) {
+            x = 0;
+          }
+
+          int y_low = (int)y;
+          int x_low = (int)x;
+          int y_high;
+          int x_high;
+
+          if (y_low >= height - 1) {
+            y_high = y_low = height - 1;
+            y = (T)y_low;
+          } else {
+            y_high = y_low + 1;
+          }
+
+          if (x_low >= width - 1) {
+            x_high = x_low = width - 1;
+            x = (T)x_low;
+          } else {
+            x_high = x_low + 1;
+          }
+
+          T ly = y - y_low;
+          T lx = x - x_low;
+          T hy = 1. - ly, hx = 1. - lx;
+          T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
+
+          // save weights and indices
+          PreCalc<T> pc;
+          pc.pos1 = y_low * width + x_low;
+          pc.pos2 = y_low * width + x_high;
+          pc.pos3 = y_high * width + x_low;
+          pc.pos4 = y_high * width + x_high;
+          pc.w1 = w1;
+          pc.w2 = w2;
+          pc.w3 = w3;
+          pc.w4 = w4;
+          pre_calc[pre_calc_index] = pc;
+
+          pre_calc_index += 1;
+        }
+      }
+    }
+  }
+}
+
+template <typename T>
+void ROIAlignForward(const int nthreads, const T* input, const T* rois,
+                     T* output, T* argmax_y, T* argmax_x,
+                     const int pooled_height, const int pooled_width,
+                     const T spatial_scale, const int sampling_ratio,
+                     const int pool_mode,  // 0 - max pool, 1 - avg pool
+                     const bool aligned, const int channels, const int height,
+                     const int width) {
+  int n_rois = nthreads / channels / pooled_width / pooled_height;
+  // (n, c, ph, pw) is an element in the pooled output
+  // can be parallelized using omp
+  // #pragma omp parallel for num_threads(32)
+  for (int n = 0; n < n_rois; n++) {
+    int index_n = n * channels * pooled_width * pooled_height;
+
+    const T* offset_rois = rois + n * 5;
+    int roi_batch_ind = offset_rois[0];
+
+    // Do not use rounding; this implementation detail is critical
+    T offset = aligned ? (T)0.5 : (T)0.0;
+    T roi_start_w = offset_rois[1] * spatial_scale - offset;
+    T roi_start_h = offset_rois[2] * spatial_scale - offset;
+    T roi_end_w = offset_rois[3] * spatial_scale - offset;
+    T roi_end_h = offset_rois[4] * spatial_scale - offset;
+
+    T roi_width = roi_end_w - roi_start_w;
+    T roi_height = roi_end_h - roi_start_h;
+    if (aligned) {
+      AT_ASSERTM(roi_width >= 0 && roi_height >= 0,
+                 "ROIs in ROIAlign do not have non-negative size!");
+    } else {  // for backward-compatibility only
+      roi_width = std::max(roi_width, (T)1.);
+      roi_height = std::max(roi_height, (T)1.);
+    }
+    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
+    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
+
+    // We use roi_bin_grid to sample the grid and mimic integral
+    int roi_bin_grid_h = (sampling_ratio > 0)
+                             ? sampling_ratio
+                             : ceilf(roi_height / pooled_height);  // e.g., = 2
+    int roi_bin_grid_w =
+        (sampling_ratio > 0) ? sampling_ratio : ceilf(roi_width / pooled_width);
+
+    // When the grid is empty, output zeros == 0/1, instead of NaN.
+    const T count = std::max(roi_bin_grid_h * roi_bin_grid_w, 1);  // e.g. = 4
+
+    // we want to precalculate indices and weights shared by all channels,
+    // this is the key point of optimization
+    std::vector<PreCalc<T>> pre_calc(roi_bin_grid_h * roi_bin_grid_w *
+                                     pooled_width * pooled_height);
+    pre_calc_for_bilinear_interpolate(
+        height, width, pooled_height, pooled_width, roi_bin_grid_h,
+        roi_bin_grid_w, roi_start_h, roi_start_w, bin_size_h, bin_size_w,
+        roi_bin_grid_h, roi_bin_grid_w, pre_calc);
+
+    for (int c = 0; c < channels; c++) {
+      int index_n_c = index_n + c * pooled_width * pooled_height;
+      const T* offset_input =
+          input + (roi_batch_ind * channels + c) * height * width;
+      int pre_calc_index = 0;
+
+      for (int ph = 0; ph < pooled_height; ph++) {
+        for (int pw = 0; pw < pooled_width; pw++) {
+          int index = index_n_c + ph * pooled_width + pw;
+
+          T output_val = 0.;
+          T maxval = -10000;
+          T maxidx_y = -1.f, maxidx_x = -1.f;
+          for (int iy = 0; iy < roi_bin_grid_h; iy++) {
+            const T y = roi_start_h + ph * bin_size_h +
+                        static_cast<T>(iy + .5f) * bin_size_h /
+                            static_cast<T>(roi_bin_grid_h);
+            for (int ix = 0; ix < roi_bin_grid_w; ix++) {
+              const T x = roi_start_w + pw * bin_size_w +
+                          static_cast<T>(ix + .5f) * bin_size_w /
+                              static_cast<T>(roi_bin_grid_w);
+              PreCalc<T> pc = pre_calc[pre_calc_index];
+              T val = pc.w1 * offset_input[pc.pos1] +
+                      pc.w2 * offset_input[pc.pos2] +
+                      pc.w3 * offset_input[pc.pos3] +
+                      pc.w4 * offset_input[pc.pos4];
+              if (val > maxval) {
+                maxval = val;
+                maxidx_y = y;
+                maxidx_x = x;
+              }
+              output_val += val;
+              pre_calc_index += 1;
+            }
+          }
+          if (pool_mode == 0) {
+            // We do max pooling inside a bin
+            output[index] = maxval;
+            argmax_y[index] = maxidx_y;
+            argmax_x[index] = maxidx_x;
+          } else if (pool_mode == 1) {
+            // We do average (integral) pooling inside a bin
+            output[index] = output_val / count;
+          }  // if
+        }    // for pw
+      }      // for ph
+    }        // for c
+  }          // for n
+}
+
+template <typename T>
+void bilinear_interpolate_gradient(const int height, const int width, T y, T x,
+                                   T& w1, T& w2, T& w3, T& w4, int& x_low,
+                                   int& x_high, int& y_low, int& y_high,
+                                   const int index /* index for debug only*/) {
+  // deal with cases that inverse elements are out of feature map boundary
+  if (y < -1.0 || y > height || x < -1.0 || x > width) {
+    // empty
+    w1 = w2 = w3 = w4 = 0.;
+    x_low = x_high = y_low = y_high = -1;
+    return;
+  }
+
+  if (y <= 0) y = 0;
+  if (x <= 0) x = 0;
+
+  y_low = (int)y;
+  x_low = (int)x;
+
+  if (y_low >= height - 1) {
+    y_high = y_low = height - 1;
+    y = (T)y_low;
+  } else {
+    y_high = y_low + 1;
+  }
+
+  if (x_low >= width - 1) {
+    x_high = x_low = width - 1;
+    x = (T)x_low;
+  } else {
+    x_high = x_low + 1;
+  }
+
+  T ly = y - y_low;
+  T lx = x - x_low;
+  T hy = 1. - ly, hx = 1. - lx;
+
+  // reference in forward
+  // T v1 = input[y_low * width + x_low];
+  // T v2 = input[y_low * width + x_high];
+  // T v3 = input[y_high * width + x_low];
+  // T v4 = input[y_high * width + x_high];
+  // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
+
+  w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
+
+  return;
+}
+
+template <typename T>
+inline void add(T* address, const T& val) {
+  *address += val;
+}
+
+template <typename T>
+void ROIAlignBackward(const int nthreads, const T* grad_output, const T* rois,
+                      const T* argmax_y, const T* argmax_x, T* grad_input,
+                      const int pooled_height, const int pooled_width,
+                      const T spatial_scale, const int sampling_ratio,
+                      const int pool_mode,  // 0 - max pool, 1 - avg pool
+                      const bool aligned, const int channels, const int height,
+                      const int width, const int n_stride, const int c_stride,
+                      const int h_stride, const int w_stride) {
+  for (int index = 0; index < nthreads; index++) {
+    // (n, c, ph, pw) is an element in the pooled output
+    int pw = index % pooled_width;
+    int ph = (index / pooled_width) % pooled_height;
+    int c = (index / pooled_width / pooled_height) % channels;
+    int n = index / pooled_width / pooled_height / channels;
+
+    const T* offset_rois = rois + n * 5;
+    int roi_batch_ind = offset_rois[0];
+
+    // Do not use rounding; this implementation detail is critical
+    T offset = aligned ? (T)0.5 : (T)0.0;
+    T roi_start_w = offset_rois[1] * spatial_scale - offset;
+    T roi_start_h = offset_rois[2] * spatial_scale - offset;
+    T roi_end_w = offset_rois[3] * spatial_scale - offset;
+    T roi_end_h = offset_rois[4] * spatial_scale - offset;
+
+    T roi_width = roi_end_w - roi_start_w;
+    T roi_height = roi_end_h - roi_start_h;
+    if (aligned) {
+      AT_ASSERTM(roi_width >= 0 && roi_height >= 0,
+                 "ROIs in ROIAlign do not have non-negative size!");
+    } else {  // for backward-compatibility only
+      roi_width = std::max(roi_width, (T)1.);
+      roi_height = std::max(roi_height, (T)1.);
+    }
+    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
+    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
+
+    T* offset_grad_input =
+        grad_input + ((roi_batch_ind * channels + c) * height * width);
+
+    int output_offset = n * n_stride + c * c_stride;
+    const T* offset_grad_output = grad_output + output_offset;
+    const T grad_output_this_bin =
+        offset_grad_output[ph * h_stride + pw * w_stride];
+
+    if (pool_mode == 0) {
+      // We do max pooling inside a bin
+      T y = argmax_y[index], x = argmax_x[index];
+      if (y != -1.f) {
+        T w1, w2, w3, w4;
+        int x_low, x_high, y_low, y_high;
+        bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4,
+                                      x_low, x_high, y_low, y_high, index);
+
+        T g1 = grad_output_this_bin * w1;
+        T g2 = grad_output_this_bin * w2;
+        T g3 = grad_output_this_bin * w3;
+        T g4 = grad_output_this_bin * w4;
+
+        if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
+          // atomic add is not needed for now since it is single threaded
+          add(offset_grad_input + y_low * width + x_low, static_cast<T>(g1));
+          add(offset_grad_input + y_low * width + x_high, static_cast<T>(g2));
+          add(offset_grad_input + y_high * width + x_low, static_cast<T>(g3));
+          add(offset_grad_input + y_high * width + x_high, static_cast<T>(g4));
+        }  // if
+      }    // mode
+    } else if (pool_mode == 1) {
+      // We do average (integral) pooling inside a bin
+      // We use roi_bin_grid to sample the grid and mimic integral
+      int roi_bin_grid_h =
+          (sampling_ratio > 0)
+              ? sampling_ratio
+              : ceilf(roi_height / pooled_height);  // e.g., = 2
+      int roi_bin_grid_w = (sampling_ratio > 0)
+                               ? sampling_ratio
+                               : ceilf(roi_width / pooled_width);
+
+      const T count = roi_bin_grid_h * roi_bin_grid_w;  // e.g. = 4
+      for (int iy = 0; iy < roi_bin_grid_h; iy++) {
+        const T y = roi_start_h + ph * bin_size_h +
+                    static_cast<T>(iy + .5f) * bin_size_h /
+                        static_cast<T>(roi_bin_grid_h);  // e.g., 0.5, 1.5
+        for (int ix = 0; ix < roi_bin_grid_w; ix++) {
+          const T x = roi_start_w + pw * bin_size_w +
+                      static_cast<T>(ix + .5f) * bin_size_w /
+                          static_cast<T>(roi_bin_grid_w);
+
+          T w1, w2, w3, w4;
+          int x_low, x_high, y_low, y_high;
+
+          bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4,
+                                        x_low, x_high, y_low, y_high, index);
+
+          T g1 = grad_output_this_bin * w1 / count;
+          T g2 = grad_output_this_bin * w2 / count;
+          T g3 = grad_output_this_bin * w3 / count;
+          T g4 = grad_output_this_bin * w4 / count;
+
+          if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
+            // atomic add is not needed for now since it is single threaded
+            add(offset_grad_input + y_low * width + x_low, static_cast<T>(g1));
+            add(offset_grad_input + y_low * width + x_high, static_cast<T>(g2));
+            add(offset_grad_input + y_high * width + x_low, static_cast<T>(g3));
+            add(offset_grad_input + y_high * width + x_high,
+                static_cast<T>(g4));
+          }  // if
+        }    // ix
+      }      // iy
+    }        // mode
+  }          // for
+}  // ROIAlignBackward
+
+void ROIAlignForwardCPULauncher(Tensor input, Tensor rois, Tensor output,
+                                Tensor argmax_y, Tensor argmax_x,
+                                int aligned_height, int aligned_width,
+                                float spatial_scale, int sampling_ratio,
+                                int pool_mode, bool aligned) {
+  int output_size = output.numel();
+  int channels = input.size(1);
+  int height = input.size(2);
+  int width = input.size(3);
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      input.scalar_type(), "ROIAlign_forward", [&] {
+        ROIAlignForward<scalar_t>(
+            output_size, input.data_ptr<scalar_t>(), rois.data_ptr<scalar_t>(),
+            output.data_ptr<scalar_t>(), argmax_y.data_ptr<scalar_t>(),
+            argmax_x.data_ptr<scalar_t>(), aligned_height, aligned_width,
+            static_cast<scalar_t>(spatial_scale), sampling_ratio, pool_mode,
+            aligned, channels, height, width);
+      });
+}
+
+void ROIAlignBackwardCPULauncher(Tensor grad_output, Tensor rois,
+                                 Tensor argmax_y, Tensor argmax_x,
+                                 Tensor grad_input, int aligned_height,
+                                 int aligned_width, float spatial_scale,
+                                 int sampling_ratio, int pool_mode,
+                                 bool aligned) {
+  int output_size = grad_output.numel();
+  int channels = grad_input.size(1);
+  int height = grad_input.size(2);
+  int width = grad_input.size(3);
+
+  // get stride values to ensure indexing into gradients is correct.
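+  // NOTE (illustrative, not in the upstream source): grad_output may be
+  // non-contiguous, so the flat offset of element (n, c, ph, pw) is recovered
+  // from the tensor's strides rather than assumed NCHW-packed:
+  //   offset = n * n_stride + c * c_stride + ph * h_stride + pw * w_stride
+  // e.g. for a contiguous tensor of shape (N, C, H, W) = (2, 3, 7, 7) the
+  // strides are (147, 49, 7, 1), so (n, c, ph, pw) = (1, 2, 0, 3) maps to
+  // 147 + 98 + 0 + 3 = 248.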
+  int n_stride = grad_output.stride(0);
+  int c_stride = grad_output.stride(1);
+  int h_stride = grad_output.stride(2);
+  int w_stride = grad_output.stride(3);
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      grad_output.scalar_type(), "ROIAlign_backward", [&] {
+        ROIAlignBackward<scalar_t>(
+            output_size, grad_output.data_ptr<scalar_t>(),
+            rois.data_ptr<scalar_t>(), argmax_y.data_ptr<scalar_t>(),
+            argmax_x.data_ptr<scalar_t>(), grad_input.data_ptr<scalar_t>(),
+            aligned_height, aligned_width,
+            static_cast<scalar_t>(spatial_scale), sampling_ratio, pool_mode,
+            aligned, channels, height, width, n_stride, c_stride, h_stride,
+            w_stride);
+      });
+}
+
+void roi_align_forward_cpu(Tensor input, Tensor rois, Tensor output,
+                           Tensor argmax_y, Tensor argmax_x,
+                           int aligned_height, int aligned_width,
+                           float spatial_scale, int sampling_ratio,
+                           int pool_mode, bool aligned) {
+  ROIAlignForwardCPULauncher(input, rois, output, argmax_y, argmax_x,
+                             aligned_height, aligned_width, spatial_scale,
+                             sampling_ratio, pool_mode, aligned);
+}
+
+void roi_align_backward_cpu(Tensor grad_output, Tensor rois, Tensor argmax_y,
+                            Tensor argmax_x, Tensor grad_input,
+                            int aligned_height, int aligned_width,
+                            float spatial_scale, int sampling_ratio,
+                            int pool_mode, bool aligned) {
+  ROIAlignBackwardCPULauncher(grad_output, rois, argmax_y, argmax_x,
+                              grad_input, aligned_height, aligned_width,
+                              spatial_scale, sampling_ratio, pool_mode,
+                              aligned);
+}
+
+void roi_align_forward_impl(Tensor input, Tensor rois, Tensor output,
+                            Tensor argmax_y, Tensor argmax_x,
+                            int aligned_height, int aligned_width,
+                            float spatial_scale, int sampling_ratio,
+                            int pool_mode, bool aligned);
+
+void roi_align_backward_impl(Tensor grad_output, Tensor rois, Tensor argmax_y,
+                             Tensor argmax_x, Tensor grad_input,
+                             int aligned_height, int aligned_width,
+                             float spatial_scale, int sampling_ratio,
+                             int pool_mode, bool aligned);
+
+REGISTER_DEVICE_IMPL(roi_align_forward_impl, CPU, roi_align_forward_cpu);
+REGISTER_DEVICE_IMPL(roi_align_backward_impl, CPU, roi_align_backward_cpu);
diff --git a/mmcv/ops/csrc/pytorch/cpu/roi_align_rotated.cpp b/mmcv/ops/csrc/pytorch/cpu/roi_align_rotated.cpp
new file mode 100644
index 0000000..0f7511b
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/cpu/roi_align_rotated.cpp
@@ -0,0 +1,458 @@
+// Modified from
+// https://github.com/facebookresearch/detectron2/tree/master/detectron2/layers/csrc/ROIAlignRotated
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+#include
+#include
+
+#include "pytorch_cpp_helper.hpp"
+#include "pytorch_device_registry.hpp"
+
+// implementation taken from Caffe2
+template <typename T>
+struct PreCalc {
+  int pos1;
+  int pos2;
+  int pos3;
+  int pos4;
+  T w1;
+  T w2;
+  T w3;
+  T w4;
+};
+
+template <typename T>
+void pre_calc_for_bilinear_interpolate(
+    const int height, const int width, const int pooled_height,
+    const int pooled_width, const int iy_upper, const int ix_upper,
+    T roi_start_h, T roi_start_w, T bin_size_h, T bin_size_w,
+    int roi_bin_grid_h, int roi_bin_grid_w, T roi_center_h, T roi_center_w,
+    T cos_theta, T sin_theta, std::vector<PreCalc<T>>& pre_calc) {
+  int pre_calc_index = 0;
+  for (int ph = 0; ph < pooled_height; ph++) {
+    for (int pw = 0; pw < pooled_width; pw++) {
+      for (int iy = 0; iy < iy_upper; iy++) {
+        const T yy = roi_start_h + ph * bin_size_h +
+                     static_cast<T>(iy + .5f) * bin_size_h /
+                         static_cast<T>(roi_bin_grid_h);  // e.g., 0.5, 1.5
+        for (int ix = 0; ix < ix_upper; ix++) {
+          const T xx = roi_start_w + pw * bin_size_w +
+                       static_cast<T>(ix + .5f) * bin_size_w /
+                           static_cast<T>(roi_bin_grid_w);
+
+          // Rotate by theta around the center and translate
+          // In image space, (y, x) is the order for Right Handed System,
+          // and this is essentially multiplying the point by a rotation
+          // matrix to rotate it counterclockwise through angle theta.
+          T y = yy * cos_theta - xx * sin_theta + roi_center_h;
+          T x = yy * sin_theta + xx * cos_theta + roi_center_w;
+          // deal with: inverse elements are out of feature map boundary
+          if (y < -1.0 || y > height || x < -1.0 || x > width) {
+            // empty
+            PreCalc<T> pc;
+            pc.pos1 = 0;
+            pc.pos2 = 0;
+            pc.pos3 = 0;
+            pc.pos4 = 0;
+            pc.w1 = 0;
+            pc.w2 = 0;
+            pc.w3 = 0;
+            pc.w4 = 0;
+            pre_calc[pre_calc_index] = pc;
+            pre_calc_index += 1;
+            continue;
+          }
+
+          if (y < 0) {
+            y = 0;
+          }
+          if (x < 0) {
+            x = 0;
+          }
+
+          int y_low = (int)y;
+          int x_low = (int)x;
+          int y_high;
+          int x_high;
+
+          if (y_low >= height - 1) {
+            y_high = y_low = height - 1;
+            y = (T)y_low;
+          } else {
+            y_high = y_low + 1;
+          }
+
+          if (x_low >= width - 1) {
+            x_high = x_low = width - 1;
+            x = (T)x_low;
+          } else {
+            x_high = x_low + 1;
+          }
+
+          T ly = y - y_low;
+          T lx = x - x_low;
+          T hy = 1. - ly, hx = 1. - lx;
+          T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
+
+          // save weights and indices
+          PreCalc<T> pc;
+          pc.pos1 = y_low * width + x_low;
+          pc.pos2 = y_low * width + x_high;
+          pc.pos3 = y_high * width + x_low;
+          pc.pos4 = y_high * width + x_high;
+          pc.w1 = w1;
+          pc.w2 = w2;
+          pc.w3 = w3;
+          pc.w4 = w4;
+          pre_calc[pre_calc_index] = pc;
+
+          pre_calc_index += 1;
+        }
+      }
+    }
+  }
+}
+
+template <typename T>
+void ROIAlignRotatedForward(const int nthreads, const T* input,
+                            const T& spatial_scale, const bool aligned,
+                            const bool clockwise, const int channels,
+                            const int height, const int width,
+                            const int pooled_height, const int pooled_width,
+                            const int sampling_ratio, const T* rois,
+                            T* output) {
+  int n_rois = nthreads / channels / pooled_width / pooled_height;
+  // (n, c, ph, pw) is an element in the pooled output
+  // can be parallelized using omp
+  // #pragma omp parallel for num_threads(32)
+  for (int n = 0; n < n_rois; n++) {
+    int index_n = n * channels * pooled_width * pooled_height;
+
+    const T* current_roi = rois + n * 6;
+    int roi_batch_ind = current_roi[0];
+
+    // Do not use rounding; this implementation detail is critical
+    T offset = aligned ? (T)0.5 : (T)0.0;
+    T roi_center_w = current_roi[1] * spatial_scale - offset;
+    T roi_center_h = current_roi[2] * spatial_scale - offset;
+    T roi_width = current_roi[3] * spatial_scale;
+    T roi_height = current_roi[4] * spatial_scale;
+    T theta = current_roi[5];
+    if (clockwise) {
+      theta = -theta;  // If clockwise, the angle needs to be reversed.
+    }
+    T cos_theta = cos(theta);
+    T sin_theta = sin(theta);
+
+    if (aligned) {
+      AT_ASSERTM(roi_width >= 0 && roi_height >= 0,
+                 "ROIs in ROIAlignRotated do not have non-negative size!");
+    } else {  // for backward-compatibility only
+      roi_width = std::max(roi_width, (T)1.);
+      roi_height = std::max(roi_height, (T)1.);
+    }
+
+    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
+    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
+
+    // We use roi_bin_grid to sample the grid and mimic integral
+    int roi_bin_grid_h = (sampling_ratio > 0)
+                             ? sampling_ratio
+                             : ceilf(roi_height / pooled_height);  // e.g., = 2
+    int roi_bin_grid_w =
+        (sampling_ratio > 0) ? sampling_ratio : ceilf(roi_width / pooled_width);
+
+    // We do average (integral) pooling inside a bin
+    const T count = std::max(roi_bin_grid_h * roi_bin_grid_w, 1);  // e.g. = 4
+
+    // we want to precalculate indices and weights shared by all channels,
+    // this is the key point of optimization
+    std::vector<PreCalc<T>> pre_calc(roi_bin_grid_h * roi_bin_grid_w *
+                                     pooled_width * pooled_height);
+
+    // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
+    // Appropriate translation needs to be applied after.
+    T roi_start_h = -roi_height / 2.0;
+    T roi_start_w = -roi_width / 2.0;
+
+    pre_calc_for_bilinear_interpolate(
+        height, width, pooled_height, pooled_width, roi_bin_grid_h,
+        roi_bin_grid_w, roi_start_h, roi_start_w, bin_size_h, bin_size_w,
+        roi_bin_grid_h, roi_bin_grid_w, roi_center_h, roi_center_w, cos_theta,
+        sin_theta, pre_calc);
+
+    for (int c = 0; c < channels; c++) {
+      int index_n_c = index_n + c * pooled_width * pooled_height;
+      const T* offset_input =
+          input + (roi_batch_ind * channels + c) * height * width;
+      int pre_calc_index = 0;
+
+      for (int ph = 0; ph < pooled_height; ph++) {
+        for (int pw = 0; pw < pooled_width; pw++) {
+          int index = index_n_c + ph * pooled_width + pw;
+
+          T output_val = 0.;
+          for (int iy = 0; iy < roi_bin_grid_h; iy++) {
+            for (int ix = 0; ix < roi_bin_grid_w; ix++) {
+              PreCalc<T> pc = pre_calc[pre_calc_index];
+              output_val += pc.w1 * offset_input[pc.pos1] +
+                            pc.w2 * offset_input[pc.pos2] +
+                            pc.w3 * offset_input[pc.pos3] +
+                            pc.w4 * offset_input[pc.pos4];
+
+              pre_calc_index += 1;
+            }
+          }
+          output_val /= count;
+
+          output[index] = output_val;
+        }  // for pw
+      }    // for ph
+    }      // for c
+  }        // for n
+}
+
+template <typename T>
+void bilinear_interpolate_gradient(const int height, const int width, T y, T x,
+                                   T& w1, T& w2, T& w3, T& w4, int& x_low,
+                                   int& x_high, int& y_low, int& y_high) {
+  // deal with cases that inverse elements are out of feature map boundary
+  if (y < -1.0 || y > height || x < -1.0 || x > width) {
+    // empty
+    w1 = w2 = w3 = w4 = 0.;
+    x_low = x_high = y_low = y_high = -1;
+    return;
+  }
+
+  if (y < 0) {
+    y = 0;
+  }
+
+  if (x < 0) {
+    x = 0;
+  }
+
+  y_low = (int)y;
+  x_low = (int)x;
+
+  if (y_low >= height - 1) {
+    y_high = y_low = height - 1;
+    y = (T)y_low;
+  } else {
+    y_high = y_low + 1;
+  }
+
+  if (x_low >= width - 1) {
+    x_high = x_low = width - 1;
+    x = (T)x_low;
+  } else {
+    x_high = x_low + 1;
+  }
+
+  T ly = y - y_low;
+  T lx = x - x_low;
+  T hy = 1. - ly, hx = 1. - lx;
+
+  // reference in forward
+  // T v1 = input[y_low * width + x_low];
+  // T v2 = input[y_low * width + x_high];
+  // T v3 = input[y_high * width + x_low];
+  // T v4 = input[y_high * width + x_high];
+  // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
+
+  w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
+
+  return;
+}
+
+template <typename T>
+inline void add(T* address, const T& val) {
+  *address += val;
+}
+
+template <typename T>
+void ROIAlignRotatedBackward(
+    const int nthreads,
+    // may not be contiguous. should index using n_stride, etc
+    const T* grad_output, const T& spatial_scale, const bool aligned,
+    const bool clockwise, const int channels, const int height,
+    const int width, const int pooled_height, const int pooled_width,
+    const int sampling_ratio, T* grad_input, const T* rois,
+    const int n_stride, const int c_stride, const int h_stride,
+    const int w_stride) {
+  for (int index = 0; index < nthreads; index++) {
+    // (n, c, ph, pw) is an element in the pooled output
+    int pw = index % pooled_width;
+    int ph = (index / pooled_width) % pooled_height;
+    int c = (index / pooled_width / pooled_height) % channels;
+    int n = index / pooled_width / pooled_height / channels;
+
+    const T* current_roi = rois + n * 6;
+    int roi_batch_ind = current_roi[0];
+
+    // Do not use rounding; this implementation detail is critical
+    T offset = aligned ? (T)0.5 : (T)0.0;
+    T roi_center_w = current_roi[1] * spatial_scale - offset;
+    T roi_center_h = current_roi[2] * spatial_scale - offset;
+    T roi_width = current_roi[3] * spatial_scale;
+    T roi_height = current_roi[4] * spatial_scale;
+    T theta = current_roi[5];
+    if (clockwise) {
+      theta = -theta;  // If clockwise, the angle needs to be reversed.
+    }
+    T cos_theta = cos(theta);
+    T sin_theta = sin(theta);
+
+    if (aligned) {
+      AT_ASSERTM(roi_width >= 0 && roi_height >= 0,
+                 "ROIs in ROIAlignRotated do not have non-negative size!");
+    } else {  // for backward-compatibility only
+      roi_width = std::max(roi_width, (T)1.);
+      roi_height = std::max(roi_height, (T)1.);
+    }
+
+    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
+    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
+
+    T* offset_grad_input =
+        grad_input + ((roi_batch_ind * channels + c) * height * width);
+
+    int output_offset = n * n_stride + c * c_stride;
+    const T* offset_grad_output = grad_output + output_offset;
+    const T grad_output_this_bin =
+        offset_grad_output[ph * h_stride + pw * w_stride];
+
+    // We use roi_bin_grid to sample the grid and mimic integral
+    int roi_bin_grid_h = (sampling_ratio > 0)
+                             ? sampling_ratio
+                             : ceilf(roi_height / pooled_height);  // e.g., = 2
+    int roi_bin_grid_w =
+        (sampling_ratio > 0) ? sampling_ratio : ceilf(roi_width / pooled_width);
+
+    // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
+    // Appropriate translation needs to be applied after.
+    T roi_start_h = -roi_height / 2.0;
+    T roi_start_w = -roi_width / 2.0;
+
+    // We do average (integral) pooling inside a bin
+    const T count = roi_bin_grid_h * roi_bin_grid_w;  // e.g. = 4
+
+    for (int iy = 0; iy < roi_bin_grid_h; iy++) {
+      const T yy = roi_start_h + ph * bin_size_h +
+                   static_cast<T>(iy + .5f) * bin_size_h /
+                       static_cast<T>(roi_bin_grid_h);  // e.g., 0.5, 1.5
+      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
+        const T xx = roi_start_w + pw * bin_size_w +
+                     static_cast<T>(ix + .5f) * bin_size_w /
+                         static_cast<T>(roi_bin_grid_w);
+
+        // Rotate by theta around the center and translate
+        T y = yy * cos_theta - xx * sin_theta + roi_center_h;
+        T x = yy * sin_theta + xx * cos_theta + roi_center_w;
+
+        T w1, w2, w3, w4;
+        int x_low, x_high, y_low, y_high;
+
+        bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4,
+                                      x_low, x_high, y_low, y_high);
+
+        T g1 = grad_output_this_bin * w1 / count;
+        T g2 = grad_output_this_bin * w2 / count;
+        T g3 = grad_output_this_bin * w3 / count;
+        T g4 = grad_output_this_bin * w4 / count;
+
+        if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
+          // atomic add is not needed for now since it is single threaded
+          add(offset_grad_input + y_low * width + x_low, static_cast<T>(g1));
+          add(offset_grad_input + y_low * width + x_high, static_cast<T>(g2));
+          add(offset_grad_input + y_high * width + x_low, static_cast<T>(g3));
+          add(offset_grad_input + y_high * width + x_high, static_cast<T>(g4));
+        }  // if
+      }    // ix
+    }      // iy
+  }  // for
+}  // ROIAlignRotatedBackward
+
+void ROIAlignRotatedForwardCPULauncher(Tensor input, Tensor rois,
+                                       Tensor output, int aligned_height,
+                                       int aligned_width, float spatial_scale,
+                                       int sampling_ratio, bool aligned,
+                                       bool clockwise) {
+  int output_size = output.numel();
+  int channels = input.size(1);
+  int height = input.size(2);
+  int width = input.size(3);
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      input.scalar_type(), "ROIAlignRotated_forward", [&] {
+        ROIAlignRotatedForward<scalar_t>(
+            output_size, input.data_ptr<scalar_t>(),
+            static_cast<scalar_t>(spatial_scale), aligned, clockwise,
+            channels, height, width, aligned_height, aligned_width,
+            sampling_ratio, rois.data_ptr<scalar_t>(),
+            output.data_ptr<scalar_t>());
+      });
+}
+
+void ROIAlignRotatedBackwardCPULauncher(Tensor grad_output, Tensor rois,
+                                        Tensor grad_input, int aligned_height,
+                                        int aligned_width, float spatial_scale,
+                                        int sampling_ratio, bool aligned,
+                                        bool clockwise) {
+  int output_size = grad_output.numel();
+  int channels = grad_input.size(1);
+  int height = grad_input.size(2);
+  int width = grad_input.size(3);
+
+  // get stride values to ensure indexing into gradients is correct.
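+  // NOTE (illustrative, not in the upstream source): the rotated backward
+  // kernel above re-derives each sample position with the same rotation used
+  // in the forward pass. A bin-grid point (xx, yy), expressed relative to the
+  // RoI center, maps into feature-map space by
+  //   y = yy * cos(theta) - xx * sin(theta) + roi_center_h
+  //   x = yy * sin(theta) + xx * cos(theta) + roi_center_w
+  // e.g. theta = pi/2 sends (xx, yy) = (1, 0) to (x, y) =
+  // (roi_center_w, roi_center_h - 1), i.e. one pixel up in image coordinates,
+  // which is a counterclockwise rotation in the (y, x) right-handed frame.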
+  int n_stride = grad_output.stride(0);
+  int c_stride = grad_output.stride(1);
+  int h_stride = grad_output.stride(2);
+  int w_stride = grad_output.stride(3);
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      grad_output.scalar_type(), "ROIAlignRotated_backward", [&] {
+        ROIAlignRotatedBackward<scalar_t>(
+            grad_output.numel(), grad_output.data_ptr<scalar_t>(),
+            static_cast<scalar_t>(spatial_scale), aligned, clockwise,
+            channels, height, width, aligned_height, aligned_width,
+            sampling_ratio, grad_input.data_ptr<scalar_t>(),
+            rois.data_ptr<scalar_t>(), n_stride, c_stride, h_stride,
+            w_stride);
+      });
+}
+
+void roi_align_rotated_forward_cpu(Tensor input, Tensor rois, Tensor output,
+                                   int aligned_height, int aligned_width,
+                                   float spatial_scale, int sampling_ratio,
+                                   bool aligned, bool clockwise) {
+  ROIAlignRotatedForwardCPULauncher(input, rois, output, aligned_height,
+                                    aligned_width, spatial_scale,
+                                    sampling_ratio, aligned, clockwise);
+}
+
+void roi_align_rotated_backward_cpu(Tensor top_grad, Tensor rois,
+                                    Tensor bottom_grad, int aligned_height,
+                                    int aligned_width, float spatial_scale,
+                                    int sampling_ratio, bool aligned,
+                                    bool clockwise) {
+  // Number of ROIs
+  int num_rois = rois.size(0);
+  int size_rois = rois.size(1);
+  if (size_rois != 6) {
+    AT_ERROR("wrong roi size");
+  }
+  ROIAlignRotatedBackwardCPULauncher(
+      top_grad, rois, bottom_grad, aligned_height, aligned_width,
+      spatial_scale, sampling_ratio, aligned, clockwise);
+}
+
+void roi_align_rotated_forward_impl(Tensor features, Tensor rois,
+                                    Tensor output, int aligned_height,
+                                    int aligned_width, float spatial_scale,
+                                    int sample_ratio, bool aligned,
+                                    bool clockwise);
+
+void roi_align_rotated_backward_impl(Tensor top_grad, Tensor rois,
+                                     Tensor bottom_grad, int aligned_height,
+                                     int aligned_width, float spatial_scale,
+                                     int sample_ratio, bool aligned,
+                                     bool clockwise);
+REGISTER_DEVICE_IMPL(roi_align_rotated_forward_impl, CPU,
+                     roi_align_rotated_forward_cpu);
+REGISTER_DEVICE_IMPL(roi_align_rotated_backward_impl, CPU,
+                     roi_align_rotated_backward_cpu);
diff --git a/mmcv/ops/csrc/pytorch/cpu/voxelization.cpp b/mmcv/ops/csrc/pytorch/cpu/voxelization.cpp
new file mode 100644
index 0000000..25cc2b5
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/cpu/voxelization.cpp
@@ -0,0 +1,170 @@
+// Copyright (c) OpenMMLab. All rights reserved.
+#include "pytorch_cpp_helper.hpp"
+#include "pytorch_device_registry.hpp"
+
+template <typename T, typename T_int>
+void dynamic_voxelize_forward_cpu_kernel(
+    const torch::TensorAccessor<T, 2> points,
+    torch::TensorAccessor<T_int, 2> coors,
+    const std::vector<float> voxel_size, const std::vector<float> coors_range,
+    const std::vector<int> grid_size, const int num_points,
+    const int num_features, const int NDim) {
+  const int ndim_minus_1 = NDim - 1;
+  bool failed = false;
+  // int coor[NDim];
+  int* coor = new int[NDim]();
+  int c;
+
+  for (int i = 0; i < num_points; ++i) {
+    failed = false;
+    for (int j = 0; j < NDim; ++j) {
+      c = floor((points[i][j] - coors_range[j]) / voxel_size[j]);
+      // necessary to rm points out of range
+      if ((c < 0 || c >= grid_size[j])) {
+        failed = true;
+        break;
+      }
+      coor[ndim_minus_1 - j] = c;
+    }
+
+    if (failed)
+      memset(&coors[i][0], -1, NDim * sizeof(T_int));
+    else
+      memcpy(&coors[i][0], &coor[0], NDim * sizeof(T_int));
+  }
+
+  delete[] coor;
+}
+
+template <typename T, typename T_int>
+void hard_voxelize_forward_cpu_kernel(
+    const torch::TensorAccessor<T, 2> points,
+    torch::TensorAccessor<T, 3> voxels, torch::TensorAccessor<T_int, 2> coors,
+    torch::TensorAccessor<T_int, 1> num_points_per_voxel,
+    torch::TensorAccessor<T_int, 3> coor_to_voxelidx, int& voxel_num,
+    const std::vector<float> voxel_size, const std::vector<float> coors_range,
+    const std::vector<int> grid_size, const int max_points,
+    const int max_voxels, const int num_points, const int num_features,
+    const int NDim) {
+  // declare a temp coors
+  at::Tensor temp_coors = at::zeros(
+      {num_points, NDim}, at::TensorOptions().dtype(at::kInt).device(at::kCPU));
+
+  // First use dynamic voxelization to get coors,
+  // then check max points/voxels constraints
+  dynamic_voxelize_forward_cpu_kernel<T, int>(
+      points, temp_coors.accessor<int, 2>(), voxel_size, coors_range,
+      grid_size, num_points, num_features, NDim);
+
+  int voxelidx, num;
+  auto coor = temp_coors.accessor<int, 2>();
+
+  for (int i = 0; i < num_points; ++i) {
+    // T_int* coor = temp_coors.data_ptr() + i * NDim;
+
+    if (coor[i][0] == -1) continue;
+
+    voxelidx = coor_to_voxelidx[coor[i][0]][coor[i][1]][coor[i][2]];
+
+    // record voxel
+    if (voxelidx == -1) {
+      voxelidx = voxel_num;
+      if (max_voxels != -1 && voxel_num >= max_voxels) continue;
+      voxel_num += 1;
+
+      coor_to_voxelidx[coor[i][0]][coor[i][1]][coor[i][2]] = voxelidx;
+      memcpy(&coors[voxelidx][0], &coor[i][0], NDim * sizeof(T_int));
+    }
+
+    // put points into voxel
+    num = num_points_per_voxel[voxelidx];
+    if (max_points == -1 || num < max_points) {
+      memcpy(&voxels[voxelidx][num][0], &points[i][0],
+             num_features * sizeof(T));
+      num_points_per_voxel[voxelidx] += 1;
+    }
+  }
+
+  return;
+}
+
+void dynamic_voxelize_forward_cpu(const at::Tensor& points, at::Tensor& coors,
+                                  const std::vector<float> voxel_size,
+                                  const std::vector<float> coors_range,
+                                  const int NDim = 3) {
+  // check device
+  AT_ASSERTM(points.device().is_cpu(), "points must be a CPU tensor");
+
+  std::vector<int> grid_size(NDim);
+  const int num_points = points.size(0);
+  const int num_features = points.size(1);
+
+  for (int i = 0; i < NDim; ++i) {
+    grid_size[i] =
+        round((coors_range[NDim + i] - coors_range[i]) / voxel_size[i]);
+  }
+
+  // coors, num_points_per_voxel, coor_to_voxelidx are int Tensor
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      points.scalar_type(), "dynamic_voxelize_forward_cpu_kernel", [&] {
+        dynamic_voxelize_forward_cpu_kernel<scalar_t, int>(
+            points.accessor<scalar_t, 2>(), coors.accessor<int, 2>(),
+            voxel_size, coors_range, grid_size, num_points, num_features,
+            NDim);
+      });
+}
+
+int hard_voxelize_forward_cpu(const at::Tensor& points, at::Tensor& voxels,
+                              at::Tensor& coors,
+                              at::Tensor& num_points_per_voxel,
+                              const std::vector<float> voxel_size,
+                              const std::vector<float> coors_range,
+                              const int max_points, const int max_voxels,
+                              const int NDim = 3) {
+  // current version takes about 0.02s ~ 0.03s for one frame on cpu
+  // check device
+  AT_ASSERTM(points.device().is_cpu(), "points must be a CPU tensor");
+
+  std::vector<int> grid_size(NDim);
+  const int num_points = points.size(0);
+  const int num_features = points.size(1);
+
+  for (int i = 0; i < NDim; ++i) {
+    grid_size[i] =
+        round((coors_range[NDim + i] - coors_range[i]) / voxel_size[i]);
+  }
+
+  // coors, num_points_per_voxel, coor_to_voxelidx are int Tensor
+  // printf("cpu coor_to_voxelidx size: [%d, %d, %d]\n", grid_size[2],
+  // grid_size[1], grid_size[0]);
+  at::Tensor coor_to_voxelidx =
+      -at::ones({grid_size[2], grid_size[1], grid_size[0]}, coors.options());
+
+  int voxel_num = 0;
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      points.scalar_type(), "hard_voxelize_forward_cpu_kernel", [&] {
+        hard_voxelize_forward_cpu_kernel<scalar_t, int>(
+            points.accessor<scalar_t, 2>(), voxels.accessor<scalar_t, 3>(),
+            coors.accessor<int, 2>(), num_points_per_voxel.accessor<int, 1>(),
+            coor_to_voxelidx.accessor<int, 3>(), voxel_num, voxel_size,
+            coors_range, grid_size, max_points, max_voxels, num_points,
+            num_features, NDim);
+      });
+
+  return voxel_num;
+}
+
+int hard_voxelize_forward_impl(const at::Tensor& points, at::Tensor& voxels,
+                               at::Tensor& coors,
+                               at::Tensor& num_points_per_voxel,
+                               const std::vector<float> voxel_size,
+                               const std::vector<float> coors_range,
+                               const int max_points, const int max_voxels,
+                               const int NDim);
+
+void dynamic_voxelize_forward_impl(const at::Tensor& points, at::Tensor& coors,
+                                   const std::vector<float> voxel_size,
+                                   const std::vector<float> coors_range,
+                                   const int NDim);
+REGISTER_DEVICE_IMPL(hard_voxelize_forward_impl, CPU,
+                     hard_voxelize_forward_cpu);
+REGISTER_DEVICE_IMPL(dynamic_voxelize_forward_impl, CPU,
+                     dynamic_voxelize_forward_cpu);
diff --git a/mmcv/ops/csrc/pytorch/cuda/assign_score_withk_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/assign_score_withk_cuda.cu
new file mode 100644
index 0000000..c4e684b
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/cuda/assign_score_withk_cuda.cu
@@ -0,0 +1,66 @@
+// Modified from
+// https://github.com/CVMI-Lab/PAConv/tree/main/scene_seg/lib/paconv_lib/src/gpu
+#include
+#include
+
+#include "assign_score_withk_cuda_kernel.cuh"
+#include "pytorch_cuda_helper.hpp"
+
+void AssignScoreWithKForwardCUDAKernelLauncher(
+    int B, int N0, int N1, int M, int K, int O, int aggregate,
+    const Tensor& points, const Tensor& centers, const Tensor& scores,
+    const Tensor& knn_idx, Tensor& output) {
+  at::cuda::CUDAGuard device_guard(points.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  dim3 blocks(DIVUP(B * O * N1 * K, THREADS_PER_BLOCK));
+  dim3 threads(THREADS_PER_BLOCK);
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      points.scalar_type(), "assign_score_withk_forward_cuda_kernel", [&] {
+        assign_score_withk_forward_cuda_kernel<scalar_t>
+            <<<blocks, threads, 0, stream>>>(
+                B, N0, N1, M, K, O, aggregate, points.data_ptr<scalar_t>(),
+                centers.data_ptr<scalar_t>(), scores.data_ptr<scalar_t>(),
+                knn_idx.data_ptr<int64_t>(), output.data_ptr<scalar_t>());
+      });
+
+  AT_CUDA_CHECK(cudaGetLastError());
+}
+
+void AssignScoreWithKBackwardCUDAKernelLauncher(
+    int B, int N0, int N1, int M, int K, int O, int aggregate,
+    const Tensor& grad_out, const Tensor& points, const Tensor& centers,
+    const Tensor& scores, const Tensor& knn_idx, Tensor& grad_points,
+    Tensor& grad_centers, Tensor& grad_scores) {
+  at::cuda::CUDAGuard device_guard(grad_out.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  dim3 blocks1(DIVUP(B * M * O, THREADS_PER_BLOCK));
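+  // NOTE (illustrative, not in the upstream launcher): DIVUP is mmcv's
+  // ceil-division helper, so the grid is sized to cover all elements with one
+  // thread each. e.g. with B * M * O = 1000 and THREADS_PER_BLOCK = 256,
+  // DIVUP gives ceil(1000 / 256) = 4 blocks, and the last block simply leaves
+  // its trailing 24 threads idle.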
dim3 threads1(THREADS_PER_BLOCK); + dim3 blocks2(DIVUP(B * N1 * K * M, THREADS_PER_BLOCK)); + dim3 threads2(THREADS_PER_BLOCK); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad_out.scalar_type(), "assign_score_withk_points_backward_cuda_kernel", + [&] { + assign_score_withk_points_backward_cuda_kernel + <<>>( + B, N0, N1, M, K, O, aggregate, grad_out.data_ptr(), + scores.data_ptr(), knn_idx.data_ptr(), + grad_points.data_ptr(), + grad_centers.data_ptr()); + }); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad_out.scalar_type(), "assign_score_withk_scores_backward_cuda_kernel", + [&] { + assign_score_withk_scores_backward_cuda_kernel + <<>>( + B, N0, N1, M, K, O, aggregate, grad_out.data_ptr(), + points.data_ptr(), centers.data_ptr(), + knn_idx.data_ptr(), grad_scores.data_ptr()); + }); + + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/ball_query_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/ball_query_cuda.cu new file mode 100644 index 0000000..f5f5f39 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/ball_query_cuda.cu @@ -0,0 +1,38 @@ +// Copyright (c) OpenMMLab. All rights reserved +// Modified from +// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/ball_query_gpu.cu + +#include +#include +#include + +#include "ball_query_cuda_kernel.cuh" +#include "pytorch_cuda_helper.hpp" + +void BallQueryForwardCUDAKernelLauncher(int b, int n, int m, float min_radius, + float max_radius, int nsample, + const Tensor new_xyz, const Tensor xyz, + Tensor idx) { + // new_xyz: (B, M, 3) + // xyz: (B, N, 3) + // output: + // idx: (B, M, nsample) + + at::cuda::CUDAGuard device_guard(new_xyz.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // blockIdx.x(col), blockIdx.y(row) + dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); + dim3 threads(THREADS_PER_BLOCK); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + new_xyz.scalar_type(), "ball_query_forward_cuda_kernel", [&] { + ball_query_forward_cuda_kernel + <<>>( + b, n, m, min_radius, max_radius, nsample, + new_xyz.data_ptr(), xyz.data_ptr(), + idx.data_ptr()); + }); + + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/bbox_overlaps_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/bbox_overlaps_cuda.cu new file mode 100644 index 0000000..16679c7 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/bbox_overlaps_cuda.cu @@ -0,0 +1,23 @@ +// Copyright (c) OpenMMLab. All rights reserved +#include "bbox_overlaps_cuda_kernel.cuh" +#include "pytorch_cuda_helper.hpp" + +void BBoxOverlapsCUDAKernelLauncher(const Tensor bboxes1, const Tensor bboxes2, + Tensor ious, const int mode, + const bool aligned, const int offset) { + int output_size = ious.numel(); + int num_bbox1 = bboxes1.size(0); + int num_bbox2 = bboxes2.size(0); + + at::cuda::CUDAGuard device_guard(bboxes1.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + bboxes1.scalar_type(), "bbox_overlaps_cuda_kernel", ([&] { + bbox_overlaps_cuda_kernel + <<>>( + bboxes1.data_ptr(), bboxes2.data_ptr(), + ious.data_ptr(), num_bbox1, num_bbox2, mode, aligned, + offset); + })); + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/border_align_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/border_align_cuda.cu new file mode 100644 index 0000000..3aeefea --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/border_align_cuda.cu @@ -0,0 +1,68 @@ +// Copyright (c) OpenMMLab. 
All rights reserved +#include "border_align_cuda_kernel.cuh" +#include "pytorch_cuda_helper.hpp" + +void BorderAlignForwardCUDAKernelLauncher(const Tensor &input, + const Tensor &boxes, Tensor output, + Tensor argmax_idx, + const int pool_size) { + // shape assertion + AT_ASSERTM(input.ndimension() == 4, + "non-empty 4D(batch mode) tensor expected for input feature"); + AT_ASSERTM(boxes.ndimension() == 3, + "boxes must be 3D tensor with size of [B, H*W, 4]"); + + int batch_size = input.size(0); + int feat_channels = input.size(1); + int channels = feat_channels / 4; + int height = input.size(2); + int width = input.size(3); + // shape [N, box_size, 4] for boxes. (x1, y1, x2, y2) format + int box_size = boxes.size(1); + // shape [N, channels, box_size, 4] for output + int nthreads = batch_size * channels * box_size; + + at::cuda::CUDAGuard device_guard(input.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 block(128, 4); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), "border_align_forward_cuda_kernel", [&] { + border_align_forward_cuda_kernel + <<>>( + nthreads, input.data_ptr(), + boxes.data_ptr(), output.data_ptr(), + argmax_idx.data_ptr(), channels, box_size, height, width, + pool_size); + }); + + AT_CUDA_CHECK(cudaGetLastError()); +} + +void BorderAlignBackwardCUDAKernelLauncher(const Tensor &grad_output, + const Tensor &boxes, + const Tensor &argmax_idx, + Tensor grad_input, + const int pool_size) { + int batch_size = grad_input.size(0); + int feat_channels = grad_input.size(1); + int channels = feat_channels / 4; + int height = grad_input.size(2); + int width = grad_input.size(3); + int box_size = boxes.size(1); + int nthreads = batch_size * channels * box_size; + + at::cuda::CUDAGuard device_guard(grad_output.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 block(128, 4); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad_output.scalar_type(), "border_align_backward_cuda_kernel", [&] { + border_align_backward_cuda_kernel + <<>>( + nthreads, grad_output.data_ptr(), + boxes.data_ptr(), argmax_idx.data_ptr(), + grad_input.data_ptr(), channels, box_size, height, + width, pool_size); + }); + + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/box_iou_rotated_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/box_iou_rotated_cuda.cu new file mode 100644 index 0000000..3c13e06 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/box_iou_rotated_cuda.cu @@ -0,0 +1,25 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +// modified from +// https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu +#include "box_iou_rotated_cuda.cuh" +#include "pytorch_cuda_helper.hpp" + +void box_iou_rotated_cuda(const Tensor boxes1, const Tensor boxes2, Tensor ious, + const int mode_flag, const bool aligned) { + using scalar_t = float; + AT_ASSERTM(boxes1.is_cuda(), "boxes1 must be a CUDA tensor"); + AT_ASSERTM(boxes2.is_cuda(), "boxes2 must be a CUDA tensor"); + + int output_size = ious.numel(); + int num_boxes1 = boxes1.size(0); + int num_boxes2 = boxes2.size(0); + + at::cuda::CUDAGuard device_guard(boxes1.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + box_iou_rotated_cuda_kernel + <<>>( + num_boxes1, num_boxes2, boxes1.data_ptr(), + boxes2.data_ptr(), (scalar_t*)ious.data_ptr(), + mode_flag, aligned); + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/carafe_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/carafe_cuda.cu new file mode 100644 index 0000000..984e734 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/carafe_cuda.cu @@ -0,0 +1,180 @@ +// Copyright (c) OpenMMLab. All rights reserved +#include "carafe_cuda_kernel.cuh" +#include "pytorch_cuda_helper.hpp" + +void CARAFEForwardCUDAKernelLauncher(const Tensor features, const Tensor masks, + Tensor rfeatures, Tensor routput, + Tensor rmasks, Tensor output, + const int kernel_size, + const int group_size, + const int scale_factor) { + const int batch_size = output.size(0); + const int channels = output.size(1); + const int output_height = output.size(2); + const int output_width = output.size(3); + + const int input_height = features.size(2); + const int input_width = features.size(3); + + const int mask_channels = masks.size(1); + + rfeatures.resize_({batch_size, input_height, input_width, channels}); + routput.resize_({batch_size, output_height, output_width, channels}); + rmasks.resize_({batch_size, output_height, output_width, mask_channels}); + + // one warp per pixel + at::cuda::CUDAGuard device_guard(features.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + features.scalar_type(), "NCHW2NHWC_Feature", ([&] { + const scalar_t *bottom_data = features.data_ptr(); + scalar_t *top_data = rfeatures.data_ptr(); + const int dh = divideUP(channels, kTileDim); + const int dw = divideUP(input_height * input_width, kTileDim); + BatchTranspose2DCUDAKernel + <<>>( + batch_size, channels, input_height * input_width, dh, dw, + bottom_data, top_data); + })); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + features.scalar_type(), "NCHW2NHWC_Masks", ([&] { + const scalar_t *bottom_data = masks.data_ptr(); + scalar_t *top_data = rmasks.data_ptr(); + const int dh = divideUP(mask_channels, kTileDim); + const int dw = divideUP(output_height * output_width, kTileDim); + BatchTranspose2DCUDAKernel + <<>>( + batch_size, mask_channels, output_height * output_width, dh, dw, + bottom_data, top_data); + })); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + features.scalar_type(), "CARAFELaucherForward", ([&] { + const int num_kernels = + batch_size * output_height * output_width * THREADS_PER_PIXEL; + const scalar_t *bottom_data = rfeatures.data_ptr(); + const scalar_t *bottom_masks = rmasks.data_ptr(); + scalar_t *top_data = routput.data_ptr(); + + CARAFEForward<<>>( + num_kernels, bottom_data, bottom_masks, kernel_size, group_size, + scale_factor, channels, input_height, input_width, output_height, + output_width, 
mask_channels, top_data); + })); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + features.scalar_type(), "NHWC2NCHW", ([&] { + const scalar_t *bottom_data = routput.data_ptr(); + scalar_t *top_data = output.data_ptr(); + const int dh = divideUP(output_height * output_width, kTileDim); + const int dw = divideUP(channels, kTileDim); + BatchTranspose2DCUDAKernel + <<>>( + batch_size, output_height * output_width, channels, dh, dw, + bottom_data, top_data); + })); + + AT_CUDA_CHECK(cudaGetLastError()); +} + +void CARAFEBackwardCUDAKernelLauncher( + const Tensor top_grad, const Tensor rfeatures, const Tensor masks, + Tensor rtop_grad, Tensor rbottom_grad_hs, Tensor rbottom_grad, + Tensor rmask_grad, Tensor bottom_grad, Tensor mask_grad, + const int kernel_size, const int group_size, const int scale_factor) { + const int batch_size = top_grad.size(0); + const int channels = top_grad.size(1); + const int output_height = top_grad.size(2); + const int output_width = top_grad.size(3); + + const int input_height = bottom_grad.size(2); + const int input_width = bottom_grad.size(3); + + const int mask_channels = masks.size(1); + + rtop_grad.resize_({batch_size, output_height, output_width, channels}); + rbottom_grad.resize_({batch_size, input_height, input_width, channels}); + rbottom_grad_hs.resize_({batch_size, output_height, output_width, channels}); + rmask_grad.resize_({batch_size, output_height, output_width, mask_channels}); + + at::cuda::CUDAGuard device_guard(top_grad.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + top_grad.scalar_type(), "NCHW2NHWC_Top_Grad", ([&] { + const scalar_t *bottom_data = top_grad.data_ptr(); + scalar_t *top_data = rtop_grad.data_ptr(); + const int dh = divideUP(channels, kTileDim); + const int dw = divideUP(output_height * output_width, kTileDim); + BatchTranspose2DCUDAKernel + <<>>( + batch_size, channels, output_height * output_width, dh, dw, + bottom_data, top_data); + })); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + top_grad.scalar_type(), "CARAFELaucherBackward_Feature", ([&] { + const int num_kernels = + batch_size * output_height * output_width * THREADS_PER_PIXEL; + const scalar_t *top_diff = rtop_grad.data_ptr(); + const scalar_t *bottom_masks = masks.data_ptr(); + scalar_t *bottom_diff = rbottom_grad_hs.data_ptr(); + + CARAFEBackward_Feature + <<>>(num_kernels, top_diff, bottom_masks, kernel_size, + group_size, scale_factor, channels, input_height, + input_width, output_height, output_width, + mask_channels, bottom_diff); + })); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + top_grad.scalar_type(), "FeatureSum", ([&] { + const int num_kernels = + batch_size * input_height * input_width * THREADS_PER_PIXEL; + const scalar_t *bottom_diff_hs = rbottom_grad_hs.data_ptr(); + scalar_t *bottom_diff = rbottom_grad.data_ptr(); + + FeatureSum + <<>>(num_kernels, bottom_diff_hs, scale_factor, channels, + input_height, input_width, bottom_diff); + })); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + top_grad.scalar_type(), "NHWC2NCHW_Bottom_Grad", ([&] { + const scalar_t *bottom_data = rbottom_grad.data_ptr(); + scalar_t *top_data = bottom_grad.data_ptr(); + const int dh = divideUP(input_height * input_width, kTileDim); + const int dw = divideUP(channels, kTileDim); + BatchTranspose2DCUDAKernel + <<>>( + batch_size, input_height * input_width, channels, dh, dw, + bottom_data, top_data); + })); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + top_grad.scalar_type(), "CARAFELaucherBackward_Mask", ([&] { + const int num_kernels = 
batch_size * output_height * output_width * + mask_channels * WARP_SIZE; + const scalar_t *top_diff = rtop_grad.data_ptr(); + const scalar_t *bottom_data = rfeatures.data_ptr(); + scalar_t *mask_diff = rmask_grad.data_ptr(); + + CARAFEBackward_Mask + <<>>(num_kernels, top_diff, bottom_data, kernel_size, + group_size, scale_factor, channels, input_height, + input_width, output_height, output_width, + mask_channels, mask_diff); + })); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + top_grad.scalar_type(), "NHWC2NCHW_Mask_Grad", ([&] { + const scalar_t *bottom_data = rmask_grad.data_ptr(); + scalar_t *top_data = mask_grad.data_ptr(); + const int dh = divideUP(output_height * output_width, kTileDim); + const int dw = divideUP(mask_channels, kTileDim); + BatchTranspose2DCUDAKernel + <<>>( + batch_size, output_height * output_width, mask_channels, dh, dw, + bottom_data, top_data); + })); + + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/carafe_naive_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/carafe_naive_cuda.cu new file mode 100644 index 0000000..2fc5667 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/carafe_naive_cuda.cu @@ -0,0 +1,52 @@ +// Copyright (c) OpenMMLab. All rights reserved +#include "carafe_naive_cuda_kernel.cuh" +#include "pytorch_cuda_helper.hpp" + +void CARAFENAIVEForwardCUDAKernelLauncher(const Tensor features, + const Tensor masks, Tensor output, + const int kernel_size, + const int group_size, + const int scale_factor) { + int output_size = output.numel(); + int channels = output.size(1); + int height = output.size(2); + int width = output.size(3); + + at::cuda::CUDAGuard device_guard(features.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + features.scalar_type(), "CARAFENAIVEForward", ([&] { + carafe_naive_forward_cuda_kernel + <<>>( + output_size, features.data_ptr(), + masks.data_ptr(), output.data_ptr(), + kernel_size, group_size, scale_factor, channels, height, width); + })); + + AT_CUDA_CHECK(cudaGetLastError()); +} + +void CARAFENAIVEBackwardCUDAKernelLauncher( + const Tensor top_grad, const Tensor features, const Tensor masks, + Tensor bottom_grad, Tensor mask_grad, const int kernel_size, + const int group_size, const int scale_factor) { + int output_size = top_grad.numel(); + int channels = top_grad.size(1); + int height = top_grad.size(2); + int width = top_grad.size(3); + + at::cuda::CUDAGuard device_guard(top_grad.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + top_grad.scalar_type(), "CARAFENAIVEBackward", ([&] { + carafe_naive_backward_cuda_kernel + <<>>( + output_size, top_grad.data_ptr(), + features.data_ptr(), masks.data_ptr(), + bottom_grad.data_ptr(), + mask_grad.data_ptr(), kernel_size, group_size, + scale_factor, channels, height, width); + })); + + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/correlation_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/correlation_cuda.cu new file mode 100644 index 0000000..56d2e64 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/correlation_cuda.cu @@ -0,0 +1,93 @@ +// Copyright (c) OpenMMLab. All rights reserved. 
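+// NOTE (illustrative sketch, not part of this patch): the output-size
+// arithmetic used by the correlation launcher below follows the standard
+// dilated-convolution formula. With a dilated kernel extent
+// dk = (k - 1) * dilation + 1, the output length along one axis is
+// (i + 2 * pad - dk) / stride + 1.
+//
+//   // e.g. iH = 64, kH = 3, dilationH = 2  ->  dilatedKH = 5;
+//   // padH = 2, dH = 1  ->  oH = (64 + 4 - 5) / 1 + 1 = 64.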
+// Modified from +// https://github.com/ClementPinard/Pytorch-Correlation-extension/blob/master/Correlation_Module/correlation_cuda_kernel.cu +// Original licence: Under MIT License + +#include "correlation_cuda.cuh" +#include "pytorch_cuda_helper.hpp" + +void CorrelationForwardCUDAKernelLauncher(Tensor input1, Tensor input2, + Tensor output, int kH, int kW, + int patchH, int patchW, int padH, + int padW, int dilationH, + int dilationW, int dilation_patchH, + int dilation_patchW, int dH, int dW) { + const int batch_size = input1.size(0); + const int iH = input1.size(2); + const int iW = input1.size(3); + const int dilatedKH = (kH - 1) * dilationH + 1; + const int dilatedKW = (kW - 1) * dilationW + 1; + + const auto oH = (iH + 2 * padH - dilatedKH) / dH + 1; + const auto oW = (iW + 2 * padW - dilatedKW) / dW + 1; + + auto trInput1 = input1.permute({0, 2, 3, 1}).contiguous(); + auto trInput2 = input2.permute({0, 2, 3, 1}).contiguous(); + + const int threads = THREADS_FORWARD; + const dim3 blocks(batch_size, oH, oW); + + at::cuda::CUDAGuard device_guard(input1.device()); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input1.scalar_type(), "correlation_forward_cuda", ([&] { + TensorAcc4R trInput1_acc = + trInput1.packed_accessor32(); + TensorAcc4R trInput2_acc = + trInput2.packed_accessor32(); + TensorAcc5R output_acc = + output.packed_accessor32(); + + correlation_forward_cuda_kernel + <<>>( + trInput1_acc, trInput2_acc, output_acc, kH, kW, patchH, patchW, + padH, padW, dilationH, dilationW, dilation_patchH, + dilation_patchW, dH, dW); + })); +} + +void CorrelationBackwardCUDAKernelLauncher( + Tensor grad_output, Tensor input1, Tensor input2, Tensor grad_input1, + Tensor grad_input2, int kH, int kW, int patchH, int patchW, int padH, + int padW, int dilationH, int dilationW, int dilation_patchH, + int dilation_patchW, int dH, int dW) { + const int batch_size = input1.size(0); + const int iH = input1.size(2); + const int iW = input1.size(3); + const int C = input1.size(1); + + const dim3 blocks(C, iH, iW); + const dim3 threads(THREADS_BACKWARD, THREADS_BACKWARD); + + at::cuda::CUDAGuard device_guard(input1.device()); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input1.scalar_type(), "correlation_backward_cuda", ([&] { + TensorAcc4R input1_acc = + input1.packed_accessor32(); + TensorAcc4R input2_acc = + input2.packed_accessor32(); + TensorAcc4R grad_input1_acc = + grad_input1.packed_accessor32(); + TensorAcc4R grad_input2_acc = + grad_input2.packed_accessor32(); + TensorAcc5R grad_output_acc = + grad_output.packed_accessor32(); + + for (int n = 0; n < batch_size; ++n) { + correlation_backward_cuda_kernel_input1 + <<>>( + grad_output_acc, input2_acc, grad_input1_acc, kH, kW, patchH, + patchW, padH, padW, dilationH, dilationW, dilation_patchH, + dilation_patchW, dH, dW, n); + } + + for (int n = 0; n < batch_size; ++n) { + correlation_backward_cuda_kernel_input2 + <<>>( + grad_output_acc, input1_acc, grad_input2_acc, kH, kW, patchH, + patchW, padH, padW, dilationH, dilationW, dilation_patchH, + dilation_patchW, dH, dW, n); + } + })); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/cudabind.cpp b/mmcv/ops/csrc/pytorch/cuda/cudabind.cpp new file mode 100644 index 0000000..2e7a3f5 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/cudabind.cpp @@ -0,0 +1,1364 @@ +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void AssignScoreWithKForwardCUDAKernelLauncher( + int B, int N0, int N1, int M, int K, int O, int aggregate, + const Tensor& points, const Tensor& centers, const Tensor& scores, + 
const Tensor& knn_idx, Tensor& output); + +void AssignScoreWithKBackwardCUDAKernelLauncher( + int B, int N0, int N1, int M, int K, int O, int aggregate, + const Tensor& grad_out, const Tensor& points, const Tensor& centers, + const Tensor& scores, const Tensor& knn_idx, Tensor& grad_points, + Tensor& grad_centers, Tensor& grad_scores); + +void assign_score_withk_forward_cuda(int B, int N0, int N1, int M, int K, int O, + int aggregate, const Tensor& points, + const Tensor& centers, + const Tensor& scores, + const Tensor& knn_idx, Tensor& output) { + AssignScoreWithKForwardCUDAKernelLauncher( + B, N0, N1, M, K, O, aggregate, points, centers, scores, knn_idx, output); +}; + +void assign_score_withk_backward_cuda( + int B, int N0, int N1, int M, int K, int O, int aggregate, + const Tensor& grad_out, const Tensor& points, const Tensor& centers, + const Tensor& scores, const Tensor& knn_idx, Tensor& grad_points, + Tensor& grad_centers, Tensor& grad_scores) { + AssignScoreWithKBackwardCUDAKernelLauncher( + B, N0, N1, M, K, O, aggregate, grad_out, points, centers, scores, knn_idx, + grad_points, grad_centers, grad_scores); +}; + +void assign_score_withk_forward_impl(int B, int N0, int N1, int M, int K, int O, + int aggregate, const Tensor& points, + const Tensor& centers, + const Tensor& scores, + const Tensor& knn_idx, Tensor& output); + +void assign_score_withk_backward_impl( + int B, int N0, int N1, int M, int K, int O, int aggregate, + const Tensor& grad_out, const Tensor& points, const Tensor& centers, + const Tensor& scores, const Tensor& knn_idx, Tensor& grad_points, + Tensor& grad_centers, Tensor& grad_scores); + +REGISTER_DEVICE_IMPL(assign_score_withk_forward_impl, CUDA, + assign_score_withk_forward_cuda); +REGISTER_DEVICE_IMPL(assign_score_withk_backward_impl, CUDA, + assign_score_withk_backward_cuda); + +void BallQueryForwardCUDAKernelLauncher(int b, int n, int m, float min_radius, + float max_radius, int nsample, + const Tensor new_xyz, const Tensor xyz, + Tensor idx); + +void ball_query_forward_cuda(int b, int n, int m, float min_radius, + float max_radius, int nsample, + const Tensor new_xyz, const Tensor xyz, + Tensor idx) { + BallQueryForwardCUDAKernelLauncher(b, n, m, min_radius, max_radius, nsample, + new_xyz, xyz, idx); +}; + +void ball_query_forward_impl(int b, int n, int m, float min_radius, + float max_radius, int nsample, + const Tensor new_xyz, const Tensor xyz, + Tensor idx); +REGISTER_DEVICE_IMPL(ball_query_forward_impl, CUDA, ball_query_forward_cuda); + +void BBoxOverlapsCUDAKernelLauncher(const Tensor bboxes1, const Tensor bboxes2, + Tensor ious, const int mode, + const bool aligned, const int offset); + +void bbox_overlaps_cuda(const Tensor bboxes1, const Tensor bboxes2, Tensor ious, + const int mode, const bool aligned, const int offset) { + BBoxOverlapsCUDAKernelLauncher(bboxes1, bboxes2, ious, mode, aligned, offset); +} + +void bbox_overlaps_impl(const Tensor bboxes1, const Tensor bboxes2, Tensor ious, + const int mode, const bool aligned, const int offset); +REGISTER_DEVICE_IMPL(bbox_overlaps_impl, CUDA, bbox_overlaps_cuda); + +void BorderAlignForwardCUDAKernelLauncher(const Tensor& input, + const Tensor& boxes, Tensor output, + Tensor argmax_idx, + const int pool_size); + +void BorderAlignBackwardCUDAKernelLauncher(const Tensor& grad_output, + const Tensor& boxes, + const Tensor& argmax_idx, + Tensor grad_input, + const int pool_size); + +void border_align_forward_cuda(const Tensor& input, const Tensor& boxes, + Tensor output, Tensor argmax_idx, + const int 
pool_size) { + BorderAlignForwardCUDAKernelLauncher(input, boxes, output, argmax_idx, + pool_size); +} + +void border_align_backward_cuda(const Tensor& grad_output, const Tensor& boxes, + const Tensor& argmax_idx, Tensor grad_input, + const int pool_size) { + BorderAlignBackwardCUDAKernelLauncher(grad_output, boxes, argmax_idx, + grad_input, pool_size); +} + +void border_align_forward_impl(const Tensor& input, const Tensor& boxes, + Tensor output, Tensor argmax_idx, + const int pool_size); + +void border_align_backward_impl(const Tensor& grad_output, const Tensor& boxes, + const Tensor& argmax_idx, Tensor grad_input, + const int pool_size); + +REGISTER_DEVICE_IMPL(border_align_forward_impl, CUDA, + border_align_forward_cuda); +REGISTER_DEVICE_IMPL(border_align_backward_impl, CUDA, + border_align_backward_cuda); + +void box_iou_rotated_cuda(const Tensor boxes1, const Tensor boxes2, Tensor ious, + const int mode_flag, const bool aligned); + +void box_iou_rotated_impl(const Tensor boxes1, const Tensor boxes2, Tensor ious, + const int mode_flag, const bool aligned); +REGISTER_DEVICE_IMPL(box_iou_rotated_impl, CUDA, box_iou_rotated_cuda); + +void CARAFEForwardCUDAKernelLauncher(const Tensor features, const Tensor masks, + Tensor rfeatures, Tensor routput, + Tensor rmasks, Tensor output, + const int kernel_size, + const int group_size, + const int scale_factor); + +void CARAFEBackwardCUDAKernelLauncher( + const Tensor top_grad, const Tensor rfeatures, const Tensor masks, + Tensor rtop_grad, Tensor rbottom_grad_hs, Tensor rbottom_grad, + Tensor rmask_grad, Tensor bottom_grad, Tensor mask_grad, + const int kernel_size, const int group_size, const int scale_factor); + +void carafe_forward_cuda(Tensor features, Tensor masks, Tensor rfeatures, + Tensor routput, Tensor rmasks, Tensor output, + int kernel_size, int group_size, int scale_factor) { + CARAFEForwardCUDAKernelLauncher(features, masks, rfeatures, routput, rmasks, + output, kernel_size, group_size, + scale_factor); +} + +void carafe_backward_cuda(Tensor top_grad, Tensor rfeatures, Tensor masks, + Tensor rtop_grad, Tensor rbottom_grad_hs, + Tensor rbottom_grad, Tensor rmask_grad, + Tensor bottom_grad, Tensor mask_grad, int kernel_size, + int group_size, int scale_factor) { + CARAFEBackwardCUDAKernelLauncher(top_grad, rfeatures, masks, rtop_grad, + rbottom_grad_hs, rbottom_grad, rmask_grad, + bottom_grad, mask_grad, kernel_size, + group_size, scale_factor); +} + +void carafe_forward_impl(Tensor features, Tensor masks, Tensor rfeatures, + Tensor routput, Tensor rmasks, Tensor output, + int kernel_size, int group_size, int scale_factor); + +void carafe_backward_impl(Tensor top_grad, Tensor rfeatures, Tensor masks, + Tensor rtop_grad, Tensor rbottom_grad_hs, + Tensor rbottom_grad, Tensor rmask_grad, + Tensor bottom_grad, Tensor mask_grad, int kernel_size, + int group_size, int scale_factor); + +REGISTER_DEVICE_IMPL(carafe_forward_impl, CUDA, carafe_forward_cuda); +REGISTER_DEVICE_IMPL(carafe_backward_impl, CUDA, carafe_backward_cuda); + +void CARAFENAIVEForwardCUDAKernelLauncher(const Tensor features, + const Tensor masks, Tensor output, + const int kernel_size, + const int group_size, + const int scale_factor); + +void CARAFENAIVEBackwardCUDAKernelLauncher( + const Tensor top_grad, const Tensor features, const Tensor masks, + Tensor bottom_grad, Tensor mask_grad, const int kernel_size, + const int group_size, const int scale_factor); + +void carafe_naive_forward_cuda(Tensor features, Tensor masks, Tensor output, + int kernel_size, int group_size, 
+ int scale_factor) { + CARAFENAIVEForwardCUDAKernelLauncher(features, masks, output, kernel_size, + group_size, scale_factor); +} + +void carafe_naive_backward_cuda(Tensor top_grad, Tensor features, Tensor masks, + Tensor bottom_grad, Tensor mask_grad, + int kernel_size, int group_size, + int scale_factor) { + CARAFENAIVEBackwardCUDAKernelLauncher(top_grad, features, masks, bottom_grad, + mask_grad, kernel_size, group_size, + scale_factor); +} +void carafe_naive_forward_impl(Tensor features, Tensor masks, Tensor output, + int kernel_size, int group_size, + int scale_factor); + +void carafe_naive_backward_impl(Tensor top_grad, Tensor features, Tensor masks, + Tensor bottom_grad, Tensor mask_grad, + int kernel_size, int group_size, + int scale_factor); + +REGISTER_DEVICE_IMPL(carafe_naive_forward_impl, CUDA, + carafe_naive_forward_cuda); +REGISTER_DEVICE_IMPL(carafe_naive_backward_impl, CUDA, + carafe_naive_backward_cuda); + +void CorrelationForwardCUDAKernelLauncher(Tensor input1, Tensor input2, + Tensor output, int kH, int kW, + int patchH, int patchW, int padH, + int padW, int dilationH, + int dilationW, int dilation_patchH, + int dilation_patchW, int dH, int dW); + +void CorrelationBackwardCUDAKernelLauncher(Tensor grad_output, Tensor input1, + Tensor input2, Tensor grad_input1, + Tensor grad_input2, int kH, int kW, + int patchH, int patchW, int padH, + int padW, int dilationH, + int dilationW, int dilation_patchH, + int dilation_patchW, int dH, int dW); + +void correlation_forward_cuda(Tensor input1, Tensor input2, Tensor output, + int kH, int kW, int patchH, int patchW, int padH, + int padW, int dilationH, int dilationW, + int dilation_patchH, int dilation_patchW, int dH, + int dW) { + CorrelationForwardCUDAKernelLauncher( + input1, input2, output, kH, kW, patchH, patchW, padH, padW, dilationH, + dilationW, dilation_patchH, dilation_patchW, dH, dW); +} + +void correlation_backward_cuda(Tensor grad_output, Tensor input1, Tensor input2, + Tensor grad_input1, Tensor grad_input2, int kH, + int kW, int patchH, int patchW, int padH, + int padW, int dilationH, int dilationW, + int dilation_patchH, int dilation_patchW, int dH, + int dW) { + CorrelationBackwardCUDAKernelLauncher( + grad_output, input1, input2, grad_input1, grad_input2, kH, kW, patchH, + patchW, padH, padW, dilationH, dilationW, dilation_patchH, + dilation_patchW, dH, dW); +} + +void correlation_forward_impl(Tensor input1, Tensor input2, Tensor output, + int kH, int kW, int patchH, int patchW, int padH, + int padW, int dilationH, int dilationW, + int dilation_patchH, int dilation_patchW, int dH, + int dW); + +void correlation_backward_impl(Tensor grad_output, Tensor input1, Tensor input2, + Tensor grad_input1, Tensor grad_input2, int kH, + int kW, int patchH, int patchW, int padH, + int padW, int dilationH, int dilationW, + int dilation_patchH, int dilation_patchW, int dH, + int dW); + +REGISTER_DEVICE_IMPL(correlation_forward_impl, CUDA, correlation_forward_cuda); +REGISTER_DEVICE_IMPL(correlation_backward_impl, CUDA, + correlation_backward_cuda); + +void deformable_im2col_cuda(Tensor data_im, Tensor data_offset, + const int channels, const int height, + const int width, const int ksize_h, + const int ksize_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int parallel_imgs, const int deformable_group, + Tensor data_col); + +void deformable_col2im_cuda(Tensor data_col, Tensor data_offset, + const int channels, const int height, + const int 
width, const int ksize_h, + const int ksize_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int parallel_imgs, const int deformable_group, + Tensor grad_im); + +void deformable_col2im_coord_cuda( + Tensor data_col, Tensor data_im, Tensor data_offset, const int channels, + const int height, const int width, const int ksize_h, const int ksize_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, const int parallel_imgs, + const int deformable_group, Tensor grad_offset); + +void deformable_im2col_impl(Tensor data_im, Tensor data_offset, + const int channels, const int height, + const int width, const int ksize_h, + const int ksize_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int parallel_imgs, const int deformable_group, + Tensor data_col); + +void deformable_col2im_impl(Tensor data_col, Tensor data_offset, + const int channels, const int height, + const int width, const int ksize_h, + const int ksize_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int parallel_imgs, const int deformable_group, + Tensor grad_im); + +void deformable_col2im_coord_impl( + Tensor data_col, Tensor data_im, Tensor data_offset, const int channels, + const int height, const int width, const int ksize_h, const int ksize_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, const int parallel_imgs, + const int deformable_group, Tensor grad_offset); + +REGISTER_DEVICE_IMPL(deformable_im2col_impl, CUDA, deformable_im2col_cuda); +REGISTER_DEVICE_IMPL(deformable_col2im_impl, CUDA, deformable_col2im_cuda); +REGISTER_DEVICE_IMPL(deformable_col2im_coord_impl, CUDA, + deformable_col2im_coord_cuda); + +void DeformRoIPoolForwardCUDAKernelLauncher(Tensor input, Tensor rois, + Tensor offset, Tensor output, + int pooled_height, int pooled_width, + float spatial_scale, + int sampling_ratio, float gamma); + +void DeformRoIPoolBackwardCUDAKernelLauncher( + Tensor grad_output, Tensor input, Tensor rois, Tensor offset, + Tensor grad_input, Tensor grad_offset, int pooled_height, int pooled_width, + float spatial_scale, int sampling_ratio, float gamma); + +void deform_roi_pool_forward_cuda(Tensor input, Tensor rois, Tensor offset, + Tensor output, int pooled_height, + int pooled_width, float spatial_scale, + int sampling_ratio, float gamma) { + DeformRoIPoolForwardCUDAKernelLauncher(input, rois, offset, output, + pooled_height, pooled_width, + spatial_scale, sampling_ratio, gamma); +} + +void deform_roi_pool_backward_cuda(Tensor grad_output, Tensor input, + Tensor rois, Tensor offset, + Tensor grad_input, Tensor grad_offset, + int pooled_height, int pooled_width, + float spatial_scale, int sampling_ratio, + float gamma) { + DeformRoIPoolBackwardCUDAKernelLauncher( + grad_output, input, rois, offset, grad_input, grad_offset, pooled_height, + pooled_width, spatial_scale, sampling_ratio, gamma); +} + +void deform_roi_pool_forward_impl(Tensor input, Tensor rois, Tensor offset, + Tensor output, int pooled_height, + int pooled_width, float spatial_scale, + int sampling_ratio, float gamma); + +void deform_roi_pool_backward_impl(Tensor grad_output, Tensor input, + Tensor rois, Tensor offset, + Tensor grad_input, Tensor grad_offset, + int pooled_height, 
int pooled_width,
+                                   float spatial_scale, int sampling_ratio,
+                                   float gamma);
+
+REGISTER_DEVICE_IMPL(deform_roi_pool_forward_impl, CUDA,
+                     deform_roi_pool_forward_cuda);
+REGISTER_DEVICE_IMPL(deform_roi_pool_backward_impl, CUDA,
+                     deform_roi_pool_backward_cuda);
+
+void SigmoidFocalLossForwardCUDAKernelLauncher(Tensor input, Tensor target,
+                                               Tensor weight, Tensor output,
+                                               const float gamma,
+                                               const float alpha);
+
+void SigmoidFocalLossBackwardCUDAKernelLauncher(Tensor input, Tensor target,
+                                                Tensor weight,
+                                                Tensor grad_input,
+                                                const float gamma,
+                                                const float alpha);
+
+void SoftmaxFocalLossForwardCUDAKernelLauncher(Tensor softmax, Tensor target,
+                                               Tensor weight, Tensor output,
+                                               const float gamma,
+                                               const float alpha);
+
+void SoftmaxFocalLossBackwardCUDAKernelLauncher(Tensor softmax, Tensor target,
+                                                Tensor weight, Tensor buff,
+                                                Tensor grad_input,
+                                                const float gamma,
+                                                const float alpha);
+
+void sigmoid_focal_loss_forward_cuda(Tensor input, Tensor target,
+                                     Tensor weight, Tensor output,
+                                     float gamma, float alpha) {
+  SigmoidFocalLossForwardCUDAKernelLauncher(input, target, weight, output,
+                                            gamma, alpha);
+}
+
+void sigmoid_focal_loss_backward_cuda(Tensor input, Tensor target,
+                                      Tensor weight, Tensor grad_input,
+                                      float gamma, float alpha) {
+  SigmoidFocalLossBackwardCUDAKernelLauncher(input, target, weight,
+                                             grad_input, gamma, alpha);
+}
+
+void softmax_focal_loss_forward_cuda(Tensor input, Tensor target,
+                                     Tensor weight, Tensor output,
+                                     float gamma, float alpha) {
+  SoftmaxFocalLossForwardCUDAKernelLauncher(input, target, weight, output,
+                                            gamma, alpha);
+}
+
+void softmax_focal_loss_backward_cuda(Tensor input, Tensor target,
+                                      Tensor weight, Tensor buff,
+                                      Tensor grad_input, float gamma,
+                                      float alpha) {
+  SoftmaxFocalLossBackwardCUDAKernelLauncher(input, target, weight, buff,
+                                             grad_input, gamma, alpha);
+}
+
+void sigmoid_focal_loss_forward_impl(Tensor input, Tensor target,
+                                     Tensor weight, Tensor output,
+                                     float gamma, float alpha);
+
+void sigmoid_focal_loss_backward_impl(Tensor input, Tensor target,
+                                      Tensor weight, Tensor grad_input,
+                                      float gamma, float alpha);
+
+void softmax_focal_loss_forward_impl(Tensor input, Tensor target,
+                                     Tensor weight, Tensor output,
+                                     float gamma, float alpha);
+
+void softmax_focal_loss_backward_impl(Tensor input, Tensor target,
+                                      Tensor weight, Tensor buff,
+                                      Tensor grad_input, float gamma,
+                                      float alpha);
+
+REGISTER_DEVICE_IMPL(sigmoid_focal_loss_forward_impl, CUDA,
+                     sigmoid_focal_loss_forward_cuda);
+REGISTER_DEVICE_IMPL(sigmoid_focal_loss_backward_impl, CUDA,
+                     sigmoid_focal_loss_backward_cuda);
+REGISTER_DEVICE_IMPL(softmax_focal_loss_forward_impl, CUDA,
+                     softmax_focal_loss_forward_cuda);
+REGISTER_DEVICE_IMPL(softmax_focal_loss_backward_impl, CUDA,
+                     softmax_focal_loss_backward_cuda);
+
+void FurthestPointSamplingForwardCUDAKernelLauncher(int b, int n, int m,
+                                                    const float* dataset,
+                                                    float* temp, int* idxs);
+
+void FurthestPointSamplingWithDistForwardCUDAKernelLauncher(
+    int b, int n, int m, const float* dataset, float* temp, int* idxs);
+
+void furthest_point_sampling_forward_cuda(Tensor points_tensor,
+                                          Tensor temp_tensor,
+                                          Tensor idx_tensor, int b, int n,
+                                          int m) {
+  const float* dataset = points_tensor.data_ptr<float>();
+  float* temp = temp_tensor.data_ptr<float>();
+  int* idxs = idx_tensor.data_ptr<int>();
+  FurthestPointSamplingForwardCUDAKernelLauncher(b, n, m, dataset, temp, idxs);
+}
+
+void furthest_point_sampling_with_dist_forward_cuda(Tensor points_tensor,
+                                                    Tensor temp_tensor,
+                                                    Tensor idx_tensor, int b,
+                                                    int n, int m) {
+  const float* dataset = points_tensor.data_ptr<float>();
+  float* temp = temp_tensor.data_ptr<float>();
+  int* idxs = idx_tensor.data_ptr<int>();
+  FurthestPointSamplingWithDistForwardCUDAKernelLauncher(b, n, m, dataset,
+                                                         temp, idxs);
+}
+
+void furthest_point_sampling_forward_impl(Tensor points_tensor,
+                                          Tensor temp_tensor,
+                                          Tensor idx_tensor, int b, int n,
+                                          int m);
+
+void furthest_point_sampling_with_dist_forward_impl(Tensor points_tensor,
+                                                    Tensor temp_tensor,
+                                                    Tensor idx_tensor, int b,
+                                                    int n, int m);
+
+REGISTER_DEVICE_IMPL(furthest_point_sampling_forward_impl, CUDA,
+                     furthest_point_sampling_forward_cuda);
+REGISTER_DEVICE_IMPL(furthest_point_sampling_with_dist_forward_impl, CUDA,
+                     furthest_point_sampling_with_dist_forward_cuda);
+
+torch::Tensor fused_bias_leakyrelu_op(const torch::Tensor& input,
+                                      const torch::Tensor& bias,
+                                      const torch::Tensor& refer, int act,
+                                      int grad, float alpha, float scale);
+
+torch::Tensor fused_bias_leakyrelu_op_impl(const torch::Tensor& input,
+                                           const torch::Tensor& bias,
+                                           const torch::Tensor& refer, int act,
+                                           int grad, float alpha, float scale);
+REGISTER_DEVICE_IMPL(fused_bias_leakyrelu_op_impl, CUDA,
+                     fused_bias_leakyrelu_op);
+
+void GatherPointsForwardCUDAKernelLauncher(int b, int c, int n, int npoints,
+                                           const Tensor points,
+                                           const Tensor idx, Tensor out);
+
+void GatherPointsBackwardCUDAKernelLauncher(int b, int c, int n, int npoints,
+                                            const Tensor grad_out,
+                                            const Tensor idx,
+                                            Tensor grad_points);
+
+void gather_points_forward_cuda(int b, int c, int n, int npoints,
+                                const Tensor points, const Tensor idx,
+                                Tensor out) {
+  GatherPointsForwardCUDAKernelLauncher(b, c, n, npoints, points, idx, out);
+}
+
+void gather_points_backward_cuda(int b, int c, int n, int npoints,
+                                 const Tensor grad_out, const Tensor idx,
+                                 Tensor grad_points) {
+  GatherPointsBackwardCUDAKernelLauncher(b, c, n, npoints, grad_out, idx,
+                                         grad_points);
+}
+
+void gather_points_forward_impl(int b, int c, int n, int npoints,
+                                const Tensor points, const Tensor idx,
+                                Tensor out);
+
+void gather_points_backward_impl(int b, int c, int n, int npoints,
+                                 const Tensor grad_out, const Tensor idx,
+                                 Tensor grad_points);
+
+REGISTER_DEVICE_IMPL(gather_points_forward_impl, CUDA,
+                     gather_points_forward_cuda);
+REGISTER_DEVICE_IMPL(gather_points_backward_impl, CUDA,
+                     gather_points_backward_cuda);
+
+void GroupPointsForwardCUDAKernelLauncher(int b, int c, int n, int npoints,
+                                          int nsample, const Tensor points,
+                                          const Tensor idx, Tensor out);
+
+void GroupPointsBackwardCUDAKernelLauncher(int b, int c, int n, int npoints,
+                                           int nsample, const Tensor grad_out,
+                                           const Tensor idx,
+                                           Tensor grad_points);
+
+void group_points_forward_cuda(int b, int c, int n, int npoints, int nsample,
+                               const Tensor points, const Tensor idx,
+                               Tensor out) {
+  GroupPointsForwardCUDAKernelLauncher(b, c, n, npoints, nsample, points, idx,
+                                       out);
+}
+
+void group_points_backward_cuda(int b, int c, int n, int npoints, int nsample,
+                                const Tensor grad_out, const Tensor idx,
+                                Tensor grad_points) {
+  GroupPointsBackwardCUDAKernelLauncher(b, c, n, npoints, nsample, grad_out,
+                                        idx, grad_points);
+}
+
+void group_points_forward_impl(int b, int c, int n, int npoints, int nsample,
+                               const Tensor points, const Tensor idx,
+                               Tensor out);
+
+void group_points_backward_impl(int b, int c, int n, int npoints, int nsample,
+                                const Tensor grad_out, const Tensor idx,
+                                Tensor grad_points);
+
+REGISTER_DEVICE_IMPL(group_points_forward_impl, CUDA,
+                     group_points_forward_cuda);
+REGISTER_DEVICE_IMPL(group_points_backward_impl, CUDA,
group_points_backward_cuda); + +void IoU3DBoxesOverlapBevForwardCUDAKernelLauncher(const int num_a, + const Tensor boxes_a, + const int num_b, + const Tensor boxes_b, + Tensor ans_overlap); + +void IoU3DBoxesIoUBevForwardCUDAKernelLauncher(const int num_a, + const Tensor boxes_a, + const int num_b, + const Tensor boxes_b, + Tensor ans_iou); + +void IoU3DNMSForwardCUDAKernelLauncher(const Tensor boxes, + unsigned long long* mask, int boxes_num, + float nms_overlap_thresh); + +void IoU3DNMSNormalForwardCUDAKernelLauncher(const Tensor boxes, + unsigned long long* mask, + int boxes_num, + float nms_overlap_thresh); + +void iou3d_boxes_overlap_bev_forward_cuda(const int num_a, const Tensor boxes_a, + const int num_b, const Tensor boxes_b, + Tensor ans_overlap) { + IoU3DBoxesOverlapBevForwardCUDAKernelLauncher(num_a, boxes_a, num_b, boxes_b, + ans_overlap); +}; + +void iou3d_boxes_iou_bev_forward_cuda(const int num_a, const Tensor boxes_a, + const int num_b, const Tensor boxes_b, + Tensor ans_iou) { + IoU3DBoxesIoUBevForwardCUDAKernelLauncher(num_a, boxes_a, num_b, boxes_b, + ans_iou); +}; + +void iou3d_nms_forward_cuda(const Tensor boxes, unsigned long long* mask, + int boxes_num, float nms_overlap_thresh) { + IoU3DNMSForwardCUDAKernelLauncher(boxes, mask, boxes_num, nms_overlap_thresh); +}; + +void iou3d_nms_normal_forward_cuda(const Tensor boxes, unsigned long long* mask, + int boxes_num, float nms_overlap_thresh) { + IoU3DNMSNormalForwardCUDAKernelLauncher(boxes, mask, boxes_num, + nms_overlap_thresh); +}; + +void iou3d_boxes_overlap_bev_forward_impl(const int num_a, const Tensor boxes_a, + const int num_b, const Tensor boxes_b, + Tensor ans_overlap); + +void iou3d_boxes_iou_bev_forward_impl(const int num_a, const Tensor boxes_a, + const int num_b, const Tensor boxes_b, + Tensor ans_iou); + +void iou3d_nms_forward_impl(const Tensor boxes, unsigned long long* mask, + int boxes_num, float nms_overlap_thresh); + +void iou3d_nms_normal_forward_impl(const Tensor boxes, unsigned long long* mask, + int boxes_num, float nms_overlap_thresh); + +REGISTER_DEVICE_IMPL(iou3d_boxes_overlap_bev_forward_impl, CUDA, + iou3d_boxes_overlap_bev_forward_cuda); +REGISTER_DEVICE_IMPL(iou3d_boxes_iou_bev_forward_impl, CUDA, + iou3d_boxes_iou_bev_forward_cuda); +REGISTER_DEVICE_IMPL(iou3d_nms_forward_impl, CUDA, iou3d_nms_forward_cuda); +REGISTER_DEVICE_IMPL(iou3d_nms_normal_forward_impl, CUDA, + iou3d_nms_normal_forward_cuda); + +void KNNForwardCUDAKernelLauncher(int b, int n, int m, int nsample, + const Tensor xyz, const Tensor new_xyz, + Tensor idx, Tensor dist2); + +void knn_forward_cuda(int b, int n, int m, int nsample, const Tensor xyz, + const Tensor new_xyz, Tensor idx, Tensor dist2) { + KNNForwardCUDAKernelLauncher(b, n, m, nsample, xyz, new_xyz, idx, dist2); +} + +void knn_forward_impl(int b, int n, int m, int nsample, const Tensor xyz, + const Tensor new_xyz, Tensor idx, Tensor dist2); +REGISTER_DEVICE_IMPL(knn_forward_impl, CUDA, knn_forward_cuda); + +void MaskedIm2colForwardCUDAKernelLauncher(const Tensor bottom_data, + const Tensor mask_h_idx, + const Tensor mask_w_idx, + Tensor top_data, const int kernel_h, + const int kernel_w, const int pad_h, + const int pad_w); + +void MaskedCol2imForwardCUDAKernelLauncher(const Tensor bottom_data, + const Tensor mask_h_idx, + const Tensor mask_w_idx, + Tensor top_data, const int height, + const int width, const int channels); + +void masked_im2col_forward_cuda(const Tensor im, const Tensor mask_h_idx, + const Tensor mask_w_idx, Tensor col, + const int kernel_h, 
const int kernel_w, + const int pad_h, const int pad_w) { + // im: (n, ic, h, w), kernel size (kh, kw) + // kernel: (oc, ic * kh * kw), col: (kh * kw * ic, ow * oh) + MaskedIm2colForwardCUDAKernelLauncher(im, mask_h_idx, mask_w_idx, col, + kernel_h, kernel_w, pad_h, pad_w); +} + +void masked_col2im_forward_cuda(const Tensor col, const Tensor mask_h_idx, + const Tensor mask_w_idx, Tensor im, int height, + int width, int channels) { + // im: (n, ic, h, w), kernel size (kh, kw) + // kernel: (oc, ic * kh * kh), col: (kh * kw * ic, ow * oh) + MaskedCol2imForwardCUDAKernelLauncher(col, mask_h_idx, mask_w_idx, im, height, + width, channels); +} + +void masked_im2col_forward_impl(const Tensor im, const Tensor mask_h_idx, + const Tensor mask_w_idx, Tensor col, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w); + +void masked_col2im_forward_impl(const Tensor col, const Tensor mask_h_idx, + const Tensor mask_w_idx, Tensor im, int height, + int width, int channels); + +REGISTER_DEVICE_IMPL(masked_im2col_forward_impl, CUDA, + masked_im2col_forward_cuda); +REGISTER_DEVICE_IMPL(masked_col2im_forward_impl, CUDA, + masked_col2im_forward_cuda); + +void modulated_deformable_im2col_cuda( + const Tensor data_im, const Tensor data_offset, const Tensor data_mask, + const int batch_size, const int channels, const int height_im, + const int width_im, const int height_col, const int width_col, + const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, const int dilation_h, + const int dilation_w, const int deformable_group, Tensor data_col); + +void modulated_deformable_col2im_cuda( + const Tensor data_col, const Tensor data_offset, const Tensor data_mask, + const int batch_size, const int channels, const int height_im, + const int width_im, const int height_col, const int width_col, + const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, const int dilation_h, + const int dilation_w, const int deformable_group, Tensor grad_im); + +void modulated_deformable_col2im_coord_cuda( + const Tensor data_col, const Tensor data_im, const Tensor data_offset, + const Tensor data_mask, const int batch_size, const int channels, + const int height_im, const int width_im, const int height_col, + const int width_col, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, const int deformable_group, + Tensor grad_offset, Tensor grad_mask); + +void modulated_deformable_im2col_impl( + const Tensor data_im, const Tensor data_offset, const Tensor data_mask, + const int batch_size, const int channels, const int height_im, + const int width_im, const int height_col, const int width_col, + const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, const int dilation_h, + const int dilation_w, const int deformable_group, Tensor data_col); + +void modulated_deformable_col2im_impl( + const Tensor data_col, const Tensor data_offset, const Tensor data_mask, + const int batch_size, const int channels, const int height_im, + const int width_im, const int height_col, const int width_col, + const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, const int dilation_h, + const int dilation_w, const int deformable_group, Tensor grad_im); + +void modulated_deformable_col2im_coord_impl( + 
const Tensor data_col, const Tensor data_im, const Tensor data_offset, + const Tensor data_mask, const int batch_size, const int channels, + const int height_im, const int width_im, const int height_col, + const int width_col, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, const int deformable_group, + Tensor grad_offset, Tensor grad_mask); + +REGISTER_DEVICE_IMPL(modulated_deformable_im2col_impl, CUDA, + modulated_deformable_im2col_cuda); +REGISTER_DEVICE_IMPL(modulated_deformable_col2im_impl, CUDA, + modulated_deformable_col2im_cuda); +REGISTER_DEVICE_IMPL(modulated_deformable_col2im_coord_impl, CUDA, + modulated_deformable_col2im_coord_cuda); + +Tensor ms_deform_attn_cuda_forward(const Tensor& value, + const Tensor& spatial_shapes, + const Tensor& level_start_index, + const Tensor& sampling_loc, + const Tensor& attn_weight, + const int im2col_step); + +void ms_deform_attn_cuda_backward( + const Tensor& value, const Tensor& spatial_shapes, + const Tensor& level_start_index, const Tensor& sampling_loc, + const Tensor& attn_weight, const Tensor& grad_output, Tensor& grad_value, + Tensor& grad_sampling_loc, Tensor& grad_attn_weight, const int im2col_step); + +Tensor ms_deform_attn_impl_forward(const Tensor& value, + const Tensor& spatial_shapes, + const Tensor& level_start_index, + const Tensor& sampling_loc, + const Tensor& attn_weight, + const int im2col_step); + +void ms_deform_attn_impl_backward( + const Tensor& value, const Tensor& spatial_shapes, + const Tensor& level_start_index, const Tensor& sampling_loc, + const Tensor& attn_weight, const Tensor& grad_output, Tensor& grad_value, + Tensor& grad_sampling_loc, Tensor& grad_attn_weight, const int im2col_step); + +REGISTER_DEVICE_IMPL(ms_deform_attn_impl_forward, CUDA, + ms_deform_attn_cuda_forward); +REGISTER_DEVICE_IMPL(ms_deform_attn_impl_backward, CUDA, + ms_deform_attn_cuda_backward); + +Tensor NMSCUDAKernelLauncher(Tensor boxes, Tensor scores, float iou_threshold, + int offset); + +Tensor nms_cuda(Tensor boxes, Tensor scores, float iou_threshold, int offset) { + return NMSCUDAKernelLauncher(boxes, scores, iou_threshold, offset); +} + +Tensor nms_impl(Tensor boxes, Tensor scores, float iou_threshold, int offset); +REGISTER_DEVICE_IMPL(nms_impl, CUDA, nms_cuda); + +void PointsInBoxesPartForwardCUDAKernelLauncher(int batch_size, int boxes_num, + int pts_num, const Tensor boxes, + const Tensor pts, + Tensor box_idx_of_points); + +void PointsInBoxesAllForwardCUDAKernelLauncher(int batch_size, int boxes_num, + int pts_num, const Tensor boxes, + const Tensor pts, + Tensor box_idx_of_points); + +void points_in_boxes_part_forward_cuda(int batch_size, int boxes_num, + int pts_num, const Tensor boxes, + const Tensor pts, + Tensor box_idx_of_points) { + PointsInBoxesPartForwardCUDAKernelLauncher(batch_size, boxes_num, pts_num, + boxes, pts, box_idx_of_points); +}; + +void points_in_boxes_all_forward_cuda(int batch_size, int boxes_num, + int pts_num, const Tensor boxes, + const Tensor pts, + Tensor box_idx_of_points) { + PointsInBoxesAllForwardCUDAKernelLauncher(batch_size, boxes_num, pts_num, + boxes, pts, box_idx_of_points); +}; + +void points_in_boxes_part_forward_impl(int batch_size, int boxes_num, + int pts_num, const Tensor boxes, + const Tensor pts, + Tensor box_idx_of_points); + +void points_in_boxes_all_forward_impl(int batch_size, int boxes_num, + int pts_num, const Tensor boxes, + const Tensor pts, + Tensor 
box_idx_of_points); +REGISTER_DEVICE_IMPL(points_in_boxes_part_forward_impl, CUDA, + points_in_boxes_part_forward_cuda); +REGISTER_DEVICE_IMPL(points_in_boxes_all_forward_impl, CUDA, + points_in_boxes_all_forward_cuda); + +void PSAMaskForwardCUDAKernelLauncher(const int psa_type, const Tensor input, + Tensor output, const int num_, + const int h_feature, const int w_feature, + const int h_mask, const int w_mask, + const int half_h_mask, + const int half_w_mask); + +void PSAMaskBackwardCUDAKernelLauncher( + const int psa_type, const Tensor grad_output, Tensor grad_input, + const int num_, const int h_feature, const int w_feature, const int h_mask, + const int w_mask, const int half_h_mask, const int half_w_mask); + +void psamask_forward_cuda(const int psa_type, const Tensor input, Tensor output, + const int num_, const int h_feature, + const int w_feature, const int h_mask, + const int w_mask, const int half_h_mask, + const int half_w_mask) { + PSAMaskForwardCUDAKernelLauncher(psa_type, input, output, num_, h_feature, + w_feature, h_mask, w_mask, half_h_mask, + half_w_mask); +} + +void psamask_backward_cuda(const int psa_type, const Tensor grad_output, + Tensor grad_input, const int num_, + const int h_feature, const int w_feature, + const int h_mask, const int w_mask, + const int half_h_mask, const int half_w_mask) { + PSAMaskBackwardCUDAKernelLauncher(psa_type, grad_output, grad_input, num_, + h_feature, w_feature, h_mask, w_mask, + half_h_mask, half_w_mask); +} + +void psamask_forward_impl(const int psa_type, const Tensor input, Tensor output, + const int num_, const int h_feature, + const int w_feature, const int h_mask, + const int w_mask, const int half_h_mask, + const int half_w_mask); + +void psamask_backward_impl(const int psa_type, const Tensor grad_output, + Tensor grad_input, const int num_, + const int h_feature, const int w_feature, + const int h_mask, const int w_mask, + const int half_h_mask, const int half_w_mask); +REGISTER_DEVICE_IMPL(psamask_forward_impl, CUDA, psamask_forward_cuda); +REGISTER_DEVICE_IMPL(psamask_backward_impl, CUDA, psamask_backward_cuda); + +void ROIAlignForwardCUDAKernelLauncher(Tensor input, Tensor rois, Tensor output, + Tensor argmax_y, Tensor argmax_x, + int aligned_height, int aligned_width, + float spatial_scale, int sampling_ratio, + int pool_mode, bool aligned); + +void ROIAlignBackwardCUDAKernelLauncher(Tensor grad_output, Tensor rois, + Tensor argmax_y, Tensor argmax_x, + Tensor grad_input, int aligned_height, + int aligned_width, float spatial_scale, + int sampling_ratio, int pool_mode, + bool aligned); + +void roi_align_forward_cuda(Tensor input, Tensor rois, Tensor output, + Tensor argmax_y, Tensor argmax_x, + int aligned_height, int aligned_width, + float spatial_scale, int sampling_ratio, + int pool_mode, bool aligned) { + ROIAlignForwardCUDAKernelLauncher( + input, rois, output, argmax_y, argmax_x, aligned_height, aligned_width, + spatial_scale, sampling_ratio, pool_mode, aligned); +} + +void roi_align_backward_cuda(Tensor grad_output, Tensor rois, Tensor argmax_y, + Tensor argmax_x, Tensor grad_input, + int aligned_height, int aligned_width, + float spatial_scale, int sampling_ratio, + int pool_mode, bool aligned) { + ROIAlignBackwardCUDAKernelLauncher( + grad_output, rois, argmax_y, argmax_x, grad_input, aligned_height, + aligned_width, spatial_scale, sampling_ratio, pool_mode, aligned); +} + +void roi_align_forward_impl(Tensor input, Tensor rois, Tensor output, + Tensor argmax_y, Tensor argmax_x, + int aligned_height, int 
aligned_width, + float spatial_scale, int sampling_ratio, + int pool_mode, bool aligned); + +void roi_align_backward_impl(Tensor grad_output, Tensor rois, Tensor argmax_y, + Tensor argmax_x, Tensor grad_input, + int aligned_height, int aligned_width, + float spatial_scale, int sampling_ratio, + int pool_mode, bool aligned); + +REGISTER_DEVICE_IMPL(roi_align_forward_impl, CUDA, roi_align_forward_cuda); +REGISTER_DEVICE_IMPL(roi_align_backward_impl, CUDA, roi_align_backward_cuda); + +void ROIAlignRotatedForwardCUDAKernelLauncher( + const at::Tensor features, const at::Tensor rois, const float spatial_scale, + const int sample_num, const bool aligned, const bool clockwise, + const int channels, const int height, const int width, const int num_rois, + const int pooled_height, const int pooled_width, at::Tensor output); + +void ROIAlignRotatedBackwardCUDAKernelLauncher( + const at::Tensor top_grad, const at::Tensor rois, const float spatial_scale, + const int sample_num, const bool aligned, const bool clockwise, + const int channels, const int height, const int width, const int num_rois, + const int pooled_height, const int pooled_width, at::Tensor bottom_grad); + +void roi_align_rotated_forward_cuda(Tensor features, Tensor rois, Tensor output, + int aligned_height, int aligned_width, + float spatial_scale, int sample_ratio, + bool aligned, bool clockwise) { + // Number of ROIs + int num_rois = rois.size(0); + int size_rois = rois.size(1); + + if (size_rois != 6) { + AT_ERROR("wrong roi size"); + } + + int num_channels = features.size(1); + int data_height = features.size(2); + int data_width = features.size(3); + ROIAlignRotatedForwardCUDAKernelLauncher( + features, rois, spatial_scale, sample_ratio, aligned, clockwise, + num_channels, data_height, data_width, num_rois, aligned_height, + aligned_width, output); +} + +void roi_align_rotated_backward_cuda(Tensor top_grad, Tensor rois, + Tensor bottom_grad, int aligned_height, + int aligned_width, float spatial_scale, + int sample_ratio, bool aligned, + bool clockwise) { + // Number of ROIs + int num_rois = rois.size(0); + int size_rois = rois.size(1); + if (size_rois != 6) { + AT_ERROR("wrong roi size"); + } + + int num_channels = bottom_grad.size(1); + int data_height = bottom_grad.size(2); + int data_width = bottom_grad.size(3); + ROIAlignRotatedBackwardCUDAKernelLauncher( + top_grad, rois, spatial_scale, sample_ratio, aligned, clockwise, + num_channels, data_height, data_width, num_rois, aligned_height, + aligned_width, bottom_grad); +} + +void roi_align_rotated_forward_impl(Tensor features, Tensor rois, Tensor output, + int aligned_height, int aligned_width, + float spatial_scale, int sample_ratio, + bool aligned, bool clockwise); + +void roi_align_rotated_backward_impl(Tensor top_grad, Tensor rois, + Tensor bottom_grad, int aligned_height, + int aligned_width, float spatial_scale, + int sample_ratio, bool aligned, + bool clockwise); +REGISTER_DEVICE_IMPL(roi_align_rotated_forward_impl, CUDA, + roi_align_rotated_forward_cuda); +REGISTER_DEVICE_IMPL(roi_align_rotated_backward_impl, CUDA, + roi_align_rotated_backward_cuda); + +void RoiawarePool3dForwardCUDAKernelLauncher( + int boxes_num, int pts_num, int channels, int max_pts_each_voxel, int out_x, + int out_y, int out_z, const Tensor rois, const Tensor pts, + const Tensor pts_feature, Tensor argmax, Tensor pts_idx_of_voxels, + Tensor pooled_features, int pool_method); + +void RoiawarePool3dBackwardCUDAKernelLauncher( + int boxes_num, int out_x, int out_y, int out_z, int channels, + int 
max_pts_each_voxel, const Tensor pts_idx_of_voxels, const Tensor argmax, + const Tensor grad_out, Tensor grad_in, int pool_method); + +void roiaware_pool3d_forward_cuda(int boxes_num, int pts_num, int channels, + int max_pts_each_voxel, int out_x, int out_y, + int out_z, const Tensor rois, + const Tensor pts, const Tensor pts_feature, + Tensor argmax, Tensor pts_idx_of_voxels, + Tensor pooled_features, int pool_method) { + RoiawarePool3dForwardCUDAKernelLauncher( + boxes_num, pts_num, channels, max_pts_each_voxel, out_x, out_y, out_z, + rois, pts, pts_feature, argmax, pts_idx_of_voxels, pooled_features, + pool_method); +}; + +void roiaware_pool3d_backward_cuda(int boxes_num, int out_x, int out_y, + int out_z, int channels, + int max_pts_each_voxel, + const Tensor pts_idx_of_voxels, + const Tensor argmax, const Tensor grad_out, + Tensor grad_in, int pool_method) { + RoiawarePool3dBackwardCUDAKernelLauncher( + boxes_num, out_x, out_y, out_z, channels, max_pts_each_voxel, + pts_idx_of_voxels, argmax, grad_out, grad_in, pool_method); +}; + +void roiaware_pool3d_forward_impl(int boxes_num, int pts_num, int channels, + int max_pts_each_voxel, int out_x, int out_y, + int out_z, const Tensor rois, + const Tensor pts, const Tensor pts_feature, + Tensor argmax, Tensor pts_idx_of_voxels, + Tensor pooled_features, int pool_method); + +void roiaware_pool3d_backward_impl(int boxes_num, int out_x, int out_y, + int out_z, int channels, + int max_pts_each_voxel, + const Tensor pts_idx_of_voxels, + const Tensor argmax, const Tensor grad_out, + Tensor grad_in, int pool_method); + +REGISTER_DEVICE_IMPL(roiaware_pool3d_forward_impl, CUDA, + roiaware_pool3d_forward_cuda); +REGISTER_DEVICE_IMPL(roiaware_pool3d_backward_impl, CUDA, + roiaware_pool3d_backward_cuda); + +void RoIPointPool3dForwardCUDAKernelLauncher( + int batch_size, int pts_num, int boxes_num, int feature_in_len, + int sampled_pts_num, const Tensor xyz, const Tensor boxes3d, + const Tensor pts_feature, Tensor pooled_features, Tensor pooled_empty_flag); + +void roipoint_pool3d_forward_cuda(int batch_size, int pts_num, int boxes_num, + int feature_in_len, int sampled_pts_num, + const Tensor xyz, const Tensor boxes3d, + const Tensor pts_feature, + Tensor pooled_features, + Tensor pooled_empty_flag) { + RoIPointPool3dForwardCUDAKernelLauncher( + batch_size, pts_num, boxes_num, feature_in_len, sampled_pts_num, xyz, + boxes3d, pts_feature, pooled_features, pooled_empty_flag); +}; + +void roipoint_pool3d_forward_impl(int batch_size, int pts_num, int boxes_num, + int feature_in_len, int sampled_pts_num, + const Tensor xyz, const Tensor boxes3d, + const Tensor pts_feature, + Tensor pooled_features, + Tensor pooled_empty_flag); +REGISTER_DEVICE_IMPL(roipoint_pool3d_forward_impl, CUDA, + roipoint_pool3d_forward_cuda); + +void ROIPoolForwardCUDAKernelLauncher(Tensor input, Tensor rois, Tensor output, + Tensor argmax, int pooled_height, + int pooled_width, float spatial_scale); + +void ROIPoolBackwardCUDAKernelLauncher(Tensor grad_output, Tensor rois, + Tensor argmax, Tensor grad_input, + int pooled_height, int pooled_width, + float spatial_scale); + +void roi_pool_forward_cuda(Tensor input, Tensor rois, Tensor output, + Tensor argmax, int pooled_height, int pooled_width, + float spatial_scale) { + ROIPoolForwardCUDAKernelLauncher(input, rois, output, argmax, pooled_height, + pooled_width, spatial_scale); +} + +void roi_pool_backward_cuda(Tensor grad_output, Tensor rois, Tensor argmax, + Tensor grad_input, int pooled_height, + int pooled_width, float 
spatial_scale) {
+  ROIPoolBackwardCUDAKernelLauncher(grad_output, rois, argmax, grad_input,
+                                    pooled_height, pooled_width,
+                                    spatial_scale);
+}
+
+void roi_pool_forward_impl(Tensor input, Tensor rois, Tensor output,
+                           Tensor argmax, int pooled_height, int pooled_width,
+                           float spatial_scale);
+void roi_pool_backward_impl(Tensor grad_output, Tensor rois, Tensor argmax,
+                            Tensor grad_input, int pooled_height,
+                            int pooled_width, float spatial_scale);
+REGISTER_DEVICE_IMPL(roi_pool_forward_impl, CUDA, roi_pool_forward_cuda);
+REGISTER_DEVICE_IMPL(roi_pool_backward_impl, CUDA, roi_pool_backward_cuda);
+
+typedef enum { SUM = 0, MEAN = 1, MAX = 2 } reduce_t;
+
+std::vector<torch::Tensor> DynamicPointToVoxelForwardCUDAKernelLauncher(
+    const at::Tensor& feats, const at::Tensor& coors,
+    const reduce_t reduce_type);
+
+void DynamicPointToVoxelBackwardCUDAKernelLauncher(
+    at::Tensor& grad_feats, const at::Tensor& grad_reduced_feats,
+    const at::Tensor& feats, const at::Tensor& reduced_feats,
+    const at::Tensor& coors_map, const at::Tensor& reduce_count,
+    const reduce_t reduce_type);
+
+std::vector<torch::Tensor> dynamic_point_to_voxel_forward_cuda(
+    const torch::Tensor& feats, const torch::Tensor& coors,
+    const reduce_t reduce_type) {
+  return DynamicPointToVoxelForwardCUDAKernelLauncher(feats, coors,
+                                                      reduce_type);
+}
+
+void dynamic_point_to_voxel_backward_cuda(
+    torch::Tensor& grad_feats, const torch::Tensor& grad_reduced_feats,
+    const torch::Tensor& feats, const torch::Tensor& reduced_feats,
+    const torch::Tensor& coors_idx, const torch::Tensor& reduce_count,
+    const reduce_t reduce_type) {
+  DynamicPointToVoxelBackwardCUDAKernelLauncher(grad_feats, grad_reduced_feats,
+                                                feats, reduced_feats,
+                                                coors_idx, reduce_count,
+                                                reduce_type);
+}
+
+std::vector<torch::Tensor> dynamic_point_to_voxel_forward_impl(
+    const torch::Tensor& feats, const torch::Tensor& coors,
+    const reduce_t reduce_type);
+
+void dynamic_point_to_voxel_backward_impl(
+    torch::Tensor& grad_feats, const torch::Tensor& grad_reduced_feats,
+    const torch::Tensor& feats, const torch::Tensor& reduced_feats,
+    const torch::Tensor& coors_idx, const torch::Tensor& reduce_count,
+    const reduce_t reduce_type);
+
+REGISTER_DEVICE_IMPL(dynamic_point_to_voxel_forward_impl, CUDA,
+                     dynamic_point_to_voxel_forward_cuda);
+REGISTER_DEVICE_IMPL(dynamic_point_to_voxel_backward_impl, CUDA,
+                     dynamic_point_to_voxel_backward_cuda);
+
+void SyncBNForwardMeanCUDAKernelLauncher(const Tensor input, Tensor mean);
+
+void SyncBNForwardVarCUDAKernelLauncher(const Tensor input, const Tensor mean,
+                                        Tensor var);
+
+void SyncBNForwardOutputCUDAKernelLauncher(
+    const Tensor input, const Tensor mean, const Tensor var,
+    Tensor running_mean, Tensor running_var, const Tensor weight,
+    const Tensor bias, Tensor norm, Tensor std, Tensor output, float eps,
+    float momentum, int group_size);
+
+void SyncBNBackwardParamCUDAKernelLauncher(const Tensor grad_output,
+                                           const Tensor norm,
+                                           Tensor grad_weight,
+                                           Tensor grad_bias);
+
+void SyncBNBackwardDataCUDAKernelLauncher(const Tensor grad_output,
+                                          const Tensor weight,
+                                          const Tensor grad_weight,
+                                          const Tensor grad_bias,
+                                          const Tensor norm, const Tensor std,
+                                          Tensor grad_input);
+
+void sync_bn_forward_mean_cuda(const Tensor input, Tensor mean) {
+  SyncBNForwardMeanCUDAKernelLauncher(input, mean);
+}
+
+void sync_bn_forward_var_cuda(const Tensor input, const Tensor mean,
+                              Tensor var) {
+  SyncBNForwardVarCUDAKernelLauncher(input, mean, var);
+}
+
+void sync_bn_forward_output_cuda(const Tensor input, const Tensor mean,
+                                 const Tensor var, Tensor
running_mean, + Tensor running_var, const Tensor weight, + const Tensor bias, Tensor norm, Tensor std, + Tensor output, float eps, float momentum, + int group_size) { + SyncBNForwardOutputCUDAKernelLauncher(input, mean, var, running_mean, + running_var, weight, bias, norm, std, + output, eps, momentum, group_size); +} + +void sync_bn_backward_param_cuda(const Tensor grad_output, const Tensor norm, + Tensor grad_weight, Tensor grad_bias) { + SyncBNBackwardParamCUDAKernelLauncher(grad_output, norm, grad_weight, + grad_bias); +} + +void sync_bn_backward_data_cuda(const Tensor grad_output, const Tensor weight, + const Tensor grad_weight, + const Tensor grad_bias, const Tensor norm, + const Tensor std, Tensor grad_input) { + SyncBNBackwardDataCUDAKernelLauncher(grad_output, weight, grad_weight, + grad_bias, norm, std, grad_input); +} + +void sync_bn_forward_mean_impl(const Tensor input, Tensor mean); + +void sync_bn_forward_var_impl(const Tensor input, const Tensor mean, + Tensor var); + +void sync_bn_forward_output_impl(const Tensor input, const Tensor mean, + const Tensor var, Tensor running_mean, + Tensor running_var, const Tensor weight, + const Tensor bias, Tensor norm, Tensor std, + Tensor output, float eps, float momentum, + int group_size); + +void sync_bn_backward_param_impl(const Tensor grad_output, const Tensor norm, + Tensor grad_weight, Tensor grad_bias); + +void sync_bn_backward_data_impl(const Tensor grad_output, const Tensor weight, + const Tensor grad_weight, + const Tensor grad_bias, const Tensor norm, + const Tensor std, Tensor grad_input); + +REGISTER_DEVICE_IMPL(sync_bn_forward_mean_impl, CUDA, + sync_bn_forward_mean_cuda); +REGISTER_DEVICE_IMPL(sync_bn_forward_var_impl, CUDA, sync_bn_forward_var_cuda); +REGISTER_DEVICE_IMPL(sync_bn_forward_output_impl, CUDA, + sync_bn_forward_output_cuda); +REGISTER_DEVICE_IMPL(sync_bn_backward_param_impl, CUDA, + sync_bn_backward_param_cuda); +REGISTER_DEVICE_IMPL(sync_bn_backward_data_impl, CUDA, + sync_bn_backward_data_cuda); + +void ThreeInterpolateForwardCUDAKernelLauncher(int b, int c, int m, int n, + const Tensor points, + const Tensor idx, + const Tensor weight, Tensor out); + +void ThreeInterpolateBackwardCUDAKernelLauncher(int b, int c, int n, int m, + const Tensor grad_out, + const Tensor idx, + const Tensor weight, + Tensor grad_points); + +void three_interpolate_forward_cuda(int b, int c, int m, int n, + const Tensor points, const Tensor idx, + const Tensor weight, Tensor out) { + ThreeInterpolateForwardCUDAKernelLauncher(b, c, m, n, points, idx, weight, + out); +}; + +void three_interpolate_backward_cuda(int b, int c, int n, int m, + const Tensor grad_out, const Tensor idx, + const Tensor weight, Tensor grad_points) { + ThreeInterpolateBackwardCUDAKernelLauncher(b, c, n, m, grad_out, idx, weight, + grad_points); +}; + +void three_interpolate_forward_impl(int b, int c, int m, int n, + const Tensor points, const Tensor idx, + const Tensor weight, Tensor out); + +void three_interpolate_backward_impl(int b, int c, int n, int m, + const Tensor grad_out, const Tensor idx, + const Tensor weight, Tensor grad_points); +REGISTER_DEVICE_IMPL(three_interpolate_forward_impl, CUDA, + three_interpolate_forward_cuda); +REGISTER_DEVICE_IMPL(three_interpolate_backward_impl, CUDA, + three_interpolate_backward_cuda); + +void ThreeNNForwardCUDAKernelLauncher(int b, int n, int m, const Tensor unknown, + const Tensor known, Tensor dist2, + Tensor idx); + +void three_nn_forward_cuda(int b, int n, int m, const Tensor unknown, + const Tensor known, 
Tensor dist2, Tensor idx) {
+  ThreeNNForwardCUDAKernelLauncher(b, n, m, unknown, known, dist2, idx);
+}
+
+void three_nn_forward_impl(int b, int n, int m, const Tensor unknown,
+                           const Tensor known, Tensor dist2, Tensor idx);
+REGISTER_DEVICE_IMPL(three_nn_forward_impl, CUDA, three_nn_forward_cuda);
+
+void TINShiftForwardCUDAKernelLauncher(Tensor input, Tensor shift,
+                                       Tensor output);
+
+void TINShiftBackwardCUDAKernelLauncher(Tensor grad_output, Tensor shift,
+                                        Tensor grad_input);
+
+void tin_shift_forward_cuda(Tensor input, Tensor shift, Tensor output) {
+  TINShiftForwardCUDAKernelLauncher(input, shift, output);
+}
+
+void tin_shift_backward_cuda(Tensor grad_output, Tensor shift,
+                             Tensor grad_input) {
+  TINShiftBackwardCUDAKernelLauncher(grad_output, shift, grad_input);
+}
+
+void tin_shift_forward_impl(Tensor input, Tensor shift, Tensor output);
+void tin_shift_backward_impl(Tensor grad_output, Tensor shift,
+                             Tensor grad_input);
+REGISTER_DEVICE_IMPL(tin_shift_forward_impl, CUDA, tin_shift_forward_cuda);
+REGISTER_DEVICE_IMPL(tin_shift_backward_impl, CUDA, tin_shift_backward_cuda);
+
+torch::Tensor upfirdn2d_op(const torch::Tensor& input,
+                           const torch::Tensor& kernel, int up_x, int up_y,
+                           int down_x, int down_y, int pad_x0, int pad_x1,
+                           int pad_y0, int pad_y1);
+
+torch::Tensor upfirdn2d_op_impl(const torch::Tensor& input,
+                                const torch::Tensor& kernel, int up_x,
+                                int up_y, int down_x, int down_y, int pad_x0,
+                                int pad_x1, int pad_y0, int pad_y1);
+REGISTER_DEVICE_IMPL(upfirdn2d_op_impl, CUDA, upfirdn2d_op);
+
+int HardVoxelizeForwardCUDAKernelLauncher(
+    const at::Tensor& points, at::Tensor& voxels, at::Tensor& coors,
+    at::Tensor& num_points_per_voxel, const std::vector<float> voxel_size,
+    const std::vector<float> coors_range, const int max_points,
+    const int max_voxels, const int NDim = 3);
+
+void DynamicVoxelizeForwardCUDAKernelLauncher(
+    const at::Tensor& points, at::Tensor& coors,
+    const std::vector<float> voxel_size, const std::vector<float> coors_range,
+    const int NDim = 3);
+
+int hard_voxelize_forward_cuda(const at::Tensor& points, at::Tensor& voxels,
+                               at::Tensor& coors,
+                               at::Tensor& num_points_per_voxel,
+                               const std::vector<float> voxel_size,
+                               const std::vector<float> coors_range,
+                               const int max_points, const int max_voxels,
+                               const int NDim) {
+  return HardVoxelizeForwardCUDAKernelLauncher(
+      points, voxels, coors, num_points_per_voxel, voxel_size, coors_range,
+      max_points, max_voxels, NDim);
+}
+
+void dynamic_voxelize_forward_cuda(const at::Tensor& points,
+                                   at::Tensor& coors,
+                                   const std::vector<float> voxel_size,
+                                   const std::vector<float> coors_range,
+                                   const int NDim) {
+  DynamicVoxelizeForwardCUDAKernelLauncher(points, coors, voxel_size,
+                                           coors_range, NDim);
+}
+
+int hard_voxelize_forward_impl(const at::Tensor& points, at::Tensor& voxels,
+                               at::Tensor& coors,
+                               at::Tensor& num_points_per_voxel,
+                               const std::vector<float> voxel_size,
+                               const std::vector<float> coors_range,
+                               const int max_points, const int max_voxels,
+                               const int NDim);
+
+void dynamic_voxelize_forward_impl(const at::Tensor& points,
+                                   at::Tensor& coors,
+                                   const std::vector<float> voxel_size,
+                                   const std::vector<float> coors_range,
+                                   const int NDim);
+REGISTER_DEVICE_IMPL(hard_voxelize_forward_impl, CUDA,
+                     hard_voxelize_forward_cuda);
+REGISTER_DEVICE_IMPL(dynamic_voxelize_forward_impl, CUDA,
+                     dynamic_voxelize_forward_cuda);
diff --git a/mmcv/ops/csrc/pytorch/cuda/deform_conv_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/deform_conv_cuda.cu
new file mode 100644
index 0000000..05fc08b
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/cuda/deform_conv_cuda.cu
@@ -0,0 +1,105 @@
+// Copyright (c) OpenMMLab. All rights reserved
+#include "deform_conv_cuda_kernel.cuh"
+#include "pytorch_cuda_helper.hpp"
+
+void deformable_im2col_cuda(Tensor data_im, Tensor data_offset,
+                            const int channels, const int height,
+                            const int width, const int ksize_h,
+                            const int ksize_w, const int pad_h,
+                            const int pad_w, const int stride_h,
+                            const int stride_w, const int dilation_h,
+                            const int dilation_w, const int parallel_imgs,
+                            const int deformable_group, Tensor data_col) {
+  // num_axes should be smaller than block size
+  // todo: check parallel_imgs is correctly passed in
+  int height_col =
+      (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
+  int width_col =
+      (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
+  int num_kernels = channels * height_col * width_col * parallel_imgs;
+  int channel_per_deformable_group = channels / deformable_group;
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      data_im.scalar_type(), "deformable_im2col_gpu", ([&] {
+        const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
+        const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
+        scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
+
+        deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels),
+                                       THREADS_PER_BLOCK, 0,
+                                       at::cuda::getCurrentCUDAStream()>>>(
+            num_kernels, data_im_, data_offset_, height, width, ksize_h,
+            ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
+            channel_per_deformable_group, parallel_imgs, channels,
+            deformable_group, height_col, width_col, data_col_);
+      }));
+  AT_CUDA_CHECK(cudaGetLastError());
+}
+
+void deformable_col2im_cuda(Tensor data_col, Tensor data_offset,
+                            const int channels, const int height,
+                            const int width, const int ksize_h,
+                            const int ksize_w, const int pad_h,
+                            const int pad_w, const int stride_h,
+                            const int stride_w, const int dilation_h,
+                            const int dilation_w, const int parallel_imgs,
+                            const int deformable_group, Tensor grad_im) {
+  // todo: make sure parallel_imgs is passed in correctly
+  int height_col =
+      (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
+  int width_col =
+      (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
+  int num_kernels =
+      channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
+  int channel_per_deformable_group = channels / deformable_group;
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      data_col.scalar_type(), "deformable_col2im_gpu", ([&] {
+        const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
+        const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
+        scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>();
+
+        deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels),
+                                       THREADS_PER_BLOCK, 0,
+                                       at::cuda::getCurrentCUDAStream()>>>(
+            num_kernels, data_col_, data_offset_, channels, height, width,
+            ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h,
+            dilation_w, channel_per_deformable_group, parallel_imgs,
+            deformable_group, height_col, width_col, grad_im_);
+      }));
+  AT_CUDA_CHECK(cudaGetLastError());
+}
+
+void deformable_col2im_coord_cuda(
+    Tensor data_col, Tensor data_im, Tensor data_offset, const int channels,
+    const int height, const int width, const int ksize_h, const int ksize_w,
+    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
+    const int dilation_h, const int dilation_w, const int parallel_imgs,
+    const int deformable_group, Tensor grad_offset) {
+  int height_col =
+      (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
+  int width_col =
+      (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
+  int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w *
+                    deformable_group * parallel_imgs;
+  int channel_per_deformable_group =
+      channels * ksize_h * ksize_w / deformable_group;
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] {
+        const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
+        const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
+        const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
+        scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>();
+
+        deformable_col2im_coord_gpu_kernel<<<
+            GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0,
+            at::cuda::getCurrentCUDAStream()>>>(
+            num_kernels, data_col_, data_im_, data_offset_, channels, height,
+            width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
+            dilation_h, dilation_w, channel_per_deformable_group,
+            parallel_imgs, 2 * ksize_h * ksize_w * deformable_group,
+            deformable_group, height_col, width_col, grad_offset_);
+      }));
+  AT_CUDA_CHECK(cudaGetLastError());
+}
diff --git a/mmcv/ops/csrc/pytorch/cuda/deform_roi_pool_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/deform_roi_pool_cuda.cu
new file mode 100644
index 0000000..d443998
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/cuda/deform_roi_pool_cuda.cu
@@ -0,0 +1,55 @@
+// Copyright (c) OpenMMLab. All rights reserved
+#include "deform_roi_pool_cuda_kernel.cuh"
+#include "pytorch_cuda_helper.hpp"
+
+void DeformRoIPoolForwardCUDAKernelLauncher(Tensor input, Tensor rois,
+                                            Tensor offset, Tensor output,
+                                            int pooled_height,
+                                            int pooled_width,
+                                            float spatial_scale,
+                                            int sampling_ratio, float gamma) {
+  int output_size = output.numel();
+  int channels = input.size(1);
+  int height = input.size(2);
+  int width = input.size(3);
+
+  at::cuda::CUDAGuard device_guard(input.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      input.scalar_type(), "deform_roi_pool_forward_cuda_kernel", [&] {
+        deform_roi_pool_forward_cuda_kernel<scalar_t>
+            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
+                output_size, input.data_ptr<scalar_t>(),
+                rois.data_ptr<scalar_t>(), offset.data_ptr<scalar_t>(),
+                output.data_ptr<scalar_t>(), pooled_height, pooled_width,
+                static_cast<scalar_t>(spatial_scale), sampling_ratio,
+                static_cast<scalar_t>(gamma), channels, height, width);
+      });
+
+  AT_CUDA_CHECK(cudaGetLastError());
+}
+
+void DeformRoIPoolBackwardCUDAKernelLauncher(
+    Tensor grad_output, Tensor input, Tensor rois, Tensor offset,
+    Tensor grad_input, Tensor grad_offset, int pooled_height,
+    int pooled_width, float spatial_scale, int sampling_ratio, float gamma) {
+  int output_size = grad_output.numel();
+  int channels = grad_input.size(1);
+  int height = grad_input.size(2);
+  int width = grad_input.size(3);
+
+  at::cuda::CUDAGuard device_guard(grad_output.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      grad_output.scalar_type(), "deform_roi_pool_backward_cuda_kernel", [&] {
+        deform_roi_pool_backward_cuda_kernel<scalar_t>
+            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
+                output_size, grad_output.data_ptr<scalar_t>(),
+                input.data_ptr<scalar_t>(), rois.data_ptr<scalar_t>(),
+                offset.data_ptr<scalar_t>(), grad_input.data_ptr<scalar_t>(),
+                grad_offset.data_ptr<scalar_t>(), pooled_height, pooled_width,
+                static_cast<scalar_t>(spatial_scale), sampling_ratio,
+                static_cast<scalar_t>(gamma), channels, height, width);
+      });
+
+  AT_CUDA_CHECK(cudaGetLastError());
+}
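For reference, the height_col / width_col expressions in the im2col launchers above are the standard dilated-convolution output-size formula, with the effective kernel extent dilation * (k - 1) + 1. A minimal host-side sketch, not part of the patch (conv_out_size is our own name):

#include <cassert>

// Output spatial size of a convolution along one axis:
// (in + 2*pad - (dilation*(kernel-1) + 1)) / stride + 1.
static int conv_out_size(int in, int pad, int kernel, int stride,
                         int dilation) {
  return (in + 2 * pad - (dilation * (kernel - 1) + 1)) / stride + 1;
}

int main() {
  // 3x3 kernel, stride 1, pad 1, dilation 1 preserves the size: 64 -> 64.
  assert(conv_out_size(64, 1, 3, 1, 1) == 64);
  // Dilation 2 widens the effective kernel to 5, so the map shrinks: 64 -> 62.
  assert(conv_out_size(64, 1, 3, 1, 2) == 62);
  return 0;
}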
diff --git a/mmcv/ops/csrc/pytorch/cuda/focal_loss_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/focal_loss_cuda.cu
new file mode 100644
index 0000000..cb899f9
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/cuda/focal_loss_cuda.cu
@@ -0,0 +1,111 @@
+// Copyright (c) OpenMMLab. All rights reserved
+#include "pytorch_cuda_helper.hpp"
+#include "sigmoid_focal_loss_cuda_kernel.cuh"
+#include "softmax_focal_loss_cuda_kernel.cuh"
+
+void SigmoidFocalLossForwardCUDAKernelLauncher(Tensor input, Tensor target,
+                                               Tensor weight, Tensor output,
+                                               const float gamma,
+                                               const float alpha) {
+  int output_size = output.numel();
+  int num_classes = input.size(1);
+  AT_ASSERTM(target.max().item<int64_t>() <= (int64_t)num_classes,
+             "target label should be smaller than or equal to num classes");
+  at::cuda::CUDAGuard device_guard(input.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      input.scalar_type(), "sigmoid_focal_loss_forward_cuda_kernel", [&] {
+        sigmoid_focal_loss_forward_cuda_kernel<scalar_t>
+            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
+                output_size, input.data_ptr<scalar_t>(),
+                target.data_ptr<int64_t>(), weight.data_ptr<scalar_t>(),
+                output.data_ptr<scalar_t>(), gamma, alpha, num_classes);
+      });
+
+  AT_CUDA_CHECK(cudaGetLastError());
+}
+
+void SigmoidFocalLossBackwardCUDAKernelLauncher(Tensor input, Tensor target,
+                                                Tensor weight,
+                                                Tensor grad_input,
+                                                const float gamma,
+                                                const float alpha) {
+  int output_size = grad_input.numel();
+  int num_classes = input.size(1);
+
+  at::cuda::CUDAGuard device_guard(grad_input.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      input.scalar_type(), "sigmoid_focal_loss_backward_cuda_kernel", [&] {
+        sigmoid_focal_loss_backward_cuda_kernel<scalar_t>
+            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
+                output_size, input.data_ptr<scalar_t>(),
+                target.data_ptr<int64_t>(), weight.data_ptr<scalar_t>(),
+                grad_input.data_ptr<scalar_t>(), gamma, alpha, num_classes);
+      });
+
+  AT_CUDA_CHECK(cudaGetLastError());
+}
+
+void SoftmaxFocalLossForwardCUDAKernelLauncher(Tensor softmax, Tensor target,
+                                               Tensor weight, Tensor output,
+                                               const float gamma,
+                                               const float alpha) {
+  int output_size = output.numel();
+  int num_classes = softmax.size(1);
+
+  AT_ASSERTM(target.max().item<int64_t>() <= (int64_t)num_classes,
+             "target label should be smaller than or equal to num classes");
+  at::cuda::CUDAGuard device_guard(softmax.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      softmax.scalar_type(), "softmax_focal_loss_forward_cuda_kernel", [&] {
+        softmax_focal_loss_forward_cuda_kernel<scalar_t>
+            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
+                output_size, softmax.data_ptr<scalar_t>(),
+                target.data_ptr<int64_t>(), weight.data_ptr<scalar_t>(),
+                output.data_ptr<scalar_t>(), gamma, alpha, num_classes);
+      });
+
+  AT_CUDA_CHECK(cudaGetLastError());
+}
+
+void SoftmaxFocalLossBackwardCUDAKernelLauncher(Tensor softmax, Tensor target,
+                                                Tensor weight, Tensor buff,
+                                                Tensor grad_input,
+                                                const float gamma,
+                                                const float alpha) {
+  int num_classes = softmax.size(1);
+
+  int output_size = buff.numel();
+  at::cuda::CUDAGuard device_guard(grad_input.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      grad_input.scalar_type(), "softmax_focal_loss_backward_cuda1_kernel",
+      [&] {
+        softmax_focal_loss_backward_cuda1_kernel<scalar_t>
+            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
+                output_size, softmax.data_ptr<scalar_t>(),
+                target.data_ptr<int64_t>(), weight.data_ptr<scalar_t>(),
+                buff.data_ptr<scalar_t>(), gamma, alpha, num_classes);
+      });
+
+  AT_CUDA_CHECK(cudaGetLastError());
+
+  output_size = grad_input.numel();
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      grad_input.scalar_type(), "softmax_focal_loss_backward_cuda2_kernel",
+      [&] {
+        softmax_focal_loss_backward_cuda2_kernel<scalar_t>
+            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
+                output_size, softmax.data_ptr<scalar_t>(),
+                target.data_ptr<int64_t>(), buff.data_ptr<scalar_t>(),
+                grad_input.data_ptr<scalar_t>(), num_classes);
+      });
+
+  AT_CUDA_CHECK(cudaGetLastError());
+}
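The per-element math lives in the .cuh kernel headers, which this patch does not include; the launchers above only set up the grid and dispatch. Assuming the kernels follow the standard focal-loss definition (Lin et al., FL(p_t) = -alpha_t * (1 - p_t)^gamma * log(p_t)), a host-side C++ sketch of the sigmoid variant for a single logit looks like this (function and parameter names are ours, for illustration only):

#include <cmath>

float sigmoid_focal_loss(float logit, bool is_positive, float gamma,
                         float alpha) {
  float p = 1.0f / (1.0f + std::exp(-logit));     // sigmoid probability
  float pt = is_positive ? p : 1.0f - p;          // probability of true class
  float at = is_positive ? alpha : 1.0f - alpha;  // class-balance weight
  // (1 - pt)^gamma down-weights well-classified (high-pt) examples.
  return -at * std::pow(1.0f - pt, gamma) * std::log(pt);
}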
diff --git a/mmcv/ops/csrc/pytorch/cuda/furthest_point_sample_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/furthest_point_sample_cuda.cu
new file mode 100644
index 0000000..cfb4cd3
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/cuda/furthest_point_sample_cuda.cu
@@ -0,0 +1,143 @@
+// Modified from
+// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/sampling_gpu.cu
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "furthest_point_sample_cuda_kernel.cuh"
+#include "pytorch_cuda_helper.hpp"
+
+inline int opt_n_threads(int work_size) {
+  const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);
+
+  return max(min(1 << pow_2, 1024), 1);
+}
+
+void FurthestPointSamplingForwardCUDAKernelLauncher(int b, int n, int m,
+                                                    const float* dataset,
+                                                    float* temp, int* idxs) {
+  // dataset: (B, N, 3)
+  // tmp: (B, N)
+  // output:
+  //      idx: (B, M)
+
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  unsigned int n_threads = opt_n_threads(n);
+
+  switch (n_threads) {
+    case 1024:
+      furthest_point_sampling_forward_cuda_kernel<1024>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 512:
+      furthest_point_sampling_forward_cuda_kernel<512>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 256:
+      furthest_point_sampling_forward_cuda_kernel<256>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 128:
+      furthest_point_sampling_forward_cuda_kernel<128>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 64:
+      furthest_point_sampling_forward_cuda_kernel<64>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 32:
+      furthest_point_sampling_forward_cuda_kernel<32>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 16:
+      furthest_point_sampling_forward_cuda_kernel<16>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 8:
+      furthest_point_sampling_forward_cuda_kernel<8>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 4:
+      furthest_point_sampling_forward_cuda_kernel<4>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 2:
+      furthest_point_sampling_forward_cuda_kernel<2>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 1:
+      furthest_point_sampling_forward_cuda_kernel<1>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    default:
+      furthest_point_sampling_forward_cuda_kernel<512>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+  }
+
+  AT_CUDA_CHECK(cudaGetLastError());
+}
+
+void FurthestPointSamplingWithDistForwardCUDAKernelLauncher(
+    int b, int n, int m, const float* dataset, float* temp, int* idxs) {
+  // dataset: (B, N, N)
+  // temp: (B, N)
+  // output:
+  //      idx: (B, M)
+
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  unsigned int n_threads = opt_n_threads(n);
+
+  switch (n_threads) {
+    case 1024:
+      furthest_point_sampling_with_dist_forward_cuda_kernel<1024>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 512:
+      furthest_point_sampling_with_dist_forward_cuda_kernel<512>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 256:
+      furthest_point_sampling_with_dist_forward_cuda_kernel<256>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 128:
+      furthest_point_sampling_with_dist_forward_cuda_kernel<128>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 64:
+      furthest_point_sampling_with_dist_forward_cuda_kernel<64>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 32:
+      furthest_point_sampling_with_dist_forward_cuda_kernel<32>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 16:
+      furthest_point_sampling_with_dist_forward_cuda_kernel<16>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 8:
+      furthest_point_sampling_with_dist_forward_cuda_kernel<8>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 4:
+      furthest_point_sampling_with_dist_forward_cuda_kernel<4>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 2:
+      furthest_point_sampling_with_dist_forward_cuda_kernel<2>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 1:
+      furthest_point_sampling_with_dist_forward_cuda_kernel<1>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    default:
+      furthest_point_sampling_with_dist_forward_cuda_kernel<512>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+  }
+
+  AT_CUDA_CHECK(cudaGetLastError());
+}
1 : 0; + + int size_x = x.numel(); + int size_b = b.numel(); + int step_b = 1; + + for (int i = 1 + 1; i < x.dim(); i++) { + step_b *= x.size(i); + } + + int loop_x = 4; + int block_size = 4 * 32; + int grid_size = (size_x - 1) / (loop_x * block_size) + 1; + + auto y = torch::empty_like(x); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + x.scalar_type(), "fused_bias_act_kernel", [&] { + fused_bias_act_kernel<<>>( + y.data_ptr(), x.data_ptr(), + b.data_ptr(), ref.data_ptr(), act, grad, alpha, + scale, loop_x, size_x, step_b, size_b, use_bias, use_ref); + }); + + return y; +} diff --git a/mmcv/ops/csrc/pytorch/cuda/gather_points_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/gather_points_cuda.cu new file mode 100644 index 0000000..672fec6 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/gather_points_cuda.cu @@ -0,0 +1,58 @@ +#include +#include + +#include "gather_points_cuda_kernel.cuh" +#include "pytorch_cuda_helper.hpp" + +void GatherPointsForwardCUDAKernelLauncher(int b, int c, int n, int npoints, + const Tensor points, + const Tensor idx, Tensor out) { + // points: (B, C, N) + // idx: (B, npoints) + // output: + // out: (B, C, npoints) + + at::cuda::CUDAGuard device_guard(points.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // blockIdx.x(col), blockIdx.y(row) + dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); + dim3 threads(THREADS_PER_BLOCK); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + points.scalar_type(), "gather_points_forward_cuda_kernel", [&] { + gather_points_forward_cuda_kernel + <<>>( + b, c, n, npoints, points.data_ptr(), + idx.data_ptr(), out.data_ptr()); + }); + + AT_CUDA_CHECK(cudaGetLastError()); +} + +void GatherPointsBackwardCUDAKernelLauncher(int b, int c, int n, int npoints, + const Tensor grad_out, + const Tensor idx, + Tensor grad_points) { + // grad_out: (B, C, npoints) + // idx: (B, npoints) + // output: + // grad_points: (B, C, N) + + at::cuda::CUDAGuard device_guard(grad_out.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // blockIdx.x(col), blockIdx.y(row) + dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); + dim3 threads(THREADS_PER_BLOCK); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad_out.scalar_type(), "gather_points_backward_cuda_kernel", [&] { + gather_points_backward_cuda_kernel + <<>>( + b, c, n, npoints, grad_out.data_ptr(), + idx.data_ptr(), grad_points.data_ptr()); + }); + + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/group_points_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/group_points_cuda.cu new file mode 100644 index 0000000..e7c57b0 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/group_points_cuda.cu @@ -0,0 +1,61 @@ +// Copyright (c) OpenMMLab. All rights reserved. 
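+// The gather that the kernels in this file implement, written out as a
+// host-side reference loop (illustrative sketch only; assumes every idx value
+// lies in [0, n)):
+//
+//   // points: (B, C, N), idx: (B, npoints, nsample)
+//   // -> out: (B, C, npoints, nsample)
+//   for (int b = 0; b < B; ++b)
+//     for (int c = 0; c < C; ++c)
+//       for (int p = 0; p < npoints; ++p)
+//         for (int s = 0; s < nsample; ++s)
+//           out[b][c][p][s] = points[b][c][idx[b][p][s]];
+//
+// The backward launcher routes gradients through the same index map with
+// atomicAdd, so the caller is expected to pass in a zero-initialized
+// grad_points.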
+// Modified from +// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/group_points_gpu.cu +#include +#include + +#include "group_points_cuda_kernel.cuh" +#include "pytorch_cuda_helper.hpp" + +void GroupPointsForwardCUDAKernelLauncher(int b, int c, int n, int npoints, + int nsample, const Tensor points, + const Tensor idx, Tensor out) { + // points: (B, C, N) + // idx: (B, npoints, nsample) + // output: + // out: (B, C, npoints, nsample) + + at::cuda::CUDAGuard device_guard(points.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // blockIdx.x(col), blockIdx.y(row) + dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); + dim3 threads(THREADS_PER_BLOCK); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + points.scalar_type(), "group_points_forward_cuda_kernel", [&] { + group_points_forward_cuda_kernel + <<>>( + b, c, n, npoints, nsample, points.data_ptr(), + idx.data_ptr(), out.data_ptr()); + }); + + AT_CUDA_CHECK(cudaGetLastError()); +} + +void GroupPointsBackwardCUDAKernelLauncher(int b, int c, int n, int npoints, + int nsample, const Tensor grad_out, + const Tensor idx, + Tensor grad_points) { + // grad_out: (B, C, npoints, nsample) + // idx: (B, npoints, nsample) + // output: + // grad_points: (B, C, N) + + at::cuda::CUDAGuard device_guard(grad_out.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // blockIdx.x(col), blockIdx.y(row) + dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); + dim3 threads(THREADS_PER_BLOCK); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad_out.scalar_type(), "group_points_backward_cuda_kernel", [&] { + group_points_backward_cuda_kernel + <<>>( + b, c, n, npoints, nsample, grad_out.data_ptr(), + idx.data_ptr(), grad_points.data_ptr()); + }); + + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/iou3d_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/iou3d_cuda.cu new file mode 100644 index 0000000..0643c16 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/iou3d_cuda.cu @@ -0,0 +1,86 @@ +// Modified from +// https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/iou3d_nms/src/iou3d_nms_kernel.cu + +/* +3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others) +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. 
+*/ + +#include + +#include "iou3d_cuda_kernel.cuh" +#include "pytorch_cuda_helper.hpp" + +void IoU3DBoxesOverlapBevForwardCUDAKernelLauncher(const int num_a, + const Tensor boxes_a, + const int num_b, + const Tensor boxes_b, + Tensor ans_overlap) { + at::cuda::CUDAGuard device_guard(boxes_a.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // blockIdx.x(col), blockIdx.y(row) + dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK_IOU3D), + DIVUP(num_a, THREADS_PER_BLOCK_IOU3D)); + dim3 threads(THREADS_PER_BLOCK_IOU3D, THREADS_PER_BLOCK_IOU3D); + + iou3d_boxes_overlap_bev_forward_cuda_kernel<<>>( + num_a, boxes_a.data_ptr(), num_b, boxes_b.data_ptr(), + ans_overlap.data_ptr()); + + AT_CUDA_CHECK(cudaGetLastError()); +} + +void IoU3DBoxesIoUBevForwardCUDAKernelLauncher(const int num_a, + const Tensor boxes_a, + const int num_b, + const Tensor boxes_b, + Tensor ans_iou) { + at::cuda::CUDAGuard device_guard(boxes_a.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // blockIdx.x(col), blockIdx.y(row) + dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK_IOU3D), + DIVUP(num_a, THREADS_PER_BLOCK_IOU3D)); + dim3 threads(THREADS_PER_BLOCK_IOU3D, THREADS_PER_BLOCK_IOU3D); + + iou3d_boxes_iou_bev_forward_cuda_kernel<<>>( + num_a, boxes_a.data_ptr(), num_b, boxes_b.data_ptr(), + ans_iou.data_ptr()); + + AT_CUDA_CHECK(cudaGetLastError()); +} + +void IoU3DNMSForwardCUDAKernelLauncher(const Tensor boxes, + unsigned long long *mask, int boxes_num, + float nms_overlap_thresh) { + at::cuda::CUDAGuard device_guard(boxes.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS), + DIVUP(boxes_num, THREADS_PER_BLOCK_NMS)); + dim3 threads(THREADS_PER_BLOCK_NMS); + + nms_forward_cuda_kernel<<>>( + boxes_num, nms_overlap_thresh, boxes.data_ptr(), mask); + + AT_CUDA_CHECK(cudaGetLastError()); +} + +void IoU3DNMSNormalForwardCUDAKernelLauncher(const Tensor boxes, + unsigned long long *mask, + int boxes_num, + float nms_overlap_thresh) { + at::cuda::CUDAGuard device_guard(boxes.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS), + DIVUP(boxes_num, THREADS_PER_BLOCK_NMS)); + dim3 threads(THREADS_PER_BLOCK_NMS); + + nms_normal_forward_cuda_kernel<<>>( + boxes_num, nms_overlap_thresh, boxes.data_ptr(), mask); + + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/knn_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/knn_cuda.cu new file mode 100644 index 0000000..4954fe4 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/knn_cuda.cu @@ -0,0 +1,34 @@ +// Copyright (c) OpenMMLab. 
All rights reserved +// Modified from +// https://github.com/CVMI-Lab/PAConv/tree/main/scene_seg/lib/pointops/src/knnquery_heap + +#include +#include + +#include "knn_cuda_kernel.cuh" +#include "pytorch_cuda_helper.hpp" + +void KNNForwardCUDAKernelLauncher(int b, int n, int m, int nsample, + const Tensor xyz, const Tensor new_xyz, + Tensor idx, Tensor dist2) { + // param new_xyz: (B, m, 3) + // param xyz: (B, n, 3) + // param idx: (B, m, nsample) + + at::cuda::CUDAGuard device_guard(new_xyz.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // blockIdx.x(col), blockIdx.y(row) + dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); + dim3 threads(THREADS_PER_BLOCK); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + new_xyz.scalar_type(), "knn_forward_cuda_kernel", [&] { + knn_forward_cuda_kernel<<>>( + b, n, m, nsample, xyz.data_ptr(), + new_xyz.data_ptr(), idx.data_ptr(), + dist2.data_ptr()); + }); + + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/masked_conv2d_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/masked_conv2d_cuda.cu new file mode 100644 index 0000000..022e189 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/masked_conv2d_cuda.cu @@ -0,0 +1,54 @@ +// Copyright (c) OpenMMLab. All rights reserved +#include "masked_conv2d_cuda_kernel.cuh" +#include "pytorch_cuda_helper.hpp" + +void MaskedIm2colForwardCUDAKernelLauncher(const Tensor bottom_data, + const Tensor mask_h_idx, + const Tensor mask_w_idx, + Tensor top_data, const int kernel_h, + const int kernel_w, const int pad_h, + const int pad_w) { + int channels = bottom_data.size(1); + int height = bottom_data.size(2); + int width = bottom_data.size(3); + int mask_cnt = mask_h_idx.size(0); + int output_size = mask_cnt * channels; + + at::cuda::CUDAGuard device_guard(bottom_data.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + bottom_data.scalar_type(), "MaskedIm2colLaucherForward", ([&] { + const scalar_t *bottom_data_ = bottom_data.data_ptr(); + const int64_t *mask_h_idx_ = mask_h_idx.data_ptr(); + const int64_t *mask_w_idx_ = mask_w_idx.data_ptr(); + scalar_t *top_data_ = top_data.data_ptr(); + MaskedIm2colForward + <<>>( + output_size, bottom_data_, height, width, kernel_h, kernel_w, + pad_h, pad_w, mask_h_idx_, mask_w_idx_, mask_cnt, top_data_); + })); + AT_CUDA_CHECK(cudaGetLastError()); +} + +void MaskedCol2imForwardCUDAKernelLauncher( + const Tensor bottom_data, const Tensor mask_h_idx, const Tensor mask_w_idx, + Tensor top_data, const int height, const int width, const int channels) { + int mask_cnt = mask_h_idx.size(0); + int output_size = mask_cnt * channels; + + at::cuda::CUDAGuard device_guard(bottom_data.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + bottom_data.scalar_type(), "MaskedCol2imLaucherForward", ([&] { + const scalar_t *bottom_data_ = bottom_data.data_ptr(); + const int64_t *mask_h_idx_ = mask_h_idx.data_ptr(); + const int64_t *mask_w_idx_ = mask_w_idx.data_ptr(); + scalar_t *top_data_ = top_data.data_ptr(); + + MaskedCol2imForward + <<>>( + output_size, bottom_data_, height, width, channels, mask_h_idx_, + mask_w_idx_, mask_cnt, top_data_); + })); + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/modulated_deform_conv_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/modulated_deform_conv_cuda.cu new file mode 100644 index 0000000..2b52796 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/modulated_deform_conv_cuda.cu @@ -0,0 +1,96 @@ +// 
Copyright (c) OpenMMLab. All rights reserved +#include "modulated_deform_conv_cuda_kernel.cuh" +#include "pytorch_cuda_helper.hpp" + +void modulated_deformable_im2col_cuda( + const Tensor data_im, const Tensor data_offset, const Tensor data_mask, + const int batch_size, const int channels, const int height_im, + const int width_im, const int height_col, const int width_col, + const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, const int dilation_h, + const int dilation_w, const int deformable_group, Tensor data_col) { + // num_axes should be smaller than block size + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = channels * batch_size * height_col * width_col; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] { + const scalar_t *data_im_ = data_im.data_ptr(); + const scalar_t *data_offset_ = data_offset.data_ptr(); + const scalar_t *data_mask_ = data_mask.data_ptr(); + scalar_t *data_col_ = data_col.data_ptr(); + + modulated_deformable_im2col_gpu_kernel<<< + GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0, + at::cuda::getCurrentCUDAStream()>>>( + num_kernels, data_im_, data_offset_, data_mask_, height_im, + width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, channel_per_deformable_group, batch_size, + channels, deformable_group, height_col, width_col, data_col_); + })); + AT_CUDA_CHECK(cudaGetLastError()); +} + +void modulated_deformable_col2im_cuda( + const Tensor data_col, const Tensor data_offset, const Tensor data_mask, + const int batch_size, const int channels, const int height_im, + const int width_im, const int height_col, const int width_col, + const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, const int dilation_h, + const int dilation_w, const int deformable_group, Tensor grad_im) { + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = + channels * kernel_h * kernel_w * batch_size * height_col * width_col; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] { + const scalar_t *data_col_ = data_col.data_ptr(); + const scalar_t *data_offset_ = data_offset.data_ptr(); + const scalar_t *data_mask_ = data_mask.data_ptr(); + scalar_t *grad_im_ = grad_im.data_ptr(); + + modulated_deformable_col2im_gpu_kernel<<< + GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0, + at::cuda::getCurrentCUDAStream()>>>( + num_kernels, data_col_, data_offset_, data_mask_, channels, + height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, + stride_w, dilation_h, dilation_w, channel_per_deformable_group, + batch_size, deformable_group, height_col, width_col, grad_im_); + })); + AT_CUDA_CHECK(cudaGetLastError()); +} + +void modulated_deformable_col2im_coord_cuda( + const Tensor data_col, const Tensor data_im, const Tensor data_offset, + const Tensor data_mask, const int batch_size, const int channels, + const int height_im, const int width_im, const int height_col, + const int width_col, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, const int deformable_group, + Tensor grad_offset, Tensor grad_mask) { + const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * + kernel_w * deformable_group; + const int 
channel_per_deformable_group = + channels * kernel_h * kernel_w / deformable_group; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] { + const scalar_t *data_col_ = data_col.data_ptr(); + const scalar_t *data_im_ = data_im.data_ptr(); + const scalar_t *data_offset_ = data_offset.data_ptr(); + const scalar_t *data_mask_ = data_mask.data_ptr(); + scalar_t *grad_offset_ = grad_offset.data_ptr(); + scalar_t *grad_mask_ = grad_mask.data_ptr(); + + modulated_deformable_col2im_coord_gpu_kernel<<< + GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0, + at::cuda::getCurrentCUDAStream()>>>( + num_kernels, data_col_, data_im_, data_offset_, data_mask_, + channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, + stride_h, stride_w, dilation_h, dilation_w, + channel_per_deformable_group, batch_size, + 2 * kernel_h * kernel_w * deformable_group, deformable_group, + height_col, width_col, grad_offset_, grad_mask_); + })); + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/ms_deform_attn_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/ms_deform_attn_cuda.cu new file mode 100644 index 0000000..2fccaa2 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/ms_deform_attn_cuda.cu @@ -0,0 +1,361 @@ +/*! +************************************************************************************************** +* Deformable DETR +* Copyright (c) 2020 SenseTime. All Rights Reserved. +* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************************************** +* Modified from +*https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#include +#include +#include +#include + +#include +#include + +#include "ms_deform_attn_cuda_kernel.cuh" + +template +void ms_deformable_im2col_cuda(cudaStream_t stream, const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, const int spatial_size, + const int num_heads, const int channels, + const int num_levels, const int num_query, + const int num_point, scalar_t *data_col) { + const int num_kernels = batch_size * num_query * num_heads * channels; + const int num_actual_kernels = batch_size * num_query * num_heads * channels; + const int num_threads = CUDA_NUM_THREADS; + ms_deformable_im2col_gpu_kernel + <<>>( + num_kernels, data_value, data_spatial_shapes, data_level_start_index, + data_sampling_loc, data_attn_weight, batch_size, spatial_size, + num_heads, channels, num_levels, num_query, num_point, data_col); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in ms_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); + } +} + +template +void ms_deformable_col2im_cuda( + cudaStream_t stream, const scalar_t *grad_col, const scalar_t *data_value, + const int64_t *data_spatial_shapes, const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, const scalar_t *data_attn_weight, + const int batch_size, const int spatial_size, const int num_heads, + const int channels, const int num_levels, const int num_query, + const int num_point, scalar_t *grad_value, scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) { + const int num_threads = + (channels > CUDA_NUM_THREADS) ? 
CUDA_NUM_THREADS : channels; + const int num_kernels = batch_size * num_query * num_heads * channels; + const int num_actual_kernels = batch_size * num_query * num_heads * channels; + if (channels > 1024) { + if ((channels & 1023) == 0) { + ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks + <<>>( + num_kernels, grad_col, data_value, data_spatial_shapes, + data_level_start_index, data_sampling_loc, data_attn_weight, + batch_size, spatial_size, num_heads, channels, num_levels, + num_query, num_point, grad_value, grad_sampling_loc, + grad_attn_weight); + } else { + ms_deformable_col2im_gpu_kernel_gm + <<>>(num_kernels, grad_col, data_value, data_spatial_shapes, + data_level_start_index, data_sampling_loc, + data_attn_weight, batch_size, spatial_size, num_heads, + channels, num_levels, num_query, num_point, grad_value, + grad_sampling_loc, grad_attn_weight); + } + } else { + switch (channels) { + case 1: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>(num_kernels, grad_col, data_value, data_spatial_shapes, + data_level_start_index, data_sampling_loc, + data_attn_weight, batch_size, spatial_size, num_heads, + channels, num_levels, num_query, num_point, grad_value, + grad_sampling_loc, grad_attn_weight); + break; + case 2: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>(num_kernels, grad_col, data_value, data_spatial_shapes, + data_level_start_index, data_sampling_loc, + data_attn_weight, batch_size, spatial_size, num_heads, + channels, num_levels, num_query, num_point, grad_value, + grad_sampling_loc, grad_attn_weight); + break; + case 4: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>(num_kernels, grad_col, data_value, data_spatial_shapes, + data_level_start_index, data_sampling_loc, + data_attn_weight, batch_size, spatial_size, num_heads, + channels, num_levels, num_query, num_point, grad_value, + grad_sampling_loc, grad_attn_weight); + break; + case 8: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>(num_kernels, grad_col, data_value, data_spatial_shapes, + data_level_start_index, data_sampling_loc, + data_attn_weight, batch_size, spatial_size, num_heads, + channels, num_levels, num_query, num_point, grad_value, + grad_sampling_loc, grad_attn_weight); + break; + case 16: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>(num_kernels, grad_col, data_value, data_spatial_shapes, + data_level_start_index, data_sampling_loc, + data_attn_weight, batch_size, spatial_size, num_heads, + channels, num_levels, num_query, num_point, grad_value, + grad_sampling_loc, grad_attn_weight); + break; + case 32: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>(num_kernels, grad_col, data_value, data_spatial_shapes, + data_level_start_index, data_sampling_loc, + data_attn_weight, batch_size, spatial_size, num_heads, + channels, num_levels, num_query, num_point, grad_value, + grad_sampling_loc, grad_attn_weight); + break; + case 64: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>(num_kernels, grad_col, data_value, data_spatial_shapes, + data_level_start_index, data_sampling_loc, + data_attn_weight, batch_size, spatial_size, num_heads, + channels, num_levels, num_query, num_point, grad_value, + grad_sampling_loc, grad_attn_weight); + break; + case 128: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>(num_kernels, grad_col, data_value, data_spatial_shapes, + data_level_start_index, data_sampling_loc, + data_attn_weight, batch_size, 
spatial_size, num_heads, + channels, num_levels, num_query, num_point, grad_value, + grad_sampling_loc, grad_attn_weight); + break; + case 256: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>(num_kernels, grad_col, data_value, data_spatial_shapes, + data_level_start_index, data_sampling_loc, + data_attn_weight, batch_size, spatial_size, num_heads, + channels, num_levels, num_query, num_point, grad_value, + grad_sampling_loc, grad_attn_weight); + break; + case 512: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>(num_kernels, grad_col, data_value, data_spatial_shapes, + data_level_start_index, data_sampling_loc, + data_attn_weight, batch_size, spatial_size, num_heads, + channels, num_levels, num_query, num_point, grad_value, + grad_sampling_loc, grad_attn_weight); + break; + case 1024: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>(num_kernels, grad_col, data_value, data_spatial_shapes, + data_level_start_index, data_sampling_loc, + data_attn_weight, batch_size, spatial_size, num_heads, + channels, num_levels, num_query, num_point, grad_value, + grad_sampling_loc, grad_attn_weight); + break; + default: + if (channels < 64) { + ms_deformable_col2im_gpu_kernel_shm_reduce_v1 + <<>>( + num_kernels, grad_col, data_value, data_spatial_shapes, + data_level_start_index, data_sampling_loc, data_attn_weight, + batch_size, spatial_size, num_heads, channels, num_levels, + num_query, num_point, grad_value, grad_sampling_loc, + grad_attn_weight); + } else { + ms_deformable_col2im_gpu_kernel_shm_reduce_v2 + <<>>( + num_kernels, grad_col, data_value, data_spatial_shapes, + data_level_start_index, data_sampling_loc, data_attn_weight, + batch_size, spatial_size, num_heads, channels, num_levels, + num_query, num_point, grad_value, grad_sampling_loc, + grad_attn_weight); + } + } + } + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in ms_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); + } +} + +at::Tensor ms_deform_attn_cuda_forward(const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const int im2col_step) { + AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); + AT_ASSERTM(spatial_shapes.is_contiguous(), + "spatial_shapes tensor has to be contiguous"); + AT_ASSERTM(level_start_index.is_contiguous(), + "level_start_index tensor has to be contiguous"); + AT_ASSERTM(sampling_loc.is_contiguous(), + "sampling_loc tensor has to be contiguous"); + AT_ASSERTM(attn_weight.is_contiguous(), + "attn_weight tensor has to be contiguous"); + + AT_ASSERTM(value.is_cuda(), "value must be a CUDA tensor"); + AT_ASSERTM(spatial_shapes.is_cuda(), "spatial_shapes must be a CUDA tensor"); + AT_ASSERTM(level_start_index.is_cuda(), + "level_start_index must be a CUDA tensor"); + AT_ASSERTM(sampling_loc.is_cuda(), "sampling_loc must be a CUDA tensor"); + AT_ASSERTM(attn_weight.is_cuda(), "attn_weight must be a CUDA tensor"); + + const int batch = value.size(0); + const int spatial_size = value.size(1); + const int num_heads = value.size(2); + const int channels = value.size(3); + + const int num_levels = spatial_shapes.size(0); + + const int num_query = sampling_loc.size(1); + const int num_point = sampling_loc.size(4); + + const int im2col_step_ = std::min(batch, im2col_step); + + AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", + batch, im2col_step_); + + 
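+  // The batch is processed in chunks of im2col_step_ samples so that the
+  // im2col working set for a single chunk stays bounded; e.g. with batch = 8
+  // and im2col_step_ = 2, the loop below runs 4 times. Chunk n reads from raw
+  // pointer offset n * im2col_step_ * per_value_size into value, where
+  // per_value_size = spatial_size * num_heads * channels (the per_*_size
+  // variables are defined just below), and the sampling-location and
+  // attention-weight pointers are advanced analogously.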
auto output = + at::zeros({batch, num_query, num_heads, channels}, value.options()); + + const int batch_n = im2col_step_; + auto output_n = output.view( + {batch / im2col_step_, batch_n, num_query, num_heads, channels}); + auto per_value_size = spatial_size * num_heads * channels; + auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; + auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; + for (int n = 0; n < batch / im2col_step_; ++n) { + auto columns = output_n.select(0, n); + AT_DISPATCH_FLOATING_TYPES( + value.scalar_type(), "ms_deform_attn_forward_cuda", ([&] { + ms_deformable_im2col_cuda( + at::cuda::getCurrentCUDAStream(), + value.data_ptr() + n * im2col_step_ * per_value_size, + spatial_shapes.data_ptr(), + level_start_index.data_ptr(), + sampling_loc.data_ptr() + + n * im2col_step_ * per_sample_loc_size, + attn_weight.data_ptr() + + n * im2col_step_ * per_attn_weight_size, + batch_n, spatial_size, num_heads, channels, num_levels, num_query, + num_point, columns.data_ptr()); + })); + } + + output = output.view({batch, num_query, num_heads * channels}); + + return output; +} + +void ms_deform_attn_cuda_backward( + const at::Tensor &value, const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, const at::Tensor &grad_output, + at::Tensor &grad_value, at::Tensor &grad_sampling_loc, + at::Tensor &grad_attn_weight, const int im2col_step) { + AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); + AT_ASSERTM(spatial_shapes.is_contiguous(), + "spatial_shapes tensor has to be contiguous"); + AT_ASSERTM(level_start_index.is_contiguous(), + "level_start_index tensor has to be contiguous"); + AT_ASSERTM(sampling_loc.is_contiguous(), + "sampling_loc tensor has to be contiguous"); + AT_ASSERTM(attn_weight.is_contiguous(), + "attn_weight tensor has to be contiguous"); + AT_ASSERTM(grad_output.is_contiguous(), + "grad_output tensor has to be contiguous"); + + AT_ASSERTM(value.is_cuda(), "value must be a CUDA tensor"); + AT_ASSERTM(spatial_shapes.is_cuda(), "spatial_shapes must be a CUDA tensor"); + AT_ASSERTM(level_start_index.is_cuda(), + "level_start_index must be a CUDA tensor"); + AT_ASSERTM(sampling_loc.is_cuda(), "sampling_loc must be a CUDA tensor"); + AT_ASSERTM(attn_weight.is_cuda(), "attn_weight must be a CUDA tensor"); + AT_ASSERTM(grad_output.is_cuda(), "grad_output must be a CUDA tensor"); + + const int batch = value.size(0); + const int spatial_size = value.size(1); + const int num_heads = value.size(2); + const int channels = value.size(3); + + const int num_levels = spatial_shapes.size(0); + + const int num_query = sampling_loc.size(1); + const int num_point = sampling_loc.size(4); + + const int im2col_step_ = std::min(batch, im2col_step); + + AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", + batch, im2col_step_); + + const int batch_n = im2col_step_; + auto per_value_size = spatial_size * num_heads * channels; + auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; + auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; + auto grad_output_n = grad_output.view( + {batch / im2col_step_, batch_n, num_query, num_heads, channels}); + + for (int n = 0; n < batch / im2col_step_; ++n) { + auto grad_output_g = grad_output_n.select(0, n); + AT_DISPATCH_FLOATING_TYPES( + value.scalar_type(), "ms_deform_attn_backward_cuda", ([&] { + ms_deformable_col2im_cuda( + 
at::cuda::getCurrentCUDAStream(), + grad_output_g.data_ptr(), + value.data_ptr() + n * im2col_step_ * per_value_size, + spatial_shapes.data_ptr(), + level_start_index.data_ptr(), + sampling_loc.data_ptr() + + n * im2col_step_ * per_sample_loc_size, + attn_weight.data_ptr() + + n * im2col_step_ * per_attn_weight_size, + batch_n, spatial_size, num_heads, channels, num_levels, num_query, + num_point, + grad_value.data_ptr() + + n * im2col_step_ * per_value_size, + grad_sampling_loc.data_ptr() + + n * im2col_step_ * per_sample_loc_size, + grad_attn_weight.data_ptr() + + n * im2col_step_ * per_attn_weight_size); + })); + } +} diff --git a/mmcv/ops/csrc/pytorch/cuda/nms_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/nms_cuda.cu new file mode 100644 index 0000000..16cf646 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/nms_cuda.cu @@ -0,0 +1,53 @@ +// Copyright (c) OpenMMLab. All rights reserved +#include "nms_cuda_kernel.cuh" +#include "pytorch_cuda_helper.hpp" + +Tensor NMSCUDAKernelLauncher(Tensor boxes, Tensor scores, float iou_threshold, + int offset) { + at::cuda::CUDAGuard device_guard(boxes.device()); + + if (boxes.numel() == 0) { + return at::empty({0}, boxes.options().dtype(at::kLong)); + } + auto order_t = std::get<1>(scores.sort(0, /*descending=*/true)); + auto boxes_sorted = boxes.index_select(0, order_t); + + int boxes_num = boxes.size(0); + const int col_blocks = DIVUP(boxes_num, threadsPerBlock); + Tensor mask = + at::empty({boxes_num, col_blocks}, boxes.options().dtype(at::kLong)); + dim3 blocks(col_blocks, col_blocks); + dim3 threads(threadsPerBlock); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + nms_cuda<<>>( + boxes_num, iou_threshold, offset, boxes_sorted.data_ptr(), + (unsigned long long*)mask.data_ptr()); + + at::Tensor mask_cpu = mask.to(at::kCPU); + unsigned long long* mask_host = + (unsigned long long*)mask_cpu.data_ptr(); + + std::vector remv(col_blocks); + memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); + + at::Tensor keep_t = + at::zeros({boxes_num}, boxes.options().dtype(at::kBool).device(at::kCPU)); + bool* keep = keep_t.data_ptr(); + + for (int i = 0; i < boxes_num; i++) { + int nblock = i / threadsPerBlock; + int inblock = i % threadsPerBlock; + + if (!(remv[nblock] & (1ULL << inblock))) { + keep[i] = true; + // set every overlap box with bit 1 in remv + unsigned long long* p = mask_host + i * col_blocks; + for (int j = nblock; j < col_blocks; j++) { + remv[j] |= p[j]; + } + } + } + + AT_CUDA_CHECK(cudaGetLastError()); + return order_t.masked_select(keep_t.to(at::kCUDA)); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/nms_rotated_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/nms_rotated_cuda.cu new file mode 100644 index 0000000..e1185f8 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/nms_rotated_cuda.cu @@ -0,0 +1,62 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +// modified from +// https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/nms_rotated/nms_rotated_cuda.cu +#include "nms_rotated_cuda.cuh" +#include "pytorch_cuda_helper.hpp" + +Tensor nms_rotated_cuda(const Tensor dets, const Tensor scores, + const Tensor order_t, const Tensor dets_sorted, + float iou_threshold, const int multi_label) { + // using scalar_t = float; + AT_ASSERTM(dets.is_cuda(), "dets must be a CUDA tensor"); + AT_ASSERTM(scores.is_cuda(), "scores must be a CUDA tensor"); + at::cuda::CUDAGuard device_guard(dets.device()); + + int dets_num = dets.size(0); + + const int col_blocks = at::cuda::ATenCeilDiv(dets_num, threadsPerBlock); + + Tensor mask = + at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong)); + + dim3 blocks(col_blocks, col_blocks); + dim3 threads(threadsPerBlock); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + dets_sorted.scalar_type(), "nms_rotated_kernel_cuda", [&] { + nms_rotated_cuda_kernel<<>>( + dets_num, iou_threshold, dets_sorted.data_ptr(), + (unsigned long long*)mask.data_ptr(), multi_label); + }); + + Tensor mask_cpu = mask.to(at::kCPU); + unsigned long long* mask_host = + (unsigned long long*)mask_cpu.data_ptr(); + + std::vector remv(col_blocks); + memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); + + Tensor keep = + at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU)); + int64_t* keep_out = keep.data_ptr(); + + int num_to_keep = 0; + for (int i = 0; i < dets_num; i++) { + int nblock = i / threadsPerBlock; + int inblock = i % threadsPerBlock; + + if (!(remv[nblock] & (1ULL << inblock))) { + keep_out[num_to_keep++] = i; + unsigned long long* p = mask_host + i * col_blocks; + for (int j = nblock; j < col_blocks; j++) { + remv[j] |= p[j]; + } + } + } + + AT_CUDA_CHECK(cudaGetLastError()); + return order_t.index( + {keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep) + .to(order_t.device(), keep.scalar_type())}); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/points_in_boxes_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/points_in_boxes_cuda.cu new file mode 100644 index 0000000..17e6441 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/points_in_boxes_cuda.cu @@ -0,0 +1,62 @@ +// Modified from +// https://github.com/sshaoshuai/PCDet/blob/master/pcdet/ops/roiaware_pool3d/src/roiaware_pool3d_kernel.cu +// Written by Shaoshuai Shi +// All Rights Reserved 2019. 
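+// The kernels in this file reduce to a point-in-rotated-box test. A minimal
+// standalone sketch of that test follows for reference; the helper name and
+// plain-float signature are illustrative, not part of this file:
+//
+//   __device__ inline bool point_in_lidar_box(
+//       float x, float y, float z,                 // query point
+//       float cx, float cy, float cz,              // box bottom-center
+//       float dx, float dy, float dz, float rz) {  // box size and yaw
+//     if (z < cz || z > cz + dz) return false;     // z is the *bottom* center
+//     const float cosa = cosf(-rz), sina = sinf(-rz);
+//     const float lx = (x - cx) * cosa - (y - cy) * sina;  // rotate into the
+//     const float ly = (x - cx) * sina + (y - cy) * cosa;  // box frame
+//     return fabsf(lx) < dx * 0.5f && fabsf(ly) < dy * 0.5f;
+//   }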
+
+#include <stdio.h>
+
+#include "points_in_boxes_cuda_kernel.cuh"
+#include "pytorch_cuda_helper.hpp"
+
+void PointsInBoxesPartForwardCUDAKernelLauncher(int batch_size, int boxes_num,
+                                                int pts_num, const Tensor boxes,
+                                                const Tensor pts,
+                                                Tensor box_idx_of_points) {
+  // params boxes: (B, N, 7) [x, y, z, x_size, y_size, z_size, rz] in LiDAR
+  // coordinate; z is the bottom center, and the boxes are assumed not to
+  // overlap
+  // params pts: (B, npoints, 3) [x, y, z] in LiDAR coordinate
+  // params box_idx_of_points: (B, npoints), default -1
+
+  at::cuda::CUDAGuard device_guard(boxes.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  dim3 blocks(DIVUP(pts_num, THREADS_PER_BLOCK), batch_size);
+  dim3 threads(THREADS_PER_BLOCK);
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      boxes.scalar_type(), "points_in_boxes_part_forward_cuda_kernel", [&] {
+        points_in_boxes_part_forward_cuda_kernel<scalar_t>
+            <<<blocks, threads, 0, stream>>>(
+                batch_size, boxes_num, pts_num, boxes.data_ptr<scalar_t>(),
+                pts.data_ptr<scalar_t>(), box_idx_of_points.data_ptr<int>());
+      });
+
+  AT_CUDA_CHECK(cudaGetLastError());
+}
+
+void PointsInBoxesAllForwardCUDAKernelLauncher(int batch_size, int boxes_num,
+                                               int pts_num, const Tensor boxes,
+                                               const Tensor pts,
+                                               Tensor box_idx_of_points) {
+  // params boxes: (B, N, 7) [x, y, z, x_size, y_size, z_size, rz] in LiDAR
+  // coordinate; z is the bottom center, and boxes may overlap
+  // params pts: (B, npoints, 3) [x, y, z] in LiDAR coordinate
+  // params box_idx_of_points: (B, npoints), default -1
+
+  at::cuda::CUDAGuard device_guard(boxes.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  dim3 blocks(DIVUP(pts_num, THREADS_PER_BLOCK), batch_size);
+  dim3 threads(THREADS_PER_BLOCK);
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      boxes.scalar_type(), "points_in_boxes_all_forward_cuda_kernel", [&] {
+        points_in_boxes_all_forward_cuda_kernel<scalar_t>
+            <<<blocks, threads, 0, stream>>>(
+                batch_size, boxes_num, pts_num, boxes.data_ptr<scalar_t>(),
+                pts.data_ptr<scalar_t>(), box_idx_of_points.data_ptr<int>());
+      });
+
+  AT_CUDA_CHECK(cudaGetLastError());
+}
diff --git a/mmcv/ops/csrc/pytorch/cuda/psamask_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/psamask_cuda.cu
new file mode 100644
index 0000000..274be83
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/cuda/psamask_cuda.cu
@@ -0,0 +1,59 @@
+// Copyright (c) OpenMMLab.
All rights reserved +// Modified from +// https://github.com/hszhao/semseg/blob/master/lib/psa/src + +#include +#include "psamask_cuda_kernel.cuh" +#include "pytorch_cuda_helper.hpp" + +void PSAMaskForwardCUDAKernelLauncher(const int psa_type, const Tensor input, + Tensor output, const int num_, + const int h_feature, const int w_feature, + const int h_mask, const int w_mask, + const int half_h_mask, + const int half_w_mask) { + int nthreads = num_ * h_feature * w_feature; + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + if (psa_type == 0) + AT_DISPATCH_FLOATING_TYPES( + input.scalar_type(), "psamask_collect_forward_cuda", [&] { + psamask_collect_forward_cuda<<>>( + nthreads, h_feature, w_feature, h_mask, w_mask, half_h_mask, + half_w_mask, input.data_ptr(), + output.data_ptr()); + }); + else + AT_DISPATCH_FLOATING_TYPES( + input.scalar_type(), "psamask_distribute_forward_cuda", [&] { + psamask_distribute_forward_cuda + <<>>( + nthreads, h_feature, w_feature, h_mask, w_mask, half_h_mask, + half_w_mask, input.data_ptr(), + output.data_ptr()); + }); +} + +void PSAMaskBackwardCUDAKernelLauncher( + const int psa_type, const Tensor grad_output, Tensor grad_input, + const int num_, const int h_feature, const int w_feature, const int h_mask, + const int w_mask, const int half_h_mask, const int half_w_mask) { + int nthreads = num_ * h_feature * w_feature; + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + if (psa_type == 0) + AT_DISPATCH_FLOATING_TYPES( + grad_input.scalar_type(), "psamask_collect_backward_cuda", [&] { + psamask_collect_backward_cuda<<>>( + nthreads, h_feature, w_feature, h_mask, w_mask, half_h_mask, + half_w_mask, grad_output.data_ptr(), + grad_input.data_ptr()); + }); + else + AT_DISPATCH_FLOATING_TYPES( + grad_input.scalar_type(), "psamask_distribute_backward_cuda", [&] { + psamask_distribute_backward_cuda + <<>>( + nthreads, h_feature, w_feature, h_mask, w_mask, half_h_mask, + half_w_mask, grad_output.data_ptr(), + grad_input.data_ptr()); + }); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/roi_align_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/roi_align_cuda.cu new file mode 100644 index 0000000..3d4f761 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/roi_align_cuda.cu @@ -0,0 +1,58 @@ +// Copyright (c) OpenMMLab. 
All rights reserved +#include "pytorch_cuda_helper.hpp" +#include "roi_align_cuda_kernel.cuh" + +void ROIAlignForwardCUDAKernelLauncher(Tensor input, Tensor rois, Tensor output, + Tensor argmax_y, Tensor argmax_x, + int aligned_height, int aligned_width, + float spatial_scale, int sampling_ratio, + int pool_mode, bool aligned) { + int output_size = output.numel(); + int channels = input.size(1); + int height = input.size(2); + int width = input.size(3); + + at::cuda::CUDAGuard device_guard(input.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), "roi_align_forward_cuda_kernel", [&] { + roi_align_forward_cuda_kernel + <<>>( + output_size, input.data_ptr(), + rois.data_ptr(), output.data_ptr(), + argmax_y.data_ptr(), argmax_x.data_ptr(), + aligned_height, aligned_width, + static_cast(spatial_scale), sampling_ratio, pool_mode, + aligned, channels, height, width); + }); + + AT_CUDA_CHECK(cudaGetLastError()); +} + +void ROIAlignBackwardCUDAKernelLauncher(Tensor grad_output, Tensor rois, + Tensor argmax_y, Tensor argmax_x, + Tensor grad_input, int aligned_height, + int aligned_width, float spatial_scale, + int sampling_ratio, int pool_mode, + bool aligned) { + int output_size = grad_output.numel(); + int channels = grad_input.size(1); + int height = grad_input.size(2); + int width = grad_input.size(3); + + at::cuda::CUDAGuard device_guard(grad_output.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad_output.scalar_type(), "roi_align_backward_cuda_kernel", [&] { + roi_align_backward_cuda_kernel + <<>>( + output_size, grad_output.data_ptr(), + rois.data_ptr(), argmax_y.data_ptr(), + argmax_x.data_ptr(), grad_input.data_ptr(), + aligned_height, aligned_width, + static_cast(spatial_scale), sampling_ratio, pool_mode, + aligned, channels, height, width); + }); + + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/roi_align_rotated_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/roi_align_rotated_cuda.cu new file mode 100644 index 0000000..aa631bc --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/roi_align_rotated_cuda.cu @@ -0,0 +1,45 @@ +// Copyright (c) OpenMMLab. 
All rights reserved +#include "pytorch_cuda_helper.hpp" +#include "roi_align_rotated_cuda_kernel.cuh" + +void ROIAlignRotatedForwardCUDAKernelLauncher( + const at::Tensor features, const at::Tensor rois, const float spatial_scale, + const int sample_num, const bool aligned, const bool clockwise, + const int channels, const int height, const int width, const int num_rois, + const int pooled_height, const int pooled_width, at::Tensor output) { + const int output_size = num_rois * pooled_height * pooled_width * channels; + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + features.scalar_type(), "ROIAlignRotatedLaucherForward", ([&] { + const scalar_t *bottom_data = features.data_ptr(); + const scalar_t *rois_data = rois.data_ptr(); + scalar_t *top_data = output.data_ptr(); + + roi_align_rotated_forward_cuda_kernel + <<>>( + output_size, bottom_data, rois_data, scalar_t(spatial_scale), + sample_num, aligned, clockwise, channels, height, width, + pooled_height, pooled_width, top_data); + })); + + AT_CUDA_CHECK(cudaGetLastError()); +} + +void ROIAlignRotatedBackwardCUDAKernelLauncher( + const at::Tensor top_grad, const at::Tensor rois, const float spatial_scale, + const int sample_num, const bool aligned, const bool clockwise, + const int channels, const int height, const int width, const int num_rois, + const int pooled_height, const int pooled_width, at::Tensor bottom_grad) { + const int output_size = num_rois * pooled_height * pooled_width * channels; + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + top_grad.scalar_type(), "ROIAlignLaucherBackward", ([&] { + const scalar_t *top_diff = top_grad.data_ptr(); + const scalar_t *rois_data = rois.data_ptr(); + scalar_t *bottom_diff = bottom_grad.data_ptr(); + roi_align_rotated_backward_cuda_kernel + <<>>( + output_size, top_diff, rois_data, spatial_scale, sample_num, + aligned, clockwise, channels, height, width, pooled_height, + pooled_width, bottom_diff); + })); + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/roi_pool_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/roi_pool_cuda.cu new file mode 100644 index 0000000..d9cdf30 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/roi_pool_cuda.cu @@ -0,0 +1,50 @@ +// Copyright (c) OpenMMLab. 
All rights reserved +#include "pytorch_cuda_helper.hpp" +#include "roi_pool_cuda_kernel.cuh" + +void ROIPoolForwardCUDAKernelLauncher(Tensor input, Tensor rois, Tensor output, + Tensor argmax, int pooled_height, + int pooled_width, float spatial_scale) { + int output_size = output.numel(); + int channels = input.size(1); + int height = input.size(2); + int width = input.size(3); + + at::cuda::CUDAGuard device_guard(input.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), "roi_pool_forward_cuda_kernel", [&] { + roi_pool_forward_cuda_kernel + <<>>( + output_size, input.data_ptr(), + rois.data_ptr(), output.data_ptr(), + argmax.data_ptr(), pooled_height, pooled_width, + static_cast(spatial_scale), channels, height, width); + }); + + AT_CUDA_CHECK(cudaGetLastError()); +} + +void ROIPoolBackwardCUDAKernelLauncher(Tensor grad_output, Tensor rois, + Tensor argmax, Tensor grad_input, + int pooled_height, int pooled_width, + float spatial_scale) { + int output_size = grad_output.numel(); + int channels = grad_input.size(1); + int height = grad_input.size(2); + int width = grad_input.size(3); + + at::cuda::CUDAGuard device_guard(grad_output.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad_output.scalar_type(), "roi_pool_backward_cuda_kernel", [&] { + roi_pool_backward_cuda_kernel + <<>>( + output_size, grad_output.data_ptr(), + rois.data_ptr(), argmax.data_ptr(), + grad_input.data_ptr(), pooled_height, pooled_width, + channels, height, width); + }); + + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/roiaware_pool3d_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/roiaware_pool3d_cuda.cu new file mode 100644 index 0000000..2bc7c3f --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/roiaware_pool3d_cuda.cu @@ -0,0 +1,118 @@ +// Modified from +// https://github.com/sshaoshuai/PCDet/blob/master/pcdet/ops/roiaware_pool3d/src/roiaware_pool3d_kernel.cu +// Written by Shaoshuai Shi +// All Rights Reserved 2019. 
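+// Execution pipeline of the forward launcher below, summarized for reference:
+//   1. generate_pts_mask_for_box3d  : builds pts_mask (boxes_num, pts_num),
+//      marking which points fall inside each RoI.
+//   2. collect_inside_pts_for_box3d : fills pts_idx_of_voxels with up to
+//      max_pts_each_voxel point indices for every output voxel
+//      (out_x * out_y * out_z voxels per RoI).
+//   3. roiaware_maxpool3d / roiaware_avgpool3d : pools the C-channel features
+//      of those points into pooled_features, recording argmax so the
+//      max-pool backward pass can route gradients.
+// The grid shapes mirror this: stage 1 is parallel over (points, boxes),
+// stage 2 over boxes, and stage 3 over (voxels, channels, boxes).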
+ +#include + +#include "pytorch_cuda_helper.hpp" +#include "roiaware_pool3d_cuda_kernel.cuh" + +void RoiawarePool3dForwardCUDAKernelLauncher( + int boxes_num, int pts_num, int channels, int max_pts_each_voxel, int out_x, + int out_y, int out_z, const Tensor rois, const Tensor pts, + const Tensor pts_feature, Tensor argmax, Tensor pts_idx_of_voxels, + Tensor pooled_features, int pool_method) { + // params rois: (N, 7) [x, y, z, x_size, y_size, z_size, rz] in LiDAR + // coordinate params pts: (npoints, 3) [x, y, z] in LiDAR coordinate params + // pts_feature: (npoints, C) params argmax: (N, out_x, out_y, out_z, C) params + // pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) params + // pooled_features: (N, out_x, out_y, out_z, C) params pool_method: 0: + // max_pool 1: avg_pool + + at::cuda::CUDAGuard device_guard(pts_feature.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + Tensor pts_mask = + -at::ones({boxes_num, pts_num}, pts_feature.options().dtype(at::kInt)); + + dim3 blocks_mask(DIVUP(pts_num, THREADS_PER_BLOCK), boxes_num); + dim3 threads(THREADS_PER_BLOCK); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + rois.scalar_type(), "generate_pts_mask_for_box3d", [&] { + generate_pts_mask_for_box3d + <<>>( + boxes_num, pts_num, out_x, out_y, out_z, + rois.data_ptr(), pts.data_ptr(), + pts_mask.data_ptr()); + }); + + AT_CUDA_CHECK(cudaGetLastError()); + + // TODO: Merge the collect and pool functions, SS + + dim3 blocks_collect(DIVUP(boxes_num, THREADS_PER_BLOCK)); + + AT_DISPATCH_INTEGRAL_TYPES( + pts_idx_of_voxels.scalar_type(), "collect_inside_pts_for_box3d", [&] { + collect_inside_pts_for_box3d + <<>>( + boxes_num, pts_num, max_pts_each_voxel, out_x, out_y, out_z, + pts_mask.data_ptr(), + pts_idx_of_voxels.data_ptr()); + }); + + AT_CUDA_CHECK(cudaGetLastError()); + + dim3 blocks_pool(DIVUP(out_x * out_y * out_z, THREADS_PER_BLOCK), channels, + boxes_num); + if (pool_method == 0) { + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + pts_feature.scalar_type(), "roiaware_maxpool3d", [&] { + roiaware_maxpool3d<<>>( + boxes_num, pts_num, channels, max_pts_each_voxel, out_x, out_y, + out_z, pts_feature.data_ptr(), + pts_idx_of_voxels.data_ptr(), + pooled_features.data_ptr(), argmax.data_ptr()); + }); + } else if (pool_method == 1) { + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + pts_feature.scalar_type(), "roiaware_avgpool3d", [&] { + roiaware_avgpool3d<<>>( + boxes_num, pts_num, channels, max_pts_each_voxel, out_x, out_y, + out_z, pts_feature.data_ptr(), + pts_idx_of_voxels.data_ptr(), + pooled_features.data_ptr()); + }); + } + + AT_CUDA_CHECK(cudaGetLastError()); +} + +void RoiawarePool3dBackwardCUDAKernelLauncher( + int boxes_num, int out_x, int out_y, int out_z, int channels, + int max_pts_each_voxel, const Tensor pts_idx_of_voxels, const Tensor argmax, + const Tensor grad_out, Tensor grad_in, int pool_method) { + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) + // params argmax: (N, out_x, out_y, out_z, C) + // params grad_out: (N, out_x, out_y, out_z, C) + // params grad_in: (npoints, C), return value + // params pool_method: 0: max_pool, 1: avg_pool + + at::cuda::CUDAGuard device_guard(grad_out.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 blocks(DIVUP(out_x * out_y * out_z, THREADS_PER_BLOCK), channels, + boxes_num); + dim3 threads(THREADS_PER_BLOCK); + + if (pool_method == 0) { + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad_in.scalar_type(), "roiaware_maxpool3d_backward", [&] { + roiaware_maxpool3d_backward<<>>( 
+ boxes_num, channels, out_x, out_y, out_z, argmax.data_ptr(), + grad_out.data_ptr(), grad_in.data_ptr()); + }); + } else if (pool_method == 1) { + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad_in.scalar_type(), "roiaware_avgpool3d_backward", [&] { + roiaware_avgpool3d_backward<<>>( + boxes_num, channels, out_x, out_y, out_z, max_pts_each_voxel, + pts_idx_of_voxels.data_ptr(), grad_out.data_ptr(), + grad_in.data_ptr()); + }); + } + + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/roipoint_pool3d_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/roipoint_pool3d_cuda.cu new file mode 100644 index 0000000..49c003f --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/roipoint_pool3d_cuda.cu @@ -0,0 +1,60 @@ +/* +Modified from +https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/roipoint_pool3d/src/roipoint_pool3d_kernel.cu +Point cloud feature pooling +Written by Shaoshuai Shi +All Rights Reserved 2018. +*/ + +#include +#include + +#include "pytorch_cuda_helper.hpp" +#include "roipoint_pool3d_cuda_kernel.cuh" + +void RoIPointPool3dForwardCUDAKernelLauncher( + int batch_size, int pts_num, int boxes_num, int feature_in_len, + int sampled_pts_num, const Tensor xyz, const Tensor boxes3d, + const Tensor pts_feature, Tensor pooled_features, + Tensor pooled_empty_flag) { + Tensor pts_assign = at::empty({batch_size, pts_num, boxes_num}, + boxes3d.options().dtype(at::kInt)); + + at::cuda::CUDAGuard device_guard(xyz.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // blockIdx.x(col), blockIdx.y(row) + dim3 blocks(DIVUP(pts_num, THREADS_PER_BLOCK), boxes_num, batch_size); + dim3 threads(THREADS_PER_BLOCK); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + xyz.scalar_type(), "assign_pts_to_box3d", [&] { + assign_pts_to_box3d<<>>( + batch_size, pts_num, boxes_num, xyz.data_ptr(), + boxes3d.data_ptr(), pts_assign.data_ptr()); + }); + + Tensor pts_idx = at::empty({batch_size, boxes_num, sampled_pts_num}, + boxes3d.options().dtype(at::kInt)); + + // blockIdx.x(col), blockIdx.y(row) + dim3 blocks2(DIVUP(boxes_num, THREADS_PER_BLOCK), batch_size); + + get_pooled_idx<<>>( + batch_size, pts_num, boxes_num, sampled_pts_num, + pts_assign.data_ptr(), pts_idx.data_ptr(), + pooled_empty_flag.data_ptr()); + + dim3 blocks_pool(DIVUP(sampled_pts_num, THREADS_PER_BLOCK), boxes_num, + batch_size); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + xyz.scalar_type(), "roipoint_pool3d_forward", [&] { + roipoint_pool3d_forward<<>>( + batch_size, pts_num, boxes_num, feature_in_len, sampled_pts_num, + xyz.data_ptr(), pts_idx.data_ptr(), + pts_feature.data_ptr(), + pooled_features.data_ptr(), + pooled_empty_flag.data_ptr()); + }); +} diff --git a/mmcv/ops/csrc/pytorch/cuda/scatter_points_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/scatter_points_cuda.cu new file mode 100644 index 0000000..4939fe4 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/cuda/scatter_points_cuda.cu @@ -0,0 +1,127 @@ +// Copyright (c) OpenMMLab. All rights reserved. 
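+// Reduction strategy used below, sketched with a toy example (values chosen
+// for illustration only): given point features feats (P, C) and integer voxel
+// coordinates coors (P, 3), at::unique_dim(coors, 0, /*sorted=*/true,
+// /*return_inverse=*/true, /*return_counts=*/true) yields one row per
+// distinct voxel plus, for every point, the index of its voxel (coors_map).
+// Points with any negative coordinate are first masked to (-1, -1, -1);
+// because the unique rows are sorted, that sentinel voxel lands at index 0
+// and is dropped by slice(0, 1), while coors_map - 1 leaves those points
+// with index -1 so the reduce kernel can skip them. For example,
+//   coors = [(0,0,0), (-1,2,3), (0,0,0)]
+// gives out_coors = [(0,0,0)], coors_map = [0, -1, 0], reduce_count = [2].
+// MAX reduction pre-fills the output with -inf before scattering; MEAN
+// scatters sums and then divides by reduce_count.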
+#include +#include +#include + +#include "pytorch_cuda_helper.hpp" +#include "scatter_points_cuda_kernel.cuh" + +std::vector DynamicPointToVoxelForwardCUDAKernelLauncher( + const at::Tensor &feats, const at::Tensor &coors, + const reduce_t reduce_type) { + const int num_input = feats.size(0); + const int num_feats = feats.size(1); + + if (num_input == 0) + return {feats.clone().detach(), coors.clone().detach(), + coors.new_empty({0}, torch::kInt32), + coors.new_empty({0}, torch::kInt32)}; + + at::Tensor out_coors; + at::Tensor coors_map; + at::Tensor reduce_count; + + auto coors_clean = coors.masked_fill(coors.lt(0).any(-1, true), -1); + + std::tie(out_coors, coors_map, reduce_count) = + at::unique_dim(coors_clean, 0, true, true, true); + + // the first element of out_coors is always (-1,-1,-1) and should be removed + out_coors = out_coors.slice(0, 1); + reduce_count = reduce_count.slice(0, 1).to(torch::kInt32); + coors_map = coors_map.to(torch::kInt32) - 1; + + auto reduced_feats = + at::empty({out_coors.size(0), num_feats}, feats.options()); + + at::cuda::CUDAGuard device_guard(feats.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES( + feats.scalar_type(), "feats_reduce_kernel", ([&] { + if (reduce_type == reduce_t::MAX) + reduced_feats.fill_(-std::numeric_limits::infinity()); + else + reduced_feats.fill_(static_cast(0)); + + dim3 blocks(std::min( + at::cuda::ATenCeilDiv(num_input, THREADS_PER_BLOCK), maxGridDim)); + dim3 threads(THREADS_PER_BLOCK); + feats_reduce_kernel<<>>( + feats.data_ptr(), coors_map.data_ptr(), + reduced_feats.data_ptr(), num_input, num_feats, + reduce_type); + if (reduce_type == reduce_t::MEAN) + reduced_feats /= reduce_count.unsqueeze(-1).to(reduced_feats.dtype()); + })); + + AT_CUDA_CHECK(cudaGetLastError()); + + return {reduced_feats, out_coors, coors_map, reduce_count}; +} + +void DynamicPointToVoxelBackwardCUDAKernelLauncher( + at::Tensor &grad_feats, const at::Tensor &grad_reduced_feats, + const at::Tensor &feats, const at::Tensor &reduced_feats, + const at::Tensor &coors_map, const at::Tensor &reduce_count, + const reduce_t reduce_type) { + const int num_input = feats.size(0); + const int num_reduced = reduced_feats.size(0); + const int num_feats = feats.size(1); + + grad_feats.fill_(0); + // copy voxel grad to points + + if (num_input == 0 || num_reduced == 0) return; + at::cuda::CUDAGuard device_guard(feats.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + if (reduce_type == reduce_t::MEAN || reduce_type == reduce_t::SUM) { + AT_DISPATCH_FLOATING_TYPES( + grad_reduced_feats.scalar_type(), "add_reduce_traceback_grad_kernel", + ([&] { + dim3 blocks(std::min( + at::cuda::ATenCeilDiv(num_input, THREADS_PER_BLOCK), maxGridDim)); + dim3 threads(THREADS_PER_BLOCK); + add_reduce_traceback_grad_kernel<<>>( + grad_feats.data_ptr(), + grad_reduced_feats.data_ptr(), + coors_map.data_ptr(), reduce_count.data_ptr(), + num_input, num_feats, reduce_type); + })); + + AT_CUDA_CHECK(cudaGetLastError()); + } else { + auto reduce_from = at::full({num_reduced, num_feats}, num_input, + coors_map.options().dtype(torch::kInt32)); + AT_DISPATCH_FLOATING_TYPES( + grad_reduced_feats.scalar_type(), + "max_reduce_traceback_scatter_idx_kernel", ([&] { + dim3 blocks(std::min( + at::cuda::ATenCeilDiv(num_input, THREADS_PER_BLOCK), maxGridDim)); + dim3 threads(THREADS_PER_BLOCK); + max_reduce_traceback_scatter_idx_kernel<<>>( + feats.data_ptr(), reduced_feats.data_ptr(), + reduce_from.data_ptr(), 
+              coors_map.data_ptr<int32_t>(), num_input, num_feats);
+        }));
+
+    AT_CUDA_CHECK(cudaGetLastError());
+
+    AT_DISPATCH_FLOATING_TYPES(
+        grad_reduced_feats.scalar_type(), "max_reduce_scatter_grad_kernel",
+        ([&] {
+          dim3 blocks(
+              std::min(at::cuda::ATenCeilDiv(num_reduced, THREADS_PER_BLOCK),
+                       maxGridDim));
+          dim3 threads(THREADS_PER_BLOCK);
+          max_reduce_scatter_grad_kernel<<<blocks, threads, 0, stream>>>(
+              grad_feats.data_ptr<scalar_t>(),
+              grad_reduced_feats.data_ptr<scalar_t>(),
+              reduce_from.data_ptr<int32_t>(), num_reduced, num_feats);
+        }));
+
+    AT_CUDA_CHECK(cudaGetLastError());
+  }
+}
diff --git a/mmcv/ops/csrc/pytorch/cuda/sync_bn_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/sync_bn_cuda.cu
new file mode 100644
index 0000000..657c817
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/cuda/sync_bn_cuda.cu
@@ -0,0 +1,110 @@
+// Copyright (c) OpenMMLab. All rights reserved
+#include "pytorch_cuda_helper.hpp"
+#include "sync_bn_cuda_kernel.cuh"
+
+void SyncBNForwardMeanCUDAKernelLauncher(const Tensor input, Tensor mean) {
+  int num = input.size(0);
+  int channels = input.size(1);
+  int spatial = input.size(2);
+
+  at::cuda::CUDAGuard device_guard(input.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      input.scalar_type(), "sync_bn_forward_mean_cuda_kernel", [&] {
+        sync_bn_forward_mean_cuda_kernel<scalar_t>
+            <<<channels, THREADS_PER_BLOCK, 0, stream>>>(
+                input.data_ptr<scalar_t>(), mean.data_ptr<float>(), num,
+                channels, spatial);
+      });
+  AT_CUDA_CHECK(cudaGetLastError());
+}
+
+void SyncBNForwardVarCUDAKernelLauncher(const Tensor input, const Tensor mean,
+                                        Tensor var) {
+  int num = input.size(0);
+  int channels = input.size(1);
+  int spatial = input.size(2);
+
+  at::cuda::CUDAGuard device_guard(input.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      input.scalar_type(), "sync_bn_forward_var_cuda_kernel", [&] {
+        sync_bn_forward_var_cuda_kernel<scalar_t>
+            <<<channels, THREADS_PER_BLOCK, 0, stream>>>(
+                input.data_ptr<scalar_t>(), mean.data_ptr<float>(),
+                var.data_ptr<float>(), num, channels, spatial);
+      });
+  AT_CUDA_CHECK(cudaGetLastError());
+}
+
+void SyncBNForwardOutputCUDAKernelLauncher(
+    const Tensor input, const Tensor mean, const Tensor var,
+    Tensor running_mean, Tensor running_var, const Tensor weight,
+    const Tensor bias, Tensor norm, Tensor std, Tensor output, float eps,
+    float momentum, int group_size) {
+  int num = input.size(0);
+  int channels = input.size(1);
+  int spatial = input.size(2);
+
+  at::cuda::CUDAGuard device_guard(input.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      input.scalar_type(), "sync_bn_forward_output_cuda_kernel", [&] {
+        sync_bn_forward_output_cuda_kernel<scalar_t>
+            <<<channels, THREADS_PER_BLOCK, 0, stream>>>(
+                input.data_ptr<scalar_t>(), mean.data_ptr<float>(),
+                var.data_ptr<float>(), running_mean.data_ptr<float>(),
+                running_var.data_ptr<float>(), weight.data_ptr<float>(),
+                bias.data_ptr<float>(), norm.data_ptr<float>(),
+                std.data_ptr<float>(), output.data_ptr<scalar_t>(), num,
+                channels, spatial, eps, momentum, group_size);
+      });
+  AT_CUDA_CHECK(cudaGetLastError());
+}
+
+void SyncBNBackwardParamCUDAKernelLauncher(const Tensor grad_output,
+                                           const Tensor norm,
+                                           Tensor grad_weight,
+                                           Tensor grad_bias) {
+  int num = grad_output.size(0);
+  int channels = grad_output.size(1);
+  int spatial = grad_output.size(2);
+
+  at::cuda::CUDAGuard device_guard(grad_output.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      grad_output.scalar_type(), "sync_bn_backward_param_cuda_kernel", [&] {
+        sync_bn_backward_param_cuda_kernel<scalar_t>
+            <<<channels, THREADS_PER_BLOCK, 0, stream>>>(
+                grad_output.data_ptr<scalar_t>(), norm.data_ptr<float>(),
+                grad_weight.data_ptr<float>(), grad_bias.data_ptr<float>(), num,
+                channels, spatial);
+      });
+  AT_CUDA_CHECK(cudaGetLastError());
+}
+
+void SyncBNBackwardDataCUDAKernelLauncher(const Tensor grad_output,
+                                          const Tensor weight,
+                                          const Tensor grad_weight,
+                                          const Tensor grad_bias,
+                                          const Tensor norm, const Tensor std,
+                                          Tensor grad_input) {
+  int output_size = grad_input.numel();
+  int num = grad_input.size(0);
+  int channels = grad_input.size(1);
+  int spatial = grad_input.size(2);
+
+  at::cuda::CUDAGuard device_guard(grad_input.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      grad_output.scalar_type(), "sync_bn_backward_data_cuda_kernel", [&] {
+        sync_bn_backward_data_cuda_kernel<scalar_t>
+            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
+                output_size, grad_output.data_ptr<scalar_t>(),
+                weight.data_ptr<float>(), grad_weight.data_ptr<float>(),
+                grad_bias.data_ptr<float>(), norm.data_ptr<float>(),
+                std.data_ptr<float>(), grad_input.data_ptr<scalar_t>(), num,
+                channels, spatial);
+      });
+  AT_CUDA_CHECK(cudaGetLastError());
+}
diff --git a/mmcv/ops/csrc/pytorch/cuda/three_interpolate_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/three_interpolate_cuda.cu
new file mode 100644
index 0000000..839d2d8
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/cuda/three_interpolate_cuda.cu
@@ -0,0 +1,66 @@
+// Modified from
+// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/interpolate_gpu.cu
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "pytorch_cuda_helper.hpp"
+#include "three_interpolate_cuda_kernel.cuh"
+
+void ThreeInterpolateForwardCUDAKernelLauncher(int b, int c, int m, int n,
+                                               const Tensor points,
+                                               const Tensor idx,
+                                               const Tensor weight,
+                                               Tensor out) {
+  // points: (B, C, M)
+  // idx: (B, N, 3)
+  // weight: (B, N, 3)
+  // output:
+  //      out: (B, C, N)
+
+  at::cuda::CUDAGuard device_guard(points.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  // blockIdx.x(col), blockIdx.y(row)
+  dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c, b);
+  dim3 threads(THREADS_PER_BLOCK);
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      points.scalar_type(), "three_interpolate_forward_cuda_kernel", [&] {
+        three_interpolate_forward_cuda_kernel<scalar_t>
+            <<<blocks, threads, 0, stream>>>(
+                b, c, m, n, points.data_ptr<scalar_t>(), idx.data_ptr<int>(),
+                weight.data_ptr<scalar_t>(), out.data_ptr<scalar_t>());
+      });
+
+  AT_CUDA_CHECK(cudaGetLastError());
+}
+
+void ThreeInterpolateBackwardCUDAKernelLauncher(int b, int c, int n, int m,
+                                                const Tensor grad_out,
+                                                const Tensor idx,
+                                                const Tensor weight,
+                                                Tensor grad_points) {
+  // grad_out: (B, C, N)
+  // weight: (B, N, 3)
+  // output:
+  //      grad_points: (B, C, M)
+
+  at::cuda::CUDAGuard device_guard(grad_out.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  // blockIdx.x(col), blockIdx.y(row)
+  dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c, b);
+  dim3 threads(THREADS_PER_BLOCK);
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      grad_out.scalar_type(), "three_interpolate_backward_cuda_kernel", [&] {
+        three_interpolate_backward_cuda_kernel<scalar_t>
+            <<<blocks, threads, 0, stream>>>(
+                b, c, n, m, grad_out.data_ptr<scalar_t>(), idx.data_ptr<int>(),
+                weight.data_ptr<scalar_t>(), grad_points.data_ptr<scalar_t>());
+      });
+
+  AT_CUDA_CHECK(cudaGetLastError());
+}
diff --git a/mmcv/ops/csrc/pytorch/cuda/three_nn_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/three_nn_cuda.cu
new file mode 100644
index 0000000..9afde8f
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/cuda/three_nn_cuda.cu
@@ -0,0 +1,35 @@
+// Modified from
+// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/interpolate_gpu.cu
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "pytorch_cuda_helper.hpp"
+#include "three_nn_cuda_kernel.cuh"
+
+void ThreeNNForwardCUDAKernelLauncher(int b, int n, int m, const Tensor unknown,
+                                      const Tensor known, Tensor dist2,
+                                      Tensor idx) {
+  // unknown: (B, N, 3)
+  // known: (B, M, 3)
+  // output:
+  //      dist2: (B, N, 3)
+  //      idx: (B, N, 3)
+
+  at::cuda::CUDAGuard device_guard(unknown.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  // blockIdx.x(col), blockIdx.y(row)
+  dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), b);
+  dim3 threads(THREADS_PER_BLOCK);
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      unknown.scalar_type(), "three_nn_forward_cuda_kernel", [&] {
+        three_nn_forward_cuda_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
+            b, n, m, unknown.data_ptr<scalar_t>(), known.data_ptr<scalar_t>(),
+            dist2.data_ptr<scalar_t>(), idx.data_ptr<int>());
+      });
+
+  AT_CUDA_CHECK(cudaGetLastError());
+}
diff --git a/mmcv/ops/csrc/pytorch/cuda/tin_shift_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/tin_shift_cuda.cu
new file mode 100644
index 0000000..19c85c7
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/cuda/tin_shift_cuda.cu
@@ -0,0 +1,55 @@
+// Copyright (c) OpenMMLab. All rights reserved
+#include "pytorch_cuda_helper.hpp"
+#include "pytorch_device_registry.hpp"
+#include "tin_shift_cuda_kernel.cuh"
+
+void TINShiftForwardCUDAKernelLauncher(Tensor input, Tensor shift,
+                                       Tensor output) {
+  int output_size = output.numel();
+  int batch_size = input.size(0);
+  int t_size = input.size(1);
+  int channels = input.size(2);
+  int hw_size = input.size(3);
+  int group_size = shift.size(1);
+  int group_channel = channels / group_size;
+  int num_kernels = batch_size * hw_size * channels;
+
+  at::cuda::CUDAGuard device_guard(input.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      input.scalar_type(), "tin_shift_forward_cuda_kernel", [&] {
+        tin_shift_forward_cuda_kernel<scalar_t>
+            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
+                output_size, input.data_ptr<scalar_t>(), shift.data_ptr<int>(),
+                output.data_ptr<scalar_t>(), batch_size, channels, t_size,
+                hw_size, group_size, group_channel);
+      });
+
+  AT_CUDA_CHECK(cudaGetLastError());
+}
+
+void TINShiftBackwardCUDAKernelLauncher(Tensor grad_output, Tensor shift,
+                                        Tensor grad_input) {
+  int output_size = grad_output.numel();
+  int batch_size = grad_output.size(0);
+  int t_size = grad_output.size(1);
+  int channels = grad_output.size(2);
+  int hw_size = grad_output.size(3);
+  int group_size = shift.size(1);
+  int group_channel = channels / group_size;
+  int num_kernels = batch_size * hw_size * channels;
+
+  at::cuda::CUDAGuard device_guard(grad_output.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      grad_output.scalar_type(), "tin_shift_backward_cuda_kernel", [&] {
+        tin_shift_backward_cuda_kernel<scalar_t>
+            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
+                output_size, grad_output.data_ptr<scalar_t>(),
+                shift.data_ptr<int>(), grad_input.data_ptr<scalar_t>(),
+                batch_size, channels, t_size, hw_size, group_size,
+                group_channel);
+      });
+
+  AT_CUDA_CHECK(cudaGetLastError());
+}
diff --git a/mmcv/ops/csrc/pytorch/cuda/upfirdn2d_kernel.cu b/mmcv/ops/csrc/pytorch/cuda/upfirdn2d_kernel.cu
new file mode 100644
index 0000000..ea2f088
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/cuda/upfirdn2d_kernel.cu
@@ -0,0 +1,370 @@
+// Modified from
+// https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d_kernel.cu
+// Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
+//
+// This work is made available under the Nvidia Source Code License-NC.
+// To view a copy of this license, visit
+// https://nvlabs.github.io/stylegan2/license.html
+
+#include <torch/types.h>
+#include <ATen/ATen.h>
+#include <ATen/AccumulateType.h>
+#include <ATen/cuda/CUDAApplyUtils.cuh>
+#include <ATen/cuda/CUDAContext.h>
+#include <c10/cuda/CUDAGuard.h>
+
+#include <cuda_runtime.h>
+
+static __host__ __device__ __forceinline__ int floor_div(int a, int b) {
+  int c = a / b;
+
+  if (c * b > a) {
+    c--;
+  }
+
+  return c;
+}
+
+struct UpFirDn2DKernelParams {
+  int up_x;
+  int up_y;
+  int down_x;
+  int down_y;
+  int pad_x0;
+  int pad_x1;
+  int pad_y0;
+  int pad_y1;
+
+  int major_dim;
+  int in_h;
+  int in_w;
+  int minor_dim;
+  int kernel_h;
+  int kernel_w;
+  int out_h;
+  int out_w;
+  int loop_major;
+  int loop_x;
+};
+
+template <typename scalar_t>
+__global__ void upfirdn2d_kernel_large(scalar_t *out, const scalar_t *input,
+                                       const scalar_t *kernel,
+                                       const UpFirDn2DKernelParams p) {
+  int minor_idx = blockIdx.x * blockDim.x + threadIdx.x;
+  int out_y = minor_idx / p.minor_dim;
+  minor_idx -= out_y * p.minor_dim;
+  int out_x_base = blockIdx.y * p.loop_x * blockDim.y + threadIdx.y;
+  int major_idx_base = blockIdx.z * p.loop_major;
+
+  if (out_x_base >= p.out_w || out_y >= p.out_h ||
+      major_idx_base >= p.major_dim) {
+    return;
+  }
+
+  int mid_y = out_y * p.down_y + p.up_y - 1 - p.pad_y0;
+  int in_y = min(max(floor_div(mid_y, p.up_y), 0), p.in_h);
+  int h = min(max(floor_div(mid_y + p.kernel_h, p.up_y), 0), p.in_h) - in_y;
+  int kernel_y = mid_y + p.kernel_h - (in_y + 1) * p.up_y;
+
+  for (int loop_major = 0, major_idx = major_idx_base;
+       loop_major < p.loop_major && major_idx < p.major_dim;
+       loop_major++, major_idx++) {
+    for (int loop_x = 0, out_x = out_x_base;
+         loop_x < p.loop_x && out_x < p.out_w; loop_x++, out_x += blockDim.y) {
+      int mid_x = out_x * p.down_x + p.up_x - 1 - p.pad_x0;
+      int in_x = min(max(floor_div(mid_x, p.up_x), 0), p.in_w);
+      int w = min(max(floor_div(mid_x + p.kernel_w, p.up_x), 0), p.in_w) - in_x;
+      int kernel_x = mid_x + p.kernel_w - (in_x + 1) * p.up_x;
+
+      const scalar_t *x_p =
+          &input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim +
+                 minor_idx];
+      const scalar_t *k_p = &kernel[kernel_y * p.kernel_w + kernel_x];
+      int x_px = p.minor_dim;
+      int k_px = -p.up_x;
+      int x_py = p.in_w * p.minor_dim;
+      int k_py = -p.up_y * p.kernel_w;
+
+      scalar_t v = 0.0f;
+
+      for (int y = 0; y < h; y++) {
+        for (int x = 0; x < w; x++) {
+          v += static_cast<scalar_t>(*x_p) * static_cast<scalar_t>(*k_p);
+          x_p += x_px;
+          k_p += k_px;
+        }
+
+        x_p += x_py - w * x_px;
+        k_p += k_py - w * k_px;
+      }
+
+      out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim +
+          minor_idx] = v;
+    }
+  }
+}
+
+template <typename scalar_t, int up_x, int up_y, int down_x, int down_y,
+          int kernel_h, int kernel_w, int tile_out_h, int tile_out_w>
+__global__ void upfirdn2d_kernel(scalar_t *out, const scalar_t *input,
+                                 const scalar_t *kernel,
+                                 const UpFirDn2DKernelParams p) {
+  const int tile_in_h = ((tile_out_h - 1) * down_y + kernel_h - 1) / up_y + 1;
+  const int tile_in_w = ((tile_out_w - 1) * down_x + kernel_w - 1) / up_x + 1;
+
+  __shared__ volatile float sk[kernel_h][kernel_w];
+  __shared__ volatile float sx[tile_in_h][tile_in_w];
+
+  int minor_idx = blockIdx.x;
+  int tile_out_y = minor_idx / p.minor_dim;
+  minor_idx -= tile_out_y * p.minor_dim;
+  tile_out_y *= tile_out_h;
+  int tile_out_x_base = blockIdx.y * p.loop_x * tile_out_w;
+  int major_idx_base = blockIdx.z * p.loop_major;
+
+  if (tile_out_x_base >= p.out_w | tile_out_y >= p.out_h |
+      major_idx_base >= p.major_dim) {
+    return;
+  }
+
+  for (int tap_idx = threadIdx.x; tap_idx < kernel_h * kernel_w;
+       tap_idx += blockDim.x) {
+    int ky = tap_idx / kernel_w;
+    int kx = tap_idx - ky * kernel_w;
+    scalar_t v = 0.0;
+
+    if (kx < p.kernel_w & ky < p.kernel_h) {
+      v = kernel[(p.kernel_h - 1 - ky) * p.kernel_w +
(p.kernel_w - 1 - kx)]; + } + + sk[ky][kx] = v; + } + + for (int loop_major = 0, major_idx = major_idx_base; + loop_major < p.loop_major & major_idx < p.major_dim; + loop_major++, major_idx++) { + for (int loop_x = 0, tile_out_x = tile_out_x_base; + loop_x < p.loop_x & tile_out_x < p.out_w; + loop_x++, tile_out_x += tile_out_w) { + int tile_mid_x = tile_out_x * down_x + up_x - 1 - p.pad_x0; + int tile_mid_y = tile_out_y * down_y + up_y - 1 - p.pad_y0; + int tile_in_x = floor_div(tile_mid_x, up_x); + int tile_in_y = floor_div(tile_mid_y, up_y); + + __syncthreads(); + + for (int in_idx = threadIdx.x; in_idx < tile_in_h * tile_in_w; + in_idx += blockDim.x) { + int rel_in_y = in_idx / tile_in_w; + int rel_in_x = in_idx - rel_in_y * tile_in_w; + int in_x = rel_in_x + tile_in_x; + int in_y = rel_in_y + tile_in_y; + + scalar_t v = 0.0; + + if (in_x >= 0 & in_y >= 0 & in_x < p.in_w & in_y < p.in_h) { + v = input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * + p.minor_dim + + minor_idx]; + } + + sx[rel_in_y][rel_in_x] = v; + } + + __syncthreads(); + for (int out_idx = threadIdx.x; out_idx < tile_out_h * tile_out_w; + out_idx += blockDim.x) { + int rel_out_y = out_idx / tile_out_w; + int rel_out_x = out_idx - rel_out_y * tile_out_w; + int out_x = rel_out_x + tile_out_x; + int out_y = rel_out_y + tile_out_y; + + int mid_x = tile_mid_x + rel_out_x * down_x; + int mid_y = tile_mid_y + rel_out_y * down_y; + int in_x = floor_div(mid_x, up_x); + int in_y = floor_div(mid_y, up_y); + int rel_in_x = in_x - tile_in_x; + int rel_in_y = in_y - tile_in_y; + int kernel_x = (in_x + 1) * up_x - mid_x - 1; + int kernel_y = (in_y + 1) * up_y - mid_y - 1; + + scalar_t v = 0.0; + +#pragma unroll + for (int y = 0; y < kernel_h / up_y; y++) +#pragma unroll + for (int x = 0; x < kernel_w / up_x; x++) + v += sx[rel_in_y + y][rel_in_x + x] * + sk[kernel_y + y * up_y][kernel_x + x * up_x]; + + if (out_x < p.out_w & out_y < p.out_h) { + out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim + + minor_idx] = v; + } + } + } + } +} + +torch::Tensor upfirdn2d_op(const torch::Tensor &input, + const torch::Tensor &kernel, int up_x, int up_y, + int down_x, int down_y, int pad_x0, int pad_x1, + int pad_y0, int pad_y1) { + int curDevice = -1; + cudaGetDevice(&curDevice); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice); + + UpFirDn2DKernelParams p; + + auto x = input.contiguous(); + auto k = kernel.contiguous(); + + p.major_dim = x.size(0); + p.in_h = x.size(1); + p.in_w = x.size(2); + p.minor_dim = x.size(3); + p.kernel_h = k.size(0); + p.kernel_w = k.size(1); + p.up_x = up_x; + p.up_y = up_y; + p.down_x = down_x; + p.down_y = down_y; + p.pad_x0 = pad_x0; + p.pad_x1 = pad_x1; + p.pad_y0 = pad_y0; + p.pad_y1 = pad_y1; + + p.out_h = (p.in_h * p.up_y + p.pad_y0 + p.pad_y1 - p.kernel_h + p.down_y) / + p.down_y; + p.out_w = (p.in_w * p.up_x + p.pad_x0 + p.pad_x1 - p.kernel_w + p.down_x) / + p.down_x; + + auto out = + at::empty({p.major_dim, p.out_h, p.out_w, p.minor_dim}, x.options()); + + int mode = -1; + + int tile_out_h = -1; + int tile_out_w = -1; + + if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && + p.kernel_h <= 4 && p.kernel_w <= 4) { + mode = 1; + tile_out_h = 16; + tile_out_w = 64; + } + + if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && + p.kernel_h <= 3 && p.kernel_w <= 3) { + mode = 2; + tile_out_h = 16; + tile_out_w = 64; + } + + if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && + p.kernel_h <= 4 && p.kernel_w <= 4) { + mode = 3; + 
+    tile_out_h = 16;
+    tile_out_w = 64;
+  }
+
+  if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 &&
+      p.kernel_h <= 2 && p.kernel_w <= 2) {
+    mode = 4;
+    tile_out_h = 16;
+    tile_out_w = 64;
+  }
+
+  if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 &&
+      p.kernel_h <= 4 && p.kernel_w <= 4) {
+    mode = 5;
+    tile_out_h = 8;
+    tile_out_w = 32;
+  }
+
+  if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 &&
+      p.kernel_h <= 2 && p.kernel_w <= 2) {
+    mode = 6;
+    tile_out_h = 8;
+    tile_out_w = 32;
+  }
+
+  dim3 block_size;
+  dim3 grid_size;
+
+  if (tile_out_h > 0 && tile_out_w > 0) {
+    p.loop_major = (p.major_dim - 1) / 16384 + 1;
+    p.loop_x = 1;
+    block_size = dim3(32 * 8, 1, 1);
+    grid_size = dim3(((p.out_h - 1) / tile_out_h + 1) * p.minor_dim,
+                     (p.out_w - 1) / (p.loop_x * tile_out_w) + 1,
+                     (p.major_dim - 1) / p.loop_major + 1);
+  } else {
+    p.loop_major = (p.major_dim - 1) / 16384 + 1;
+    p.loop_x = 4;
+    block_size = dim3(4, 32, 1);
+    grid_size = dim3((p.out_h * p.minor_dim - 1) / block_size.x + 1,
+                     (p.out_w - 1) / (p.loop_x * block_size.y) + 1,
+                     (p.major_dim - 1) / p.loop_major + 1);
+  }
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] {
+    switch (mode) {
+      case 1:
+        upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 4, 4, 16, 64>
+            <<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
+                                                   x.data_ptr<scalar_t>(),
+                                                   k.data_ptr<scalar_t>(), p);
+
+        break;
+
+      case 2:
+        upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 3, 3, 16, 64>
+            <<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
+                                                   x.data_ptr<scalar_t>(),
+                                                   k.data_ptr<scalar_t>(), p);
+
+        break;
+
+      case 3:
+        upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 4, 4, 16, 64>
+            <<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
+                                                   x.data_ptr<scalar_t>(),
+                                                   k.data_ptr<scalar_t>(), p);
+
+        break;
+
+      case 4:
+        upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 2, 2, 16, 64>
+            <<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
+                                                   x.data_ptr<scalar_t>(),
+                                                   k.data_ptr<scalar_t>(), p);
+
+        break;
+
+      case 5:
+        upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32>
+            <<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
+                                                   x.data_ptr<scalar_t>(),
+                                                   k.data_ptr<scalar_t>(), p);
+
+        break;
+
+      case 6:
+        upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 2, 2, 8, 32>
+            <<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
+                                                   x.data_ptr<scalar_t>(),
+                                                   k.data_ptr<scalar_t>(), p);
+
+        break;
+
+      default:
+        upfirdn2d_kernel_large<scalar_t><<<grid_size, block_size, 0, stream>>>(
+            out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(),
+            k.data_ptr<scalar_t>(), p);
+    }
+  });
+
+  return out;
+}
diff --git a/mmcv/ops/csrc/pytorch/cuda/voxelization_cuda.cu b/mmcv/ops/csrc/pytorch/cuda/voxelization_cuda.cu
new file mode 100644
index 0000000..bcb7da3
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/cuda/voxelization_cuda.cu
@@ -0,0 +1,188 @@
+// Copyright (c) OpenMMLab. All rights reserved.
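+// Usage sketch for the hard voxelizer below (the voxel size / range values
+// are assumptions picked for a nuScenes-like setup, not taken from this
+// patch). It fills at most max_voxels voxels with at most max_points points
+// each and returns the number of voxels actually produced:
+//
+//   std::vector<float> voxel_size{0.25f, 0.25f, 8.0f};
+//   std::vector<float> coors_range{-50.f, -50.f, -5.f, 50.f, 50.f, 3.f};
+//   int n_voxels = HardVoxelizeForwardCUDAKernelLauncher(
+//       points, voxels, coors, num_points_per_voxel, voxel_size, coors_range,
+//       /*max_points=*/35, /*max_voxels=*/20000);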
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "pytorch_cuda_helper.hpp"
+#include "voxelization_cuda_kernel.cuh"
+
+int HardVoxelizeForwardCUDAKernelLauncher(
+    const at::Tensor &points, at::Tensor &voxels, at::Tensor &coors,
+    at::Tensor &num_points_per_voxel, const std::vector<float> voxel_size,
+    const std::vector<float> coors_range, const int max_points,
+    const int max_voxels, const int NDim = 3) {
+  // the current version takes about 0.04s for one frame on CPU
+  // check device
+
+  at::cuda::CUDAGuard device_guard(points.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  const int num_points = points.size(0);
+  const int num_features = points.size(1);
+
+  const float voxel_x = voxel_size[0];
+  const float voxel_y = voxel_size[1];
+  const float voxel_z = voxel_size[2];
+  const float coors_x_min = coors_range[0];
+  const float coors_y_min = coors_range[1];
+  const float coors_z_min = coors_range[2];
+  const float coors_x_max = coors_range[3];
+  const float coors_y_max = coors_range[4];
+  const float coors_z_max = coors_range[5];
+
+  const int grid_x = round((coors_x_max - coors_x_min) / voxel_x);
+  const int grid_y = round((coors_y_max - coors_y_min) / voxel_y);
+  const int grid_z = round((coors_z_max - coors_z_min) / voxel_z);
+
+  // map points to voxel coors
+  at::Tensor temp_coors =
+      at::zeros({num_points, NDim}, points.options().dtype(at::kInt));
+
+  dim3 grid(std::min(at::cuda::ATenCeilDiv(num_points, 512), 4096));
+  dim3 block(512);
+
+  // 1. link point to corresponding voxel coors
+  AT_DISPATCH_ALL_TYPES(
+      points.scalar_type(), "hard_voxelize_kernel", ([&] {
+        dynamic_voxelize_kernel<scalar_t, int><<<grid, block, 0, stream>>>(
+            points.contiguous().data_ptr<scalar_t>(),
+            temp_coors.contiguous().data_ptr<int>(), voxel_x, voxel_y, voxel_z,
+            coors_x_min, coors_y_min, coors_z_min, coors_x_max, coors_y_max,
+            coors_z_max, grid_x, grid_y, grid_z, num_points, num_features,
+            NDim);
+      }));
+
+  AT_CUDA_CHECK(cudaGetLastError());
+
+  // 2. map point to the idx of the corresponding voxel, find duplicate coor
+  // create some temporary variables
+  auto point_to_pointidx = -at::ones(
+      {
+          num_points,
+      },
+      points.options().dtype(at::kInt));
+  auto point_to_voxelidx = -at::ones(
+      {
+          num_points,
+      },
+      points.options().dtype(at::kInt));
+
+  dim3 map_grid(std::min(at::cuda::ATenCeilDiv(num_points, 512), 4096));
+  dim3 map_block(512);
+
+  AT_DISPATCH_ALL_TYPES(
+      temp_coors.scalar_type(), "determin_duplicate", ([&] {
+        point_to_voxelidx_kernel<int><<<map_grid, map_block, 0, stream>>>(
+            temp_coors.contiguous().data_ptr<int>(),
+            point_to_voxelidx.contiguous().data_ptr<int>(),
+            point_to_pointidx.contiguous().data_ptr<int>(), max_points,
+            max_voxels, num_points, NDim);
+      }));
+
+  AT_CUDA_CHECK(cudaGetLastError());
+
+  // 3. determine voxel num and voxel's coor index
+  // moving this logic onto the CUDA device makes it roughly 10x faster
+  auto coor_to_voxelidx = -at::ones(
+      {
+          num_points,
+      },
+      points.options().dtype(at::kInt));
+  auto voxel_num = at::zeros(
+      {
+          1,
+      },
+      points.options().dtype(at::kInt));  // must be zero from the beginning
+
+  AT_DISPATCH_ALL_TYPES(temp_coors.scalar_type(), "determin_duplicate", ([&] {
+                          determin_voxel_num<<<1, 1, 0, stream>>>(
+                              num_points_per_voxel.contiguous().data_ptr<int>(),
+                              point_to_voxelidx.contiguous().data_ptr<int>(),
+                              point_to_pointidx.contiguous().data_ptr<int>(),
+                              coor_to_voxelidx.contiguous().data_ptr<int>(),
+                              voxel_num.contiguous().data_ptr<int>(),
+                              max_points, max_voxels, num_points);
+                        }));
+
+  AT_CUDA_CHECK(cudaGetLastError());
+
+  // 4. copy point features to voxels
+  // Step 4 & 5 could be parallel
+  auto pts_output_size = num_points * num_features;
+  dim3 cp_grid(std::min(at::cuda::ATenCeilDiv(pts_output_size, 512), 4096));
+  dim3 cp_block(512);
+  AT_DISPATCH_ALL_TYPES(
+      points.scalar_type(), "assign_point_to_voxel", ([&] {
+        assign_point_to_voxel<scalar_t, int><<<cp_grid, cp_block, 0, stream>>>(
+            pts_output_size, points.contiguous().data_ptr<scalar_t>(),
+            point_to_voxelidx.contiguous().data_ptr<int>(),
+            coor_to_voxelidx.contiguous().data_ptr<int>(),
+            voxels.contiguous().data_ptr<scalar_t>(), max_points, num_features,
+            num_points, NDim);
+      }));
+  // cudaDeviceSynchronize();
+  // AT_CUDA_CHECK(cudaGetLastError());
+
+  // 5. copy coors of each voxels
+  auto coors_output_size = num_points * NDim;
+  dim3 coors_cp_grid(
+      std::min(at::cuda::ATenCeilDiv(coors_output_size, 512), 4096));
+  dim3 coors_cp_block(512);
+  AT_DISPATCH_ALL_TYPES(
+      points.scalar_type(), "assign_point_to_voxel", ([&] {
+        assign_voxel_coors<int>
+            <<<coors_cp_grid, coors_cp_block, 0, stream>>>(
+                coors_output_size, temp_coors.contiguous().data_ptr<int>(),
+                point_to_voxelidx.contiguous().data_ptr<int>(),
+                coor_to_voxelidx.contiguous().data_ptr<int>(),
+                coors.contiguous().data_ptr<int>(), num_points, NDim);
+      }));
+
+  AT_CUDA_CHECK(cudaGetLastError());
+
+  auto voxel_num_cpu = voxel_num.to(at::kCPU);
+  int voxel_num_int = voxel_num_cpu.data_ptr<int>()[0];
+
+  return voxel_num_int;
+}
+
+void DynamicVoxelizeForwardCUDAKernelLauncher(
+    const at::Tensor &points, at::Tensor &coors,
+    const std::vector<float> voxel_size, const std::vector<float> coors_range,
+    const int NDim = 3) {
+  // the current version takes about 0.04s for one frame on CPU
+  // check device
+
+  at::cuda::CUDAGuard device_guard(points.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  const int num_points = points.size(0);
+  const int num_features = points.size(1);
+
+  const float voxel_x = voxel_size[0];
+  const float voxel_y = voxel_size[1];
+  const float voxel_z = voxel_size[2];
+  const float coors_x_min = coors_range[0];
+  const float coors_y_min = coors_range[1];
+  const float coors_z_min = coors_range[2];
+  const float coors_x_max = coors_range[3];
+  const float coors_y_max = coors_range[4];
+  const float coors_z_max = coors_range[5];
+
+  const int grid_x = round((coors_x_max - coors_x_min) / voxel_x);
+  const int grid_y = round((coors_y_max - coors_y_min) / voxel_y);
+  const int grid_z = round((coors_z_max - coors_z_min) / voxel_z);
+
+  const int col_blocks = at::cuda::ATenCeilDiv(num_points, THREADS_PER_BLOCK);
+  dim3 blocks(col_blocks);
+  dim3 threads(THREADS_PER_BLOCK);
+
+  AT_DISPATCH_ALL_TYPES(points.scalar_type(), "dynamic_voxelize_kernel", [&] {
+    dynamic_voxelize_kernel<scalar_t, int><<<blocks, threads, 0, stream>>>(
+        points.contiguous().data_ptr<scalar_t>(),
+        coors.contiguous().data_ptr<int>(), voxel_x, voxel_y, voxel_z,
+        coors_x_min, coors_y_min, coors_z_min, coors_x_max, coors_y_max,
+        coors_z_max, grid_x, grid_y, grid_z, num_points, num_features, NDim);
+  });
+
+  AT_CUDA_CHECK(cudaGetLastError());
+}
diff --git a/mmcv/ops/csrc/pytorch/deform_conv.cpp b/mmcv/ops/csrc/pytorch/deform_conv.cpp
new file mode 100644
index 0000000..86690b9
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/deform_conv.cpp
@@ -0,0 +1,517 @@
+// Copyright (c) OpenMMLab.
All rights reserved +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void deformable_im2col_impl(Tensor data_im, Tensor data_offset, + const int channels, const int height, + const int width, const int ksize_h, + const int ksize_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int parallel_imgs, const int deformable_group, + Tensor data_col) { + DISPATCH_DEVICE_IMPL(deformable_im2col_impl, data_im, data_offset, channels, + height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, + stride_w, dilation_h, dilation_w, parallel_imgs, + deformable_group, data_col); +} + +void deformable_col2im_impl(Tensor data_col, Tensor data_offset, + const int channels, const int height, + const int width, const int ksize_h, + const int ksize_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int parallel_imgs, const int deformable_group, + Tensor grad_im) { + DISPATCH_DEVICE_IMPL(deformable_col2im_impl, data_col, data_offset, channels, + height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, + stride_w, dilation_h, dilation_w, parallel_imgs, + deformable_group, grad_im); +} + +void deformable_col2im_coord_impl( + Tensor data_col, Tensor data_im, Tensor data_offset, const int channels, + const int height, const int width, const int ksize_h, const int ksize_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, const int parallel_imgs, + const int deformable_group, Tensor grad_offset) { + DISPATCH_DEVICE_IMPL(deformable_col2im_coord_impl, data_col, data_im, + data_offset, channels, height, width, ksize_h, ksize_w, + pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, + parallel_imgs, deformable_group, grad_offset); +} + +void deform_conv_shape_check(at::Tensor input, at::Tensor offset, + at::Tensor *gradOutput, at::Tensor weight, int kH, + int kW, int dH, int dW, int padH, int padW, + int dilationH, int dilationW, int group, + int deformable_group) { + TORCH_CHECK( + weight.ndimension() == 4, + "4D weight tensor (nOutputPlane,nInputPlane,kH,kW) expected, but got: %s", + weight.ndimension()); + + TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + TORCH_CHECK(kW > 0 && kH > 0, + "kernel size should be greater than zero, but got kH: %d kW: %d", + kH, kW); + + TORCH_CHECK((weight.size(2) == kH && weight.size(3) == kW), + "kernel size should be consistent with weight, ", + "but got kH: %d kW: %d weight.size(2): %d, weight.size(3): %d", + kH, kW, weight.size(2), weight.size(3)); + + TORCH_CHECK(dW > 0 && dH > 0, + "stride should be greater than zero, but got dH: %d dW: %d", dH, + dW); + + TORCH_CHECK( + dilationW > 0 && dilationH > 0, + "dilation should be greater than 0, but got dilationH: %d dilationW: %d", + dilationH, dilationW); + + int ndim = input.ndimension(); + int dimf = 0; + int dimh = 1; + int dimw = 2; + + if (ndim == 4) { + dimf++; + dimh++; + dimw++; + } + + TORCH_CHECK(ndim == 3 || ndim == 4, + "3D or 4D input tensor expected but got: %s", ndim); + + long nInputPlane = weight.size(1) * group; + long inputHeight = input.size(dimh); + long inputWidth = input.size(dimw); + long nOutputPlane = weight.size(0); + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + + TORCH_CHECK(nInputPlane % 
deformable_group == 0, + "input channels must divide deformable group size"); + + if (outputWidth < 1 || outputHeight < 1) + AT_ERROR( + "Given input size: (%ld x %ld x %ld). " + "Calculated output size: (%ld x %ld x %ld). Output size is too small", + nInputPlane, inputHeight, inputWidth, nOutputPlane, outputHeight, + outputWidth); + + TORCH_CHECK(input.size(1) == nInputPlane, + "invalid number of input planes, expected: %d, but got: %d", + nInputPlane, input.size(1)); + + TORCH_CHECK((inputHeight >= kH && inputWidth >= kW), + "input image is smaller than kernel"); + + TORCH_CHECK( + (offset.size(2) == outputHeight && offset.size(3) == outputWidth), + "invalid spatial size of offset, expected height: %d width: %d, but " + "got height: %d width: %d", + outputHeight, outputWidth, offset.size(2), offset.size(3)); + + TORCH_CHECK((offset.size(1) == deformable_group * 2 * kH * kW), + "invalid number of channels of offset"); + + if (gradOutput != NULL) { + TORCH_CHECK( + gradOutput->size(dimf) == nOutputPlane, + "invalid number of gradOutput planes, expected: %d, but got: %d", + nOutputPlane, gradOutput->size(dimf)); + + TORCH_CHECK( + (gradOutput->size(dimh) == outputHeight && + gradOutput->size(dimw) == outputWidth), + "invalid size of gradOutput, expected height: %d width: %d , but " + "got height: %d width: %d", + outputHeight, outputWidth, gradOutput->size(dimh), + gradOutput->size(dimw)); + } +} + +void deform_conv_forward(Tensor input, Tensor weight, Tensor offset, + Tensor output, Tensor columns, Tensor ones, int kW, + int kH, int dW, int dH, int padW, int padH, + int dilationW, int dilationH, int group, + int deformable_group, int im2col_step) { + if (input.device().is_cuda()) { +#ifdef MMCV_WITH_CUDA + CHECK_CUDA_INPUT(input); + CHECK_CUDA_INPUT(offset); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(output); + CHECK_CUDA_INPUT(columns); + CHECK_CUDA_INPUT(ones); +#else + AT_ERROR("DeformConv is not compiled with GPU support"); +#endif + } else { + CHECK_CPU_INPUT(input); + CHECK_CPU_INPUT(offset); + CHECK_CPU_INPUT(weight); + CHECK_CPU_INPUT(output); + CHECK_CPU_INPUT(columns); + CHECK_CPU_INPUT(ones); + } + + deform_conv_shape_check(input, offset, NULL, weight, kH, kW, dH, dW, padH, + padW, dilationH, dilationW, group, deformable_group); + at::DeviceGuard guard(input.device()); + + int batch = 1; + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input.unsqueeze_(0); + offset.unsqueeze_(0); + } + + // todo: assert batchsize dividable by im2col_step + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = weight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset"); + + output = output.view({batchSize / im2col_step, im2col_step, nOutputPlane, + outputHeight, outputWidth}); + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < outputHeight * outputWidth) { + ones = at::ones({outputHeight, outputWidth}, input.options()); + } + + input = input.view({batchSize / im2col_step, im2col_step, nInputPlane, + inputHeight, inputWidth}); + offset = + offset.view({batchSize / im2col_step, im2col_step, + deformable_group * 2 * kH * kW, outputHeight, 
outputWidth}); + + Tensor output_buffer = at::zeros({batchSize / im2col_step, nOutputPlane, + im2col_step * outputHeight, outputWidth}, + output.options()); + + output_buffer = output_buffer.view( + {output_buffer.size(0), group, output_buffer.size(1) / group, + output_buffer.size(2), output_buffer.size(3)}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + deformable_im2col_impl(input[elt], offset[elt], nInputPlane, inputHeight, + inputWidth, kH, kW, padH, padW, dH, dW, dilationH, + dilationW, im2col_step, deformable_group, columns); + + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view({group, weight.size(0) / group, weight.size(1), + weight.size(2), weight.size(3)}); + + for (int g = 0; g < group; g++) { + output_buffer[elt][g] = output_buffer[elt][g] + .flatten(1) + .addmm_(weight[g].flatten(1), columns[g]) + .view_as(output_buffer[elt][g]); + } + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), + weight.size(3), weight.size(4)}); + } + + output_buffer = output_buffer.view( + {output_buffer.size(0), output_buffer.size(1) * output_buffer.size(2), + output_buffer.size(3), output_buffer.size(4)}); + + output_buffer = output_buffer.view({batchSize / im2col_step, nOutputPlane, + im2col_step, outputHeight, outputWidth}); + output_buffer.transpose_(1, 2); + output.copy_(output_buffer); + output = output.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + output = output.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); + } +} + +void deform_conv_backward_input(Tensor input, Tensor offset, Tensor gradOutput, + Tensor gradInput, Tensor gradOffset, + Tensor weight, Tensor columns, int kW, int kH, + int dW, int dH, int padW, int padH, + int dilationW, int dilationH, int group, + int deformable_group, int im2col_step) { + if (input.device().is_cuda()) { +#ifdef MMCV_WITH_CUDA + CHECK_CUDA_INPUT(input); + CHECK_CUDA_INPUT(offset); + CHECK_CUDA_INPUT(gradOutput); + CHECK_CUDA_INPUT(gradInput); + CHECK_CUDA_INPUT(gradOffset); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(columns); +#else + AT_ERROR("DeformConv is not compiled with GPU support"); +#endif + } else { + CHECK_CPU_INPUT(input); + CHECK_CPU_INPUT(offset); + CHECK_CPU_INPUT(gradOutput); + CHECK_CPU_INPUT(gradInput); + CHECK_CPU_INPUT(gradOffset); + CHECK_CPU_INPUT(weight); + CHECK_CPU_INPUT(columns); + } + deform_conv_shape_check(input, offset, &gradOutput, weight, kH, kW, dH, dW, + padH, padW, dilationH, dilationW, group, + deformable_group); + + at::DeviceGuard guard(input.device()); + + int batch = 1; + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input = input.view({1, input.size(0), input.size(1), input.size(2)}); + offset = offset.view({1, offset.size(0), offset.size(1), offset.size(2)}); + gradOutput = gradOutput.view( + {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)}); + } + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = weight.size(0); + + long outputWidth = + (inputWidth + 2 * padW 
- (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + TORCH_CHECK((offset.size(0) == batchSize), 3, "invalid batch size of offset"); + gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + // change order of grad output + gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step, + nOutputPlane, outputHeight, outputWidth}); + gradOutput.transpose_(1, 2); + + gradInput = gradInput.view({batchSize / im2col_step, im2col_step, nInputPlane, + inputHeight, inputWidth}); + input = input.view({batchSize / im2col_step, im2col_step, nInputPlane, + inputHeight, inputWidth}); + gradOffset = gradOffset.view({batchSize / im2col_step, im2col_step, + deformable_group * 2 * kH * kW, outputHeight, + outputWidth}); + offset = + offset.view({batchSize / im2col_step, im2col_step, + deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + // divide into groups + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view({group, weight.size(0) / group, weight.size(1), + weight.size(2), weight.size(3)}); + gradOutput = gradOutput.view( + {gradOutput.size(0), group, gradOutput.size(1) / group, + gradOutput.size(2), gradOutput.size(3), gradOutput.size(4)}); + + for (int g = 0; g < group; g++) { + columns[g] = columns[g].addmm_(weight[g].flatten(1).transpose(0, 1), + gradOutput[elt][g].flatten(1), 0.0f, 1.0f); + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + gradOutput = gradOutput.view( + {gradOutput.size(0), gradOutput.size(1) * gradOutput.size(2), + gradOutput.size(3), gradOutput.size(4), gradOutput.size(5)}); + + deformable_col2im_coord_impl(columns, input[elt], offset[elt], nInputPlane, + inputHeight, inputWidth, kH, kW, padH, padW, + dH, dW, dilationH, dilationW, im2col_step, + deformable_group, gradOffset[elt]); + + deformable_col2im_impl(columns, offset[elt], nInputPlane, inputHeight, + inputWidth, kH, kW, padH, padW, dH, dW, dilationH, + dilationW, im2col_step, deformable_group, + gradInput[elt]); + + weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), + weight.size(3), weight.size(4)}); + } + + gradOutput.transpose_(1, 2); + gradOutput = + gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + gradOffset = gradOffset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + gradInput = gradInput.view({nInputPlane, inputHeight, inputWidth}); + offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); + gradOffset = + gradOffset.view({offset.size(1), offset.size(2), offset.size(3)}); + } +} + +void deform_conv_backward_parameters(Tensor input, Tensor offset, + Tensor gradOutput, Tensor gradWeight, + Tensor columns, Tensor ones, int kW, + int kH, int dW, int dH, int padW, int padH, + int dilationW, int dilationH, int group, + int deformable_group, float scale, 
+ int im2col_step) { + if (input.device().is_cuda()) { +#ifdef MMCV_WITH_CUDA + CHECK_CUDA_INPUT(input); + CHECK_CUDA_INPUT(offset); + CHECK_CUDA_INPUT(gradOutput); + CHECK_CUDA_INPUT(gradWeight); + CHECK_CUDA_INPUT(columns); + CHECK_CUDA_INPUT(ones); +#else + AT_ERROR("DeformConv is not compiled with GPU support"); +#endif + } else { + CHECK_CPU_INPUT(input); + CHECK_CPU_INPUT(offset); + CHECK_CPU_INPUT(gradOutput); + CHECK_CPU_INPUT(gradWeight); + CHECK_CPU_INPUT(columns); + CHECK_CPU_INPUT(ones); + } + + deform_conv_shape_check(input, offset, &gradOutput, gradWeight, kH, kW, dH, + dW, padH, padW, dilationH, dilationW, group, + deformable_group); + at::DeviceGuard guard(input.device()); + + int batch = 1; + + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input = input.view( + at::IntList({1, input.size(0), input.size(1), input.size(2)})); + gradOutput = gradOutput.view( + {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)}); + } + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = gradWeight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset"); + + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step, + nOutputPlane, outputHeight, outputWidth}); + gradOutput.transpose_(1, 2); + + Tensor gradOutputBuffer = at::zeros_like(gradOutput); + gradOutputBuffer = + gradOutputBuffer.view({batchSize / im2col_step, nOutputPlane, im2col_step, + outputHeight, outputWidth}); + gradOutputBuffer = gradOutputBuffer.contiguous(); + gradOutputBuffer.copy_(gradOutput); + gradOutputBuffer = + gradOutputBuffer.view({batchSize / im2col_step, nOutputPlane, + im2col_step * outputHeight, outputWidth}); + + gradOutput.transpose_(1, 2); + gradOutput = + gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + input = input.view({batchSize / im2col_step, im2col_step, nInputPlane, + inputHeight, inputWidth}); + offset = + offset.view({batchSize / im2col_step, im2col_step, + deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + deformable_im2col_impl(input[elt], offset[elt], nInputPlane, inputHeight, + inputWidth, kH, kW, padH, padW, dH, dW, dilationH, + dilationW, im2col_step, deformable_group, columns); + + // divide into group + gradOutputBuffer = gradOutputBuffer.view( + {gradOutputBuffer.size(0), group, gradOutputBuffer.size(1) / group, + gradOutputBuffer.size(2), gradOutputBuffer.size(3)}); + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + gradWeight = + gradWeight.view({group, gradWeight.size(0) / group, gradWeight.size(1), + gradWeight.size(2), gradWeight.size(3)}); + + for (int g = 0; g < group; g++) { + gradWeight[g] = gradWeight[g] + .flatten(1) + .addmm_(gradOutputBuffer[elt][g].flatten(1), + columns[g].transpose(1, 0), 1.0, scale) + .view_as(gradWeight[g]); + } + gradOutputBuffer = gradOutputBuffer.view( + {gradOutputBuffer.size(0), + gradOutputBuffer.size(1) * gradOutputBuffer.size(2), + gradOutputBuffer.size(3), gradOutputBuffer.size(4)}); + columns = + columns.view({columns.size(0) * columns.size(1), 
columns.size(2)}); + gradWeight = gradWeight.view({gradWeight.size(0) * gradWeight.size(1), + gradWeight.size(2), gradWeight.size(3), + gradWeight.size(4)}); + } + + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + } +} diff --git a/mmcv/ops/csrc/pytorch/deform_roi_pool.cpp b/mmcv/ops/csrc/pytorch/deform_roi_pool.cpp new file mode 100644 index 0000000..4fb78a9 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/deform_roi_pool.cpp @@ -0,0 +1,42 @@ +// Copyright (c) OpenMMLab. All rights reserved +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void deform_roi_pool_forward_impl(Tensor input, Tensor rois, Tensor offset, + Tensor output, int pooled_height, + int pooled_width, float spatial_scale, + int sampling_ratio, float gamma) { + DISPATCH_DEVICE_IMPL(deform_roi_pool_forward_impl, input, rois, offset, + output, pooled_height, pooled_width, spatial_scale, + sampling_ratio, gamma); +} + +void deform_roi_pool_backward_impl(Tensor grad_output, Tensor input, + Tensor rois, Tensor offset, + Tensor grad_input, Tensor grad_offset, + int pooled_height, int pooled_width, + float spatial_scale, int sampling_ratio, + float gamma) { + DISPATCH_DEVICE_IMPL(deform_roi_pool_backward_impl, grad_output, input, rois, + offset, grad_input, grad_offset, pooled_height, + pooled_width, spatial_scale, sampling_ratio, gamma); +} + +void deform_roi_pool_forward(Tensor input, Tensor rois, Tensor offset, + Tensor output, int pooled_height, int pooled_width, + float spatial_scale, int sampling_ratio, + float gamma) { + deform_roi_pool_forward_impl(input, rois, offset, output, pooled_height, + pooled_width, spatial_scale, sampling_ratio, + gamma); +} + +void deform_roi_pool_backward(Tensor grad_output, Tensor input, Tensor rois, + Tensor offset, Tensor grad_input, + Tensor grad_offset, int pooled_height, + int pooled_width, float spatial_scale, + int sampling_ratio, float gamma) { + deform_roi_pool_backward_impl(grad_output, input, rois, offset, grad_input, + grad_offset, pooled_height, pooled_width, + spatial_scale, sampling_ratio, gamma); +} diff --git a/mmcv/ops/csrc/pytorch/focal_loss.cpp b/mmcv/ops/csrc/pytorch/focal_loss.cpp new file mode 100644 index 0000000..ed0e218 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/focal_loss.cpp @@ -0,0 +1,53 @@ +// Copyright (c) OpenMMLab. 
All rights reserved +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void sigmoid_focal_loss_forward_impl(Tensor input, Tensor target, Tensor weight, + Tensor output, float gamma, float alpha) { + DISPATCH_DEVICE_IMPL(sigmoid_focal_loss_forward_impl, input, target, weight, + output, gamma, alpha); +} + +void sigmoid_focal_loss_backward_impl(Tensor input, Tensor target, + Tensor weight, Tensor grad_input, + float gamma, float alpha) { + DISPATCH_DEVICE_IMPL(sigmoid_focal_loss_backward_impl, input, target, weight, + grad_input, gamma, alpha); +} + +void softmax_focal_loss_forward_impl(Tensor input, Tensor target, Tensor weight, + Tensor output, float gamma, float alpha) { + DISPATCH_DEVICE_IMPL(softmax_focal_loss_forward_impl, input, target, weight, + output, gamma, alpha); +} + +void softmax_focal_loss_backward_impl(Tensor input, Tensor target, + Tensor weight, Tensor buff, + Tensor grad_input, float gamma, + float alpha) { + DISPATCH_DEVICE_IMPL(softmax_focal_loss_backward_impl, input, target, weight, + buff, grad_input, gamma, alpha); +} + +void sigmoid_focal_loss_forward(Tensor input, Tensor target, Tensor weight, + Tensor output, float gamma, float alpha) { + sigmoid_focal_loss_forward_impl(input, target, weight, output, gamma, alpha); +} + +void sigmoid_focal_loss_backward(Tensor input, Tensor target, Tensor weight, + Tensor grad_input, float gamma, float alpha) { + sigmoid_focal_loss_backward_impl(input, target, weight, grad_input, gamma, + alpha); +} + +void softmax_focal_loss_forward(Tensor input, Tensor target, Tensor weight, + Tensor output, float gamma, float alpha) { + softmax_focal_loss_forward_impl(input, target, weight, output, gamma, alpha); +} + +void softmax_focal_loss_backward(Tensor input, Tensor target, Tensor weight, + Tensor buff, Tensor grad_input, float gamma, + float alpha) { + softmax_focal_loss_backward_impl(input, target, weight, buff, grad_input, + gamma, alpha); +} diff --git a/mmcv/ops/csrc/pytorch/furthest_point_sample.cpp b/mmcv/ops/csrc/pytorch/furthest_point_sample.cpp new file mode 100644 index 0000000..9c7098a --- /dev/null +++ b/mmcv/ops/csrc/pytorch/furthest_point_sample.cpp @@ -0,0 +1,34 @@ +// Modified from +// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/sampling.cpp + +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void furthest_point_sampling_forward_impl(Tensor points_tensor, + Tensor temp_tensor, Tensor idx_tensor, + int b, int n, int m) { + DISPATCH_DEVICE_IMPL(furthest_point_sampling_forward_impl, points_tensor, + temp_tensor, idx_tensor, b, n, m); +} + +void furthest_point_sampling_with_dist_forward_impl(Tensor points_tensor, + Tensor temp_tensor, + Tensor idx_tensor, int b, + int n, int m) { + DISPATCH_DEVICE_IMPL(furthest_point_sampling_with_dist_forward_impl, + points_tensor, temp_tensor, idx_tensor, b, n, m); +} + +void furthest_point_sampling_forward(Tensor points_tensor, Tensor temp_tensor, + Tensor idx_tensor, int b, int n, int m) { + furthest_point_sampling_forward_impl(points_tensor, temp_tensor, idx_tensor, + b, n, m); +} + +void furthest_point_sampling_with_dist_forward(Tensor points_tensor, + Tensor temp_tensor, + Tensor idx_tensor, int b, int n, + int m) { + furthest_point_sampling_with_dist_forward_impl(points_tensor, temp_tensor, + idx_tensor, b, n, m); +} diff --git a/mmcv/ops/csrc/pytorch/fused_bias_leakyrelu.cpp b/mmcv/ops/csrc/pytorch/fused_bias_leakyrelu.cpp new file mode 100644 index 0000000..8d411c9 --- /dev/null +++ 
b/mmcv/ops/csrc/pytorch/fused_bias_leakyrelu.cpp @@ -0,0 +1,119 @@ +// Modified from +// https://github.com/rosinality/stylegan2-pytorch/blob/master/op/fused_bias_act.cpp + +/* +Copyright (c) 2021, NVIDIA Corporation. All rights reserved. + +NVIDIA Source Code License for StyleGAN2 with Adaptive Discriminator +Augmentation (ADA) +======================================================================= + +1. Definitions + +"Licensor" means any person or entity that distributes its Work. + +"Software" means the original work of authorship made available under +this License. + +"Work" means the Software and any additions to or derivative works of +the Software that are made available under this License. + +The terms "reproduce," "reproduction," "derivative works," and +"distribution" have the meaning as provided under U.S. copyright law; +provided, however, that for the purposes of this License, derivative +works shall not include works that remain separable from, or merely +link (or bind by name) to the interfaces of, the Work. + +Works, including the Software, are "made available" under this License +by including in or with the Work either (a) a copyright notice +referencing the applicability of this License to the Work, or (b) a +copy of this License. + +2. License Grants + + 2.1 Copyright Grant. Subject to the terms and conditions of this + License, each Licensor grants to you a perpetual, worldwide, + non-exclusive, royalty-free, copyright license to reproduce, + prepare derivative works of, publicly display, publicly perform, + sublicense and distribute its Work and any resulting derivative + works in any form. + +3. Limitations + + 3.1 Redistribution. You may reproduce or distribute the Work only + if (a) you do so under this License, (b) you include a complete + copy of this License with your distribution, and (c) you retain + without modification any copyright, patent, trademark, or + attribution notices that are present in the Work. + + 3.2 Derivative Works. You may specify that additional or different + terms apply to the use, reproduction, and distribution of your + derivative works of the Work ("Your Terms") only if (a) Your Terms + provide that the use limitation in Section 3.3 applies to your + derivative works, and (b) you identify the specific derivative + works that are subject to Your Terms. Notwithstanding Your Terms, + this License (including the redistribution requirements in Section + 3.1) will continue to apply to the Work itself. + + 3.3 Use Limitation. The Work and any derivative works thereof only + may be used or intended for use non-commercially. Notwithstanding + the foregoing, NVIDIA and its affiliates may use the Work and any + derivative works commercially. As used herein, "non-commercially" + means for research or evaluation purposes only. + + 3.4 Patent Claims. If you bring or threaten to bring a patent claim + against any Licensor (including any claim, cross-claim or + counterclaim in a lawsuit) to enforce any patents that you allege + are infringed by any Work, then your rights under this License from + such Licensor (including the grant in Section 2.1) will terminate + immediately. + + 3.5 Trademarks. This License does not grant any rights to use any + Licensor’s or its affiliates’ names, logos, or trademarks, except + as necessary to reproduce the notices described in this License. + + 3.6 Termination. If you violate any term of this License, then your + rights under this License (including the grant in Section 2.1) will + terminate immediately. + +4. 
Disclaimer of Warranty. + +THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR +NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER +THIS LICENSE. + +5. Limitation of Liability. + +EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL +THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE +SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT, +INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF +OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK +(INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION, +LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER +COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF +THE POSSIBILITY OF SUCH DAMAGES. + +======================================================================= +*/ + +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +torch::Tensor fused_bias_leakyrelu_op_impl(const torch::Tensor& input, + const torch::Tensor& bias, + const torch::Tensor& refer, int act, + int grad, float alpha, float scale) { + return DISPATCH_DEVICE_IMPL(fused_bias_leakyrelu_op_impl, input, bias, refer, + act, grad, alpha, scale); +} + +torch::Tensor fused_bias_leakyrelu(const torch::Tensor& input, + const torch::Tensor& bias, + const torch::Tensor& refer, int act, + int grad, float alpha, float scale) { + return fused_bias_leakyrelu_op_impl(input, bias, refer, act, grad, alpha, + scale); +} diff --git a/mmcv/ops/csrc/pytorch/gather_points.cpp b/mmcv/ops/csrc/pytorch/gather_points.cpp new file mode 100644 index 0000000..b8fb020 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/gather_points.cpp @@ -0,0 +1,30 @@ +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void gather_points_forward_impl(int b, int c, int n, int npoints, + const Tensor points, const Tensor idx, + Tensor out) { + DISPATCH_DEVICE_IMPL(gather_points_forward_impl, b, c, n, npoints, points, + idx, out); +} + +void gather_points_backward_impl(int b, int c, int n, int npoints, + const Tensor grad_out, const Tensor idx, + Tensor grad_points) { + DISPATCH_DEVICE_IMPL(gather_points_backward_impl, b, c, n, npoints, grad_out, + idx, grad_points); +} + +void gather_points_forward(Tensor points_tensor, Tensor idx_tensor, + Tensor out_tensor, int b, int c, int n, + int npoints) { + gather_points_forward_impl(b, c, n, npoints, points_tensor, idx_tensor, + out_tensor); +} + +void gather_points_backward(Tensor grad_out_tensor, Tensor idx_tensor, + Tensor grad_points_tensor, int b, int c, int n, + int npoints) { + gather_points_backward_impl(b, c, n, npoints, grad_out_tensor, idx_tensor, + grad_points_tensor); +} diff --git a/mmcv/ops/csrc/pytorch/group_points.cpp b/mmcv/ops/csrc/pytorch/group_points.cpp new file mode 100644 index 0000000..cdd190d --- /dev/null +++ b/mmcv/ops/csrc/pytorch/group_points.cpp @@ -0,0 +1,34 @@ +// Copyright (c) OpenMMLab. All rights reserved. 
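+// Like the other .cpp bindings in this patch, this file only routes each op
+// to whatever backend implementation has been registered for the input
+// tensor's device. Schematically (hypothetical op name, sketching the
+// registry pattern used here, not new functionality):
+//
+//   // binding .cpp: resolve the backend at runtime via the device registry
+//   void my_op_impl(Tensor x) { DISPATCH_DEVICE_IMPL(my_op_impl, x); }
+//   // backend .cu: register a CUDA implementation under the same key, e.g.
+//   // REGISTER_DEVICE_IMPL(my_op_impl, CUDA, my_op_cuda);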
+// Modified from +// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/group_points.cpp + +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void group_points_forward_impl(int b, int c, int n, int npoints, int nsample, + const Tensor points, const Tensor idx, + Tensor out) { + DISPATCH_DEVICE_IMPL(group_points_forward_impl, b, c, n, npoints, nsample, + points, idx, out); +} + +void group_points_backward_impl(int b, int c, int n, int npoints, int nsample, + const Tensor grad_out, const Tensor idx, + Tensor grad_points) { + DISPATCH_DEVICE_IMPL(group_points_backward_impl, b, c, n, npoints, nsample, + grad_out, idx, grad_points); +} + +void group_points_forward(Tensor points_tensor, Tensor idx_tensor, + Tensor out_tensor, int b, int c, int n, int npoints, + int nsample) { + DISPATCH_DEVICE_IMPL(group_points_forward_impl, b, c, n, npoints, nsample, + points_tensor, idx_tensor, out_tensor); +} + +void group_points_backward(Tensor grad_out_tensor, Tensor idx_tensor, + Tensor grad_points_tensor, int b, int c, int n, + int npoints, int nsample) { + group_points_backward_impl(b, c, n, npoints, nsample, grad_out_tensor, + idx_tensor, grad_points_tensor); +} diff --git a/mmcv/ops/csrc/pytorch/info.cpp b/mmcv/ops/csrc/pytorch/info.cpp new file mode 100644 index 0000000..a08d227 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/info.cpp @@ -0,0 +1,56 @@ +// Copyright (c) OpenMMLab. All rights reserved +// modified from +// https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/vision.cpp +#include "pytorch_cpp_helper.hpp" + +#ifdef MMCV_WITH_CUDA +#ifndef HIP_DIFF +#include +int get_cudart_version() { return CUDART_VERSION; } +#endif +#endif + +std::string get_compiling_cuda_version() { +#ifdef MMCV_WITH_CUDA +#ifndef HIP_DIFF + std::ostringstream oss; + // copied from + // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/CUDAHooks.cpp#L231 + auto printCudaStyleVersion = [&](int v) { + oss << (v / 1000) << "." << (v / 10 % 100); + if (v % 10 != 0) { + oss << "." << (v % 10); + } + }; + printCudaStyleVersion(get_cudart_version()); + return oss.str(); +#else + return std::string("rocm not available"); +#endif +#else + return std::string("not available"); +#endif +} + +// similar to +// https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Version.cpp +std::string get_compiler_version() { + std::ostringstream ss; +#if defined(__GNUC__) +#ifndef __clang__ + { ss << "GCC " << __GNUC__ << "." << __GNUC_MINOR__; } +#endif +#endif + +#if defined(__clang_major__) + { + ss << "clang " << __clang_major__ << "." << __clang_minor__ << "." + << __clang_patchlevel__; + } +#endif + +#if defined(_MSC_VER) + { ss << "MSVC " << _MSC_FULL_VER; } +#endif + return ss.str(); +} diff --git a/mmcv/ops/csrc/pytorch/iou3d.cpp b/mmcv/ops/csrc/pytorch/iou3d.cpp new file mode 100644 index 0000000..71f5030 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/iou3d.cpp @@ -0,0 +1,151 @@ +// Modified from +// https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/iou3d_nms/src/iou3d_nms.cpp + +/* +3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others) +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. 
+
+#include "pytorch_cpp_helper.hpp"
+#include "pytorch_device_registry.hpp"
+
+const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8;
+
+void iou3d_boxes_overlap_bev_forward_impl(const int num_a, const Tensor boxes_a,
+                                          const int num_b, const Tensor boxes_b,
+                                          Tensor ans_overlap) {
+  DISPATCH_DEVICE_IMPL(iou3d_boxes_overlap_bev_forward_impl, num_a, boxes_a,
+                       num_b, boxes_b, ans_overlap);
+}
+
+void iou3d_boxes_iou_bev_forward_impl(const int num_a, const Tensor boxes_a,
+                                      const int num_b, const Tensor boxes_b,
+                                      Tensor ans_iou) {
+  DISPATCH_DEVICE_IMPL(iou3d_boxes_iou_bev_forward_impl, num_a, boxes_a, num_b,
+                       boxes_b, ans_iou);
+}
+
+void iou3d_nms_forward_impl(const Tensor boxes, unsigned long long *mask,
+                            int boxes_num, float nms_overlap_thresh) {
+  DISPATCH_DEVICE_IMPL(iou3d_nms_forward_impl, boxes, mask, boxes_num,
+                       nms_overlap_thresh);
+}
+
+void iou3d_nms_normal_forward_impl(const Tensor boxes, unsigned long long *mask,
+                                   int boxes_num, float nms_overlap_thresh) {
+  DISPATCH_DEVICE_IMPL(iou3d_nms_normal_forward_impl, boxes, mask, boxes_num,
+                       nms_overlap_thresh);
+}
+
+void iou3d_boxes_overlap_bev_forward(Tensor boxes_a, Tensor boxes_b,
+                                     Tensor ans_overlap) {
+  // params boxes_a: (N, 5) [x1, y1, x2, y2, ry]
+  // params boxes_b: (M, 5)
+  // params ans_overlap: (N, M)
+
+  int num_a = boxes_a.size(0);
+  int num_b = boxes_b.size(0);
+
+  iou3d_boxes_overlap_bev_forward_impl(num_a, boxes_a, num_b, boxes_b,
+                                       ans_overlap);
+}
+
+void iou3d_boxes_iou_bev_forward(Tensor boxes_a, Tensor boxes_b,
+                                 Tensor ans_iou) {
+  // params boxes_a: (N, 5) [x1, y1, x2, y2, ry]
+  // params boxes_b: (M, 5)
+  // params ans_iou: (N, M)
+  int num_a = boxes_a.size(0);
+  int num_b = boxes_b.size(0);
+
+  iou3d_boxes_iou_bev_forward_impl(num_a, boxes_a, num_b, boxes_b, ans_iou);
+}
+
+void iou3d_nms_forward(Tensor boxes, Tensor keep, Tensor keep_num,
+                       float nms_overlap_thresh) {
+  // params boxes: (N, 5) [x1, y1, x2, y2, ry]
+  // params keep: (N)
+  CHECK_CONTIGUOUS(boxes);
+  CHECK_CONTIGUOUS(keep);
+
+  int boxes_num = boxes.size(0);
+  int64_t *keep_data = keep.data_ptr<int64_t>();
+  int64_t *keep_num_data = keep_num.data_ptr<int64_t>();
+
+  const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
+
+  Tensor mask =
+      at::empty({boxes_num, col_blocks}, boxes.options().dtype(at::kLong));
+  unsigned long long *mask_data =
+      (unsigned long long *)mask.data_ptr<int64_t>();
+  iou3d_nms_forward_impl(boxes, mask_data, boxes_num, nms_overlap_thresh);
+
+  at::Tensor mask_cpu = mask.to(at::kCPU);
+  unsigned long long *mask_host =
+      (unsigned long long *)mask_cpu.data_ptr<int64_t>();
+
+  std::vector<unsigned long long> remv_cpu(col_blocks);
+  memset(&remv_cpu[0], 0, sizeof(unsigned long long) * col_blocks);
+
+  int num_to_keep = 0;
+
+  for (int i = 0; i < boxes_num; i++) {
+    int nblock = i / THREADS_PER_BLOCK_NMS;
+    int inblock = i % THREADS_PER_BLOCK_NMS;
+
+    if (!(remv_cpu[nblock] & (1ULL << inblock))) {
+      keep_data[num_to_keep++] = i;
+      unsigned long long *p = &mask_host[0] + i * col_blocks;
+      for (int j = nblock; j < col_blocks; j++) {
+        remv_cpu[j] |= p[j];
+      }
+    }
+  }
+  *keep_num_data = num_to_keep;
+}
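+// [Editor's note] The host-side loop above is the usual bitmask-NMS decode:
+// remv_cpu tracks, per 64-box block, which boxes are already suppressed; box i
+// survives iff its bit is still clear, and its mask row is then OR-ed in so
+// every box it overlaps becomes suppressed. A self-contained sketch of the
+// decode, assuming the kernel has filled mask_host (row-major, col_blocks
+// words per row):
+//
+//   std::vector<unsigned long long> remv(col_blocks, 0ULL);
+//   std::vector<int64_t> kept;
+//   for (int i = 0; i < boxes_num; ++i) {
+//     if (!(remv[i / 64] & (1ULL << (i % 64)))) {
+//       kept.push_back(i);
+//       const unsigned long long *row = mask_host + i * col_blocks;
+//       for (int j = i / 64; j < col_blocks; ++j) remv[j] |= row[j];
+//     }
+//   }
+//
+// iou3d_nms_normal_forward below runs the same decode; only the device kernel
+// differs (in the OpenPCDet lineage cited above, the "normal" variant computes
+// overlap without accounting for box rotation).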
+
+void iou3d_nms_normal_forward(Tensor boxes, Tensor keep, Tensor keep_num,
+                              float nms_overlap_thresh) {
+  // params boxes: (N, 5) [x1, y1, x2, y2, ry]
+  // params keep: (N)
+
+  CHECK_CONTIGUOUS(boxes);
+  CHECK_CONTIGUOUS(keep);
+
+  int boxes_num = boxes.size(0);
+  int64_t *keep_data = keep.data_ptr<int64_t>();
+  int64_t *keep_num_data = keep_num.data_ptr<int64_t>();
+
+  const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
+
+  Tensor mask =
+      at::empty({boxes_num, col_blocks}, boxes.options().dtype(at::kLong));
+  unsigned long long *mask_data =
+      (unsigned long long *)mask.data_ptr<int64_t>();
+  iou3d_nms_normal_forward_impl(boxes, mask_data, boxes_num,
+                                nms_overlap_thresh);
+
+  at::Tensor mask_cpu = mask.to(at::kCPU);
+  unsigned long long *mask_host =
+      (unsigned long long *)mask_cpu.data_ptr<int64_t>();
+
+  std::vector<unsigned long long> remv_cpu(col_blocks);
+  memset(&remv_cpu[0], 0, sizeof(unsigned long long) * col_blocks);
+  int num_to_keep = 0;
+
+  for (int i = 0; i < boxes_num; i++) {
+    int nblock = i / THREADS_PER_BLOCK_NMS;
+    int inblock = i % THREADS_PER_BLOCK_NMS;
+
+    if (!(remv_cpu[nblock] & (1ULL << inblock))) {
+      keep_data[num_to_keep++] = i;
+      unsigned long long *p = &mask_host[0] + i * col_blocks;
+      for (int j = nblock; j < col_blocks; j++) {
+        remv_cpu[j] |= p[j];
+      }
+    }
+  }
+
+  *keep_num_data = num_to_keep;
+}
diff --git a/mmcv/ops/csrc/pytorch/knn.cpp b/mmcv/ops/csrc/pytorch/knn.cpp
new file mode 100644
index 0000000..b4be942
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/knn.cpp
@@ -0,0 +1,17 @@
+// Modified from
+// https://github.com/CVMI-Lab/PAConv/tree/main/scene_seg/lib/pointops/src/knnquery_heap
+
+#include "pytorch_cpp_helper.hpp"
+#include "pytorch_device_registry.hpp"
+
+void knn_forward_impl(int b, int n, int m, int nsample, const Tensor xyz,
+                      const Tensor new_xyz, Tensor idx, Tensor dist2) {
+  DISPATCH_DEVICE_IMPL(knn_forward_impl, b, n, m, nsample, xyz, new_xyz, idx,
+                       dist2);
+}
+
+void knn_forward(Tensor xyz_tensor, Tensor new_xyz_tensor, Tensor idx_tensor,
+                 Tensor dist2_tensor, int b, int n, int m, int nsample) {
+  knn_forward_impl(b, n, m, nsample, xyz_tensor, new_xyz_tensor, idx_tensor,
+                   dist2_tensor);
+}
diff --git a/mmcv/ops/csrc/pytorch/masked_conv2d.cpp b/mmcv/ops/csrc/pytorch/masked_conv2d.cpp
new file mode 100644
index 0000000..5903925
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/masked_conv2d.cpp
@@ -0,0 +1,33 @@
+// Copyright (c) OpenMMLab. All rights reserved
+#include "pytorch_cpp_helper.hpp"
+#include "pytorch_device_registry.hpp"
+
+void masked_im2col_forward_impl(const Tensor im, const Tensor mask_h_idx,
+                                const Tensor mask_w_idx, Tensor col,
+                                const int kernel_h, const int kernel_w,
+                                const int pad_h, const int pad_w) {
+  DISPATCH_DEVICE_IMPL(masked_im2col_forward_impl, im, mask_h_idx, mask_w_idx,
+                       col, kernel_h, kernel_w, pad_h, pad_w);
+}
+
+void masked_col2im_forward_impl(const Tensor col, const Tensor mask_h_idx,
+                                const Tensor mask_w_idx, Tensor im, int height,
+                                int width, int channels) {
+  DISPATCH_DEVICE_IMPL(masked_col2im_forward_impl, col, mask_h_idx, mask_w_idx,
+                       im, height, width, channels);
+}
+
+void masked_im2col_forward(const Tensor im, const Tensor mask_h_idx,
+                           const Tensor mask_w_idx, Tensor col,
+                           const int kernel_h, const int kernel_w,
+                           const int pad_h, const int pad_w) {
+  masked_im2col_forward_impl(im, mask_h_idx, mask_w_idx, col, kernel_h,
+                             kernel_w, pad_h, pad_w);
+}
+
+void masked_col2im_forward(const Tensor col, const Tensor mask_h_idx,
+                           const Tensor mask_w_idx, Tensor im, int height,
+                           int width, int channels) {
+  masked_col2im_forward_impl(col, mask_h_idx, mask_w_idx, im, height, width,
+                             channels);
+}
diff --git a/mmcv/ops/csrc/pytorch/modulated_deform_conv.cpp b/mmcv/ops/csrc/pytorch/modulated_deform_conv.cpp
new file mode 100644
index 0000000..12b538a
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/modulated_deform_conv.cpp
@@ -0,0 +1,237 @@
+// Copyright (c) OpenMMLab. All rights reserved
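+// [Editor's note] "Modulated" deformable convolution (DCNv2) samples each
+// kernel tap at a learned 2D offset and scales it by a learned modulation
+// mask. The wrappers below realize it in two steps: (1)
+// modulated_deformable_im2col gathers bilinearly interpolated samples at the
+// offset positions into `columns`, then (2) a grouped GEMM (addmm_) against
+// the flattened weights produces the output, so only the im2col/col2im steps
+// need custom device kernels.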
+#include "pytorch_cpp_helper.hpp"
+#include "pytorch_device_registry.hpp"
+
+void modulated_deformable_im2col_impl(
+    const Tensor data_im, const Tensor data_offset, const Tensor data_mask,
+    const int batch_size, const int channels, const int height_im,
+    const int width_im, const int height_col, const int width_col,
+    const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
+    const int stride_h, const int stride_w, const int dilation_h,
+    const int dilation_w, const int deformable_group, Tensor data_col) {
+  DISPATCH_DEVICE_IMPL(modulated_deformable_im2col_impl, data_im, data_offset,
+                       data_mask, batch_size, channels, height_im, width_im,
+                       height_col, width_col, kernel_h, kernel_w, pad_h, pad_w,
+                       stride_h, stride_w, dilation_h, dilation_w,
+                       deformable_group, data_col);
+}
+
+void modulated_deformable_col2im_impl(
+    const Tensor data_col, const Tensor data_offset, const Tensor data_mask,
+    const int batch_size, const int channels, const int height_im,
+    const int width_im, const int height_col, const int width_col,
+    const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
+    const int stride_h, const int stride_w, const int dilation_h,
+    const int dilation_w, const int deformable_group, Tensor grad_im) {
+  DISPATCH_DEVICE_IMPL(modulated_deformable_col2im_impl, data_col, data_offset,
+                       data_mask, batch_size, channels, height_im, width_im,
+                       height_col, width_col, kernel_h, kernel_w, pad_h, pad_w,
+                       stride_h, stride_w, dilation_h, dilation_w,
+                       deformable_group, grad_im);
+}
+
+void modulated_deformable_col2im_coord_impl(
+    const Tensor data_col, const Tensor data_im, const Tensor data_offset,
+    const Tensor data_mask, const int batch_size, const int channels,
+    const int height_im, const int width_im, const int height_col,
+    const int width_col, const int kernel_h, const int kernel_w,
+    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
+    const int dilation_h, const int dilation_w, const int deformable_group,
+    Tensor grad_offset, Tensor grad_mask) {
+  DISPATCH_DEVICE_IMPL(modulated_deformable_col2im_coord_impl, data_col,
+                       data_im, data_offset, data_mask, batch_size, channels,
+                       height_im, width_im, height_col, width_col, kernel_h,
+                       kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h,
+                       dilation_w, deformable_group, grad_offset, grad_mask);
+}
+
+void modulated_deform_conv_forward(
+    Tensor input, Tensor weight, Tensor bias, Tensor ones, Tensor offset,
+    Tensor mask, Tensor output, Tensor columns, int kernel_h, int kernel_w,
+    const int stride_h, const int stride_w, const int pad_h, const int pad_w,
+    const int dilation_h, const int dilation_w, const int group,
+    const int deformable_group, const bool with_bias) {
+  at::DeviceGuard guard(input.device());
+
+  const int batch = input.size(0);
+  const int channels = input.size(1);
+  const int height = input.size(2);
+  const int width = input.size(3);
+
+  const int channels_out = weight.size(0);
+  const int channels_kernel = weight.size(1);
+  const int kernel_h_ = weight.size(2);
+  const int kernel_w_ = weight.size(3);
+
+  if (kernel_h_ != kernel_h || kernel_w_ != kernel_w)
+    AT_ERROR("Input shape and kernel shape won't match: (%d x %d vs %d x %d).",
+             kernel_h, kernel_w, kernel_h_, kernel_w_);
+  if (channels != channels_kernel * group)
+    AT_ERROR("Input shape and kernel channels won't match: (%d vs %d).",
+             channels, channels_kernel * group);
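+  // [Editor's note] Standard dilated-convolution output extent:
+  // out = (in + 2 * pad - (dilation * (kernel - 1) + 1)) / stride + 1.
+  // E.g. a 64x64 input with kernel 3, pad 1, stride 1, dilation 1 stays
+  // 64x64, since (64 + 2 - 3) / 1 + 1 = 64.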
+
+  const int height_out =
+      (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
+  const int width_out =
+      (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
+
+  if (ones.ndimension() != 2 ||
+      ones.size(0) * ones.size(1) < height_out * width_out) {
+    // Resize plane and fill with ones...
+    ones = at::ones({height_out, width_out}, input.options());
+  }
+
+  // resize output
+  output = output.view({batch, channels_out, height_out, width_out}).zero_();
+  // resize temporary columns
+  columns =
+      at::zeros({channels * kernel_h * kernel_w, 1 * height_out * width_out},
+                input.options());
+
+  output = output.view({output.size(0), group, output.size(1) / group,
+                        output.size(2), output.size(3)});
+
+  for (int b = 0; b < batch; b++) {
+    modulated_deformable_im2col_impl(
+        input[b], offset[b], mask[b], 1, channels, height, width, height_out,
+        width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
+        dilation_h, dilation_w, deformable_group, columns);
+
+    // divide into group
+    weight = weight.view({group, weight.size(0) / group, weight.size(1),
+                          weight.size(2), weight.size(3)});
+    columns = columns.view({group, columns.size(0) / group, columns.size(1)});
+
+    for (int g = 0; g < group; g++) {
+      output[b][g] = output[b][g]
+                         .flatten(1)
+                         .addmm_(weight[g].flatten(1), columns[g])
+                         .view_as(output[b][g]);
+    }
+
+    weight = weight.view({weight.size(0) * weight.size(1), weight.size(2),
+                          weight.size(3), weight.size(4)});
+    columns =
+        columns.view({columns.size(0) * columns.size(1), columns.size(2)});
+  }
+
+  output = output.view({output.size(0), output.size(1) * output.size(2),
+                        output.size(3), output.size(4)});
+
+  if (with_bias) {
+    output += bias.view({1, bias.size(0), 1, 1});
+  }
+}
+
+void modulated_deform_conv_backward(
+    Tensor input, Tensor weight, Tensor bias, Tensor ones, Tensor offset,
+    Tensor mask, Tensor columns, Tensor grad_input, Tensor grad_weight,
+    Tensor grad_bias, Tensor grad_offset, Tensor grad_mask, Tensor grad_output,
+    int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h,
+    int pad_w, int dilation_h, int dilation_w, int group, int deformable_group,
+    const bool with_bias) {
+  at::DeviceGuard guard(input.device());
+
+  const int batch = input.size(0);
+  const int channels = input.size(1);
+  const int height = input.size(2);
+  const int width = input.size(3);
+
+  const int channels_kernel = weight.size(1);
+  const int kernel_h_ = weight.size(2);
+  const int kernel_w_ = weight.size(3);
+  if (kernel_h_ != kernel_h || kernel_w_ != kernel_w)
+    AT_ERROR("Input shape and kernel shape won't match: (%d x %d vs %d x %d).",
+             kernel_h, kernel_w, kernel_h_, kernel_w_);
+  if (channels != channels_kernel * group)
+    AT_ERROR("Input shape and kernel channels won't match: (%d vs %d).",
+             channels, channels_kernel * group);
+
+  const int height_out =
+      (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
+  const int width_out =
+      (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
+
+  if (ones.ndimension() != 2 ||
+      ones.size(0) * ones.size(1) < height_out * width_out) {
+    // Resize plane and fill with ones...
+    ones = at::ones({height_out, width_out}, input.options());
+  }
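+  // [Editor's note] `ones` is the all-ones plane consumed further down for
+  // the bias gradient: grad_bias[g].addmm_(grad_output[b][g].flatten(1),
+  // ones.view({-1, 1})) is a row-sum written as a matmul, so it reuses the
+  // same addmm_ path as the weight gradient.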
+
+  grad_input = grad_input.view({batch, channels, height, width});
+  columns = at::zeros({channels * kernel_h * kernel_w, height_out * width_out},
+                      input.options());
+
+  grad_output =
+      grad_output.view({grad_output.size(0), group, grad_output.size(1) / group,
+                        grad_output.size(2), grad_output.size(3)});
+
+  for (int b = 0; b < batch; b++) {
+    // divide into group
+    columns = columns.view({group, columns.size(0) / group, columns.size(1)});
+    weight = weight.view({group, weight.size(0) / group, weight.size(1),
+                          weight.size(2), weight.size(3)});
+
+    for (int g = 0; g < group; g++) {
+      columns[g].addmm_(weight[g].flatten(1).transpose(0, 1),
+                        grad_output[b][g].flatten(1), 0.0f, 1.0f);
+    }
+
+    columns =
+        columns.view({columns.size(0) * columns.size(1), columns.size(2)});
+    weight = weight.view({weight.size(0) * weight.size(1), weight.size(2),
+                          weight.size(3), weight.size(4)});
+
+    // gradient w.r.t. input coordinate data
+    modulated_deformable_col2im_coord_impl(
+        columns, input[b], offset[b], mask[b], 1, channels, height, width,
+        height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h,
+        stride_w, dilation_h, dilation_w, deformable_group, grad_offset[b],
+        grad_mask[b]);
+    // gradient w.r.t. input data
+    modulated_deformable_col2im_impl(
+        columns, offset[b], mask[b], 1, channels, height, width, height_out,
+        width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
+        dilation_h, dilation_w, deformable_group, grad_input[b]);
+
+    // gradient w.r.t. weight, dWeight should accumulate across the batch and
+    // group
+    modulated_deformable_im2col_impl(
+        input[b], offset[b], mask[b], 1, channels, height, width, height_out,
+        width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
+        dilation_h, dilation_w, deformable_group, columns);
+
+    columns = columns.view({group, columns.size(0) / group, columns.size(1)});
+    grad_weight = grad_weight.view({group, grad_weight.size(0) / group,
+                                    grad_weight.size(1), grad_weight.size(2),
+                                    grad_weight.size(3)});
+    if (with_bias)
+      grad_bias = grad_bias.view({group, grad_bias.size(0) / group});
+
+    for (int g = 0; g < group; g++) {
+      grad_weight[g] =
+          grad_weight[g]
+              .flatten(1)
+              .addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1))
+              .view_as(grad_weight[g]);
+      if (with_bias) {
+        grad_bias[g] =
+            grad_bias[g]
+                .view({-1, 1})
+                .addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1}))
+                .view(-1);
+      }
+    }
+
+    columns =
+        columns.view({columns.size(0) * columns.size(1), columns.size(2)});
+    grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1),
+                                    grad_weight.size(2), grad_weight.size(3),
+                                    grad_weight.size(4)});
+    if (with_bias)
+      grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)});
+  }
+  grad_output = grad_output.view({grad_output.size(0) * grad_output.size(1),
+                                  grad_output.size(2), grad_output.size(3),
+                                  grad_output.size(4)});
+}
diff --git a/mmcv/ops/csrc/pytorch/ms_deform_attn.cpp b/mmcv/ops/csrc/pytorch/ms_deform_attn.cpp
new file mode 100644
index 0000000..25c8f62
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/ms_deform_attn.cpp
@@ -0,0 +1,60 @@
+/*!
+**************************************************************************************************
+* Deformable DETR
+* Copyright (c) 2020 SenseTime. All Rights Reserved.
+* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************************************** +* Modified from +*https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +Tensor ms_deform_attn_impl_forward(const Tensor &value, + const Tensor &spatial_shapes, + const Tensor &level_start_index, + const Tensor &sampling_loc, + const Tensor &attn_weight, + const int im2col_step) { + return DISPATCH_DEVICE_IMPL(ms_deform_attn_impl_forward, value, + spatial_shapes, level_start_index, sampling_loc, + attn_weight, im2col_step); +} + +void ms_deform_attn_impl_backward( + const Tensor &value, const Tensor &spatial_shapes, + const Tensor &level_start_index, const Tensor &sampling_loc, + const Tensor &attn_weight, const Tensor &grad_output, Tensor &grad_value, + Tensor &grad_sampling_loc, Tensor &grad_attn_weight, + const int im2col_step) { + DISPATCH_DEVICE_IMPL(ms_deform_attn_impl_backward, value, spatial_shapes, + level_start_index, sampling_loc, attn_weight, + grad_output, grad_value, grad_sampling_loc, + grad_attn_weight, im2col_step); +} + +Tensor ms_deform_attn_forward(const Tensor &value, const Tensor &spatial_shapes, + const Tensor &level_start_index, + const Tensor &sampling_loc, + const Tensor &attn_weight, + const int im2col_step) { + at::DeviceGuard guard(value.device()); + return ms_deform_attn_impl_forward(value, spatial_shapes, level_start_index, + sampling_loc, attn_weight, im2col_step); +} + +void ms_deform_attn_backward(const Tensor &value, const Tensor &spatial_shapes, + const Tensor &level_start_index, + const Tensor &sampling_loc, + const Tensor &attn_weight, + const Tensor &grad_output, Tensor &grad_value, + Tensor &grad_sampling_loc, + Tensor &grad_attn_weight, const int im2col_step) { + at::DeviceGuard guard(value.device()); + ms_deform_attn_impl_backward(value, spatial_shapes, level_start_index, + sampling_loc, attn_weight, grad_output, + grad_value, grad_sampling_loc, grad_attn_weight, + im2col_step); +} diff --git a/mmcv/ops/csrc/pytorch/nms.cpp b/mmcv/ops/csrc/pytorch/nms.cpp new file mode 100644 index 0000000..199d8af --- /dev/null +++ b/mmcv/ops/csrc/pytorch/nms.cpp @@ -0,0 +1,33 @@ +// Copyright (c) OpenMMLab. 
All rights reserved
+#include "pytorch_cpp_helper.hpp"
+#include "pytorch_device_registry.hpp"
+
+Tensor nms_impl(Tensor boxes, Tensor scores, float iou_threshold, int offset) {
+  return DISPATCH_DEVICE_IMPL(nms_impl, boxes, scores, iou_threshold, offset);
+}
+
+Tensor softnms_impl(Tensor boxes, Tensor scores, Tensor dets,
+                    float iou_threshold, float sigma, float min_score,
+                    int method, int offset) {
+  return DISPATCH_DEVICE_IMPL(softnms_impl, boxes, scores, dets, iou_threshold,
+                              sigma, min_score, method, offset);
+}
+
+std::vector<std::vector<int> > nms_match_impl(Tensor dets,
+                                              float iou_threshold) {
+  return DISPATCH_DEVICE_IMPL(nms_match_impl, dets, iou_threshold);
+}
+
+Tensor nms(Tensor boxes, Tensor scores, float iou_threshold, int offset) {
+  return nms_impl(boxes, scores, iou_threshold, offset);
+}
+
+Tensor softnms(Tensor boxes, Tensor scores, Tensor dets, float iou_threshold,
+               float sigma, float min_score, int method, int offset) {
+  return softnms_impl(boxes, scores, dets, iou_threshold, sigma, min_score,
+                      method, offset);
+}
+
+std::vector<std::vector<int> > nms_match(Tensor dets, float iou_threshold) {
+  return nms_match_impl(dets, iou_threshold);
+}
diff --git a/mmcv/ops/csrc/pytorch/nms_rotated.cpp b/mmcv/ops/csrc/pytorch/nms_rotated.cpp
new file mode 100644
index 0000000..e4ef676
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/nms_rotated.cpp
@@ -0,0 +1,32 @@
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+// modified from
+// https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/nms_rotated/nms_rotated.h
+#include "pytorch_cpp_helper.hpp"
+
+Tensor nms_rotated_cpu(const Tensor dets, const Tensor scores,
+                       const float iou_threshold);
+
+#ifdef MMCV_WITH_CUDA
+Tensor nms_rotated_cuda(const Tensor dets, const Tensor scores,
+                        const Tensor order, const Tensor dets_sorted,
+                        const float iou_threshold, const int multi_label);
+#endif
+
+// Interface for Python
+// inline is needed to prevent multiple function definitions when this header
+// is included by different cpps
+Tensor nms_rotated(const Tensor dets, const Tensor scores, const Tensor order,
+                   const Tensor dets_sorted, const float iou_threshold,
+                   const int multi_label) {
+  assert(dets.device().is_cuda() == scores.device().is_cuda());
+  if (dets.device().is_cuda()) {
+#ifdef MMCV_WITH_CUDA
+    return nms_rotated_cuda(dets, scores, order, dets_sorted, iou_threshold,
+                            multi_label);
+#else
+    AT_ERROR("Not compiled with GPU support");
+#endif
+  }
+
+  return nms_rotated_cpu(dets, scores, iou_threshold);
+}
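+// [Editor's note] nms_rotated receives both the raw dets and a pre-sorted
+// copy (dets_sorted, with `order` holding the score-descending permutation)
+// because the CUDA kernel consumes boxes in score order, while the CPU branch
+// only needs dets/scores; as the calls above show, the multi_label flag is
+// honored only on the CUDA path.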
diff --git a/mmcv/ops/csrc/pytorch/pixel_group.cpp b/mmcv/ops/csrc/pytorch/pixel_group.cpp
new file mode 100755
index 0000000..2bf8c8b
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/pixel_group.cpp
@@ -0,0 +1,26 @@
+// Copyright (c) OpenMMLab. All rights reserved
+// It is modified from https://github.com/WenmuZhou/PAN.pytorch
+
+#include "pytorch_cpp_helper.hpp"
+#include "pytorch_device_registry.hpp"
+
+std::vector<std::vector<float> > pixel_group_impl(
+    Tensor score, Tensor mask, Tensor embedding, Tensor kernel_label,
+    Tensor kernel_contour, int kernel_region_num, float dis_threshold) {
+  return DISPATCH_DEVICE_IMPL(pixel_group_impl, score, mask, embedding,
+                              kernel_label, kernel_contour, kernel_region_num,
+                              dis_threshold);
+}
+
+std::vector<std::vector<float> > pixel_group(
+    Tensor score, Tensor mask, Tensor embedding, Tensor kernel_label,
+    Tensor kernel_contour, int kernel_region_num, float distance_threshold) {
+  score = score.contiguous();
+  mask = mask.contiguous();
+  embedding = embedding.contiguous();
+  kernel_label = kernel_label.contiguous();
+  kernel_contour = kernel_contour.contiguous();
+
+  return pixel_group_impl(score, mask, embedding, kernel_label, kernel_contour,
+                          kernel_region_num, distance_threshold);
+}
diff --git a/mmcv/ops/csrc/pytorch/points_in_boxes.cpp b/mmcv/ops/csrc/pytorch/points_in_boxes.cpp
new file mode 100644
index 0000000..540da94
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/points_in_boxes.cpp
@@ -0,0 +1,44 @@
+#include "pytorch_cpp_helper.hpp"
+#include "pytorch_device_registry.hpp"
+
+void points_in_boxes_part_forward_impl(int batch_size, int boxes_num,
+                                       int pts_num, const Tensor boxes,
+                                       const Tensor pts,
+                                       Tensor box_idx_of_points) {
+  DISPATCH_DEVICE_IMPL(points_in_boxes_part_forward_impl, batch_size, boxes_num,
+                       pts_num, boxes, pts, box_idx_of_points);
+}
+
+void points_in_boxes_all_forward_impl(int batch_size, int boxes_num,
+                                      int pts_num, const Tensor boxes,
+                                      const Tensor pts,
+                                      Tensor box_idx_of_points) {
+  DISPATCH_DEVICE_IMPL(points_in_boxes_all_forward_impl, batch_size, boxes_num,
+                       pts_num, boxes, pts, box_idx_of_points);
+}
+
+void points_in_boxes_part_forward(Tensor boxes_tensor, Tensor pts_tensor,
+                                  Tensor box_idx_of_points_tensor) {
+  // params boxes: (B, N, 7) [x, y, z, x_size, y_size, z_size, rz] in LiDAR
+  // coordinate, z is the bottom center, each box params pts: (B, npoints, 3)
+  // [x, y, z] in LiDAR coordinate params boxes_idx_of_points: (B, npoints),
+  // default -1
+  int batch_size = boxes_tensor.size(0);
+  int boxes_num = boxes_tensor.size(1);
+  int pts_num = pts_tensor.size(1);
+  points_in_boxes_part_forward_impl(batch_size, boxes_num, pts_num,
+                                    boxes_tensor, pts_tensor,
+                                    box_idx_of_points_tensor);
+}
+
+void points_in_boxes_all_forward(Tensor boxes_tensor, Tensor pts_tensor,
+                                 Tensor box_idx_of_points_tensor) {
+  // params boxes: (B, N, 7) [x, y, z, x_size, y_size, z_size, rz] in LiDAR
+  // coordinate, z is the bottom center. params pts: (B, npoints, 3) [x, y, z]
+  // in LiDAR coordinate params boxes_idx_of_points: (B, npoints), default -1
+  int batch_size = boxes_tensor.size(0);
+  int boxes_num = boxes_tensor.size(1);
+  int pts_num = pts_tensor.size(1);
+  points_in_boxes_all_forward_impl(batch_size, boxes_num, pts_num, boxes_tensor,
+                                   pts_tensor, box_idx_of_points_tensor);
+}
diff --git a/mmcv/ops/csrc/pytorch/psamask.cpp b/mmcv/ops/csrc/pytorch/psamask.cpp
new file mode 100644
index 0000000..6064c9b
--- /dev/null
+++ b/mmcv/ops/csrc/pytorch/psamask.cpp
@@ -0,0 +1,41 @@
+// Copyright (c) OpenMMLab.
All rights reserved +// Modified from +// https://github.com/hszhao/semseg/blob/master/lib/psa/src +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void psamask_forward_impl(const int psa_type, const Tensor input, Tensor output, + const int num_, const int h_feature, + const int w_feature, const int h_mask, + const int w_mask, const int half_h_mask, + const int half_w_mask) { + DISPATCH_DEVICE_IMPL(psamask_forward_impl, psa_type, input, output, num_, + h_feature, w_feature, h_mask, w_mask, half_h_mask, + half_w_mask); +} + +void psamask_backward_impl(const int psa_type, const Tensor grad_output, + Tensor grad_input, const int num_, + const int h_feature, const int w_feature, + const int h_mask, const int w_mask, + const int half_h_mask, const int half_w_mask) { + DISPATCH_DEVICE_IMPL(psamask_backward_impl, psa_type, grad_output, grad_input, + num_, h_feature, w_feature, h_mask, w_mask, half_h_mask, + half_w_mask); +} + +void psamask_forward(const Tensor input, Tensor output, const int psa_type, + const int num_, const int h_feature, const int w_feature, + const int h_mask, const int w_mask, const int half_h_mask, + const int half_w_mask) { + psamask_forward_impl(psa_type, input, output, num_, h_feature, w_feature, + h_mask, w_mask, half_h_mask, half_w_mask); +} + +void psamask_backward(Tensor grad_output, const Tensor grad_input, + const int psa_type, const int num_, const int h_feature, + const int w_feature, const int h_mask, const int w_mask, + const int half_h_mask, const int half_w_mask) { + psamask_backward_impl(psa_type, grad_output, grad_input, num_, h_feature, + w_feature, h_mask, w_mask, half_h_mask, half_w_mask); +} diff --git a/mmcv/ops/csrc/pytorch/pybind.cpp b/mmcv/ops/csrc/pytorch/pybind.cpp new file mode 100644 index 0000000..09d62d3 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/pybind.cpp @@ -0,0 +1,689 @@ +// Copyright (c) OpenMMLab. 
All rights reserved +#include "pytorch_cpp_helper.hpp" + +std::string get_compiler_version(); +std::string get_compiling_cuda_version(); + +void assign_score_withk_forward(const Tensor &points, const Tensor ¢ers, + const Tensor &scores, const Tensor &knn_idx, + Tensor &output, int B, int N0, int N1, int M, + int K, int O, int aggregate); + +void assign_score_withk_backward(const Tensor &grad_out, const Tensor &points, + const Tensor ¢ers, const Tensor &scores, + const Tensor &knn_idx, Tensor &grad_points, + Tensor &grad_centers, Tensor &grad_scores, + int B, int N0, int N1, int M, int K, int O, + int aggregate); + +void carafe_naive_forward(Tensor features, Tensor masks, Tensor output, + int kernel_size, int group_size, int scale_factor); + +void carafe_naive_backward(Tensor top_grad, Tensor features, Tensor masks, + Tensor bottom_grad, Tensor mask_grad, + int kernel_size, int group_size, int scale_factor); + +void carafe_forward(Tensor features, Tensor masks, Tensor rfeatures, + Tensor routput, Tensor rmasks, Tensor output, + int kernel_size, int group_size, int scale_factor); + +void carafe_backward(Tensor top_grad, Tensor rfeatures, Tensor masks, + Tensor rtop_grad, Tensor rbottom_grad_hs, + Tensor rbottom_grad, Tensor rmask_grad, Tensor bottom_grad, + Tensor mask_grad, int kernel_size, int group_size, + int scale_factor); + +void deform_conv_forward(Tensor input, Tensor weight, Tensor offset, + Tensor output, Tensor columns, Tensor ones, int kW, + int kH, int dW, int dH, int padW, int padH, + int dilationW, int dilationH, int group, + int deformable_group, int im2col_step); + +void deform_conv_backward_input(Tensor input, Tensor offset, Tensor gradOutput, + Tensor gradInput, Tensor gradOffset, + Tensor weight, Tensor columns, int kW, int kH, + int dW, int dH, int padW, int padH, + int dilationW, int dilationH, int group, + int deformable_group, int im2col_step); + +void deform_conv_backward_parameters(Tensor input, Tensor offset, + Tensor gradOutput, Tensor gradWeight, + Tensor columns, Tensor ones, int kW, + int kH, int dW, int dH, int padW, int padH, + int dilationW, int dilationH, int group, + int deformable_group, float scale, + int im2col_step); + +void deform_roi_pool_forward(Tensor input, Tensor rois, Tensor offset, + Tensor output, int pooled_height, int pooled_width, + float spatial_scale, int sampling_ratio, + float gamma); + +void deform_roi_pool_backward(Tensor grad_output, Tensor input, Tensor rois, + Tensor offset, Tensor grad_input, + Tensor grad_offset, int pooled_height, + int pooled_width, float spatial_scale, + int sampling_ratio, float gamma); + +void group_points_forward(Tensor points_tensor, Tensor idx_tensor, + Tensor out_tensor, int b, int c, int n, int npoints, + int nsample); + +void group_points_backward(Tensor grad_out_tensor, Tensor idx_tensor, + Tensor grad_points_tensor, int b, int c, int n, + int npoints, int nsample); + +void roipoint_pool3d_forward(Tensor xyz, Tensor boxes3d, Tensor pts_feature, + Tensor pooled_features, Tensor pooled_empty_flag); + +void gather_points_forward(Tensor points_tensor, Tensor idx_tensor, + Tensor out_tensor, int b, int c, int n, int npoints); + +void gather_points_backward(Tensor grad_out_tensor, Tensor idx_tensor, + Tensor grad_points_tensor, int b, int c, int n, + int npoints); + +void sigmoid_focal_loss_forward(Tensor input, Tensor target, Tensor weight, + Tensor output, float gamma, float alpha); + +void sigmoid_focal_loss_backward(Tensor input, Tensor target, Tensor weight, + Tensor grad_input, float gamma, float 
alpha);
+
+void softmax_focal_loss_forward(Tensor input, Tensor target, Tensor weight,
+                                Tensor output, float gamma, float alpha);
+
+void softmax_focal_loss_backward(Tensor input, Tensor target, Tensor weight,
+                                 Tensor buff, Tensor grad_input, float gamma,
+                                 float alpha);
+
+void three_interpolate_forward(Tensor points_tensor, Tensor idx_tensor,
+                               Tensor weight_tensor, Tensor out_tensor, int b,
+                               int c, int m, int n);
+
+void three_interpolate_backward(Tensor grad_out_tensor, Tensor idx_tensor,
+                                Tensor weight_tensor, Tensor grad_points_tensor,
+                                int b, int c, int n, int m);
+
+void three_nn_forward(Tensor unknown_tensor, Tensor known_tensor,
+                      Tensor dist2_tensor, Tensor idx_tensor, int b, int n,
+                      int m);
+
+void bbox_overlaps(const Tensor bboxes1, const Tensor bboxes2, Tensor ious,
+                   const int mode, const bool aligned, const int offset);
+
+void knn_forward(Tensor xyz_tensor, Tensor new_xyz_tensor, Tensor idx_tensor,
+                 Tensor dist2_tensor, int b, int n, int m, int nsample);
+void iou3d_boxes_overlap_bev_forward(Tensor boxes_a, Tensor boxes_b,
+                                     Tensor ans_overlap);
+
+void iou3d_boxes_iou_bev_forward(Tensor boxes_a, Tensor boxes_b,
+                                 Tensor ans_iou);
+
+void iou3d_nms_forward(Tensor boxes, Tensor keep, Tensor keep_num,
+                       float nms_overlap_thresh);
+
+void iou3d_nms_normal_forward(Tensor boxes, Tensor keep, Tensor keep_num,
+                              float nms_overlap_thresh);
+
+void furthest_point_sampling_forward(Tensor points_tensor, Tensor temp_tensor,
+                                     Tensor idx_tensor, int b, int n, int m);
+
+void furthest_point_sampling_with_dist_forward(Tensor points_tensor,
+                                               Tensor temp_tensor,
+                                               Tensor idx_tensor, int b, int n,
+                                               int m);
+
+void masked_im2col_forward(const Tensor im, const Tensor mask_h_idx,
+                           const Tensor mask_w_idx, Tensor col,
+                           const int kernel_h, const int kernel_w,
+                           const int pad_h, const int pad_w);
+
+void masked_col2im_forward(const Tensor col, const Tensor mask_h_idx,
+                           const Tensor mask_w_idx, Tensor im, int height,
+                           int width, int channels);
+
+void modulated_deform_conv_forward(
+    Tensor input, Tensor weight, Tensor bias, Tensor ones, Tensor offset,
+    Tensor mask, Tensor output, Tensor columns, int kernel_h, int kernel_w,
+    const int stride_h, const int stride_w, const int pad_h, const int pad_w,
+    const int dilation_h, const int dilation_w, const int group,
+    const int deformable_group, const bool with_bias);
+
+void modulated_deform_conv_backward(
+    Tensor input, Tensor weight, Tensor bias, Tensor ones, Tensor offset,
+    Tensor mask, Tensor columns, Tensor grad_input, Tensor grad_weight,
+    Tensor grad_bias, Tensor grad_offset, Tensor grad_mask, Tensor grad_output,
+    int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h,
+    int pad_w, int dilation_h, int dilation_w, int group, int deformable_group,
+    const bool with_bias);
+
+Tensor ms_deform_attn_forward(const Tensor &value, const Tensor &spatial_shapes,
+                              const Tensor &level_start_index,
+                              const Tensor &sampling_loc,
+                              const Tensor &attn_weight, const int im2col_step);
+
+void ms_deform_attn_backward(const Tensor &value, const Tensor &spatial_shapes,
+                             const Tensor &level_start_index,
+                             const Tensor &sampling_loc,
+                             const Tensor &attn_weight,
+                             const Tensor &grad_output, Tensor &grad_value,
+                             Tensor &grad_sampling_loc,
+                             Tensor &grad_attn_weight, const int im2col_step);
+
+Tensor nms(Tensor boxes, Tensor scores, float iou_threshold, int offset);
+
+Tensor softnms(Tensor boxes, Tensor scores, Tensor dets, float iou_threshold,
+               float sigma, float min_score, int method, int offset);
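+// [Editor's note] The prototypes in this file re-declare functions that are
+// defined in the per-op .cpp files added earlier in this patch, and
+// PYBIND11_MODULE below binds each one. Keeping pybind.cpp free of op logic
+// means a new op only touches its own translation unit plus one m.def entry
+// here.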
+
+std::vector<std::vector<int> > nms_match(Tensor dets, float iou_threshold);
+
+std::vector<std::vector<float> > pixel_group(
+    Tensor score, Tensor mask, Tensor embedding, Tensor kernel_label,
+    Tensor kernel_contour, int kernel_region_num, float distance_threshold);
+
+std::vector<std::vector<int> > contour_expand(Tensor kernel_mask,
+                                              Tensor internal_kernel_label,
+                                              int min_kernel_area,
+                                              int kernel_num);
+
+void roi_align_forward(Tensor input, Tensor rois, Tensor output,
+                       Tensor argmax_y, Tensor argmax_x, int aligned_height,
+                       int aligned_width, float spatial_scale,
+                       int sampling_ratio, int pool_mode, bool aligned);
+
+void roi_align_backward(Tensor grad_output, Tensor rois, Tensor argmax_y,
+                        Tensor argmax_x, Tensor grad_input, int aligned_height,
+                        int aligned_width, float spatial_scale,
+                        int sampling_ratio, int pool_mode, bool aligned);
+
+void roi_pool_forward(Tensor input, Tensor rois, Tensor output, Tensor argmax,
+                      int pooled_height, int pooled_width, float spatial_scale);
+
+void roi_pool_backward(Tensor grad_output, Tensor rois, Tensor argmax,
+                       Tensor grad_input, int pooled_height, int pooled_width,
+                       float spatial_scale);
+
+void sync_bn_forward_mean(const Tensor input, Tensor mean);
+
+void sync_bn_forward_var(const Tensor input, const Tensor mean, Tensor var);
+
+void sync_bn_forward_output(const Tensor input, const Tensor mean,
+                            const Tensor var, const Tensor weight,
+                            const Tensor bias, Tensor running_mean,
+                            Tensor running_var, Tensor norm, Tensor std,
+                            Tensor output, float eps, float momentum,
+                            int group_size);
+
+void sync_bn_backward_param(const Tensor grad_output, const Tensor norm,
+                            Tensor grad_weight, Tensor grad_bias);
+
+void sync_bn_backward_data(const Tensor grad_output, const Tensor weight,
+                           const Tensor grad_weight, const Tensor grad_bias,
+                           const Tensor norm, const Tensor std,
+                           Tensor grad_input);
+
+void psamask_forward(const Tensor input, Tensor output, const int psa_type,
+                     const int num_, const int h_feature, const int w_feature,
+                     const int h_mask, const int w_mask, const int half_h_mask,
+                     const int half_w_mask);
+
+void psamask_backward(Tensor grad_output, const Tensor grad_input,
+                      const int psa_type, const int num_, const int h_feature,
+                      const int w_feature, const int h_mask, const int w_mask,
+                      const int half_h_mask, const int half_w_mask);
+
+void tin_shift_forward(Tensor input, Tensor shift, Tensor output);
+
+void tin_shift_backward(Tensor grad_output, Tensor shift, Tensor grad_input);
+
+void ball_query_forward(Tensor new_xyz_tensor, Tensor xyz_tensor,
+                        Tensor idx_tensor, int b, int n, int m,
+                        float min_radius, float max_radius, int nsample);
+
+Tensor bottom_pool_forward(Tensor input);
+
+Tensor bottom_pool_backward(Tensor input, Tensor grad_output);
+
+Tensor left_pool_forward(Tensor input);
+
+Tensor left_pool_backward(Tensor input, Tensor grad_output);
+
+Tensor right_pool_forward(Tensor input);
+
+Tensor right_pool_backward(Tensor input, Tensor grad_output);
+
+Tensor top_pool_forward(Tensor input);
+
+Tensor top_pool_backward(Tensor input, Tensor grad_output);
+
+void box_iou_rotated(const Tensor boxes1, const Tensor boxes2, Tensor ious,
+                     const int mode_flag, const bool aligned);
+
+Tensor nms_rotated(const Tensor dets, const Tensor scores, const Tensor order,
+                   const Tensor dets_sorted, const float iou_threshold,
+                   const int multi_label);
+
+Tensor upfirdn2d(const Tensor &input, const Tensor &kernel, int up_x, int up_y,
+                 int down_x, int down_y, int pad_x0, int pad_x1, int pad_y0,
+                 int pad_y1);
+
+Tensor fused_bias_leakyrelu(const Tensor &input, const Tensor &bias,
+                            const Tensor &refer, int act, int grad, float alpha,
+                            float scale);
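+// [Editor's note] Mind the keyword mismatch for fused_bias_leakyrelu: the
+// third C++ parameter is `refer`, but the m.def below exposes it to Python as
+// py::arg("empty"). pybind11 matches keywords against the binding names, not
+// the C++ parameter names, so Python callers must pass empty=... (or rely on
+// positional order).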
+
+void roi_align_rotated_forward(Tensor input, Tensor rois, Tensor output,
+                               int pooled_height, int pooled_width,
+                               float spatial_scale, int sample_num,
+                               bool aligned, bool clockwise);
+
+void roi_align_rotated_backward(Tensor grad_output, Tensor rois,
+                                Tensor grad_input, int pooled_height,
+                                int pooled_width, float spatial_scale,
+                                int sample_num, bool aligned, bool clockwise);
+
+std::vector<torch::Tensor> dynamic_point_to_voxel_forward(
+    const torch::Tensor &feats, const torch::Tensor &coors,
+    const std::string &reduce_type);
+
+void dynamic_point_to_voxel_backward(torch::Tensor &grad_feats,
+                                     const torch::Tensor &grad_reduced_feats,
+                                     const torch::Tensor &feats,
+                                     const torch::Tensor &reduced_feats,
+                                     const torch::Tensor &coors_idx,
+                                     const torch::Tensor &reduce_count,
+                                     const std::string &reduce_type);
+
+void hard_voxelize_forward(const at::Tensor &points,
+                           const at::Tensor &voxel_size,
+                           const at::Tensor &coors_range, at::Tensor &voxels,
+                           at::Tensor &coors, at::Tensor &num_points_per_voxel,
+                           at::Tensor &voxel_num, const int max_points,
+                           const int max_voxels, const int NDim);
+
+void dynamic_voxelize_forward(const at::Tensor &points,
+                              const at::Tensor &voxel_size,
+                              const at::Tensor &coors_range, at::Tensor &coors,
+                              const int NDim);
+
+void border_align_forward(const Tensor &input, const Tensor &boxes,
+                          Tensor output, Tensor argmax_idx,
+                          const int pool_size);
+
+void border_align_backward(const Tensor &grad_output, const Tensor &boxes,
+                           const Tensor &argmax_idx, Tensor grad_input,
+                           const int pool_size);
+
+void points_in_boxes_cpu_forward(Tensor boxes_tensor, Tensor pts_tensor,
+                                 Tensor pts_indices_tensor);
+
+void points_in_boxes_part_forward(Tensor boxes_tensor, Tensor pts_tensor,
+                                  Tensor box_idx_of_points_tensor);
+
+void points_in_boxes_all_forward(Tensor boxes_tensor, Tensor pts_tensor,
+                                 Tensor box_idx_of_points_tensor);
+
+void roiaware_pool3d_forward(Tensor rois, Tensor pts, Tensor pts_feature,
+                             Tensor argmax, Tensor pts_idx_of_voxels,
+                             Tensor pooled_features, int pool_method);
+
+void roiaware_pool3d_backward(Tensor pts_idx_of_voxels, Tensor argmax,
+                              Tensor grad_out, Tensor grad_in, int pool_method);
+
+void correlation_forward(Tensor input1, Tensor input2, Tensor output, int kH,
+                         int kW, int patchH, int patchW, int padH, int padW,
+                         int dilationH, int dilationW, int dilation_patchH,
+                         int dilation_patchW, int dH, int dW);
+
+void correlation_backward(Tensor grad_output, Tensor input1, Tensor input2,
+                          Tensor grad_input1, Tensor grad_input2, int kH,
+                          int kW, int patchH, int patchW, int padH, int padW,
+                          int dilationH, int dilationW, int dilation_patchH,
+                          int dilation_patchW, int dH, int dW);
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)", py::arg("input"),
+        py::arg("kernel"), py::arg("up_x"), py::arg("up_y"), py::arg("down_x"),
+        py::arg("down_y"), py::arg("pad_x0"), py::arg("pad_x1"),
+        py::arg("pad_y0"), py::arg("pad_y1"));
+  m.def("fused_bias_leakyrelu", &fused_bias_leakyrelu,
+        "fused_bias_leakyrelu (CUDA)", py::arg("input"), py::arg("bias"),
+        py::arg("empty"), py::arg("act"), py::arg("grad"), py::arg("alpha"),
+        py::arg("scale"));
+  m.def("gather_points_forward", &gather_points_forward,
+        "gather_points_forward", py::arg("points_tensor"),
+        py::arg("idx_tensor"), py::arg("out_tensor"), py::arg("b"),
+        py::arg("c"), py::arg("n"), py::arg("npoints"));
+  m.def("gather_points_backward", &gather_points_backward,
+        "gather_points_backward", py::arg("grad_out_tensor"),
+        py::arg("idx_tensor"),
py::arg("grad_points_tensor"), py::arg("b"), + py::arg("c"), py::arg("n"), py::arg("npoints")); + m.def("get_compiler_version", &get_compiler_version, "get_compiler_version"); + m.def("get_compiling_cuda_version", &get_compiling_cuda_version, + "get_compiling_cuda_version"); + m.def("assign_score_withk_forward", &assign_score_withk_forward, + "assign_score_withk_forward", py::arg("points"), py::arg("centers"), + py::arg("scores"), py::arg("knn_idx"), py::arg("output"), py::arg("B"), + py::arg("N0"), py::arg("N1"), py::arg("M"), py::arg("K"), py::arg("O"), + py::arg("aggregate")); + m.def("assign_score_withk_backward", &assign_score_withk_backward, + "assign_score_withk_backward", py::arg("grad_out"), py::arg("points"), + py::arg("centers"), py::arg("scores"), py::arg("knn_idx"), + py::arg("grad_points"), py::arg("grad_centers"), py::arg("grad_scores"), + py::arg("B"), py::arg("N0"), py::arg("N1"), py::arg("M"), py::arg("K"), + py::arg("O"), py::arg("aggregate")); + m.def("knn_forward", &knn_forward, "knn_forward", py::arg("xyz_tensor"), + py::arg("new_xyz_tensor"), py::arg("idx_tensor"), + py::arg("dist2_tensor"), py::arg("b"), py::arg("n"), py::arg("m"), + py::arg("nsample")); + m.def("carafe_naive_forward", &carafe_naive_forward, "carafe_naive_forward", + py::arg("features"), py::arg("masks"), py::arg("output"), + py::arg("kernel_size"), py::arg("group_size"), py::arg("scale_factor")); + m.def("carafe_naive_backward", &carafe_naive_backward, + "carafe_naive_backward", py::arg("top_grad"), py::arg("features"), + py::arg("masks"), py::arg("bottom_grad"), py::arg("mask_grad"), + py::arg("kernel_size"), py::arg("group_size"), py::arg("scale_factor")); + m.def("carafe_forward", &carafe_forward, "carafe_forward", + py::arg("features"), py::arg("masks"), py::arg("rfeatures"), + py::arg("routput"), py::arg("rmasks"), py::arg("output"), + py::arg("kernel_size"), py::arg("group_size"), py::arg("scale_factor")); + m.def("carafe_backward", &carafe_backward, "carafe_backward", + py::arg("top_grad"), py::arg("rfeatures"), py::arg("masks"), + py::arg("rtop_grad"), py::arg("rbottom_grad_hs"), + py::arg("rbottom_grad"), py::arg("rmask_grad"), py::arg("bottom_grad"), + py::arg("mask_grad"), py::arg("kernel_size"), py::arg("group_size"), + py::arg("scale_factor")); + m.def("deform_conv_forward", &deform_conv_forward, "deform_conv_forward", + py::arg("input"), py::arg("weight"), py::arg("offset"), + py::arg("output"), py::arg("columns"), py::arg("ones"), py::arg("kW"), + py::arg("kH"), py::arg("dW"), py::arg("dH"), py::arg("padH"), + py::arg("padW"), py::arg("dilationW"), py::arg("dilationH"), + py::arg("group"), py::arg("deformable_group"), py::arg("im2col_step")); + m.def("deform_conv_backward_input", &deform_conv_backward_input, + "deform_conv_backward_input", py::arg("input"), py::arg("offset"), + py::arg("gradOutput"), py::arg("gradInput"), py::arg("gradOffset"), + py::arg("weight"), py::arg("columns"), py::arg("kW"), py::arg("kH"), + py::arg("dW"), py::arg("dH"), py::arg("padH"), py::arg("padW"), + py::arg("dilationW"), py::arg("dilationH"), py::arg("group"), + py::arg("deformable_group"), py::arg("im2col_step")); + m.def("deform_conv_backward_parameters", &deform_conv_backward_parameters, + "deform_conv_backward_parameters", py::arg("input"), py::arg("offset"), + py::arg("gradOutput"), py::arg("gradWeight"), py::arg("columns"), + py::arg("ones"), py::arg("kW"), py::arg("kH"), py::arg("dW"), + py::arg("dH"), py::arg("padH"), py::arg("padW"), py::arg("dilationW"), + py::arg("dilationH"), 
py::arg("group"), py::arg("deformable_group"), + py::arg("scale"), py::arg("im2col_step")); + m.def("deform_roi_pool_forward", &deform_roi_pool_forward, + "deform roi pool forward", py::arg("input"), py::arg("rois"), + py::arg("offset"), py::arg("output"), py::arg("pooled_height"), + py::arg("pooled_width"), py::arg("spatial_scale"), + py::arg("sampling_ratio"), py::arg("gamma")); + m.def("deform_roi_pool_backward", &deform_roi_pool_backward, + "deform roi pool backward", py::arg("grad_output"), py::arg("input"), + py::arg("rois"), py::arg("offset"), py::arg("grad_input"), + py::arg("grad_offset"), py::arg("pooled_height"), + py::arg("pooled_width"), py::arg("spatial_scale"), + py::arg("sampling_ratio"), py::arg("gamma")); + m.def("roipoint_pool3d_forward", &roipoint_pool3d_forward, + "roipoint_pool3d_forward", py::arg("xyz"), py::arg("boxes3d"), + py::arg("pts_feature"), py::arg("pooled_features"), + py::arg("pooled_empty_flag")); + m.def("sigmoid_focal_loss_forward", &sigmoid_focal_loss_forward, + "sigmoid_focal_loss_forward ", py::arg("input"), py::arg("target"), + py::arg("weight"), py::arg("output"), py::arg("gamma"), + py::arg("alpha")); + m.def("sigmoid_focal_loss_backward", &sigmoid_focal_loss_backward, + "sigmoid_focal_loss_backward", py::arg("input"), py::arg("target"), + py::arg("weight"), py::arg("grad_input"), py::arg("gamma"), + py::arg("alpha")); + m.def("softmax_focal_loss_forward", &softmax_focal_loss_forward, + "softmax_focal_loss_forward", py::arg("input"), py::arg("target"), + py::arg("weight"), py::arg("output"), py::arg("gamma"), + py::arg("alpha")); + m.def("softmax_focal_loss_backward", &softmax_focal_loss_backward, + "softmax_focal_loss_backward", py::arg("input"), py::arg("target"), + py::arg("weight"), py::arg("buff"), py::arg("grad_input"), + py::arg("gamma"), py::arg("alpha")); + m.def("three_interpolate_forward", &three_interpolate_forward, + "three_interpolate_forward", py::arg("points_tensor"), + py::arg("idx_tensor"), py::arg("weight_tensor"), py::arg("out_tensor"), + py::arg("b"), py::arg("c"), py::arg("m"), py::arg("n")); + m.def("three_interpolate_backward", &three_interpolate_backward, + "three_interpolate_backward", py::arg("grad_out_tensor"), + py::arg("idx_tensor"), py::arg("weight_tensor"), + py::arg("grad_points_tensor"), py::arg("b"), py::arg("c"), py::arg("n"), + py::arg("m")); + m.def("three_nn_forward", &three_nn_forward, "three_nn_forward", + py::arg("unknown_tensor"), py::arg("known_tensor"), + py::arg("dist2_tensor"), py::arg("idx_tensor"), py::arg("b"), + py::arg("n"), py::arg("m")); + m.def("bbox_overlaps", &bbox_overlaps, "bbox_overlaps", py::arg("bboxes1"), + py::arg("bboxes2"), py::arg("ious"), py::arg("mode"), + py::arg("aligned"), py::arg("offset")); + m.def("group_points_forward", &group_points_forward, "group_points_forward", + py::arg("points_tensor"), py::arg("idx_tensor"), py::arg("out_tensor"), + py::arg("b"), py::arg("c"), py::arg("n"), py::arg("npoints"), + py::arg("nsample")); + m.def("group_points_backward", &group_points_backward, + "group_points_backward", py::arg("grad_out_tensor"), + py::arg("idx_tensor"), py::arg("grad_points_tensor"), py::arg("b"), + py::arg("c"), py::arg("n"), py::arg("npoints"), py::arg("nsample")); + m.def("knn_forward", &knn_forward, "knn_forward", py::arg("b"), py::arg("n"), + py::arg("m"), py::arg("nsample"), py::arg("xyz_tensor"), + py::arg("new_xyz_tensor"), py::arg("idx_tensor"), + py::arg("dist2_tensor")); + m.def("iou3d_boxes_overlap_bev_forward", &iou3d_boxes_overlap_bev_forward, + 
"iou3d_boxes_overlap_bev_forward", py::arg("boxes_a"), + py::arg("boxes_b"), py::arg("ans_overlap")); + m.def("iou3d_boxes_iou_bev_forward", &iou3d_boxes_iou_bev_forward, + "iou3d_boxes_iou_bev_forward", py::arg("boxes_a"), py::arg("boxes_b"), + py::arg("ans_iou")); + m.def("iou3d_nms_forward", &iou3d_nms_forward, "iou3d_nms_forward", + py::arg("boxes"), py::arg("keep"), py::arg("num_out"), + py::arg("nms_overlap_thresh")); + m.def("iou3d_nms_normal_forward", &iou3d_nms_normal_forward, + "iou3d_nms_normal_forward", py::arg("boxes"), py::arg("keep"), + py::arg("num_out"), py::arg("nms_overlap_thresh")); + m.def("furthest_point_sampling_forward", &furthest_point_sampling_forward, + "furthest_point_sampling_forward", py::arg("points_tensor"), + py::arg("temp_tensor"), py::arg("idx_tensor"), py::arg("b"), + py::arg("n"), py::arg("m")); + m.def("furthest_point_sampling_with_dist_forward", + &furthest_point_sampling_with_dist_forward, + "furthest_point_sampling_with_dist_forward", py::arg("points_tensor"), + py::arg("temp_tensor"), py::arg("idx_tensor"), py::arg("b"), + py::arg("n"), py::arg("m")); + m.def("masked_im2col_forward", &masked_im2col_forward, + "masked_im2col_forward", py::arg("im"), py::arg("mask_h_idx"), + py::arg("mask_w_idx"), py::arg("col"), py::arg("kernel_h"), + py::arg("kernel_w"), py::arg("pad_h"), py::arg("pad_w")); + m.def("masked_col2im_forward", &masked_col2im_forward, + "masked_col2im_forward", py::arg("col"), py::arg("mask_h_idx"), + py::arg("mask_w_idx"), py::arg("im"), py::arg("height"), + py::arg("width"), py::arg("channels")); + m.def("modulated_deform_conv_forward", &modulated_deform_conv_forward, + "modulated deform conv forward", py::arg("input"), py::arg("weight"), + py::arg("bias"), py::arg("ones"), py::arg("offset"), py::arg("mask"), + py::arg("output"), py::arg("columns"), py::arg("kernel_h"), + py::arg("kernel_w"), py::arg("stride_h"), py::arg("stride_w"), + py::arg("pad_h"), py::arg("pad_w"), py::arg("dilation_h"), + py::arg("dilation_w"), py::arg("group"), py::arg("deformable_group"), + py::arg("with_bias")); + m.def("modulated_deform_conv_backward", &modulated_deform_conv_backward, + "modulated deform conv backward", py::arg("input"), py::arg("weight"), + py::arg("bias"), py::arg("ones"), py::arg("offset"), py::arg("mask"), + py::arg("columns"), py::arg("grad_input"), py::arg("grad_weight"), + py::arg("grad_bias"), py::arg("grad_offset"), py::arg("grad_mask"), + py::arg("grad_output"), py::arg("kernel_h"), py::arg("kernel_w"), + py::arg("stride_h"), py::arg("stride_w"), py::arg("pad_h"), + py::arg("pad_w"), py::arg("dilation_h"), py::arg("dilation_w"), + py::arg("group"), py::arg("deformable_group"), py::arg("with_bias")); + m.def("nms", &nms, "nms (CPU/CUDA) ", py::arg("boxes"), py::arg("scores"), + py::arg("iou_threshold"), py::arg("offset")); + m.def("softnms", &softnms, "softnms (CPU) ", py::arg("boxes"), + py::arg("scores"), py::arg("dets"), py::arg("iou_threshold"), + py::arg("sigma"), py::arg("min_score"), py::arg("method"), + py::arg("offset")); + m.def("nms_match", &nms_match, "nms_match (CPU) ", py::arg("dets"), + py::arg("iou_threshold")); + m.def("pixel_group", &pixel_group, "pixel group (CPU) ", py::arg("score"), + py::arg("mask"), py::arg("embedding"), py::arg("kernel_label"), + py::arg("kernel_contour"), py::arg("kernel_region_label"), + py::arg("distance_threshold")); + m.def("contour_expand", &contour_expand, "contour exapnd (CPU) ", + py::arg("kernel_mask"), py::arg("internal_kernel_label"), + py::arg("min_kernel_area"), 
py::arg("kernel_num")); + m.def("roi_align_forward", &roi_align_forward, "roi_align forward", + py::arg("input"), py::arg("rois"), py::arg("output"), + py::arg("argmax_y"), py::arg("argmax_x"), py::arg("aligned_height"), + py::arg("aligned_width"), py::arg("spatial_scale"), + py::arg("sampling_ratio"), py::arg("pool_mode"), py::arg("aligned")); + m.def("roi_align_backward", &roi_align_backward, "roi_align backward", + py::arg("grad_output"), py::arg("rois"), py::arg("argmax_y"), + py::arg("argmax_x"), py::arg("grad_input"), py::arg("aligned_height"), + py::arg("aligned_width"), py::arg("spatial_scale"), + py::arg("sampling_ratio"), py::arg("pool_mode"), py::arg("aligned")); + m.def("roi_pool_forward", &roi_pool_forward, "roi_pool forward", + py::arg("input"), py::arg("rois"), py::arg("output"), py::arg("argmax"), + py::arg("pooled_height"), py::arg("pooled_width"), + py::arg("spatial_scale")); + m.def("roi_pool_backward", &roi_pool_backward, "roi_pool backward", + py::arg("grad_output"), py::arg("rois"), py::arg("argmax"), + py::arg("grad_input"), py::arg("pooled_height"), + py::arg("pooled_width"), py::arg("spatial_scale")); + m.def("sync_bn_forward_mean", &sync_bn_forward_mean, "sync_bn forward_mean", + py::arg("input"), py::arg("mean")); + m.def("sync_bn_forward_var", &sync_bn_forward_var, "sync_bn forward_var", + py::arg("input"), py::arg("mean"), py::arg("var")); + m.def("sync_bn_forward_output", &sync_bn_forward_output, + "sync_bn forward_output", py::arg("input"), py::arg("mean"), + py::arg("var"), py::arg("weight"), py::arg("bias"), + py::arg("running_mean"), py::arg("running_var"), py::arg("norm"), + py::arg("std"), py::arg("output"), py::arg("eps"), py::arg("momentum"), + py::arg("group_size")); + m.def("sync_bn_backward_param", &sync_bn_backward_param, + "sync_bn backward_param", py::arg("grad_output"), py::arg("norm"), + py::arg("grad_weight"), py::arg("grad_bias")); + m.def("sync_bn_backward_data", &sync_bn_backward_data, + "sync_bn backward_data", py::arg("grad_output"), py::arg("weight"), + py::arg("grad_weight"), py::arg("grad_bias"), py::arg("norm"), + py::arg("std"), py::arg("grad_input")); + m.def("psamask_forward", &psamask_forward, "PSAMASK forward (CPU/CUDA)", + py::arg("input"), py::arg("output"), py::arg("psa_type"), + py::arg("num_"), py::arg("h_feature"), py::arg("w_feature"), + py::arg("h_mask"), py::arg("w_mask"), py::arg("half_h_mask"), + py::arg("half_w_mask")); + m.def("psamask_backward", &psamask_backward, "PSAMASK backward (CPU/CUDA)", + py::arg("grad_output"), py::arg("grad_input"), py::arg("psa_type"), + py::arg("num_"), py::arg("h_feature"), py::arg("w_feature"), + py::arg("h_mask"), py::arg("w_mask"), py::arg("half_h_mask"), + py::arg("half_w_mask")); + m.def("tin_shift_forward", &tin_shift_forward, "tin_shift forward", + py::arg("input"), py::arg("shift"), py::arg("output")); + m.def("tin_shift_backward", &tin_shift_backward, "tin_shift backward", + py::arg("grad_output"), py::arg("shift"), py::arg("grad_input")); + m.def("bottom_pool_forward", &bottom_pool_forward, "Bottom Pool Forward", + py::arg("input"), py::call_guard()); + m.def("bottom_pool_backward", &bottom_pool_backward, "Bottom Pool Backward", + py::arg("input"), py::arg("grad_output"), + py::call_guard()); + m.def("left_pool_forward", &left_pool_forward, "Left Pool Forward", + py::arg("input"), py::call_guard()); + m.def("left_pool_backward", &left_pool_backward, "Left Pool Backward", + py::arg("input"), py::arg("grad_output"), + py::call_guard()); + m.def("right_pool_forward", 
&right_pool_forward, "Right Pool Forward", + py::arg("input"), py::call_guard()); + m.def("right_pool_backward", &right_pool_backward, "Right Pool Backward", + py::arg("input"), py::arg("grad_output"), + py::call_guard()); + m.def("top_pool_forward", &top_pool_forward, "Top Pool Forward", + py::arg("input"), py::call_guard()); + m.def("top_pool_backward", &top_pool_backward, "Top Pool Backward", + py::arg("input"), py::arg("grad_output"), + py::call_guard()); + m.def("box_iou_rotated", &box_iou_rotated, "IoU for rotated boxes", + py::arg("boxes1"), py::arg("boxes2"), py::arg("ious"), + py::arg("mode_flag"), py::arg("aligned")); + m.def("nms_rotated", &nms_rotated, "NMS for rotated boxes", py::arg("dets"), + py::arg("scores"), py::arg("order"), py::arg("dets_sorted"), + py::arg("iou_threshold"), py::arg("multi_label")); + m.def("ball_query_forward", &ball_query_forward, "ball_query_forward", + py::arg("new_xyz_tensor"), py::arg("xyz_tensor"), py::arg("idx_tensor"), + py::arg("b"), py::arg("n"), py::arg("m"), py::arg("min_radius"), + py::arg("max_radius"), py::arg("nsample")); + m.def("roi_align_rotated_forward", &roi_align_rotated_forward, + "roi_align_rotated forward", py::arg("input"), py::arg("rois"), + py::arg("output"), py::arg("pooled_height"), py::arg("pooled_width"), + py::arg("spatial_scale"), py::arg("sample_num"), py::arg("aligned"), + py::arg("clockwise")); + m.def("roi_align_rotated_backward", &roi_align_rotated_backward, + "roi_align_rotated backward", py::arg("rois"), py::arg("grad_input"), + py::arg("grad_output"), py::arg("pooled_height"), + py::arg("pooled_width"), py::arg("spatial_scale"), + py::arg("sample_num"), py::arg("aligned"), py::arg("clockwise")); + m.def("dynamic_point_to_voxel_forward", &dynamic_point_to_voxel_forward, + "dynamic_point_to_voxel_forward", py::arg("feats"), py::arg("coors"), + py::arg("reduce_type")); + m.def("dynamic_point_to_voxel_backward", &dynamic_point_to_voxel_backward, + "dynamic_point_to_voxel_backward", py::arg("grad_feats"), + py::arg("grad_reduced_feats"), py::arg("feats"), + py::arg("reduced_feats"), py::arg("coors_idx"), py::arg("reduce_count"), + py::arg("reduce_type")); + m.def("hard_voxelize_forward", &hard_voxelize_forward, + "hard_voxelize_forward", py::arg("points"), py::arg("voxel_size"), + py::arg("coors_range"), py::arg("voxels"), py::arg("coors"), + py::arg("num_points_per_voxel"), py::arg("voxel_num"), + py::arg("max_points"), py::arg("max_voxels"), py::arg("NDim")); + m.def("dynamic_voxelize_forward", &dynamic_voxelize_forward, + "dynamic_voxelize_forward", py::arg("points"), py::arg("voxel_size"), + py::arg("coors_range"), py::arg("coors"), py::arg("NDim")); + m.def("ms_deform_attn_forward", &ms_deform_attn_forward, + "forward function of multi-scale deformable attention", + py::arg("value"), py::arg("value_spatial_shapes"), + py::arg("value_level_start_index"), py::arg("sampling_locations"), + py::arg("attention_weights"), py::arg("im2col_step")); + m.def("ms_deform_attn_backward", &ms_deform_attn_backward, + "backward function of multi-scale deformable attention", + py::arg("value"), py::arg("value_spatial_shapes"), + py::arg("value_level_start_index"), py::arg("sampling_locations"), + py::arg("attention_weights"), py::arg("grad_output"), + py::arg("grad_value"), py::arg("grad_sampling_loc"), + py::arg("grad_attn_weight"), py::arg("im2col_step")); + m.def("border_align_forward", &border_align_forward, + "forward function of border_align", py::arg("input"), py::arg("boxes"), + py::arg("output"), py::arg("argmax_idx"), 
py::arg("pool_size")); + m.def("border_align_backward", &border_align_backward, + "backward function of border_align", py::arg("grad_output"), + py::arg("boxes"), py::arg("argmax_idx"), py::arg("grad_input"), + py::arg("pool_size")); + m.def("correlation_forward", &correlation_forward, "Correlation forward", + py::arg("input1"), py::arg("input2"), py::arg("output"), py::arg("kH"), + py::arg("kW"), py::arg("patchH"), py::arg("patchW"), py::arg("padH"), + py::arg("padW"), py::arg("dilationH"), py::arg("dilationW"), + py::arg("dilation_patchH"), py::arg("dilation_patchW"), py::arg("dH"), + py::arg("dW")); + m.def("correlation_backward", &correlation_backward, "Correlation backward", + py::arg("grad_output"), py::arg("input1"), py::arg("input2"), + py::arg("grad_input1"), py::arg("grad_input2"), py::arg("kH"), + py::arg("kW"), py::arg("patchH"), py::arg("patchW"), py::arg("padH"), + py::arg("padW"), py::arg("dilationH"), py::arg("dilationW"), + py::arg("dilation_patchH"), py::arg("dilation_patchW"), py::arg("dH"), + py::arg("dW")); + m.def("points_in_boxes_cpu_forward", &points_in_boxes_cpu_forward, + "points_in_boxes_cpu_forward", py::arg("boxes_tensor"), + py::arg("pts_tensor"), py::arg("pts_indices_tensor")); + m.def("points_in_boxes_part_forward", &points_in_boxes_part_forward, + "points_in_boxes_part_forward", py::arg("boxes_tensor"), + py::arg("pts_tensor"), py::arg("box_idx_of_points_tensor")); + m.def("points_in_boxes_all_forward", &points_in_boxes_all_forward, + "points_in_boxes_all_forward", py::arg("boxes_tensor"), + py::arg("pts_tensor"), py::arg("box_idx_of_points_tensor")); + m.def("roiaware_pool3d_forward", &roiaware_pool3d_forward, + "roiaware_pool3d_forward", py::arg("rois"), py::arg("pts"), + py::arg("pts_feature"), py::arg("argmax"), py::arg("pts_idx_of_voxels"), + py::arg("pooled_features"), py::arg("pool_method")); + m.def("roiaware_pool3d_backward", &roiaware_pool3d_backward, + "roiaware_pool3d_backward", py::arg("pts_idx_of_voxels"), + py::arg("argmax"), py::arg("grad_out"), py::arg("grad_in"), + py::arg("pool_method")); +} diff --git a/mmcv/ops/csrc/pytorch/roi_align.cpp b/mmcv/ops/csrc/pytorch/roi_align.cpp new file mode 100644 index 0000000..6e70773 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/roi_align.cpp @@ -0,0 +1,41 @@ +// Copyright (c) OpenMMLab. 
All rights reserved +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void roi_align_forward_impl(Tensor input, Tensor rois, Tensor output, + Tensor argmax_y, Tensor argmax_x, + int aligned_height, int aligned_width, + float spatial_scale, int sampling_ratio, + int pool_mode, bool aligned) { + DISPATCH_DEVICE_IMPL(roi_align_forward_impl, input, rois, output, argmax_y, + argmax_x, aligned_height, aligned_width, spatial_scale, + sampling_ratio, pool_mode, aligned); +} + +void roi_align_backward_impl(Tensor grad_output, Tensor rois, Tensor argmax_y, + Tensor argmax_x, Tensor grad_input, + int aligned_height, int aligned_width, + float spatial_scale, int sampling_ratio, + int pool_mode, bool aligned) { + DISPATCH_DEVICE_IMPL(roi_align_backward_impl, grad_output, rois, argmax_y, + argmax_x, grad_input, aligned_height, aligned_width, + spatial_scale, sampling_ratio, pool_mode, aligned); +} + +void roi_align_forward(Tensor input, Tensor rois, Tensor output, + Tensor argmax_y, Tensor argmax_x, int aligned_height, + int aligned_width, float spatial_scale, + int sampling_ratio, int pool_mode, bool aligned) { + roi_align_forward_impl(input, rois, output, argmax_y, argmax_x, + aligned_height, aligned_width, spatial_scale, + sampling_ratio, pool_mode, aligned); +} + +void roi_align_backward(Tensor grad_output, Tensor rois, Tensor argmax_y, + Tensor argmax_x, Tensor grad_input, int aligned_height, + int aligned_width, float spatial_scale, + int sampling_ratio, int pool_mode, bool aligned) { + roi_align_backward_impl(grad_output, rois, argmax_y, argmax_x, grad_input, + aligned_height, aligned_width, spatial_scale, + sampling_ratio, pool_mode, aligned); +} diff --git a/mmcv/ops/csrc/pytorch/roi_align_rotated.cpp b/mmcv/ops/csrc/pytorch/roi_align_rotated.cpp new file mode 100644 index 0000000..5ef691a --- /dev/null +++ b/mmcv/ops/csrc/pytorch/roi_align_rotated.cpp @@ -0,0 +1,41 @@ +// Copyright (c) OpenMMLab. 
All rights reserved +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void roi_align_rotated_forward_impl(Tensor features, Tensor rois, Tensor output, + int aligned_height, int aligned_width, + float spatial_scale, int sample_ratio, + bool aligned, bool clockwise) { + DISPATCH_DEVICE_IMPL(roi_align_rotated_forward_impl, features, rois, output, + aligned_height, aligned_width, spatial_scale, + sample_ratio, aligned, clockwise); +} + +void roi_align_rotated_backward_impl(Tensor top_grad, Tensor rois, + Tensor bottom_grad, int aligned_height, + int aligned_width, float spatial_scale, + int sample_ratio, bool aligned, + bool clockwise) { + DISPATCH_DEVICE_IMPL(roi_align_rotated_backward_impl, top_grad, rois, + bottom_grad, aligned_height, aligned_width, + spatial_scale, sample_ratio, aligned, clockwise); +} + +void roi_align_rotated_forward(Tensor input, Tensor rois, Tensor output, + int aligned_height, int aligned_width, + float spatial_scale, int sampling_ratio, + bool aligned, bool clockwise) { + roi_align_rotated_forward_impl(input, rois, output, aligned_height, + aligned_width, spatial_scale, sampling_ratio, + aligned, clockwise); +} + +void roi_align_rotated_backward(Tensor top_grad, Tensor rois, + Tensor bottom_grad, int aligned_height, + int aligned_width, float spatial_scale, + int sampling_ratio, bool aligned, + bool clockwise) { + roi_align_rotated_backward_impl(top_grad, rois, bottom_grad, aligned_height, + aligned_width, spatial_scale, sampling_ratio, + aligned, clockwise); +} diff --git a/mmcv/ops/csrc/pytorch/roi_pool.cpp b/mmcv/ops/csrc/pytorch/roi_pool.cpp new file mode 100644 index 0000000..bba90b8 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/roi_pool.cpp @@ -0,0 +1,31 @@ +// Copyright (c) OpenMMLab. All rights reserved +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void roi_pool_forward_impl(Tensor input, Tensor rois, Tensor output, + Tensor argmax, int pooled_height, int pooled_width, + float spatial_scale) { + DISPATCH_DEVICE_IMPL(roi_pool_forward_impl, input, rois, output, argmax, + pooled_height, pooled_width, spatial_scale); +} + +void roi_pool_backward_impl(Tensor grad_output, Tensor rois, Tensor argmax, + Tensor grad_input, int pooled_height, + int pooled_width, float spatial_scale) { + DISPATCH_DEVICE_IMPL(roi_pool_backward_impl, grad_output, rois, argmax, + grad_input, pooled_height, pooled_width, spatial_scale); +} + +void roi_pool_forward(Tensor input, Tensor rois, Tensor output, Tensor argmax, + int pooled_height, int pooled_width, + float spatial_scale) { + roi_pool_forward_impl(input, rois, output, argmax, pooled_height, + pooled_width, spatial_scale); +} + +void roi_pool_backward(Tensor grad_output, Tensor rois, Tensor argmax, + Tensor grad_input, int pooled_height, int pooled_width, + float spatial_scale) { + roi_pool_backward_impl(grad_output, rois, argmax, grad_input, pooled_height, + pooled_width, spatial_scale); +} diff --git a/mmcv/ops/csrc/pytorch/roiaware_pool3d.cpp b/mmcv/ops/csrc/pytorch/roiaware_pool3d.cpp new file mode 100644 index 0000000..6cf9cf0 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/roiaware_pool3d.cpp @@ -0,0 +1,72 @@ +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void roiaware_pool3d_forward_impl(int boxes_num, int pts_num, int channels, + int max_pts_each_voxel, int out_x, int out_y, + int out_z, const Tensor rois, + const Tensor pts, const Tensor pts_feature, + Tensor argmax, Tensor pts_idx_of_voxels, + Tensor pooled_features, int pool_method) { + 
DISPATCH_DEVICE_IMPL(roiaware_pool3d_forward_impl, boxes_num, pts_num, + channels, max_pts_each_voxel, out_x, out_y, out_z, rois, + pts, pts_feature, argmax, pts_idx_of_voxels, + pooled_features, pool_method); +} + +void roiaware_pool3d_backward_impl(int boxes_num, int out_x, int out_y, + int out_z, int channels, + int max_pts_each_voxel, + const Tensor pts_idx_of_voxels, + const Tensor argmax, const Tensor grad_out, + Tensor grad_in, int pool_method) { + DISPATCH_DEVICE_IMPL(roiaware_pool3d_backward_impl, boxes_num, out_x, out_y, + out_z, channels, max_pts_each_voxel, pts_idx_of_voxels, + argmax, grad_out, grad_in, pool_method); +} + +void roiaware_pool3d_forward(Tensor rois, Tensor pts, Tensor pts_feature, + Tensor argmax, Tensor pts_idx_of_voxels, + Tensor pooled_features, int pool_method) { + // params rois: (N, 7) [x, y, z, x_size, y_size, z_size, ry] in LiDAR + // coordinate + // params pts: (npoints, 3) [x, y, z] in LiDAR coordinate + // params pts_feature: (npoints, C) + // params argmax: (N, out_x, out_y, out_z, C) + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) + // params pooled_features: (N, out_x, out_y, out_z, C) + // params pool_method: 0: max_pool 1: avg_pool + int boxes_num = rois.size(0); + int pts_num = pts.size(0); + int channels = pts_feature.size(1); + int max_pts_each_voxel = pts_idx_of_voxels.size(4); // index 0 is the counter + int out_x = pts_idx_of_voxels.size(1); + int out_y = pts_idx_of_voxels.size(2); + int out_z = pts_idx_of_voxels.size(3); + assert((out_x < 256) && (out_y < 256) && + (out_z < 256)); // we encode index with 8bit + + roiaware_pool3d_forward_impl(boxes_num, pts_num, channels, max_pts_each_voxel, + out_x, out_y, out_z, rois, pts, pts_feature, + argmax, pts_idx_of_voxels, pooled_features, + pool_method); +} + +void roiaware_pool3d_backward(Tensor pts_idx_of_voxels, Tensor argmax, + Tensor grad_out, Tensor grad_in, + int pool_method) { + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) + // params argmax: (N, out_x, out_y, out_z, C) + // params grad_out: (N, out_x, out_y, out_z, C) + // params grad_in: (npoints, C), return value + // params pool_method: 0: max_pool 1: avg_pool + int boxes_num = pts_idx_of_voxels.size(0); + int out_x = pts_idx_of_voxels.size(1); + int out_y = pts_idx_of_voxels.size(2); + int out_z = pts_idx_of_voxels.size(3); + int max_pts_each_voxel = pts_idx_of_voxels.size(4); // index 0 is the counter + int channels = grad_out.size(4); + + roiaware_pool3d_backward_impl(boxes_num, out_x, out_y, out_z, channels, + max_pts_each_voxel, pts_idx_of_voxels, argmax, + grad_out, grad_in, pool_method); +} diff --git a/mmcv/ops/csrc/pytorch/roipoint_pool3d.cpp b/mmcv/ops/csrc/pytorch/roipoint_pool3d.cpp new file mode 100644 index 0000000..a10080b --- /dev/null +++ b/mmcv/ops/csrc/pytorch/roipoint_pool3d.cpp @@ -0,0 +1,39 @@ +/* +Modified from +https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/roipoint_pool3d/src/roipoint_pool3d.cpp +Point cloud feature pooling +Written by Shaoshuai Shi +All Rights Reserved 2018. 
+*/ + +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void roipoint_pool3d_forward_impl(int batch_size, int pts_num, int boxes_num, + int feature_in_len, int sampled_pts_num, + const Tensor xyz, const Tensor boxes3d, + const Tensor pts_feature, + Tensor pooled_features, + Tensor pooled_empty_flag) { + DISPATCH_DEVICE_IMPL(roipoint_pool3d_forward_impl, batch_size, pts_num, + boxes_num, feature_in_len, sampled_pts_num, xyz, boxes3d, + pts_feature, pooled_features, pooled_empty_flag); +} + +void roipoint_pool3d_forward(Tensor xyz, Tensor boxes3d, Tensor pts_feature, + Tensor pooled_features, Tensor pooled_empty_flag) { + // params xyz: (B, N, 3) + // params boxes3d: (B, M, 7) + // params pts_feature: (B, N, C) + // params pooled_features: (B, M, 512, 3+C) + // params pooled_empty_flag: (B, M) + int batch_size = xyz.size(0); + int pts_num = xyz.size(1); + int boxes_num = boxes3d.size(1); + int feature_in_len = pts_feature.size(2); + int sampled_pts_num = pooled_features.size(2); + + roipoint_pool3d_forward_impl(batch_size, pts_num, boxes_num, feature_in_len, + sampled_pts_num, xyz, boxes3d, pts_feature, + pooled_features, pooled_empty_flag); +} diff --git a/mmcv/ops/csrc/pytorch/scatter_points.cpp b/mmcv/ops/csrc/pytorch/scatter_points.cpp new file mode 100644 index 0000000..0de8ebf --- /dev/null +++ b/mmcv/ops/csrc/pytorch/scatter_points.cpp @@ -0,0 +1,53 @@ +// Copyright (c) OpenMMLab. All rights reserved. +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +typedef enum { SUM = 0, MEAN = 1, MAX = 2 } reduce_t; + +std::vector<torch::Tensor> dynamic_point_to_voxel_forward_impl( + const torch::Tensor &feats, const torch::Tensor &coors, + const reduce_t reduce_type) { + return DISPATCH_DEVICE_IMPL(dynamic_point_to_voxel_forward_impl, feats, coors, + reduce_type); +} + +void dynamic_point_to_voxel_backward_impl( + torch::Tensor &grad_feats, const torch::Tensor &grad_reduced_feats, + const torch::Tensor &feats, const torch::Tensor &reduced_feats, + const torch::Tensor &coors_idx, const torch::Tensor &reduce_count, + const reduce_t reduce_type) { + DISPATCH_DEVICE_IMPL(dynamic_point_to_voxel_backward_impl, grad_feats, + grad_reduced_feats, feats, reduced_feats, coors_idx, + reduce_count, reduce_type); +} + +inline reduce_t convert_reduce_type(const std::string &reduce_type) { + if (reduce_type == "max") + return reduce_t::MAX; + else if (reduce_type == "sum") + return reduce_t::SUM; + else if (reduce_type == "mean") + return reduce_t::MEAN; + else + TORCH_CHECK(false, "do not support reduce type " + reduce_type) + return reduce_t::SUM; +} + +std::vector<torch::Tensor> dynamic_point_to_voxel_forward( + const torch::Tensor &feats, const torch::Tensor &coors, + const std::string &reduce_type) { + return dynamic_point_to_voxel_forward_impl(feats, coors, + convert_reduce_type(reduce_type)); +} + +void dynamic_point_to_voxel_backward(torch::Tensor &grad_feats, + const torch::Tensor &grad_reduced_feats, + const torch::Tensor &feats, + const torch::Tensor &reduced_feats, + const torch::Tensor &coors_idx, + const torch::Tensor &reduce_count, + const std::string &reduce_type) { + dynamic_point_to_voxel_backward_impl(grad_feats, grad_reduced_feats, feats, + reduced_feats, coors_idx, reduce_count, + convert_reduce_type(reduce_type)); +} diff --git a/mmcv/ops/csrc/pytorch/sync_bn.cpp b/mmcv/ops/csrc/pytorch/sync_bn.cpp new file mode 100644 index 0000000..fd5a513 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/sync_bn.cpp @@ -0,0 +1,69 @@ +// Copyright (c) OpenMMLab.
All rights reserved +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void sync_bn_forward_mean_impl(const Tensor input, Tensor mean) { + DISPATCH_DEVICE_IMPL(sync_bn_forward_mean_impl, input, mean); +} + +void sync_bn_forward_var_impl(const Tensor input, const Tensor mean, + Tensor var) { + DISPATCH_DEVICE_IMPL(sync_bn_forward_var_impl, input, mean, var); +} + +void sync_bn_forward_output_impl(const Tensor input, const Tensor mean, + const Tensor var, Tensor running_mean, + Tensor running_var, const Tensor weight, + const Tensor bias, Tensor norm, Tensor std, + Tensor output, float eps, float momentum, + int group_size) { + DISPATCH_DEVICE_IMPL(sync_bn_forward_output_impl, input, mean, var, + running_mean, running_var, weight, bias, norm, std, + output, eps, momentum, group_size); +} + +void sync_bn_backward_param_impl(const Tensor grad_output, const Tensor norm, + Tensor grad_weight, Tensor grad_bias) { + DISPATCH_DEVICE_IMPL(sync_bn_backward_param_impl, grad_output, norm, + grad_weight, grad_bias); +} + +void sync_bn_backward_data_impl(const Tensor grad_output, const Tensor weight, + const Tensor grad_weight, + const Tensor grad_bias, const Tensor norm, + const Tensor std, Tensor grad_input) { + DISPATCH_DEVICE_IMPL(sync_bn_backward_data_impl, grad_output, weight, + grad_weight, grad_bias, norm, std, grad_input); +} + +void sync_bn_forward_mean(const Tensor input, Tensor mean) { + sync_bn_forward_mean_impl(input, mean); +} + +void sync_bn_forward_var(const Tensor input, const Tensor mean, Tensor var) { + sync_bn_forward_var_impl(input, mean, var); +} + +void sync_bn_forward_output(const Tensor input, const Tensor mean, + const Tensor var, const Tensor weight, + const Tensor bias, Tensor running_mean, + Tensor running_var, Tensor norm, Tensor std, + Tensor output, float eps, float momentum, + int group_size) { + sync_bn_forward_output_impl(input, mean, var, running_mean, running_var, + weight, bias, norm, std, output, eps, momentum, + group_size); +} + +void sync_bn_backward_param(const Tensor grad_output, const Tensor norm, + Tensor grad_weight, Tensor grad_bias) { + sync_bn_backward_param_impl(grad_output, norm, grad_weight, grad_bias); +} + +void sync_bn_backward_data(const Tensor grad_output, const Tensor weight, + const Tensor grad_weight, const Tensor grad_bias, + const Tensor norm, const Tensor std, + Tensor grad_input) { + sync_bn_backward_data_impl(grad_output, weight, grad_weight, grad_bias, norm, + std, grad_input); +} diff --git a/mmcv/ops/csrc/pytorch/three_interpolate.cpp b/mmcv/ops/csrc/pytorch/three_interpolate.cpp new file mode 100644 index 0000000..1e0ec71 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/three_interpolate.cpp @@ -0,0 +1,33 @@ +// Modified from +// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/interpolate.cpp + +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void three_interpolate_forward_impl(int b, int c, int m, int n, + const Tensor points, const Tensor idx, + const Tensor weight, Tensor out) { + DISPATCH_DEVICE_IMPL(three_interpolate_forward_impl, b, c, m, n, points, idx, + weight, out); +} + +void three_interpolate_backward_impl(int b, int c, int n, int m, + const Tensor grad_out, const Tensor idx, + const Tensor weight, Tensor grad_points) { + DISPATCH_DEVICE_IMPL(three_interpolate_backward_impl, b, c, n, m, grad_out, + idx, weight, grad_points); +} + +void three_interpolate_forward(Tensor points_tensor, Tensor idx_tensor, + Tensor weight_tensor, Tensor out_tensor, int 
b, + int c, int m, int n) { + three_interpolate_forward_impl(b, c, m, n, points_tensor, idx_tensor, + weight_tensor, out_tensor); +} + +void three_interpolate_backward(Tensor grad_out_tensor, Tensor idx_tensor, + Tensor weight_tensor, Tensor grad_points_tensor, + int b, int c, int n, int m) { + three_interpolate_backward_impl(b, c, n, m, grad_out_tensor, idx_tensor, + weight_tensor, grad_points_tensor); +} diff --git a/mmcv/ops/csrc/pytorch/three_nn.cpp b/mmcv/ops/csrc/pytorch/three_nn.cpp new file mode 100644 index 0000000..b629200 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/three_nn.cpp @@ -0,0 +1,18 @@ +// Modified from +// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/interpolate.cpp + +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void three_nn_forward_impl(int b, int n, int m, const Tensor unknown, + const Tensor known, Tensor dist2, Tensor idx) { + DISPATCH_DEVICE_IMPL(three_nn_forward_impl, b, n, m, unknown, known, dist2, + idx); +} + +void three_nn_forward(Tensor unknown_tensor, Tensor known_tensor, + Tensor dist2_tensor, Tensor idx_tensor, int b, int n, + int m) { + three_nn_forward_impl(b, n, m, unknown_tensor, known_tensor, dist2_tensor, + idx_tensor); +} diff --git a/mmcv/ops/csrc/pytorch/tin_shift.cpp b/mmcv/ops/csrc/pytorch/tin_shift.cpp new file mode 100644 index 0000000..b03f587 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/tin_shift.cpp @@ -0,0 +1,20 @@ +// Copyright (c) OpenMMLab. All rights reserved +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +void tin_shift_forward_impl(Tensor input, Tensor shift, Tensor output) { + DISPATCH_DEVICE_IMPL(tin_shift_forward_impl, input, shift, output); +} + +void tin_shift_backward_impl(Tensor grad_output, Tensor shift, + Tensor grad_input) { + DISPATCH_DEVICE_IMPL(tin_shift_backward_impl, grad_output, shift, grad_input); +} + +void tin_shift_forward(Tensor input, Tensor shift, Tensor output) { + tin_shift_forward_impl(input, shift, output); +} + +void tin_shift_backward(Tensor grad_output, Tensor shift, Tensor grad_input) { + tin_shift_backward_impl(grad_output, shift, grad_input); +} diff --git a/mmcv/ops/csrc/pytorch/upfirdn2d.cpp b/mmcv/ops/csrc/pytorch/upfirdn2d.cpp new file mode 100644 index 0000000..dd325bd --- /dev/null +++ b/mmcv/ops/csrc/pytorch/upfirdn2d.cpp @@ -0,0 +1,118 @@ +// Modified from +// https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d.cpp + +/* +Copyright (c) 2021, NVIDIA Corporation. All rights reserved. + +NVIDIA Source Code License for StyleGAN2 with Adaptive Discriminator +Augmentation (ADA) +======================================================================= + +1. Definitions + +"Licensor" means any person or entity that distributes its Work. + +"Software" means the original work of authorship made available under +this License. + +"Work" means the Software and any additions to or derivative works of +the Software that are made available under this License. + +The terms "reproduce," "reproduction," "derivative works," and +"distribution" have the meaning as provided under U.S. copyright law; +provided, however, that for the purposes of this License, derivative +works shall not include works that remain separable from, or merely +link (or bind by name) to the interfaces of, the Work. 
+ +Works, including the Software, are "made available" under this License +by including in or with the Work either (a) a copyright notice +referencing the applicability of this License to the Work, or (b) a +copy of this License. + +2. License Grants + + 2.1 Copyright Grant. Subject to the terms and conditions of this + License, each Licensor grants to you a perpetual, worldwide, + non-exclusive, royalty-free, copyright license to reproduce, + prepare derivative works of, publicly display, publicly perform, + sublicense and distribute its Work and any resulting derivative + works in any form. + +3. Limitations + + 3.1 Redistribution. You may reproduce or distribute the Work only + if (a) you do so under this License, (b) you include a complete + copy of this License with your distribution, and (c) you retain + without modification any copyright, patent, trademark, or + attribution notices that are present in the Work. + + 3.2 Derivative Works. You may specify that additional or different + terms apply to the use, reproduction, and distribution of your + derivative works of the Work ("Your Terms") only if (a) Your Terms + provide that the use limitation in Section 3.3 applies to your + derivative works, and (b) you identify the specific derivative + works that are subject to Your Terms. Notwithstanding Your Terms, + this License (including the redistribution requirements in Section + 3.1) will continue to apply to the Work itself. + + 3.3 Use Limitation. The Work and any derivative works thereof only + may be used or intended for use non-commercially. Notwithstanding + the foregoing, NVIDIA and its affiliates may use the Work and any + derivative works commercially. As used herein, "non-commercially" + means for research or evaluation purposes only. + + 3.4 Patent Claims. If you bring or threaten to bring a patent claim + against any Licensor (including any claim, cross-claim or + counterclaim in a lawsuit) to enforce any patents that you allege + are infringed by any Work, then your rights under this License from + such Licensor (including the grant in Section 2.1) will terminate + immediately. + + 3.5 Trademarks. This License does not grant any rights to use any + Licensor’s or its affiliates’ names, logos, or trademarks, except + as necessary to reproduce the notices described in this License. + + 3.6 Termination. If you violate any term of this License, then your + rights under this License (including the grant in Section 2.1) will + terminate immediately. + +4. Disclaimer of Warranty. + +THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR +NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER +THIS LICENSE. + +5. Limitation of Liability. + +EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL +THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE +SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT, +INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF +OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK +(INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION, +LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER +COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF +THE POSSIBILITY OF SUCH DAMAGES. 
+ +======================================================================= +*/ + +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +torch::Tensor upfirdn2d_op_impl(const torch::Tensor& input, + const torch::Tensor& kernel, int up_x, int up_y, + int down_x, int down_y, int pad_x0, int pad_x1, + int pad_y0, int pad_y1) { + return DISPATCH_DEVICE_IMPL(upfirdn2d_op_impl, input, kernel, up_x, up_y, + down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1); +} + +torch::Tensor upfirdn2d(const torch::Tensor& input, const torch::Tensor& kernel, + int up_x, int up_y, int down_x, int down_y, int pad_x0, + int pad_x1, int pad_y0, int pad_y1) { + return upfirdn2d_op_impl(input, kernel, up_x, up_y, down_x, down_y, pad_x0, + pad_x1, pad_y0, pad_y1); +} diff --git a/mmcv/ops/csrc/pytorch/voxelization.cpp b/mmcv/ops/csrc/pytorch/voxelization.cpp new file mode 100644 index 0000000..1d1c229 --- /dev/null +++ b/mmcv/ops/csrc/pytorch/voxelization.cpp @@ -0,0 +1,56 @@ +// Copyright (c) OpenMMLab. All rights reserved. +#include "pytorch_cpp_helper.hpp" +#include "pytorch_device_registry.hpp" + +int hard_voxelize_forward_impl(const at::Tensor &points, at::Tensor &voxels, + at::Tensor &coors, + at::Tensor &num_points_per_voxel, + const std::vector voxel_size, + const std::vector coors_range, + const int max_points, const int max_voxels, + const int NDim = 3) { + return DISPATCH_DEVICE_IMPL(hard_voxelize_forward_impl, points, voxels, coors, + num_points_per_voxel, voxel_size, coors_range, + max_points, max_voxels, NDim); +} + +void dynamic_voxelize_forward_impl(const at::Tensor &points, at::Tensor &coors, + const std::vector voxel_size, + const std::vector coors_range, + const int NDim = 3) { + DISPATCH_DEVICE_IMPL(dynamic_voxelize_forward_impl, points, coors, voxel_size, + coors_range, NDim); +} + +void hard_voxelize_forward(const at::Tensor &points, + const at::Tensor &voxel_size, + const at::Tensor &coors_range, at::Tensor &voxels, + at::Tensor &coors, at::Tensor &num_points_per_voxel, + at::Tensor &voxel_num, const int max_points, + const int max_voxels, const int NDim = 3) { + int64_t *voxel_num_data = voxel_num.data_ptr(); + std::vector voxel_size_v( + voxel_size.data_ptr(), + voxel_size.data_ptr() + voxel_size.numel()); + std::vector coors_range_v( + coors_range.data_ptr(), + coors_range.data_ptr() + coors_range.numel()); + + *voxel_num_data = hard_voxelize_forward_impl( + points, voxels, coors, num_points_per_voxel, voxel_size_v, coors_range_v, + max_points, max_voxels, NDim); +} + +void dynamic_voxelize_forward(const at::Tensor &points, + const at::Tensor &voxel_size, + const at::Tensor &coors_range, at::Tensor &coors, + const int NDim = 3) { + std::vector voxel_size_v( + voxel_size.data_ptr(), + voxel_size.data_ptr() + voxel_size.numel()); + std::vector coors_range_v( + coors_range.data_ptr(), + coors_range.data_ptr() + coors_range.numel()); + dynamic_voxelize_forward_impl(points, coors, voxel_size_v, coors_range_v, + NDim); +} diff --git a/mmcv/ops/deform_conv.py b/mmcv/ops/deform_conv.py new file mode 100644 index 0000000..bca9b56 --- /dev/null +++ b/mmcv/ops/deform_conv.py @@ -0,0 +1,405 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair, _single + +from mmcv.utils import deprecated_api_warning +from ..models.bricks import CONV_LAYERS +from ..utils import ext_loader, print_log + +ext_module = ext_loader.load_ext('_ext', [ + 'deform_conv_forward', 'deform_conv_backward_input', + 'deform_conv_backward_parameters' +]) + + +class DeformConv2dFunction(Function): + + @staticmethod + def symbolic(g, + input, + offset, + weight, + stride, + padding, + dilation, + groups, + deform_groups, + bias=False, + im2col_step=32): + return g.op( + 'mmcv::MMCVDeformConv2d', + input, + offset, + weight, + stride_i=stride, + padding_i=padding, + dilation_i=dilation, + groups_i=groups, + deform_groups_i=deform_groups, + bias_i=bias, + im2col_step_i=im2col_step) + + @staticmethod + def forward(ctx, + input, + offset, + weight, + stride=1, + padding=0, + dilation=1, + groups=1, + deform_groups=1, + bias=False, + im2col_step=32): + if input is not None and input.dim() != 4: + raise ValueError( + f'Expected 4D tensor as input, got {input.dim()}D tensor \ + instead.') + assert bias is False, 'Only support bias is False.' + ctx.stride = _pair(stride) + ctx.padding = _pair(padding) + ctx.dilation = _pair(dilation) + ctx.groups = groups + ctx.deform_groups = deform_groups + ctx.im2col_step = im2col_step + + # When pytorch version >= 1.6.0, amp is adopted for fp16 mode; + # amp won't cast the type of model (float32), but "offset" is cast + # to float16 by nn.Conv2d automatically, leading to the type + # mismatch with input (when it is float32) or weight. + # The flag for whether to use fp16 or amp is the type of "offset", + # we cast weight and input to temporarily support fp16 and amp + # whatever the pytorch version is. 
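        # (Illustrative elaboration of the note above, not original code:
        # under torch.cuda.amp.autocast a float32 model still emits a
        # float16 `offset` from its nn.Conv2d branch, so the casts below
        # pull `input` and `weight` down to float16 to match; without
        # autocast `offset` stays float32 and both casts are no-ops.)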
+ input = input.type_as(offset) + weight = weight.type_as(input) + ctx.save_for_backward(input, offset, weight) + + output = input.new_empty( + DeformConv2dFunction._output_size(ctx, input, weight)) + + ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones + + cur_im2col_step = min(ctx.im2col_step, input.size(0)) + assert (input.size(0) % cur_im2col_step + ) == 0, 'batch size must be divisible by im2col_step' + ext_module.deform_conv_forward( + input, + weight, + offset, + output, + ctx.bufs_[0], + ctx.bufs_[1], + kW=weight.size(3), + kH=weight.size(2), + dW=ctx.stride[1], + dH=ctx.stride[0], + padW=ctx.padding[1], + padH=ctx.padding[0], + dilationW=ctx.dilation[1], + dilationH=ctx.dilation[0], + group=ctx.groups, + deformable_group=ctx.deform_groups, + im2col_step=cur_im2col_step) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + input, offset, weight = ctx.saved_tensors + + grad_input = grad_offset = grad_weight = None + + cur_im2col_step = min(ctx.im2col_step, input.size(0)) + assert (input.size(0) % cur_im2col_step + ) == 0, 'batch size must be divisible by im2col_step' + + grad_output = grad_output.contiguous() + if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + ext_module.deform_conv_backward_input( + input, + offset, + grad_output, + grad_input, + grad_offset, + weight, + ctx.bufs_[0], + kW=weight.size(3), + kH=weight.size(2), + dW=ctx.stride[1], + dH=ctx.stride[0], + padW=ctx.padding[1], + padH=ctx.padding[0], + dilationW=ctx.dilation[1], + dilationH=ctx.dilation[0], + group=ctx.groups, + deformable_group=ctx.deform_groups, + im2col_step=cur_im2col_step) + + if ctx.needs_input_grad[2]: + grad_weight = torch.zeros_like(weight) + ext_module.deform_conv_backward_parameters( + input, + offset, + grad_output, + grad_weight, + ctx.bufs_[0], + ctx.bufs_[1], + kW=weight.size(3), + kH=weight.size(2), + dW=ctx.stride[1], + dH=ctx.stride[0], + padW=ctx.padding[1], + padH=ctx.padding[0], + dilationW=ctx.dilation[1], + dilationH=ctx.dilation[0], + group=ctx.groups, + deformable_group=ctx.deform_groups, + scale=1, + im2col_step=cur_im2col_step) + + return grad_input, grad_offset, grad_weight, \ + None, None, None, None, None, None, None + + @staticmethod + def _output_size(ctx, input, weight): + channels = weight.size(0) + output_size = (input.size(0), channels) + for d in range(input.dim() - 2): + in_size = input.size(d + 2) + pad = ctx.padding[d] + kernel = ctx.dilation[d] * (weight.size(d + 2) - 1) + 1 + stride_ = ctx.stride[d] + output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, ) + if not all(map(lambda s: s > 0, output_size)): + raise ValueError( + 'convolution input is too small (output would be ' + + 'x'.join(map(str, output_size)) + ')') + return output_size + + +deform_conv2d = DeformConv2dFunction.apply + + +class DeformConv2d(nn.Module): + r"""Deformable 2D convolution. + + Applies a deformable 2D convolution over an input signal composed of + several input planes. DeformConv2d was described in the paper + `Deformable Convolutional Networks + <https://arxiv.org/abs/1703.06211>`_ + + Note: + The argument ``im2col_step`` was added in version 1.3.17, which means + number of samples processed by the ``im2col_cuda_kernel`` per call. + It enables users to define ``batch_size`` and ``im2col_step`` more + flexibly and solved `issue mmcv#1440 + <https://github.com/open-mmlab/mmcv/issues/1440>`_. + + Args: + in_channels (int): Number of channels in the input image.
+ out_channels (int): Number of channels produced by the convolution. + kernel_size(int, tuple): Size of the convolving kernel. + stride(int, tuple): Stride of the convolution. Default: 1. + padding (int or tuple): Zero-padding added to both sides of the input. + Default: 0. + dilation (int or tuple): Spacing between kernel elements. Default: 1. + groups (int): Number of blocked connections from input channels + to output channels. Default: 1. + deform_groups (int): Number of deformable group partitions. + bias (bool): If True, adds a learnable bias to the output. + Default: False. + im2col_step (int): Number of samples processed by im2col_cuda_kernel + per call. It will work when ``batch_size`` > ``im2col_step``, but + ``batch_size`` must be divisible by ``im2col_step``. Default: 32. + `New in version 1.3.17.` + """ + + @deprecated_api_warning({'deformable_groups': 'deform_groups'}, + cls_name='DeformConv2d') + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: Union[int, Tuple[int, ...]], + stride: Union[int, Tuple[int, ...]] = 1, + padding: Union[int, Tuple[int, ...]] = 0, + dilation: Union[int, Tuple[int, ...]] = 1, + groups: int = 1, + deform_groups: int = 1, + bias: bool = False, + im2col_step: int = 32) -> None: + super(DeformConv2d, self).__init__() + + assert not bias, \ + f'bias={bias} is not supported in DeformConv2d.' + assert in_channels % groups == 0, \ + f'in_channels {in_channels} is not divisible by groups {groups}' + assert out_channels % groups == 0, \ + f'out_channels {out_channels} is not divisible by groups \ + {groups}' + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = _pair(stride) + self.padding = _pair(padding) + self.dilation = _pair(dilation) + self.groups = groups + self.deform_groups = deform_groups + self.im2col_step = im2col_step + # enable compatibility with nn.Conv2d + self.transposed = False + self.output_padding = _single(0) + + # only weight, no bias + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // self.groups, + *self.kernel_size)) + + self.reset_parameters() + + def reset_parameters(self): + # switch the initialization of `self.weight` to the standard kaiming + # method described in `Delving deep into rectifiers: Surpassing + # human-level performance on ImageNet classification` - He, K. et al. + # (2015), using a uniform distribution + nn.init.kaiming_uniform_(self.weight, nonlinearity='relu') + + def forward(self, x: Tensor, offset: Tensor) -> Tensor: + """Deformable Convolutional forward function. + + Args: + x (Tensor): Input feature, shape (B, C_in, H_in, W_in) + offset (Tensor): Offset for deformable convolution, shape + (B, deform_groups*kernel_size[0]*kernel_size[1]*2, + H_out, W_out), H_out, W_out are equal to the output's. + + An offset is like `[y0, x0, y1, x1, y2, x2, ..., y8, x8]`. + The spatial arrangement is like: + + .. code:: text + + (x0, y0) (x1, y1) (x2, y2) + (x3, y3) (x4, y4) (x5, y5) + (x6, y6) (x7, y7) (x8, y8) + + Returns: + Tensor: Output of the layer.
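        Example (an illustrative sketch added for clarity; the shapes and
        CUDA placement are assumptions, not part of the original code):

            >>> conv = DeformConv2d(3, 8, kernel_size=3, padding=1).cuda()
            >>> x = torch.randn(2, 3, 16, 16).cuda()
            >>> # one deform group -> 2 * 3 * 3 = 18 offset channels
            >>> offset = torch.randn(2, 18, 16, 16).cuda()
            >>> out = conv(x, offset)  # shape (2, 8, 16, 16)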
+ """ + # To fix an assert error in deform_conv_cuda.cpp:128 + # input image is smaller than kernel + input_pad = (x.size(2) < self.kernel_size[0]) or (x.size(3) < + self.kernel_size[1]) + if input_pad: + pad_h = max(self.kernel_size[0] - x.size(2), 0) + pad_w = max(self.kernel_size[1] - x.size(3), 0) + x = F.pad(x, (0, pad_w, 0, pad_h), 'constant', 0).contiguous() + offset = F.pad(offset, (0, pad_w, 0, pad_h), 'constant', 0) + offset = offset.contiguous() + out = deform_conv2d(x, offset, self.weight, self.stride, self.padding, + self.dilation, self.groups, self.deform_groups, + False, self.im2col_step) + if input_pad: + out = out[:, :, :out.size(2) - pad_h, :out.size(3) - + pad_w].contiguous() + return out + + def __repr__(self): + s = self.__class__.__name__ + s += f'(in_channels={self.in_channels},\n' + s += f'out_channels={self.out_channels},\n' + s += f'kernel_size={self.kernel_size},\n' + s += f'stride={self.stride},\n' + s += f'padding={self.padding},\n' + s += f'dilation={self.dilation},\n' + s += f'groups={self.groups},\n' + s += f'deform_groups={self.deform_groups},\n' + # bias is not supported in DeformConv2d. + s += 'bias=False)' + return s + + +@CONV_LAYERS.register_module('DCN') +class DeformConv2dPack(DeformConv2d): + """A Deformable Conv Encapsulation that acts as normal Conv layers. + + The offset tensor is like `[y0, x0, y1, x1, y2, x2, ..., y8, x8]`. + The spatial arrangement is like: + + .. code:: text + + (x0, y0) (x1, y1) (x2, y2) + (x3, y3) (x4, y4) (x5, y5) + (x6, y6) (x7, y7) (x8, y8) + + Args: + in_channels (int): Same as nn.Conv2d. + out_channels (int): Same as nn.Conv2d. + kernel_size (int or tuple[int]): Same as nn.Conv2d. + stride (int or tuple[int]): Same as nn.Conv2d. + padding (int or tuple[int]): Same as nn.Conv2d. + dilation (int or tuple[int]): Same as nn.Conv2d. + groups (int): Same as nn.Conv2d. + bias (bool or str): If specified as `auto`, it will be decided by the + norm_cfg. Bias will be set as True if norm_cfg is None, otherwise + False. + """ + + _version = 2 + + def __init__(self, *args, **kwargs): + super(DeformConv2dPack, self).__init__(*args, **kwargs) + self.conv_offset = nn.Conv2d( + self.in_channels, + self.deform_groups * 2 * self.kernel_size[0] * self.kernel_size[1], + kernel_size=self.kernel_size, + stride=_pair(self.stride), + padding=_pair(self.padding), + dilation=_pair(self.dilation), + bias=True) + self.init_offset() + + def init_offset(self): + self.conv_offset.weight.data.zero_() + self.conv_offset.bias.data.zero_() + + def forward(self, x): + offset = self.conv_offset(x) + return deform_conv2d(x, offset, self.weight, self.stride, self.padding, + self.dilation, self.groups, self.deform_groups, + False, self.im2col_step) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + version = local_metadata.get('version', None) + + if version is None or version < 2: + # the key is different in early versions + # In version < 2, DeformConvPack loads previous benchmark models. 
+ if (prefix + 'conv_offset.weight' not in state_dict + and prefix[:-1] + '_offset.weight' in state_dict): + state_dict[prefix + 'conv_offset.weight'] = state_dict.pop( + prefix[:-1] + '_offset.weight') + if (prefix + 'conv_offset.bias' not in state_dict + and prefix[:-1] + '_offset.bias' in state_dict): + state_dict[prefix + + 'conv_offset.bias'] = state_dict.pop(prefix[:-1] + + '_offset.bias') + + if version is not None and version > 1: + print_log( + f'DeformConv2dPack {prefix.rstrip(".")} is upgraded to ' + 'version 2.', + logger='root') + + super()._load_from_state_dict(state_dict, prefix, local_metadata, + strict, missing_keys, unexpected_keys, + error_msgs) diff --git a/mmcv/ops/focal_loss.py b/mmcv/ops/focal_loss.py new file mode 100644 index 0000000..763bc93 --- /dev/null +++ b/mmcv/ops/focal_loss.py @@ -0,0 +1,212 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', [ + 'sigmoid_focal_loss_forward', 'sigmoid_focal_loss_backward', + 'softmax_focal_loss_forward', 'softmax_focal_loss_backward' +]) + + +class SigmoidFocalLossFunction(Function): + + @staticmethod + def symbolic(g, input, target, gamma, alpha, weight, reduction): + return g.op( + 'mmcv::MMCVSigmoidFocalLoss', + input, + target, + gamma_f=gamma, + alpha_f=alpha, + weight_f=weight, + reduction_s=reduction) + + @staticmethod + def forward(ctx, + input, + target, + gamma=2.0, + alpha=0.25, + weight=None, + reduction='mean'): + + assert isinstance(target, (torch.LongTensor, torch.cuda.LongTensor)) + assert input.dim() == 2 + assert target.dim() == 1 + assert input.size(0) == target.size(0) + if weight is None: + weight = input.new_empty(0) + else: + assert weight.dim() == 1 + assert input.size(1) == weight.size(0) + ctx.reduction_dict = {'none': 0, 'mean': 1, 'sum': 2} + assert reduction in ctx.reduction_dict.keys() + + ctx.gamma = float(gamma) + ctx.alpha = float(alpha) + ctx.reduction = ctx.reduction_dict[reduction] + + output = input.new_zeros(input.size()) + + ext_module.sigmoid_focal_loss_forward( + input, target, weight, output, gamma=ctx.gamma, alpha=ctx.alpha) + if ctx.reduction == ctx.reduction_dict['mean']: + output = output.sum() / input.size(0) + elif ctx.reduction == ctx.reduction_dict['sum']: + output = output.sum() + ctx.save_for_backward(input, target, weight) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + input, target, weight = ctx.saved_tensors + + grad_input = input.new_zeros(input.size()) + + ext_module.sigmoid_focal_loss_backward( + input, + target, + weight, + grad_input, + gamma=ctx.gamma, + alpha=ctx.alpha) + + grad_input *= grad_output + if ctx.reduction == ctx.reduction_dict['mean']: + grad_input /= input.size(0) + return grad_input, None, None, None, None, None + + +sigmoid_focal_loss = SigmoidFocalLossFunction.apply + + +class SigmoidFocalLoss(nn.Module): + + def __init__(self, gamma, alpha, weight=None, reduction='mean'): + super(SigmoidFocalLoss, self).__init__() + self.gamma = gamma + self.alpha = alpha + self.register_buffer('weight', weight) + self.reduction = reduction + + def forward(self, input, target): + return sigmoid_focal_loss(input, target, self.gamma, self.alpha, + self.weight, self.reduction) + + def __repr__(self): + s = self.__class__.__name__ + s += f'(gamma={self.gamma}, ' + s += f'alpha={self.alpha}, ' + s += 
f'reduction={self.reduction})' + return s + + +class SoftmaxFocalLossFunction(Function): + + @staticmethod + def symbolic(g, input, target, gamma, alpha, weight, reduction): + return g.op( + 'mmcv::MMCVSoftmaxFocalLoss', + input, + target, + gamma_f=gamma, + alpha_f=alpha, + weight_f=weight, + reduction_s=reduction) + + @staticmethod + def forward(ctx, + input, + target, + gamma=2.0, + alpha=0.25, + weight=None, + reduction='mean'): + + assert isinstance(target, (torch.LongTensor, torch.cuda.LongTensor)) + assert input.dim() == 2 + assert target.dim() == 1 + assert input.size(0) == target.size(0) + if weight is None: + weight = input.new_empty(0) + else: + assert weight.dim() == 1 + assert input.size(1) == weight.size(0) + ctx.reduction_dict = {'none': 0, 'mean': 1, 'sum': 2} + assert reduction in ctx.reduction_dict.keys() + + ctx.gamma = float(gamma) + ctx.alpha = float(alpha) + ctx.reduction = ctx.reduction_dict[reduction] + + channel_stats, _ = torch.max(input, dim=1) + input_softmax = input - channel_stats.unsqueeze(1).expand_as(input) + input_softmax.exp_() + + channel_stats = input_softmax.sum(dim=1) + input_softmax /= channel_stats.unsqueeze(1).expand_as(input) + + output = input.new_zeros(input.size(0)) + ext_module.softmax_focal_loss_forward( + input_softmax, + target, + weight, + output, + gamma=ctx.gamma, + alpha=ctx.alpha) + + if ctx.reduction == ctx.reduction_dict['mean']: + output = output.sum() / input.size(0) + elif ctx.reduction == ctx.reduction_dict['sum']: + output = output.sum() + ctx.save_for_backward(input_softmax, target, weight) + return output + + @staticmethod + def backward(ctx, grad_output): + input_softmax, target, weight = ctx.saved_tensors + buff = input_softmax.new_zeros(input_softmax.size(0)) + grad_input = input_softmax.new_zeros(input_softmax.size()) + + ext_module.softmax_focal_loss_backward( + input_softmax, + target, + weight, + buff, + grad_input, + gamma=ctx.gamma, + alpha=ctx.alpha) + + grad_input *= grad_output + if ctx.reduction == ctx.reduction_dict['mean']: + grad_input /= input_softmax.size(0) + return grad_input, None, None, None, None, None + + +softmax_focal_loss = SoftmaxFocalLossFunction.apply + + +class SoftmaxFocalLoss(nn.Module): + + def __init__(self, gamma, alpha, weight=None, reduction='mean'): + super(SoftmaxFocalLoss, self).__init__() + self.gamma = gamma + self.alpha = alpha + self.register_buffer('weight', weight) + self.reduction = reduction + + def forward(self, input, target): + return softmax_focal_loss(input, target, self.gamma, self.alpha, + self.weight, self.reduction) + + def __repr__(self): + s = self.__class__.__name__ + s += f'(gamma={self.gamma}, ' + s += f'alpha={self.alpha}, ' + s += f'reduction={self.reduction})' + return s diff --git a/mmcv/ops/iou3d.py b/mmcv/ops/iou3d.py new file mode 100644 index 0000000..8c4cc82 --- /dev/null +++ b/mmcv/ops/iou3d.py @@ -0,0 +1,89 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext('_ext', [ + 'iou3d_boxes_iou_bev_forward', 'iou3d_nms_forward', + 'iou3d_nms_normal_forward' +]) + + +def boxes_iou_bev(boxes_a, boxes_b): + """Calculate boxes IoU in the Bird's Eye View. + + Args: + boxes_a (torch.Tensor): Input boxes a with shape (M, 5). + boxes_b (torch.Tensor): Input boxes b with shape (N, 5). + + Returns: + ans_iou (torch.Tensor): IoU result with shape (M, N). 
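    Example (an illustrative sketch added for clarity; the random boxes and
    CUDA placement are assumptions, not part of the original code):

        >>> boxes_a = torch.rand(8, 5).cuda()  # (x1, y1, x2, y2, ry)
        >>> boxes_b = torch.rand(6, 5).cuda()
        >>> iou = boxes_iou_bev(boxes_a, boxes_b)  # shape (8, 6)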
+ """ + ans_iou = boxes_a.new_zeros( + torch.Size((boxes_a.shape[0], boxes_b.shape[0]))) + + ext_module.iou3d_boxes_iou_bev_forward(boxes_a.contiguous(), + boxes_b.contiguous(), ans_iou) + + return ans_iou + + +def nms_bev(boxes, scores, thresh, pre_max_size=None, post_max_size=None): + """NMS function GPU implementation (for BEV boxes). The overlap of two + boxes for IoU calculation is defined as the exact overlapping area of the + two boxes. In this function, one can also set ``pre_max_size`` and + ``post_max_size``. + + Args: + boxes (torch.Tensor): Input boxes with the shape of [N, 5] + ([x1, y1, x2, y2, ry]). + scores (torch.Tensor): Scores of boxes with the shape of [N]. + thresh (float): Overlap threshold of NMS. + pre_max_size (int, optional): Max size of boxes before NMS. + Default: None. + post_max_size (int, optional): Max size of boxes after NMS. + Default: None. + + Returns: + torch.Tensor: Indexes after NMS. + """ + assert boxes.size(1) == 5, 'Input boxes shape should be [N, 5]' + order = scores.sort(0, descending=True)[1] + + if pre_max_size is not None: + order = order[:pre_max_size] + boxes = boxes[order].contiguous() + + keep = torch.zeros(boxes.size(0), dtype=torch.long) + num_out = torch.zeros(size=(), dtype=torch.long) + ext_module.iou3d_nms_forward( + boxes, keep, num_out, nms_overlap_thresh=thresh) + keep = order[keep[:num_out].cuda(boxes.device)].contiguous() + if post_max_size is not None: + keep = keep[:post_max_size] + return keep + + +def nms_normal_bev(boxes, scores, thresh): + """Normal NMS function GPU implementation (for BEV boxes). The overlap of + two boxes for IoU calculation is defined as the exact overlapping area of + the two boxes WITH their yaw angle set to 0. + + Args: + boxes (torch.Tensor): Input boxes with shape (N, 5). + scores (torch.Tensor): Scores of predicted boxes with shape (N). + thresh (float): Overlap threshold of NMS. + + Returns: + torch.Tensor: Remaining indices with scores in descending order. + """ + assert boxes.shape[1] == 5, 'Input boxes shape should be [N, 5]' + order = scores.sort(0, descending=True)[1] + + boxes = boxes[order].contiguous() + + keep = torch.zeros(boxes.size(0), dtype=torch.long) + num_out = torch.zeros(size=(), dtype=torch.long) + ext_module.iou3d_nms_normal_forward( + boxes, keep, num_out, nms_overlap_thresh=thresh) + return order[keep[:num_out].cuda(boxes.device)].contiguous() diff --git a/mmcv/ops/iou3d_det/__init__.py b/mmcv/ops/iou3d_det/__init__.py new file mode 100644 index 0000000..9c35fb7 --- /dev/null +++ b/mmcv/ops/iou3d_det/__init__.py @@ -0,0 +1,3 @@ +from .iou3d_utils import boxes_iou_bev, nms_gpu, nms_normal_gpu + +__all__ = ['boxes_iou_bev', 'nms_gpu', 'nms_normal_gpu'] diff --git a/mmcv/ops/iou3d_det/iou3d_utils.py b/mmcv/ops/iou3d_det/iou3d_utils.py new file mode 100644 index 0000000..6f36019 --- /dev/null +++ b/mmcv/ops/iou3d_det/iou3d_utils.py @@ -0,0 +1,71 @@ +import torch + +from . import iou3d_cuda + + +def boxes_iou_bev(boxes_a, boxes_b): + """Calculate boxes IoU in the bird view. + + Args: + boxes_a (torch.Tensor): Input boxes a with shape (M, 5). + boxes_b (torch.Tensor): Input boxes b with shape (N, 5). + + Returns: + ans_iou (torch.Tensor): IoU result with shape (M, N). + """ + ans_iou = boxes_a.new_zeros( + torch.Size((boxes_a.shape[0], boxes_b.shape[0]))) + + iou3d_cuda.boxes_iou_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), + ans_iou) + + return ans_iou + + +def nms_gpu(boxes, scores, thresh, pre_maxsize=None, post_max_size=None): + """Nms function with gpu implementation. 
+ + Args: + boxes (torch.Tensor): Input boxes with the shape of [N, 5] + ([x1, y1, x2, y2, ry]). + scores (torch.Tensor): Scores of boxes with the shape of [N]. + thresh (float): Overlap threshold of NMS. + pre_maxsize (int): Max size of boxes before nms. Default: None. + post_max_size (int): Max size of boxes after nms. Default: None. + + Returns: + torch.Tensor: Indexes after nms. + """ + order = scores.sort(0, descending=True)[1] + + if pre_maxsize is not None: + order = order[:pre_maxsize] + boxes = boxes[order].contiguous() + + keep = torch.zeros(boxes.size(0), dtype=torch.long) + num_out = iou3d_cuda.nms_gpu(boxes, keep, thresh, boxes.device.index) + keep = order[keep[:num_out].cuda(boxes.device)].contiguous() + if post_max_size is not None: + keep = keep[:post_max_size] + return keep + + +def nms_normal_gpu(boxes, scores, thresh): + """Normal non maximum suppression on GPU. + + Args: + boxes (torch.Tensor): Input boxes with shape (N, 5). + scores (torch.Tensor): Scores of predicted boxes with shape (N). + thresh (float): Threshold of non maximum suppression. + + Returns: + torch.Tensor: Remaining indices with scores in descending order. + """ + order = scores.sort(0, descending=True)[1] + + boxes = boxes[order].contiguous() + + keep = torch.zeros(boxes.size(0), dtype=torch.long) + num_out = iou3d_cuda.nms_normal_gpu(boxes, keep, thresh, + boxes.device.index) + return order[keep[:num_out].cuda(boxes.device)].contiguous() diff --git a/mmcv/ops/iou3d_det/src/iou3d.cpp b/mmcv/ops/iou3d_det/src/iou3d.cpp new file mode 100644 index 0000000..25a5cd9 --- /dev/null +++ b/mmcv/ops/iou3d_det/src/iou3d.cpp @@ -0,0 +1,210 @@ +// Modified from +// https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/iou3d_nms/src/iou3d_nms.cpp + +/* +3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others) +Written by Shaoshuai Shi +All Rights Reserved 2019-2020.
+*/ + +#include <cuda.h> +#include <cuda_runtime_api.h> +#include <torch/extension.h> +#include <torch/serialize/tensor.h> + +#include <cstdint> +#include <vector> + +#define CHECK_CUDA(x) \ + TORCH_CHECK(x.device().is_cuda(), #x, " must be a CUDAtensor ") +#define CHECK_CONTIGUOUS(x) \ + TORCH_CHECK(x.is_contiguous(), #x, " must be contiguous ") +#define CHECK_INPUT(x) \ + CHECK_CUDA(x); \ + CHECK_CONTIGUOUS(x) + +#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) + +#define CHECK_ERROR(ans) \ + { gpuAssert((ans), __FILE__, __LINE__); } +inline void gpuAssert(cudaError_t code, const char *file, int line, + bool abort = true) { + if (code != cudaSuccess) { + fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, + line); + if (abort) exit(code); + } +} + +const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8; + +void boxesoverlapLauncher(const int num_a, const float *boxes_a, + const int num_b, const float *boxes_b, + float *ans_overlap); +void boxesioubevLauncher(const int num_a, const float *boxes_a, const int num_b, + const float *boxes_b, float *ans_iou); +void nmsLauncher(const float *boxes, unsigned long long *mask, int boxes_num, + float nms_overlap_thresh); +void nmsNormalLauncher(const float *boxes, unsigned long long *mask, + int boxes_num, float nms_overlap_thresh); + +int boxes_overlap_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, + at::Tensor ans_overlap) { + // params boxes_a: (N, 5) [x1, y1, x2, y2, ry] + // params boxes_b: (M, 5) + // params ans_overlap: (N, M) + + CHECK_INPUT(boxes_a); + CHECK_INPUT(boxes_b); + CHECK_INPUT(ans_overlap); + + int num_a = boxes_a.size(0); + int num_b = boxes_b.size(0); + + const float *boxes_a_data = boxes_a.data_ptr<float>(); + const float *boxes_b_data = boxes_b.data_ptr<float>(); + float *ans_overlap_data = ans_overlap.data_ptr<float>(); + + boxesoverlapLauncher(num_a, boxes_a_data, num_b, boxes_b_data, + ans_overlap_data); + + return 1; +} + +int boxes_iou_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, + at::Tensor ans_iou) { + // params boxes_a: (N, 5) [x1, y1, x2, y2, ry] + // params boxes_b: (M, 5) + // params ans_overlap: (N, M) + + CHECK_INPUT(boxes_a); + CHECK_INPUT(boxes_b); + CHECK_INPUT(ans_iou); + + int num_a = boxes_a.size(0); + int num_b = boxes_b.size(0); + + const float *boxes_a_data = boxes_a.data_ptr<float>(); + const float *boxes_b_data = boxes_b.data_ptr<float>(); + float *ans_iou_data = ans_iou.data_ptr<float>(); + + boxesioubevLauncher(num_a, boxes_a_data, num_b, boxes_b_data, ans_iou_data); + + return 1; +}
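// Explanatory note (added for clarity, not part of the original source):
// nms_gpu below is the classic bitmask NMS. With THREADS_PER_BLOCK_NMS = 64,
// col_blocks = DIVUP(boxes_num, 64) and the CUDA launcher fills one row of
// col_blocks 64-bit words per box: bit j of word k in row i is set when
// IoU(box_i, box_{k*64+j}) exceeds nms_overlap_thresh. For example,
// boxes_num = 1000 gives col_blocks = 16, i.e. a 1000 x 16 word mask. The
// host loop then walks boxes in descending score order and keeps box i only
// if no previously kept box has set its bit in remv_cpu, OR-ing row i into
// remv_cpu whenever box i is kept.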
+int nms_gpu(at::Tensor boxes, at::Tensor keep, + float nms_overlap_thresh, int device_id) { + // params boxes: (N, 5) [x1, y1, x2, y2, ry] + // params keep: (N) + + CHECK_INPUT(boxes); + CHECK_CONTIGUOUS(keep); + cudaSetDevice(device_id); + + int boxes_num = boxes.size(0); + const float *boxes_data = boxes.data_ptr<float>(); + int64_t *keep_data = keep.data_ptr<int64_t>(); + + const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); + + unsigned long long *mask_data = NULL; + CHECK_ERROR(cudaMalloc((void **)&mask_data, + boxes_num * col_blocks * sizeof(unsigned long long))); + nmsLauncher(boxes_data, mask_data, boxes_num, nms_overlap_thresh); + + // unsigned long long mask_cpu[boxes_num * col_blocks]; + // unsigned long long *mask_cpu = new unsigned long long [boxes_num * + // col_blocks]; + std::vector<unsigned long long> mask_cpu(boxes_num * col_blocks); + + // printf("boxes_num=%d, col_blocks=%d\n", boxes_num, col_blocks); + CHECK_ERROR(cudaMemcpy(&mask_cpu[0], mask_data, + boxes_num * col_blocks * sizeof(unsigned long long), + cudaMemcpyDeviceToHost)); + + cudaFree(mask_data); + + unsigned long long *remv_cpu = new unsigned long long[col_blocks](); + + int num_to_keep = 0; + + for (int i = 0; i < boxes_num; i++) { + int nblock = i / THREADS_PER_BLOCK_NMS; + int inblock = i % THREADS_PER_BLOCK_NMS; + + if (!(remv_cpu[nblock] & (1ULL << inblock))) { + keep_data[num_to_keep++] = i; + unsigned long long *p = &mask_cpu[0] + i * col_blocks; + for (int j = nblock; j < col_blocks; j++) { + remv_cpu[j] |= p[j]; + } + } + } + delete[] remv_cpu; + if (cudaSuccess != cudaGetLastError()) printf("Error!\n"); + + return num_to_keep; +} + +int nms_normal_gpu(at::Tensor boxes, at::Tensor keep, + float nms_overlap_thresh, int device_id) { + // params boxes: (N, 5) [x1, y1, x2, y2, ry] + // params keep: (N) + + CHECK_INPUT(boxes); + CHECK_CONTIGUOUS(keep); + cudaSetDevice(device_id); + + int boxes_num = boxes.size(0); + const float *boxes_data = boxes.data_ptr<float>(); + int64_t *keep_data = keep.data_ptr<int64_t>(); + + const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); + + unsigned long long *mask_data = NULL; + CHECK_ERROR(cudaMalloc((void **)&mask_data, + boxes_num * col_blocks * sizeof(unsigned long long))); + nmsNormalLauncher(boxes_data, mask_data, boxes_num, nms_overlap_thresh); + + // unsigned long long mask_cpu[boxes_num * col_blocks]; + // unsigned long long *mask_cpu = new unsigned long long [boxes_num * + // col_blocks]; + std::vector<unsigned long long> mask_cpu(boxes_num * col_blocks); + + // printf("boxes_num=%d, col_blocks=%d\n", boxes_num, col_blocks); + CHECK_ERROR(cudaMemcpy(&mask_cpu[0], mask_data, + boxes_num * col_blocks * sizeof(unsigned long long), + cudaMemcpyDeviceToHost)); + + cudaFree(mask_data); + + unsigned long long *remv_cpu = new unsigned long long[col_blocks](); + + int num_to_keep = 0; + + for (int i = 0; i < boxes_num; i++) { + int nblock = i / THREADS_PER_BLOCK_NMS; + int inblock = i % THREADS_PER_BLOCK_NMS; + + if (!(remv_cpu[nblock] & (1ULL << inblock))) { + keep_data[num_to_keep++] = i; + unsigned long long *p = &mask_cpu[0] + i * col_blocks; + for (int j = nblock; j < col_blocks; j++) { + remv_cpu[j] |= p[j]; + } + } + } + delete[] remv_cpu; + if (cudaSuccess != cudaGetLastError()) printf("Error!\n"); + + return num_to_keep; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("boxes_overlap_bev_gpu", &boxes_overlap_bev_gpu, + "oriented boxes overlap"); + m.def("boxes_iou_bev_gpu", &boxes_iou_bev_gpu, "oriented boxes iou"); + m.def("nms_gpu", &nms_gpu, "oriented nms gpu"); + m.def("nms_normal_gpu", &nms_normal_gpu, "nms gpu"); +} diff --git a/mmcv/ops/iou3d_det/src/iou3d_kernel.cu b/mmcv/ops/iou3d_det/src/iou3d_kernel.cu new file mode 100644 index 0000000..861aea3 --- /dev/null +++ b/mmcv/ops/iou3d_det/src/iou3d_kernel.cu @@ -0,0 +1,439 @@ +// Modified from +// https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/iou3d_nms/src/iou3d_nms_kernel.cu + +/* +3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others) +Written by Shaoshuai Shi +All Rights Reserved 2019-2020.
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("boxes_overlap_bev_gpu", &boxes_overlap_bev_gpu,
+        "oriented boxes overlap");
+  m.def("boxes_iou_bev_gpu", &boxes_iou_bev_gpu, "oriented boxes iou");
+  m.def("nms_gpu", &nms_gpu, "oriented nms gpu");
+  m.def("nms_normal_gpu", &nms_normal_gpu, "nms gpu");
+}
diff --git a/mmcv/ops/iou3d_det/src/iou3d_kernel.cu b/mmcv/ops/iou3d_det/src/iou3d_kernel.cu
new file mode 100644
index 0000000..861aea3
--- /dev/null
+++ b/mmcv/ops/iou3d_det/src/iou3d_kernel.cu
@@ -0,0 +1,439 @@
+// Modified from
+// https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/iou3d_nms/src/iou3d_nms_kernel.cu
+
+/*
+3D IoU Calculation and Rotated NMS (modified from 2D NMS written by others)
+Written by Shaoshuai Shi
+All Rights Reserved 2019-2020.
+*/
+
+#include <stdio.h>
+#define THREADS_PER_BLOCK 16
+#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
+
+//#define DEBUG
+const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8;
+__device__ const float EPS = 1e-8;
+
+struct Point {
+  float x, y;
+  __device__ Point() {}
+  __device__ Point(double _x, double _y) { x = _x, y = _y; }
+
+  __device__ void set(float _x, float _y) {
+    x = _x;
+    y = _y;
+  }
+
+  __device__ Point operator+(const Point &b) const {
+    return Point(x + b.x, y + b.y);
+  }
+
+  __device__ Point operator-(const Point &b) const {
+    return Point(x - b.x, y - b.y);
+  }
+};
+
+__device__ inline float cross(const Point &a, const Point &b) {
+  return a.x * b.y - a.y * b.x;
+}
+
+__device__ inline float cross(const Point &p1, const Point &p2,
+                              const Point &p0) {
+  return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y);
+}
+
+__device__ int check_rect_cross(const Point &p1, const Point &p2,
+                                const Point &q1, const Point &q2) {
+  int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) &&
+            min(q1.x, q2.x) <= max(p1.x, p2.x) &&
+            min(p1.y, p2.y) <= max(q1.y, q2.y) &&
+            min(q1.y, q2.y) <= max(p1.y, p2.y);
+  return ret;
+}
+
+__device__ inline int check_in_box2d(const float *box, const Point &p) {
+  // params: box (5) [x1, y1, x2, y2, angle]
+  const float MARGIN = 1e-5;
+
+  float center_x = (box[0] + box[2]) / 2;
+  float center_y = (box[1] + box[3]) / 2;
+  // rotate the point in the opposite direction of the box
+  float angle_cos = cos(-box[4]), angle_sin = sin(-box[4]);
+  float rot_x =
+      (p.x - center_x) * angle_cos + (p.y - center_y) * angle_sin + center_x;
+  float rot_y =
+      -(p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos + center_y;
+#ifdef DEBUG
+  printf("box: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", box[0], box[1], box[2],
+         box[3], box[4]);
+  printf(
+      "center: (%.3f, %.3f), cossin(%.3f, %.3f), src(%.3f, %.3f), rot(%.3f, "
+      "%.3f)\n",
+      center_x, center_y, angle_cos, angle_sin, p.x, p.y, rot_x, rot_y);
+#endif
+  return (rot_x > box[0] - MARGIN && rot_x < box[2] + MARGIN &&
+          rot_y > box[1] - MARGIN && rot_y < box[3] + MARGIN);
+}
+
+__device__ inline int intersection(const Point &p1, const Point &p0,
+                                   const Point &q1, const Point &q0,
+                                   Point &ans) {
+  // fast exclusion
+  if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;
+
+  // check cross standing
+  float s1 = cross(q0, p1, p0);
+  float s2 = cross(p1, q1, p0);
+  float s3 = cross(p0, q1, q0);
+  float s4 = cross(q1, p1, q0);
+
+  if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;
+
+  // calculate the intersection of the two lines
+  float s5 = cross(q1, p1, p0);
+  if (fabs(s5 - s1) > EPS) {
+    ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
+    ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
+  } else {
+    float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y;
+    float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y;
+    float D = a0 * b1 - a1 * b0;
+
+    ans.x = (b0 * c1 - b1 * c0) / D;
+    ans.y = (a1 * c0 - a0 * c1) / D;
+  }
+
+  return 1;
+}
+
+__device__ inline void rotate_around_center(const Point &center,
+                                            const float angle_cos,
+                                            const float angle_sin, Point &p) {
+  float new_x =
+      (p.x - center.x) * angle_cos + (p.y - center.y) * angle_sin + center.x;
+  float new_y =
+      -(p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y;
+  p.set(new_x, new_y);
+}
+
+__device__ inline int point_cmp(const Point &a, const Point &b,
+                                const Point &center) {
+  return atan2(a.y - center.y, a.x - center.x) >
+         atan2(b.y - center.y, b.x - center.x);
+}
+
+__device__ inline float box_overlap(const float *box_a,
const float *box_b) { + // params: box_a (5) [x1, y1, x2, y2, angle] + // params: box_b (5) [x1, y1, x2, y2, angle] + + float a_x1 = box_a[0], a_y1 = box_a[1], a_x2 = box_a[2], a_y2 = box_a[3], + a_angle = box_a[4]; + float b_x1 = box_b[0], b_y1 = box_b[1], b_x2 = box_b[2], b_y2 = box_b[3], + b_angle = box_b[4]; + + Point center_a((a_x1 + a_x2) / 2, (a_y1 + a_y2) / 2); + Point center_b((b_x1 + b_x2) / 2, (b_y1 + b_y2) / 2); +#ifdef DEBUG + printf( + "a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", + a_x1, a_y1, a_x2, a_y2, a_angle, b_x1, b_y1, b_x2, b_y2, b_angle); + printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y, + center_b.x, center_b.y); +#endif + + Point box_a_corners[5]; + box_a_corners[0].set(a_x1, a_y1); + box_a_corners[1].set(a_x2, a_y1); + box_a_corners[2].set(a_x2, a_y2); + box_a_corners[3].set(a_x1, a_y2); + + Point box_b_corners[5]; + box_b_corners[0].set(b_x1, b_y1); + box_b_corners[1].set(b_x2, b_y1); + box_b_corners[2].set(b_x2, b_y2); + box_b_corners[3].set(b_x1, b_y2); + + // get oriented corners + float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); + float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); + + for (int k = 0; k < 4; k++) { +#ifdef DEBUG + printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, + box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, + box_b_corners[k].y); +#endif + rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); + rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); +#ifdef DEBUG + printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, + box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); +#endif + } + + box_a_corners[4] = box_a_corners[0]; + box_b_corners[4] = box_b_corners[0]; + + // get intersection of lines + Point cross_points[16]; + Point poly_center; + int cnt = 0, flag = 0; + + poly_center.set(0, 0); + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + flag = intersection(box_a_corners[i + 1], box_a_corners[i], + box_b_corners[j + 1], box_b_corners[j], + cross_points[cnt]); + if (flag) { + poly_center = poly_center + cross_points[cnt]; + cnt++; + } + } + } + + // check corners + for (int k = 0; k < 4; k++) { + if (check_in_box2d(box_a, box_b_corners[k])) { + poly_center = poly_center + box_b_corners[k]; + cross_points[cnt] = box_b_corners[k]; + cnt++; + } + if (check_in_box2d(box_b, box_a_corners[k])) { + poly_center = poly_center + box_a_corners[k]; + cross_points[cnt] = box_a_corners[k]; + cnt++; + } + } + + poly_center.x /= cnt; + poly_center.y /= cnt; + + // sort the points of polygon + Point temp; + for (int j = 0; j < cnt - 1; j++) { + for (int i = 0; i < cnt - j - 1; i++) { + if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) { + temp = cross_points[i]; + cross_points[i] = cross_points[i + 1]; + cross_points[i + 1] = temp; + } + } + } + +#ifdef DEBUG + printf("cnt=%d\n", cnt); + for (int i = 0; i < cnt; i++) { + printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x, + cross_points[i].y); + } +#endif + + // get the overlap areas + float area = 0; + for (int k = 0; k < cnt - 1; k++) { + area += cross(cross_points[k] - cross_points[0], + cross_points[k + 1] - cross_points[0]); + } + + return fabs(area) / 2.0; +} + +__device__ inline float iou_bev(const float *box_a, const float *box_b) { + // params: box_a (5) [x1, y1, x2, y2, angle] + // params: box_b (5) [x1, y1, x2, y2, angle] + float sa = (box_a[2] - box_a[0]) * (box_a[3] - 
box_a[1]); + float sb = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1]); + float s_overlap = box_overlap(box_a, box_b); + return s_overlap / fmaxf(sa + sb - s_overlap, EPS); +} + +__global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a, + const int num_b, const float *boxes_b, + float *ans_overlap) { + const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; + const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; + + if (a_idx >= num_a || b_idx >= num_b) { + return; + } + const float *cur_box_a = boxes_a + a_idx * 5; + const float *cur_box_b = boxes_b + b_idx * 5; + float s_overlap = box_overlap(cur_box_a, cur_box_b); + ans_overlap[a_idx * num_b + b_idx] = s_overlap; +} + +__global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a, + const int num_b, const float *boxes_b, + float *ans_iou) { + const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; + const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; + + if (a_idx >= num_a || b_idx >= num_b) { + return; + } + + const float *cur_box_a = boxes_a + a_idx * 5; + const float *cur_box_b = boxes_b + b_idx * 5; + float cur_iou_bev = iou_bev(cur_box_a, cur_box_b); + ans_iou[a_idx * num_b + b_idx] = cur_iou_bev; +} + +__global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh, + const float *boxes, unsigned long long *mask) { + // params: boxes (N, 5) [x1, y1, x2, y2, ry] + // params: mask (N, N/THREADS_PER_BLOCK_NMS) + + const int row_start = blockIdx.y; + const int col_start = blockIdx.x; + + // if (row_start > col_start) return; + + const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, + THREADS_PER_BLOCK_NMS); + const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, + THREADS_PER_BLOCK_NMS); + + __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 5]; + + if (threadIdx.x < col_size) { + block_boxes[threadIdx.x * 5 + 0] = + boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 0]; + block_boxes[threadIdx.x * 5 + 1] = + boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 1]; + block_boxes[threadIdx.x * 5 + 2] = + boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 2]; + block_boxes[threadIdx.x * 5 + 3] = + boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 3]; + block_boxes[threadIdx.x * 5 + 4] = + boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 4]; + } + __syncthreads(); + + if (threadIdx.x < row_size) { + const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; + const float *cur_box = boxes + cur_box_idx * 5; + + int i = 0; + unsigned long long t = 0; + int start = 0; + if (row_start == col_start) { + start = threadIdx.x + 1; + } + for (i = start; i < col_size; i++) { + if (iou_bev(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { + t |= 1ULL << i; + } + } + const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); + mask[cur_box_idx * col_blocks + col_start] = t; + } +} + +__device__ inline float iou_normal(float const *const a, float const *const b) { + float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]); + float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]); + float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f); + float interS = width * height; + float Sa = (a[2] - a[0]) * (a[3] - a[1]); + float Sb = (b[2] - b[0]) * (b[3] - b[1]); + return interS / fmaxf(Sa + Sb - interS, EPS); +} + +__global__ void nms_normal_kernel(const int boxes_num, + const float nms_overlap_thresh, + const float *boxes, + unsigned 
long long *mask) {
+  // params: boxes (N, 5) [x1, y1, x2, y2, ry]
+  // params: mask (N, N/THREADS_PER_BLOCK_NMS)
+
+  const int row_start = blockIdx.y;
+  const int col_start = blockIdx.x;
+
+  // if (row_start > col_start) return;
+
+  const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
+                             THREADS_PER_BLOCK_NMS);
+  const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS,
+                             THREADS_PER_BLOCK_NMS);
+
+  __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 5];
+
+  if (threadIdx.x < col_size) {
+    block_boxes[threadIdx.x * 5 + 0] =
+        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 0];
+    block_boxes[threadIdx.x * 5 + 1] =
+        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 1];
+    block_boxes[threadIdx.x * 5 + 2] =
+        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 2];
+    block_boxes[threadIdx.x * 5 + 3] =
+        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 3];
+    block_boxes[threadIdx.x * 5 + 4] =
+        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 4];
+  }
+  __syncthreads();
+
+  if (threadIdx.x < row_size) {
+    const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
+    const float *cur_box = boxes + cur_box_idx * 5;
+
+    int i = 0;
+    unsigned long long t = 0;
+    int start = 0;
+    if (row_start == col_start) {
+      start = threadIdx.x + 1;
+    }
+    for (i = start; i < col_size; i++) {
+      if (iou_normal(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
+        t |= 1ULL << i;
+      }
+    }
+    const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
+    mask[cur_box_idx * col_blocks + col_start] = t;
+  }
+}
+
+void boxesoverlapLauncher(const int num_a, const float *boxes_a,
+                          const int num_b, const float *boxes_b,
+                          float *ans_overlap) {
+  dim3 blocks(
+      DIVUP(num_b, THREADS_PER_BLOCK),
+      DIVUP(num_a, THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
+  dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
+
+  boxes_overlap_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b,
+                                            ans_overlap);
+#ifdef DEBUG
+  cudaDeviceSynchronize();  // for using printf in kernel function
+#endif
+}
+
+void boxesioubevLauncher(const int num_a, const float *boxes_a, const int num_b,
+                         const float *boxes_b, float *ans_iou) {
+  dim3 blocks(
+      DIVUP(num_b, THREADS_PER_BLOCK),
+      DIVUP(num_a, THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
+  dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
+
+  boxes_iou_bev_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b,
+                                            ans_iou);
+}
+
+// Worked example (added for clarity): with boxes_num = 150 and
+// THREADS_PER_BLOCK_NMS = 64, DIVUP(150, 64) = 3, so the NMS kernels below
+// launch a 3x3 grid of 64-thread blocks; block (row, col) fills the 64-bit
+// suppression words of its 64 row-boxes against its 64 col-boxes using the
+// shared-memory copy of the column boxes.
+void nmsLauncher(const float *boxes, unsigned long long *mask, int boxes_num,
+                 float nms_overlap_thresh) {
+  dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
+              DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
+  dim3 threads(THREADS_PER_BLOCK_NMS);
+  nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes, mask);
+}
+
+void nmsNormalLauncher(const float *boxes, unsigned long long *mask,
+                       int boxes_num, float nms_overlap_thresh) {
+  dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
+              DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
+  dim3 threads(THREADS_PER_BLOCK_NMS);
+  nms_normal_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes,
+                                         mask);
+}
diff --git a/mmcv/ops/masked_conv.py b/mmcv/ops/masked_conv.py
new file mode 100644
index 0000000..cd514cc
--- /dev/null
+++ b/mmcv/ops/masked_conv.py
@@ -0,0 +1,111 @@
+# Copyright (c) OpenMMLab. All rights reserved.
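+# Editor's note: a minimal usage sketch, not part of the original file.
+# MaskedConv2d only computes outputs at spatial positions where `mask` is
+# nonzero (a CUDA-only op; batch size and mask batch must both be 1), and
+# falls back to a plain Conv2d when mask is None:
+#
+#   conv = MaskedConv2d(3, 8, kernel_size=3, padding=1).cuda()
+#   x = torch.randn(1, 3, 32, 32).cuda()
+#   mask = (torch.rand(1, 32, 32) > 0.5).float().cuda()
+#   out = conv(x, mask)   # -> (1, 8, 32, 32), zeros where mask == 0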
+import math
+
+import torch
+import torch.nn as nn
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+from torch.nn.modules.utils import _pair
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext(
+    '_ext', ['masked_im2col_forward', 'masked_col2im_forward'])
+
+
+class MaskedConv2dFunction(Function):
+
+    @staticmethod
+    def symbolic(g, features, mask, weight, bias, padding, stride):
+        return g.op(
+            'mmcv::MMCVMaskedConv2d',
+            features,
+            mask,
+            weight,
+            bias,
+            padding_i=padding,
+            stride_i=stride)
+
+    @staticmethod
+    def forward(ctx, features, mask, weight, bias, padding=0, stride=1):
+        assert mask.dim() == 3 and mask.size(0) == 1
+        assert features.dim() == 4 and features.size(0) == 1
+        assert features.size()[2:] == mask.size()[1:]
+        pad_h, pad_w = _pair(padding)
+        stride_h, stride_w = _pair(stride)
+        if stride_h != 1 or stride_w != 1:
+            raise ValueError(
+                'Stride must be 1 in masked_conv2d currently.')
+        out_channel, in_channel, kernel_h, kernel_w = weight.size()
+
+        batch_size = features.size(0)
+        out_h = int(
+            math.floor((features.size(2) + 2 * pad_h -
+                        (kernel_h - 1) - 1) / stride_h + 1))
+        # the width uses kernel_w (the original erroneously used kernel_h)
+        out_w = int(
+            math.floor((features.size(3) + 2 * pad_w -
+                        (kernel_w - 1) - 1) / stride_w + 1))
+        mask_inds = torch.nonzero(mask[0] > 0, as_tuple=False)
+        output = features.new_zeros(batch_size, out_channel, out_h, out_w)
+        if mask_inds.numel() > 0:
+            mask_h_idx = mask_inds[:, 0].contiguous()
+            mask_w_idx = mask_inds[:, 1].contiguous()
+            data_col = features.new_zeros(in_channel * kernel_h * kernel_w,
+                                          mask_inds.size(0))
+            ext_module.masked_im2col_forward(
+                features,
+                mask_h_idx,
+                mask_w_idx,
+                data_col,
+                kernel_h=kernel_h,
+                kernel_w=kernel_w,
+                pad_h=pad_h,
+                pad_w=pad_w)
+
+            # bias + weight @ data_col (the deprecated positional
+            # addmm(1, bias, 1, weight, data_col) form spelled explicitly)
+            masked_output = torch.addmm(bias[:, None],
+                                        weight.view(out_channel, -1), data_col)
+            ext_module.masked_col2im_forward(
+                masked_output,
+                mask_h_idx,
+                mask_w_idx,
+                output,
+                height=out_h,
+                width=out_w,
+                channels=out_channel)
+        return output
+
+    @staticmethod
+    @once_differentiable
+    def backward(ctx, grad_output):
+        # one gradient per forward input:
+        # (features, mask, weight, bias, padding, stride)
+        return (None, ) * 6
+
+
+masked_conv2d = MaskedConv2dFunction.apply
+
+
+class MaskedConv2d(nn.Conv2d):
+    """A MaskedConv2d which inherits the official Conv2d.
+
+    The masked forward does not implement the backward function and only
+    supports stride 1 currently.
+    """
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 stride=1,
+                 padding=0,
+                 dilation=1,
+                 groups=1,
+                 bias=True):
+        super(MaskedConv2d,
+              self).__init__(in_channels, out_channels, kernel_size, stride,
+                             padding, dilation, groups, bias)
+
+    def forward(self, input, mask=None):
+        if mask is None:  # fall back to the normal Conv2d
+            return super(MaskedConv2d, self).forward(input)
+        else:
+            return masked_conv2d(input, mask, self.weight, self.bias,
+                                 self.padding)
diff --git a/mmcv/ops/modulated_deform_conv.py b/mmcv/ops/modulated_deform_conv.py
new file mode 100644
index 0000000..2681bc8
--- /dev/null
+++ b/mmcv/ops/modulated_deform_conv.py
@@ -0,0 +1,282 @@
+# Copyright (c) OpenMMLab. All rights reserved.
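+# Editor's note: a minimal usage sketch, not part of the original file.
+# ModulatedDeformConv2dPack (registered below as 'DCNv2') predicts offsets
+# and modulation masks from its own input, so it drops in like a plain conv:
+#
+#   dcn = ModulatedDeformConv2dPack(16, 32, kernel_size=3, padding=1,
+#                                   deform_groups=1).cuda()
+#   y = dcn(torch.randn(2, 16, 28, 28).cuda())   # -> (2, 32, 28, 28)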
+import math + +import torch +import torch.nn as nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair, _single + +from mmcv.utils import deprecated_api_warning +from ..models import CONV_LAYERS +from ..utils import ext_loader, print_log + +ext_module = ext_loader.load_ext( + '_ext', + ['modulated_deform_conv_forward', 'modulated_deform_conv_backward']) + + +class ModulatedDeformConv2dFunction(Function): + + @staticmethod + def symbolic(g, input, offset, mask, weight, bias, stride, padding, + dilation, groups, deform_groups): + input_tensors = [input, offset, mask, weight] + if bias is not None: + input_tensors.append(bias) + return g.op( + 'mmcv::MMCVModulatedDeformConv2d', + *input_tensors, + stride_i=stride, + padding_i=padding, + dilation_i=dilation, + groups_i=groups, + deform_groups_i=deform_groups) + + @staticmethod + def forward(ctx, + input, + offset, + mask, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + deform_groups=1): + if input is not None and input.dim() != 4: + raise ValueError( + f'Expected 4D tensor as input, got {input.dim()}D tensor \ + instead.') + ctx.stride = _pair(stride) + ctx.padding = _pair(padding) + ctx.dilation = _pair(dilation) + ctx.groups = groups + ctx.deform_groups = deform_groups + ctx.with_bias = bias is not None + if not ctx.with_bias: + bias = input.new_empty(0) # fake tensor + # When pytorch version >= 1.6.0, amp is adopted for fp16 mode; + # amp won't cast the type of model (float32), but "offset" is cast + # to float16 by nn.Conv2d automatically, leading to the type + # mismatch with input (when it is float32) or weight. + # The flag for whether to use fp16 or amp is the type of "offset", + # we cast weight and input to temporarily support fp16 and amp + # whatever the pytorch version is. 
+ input = input.type_as(offset) + weight = weight.type_as(input) + ctx.save_for_backward(input, offset, mask, weight, bias) + output = input.new_empty( + ModulatedDeformConv2dFunction._output_size(ctx, input, weight)) + ctx._bufs = [input.new_empty(0), input.new_empty(0)] + ext_module.modulated_deform_conv_forward( + input, + weight, + bias, + ctx._bufs[0], + offset, + mask, + output, + ctx._bufs[1], + kernel_h=weight.size(2), + kernel_w=weight.size(3), + stride_h=ctx.stride[0], + stride_w=ctx.stride[1], + pad_h=ctx.padding[0], + pad_w=ctx.padding[1], + dilation_h=ctx.dilation[0], + dilation_w=ctx.dilation[1], + group=ctx.groups, + deformable_group=ctx.deform_groups, + with_bias=ctx.with_bias) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + input, offset, mask, weight, bias = ctx.saved_tensors + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + grad_mask = torch.zeros_like(mask) + grad_weight = torch.zeros_like(weight) + grad_bias = torch.zeros_like(bias) + grad_output = grad_output.contiguous() + ext_module.modulated_deform_conv_backward( + input, + weight, + bias, + ctx._bufs[0], + offset, + mask, + ctx._bufs[1], + grad_input, + grad_weight, + grad_bias, + grad_offset, + grad_mask, + grad_output, + kernel_h=weight.size(2), + kernel_w=weight.size(3), + stride_h=ctx.stride[0], + stride_w=ctx.stride[1], + pad_h=ctx.padding[0], + pad_w=ctx.padding[1], + dilation_h=ctx.dilation[0], + dilation_w=ctx.dilation[1], + group=ctx.groups, + deformable_group=ctx.deform_groups, + with_bias=ctx.with_bias) + if not ctx.with_bias: + grad_bias = None + + return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias, + None, None, None, None, None) + + @staticmethod + def _output_size(ctx, input, weight): + channels = weight.size(0) + output_size = (input.size(0), channels) + for d in range(input.dim() - 2): + in_size = input.size(d + 2) + pad = ctx.padding[d] + kernel = ctx.dilation[d] * (weight.size(d + 2) - 1) + 1 + stride_ = ctx.stride[d] + output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, ) + if not all(map(lambda s: s > 0, output_size)): + raise ValueError( + 'convolution input is too small (output would be ' + + 'x'.join(map(str, output_size)) + ')') + return output_size + + +modulated_deform_conv2d = ModulatedDeformConv2dFunction.apply + + +class ModulatedDeformConv2d(nn.Module): + + @deprecated_api_warning({'deformable_groups': 'deform_groups'}, + cls_name='ModulatedDeformConv2d') + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + deform_groups=1, + bias=True): + super(ModulatedDeformConv2d, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = _pair(stride) + self.padding = _pair(padding) + self.dilation = _pair(dilation) + self.groups = groups + self.deform_groups = deform_groups + # enable compatibility with nn.Conv2d + self.transposed = False + self.output_padding = _single(0) + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // groups, + *self.kernel_size)) + if bias: + self.bias = nn.Parameter(torch.Tensor(out_channels)) + else: + self.register_parameter('bias', None) + self.init_weights() + + def init_weights(self): + n = self.in_channels + for k in self.kernel_size: + n *= k + stdv = 1. 
/ math.sqrt(n) + self.weight.data.uniform_(-stdv, stdv) + if self.bias is not None: + self.bias.data.zero_() + + def forward(self, x, offset, mask): + return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias, + self.stride, self.padding, + self.dilation, self.groups, + self.deform_groups) + + +@CONV_LAYERS.register_module('DCNv2') +class ModulatedDeformConv2dPack(ModulatedDeformConv2d): + """A ModulatedDeformable Conv Encapsulation that acts as normal Conv + layers. + + Args: + in_channels (int): Same as nn.Conv2d. + out_channels (int): Same as nn.Conv2d. + kernel_size (int or tuple[int]): Same as nn.Conv2d. + stride (int): Same as nn.Conv2d, while tuple is not supported. + padding (int): Same as nn.Conv2d, while tuple is not supported. + dilation (int): Same as nn.Conv2d, while tuple is not supported. + groups (int): Same as nn.Conv2d. + bias (bool or str): If specified as `auto`, it will be decided by the + norm_cfg. Bias will be set as True if norm_cfg is None, otherwise + False. + """ + + _version = 2 + + def __init__(self, *args, **kwargs): + super(ModulatedDeformConv2dPack, self).__init__(*args, **kwargs) + self.conv_offset = nn.Conv2d( + self.in_channels, + self.deform_groups * 3 * self.kernel_size[0] * self.kernel_size[1], + kernel_size=self.kernel_size, + stride=self.stride, + padding=self.padding, + dilation=self.dilation, + bias=True) + self.init_weights() + + def init_weights(self): + super(ModulatedDeformConv2dPack, self).init_weights() + if hasattr(self, 'conv_offset'): + self.conv_offset.weight.data.zero_() + self.conv_offset.bias.data.zero_() + + def forward(self, x): + out = self.conv_offset(x) + o1, o2, mask = torch.chunk(out, 3, dim=1) + offset = torch.cat((o1, o2), dim=1) + mask = torch.sigmoid(mask) + return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias, + self.stride, self.padding, + self.dilation, self.groups, + self.deform_groups) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + version = local_metadata.get('version', None) + + if version is None or version < 2: + # the key is different in early versions + # In version < 2, ModulatedDeformConvPack + # loads previous benchmark models. + if (prefix + 'conv_offset.weight' not in state_dict + and prefix[:-1] + '_offset.weight' in state_dict): + state_dict[prefix + 'conv_offset.weight'] = state_dict.pop( + prefix[:-1] + '_offset.weight') + if (prefix + 'conv_offset.bias' not in state_dict + and prefix[:-1] + '_offset.bias' in state_dict): + state_dict[prefix + + 'conv_offset.bias'] = state_dict.pop(prefix[:-1] + + '_offset.bias') + + if version is not None and version > 1: + print_log( + f'ModulatedDeformConvPack {prefix.rstrip(".")} is upgraded to ' + 'version 2.', + logger='root') + + super()._load_from_state_dict(state_dict, prefix, local_metadata, + strict, missing_keys, unexpected_keys, + error_msgs) diff --git a/mmcv/ops/multi_scale_deform_attn.py b/mmcv/ops/multi_scale_deform_attn.py new file mode 100644 index 0000000..527ce70 --- /dev/null +++ b/mmcv/ops/multi_scale_deform_attn.py @@ -0,0 +1,358 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
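+# Editor's note: a minimal usage sketch, not part of the original file.
+# With batch_first=False, query is (num_query, bs, embed_dims); on CPU the
+# module takes the pure-PyTorch fallback path defined below:
+#
+#   attn = MultiScaleDeformableAttention(embed_dims=256, num_levels=2)
+#   query = torch.randn(100, 2, 256)
+#   value = torch.randn(64 * 64 + 32 * 32, 2, 256)
+#   spatial_shapes = torch.tensor([[64, 64], [32, 32]])
+#   level_start_index = torch.tensor([0, 64 * 64])
+#   ref = torch.rand(2, 100, 2, 2)  # (bs, num_query, num_levels, 2) in [0, 1]
+#   out = attn(query, value=value, reference_points=ref,
+#              spatial_shapes=spatial_shapes,
+#              level_start_index=level_start_index)   # -> (100, 2, 256)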
+import math
+import warnings
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.autograd.function import Function, once_differentiable
+
+from mmcv import deprecated_api_warning
+from mmcv.models.utils.weight_init import constant_init, xavier_init
+from mmcv.models.bricks.registry import ATTENTION
+from mmcv.models.backbones.base_module import BaseModule
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext(
+    '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward'])
+
+
+class MultiScaleDeformableAttnFunction(Function):
+
+    @staticmethod
+    def forward(ctx, value, value_spatial_shapes, value_level_start_index,
+                sampling_locations, attention_weights, im2col_step):
+        """GPU version of multi-scale deformable attention.
+
+        Args:
+            value (Tensor): The value has shape
+                (bs, num_keys, num_heads, embed_dims//num_heads)
+            value_spatial_shapes (Tensor): Spatial shape of
+                each feature map, has shape (num_levels, 2),
+                last dimension 2 represents (h, w)
+            sampling_locations (Tensor): The location of sampling points,
+                has shape
+                (bs, num_queries, num_heads, num_levels, num_points, 2),
+                the last dimension 2 represents (x, y).
+            attention_weights (Tensor): The weight of sampling points used
+                when calculating the attention, has shape
+                (bs, num_queries, num_heads, num_levels, num_points).
+            im2col_step (int): The step used in image to column.
+
+        Returns:
+            Tensor: has shape (bs, num_queries, embed_dims)
+        """
+
+        ctx.im2col_step = im2col_step
+        output = ext_module.ms_deform_attn_forward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            im2col_step=ctx.im2col_step)
+        ctx.save_for_backward(value, value_spatial_shapes,
+                              value_level_start_index, sampling_locations,
+                              attention_weights)
+        return output
+
+    @staticmethod
+    @once_differentiable
+    def backward(ctx, grad_output):
+        """GPU version of the backward function.
+
+        Args:
+            grad_output (Tensor): Gradient of the output tensor of forward.
+
+        Returns:
+            Tuple[Tensor]: Gradients of the input tensors in forward.
+        """
+        value, value_spatial_shapes, value_level_start_index,\
+            sampling_locations, attention_weights = ctx.saved_tensors
+        grad_value = torch.zeros_like(value)
+        grad_sampling_loc = torch.zeros_like(sampling_locations)
+        grad_attn_weight = torch.zeros_like(attention_weights)
+
+        ext_module.ms_deform_attn_backward(
+            value,
+            value_spatial_shapes,
+            value_level_start_index,
+            sampling_locations,
+            attention_weights,
+            grad_output.contiguous(),
+            grad_value,
+            grad_sampling_loc,
+            grad_attn_weight,
+            im2col_step=ctx.im2col_step)
+
+        return grad_value, None, None, \
+            grad_sampling_loc, grad_attn_weight, None
+
+
+def multi_scale_deformable_attn_pytorch(value, value_spatial_shapes,
+                                        sampling_locations, attention_weights):
+    """CPU (pure-PyTorch) version of multi-scale deformable attention.
+
+    Args:
+        value (Tensor): The value has shape
+            (bs, num_keys, num_heads, embed_dims//num_heads)
+        value_spatial_shapes (Tensor): Spatial shape of
+            each feature map, has shape (num_levels, 2),
+            last dimension 2 represents (h, w)
+        sampling_locations (Tensor): The location of sampling points,
+            has shape
+            (bs, num_queries, num_heads, num_levels, num_points, 2),
+            the last dimension 2 represents (x, y).
+        attention_weights (Tensor): The weight of sampling points used
+            when calculating the attention, has shape
+            (bs, num_queries, num_heads, num_levels, num_points).
+
+    Returns:
+        Tensor: has shape (bs, num_queries, embed_dims)
+    """
+
+    bs, _, num_heads, embed_dims = value.shape
+    _, num_queries, num_heads, num_levels, num_points, _ =\
+        sampling_locations.shape
+    value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes],
+                             dim=1)
+    sampling_grids = 2 * sampling_locations - 1
+    sampling_value_list = []
+    for level, (H_, W_) in enumerate(value_spatial_shapes):
+        # bs, H_*W_, num_heads, embed_dims ->
+        # bs, H_*W_, num_heads*embed_dims ->
+        # bs, num_heads*embed_dims, H_*W_ ->
+        # bs*num_heads, embed_dims, H_, W_
+        value_l_ = value_list[level].flatten(2).transpose(1, 2).reshape(
+            bs * num_heads, embed_dims, H_, W_)
+        # bs, num_queries, num_heads, num_points, 2 ->
+        # bs, num_heads, num_queries, num_points, 2 ->
+        # bs*num_heads, num_queries, num_points, 2
+        sampling_grid_l_ = sampling_grids[:, :, :,
+                                          level].transpose(1, 2).flatten(0, 1)
+        # bs*num_heads, embed_dims, num_queries, num_points
+        sampling_value_l_ = F.grid_sample(
+            value_l_,
+            sampling_grid_l_,
+            mode='bilinear',
+            padding_mode='zeros',
+            align_corners=False)
+        sampling_value_list.append(sampling_value_l_)
+    # (bs, num_queries, num_heads, num_levels, num_points) ->
+    # (bs, num_heads, num_queries, num_levels, num_points) ->
+    # (bs, num_heads, 1, num_queries, num_levels*num_points)
+    attention_weights = attention_weights.transpose(1, 2).reshape(
+        bs * num_heads, 1, num_queries, num_levels * num_points)
+    output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) *
+              attention_weights).sum(-1).view(bs, num_heads * embed_dims,
                                              num_queries)
+    return output.transpose(1, 2).contiguous()
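+
+
+# Editor's note: a shape walk-through of the pure-PyTorch path above (a hedged
+# illustration, not from the original source). With bs=2, num_queries=100,
+# num_heads=8, embed_dims=256 (so 32 dims per head), two levels of sizes
+# (64, 64) and (32, 32), and num_points=4:
+#
+#   value:                (2, 64*64 + 32*32, 8, 32)
+#   value_spatial_shapes: tensor([[64, 64], [32, 32]])
+#   sampling_locations:   (2, 100, 8, 2, 4, 2), normalized to [0, 1]
+#   attention_weights:    (2, 100, 8, 2, 4)
+#   returned output:      (2, 100, 256)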
+
+
+@ATTENTION.register_module()
+class MultiScaleDeformableAttention(BaseModule):
+    """An attention module used in Deformable-DETR.
+
+    `Deformable DETR: Deformable Transformers for End-to-End Object Detection.
+    <https://arxiv.org/abs/2010.04159>`_.
+
+    Args:
+        embed_dims (int): The embedding dimension of Attention.
+            Default: 256.
+        num_heads (int): Parallel attention heads. Default: 8.
+        num_levels (int): The number of feature maps used in
+            Attention. Default: 4.
+        num_points (int): The number of sampling points for
+            each query in each head. Default: 4.
+        im2col_step (int): The step used in image_to_column.
+            Default: 64.
+        dropout (float): A Dropout layer on `inp_identity`.
+            Default: 0.1.
+        batch_first (bool): Key, Query and Value are shape of
+            (batch, n, embed_dim)
+            or (n, batch, embed_dim). Default: False.
+        norm_cfg (dict): Config dict for the normalization layer.
+            Default: None.
+        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
+            Default: None.
+ """ + + def __init__(self, + embed_dims=256, + num_heads=8, + num_levels=4, + num_points=4, + im2col_step=64, + dropout=0.1, + batch_first=False, + norm_cfg=None, + init_cfg=None): + super().__init__(init_cfg) + if embed_dims % num_heads != 0: + raise ValueError(f'embed_dims must be divisible by num_heads, ' + f'but got {embed_dims} and {num_heads}') + dim_per_head = embed_dims // num_heads + self.norm_cfg = norm_cfg + self.dropout = nn.Dropout(dropout) + self.batch_first = batch_first + + # you'd better set dim_per_head to a power of 2 + # which is more efficient in the CUDA implementation + def _is_power_of_2(n): + if (not isinstance(n, int)) or (n < 0): + raise ValueError( + 'invalid input for _is_power_of_2: {} (type: {})'.format( + n, type(n))) + return (n & (n - 1) == 0) and n != 0 + + if not _is_power_of_2(dim_per_head): + warnings.warn( + "You'd better set embed_dims in " + 'MultiScaleDeformAttention to make ' + 'the dimension of each attention head a power of 2 ' + 'which is more efficient in our CUDA implementation.') + + self.im2col_step = im2col_step + self.embed_dims = embed_dims + self.num_levels = num_levels + self.num_heads = num_heads + self.num_points = num_points + self.sampling_offsets = nn.Linear( + embed_dims, num_heads * num_levels * num_points * 2) + self.attention_weights = nn.Linear(embed_dims, + num_heads * num_levels * num_points) + self.value_proj = nn.Linear(embed_dims, embed_dims) + self.output_proj = nn.Linear(embed_dims, embed_dims) + self.init_weights() + + def init_weights(self): + """Default initialization for Parameters of Module.""" + constant_init(self.sampling_offsets, 0.) + thetas = torch.arange( + self.num_heads, + dtype=torch.float32) * (2.0 * math.pi / self.num_heads) + grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) + grid_init = (grid_init / + grid_init.abs().max(-1, keepdim=True)[0]).view( + self.num_heads, 1, 1, + 2).repeat(1, self.num_levels, self.num_points, 1) + for i in range(self.num_points): + grid_init[:, :, i, :] *= i + 1 + + self.sampling_offsets.bias.data = grid_init.view(-1) + constant_init(self.attention_weights, val=0., bias=0.) + xavier_init(self.value_proj, distribution='uniform', bias=0.) + xavier_init(self.output_proj, distribution='uniform', bias=0.) + self._is_init = True + + @deprecated_api_warning({'residual': 'identity'}, + cls_name='MultiScaleDeformableAttention') + def forward(self, + query, + key=None, + value=None, + identity=None, + query_pos=None, + key_padding_mask=None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + **kwargs): + """Forward Function of MultiScaleDeformAttention. + + Args: + query (Tensor): Query of Transformer with shape + (num_query, bs, embed_dims). + key (Tensor): The key tensor with shape + `(num_key, bs, embed_dims)`. + value (Tensor): The value tensor with shape + `(num_key, bs, embed_dims)`. + identity (Tensor): The tensor used for addition, with the + same shape as `query`. Default None. If None, + `query` will be used. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + key_pos (Tensor): The positional encoding for `key`. Default + None. + reference_points (Tensor): The normalized reference + points with shape (bs, num_query, num_levels, 2), + all elements is range in [0, 1], top-left (0,0), + bottom-right (1, 1), including padding area. + or (N, Length_{query}, num_levels, 4), add + additional two dimensions is (w, h) to + form reference boxes. 
+ key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_key]. + spatial_shapes (Tensor): Spatial shape of features in + different levels. With shape (num_levels, 2), + last dimension represents (h, w). + level_start_index (Tensor): The start index of each level. + A tensor has shape ``(num_levels, )`` and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + + Returns: + Tensor: forwarded results with shape [num_query, bs, embed_dims]. + """ + + if value is None: + value = query + + if identity is None: + identity = query + if query_pos is not None: + query = query + query_pos + if not self.batch_first: + # change to (bs, num_query ,embed_dims) + query = query.permute(1, 0, 2) + value = value.permute(1, 0, 2) + + bs, num_query, _ = query.shape + bs, num_value, _ = value.shape + assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value + + value = self.value_proj(value) + if key_padding_mask is not None: + value = value.masked_fill(key_padding_mask[..., None], 0.0) + value = value.view(bs, num_value, self.num_heads, -1) + sampling_offsets = self.sampling_offsets(query).view( + bs, num_query, self.num_heads, self.num_levels, self.num_points, 2) + attention_weights = self.attention_weights(query).view( + bs, num_query, self.num_heads, self.num_levels * self.num_points) + attention_weights = attention_weights.softmax(-1) + + attention_weights = attention_weights.view(bs, num_query, + self.num_heads, + self.num_levels, + self.num_points) + if reference_points.shape[-1] == 2: + offset_normalizer = torch.stack( + [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) + sampling_locations = reference_points[:, :, None, :, None, :] \ + + sampling_offsets \ + / offset_normalizer[None, None, None, :, None, :] + elif reference_points.shape[-1] == 4: + sampling_locations = reference_points[:, :, None, :, None, :2] \ + + sampling_offsets / self.num_points \ + * reference_points[:, :, None, :, None, 2:] \ + * 0.5 + else: + raise ValueError( + f'Last dim of reference_points must be' + f' 2 or 4, but get {reference_points.shape[-1]} instead.') + if torch.cuda.is_available() and value.is_cuda: + output = MultiScaleDeformableAttnFunction.apply( + value, spatial_shapes, level_start_index, sampling_locations, + attention_weights, self.im2col_step) + else: + output = multi_scale_deformable_attn_pytorch( + value, spatial_shapes, sampling_locations, attention_weights) + + output = self.output_proj(output) + + if not self.batch_first: + # (num_query, bs ,embed_dims) + output = output.permute(1, 0, 2) + + return self.dropout(output) + identity diff --git a/mmcv/ops/nms.py b/mmcv/ops/nms.py new file mode 100644 index 0000000..ed9835e --- /dev/null +++ b/mmcv/ops/nms.py @@ -0,0 +1,388 @@ +import os + +import numpy as np +import torch + +from mmcv.utils import deprecated_api_warning +from ..utils import ext_loader + +ext_module = ext_loader.load_ext( + '_ext', ['nms', 'softnms', 'nms_match', 'nms_rotated']) + + +# This function is modified from: https://github.com/pytorch/vision/ +class NMSop(torch.autograd.Function): + + @staticmethod + def forward(ctx, bboxes, scores, iou_threshold, offset, score_threshold, + max_num): + is_filtering_by_score = score_threshold > 0 + if is_filtering_by_score: + valid_mask = scores > score_threshold + bboxes, scores = bboxes[valid_mask], scores[valid_mask] + valid_inds = torch.nonzero( + valid_mask, as_tuple=False).squeeze(dim=1) + + inds = ext_module.nms( + bboxes, scores, iou_threshold=float(iou_threshold), offset=offset) + + if max_num > 0: + 
inds = inds[:max_num] + if is_filtering_by_score: + inds = valid_inds[inds] + return inds + + @staticmethod + def symbolic(g, bboxes, scores, iou_threshold, offset, score_threshold, + max_num): + from ..onnx import is_custom_op_loaded + has_custom_op = is_custom_op_loaded() + # TensorRT nms plugin is aligned with original nms in ONNXRuntime + is_trt_backend = os.environ.get('ONNX_BACKEND') == 'MMCVTensorRT' + if has_custom_op and (not is_trt_backend): + return g.op( + 'mmcv::NonMaxSuppression', + bboxes, + scores, + iou_threshold_f=float(iou_threshold), + offset_i=int(offset)) + else: + from torch.onnx.symbolic_opset9 import select, squeeze, unsqueeze + from ..onnx.onnx_utils.symbolic_helper import _size_helper + + boxes = unsqueeze(g, bboxes, 0) + scores = unsqueeze(g, unsqueeze(g, scores, 0), 0) + + if max_num > 0: + max_num = g.op( + 'Constant', + value_t=torch.tensor(max_num, dtype=torch.long)) + else: + dim = g.op('Constant', value_t=torch.tensor(0)) + max_num = _size_helper(g, bboxes, dim) + max_output_per_class = max_num + iou_threshold = g.op( + 'Constant', + value_t=torch.tensor([iou_threshold], dtype=torch.float)) + score_threshold = g.op( + 'Constant', + value_t=torch.tensor([score_threshold], dtype=torch.float)) + nms_out = g.op('NonMaxSuppression', boxes, scores, + max_output_per_class, iou_threshold, + score_threshold) + return squeeze( + g, + select( + g, nms_out, 1, + g.op( + 'Constant', + value_t=torch.tensor([2], dtype=torch.long))), 1) + + +class SoftNMSop(torch.autograd.Function): + + @staticmethod + def forward(ctx, boxes, scores, iou_threshold, sigma, min_score, method, + offset): + dets = boxes.new_empty((boxes.size(0), 5), device='cpu') + inds = ext_module.softnms( + boxes.cpu(), + scores.cpu(), + dets.cpu(), + iou_threshold=float(iou_threshold), + sigma=float(sigma), + min_score=float(min_score), + method=int(method), + offset=int(offset)) + return dets, inds + + @staticmethod + def symbolic(g, boxes, scores, iou_threshold, sigma, min_score, method, + offset): + from packaging import version + assert version.parse(torch.__version__) >= version.parse('1.7.0') + nms_out = g.op( + 'mmcv::SoftNonMaxSuppression', + boxes, + scores, + iou_threshold_f=float(iou_threshold), + sigma_f=float(sigma), + min_score_f=float(min_score), + method_i=int(method), + offset_i=int(offset), + outputs=2) + return nms_out + + +@deprecated_api_warning({'iou_thr': 'iou_threshold'}) +def nms(boxes, scores, iou_threshold, offset=0, score_threshold=0, max_num=-1): + """Dispatch to either CPU or GPU NMS implementations. + + The input can be either torch tensor or numpy array. GPU NMS will be used + if the input is gpu tensor, otherwise CPU NMS + will be used. The returned type will always be the same as inputs. + + Arguments: + boxes (torch.Tensor or np.ndarray): boxes in shape (N, 4). + scores (torch.Tensor or np.ndarray): scores in shape (N, ). + iou_threshold (float): IoU threshold for NMS. + offset (int, 0 or 1): boxes' width or height is (x2 - x1 + offset). + score_threshold (float): score threshold for NMS. + max_num (int): maximum number of boxes after NMS. + + Returns: + tuple: kept dets(boxes and scores) and indice, which is always the \ + same data type as the input. 
+ + Example: + >>> boxes = np.array([[49.1, 32.4, 51.0, 35.9], + >>> [49.3, 32.9, 51.0, 35.3], + >>> [49.2, 31.8, 51.0, 35.4], + >>> [35.1, 11.5, 39.1, 15.7], + >>> [35.6, 11.8, 39.3, 14.2], + >>> [35.3, 11.5, 39.9, 14.5], + >>> [35.2, 11.7, 39.7, 15.7]], dtype=np.float32) + >>> scores = np.array([0.9, 0.9, 0.5, 0.5, 0.5, 0.4, 0.3],\ + dtype=np.float32) + >>> iou_threshold = 0.6 + >>> dets, inds = nms(boxes, scores, iou_threshold) + >>> assert len(inds) == len(dets) == 3 + """ + assert isinstance(boxes, (torch.Tensor, np.ndarray)) + assert isinstance(scores, (torch.Tensor, np.ndarray)) + is_numpy = False + if isinstance(boxes, np.ndarray): + is_numpy = True + boxes = torch.from_numpy(boxes) + if isinstance(scores, np.ndarray): + scores = torch.from_numpy(scores) + assert boxes.size(1) == 4 + assert boxes.size(0) == scores.size(0) + assert offset in (0, 1) + + + inds = NMSop.apply(boxes, scores, iou_threshold, offset, + score_threshold, max_num) + dets = torch.cat((boxes[inds], scores[inds].reshape(-1, 1)), dim=1) + if is_numpy: + dets = dets.cpu().numpy() + inds = inds.cpu().numpy() + return dets, inds + + +@deprecated_api_warning({'iou_thr': 'iou_threshold'}) +def soft_nms(boxes, + scores, + iou_threshold=0.3, + sigma=0.5, + min_score=1e-3, + method='linear', + offset=0): + """Dispatch to only CPU Soft NMS implementations. + + The input can be either a torch tensor or numpy array. + The returned type will always be the same as inputs. + + Arguments: + boxes (torch.Tensor or np.ndarray): boxes in shape (N, 4). + scores (torch.Tensor or np.ndarray): scores in shape (N, ). + iou_threshold (float): IoU threshold for NMS. + sigma (float): hyperparameter for gaussian method + min_score (float): score filter threshold + method (str): either 'linear' or 'gaussian' + offset (int, 0 or 1): boxes' width or height is (x2 - x1 + offset). + + Returns: + tuple: kept dets(boxes and scores) and indice, which is always the \ + same data type as the input. + + Example: + >>> boxes = np.array([[4., 3., 5., 3.], + >>> [4., 3., 5., 4.], + >>> [3., 1., 3., 1.], + >>> [3., 1., 3., 1.], + >>> [3., 1., 3., 1.], + >>> [3., 1., 3., 1.]], dtype=np.float32) + >>> scores = np.array([0.9, 0.9, 0.5, 0.5, 0.4, 0.0], dtype=np.float32) + >>> iou_threshold = 0.6 + >>> dets, inds = soft_nms(boxes, scores, iou_threshold, sigma=0.5) + >>> assert len(inds) == len(dets) == 5 + """ + + assert isinstance(boxes, (torch.Tensor, np.ndarray)) + assert isinstance(scores, (torch.Tensor, np.ndarray)) + is_numpy = False + if isinstance(boxes, np.ndarray): + is_numpy = True + boxes = torch.from_numpy(boxes) + if isinstance(scores, np.ndarray): + scores = torch.from_numpy(scores) + assert boxes.size(1) == 4 + assert boxes.size(0) == scores.size(0) + assert offset in (0, 1) + method_dict = {'naive': 0, 'linear': 1, 'gaussian': 2} + assert method in method_dict.keys() + + + dets, inds = SoftNMSop.apply(boxes.cpu(), scores.cpu(), + float(iou_threshold), float(sigma), + float(min_score), method_dict[method], + int(offset)) + + dets = dets[:inds.size(0)] + + if is_numpy: + dets = dets.cpu().numpy() + inds = inds.cpu().numpy() + return dets, inds + else: + return dets.to(device=boxes.device), inds.to(device=boxes.device) + + +def batched_nms(boxes, scores, idxs, nms_cfg, class_agnostic=False): + """Performs non-maximum suppression in a batched fashion. + + Modified from https://github.com/pytorch/vision/blob + /505cd6957711af790211896d32b40291bea1bc21/torchvision/ops/boxes.py#L39. 
+ In order to perform NMS independently per class, we add an offset to all + the boxes. The offset is dependent only on the class idx, and is large + enough so that boxes from different classes do not overlap. + + Arguments: + boxes (torch.Tensor): boxes in shape (N, 4). + scores (torch.Tensor): scores in shape (N, ). + idxs (torch.Tensor): each index value correspond to a bbox cluster, + and NMS will not be applied between elements of different idxs, + shape (N, ). + nms_cfg (dict): specify nms type and other parameters like iou_thr. + Possible keys includes the following. + + - iou_thr (float): IoU threshold used for NMS. + - split_thr (float): threshold number of boxes. In some cases the + number of boxes is large (e.g., 200k). To avoid OOM during + training, the users could set `split_thr` to a small value. + If the number of boxes is greater than the threshold, it will + perform NMS on each group of boxes separately and sequentially. + Defaults to 10000. + class_agnostic (bool): if true, nms is class agnostic, + i.e. IoU thresholding happens over all boxes, + regardless of the predicted class. + + Returns: + tuple: kept dets and indice. + """ + nms_cfg_ = nms_cfg.copy() + class_agnostic = nms_cfg_.pop('class_agnostic', class_agnostic) + if class_agnostic: + boxes_for_nms = boxes + else: + max_coordinate = boxes.max() + offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes)) + boxes_for_nms = boxes + offsets[:, None] + + nms_type = nms_cfg_.pop('type', 'nms') + nms_op = eval(nms_type) + + split_thr = nms_cfg_.pop('split_thr', 10000) + # Won't split to multiple nms nodes when exporting to onnx + if boxes_for_nms.shape[0] < split_thr or torch.onnx.is_in_onnx_export(): + dets, keep = nms_op(boxes_for_nms, scores, **nms_cfg_) + boxes = boxes[keep] + # -1 indexing works abnormal in TensorRT + # This assumes `dets` has 5 dimensions where + # the last dimension is score. + # TODO: more elegant way to handle the dimension issue. + # Some type of nms would reweight the score, such as SoftNMS + scores = dets[:, 4] + else: + max_num = nms_cfg_.pop('max_num', -1) + total_mask = scores.new_zeros(scores.size(), dtype=torch.bool) + # Some type of nms would reweight the score, such as SoftNMS + scores_after_nms = scores.new_zeros(scores.size()) + for id in torch.unique(idxs): + mask = (idxs == id).nonzero(as_tuple=False).view(-1) + dets, keep = nms_op(boxes_for_nms[mask], scores[mask], **nms_cfg_) + total_mask[mask[keep]] = True + scores_after_nms[mask[keep]] = dets[:, -1] + keep = total_mask.nonzero(as_tuple=False).view(-1) + + scores, inds = scores_after_nms[keep].sort(descending=True) + keep = keep[inds] + boxes = boxes[keep] + + if max_num > 0: + keep = keep[:max_num] + boxes = boxes[:max_num] + scores = scores[:max_num] + + return torch.cat([boxes, scores[:, None]], -1), keep + + +def nms_match(dets, iou_threshold): + """Matched dets into different groups by NMS. + + NMS match is Similar to NMS but when a bbox is suppressed, nms match will + record the indice of suppressed bbox and form a group with the indice of + kept bbox. In each group, indice is sorted as score order. + + Arguments: + dets (torch.Tensor | np.ndarray): Det boxes with scores, shape (N, 5). + iou_thr (float): IoU thresh for NMS. + + Returns: + List[torch.Tensor | np.ndarray]: The outer list corresponds different + matched group, the inner Tensor corresponds the indices for a group + in score order. 
+ """ + if dets.shape[0] == 0: + matched = [] + else: + assert dets.shape[-1] == 5, 'inputs dets.shape should be (N, 5), ' \ + f'but get {dets.shape}' + if isinstance(dets, torch.Tensor): + dets_t = dets.detach().cpu() + else: + dets_t = torch.from_numpy(dets) + indata_list = [dets_t] + indata_dict = {'iou_threshold': float(iou_threshold)} + matched = ext_module.nms_match(*indata_list, **indata_dict) + + if isinstance(dets, torch.Tensor): + return [dets.new_tensor(m, dtype=torch.long) for m in matched] + else: + return [np.array(m, dtype=np.int) for m in matched] + + +def nms_rotated(dets, scores, iou_threshold, labels=None): + """Performs non-maximum suppression (NMS) on the rotated boxes according to + their intersection-over-union (IoU). + + Rotated NMS iteratively removes lower scoring rotated boxes which have an + IoU greater than iou_threshold with another (higher scoring) rotated box. + + Args: + boxes (Tensor): Rotated boxes in shape (N, 5). They are expected to \ + be in (x_ctr, y_ctr, width, height, angle_radian) format. + scores (Tensor): scores in shape (N, ). + iou_threshold (float): IoU thresh for NMS. + labels (Tensor): boxes' label in shape (N,). + + Returns: + tuple: kept dets(boxes and scores) and indice, which is always the \ + same data type as the input. + """ + if dets.shape[0] == 0: + return dets, None + multi_label = labels is not None + if multi_label: + dets_wl = torch.cat((dets, labels.unsqueeze(1)), 1) + else: + dets_wl = dets + _, order = scores.sort(0, descending=True) + dets_sorted = dets_wl.index_select(0, order) + + keep_inds = ext_module.nms_rotated(dets_wl, scores, order, dets_sorted, + iou_threshold, multi_label) + dets = torch.cat((dets[keep_inds], scores[keep_inds].reshape(-1, 1)), + dim=1) + return dets, keep_inds diff --git a/mmcv/ops/roi_align.py b/mmcv/ops/roi_align.py new file mode 100644 index 0000000..0755aef --- /dev/null +++ b/mmcv/ops/roi_align.py @@ -0,0 +1,223 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +import torch.nn as nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair + +from ..utils import deprecated_api_warning, ext_loader + +ext_module = ext_loader.load_ext('_ext', + ['roi_align_forward', 'roi_align_backward']) + + +class RoIAlignFunction(Function): + + @staticmethod + def symbolic(g, input, rois, output_size, spatial_scale, sampling_ratio, + pool_mode, aligned): + from ..onnx import is_custom_op_loaded + has_custom_op = is_custom_op_loaded() + if has_custom_op: + return g.op( + 'mmcv::MMCVRoiAlign', + input, + rois, + output_height_i=output_size[0], + output_width_i=output_size[1], + spatial_scale_f=spatial_scale, + sampling_ratio_i=sampling_ratio, + mode_s=pool_mode, + aligned_i=aligned) + else: + from torch.onnx.symbolic_opset9 import sub, squeeze + from torch.onnx.symbolic_helper import _slice_helper + from torch.onnx import TensorProtoDataType + # batch_indices = rois[:, 0].long() + batch_indices = _slice_helper( + g, rois, axes=[1], starts=[0], ends=[1]) + batch_indices = squeeze(g, batch_indices, 1) + batch_indices = g.op( + 'Cast', batch_indices, to_i=TensorProtoDataType.INT64) + # rois = rois[:, 1:] + rois = _slice_helper(g, rois, axes=[1], starts=[1], ends=[5]) + if aligned: + # rois -= 0.5/spatial_scale + aligned_offset = g.op( + 'Constant', + value_t=torch.tensor([0.5 / spatial_scale], + dtype=torch.float32)) + rois = sub(g, rois, aligned_offset) + # roi align + return g.op( + 'RoiAlign', + input, + rois, + batch_indices, + output_height_i=output_size[0], + output_width_i=output_size[1], + spatial_scale_f=spatial_scale, + sampling_ratio_i=max(0, sampling_ratio), + mode_s=pool_mode) + + @staticmethod + def forward(ctx, + input, + rois, + output_size, + spatial_scale=1.0, + sampling_ratio=0, + pool_mode='avg', + aligned=True): + ctx.output_size = _pair(output_size) + ctx.spatial_scale = spatial_scale + ctx.sampling_ratio = sampling_ratio + assert pool_mode in ('max', 'avg') + ctx.pool_mode = 0 if pool_mode == 'max' else 1 + ctx.aligned = aligned + ctx.input_shape = input.size() + + assert rois.size(1) == 5, 'RoI must be (idx, x1, y1, x2, y2)!' + + output_shape = (rois.size(0), input.size(1), ctx.output_size[0], + ctx.output_size[1]) + output = input.new_zeros(output_shape) + if ctx.pool_mode == 0: + argmax_y = input.new_zeros(output_shape) + argmax_x = input.new_zeros(output_shape) + else: + argmax_y = input.new_zeros(0) + argmax_x = input.new_zeros(0) + + ext_module.roi_align_forward( + input, + rois, + output, + argmax_y, + argmax_x, + aligned_height=ctx.output_size[0], + aligned_width=ctx.output_size[1], + spatial_scale=ctx.spatial_scale, + sampling_ratio=ctx.sampling_ratio, + pool_mode=ctx.pool_mode, + aligned=ctx.aligned) + + ctx.save_for_backward(rois, argmax_y, argmax_x) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + rois, argmax_y, argmax_x = ctx.saved_tensors + grad_input = grad_output.new_zeros(ctx.input_shape) + # complex head architecture may cause grad_output uncontiguous. 
+        grad_output = grad_output.contiguous()
+        ext_module.roi_align_backward(
+            grad_output,
+            rois,
+            argmax_y,
+            argmax_x,
+            grad_input,
+            aligned_height=ctx.output_size[0],
+            aligned_width=ctx.output_size[1],
+            spatial_scale=ctx.spatial_scale,
+            sampling_ratio=ctx.sampling_ratio,
+            pool_mode=ctx.pool_mode,
+            aligned=ctx.aligned)
+        return grad_input, None, None, None, None, None, None
+
+
+roi_align = RoIAlignFunction.apply
+
+
+class RoIAlign(nn.Module):
+    """RoI align pooling layer.
+
+    Args:
+        output_size (tuple): h, w
+        spatial_scale (float): scale the input boxes by this number
+        sampling_ratio (int): number of input samples to take for each
+            output sample. 0 means sampling densely based on the box size.
+        pool_mode (str, 'avg' or 'max'): pooling mode in each bin.
+        aligned (bool): if False, use the legacy implementation in
+            MMDetection; if True, use the better-aligned implementation
+            described in the note below.
+        use_torchvision (bool): whether to use roi_align from torchvision.
+
+    Note:
+        The implementation of RoIAlign when aligned=True is modified from
+        https://github.com/facebookresearch/detectron2/
+
+        The meaning of aligned=True:
+
+        Given a continuous coordinate c, its two neighboring pixel
+        indices (in our pixel model) are computed by floor(c - 0.5) and
+        ceil(c - 0.5). For example, c=1.3 has pixel neighbors with discrete
+        indices [0] and [1] (which are sampled from the underlying signal
+        at continuous coordinates 0.5 and 1.5). But the original roi_align
+        (aligned=False) does not subtract the 0.5 when computing
+        neighboring pixel indices, so it uses pixels with a slightly
+        incorrect alignment (relative to our pixel model) when performing
+        bilinear interpolation.
+
+        With aligned=True, we first appropriately scale the ROI and then
+        shift it by -0.5 prior to calling roi_align. This produces the
+        correct neighbors.
+
+        This difference does not affect the model's performance when
+        RoIAlign is used together with conv layers.
+    """
+
+    @deprecated_api_warning(
+        {
+            'out_size': 'output_size',
+            'sample_num': 'sampling_ratio'
+        },
+        cls_name='RoIAlign')
+    def __init__(self,
+                 output_size,
+                 spatial_scale=1.0,
+                 sampling_ratio=0,
+                 pool_mode='avg',
+                 aligned=True,
+                 use_torchvision=False):
+        super(RoIAlign, self).__init__()
+
+        self.output_size = _pair(output_size)
+        self.spatial_scale = float(spatial_scale)
+        self.sampling_ratio = int(sampling_ratio)
+        self.pool_mode = pool_mode
+        self.aligned = aligned
+        self.use_torchvision = use_torchvision
+
+    def forward(self, input, rois):
+        """
+        Args:
+            input: NCHW images
+            rois: Bx5 boxes. First column is the index into N.
+                The other 4 columns are xyxy.
+        """
+        if self.use_torchvision:
+            from torchvision.ops import roi_align as tv_roi_align
+            if 'aligned' in tv_roi_align.__code__.co_varnames:
+                return tv_roi_align(input, rois, self.output_size,
+                                    self.spatial_scale, self.sampling_ratio,
+                                    self.aligned)
+            else:
+                if self.aligned:
+                    # avoid modifying the caller's rois in place
+                    rois = rois - rois.new_tensor(
+                        [0.] + [0.5 / self.spatial_scale] * 4)
+                return tv_roi_align(input, rois, self.output_size,
+                                    self.spatial_scale, self.sampling_ratio)
+        else:
+            return roi_align(input, rois, self.output_size,
+                             self.spatial_scale, self.sampling_ratio,
+                             self.pool_mode, self.aligned)
+
+    def __repr__(self):
+        s = self.__class__.__name__
+        s += f'(output_size={self.output_size}, '
+        s += f'spatial_scale={self.spatial_scale}, '
+        s += f'sampling_ratio={self.sampling_ratio}, '
+        s += f'pool_mode={self.pool_mode}, '
+        s += f'aligned={self.aligned}, '
+        s += f'use_torchvision={self.use_torchvision})'
+        return s
diff --git a/mmcv/ops/roiaware_pool3d/__init__.py b/mmcv/ops/roiaware_pool3d/__init__.py
new file mode 100644
index 0000000..aba9e18
--- /dev/null
+++ b/mmcv/ops/roiaware_pool3d/__init__.py
@@ -0,0 +1,8 @@
+from .points_in_boxes import (points_in_boxes_batch, points_in_boxes_cpu,
+                              points_in_boxes_gpu)
+from .roiaware_pool3d import RoIAwarePool3d
+
+__all__ = [
+    'RoIAwarePool3d', 'points_in_boxes_gpu', 'points_in_boxes_cpu',
+    'points_in_boxes_batch'
+]
diff --git a/mmcv/ops/roiaware_pool3d/points_in_boxes.py b/mmcv/ops/roiaware_pool3d/points_in_boxes.py
new file mode 100644
index 0000000..f576fed
--- /dev/null
+++ b/mmcv/ops/roiaware_pool3d/points_in_boxes.py
@@ -0,0 +1,123 @@
+import torch
+
+from . import roiaware_pool3d_ext
+
+
+def points_in_boxes_gpu(points, boxes):
+    """Find points that are in boxes (CUDA).
+
+    Args:
+        points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR coordinate
+        boxes (torch.Tensor): [B, T, 7],
+            num_valid_boxes <= T, [x, y, z, w, l, h, ry] in LiDAR coordinate,
+            (x, y, z) is the bottom center
+
+    Returns:
+        box_idxs_of_pts (torch.Tensor): (B, M), default background = -1
+    """
+    assert boxes.shape[0] == points.shape[0], \
+        f'Points and boxes should have the same batch size, ' \
+        f'got {points.shape[0]} and {boxes.shape[0]}'
+    assert boxes.shape[2] == 7, \
+        f'boxes dimension should be 7, ' \
+        f'got unexpected shape {boxes.shape[2]}'
+    assert points.shape[2] == 3, \
+        f'points dimension should be 3, ' \
+        f'got unexpected shape {points.shape[2]}'
+    batch_size, num_points, _ = points.shape
+
+    box_idxs_of_pts = points.new_zeros((batch_size, num_points),
+                                       dtype=torch.int).fill_(-1)
+
+    # If the tensor 'points' or 'boxes' is manually put on a device
+    # which is not the current device, some temporary variables
+    # will be created on the current device in the cuda op,
+    # and the output will be incorrect.
+    # Therefore, we force the current device to be the same
+    # as the device of the tensors if it was not.
+    # Please refer to https://github.com/open-mmlab/mmdetection3d/issues/305
+    # for the incorrect output before the fix.
+    points_device = points.get_device()
+    assert points_device == boxes.get_device(), \
+        'Points and boxes should be put on the same device'
+    if torch.cuda.current_device() != points_device:
+        torch.cuda.set_device(points_device)
+
+    roiaware_pool3d_ext.points_in_boxes_gpu(boxes.contiguous(),
+                                            points.contiguous(),
+                                            box_idxs_of_pts)
+
+    return box_idxs_of_pts
+
+
+def points_in_boxes_cpu(points, boxes):
+    """Find points that are in boxes (CPU).
+
+    Note:
+        Currently, the output of this function is different from that of
+        points_in_boxes_gpu.
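+        points_in_boxes_cpu returns a (N, npoints) 0/1 indicator for every
+        box-point pair, while points_in_boxes_gpu returns a single box index
+        per point.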
+
+    Args:
+        points (torch.Tensor): [npoints, 3]
+        boxes (torch.Tensor): [N, 7], in LiDAR coordinate,
+            (x, y, z) is the bottom center
+
+    Returns:
+        point_indices (torch.Tensor): (N, npoints)
+    """
+    # TODO: Refactor this function as a CPU version of points_in_boxes_gpu
+    assert boxes.shape[1] == 7, \
+        f'boxes dimension should be 7, ' \
+        f'got unexpected shape {boxes.shape[1]}'
+    assert points.shape[1] == 3, \
+        f'points dimension should be 3, ' \
+        f'got unexpected shape {points.shape[1]}'
+
+    point_indices = points.new_zeros((boxes.shape[0], points.shape[0]),
+                                     dtype=torch.int)
+    roiaware_pool3d_ext.points_in_boxes_cpu(boxes.float().contiguous(),
+                                            points.float().contiguous(),
+                                            point_indices)
+
+    return point_indices
+
+
+def points_in_boxes_batch(points, boxes):
+    """Find points that are in boxes (CUDA).
+
+    Args:
+        points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR coordinate
+        boxes (torch.Tensor): [B, T, 7],
+            num_valid_boxes <= T, [x, y, z, w, l, h, ry] in LiDAR coordinate,
+            (x, y, z) is the bottom center.
+
+    Returns:
+        box_idxs_of_pts (torch.Tensor): (B, M, T), default background = 0
+    """
+    assert boxes.shape[0] == points.shape[0], \
+        f'Points and boxes should have the same batch size, ' \
+        f'got {points.shape[0]} and {boxes.shape[0]}'
+    assert boxes.shape[2] == 7, \
+        f'boxes dimension should be 7, ' \
+        f'got unexpected shape {boxes.shape[2]}'
+    assert points.shape[2] == 3, \
+        f'points dimension should be 3, ' \
+        f'got unexpected shape {points.shape[2]}'
+    batch_size, num_points, _ = points.shape
+    num_boxes = boxes.shape[1]
+
+    box_idxs_of_pts = points.new_zeros((batch_size, num_points, num_boxes),
+                                       dtype=torch.int).fill_(0)
+
+    # Same device-guard as in points_in_boxes_gpu above.
+    points_device = points.get_device()
+    assert points_device == boxes.get_device(), \
+        'Points and boxes should be put on the same device'
+    if torch.cuda.current_device() != points_device:
+        torch.cuda.set_device(points_device)
+
+    roiaware_pool3d_ext.points_in_boxes_batch(boxes.contiguous(),
+                                              points.contiguous(),
+                                              box_idxs_of_pts)
+
+    return box_idxs_of_pts
diff --git a/mmcv/ops/roiaware_pool3d/roiaware_pool3d.py b/mmcv/ops/roiaware_pool3d/roiaware_pool3d.py
new file mode 100644
index 0000000..536c9a1
--- /dev/null
+++ b/mmcv/ops/roiaware_pool3d/roiaware_pool3d.py
@@ -0,0 +1,110 @@
+import torch
+from torch import nn as nn
+from torch.autograd import Function
+
+from mmcv.utils import is_tuple_of
+
+from . import roiaware_pool3d_ext
+
+
+class RoIAwarePool3d(nn.Module):
+    """RoIAwarePool3d module.
+
+    Args:
+        out_size (int or tuple): n or [n1, n2, n3]
+        max_pts_per_voxel (int): m
+        mode (str): 'max' or 'avg'
+    """
+
+    def __init__(self, out_size, max_pts_per_voxel=128, mode='max'):
+        super().__init__()
+        self.out_size = out_size
+        self.max_pts_per_voxel = max_pts_per_voxel
+        assert mode in ['max', 'avg']
+        pool_method_map = {'max': 0, 'avg': 1}
+        self.mode = pool_method_map[mode]
+
+    def forward(self, rois, pts, pts_feature):
+        """RoIAwarePool3d module forward.
+
+        Args:
+            rois (torch.Tensor): [N, 7], in LiDAR coordinate,
+                (x, y, z) is the bottom center of rois
+            pts (torch.Tensor): [npoints, 3]
+            pts_feature (torch.Tensor): [npoints, C]
+
+        Returns:
+            pooled_features (torch.Tensor): [N, out_x, out_y, out_z, C]
+        """
+
+        return RoIAwarePool3dFunction.apply(rois, pts, pts_feature,
+                                            self.out_size,
+                                            self.max_pts_per_voxel, self.mode)
+
+
+class RoIAwarePool3dFunction(Function):
+
+    @staticmethod
+    def forward(ctx, rois, pts, pts_feature, out_size, max_pts_per_voxel,
+                mode):
+        """RoIAwarePool3d function forward.
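+
+        Voxelize the points inside each RoI and pool their features into a
+        fixed (out_x, out_y, out_z) grid with max or average pooling.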
+
+        Args:
+            rois (torch.Tensor): [N, 7], in LiDAR coordinate,
+                (x, y, z) is the bottom center of rois
+            pts (torch.Tensor): [npoints, 3]
+            pts_feature (torch.Tensor): [npoints, C]
+            out_size (int or tuple): n or [n1, n2, n3]
+            max_pts_per_voxel (int): m
+            mode (int): 0 (max pool) or 1 (average pool)
+
+        Returns:
+            pooled_features (torch.Tensor): [N, out_x, out_y, out_z, C]
+        """
+
+        if isinstance(out_size, int):
+            out_x = out_y = out_z = out_size
+        else:
+            assert len(out_size) == 3
+            assert is_tuple_of(out_size, int)
+            out_x, out_y, out_z = out_size
+
+        num_rois = rois.shape[0]
+        num_channels = pts_feature.shape[-1]
+        num_pts = pts.shape[0]
+
+        pooled_features = pts_feature.new_zeros(
+            (num_rois, out_x, out_y, out_z, num_channels))
+        argmax = pts_feature.new_zeros(
+            (num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int)
+        pts_idx_of_voxels = pts_feature.new_zeros(
+            (num_rois, out_x, out_y, out_z, max_pts_per_voxel),
+            dtype=torch.int)
+
+        roiaware_pool3d_ext.forward(rois, pts, pts_feature, argmax,
+                                    pts_idx_of_voxels, pooled_features, mode)
+
+        ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, mode,
+                                            num_pts, num_channels)
+        return pooled_features
+
+    @staticmethod
+    def backward(ctx, grad_out):
+        """RoIAwarePool3d function backward.
+
+        Args:
+            grad_out (torch.Tensor): [N, out_x, out_y, out_z, C]
+
+        Returns:
+            grad_in (torch.Tensor): [npoints, C]
+        """
+        ret = ctx.roiaware_pool3d_for_backward
+        pts_idx_of_voxels, argmax, mode, num_pts, num_channels = ret
+
+        grad_in = grad_out.new_zeros((num_pts, num_channels))
+        roiaware_pool3d_ext.backward(pts_idx_of_voxels, argmax,
+                                     grad_out.contiguous(), grad_in, mode)
+
+        return None, None, grad_in, None, None, None
+
+
+if __name__ == '__main__':
+    pass
diff --git a/mmcv/ops/roiaware_pool3d/src/points_in_boxes_cpu.cpp b/mmcv/ops/roiaware_pool3d/src/points_in_boxes_cpu.cpp
new file mode 100644
index 0000000..a26ffb6
--- /dev/null
+++ b/mmcv/ops/roiaware_pool3d/src/points_in_boxes_cpu.cpp
@@ -0,0 +1,69 @@
+// Modified from
+// https://github.com/sshaoshuai/PCDet/blob/master/pcdet/ops/roiaware_pool3d/src/roiaware_pool3d_kernel.cu
+// Written by Shaoshuai Shi
+// All Rights Reserved 2019.
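+//
+// points_in_boxes_cpu below brute-forces an (N boxes) x (npoints) test:
+// each point is shifted into the box frame by lidar_to_local_coords_cpu and
+// compared against the box extents; the output is a 0/1 indicator matrix.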
+
+#include <assert.h>
+#include <math.h>
+#include <stdio.h>
+#include <torch/extension.h>
+#include <torch/serialize/tensor.h>
+
+#define CHECK_CONTIGUOUS(x) \
+  TORCH_CHECK(x.is_contiguous(), #x, " must be contiguous ")
+// #define DEBUG
+
+inline void lidar_to_local_coords_cpu(float shift_x, float shift_y, float rz,
+                                      float &local_x, float &local_y) {
+  // should rotate pi/2 + alpha to translate LiDAR to local
+  float rot_angle = rz + M_PI / 2;
+  float cosa = cos(rot_angle), sina = sin(rot_angle);
+  local_x = shift_x * cosa + shift_y * (-sina);
+  local_y = shift_x * sina + shift_y * cosa;
+}
+
+inline int check_pt_in_box3d_cpu(const float *pt, const float *box3d,
+                                 float &local_x, float &local_y) {
+  // param pt: (x, y, z)
+  // param box3d: (cx, cy, cz, w, l, h, rz) in LiDAR coordinate, cz is the
+  // bottom center
+  float x = pt[0], y = pt[1], z = pt[2];
+  float cx = box3d[0], cy = box3d[1], cz = box3d[2];
+  float w = box3d[3], l = box3d[4], h = box3d[5], rz = box3d[6];
+  cz += h / 2.0;  // shift to the center since cz in box3d is the bottom center
+
+  if (fabsf(z - cz) > h / 2.0) return 0;
+  lidar_to_local_coords_cpu(x - cx, y - cy, rz, local_x, local_y);
+  int in_flag = (local_x > -l / 2.0) & (local_x < l / 2.0) &
+                (local_y > -w / 2.0) & (local_y < w / 2.0);
+  return in_flag;
+}
+
+int points_in_boxes_cpu(at::Tensor boxes_tensor, at::Tensor pts_tensor,
+                        at::Tensor pts_indices_tensor) {
+  // params boxes: (N, 7) [x, y, z, w, l, h, rz] in LiDAR coordinate, z is the
+  // bottom center; the boxes must NOT overlap
+  // params pts: (npoints, 3) [x, y, z] in LiDAR coordinate
+  // params pts_indices: (N, npoints)
+
+  CHECK_CONTIGUOUS(boxes_tensor);
+  CHECK_CONTIGUOUS(pts_tensor);
+  CHECK_CONTIGUOUS(pts_indices_tensor);
+
+  int boxes_num = boxes_tensor.size(0);
+  int pts_num = pts_tensor.size(0);
+
+  const float *boxes = boxes_tensor.data_ptr<float>();
+  const float *pts = pts_tensor.data_ptr<float>();
+  int *pts_indices = pts_indices_tensor.data_ptr<int>();
+
+  float local_x = 0, local_y = 0;
+  for (int i = 0; i < boxes_num; i++) {
+    for (int j = 0; j < pts_num; j++) {
+      int cur_in_flag =
+          check_pt_in_box3d_cpu(pts + j * 3, boxes + i * 7, local_x, local_y);
+      pts_indices[i * pts_num + j] = cur_in_flag;
+    }
+  }
+
+  return 1;
+}
diff --git a/mmcv/ops/roiaware_pool3d/src/points_in_boxes_cuda.cu b/mmcv/ops/roiaware_pool3d/src/points_in_boxes_cuda.cu
new file mode 100644
index 0000000..896b316
--- /dev/null
+++ b/mmcv/ops/roiaware_pool3d/src/points_in_boxes_cuda.cu
@@ -0,0 +1,203 @@
+// Modified from
+// https://github.com/sshaoshuai/PCDet/blob/master/pcdet/ops/roiaware_pool3d/src/roiaware_pool3d_kernel.cu
+// Written by Shaoshuai Shi
+// All Rights Reserved 2019.
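+//
+// CUDA variant: one thread per (batch, point). points_in_boxes_kernel stops
+// at the first enclosing box (one index per point), while
+// points_in_boxes_batch_kernel records a 0/1 flag for every box.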
+ +#include +#include +#include +#include +#include + +#define THREADS_PER_BLOCK 256 +#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) + +#define CHECK_CUDA(x) \ + TORCH_CHECK(x.device().is_cuda(), #x, " must be a CUDAtensor ") +#define CHECK_CONTIGUOUS(x) \ + TORCH_CHECK(x.is_contiguous(), #x, " must be contiguous ") +#define CHECK_INPUT(x) \ + CHECK_CUDA(x); \ + CHECK_CONTIGUOUS(x) +// #define DEBUG + +__device__ inline void lidar_to_local_coords(float shift_x, float shift_y, + float rz, float &local_x, + float &local_y) { + // should rotate pi/2 + alpha to translate LiDAR to local + float rot_angle = rz + M_PI / 2; + float cosa = cos(rot_angle), sina = sin(rot_angle); + local_x = shift_x * cosa + shift_y * (-sina); + local_y = shift_x * sina + shift_y * cosa; +} + +__device__ inline int check_pt_in_box3d(const float *pt, const float *box3d, + float &local_x, float &local_y) { + // param pt: (x, y, z) + // param box3d: (cx, cy, cz, w, l, h, rz) in LiDAR coordinate, cz in the + // bottom center + float x = pt[0], y = pt[1], z = pt[2]; + float cx = box3d[0], cy = box3d[1], cz = box3d[2]; + float w = box3d[3], l = box3d[4], h = box3d[5], rz = box3d[6]; + cz += h / 2.0; // shift to the center since cz in box3d is the bottom center + + if (fabsf(z - cz) > h / 2.0) return 0; + lidar_to_local_coords(x - cx, y - cy, rz, local_x, local_y); + float in_flag = (local_x > -l / 2.0) & (local_x < l / 2.0) & + (local_y > -w / 2.0) & (local_y < w / 2.0); + return in_flag; +} + +__global__ void points_in_boxes_kernel(int batch_size, int boxes_num, + int pts_num, const float *boxes, + const float *pts, + int *box_idx_of_points) { + // params boxes: (B, N, 7) [x, y, z, w, l, h, rz] in LiDAR coordinate, z is + // the bottom center, each box DO NOT overlaps params pts: (B, npoints, 3) [x, + // y, z] in LiDAR coordinate params boxes_idx_of_points: (B, npoints), default + // -1 + + int bs_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= batch_size || pt_idx >= pts_num) return; + + boxes += bs_idx * boxes_num * 7; + pts += bs_idx * pts_num * 3 + pt_idx * 3; + box_idx_of_points += bs_idx * pts_num + pt_idx; + + float local_x = 0, local_y = 0; + int cur_in_flag = 0; + for (int k = 0; k < boxes_num; k++) { + cur_in_flag = check_pt_in_box3d(pts, boxes + k * 7, local_x, local_y); + if (cur_in_flag) { + box_idx_of_points[0] = k; + break; + } + } +} + +__global__ void points_in_boxes_batch_kernel(int batch_size, int boxes_num, + int pts_num, const float *boxes, + const float *pts, + int *box_idx_of_points) { + // params boxes: (B, N, 7) [x, y, z, w, l, h, rz] in LiDAR coordinate, z is + // the bottom center, each box DO NOT overlaps params pts: (B, npoints, 3) [x, + // y, z] in LiDAR coordinate params boxes_idx_of_points: (B, npoints), default + // -1 + + int bs_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= batch_size || pt_idx >= pts_num) return; + + boxes += bs_idx * boxes_num * 7; + pts += bs_idx * pts_num * 3 + pt_idx * 3; + box_idx_of_points += bs_idx * pts_num * boxes_num + pt_idx * boxes_num; + + float local_x = 0, local_y = 0; + int cur_in_flag = 0; + for (int k = 0; k < boxes_num; k++) { + cur_in_flag = check_pt_in_box3d(pts, boxes + k * 7, local_x, local_y); + if (cur_in_flag) { + box_idx_of_points[k] = 1; + } + cur_in_flag = 0; + } +} + +void points_in_boxes_launcher(int batch_size, int boxes_num, int pts_num, + const float *boxes, const float *pts, + int *box_idx_of_points) { + // params boxes: (B, N, 7) [x, y, z, w, l, h, rz] 
in LiDAR coordinate, z is + // the bottom center, each box DO NOT overlaps params pts: (B, npoints, 3) [x, + // y, z] in LiDAR coordinate params boxes_idx_of_points: (B, npoints), default + // -1 + cudaError_t err; + + dim3 blocks(DIVUP(pts_num, THREADS_PER_BLOCK), batch_size); + dim3 threads(THREADS_PER_BLOCK); + points_in_boxes_kernel<<>>(batch_size, boxes_num, pts_num, + boxes, pts, box_idx_of_points); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } + +#ifdef DEBUG + cudaDeviceSynchronize(); // for using printf in kernel function +#endif +} + +void points_in_boxes_batch_launcher(int batch_size, int boxes_num, int pts_num, + const float *boxes, const float *pts, + int *box_idx_of_points) { + // params boxes: (B, N, 7) [x, y, z, w, l, h, rz] in LiDAR coordinate, z is + // the bottom center, each box params pts: (B, npoints, 3) [x, y, z] in + // LiDAR coordinate params boxes_idx_of_points: (B, npoints), default -1 + cudaError_t err; + + dim3 blocks(DIVUP(pts_num, THREADS_PER_BLOCK), batch_size); + dim3 threads(THREADS_PER_BLOCK); + points_in_boxes_batch_kernel<<>>( + batch_size, boxes_num, pts_num, boxes, pts, box_idx_of_points); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } + +#ifdef DEBUG + cudaDeviceSynchronize(); // for using printf in kernel function +#endif +} + +int points_in_boxes_gpu(at::Tensor boxes_tensor, at::Tensor pts_tensor, + at::Tensor box_idx_of_points_tensor) { + // params boxes: (B, N, 7) [x, y, z, w, l, h, rz] in LiDAR coordinate, z is + // the bottom center, each box DO NOT overlaps params pts: (B, npoints, 3) [x, + // y, z] in LiDAR coordinate params boxes_idx_of_points: (B, npoints), default + // -1 + + CHECK_INPUT(boxes_tensor); + CHECK_INPUT(pts_tensor); + CHECK_INPUT(box_idx_of_points_tensor); + + int batch_size = boxes_tensor.size(0); + int boxes_num = boxes_tensor.size(1); + int pts_num = pts_tensor.size(1); + + const float *boxes = boxes_tensor.data_ptr(); + const float *pts = pts_tensor.data_ptr(); + int *box_idx_of_points = box_idx_of_points_tensor.data_ptr(); + + points_in_boxes_launcher(batch_size, boxes_num, pts_num, boxes, pts, + box_idx_of_points); + + return 1; +} + +int points_in_boxes_batch(at::Tensor boxes_tensor, at::Tensor pts_tensor, + at::Tensor box_idx_of_points_tensor) { + // params boxes: (B, N, 7) [x, y, z, w, l, h, rz] in LiDAR coordinate, z is + // the bottom center. 
params pts: (B, npoints, 3) [x, y, z] in LiDAR + // coordinate params boxes_idx_of_points: (B, npoints), default -1 + + CHECK_INPUT(boxes_tensor); + CHECK_INPUT(pts_tensor); + CHECK_INPUT(box_idx_of_points_tensor); + + int batch_size = boxes_tensor.size(0); + int boxes_num = boxes_tensor.size(1); + int pts_num = pts_tensor.size(1); + + const float *boxes = boxes_tensor.data_ptr(); + const float *pts = pts_tensor.data_ptr(); + int *box_idx_of_points = box_idx_of_points_tensor.data_ptr(); + + points_in_boxes_batch_launcher(batch_size, boxes_num, pts_num, boxes, pts, + box_idx_of_points); + + return 1; +} diff --git a/mmcv/ops/roiaware_pool3d/src/roiaware_pool3d.cpp b/mmcv/ops/roiaware_pool3d/src/roiaware_pool3d.cpp new file mode 100644 index 0000000..cd743b1 --- /dev/null +++ b/mmcv/ops/roiaware_pool3d/src/roiaware_pool3d.cpp @@ -0,0 +1,136 @@ +// Modified from +// https://github.com/sshaoshuai/PCDet/blob/master/pcdet/ops/roiaware_pool3d/src/roiaware_pool3d_kernel.cu +// Written by Shaoshuai Shi +// All Rights Reserved 2019. + +#include +#include +#include + +#define CHECK_CUDA(x) \ + TORCH_CHECK(x.device().is_cuda(), #x, " must be a CUDAtensor ") +#define CHECK_CONTIGUOUS(x) \ + TORCH_CHECK(x.is_contiguous(), #x, " must be contiguous ") +#define CHECK_INPUT(x) \ + CHECK_CUDA(x); \ + CHECK_CONTIGUOUS(x) + +void roiaware_pool3d_launcher(int boxes_num, int pts_num, int channels, + int max_pts_each_voxel, int out_x, int out_y, + int out_z, const float *rois, const float *pts, + const float *pts_feature, int *argmax, + int *pts_idx_of_voxels, float *pooled_features, + int pool_method); + +void roiaware_pool3d_backward_launcher(int boxes_num, int out_x, int out_y, + int out_z, int channels, + int max_pts_each_voxel, + const int *pts_idx_of_voxels, + const int *argmax, const float *grad_out, + float *grad_in, int pool_method); + +int roiaware_pool3d_gpu(at::Tensor rois, at::Tensor pts, at::Tensor pts_feature, + at::Tensor argmax, at::Tensor pts_idx_of_voxels, + at::Tensor pooled_features, int pool_method); + +int roiaware_pool3d_gpu_backward(at::Tensor pts_idx_of_voxels, + at::Tensor argmax, at::Tensor grad_out, + at::Tensor grad_in, int pool_method); + +int points_in_boxes_cpu(at::Tensor boxes_tensor, at::Tensor pts_tensor, + at::Tensor pts_indices_tensor); + +int points_in_boxes_gpu(at::Tensor boxes_tensor, at::Tensor pts_tensor, + at::Tensor box_idx_of_points_tensor); + +int points_in_boxes_batch(at::Tensor boxes_tensor, at::Tensor pts_tensor, + at::Tensor box_idx_of_points_tensor); + +int roiaware_pool3d_gpu(at::Tensor rois, at::Tensor pts, at::Tensor pts_feature, + at::Tensor argmax, at::Tensor pts_idx_of_voxels, + at::Tensor pooled_features, int pool_method) { + // params rois: (N, 7) [x, y, z, w, l, h, ry] in LiDAR coordinate + // params pts: (npoints, 3) [x, y, z] in LiDAR coordinate + // params pts_feature: (npoints, C) + // params argmax: (N, out_x, out_y, out_z, C) + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) + // params pooled_features: (N, out_x, out_y, out_z, C) + // params pool_method: 0: max_pool 1: avg_pool + + CHECK_INPUT(rois); + CHECK_INPUT(pts); + CHECK_INPUT(pts_feature); + CHECK_INPUT(argmax); + CHECK_INPUT(pts_idx_of_voxels); + CHECK_INPUT(pooled_features); + + int boxes_num = rois.size(0); + int pts_num = pts.size(0); + int channels = pts_feature.size(1); + int max_pts_each_voxel = pts_idx_of_voxels.size(4); // index 0 is the counter + int out_x = pts_idx_of_voxels.size(1); + int out_y = pts_idx_of_voxels.size(2); + int out_z = 
pts_idx_of_voxels.size(3); + assert((out_x < 256) && (out_y < 256) && + (out_z < 256)); // we encode index with 8bit + + const float *rois_data = rois.data_ptr(); + const float *pts_data = pts.data_ptr(); + const float *pts_feature_data = pts_feature.data_ptr(); + int *argmax_data = argmax.data_ptr(); + int *pts_idx_of_voxels_data = pts_idx_of_voxels.data_ptr(); + float *pooled_features_data = pooled_features.data_ptr(); + + roiaware_pool3d_launcher( + boxes_num, pts_num, channels, max_pts_each_voxel, out_x, out_y, out_z, + rois_data, pts_data, pts_feature_data, argmax_data, + pts_idx_of_voxels_data, pooled_features_data, pool_method); + + return 1; +} + +int roiaware_pool3d_gpu_backward(at::Tensor pts_idx_of_voxels, + at::Tensor argmax, at::Tensor grad_out, + at::Tensor grad_in, int pool_method) { + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) + // params argmax: (N, out_x, out_y, out_z, C) + // params grad_out: (N, out_x, out_y, out_z, C) + // params grad_in: (npoints, C), return value + // params pool_method: 0: max_pool 1: avg_pool + + CHECK_INPUT(pts_idx_of_voxels); + CHECK_INPUT(argmax); + CHECK_INPUT(grad_out); + CHECK_INPUT(grad_in); + + int boxes_num = pts_idx_of_voxels.size(0); + int out_x = pts_idx_of_voxels.size(1); + int out_y = pts_idx_of_voxels.size(2); + int out_z = pts_idx_of_voxels.size(3); + int max_pts_each_voxel = pts_idx_of_voxels.size(4); // index 0 is the counter + int channels = grad_out.size(4); + + const int *pts_idx_of_voxels_data = pts_idx_of_voxels.data_ptr(); + const int *argmax_data = argmax.data_ptr(); + const float *grad_out_data = grad_out.data_ptr(); + float *grad_in_data = grad_in.data_ptr(); + + roiaware_pool3d_backward_launcher(boxes_num, out_x, out_y, out_z, channels, + max_pts_each_voxel, pts_idx_of_voxels_data, + argmax_data, grad_out_data, grad_in_data, + pool_method); + + return 1; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", &roiaware_pool3d_gpu, "roiaware pool3d forward (CUDA)"); + m.def("backward", &roiaware_pool3d_gpu_backward, + "roiaware pool3d backward (CUDA)"); + m.def("points_in_boxes_gpu", &points_in_boxes_gpu, + "points_in_boxes_gpu forward (CUDA)"); + m.def("points_in_boxes_batch", &points_in_boxes_batch, + "points_in_boxes_batch forward (CUDA)"); + m.def("points_in_boxes_cpu", &points_in_boxes_cpu, + "points_in_boxes_cpu forward (CPU)"); +} diff --git a/mmcv/ops/roiaware_pool3d/src/roiaware_pool3d_kernel.cu b/mmcv/ops/roiaware_pool3d/src/roiaware_pool3d_kernel.cu new file mode 100644 index 0000000..312b35d --- /dev/null +++ b/mmcv/ops/roiaware_pool3d/src/roiaware_pool3d_kernel.cu @@ -0,0 +1,366 @@ +// Modified from +// https://github.com/sshaoshuai/PCDet/blob/master/pcdet/ops/roiaware_pool3d/src/roiaware_pool3d_kernel.cu +// Written by Shaoshuai Shi +// All Rights Reserved 2019. 
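+//
+// Pooling pipeline: generate_pts_mask_for_box3d encodes, per (box, point),
+// the target voxel as (x_idx << 16) + (y_idx << 8) + z_idx;
+// collect_inside_pts_for_box3d gathers point indices per voxel (slot 0 of
+// each voxel is a counter); roiaware_maxpool3d / roiaware_avgpool3d then
+// pool the point features per voxel.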
+ +#include +#include +#include +#include +#include + +#define THREADS_PER_BLOCK 256 +#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) + +// #define DEBUG + +__device__ inline void lidar_to_local_coords(float shift_x, float shift_y, + float rz, float &local_x, + float &local_y) { + // should rotate pi/2 + alpha to translate LiDAR to local + float rot_angle = rz + M_PI / 2; + float cosa = cos(rot_angle), sina = sin(rot_angle); + local_x = shift_x * cosa + shift_y * (-sina); + local_y = shift_x * sina + shift_y * cosa; +} + +__device__ inline int check_pt_in_box3d(const float *pt, const float *box3d, + float &local_x, float &local_y) { + // param pt: (x, y, z) + // param box3d: (cx, cy, cz, w, l, h, rz) in LiDAR coordinate, cz in the + // bottom center + float x = pt[0], y = pt[1], z = pt[2]; + float cx = box3d[0], cy = box3d[1], cz = box3d[2]; + float w = box3d[3], l = box3d[4], h = box3d[5], rz = box3d[6]; + cz += h / 2.0; // shift to the center since cz in box3d is the bottom center + + if (fabsf(z - cz) > h / 2.0) return 0; + lidar_to_local_coords(x - cx, y - cy, rz, local_x, local_y); + float in_flag = (local_x > -l / 2.0) & (local_x < l / 2.0) & + (local_y > -w / 2.0) & (local_y < w / 2.0); + return in_flag; +} + +__global__ void generate_pts_mask_for_box3d(int boxes_num, int pts_num, + int out_x, int out_y, int out_z, + const float *rois, const float *pts, + int *pts_mask) { + // params rois: (N, 7) [x, y, z, w, l, h, rz] in LiDAR coordinate + // params pts: (npoints, 3) [x, y, z] + // params pts_mask: (N, npoints): -1 means point doesnot in this box, + // otherwise: encode (x_idxs, y_idxs, z_idxs) by binary bit + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + int box_idx = blockIdx.y; + if (pt_idx >= pts_num || box_idx >= boxes_num) return; + + pts += pt_idx * 3; + rois += box_idx * 7; + pts_mask += box_idx * pts_num + pt_idx; + + float local_x = 0, local_y = 0; + int cur_in_flag = check_pt_in_box3d(pts, rois, local_x, local_y); + + pts_mask[0] = -1; + if (cur_in_flag > 0) { + float local_z = pts[2] - rois[2]; + float w = rois[3], l = rois[4], h = rois[5]; + + float x_res = l / out_x; + float y_res = w / out_y; + float z_res = h / out_z; + + unsigned int x_idx = int((local_x + l / 2) / x_res); + unsigned int y_idx = int((local_y + w / 2) / y_res); + unsigned int z_idx = int(local_z / z_res); + + x_idx = min(max(x_idx, 0), out_x - 1); + y_idx = min(max(y_idx, 0), out_y - 1); + z_idx = min(max(z_idx, 0), out_z - 1); + + unsigned int idx_encoding = (x_idx << 16) + (y_idx << 8) + z_idx; +#ifdef DEBUG + printf( + "mask: pts_%d(%.3f, %.3f, %.3f), local(%.3f, %.3f, %.3f), idx(%d, %d, " + "%d), res(%.3f, %.3f, %.3f), idx_encoding=%x\n", + pt_idx, pts[0], pts[1], pts[2], local_x, local_y, local_z, x_idx, y_idx, + z_idx, x_res, y_res, z_res, idx_encoding); +#endif + + pts_mask[0] = idx_encoding; + } +} + +__global__ void collect_inside_pts_for_box3d(int boxes_num, int pts_num, + int max_pts_each_voxel, int out_x, + int out_y, int out_z, + const int *pts_mask, + int *pts_idx_of_voxels) { + // params pts_mask: (N, npoints) 0 or 1 + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) + + int box_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (box_idx >= boxes_num) return; + + int max_num_pts = max_pts_each_voxel - 1; // index 0 is the counter + pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel; + + for (int k = 0; k < pts_num; k++) { + if (pts_mask[box_idx * pts_num + k] != -1) { + unsigned int idx_encoding = pts_mask[box_idx * pts_num + 
k]; + unsigned int x_idx = (idx_encoding >> 16) & 0xFF; + unsigned int y_idx = (idx_encoding >> 8) & 0xFF; + unsigned int z_idx = idx_encoding & 0xFF; + unsigned int base_offset = x_idx * out_y * out_z * max_pts_each_voxel + + y_idx * out_z * max_pts_each_voxel + + z_idx * max_pts_each_voxel; + unsigned int cnt = pts_idx_of_voxels[base_offset]; + if (cnt < max_num_pts) { + pts_idx_of_voxels[base_offset + cnt + 1] = k; + pts_idx_of_voxels[base_offset]++; + } +#ifdef DEBUG + printf("collect: pts_%d, idx(%d, %d, %d), idx_encoding=%x\n", k, x_idx, + y_idx, z_idx, idx_encoding); +#endif + } + } +} + +__global__ void roiaware_maxpool3d(int boxes_num, int pts_num, int channels, + int max_pts_each_voxel, int out_x, int out_y, + int out_z, const float *pts_feature, + const int *pts_idx_of_voxels, + float *pooled_features, int *argmax) { + // params pts_feature: (npoints, C) + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel), + // index 0 is the counter params pooled_features: (N, out_x, out_y, out_z, C) + // params argmax: (N, out_x, out_y, out_z, C) + + int box_idx = blockIdx.z; + int channel_idx = blockIdx.y; + int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x; + + int x_idx = voxel_idx_flat / (out_y * out_z); + int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z; + int z_idx = voxel_idx_flat % out_z; + if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x || + y_idx >= out_y || z_idx >= out_z) + return; + +#ifdef DEBUG + printf("src pts_idx_of_voxels: (%p, ), argmax: %p\n", pts_idx_of_voxels, + argmax); +#endif + + int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx; + pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel + + offset_base * max_pts_each_voxel; + pooled_features += box_idx * out_x * out_y * out_z * channels + + offset_base * channels + channel_idx; + argmax += box_idx * out_x * out_y * out_z * channels + + offset_base * channels + channel_idx; + + int argmax_idx = -1; + float max_val = -1e50; + + int total_pts = pts_idx_of_voxels[0]; + + for (int k = 1; k <= total_pts; k++) { + if (pts_feature[pts_idx_of_voxels[k] * channels + channel_idx] > max_val) { + max_val = pts_feature[pts_idx_of_voxels[k] * channels + channel_idx]; + argmax_idx = pts_idx_of_voxels[k]; + } + } + + if (argmax_idx != -1) { + pooled_features[0] = max_val; + } + argmax[0] = argmax_idx; + +#ifdef DEBUG + printf( + "channel_%d idx(%d, %d, %d), argmax_idx=(%d, %.3f), total=%d, after " + "pts_idx: %p, argmax: (%p, %d)\n", + channel_idx, x_idx, y_idx, z_idx, argmax_idx, max_val, total_pts, + pts_idx_of_voxels, argmax, argmax_idx); +#endif +} + +__global__ void roiaware_avgpool3d(int boxes_num, int pts_num, int channels, + int max_pts_each_voxel, int out_x, int out_y, + int out_z, const float *pts_feature, + const int *pts_idx_of_voxels, + float *pooled_features) { + // params pts_feature: (npoints, C) + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel), + // index 0 is the counter params pooled_features: (N, out_x, out_y, out_z, C) + // params argmax: (N, out_x, out_y, out_z, C) + + int box_idx = blockIdx.z; + int channel_idx = blockIdx.y; + int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x; + + int x_idx = voxel_idx_flat / (out_y * out_z); + int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z; + int z_idx = voxel_idx_flat % out_z; + if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x || + y_idx >= out_y || z_idx >= out_z) + return; + + int offset_base = x_idx * 
out_y * out_z + y_idx * out_z + z_idx; + pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel + + offset_base * max_pts_each_voxel; + pooled_features += box_idx * out_x * out_y * out_z * channels + + offset_base * channels + channel_idx; + + float sum_val = 0; + int total_pts = pts_idx_of_voxels[0]; + + for (int k = 1; k <= total_pts; k++) { + sum_val += pts_feature[pts_idx_of_voxels[k] * channels + channel_idx]; + } + + if (total_pts > 0) { + pooled_features[0] = sum_val / total_pts; + } +} + +void roiaware_pool3d_launcher(int boxes_num, int pts_num, int channels, + int max_pts_each_voxel, int out_x, int out_y, + int out_z, const float *rois, const float *pts, + const float *pts_feature, int *argmax, + int *pts_idx_of_voxels, float *pooled_features, + int pool_method) { + // params rois: (N, 7) [x, y, z, w, l, h, rz] in LiDAR coordinate + // params pts: (npoints, 3) [x, y, z] in LiDAR coordinate + // params pts_feature: (npoints, C) + // params argmax: (N, out_x, out_y, out_z, C) + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) + // params pooled_features: (N, out_x, out_y, out_z, C) + // params pool_method: 0: max_pool 1: avg_pool + + int *pts_mask = NULL; + cudaMalloc(&pts_mask, boxes_num * pts_num * sizeof(int)); // (N, M) + cudaMemset(pts_mask, -1, boxes_num * pts_num * sizeof(int)); + + dim3 blocks_mask(DIVUP(pts_num, THREADS_PER_BLOCK), boxes_num); + dim3 threads(THREADS_PER_BLOCK); + generate_pts_mask_for_box3d<<>>( + boxes_num, pts_num, out_x, out_y, out_z, rois, pts, pts_mask); + + // TODO: Merge the collect and pool functions, SS + + dim3 blocks_collect(DIVUP(boxes_num, THREADS_PER_BLOCK)); + collect_inside_pts_for_box3d<<>>( + boxes_num, pts_num, max_pts_each_voxel, out_x, out_y, out_z, pts_mask, + pts_idx_of_voxels); + + dim3 blocks_pool(DIVUP(out_x * out_y * out_z, THREADS_PER_BLOCK), channels, + boxes_num); + if (pool_method == 0) { + roiaware_maxpool3d<<>>( + boxes_num, pts_num, channels, max_pts_each_voxel, out_x, out_y, out_z, + pts_feature, pts_idx_of_voxels, pooled_features, argmax); + } else if (pool_method == 1) { + roiaware_avgpool3d<<>>( + boxes_num, pts_num, channels, max_pts_each_voxel, out_x, out_y, out_z, + pts_feature, pts_idx_of_voxels, pooled_features); + } + + cudaFree(pts_mask); + +#ifdef DEBUG + cudaDeviceSynchronize(); // for using printf in kernel function +#endif +} + +__global__ void roiaware_maxpool3d_backward(int boxes_num, int channels, + int out_x, int out_y, int out_z, + const int *argmax, + const float *grad_out, + float *grad_in) { + // params argmax: (N, out_x, out_y, out_z, C) + // params grad_out: (N, out_x, out_y, out_z, C) + // params grad_in: (npoints, C), return value + + int box_idx = blockIdx.z; + int channel_idx = blockIdx.y; + int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x; + + int x_idx = voxel_idx_flat / (out_y * out_z); + int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z; + int z_idx = voxel_idx_flat % out_z; + if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x || + y_idx >= out_y || z_idx >= out_z) + return; + + int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx; + argmax += box_idx * out_x * out_y * out_z * channels + + offset_base * channels + channel_idx; + grad_out += box_idx * out_x * out_y * out_z * channels + + offset_base * channels + channel_idx; + + if (argmax[0] == -1) return; + + atomicAdd(grad_in + argmax[0] * channels + channel_idx, grad_out[0] * 1); +} + +__global__ void roiaware_avgpool3d_backward(int boxes_num, 
int channels, + int out_x, int out_y, int out_z, + int max_pts_each_voxel, + const int *pts_idx_of_voxels, + const float *grad_out, + float *grad_in) { + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) + // params grad_out: (N, out_x, out_y, out_z, C) + // params grad_in: (npoints, C), return value + + int box_idx = blockIdx.z; + int channel_idx = blockIdx.y; + int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x; + + int x_idx = voxel_idx_flat / (out_y * out_z); + int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z; + int z_idx = voxel_idx_flat % out_z; + if (box_idx >= boxes_num || channel_idx >= channels || x_idx >= out_x || + y_idx >= out_y || z_idx >= out_z) + return; + + int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx; + pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel + + offset_base * max_pts_each_voxel; + grad_out += box_idx * out_x * out_y * out_z * channels + + offset_base * channels + channel_idx; + + int total_pts = pts_idx_of_voxels[0]; + float cur_grad = 1 / fmaxf(float(total_pts), 1.0); + for (int k = 1; k <= total_pts; k++) { + atomicAdd(grad_in + pts_idx_of_voxels[k] * channels + channel_idx, + grad_out[0] * cur_grad); + } +} + +void roiaware_pool3d_backward_launcher(int boxes_num, int out_x, int out_y, + int out_z, int channels, + int max_pts_each_voxel, + const int *pts_idx_of_voxels, + const int *argmax, const float *grad_out, + float *grad_in, int pool_method) { + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) + // params argmax: (N, out_x, out_y, out_z, C) + // params grad_out: (N, out_x, out_y, out_z, C) + // params grad_in: (npoints, C), return value + // params pool_method: 0: max_pool, 1: avg_pool + + dim3 blocks(DIVUP(out_x * out_y * out_z, THREADS_PER_BLOCK), channels, + boxes_num); + dim3 threads(THREADS_PER_BLOCK); + if (pool_method == 0) { + roiaware_maxpool3d_backward<<>>( + boxes_num, channels, out_x, out_y, out_z, argmax, grad_out, grad_in); + } else if (pool_method == 1) { + roiaware_avgpool3d_backward<<>>( + boxes_num, channels, out_x, out_y, out_z, max_pts_each_voxel, + pts_idx_of_voxels, grad_out, grad_in); + } +} diff --git a/mmcv/ops/voxelize.py b/mmcv/ops/voxelize.py new file mode 100644 index 0000000..d6fc855 --- /dev/null +++ b/mmcv/ops/voxelize.py @@ -0,0 +1,145 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch import nn +from torch.autograd import Function +from torch.nn.modules.utils import _pair + +from ..utils import ext_loader + +ext_module = ext_loader.load_ext( + '_ext', ['dynamic_voxelize_forward', 'hard_voxelize_forward']) + + +class _Voxelization(Function): + + @staticmethod + def forward(ctx, + points, + voxel_size, + coors_range, + max_points=35, + max_voxels=20000): + """Convert kitti points(N, >=3) to voxels. + + Args: + points (torch.Tensor): [N, ndim]. Points[:, :3] contain xyz points + and points[:, 3:] contain other information like reflectivity. + voxel_size (tuple or float): The size of voxel with the shape of + [3]. + coors_range (tuple or float): The coordinate range of voxel with + the shape of [6]. + max_points (int, optional): maximum points contained in a voxel. if + max_points=-1, it means using dynamic_voxelize. Default: 35. + max_voxels (int, optional): maximum voxels this function create. + for second, 20000 is a good choice. Users should shuffle points + before call this function because max_voxels may drop points. + Default: 20000. 
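+
+        Note:
+            When max_points == -1 or max_voxels == -1, the op runs in
+            dynamic mode and only the per-point voxel coordinates are
+            returned.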
+
+        Returns:
+            voxels_out (torch.Tensor): Output voxels with the shape of [M,
+                max_points, ndim]. Only contain points and returned when
+                max_points != -1.
+            coors_out (torch.Tensor): Output coordinates with the shape of
+                [M, 3].
+            num_points_per_voxel_out (torch.Tensor): Num points per voxel with
+                the shape of [M]. Only returned when max_points != -1.
+        """
+        if max_points == -1 or max_voxels == -1:
+            coors = points.new_zeros(
+                size=(points.size(0), 3), dtype=torch.int)
+            ext_module.dynamic_voxelize_forward(
+                points,
+                torch.tensor(voxel_size, dtype=torch.float),
+                torch.tensor(coors_range, dtype=torch.float),
+                coors,
+                NDim=3)
+            return coors
+        else:
+            voxels = points.new_zeros(
+                size=(max_voxels, max_points, points.size(1)))
+            coors = points.new_zeros(size=(max_voxels, 3), dtype=torch.int)
+            num_points_per_voxel = points.new_zeros(
+                size=(max_voxels, ), dtype=torch.int)
+            voxel_num = torch.zeros(size=(), dtype=torch.long)
+            ext_module.hard_voxelize_forward(
+                points,
+                torch.tensor(voxel_size, dtype=torch.float),
+                torch.tensor(coors_range, dtype=torch.float),
+                voxels,
+                coors,
+                num_points_per_voxel,
+                voxel_num,
+                max_points=max_points,
+                max_voxels=max_voxels,
+                NDim=3)
+            # select the valid voxels
+            voxels_out = voxels[:voxel_num]
+            coors_out = coors[:voxel_num]
+            num_points_per_voxel_out = num_points_per_voxel[:voxel_num]
+            return voxels_out, coors_out, num_points_per_voxel_out
+
+
+voxelization = _Voxelization.apply
+
+
+class Voxelization(nn.Module):
+    """Convert KITTI points (N, >=3) to voxels.
+
+    Please refer to `PVCNN <https://arxiv.org/abs/1907.03739>`_ for more
+    details.
+
+    Args:
+        voxel_size (tuple or float): The size of voxel with the shape of [3].
+        point_cloud_range (tuple or float): The coordinate range of voxel with
+            the shape of [6].
+        max_num_points (int): maximum points contained in a voxel. If
+            max_num_points=-1, dynamic voxelization is used.
+        max_voxels (int, optional): maximum number of voxels this layer
+            creates. For SECOND, 20000 is a good choice. Users should shuffle
+            points before calling this layer because max_voxels may drop
+            points. Default: 20000.
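+
+    Example (a minimal sketch; sizes and ranges are illustrative only):
+        >>> voxel_layer = Voxelization(
+        >>>     voxel_size=[0.5, 0.5, 0.5],
+        >>>     point_cloud_range=[0., -40., -3., 70.4, 40., 1.],
+        >>>     max_num_points=35,
+        >>>     max_voxels=(16000, 40000))
+        >>> points = torch.rand(1000, 4)  # hypothetical (N, ndim) cloud
+        >>> voxels, coors, num_points = voxel_layer(points)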
+ """ + + def __init__(self, + voxel_size, + point_cloud_range, + max_num_points, + max_voxels=20000): + super().__init__() + + self.voxel_size = voxel_size + self.point_cloud_range = point_cloud_range + self.max_num_points = max_num_points + if isinstance(max_voxels, tuple): + self.max_voxels = max_voxels + else: + self.max_voxels = _pair(max_voxels) + + point_cloud_range = torch.tensor( + point_cloud_range, dtype=torch.float32) + voxel_size = torch.tensor(voxel_size, dtype=torch.float32) + grid_size = (point_cloud_range[3:] - + point_cloud_range[:3]) / voxel_size + grid_size = torch.round(grid_size).long() + input_feat_shape = grid_size[:2] + self.grid_size = grid_size + # the origin shape is as [x-len, y-len, z-len] + # [w, h, d] -> [d, h, w] + self.pcd_shape = [*input_feat_shape, 1][::-1] + + def forward(self, input): + if self.training: + max_voxels = self.max_voxels[0] + else: + max_voxels = self.max_voxels[1] + + return voxelization(input, self.voxel_size, self.point_cloud_range, + self.max_num_points, max_voxels) + + def __repr__(self): + s = self.__class__.__name__ + '(' + s += 'voxel_size=' + str(self.voxel_size) + s += ', point_cloud_range=' + str(self.point_cloud_range) + s += ', max_num_points=' + str(self.max_num_points) + s += ', max_voxels=' + str(self.max_voxels) + s += ')' + return s diff --git a/mmcv/optims/__init__.py b/mmcv/optims/__init__.py new file mode 100644 index 0000000..55f2449 --- /dev/null +++ b/mmcv/optims/__init__.py @@ -0,0 +1 @@ +from .optimizer import build_optimizer, OPTIMIZERS \ No newline at end of file diff --git a/mmcv/optims/adamw.py b/mmcv/optims/adamw.py new file mode 100644 index 0000000..c890aea --- /dev/null +++ b/mmcv/optims/adamw.py @@ -0,0 +1,131 @@ +try: + from torch.optim import _functional as F +except: + print('WARNING!!!, I recommend using torch>=1.8') + +import torch +from torch.optim.optimizer import Optimizer +from mmcv.runner.optimizer.builder import OPTIMIZERS + +@OPTIMIZERS.register_module() +class AdamW2(Optimizer): + r"""Implements AdamW algorithm. Solve the bug of torch 1.8 + + The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_. + The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_. + + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay coefficient (default: 1e-2) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + + .. _Adam\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + .. _Decoupled Weight Decay Regularization: + https://arxiv.org/abs/1711.05101 + .. 
+    .. _On the Convergence of Adam and Beyond:
+        https://openreview.net/forum?id=ryQu7f-RZ
+    """
+
+    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
+                 weight_decay=1e-2, amsgrad=False):
+        if not 0.0 <= lr:
+            raise ValueError('Invalid learning rate: {}'.format(lr))
+        if not 0.0 <= eps:
+            raise ValueError('Invalid epsilon value: {}'.format(eps))
+        if not 0.0 <= betas[0] < 1.0:
+            raise ValueError(
+                'Invalid beta parameter at index 0: {}'.format(betas[0]))
+        if not 0.0 <= betas[1] < 1.0:
+            raise ValueError(
+                'Invalid beta parameter at index 1: {}'.format(betas[1]))
+        if not 0.0 <= weight_decay:
+            raise ValueError(
+                'Invalid weight_decay value: {}'.format(weight_decay))
+        defaults = dict(lr=lr, betas=betas, eps=eps,
+                        weight_decay=weight_decay, amsgrad=amsgrad)
+        super(AdamW2, self).__init__(params, defaults)
+
+    def __setstate__(self, state):
+        super(AdamW2, self).__setstate__(state)
+        for group in self.param_groups:
+            group.setdefault('amsgrad', False)
+
+    @torch.no_grad()
+    def step(self, closure=None):
+        """Performs a single optimization step.
+
+        Args:
+            closure (callable, optional): A closure that reevaluates the model
+                and returns the loss.
+        """
+        loss = None
+        if closure is not None:
+            with torch.enable_grad():
+                loss = closure()
+
+        for group in self.param_groups:
+            params_with_grad = []
+            grads = []
+            exp_avgs = []
+            exp_avg_sqs = []
+            max_exp_avg_sqs = []
+            state_steps = []
+            amsgrad = group['amsgrad']
+
+            # read betas once per group, before the parameter loop
+            # (this ordering works around the torch 1.8 bug mentioned above)
+            beta1, beta2 = group['betas']
+
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+                params_with_grad.append(p)
+                if p.grad.is_sparse:
+                    raise RuntimeError(
+                        'AdamW does not support sparse gradients')
+                grads.append(p.grad)
+
+                state = self.state[p]
+
+                # State initialization
+                if len(state) == 0:
+                    state['step'] = 0
+                    # Exponential moving average of gradient values
+                    state['exp_avg'] = torch.zeros_like(
+                        p, memory_format=torch.preserve_format)
+                    # Exponential moving average of squared gradient values
+                    state['exp_avg_sq'] = torch.zeros_like(
+                        p, memory_format=torch.preserve_format)
+                    if amsgrad:
+                        # Maintains max of all exp. moving avg. of
+                        # sq. grad. values
+                        state['max_exp_avg_sq'] = torch.zeros_like(
+                            p, memory_format=torch.preserve_format)
+
+                exp_avgs.append(state['exp_avg'])
+                exp_avg_sqs.append(state['exp_avg_sq'])
+
+                if amsgrad:
+                    max_exp_avg_sqs.append(state['max_exp_avg_sq'])
+
+                # update the step count for this param and record it
+                state['step'] += 1
+                state_steps.append(state['step'])
+
+            F.adamw(params_with_grad,
+                    grads,
+                    exp_avgs,
+                    exp_avg_sqs,
+                    max_exp_avg_sqs,
+                    state_steps,
+                    amsgrad,
+                    beta1,
+                    beta2,
+                    group['lr'],
+                    group['weight_decay'],
+                    group['eps'])
+
+        return loss
\ No newline at end of file
diff --git a/mmcv/optims/optimizer.py b/mmcv/optims/optimizer.py
new file mode 100644
index 0000000..10d3772
--- /dev/null
+++ b/mmcv/optims/optimizer.py
@@ -0,0 +1,268 @@
+import copy
+import inspect
+import warnings
+
+import torch
+from torch.nn import GroupNorm, LayerNorm
+from torch.nn.modules.batchnorm import _BatchNorm
+from torch.nn.modules.instancenorm import _InstanceNorm
+
+from mmcv.utils import Registry, build_from_cfg, is_list_of
+from mmcv.utils.ext_loader import check_ops_exist
+
+OPTIMIZERS = Registry('optimizer')
+
+
+def build_optimizer(model, cfg):
+    optimizer_cfg = copy.deepcopy(cfg)
+    paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None)
+    return DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)(model)
+
+
+def register_torch_optimizers():
+    torch_optimizers = []
+    for module_name in dir(torch.optim):
+        if module_name.startswith('__'):
+            continue
+        _optim = getattr(torch.optim, module_name)
+        if inspect.isclass(_optim) and issubclass(_optim,
+                                                  torch.optim.Optimizer):
+            OPTIMIZERS.register_module()(_optim)
+            torch_optimizers.append(module_name)
+    return torch_optimizers
+
+
+TORCH_OPTIMIZERS = register_torch_optimizers()
+
+
+class DefaultOptimizerConstructor:
+    """Default constructor for optimizers.
+
+    By default each parameter shares the same optimizer settings, and we
+    provide an argument ``paramwise_cfg`` to specify parameter-wise settings.
+    It is a dict and may contain the following fields:
+
+    - ``custom_keys`` (dict): Specified parameters-wise settings by keys. If
+      one of the keys in ``custom_keys`` is a substring of the name of one
+      parameter, then the setting of the parameter will be specified by
+      ``custom_keys[key]`` and other settings like ``bias_lr_mult`` etc. will
+      be ignored. It should be noted that the aforementioned ``key`` is the
+      longest key that is a substring of the name of the parameter. If there
+      are multiple matched keys with the same length, then the key with lower
+      alphabet order will be chosen.
+      ``custom_keys[key]`` should be a dict and may contain fields ``lr_mult``
+      and ``decay_mult``. See Example 2 below.
+    - ``bias_lr_mult`` (float): It will be multiplied to the learning
+      rate for all bias parameters (except for those in normalization
+      layers and offset layers of DCN).
+    - ``bias_decay_mult`` (float): It will be multiplied to the weight
+      decay for all bias parameters (except for those in
+      normalization layers, depthwise conv layers, offset layers of DCN).
+    - ``norm_decay_mult`` (float): It will be multiplied to the weight
+      decay for all weight and bias parameters of normalization
+      layers.
+    - ``dwconv_decay_mult`` (float): It will be multiplied to the weight
+      decay for all weight and bias parameters of depthwise conv
+      layers.
+    - ``dcn_offset_lr_mult`` (float): It will be multiplied to the learning
+      rate for parameters of offset layer in the deformable convs
+      of a model.
+ - ``bypass_duplicate`` (bool): If true, the duplicate parameters + would not be added into optimizer. Default: False. + + Note: + 1. If the option ``dcn_offset_lr_mult`` is used, the constructor will + override the effect of ``bias_lr_mult`` in the bias of offset + layer. So be careful when using both ``bias_lr_mult`` and + ``dcn_offset_lr_mult``. If you wish to apply both of them to the + offset layer in deformable convs, set ``dcn_offset_lr_mult`` + to the original ``dcn_offset_lr_mult`` * ``bias_lr_mult``. + 2. If the option ``dcn_offset_lr_mult`` is used, the constructor will + apply it to all the DCN layers in the model. So be careful when + the model contains multiple DCN layers in places other than + backbone. + + Args: + model (:obj:`nn.Module`): The model with parameters to be optimized. + optimizer_cfg (dict): The config dict of the optimizer. + Positional fields are + + - `type`: class name of the optimizer. + + Optional fields are + + - any arguments of the corresponding optimizer type, e.g., + lr, weight_decay, momentum, etc. + paramwise_cfg (dict, optional): Parameter-wise options. + + Example 1: + >>> model = torch.nn.modules.Conv1d(1, 1, 1) + >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9, + >>> weight_decay=0.0001) + >>> paramwise_cfg = dict(norm_decay_mult=0.) + >>> optim_builder = DefaultOptimizerConstructor( + >>> optimizer_cfg, paramwise_cfg) + >>> optimizer = optim_builder(model) + + Example 2: + >>> # assume model have attribute model.backbone and model.cls_head + >>> optimizer_cfg = dict(type='SGD', lr=0.01, weight_decay=0.95) + >>> paramwise_cfg = dict(custom_keys={ + '.backbone': dict(lr_mult=0.1, decay_mult=0.9)}) + >>> optim_builder = DefaultOptimizerConstructor( + >>> optimizer_cfg, paramwise_cfg) + >>> optimizer = optim_builder(model) + >>> # Then the `lr` and `weight_decay` for model.backbone is + >>> # (0.01 * 0.1, 0.95 * 0.9). `lr` and `weight_decay` for + >>> # model.cls_head is (0.01, 0.95). 
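+
+    Example 3 (illustrative; any optimizer registered above can be used):
+        >>> model = torch.nn.modules.Conv1d(1, 1, 1)
+        >>> optimizer_cfg = dict(type='AdamW', lr=2e-4, weight_decay=0.01)
+        >>> paramwise_cfg = dict(bias_lr_mult=2., norm_decay_mult=0.)
+        >>> optim_builder = DefaultOptimizerConstructor(
+        >>>     optimizer_cfg, paramwise_cfg)
+        >>> optimizer = optim_builder(model)
+        >>> # bias parameters get lr 2e-4 * 2 = 4e-4; normalization-layer
+        >>> # parameters get weight_decay 0.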
+ """ + + def __init__(self, optimizer_cfg, paramwise_cfg=None): + if not isinstance(optimizer_cfg, dict): + raise TypeError('optimizer_cfg should be a dict', + f'but got {type(optimizer_cfg)}') + self.optimizer_cfg = optimizer_cfg + self.paramwise_cfg = {} if paramwise_cfg is None else paramwise_cfg + self.base_lr = optimizer_cfg.get('lr', None) + self.base_wd = optimizer_cfg.get('weight_decay', None) + self._validate_cfg() + + def _validate_cfg(self): + if not isinstance(self.paramwise_cfg, dict): + raise TypeError('paramwise_cfg should be None or a dict, ' + f'but got {type(self.paramwise_cfg)}') + + if 'custom_keys' in self.paramwise_cfg: + if not isinstance(self.paramwise_cfg['custom_keys'], dict): + raise TypeError( + 'If specified, custom_keys must be a dict, ' + f'but got {type(self.paramwise_cfg["custom_keys"])}') + if self.base_wd is None: + for key in self.paramwise_cfg['custom_keys']: + if 'decay_mult' in self.paramwise_cfg['custom_keys'][key]: + raise ValueError('base_wd should not be None') + + # get base lr and weight decay + # weight_decay must be explicitly specified if mult is specified + if ('bias_decay_mult' in self.paramwise_cfg + or 'norm_decay_mult' in self.paramwise_cfg + or 'dwconv_decay_mult' in self.paramwise_cfg): + if self.base_wd is None: + raise ValueError('base_wd should not be None') + + def _is_in(self, param_group, param_group_list): + assert is_list_of(param_group_list, dict) + param = set(param_group['params']) + param_set = set() + for group in param_group_list: + param_set.update(set(group['params'])) + + return not param.isdisjoint(param_set) + + def add_params(self, params, module, prefix='', is_dcn_module=None): + """Add all parameters of module to the params list. + + The parameters of the given module will be added to the list of param + groups, with specific rules defined by paramwise_cfg. + + Args: + params (list[dict]): A list of param groups, it will be modified + in place. + module (nn.Module): The module to be added. + prefix (str): The prefix of the module + is_dcn_module (int|float|None): If the current module is a + submodule of DCN, `is_dcn_module` will be passed to + control conv_offset layer's learning rate. Defaults to None. + """ + # get param-wise options + custom_keys = self.paramwise_cfg.get('custom_keys', {}) + # first sort with alphabet order and then sort with reversed len of str + sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True) + + bias_lr_mult = self.paramwise_cfg.get('bias_lr_mult', 1.) + bias_decay_mult = self.paramwise_cfg.get('bias_decay_mult', 1.) + norm_decay_mult = self.paramwise_cfg.get('norm_decay_mult', 1.) + dwconv_decay_mult = self.paramwise_cfg.get('dwconv_decay_mult', 1.) + bypass_duplicate = self.paramwise_cfg.get('bypass_duplicate', False) + dcn_offset_lr_mult = self.paramwise_cfg.get('dcn_offset_lr_mult', 1.) + + # special rules for norm layers and depth-wise conv layers + is_norm = isinstance(module, + (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm)) + is_dwconv = ( + isinstance(module, torch.nn.Conv2d) + and module.in_channels == module.groups) + + for name, param in module.named_parameters(recurse=False): + param_group = {'params': [param]} + if not param.requires_grad: + params.append(param_group) + continue + if bypass_duplicate and self._is_in(param_group, params): + warnings.warn(f'{prefix} is duplicate. 
It is skipped since ' + f'bypass_duplicate={bypass_duplicate}') + continue + # if the parameter match one of the custom keys, ignore other rules + is_custom = False + for key in sorted_keys: + if key in f'{prefix}.{name}': + is_custom = True + lr_mult = custom_keys[key].get('lr_mult', 1.) + param_group['lr'] = self.base_lr * lr_mult + if self.base_wd is not None: + decay_mult = custom_keys[key].get('decay_mult', 1.) + param_group['weight_decay'] = self.base_wd * decay_mult + break + + if not is_custom: + # bias_lr_mult affects all bias parameters + # except for norm.bias dcn.conv_offset.bias + if name == 'bias' and not (is_norm or is_dcn_module): + param_group['lr'] = self.base_lr * bias_lr_mult + + if (prefix.find('conv_offset') != -1 and is_dcn_module + and isinstance(module, torch.nn.Conv2d)): + # deal with both dcn_offset's bias & weight + param_group['lr'] = self.base_lr * dcn_offset_lr_mult + + # apply weight decay policies + if self.base_wd is not None: + # norm decay + if is_norm: + param_group[ + 'weight_decay'] = self.base_wd * norm_decay_mult + # depth-wise conv + elif is_dwconv: + param_group[ + 'weight_decay'] = self.base_wd * dwconv_decay_mult + # bias lr and decay + elif name == 'bias' and not is_dcn_module: + # TODO: current bias_decay_mult will have affect on DCN + param_group[ + 'weight_decay'] = self.base_wd * bias_decay_mult + params.append(param_group) + + if check_ops_exist(): + from mmcv.ops import DeformConv2d, ModulatedDeformConv2d + is_dcn_module = isinstance(module, + (DeformConv2d, ModulatedDeformConv2d)) + else: + is_dcn_module = False + for child_name, child_mod in module.named_children(): + child_prefix = f'{prefix}.{child_name}' if prefix else child_name + self.add_params( + params, + child_mod, + prefix=child_prefix, + is_dcn_module=is_dcn_module) + + def __call__(self, model): + if hasattr(model, 'module'): + model = model.module + + optimizer_cfg = self.optimizer_cfg.copy() + # if no paramwise option is specified, just use the global setting + if not self.paramwise_cfg: + optimizer_cfg['params'] = model.parameters() + return build_from_cfg(optimizer_cfg, OPTIMIZERS) + + # set param-wise lr and weight decay recursively + params = [] + self.add_params(params, model) + optimizer_cfg['params'] = params + + return build_from_cfg(optimizer_cfg, OPTIMIZERS) \ No newline at end of file diff --git a/mmcv/parallel/__init__.py b/mmcv/parallel/__init__.py new file mode 100644 index 0000000..0be77ee --- /dev/null +++ b/mmcv/parallel/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .collate import collate +from .data_container import DataContainer +from .utils import is_module_wrapper \ No newline at end of file diff --git a/mmcv/parallel/collate.py b/mmcv/parallel/collate.py new file mode 100644 index 0000000..d291203 --- /dev/null +++ b/mmcv/parallel/collate.py @@ -0,0 +1,95 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
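+#
+# collate() below first batches samples with collate_dc() (which handles the
+# three DataContainer cases: cpu_only metadata, stackable tensors and
+# non-stackable fields) and then unwraps the resulting DataContainers into
+# plain tensors/lists for the model.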
+from collections.abc import Mapping, Sequence + +import torch +import torch.nn.functional as F +from torch.utils.data.dataloader import default_collate + +from .data_container import DataContainer + +def collate(batch, samples_per_gpu=1): + batch = collate_dc(batch, samples_per_gpu) + data_dict = {} + for key, value in batch.items(): + if isinstance(value, DataContainer): + data_dict[key] = value.data[0] + elif isinstance(value[0], DataContainer): + data_dict[key] = value[0].data + else: + data_dict[key] = value + return data_dict + +def collate_dc(batch, samples_per_gpu=1): + """Puts each data field into a tensor/DataContainer with outer dimension + batch size. + + Extend default_collate to add support for + :type:`~mmcv.parallel.DataContainer`. There are 3 cases. + + 1. cpu_only = True, e.g., meta data + 2. cpu_only = False, stack = True, e.g., images tensors + 3. cpu_only = False, stack = False, e.g., gt bboxes + """ + + if not isinstance(batch, Sequence): + raise TypeError(f'{batch.dtype} is not supported.') + + if isinstance(batch[0], DataContainer): + stacked = [] + if batch[0].cpu_only: + for i in range(0, len(batch), samples_per_gpu): + stacked.append( + [sample.data for sample in batch[i:i + samples_per_gpu]]) + return DataContainer( + stacked, batch[0].stack, batch[0].padding_value, cpu_only=True) + elif batch[0].stack: + for i in range(0, len(batch), samples_per_gpu): + assert isinstance(batch[i].data, torch.Tensor) + + if batch[i].pad_dims is not None: + ndim = batch[i].dim() + assert ndim > batch[i].pad_dims + max_shape = [0 for _ in range(batch[i].pad_dims)] + for dim in range(1, batch[i].pad_dims + 1): + max_shape[dim - 1] = batch[i].size(-dim) + for sample in batch[i:i + samples_per_gpu]: + for dim in range(0, ndim - batch[i].pad_dims): + assert batch[i].size(dim) == sample.size(dim) + for dim in range(1, batch[i].pad_dims + 1): + max_shape[dim - 1] = max(max_shape[dim - 1], + sample.size(-dim)) + padded_samples = [] + for sample in batch[i:i + samples_per_gpu]: + pad = [0 for _ in range(batch[i].pad_dims * 2)] + for dim in range(1, batch[i].pad_dims + 1): + pad[2 * dim - + 1] = max_shape[dim - 1] - sample.size(-dim) + padded_samples.append( + F.pad( + sample.data, pad, value=sample.padding_value)) + stacked.append(default_collate(padded_samples)) + elif batch[i].pad_dims is None: + stacked.append( + default_collate([ + sample.data + for sample in batch[i:i + samples_per_gpu] + ])) + else: + raise ValueError( + 'pad_dims should be either None or integers (1-3)') + + else: + for i in range(0, len(batch), samples_per_gpu): + stacked.append( + [sample.data for sample in batch[i:i + samples_per_gpu]]) + return DataContainer(stacked, batch[0].stack, batch[0].padding_value) + elif isinstance(batch[0], Sequence): + transposed = zip(*batch) + return [collate_dc(samples, samples_per_gpu) for samples in transposed] + elif isinstance(batch[0], Mapping): + return { + key: collate_dc([d[key] for d in batch], samples_per_gpu) + for key in batch[0] + } + else: + return default_collate(batch) diff --git a/mmcv/parallel/data_container.py b/mmcv/parallel/data_container.py new file mode 100644 index 0000000..17ba05b --- /dev/null +++ b/mmcv/parallel/data_container.py @@ -0,0 +1,88 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
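+"""Illustrative note: the three batching behaviors described in the
+``DataContainer`` docstring below map to constructor flags. A hedged sketch::
+
+    import torch
+    from mmcv.parallel import DataContainer
+
+    # case 1: cpu_only=True, e.g. image meta info, never moved to GPU
+    metas = DataContainer(dict(filename='demo.jpg'), cpu_only=True)
+    # case 2: stack=True, e.g. image tensors padded and stacked by collate
+    img = DataContainer(torch.rand(3, 32, 32), stack=True, pad_dims=2)
+    # case 3: default, e.g. gt bboxes kept as a list of tensors
+    boxes = DataContainer(torch.rand(5, 4))
+"""
+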
+import functools +import torch + + +def assert_tensor_type(func): + + @functools.wraps(func) + def wrapper(*args, **kwargs): + if not isinstance(args[0].data, torch.Tensor): + raise AttributeError( + f'{args[0].__class__.__name__} has no attribute ' + f'{func.__name__} for type {args[0].datatype}') + return func(*args, **kwargs) + + return wrapper + + +class DataContainer: + """A container for any type of objects. + + Typically tensors will be stacked in the collate function and sliced along + some dimension in the scatter function. This behavior has some limitations. + 1. All tensors have to be the same size. + 2. Types are limited (numpy array or Tensor). + + We design `DataContainer` and `MMDataParallel` to overcome these + limitations. The behavior can be either of the following. + + - copy to GPU, pad all tensors to the same size and stack them + - copy to GPU without stacking + - leave the objects as is and pass it to the model + - pad_dims specifies the number of last few dimensions to do padding + """ + + def __init__(self, + data, + stack=False, + padding_value=0, + cpu_only=False, + pad_dims=2): + self._data = data + self._cpu_only = cpu_only + self._stack = stack + self._padding_value = padding_value + assert pad_dims in [None, 1, 2, 3] + self._pad_dims = pad_dims + + def __repr__(self): + return f'{self.__class__.__name__}({repr(self.data)})' + + def __len__(self): + return len(self._data) + + @property + def data(self): + return self._data + + @property + def datatype(self): + if isinstance(self.data, torch.Tensor): + return self.data.type() + else: + return type(self.data) + + @property + def cpu_only(self): + return self._cpu_only + + @property + def stack(self): + return self._stack + + @property + def padding_value(self): + return self._padding_value + + @property + def pad_dims(self): + return self._pad_dims + + @assert_tensor_type + def size(self, *args, **kwargs): + return self.data.size(*args, **kwargs) + + @assert_tensor_type + def dim(self): + return self.data.dim() diff --git a/mmcv/parallel/registry.py b/mmcv/parallel/registry.py new file mode 100644 index 0000000..144f9fb --- /dev/null +++ b/mmcv/parallel/registry.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from torch.nn.parallel import DataParallel, DistributedDataParallel + +from mmcv.utils import Registry + +MODULE_WRAPPERS = Registry('module wrapper') +MODULE_WRAPPERS.register_module(module=DataParallel) +MODULE_WRAPPERS.register_module(module=DistributedDataParallel) diff --git a/mmcv/parallel/utils.py b/mmcv/parallel/utils.py new file mode 100644 index 0000000..0f5712c --- /dev/null +++ b/mmcv/parallel/utils.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .registry import MODULE_WRAPPERS + + +def is_module_wrapper(module): + """Check if a module is a module wrapper. + + The following 3 modules in MMCV (and their subclasses) are regarded as + module wrappers: DataParallel, DistributedDataParallel, + MMDistributedDataParallel (the deprecated version). You may add you own + module wrapper by registering it to mmcv.parallel.MODULE_WRAPPERS. + + Args: + module (nn.Module): The module to be checked. + + Returns: + bool: True if the input module is a module wrapper. 
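+
+    Example (illustrative; ``DataParallel`` is registered as a wrapper in
+    ``mmcv.parallel.registry``):
+
+        >>> import torch.nn as nn
+        >>> from torch.nn.parallel import DataParallel
+        >>> is_module_wrapper(nn.Linear(2, 2))
+        False
+        >>> is_module_wrapper(DataParallel(nn.Linear(2, 2)))
+        True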
+ """ + module_wrappers = tuple(MODULE_WRAPPERS.module_dict.values()) + return isinstance(module, module_wrappers) diff --git a/mmcv/runner/__init__.py b/mmcv/runner/__init__.py new file mode 100644 index 0000000..2705045 --- /dev/null +++ b/mmcv/runner/__init__.py @@ -0,0 +1,3 @@ +from .hooks import DistEvalHook, EvalHook, OptimizerHook, HOOKS, DistSamplerSeedHook, Fp16OptimizerHook +from .epoch_based_runner import EpochBasedRunner +from .builder import build_runner \ No newline at end of file diff --git a/mmcv/runner/base_runner.py b/mmcv/runner/base_runner.py new file mode 100644 index 0000000..5d39f23 --- /dev/null +++ b/mmcv/runner/base_runner.py @@ -0,0 +1,532 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import logging +import os.path as osp +import warnings +from abc import ABCMeta, abstractmethod + +import torch +from torch.optim import Optimizer + +from .hooks import HOOKS, Hook +from ..parallel import is_module_wrapper +from ..utils import load_checkpoint, LogBuffer, is_str, mkdir_or_exist, build_from_cfg, \ + Config, get_dist_info, get_time_str, Priority, get_priority + + +class BaseRunner(metaclass=ABCMeta): + """The base class of Runner, a training helper for PyTorch. + + All subclasses should implement the following APIs: + + - ``run()`` + - ``train()`` + - ``val()`` + - ``save_checkpoint()`` + + Args: + model (:obj:`torch.nn.Module`): The model to be run. + batch_processor (callable): A callable method that process a data + batch. The interface of this method should be + `batch_processor(model, data, train_mode) -> dict` + optimizer (dict or :obj:`torch.optim.Optimizer`): It can be either an + optimizer (in most cases) or a dict of optimizers (in models that + requires more than one optimizer, e.g., GAN). + work_dir (str, optional): The working directory to save checkpoints + and logs. Defaults to None. + logger (:obj:`logging.Logger`): Logger used during training. + Defaults to None. (The default value is just for backward + compatibility) + meta (dict | None): A dict records some import information such as + environment info and seed, which will be logged in logger hook. + Defaults to None. + max_epochs (int, optional): Total training epochs. + max_iters (int, optional): Total training iterations. + """ + + def __init__(self, + model, + batch_processor=None, + optimizer=None, + work_dir=None, + logger=None, + meta=None, + max_iters=None, + max_epochs=None): + if batch_processor is not None: + if not callable(batch_processor): + raise TypeError('batch_processor must be callable, ' + f'but got {type(batch_processor)}') + warnings.warn('batch_processor is deprecated, please implement ' + 'train_step() and val_step() in the model instead.') + # raise an error is `batch_processor` is not None and + # `model.train_step()` exists. 
+ if is_module_wrapper(model): + _model = model.module + else: + _model = model + + # check the type of `optimizer` + if isinstance(optimizer, dict): + for name, optim in optimizer.items(): + if not isinstance(optim, Optimizer): + raise TypeError( + f'optimizer must be a dict of torch.optim.Optimizers, ' + f'but optimizer["{name}"] is a {type(optim)}') + elif not isinstance(optimizer, Optimizer) and optimizer is not None: + raise TypeError( + f'optimizer must be a torch.optim.Optimizer object ' + f'or dict or None, but got {type(optimizer)}') + + # check the type of `logger` + if not isinstance(logger, logging.Logger): + raise TypeError(f'logger must be a logging.Logger object, ' + f'but got {type(logger)}') + + # check the type of `meta` + if meta is not None and not isinstance(meta, dict): + raise TypeError( + f'meta must be a dict or None, but got {type(meta)}') + + self.model = model + self.batch_processor = batch_processor + self.optimizer = optimizer + self.logger = logger + self.meta = meta + # create work_dir + if is_str(work_dir): + self.work_dir = osp.abspath(work_dir) + mkdir_or_exist(self.work_dir) + elif work_dir is None: + self.work_dir = None + else: + raise TypeError('"work_dir" must be a str or None') + + # get model name from the model class + if hasattr(self.model, 'module'): + self._model_name = self.model.module.__class__.__name__ + else: + self._model_name = self.model.__class__.__name__ + + self._rank, self._world_size = get_dist_info() + self.timestamp = get_time_str() + self.mode = None + self._hooks = [] + self._epoch = 0 + self._iter = 0 + self._inner_iter = 0 + + if max_epochs is not None and max_iters is not None: + raise ValueError( + 'Only one of `max_epochs` or `max_iters` can be set.') + + self._max_epochs = max_epochs + self._max_iters = max_iters + # TODO: Redesign LogBuffer, it is not flexible and elegant enough + self.log_buffer = LogBuffer() + + @property + def model_name(self): + """str: Name of the model, usually the module class name.""" + return self._model_name + + @property + def rank(self): + """int: Rank of current process. (distributed training)""" + return self._rank + + @property + def world_size(self): + """int: Number of processes participating in the job. + (distributed training)""" + return self._world_size + + @property + def hooks(self): + """list[:obj:`Hook`]: A list of registered hooks.""" + return self._hooks + + @property + def epoch(self): + """int: Current epoch.""" + return self._epoch + + @property + def iter(self): + """int: Current iteration.""" + return self._iter + + @property + def inner_iter(self): + """int: Iteration in an epoch.""" + return self._inner_iter + + @property + def max_epochs(self): + """int: Maximum training epochs.""" + return self._max_epochs + + @property + def max_iters(self): + """int: Maximum training iterations.""" + return self._max_iters + + @abstractmethod + def train(self): + pass + + @abstractmethod + def val(self): + pass + + @abstractmethod + def run(self, data_loaders, workflow, **kwargs): + pass + + @abstractmethod + def save_checkpoint(self, + out_dir, + filename_tmpl, + save_optimizer=True, + meta=None, + create_symlink=True): + pass + + def current_lr(self): + """Get current learning rates. + + Returns: + list[float] | dict[str, list[float]]: Current learning rates of all + param groups. If the runner has a dict of optimizers, this + method will return a dict. 
+ """ + if isinstance(self.optimizer, torch.optim.Optimizer): + lr = [group['lr'] for group in self.optimizer.param_groups] + elif isinstance(self.optimizer, dict): + lr = dict() + for name, optim in self.optimizer.items(): + lr[name] = [group['lr'] for group in optim.param_groups] + else: + raise RuntimeError( + 'lr is not applicable because optimizer does not exist.') + return lr + + def current_momentum(self): + """Get current momentums. + + Returns: + list[float] | dict[str, list[float]]: Current momentums of all + param groups. If the runner has a dict of optimizers, this + method will return a dict. + """ + + def _get_momentum(optimizer): + momentums = [] + for group in optimizer.param_groups: + if 'momentum' in group.keys(): + momentums.append(group['momentum']) + elif 'betas' in group.keys(): + momentums.append(group['betas'][0]) + else: + momentums.append(0) + return momentums + + if self.optimizer is None: + raise RuntimeError( + 'momentum is not applicable because optimizer does not exist.') + elif isinstance(self.optimizer, torch.optim.Optimizer): + momentums = _get_momentum(self.optimizer) + elif isinstance(self.optimizer, dict): + momentums = dict() + for name, optim in self.optimizer.items(): + momentums[name] = _get_momentum(optim) + return momentums + + def register_hook(self, hook, priority='NORMAL'): + """Register a hook into the hook list. + + The hook will be inserted into a priority queue, with the specified + priority (See :class:`Priority` for details of priorities). + For hooks with the same priority, they will be triggered in the same + order as they are registered. + + Args: + hook (:obj:`Hook`): The hook to be registered. + priority (int or str or :obj:`Priority`): Hook priority. + Lower value means higher priority. + """ + assert isinstance(hook, Hook) + if hasattr(hook, 'priority'): + raise ValueError('"priority" is a reserved attribute for hooks') + priority = get_priority(priority) + hook.priority = priority + # insert the hook to a sorted list + inserted = False + for i in range(len(self._hooks) - 1, -1, -1): + if priority >= self._hooks[i].priority: + self._hooks.insert(i + 1, hook) + inserted = True + break + if not inserted: + self._hooks.insert(0, hook) + + def register_hook_from_cfg(self, hook_cfg): + """Register a hook from its cfg. + + Args: + hook_cfg (dict): Hook config. It should have at least keys 'type' + and 'priority' indicating its type and priority. + + Notes: + The specific hook class to register should not use 'type' and + 'priority' arguments during initialization. + """ + hook_cfg = hook_cfg.copy() + priority = hook_cfg.pop('priority', 'NORMAL') + hook = build_from_cfg(hook_cfg, HOOKS) + self.register_hook(hook, priority=priority) + + def call_hook(self, fn_name): + """Call all hooks. + + Args: + fn_name (str): The function name in each hook to be called, such as + "before_train_epoch". 
+ """ + for hook in self._hooks: + getattr(hook, fn_name)(self) + + def get_hook_info(self): + # Get hooks info in each stage + stage_hook_map = {stage: [] for stage in Hook.stages} + for hook in self.hooks: + try: + priority = Priority(hook.priority).name + except ValueError: + priority = hook.priority + classname = hook.__class__.__name__ + hook_info = f'({priority:<12}) {classname:<35}' + for trigger_stage in hook.get_triggered_stages(): + stage_hook_map[trigger_stage].append(hook_info) + + stage_hook_infos = [] + for stage in Hook.stages: + hook_infos = stage_hook_map[stage] + if len(hook_infos) > 0: + info = f'{stage}:\n' + info += '\n'.join(hook_infos) + info += '\n -------------------- ' + stage_hook_infos.append(info) + return '\n'.join(stage_hook_infos) + + def load_checkpoint(self, + filename, + map_location='cpu', + strict=False, + revise_keys=[(r'^module.', '')]): + return load_checkpoint( + self.model, + filename, + map_location, + strict, + self.logger, + revise_keys=revise_keys) + + def resume(self, + checkpoint, + resume_optimizer=True, + map_location='default'): + if map_location == 'default': + if torch.cuda.is_available(): + device_id = torch.cuda.current_device() + checkpoint = self.load_checkpoint( + checkpoint, + map_location=lambda storage, loc: storage.cuda(device_id)) + else: + checkpoint = self.load_checkpoint(checkpoint) + else: + checkpoint = self.load_checkpoint( + checkpoint, map_location=map_location) + + self._epoch = checkpoint['meta']['epoch'] + self._iter = checkpoint['meta']['iter'] + if self.meta is None: + self.meta = {} + self.meta.setdefault('hook_msgs', {}) + # load `last_ckpt`, `best_score`, `best_ckpt`, etc. for hook messages + self.meta['hook_msgs'].update(checkpoint['meta'].get('hook_msgs', {})) + + # Re-calculate the number of iterations when resuming + # models with different number of GPUs + if 'config' in checkpoint['meta']: + config = Config.fromstring( + checkpoint['meta']['config'], file_format='.py') + previous_gpu_ids = config.get('gpu_ids', None) + if previous_gpu_ids and len(previous_gpu_ids) > 0 and len( + previous_gpu_ids) != self.world_size: + self._iter = int(self._iter * len(previous_gpu_ids) / + self.world_size) + self.logger.info('the iteration number is changed due to ' + 'change of GPU number') + + # resume meta information meta + self.meta = checkpoint['meta'] + + if 'optimizer' in checkpoint and resume_optimizer: + if isinstance(self.optimizer, Optimizer): + self.optimizer.load_state_dict(checkpoint['optimizer']) + elif isinstance(self.optimizer, dict): + for k in self.optimizer.keys(): + self.optimizer[k].load_state_dict( + checkpoint['optimizer'][k]) + else: + raise TypeError( + 'Optimizer should be dict or torch.optim.Optimizer ' + f'but got {type(self.optimizer)}') + + self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter) + + def register_lr_hook(self, lr_config): + if lr_config is None: + return + elif isinstance(lr_config, dict): + assert 'policy' in lr_config + policy_type = lr_config.pop('policy') + # If the type of policy is all in lower case, e.g., 'cyclic', + # then its first letter will be capitalized, e.g., to be 'Cyclic'. + # This is for the convenient usage of Lr updater. + # Since this is not applicable for ` + # CosineAnnealingLrUpdater`, + # the string will not be changed if it contains capital letters. 
+ if policy_type == policy_type.lower(): + policy_type = policy_type.title() + hook_type = policy_type + 'LrUpdaterHook' + lr_config['type'] = hook_type + hook = build_from_cfg(lr_config, HOOKS) + else: + hook = lr_config + self.register_hook(hook, priority='VERY_HIGH') + + def register_momentum_hook(self, momentum_config): + if momentum_config is None: + return + if isinstance(momentum_config, dict): + assert 'policy' in momentum_config + policy_type = momentum_config.pop('policy') + # If the type of policy is all in lower case, e.g., 'cyclic', + # then its first letter will be capitalized, e.g., to be 'Cyclic'. + # This is for the convenient usage of momentum updater. + # Since this is not applicable for + # `CosineAnnealingMomentumUpdater`, + # the string will not be changed if it contains capital letters. + if policy_type == policy_type.lower(): + policy_type = policy_type.title() + hook_type = policy_type + 'MomentumUpdaterHook' + momentum_config['type'] = hook_type + hook = build_from_cfg(momentum_config, HOOKS) + else: + hook = momentum_config + self.register_hook(hook, priority='HIGH') + + def register_optimizer_hook(self, optimizer_config): + if optimizer_config is None: + return + if isinstance(optimizer_config, dict): + optimizer_config.setdefault('type', 'OptimizerHook') + hook = build_from_cfg(optimizer_config, HOOKS) + else: + hook = optimizer_config + self.register_hook(hook, priority='ABOVE_NORMAL') + + def register_checkpoint_hook(self, checkpoint_config): + if checkpoint_config is None: + return + if isinstance(checkpoint_config, dict): + checkpoint_config.setdefault('type', 'CheckpointHook') + hook = build_from_cfg(checkpoint_config, HOOKS) + else: + hook = checkpoint_config + self.register_hook(hook, priority='NORMAL') + + def register_logger_hooks(self, log_config): + if log_config is None: + return + log_interval = log_config['interval'] + for info in log_config['hooks']: + logger_hook = build_from_cfg( + info, HOOKS, default_args=dict(interval=log_interval)) + self.register_hook(logger_hook, priority='VERY_LOW') + + def register_timer_hook(self, timer_config): + if timer_config is None: + return + if isinstance(timer_config, dict): + timer_config_ = copy.deepcopy(timer_config) + hook = build_from_cfg(timer_config_, HOOKS) + else: + hook = timer_config + self.register_hook(hook, priority='LOW') + + def register_custom_hooks(self, custom_config): + if custom_config is None: + return + + if not isinstance(custom_config, list): + custom_config = [custom_config] + + for item in custom_config: + if isinstance(item, dict): + self.register_hook_from_cfg(item) + else: + self.register_hook(item, priority='NORMAL') + + def register_profiler_hook(self, profiler_config): + if profiler_config is None: + return + if isinstance(profiler_config, dict): + profiler_config.setdefault('type', 'ProfilerHook') + hook = build_from_cfg(profiler_config, HOOKS) + else: + hook = profiler_config + self.register_hook(hook) + + def register_training_hooks(self, + lr_config, + optimizer_config=None, + checkpoint_config=None, + log_config=None, + momentum_config=None, + timer_config=dict(type='IterTimerHook'), + custom_hooks_config=None): + """Register default and custom hooks for training. 
+
+        Default and custom hooks include:
+
+        +----------------------+-------------------------+
+        | Hooks                | Priority                |
+        +======================+=========================+
+        | LrUpdaterHook        | VERY_HIGH (10)          |
+        +----------------------+-------------------------+
+        | MomentumUpdaterHook  | HIGH (30)               |
+        +----------------------+-------------------------+
+        | OptimizerStepperHook | ABOVE_NORMAL (40)       |
+        +----------------------+-------------------------+
+        | CheckpointSaverHook  | NORMAL (50)             |
+        +----------------------+-------------------------+
+        | IterTimerHook        | LOW (70)                |
+        +----------------------+-------------------------+
+        | LoggerHook(s)        | VERY_LOW (90)           |
+        +----------------------+-------------------------+
+        | CustomHook(s)        | defaults to NORMAL (50) |
+        +----------------------+-------------------------+
+
+        If custom hooks have the same priority as default hooks, custom hooks
+        will be triggered after default hooks.
+        """
+        self.register_lr_hook(lr_config)
+        self.register_momentum_hook(momentum_config)
+        self.register_optimizer_hook(optimizer_config)
+        self.register_checkpoint_hook(checkpoint_config)
+        self.register_timer_hook(timer_config)
+        self.register_logger_hooks(log_config)
+        self.register_custom_hooks(custom_hooks_config)
diff --git a/mmcv/runner/builder.py b/mmcv/runner/builder.py
new file mode 100644
index 0000000..6443fe3
--- /dev/null
+++ b/mmcv/runner/builder.py
@@ -0,0 +1,10 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import copy
+from mmcv.utils import Registry
+
+RUNNERS = Registry('runner')
+
+def build_runner(cfg, default_args=None):
+    runner_cfg = copy.deepcopy(cfg)
+    runner = RUNNERS.build(runner_cfg, default_args=default_args)
+    return runner
\ No newline at end of file
diff --git a/mmcv/runner/epoch_based_runner.py b/mmcv/runner/epoch_based_runner.py
new file mode 100644
index 0000000..7139e80
--- /dev/null
+++ b/mmcv/runner/epoch_based_runner.py
@@ -0,0 +1,262 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import os.path as osp
+import platform
+import shutil
+import time
+import warnings
+
+import torch
+
+from .base_runner import BaseRunner
+from .builder import RUNNERS
+from ..parallel import DataContainer
+from ..utils import save_checkpoint, is_list_of, symlink, get_host_info
+
+
+@RUNNERS.register_module()
+class EpochBasedRunner(BaseRunner):
+    """Epoch-based Runner.
+
+    This runner trains models epoch by epoch.
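+
+    A minimal usage sketch (illustrative only; assumes a configured ``model``,
+    ``optimizer``, ``logger`` and a ``train_loader``, and that the ``step``
+    LR policy is available from ``lr_updater``)::
+
+        runner = EpochBasedRunner(
+            model, optimizer=optimizer, work_dir='./work_dir',
+            logger=logger, max_epochs=12)
+        runner.register_training_hooks(
+            lr_config=dict(policy='step', step=[8, 11]),
+            optimizer_config=dict(grad_clip=None),
+            checkpoint_config=dict(interval=1),
+            log_config=dict(interval=50, hooks=[dict(type='TextLoggerHook')]))
+        runner.run([train_loader], [('train', 1)])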
+ """ + + def run_iter(self, data_batch, train_mode, **kwargs): + if self.batch_processor is not None: + outputs = self.batch_processor( + self.model, data_batch, train_mode=train_mode, **kwargs) + elif train_mode: + outputs = self.model(data_batch, return_loss=train_mode, **kwargs) + else: + outputs = self.model.val_step(data_batch, self.optimizer, **kwargs) + if not isinstance(outputs, dict): + raise TypeError('"batch_processor()" or "model.train_step()"' + 'and "model.val_step()" must return a dict') + if 'log_vars' in outputs: + self.log_buffer.update(outputs['log_vars'], outputs['num_samples']) + self.outputs = outputs + + def train(self, data_loader, **kwargs): + self.model.train() + self.mode = 'train' + self.data_loader = data_loader + self._max_iters = self._max_epochs * len(self.data_loader) + self.call_hook('before_train_epoch') + time.sleep(2) # Prevent possible deadlock during epoch transition + for i, data_batch in enumerate(self.data_loader): + self._inner_iter = i + self.call_hook('before_train_iter') + self.run_iter(data_batch, train_mode=True, **kwargs) + self.call_hook('after_train_iter') + self._iter += 1 + + self.call_hook('after_train_epoch') + self._epoch += 1 + + @torch.no_grad() + def val(self, data_loader, **kwargs): + self.model.eval() + self.mode = 'val' + self.data_loader = data_loader + self.call_hook('before_val_epoch') + time.sleep(2) # Prevent possible deadlock during epoch transition + for i, data_batch in enumerate(self.data_loader): + self._inner_iter = i + self.call_hook('before_val_iter') + self.run_iter(data_batch, train_mode=False) + self.call_hook('after_val_iter') + + + self.call_hook('after_val_epoch') + + def run(self, data_loaders, workflow, max_epochs=None, **kwargs): + """Start running. + + Args: + data_loaders (list[:obj:`DataLoader`]): Dataloaders for training + and validation. + workflow (list[tuple]): A list of (phase, epochs) to specify the + running order and epochs. E.g, [('train', 2), ('val', 1)] means + running 2 epochs for training and 1 epoch for validation, + iteratively. 
+ """ + assert isinstance(data_loaders, list) + assert is_list_of(workflow, tuple) + assert len(data_loaders) == len(workflow) + if max_epochs is not None: + warnings.warn( + 'setting max_epochs in run is deprecated, ' + 'please set max_epochs in runner_config', DeprecationWarning) + self._max_epochs = max_epochs + + assert self._max_epochs is not None, ( + 'max_epochs must be specified during instantiation') + + for i, flow in enumerate(workflow): + mode, epochs = flow + if mode == 'train': + self._max_iters = self._max_epochs * len(data_loaders[i]) + break + + work_dir = self.work_dir if self.work_dir is not None else 'NONE' + self.logger.info('Start running, host: %s, work_dir: %s', + get_host_info(), work_dir) + self.logger.info('Hooks will be executed in the following order:\n%s', + self.get_hook_info()) + self.logger.info('workflow: %s, max: %d epochs', workflow, + self._max_epochs) + self.call_hook('before_run') + + while self.epoch < self._max_epochs: + for i, flow in enumerate(workflow): + mode, epochs = flow + if isinstance(mode, str): # self.train() + if not hasattr(self, mode): + raise ValueError( + f'runner has no method named "{mode}" to run an ' + 'epoch') + epoch_runner = getattr(self, mode) + else: + raise TypeError( + 'mode in workflow must be a str, but got {}'.format( + type(mode))) + + for _ in range(epochs): + if mode == 'train' and self.epoch >= self._max_epochs: + break + epoch_runner(data_loaders[i], **kwargs) + + time.sleep(1) # wait for some hooks like loggers to finish + self.call_hook('after_run') + + def save_checkpoint(self, + out_dir, + filename_tmpl='epoch_{}.pth', + save_optimizer=True, + meta=None, + create_symlink=True): + """Save the checkpoint. + + Args: + out_dir (str): The directory that checkpoints are saved. + filename_tmpl (str, optional): The checkpoint filename template, + which contains a placeholder for the epoch number. + Defaults to 'epoch_{}.pth'. + save_optimizer (bool, optional): Whether to save the optimizer to + the checkpoint. Defaults to True. + meta (dict, optional): The meta information to be saved in the + checkpoint. Defaults to None. + create_symlink (bool, optional): Whether to create a symlink + "latest.pth" to point to the latest checkpoint. + Defaults to True. + """ + if meta is None: + meta = {} + elif not isinstance(meta, dict): + raise TypeError( + f'meta should be a dict or None, but got {type(meta)}') + if self.meta is not None: + meta.update(self.meta) + # Note: meta.update(self.meta) should be done before + # meta.update(epoch=self.epoch + 1, iter=self.iter) otherwise + # there will be problems with resumed checkpoints. + # More details in https://github.com/open-mmlab/mmcv/pull/1108 + meta.update(epoch=self.epoch + 1, iter=self.iter) + + filename = filename_tmpl.format(self.epoch + 1) + filepath = osp.join(out_dir, filename) + optimizer = self.optimizer if save_optimizer else None + save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta) + # in some environments, `os.symlink` is not supported, you may need to + # set `create_symlink` to False + if create_symlink: + dst_file = osp.join(out_dir, 'latest.pth') + if platform.system() != 'Windows': + symlink(filename, dst_file) + else: + shutil.copy(filepath, dst_file) + + + +@RUNNERS.register_module() +class EpochBasedRunner_video(EpochBasedRunner): + + ''' + # basic logic + + input_sequence = [a, b, c] # given a sequence of samples + + prev_bev = None + for each in input_sequcene[:-1] + prev_bev = eval_model(each, prev_bev)) # inference only. 
+ + model(input_sequcene[-1], prev_bev) # train the last sample. + ''' + + def __init__(self, + model, + eval_model=None, + batch_processor=None, + optimizer=None, + work_dir=None, + logger=None, + meta=None, + keys=['gt_bboxes_3d', 'gt_labels_3d', 'img'], + max_iters=None, + max_epochs=None): + super().__init__(model, + batch_processor, + optimizer, + work_dir, + logger, + meta, + max_iters, + max_epochs) + keys.append('img_metas') + self.keys = keys + self.eval_model = eval_model + self.eval_model.eval() + + def run_iter(self, data_batch, train_mode, **kwargs): + if self.batch_processor is not None: + assert False + # outputs = self.batch_processor( + # self.model, data_batch, train_mode=train_mode, **kwargs) + elif train_mode: + + num_samples = data_batch['img'].data[0].size(1) + data_list = [] + prev_bev = None + for i in range(num_samples): + data = {} + for key in self.keys: + if key not in ['img_metas', 'img', 'points']: + data[key] = data_batch[key] + else: + if key == 'img': + data['img'] = DataContainer(data=[data_batch['img'].data[0][:, i]], cpu_only=data_batch['img'].cpu_only, stack=True) + elif key == 'img_metas': + data['img_metas'] = DataContainer(data=[[each[i] for each in data_batch['img_metas'].data[0]]], cpu_only=data_batch['img_metas'].cpu_only) + else: + assert False + data_list.append(data) + with torch.no_grad(): + for i in range(num_samples-1): + if i>0: data_list[i]['prev_bev'] = DataContainer(data=[prev_bev], cpu_only=False) + prev_bev = self.eval_model.val_step(data_list[i], self.optimizer, **kwargs) + + data_list[-1]['prev_bev'] = DataContainer(data=[prev_bev], cpu_only=False) + outputs = self.model.train_step(data_list[-1], self.optimizer, **kwargs) + else: + assert False + # outputs = self.model.val_step(data_batch, self.optimizer, **kwargs) + + if not isinstance(outputs, dict): + raise TypeError('"batch_processor()" or "model.train_step()"' + 'and "model.val_step()" must return a dict') + if 'log_vars' in outputs: + self.log_buffer.update(outputs['log_vars'], outputs['num_samples']) + self.outputs = outputs + diff --git a/mmcv/runner/hooks/__init__.py b/mmcv/runner/hooks/__init__.py new file mode 100644 index 0000000..4c5c3a2 --- /dev/null +++ b/mmcv/runner/hooks/__init__.py @@ -0,0 +1,9 @@ +from .evaluation import DistEvalHook, EvalHook +from .optimizer import OptimizerHook, Fp16OptimizerHook +from .sampler_seed import DistSamplerSeedHook +from .hook import HOOKS, Hook +from .lr_updater import LrUpdaterHook +from .checkpoint import CheckpointHook +from .iter_timer import IterTimerHook +from .logger import * +from .vad_hooks import * \ No newline at end of file diff --git a/mmcv/runner/hooks/checkpoint.py b/mmcv/runner/hooks/checkpoint.py new file mode 100644 index 0000000..0cf051d --- /dev/null +++ b/mmcv/runner/hooks/checkpoint.py @@ -0,0 +1,167 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import warnings + +from mmcv.fileio.file_client import FileClient +from mmcv.utils import allreduce_params, master_only +from .hook import HOOKS, Hook + + +@HOOKS.register_module() +class CheckpointHook(Hook): + """Save checkpoints periodically. + + Args: + interval (int): The saving period. If ``by_epoch=True``, interval + indicates epochs, otherwise it indicates iterations. + Default: -1, which means "never". + by_epoch (bool): Saving checkpoints by epoch or by iteration. + Default: True. + save_optimizer (bool): Whether to save optimizer state_dict in the + checkpoint. It is usually used for resuming experiments. + Default: True. 
+ out_dir (str, optional): The root directory to save checkpoints. If not + specified, ``runner.work_dir`` will be used by default. If + specified, the ``out_dir`` will be the concatenation of ``out_dir`` + and the last level directory of ``runner.work_dir``. + `Changed in version 1.3.16.` + max_keep_ckpts (int, optional): The maximum checkpoints to keep. + In some cases we want only the latest few checkpoints and would + like to delete old ones to save the disk space. + Default: -1, which means unlimited. + save_last (bool, optional): Whether to force the last checkpoint to be + saved regardless of interval. Default: True. + sync_buffer (bool, optional): Whether to synchronize buffers in + different gpus. Default: False. + file_client_args (dict, optional): Arguments to instantiate a + FileClient. See :class:`mmcv.fileio.FileClient` for details. + Default: None. + `New in version 1.3.16.` + + .. warning:: + Before v1.3.16, the ``out_dir`` argument indicates the path where the + checkpoint is stored. However, since v1.3.16, ``out_dir`` indicates the + root directory and the final path to save checkpoint is the + concatenation of ``out_dir`` and the last level directory of + ``runner.work_dir``. Suppose the value of ``out_dir`` is "/path/of/A" + and the value of ``runner.work_dir`` is "/path/of/B", then the final + path will be "/path/of/A/B". + """ + + def __init__(self, + interval=-1, + by_epoch=True, + save_optimizer=True, + out_dir=None, + max_keep_ckpts=-1, + save_last=True, + sync_buffer=False, + file_client_args=None, + **kwargs): + self.interval = interval + self.by_epoch = by_epoch + self.save_optimizer = save_optimizer + self.out_dir = out_dir + self.max_keep_ckpts = max_keep_ckpts + self.save_last = save_last + self.args = kwargs + self.sync_buffer = sync_buffer + self.file_client_args = file_client_args + + def before_run(self, runner): + if not self.out_dir: + self.out_dir = runner.work_dir + + self.file_client = FileClient.infer_client(self.file_client_args, + self.out_dir) + + # if `self.out_dir` is not equal to `runner.work_dir`, it means that + # `self.out_dir` is set so the final `self.out_dir` is the + # concatenation of `self.out_dir` and the last level directory of + # `runner.work_dir` + if self.out_dir != runner.work_dir: + basename = osp.basename(runner.work_dir.rstrip(osp.sep)) + self.out_dir = self.file_client.join_path(self.out_dir, basename) + + runner.logger.info((f'Checkpoints will be saved to {self.out_dir} by ' + f'{self.file_client.name}.')) + + # disable the create_symlink option because some file backends do not + # allow to create a symlink + if 'create_symlink' in self.args: + if self.args[ + 'create_symlink'] and not self.file_client.allow_symlink: + self.args['create_symlink'] = False + warnings.warn( + ('create_symlink is set as True by the user but is changed' + 'to be False because creating symbolic link is not ' + f'allowed in {self.file_client.name}')) + else: + self.args['create_symlink'] = self.file_client.allow_symlink + + def after_train_epoch(self, runner): + if not self.by_epoch: + return + + # save checkpoint for following cases: + # 1. every ``self.interval`` epochs + # 2. 
reach the last epoch of training + if self.every_n_epochs( + runner, self.interval) or (self.save_last + and self.is_last_epoch(runner)): + runner.logger.info( + f'Saving checkpoint at {runner.epoch + 1} epochs') + if self.sync_buffer: + allreduce_params(runner.model.buffers()) + self._save_checkpoint(runner) + + @master_only + def _save_checkpoint(self, runner): + """Save the current checkpoint and delete unwanted checkpoint.""" + runner.save_checkpoint( + self.out_dir, save_optimizer=self.save_optimizer, **self.args) + if runner.meta is not None: + if self.by_epoch: + cur_ckpt_filename = self.args.get( + 'filename_tmpl', 'epoch_{}.pth').format(runner.epoch + 1) + else: + cur_ckpt_filename = self.args.get( + 'filename_tmpl', 'iter_{}.pth').format(runner.iter + 1) + runner.meta.setdefault('hook_msgs', dict()) + runner.meta['hook_msgs']['last_ckpt'] = self.file_client.join_path( + self.out_dir, cur_ckpt_filename) + # remove other checkpoints + if self.max_keep_ckpts > 0: + if self.by_epoch: + name = 'epoch_{}.pth' + current_ckpt = runner.epoch + 1 + else: + name = 'iter_{}.pth' + current_ckpt = runner.iter + 1 + redundant_ckpts = range( + current_ckpt - self.max_keep_ckpts * self.interval, 0, + -self.interval) + filename_tmpl = self.args.get('filename_tmpl', name) + for _step in redundant_ckpts: + ckpt_path = self.file_client.join_path( + self.out_dir, filename_tmpl.format(_step)) + if self.file_client.isfile(ckpt_path): + self.file_client.remove(ckpt_path) + else: + break + + def after_train_iter(self, runner): + if self.by_epoch: + return + + # save checkpoint for following cases: + # 1. every ``self.interval`` iterations + # 2. reach the last iteration of training + if self.every_n_iters( + runner, self.interval) or (self.save_last + and self.is_last_iter(runner)): + runner.logger.info( + f'Saving checkpoint at {runner.iter + 1} iterations') + if self.sync_buffer: + allreduce_params(runner.model.buffers()) + self._save_checkpoint(runner) diff --git a/mmcv/runner/hooks/evaluation.py b/mmcv/runner/hooks/evaluation.py new file mode 100644 index 0000000..b09243a --- /dev/null +++ b/mmcv/runner/hooks/evaluation.py @@ -0,0 +1,507 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import warnings +from math import inf + +import torch.distributed as dist +from torch.nn.modules.batchnorm import _BatchNorm +from torch.utils.data import DataLoader + +from mmcv.fileio.file_client import FileClient +from mmcv.utils import is_seq_of +from .hook import Hook +from .logger import LoggerHook + + +class EvalHook(Hook): + """Non-Distributed evaluation hook. + + This hook will regularly perform evaluation in a given interval when + performing in non-distributed environment. + + Args: + dataloader (DataLoader): A PyTorch dataloader, whose dataset has + implemented ``evaluate`` function. + start (int | None, optional): Evaluation starting epoch. It enables + evaluation before the training starts if ``start`` <= the resuming + epoch. If None, whether to evaluate is merely decided by + ``interval``. Default: None. + interval (int): Evaluation interval. Default: 1. + by_epoch (bool): Determine perform evaluation by epoch or by iteration. + If set to True, it will perform by epoch. Otherwise, by iteration. + Default: True. + save_best (str, optional): If a metric is specified, it would measure + the best checkpoint during evaluation. 
The information about best + checkpoint would be saved in ``runner.meta['hook_msgs']`` to keep + best score value and best checkpoint path, which will be also + loaded when resume checkpoint. Options are the evaluation metrics + on the test dataset. e.g., ``bbox_mAP``, ``segm_mAP`` for bbox + detection and instance segmentation. ``AR@100`` for proposal + recall. If ``save_best`` is ``auto``, the first key of the returned + ``OrderedDict`` result will be used. Default: None. + rule (str | None, optional): Comparison rule for best score. If set to + None, it will infer a reasonable rule. Keys such as 'acc', 'top' + .etc will be inferred by 'greater' rule. Keys contain 'loss' will + be inferred by 'less' rule. Options are 'greater', 'less', None. + Default: None. + test_fn (callable, optional): test a model with samples from a + dataloader, and return the test results. If ``None``, the default + test function ``mmcv.engine.single_gpu_test`` will be used. + (default: ``None``) + greater_keys (List[str] | None, optional): Metric keys that will be + inferred by 'greater' comparison rule. If ``None``, + _default_greater_keys will be used. (default: ``None``) + less_keys (List[str] | None, optional): Metric keys that will be + inferred by 'less' comparison rule. If ``None``, _default_less_keys + will be used. (default: ``None``) + out_dir (str, optional): The root directory to save checkpoints. If not + specified, `runner.work_dir` will be used by default. If specified, + the `out_dir` will be the concatenation of `out_dir` and the last + level directory of `runner.work_dir`. + `New in version 1.3.16.` + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmcv.fileio.FileClient` for details. Default: None. + `New in version 1.3.16.` + **eval_kwargs: Evaluation arguments fed into the evaluate function of + the dataset. + + Notes: + If new arguments are added for EvalHook, tools/test.py, + tools/eval_metric.py may be affected. + """ + + # Since the key for determine greater or less is related to the downstream + # tasks, downstream repos may need to overwrite the following inner + # variable accordingly. 
+
+    rule_map = {'greater': lambda x, y: x > y, 'less': lambda x, y: x < y}
+    init_value_map = {'greater': -inf, 'less': inf}
+    _default_greater_keys = [
+        'acc', 'top', 'AR@', 'auc', 'precision', 'mAP', 'mDice', 'mIoU',
+        'mAcc', 'aAcc'
+    ]
+    _default_less_keys = ['loss']
+
+    def __init__(self,
+                 dataloader,
+                 start=None,
+                 interval=1,
+                 by_epoch=True,
+                 save_best=None,
+                 rule=None,
+                 test_fn=None,
+                 greater_keys=None,
+                 less_keys=None,
+                 out_dir=None,
+                 file_client_args=None,
+                 **eval_kwargs):
+        if not isinstance(dataloader, DataLoader):
+            raise TypeError(f'dataloader must be a pytorch DataLoader, '
+                            f'but got {type(dataloader)}')
+
+        if interval <= 0:
+            raise ValueError(f'interval must be a positive number, '
+                             f'but got {interval}')
+
+        assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean'
+
+        if start is not None and start < 0:
+            raise ValueError(f'The evaluation start epoch {start} is smaller '
+                             f'than 0')
+
+        self.dataloader = dataloader
+        self.interval = interval
+        self.start = start
+        self.by_epoch = by_epoch
+
+        assert isinstance(save_best, str) or save_best is None, \
+            '"save_best" should be a str or None ' \
+            f'rather than {type(save_best)}'
+        self.save_best = save_best
+        self.eval_kwargs = eval_kwargs
+        self.initial_flag = True
+
+        if test_fn is None:
+            raise NotImplementedError(
+                'test_fn must be provided; a default single_gpu_test '
+                'is not implemented in this codebase')
+        else:
+            self.test_fn = test_fn
+
+        if greater_keys is None:
+            self.greater_keys = self._default_greater_keys
+        else:
+            if not isinstance(greater_keys, (list, tuple)):
+                greater_keys = (greater_keys, )
+            assert is_seq_of(greater_keys, str)
+            self.greater_keys = greater_keys
+
+        if less_keys is None:
+            self.less_keys = self._default_less_keys
+        else:
+            if not isinstance(less_keys, (list, tuple)):
+                less_keys = (less_keys, )
+            assert is_seq_of(less_keys, str)
+            self.less_keys = less_keys
+
+        if self.save_best is not None:
+            self.best_ckpt_path = None
+            self._init_rule(rule, self.save_best)
+
+        self.out_dir = out_dir
+        self.file_client_args = file_client_args
+
+    def _init_rule(self, rule, key_indicator):
+        """Initialize rule, key_indicator, comparison_func, and best score.
+
+        Here is the rule to determine which rule is used for the key
+        indicator when the rule is not specified (note that the key indicator
+        matching is case-insensitive):
+        1. If the key indicator is in ``self.greater_keys``, the rule will be
+           specified as 'greater'.
+        2. Or if the key indicator is in ``self.less_keys``, the rule will be
+           specified as 'less'.
+        3. Or if any item in ``self.greater_keys`` is a substring of the key
+           indicator, the rule will be specified as 'greater'.
+        4. Or if any item in ``self.less_keys`` is a substring of the key
+           indicator, the rule will be specified as 'less'.
+
+        Args:
+            rule (str | None): Comparison rule for best score.
+            key_indicator (str | None): Key indicator to determine the
+                comparison rule.
+ """ + if rule not in self.rule_map and rule is not None: + raise KeyError(f'rule must be greater, less or None, ' + f'but got {rule}.') + + if rule is None: + if key_indicator != 'auto': + # `_lc` here means we use the lower case of keys for + # case-insensitive matching + key_indicator_lc = key_indicator.lower() + greater_keys = [key.lower() for key in self.greater_keys] + less_keys = [key.lower() for key in self.less_keys] + + if key_indicator_lc in greater_keys: + rule = 'greater' + elif key_indicator_lc in less_keys: + rule = 'less' + elif any(key in key_indicator_lc for key in greater_keys): + rule = 'greater' + elif any(key in key_indicator_lc for key in less_keys): + rule = 'less' + else: + raise ValueError(f'Cannot infer the rule for key ' + f'{key_indicator}, thus a specific rule ' + f'must be specified.') + self.rule = rule + self.key_indicator = key_indicator + if self.rule is not None: + self.compare_func = self.rule_map[self.rule] + + def before_run(self, runner): + if not self.out_dir: + self.out_dir = runner.work_dir + + self.file_client = FileClient.infer_client(self.file_client_args, + self.out_dir) + + # if `self.out_dir` is not equal to `runner.work_dir`, it means that + # `self.out_dir` is set so the final `self.out_dir` is the + # concatenation of `self.out_dir` and the last level directory of + # `runner.work_dir` + if self.out_dir != runner.work_dir: + basename = osp.basename(runner.work_dir.rstrip(osp.sep)) + self.out_dir = self.file_client.join_path(self.out_dir, basename) + runner.logger.info( + (f'The best checkpoint will be saved to {self.out_dir} by ' + f'{self.file_client.name}')) + + if self.save_best is not None: + if runner.meta is None: + warnings.warn('runner.meta is None. Creating an empty one.') + runner.meta = dict() + runner.meta.setdefault('hook_msgs', dict()) + self.best_ckpt_path = runner.meta['hook_msgs'].get( + 'best_ckpt', None) + + def before_train_iter(self, runner): + """Evaluate the model only at the start of training by iteration.""" + if self.by_epoch or not self.initial_flag: + return + if self.start is not None and runner.iter >= self.start: + self.after_train_iter(runner) + self.initial_flag = False + + def before_train_epoch(self, runner): + """Evaluate the model only at the start of training by epoch.""" + if not (self.by_epoch and self.initial_flag): + return + if self.start is not None and runner.epoch >= self.start: + self.after_train_epoch(runner) + self.initial_flag = False + + def after_train_iter(self, runner): + """Called after every training iter to evaluate the results.""" + if not self.by_epoch and self._should_evaluate(runner): + # Because the priority of EvalHook is higher than LoggerHook, the + # training log and the evaluating log are mixed. Therefore, + # we need to dump the training log and clear it before evaluating + # log is generated. In addition, this problem will only appear in + # `IterBasedRunner` whose `self.by_epoch` is False, because + # `EpochBasedRunner` whose `self.by_epoch` is True calls + # `_do_evaluate` in `after_train_epoch` stage, and at this stage + # the training log has been printed, so it will not cause any + # problem. 
more details at + # https://github.com/open-mmlab/mmsegmentation/issues/694 + for hook in runner._hooks: + if isinstance(hook, LoggerHook): + hook.after_train_iter(runner) + runner.log_buffer.clear() + + self._do_evaluate(runner) + + def after_train_epoch(self, runner): + """Called after every training epoch to evaluate the results.""" + if self.by_epoch and self._should_evaluate(runner): + self._do_evaluate(runner) + + def _do_evaluate(self, runner): + """perform evaluation and save ckpt.""" + results = self.test_fn(runner.model, self.dataloader) + runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) + key_score = self.evaluate(runner, results) + # the key_score may be `None` so it needs to skip the action to save + # the best checkpoint + if self.save_best and key_score: + self._save_ckpt(runner, key_score) + + def _should_evaluate(self, runner): + """Judge whether to perform evaluation. + + Here is the rule to judge whether to perform evaluation: + 1. It will not perform evaluation during the epoch/iteration interval, + which is determined by ``self.interval``. + 2. It will not perform evaluation if the start time is larger than + current time. + 3. It will not perform evaluation when current time is larger than + the start time but during epoch/iteration interval. + + Returns: + bool: The flag indicating whether to perform evaluation. + """ + if self.by_epoch: + current = runner.epoch + check_time = self.every_n_epochs + else: + current = runner.iter + check_time = self.every_n_iters + + if self.start is None: + if not check_time(runner, self.interval): + # No evaluation during the interval. + return False + elif (current + 1) < self.start: + # No evaluation if start is larger than the current time. + return False + else: + # Evaluation only at epochs/iters 3, 5, 7... + # if start==3 and interval==2 + if (current + 1 - self.start) % self.interval: + return False + return True + + def _save_ckpt(self, runner, key_score): + """Save the best checkpoint. + + It will compare the score according to the compare function, write + related information (best score, best checkpoint path) and save the + best checkpoint into ``work_dir``. + """ + if self.by_epoch: + current = f'epoch_{runner.epoch + 1}' + cur_type, cur_time = 'epoch', runner.epoch + 1 + else: + current = f'iter_{runner.iter + 1}' + cur_type, cur_time = 'iter', runner.iter + 1 + + best_score = runner.meta['hook_msgs'].get( + 'best_score', self.init_value_map[self.rule]) + if self.compare_func(key_score, best_score): + best_score = key_score + runner.meta['hook_msgs']['best_score'] = best_score + + if self.best_ckpt_path and self.file_client.isfile( + self.best_ckpt_path): + self.file_client.remove(self.best_ckpt_path) + runner.logger.info( + (f'The previous best checkpoint {self.best_ckpt_path} was ' + 'removed')) + + best_ckpt_name = f'best_{self.key_indicator}_{current}.pth' + self.best_ckpt_path = self.file_client.join_path( + self.out_dir, best_ckpt_name) + runner.meta['hook_msgs']['best_ckpt'] = self.best_ckpt_path + + runner.save_checkpoint( + self.out_dir, best_ckpt_name, create_symlink=False) + runner.logger.info( + f'Now best checkpoint is saved as {best_ckpt_name}.') + runner.logger.info( + f'Best {self.key_indicator} is {best_score:0.4f} ' + f'at {cur_time} {cur_type}.') + + def evaluate(self, runner, results): + """Evaluate the results. + + Args: + runner (:obj:`mmcv.Runner`): The underlined training runner. + results (list): Output results. 
+ """ + eval_res = self.dataloader.dataset.evaluate( + results, logger=runner.logger, **self.eval_kwargs) + + for name, val in eval_res.items(): + runner.log_buffer.output[name] = val + runner.log_buffer.ready = True + + if self.save_best is not None: + # If the performance of model is pool, the `eval_res` may be an + # empty dict and it will raise exception when `self.save_best` is + # not None. More details at + # https://github.com/open-mmlab/mmdetection/issues/6265. + if not eval_res: + warnings.warn( + 'Since `eval_res` is an empty dict, the behavior to save ' + 'the best checkpoint will be skipped in this evaluation.') + return None + + if self.key_indicator == 'auto': + # infer from eval_results + self._init_rule(self.rule, list(eval_res.keys())[0]) + return eval_res[self.key_indicator] + + return None + + +class DistEvalHook(EvalHook): + """Distributed evaluation hook. + + This hook will regularly perform evaluation in a given interval when + performing in distributed environment. + + Args: + dataloader (DataLoader): A PyTorch dataloader, whose dataset has + implemented ``evaluate`` function. + start (int | None, optional): Evaluation starting epoch. It enables + evaluation before the training starts if ``start`` <= the resuming + epoch. If None, whether to evaluate is merely decided by + ``interval``. Default: None. + interval (int): Evaluation interval. Default: 1. + by_epoch (bool): Determine perform evaluation by epoch or by iteration. + If set to True, it will perform by epoch. Otherwise, by iteration. + default: True. + save_best (str, optional): If a metric is specified, it would measure + the best checkpoint during evaluation. The information about best + checkpoint would be saved in ``runner.meta['hook_msgs']`` to keep + best score value and best checkpoint path, which will be also + loaded when resume checkpoint. Options are the evaluation metrics + on the test dataset. e.g., ``bbox_mAP``, ``segm_mAP`` for bbox + detection and instance segmentation. ``AR@100`` for proposal + recall. If ``save_best`` is ``auto``, the first key of the returned + ``OrderedDict`` result will be used. Default: None. + rule (str | None, optional): Comparison rule for best score. If set to + None, it will infer a reasonable rule. Keys such as 'acc', 'top' + .etc will be inferred by 'greater' rule. Keys contain 'loss' will + be inferred by 'less' rule. Options are 'greater', 'less', None. + Default: None. + test_fn (callable, optional): test a model with samples from a + dataloader in a multi-gpu manner, and return the test results. If + ``None``, the default test function ``mmcv.engine.multi_gpu_test`` + will be used. (default: ``None``) + tmpdir (str | None): Temporary directory to save the results of all + processes. Default: None. + gpu_collect (bool): Whether to use gpu or cpu to collect results. + Default: False. + broadcast_bn_buffer (bool): Whether to broadcast the + buffer(running_mean and running_var) of rank 0 to other rank + before evaluation. Default: True. + out_dir (str, optional): The root directory to save checkpoints. If not + specified, `runner.work_dir` will be used by default. If specified, + the `out_dir` will be the concatenation of `out_dir` and the last + level directory of `runner.work_dir`. + file_client_args (dict): Arguments to instantiate a FileClient. + See :class:`mmcv.fileio.FileClient` for details. Default: None. + **eval_kwargs: Evaluation arguments fed into the evaluate function of + the dataset. 
+ """ + + def __init__(self, + dataloader, + start=None, + interval=1, + by_epoch=True, + save_best=None, + rule=None, + test_fn=None, + greater_keys=None, + less_keys=None, + broadcast_bn_buffer=True, + tmpdir=None, + gpu_collect=False, + out_dir=None, + file_client_args=None, + **eval_kwargs): + + if test_fn is None: + raise 'not implement multi_gpu_test test_fn' + + super().__init__( + dataloader, + start=start, + interval=interval, + by_epoch=by_epoch, + save_best=save_best, + rule=rule, + test_fn=test_fn, + greater_keys=greater_keys, + less_keys=less_keys, + out_dir=out_dir, + file_client_args=file_client_args, + **eval_kwargs) + + self.broadcast_bn_buffer = broadcast_bn_buffer + self.tmpdir = tmpdir + self.gpu_collect = gpu_collect + + def _do_evaluate(self, runner): + """perform evaluation and save ckpt.""" + # Synchronization of BatchNorm's buffer (running_mean + # and running_var) is not supported in the DDP of pytorch, + # which may cause the inconsistent performance of models in + # different ranks, so we broadcast BatchNorm's buffers + # of rank 0 to other ranks to avoid this. + if self.broadcast_bn_buffer: + model = runner.model + for name, module in model.named_modules(): + if isinstance(module, + _BatchNorm) and module.track_running_stats: + dist.broadcast(module.running_var, 0) + dist.broadcast(module.running_mean, 0) + + tmpdir = self.tmpdir + if tmpdir is None: + tmpdir = osp.join(runner.work_dir, '.eval_hook') + + results = self.test_fn( + runner.model, + self.dataloader, + tmpdir=tmpdir, + gpu_collect=self.gpu_collect) + if runner.rank == 0: + print('\n') + runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) + key_score = self.evaluate(runner, results) + # the key_score may be `None` so it needs to skip the action to + # save the best checkpoint + if self.save_best and key_score: + self._save_ckpt(runner, key_score) diff --git a/mmcv/runner/hooks/hook.py b/mmcv/runner/hooks/hook.py new file mode 100644 index 0000000..f2d1c98 --- /dev/null +++ b/mmcv/runner/hooks/hook.py @@ -0,0 +1,92 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmcv.utils import Registry, is_method_overridden + +HOOKS = Registry('hook') + + +class Hook: + stages = ('before_run', 'before_train_epoch', 'before_train_iter', + 'after_train_iter', 'after_train_epoch', 'before_val_epoch', + 'before_val_iter', 'after_val_iter', 'after_val_epoch', + 'after_run') + + def before_run(self, runner): + pass + + def after_run(self, runner): + pass + + def before_epoch(self, runner): + pass + + def after_epoch(self, runner): + pass + + def before_iter(self, runner): + pass + + def after_iter(self, runner): + pass + + def before_train_epoch(self, runner): + self.before_epoch(runner) + + def before_val_epoch(self, runner): + self.before_epoch(runner) + + def after_train_epoch(self, runner): + self.after_epoch(runner) + + def after_val_epoch(self, runner): + self.after_epoch(runner) + + def before_train_iter(self, runner): + self.before_iter(runner) + + def before_val_iter(self, runner): + self.before_iter(runner) + + def after_train_iter(self, runner): + self.after_iter(runner) + + def after_val_iter(self, runner): + self.after_iter(runner) + + def every_n_epochs(self, runner, n): + return (runner.epoch + 1) % n == 0 if n > 0 else False + + def every_n_inner_iters(self, runner, n): + return (runner.inner_iter + 1) % n == 0 if n > 0 else False + + def every_n_iters(self, runner, n): + return (runner.iter + 1) % n == 0 if n > 0 else False + + def end_of_epoch(self, runner): + return runner.inner_iter + 1 == len(runner.data_loader) + + def is_last_epoch(self, runner): + return runner.epoch + 1 == runner._max_epochs + + def is_last_iter(self, runner): + return runner.iter + 1 == runner._max_iters + + def get_triggered_stages(self): + trigger_stages = set() + for stage in Hook.stages: + if is_method_overridden(stage, Hook, self): + trigger_stages.add(stage) + + # some methods will be triggered in multi stages + # use this dict to map method to stages. + method_stages_map = { + 'before_epoch': ['before_train_epoch', 'before_val_epoch'], + 'after_epoch': ['after_train_epoch', 'after_val_epoch'], + 'before_iter': ['before_train_iter', 'before_val_iter'], + 'after_iter': ['after_train_iter', 'after_val_iter'], + } + + for method, map_stages in method_stages_map.items(): + if is_method_overridden(method, Hook, self): + trigger_stages.update(map_stages) + + return [stage for stage in Hook.stages if stage in trigger_stages] diff --git a/mmcv/runner/hooks/iter_timer.py b/mmcv/runner/hooks/iter_timer.py new file mode 100644 index 0000000..cfd5002 --- /dev/null +++ b/mmcv/runner/hooks/iter_timer.py @@ -0,0 +1,18 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import time + +from .hook import HOOKS, Hook + + +@HOOKS.register_module() +class IterTimerHook(Hook): + + def before_epoch(self, runner): + self.t = time.time() + + def before_iter(self, runner): + runner.log_buffer.update({'data_time': time.time() - self.t}) + + def after_iter(self, runner): + runner.log_buffer.update({'time': time.time() - self.t}) + self.t = time.time() diff --git a/mmcv/runner/hooks/logger/__init__.py b/mmcv/runner/hooks/logger/__init__.py new file mode 100644 index 0000000..409be48 --- /dev/null +++ b/mmcv/runner/hooks/logger/__init__.py @@ -0,0 +1,3 @@ +from .base import LoggerHook +from .text import TextLoggerHook +from .tensorboard import TensorboardLoggerHook \ No newline at end of file diff --git a/mmcv/runner/hooks/logger/base.py b/mmcv/runner/hooks/logger/base.py new file mode 100644 index 0000000..f845256 --- /dev/null +++ b/mmcv/runner/hooks/logger/base.py @@ -0,0 +1,166 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numbers +from abc import ABCMeta, abstractmethod + +import numpy as np +import torch + +from ..hook import Hook + + +class LoggerHook(Hook): + """Base class for logger hooks. + + Args: + interval (int): Logging interval (every k iterations). + ignore_last (bool): Ignore the log of last iterations in each epoch + if less than `interval`. + reset_flag (bool): Whether to clear the output buffer after logging. + by_epoch (bool): Whether EpochBasedRunner is used. + """ + + __metaclass__ = ABCMeta + + def __init__(self, + interval=10, + ignore_last=True, + reset_flag=False, + by_epoch=True): + self.interval = interval + self.ignore_last = ignore_last + self.reset_flag = reset_flag + self.by_epoch = by_epoch + + @abstractmethod + def log(self, runner): + pass + + @staticmethod + def is_scalar(val, include_np=True, include_torch=True): + """Tell the input variable is a scalar or not. + + Args: + val: Input variable. + include_np (bool): Whether include 0-d np.ndarray as a scalar. + include_torch (bool): Whether include 0-d torch.Tensor as a scalar. + + Returns: + bool: True or False. 
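+
+        Example (illustrative)::
+
+            >>> import numpy as np
+            >>> import torch
+            >>> LoggerHook.is_scalar(1.5)
+            True
+            >>> LoggerHook.is_scalar(np.array(2.0))
+            True
+            >>> LoggerHook.is_scalar(torch.tensor([2.0]))
+            True
+            >>> LoggerHook.is_scalar('2.0')
+            False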
+ """ + if isinstance(val, numbers.Number): + return True + elif include_np and isinstance(val, np.ndarray) and val.ndim == 0: + return True + elif include_torch and isinstance(val, torch.Tensor) and len(val) == 1: + return True + else: + return False + + def get_mode(self, runner): + if runner.mode == 'train': + if 'time' in runner.log_buffer.output: + mode = 'train' + else: + mode = 'val' + elif runner.mode == 'val': + mode = 'val' + else: + raise ValueError(f"runner mode should be 'train' or 'val', " + f'but got {runner.mode}') + return mode + + def get_epoch(self, runner): + if runner.mode == 'train': + epoch = runner.epoch + 1 + elif runner.mode == 'val': + # normal val mode + # runner.epoch += 1 has been done before val workflow + epoch = runner.epoch + else: + raise ValueError(f"runner mode should be 'train' or 'val', " + f'but got {runner.mode}') + return epoch + + def get_iter(self, runner, inner_iter=False): + """Get the current training iteration step.""" + if self.by_epoch and inner_iter: + current_iter = runner.inner_iter + 1 + else: + current_iter = runner.iter + 1 + return current_iter + + def get_lr_tags(self, runner): + tags = {} + lrs = runner.current_lr() + if isinstance(lrs, dict): + for name, value in lrs.items(): + tags[f'learning_rate/{name}'] = value[0] + else: + tags['learning_rate'] = lrs[0] + return tags + + def get_momentum_tags(self, runner): + tags = {} + momentums = runner.current_momentum() + if isinstance(momentums, dict): + for name, value in momentums.items(): + tags[f'momentum/{name}'] = value[0] + else: + tags['momentum'] = momentums[0] + return tags + + def get_loggable_tags(self, + runner, + allow_scalar=True, + allow_text=False, + add_mode=True, + tags_to_skip=('time', 'data_time')): + tags = {} + for var, val in runner.log_buffer.output.items(): + if var in tags_to_skip: + continue + if self.is_scalar(val) and not allow_scalar: + continue + if isinstance(val, str) and not allow_text: + continue + if add_mode: + var = f'{self.get_mode(runner)}/{var}' + tags[var] = val + tags.update(self.get_lr_tags(runner)) + tags.update(self.get_momentum_tags(runner)) + return tags + + def before_run(self, runner): + for hook in runner.hooks[::-1]: + if isinstance(hook, LoggerHook): + hook.reset_flag = True + break + + def before_epoch(self, runner): + runner.log_buffer.clear() # clear logs of last epoch + + def after_train_iter(self, runner): + if self.by_epoch and self.every_n_inner_iters(runner, self.interval): + runner.log_buffer.average(self.interval) + elif not self.by_epoch and self.every_n_iters(runner, self.interval): + runner.log_buffer.average(self.interval) + elif self.end_of_epoch(runner) and not self.ignore_last: + # not precise but more stable + runner.log_buffer.average(self.interval) + + if runner.log_buffer.ready: + self.log(runner) + if self.reset_flag: + runner.log_buffer.clear_output() + + def after_train_epoch(self, runner): + if runner.log_buffer.ready: + self.log(runner) + if self.reset_flag: + runner.log_buffer.clear_output() + + def after_val_epoch(self, runner): + runner.log_buffer.average() + self.log(runner) + if self.reset_flag: + runner.log_buffer.clear_output() diff --git a/mmcv/runner/hooks/logger/tensorboard.py b/mmcv/runner/hooks/logger/tensorboard.py new file mode 100644 index 0000000..72b6759 --- /dev/null +++ b/mmcv/runner/hooks/logger/tensorboard.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os.path as osp + +from mmcv.utils import TORCH_VERSION, digit_version, master_only +from ..hook import HOOKS +from .base import LoggerHook + + +@HOOKS.register_module() +class TensorboardLoggerHook(LoggerHook): + + def __init__(self, + log_dir=None, + interval=10, + ignore_last=True, + reset_flag=False, + by_epoch=True): + super(TensorboardLoggerHook, self).__init__(interval, ignore_last, + reset_flag, by_epoch) + self.log_dir = log_dir + + @master_only + def before_run(self, runner): + super(TensorboardLoggerHook, self).before_run(runner) + if (digit_version(TORCH_VERSION) < digit_version('1.1')): + try: + from tensorboardX import SummaryWriter + except ImportError: + raise ImportError('Please install tensorboardX to use ' + 'TensorboardLoggerHook.') + else: + try: + from torch.utils.tensorboard import SummaryWriter + except ImportError: + raise ImportError( + 'Please run "pip install future tensorboard" to install ' + 'the dependencies to use torch.utils.tensorboard ' + '(applicable to PyTorch 1.1 or higher)') + + if self.log_dir is None: + self.log_dir = osp.join(runner.work_dir, 'tf_logs') + self.writer = SummaryWriter(self.log_dir) + + @master_only + def log(self, runner): + tags = self.get_loggable_tags(runner, allow_text=True) + for tag, val in tags.items(): + if isinstance(val, str): + self.writer.add_text(tag, val, self.get_iter(runner)) + else: + self.writer.add_scalar(tag, val, self.get_iter(runner)) + + @master_only + def after_run(self, runner): + self.writer.close() diff --git a/mmcv/runner/hooks/logger/text.py b/mmcv/runner/hooks/logger/text.py new file mode 100644 index 0000000..0413cd8 --- /dev/null +++ b/mmcv/runner/hooks/logger/text.py @@ -0,0 +1,256 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import datetime +import os +import os.path as osp +from collections import OrderedDict + +import torch +import torch.distributed as dist + +from mmcv.fileio.file_client import FileClient +from mmcv.utils import is_tuple_of, scandir +from mmcv.fileio.io import dump +from ..hook import HOOKS +from .base import LoggerHook + + +@HOOKS.register_module() +class TextLoggerHook(LoggerHook): + """Logger hook in text. + + In this logger hook, the information will be printed on terminal and + saved in json file. + + Args: + by_epoch (bool, optional): Whether EpochBasedRunner is used. + Default: True. + interval (int, optional): Logging interval (every k iterations). + Default: 10. + ignore_last (bool, optional): Ignore the log of last iterations in each + epoch if less than :attr:`interval`. Default: True. + reset_flag (bool, optional): Whether to clear the output buffer after + logging. Default: False. + interval_exp_name (int, optional): Logging interval for experiment + name. This feature is to help users conveniently get the experiment + information from screen or log file. Default: 1000. + out_dir (str, optional): Logs are saved in ``runner.work_dir`` default. + If ``out_dir`` is specified, logs will be copied to a new directory + which is the concatenation of ``out_dir`` and the last level + directory of ``runner.work_dir``. Default: None. + `New in version 1.3.16.` + out_suffix (str or tuple[str], optional): Those filenames ending with + ``out_suffix`` will be copied to ``out_dir``. + Default: ('.log.json', '.log', '.py'). + `New in version 1.3.16.` + keep_local (bool, optional): Whether to keep local log when + :attr:`out_dir` is specified. If False, the local log will be + removed. Default: True. 
+            `New in version 1.3.16.`
+        file_client_args (dict, optional): Arguments to instantiate a
+            FileClient. See :class:`mmcv.fileio.FileClient` for details.
+            Default: None.
+            `New in version 1.3.16.`
+    """
+
+    def __init__(self,
+                 by_epoch=True,
+                 interval=10,
+                 ignore_last=True,
+                 reset_flag=False,
+                 interval_exp_name=1000,
+                 out_dir=None,
+                 out_suffix=('.log.json', '.log', '.py'),
+                 keep_local=True,
+                 file_client_args=None):
+        super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag,
+                                             by_epoch)
+        self.by_epoch = by_epoch
+        self.time_sec_tot = 0
+        self.interval_exp_name = interval_exp_name
+
+        if out_dir is None and file_client_args is not None:
+            raise ValueError(
+                'file_client_args should be "None" when `out_dir` is not '
+                'specified.')
+        self.out_dir = out_dir
+
+        if not (out_dir is None or isinstance(out_dir, str)
+                or is_tuple_of(out_dir, str)):
+            raise TypeError('out_dir should be "None" or string or tuple of '
+                            f'string, but got {out_dir}')
+        self.out_suffix = out_suffix
+
+        self.keep_local = keep_local
+        self.file_client_args = file_client_args
+        if self.out_dir is not None:
+            self.file_client = FileClient.infer_client(file_client_args,
+                                                       self.out_dir)
+
+    def before_run(self, runner):
+        super(TextLoggerHook, self).before_run(runner)
+
+        if self.out_dir is not None:
+            self.file_client = FileClient.infer_client(self.file_client_args,
+                                                       self.out_dir)
+            # The final `self.out_dir` is the concatenation of `self.out_dir`
+            # and the last level directory of `runner.work_dir`
+            basename = osp.basename(runner.work_dir.rstrip(osp.sep))
+            self.out_dir = self.file_client.join_path(self.out_dir, basename)
+            runner.logger.info(
+                (f'Text logs will be saved to {self.out_dir} by '
+                 f'{self.file_client.name} after the training process.'))
+
+        self.start_iter = runner.iter
+        self.json_log_path = osp.join(runner.work_dir,
+                                      f'{runner.timestamp}.log.json')
+        if runner.meta is not None:
+            self._dump_log(runner.meta, runner)
+
+    def _get_max_memory(self, runner):
+        device = getattr(runner.model, 'output_device', None)
+        mem = torch.cuda.max_memory_allocated(device=device)
+        mem_mb = torch.tensor([mem / (1024 * 1024)],
+                              dtype=torch.int,
+                              device=device)
+        if runner.world_size > 1:
+            dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
+        return mem_mb.item()
+
+    def _log_info(self, log_dict, runner):
+        # print exp name for users to distinguish experiments
+        # at every ``interval_exp_name`` iterations and the end of each epoch
+        if runner.meta is not None and 'exp_name' in runner.meta:
+            if (self.every_n_iters(runner, self.interval_exp_name)) or (
+                    self.by_epoch and self.end_of_epoch(runner)):
+                exp_info = f'Exp name: {runner.meta["exp_name"]}'
+                runner.logger.info(exp_info)
+
+        if log_dict['mode'] == 'train':
+            if isinstance(log_dict['lr'], dict):
+                lr_str = []
+                for k, val in log_dict['lr'].items():
+                    lr_str.append(f'lr_{k}: {val:.3e}')
+                lr_str = ' '.join(lr_str)
+            else:
+                lr_str = f'lr: {log_dict["lr"]:.3e}'
+
+            # by epoch: Epoch [4][100/1000]
+            # by iter: Iter [100/100000]
+            if self.by_epoch:
+                log_str = f'Epoch [{log_dict["epoch"]}]' \
+                          f'[{log_dict["iter"]}/{len(runner.data_loader)}]\t'
+            else:
+                log_str = f'Iter [{log_dict["iter"]}/{runner.max_iters}]\t'
+            log_str += f'{lr_str}, '
+
+            if 'time' in log_dict.keys():
+                self.time_sec_tot += (log_dict['time'] * self.interval)
+                time_sec_avg = self.time_sec_tot / (
+                    runner.iter - self.start_iter + 1)
+                eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
+                eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
+                log_str += f'eta: 
{eta_str}, ' + log_str += f'time: {log_dict["time"]:.3f}, ' \ + f'data_time: {log_dict["data_time"]:.3f}, ' + # statistic memory + if torch.cuda.is_available(): + log_str += f'memory: {log_dict["memory"]}, ' + else: + # val/test time + # here 1000 is the length of the val dataloader + # by epoch: Epoch[val] [4][1000] + # by iter: Iter[val] [1000] + if self.by_epoch: + log_str = f'Epoch({log_dict["mode"]}) ' \ + f'[{log_dict["epoch"]}][{log_dict["iter"]}]\t' + else: + log_str = f'Iter({log_dict["mode"]}) [{log_dict["iter"]}]\t' + + log_items = [] + for name, val in log_dict.items(): + # TODO: resolve this hack + # these items have been in log_str + if name in [ + 'mode', 'Epoch', 'iter', 'lr', 'time', 'data_time', + 'memory', 'epoch' + ]: + continue + if isinstance(val, float): + val = f'{val:.4f}' + log_items.append(f'{name}: {val}') + log_str += ', '.join(log_items) + + runner.logger.info(log_str) + + def _dump_log(self, log_dict, runner): + # dump log in json format + json_log = OrderedDict() + for k, v in log_dict.items(): + json_log[k] = self._round_float(v) + # only append log at last line + if runner.rank == 0: + with open(self.json_log_path, 'a+') as f: + dump(json_log, f, file_format='json') + f.write('\n') + + def _round_float(self, items): + if isinstance(items, list): + return [self._round_float(item) for item in items] + elif isinstance(items, float): + return round(items, 5) + else: + return items + + def log(self, runner): + if 'eval_iter_num' in runner.log_buffer.output: + # this doesn't modify runner.iter and is regardless of by_epoch + cur_iter = runner.log_buffer.output.pop('eval_iter_num') + else: + cur_iter = self.get_iter(runner, inner_iter=True) + + log_dict = OrderedDict( + mode=self.get_mode(runner), + epoch=self.get_epoch(runner), + iter=cur_iter) + + # only record lr of the first param group + cur_lr = runner.current_lr() + if isinstance(cur_lr, list): + log_dict['lr'] = cur_lr[0] + else: + assert isinstance(cur_lr, dict) + log_dict['lr'] = {} + for k, lr_ in cur_lr.items(): + assert isinstance(lr_, list) + log_dict['lr'].update({k: lr_[0]}) + + if 'time' in runner.log_buffer.output: + # statistic memory + if torch.cuda.is_available(): + log_dict['memory'] = self._get_max_memory(runner) + + log_dict = dict(log_dict, **runner.log_buffer.output) + + self._log_info(log_dict, runner) + self._dump_log(log_dict, runner) + return log_dict + + def after_run(self, runner): + # copy or upload logs to self.out_dir + if self.out_dir is not None: + for filename in scandir(runner.work_dir, self.out_suffix, True): + local_filepath = osp.join(runner.work_dir, filename) + out_filepath = self.file_client.join_path( + self.out_dir, filename) + with open(local_filepath, 'r') as f: + self.file_client.put_text(f.read(), out_filepath) + + runner.logger.info( + (f'The file {local_filepath} has been uploaded to ' + f'{out_filepath}.')) + + if not self.keep_local: + os.remove(local_filepath) + runner.logger.info( + (f'{local_filepath} was removed due to the ' + '`self.keep_local=False`')) diff --git a/mmcv/runner/hooks/lr_updater.py b/mmcv/runner/hooks/lr_updater.py new file mode 100644 index 0000000..a750548 --- /dev/null +++ b/mmcv/runner/hooks/lr_updater.py @@ -0,0 +1,670 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numbers +from math import cos, pi + +from mmcv.utils import is_list_of +from .hook import HOOKS, Hook + + +class LrUpdaterHook(Hook): + """LR Scheduler in MMCV. + + Args: + by_epoch (bool): LR changes epoch by epoch + warmup (string): Type of warmup used. 
It can be None(use no warmup), + 'constant', 'linear' or 'exp' + warmup_iters (int): The number of iterations or epochs that warmup + lasts + warmup_ratio (float): LR used at the beginning of warmup equals to + warmup_ratio * initial_lr + warmup_by_epoch (bool): When warmup_by_epoch == True, warmup_iters + means the number of epochs that warmup lasts, otherwise means the + number of iteration that warmup lasts + """ + + def __init__(self, + by_epoch=True, + warmup=None, + warmup_iters=0, + warmup_ratio=0.1, + warmup_by_epoch=False): + # validate the "warmup" argument + if warmup is not None: + if warmup not in ['constant', 'linear', 'exp']: + raise ValueError( + f'"{warmup}" is not a supported type for warming up, valid' + ' types are "constant" and "linear"') + if warmup is not None: + assert warmup_iters > 0, \ + '"warmup_iters" must be a positive integer' + assert 0 < warmup_ratio <= 1.0, \ + '"warmup_ratio" must be in range (0,1]' + + self.by_epoch = by_epoch + self.warmup = warmup + self.warmup_iters = warmup_iters + self.warmup_ratio = warmup_ratio + self.warmup_by_epoch = warmup_by_epoch + + if self.warmup_by_epoch: + self.warmup_epochs = self.warmup_iters + self.warmup_iters = None + else: + self.warmup_epochs = None + + self.base_lr = [] # initial lr for all param groups + self.regular_lr = [] # expected lr if no warming up is performed + + def _set_lr(self, runner, lr_groups): + if isinstance(runner.optimizer, dict): + for k, optim in runner.optimizer.items(): + for param_group, lr in zip(optim.param_groups, lr_groups[k]): + param_group['lr'] = lr + else: + for param_group, lr in zip(runner.optimizer.param_groups, + lr_groups): + param_group['lr'] = lr + + def get_lr(self, runner, base_lr): + raise NotImplementedError + + def get_regular_lr(self, runner): + if isinstance(runner.optimizer, dict): + lr_groups = {} + for k in runner.optimizer.keys(): + _lr_group = [ + self.get_lr(runner, _base_lr) + for _base_lr in self.base_lr[k] + ] + lr_groups.update({k: _lr_group}) + + return lr_groups + else: + return [self.get_lr(runner, _base_lr) for _base_lr in self.base_lr] + + def get_warmup_lr(self, cur_iters): + + def _get_warmup_lr(cur_iters, regular_lr): + if self.warmup == 'constant': + warmup_lr = [_lr * self.warmup_ratio for _lr in regular_lr] + elif self.warmup == 'linear': + k = (1 - cur_iters / self.warmup_iters) * (1 - + self.warmup_ratio) + warmup_lr = [_lr * (1 - k) for _lr in regular_lr] + elif self.warmup == 'exp': + k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters) + warmup_lr = [_lr * k for _lr in regular_lr] + return warmup_lr + + if isinstance(self.regular_lr, dict): + lr_groups = {} + for key, regular_lr in self.regular_lr.items(): + lr_groups[key] = _get_warmup_lr(cur_iters, regular_lr) + return lr_groups + else: + return _get_warmup_lr(cur_iters, self.regular_lr) + + def before_run(self, runner): + # NOTE: when resuming from a checkpoint, if 'initial_lr' is not saved, + # it will be set according to the optimizer params + if isinstance(runner.optimizer, dict): + self.base_lr = {} + for k, optim in runner.optimizer.items(): + for group in optim.param_groups: + group.setdefault('initial_lr', group['lr']) + _base_lr = [ + group['initial_lr'] for group in optim.param_groups + ] + self.base_lr.update({k: _base_lr}) + else: + for group in runner.optimizer.param_groups: + group.setdefault('initial_lr', group['lr']) + self.base_lr = [ + group['initial_lr'] for group in runner.optimizer.param_groups + ] + + def before_train_epoch(self, runner): + if self.warmup_iters 
is None: + epoch_len = len(runner.data_loader) + self.warmup_iters = self.warmup_epochs * epoch_len + + if not self.by_epoch: + return + + self.regular_lr = self.get_regular_lr(runner) + self._set_lr(runner, self.regular_lr) + + def before_train_iter(self, runner): + cur_iter = runner.iter + if not self.by_epoch: + self.regular_lr = self.get_regular_lr(runner) + if self.warmup is None or cur_iter >= self.warmup_iters: + self._set_lr(runner, self.regular_lr) + else: + warmup_lr = self.get_warmup_lr(cur_iter) + self._set_lr(runner, warmup_lr) + elif self.by_epoch: + if self.warmup is None or cur_iter > self.warmup_iters: + return + elif cur_iter == self.warmup_iters: + self._set_lr(runner, self.regular_lr) + else: + warmup_lr = self.get_warmup_lr(cur_iter) + self._set_lr(runner, warmup_lr) + + +@HOOKS.register_module() +class FixedLrUpdaterHook(LrUpdaterHook): + + def __init__(self, **kwargs): + super(FixedLrUpdaterHook, self).__init__(**kwargs) + + def get_lr(self, runner, base_lr): + return base_lr + + +@HOOKS.register_module() +class StepLrUpdaterHook(LrUpdaterHook): + """Step LR scheduler with min_lr clipping. + + Args: + step (int | list[int]): Step to decay the LR. If an int value is given, + regard it as the decay interval. If a list is given, decay LR at + these steps. + gamma (float, optional): Decay LR ratio. Default: 0.1. + min_lr (float, optional): Minimum LR value to keep. If LR after decay + is lower than `min_lr`, it will be clipped to this value. If None + is given, we don't perform lr clipping. Default: None. + """ + + def __init__(self, step, gamma=0.1, min_lr=None, **kwargs): + if isinstance(step, list): + assert is_list_of(step, int) + assert all([s > 0 for s in step]) + elif isinstance(step, int): + assert step > 0 + else: + raise TypeError('"step" must be a list or integer') + self.step = step + self.gamma = gamma + self.min_lr = min_lr + super(StepLrUpdaterHook, self).__init__(**kwargs) + + def get_lr(self, runner, base_lr): + progress = runner.epoch if self.by_epoch else runner.iter + + # calculate exponential term + if isinstance(self.step, int): + exp = progress // self.step + else: + exp = len(self.step) + for i, s in enumerate(self.step): + if progress < s: + exp = i + break + + lr = base_lr * (self.gamma**exp) + if self.min_lr is not None: + # clip to a minimum value + lr = max(lr, self.min_lr) + return lr + + +@HOOKS.register_module() +class ExpLrUpdaterHook(LrUpdaterHook): + + def __init__(self, gamma, **kwargs): + self.gamma = gamma + super(ExpLrUpdaterHook, self).__init__(**kwargs) + + def get_lr(self, runner, base_lr): + progress = runner.epoch if self.by_epoch else runner.iter + return base_lr * self.gamma**progress + + +@HOOKS.register_module() +class PolyLrUpdaterHook(LrUpdaterHook): + + def __init__(self, power=1., min_lr=0., **kwargs): + self.power = power + self.min_lr = min_lr + super(PolyLrUpdaterHook, self).__init__(**kwargs) + + def get_lr(self, runner, base_lr): + if self.by_epoch: + progress = runner.epoch + max_progress = runner.max_epochs + else: + progress = runner.iter + max_progress = runner.max_iters + coeff = (1 - progress / max_progress)**self.power + return (base_lr - self.min_lr) * coeff + self.min_lr + + +@HOOKS.register_module() +class InvLrUpdaterHook(LrUpdaterHook): + + def __init__(self, gamma, power=1., **kwargs): + self.gamma = gamma + self.power = power + super(InvLrUpdaterHook, self).__init__(**kwargs) + + def get_lr(self, runner, base_lr): + progress = runner.epoch if self.by_epoch else runner.iter + return base_lr * (1 + 
self.gamma * progress)**(-self.power) + + +@HOOKS.register_module() +class CosineAnnealingLrUpdaterHook(LrUpdaterHook): + + def __init__(self, min_lr=None, min_lr_ratio=None, **kwargs): + assert (min_lr is None) ^ (min_lr_ratio is None) + self.min_lr = min_lr + self.min_lr_ratio = min_lr_ratio + super(CosineAnnealingLrUpdaterHook, self).__init__(**kwargs) + + def get_lr(self, runner, base_lr): + if self.by_epoch: + progress = runner.epoch + max_progress = runner.max_epochs + else: + progress = runner.iter + max_progress = runner.max_iters + + if self.min_lr_ratio is not None: + target_lr = base_lr * self.min_lr_ratio + else: + target_lr = self.min_lr + return annealing_cos(base_lr, target_lr, progress / max_progress) + + +@HOOKS.register_module() +class FlatCosineAnnealingLrUpdaterHook(LrUpdaterHook): + """Flat + Cosine lr schedule. + + Modified from https://github.com/fastai/fastai/blob/master/fastai/callback/schedule.py#L128 # noqa: E501 + + Args: + start_percent (float): When to start annealing the learning rate + after the percentage of the total training steps. + The value should be in range [0, 1). + Default: 0.75 + min_lr (float, optional): The minimum lr. Default: None. + min_lr_ratio (float, optional): The ratio of minimum lr to the base lr. + Either `min_lr` or `min_lr_ratio` should be specified. + Default: None. + """ + + def __init__(self, + start_percent=0.75, + min_lr=None, + min_lr_ratio=None, + **kwargs): + assert (min_lr is None) ^ (min_lr_ratio is None) + if start_percent < 0 or start_percent > 1 or not isinstance( + start_percent, float): + raise ValueError( + 'expected float between 0 and 1 start_percent, but ' + f'got {start_percent}') + self.start_percent = start_percent + self.min_lr = min_lr + self.min_lr_ratio = min_lr_ratio + super(FlatCosineAnnealingLrUpdaterHook, self).__init__(**kwargs) + + def get_lr(self, runner, base_lr): + if self.by_epoch: + start = round(runner.max_epochs * self.start_percent) + progress = runner.epoch - start + max_progress = runner.max_epochs - start + else: + start = round(runner.max_iters * self.start_percent) + progress = runner.iter - start + max_progress = runner.max_iters - start + + if self.min_lr_ratio is not None: + target_lr = base_lr * self.min_lr_ratio + else: + target_lr = self.min_lr + + if progress < 0: + return base_lr + else: + return annealing_cos(base_lr, target_lr, progress / max_progress) + + +@HOOKS.register_module() +class CosineRestartLrUpdaterHook(LrUpdaterHook): + """Cosine annealing with restarts learning rate scheme. + + Args: + periods (list[int]): Periods for each cosine anneling cycle. + restart_weights (list[float], optional): Restart weights at each + restart iteration. Default: [1]. + min_lr (float, optional): The minimum lr. Default: None. + min_lr_ratio (float, optional): The ratio of minimum lr to the base lr. + Either `min_lr` or `min_lr_ratio` should be specified. + Default: None. + """ + + def __init__(self, + periods, + restart_weights=[1], + min_lr=None, + min_lr_ratio=None, + **kwargs): + assert (min_lr is None) ^ (min_lr_ratio is None) + self.periods = periods + self.min_lr = min_lr + self.min_lr_ratio = min_lr_ratio + self.restart_weights = restart_weights + assert (len(self.periods) == len(self.restart_weights) + ), 'periods and restart_weights should have the same length.' 
+ super(CosineRestartLrUpdaterHook, self).__init__(**kwargs) + + self.cumulative_periods = [ + sum(self.periods[0:i + 1]) for i in range(0, len(self.periods)) + ] + + def get_lr(self, runner, base_lr): + if self.by_epoch: + progress = runner.epoch + else: + progress = runner.iter + + if self.min_lr_ratio is not None: + target_lr = base_lr * self.min_lr_ratio + else: + target_lr = self.min_lr + + idx = get_position_from_periods(progress, self.cumulative_periods) + current_weight = self.restart_weights[idx] + nearest_restart = 0 if idx == 0 else self.cumulative_periods[idx - 1] + current_periods = self.periods[idx] + + alpha = min((progress - nearest_restart) / current_periods, 1) + return annealing_cos(base_lr, target_lr, alpha, current_weight) + + +def get_position_from_periods(iteration, cumulative_periods): + """Get the position from a period list. + + It will return the index of the right-closest number in the period list. + For example, the cumulative_periods = [100, 200, 300, 400], + if iteration == 50, return 0; + if iteration == 210, return 2; + if iteration == 300, return 3. + + Args: + iteration (int): Current iteration. + cumulative_periods (list[int]): Cumulative period list. + + Returns: + int: The position of the right-closest number in the period list. + """ + for i, period in enumerate(cumulative_periods): + if iteration < period: + return i + raise ValueError(f'Current iteration {iteration} exceeds ' + f'cumulative_periods {cumulative_periods}') + + +@HOOKS.register_module() +class CyclicLrUpdaterHook(LrUpdaterHook): + """Cyclic LR Scheduler. + + Implement the cyclical learning rate policy (CLR) described in + https://arxiv.org/pdf/1506.01186.pdf + + Different from the original paper, we use cosine annealing rather than + triangular policy inside a cycle. This improves the performance in the + 3D detection area. + + Args: + by_epoch (bool): Whether to update LR by epoch. + target_ratio (tuple[float]): Relative ratio of the highest LR and the + lowest LR to the initial LR. + cyclic_times (int): Number of cycles during training + step_ratio_up (float): The ratio of the increasing process of LR in + the total cycle. + anneal_strategy (str): {'cos', 'linear'} + Specifies the annealing strategy: 'cos' for cosine annealing, + 'linear' for linear annealing. Default: 'cos'. 
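+
+    Example (illustrative config snippet, mmdet-style)::
+
+        lr_config = dict(
+            policy='cyclic',
+            target_ratio=(10, 1e-4),
+            cyclic_times=1,
+            step_ratio_up=0.4)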
+ """ + + def __init__(self, + by_epoch=False, + target_ratio=(10, 1e-4), + cyclic_times=1, + step_ratio_up=0.4, + anneal_strategy='cos', + **kwargs): + if isinstance(target_ratio, float): + target_ratio = (target_ratio, target_ratio / 1e5) + elif isinstance(target_ratio, tuple): + target_ratio = (target_ratio[0], target_ratio[0] / 1e5) \ + if len(target_ratio) == 1 else target_ratio + else: + raise ValueError('target_ratio should be either float ' + f'or tuple, got {type(target_ratio)}') + + assert len(target_ratio) == 2, \ + '"target_ratio" must be list or tuple of two floats' + assert 0 <= step_ratio_up < 1.0, \ + '"step_ratio_up" must be in range [0,1)' + + self.target_ratio = target_ratio + self.cyclic_times = cyclic_times + self.step_ratio_up = step_ratio_up + self.lr_phases = [] # init lr_phases + # validate anneal_strategy + if anneal_strategy not in ['cos', 'linear']: + raise ValueError('anneal_strategy must be one of "cos" or ' + f'"linear", instead got {anneal_strategy}') + elif anneal_strategy == 'cos': + self.anneal_func = annealing_cos + elif anneal_strategy == 'linear': + self.anneal_func = annealing_linear + + assert not by_epoch, \ + 'currently only support "by_epoch" = False' + super(CyclicLrUpdaterHook, self).__init__(by_epoch, **kwargs) + + def before_run(self, runner): + super(CyclicLrUpdaterHook, self).before_run(runner) + # initiate lr_phases + # total lr_phases are separated as up and down + max_iter_per_phase = runner.max_iters // self.cyclic_times + iter_up_phase = int(self.step_ratio_up * max_iter_per_phase) + self.lr_phases.append( + [0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]]) + self.lr_phases.append([ + iter_up_phase, max_iter_per_phase, max_iter_per_phase, + self.target_ratio[0], self.target_ratio[1] + ]) + + def get_lr(self, runner, base_lr): + curr_iter = runner.iter + for (start_iter, end_iter, max_iter_per_phase, start_ratio, + end_ratio) in self.lr_phases: + curr_iter %= max_iter_per_phase + if start_iter <= curr_iter < end_iter: + progress = curr_iter - start_iter + return self.anneal_func(base_lr * start_ratio, + base_lr * end_ratio, + progress / (end_iter - start_iter)) + + +@HOOKS.register_module() +class OneCycleLrUpdaterHook(LrUpdaterHook): + """One Cycle LR Scheduler. + + The 1cycle learning rate policy changes the learning rate after every + batch. The one cycle learning rate policy is described in + https://arxiv.org/pdf/1708.07120.pdf + + Args: + max_lr (float or list): Upper learning rate boundaries in the cycle + for each parameter group. + total_steps (int, optional): The total number of steps in the cycle. + Note that if a value is not provided here, it will be the max_iter + of runner. Default: None. + pct_start (float): The percentage of the cycle (in number of steps) + spent increasing the learning rate. + Default: 0.3 + anneal_strategy (str): {'cos', 'linear'} + Specifies the annealing strategy: 'cos' for cosine annealing, + 'linear' for linear annealing. + Default: 'cos' + div_factor (float): Determines the initial learning rate via + initial_lr = max_lr/div_factor + Default: 25 + final_div_factor (float): Determines the minimum learning rate via + min_lr = initial_lr/final_div_factor + Default: 1e4 + three_phase (bool): If three_phase is True, use a third phase of the + schedule to annihilate the learning rate according to + final_div_factor instead of modifying the second phase (the first + two phases will be symmetrical about the step indicated by + pct_start). 
+            Default: False
+    """
+
+    def __init__(self,
+                 max_lr,
+                 total_steps=None,
+                 pct_start=0.3,
+                 anneal_strategy='cos',
+                 div_factor=25,
+                 final_div_factor=1e4,
+                 three_phase=False,
+                 **kwargs):
+        # validate by_epoch, currently only support by_epoch = False
+        if 'by_epoch' not in kwargs:
+            kwargs['by_epoch'] = False
+        else:
+            assert not kwargs['by_epoch'], \
+                'currently only support "by_epoch" = False'
+        if not isinstance(max_lr, (numbers.Number, list, dict)):
+            raise ValueError('the type of max_lr must be number, list or '
+                             f'dict, but got {type(max_lr)}')
+        self._max_lr = max_lr
+        if total_steps is not None:
+            if not isinstance(total_steps, int):
+                raise ValueError('the type of total_steps must be int, but '
+                                 f'got {type(total_steps)}')
+            self.total_steps = total_steps
+        # validate pct_start
+        if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
+            raise ValueError('expected a float between 0 and 1 for pct_start, '
+                             f'but got {pct_start}')
+        self.pct_start = pct_start
+        # validate anneal_strategy
+        if anneal_strategy not in ['cos', 'linear']:
+            raise ValueError('anneal_strategy must be one of "cos" or '
+                             f'"linear", instead got {anneal_strategy}')
+        elif anneal_strategy == 'cos':
+            self.anneal_func = annealing_cos
+        elif anneal_strategy == 'linear':
+            self.anneal_func = annealing_linear
+        self.div_factor = div_factor
+        self.final_div_factor = final_div_factor
+        self.three_phase = three_phase
+        self.lr_phases = []  # init lr_phases
+        super(OneCycleLrUpdaterHook, self).__init__(**kwargs)
+
+    def before_run(self, runner):
+        if hasattr(self, 'total_steps'):
+            total_steps = self.total_steps
+        else:
+            total_steps = runner.max_iters
+        if total_steps < runner.max_iters:
+            raise ValueError(
+                'The total steps must be greater than or equal to max '
+                f'iterations {runner.max_iters} of runner, but total steps '
+                f'is {total_steps}.')
+
+        if isinstance(runner.optimizer, dict):
+            self.base_lr = {}
+            for k, optim in runner.optimizer.items():
+                _max_lr = format_param(k, optim, self._max_lr)
+                self.base_lr[k] = [lr / self.div_factor for lr in _max_lr]
+                for group, lr in zip(optim.param_groups, self.base_lr[k]):
+                    group.setdefault('initial_lr', lr)
+        else:
+            k = type(runner.optimizer).__name__
+            _max_lr = format_param(k, runner.optimizer, self._max_lr)
+            self.base_lr = [lr / self.div_factor for lr in _max_lr]
+            for group, lr in zip(runner.optimizer.param_groups, self.base_lr):
+                group.setdefault('initial_lr', lr)
+
+        if self.three_phase:
+            self.lr_phases.append(
+                [float(self.pct_start * total_steps) - 1, 1, self.div_factor])
+            self.lr_phases.append([
+                float(2 * self.pct_start * total_steps) - 2, self.div_factor, 1
+            ])
+            self.lr_phases.append(
+                [total_steps - 1, 1, 1 / self.final_div_factor])
+        else:
+            self.lr_phases.append(
+                [float(self.pct_start * total_steps) - 1, 1, self.div_factor])
+            self.lr_phases.append(
+                [total_steps - 1, self.div_factor, 1 / self.final_div_factor])
+
+    def get_lr(self, runner, base_lr):
+        curr_iter = runner.iter
+        start_iter = 0
+        for i, (end_iter, start_lr, end_lr) in enumerate(self.lr_phases):
+            if curr_iter <= end_iter:
+                pct = (curr_iter - start_iter) / (end_iter - start_iter)
+                lr = self.anneal_func(base_lr * start_lr, base_lr * end_lr,
+                                      pct)
+                break
+            start_iter = end_iter
+        return lr
+
+
+def annealing_cos(start, end, factor, weight=1):
+    """Calculate annealing cos learning rate.
+
+    Cosine anneal from `weight * start + (1 - weight) * end` to `end` as
+    percentage goes from 0.0 to 1.0.
+
+    Args:
+        start (float): The starting learning rate of the cosine annealing.
+        end (float): The ending learning rate of the cosine annealing.
+        factor (float): The coefficient of `pi` when calculating the current
+            percentage. Range from 0.0 to 1.0.
+        weight (float, optional): The combination factor of `start` and `end`
+            when calculating the actual starting learning rate. Defaults to 1.
+    """
+    cos_out = cos(pi * factor) + 1
+    return end + 0.5 * weight * (start - end) * cos_out
+
+
+def annealing_linear(start, end, factor):
+    """Calculate annealing linear learning rate.
+
+    Linear anneal from `start` to `end` as percentage goes from 0.0 to 1.0.
+
+    Args:
+        start (float): The starting learning rate of the linear annealing.
+        end (float): The ending learning rate of the linear annealing.
+        factor (float): The annealing progress as a fraction of the total
+            schedule. Range from 0.0 to 1.0.
+    """
+    return start + (end - start) * factor
+
+
+def format_param(name, optim, param):
+    if isinstance(param, numbers.Number):
+        return [param] * len(optim.param_groups)
+    elif isinstance(param, (list, tuple)):  # multi param groups
+        if len(param) != len(optim.param_groups):
+            raise ValueError(f'expected {len(optim.param_groups)} '
+                             f'values for {name}, got {len(param)}')
+        return param
+    else:  # multi optimizers
+        if name not in param:
+            raise KeyError(f'{name} is not found in {param.keys()}')
+        return param[name]
diff --git a/mmcv/runner/hooks/optimizer.py b/mmcv/runner/hooks/optimizer.py
new file mode 100644
index 0000000..af1aa00
--- /dev/null
+++ b/mmcv/runner/hooks/optimizer.py
@@ -0,0 +1,506 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import copy
+from collections import defaultdict
+from itertools import chain
+
+from torch.nn.utils import clip_grad
+
+from torch.nn.modules.batchnorm import _BatchNorm
+from mmcv.utils import LossScaler, wrap_fp16_model, TORCH_VERSION, digit_version, allreduce_grads
+from .hook import HOOKS, Hook
+
+try:
+    # If PyTorch version >= 1.6.0, torch.cuda.amp.GradScaler would be imported
+    # and used; otherwise, auto fp16 will adopt mmcv's implementation.
+    from torch.cuda.amp import GradScaler
+except ImportError:
+    pass
+
+
+@HOOKS.register_module()
+class OptimizerHook(Hook):
+
+    def __init__(self, grad_clip=None):
+        self.grad_clip = grad_clip
+
+    def clip_grads(self, params):
+        params = list(
+            filter(lambda p: p.requires_grad and p.grad is not None, params))
+        if len(params) > 0:
+            return clip_grad.clip_grad_norm_(params, **self.grad_clip)
+
+    def after_train_iter(self, runner):
+        runner.optimizer.zero_grad()
+        runner.outputs['loss'].backward()
+        if self.grad_clip is not None:
+            grad_norm = self.clip_grads(runner.model.parameters())
+            if grad_norm is not None:
+                # Add grad norm to the logger
+                runner.log_buffer.update({'grad_norm': float(grad_norm)},
+                                         runner.outputs['num_samples'])
+        runner.optimizer.step()
+
+
+@HOOKS.register_module()
+class GradientCumulativeOptimizerHook(OptimizerHook):
+    """Optimizer hook that implements gradient accumulation over multiple
+    iterations.
+
+    Args:
+        cumulative_iters (int, optional): Number of iterations to accumulate
+            gradients. The optimizer will step every `cumulative_iters` iters.
+            Defaults to 1.
+
+    Examples:
+        >>> # Use cumulative_iters to simulate a large batch size
+        >>> # It is helpful when the hardware cannot handle a large batch size.
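+        >>> # With batch_size=64 and cumulative_iters=4, gradients are
+        >>> # accumulated over 4 iterations before each optimizer step,
+        >>> # for an effective batch size of 256: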
+ >>> loader = DataLoader(data, batch_size=64) + >>> optim_hook = GradientCumulativeOptimizerHook(cumulative_iters=4) + >>> # almost equals to + >>> loader = DataLoader(data, batch_size=256) + >>> optim_hook = OptimizerHook() + """ + + def __init__(self, cumulative_iters=1, **kwargs): + super(GradientCumulativeOptimizerHook, self).__init__(**kwargs) + + assert isinstance(cumulative_iters, int) and cumulative_iters > 0, \ + f'cumulative_iters only accepts positive int, but got ' \ + f'{type(cumulative_iters)} instead.' + + self.cumulative_iters = cumulative_iters + self.divisible_iters = 0 + self.remainder_iters = 0 + self.initialized = False + + def has_batch_norm(self, module): + if isinstance(module, _BatchNorm): + return True + for m in module.children(): + if self.has_batch_norm(m): + return True + return False + + def _init(self, runner): + if runner.iter % self.cumulative_iters != 0: + runner.logger.warning( + 'Resume iter number is not divisible by cumulative_iters in ' + 'GradientCumulativeOptimizerHook, which means the gradient of ' + 'some iters is lost and the result may be influenced slightly.' + ) + + if self.has_batch_norm(runner.model) and self.cumulative_iters > 1: + runner.logger.warning( + 'GradientCumulativeOptimizerHook may slightly decrease ' + 'performance if the model has BatchNorm layers.') + + residual_iters = runner.max_iters - runner.iter + + self.divisible_iters = ( + residual_iters // self.cumulative_iters * self.cumulative_iters) + self.remainder_iters = residual_iters - self.divisible_iters + + self.initialized = True + + def after_train_iter(self, runner): + if not self.initialized: + self._init(runner) + + if runner.iter < self.divisible_iters: + loss_factor = self.cumulative_iters + else: + loss_factor = self.remainder_iters + loss = runner.outputs['loss'] + loss = loss / loss_factor + loss.backward() + + if (self.every_n_iters(runner, self.cumulative_iters) + or self.is_last_iter(runner)): + + if self.grad_clip is not None: + grad_norm = self.clip_grads(runner.model.parameters()) + if grad_norm is not None: + # Add grad norm to the logger + runner.log_buffer.update({'grad_norm': float(grad_norm)}, + runner.outputs['num_samples']) + runner.optimizer.step() + runner.optimizer.zero_grad() + + +if (digit_version(TORCH_VERSION) >= digit_version('1.6.0')): + + @HOOKS.register_module() + class Fp16OptimizerHook(OptimizerHook): + """FP16 optimizer hook (using PyTorch's implementation). + + If you are using PyTorch >= 1.6, torch.cuda.amp is used as the backend, + to take care of the optimization procedure. + + Args: + loss_scale (float | str | dict): Scale factor configuration. + If loss_scale is a float, static loss scaling will be used with + the specified scale. If loss_scale is a string, it must be + 'dynamic', then dynamic loss scaling will be used. + It can also be a dict containing arguments of GradScalar. + Defaults to 512. For Pytorch >= 1.6, mmcv uses official + implementation of GradScaler. If you use a dict version of + loss_scale to create GradScaler, please refer to: + https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler + for the parameters. + + Examples: + >>> loss_scale = dict( + ... init_scale=65536.0, + ... growth_factor=2.0, + ... backoff_factor=0.5, + ... growth_interval=2000 + ... 
) + >>> optimizer_hook = Fp16OptimizerHook(loss_scale=loss_scale) + """ + + def __init__(self, + grad_clip=None, + coalesce=True, + bucket_size_mb=-1, + loss_scale=512., + distributed=True): + self.grad_clip = grad_clip + self.coalesce = coalesce + self.bucket_size_mb = bucket_size_mb + self.distributed = distributed + self._scale_update_param = None + if loss_scale == 'dynamic': + self.loss_scaler = GradScaler() + elif isinstance(loss_scale, float): + self._scale_update_param = loss_scale + self.loss_scaler = GradScaler(init_scale=loss_scale) + elif isinstance(loss_scale, dict): + self.loss_scaler = GradScaler(**loss_scale) + else: + raise ValueError('loss_scale must be of type float, dict, or ' + f'"dynamic", got {loss_scale}') + + def before_run(self, runner): + """Preparing steps before Mixed Precision Training.""" + # wrap model mode to fp16 + wrap_fp16_model(runner.model) + # resume from state dict + if 'fp16' in runner.meta and 'loss_scaler' in runner.meta['fp16']: + scaler_state_dict = runner.meta['fp16']['loss_scaler'] + self.loss_scaler.load_state_dict(scaler_state_dict) + + def copy_grads_to_fp32(self, fp16_net, fp32_weights): + """Copy gradients from fp16 model to fp32 weight copy.""" + for fp32_param, fp16_param in zip(fp32_weights, + fp16_net.parameters()): + if fp16_param.grad is not None: + if fp32_param.grad is None: + fp32_param.grad = fp32_param.data.new( + fp32_param.size()) + fp32_param.grad.copy_(fp16_param.grad) + + def copy_params_to_fp16(self, fp16_net, fp32_weights): + """Copy updated params from fp32 weight copy to fp16 model.""" + for fp16_param, fp32_param in zip(fp16_net.parameters(), + fp32_weights): + fp16_param.data.copy_(fp32_param.data) + + def after_train_iter(self, runner): + """Backward optimization steps for Mixed Precision Training. For + dynamic loss scaling, please refer to + https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler. + + 1. Scale the loss by a scale factor. + 2. Backward the loss to obtain the gradients. + 3. Unscale the optimizer’s gradient tensors. + 4. Call optimizer.step() and update scale factor. + 5. Save loss_scaler state_dict for resume purpose. + """ + # clear grads of last iteration + runner.model.zero_grad() + runner.optimizer.zero_grad() + + self.loss_scaler.scale(runner.outputs['loss']).backward() + self.loss_scaler.unscale_(runner.optimizer) + # grad clip + if self.grad_clip is not None: + grad_norm = self.clip_grads(runner.model.parameters()) + if grad_norm is not None: + # Add grad norm to the logger + runner.log_buffer.update({'grad_norm': float(grad_norm)}, + runner.outputs['num_samples']) + # backward and update scaler + self.loss_scaler.step(runner.optimizer) + self.loss_scaler.update(self._scale_update_param) + + # save state_dict of loss_scaler + runner.meta.setdefault( + 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict() + + @HOOKS.register_module() + class GradientCumulativeFp16OptimizerHook(GradientCumulativeOptimizerHook, + Fp16OptimizerHook): + """Fp16 optimizer Hook (using PyTorch's implementation) implements + multi-iters gradient cumulating. + + If you are using PyTorch >= 1.6, torch.cuda.amp is used as the backend, + to take care of the optimization procedure. 
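+
+        Example (illustrative)::
+
+            >>> # accumulate fp16 gradients over 4 iterations per step
+            >>> optimizer_hook = GradientCumulativeFp16OptimizerHook(
+            ...     cumulative_iters=4, loss_scale='dynamic')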
+ """ + + def __init__(self, *args, **kwargs): + super(GradientCumulativeFp16OptimizerHook, + self).__init__(*args, **kwargs) + + def after_train_iter(self, runner): + if not self.initialized: + self._init(runner) + + if runner.iter < self.divisible_iters: + loss_factor = self.cumulative_iters + else: + loss_factor = self.remainder_iters + loss = runner.outputs['loss'] + loss = loss / loss_factor + + self.loss_scaler.scale(loss).backward() + + if (self.every_n_iters(runner, self.cumulative_iters) + or self.is_last_iter(runner)): + + # copy fp16 grads in the model to fp32 params in the optimizer + self.loss_scaler.unscale_(runner.optimizer) + + if self.grad_clip is not None: + grad_norm = self.clip_grads(runner.model.parameters()) + if grad_norm is not None: + # Add grad norm to the logger + runner.log_buffer.update( + {'grad_norm': float(grad_norm)}, + runner.outputs['num_samples']) + + # backward and update scaler + self.loss_scaler.step(runner.optimizer) + self.loss_scaler.update(self._scale_update_param) + + # save state_dict of loss_scaler + runner.meta.setdefault( + 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict() + + # clear grads + runner.model.zero_grad() + runner.optimizer.zero_grad() + +else: + + @HOOKS.register_module() + class Fp16OptimizerHook(OptimizerHook): + """FP16 optimizer hook (mmcv's implementation). + + The steps of fp16 optimizer is as follows. + 1. Scale the loss value. + 2. BP in the fp16 model. + 2. Copy gradients from fp16 model to fp32 weights. + 3. Update fp32 weights. + 4. Copy updated parameters from fp32 weights to fp16 model. + + Refer to https://arxiv.org/abs/1710.03740 for more details. + + Args: + loss_scale (float | str | dict): Scale factor configuration. + If loss_scale is a float, static loss scaling will be used with + the specified scale. If loss_scale is a string, it must be + 'dynamic', then dynamic loss scaling will be used. + It can also be a dict containing arguments of LossScaler. + Defaults to 512. + """ + + def __init__(self, + grad_clip=None, + coalesce=True, + bucket_size_mb=-1, + loss_scale=512., + distributed=True): + self.grad_clip = grad_clip + self.coalesce = coalesce + self.bucket_size_mb = bucket_size_mb + self.distributed = distributed + if loss_scale == 'dynamic': + self.loss_scaler = LossScaler(mode='dynamic') + elif isinstance(loss_scale, float): + self.loss_scaler = LossScaler( + init_scale=loss_scale, mode='static') + elif isinstance(loss_scale, dict): + self.loss_scaler = LossScaler(**loss_scale) + else: + raise ValueError('loss_scale must be of type float, dict, or ' + f'"dynamic", got {loss_scale}') + + def before_run(self, runner): + """Preparing steps before Mixed Precision Training. + + 1. Make a master copy of fp32 weights for optimization. + 2. Convert the main model from fp32 to fp16. 
+ """ + # keep a copy of fp32 weights + old_groups = runner.optimizer.param_groups + runner.optimizer.param_groups = copy.deepcopy( + runner.optimizer.param_groups) + state = defaultdict(dict) + p_map = { + old_p: p + for old_p, p in zip( + chain(*(g['params'] for g in old_groups)), + chain(*(g['params'] + for g in runner.optimizer.param_groups))) + } + for k, v in runner.optimizer.state.items(): + state[p_map[k]] = v + runner.optimizer.state = state + # convert model to fp16 + wrap_fp16_model(runner.model) + # resume from state dict + if 'fp16' in runner.meta and 'loss_scaler' in runner.meta['fp16']: + scaler_state_dict = runner.meta['fp16']['loss_scaler'] + self.loss_scaler.load_state_dict(scaler_state_dict) + + def copy_grads_to_fp32(self, fp16_net, fp32_weights): + """Copy gradients from fp16 model to fp32 weight copy.""" + for fp32_param, fp16_param in zip(fp32_weights, + fp16_net.parameters()): + if fp16_param.grad is not None: + if fp32_param.grad is None: + fp32_param.grad = fp32_param.data.new( + fp32_param.size()) + fp32_param.grad.copy_(fp16_param.grad) + + def copy_params_to_fp16(self, fp16_net, fp32_weights): + """Copy updated params from fp32 weight copy to fp16 model.""" + for fp16_param, fp32_param in zip(fp16_net.parameters(), + fp32_weights): + fp16_param.data.copy_(fp32_param.data) + + def after_train_iter(self, runner): + """Backward optimization steps for Mixed Precision Training. For + dynamic loss scaling, please refer `loss_scalar.py` + + 1. Scale the loss by a scale factor. + 2. Backward the loss to obtain the gradients (fp16). + 3. Copy gradients from the model to the fp32 weight copy. + 4. Scale the gradients back and update the fp32 weight copy. + 5. Copy back the params from fp32 weight copy to the fp16 model. + 6. Save loss_scaler state_dict for resume purpose. 
+ """ + # clear grads of last iteration + runner.model.zero_grad() + runner.optimizer.zero_grad() + # scale the loss value + scaled_loss = runner.outputs['loss'] * self.loss_scaler.loss_scale + scaled_loss.backward() + # copy fp16 grads in the model to fp32 params in the optimizer + + fp32_weights = [] + for param_group in runner.optimizer.param_groups: + fp32_weights += param_group['params'] + self.copy_grads_to_fp32(runner.model, fp32_weights) + # allreduce grads + if self.distributed: + allreduce_grads(fp32_weights, self.coalesce, + self.bucket_size_mb) + + has_overflow = self.loss_scaler.has_overflow(fp32_weights) + # if has overflow, skip this iteration + if not has_overflow: + # scale the gradients back + for param in fp32_weights: + if param.grad is not None: + param.grad.div_(self.loss_scaler.loss_scale) + if self.grad_clip is not None: + grad_norm = self.clip_grads(fp32_weights) + if grad_norm is not None: + # Add grad norm to the logger + runner.log_buffer.update( + {'grad_norm': float(grad_norm)}, + runner.outputs['num_samples']) + # update fp32 params + runner.optimizer.step() + # copy fp32 params to the fp16 model + self.copy_params_to_fp16(runner.model, fp32_weights) + self.loss_scaler.update_scale(has_overflow) + if has_overflow: + runner.logger.warning('Check overflow, downscale loss scale ' + f'to {self.loss_scaler.cur_scale}') + + # save state_dict of loss_scaler + runner.meta.setdefault( + 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict() + + @HOOKS.register_module() + class GradientCumulativeFp16OptimizerHook(GradientCumulativeOptimizerHook, + Fp16OptimizerHook): + """Fp16 optimizer Hook (using mmcv implementation) implements multi- + iters gradient cumulating.""" + + def __init__(self, *args, **kwargs): + super(GradientCumulativeFp16OptimizerHook, + self).__init__(*args, **kwargs) + + def after_train_iter(self, runner): + if not self.initialized: + self._init(runner) + + if runner.iter < self.divisible_iters: + loss_factor = self.cumulative_iters + else: + loss_factor = self.remainder_iters + + loss = runner.outputs['loss'] + loss = loss / loss_factor + + # scale the loss value + scaled_loss = loss * self.loss_scaler.loss_scale + scaled_loss.backward() + + if (self.every_n_iters(runner, self.cumulative_iters) + or self.is_last_iter(runner)): + + # copy fp16 grads in the model to fp32 params in the optimizer + fp32_weights = [] + for param_group in runner.optimizer.param_groups: + fp32_weights += param_group['params'] + self.copy_grads_to_fp32(runner.model, fp32_weights) + # allreduce grads + if self.distributed: + allreduce_grads(fp32_weights, self.coalesce, + self.bucket_size_mb) + + has_overflow = self.loss_scaler.has_overflow(fp32_weights) + # if has overflow, skip this iteration + if not has_overflow: + # scale the gradients back + for param in fp32_weights: + if param.grad is not None: + param.grad.div_(self.loss_scaler.loss_scale) + if self.grad_clip is not None: + grad_norm = self.clip_grads(fp32_weights) + if grad_norm is not None: + # Add grad norm to the logger + runner.log_buffer.update( + {'grad_norm': float(grad_norm)}, + runner.outputs['num_samples']) + # update fp32 params + runner.optimizer.step() + # copy fp32 params to the fp16 model + self.copy_params_to_fp16(runner.model, fp32_weights) + else: + runner.logger.warning( + 'Check overflow, downscale loss scale ' + f'to {self.loss_scaler.cur_scale}') + + self.loss_scaler.update_scale(has_overflow) + + # save state_dict of loss_scaler + runner.meta.setdefault( + 'fp16', {})['loss_scaler'] = 
self.loss_scaler.state_dict()
+
+                # clear grads
+                runner.model.zero_grad()
+                runner.optimizer.zero_grad()
diff --git a/mmcv/runner/hooks/sampler_seed.py b/mmcv/runner/hooks/sampler_seed.py
new file mode 100644
index 0000000..ee0dc6b
--- /dev/null
+++ b/mmcv/runner/hooks/sampler_seed.py
@@ -0,0 +1,20 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .hook import HOOKS, Hook
+
+
+@HOOKS.register_module()
+class DistSamplerSeedHook(Hook):
+    """Data-loading sampler for distributed training.
+
+    In distributed training, it is only useful in conjunction with
+    :obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
+    purpose with :obj:`IterLoader`.
+    """
+
+    def before_epoch(self, runner):
+        if hasattr(runner.data_loader.sampler, 'set_epoch'):
+            # in case the data loader uses `SequentialSampler` in PyTorch
+            runner.data_loader.sampler.set_epoch(runner.epoch)
+        elif hasattr(runner.data_loader.batch_sampler.sampler, 'set_epoch'):
+            # the batch sampler in PyTorch wraps the sampler as one of its
+            # attributes.
+            runner.data_loader.batch_sampler.sampler.set_epoch(runner.epoch)
diff --git a/mmcv/runner/hooks/vad_hooks.py b/mmcv/runner/hooks/vad_hooks.py
new file mode 100644
index 0000000..56a4b05
--- /dev/null
+++ b/mmcv/runner/hooks/vad_hooks.py
@@ -0,0 +1,17 @@
+from mmcv.runner.hooks.hook import HOOKS, Hook
+from mmcv.parallel import is_module_wrapper
+
+
+@HOOKS.register_module()
+class CustomSetEpochInfoHook(Hook):
+    """Set the runner's epoch information on the model."""
+
+    def before_train_epoch(self, runner):
+        epoch = runner.epoch
+        model = runner.model
+        if is_module_wrapper(model):
+            model = model.module
+        model.set_epoch(epoch)
diff --git a/mmcv/structures/__init__.py b/mmcv/structures/__init__.py
new file mode 100644
index 0000000..a0e31b2
--- /dev/null
+++ b/mmcv/structures/__init__.py
@@ -0,0 +1,8 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .boxes import BoxMode, Boxes
+from .instances import Instances
+# from .keypoints import Keypoints, heatmaps_to_keypoints
+from .masks import ROIMasks
+# from .masks import BitMasks, PolygonMasks, polygons_to_bitmask, ROIMasks
+# from .rotated_boxes import RotatedBoxes
+# from .rotated_boxes import pairwise_iou as pairwise_iou_rotated
diff --git a/mmcv/structures/boxes.py b/mmcv/structures/boxes.py
new file mode 100644
index 0000000..fd396f6
--- /dev/null
+++ b/mmcv/structures/boxes.py
@@ -0,0 +1,425 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import math
+import numpy as np
+from enum import IntEnum, unique
+from typing import List, Tuple, Union
+import torch
+from torch import device
+
+_RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray]
+
+
+@unique
+class BoxMode(IntEnum):
+    """
+    Enum of different ways to represent a box.
+    """
+
+    XYXY_ABS = 0
+    """
+    (x0, y0, x1, y1) in absolute floating points coordinates.
+    The coordinates in range [0, width or height].
+    """
+    XYWH_ABS = 1
+    """
+    (x0, y0, w, h) in absolute floating points coordinates.
+    """
+    XYXY_REL = 2
+    """
+    Not yet supported!
+    (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.
+    """
+    XYWH_REL = 3
+    """
+    Not yet supported!
+    (x0, y0, w, h) in range [0, 1]. They are relative to the size of the image.
+    """
+    XYWHA_ABS = 4
+    """
+    (xc, yc, w, h, a) in absolute floating points coordinates.
+    (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.
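+    For example (illustrative), (10, 10, 4, 2, 30) is a 4x2 box centered at
+    (10, 10) and rotated 30 degrees counter-clockwise.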
+ """ + + @staticmethod + def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType: + """ + Args: + box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5 + from_mode, to_mode (BoxMode) + + Returns: + The converted box of the same type. + """ + if from_mode == to_mode: + return box + + original_type = type(box) + is_numpy = isinstance(box, np.ndarray) + single_box = isinstance(box, (list, tuple)) + if single_box: + assert len(box) == 4 or len(box) == 5, ( + "BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor," + " where k == 4 or 5" + ) + arr = torch.tensor(box)[None, :] + else: + # avoid modifying the input box + if is_numpy: + arr = torch.from_numpy(np.asarray(box)).clone() + else: + arr = box.clone() + + assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [ + BoxMode.XYXY_REL, + BoxMode.XYWH_REL, + ], "Relative mode not yet supported!" + + if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS: + assert ( + arr.shape[-1] == 5 + ), "The last dimension of input shape must be 5 for XYWHA format" + original_dtype = arr.dtype + arr = arr.double() + + w = arr[:, 2] + h = arr[:, 3] + a = arr[:, 4] + c = torch.abs(torch.cos(a * math.pi / 180.0)) + s = torch.abs(torch.sin(a * math.pi / 180.0)) + # This basically computes the horizontal bounding rectangle of the rotated box + new_w = c * w + s * h + new_h = c * h + s * w + + # convert center to top-left corner + arr[:, 0] -= new_w / 2.0 + arr[:, 1] -= new_h / 2.0 + # bottom-right corner + arr[:, 2] = arr[:, 0] + new_w + arr[:, 3] = arr[:, 1] + new_h + + arr = arr[:, :4].to(dtype=original_dtype) + elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS: + original_dtype = arr.dtype + arr = arr.double() + arr[:, 0] += arr[:, 2] / 2.0 + arr[:, 1] += arr[:, 3] / 2.0 + angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype) + arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype) + else: + if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS: + arr[:, 2] += arr[:, 0] + arr[:, 3] += arr[:, 1] + elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS: + arr[:, 2] -= arr[:, 0] + arr[:, 3] -= arr[:, 1] + else: + raise NotImplementedError( + "Conversion from BoxMode {} to {} is not supported yet".format( + from_mode, to_mode + ) + ) + + if single_box: + return original_type(arr.flatten().tolist()) + if is_numpy: + return arr.numpy() + else: + return arr + + +class Boxes: + """ + This structure stores a list of boxes as a Nx4 torch.Tensor. + It supports some common methods about boxes + (`area`, `clip`, `nonempty`, etc), + and also behaves like a Tensor + (support indexing, `to(device)`, `.device`, and iteration over all boxes) + + Attributes: + tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2). + """ + + def __init__(self, tensor: torch.Tensor): + """ + Args: + tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2). + """ + if not isinstance(tensor, torch.Tensor): + tensor = torch.as_tensor(tensor, dtype=torch.float32, device=torch.device("cpu")) + else: + tensor = tensor.to(torch.float32) + if tensor.numel() == 0: + # Use reshape, so we don't end up creating a new tensor that does not depend on + # the inputs (and consequently confuses jit) + tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32) + assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size() + + self.tensor = tensor + + def clone(self) -> "Boxes": + """ + Clone the Boxes. 
+ + Returns: + Boxes + """ + return Boxes(self.tensor.clone()) + + def to(self, device: torch.device): + # Boxes are assumed float32 and does not support to(dtype) + return Boxes(self.tensor.to(device=device)) + + def area(self) -> torch.Tensor: + """ + Computes the area of all the boxes. + + Returns: + torch.Tensor: a vector with areas of each box. + """ + box = self.tensor + area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1]) + return area + + def clip(self, box_size: Tuple[int, int]) -> None: + """ + Clip (in place) the boxes by limiting x coordinates to the range [0, width] + and y coordinates to the range [0, height]. + + Args: + box_size (height, width): The clipping box's size. + """ + assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!" + h, w = box_size + x1 = self.tensor[:, 0].clamp(min=0, max=w) + y1 = self.tensor[:, 1].clamp(min=0, max=h) + x2 = self.tensor[:, 2].clamp(min=0, max=w) + y2 = self.tensor[:, 3].clamp(min=0, max=h) + self.tensor = torch.stack((x1, y1, x2, y2), dim=-1) + + def nonempty(self, threshold: float = 0.0) -> torch.Tensor: + """ + Find boxes that are non-empty. + A box is considered empty, if either of its side is no larger than threshold. + + Returns: + Tensor: + a binary vector which represents whether each box is empty + (False) or non-empty (True). + """ + box = self.tensor + widths = box[:, 2] - box[:, 0] + heights = box[:, 3] - box[:, 1] + keep = (widths > threshold) & (heights > threshold) + return keep + + def __getitem__(self, item) -> "Boxes": + """ + Args: + item: int, slice, or a BoolTensor + + Returns: + Boxes: Create a new :class:`Boxes` by indexing. + + The following usage are allowed: + + 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box. + 2. `new_boxes = boxes[2:10]`: return a slice of boxes. + 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor + with `length = len(boxes)`. Nonzero elements in the vector will be selected. + + Note that the returned Boxes might share storage with this Boxes, + subject to Pytorch's indexing semantics. + """ + if isinstance(item, int): + return Boxes(self.tensor[item].view(1, -1)) + b = self.tensor[item] + assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item) + return Boxes(b) + + def __len__(self) -> int: + return self.tensor.shape[0] + + def __repr__(self) -> str: + return "Boxes(" + str(self.tensor) + ")" + + def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor: + """ + Args: + box_size (height, width): Size of the reference box. + boundary_threshold (int): Boxes that extend beyond the reference box + boundary by more than boundary_threshold are considered "outside". + + Returns: + a binary vector, indicating whether each box is inside the reference box. + """ + height, width = box_size + inds_inside = ( + (self.tensor[..., 0] >= -boundary_threshold) + & (self.tensor[..., 1] >= -boundary_threshold) + & (self.tensor[..., 2] < width + boundary_threshold) + & (self.tensor[..., 3] < height + boundary_threshold) + ) + return inds_inside + + def get_centers(self) -> torch.Tensor: + """ + Returns: + The box centers in a Nx2 array of (x, y). 
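+
+        A minimal sketch with made-up values (not from this patch):
+
+        .. code-block:: python
+
+            boxes = Boxes(torch.tensor([[0., 0., 4., 2.]]))
+            boxes.get_centers()  # tensor([[2., 1.]])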
+ """ + return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2 + + def scale(self, scale_x: float, scale_y: float) -> None: + """ + Scale the box with horizontal and vertical scaling factors + """ + self.tensor[:, 0::2] *= scale_x + self.tensor[:, 1::2] *= scale_y + + @classmethod + def cat(cls, boxes_list: List["Boxes"]) -> "Boxes": + """ + Concatenates a list of Boxes into a single Boxes + + Arguments: + boxes_list (list[Boxes]) + + Returns: + Boxes: the concatenated Boxes + """ + assert isinstance(boxes_list, (list, tuple)) + if len(boxes_list) == 0: + return cls(torch.empty(0)) + assert all([isinstance(box, Boxes) for box in boxes_list]) + + # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input + cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0)) + return cat_boxes + + @property + def device(self) -> device: + return self.tensor.device + + # type "Iterator[torch.Tensor]", yield, and iter() not supported by torchscript + # https://github.com/pytorch/pytorch/issues/18627 + @torch.jit.unused + def __iter__(self): + """ + Yield a box as a Tensor of shape (4,) at a time. + """ + yield from self.tensor + + +def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: + """ + Given two lists of boxes of size N and M, + compute the intersection area between __all__ N x M pairs of boxes. + The box order must be (xmin, ymin, xmax, ymax) + + Args: + boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. + + Returns: + Tensor: intersection, sized [N,M]. + """ + boxes1, boxes2 = boxes1.tensor, boxes2.tensor + width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max( + boxes1[:, None, :2], boxes2[:, :2] + ) # [N,M,2] + + width_height.clamp_(min=0) # [N,M,2] + intersection = width_height.prod(dim=2) # [N,M] + return intersection + + +# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py +# with slight modifications +def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: + """ + Given two lists of boxes of size N and M, compute the IoU + (intersection over union) between **all** N x M pairs of boxes. + The box order must be (xmin, ymin, xmax, ymax). + + Args: + boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. + + Returns: + Tensor: IoU, sized [N,M]. + """ + area1 = boxes1.area() # [N] + area2 = boxes2.area() # [M] + inter = pairwise_intersection(boxes1, boxes2) + + # handle empty boxes + iou = torch.where( + inter > 0, + inter / (area1[:, None] + area2 - inter), + torch.zeros(1, dtype=inter.dtype, device=inter.device), + ) + return iou + + +def pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: + """ + Similar to :func:`pariwise_iou` but compute the IoA (intersection over boxes2 area). + + Args: + boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. + + Returns: + Tensor: IoA, sized [N,M]. + """ + area2 = boxes2.area() # [M] + inter = pairwise_intersection(boxes1, boxes2) + + # handle empty boxes + ioa = torch.where( + inter > 0, inter / area2, torch.zeros(1, dtype=inter.dtype, device=inter.device) + ) + return ioa + + +def pairwise_point_box_distance(points: torch.Tensor, boxes: Boxes): + """ + Pairwise distance between N points and M boxes. The distance between a + point and a box is represented by the distance from the point to 4 edges + of the box. Distances are all positive when the point is inside the box. + + Args: + points: Nx2 coordinates. 
Each row is (x, y) + boxes: M boxes + + Returns: + Tensor: distances of size (N, M, 4). The 4 values are distances from + the point to the left, top, right, bottom of the box. + """ + x, y = points.unsqueeze(dim=2).unbind(dim=1) # (N, 1) + x0, y0, x1, y1 = boxes.tensor.unsqueeze(dim=0).unbind(dim=2) # (1, M) + return torch.stack([x - x0, y - y0, x1 - x, y1 - y], dim=2) + + +def matched_pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: + """ + Compute pairwise intersection over union (IOU) of two sets of matched + boxes that have the same number of boxes. + Similar to :func:`pairwise_iou`, but computes only diagonal elements of the matrix. + + Args: + boxes1 (Boxes): bounding boxes, sized [N,4]. + boxes2 (Boxes): same length as boxes1 + Returns: + Tensor: iou, sized [N]. + """ + assert len(boxes1) == len( + boxes2 + ), "boxlists should have the same" "number of entries, got {}, {}".format( + len(boxes1), len(boxes2) + ) + area1 = boxes1.area() # [N] + area2 = boxes2.area() # [N] + box1, box2 = boxes1.tensor, boxes2.tensor + lt = torch.max(box1[:, :2], box2[:, :2]) # [N,2] + rb = torch.min(box1[:, 2:], box2[:, 2:]) # [N,2] + wh = (rb - lt).clamp(min=0) # [N,2] + inter = wh[:, 0] * wh[:, 1] # [N] + iou = inter / (area1 + area2 - inter) # [N] + return iou diff --git a/mmcv/structures/image_list.py b/mmcv/structures/image_list.py new file mode 100644 index 0000000..e4243bb --- /dev/null +++ b/mmcv/structures/image_list.py @@ -0,0 +1,129 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +from __future__ import division +from typing import Any, Dict, List, Optional, Tuple +import torch +from torch import device +from torch.nn import functional as F + +from detectron2.layers.wrappers import move_device_like, shapes_to_tensor + + +class ImageList: + """ + Structure that holds a list of images (of possibly + varying sizes) as a single tensor. + This works by padding the images to the same size. + The original sizes of each image is stored in `image_sizes`. + + Attributes: + image_sizes (list[tuple[int, int]]): each tuple is (h, w). + During tracing, it becomes list[Tensor] instead. + """ + + def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]): + """ + Arguments: + tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1 + image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can + be smaller than (H, W) due to padding. + """ + self.tensor = tensor + self.image_sizes = image_sizes + + def __len__(self) -> int: + return len(self.image_sizes) + + def __getitem__(self, idx) -> torch.Tensor: + """ + Access the individual image in its original size. + + Args: + idx: int or slice + + Returns: + Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1 + """ + size = self.image_sizes[idx] + return self.tensor[idx, ..., : size[0], : size[1]] + + @torch.jit.unused + def to(self, *args: Any, **kwargs: Any) -> "ImageList": + cast_tensor = self.tensor.to(*args, **kwargs) + return ImageList(cast_tensor, self.image_sizes) + + @property + def device(self) -> device: + return self.tensor.device + + @staticmethod + def from_tensors( + tensors: List[torch.Tensor], + size_divisibility: int = 0, + pad_value: float = 0.0, + padding_constraints: Optional[Dict[str, int]] = None, + ) -> "ImageList": + """ + Args: + tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or + (C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded + to the same shape with `pad_value`. 
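+                For example (shapes are illustrative, not from this patch),
+                two images of shape (3, 480, 640) and (3, 512, 512) are
+                batched into a single (2, 3, 512, 640) tensor before any
+                divisibility padding is applied.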
+ size_divisibility (int): If `size_divisibility > 0`, add padding to ensure + the common height and width is divisible by `size_divisibility`. + This depends on the model and many models need a divisibility of 32. + pad_value (float): value to pad. + padding_constraints (optional[Dict]): If given, it would follow the format as + {"size_divisibility": int, "square_size": int}, where `size_divisibility` will + overwrite the above one if presented and `square_size` indicates the + square padding size if `square_size` > 0. + Returns: + an `ImageList`. + """ + assert len(tensors) > 0 + assert isinstance(tensors, (tuple, list)) + for t in tensors: + assert isinstance(t, torch.Tensor), type(t) + assert t.shape[:-2] == tensors[0].shape[:-2], t.shape + + image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors] + image_sizes_tensor = [shapes_to_tensor(x) for x in image_sizes] + max_size = torch.stack(image_sizes_tensor).max(0).values + + if padding_constraints is not None: + square_size = padding_constraints.get("square_size", 0) + if square_size > 0: + # pad to square. + max_size[0] = max_size[1] = square_size + if "size_divisibility" in padding_constraints: + size_divisibility = padding_constraints["size_divisibility"] + if size_divisibility > 1: + stride = size_divisibility + # the last two dims are H,W, both subject to divisibility requirement + max_size = (max_size + (stride - 1)).div(stride, rounding_mode="floor") * stride + + # handle weirdness of scripting and tracing ... + if torch.jit.is_scripting(): + max_size: List[int] = max_size.to(dtype=torch.long).tolist() + else: + if torch.jit.is_tracing(): + image_sizes = image_sizes_tensor + + if len(tensors) == 1: + # This seems slightly (2%) faster. + # TODO: check whether it's faster for multiple images as well + image_size = image_sizes[0] + padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]] + batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0) + else: + # max_size can be a tensor in tracing mode, therefore convert to list + batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size) + device = ( + None if torch.jit.is_scripting() else ("cpu" if torch.jit.is_tracing() else None) + ) + batched_imgs = tensors[0].new_full(batch_shape, pad_value, device=device) + batched_imgs = move_device_like(batched_imgs, tensors[0]) + for i, img in enumerate(tensors): + # Use `batched_imgs` directly instead of `img, pad_img = zip(tensors, batched_imgs)` + # Tracing mode cannot capture `copy_()` of temporary locals + batched_imgs[i, ..., : img.shape[-2], : img.shape[-1]].copy_(img) + + return ImageList(batched_imgs.contiguous(), image_sizes) diff --git a/mmcv/structures/instances.py b/mmcv/structures/instances.py new file mode 100644 index 0000000..c9579bc --- /dev/null +++ b/mmcv/structures/instances.py @@ -0,0 +1,194 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import itertools +import warnings +from typing import Any, Dict, List, Tuple, Union +import torch + + +class Instances: + """ + This class represents a list of instances in an image. + It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields". + All fields must have the same ``__len__`` which is the number of instances. + + All other (non-field) attributes of this class are considered private: + they must start with '_' and are not modifiable by a user. + + Some basic usage: + + 1. Set/get/check a field: + + .. code-block:: python + + instances.gt_boxes = Boxes(...) 
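+       instances.scores = torch.rand(len(instances.gt_boxes))  # any equal-length field (illustrative)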
+ print(instances.pred_masks) # a tensor of shape (N, H, W) + print('gt_masks' in instances) + + 2. ``len(instances)`` returns the number of instances + 3. Indexing: ``instances[indices]`` will apply the indexing on all the fields + and returns a new :class:`Instances`. + Typically, ``indices`` is a integer vector of indices, + or a binary mask of length ``num_instances`` + + .. code-block:: python + + category_3_detections = instances[instances.pred_classes == 3] + confident_detections = instances[instances.scores > 0.9] + """ + + def __init__(self, image_size: Tuple[int, int], **kwargs: Any): + """ + Args: + image_size (height, width): the spatial size of the image. + kwargs: fields to add to this `Instances`. + """ + self._image_size = image_size + self._fields: Dict[str, Any] = {} + for k, v in kwargs.items(): + self.set(k, v) + + @property + def image_size(self) -> Tuple[int, int]: + """ + Returns: + tuple: height, width + """ + return self._image_size + + def __setattr__(self, name: str, val: Any) -> None: + if name.startswith("_"): + super().__setattr__(name, val) + else: + self.set(name, val) + + def __getattr__(self, name: str) -> Any: + if name == "_fields" or name not in self._fields: + raise AttributeError("Cannot find field '{}' in the given Instances!".format(name)) + return self._fields[name] + + def set(self, name: str, value: Any) -> None: + """ + Set the field named `name` to `value`. + The length of `value` must be the number of instances, + and must agree with other existing fields in this object. + """ + with warnings.catch_warnings(record=True): + data_len = len(value) + if len(self._fields): + assert ( + len(self) == data_len + ), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self)) + self._fields[name] = value + + def has(self, name: str) -> bool: + """ + Returns: + bool: whether the field called `name` exists. + """ + return name in self._fields + + def remove(self, name: str) -> None: + """ + Remove the field called `name`. + """ + del self._fields[name] + + def get(self, name: str) -> Any: + """ + Returns the field called `name`. + """ + return self._fields[name] + + def get_fields(self) -> Dict[str, Any]: + """ + Returns: + dict: a dict which maps names (str) to data of the fields + + Modifying the returned dict will modify this instance. + """ + return self._fields + + # Tensor-like methods + def to(self, *args: Any, **kwargs: Any) -> "Instances": + """ + Returns: + Instances: all fields are called with a `to(device)`, if the field has this method. + """ + ret = Instances(self._image_size) + for k, v in self._fields.items(): + if hasattr(v, "to"): + v = v.to(*args, **kwargs) + ret.set(k, v) + return ret + + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances": + """ + Args: + item: an index-like object and will be used to index all the fields. + + Returns: + If `item` is a string, return the data in the corresponding field. + Otherwise, returns an `Instances` where all fields are indexed by `item`. 
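+
+        A short sketch (the ``scores`` field is illustrative):
+
+        .. code-block:: python
+
+            keep = instances.scores > 0.5
+            confident = instances[keep]  # every field is filtered together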
+ """ + if type(item) == int: + if item >= len(self) or item < -len(self): + raise IndexError("Instances index out of range!") + else: + item = slice(item, None, len(self)) + + ret = Instances(self._image_size) + for k, v in self._fields.items(): + ret.set(k, v[item]) + return ret + + def __len__(self) -> int: + for v in self._fields.values(): + # use __len__ because len() has to be int and is not friendly to tracing + return v.__len__() + raise NotImplementedError("Empty Instances does not support __len__!") + + def __iter__(self): + raise NotImplementedError("`Instances` object is not iterable!") + + @staticmethod + def cat(instance_lists: List["Instances"]) -> "Instances": + """ + Args: + instance_lists (list[Instances]) + + Returns: + Instances + """ + assert all(isinstance(i, Instances) for i in instance_lists) + assert len(instance_lists) > 0 + if len(instance_lists) == 1: + return instance_lists[0] + + image_size = instance_lists[0].image_size + if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing + for i in instance_lists[1:]: + assert i.image_size == image_size + ret = Instances(image_size) + for k in instance_lists[0]._fields.keys(): + values = [i.get(k) for i in instance_lists] + v0 = values[0] + if isinstance(v0, torch.Tensor): + values = torch.cat(values, dim=0) + elif isinstance(v0, list): + values = list(itertools.chain(*values)) + elif hasattr(type(v0), "cat"): + values = type(v0).cat(values) + else: + raise ValueError("Unsupported type {} for concatenation".format(type(v0))) + ret.set(k, values) + return ret + + def __str__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={}, ".format(len(self)) + s += "image_height={}, ".format(self._image_size[0]) + s += "image_width={}, ".format(self._image_size[1]) + s += "fields=[{}])".format(", ".join((f"{k}: {v}" for k, v in self._fields.items()))) + return s + + __repr__ = __str__ diff --git a/mmcv/structures/keypoints.py b/mmcv/structures/keypoints.py new file mode 100644 index 0000000..b93ebed --- /dev/null +++ b/mmcv/structures/keypoints.py @@ -0,0 +1,235 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np +from typing import Any, List, Tuple, Union +import torch +from torch.nn import functional as F + + +class Keypoints: + """ + Stores keypoint **annotation** data. GT Instances have a `gt_keypoints` property + containing the x,y location and visibility flag of each keypoint. This tensor has shape + (N, K, 3) where N is the number of instances and K is the number of keypoints per instance. + + The visibility flag follows the COCO format and must be one of three integers: + + * v=0: not labeled (in which case x=y=0) + * v=1: labeled but not visible + * v=2: labeled and visible + """ + + def __init__(self, keypoints: Union[torch.Tensor, np.ndarray, List[List[float]]]): + """ + Arguments: + keypoints: A Tensor, numpy array, or list of the x, y, and visibility of each keypoint. + The shape should be (N, K, 3) where N is the number of + instances, and K is the number of keypoints per instance. 
+ """ + device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device("cpu") + keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device) + assert keypoints.dim() == 3 and keypoints.shape[2] == 3, keypoints.shape + self.tensor = keypoints + + def __len__(self) -> int: + return self.tensor.size(0) + + def to(self, *args: Any, **kwargs: Any) -> "Keypoints": + return type(self)(self.tensor.to(*args, **kwargs)) + + @property + def device(self) -> torch.device: + return self.tensor.device + + def to_heatmap(self, boxes: torch.Tensor, heatmap_size: int) -> torch.Tensor: + """ + Convert keypoint annotations to a heatmap of one-hot labels for training, + as described in :paper:`Mask R-CNN`. + + Arguments: + boxes: Nx4 tensor, the boxes to draw the keypoints to + + Returns: + heatmaps: + A tensor of shape (N, K), each element is integer spatial label + in the range [0, heatmap_size**2 - 1] for each keypoint in the input. + valid: + A tensor of shape (N, K) containing whether each keypoint is in the roi or not. + """ + return _keypoints_to_heatmap(self.tensor, boxes, heatmap_size) + + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Keypoints": + """ + Create a new `Keypoints` by indexing on this `Keypoints`. + + The following usage are allowed: + + 1. `new_kpts = kpts[3]`: return a `Keypoints` which contains only one instance. + 2. `new_kpts = kpts[2:10]`: return a slice of key points. + 3. `new_kpts = kpts[vector]`, where vector is a torch.ByteTensor + with `length = len(kpts)`. Nonzero elements in the vector will be selected. + + Note that the returned Keypoints might share storage with this Keypoints, + subject to Pytorch's indexing semantics. + """ + if isinstance(item, int): + return Keypoints([self.tensor[item]]) + return Keypoints(self.tensor[item]) + + def __repr__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={})".format(len(self.tensor)) + return s + + @staticmethod + def cat(keypoints_list: List["Keypoints"]) -> "Keypoints": + """ + Concatenates a list of Keypoints into a single Keypoints + + Arguments: + keypoints_list (list[Keypoints]) + + Returns: + Keypoints: the concatenated Keypoints + """ + assert isinstance(keypoints_list, (list, tuple)) + assert len(keypoints_list) > 0 + assert all(isinstance(keypoints, Keypoints) for keypoints in keypoints_list) + + cat_kpts = type(keypoints_list[0])( + torch.cat([kpts.tensor for kpts in keypoints_list], dim=0) + ) + return cat_kpts + + +# TODO make this nicer, this is a direct translation from C2 (but removing the inner loop) +def _keypoints_to_heatmap( + keypoints: torch.Tensor, rois: torch.Tensor, heatmap_size: int +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Encode keypoint locations into a target heatmap for use in SoftmaxWithLoss across space. + + Maps keypoints from the half-open interval [x1, x2) on continuous image coordinates to the + closed interval [0, heatmap_size - 1] on discrete image coordinates. We use the + continuous-discrete conversion from Heckbert 1990 ("What is the coordinate of a pixel?"): + d = floor(c) and c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate. + + Arguments: + keypoints: tensor of keypoint locations in of shape (N, K, 3). + rois: Nx4 tensor of rois in xyxy format + heatmap_size: integer side length of square heatmap. + + Returns: + heatmaps: A tensor of shape (N, K) containing an integer spatial label + in the range [0, heatmap_size**2 - 1] for each keypoint in the input. 
+ valid: A tensor of shape (N, K) containing whether each keypoint is in + the roi or not. + """ + + if rois.numel() == 0: + return rois.new().long(), rois.new().long() + offset_x = rois[:, 0] + offset_y = rois[:, 1] + scale_x = heatmap_size / (rois[:, 2] - rois[:, 0]) + scale_y = heatmap_size / (rois[:, 3] - rois[:, 1]) + + offset_x = offset_x[:, None] + offset_y = offset_y[:, None] + scale_x = scale_x[:, None] + scale_y = scale_y[:, None] + + x = keypoints[..., 0] + y = keypoints[..., 1] + + x_boundary_inds = x == rois[:, 2][:, None] + y_boundary_inds = y == rois[:, 3][:, None] + + x = (x - offset_x) * scale_x + x = x.floor().long() + y = (y - offset_y) * scale_y + y = y.floor().long() + + x[x_boundary_inds] = heatmap_size - 1 + y[y_boundary_inds] = heatmap_size - 1 + + valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size) + vis = keypoints[..., 2] > 0 + valid = (valid_loc & vis).long() + + lin_ind = y * heatmap_size + x + heatmaps = lin_ind * valid + + return heatmaps, valid + + +@torch.jit.script_if_tracing +def heatmaps_to_keypoints(maps: torch.Tensor, rois: torch.Tensor) -> torch.Tensor: + """ + Extract predicted keypoint locations from heatmaps. + + Args: + maps (Tensor): (#ROIs, #keypoints, POOL_H, POOL_W). The predicted heatmap of logits for + each ROI and each keypoint. + rois (Tensor): (#ROIs, 4). The box of each ROI. + + Returns: + Tensor of shape (#ROIs, #keypoints, 4) with the last dimension corresponding to + (x, y, logit, score) for each keypoint. + + When converting discrete pixel indices in an NxN image to a continuous keypoint coordinate, + we maintain consistency with :meth:`Keypoints.to_heatmap` by using the conversion from + Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate. 
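+
+    For example (illustrative shapes), maps of shape (2, 17, 56, 56) together
+    with rois of shape (2, 4) yield a (2, 17, 4) result.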
+ """ + + offset_x = rois[:, 0] + offset_y = rois[:, 1] + + widths = (rois[:, 2] - rois[:, 0]).clamp(min=1) + heights = (rois[:, 3] - rois[:, 1]).clamp(min=1) + widths_ceil = widths.ceil() + heights_ceil = heights.ceil() + + num_rois, num_keypoints = maps.shape[:2] + xy_preds = maps.new_zeros(rois.shape[0], num_keypoints, 4) + + width_corrections = widths / widths_ceil + height_corrections = heights / heights_ceil + + keypoints_idx = torch.arange(num_keypoints, device=maps.device) + + for i in range(num_rois): + outsize = (int(heights_ceil[i]), int(widths_ceil[i])) + roi_map = F.interpolate(maps[[i]], size=outsize, mode="bicubic", align_corners=False) + + # Although semantically equivalent, `reshape` is used instead of `squeeze` due + # to limitation during ONNX export of `squeeze` in scripting mode + roi_map = roi_map.reshape(roi_map.shape[1:]) # keypoints x H x W + + # softmax over the spatial region + max_score, _ = roi_map.view(num_keypoints, -1).max(1) + max_score = max_score.view(num_keypoints, 1, 1) + tmp_full_resolution = (roi_map - max_score).exp_() + tmp_pool_resolution = (maps[i] - max_score).exp_() + # Produce scores over the region H x W, but normalize with POOL_H x POOL_W, + # so that the scores of objects of different absolute sizes will be more comparable + roi_map_scores = tmp_full_resolution / tmp_pool_resolution.sum((1, 2), keepdim=True) + + w = roi_map.shape[2] + pos = roi_map.view(num_keypoints, -1).argmax(1) + + x_int = pos % w + y_int = (pos - x_int) // w + + assert ( + roi_map_scores[keypoints_idx, y_int, x_int] + == roi_map_scores.view(num_keypoints, -1).max(1)[0] + ).all() + + x = (x_int.float() + 0.5) * width_corrections[i] + y = (y_int.float() + 0.5) * height_corrections[i] + + xy_preds[i, :, 0] = x + offset_x[i] + xy_preds[i, :, 1] = y + offset_y[i] + xy_preds[i, :, 2] = roi_map[keypoints_idx, y_int, x_int] + xy_preds[i, :, 3] = roi_map_scores[keypoints_idx, y_int, x_int] + + return xy_preds diff --git a/mmcv/structures/masks.py b/mmcv/structures/masks.py new file mode 100644 index 0000000..0db389f --- /dev/null +++ b/mmcv/structures/masks.py @@ -0,0 +1,534 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import copy +import itertools +import numpy as np +from typing import Any, Iterator, List, Union +import pycocotools.mask as mask_util +import torch +from torch import device + +from mmcv.layers.roi_align import ROIAlign +from mmcv.utils import retry_if_cuda_oom + +from .boxes import Boxes + + +def polygon_area(x, y): + # Using the shoelace formula + # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates + return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) + + +def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray: + """ + Args: + polygons (list[ndarray]): each array has shape (Nx2,) + height, width (int) + + Returns: + ndarray: a bool mask of shape (height, width) + """ + if len(polygons) == 0: + # COCOAPI does not support empty polygons + return np.zeros((height, width)).astype(bool) + rles = mask_util.frPyObjects(polygons, height, width) + rle = mask_util.merge(rles) + return mask_util.decode(rle).astype(bool) + + +def rasterize_polygons_within_box( + polygons: List[np.ndarray], box: np.ndarray, mask_size: int +) -> torch.Tensor: + """ + Rasterize the polygons into a mask image and + crop the mask content in the given box. + The cropped mask is resized to (mask_size, mask_size). 
+ + This function is used when generating training targets for mask head in Mask R-CNN. + Given original ground-truth masks for an image, new ground-truth mask + training targets in the size of `mask_size x mask_size` + must be provided for each predicted box. This function will be called to + produce such targets. + + Args: + polygons (list[ndarray[float]]): a list of polygons, which represents an instance. + box: 4-element numpy array + mask_size (int): + + Returns: + Tensor: BoolTensor of shape (mask_size, mask_size) + """ + # 1. Shift the polygons w.r.t the boxes + w, h = box[2] - box[0], box[3] - box[1] + + polygons = copy.deepcopy(polygons) + for p in polygons: + p[0::2] = p[0::2] - box[0] + p[1::2] = p[1::2] - box[1] + + # 2. Rescale the polygons to the new box size + # max() to avoid division by small number + ratio_h = mask_size / max(h, 0.1) + ratio_w = mask_size / max(w, 0.1) + + if ratio_h == ratio_w: + for p in polygons: + p *= ratio_h + else: + for p in polygons: + p[0::2] *= ratio_w + p[1::2] *= ratio_h + + # 3. Rasterize the polygons with coco api + mask = polygons_to_bitmask(polygons, mask_size, mask_size) + mask = torch.from_numpy(mask) + return mask + + +class BitMasks: + """ + This class stores the segmentation masks for all objects in one image, in + the form of bitmaps. + + Attributes: + tensor: bool Tensor of N,H,W, representing N instances in the image. + """ + + def __init__(self, tensor: Union[torch.Tensor, np.ndarray]): + """ + Args: + tensor: bool Tensor of N,H,W, representing N instances in the image. + """ + if isinstance(tensor, torch.Tensor): + tensor = tensor.to(torch.bool) + else: + tensor = torch.as_tensor(tensor, dtype=torch.bool, device=torch.device("cpu")) + assert tensor.dim() == 3, tensor.size() + self.image_size = tensor.shape[1:] + self.tensor = tensor + + @torch.jit.unused + def to(self, *args: Any, **kwargs: Any) -> "BitMasks": + return BitMasks(self.tensor.to(*args, **kwargs)) + + @property + def device(self) -> torch.device: + return self.tensor.device + + @torch.jit.unused + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks": + """ + Returns: + BitMasks: Create a new :class:`BitMasks` by indexing. + + The following usage are allowed: + + 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask. + 2. `new_masks = masks[2:10]`: return a slice of masks. + 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor + with `length = len(masks)`. Nonzero elements in the vector will be selected. + + Note that the returned object might share storage with this object, + subject to Pytorch's indexing semantics. + """ + if isinstance(item, int): + return BitMasks(self.tensor[item].unsqueeze(0)) + m = self.tensor[item] + assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format( + item, m.shape + ) + return BitMasks(m) + + @torch.jit.unused + def __iter__(self) -> torch.Tensor: + yield from self.tensor + + @torch.jit.unused + def __repr__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={})".format(len(self.tensor)) + return s + + def __len__(self) -> int: + return self.tensor.shape[0] + + def nonempty(self) -> torch.Tensor: + """ + Find masks that are non-empty. + + Returns: + Tensor: a BoolTensor which represents + whether each mask is empty (False) or non-empty (True). 
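+
+        A quick sketch (illustrative values):
+
+        .. code-block:: python
+
+            masks = BitMasks(torch.zeros(2, 4, 4, dtype=torch.bool))
+            masks.nonempty()  # tensor([False, False])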
+ """ + return self.tensor.flatten(1).any(dim=1) + + @staticmethod + def from_polygon_masks( + polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int + ) -> "BitMasks": + """ + Args: + polygon_masks (list[list[ndarray]] or PolygonMasks) + height, width (int) + """ + if isinstance(polygon_masks, PolygonMasks): + polygon_masks = polygon_masks.polygons + masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks] + if len(masks): + return BitMasks(torch.stack([torch.from_numpy(x) for x in masks])) + else: + return BitMasks(torch.empty(0, height, width, dtype=torch.bool)) + + @staticmethod + def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks": + """ + Args: + roi_masks: + height, width (int): + """ + return roi_masks.to_bitmasks(height, width) + + def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: + """ + Crop each bitmask by the given box, and resize results to (mask_size, mask_size). + This can be used to prepare training targets for Mask R-CNN. + It has less reconstruction error compared to rasterization with polygons. + However we observe no difference in accuracy, + but BitMasks requires more memory to store all the masks. + + Args: + boxes (Tensor): Nx4 tensor storing the boxes for each mask + mask_size (int): the size of the rasterized mask. + + Returns: + Tensor: + A bool tensor of shape (N, mask_size, mask_size), where + N is the number of predicted boxes for this image. + """ + assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) + device = self.tensor.device + + batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None] + rois = torch.cat([batch_inds, boxes], dim=1) # Nx5 + + bit_masks = self.tensor.to(dtype=torch.float32) + rois = rois.to(device=device) + output = ( + ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True) + .forward(bit_masks[:, None, :, :], rois) + .squeeze(1) + ) + output = output >= 0.5 + return output + + def get_bounding_boxes(self) -> Boxes: + """ + Returns: + Boxes: tight bounding boxes around bitmasks. + If a mask is empty, it's bounding box will be all zero. + """ + boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32) + x_any = torch.any(self.tensor, dim=1) + y_any = torch.any(self.tensor, dim=2) + for idx in range(self.tensor.shape[0]): + x = torch.where(x_any[idx, :])[0] + y = torch.where(y_any[idx, :])[0] + if len(x) > 0 and len(y) > 0: + boxes[idx, :] = torch.as_tensor( + [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32 + ) + return Boxes(boxes) + + @staticmethod + def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks": + """ + Concatenates a list of BitMasks into a single BitMasks + + Arguments: + bitmasks_list (list[BitMasks]) + + Returns: + BitMasks: the concatenated BitMasks + """ + assert isinstance(bitmasks_list, (list, tuple)) + assert len(bitmasks_list) > 0 + assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list) + + cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0)) + return cat_bitmasks + + +class PolygonMasks: + """ + This class stores the segmentation masks for all objects in one image, in the form of polygons. + + Attributes: + polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon. 
+ """ + + def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]): + """ + Arguments: + polygons (list[list[np.ndarray]]): The first + level of the list correspond to individual instances, + the second level to all the polygons that compose the + instance, and the third level to the polygon coordinates. + The third level array should have the format of + [x0, y0, x1, y1, ..., xn, yn] (n >= 3). + """ + if not isinstance(polygons, list): + raise ValueError( + "Cannot create PolygonMasks: Expect a list of list of polygons per image. " + "Got '{}' instead.".format(type(polygons)) + ) + + def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray: + # Use float64 for higher precision, because why not? + # Always put polygons on CPU (self.to is a no-op) since they + # are supposed to be small tensors. + # May need to change this assumption if GPU placement becomes useful + if isinstance(t, torch.Tensor): + t = t.cpu().numpy() + return np.asarray(t).astype("float64") + + def process_polygons( + polygons_per_instance: List[Union[torch.Tensor, np.ndarray]] + ) -> List[np.ndarray]: + if not isinstance(polygons_per_instance, list): + raise ValueError( + "Cannot create polygons: Expect a list of polygons per instance. " + "Got '{}' instead.".format(type(polygons_per_instance)) + ) + # transform each polygon to a numpy array + polygons_per_instance = [_make_array(p) for p in polygons_per_instance] + for polygon in polygons_per_instance: + if len(polygon) % 2 != 0 or len(polygon) < 6: + raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.") + return polygons_per_instance + + self.polygons: List[List[np.ndarray]] = [ + process_polygons(polygons_per_instance) for polygons_per_instance in polygons + ] + + def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks": + return self + + @property + def device(self) -> torch.device: + return torch.device("cpu") + + def get_bounding_boxes(self) -> Boxes: + """ + Returns: + Boxes: tight bounding boxes around polygon masks. + """ + boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32) + for idx, polygons_per_instance in enumerate(self.polygons): + minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32) + maxxy = torch.zeros(2, dtype=torch.float32) + for polygon in polygons_per_instance: + coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32) + minxy = torch.min(minxy, torch.min(coords, dim=0).values) + maxxy = torch.max(maxxy, torch.max(coords, dim=0).values) + boxes[idx, :2] = minxy + boxes[idx, 2:] = maxxy + return Boxes(boxes) + + def nonempty(self) -> torch.Tensor: + """ + Find masks that are non-empty. + + Returns: + Tensor: + a BoolTensor which represents whether each mask is empty (False) or not (True). + """ + keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons] + return torch.from_numpy(np.asarray(keep, dtype=bool)) + + def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks": + """ + Support indexing over the instances and return a `PolygonMasks` object. + `item` can be: + + 1. An integer. It will return an object with only one instance. + 2. A slice. It will return an object with the selected instances. + 3. A list[int]. It will return an object with the selected instances, + correpsonding to the indices in the list. + 4. A vector mask of type BoolTensor, whose length is num_instances. + It will return an object with the instances whose mask is nonzero. 
+ """ + if isinstance(item, int): + selected_polygons = [self.polygons[item]] + elif isinstance(item, slice): + selected_polygons = self.polygons[item] + elif isinstance(item, list): + selected_polygons = [self.polygons[i] for i in item] + elif isinstance(item, torch.Tensor): + # Polygons is a list, so we have to move the indices back to CPU. + if item.dtype == torch.bool: + assert item.dim() == 1, item.shape + item = item.nonzero().squeeze(1).cpu().numpy().tolist() + elif item.dtype in [torch.int32, torch.int64]: + item = item.cpu().numpy().tolist() + else: + raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype)) + selected_polygons = [self.polygons[i] for i in item] + return PolygonMasks(selected_polygons) + + def __iter__(self) -> Iterator[List[np.ndarray]]: + """ + Yields: + list[ndarray]: the polygons for one instance. + Each Tensor is a float64 vector representing a polygon. + """ + return iter(self.polygons) + + def __repr__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={})".format(len(self.polygons)) + return s + + def __len__(self) -> int: + return len(self.polygons) + + def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: + """ + Crop each mask by the given box, and resize results to (mask_size, mask_size). + This can be used to prepare training targets for Mask R-CNN. + + Args: + boxes (Tensor): Nx4 tensor storing the boxes for each mask + mask_size (int): the size of the rasterized mask. + + Returns: + Tensor: A bool tensor of shape (N, mask_size, mask_size), where + N is the number of predicted boxes for this image. + """ + assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) + + device = boxes.device + # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise + # (several small tensors for representing a single instance mask) + boxes = boxes.to(torch.device("cpu")) + + results = [ + rasterize_polygons_within_box(poly, box.numpy(), mask_size) + for poly, box in zip(self.polygons, boxes) + ] + """ + poly: list[list[float]], the polygons for one instance + box: a tensor of shape (4,) + """ + if len(results) == 0: + return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device) + return torch.stack(results, dim=0).to(device=device) + + def area(self): + """ + Computes area of the mask. + Only works with Polygons, using the shoelace formula: + https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates + + Returns: + Tensor: a vector, area for each instance + """ + + area = [] + for polygons_per_instance in self.polygons: + area_per_instance = 0 + for p in polygons_per_instance: + area_per_instance += polygon_area(p[0::2], p[1::2]) + area.append(area_per_instance) + + return torch.tensor(area) + + @staticmethod + def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks": + """ + Concatenates a list of PolygonMasks into a single PolygonMasks + + Arguments: + polymasks_list (list[PolygonMasks]) + + Returns: + PolygonMasks: the concatenated PolygonMasks + """ + assert isinstance(polymasks_list, (list, tuple)) + assert len(polymasks_list) > 0 + assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list) + + cat_polymasks = type(polymasks_list[0])( + list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list)) + ) + return cat_polymasks + + +class ROIMasks: + """ + Represent masks by N smaller masks defined in some ROIs. 
Once ROI boxes are given, + full-image bitmask can be obtained by "pasting" the mask on the region defined + by the corresponding ROI box. + """ + + def __init__(self, tensor: torch.Tensor): + """ + Args: + tensor: (N, M, M) mask tensor that defines the mask within each ROI. + """ + if tensor.dim() != 3: + raise ValueError("ROIMasks must take a masks of 3 dimension.") + self.tensor = tensor + + def to(self, device: torch.device) -> "ROIMasks": + return ROIMasks(self.tensor.to(device)) + + @property + def device(self) -> device: + return self.tensor.device + + def __len__(self): + return self.tensor.shape[0] + + def __getitem__(self, item) -> "ROIMasks": + """ + Returns: + ROIMasks: Create a new :class:`ROIMasks` by indexing. + + The following usage are allowed: + + 1. `new_masks = masks[2:10]`: return a slice of masks. + 2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor + with `length = len(masks)`. Nonzero elements in the vector will be selected. + + Note that the returned object might share storage with this object, + subject to Pytorch's indexing semantics. + """ + t = self.tensor[item] + if t.dim() != 3: + raise ValueError( + f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!" + ) + return ROIMasks(t) + + @torch.jit.unused + def __repr__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={})".format(len(self.tensor)) + return s + + @torch.jit.unused + def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5): + """ + Args: see documentation of :func:`paste_masks_in_image`. + """ + from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape + + if torch.jit.is_tracing(): + if isinstance(height, torch.Tensor): + paste_func = _paste_masks_tensor_shape + else: + paste_func = paste_masks_in_image + else: + paste_func = retry_if_cuda_oom(paste_masks_in_image) + bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold) + return BitMasks(bitmasks) diff --git a/mmcv/structures/rotated_boxes.py b/mmcv/structures/rotated_boxes.py new file mode 100644 index 0000000..c842b99 --- /dev/null +++ b/mmcv/structures/rotated_boxes.py @@ -0,0 +1,505 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import math +from typing import List, Tuple +import torch + +from detectron2.layers.rotated_boxes import pairwise_iou_rotated + +from .boxes import Boxes + + +class RotatedBoxes(Boxes): + """ + This structure stores a list of rotated boxes as a Nx5 torch.Tensor. + It supports some common methods about boxes + (`area`, `clip`, `nonempty`, etc), + and also behaves like a Tensor + (support indexing, `to(device)`, `.device`, and iteration over all boxes) + """ + + def __init__(self, tensor: torch.Tensor): + """ + Args: + tensor (Tensor[float]): a Nx5 matrix. Each row is + (x_center, y_center, width, height, angle), + in which angle is represented in degrees. + While there's no strict range restriction for it, + the recommended principal range is between [-180, 180) degrees. + + Assume we have a horizontal box B = (x_center, y_center, width, height), + where width is along the x-axis and height is along the y-axis. + The rotated box B_rot (x_center, y_center, width, height, angle) + can be seen as: + + 1. When angle == 0: + B_rot == B + 2. When angle > 0: + B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CCW; + 3. When angle < 0: + B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CW. 
+ + Mathematically, since the right-handed coordinate system for image space + is (y, x), where y is top->down and x is left->right, the 4 vertices of the + rotated rectangle :math:`(yr_i, xr_i)` (i = 1, 2, 3, 4) can be obtained from + the vertices of the horizontal rectangle :math:`(y_i, x_i)` (i = 1, 2, 3, 4) + in the following way (:math:`\\theta = angle*\\pi/180` is the angle in radians, + :math:`(y_c, x_c)` is the center of the rectangle): + + .. math:: + + yr_i = \\cos(\\theta) (y_i - y_c) - \\sin(\\theta) (x_i - x_c) + y_c, + + xr_i = \\sin(\\theta) (y_i - y_c) + \\cos(\\theta) (x_i - x_c) + x_c, + + which is the standard rigid-body rotation transformation. + + Intuitively, the angle is + (1) the rotation angle from y-axis in image space + to the height vector (top->down in the box's local coordinate system) + of the box in CCW, and + (2) the rotation angle from x-axis in image space + to the width vector (left->right in the box's local coordinate system) + of the box in CCW. + + More intuitively, consider the following horizontal box ABCD represented + in (x1, y1, x2, y2): (3, 2, 7, 4), + covering the [3, 7] x [2, 4] region of the continuous coordinate system + which looks like this: + + .. code:: none + + O--------> x + | + | A---B + | | | + | D---C + | + v y + + Note that each capital letter represents one 0-dimensional geometric point + instead of a 'square pixel' here. + + In the example above, using (x, y) to represent a point we have: + + .. math:: + + O = (0, 0), A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4) + + We name vector AB = vector DC as the width vector in box's local coordinate system, and + vector AD = vector BC as the height vector in box's local coordinate system. Initially, + when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis + in the image space, respectively. + + For better illustration, we denote the center of the box as E, + + .. code:: none + + O--------> x + | + | A---B + | | E | + | D---C + | + v y + + where the center E = ((3+7)/2, (2+4)/2) = (5, 3). + + Also, + + .. math:: + + width = |AB| = |CD| = 7 - 3 = 4, + height = |AD| = |BC| = 4 - 2 = 2. + + Therefore, the corresponding representation for the same shape in rotated box in + (x_center, y_center, width, height, angle) format is: + + (5, 3, 4, 2, 0), + + Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees + CCW (counter-clockwise) by definition. It looks like this: + + .. code:: none + + O--------> x + | B-C + | | | + | |E| + | | | + | A-D + v y + + The center E is still located at the same point (5, 3), while the vertices + ABCD are rotated by 90 degrees CCW with regard to E: + A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5) + + Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to + vector AD or vector BC (the top->down height vector in box's local coordinate system), + or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right + width vector in box's local coordinate system). + + .. math:: + + width = |AB| = |CD| = 5 - 1 = 4, + height = |AD| = |BC| = 6 - 4 = 2. + + Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise) + by definition? It looks like this: + + .. code:: none + + O--------> x + | D-A + | | | + | |E| + | | | + | C-B + v y + + The center E is still located at the same point (5, 3), while the vertices + ABCD are rotated by 90 degrees CW with regard to E: + A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1) + + .. 
math:: + + width = |AB| = |CD| = 5 - 1 = 4, + height = |AD| = |BC| = 6 - 4 = 2. + + This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU + will be 1. However, these two will generate different RoI Pooling results and + should not be treated as an identical box. + + On the other hand, it's easy to see that (X, Y, W, H, A) is identical to + (X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be + identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is + equivalent to rotating the same shape 90 degrees CW. + + We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180): + + .. code:: none + + O--------> x + | + | C---D + | | E | + | B---A + | + v y + + .. math:: + + A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2), + + width = |AB| = |CD| = 7 - 3 = 4, + height = |AD| = |BC| = 4 - 2 = 2. + + Finally, this is a very inaccurate (heavily quantized) illustration of + how (5, 3, 4, 2, 60) looks like in case anyone wonders: + + .. code:: none + + O--------> x + | B\ + | / C + | /E / + | A / + | `D + v y + + It's still a rectangle with center of (5, 3), width of 4 and height of 2, + but its angle (and thus orientation) is somewhere between + (5, 3, 4, 2, 0) and (5, 3, 4, 2, 90). + """ + device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") + tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) + if tensor.numel() == 0: + # Use reshape, so we don't end up creating a new tensor that does not depend on + # the inputs (and consequently confuses jit) + tensor = tensor.reshape((0, 5)).to(dtype=torch.float32, device=device) + assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size() + + self.tensor = tensor + + def clone(self) -> "RotatedBoxes": + """ + Clone the RotatedBoxes. + + Returns: + RotatedBoxes + """ + return RotatedBoxes(self.tensor.clone()) + + def to(self, device: torch.device): + # Boxes are assumed float32 and does not support to(dtype) + return RotatedBoxes(self.tensor.to(device=device)) + + def area(self) -> torch.Tensor: + """ + Computes the area of all the boxes. + + Returns: + torch.Tensor: a vector with areas of each box. + """ + box = self.tensor + area = box[:, 2] * box[:, 3] + return area + + # Avoid in-place operations so that we can torchscript; NOTE: this creates a new tensor + def normalize_angles(self) -> None: + """ + Restrict angles to the range of [-180, 180) degrees + """ + angle_tensor = (self.tensor[:, 4] + 180.0) % 360.0 - 180.0 + self.tensor = torch.cat((self.tensor[:, :4], angle_tensor[:, None]), dim=1) + + def clip(self, box_size: Tuple[int, int], clip_angle_threshold: float = 1.0) -> None: + """ + Clip (in place) the boxes by limiting x coordinates to the range [0, width] + and y coordinates to the range [0, height]. + + For RRPN: + Only clip boxes that are almost horizontal with a tolerance of + clip_angle_threshold to maintain backward compatibility. + + Rotated boxes beyond this threshold are not clipped for two reasons: + + 1. There are potentially multiple ways to clip a rotated box to make it + fit within the image. + 2. It's tricky to make the entire rectangular box fit within the image + and still be able to not leave out pixels of interest. + + Therefore we rely on ops like RoIAlignRotated to safely handle this. + + Args: + box_size (height, width): The clipping box's size. + clip_angle_threshold: + Iff. abs(normalized(angle)) <= clip_angle_threshold (in degrees), + we do the clipping as horizontal boxes. 
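+                Boxes whose normalized angle exceeds this threshold are left
+                unchanged.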
+ """ + h, w = box_size + + # normalize angles to be within (-180, 180] degrees + self.normalize_angles() + + idx = torch.where(torch.abs(self.tensor[:, 4]) <= clip_angle_threshold)[0] + + # convert to (x1, y1, x2, y2) + x1 = self.tensor[idx, 0] - self.tensor[idx, 2] / 2.0 + y1 = self.tensor[idx, 1] - self.tensor[idx, 3] / 2.0 + x2 = self.tensor[idx, 0] + self.tensor[idx, 2] / 2.0 + y2 = self.tensor[idx, 1] + self.tensor[idx, 3] / 2.0 + + # clip + x1.clamp_(min=0, max=w) + y1.clamp_(min=0, max=h) + x2.clamp_(min=0, max=w) + y2.clamp_(min=0, max=h) + + # convert back to (xc, yc, w, h) + self.tensor[idx, 0] = (x1 + x2) / 2.0 + self.tensor[idx, 1] = (y1 + y2) / 2.0 + # make sure widths and heights do not increase due to numerical errors + self.tensor[idx, 2] = torch.min(self.tensor[idx, 2], x2 - x1) + self.tensor[idx, 3] = torch.min(self.tensor[idx, 3], y2 - y1) + + def nonempty(self, threshold: float = 0.0) -> torch.Tensor: + """ + Find boxes that are non-empty. + A box is considered empty, if either of its side is no larger than threshold. + + Returns: + Tensor: a binary vector which represents + whether each box is empty (False) or non-empty (True). + """ + box = self.tensor + widths = box[:, 2] + heights = box[:, 3] + keep = (widths > threshold) & (heights > threshold) + return keep + + def __getitem__(self, item) -> "RotatedBoxes": + """ + Returns: + RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing. + + The following usage are allowed: + + 1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box. + 2. `new_boxes = boxes[2:10]`: return a slice of boxes. + 3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor + with `length = len(boxes)`. Nonzero elements in the vector will be selected. + + Note that the returned RotatedBoxes might share storage with this RotatedBoxes, + subject to Pytorch's indexing semantics. + """ + if isinstance(item, int): + return RotatedBoxes(self.tensor[item].view(1, -1)) + b = self.tensor[item] + assert b.dim() == 2, "Indexing on RotatedBoxes with {} failed to return a matrix!".format( + item + ) + return RotatedBoxes(b) + + def __len__(self) -> int: + return self.tensor.shape[0] + + def __repr__(self) -> str: + return "RotatedBoxes(" + str(self.tensor) + ")" + + def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor: + """ + Args: + box_size (height, width): Size of the reference box covering + [0, width] x [0, height] + boundary_threshold (int): Boxes that extend beyond the reference box + boundary by more than boundary_threshold are considered "outside". + + For RRPN, it might not be necessary to call this function since it's common + for rotated box to extend to outside of the image boundaries + (the clip function only clips the near-horizontal boxes) + + Returns: + a binary vector, indicating whether each box is inside the reference box. 
+ """ + height, width = box_size + + cnt_x = self.tensor[..., 0] + cnt_y = self.tensor[..., 1] + half_w = self.tensor[..., 2] / 2.0 + half_h = self.tensor[..., 3] / 2.0 + a = self.tensor[..., 4] + c = torch.abs(torch.cos(a * math.pi / 180.0)) + s = torch.abs(torch.sin(a * math.pi / 180.0)) + # This basically computes the horizontal bounding rectangle of the rotated box + max_rect_dx = c * half_w + s * half_h + max_rect_dy = c * half_h + s * half_w + + inds_inside = ( + (cnt_x - max_rect_dx >= -boundary_threshold) + & (cnt_y - max_rect_dy >= -boundary_threshold) + & (cnt_x + max_rect_dx < width + boundary_threshold) + & (cnt_y + max_rect_dy < height + boundary_threshold) + ) + + return inds_inside + + def get_centers(self) -> torch.Tensor: + """ + Returns: + The box centers in a Nx2 array of (x, y). + """ + return self.tensor[:, :2] + + def scale(self, scale_x: float, scale_y: float) -> None: + """ + Scale the rotated box with horizontal and vertical scaling factors + Note: when scale_factor_x != scale_factor_y, + the rotated box does not preserve the rectangular shape when the angle + is not a multiple of 90 degrees under resize transformation. + Instead, the shape is a parallelogram (that has skew) + Here we make an approximation by fitting a rotated rectangle to the parallelogram. + """ + self.tensor[:, 0] *= scale_x + self.tensor[:, 1] *= scale_y + theta = self.tensor[:, 4] * math.pi / 180.0 + c = torch.cos(theta) + s = torch.sin(theta) + + # In image space, y is top->down and x is left->right + # Consider the local coordintate system for the rotated box, + # where the box center is located at (0, 0), and the four vertices ABCD are + # A(-w / 2, -h / 2), B(w / 2, -h / 2), C(w / 2, h / 2), D(-w / 2, h / 2) + # the midpoint of the left edge AD of the rotated box E is: + # E = (A+D)/2 = (-w / 2, 0) + # the midpoint of the top edge AB of the rotated box F is: + # F(0, -h / 2) + # To get the old coordinates in the global system, apply the rotation transformation + # (Note: the right-handed coordinate system for image space is yOx): + # (old_x, old_y) = (s * y + c * x, c * y - s * x) + # E(old) = (s * 0 + c * (-w/2), c * 0 - s * (-w/2)) = (-c * w / 2, s * w / 2) + # F(old) = (s * (-h / 2) + c * 0, c * (-h / 2) - s * 0) = (-s * h / 2, -c * h / 2) + # After applying the scaling factor (sfx, sfy): + # E(new) = (-sfx * c * w / 2, sfy * s * w / 2) + # F(new) = (-sfx * s * h / 2, -sfy * c * h / 2) + # The new width after scaling tranformation becomes: + + # w(new) = |E(new) - O| * 2 + # = sqrt[(sfx * c * w / 2)^2 + (sfy * s * w / 2)^2] * 2 + # = sqrt[(sfx * c)^2 + (sfy * s)^2] * w + # i.e., scale_factor_w = sqrt[(sfx * c)^2 + (sfy * s)^2] + # + # For example, + # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_w == scale_factor_x; + # when |angle| = 90, c = 0, |s| = 1, scale_factor_w == scale_factor_y + self.tensor[:, 2] *= torch.sqrt((scale_x * c) ** 2 + (scale_y * s) ** 2) + + # h(new) = |F(new) - O| * 2 + # = sqrt[(sfx * s * h / 2)^2 + (sfy * c * h / 2)^2] * 2 + # = sqrt[(sfx * s)^2 + (sfy * c)^2] * h + # i.e., scale_factor_h = sqrt[(sfx * s)^2 + (sfy * c)^2] + # + # For example, + # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_h == scale_factor_y; + # when |angle| = 90, c = 0, |s| = 1, scale_factor_h == scale_factor_x + self.tensor[:, 3] *= torch.sqrt((scale_x * s) ** 2 + (scale_y * c) ** 2) + + # The angle is the rotation angle from y-axis in image space to the height + # vector (top->down in the box's local coordinate system) of the box in CCW. 
+ # + # angle(new) = angle_yOx(O - F(new)) + # = angle_yOx( (sfx * s * h / 2, sfy * c * h / 2) ) + # = atan2(sfx * s * h / 2, sfy * c * h / 2) + # = atan2(sfx * s, sfy * c) + # + # For example, + # when sfx == sfy, angle(new) == atan2(s, c) == angle(old) + self.tensor[:, 4] = torch.atan2(scale_x * s, scale_y * c) * 180 / math.pi + + @classmethod + def cat(cls, boxes_list: List["RotatedBoxes"]) -> "RotatedBoxes": + """ + Concatenates a list of RotatedBoxes into a single RotatedBoxes + + Arguments: + boxes_list (list[RotatedBoxes]) + + Returns: + RotatedBoxes: the concatenated RotatedBoxes + """ + assert isinstance(boxes_list, (list, tuple)) + if len(boxes_list) == 0: + return cls(torch.empty(0)) + assert all([isinstance(box, RotatedBoxes) for box in boxes_list]) + + # use torch.cat (vs. layers.cat) so the returned boxes never share storage with input + cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0)) + return cat_boxes + + @property + def device(self) -> torch.device: + return self.tensor.device + + @torch.jit.unused + def __iter__(self): + """ + Yield a box as a Tensor of shape (5,) at a time. + """ + yield from self.tensor + + +def pairwise_iou(boxes1: RotatedBoxes, boxes2: RotatedBoxes) -> torch.Tensor: + """ + Given two lists of rotated boxes of size N and M, + compute the IoU (intersection over union) + between **all** N x M pairs of boxes. + The box order must be (x_center, y_center, width, height, angle). + + Args: + boxes1, boxes2 (RotatedBoxes): + two `RotatedBoxes`. Contains N & M rotated boxes, respectively. + + Returns: + Tensor: IoU, sized [N,M]. + """ + + return pairwise_iou_rotated(boxes1.tensor, boxes2.tensor) diff --git a/mmcv/utils/__init__.py b/mmcv/utils/__init__.py new file mode 100644 index 0000000..5992858 --- /dev/null +++ b/mmcv/utils/__init__.py @@ -0,0 +1,28 @@ +# flake8: noqa +# Copyright (c) OpenMMLab. All rights reserved. +from .config import Config, ConfigDict, DictAction +from .misc import (check_prerequisites, concat_list, deprecated_api_warning, + has_method, import_modules_from_strings, is_list_of, + is_method_overridden, is_seq_of, is_str, is_tuple_of, + iter_cast, list_cast, requires_executable, requires_package, + slice_list, to_1tuple, to_2tuple, to_3tuple, to_4tuple, + to_ntuple, tuple_cast) +from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist, + scandir, symlink) +from .progressbar import (ProgressBar, track_iter_progress, + track_parallel_progress, track_progress) +from .timer import Timer, TimerError, check_time +from .version_utils import digit_version, get_git_hash +import torch +from .logging import get_logger, print_log +from .registry import Registry, build_from_cfg +from .hub import load_url +from .logger import get_root_logger +from .collect_env import collect_env +from .runner_utils import * +from .fp16_utils import LossScaler, auto_fp16, force_fp32, wrap_fp16_model, TORCH_VERSION +from .checkpoint import load_checkpoint, save_checkpoint +from .log_buffer import LogBuffer +from .priority import Priority, get_priority +from .memory import retry_if_cuda_oom +from .visual import convert_color, save_tensor \ No newline at end of file diff --git a/mmcv/utils/bricks.py b/mmcv/utils/bricks.py new file mode 100644 index 0000000..fd45881 --- /dev/null +++ b/mmcv/utils/bricks.py @@ -0,0 +1,20 @@ +import functools +import time +from collections import defaultdict +import torch +time_maps = defaultdict(lambda: 0.) +count_maps = defaultdict(lambda: 0.)
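+# Usage sketch (illustrative, added in editing; 'backbone' and extract_feat are + # hypothetical names, and a CUDA device is assumed since the decorator calls + # torch.cuda.synchronize()): + # + # @run_time('backbone') + # def extract_feat(x): + # ... + # + # Each call then prints the running average wall time of the decorated + # function under the key 'backbone : extract_feat'.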
+def run_time(name): + def middle(fn): + def wrapper(*args, **kwargs): + torch.cuda.synchronize() + start = time.time() + res = fn(*args, **kwargs) + torch.cuda.synchronize() + time_maps['%s : %s' % (name, fn.__name__)] += time.time() - start + count_maps['%s : %s' % (name, fn.__name__)] += 1 + print('%s : %s takes %f s on average' % (name, fn.__name__, time_maps['%s : %s' % (name, fn.__name__)] / count_maps['%s : %s' % (name, fn.__name__)])) + return res + return wrapper + return middle + \ No newline at end of file diff --git a/mmcv/utils/checkpoint.py b/mmcv/utils/checkpoint.py new file mode 100644 index 0000000..5b76d0b --- /dev/null +++ b/mmcv/utils/checkpoint.py @@ -0,0 +1,153 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import io +import os +import os.path as osp +import pkgutil +import re +import time +import warnings +from collections import OrderedDict +import torch +from torch.optim import Optimizer +from .logging import print_log +from .runner_utils import get_dist_info +from ..parallel import is_module_wrapper +from mmcv.fileio.file_client import FileClient + +def load_checkpoint(model, + filename, + map_location=None, + strict=False, + logger=None, + revise_keys=[(r'^module\.', '')]): + """Load checkpoint from a file or URI. + + Args: + model (Module): Module to load checkpoint. + filename (str): Accepts a local filepath. + map_location (str): Same as :func:`torch.load`. + strict (bool): Whether to allow different params for the model and + checkpoint. + logger (:mod:`logging.Logger` or None): The logger for error message. + revise_keys (list): A list of customized keywords to modify the + state_dict in checkpoint. Each item is a (pattern, replacement) + pair of the regular expression operations. Default: strip + the prefix 'module.' by [(r'^module\\.', '')]. + + Returns: + dict or OrderedDict: The loaded checkpoint. + """ + print_log( + f'load checkpoint from path: {filename}', logger) + if not osp.isfile(filename): + raise IOError(f'{filename} is not a checkpoint file') + checkpoint = torch.load(filename, map_location=map_location) + # OrderedDict is a subclass of dict + if not isinstance(checkpoint, dict): + raise RuntimeError( + f'No state_dict found in checkpoint file {filename}') + # get state_dict from checkpoint + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + # strip prefix of state_dict + metadata = getattr(state_dict, '_metadata', OrderedDict()) + for p, r in revise_keys: + state_dict = OrderedDict( + {re.sub(p, r, k): v + for k, v in state_dict.items()}) + # Keep metadata in state_dict + state_dict._metadata = metadata + + # load state_dict + if is_module_wrapper(model): + model = model.module + missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict) + # collect information about mismatched keys + err_msg = [] + if unexpected_keys: + err_msg.append('unexpected key in source ' + f'state_dict: {", ".join(unexpected_keys)}\n') + if missing_keys: + err_msg.append( + f'missing keys in source state_dict: {", ".join(missing_keys)}\n') + + rank, _ = get_dist_info() + if len(err_msg) > 0 and rank == 0: + err_msg.insert( + 0, 'The model and loaded state dict do not match exactly\n') + err_msg = '\n'.join(err_msg) + if strict: + raise RuntimeError(err_msg) + elif logger is not None: + logger.warning(err_msg) + else: + print(err_msg) + return checkpoint + +def weights_to_cpu(state_dict): + """Copy a model state_dict to cpu. + + Args: + state_dict (OrderedDict): Model weights on GPU.
+ + Returns: + OrderedDict: Model weights on CPU. + """ + state_dict_cpu = OrderedDict() + for key, val in state_dict.items(): + state_dict_cpu[key] = val.cpu() + # Keep metadata in state_dict + state_dict_cpu._metadata = getattr(state_dict, '_metadata', OrderedDict()) + return state_dict_cpu + +def save_checkpoint(model, + filename, + optimizer=None, + meta=None, + file_client_args=None): + """Save checkpoint to file. + + The checkpoint will have 3 fields: ``meta``, ``state_dict`` and + ``optimizer``. By default ``meta`` will contain version and time info. + + Args: + model (Module): Module whose params are to be saved. + filename (str): Checkpoint filename. + optimizer (:obj:`Optimizer`, optional): Optimizer to be saved. + meta (dict, optional): Metadata to be saved in checkpoint. + file_client_args (dict, optional): Arguments to instantiate a + FileClient. See :class:`mmcv.fileio.FileClient` for details. + Default: None. + `New in version 1.3.16.` + """ + if meta is None: + meta = {} + elif not isinstance(meta, dict): + raise TypeError(f'meta must be a dict or None, but got {type(meta)}') + meta.update(time=time.asctime()) + + if is_module_wrapper(model): + model = model.module + + if hasattr(model, 'CLASSES') and model.CLASSES is not None: + # save class name to the meta + meta.update(CLASSES=model.CLASSES) + checkpoint = { + 'meta': meta, + 'state_dict': weights_to_cpu(model.state_dict()), + } + # save optimizer state dict in the checkpoint + if isinstance(optimizer, Optimizer): + checkpoint['optimizer'] = optimizer.state_dict() + elif isinstance(optimizer, dict): + checkpoint['optimizer'] = {} + for name, optim in optimizer.items(): + checkpoint['optimizer'][name] = optim.state_dict() + + file_client = FileClient.infer_client(file_client_args, filename) + with io.BytesIO() as f: + torch.save(checkpoint, f) + file_client.put(f.getvalue(), filename) diff --git a/mmcv/utils/collect_env.py b/mmcv/utils/collect_env.py new file mode 100644 index 0000000..25bea53 --- /dev/null +++ b/mmcv/utils/collect_env.py @@ -0,0 +1,13 @@ +from mmcv.utils import get_git_hash +from mmcv import __version__ + +def collect_env(): + """Collect information about the running environment.""" + env_info = {} + env_info['MMCV'] = __version__ + return env_info + + +if __name__ == '__main__': + for name, val in collect_env().items(): + print(f'{name}: {val}') diff --git a/mmcv/utils/config.py b/mmcv/utils/config.py new file mode 100644 index 0000000..3c7fae7 --- /dev/null +++ b/mmcv/utils/config.py @@ -0,0 +1,687 @@ +# Copyright (c) OpenMMLab. All rights reserved.
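+# Usage sketch (illustrative, added in editing; ``cfg.model`` is a hypothetical + # key): configs are typically loaded with ``Config.fromfile('path/to/cfg.py')`` + # and then read like attributes, e.g. ``cfg.model``; see the ``Config`` class + # docstring below for concrete examples.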
+import ast +import copy +import os +import os.path as osp +import platform +import shutil +import sys +import tempfile +import uuid +import warnings +from argparse import Action, ArgumentParser +from collections import abc +from importlib import import_module +from mmcv.fileio.io import load, dump + +from addict import Dict +from yapf.yapflib.yapf_api import FormatCode + +from .misc import import_modules_from_strings +from .path import check_file_exist + +if platform.system() == 'Windows': + import regex as re +else: + import re + +BASE_KEY = '_base_' +DELETE_KEY = '_delete_' +DEPRECATION_KEY = '_deprecation_' +RESERVED_KEYS = ['filename', 'text', 'pretty_text'] + + +class ConfigDict(Dict): + + def __missing__(self, name): + raise KeyError(name) + + def __getattr__(self, name): + try: + value = super(ConfigDict, self).__getattr__(name) + except KeyError: + ex = AttributeError(f"'{self.__class__.__name__}' object has no " + f"attribute '{name}'") + except Exception as e: + ex = e + else: + return value + raise ex + + +def add_args(parser, cfg, prefix=''): + for k, v in cfg.items(): + if isinstance(v, str): + parser.add_argument('--' + prefix + k) + elif isinstance(v, int): + parser.add_argument('--' + prefix + k, type=int) + elif isinstance(v, float): + parser.add_argument('--' + prefix + k, type=float) + elif isinstance(v, bool): + parser.add_argument('--' + prefix + k, action='store_true') + elif isinstance(v, dict): + add_args(parser, v, prefix + k + '.') + elif isinstance(v, abc.Iterable): + parser.add_argument('--' + prefix + k, type=type(v[0]), nargs='+') + else: + print(f'cannot parse key {prefix + k} of type {type(v)}') + return parser + + +class Config: + """A facility for config and config files. + + It supports common file formats as configs: python/json/yaml. The interface + is the same as a dict object and also allows access config values as + attributes. 
+ + Example: + >>> cfg = Config(dict(a=1, b=dict(b1=[0, 1]))) + >>> cfg.a + 1 + >>> cfg.b + {'b1': [0, 1]} + >>> cfg.b.b1 + [0, 1] + >>> cfg = Config.fromfile('tests/data/config/a.py') + >>> cfg.filename + "/home/kchen/projects/mmcv/tests/data/config/a.py" + >>> cfg.item4 + 'test' + >>> cfg + "Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: " + "{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}" + """ + + @staticmethod + def _validate_py_syntax(filename): + with open(filename, 'r', encoding='utf-8') as f: + # Setting encoding explicitly to resolve encoding issues on Windows + content = f.read() + try: + ast.parse(content) + except SyntaxError as e: + raise SyntaxError('There are syntax errors in config ' + f'file {filename}: {e}') + + @staticmethod + def _substitute_predefined_vars(filename, temp_config_name): + file_dirname = osp.dirname(filename) + file_basename = osp.basename(filename) + file_basename_no_extension = osp.splitext(file_basename)[0] + file_extname = osp.splitext(filename)[1] + support_templates = dict( + fileDirname=file_dirname, + fileBasename=file_basename, + fileBasenameNoExtension=file_basename_no_extension, + fileExtname=file_extname) + with open(filename, 'r', encoding='utf-8') as f: + # Setting encoding explicitly to resolve encoding issues on Windows + config_file = f.read() + for key, value in support_templates.items(): + regexp = r'\{\{\s*' + str(key) + r'\s*\}\}' + value = value.replace('\\', '/') + config_file = re.sub(regexp, value, config_file) + with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file: + tmp_config_file.write(config_file) + + @staticmethod + def _pre_substitute_base_vars(filename, temp_config_name): + """Substitute base variable placeholders with strings, so that parsing + works.""" + with open(filename, 'r', encoding='utf-8') as f: + # Setting encoding explicitly to resolve encoding issues on Windows + config_file = f.read() + base_var_dict = {} + regexp = r'\{\{\s*' + BASE_KEY + r'\.([\w\.]+)\s*\}\}' + base_vars = set(re.findall(regexp, config_file)) + for base_var in base_vars: + randstr = f'_{base_var}_{uuid.uuid4().hex.lower()[:6]}' + base_var_dict[randstr] = base_var + regexp = r'\{\{\s*' + BASE_KEY + r'\.'
+ base_var + r'\s*\}\}' + config_file = re.sub(regexp, f'"{randstr}"', config_file) + with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file: + tmp_config_file.write(config_file) + return base_var_dict + + @staticmethod + def _substitute_base_vars(cfg, base_var_dict, base_cfg): + """Substitute variable strings to their actual values.""" + cfg = copy.deepcopy(cfg) + + if isinstance(cfg, dict): + for k, v in cfg.items(): + if isinstance(v, str) and v in base_var_dict: + new_v = base_cfg + for new_k in base_var_dict[v].split('.'): + new_v = new_v[new_k] + cfg[k] = new_v + elif isinstance(v, (list, tuple, dict)): + cfg[k] = Config._substitute_base_vars( + v, base_var_dict, base_cfg) + elif isinstance(cfg, tuple): + cfg = tuple( + Config._substitute_base_vars(c, base_var_dict, base_cfg) + for c in cfg) + elif isinstance(cfg, list): + cfg = [ + Config._substitute_base_vars(c, base_var_dict, base_cfg) + for c in cfg + ] + elif isinstance(cfg, str) and cfg in base_var_dict: + new_v = base_cfg + for new_k in base_var_dict[cfg].split('.'): + new_v = new_v[new_k] + cfg = new_v + + return cfg + + @staticmethod + def _file2dict(filename, use_predefined_variables=True): + filename = osp.abspath(osp.expanduser(filename)) + check_file_exist(filename) + fileExtname = osp.splitext(filename)[1] + if fileExtname not in ['.py', '.json', '.yaml', '.yml']: + raise IOError('Only py/yml/yaml/json type are supported now!') + + with tempfile.TemporaryDirectory() as temp_config_dir: + temp_config_file = tempfile.NamedTemporaryFile( + dir=temp_config_dir, suffix=fileExtname) + if platform.system() == 'Windows': + temp_config_file.close() + temp_config_name = osp.basename(temp_config_file.name) + # Substitute predefined variables + if use_predefined_variables: + Config._substitute_predefined_vars(filename, + temp_config_file.name) + else: + shutil.copyfile(filename, temp_config_file.name) + # Substitute base variables from placeholders to strings + base_var_dict = Config._pre_substitute_base_vars( + temp_config_file.name, temp_config_file.name) + + if filename.endswith('.py'): + temp_module_name = osp.splitext(temp_config_name)[0] + sys.path.insert(0, temp_config_dir) + Config._validate_py_syntax(filename) + mod = import_module(temp_module_name) + sys.path.pop(0) + cfg_dict = { + name: value + for name, value in mod.__dict__.items() + if not name.startswith('__') + } + # delete imported module + del sys.modules[temp_module_name] + elif filename.endswith(('.yml', '.yaml', '.json')): + cfg_dict = load(temp_config_file.name) + # close temp file + temp_config_file.close() + + # check deprecation information + if DEPRECATION_KEY in cfg_dict: + deprecation_info = cfg_dict.pop(DEPRECATION_KEY) + warning_msg = f'The config file {filename} will be deprecated ' \ + 'in the future.' + if 'expected' in deprecation_info: + warning_msg += f' Please use {deprecation_info["expected"]} ' \ + 'instead.' 
+ if 'reference' in deprecation_info: + warning_msg += ' More information can be found at ' \ + f'{deprecation_info["reference"]}' + warnings.warn(warning_msg) + + cfg_text = filename + '\n' + with open(filename, 'r', encoding='utf-8') as f: + # Setting encoding explicitly to resolve encoding issues on Windows + cfg_text += f.read() + + if BASE_KEY in cfg_dict: + cfg_dir = osp.dirname(filename) + base_filename = cfg_dict.pop(BASE_KEY) + base_filename = base_filename if isinstance( + base_filename, list) else [base_filename] + + cfg_dict_list = list() + cfg_text_list = list() + for f in base_filename: + _cfg_dict, _cfg_text = Config._file2dict(osp.join(cfg_dir, f)) + cfg_dict_list.append(_cfg_dict) + cfg_text_list.append(_cfg_text) + + base_cfg_dict = dict() + for c in cfg_dict_list: + duplicate_keys = base_cfg_dict.keys() & c.keys() + if len(duplicate_keys) > 0: + raise KeyError('Duplicate key is not allowed among bases. ' + f'Duplicate keys: {duplicate_keys}') + base_cfg_dict.update(c) + + # Substitute base variables from strings to their actual values + cfg_dict = Config._substitute_base_vars(cfg_dict, base_var_dict, + base_cfg_dict) + + base_cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict) + cfg_dict = base_cfg_dict + + # merge cfg_text + cfg_text_list.append(cfg_text) + cfg_text = '\n'.join(cfg_text_list) + + return cfg_dict, cfg_text + + @staticmethod + def _merge_a_into_b(a, b, allow_list_keys=False): + """merge dict ``a`` into dict ``b`` (non-inplace). + + Values in ``a`` will overwrite ``b``. ``b`` is copied first to avoid + in-place modifications. + + Args: + a (dict): The source dict to be merged into ``b``. + b (dict): The base dict to be merged into; keys from ``a`` take + precedence. + allow_list_keys (bool): If True, int string keys (e.g. '0', '1') + are allowed in source ``a`` and will replace the element of the + corresponding index in b if b is a list. Default: False. + + Returns: + dict: A modified copy of ``b`` updated with ``a``. + + Examples: + # Normally merge a into b. + >>> Config._merge_a_into_b( + ... dict(obj=dict(a=2)), dict(obj=dict(a=1))) + {'obj': {'a': 2}} + + # Delete b first and merge a into b. + >>> Config._merge_a_into_b( + ... dict(obj=dict(_delete_=True, a=2)), dict(obj=dict(a=1))) + {'obj': {'a': 2}} + + # b is a list + >>> Config._merge_a_into_b( + ... {'0': dict(a=2)}, [dict(a=1), dict(b=2)], True) + [{'a': 2}, {'b': 2}] + """ + b = b.copy() + for k, v in a.items(): + if allow_list_keys and k.isdigit() and isinstance(b, list): + k = int(k) + if len(b) <= k: + raise KeyError(f'Index {k} exceeds the length of list {b}') + b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys) + elif isinstance(v, + dict) and k in b and not v.pop(DELETE_KEY, False): + allowed_types = (dict, list) if allow_list_keys else dict + if not isinstance(b[k], allowed_types): + raise TypeError( + f'{k}={v} in child config cannot inherit from base ' + f'because {k} is a dict in the child config but is of ' + f'type {type(b[k])} in base config.
You may set ' + f'`{DELETE_KEY}=True` to ignore the base config') + b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys) + else: + b[k] = v + return b + + @staticmethod + def fromfile(filename, + use_predefined_variables=True, + import_custom_modules=True): + cfg_dict, cfg_text = Config._file2dict(filename, + use_predefined_variables) + if import_custom_modules and cfg_dict.get('custom_imports', None): + import_modules_from_strings(**cfg_dict['custom_imports']) + return Config(cfg_dict, cfg_text=cfg_text, filename=filename) + + @staticmethod + def fromstring(cfg_str, file_format): + """Generate config from config str. + + Args: + cfg_str (str): Config str. + file_format (str): Config file format corresponding to the + config str. Only py/yml/yaml/json type are supported now! + + Returns: + obj:`Config`: Config obj. + """ + if file_format not in ['.py', '.json', '.yaml', '.yml']: + raise IOError('Only py/yml/yaml/json type are supported now!') + if file_format != '.py' and 'dict(' in cfg_str: + # check if users specify a wrong suffix for python + warnings.warn( + 'Please check "file_format", the file format may be .py') + with tempfile.NamedTemporaryFile( + 'w', encoding='utf-8', suffix=file_format, + delete=False) as temp_file: + temp_file.write(cfg_str) + # on windows, previous implementation cause error + # see PR 1077 for details + cfg = Config.fromfile(temp_file.name) + os.remove(temp_file.name) + return cfg + + @staticmethod + def auto_argparser(description=None): + """Generate argparser from config file automatically (experimental)""" + partial_parser = ArgumentParser(description=description) + partial_parser.add_argument('config', help='config file path') + cfg_file = partial_parser.parse_known_args()[0].config + cfg = Config.fromfile(cfg_file) + parser = ArgumentParser(description=description) + parser.add_argument('config', help='config file path') + add_args(parser, cfg) + return parser, cfg + + def __init__(self, cfg_dict=None, cfg_text=None, filename=None): + if cfg_dict is None: + cfg_dict = dict() + elif not isinstance(cfg_dict, dict): + raise TypeError('cfg_dict must be a dict, but ' + f'got {type(cfg_dict)}') + for key in cfg_dict: + if key in RESERVED_KEYS: + raise KeyError(f'{key} is reserved for config file') + + super(Config, self).__setattr__('_cfg_dict', ConfigDict(cfg_dict)) + super(Config, self).__setattr__('_filename', filename) + if cfg_text: + text = cfg_text + elif filename: + with open(filename, 'r') as f: + text = f.read() + else: + text = '' + super(Config, self).__setattr__('_text', text) + + @property + def filename(self): + return self._filename + + @property + def text(self): + return self._text + + @property + def pretty_text(self): + + indent = 4 + + def _indent(s_, num_spaces): + s = s_.split('\n') + if len(s) == 1: + return s_ + first = s.pop(0) + s = [(num_spaces * ' ') + line for line in s] + s = '\n'.join(s) + s = first + '\n' + s + return s + + def _format_basic_types(k, v, use_mapping=False): + if isinstance(v, str): + v_str = f"'{v}'" + else: + v_str = str(v) + + if use_mapping: + k_str = f"'{k}'" if isinstance(k, str) else str(k) + attr_str = f'{k_str}: {v_str}' + else: + attr_str = f'{str(k)}={v_str}' + attr_str = _indent(attr_str, indent) + + return attr_str + + def _format_list(k, v, use_mapping=False): + # check if all items in the list are dict + if all(isinstance(_, dict) for _ in v): + v_str = '[\n' + v_str += '\n'.join( + f'dict({_indent(_format_dict(v_), indent)}),' + for v_ in v).rstrip(',') + if use_mapping: + k_str = f"'{k}'" if 
isinstance(k, str) else str(k) + attr_str = f'{k_str}: {v_str}' + else: + attr_str = f'{str(k)}={v_str}' + attr_str = _indent(attr_str, indent) + ']' + else: + attr_str = _format_basic_types(k, v, use_mapping) + return attr_str + + def _contain_invalid_identifier(dict_str): + contain_invalid_identifier = False + for key_name in dict_str: + contain_invalid_identifier |= \ + (not str(key_name).isidentifier()) + return contain_invalid_identifier + + def _format_dict(input_dict, outest_level=False): + r = '' + s = [] + + use_mapping = _contain_invalid_identifier(input_dict) + if use_mapping: + r += '{' + for idx, (k, v) in enumerate(input_dict.items()): + is_last = idx >= len(input_dict) - 1 + end = '' if outest_level or is_last else ',' + if isinstance(v, dict): + v_str = '\n' + _format_dict(v) + if use_mapping: + k_str = f"'{k}'" if isinstance(k, str) else str(k) + attr_str = f'{k_str}: dict({v_str}' + else: + attr_str = f'{str(k)}=dict({v_str}' + attr_str = _indent(attr_str, indent) + ')' + end + elif isinstance(v, list): + attr_str = _format_list(k, v, use_mapping) + end + else: + attr_str = _format_basic_types(k, v, use_mapping) + end + + s.append(attr_str) + r += '\n'.join(s) + if use_mapping: + r += '}' + return r + + cfg_dict = self._cfg_dict.to_dict() + text = _format_dict(cfg_dict, outest_level=True) + # copied from setup.cfg + yapf_style = dict( + based_on_style='pep8', + blank_line_before_nested_class_or_def=True, + split_before_expression_after_opening_paren=True) + text, _ = FormatCode(text, style_config=yapf_style, verify=True) + + return text + + def __repr__(self): + return f'Config (path: {self.filename}): {self._cfg_dict.__repr__()}' + + def __len__(self): + return len(self._cfg_dict) + + def __getattr__(self, name): + return getattr(self._cfg_dict, name) + + def __getitem__(self, name): + return self._cfg_dict.__getitem__(name) + + def __setattr__(self, name, value): + if isinstance(value, dict): + value = ConfigDict(value) + self._cfg_dict.__setattr__(name, value) + + def __setitem__(self, name, value): + if isinstance(value, dict): + value = ConfigDict(value) + self._cfg_dict.__setitem__(name, value) + + def __iter__(self): + return iter(self._cfg_dict) + + def __getstate__(self): + return (self._cfg_dict, self._filename, self._text) + + def __setstate__(self, state): + _cfg_dict, _filename, _text = state + super(Config, self).__setattr__('_cfg_dict', _cfg_dict) + super(Config, self).__setattr__('_filename', _filename) + super(Config, self).__setattr__('_text', _text) + + def dump(self, file=None): + cfg_dict = super(Config, self).__getattribute__('_cfg_dict').to_dict() + if self.filename.endswith('.py'): + if file is None: + return self.pretty_text + else: + with open(file, 'w', encoding='utf-8') as f: + f.write(self.pretty_text) + else: + if file is None: + file_format = self.filename.split('.')[-1] + return dump(cfg_dict, file_format=file_format) + else: + dump(cfg_dict, file) + + def merge_from_dict(self, options, allow_list_keys=True): + """Merge list into cfg_dict. + + Merge the dict parsed by MultipleKVAction into this cfg. + + Examples: + >>> options = {'model.backbone.depth': 50, + ... 'model.backbone.with_cp':True} + >>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet')))) + >>> cfg.merge_from_dict(options) + >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict') + >>> assert cfg_dict == dict( + ... model=dict(backbone=dict(depth=50, with_cp=True))) + + # Merge list element + >>> cfg = Config(dict(pipeline=[ + ... 
dict(type='LoadImage'), dict(type='LoadAnnotations')])) + >>> options = dict(pipeline={'0': dict(type='SelfLoadImage')}) + >>> cfg.merge_from_dict(options, allow_list_keys=True) + >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict') + >>> assert cfg_dict == dict(pipeline=[ + ... dict(type='SelfLoadImage'), dict(type='LoadAnnotations')]) + + Args: + options (dict): dict of configs to merge from. + allow_list_keys (bool): If True, int string keys (e.g. '0', '1') + are allowed in ``options`` and will replace the element of the + corresponding index in the config if the config is a list. + Default: True. + """ + option_cfg_dict = {} + for full_key, v in options.items(): + d = option_cfg_dict + key_list = full_key.split('.') + for subkey in key_list[:-1]: + d.setdefault(subkey, ConfigDict()) + d = d[subkey] + subkey = key_list[-1] + d[subkey] = v + + cfg_dict = super(Config, self).__getattribute__('_cfg_dict') + super(Config, self).__setattr__( + '_cfg_dict', + Config._merge_a_into_b( + option_cfg_dict, cfg_dict, allow_list_keys=allow_list_keys)) + + +class DictAction(Action): + """ + argparse action to split an argument into KEY=VALUE form + on the first = and append to a dictionary. List options can + be passed as comma-separated values, i.e. 'KEY=V1,V2,V3', or with explicit + brackets, i.e. 'KEY=[V1,V2,V3]'. It also supports nested brackets to build + list/tuple values, e.g. 'KEY=[(V1,V2),(V3,V4)]' + """ + + @staticmethod + def _parse_int_float_bool(val): + try: + return int(val) + except ValueError: + pass + try: + return float(val) + except ValueError: + pass + if val.lower() in ['true', 'false']: + return True if val.lower() == 'true' else False + return val + + @staticmethod + def _parse_iterable(val): + """Parse iterable values in the string. + + All elements inside '()' or '[]' are treated as iterable values. + + Args: + val (str): Value string. + + Returns: + list | tuple: The expanded list or tuple from the string. + + Examples: + >>> DictAction._parse_iterable('1,2,3') + [1, 2, 3] + >>> DictAction._parse_iterable('[a, b, c]') + ['a', 'b', 'c'] + >>> DictAction._parse_iterable('[(1, 2, 3), [a, b], c]') + [(1, 2, 3), ['a', 'b'], 'c'] + """ + + def find_next_comma(string): + """Find the position of the next comma in the string. + + If no ',' is found in the string, return the string length. All + chars inside '()' and '[]' are treated as one element and thus ',' + inside these brackets are ignored. + """ + assert (string.count('(') == string.count(')')) and ( + string.count('[') == string.count(']')), \ + f'Imbalanced brackets exist in {string}' + end = len(string) + for idx, char in enumerate(string): + pre = string[:idx] + # The string before this ',' is balanced + if ((char == ',') and (pre.count('(') == pre.count(')')) + and (pre.count('[') == pre.count(']'))): + end = idx + break + return end + + # Strip ' and " characters and replace whitespace.
+ val = val.strip('\'\"').replace(' ', '') + is_tuple = False + if val.startswith('(') and val.endswith(')'): + is_tuple = True + val = val[1:-1] + elif val.startswith('[') and val.endswith(']'): + val = val[1:-1] + elif ',' not in val: + # val is a single value + return DictAction._parse_int_float_bool(val) + + values = [] + while len(val) > 0: + comma_idx = find_next_comma(val) + element = DictAction._parse_iterable(val[:comma_idx]) + values.append(element) + val = val[comma_idx + 1:] + if is_tuple: + values = tuple(values) + return values + + def __call__(self, parser, namespace, values, option_string=None): + options = {} + for kv in values: + key, val = kv.split('=', maxsplit=1) + options[key] = self._parse_iterable(val) + setattr(namespace, self.dest, options) diff --git a/mmcv/utils/contextmanagers.py b/mmcv/utils/contextmanagers.py new file mode 100644 index 0000000..38a6392 --- /dev/null +++ b/mmcv/utils/contextmanagers.py @@ -0,0 +1,121 @@ +import asyncio +import contextlib +import logging +import os +import time +from typing import List + +import torch + +logger = logging.getLogger(__name__) + +DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False)) + + +@contextlib.asynccontextmanager +async def completed(trace_name='', + name='', + sleep_interval=0.05, + streams: List[torch.cuda.Stream] = None): + """Async context manager that waits for work to complete on given CUDA + streams.""" + if not torch.cuda.is_available(): + yield + return + + stream_before_context_switch = torch.cuda.current_stream() + if not streams: + streams = [stream_before_context_switch] + else: + streams = [s if s else stream_before_context_switch for s in streams] + + end_events = [ + torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams + ] + + if DEBUG_COMPLETED_TIME: + start = torch.cuda.Event(enable_timing=True) + stream_before_context_switch.record_event(start) + + cpu_start = time.monotonic() + logger.debug('%s %s starting, streams: %s', trace_name, name, streams) + grad_enabled_before = torch.is_grad_enabled() + try: + yield + finally: + current_stream = torch.cuda.current_stream() + assert current_stream == stream_before_context_switch + + if DEBUG_COMPLETED_TIME: + cpu_end = time.monotonic() + for i, stream in enumerate(streams): + event = end_events[i] + stream.record_event(event) + + grad_enabled_after = torch.is_grad_enabled() + + # observed change of torch.is_grad_enabled() during concurrent run of + # async_test_bboxes code + assert (grad_enabled_before == grad_enabled_after + ), 'Unexpected is_grad_enabled() value change' + + are_done = [e.query() for e in end_events] + logger.debug('%s %s completed: %s streams: %s', trace_name, name, + are_done, streams) + with torch.cuda.stream(stream_before_context_switch): + while not all(are_done): + await asyncio.sleep(sleep_interval) + are_done = [e.query() for e in end_events] + logger.debug( + '%s %s completed: %s streams: %s', + trace_name, + name, + are_done, + streams, + ) + + current_stream = torch.cuda.current_stream() + assert current_stream == stream_before_context_switch + + if DEBUG_COMPLETED_TIME: + cpu_time = (cpu_end - cpu_start) * 1000 + stream_times_ms = '' + for i, stream in enumerate(streams): + elapsed_time = start.elapsed_time(end_events[i]) + stream_times_ms += f' {stream} {elapsed_time:.2f} ms' + logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time, + stream_times_ms) + + +@contextlib.asynccontextmanager +async def concurrent(streamqueue: asyncio.Queue, + trace_name='concurrent', + 
name='stream'): + """Run code concurrently in different streams. + + :param streamqueue: asyncio.Queue instance. + + Queue tasks define the pool of streams used for concurrent execution. + """ + if not torch.cuda.is_available(): + yield + return + + initial_stream = torch.cuda.current_stream() + + with torch.cuda.stream(initial_stream): + stream = await streamqueue.get() + assert isinstance(stream, torch.cuda.Stream) + + try: + with torch.cuda.stream(stream): + logger.debug('%s %s is starting, stream: %s', trace_name, name, + stream) + yield + current = torch.cuda.current_stream() + assert current == stream + logger.debug('%s %s has finished, stream: %s', trace_name, + name, stream) + finally: + streamqueue.task_done() + streamqueue.put_nowait(stream) diff --git a/mmcv/utils/ext_loader.py b/mmcv/utils/ext_loader.py new file mode 100644 index 0000000..971d3d9 --- /dev/null +++ b/mmcv/utils/ext_loader.py @@ -0,0 +1,18 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import importlib +import os +import pkgutil +import warnings +from collections import namedtuple + +import torch + +def load_ext(name, funcs): + ext = importlib.import_module('mmcv.' + name) + for fun in funcs: + assert hasattr(ext, fun), f'{fun} is missing in module {name}' + return ext + +def check_ops_exist(): + ext_loader = pkgutil.find_loader('mmcv._ext') + return ext_loader is not None diff --git a/mmcv/utils/fp16_utils.py b/mmcv/utils/fp16_utils.py new file mode 100644 index 0000000..5c620ff --- /dev/null +++ b/mmcv/utils/fp16_utils.py @@ -0,0 +1,407 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import functools +import warnings +from collections import abc +from inspect import getfullargspec + +import numpy as np +import torch +import torch.nn as nn +TORCH_VERSION = torch.__version__ +from .version_utils import digit_version +from .runner_utils import allreduce_grads as _allreduce_grads + +try: + # If PyTorch version >= 1.6.0, torch.cuda.amp.autocast would be imported + # and used; otherwise, auto fp16 will adopt mmcv's implementation. + # Note that when PyTorch >= 1.6.0, we still cast tensor types to fp16 + # manually, so the behavior may not be consistent with real amp. + from torch.cuda.amp import autocast +except ImportError: + pass + + +def cast_tensor_type(inputs, src_type, dst_type): + """Recursively convert Tensor in inputs from src_type to dst_type. + + Args: + inputs: Inputs to be cast. + src_type (torch.dtype): Source type. + dst_type (torch.dtype): Destination type. + + Returns: + The same type as inputs, but with all contained Tensors cast to + dst_type. + """ + if isinstance(inputs, nn.Module): + return inputs + elif isinstance(inputs, torch.Tensor): + return inputs.to(dst_type) + elif isinstance(inputs, str): + return inputs + elif isinstance(inputs, np.ndarray): + return inputs + elif isinstance(inputs, abc.Mapping): + return type(inputs)({ + k: cast_tensor_type(v, src_type, dst_type) + for k, v in inputs.items() + }) + elif isinstance(inputs, abc.Iterable): + return type(inputs)( + cast_tensor_type(item, src_type, dst_type) for item in inputs) + else: + return inputs + + +def auto_fp16(apply_to=None, out_fp32=False): + """Decorator to enable fp16 training automatically. + + This decorator is useful when you write custom modules and want to support + mixed precision training. If input arguments are fp32 tensors, they will + be converted to fp16 automatically. Arguments other than fp32 tensors are + ignored.
If you are using PyTorch >= 1.6, torch.cuda.amp is used as the + backend, otherwise, original mmcv implementation will be adopted. + + Args: + apply_to (Iterable, optional): The argument names to be converted. + `None` indicates all arguments. + out_fp32 (bool): Whether to convert the output back to fp32. + + Example: + + >>> import torch.nn as nn + >>> class MyModule1(nn.Module): + >>> + >>> # Convert x and y to fp16 + >>> @auto_fp16() + >>> def forward(self, x, y): + >>> pass + + >>> import torch.nn as nn + >>> class MyModule2(nn.Module): + >>> + >>> # convert pred to fp16 + >>> @auto_fp16(apply_to=('pred', )) + >>> def do_something(self, pred, others): + >>> pass + """ + + def auto_fp16_wrapper(old_func): + + @functools.wraps(old_func) + def new_func(*args, **kwargs): + # check if the module has set the attribute `fp16_enabled`, if not, + # just fallback to the original method. + if not isinstance(args[0], torch.nn.Module): + raise TypeError('@auto_fp16 can only be used to decorate the ' + 'method of nn.Module') + if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled): + return old_func(*args, **kwargs) + + # get the arg spec of the decorated method + args_info = getfullargspec(old_func) + # get the argument names to be casted + args_to_cast = args_info.args if apply_to is None else apply_to + # convert the args that need to be processed + new_args = [] + # NOTE: default args are not taken into consideration + if args: + arg_names = args_info.args[:len(args)] + for i, arg_name in enumerate(arg_names): + if arg_name in args_to_cast: + new_args.append( + cast_tensor_type(args[i], torch.float, torch.half)) + else: + new_args.append(args[i]) + # convert the kwargs that need to be processed + new_kwargs = {} + if kwargs: + for arg_name, arg_value in kwargs.items(): + if arg_name in args_to_cast: + new_kwargs[arg_name] = cast_tensor_type( + arg_value, torch.float, torch.half) + else: + new_kwargs[arg_name] = arg_value + # apply converted arguments to the decorated method + if (digit_version(TORCH_VERSION) >= digit_version('1.6.0')): + with autocast(enabled=True): + output = old_func(*new_args, **new_kwargs) + else: + output = old_func(*new_args, **new_kwargs) + # cast the results back to fp32 if necessary + if out_fp32: + output = cast_tensor_type(output, torch.half, torch.float) + return output + + return new_func + + return auto_fp16_wrapper + + +def force_fp32(apply_to=None, out_fp16=False): + """Decorator to convert input arguments to fp32 in force. + + This decorator is useful when you write custom modules and want to support + mixed precision training. If there are some inputs that must be processed + in fp32 mode, then this decorator can handle it. If inputs arguments are + fp16 tensors, they will be converted to fp32 automatically. Arguments other + than fp16 tensors are ignored. If you are using PyTorch >= 1.6, + torch.cuda.amp is used as the backend, otherwise, original mmcv + implementation will be adopted. + + Args: + apply_to (Iterable, optional): The argument names to be converted. + `None` indicates all arguments. + out_fp16 (bool): Whether to convert the output back to fp16. 
+ + Example: + + >>> import torch.nn as nn + >>> class MyModule1(nn.Module): + >>> + >>> # Convert x and y to fp32 + >>> @force_fp32() + >>> def loss(self, x, y): + >>> pass + + >>> import torch.nn as nn + >>> class MyModule2(nn.Module): + >>> + >>> # convert pred to fp32 + >>> @force_fp32(apply_to=('pred', )) + >>> def post_process(self, pred, others): + >>> pass + """ + + def force_fp32_wrapper(old_func): + + @functools.wraps(old_func) + def new_func(*args, **kwargs): + # check if the module has set the attribute `fp16_enabled`; if not, + # just fall back to the original method. + if not isinstance(args[0], torch.nn.Module): + raise TypeError('@force_fp32 can only be used to decorate the ' + 'method of nn.Module') + if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled): + return old_func(*args, **kwargs) + # get the arg spec of the decorated method + args_info = getfullargspec(old_func) + # get the argument names to be cast + args_to_cast = args_info.args if apply_to is None else apply_to + # convert the args that need to be processed + new_args = [] + if args: + arg_names = args_info.args[:len(args)] + for i, arg_name in enumerate(arg_names): + if arg_name in args_to_cast: + new_args.append( + cast_tensor_type(args[i], torch.half, torch.float)) + else: + new_args.append(args[i]) + # convert the kwargs that need to be processed + new_kwargs = dict() + if kwargs: + for arg_name, arg_value in kwargs.items(): + if arg_name in args_to_cast: + new_kwargs[arg_name] = cast_tensor_type( + arg_value, torch.half, torch.float) + else: + new_kwargs[arg_name] = arg_value + # apply converted arguments to the decorated method + if (digit_version(TORCH_VERSION) >= digit_version('1.6.0')): + with autocast(enabled=False): + output = old_func(*new_args, **new_kwargs) + else: + output = old_func(*new_args, **new_kwargs) + # cast the results back to fp16 if necessary + if out_fp16: + output = cast_tensor_type(output, torch.float, torch.half) + return output + + return new_func + + return force_fp32_wrapper + + +def allreduce_grads(params, coalesce=True, bucket_size_mb=-1): + warnings.warn( + '"mmcv.runner.fp16_utils.allreduce_grads" is deprecated, and will be ' + 'removed in v2.8. Please switch to "mmcv.runner.allreduce_grads".') + _allreduce_grads(params, coalesce=coalesce, bucket_size_mb=bucket_size_mb) + + +def wrap_fp16_model(model): + """Wrap the FP32 model to FP16. + + If you are using PyTorch >= 1.6, torch.cuda.amp is used as the + backend, otherwise, original mmcv implementation will be adopted. + + For PyTorch >= 1.6, this function will + 1. Set fp16 flag inside the model to True. + + Otherwise: + 1. Convert FP32 model to FP16. + 2. Keep some necessary layers in FP32, e.g., normalization layers. + 3. Set `fp16_enabled` flag inside the model to True. + + Args: + model (nn.Module): Model in FP32. + """ + if (digit_version(TORCH_VERSION) < digit_version('1.6.0')): + # convert model to fp16 + model.half() + # patch the normalization layers to make it work in fp32 mode + patch_norm_fp32(model) + # set `fp16_enabled` flag + for m in model.modules(): + if hasattr(m, 'fp16_enabled'): + m.fp16_enabled = True + + +def patch_norm_fp32(module): + """Recursively convert normalization layers from FP16 to FP32. + + Args: + module (nn.Module): The FP16 module whose normalization layers are to + be converted. + + Returns: + nn.Module: The converted module, whose normalization layers have been + converted to FP32.
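+ + Example (illustrative sketch, added in editing):: + + >>> import torch.nn as nn + >>> model = nn.Sequential(nn.Conv2d(3, 4, 3), nn.BatchNorm2d(4)).half() + >>> model = patch_norm_fp32(model) # BatchNorm params are fp32 again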
+ """ + if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)): + module.float() + if isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3': + module.forward = patch_forward_method(module.forward, torch.half, + torch.float) + for child in module.children(): + patch_norm_fp32(child) + return module + + +def patch_forward_method(func, src_type, dst_type, convert_output=True): + """Patch the forward method of a module. + + Args: + func (callable): The original forward method. + src_type (torch.dtype): Type of input arguments to be converted from. + dst_type (torch.dtype): Type of input arguments to be converted to. + convert_output (bool): Whether to convert the output back to src_type. + + Returns: + callable: The patched forward method. + """ + + def new_forward(*args, **kwargs): + output = func(*cast_tensor_type(args, src_type, dst_type), + **cast_tensor_type(kwargs, src_type, dst_type)) + if convert_output: + output = cast_tensor_type(output, dst_type, src_type) + return output + + return new_forward + + +class LossScaler: + """Class that manages loss scaling in mixed precision training which + supports both dynamic or static mode. + + The implementation refers to + https://github.com/NVIDIA/apex/blob/master/apex/fp16_utils/loss_scaler.py. + Indirectly, by supplying ``mode='dynamic'`` for dynamic loss scaling. + It's important to understand how :class:`LossScaler` operates. + Loss scaling is designed to combat the problem of underflowing + gradients encountered at long times when training fp16 networks. + Dynamic loss scaling begins by attempting a very high loss + scale. Ironically, this may result in OVERflowing gradients. + If overflowing gradients are encountered, :class:`FP16_Optimizer` then + skips the update step for this particular iteration/minibatch, + and :class:`LossScaler` adjusts the loss scale to a lower value. + If a certain number of iterations occur without overflowing gradients + detected,:class:`LossScaler` increases the loss scale once more. + In this way :class:`LossScaler` attempts to "ride the edge" of always + using the highest loss scale possible without incurring overflow. + + Args: + init_scale (float): Initial loss scale value, default: 2**32. + scale_factor (float): Factor used when adjusting the loss scale. + Default: 2. + mode (str): Loss scaling mode. 'dynamic' or 'static' + scale_window (int): Number of consecutive iterations without an + overflow to wait before increasing the loss scale. Default: 1000. 
+ """ + + def __init__(self, + init_scale=2**32, + mode='dynamic', + scale_factor=2., + scale_window=1000): + self.cur_scale = init_scale + self.cur_iter = 0 + assert mode in ('dynamic', + 'static'), 'mode can only be dynamic or static' + self.mode = mode + self.last_overflow_iter = -1 + self.scale_factor = scale_factor + self.scale_window = scale_window + + def has_overflow(self, params): + """Check if params contain overflow.""" + if self.mode != 'dynamic': + return False + for p in params: + if p.grad is not None and LossScaler._has_inf_or_nan(p.grad.data): + return True + return False + + def _has_inf_or_nan(x): + """Check if params contain NaN.""" + try: + cpu_sum = float(x.float().sum()) + except RuntimeError as instance: + if 'value cannot be converted' not in instance.args[0]: + raise + return True + else: + if cpu_sum == float('inf') or cpu_sum == -float('inf') \ + or cpu_sum != cpu_sum: + return True + return False + + def update_scale(self, overflow): + """update the current loss scale value when overflow happens.""" + if self.mode != 'dynamic': + return + if overflow: + self.cur_scale = max(self.cur_scale / self.scale_factor, 1) + self.last_overflow_iter = self.cur_iter + else: + if (self.cur_iter - self.last_overflow_iter) % \ + self.scale_window == 0: + self.cur_scale *= self.scale_factor + self.cur_iter += 1 + + def state_dict(self): + """Returns the state of the scaler as a :class:`dict`.""" + return dict( + cur_scale=self.cur_scale, + cur_iter=self.cur_iter, + mode=self.mode, + last_overflow_iter=self.last_overflow_iter, + scale_factor=self.scale_factor, + scale_window=self.scale_window) + + def load_state_dict(self, state_dict): + """Loads the loss_scaler state dict. + + Args: + state_dict (dict): scaler state. + """ + self.cur_scale = state_dict['cur_scale'] + self.cur_iter = state_dict['cur_iter'] + self.mode = state_dict['mode'] + self.last_overflow_iter = state_dict['last_overflow_iter'] + self.scale_factor = state_dict['scale_factor'] + self.scale_window = state_dict['scale_window'] + + @property + def loss_scale(self): + return self.cur_scale diff --git a/mmcv/utils/grid_mask.py b/mmcv/utils/grid_mask.py new file mode 100755 index 0000000..3d04b2c --- /dev/null +++ b/mmcv/utils/grid_mask.py @@ -0,0 +1,124 @@ +import torch +import torch.nn as nn +import numpy as np +from PIL import Image +from mmcv.runner import force_fp32, auto_fp16 + +class Grid(object): + def __init__(self, use_h, use_w, rotate = 1, offset=False, ratio = 0.5, mode=0, prob = 1.): + self.use_h = use_h + self.use_w = use_w + self.rotate = rotate + self.offset = offset + self.ratio = ratio + self.mode=mode + self.st_prob = prob + self.prob = prob + + def set_prob(self, epoch, max_epoch): + self.prob = self.st_prob * epoch / max_epoch + + def __call__(self, img, label): + if np.random.rand() > self.prob: + return img, label + h = img.size(1) + w = img.size(2) + self.d1 = 2 + self.d2 = min(h, w) + hh = int(1.5*h) + ww = int(1.5*w) + d = np.random.randint(self.d1, self.d2) + if self.ratio == 1: + self.l = np.random.randint(1, d) + else: + self.l = min(max(int(d*self.ratio+0.5),1),d-1) + mask = np.ones((hh, ww), np.float32) + st_h = np.random.randint(d) + st_w = np.random.randint(d) + if self.use_h: + for i in range(hh//d): + s = d*i + st_h + t = min(s+self.l, hh) + mask[s:t,:] *= 0 + if self.use_w: + for i in range(ww//d): + s = d*i + st_w + t = min(s+self.l, ww) + mask[:,s:t] *= 0 + + r = np.random.randint(self.rotate) + mask = Image.fromarray(np.uint8(mask)) + mask = mask.rotate(r) + mask = 
np.asarray(mask) + mask = mask[(hh-h)//2:(hh-h)//2+h, (ww-w)//2:(ww-w)//2+w] + + mask = torch.from_numpy(mask).float() + if self.mode == 1: + mask = 1-mask + + mask = mask.expand_as(img) + if self.offset: + offset = torch.from_numpy(2 * (np.random.rand(h,w) - 0.5)).float() + offset = (1 - mask) * offset + img = img * mask + offset + else: + img = img * mask + + return img, label + + +class GridMask(nn.Module): + def __init__(self, use_h, use_w, rotate=1, offset=False, ratio=0.5, mode=0, prob=1.): + super(GridMask, self).__init__() + self.use_h = use_h + self.use_w = use_w + self.rotate = rotate + self.offset = offset + self.ratio = ratio + self.mode = mode + self.st_prob = prob + self.prob = prob + self.fp16_enabled = False + def set_prob(self, epoch, max_epoch): + self.prob = self.st_prob * epoch / max_epoch + @auto_fp16() + def forward(self, x): + if np.random.rand() > self.prob or not self.training: + return x + n,c,h,w = x.size() + x = x.view(-1,h,w) + hh = int(1.5*h) + ww = int(1.5*w) + d = np.random.randint(2, h) + self.l = min(max(int(d*self.ratio+0.5),1),d-1) + mask = np.ones((hh, ww), np.float32) + st_h = np.random.randint(d) + st_w = np.random.randint(d) + if self.use_h: + for i in range(hh//d): + s = d*i + st_h + t = min(s+self.l, hh) + mask[s:t,:] *= 0 + if self.use_w: + for i in range(ww//d): + s = d*i + st_w + t = min(s+self.l, ww) + mask[:,s:t] *= 0 + + r = np.random.randint(self.rotate) + mask = Image.fromarray(np.uint8(mask)) + mask = mask.rotate(r) + mask = np.asarray(mask) + mask = mask[(hh-h)//2:(hh-h)//2+h, (ww-w)//2:(ww-w)//2+w] + + mask = torch.from_numpy(mask).to(x.dtype).cuda() + if self.mode == 1: + mask = 1-mask + mask = mask.expand_as(x) + if self.offset: + offset = torch.from_numpy(2 * (np.random.rand(h,w) - 0.5)).to(x.dtype).cuda() + x = x * mask + offset * (1 - mask) + else: + x = x * mask + + return x.view(n,c,h,w) \ No newline at end of file diff --git a/mmcv/utils/hub.py b/mmcv/utils/hub.py new file mode 100644 index 0000000..a2505c0 --- /dev/null +++ b/mmcv/utils/hub.py @@ -0,0 +1,128 @@ +# The 1.6 release of PyTorch switched torch.save to use a new zipfile-based +# file format. It will cause RuntimeError when a checkpoint was saved in +# torch >= 1.6.0 but loaded in torch < 1.7.0. +# More details at https://github.com/open-mmlab/mmpose/issues/904 +from .path import mkdir_or_exist +from .version_utils import digit_version +import torch + +TORCH_VERSION = torch.__version__ + +if digit_version(TORCH_VERSION) < digit_version('1.7.0'): + # Modified from https://github.com/pytorch/pytorch/blob/master/torch/hub.py + import os + import torch + import warnings + from urllib.parse import urlparse + import sys + import zipfile + from torch.hub import download_url_to_file, _get_torch_home, HASH_REGEX + + # Hub used to support automatically extracting from zipfiles manually + # compressed by users. The legacy zip format expects only one file from + # torch.save() < 1.6 in the zip. We should remove this support since + # zipfile is now the default format for torch.save(). + def _is_legacy_zip_format(filename): + if zipfile.is_zipfile(filename): + infolist = zipfile.ZipFile(filename).infolist() + return len(infolist) == 1 and not infolist[0].is_dir() + return False + + def _legacy_zip_load(filename, model_dir, map_location): + warnings.warn('Falling back to the old format < 1.6. This support will' + ' be deprecated in favor of default zipfile format ' + 'introduced in 1.6. 
Please redo torch.save() to save it ' + 'in the new zipfile format.') + # Note: extractall() defaults to overwrite file if exists. No need to + # clean up beforehand. We deliberately don't handle tarfile here + # since our legacy serialization format was in tar. + # E.g. resnet18-5c106cde.pth which is widely used. + with zipfile.ZipFile(filename) as f: + members = f.infolist() + if len(members) != 1: + raise RuntimeError( + 'Only one file(not dir) is allowed in the zipfile') + f.extractall(model_dir) + extracted_name = members[0].filename + extracted_file = os.path.join(model_dir, extracted_name) + return torch.load(extracted_file, map_location=map_location) + + def load_url(url, + model_dir=None, + map_location=None, + progress=True, + check_hash=False, + file_name=None): + r"""Loads the Torch serialized object at the given URL. + + If the downloaded file is a zip file, it will be automatically + decompressed. + + If the object is already present in `model_dir`, it's deserialized and + returned. + The default value of ``model_dir`` is ``<hub_dir>/checkpoints`` where + ``hub_dir`` is the directory returned by :func:`~torch.hub.get_dir`. + + Args: + url (str): URL of the object to download + model_dir (str, optional): directory in which to save the object + map_location (optional): a function or a dict specifying how to + remap storage locations (see torch.load) + progress (bool, optional): whether or not to display a progress bar + to stderr. Default: True + check_hash(bool, optional): If True, the filename part of the URL + should follow the naming convention ``filename-<sha256>.ext`` + where ``<sha256>`` is the first eight or more digits of the + SHA256 hash of the contents of the file. The hash is used to + ensure unique names and to verify the contents of the file. + Default: False + file_name (str, optional): name for the downloaded file. Filename + from ``url`` will be used if not set. Default: None. + + Example: + >>> url = ('https://s3.amazonaws.com/pytorch/models/resnet18-5c106' + ... 'cde.pth') + >>> state_dict = torch.hub.load_state_dict_from_url(url) + """ + # Issue warning to move data if old env is set + if os.getenv('TORCH_MODEL_ZOO'): + warnings.warn('TORCH_MODEL_ZOO is deprecated, please use env ' + 'TORCH_HOME instead') + + if model_dir is None: + torch_home = _get_torch_home() + model_dir = os.path.join(torch_home, 'checkpoints') + + mkdir_or_exist(model_dir) + + parts = urlparse(url) + filename = os.path.basename(parts.path) + if file_name is not None: + filename = file_name + cached_file = os.path.join(model_dir, filename) + if not os.path.exists(cached_file): + sys.stderr.write('Downloading: "{}" to {}\n'.format( + url, cached_file)) + hash_prefix = None + if check_hash: + r = HASH_REGEX.search(filename) # r is Optional[Match[str]] + hash_prefix = r.group(1) if r else None + download_url_to_file( + url, cached_file, hash_prefix, progress=progress) + + if _is_legacy_zip_format(cached_file): + return _legacy_zip_load(cached_file, model_dir, map_location) + + try: + return torch.load(cached_file, map_location=map_location) + except RuntimeError as error: + if digit_version(TORCH_VERSION) < digit_version('1.5.0'): + warnings.warn( + f'If the error is the same as "{cached_file} is a zip ' + 'archive (did you mean to use torch.jit.load()?)", you can' + ' upgrade your torch to 1.5.0 or higher (current torch ' + f'version is {TORCH_VERSION}). 
The error was raised ' + ' because the checkpoint was saved in torch>=1.6.0 but ' + 'loaded in torch<1.5.') + raise error +else: + from torch.utils.model_zoo import load_url # noqa: F401 diff --git a/mmcv/utils/log_buffer.py b/mmcv/utils/log_buffer.py new file mode 100644 index 0000000..d949e29 --- /dev/null +++ b/mmcv/utils/log_buffer.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import OrderedDict + +import numpy as np + + +class LogBuffer: + + def __init__(self): + self.val_history = OrderedDict() + self.n_history = OrderedDict() + self.output = OrderedDict() + self.ready = False + + def clear(self): + self.val_history.clear() + self.n_history.clear() + self.clear_output() + + def clear_output(self): + self.output.clear() + self.ready = False + + def update(self, vars, count=1): + assert isinstance(vars, dict) + for key, var in vars.items(): + if key not in self.val_history: + self.val_history[key] = [] + self.n_history[key] = [] + self.val_history[key].append(var) + self.n_history[key].append(count) + + def average(self, n=0): + """Average latest n values or all values.""" + assert n >= 0 + for key in self.val_history: + values = np.array(self.val_history[key][-n:]) + nums = np.array(self.n_history[key][-n:]) + avg = np.sum(values * nums) / np.sum(nums) + self.output[key] = avg + self.ready = True diff --git a/mmcv/utils/logger.py b/mmcv/utils/logger.py new file mode 100644 index 0000000..ed1de21 --- /dev/null +++ b/mmcv/utils/logger.py @@ -0,0 +1,21 @@ +import logging +from .logging import get_logger +from mmcv import __version__ + +def get_root_logger(log_file=None, log_level=logging.INFO, name = __version__): + """Get root logger. + + Args: + log_file (str, optional): File path of log. Defaults to None. + log_level (int, optional): The level of logger. + Defaults to logging.INFO. + + Returns: + :obj:`logging.Logger`: The obtained logger + """ + logger = get_logger(name=name, log_file=log_file, log_level=log_level) + + logging_filter = logging.Filter(name) + logging_filter.filter = lambda record: record.find(name) != -1 + + return logger diff --git a/mmcv/utils/logging.py b/mmcv/utils/logging.py new file mode 100644 index 0000000..4aa0e04 --- /dev/null +++ b/mmcv/utils/logging.py @@ -0,0 +1,110 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging + +import torch.distributed as dist + +logger_initialized = {} + + +def get_logger(name, log_file=None, log_level=logging.INFO, file_mode='w'): + """Initialize and get a logger by name. + + If the logger has not been initialized, this method will initialize the + logger by adding one or two handlers, otherwise the initialized logger will + be directly returned. During initialization, a StreamHandler will always be + added. If `log_file` is specified and the process rank is 0, a FileHandler + will also be added. + + Args: + name (str): Logger name. + log_file (str | None): The log filename. If specified, a FileHandler + will be added to the logger. + log_level (int): The logger level. Note that only the process of + rank 0 is affected, and other processes will set the level to + "Error" thus be silent most of the time. + file_mode (str): The file mode used in opening log file. + Defaults to 'w'. + + Returns: + logging.Logger: The expected logger. + """ + logger = logging.getLogger(name) + if name in logger_initialized: + return logger + # handle hierarchical names + # e.g., logger "a" is initialized, then logger "a.b" will skip the + # initialization since it is a child of "a". 
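+    # e.g. (illustrative): if get_logger('mmdet') has run, a later call to
+    # get_logger('mmdet.core') returns here without attaching handlers; its
+    # records propagate up to the 'mmdet' logger, which owns the handlers.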
+ for logger_name in logger_initialized: + if name.startswith(logger_name): + return logger + + # handle duplicate logs to the console + # Starting in 1.8.0, PyTorch DDP attaches a StreamHandler (NOTSET) + # to the root logger. As logger.propagate is True by default, this root + # level handler causes logging messages from rank>0 processes to + # unexpectedly show up on the console, creating much unwanted clutter. + # To fix this issue, we set the root logger's StreamHandler, if any, to log + # at the ERROR level. + for handler in logger.root.handlers: + if type(handler) is logging.StreamHandler: + handler.setLevel(logging.ERROR) + + stream_handler = logging.StreamHandler() + handlers = [stream_handler] + + if dist.is_available() and dist.is_initialized(): + rank = dist.get_rank() + else: + rank = 0 + + # only rank 0 will add a FileHandler + if rank == 0 and log_file is not None: + # Here, the default behaviour of the official logger is 'a'. Thus, we + # provide an interface to change the file mode to the default + # behaviour. + file_handler = logging.FileHandler(log_file, file_mode) + handlers.append(file_handler) + + formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)s - %(message)s') + for handler in handlers: + handler.setFormatter(formatter) + handler.setLevel(log_level) + logger.addHandler(handler) + + if rank == 0: + logger.setLevel(log_level) + else: + logger.setLevel(logging.ERROR) + + logger_initialized[name] = True + + return logger + + +def print_log(msg, logger=None, level=logging.INFO): + """Print a log message. + + Args: + msg (str): The message to be logged. + logger (logging.Logger | str | None): The logger to be used. + Some special loggers are: + - "silent": no message will be printed. + - other str: the logger obtained with `get_root_logger(logger)`. + - None: The `print()` method will be used to print log messages. + level (int): Logging level. Only available when `logger` is a Logger + object or "root". + """ + if logger is None: + print(msg) + elif isinstance(logger, logging.Logger): + logger.log(level, msg) + elif logger == 'silent': + pass + elif isinstance(logger, str): + _logger = get_logger(logger) + _logger.log(level, msg) + else: + raise TypeError( + 'logger should be either a logging.Logger object, str, ' + f'"silent" or None, but got {type(logger)}') diff --git a/mmcv/utils/memory.py b/mmcv/utils/memory.py new file mode 100644 index 0000000..3ecd7d7 --- /dev/null +++ b/mmcv/utils/memory.py @@ -0,0 +1,84 @@ +# Copyright (c) Facebook, Inc. and its affiliates. + +import logging +from contextlib import contextmanager +from functools import wraps +import torch + +__all__ = ["retry_if_cuda_oom"] + + +@contextmanager +def _ignore_torch_cuda_oom(): + """ + A context which ignores CUDA OOM exception from pytorch. + """ + try: + yield + except RuntimeError as e: + # NOTE: the string may change? + if "CUDA out of memory. " in str(e): + pass + else: + raise + + +def retry_if_cuda_oom(func): + """ + Makes a function retry itself after encountering + pytorch's CUDA OOM error. + It will first retry after calling `torch.cuda.empty_cache()`. + + If that still fails, it will then retry by trying to convert inputs to CPUs. + In this case, it expects the function to dispatch to CPU implementation. + The return values may become CPU tensors as well and it's user's + responsibility to convert it back to CUDA tensor if needed. 
+
+    Args:
+        func: a stateless callable that takes tensor-like objects as arguments
+
+    Returns:
+        a callable which retries `func` if OOM is encountered.
+
+    Examples:
+    ::
+        output = retry_if_cuda_oom(some_torch_function)(input1, input2)
+        # output may be on CPU even if inputs are on GPU
+
+    Note:
+        1. When converting inputs to CPU, it will only look at each argument and check
+           if it has `.device` and `.to` for conversion. Nested structures of tensors
+           are not supported.
+
+        2. Since the function might be called more than once, it has to be
+           stateless.
+    """
+
+    def maybe_to_cpu(x):
+        try:
+            like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to")
+        except AttributeError:
+            like_gpu_tensor = False
+        if like_gpu_tensor:
+            return x.to(device="cpu")
+        else:
+            return x
+
+    @wraps(func)
+    def wrapped(*args, **kwargs):
+        with _ignore_torch_cuda_oom():
+            return func(*args, **kwargs)
+
+        # Clear cache and retry
+        torch.cuda.empty_cache()
+        with _ignore_torch_cuda_oom():
+            return func(*args, **kwargs)
+
+        # Try on CPU. This slows down the code significantly, therefore print a notice.
+        logger = logging.getLogger(__name__)
+        logger.info("Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func)))
+        new_args = (maybe_to_cpu(x) for x in args)
+        new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()}
+        return func(*new_args, **new_kwargs)
+
+    return wrapped
\ No newline at end of file
diff --git a/mmcv/utils/misc.py b/mmcv/utils/misc.py
new file mode 100644
index 0000000..2c58d0d
--- /dev/null
+++ b/mmcv/utils/misc.py
@@ -0,0 +1,377 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import collections.abc
+import functools
+import itertools
+import subprocess
+import warnings
+from collections import abc
+from importlib import import_module
+from inspect import getfullargspec
+from itertools import repeat
+
+
+# From PyTorch internals
+def _ntuple(n):
+
+    def parse(x):
+        if isinstance(x, collections.abc.Iterable):
+            return x
+        return tuple(repeat(x, n))
+
+    return parse
+
+
+to_1tuple = _ntuple(1)
+to_2tuple = _ntuple(2)
+to_3tuple = _ntuple(3)
+to_4tuple = _ntuple(4)
+to_ntuple = _ntuple
+
+
+def is_str(x):
+    """Whether the input is a string instance.
+
+    Note: This method is deprecated since python 2 is no longer supported.
+    """
+    return isinstance(x, str)
+
+
+def import_modules_from_strings(imports, allow_failed_imports=False):
+    """Import modules from the given list of strings.
+
+    Args:
+        imports (list | str | None): The given module names to be imported.
+        allow_failed_imports (bool): If True, the failed imports will return
+            None. Otherwise, an ImportError is raised. Default: False.
+
+    Returns:
+        list[module] | module | None: The imported modules.
+
+    Examples:
+        >>> osp, sys = import_modules_from_strings(
+        ...
['os.path', 'sys']) + >>> import os.path as osp_ + >>> import sys as sys_ + >>> assert osp == osp_ + >>> assert sys == sys_ + """ + if not imports: + return + single_import = False + if isinstance(imports, str): + single_import = True + imports = [imports] + if not isinstance(imports, list): + raise TypeError( + f'custom_imports must be a list but got type {type(imports)}') + imported = [] + for imp in imports: + if not isinstance(imp, str): + raise TypeError( + f'{imp} is of type {type(imp)} and cannot be imported.') + try: + imported_tmp = import_module(imp) + except ImportError: + if allow_failed_imports: + warnings.warn(f'{imp} failed to import and is ignored.', + UserWarning) + imported_tmp = None + else: + raise ImportError + imported.append(imported_tmp) + if single_import: + imported = imported[0] + return imported + + +def iter_cast(inputs, dst_type, return_type=None): + """Cast elements of an iterable object into some type. + + Args: + inputs (Iterable): The input object. + dst_type (type): Destination type. + return_type (type, optional): If specified, the output object will be + converted to this type, otherwise an iterator. + + Returns: + iterator or specified type: The converted object. + """ + if not isinstance(inputs, abc.Iterable): + raise TypeError('inputs must be an iterable object') + if not isinstance(dst_type, type): + raise TypeError('"dst_type" must be a valid type') + + out_iterable = map(dst_type, inputs) + + if return_type is None: + return out_iterable + else: + return return_type(out_iterable) + + +def list_cast(inputs, dst_type): + """Cast elements of an iterable object into a list of some type. + + A partial method of :func:`iter_cast`. + """ + return iter_cast(inputs, dst_type, return_type=list) + + +def tuple_cast(inputs, dst_type): + """Cast elements of an iterable object into a tuple of some type. + + A partial method of :func:`iter_cast`. + """ + return iter_cast(inputs, dst_type, return_type=tuple) + + +def is_seq_of(seq, expected_type, seq_type=None): + """Check whether it is a sequence of some type. + + Args: + seq (Sequence): The sequence to be checked. + expected_type (type): Expected type of sequence items. + seq_type (type, optional): Expected sequence type. + + Returns: + bool: Whether the sequence is valid. + """ + if seq_type is None: + exp_seq_type = abc.Sequence + else: + assert isinstance(seq_type, type) + exp_seq_type = seq_type + if not isinstance(seq, exp_seq_type): + return False + for item in seq: + if not isinstance(item, expected_type): + return False + return True + + +def is_list_of(seq, expected_type): + """Check whether it is a list of some type. + + A partial method of :func:`is_seq_of`. + """ + return is_seq_of(seq, expected_type, seq_type=list) + + +def is_tuple_of(seq, expected_type): + """Check whether it is a tuple of some type. + + A partial method of :func:`is_seq_of`. + """ + return is_seq_of(seq, expected_type, seq_type=tuple) + + +def slice_list(in_list, lens): + """Slice a list into several sub lists by a list of given length. + + Args: + in_list (list): The list to be sliced. + lens(int or list): The expected length of each out list. + + Returns: + list: A list of sliced list. 
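+
+    Example:
+        >>> slice_list([1, 2, 3, 4, 5, 6], [2, 4])
+        [[1, 2], [3, 4, 5, 6]]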
+ """ + if isinstance(lens, int): + assert len(in_list) % lens == 0 + lens = [lens] * int(len(in_list) / lens) + if not isinstance(lens, list): + raise TypeError('"indices" must be an integer or a list of integers') + elif sum(lens) != len(in_list): + raise ValueError('sum of lens and list length does not ' + f'match: {sum(lens)} != {len(in_list)}') + out_list = [] + idx = 0 + for i in range(len(lens)): + out_list.append(in_list[idx:idx + lens[i]]) + idx += lens[i] + return out_list + + +def concat_list(in_list): + """Concatenate a list of list into a single list. + + Args: + in_list (list): The list of list to be merged. + + Returns: + list: The concatenated flat list. + """ + return list(itertools.chain(*in_list)) + + +def check_prerequisites( + prerequisites, + checker, + msg_tmpl='Prerequisites "{}" are required in method "{}" but not ' + 'found, please install them first.'): # yapf: disable + """A decorator factory to check if prerequisites are satisfied. + + Args: + prerequisites (str of list[str]): Prerequisites to be checked. + checker (callable): The checker method that returns True if a + prerequisite is meet, False otherwise. + msg_tmpl (str): The message template with two variables. + + Returns: + decorator: A specific decorator. + """ + + def wrap(func): + + @functools.wraps(func) + def wrapped_func(*args, **kwargs): + requirements = [prerequisites] if isinstance( + prerequisites, str) else prerequisites + missing = [] + for item in requirements: + if not checker(item): + missing.append(item) + if missing: + print(msg_tmpl.format(', '.join(missing), func.__name__)) + raise RuntimeError('Prerequisites not meet.') + else: + return func(*args, **kwargs) + + return wrapped_func + + return wrap + + +def _check_py_package(package): + try: + import_module(package) + except ImportError: + return False + else: + return True + + +def _check_executable(cmd): + if subprocess.call(f'which {cmd}', shell=True) != 0: + return False + else: + return True + + +def requires_package(prerequisites): + """A decorator to check if some python packages are installed. + + Example: + >>> @requires_package('numpy') + >>> func(arg1, args): + >>> return numpy.zeros(1) + array([0.]) + >>> @requires_package(['numpy', 'non_package']) + >>> func(arg1, args): + >>> return numpy.zeros(1) + ImportError + """ + return check_prerequisites(prerequisites, checker=_check_py_package) + + +def requires_executable(prerequisites): + """A decorator to check if some executable files are installed. + + Example: + >>> @requires_executable('ffmpeg') + >>> func(arg1, args): + >>> print(1) + 1 + """ + return check_prerequisites(prerequisites, checker=_check_executable) + + +def deprecated_api_warning(name_dict, cls_name=None): + """A decorator to check if some arguments are deprecate and try to replace + deprecate src_arg_name to dst_arg_name. + + Args: + name_dict(dict): + key (str): Deprecate argument names. + val (str): Expected argument names. + + Returns: + func: New function. 
+ """ + + def api_warning_wrapper(old_func): + + @functools.wraps(old_func) + def new_func(*args, **kwargs): + # get the arg spec of the decorated method + args_info = getfullargspec(old_func) + # get name of the function + func_name = old_func.__name__ + if cls_name is not None: + func_name = f'{cls_name}.{func_name}' + if args: + arg_names = args_info.args[:len(args)] + for src_arg_name, dst_arg_name in name_dict.items(): + if src_arg_name in arg_names: + warnings.warn( + f'"{src_arg_name}" is deprecated in ' + f'`{func_name}`, please use "{dst_arg_name}" ' + 'instead') + arg_names[arg_names.index(src_arg_name)] = dst_arg_name + if kwargs: + for src_arg_name, dst_arg_name in name_dict.items(): + if src_arg_name in kwargs: + + assert dst_arg_name not in kwargs, ( + f'The expected behavior is to replace ' + f'the deprecated key `{src_arg_name}` to ' + f'new key `{dst_arg_name}`, but got them ' + f'in the arguments at the same time, which ' + f'is confusing. `{src_arg_name} will be ' + f'deprecated in the future, please ' + f'use `{dst_arg_name}` instead.') + + warnings.warn( + f'"{src_arg_name}" is deprecated in ' + f'`{func_name}`, please use "{dst_arg_name}" ' + 'instead') + kwargs[dst_arg_name] = kwargs.pop(src_arg_name) + + # apply converted arguments to the decorated method + output = old_func(*args, **kwargs) + return output + + return new_func + + return api_warning_wrapper + + +def is_method_overridden(method, base_class, derived_class): + """Check if a method of base class is overridden in derived class. + + Args: + method (str): the method name to check. + base_class (type): the class of the base class. + derived_class (type | Any): the class or instance of the derived class. + """ + assert isinstance(base_class, type), \ + "base_class doesn't accept instance, Please pass class instead." + + if not isinstance(derived_class, type): + derived_class = derived_class.__class__ + + base_method = getattr(base_class, method) + derived_method = getattr(derived_class, method) + return derived_method != base_method + + +def has_method(obj: object, method: str) -> bool: + """Check whether the object has a method. + + Args: + method (str): The method name to check. + obj (object): The object to check. + + Returns: + bool: True if the object has the method else False. + """ + return hasattr(obj, method) and callable(getattr(obj, method)) diff --git a/mmcv/utils/path.py b/mmcv/utils/path.py new file mode 100644 index 0000000..7dab4b3 --- /dev/null +++ b/mmcv/utils/path.py @@ -0,0 +1,101 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os +import os.path as osp +from pathlib import Path + +from .misc import is_str + + +def is_filepath(x): + return is_str(x) or isinstance(x, Path) + + +def fopen(filepath, *args, **kwargs): + if is_str(filepath): + return open(filepath, *args, **kwargs) + elif isinstance(filepath, Path): + return filepath.open(*args, **kwargs) + raise ValueError('`filepath` should be a string or a Path') + + +def check_file_exist(filename, msg_tmpl='file "{}" does not exist'): + if not osp.isfile(filename): + raise FileNotFoundError(msg_tmpl.format(filename)) + + +def mkdir_or_exist(dir_name, mode=0o777): + if dir_name == '': + return + dir_name = osp.expanduser(dir_name) + os.makedirs(dir_name, mode=mode, exist_ok=True) + + +def symlink(src, dst, overwrite=True, **kwargs): + if os.path.lexists(dst) and overwrite: + os.remove(dst) + os.symlink(src, dst, **kwargs) + + +def scandir(dir_path, suffix=None, recursive=False, case_sensitive=True): + """Scan a directory to find the interested files. + + Args: + dir_path (str | obj:`Path`): Path of the directory. + suffix (str | tuple(str), optional): File suffix that we are + interested in. Default: None. + recursive (bool, optional): If set to True, recursively scan the + directory. Default: False. + case_sensitive (bool, optional) : If set to False, ignore the case of + suffix. Default: True. + + Returns: + A generator for all the interested files with relative paths. + """ + if isinstance(dir_path, (str, Path)): + dir_path = str(dir_path) + else: + raise TypeError('"dir_path" must be a string or Path object') + + if (suffix is not None) and not isinstance(suffix, (str, tuple)): + raise TypeError('"suffix" must be a string or tuple of strings') + + if suffix is not None and not case_sensitive: + suffix = suffix.lower() if isinstance(suffix, str) else tuple( + item.lower() for item in suffix) + + root = dir_path + + def _scandir(dir_path, suffix, recursive, case_sensitive): + for entry in os.scandir(dir_path): + if not entry.name.startswith('.') and entry.is_file(): + rel_path = osp.relpath(entry.path, root) + _rel_path = rel_path if case_sensitive else rel_path.lower() + if suffix is None or _rel_path.endswith(suffix): + yield rel_path + elif recursive and os.path.isdir(entry.path): + # scan recursively if entry.path is a directory + yield from _scandir(entry.path, suffix, recursive, + case_sensitive) + + return _scandir(dir_path, suffix, recursive, case_sensitive) + + +def find_vcs_root(path, markers=('.git', )): + """Finds the root directory (including itself) of specified markers. + + Args: + path (str): Path of directory or file. + markers (list[str], optional): List of file or directory names. + + Returns: + The directory contained one of the markers or None if not found. 
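+
+    Example:
+        >>> # illustrative: assumes /workspace/repo contains a .git directory
+        >>> find_vcs_root('/workspace/repo/tools/train.py')  # doctest: +SKIP
+        '/workspace/repo'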
+ """ + if osp.isfile(path): + path = osp.dirname(path) + + prev, cur = None, osp.abspath(osp.expanduser(path)) + while cur != prev: + if any(osp.exists(osp.join(cur, marker)) for marker in markers): + return cur + prev, cur = cur, osp.split(cur)[0] + return None diff --git a/mmcv/utils/position_embedding.py b/mmcv/utils/position_embedding.py new file mode 100644 index 0000000..290110f --- /dev/null +++ b/mmcv/utils/position_embedding.py @@ -0,0 +1,34 @@ +import torch +import torch.nn as nn +import math + +class RelPositionEmbedding(nn.Module): + def __init__(self, num_pos_feats=64, pos_norm=True): + super().__init__() + self.num_pos_feats = num_pos_feats + self.fc = nn.Linear(4, self.num_pos_feats,bias=False) + #nn.init.orthogonal_(self.fc.weight) + #self.fc.weight.requires_grad = False + self.pos_norm = pos_norm + if self.pos_norm: + self.norm = nn.LayerNorm(self.num_pos_feats) + def forward(self, tensor): + #mask = nesttensor.mask + B,C,H,W = tensor.shape + #print('tensor.shape', tensor.shape) + y_range = (torch.arange(H) / float(H - 1)).to(tensor.device) + #y_axis = torch.stack((y_range, 1-y_range),dim=1) + y_axis = torch.stack((torch.cos(y_range * math.pi), torch.sin(y_range * math.pi)), dim=1) + y_axis = y_axis.reshape(H, 1, 2).repeat(1, W, 1).reshape(H * W, 2) + + x_range = (torch.arange(W) / float(W - 1)).to(tensor.device) + #x_axis =torch.stack((x_range,1-x_range),dim=1) + x_axis = torch.stack((torch.cos(x_range * math.pi), torch.sin(x_range * math.pi)), dim=1) + x_axis = x_axis.reshape(1, W, 2).repeat(H, 1, 1).reshape(H * W, 2) + x_pos = torch.cat((y_axis, x_axis), dim=1) + x_pos = self.fc(x_pos) + + if self.pos_norm: + x_pos = self.norm(x_pos) + #print('xpos,', x_pos.max(),x_pos.min()) + return x_pos \ No newline at end of file diff --git a/mmcv/utils/priority.py b/mmcv/utils/priority.py new file mode 100644 index 0000000..64cc4e3 --- /dev/null +++ b/mmcv/utils/priority.py @@ -0,0 +1,60 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from enum import Enum + + +class Priority(Enum): + """Hook priority levels. + + +--------------+------------+ + | Level | Value | + +==============+============+ + | HIGHEST | 0 | + +--------------+------------+ + | VERY_HIGH | 10 | + +--------------+------------+ + | HIGH | 30 | + +--------------+------------+ + | ABOVE_NORMAL | 40 | + +--------------+------------+ + | NORMAL | 50 | + +--------------+------------+ + | BELOW_NORMAL | 60 | + +--------------+------------+ + | LOW | 70 | + +--------------+------------+ + | VERY_LOW | 90 | + +--------------+------------+ + | LOWEST | 100 | + +--------------+------------+ + """ + + HIGHEST = 0 + VERY_HIGH = 10 + HIGH = 30 + ABOVE_NORMAL = 40 + NORMAL = 50 + BELOW_NORMAL = 60 + LOW = 70 + VERY_LOW = 90 + LOWEST = 100 + + +def get_priority(priority): + """Get priority value. + + Args: + priority (int or str or :obj:`Priority`): Priority. + + Returns: + int: The priority value. + """ + if isinstance(priority, int): + if priority < 0 or priority > 100: + raise ValueError('priority must be between 0 and 100') + return priority + elif isinstance(priority, Priority): + return priority.value + elif isinstance(priority, str): + return Priority[priority.upper()].value + else: + raise TypeError('priority must be an integer or Priority enum value') diff --git a/mmcv/utils/progressbar.py b/mmcv/utils/progressbar.py new file mode 100644 index 0000000..0062f67 --- /dev/null +++ b/mmcv/utils/progressbar.py @@ -0,0 +1,208 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import sys +from collections.abc import Iterable +from multiprocessing import Pool +from shutil import get_terminal_size + +from .timer import Timer + + +class ProgressBar: + """A progress bar which can print the progress.""" + + def __init__(self, task_num=0, bar_width=50, start=True, file=sys.stdout): + self.task_num = task_num + self.bar_width = bar_width + self.completed = 0 + self.file = file + if start: + self.start() + + @property + def terminal_width(self): + width, _ = get_terminal_size() + return width + + def start(self): + if self.task_num > 0: + self.file.write(f'[{" " * self.bar_width}] 0/{self.task_num}, ' + 'elapsed: 0s, ETA:') + else: + self.file.write('completed: 0, elapsed: 0s') + self.file.flush() + self.timer = Timer() + + def update(self, num_tasks=1): + assert num_tasks > 0 + self.completed += num_tasks + elapsed = self.timer.since_start() + if elapsed > 0: + fps = self.completed / elapsed + else: + fps = float('inf') + if self.task_num > 0: + percentage = self.completed / float(self.task_num) + eta = int(elapsed * (1 - percentage) / percentage + 0.5) + msg = f'\r[{{}}] {self.completed}/{self.task_num}, ' \ + f'{fps:.1f} task/s, elapsed: {int(elapsed + 0.5)}s, ' \ + f'ETA: {eta:5}s' + + bar_width = min(self.bar_width, + int(self.terminal_width - len(msg)) + 2, + int(self.terminal_width * 0.6)) + bar_width = max(2, bar_width) + mark_width = int(bar_width * percentage) + bar_chars = '>' * mark_width + ' ' * (bar_width - mark_width) + self.file.write(msg.format(bar_chars)) + else: + self.file.write( + f'completed: {self.completed}, elapsed: {int(elapsed + 0.5)}s,' + f' {fps:.1f} tasks/s') + self.file.flush() + + +def track_progress(func, tasks, bar_width=50, file=sys.stdout, **kwargs): + """Track the progress of tasks execution with a progress bar. + + Tasks are done with a simple for-loop. + + Args: + func (callable): The function to be applied to each task. + tasks (list or tuple[Iterable, int]): A list of tasks or + (tasks, total num). + bar_width (int): Width of progress bar. + + Returns: + list: The task results. + """ + if isinstance(tasks, tuple): + assert len(tasks) == 2 + assert isinstance(tasks[0], Iterable) + assert isinstance(tasks[1], int) + task_num = tasks[1] + tasks = tasks[0] + elif isinstance(tasks, Iterable): + task_num = len(tasks) + else: + raise TypeError( + '"tasks" must be an iterable object or a (iterator, int) tuple') + prog_bar = ProgressBar(task_num, bar_width, file=file) + results = [] + for task in tasks: + results.append(func(task, **kwargs)) + prog_bar.update() + prog_bar.file.write('\n') + return results + + +def init_pool(process_num, initializer=None, initargs=None): + if initializer is None: + return Pool(process_num) + elif initargs is None: + return Pool(process_num, initializer) + else: + if not isinstance(initargs, tuple): + raise TypeError('"initargs" must be a tuple') + return Pool(process_num, initializer, initargs) + + +def track_parallel_progress(func, + tasks, + nproc, + initializer=None, + initargs=None, + bar_width=50, + chunksize=1, + skip_first=False, + keep_order=True, + file=sys.stdout): + """Track the progress of parallel task execution with a progress bar. + + The built-in :mod:`multiprocessing` module is used for process pools and + tasks are done with :func:`Pool.map` or :func:`Pool.imap_unordered`. + + Args: + func (callable): The function to be applied to each task. + tasks (list or tuple[Iterable, int]): A list of tasks or + (tasks, total num). + nproc (int): Process (worker) number. 
+ initializer (None or callable): Refer to :class:`multiprocessing.Pool` + for details. + initargs (None or tuple): Refer to :class:`multiprocessing.Pool` for + details. + chunksize (int): Refer to :class:`multiprocessing.Pool` for details. + bar_width (int): Width of progress bar. + skip_first (bool): Whether to skip the first sample for each worker + when estimating fps, since the initialization step may takes + longer. + keep_order (bool): If True, :func:`Pool.imap` is used, otherwise + :func:`Pool.imap_unordered` is used. + + Returns: + list: The task results. + """ + if isinstance(tasks, tuple): + assert len(tasks) == 2 + assert isinstance(tasks[0], Iterable) + assert isinstance(tasks[1], int) + task_num = tasks[1] + tasks = tasks[0] + elif isinstance(tasks, Iterable): + task_num = len(tasks) + else: + raise TypeError( + '"tasks" must be an iterable object or a (iterator, int) tuple') + pool = init_pool(nproc, initializer, initargs) + start = not skip_first + task_num -= nproc * chunksize * int(skip_first) + prog_bar = ProgressBar(task_num, bar_width, start, file=file) + results = [] + if keep_order: + gen = pool.imap(func, tasks, chunksize) + else: + gen = pool.imap_unordered(func, tasks, chunksize) + for result in gen: + results.append(result) + if skip_first: + if len(results) < nproc * chunksize: + continue + elif len(results) == nproc * chunksize: + prog_bar.start() + continue + prog_bar.update() + prog_bar.file.write('\n') + pool.close() + pool.join() + return results + + +def track_iter_progress(tasks, bar_width=50, file=sys.stdout): + """Track the progress of tasks iteration or enumeration with a progress + bar. + + Tasks are yielded with a simple for-loop. + + Args: + tasks (list or tuple[Iterable, int]): A list of tasks or + (tasks, total num). + bar_width (int): Width of progress bar. + + Yields: + list: The task results. + """ + if isinstance(tasks, tuple): + assert len(tasks) == 2 + assert isinstance(tasks[0], Iterable) + assert isinstance(tasks[1], int) + task_num = tasks[1] + tasks = tasks[0] + elif isinstance(tasks, Iterable): + task_num = len(tasks) + else: + raise TypeError( + '"tasks" must be an iterable object or a (iterator, int) tuple') + prog_bar = ProgressBar(task_num, bar_width, file=file) + for task in tasks: + yield task + prog_bar.update() + prog_bar.file.write('\n') diff --git a/mmcv/utils/registry.py b/mmcv/utils/registry.py new file mode 100644 index 0000000..21ad671 --- /dev/null +++ b/mmcv/utils/registry.py @@ -0,0 +1,315 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import inspect +import warnings +from functools import partial + +from .misc import is_seq_of + + +def build_from_cfg(cfg, registry, default_args=None): + """Build a module from config dict. + + Args: + cfg (dict): Config dict. It should at least contain the key "type". + registry (:obj:`Registry`): The registry to search the type from. + default_args (dict, optional): Default initialization arguments. + + Returns: + object: The constructed object. 
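+
+    Example:
+        >>> MODELS = Registry('models')
+        >>> @MODELS.register_module()
+        ... class ResNet:
+        ...     pass
+        >>> model = build_from_cfg(dict(type='ResNet'), MODELS)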
+ """ + if not isinstance(cfg, dict): + raise TypeError(f'cfg must be a dict, but got {type(cfg)}') + if 'type' not in cfg: + if default_args is None or 'type' not in default_args: + raise KeyError( + '`cfg` or `default_args` must contain the key "type", ' + f'but got {cfg}\n{default_args}') + if not isinstance(registry, Registry): + raise TypeError('registry must be an mmcv.Registry object, ' + f'but got {type(registry)}') + if not (isinstance(default_args, dict) or default_args is None): + raise TypeError('default_args must be a dict or None, ' + f'but got {type(default_args)}') + + args = cfg.copy() + + if default_args is not None: + for name, value in default_args.items(): + args.setdefault(name, value) + + obj_type = args.pop('type') + if isinstance(obj_type, str): + obj_cls = registry.get(obj_type) + if obj_cls is None: + raise KeyError( + f'{obj_type} is not in the {registry.name} registry') + elif inspect.isclass(obj_type): + obj_cls = obj_type + else: + raise TypeError( + f'type must be a str or valid type, but got {type(obj_type)}') + try: + return obj_cls(**args) + except Exception as e: + # Normal TypeError does not print class name. + raise type(e)(f'{obj_cls.__name__}: {e}') + + +class Registry: + """A registry to map strings to classes. + + Registered object could be built from registry. + Example: + >>> MODELS = Registry('models') + >>> @MODELS.register_module() + >>> class ResNet: + >>> pass + >>> resnet = MODELS.build(dict(type='ResNet')) + + Please refer to + https://mmcv.readthedocs.io/en/latest/understand_mmcv/registry.html for + advanced usage. + + Args: + name (str): Registry name. + build_func(func, optional): Build function to construct instance from + Registry, func:`build_from_cfg` is used if neither ``parent`` or + ``build_func`` is specified. If ``parent`` is specified and + ``build_func`` is not given, ``build_func`` will be inherited + from ``parent``. Default: None. + parent (Registry, optional): Parent registry. The class registered in + children registry could be built from parent. Default: None. + scope (str, optional): The scope of registry. It is the key to search + for children registry. If not specified, scope will be the name of + the package where class is defined, e.g. mmdet, mmcls, mmseg. + Default: None. + """ + + def __init__(self, name, build_func=None, parent=None, scope=None): + self._name = name + self._module_dict = dict() + self._children = dict() + self._scope = self.infer_scope() if scope is None else scope + + # self.build_func will be set with the following priority: + # 1. build_func + # 2. parent.build_func + # 3. build_from_cfg + if build_func is None: + if parent is not None: + self.build_func = parent.build_func + else: + self.build_func = build_from_cfg + else: + self.build_func = build_func + if parent is not None: + assert isinstance(parent, Registry) + parent._add_children(self) + self.parent = parent + else: + self.parent = None + + def __len__(self): + return len(self._module_dict) + + def __contains__(self, key): + return self.get(key) is not None + + def __repr__(self): + format_str = self.__class__.__name__ + \ + f'(name={self._name}, ' \ + f'items={self._module_dict})' + return format_str + + @staticmethod + def infer_scope(): + """Infer the scope of registry. + + The name of the package where registry is defined will be returned. + + Example: + # in mmdet/models/backbone/resnet.py + >>> MODELS = Registry('models') + >>> @MODELS.register_module() + >>> class ResNet: + >>> pass + The scope of ``ResNet`` will be ``mmdet``. 
+
+
+        Returns:
+            scope (str): The inferred scope name.
+        """
+        # inspect.stack() trace where this function is called, the index-2
+        # indicates the frame where `infer_scope()` is called
+        filename = inspect.getmodule(inspect.stack()[2][0]).__name__
+        split_filename = filename.split('.')
+        return split_filename[0]
+
+    @staticmethod
+    def split_scope_key(key):
+        """Split scope and key.
+
+        The first scope will be split from key.
+
+        Examples:
+            >>> Registry.split_scope_key('mmcv.ResNet')
+            'mmcv', 'ResNet'
+            >>> Registry.split_scope_key('ResNet')
+            None, 'ResNet'
+
+        Return:
+            scope (str, None): The first scope.
+            key (str): The remaining key.
+        """
+        split_index = key.find('.')
+        if split_index != -1:
+            return key[:split_index], key[split_index + 1:]
+        else:
+            return None, key
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    def scope(self):
+        return self._scope
+
+    @property
+    def module_dict(self):
+        return self._module_dict
+
+    @property
+    def children(self):
+        return self._children
+
+    def get(self, key):
+        """Get the registry record.
+
+        Args:
+            key (str): The class name in string format.
+
+        Returns:
+            class: The corresponding class.
+        """
+        scope, real_key = self.split_scope_key(key)
+        if scope is None or scope == self._scope:
+            # get from self
+            if real_key in self._module_dict:
+                return self._module_dict[real_key]
+        else:
+            # get from self._children
+            if scope in self._children:
+                return self._children[scope].get(real_key)
+            else:
+                # goto root
+                parent = self.parent
+                while parent.parent is not None:
+                    parent = parent.parent
+                return parent.get(key)
+
+    def build(self, *args, **kwargs):
+        return self.build_func(*args, **kwargs, registry=self)
+
+    def _add_children(self, registry):
+        """Add children for a registry.
+
+        The ``registry`` will be added as children based on its scope.
+        The parent registry could build objects from children registry.
+
+        Example:
+            >>> models = Registry('models')
+            >>> mmdet_models = Registry('models', parent=models)
+            >>> @mmdet_models.register_module()
+            >>> class ResNet:
+            >>>     pass
+            >>> resnet = models.build(dict(type='mmdet.ResNet'))
+        """
+
+        assert isinstance(registry, Registry)
+        assert registry.scope is not None
+        assert registry.scope not in self.children, \
+            f'scope {registry.scope} exists in {self.name} registry'
+        self.children[registry.scope] = registry
+
+    def _register_module(self, module_class, module_name=None, force=False):
+        if not inspect.isclass(module_class):
+            raise TypeError('module must be a class, '
+                            f'but got {type(module_class)}')
+
+        if module_name is None:
+            module_name = module_class.__name__
+        if isinstance(module_name, str):
+            module_name = [module_name]
+        for name in module_name:
+            if not force and name in self._module_dict:
+                raise KeyError(f'{name} is already registered '
+                               f'in {self.name}')
+            self._module_dict[name] = module_class
+
+    def deprecated_register_module(self, cls=None, force=False):
+        warnings.warn(
+            'The old API of register_module(module, force=False) '
+            'is deprecated and will be removed, please use the new API '
+            'register_module(name=None, force=False, module=None) instead.')
+        if cls is None:
+            return partial(self.deprecated_register_module, force=force)
+        self._register_module(cls, force=force)
+        return cls
+
+    def register_module(self, name=None, force=False, module=None):
+        """Register a module.
+
+        A record will be added to `self._module_dict`, whose key is the class
+        name or the specified name, and value is the class itself.
+        It can be used as a decorator or a normal function.
+
+        Example:
+            >>> backbones = Registry('backbone')
+            >>> @backbones.register_module()
+            >>> class ResNet:
+            >>>     pass
+
+            >>> backbones = Registry('backbone')
+            >>> @backbones.register_module(name='mnet')
+            >>> class MobileNet:
+            >>>     pass
+
+            >>> backbones = Registry('backbone')
+            >>> class ResNet:
+            >>>     pass
+            >>> backbones.register_module(ResNet)
+
+        Args:
+            name (str | None): The module name to be registered. If not
+                specified, the class name will be used.
+            force (bool, optional): Whether to override an existing class with
+                the same name. Default: False.
+            module (type): Module class to be registered.
+        """
+        if not isinstance(force, bool):
+            raise TypeError(f'force must be a boolean, but got {type(force)}')
+        # NOTE: This is a workaround to be compatible with the old api,
+        # while it may introduce unexpected bugs.
+        if isinstance(name, type):
+            return self.deprecated_register_module(name, force=force)
+
+        # raise the error ahead of time
+        if not (name is None or isinstance(name, str) or is_seq_of(name, str)):
+            raise TypeError(
+                'name must be either of None, an instance of str or a sequence'
+                f' of str, but got {type(name)}')
+
+        # use it as a normal method: x.register_module(module=SomeClass)
+        if module is not None:
+            self._register_module(
+                module_class=module, module_name=name, force=force)
+            return module
+
+        # use it as a decorator: @x.register_module()
+        def _register(cls):
+            self._register_module(
+                module_class=cls, module_name=name, force=force)
+            return cls
+
+        return _register
diff --git a/mmcv/utils/runner_utils.py b/mmcv/utils/runner_utils.py
new file mode 100644
index 0000000..0ec8cf3
--- /dev/null
+++ b/mmcv/utils/runner_utils.py
@@ -0,0 +1,254 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import os
+import random
+import sys
+import time
+import warnings
+from getpass import getuser
+from socket import gethostname
+
+import numpy as np
+from mmcv.utils import is_str
+import functools
+import subprocess
+from collections import OrderedDict
+
+import torch
+import torch.multiprocessing as mp
+from torch import distributed as dist
+from torch._utils import (_flatten_dense_tensors, _take_tensors,
+                          _unflatten_dense_tensors)
+
+
+def get_host_info():
+    """Get hostname and username.
+
+    Return an empty string if an exception is raised, e.g. ``getpass.getuser()``
+    will lead to an error in a docker container.
+    """
+    host = ''
+    try:
+        host = f'{getuser()}@{gethostname()}'
+    except Exception as e:
+        warnings.warn(f'Host or user not found: {str(e)}')
+    finally:
+        return host
+
+
+def get_time_str():
+    return time.strftime('%Y%m%d_%H%M%S', time.localtime())
+
+
+def obj_from_dict(info, parent=None, default_args=None):
+    """Initialize an object from dict.
+
+    The dict must contain the key "type", which indicates the object type, it
+    can be either a string or type, such as "list" or ``list``. Remaining
+    fields are treated as the arguments for constructing the object.
+
+    Args:
+        info (dict): Object types and arguments.
+        parent (:class:`module`): Module which may contain expected object
+            classes.
+        default_args (dict, optional): Default arguments for initializing the
+            object.
+
+    Returns:
+        any type: Object built from the dict.
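+
+    Example:
+        >>> import torch.optim as optim
+        >>> # illustrative: `params` would normally come from a model
+        >>> optimizer = obj_from_dict(
+        ...     dict(type='SGD', lr=0.01), parent=optim,
+        ...     default_args=dict(params=[torch.zeros(1, requires_grad=True)]))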
+ """ + assert isinstance(info, dict) and 'type' in info + assert isinstance(default_args, dict) or default_args is None + args = info.copy() + obj_type = args.pop('type') + if is_str(obj_type): + if parent is not None: + obj_type = getattr(parent, obj_type) + else: + obj_type = sys.modules[obj_type] + elif not isinstance(obj_type, type): + raise TypeError('type must be a str or valid type, but ' + f'got {type(obj_type)}') + if default_args is not None: + for name, value in default_args.items(): + args.setdefault(name, value) + return obj_type(**args) + + +def set_random_seed(seed, deterministic=False, use_rank_shift=False): + """Set random seed. + + Args: + seed (int): Seed to be used. + deterministic (bool): Whether to set the deterministic option for + CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` + to True and `torch.backends.cudnn.benchmark` to False. + Default: False. + rank_shift (bool): Whether to add rank number to the random seed to + have different random seed in different threads. Default: False. + """ + if use_rank_shift: + rank, _ = get_dist_info() + seed += rank + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + os.environ['PYTHONHASHSEED'] = str(seed) + if deterministic: + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +def init_dist(launcher, backend='nccl', **kwargs): + if mp.get_start_method(allow_none=True) is None: + mp.set_start_method('spawn') + if launcher == 'pytorch': + _init_dist_pytorch(backend, **kwargs) + elif launcher == 'mpi': + _init_dist_mpi(backend, **kwargs) + elif launcher == 'slurm': + _init_dist_slurm(backend, **kwargs) + else: + raise ValueError(f'Invalid launcher type: {launcher}') + + +def _init_dist_pytorch(backend, **kwargs): + # TODO: use local_rank instead of rank % num_gpus + rank = int(os.environ['RANK']) + num_gpus = torch.cuda.device_count() + torch.cuda.set_device(rank % num_gpus) + dist.init_process_group(backend=backend, **kwargs) + + +def _init_dist_mpi(backend, **kwargs): + # TODO: use local_rank instead of rank % num_gpus + rank = int(os.environ['OMPI_COMM_WORLD_RANK']) + num_gpus = torch.cuda.device_count() + torch.cuda.set_device(rank % num_gpus) + dist.init_process_group(backend=backend, **kwargs) + + +def _init_dist_slurm(backend, port=None): + """Initialize slurm distributed training environment. + + If argument ``port`` is not specified, then the master port will be system + environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system + environment variable, then a default port ``29500`` will be used. + + Args: + backend (str): Backend of torch.distributed. + port (int, optional): Master port. Defaults to None. 
+ """ + proc_id = int(os.environ['SLURM_PROCID']) + ntasks = int(os.environ['SLURM_NTASKS']) + node_list = os.environ['SLURM_NODELIST'] + num_gpus = torch.cuda.device_count() + torch.cuda.set_device(proc_id % num_gpus) + addr = subprocess.getoutput( + f'scontrol show hostname {node_list} | head -n1') + # specify master port + if port is not None: + os.environ['MASTER_PORT'] = str(port) + elif 'MASTER_PORT' in os.environ: + pass # use MASTER_PORT in the environment variable + else: + # 29500 is torch.distributed default port + os.environ['MASTER_PORT'] = '29500' + # use MASTER_ADDR in the environment variable if it already exists + if 'MASTER_ADDR' not in os.environ: + os.environ['MASTER_ADDR'] = addr + os.environ['WORLD_SIZE'] = str(ntasks) + os.environ['LOCAL_RANK'] = str(proc_id % num_gpus) + os.environ['RANK'] = str(proc_id) + dist.init_process_group(backend=backend) + + +def get_dist_info(): + if dist.is_available() and dist.is_initialized(): + rank = dist.get_rank() + world_size = dist.get_world_size() + else: + rank = 0 + world_size = 1 + return rank, world_size + + +def master_only(func): + + @functools.wraps(func) + def wrapper(*args, **kwargs): + rank, _ = get_dist_info() + if rank == 0: + return func(*args, **kwargs) + + return wrapper + + +def allreduce_params(params, coalesce=True, bucket_size_mb=-1): + """Allreduce parameters. + + Args: + params (list[torch.Parameters]): List of parameters or buffers of a + model. + coalesce (bool, optional): Whether allreduce parameters as a whole. + Defaults to True. + bucket_size_mb (int, optional): Size of bucket, the unit is MB. + Defaults to -1. + """ + _, world_size = get_dist_info() + if world_size == 1: + return + params = [param.data for param in params] + if coalesce: + _allreduce_coalesced(params, world_size, bucket_size_mb) + else: + for tensor in params: + dist.all_reduce(tensor.div_(world_size)) + + +def allreduce_grads(params, coalesce=True, bucket_size_mb=-1): + """Allreduce gradients. + + Args: + params (list[torch.Parameters]): List of parameters of a model + coalesce (bool, optional): Whether allreduce parameters as a whole. + Defaults to True. + bucket_size_mb (int, optional): Size of bucket, the unit is MB. + Defaults to -1. + """ + grads = [ + param.grad.data for param in params + if param.requires_grad and param.grad is not None + ] + _, world_size = get_dist_info() + if world_size == 1: + return + if coalesce: + _allreduce_coalesced(grads, world_size, bucket_size_mb) + else: + for tensor in grads: + dist.all_reduce(tensor.div_(world_size)) + + +def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1): + if bucket_size_mb > 0: + bucket_size_bytes = bucket_size_mb * 1024 * 1024 + buckets = _take_tensors(tensors, bucket_size_bytes) + else: + buckets = OrderedDict() + for tensor in tensors: + tp = tensor.type() + if tp not in buckets: + buckets[tp] = [] + buckets[tp].append(tensor) + buckets = buckets.values() + + for bucket in buckets: + flat_tensors = _flatten_dense_tensors(bucket) + dist.all_reduce(flat_tensors) + flat_tensors.div_(world_size) + for tensor, synced in zip( + bucket, _unflatten_dense_tensors(flat_tensors, bucket)): + tensor.copy_(synced) \ No newline at end of file diff --git a/mmcv/utils/timer.py b/mmcv/utils/timer.py new file mode 100644 index 0000000..66d4a78 --- /dev/null +++ b/mmcv/utils/timer.py @@ -0,0 +1,118 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from time import time + + +class TimerError(Exception): + + def __init__(self, message): + self.message = message + super(TimerError, self).__init__(message) + + +class Timer: + """A flexible Timer class. + + :Example: + + >>> import time + >>> import mmcv + >>> with mmcv.Timer(): + >>> # simulate a code block that will run for 1s + >>> time.sleep(1) + 1.000 + >>> with mmcv.Timer(print_tmpl='it takes {:.1f} seconds'): + >>> # simulate a code block that will run for 1s + >>> time.sleep(1) + it takes 1.0 seconds + >>> timer = mmcv.Timer() + >>> time.sleep(0.5) + >>> print(timer.since_start()) + 0.500 + >>> time.sleep(0.5) + >>> print(timer.since_last_check()) + 0.500 + >>> print(timer.since_start()) + 1.000 + """ + + def __init__(self, start=True, print_tmpl=None): + self._is_running = False + self.print_tmpl = print_tmpl if print_tmpl else '{:.3f}' + if start: + self.start() + + @property + def is_running(self): + """bool: indicate whether the timer is running""" + return self._is_running + + def __enter__(self): + self.start() + return self + + def __exit__(self, type, value, traceback): + print(self.print_tmpl.format(self.since_last_check())) + self._is_running = False + + def start(self): + """Start the timer.""" + if not self._is_running: + self._t_start = time() + self._is_running = True + self._t_last = time() + + def since_start(self): + """Total time since the timer is started. + + Returns (float): Time in seconds. + """ + if not self._is_running: + raise TimerError('timer is not running') + self._t_last = time() + return self._t_last - self._t_start + + def since_last_check(self): + """Time since the last checking. + + Either :func:`since_start` or :func:`since_last_check` is a checking + operation. + + Returns (float): Time in seconds. + """ + if not self._is_running: + raise TimerError('timer is not running') + dur = time() - self._t_last + self._t_last = time() + return dur + + +_g_timers = {} # global timers + + +def check_time(timer_id): + """Add check points in a single line. + + This method is suitable for running a task on a list of items. A timer will + be registered when the method is called for the first time. + + :Example: + + >>> import time + >>> import mmcv + >>> for i in range(1, 6): + >>> # simulate a code block + >>> time.sleep(i) + >>> mmcv.check_time('task1') + 2.000 + 3.000 + 4.000 + 5.000 + + Args: + timer_id (str): Timer identifier. + """ + if timer_id not in _g_timers: + _g_timers[timer_id] = Timer() + return 0 + else: + return _g_timers[timer_id].since_last_check() diff --git a/mmcv/utils/util_mixins.py b/mmcv/utils/util_mixins.py new file mode 100644 index 0000000..9aed015 --- /dev/null +++ b/mmcv/utils/util_mixins.py @@ -0,0 +1,104 @@ +"""This module defines the :class:`NiceRepr` mixin class, which defines a +``__repr__`` and ``__str__`` method that only depend on a custom ``__nice__`` +method, which you must define. This means you only have to overload one +function instead of two. Furthermore, if the object defines a ``__len__`` +method, then the ``__nice__`` method defaults to something sensible, otherwise +it is treated as abstract and raises ``NotImplementedError``. + +To use simply have your object inherit from :class:`NiceRepr` +(multi-inheritance should be ok). + +This code was copied from the ubelt library: https://github.com/Erotemic/ubelt + +Example: + >>> # Objects that define __nice__ have a default __str__ and __repr__ + >>> class Student(NiceRepr): + ... def __init__(self, name): + ... self.name = name + ... def __nice__(self): + ... 
return self.name
+    >>> s1 = Student('Alice')
+    >>> s2 = Student('Bob')
+    >>> print(f's1 = {s1}')
+    >>> print(f's2 = {s2}')
+    s1 = <Student(Alice)>
+    s2 = <Student(Bob)>
+
+Example:
+    >>> # Objects that define __len__ have a default __nice__
+    >>> class Group(NiceRepr):
+    ...    def __init__(self, data):
+    ...        self.data = data
+    ...    def __len__(self):
+    ...        return len(self.data)
+    >>> g = Group([1, 2, 3])
+    >>> print(f'g = {g}')
+    g = <Group(3)>
+"""
+import warnings
+
+
+class NiceRepr:
+    """Inherit from this class and define ``__nice__`` to "nicely" print your
+    objects.
+
+    Defines ``__str__`` and ``__repr__`` in terms of ``__nice__`` function
+    Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``.
+    If the inheriting class has a ``__len__``, method then the default
+    ``__nice__`` method will return its length.
+
+    Example:
+        >>> class Foo(NiceRepr):
+        ...    def __nice__(self):
+        ...        return 'info'
+        >>> foo = Foo()
+        >>> assert str(foo) == '<Foo(info)>'
+        >>> assert repr(foo).startswith('<Foo(info) at ')
+
+    Example:
+        >>> class Bar(NiceRepr):
+        ...    pass
+        >>> bar = Bar()
+        >>> import pytest
+        >>> with pytest.warns(None) as record:
+        >>>     assert 'object at' in str(bar)
+        >>>     assert 'object at' in repr(bar)
+
+    Example:
+        >>> class Baz(NiceRepr):
+        ...    def __len__(self):
+        ...        return 5
+        >>> baz = Baz()
+        >>> assert str(baz) == '<Baz(5)>'
+    """
+
+    def __nice__(self):
+        """str: a "nice" summary string describing this module"""
+        if hasattr(self, '__len__'):
+            # It is a common pattern for objects to use __len__ in __nice__
+            # As a convenience we define a default __nice__ for these objects
+            return str(len(self))
+        else:
+            # In all other cases force the subclass to overload __nice__
+            raise NotImplementedError(
+                f'Define the __nice__ method for {self.__class__!r}')
+
+    def __repr__(self):
+        """str: the string of the module"""
+        try:
+            nice = self.__nice__()
+            classname = self.__class__.__name__
+            return f'<{classname}({nice}) at {hex(id(self))}>'
+        except NotImplementedError as ex:
+            warnings.warn(str(ex), category=RuntimeWarning)
+            return object.__repr__(self)
+
+    def __str__(self):
+        """str: the string of the module"""
+        try:
+            classname = self.__class__.__name__
+            nice = self.__nice__()
+            return f'<{classname}({nice})>'
+        except NotImplementedError as ex:
+            warnings.warn(str(ex), category=RuntimeWarning)
+            return object.__repr__(self)
diff --git a/mmcv/utils/version_utils.py b/mmcv/utils/version_utils.py
new file mode 100644
index 0000000..a7dda06
--- /dev/null
+++ b/mmcv/utils/version_utils.py
@@ -0,0 +1,88 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import os
+import subprocess
+import warnings
+
+from packaging.version import parse
+
+
+def digit_version(version_str: str, length: int = 4):
+    """Convert a version string into a tuple of integers.
+
+    This method is usually used for comparing two versions. For pre-release
+    versions: alpha < beta < rc.
+
+    Args:
+        version_str (str): The version string.
+        length (int): The maximum number of version levels. Default: 4.
+
+    Returns:
+        tuple[int]: The version info in digits (integers).
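+
+    Example:
+        >>> digit_version('1.7.0')
+        (1, 7, 0, 0, 0, 0)
+        >>> digit_version('1.7.0rc1')
+        (1, 7, 0, 0, -1, 1)
+        >>> digit_version('1.7.0rc1') < digit_version('1.7.0')
+        True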
+ """ + version = parse(version_str) + assert version.release, f'failed to parse version {version_str}' + release = list(version.release) + release = release[:length] + if len(release) < length: + release = release + [0] * (length - len(release)) + if version.is_prerelease: + mapping = {'a': -3, 'b': -2, 'rc': -1} + val = -4 + # version.pre can be None + if version.pre: + if version.pre[0] not in mapping: + warnings.warn(f'unknown prerelease version {version.pre[0]}, ' + 'version checking may go wrong') + else: + val = mapping[version.pre[0]] + release.extend([val, version.pre[-1]]) + else: + release.extend([val, 0]) + + elif version.is_postrelease: + release.extend([1, version.post]) + else: + release.extend([0, 0]) + return tuple(release) + + +def _minimal_ext_cmd(cmd): + # construct minimal environment + env = {} + for k in ['SYSTEMROOT', 'PATH', 'HOME']: + v = os.environ.get(k) + if v is not None: + env[k] = v + # LANGUAGE is used on win32 + env['LANGUAGE'] = 'C' + env['LANG'] = 'C' + env['LC_ALL'] = 'C' + out = subprocess.Popen( + cmd, stdout=subprocess.PIPE, env=env).communicate()[0] + return out + + +def get_git_hash(fallback='unknown', digits=None): + """Get the git hash of the current repo. + + Args: + fallback (str, optional): The fallback string when git hash is + unavailable. Defaults to 'unknown'. + digits (int, optional): kept digits of the hash. Defaults to None, + meaning all digits are kept. + + Returns: + str: Git commit hash. + """ + + if digits is not None and not isinstance(digits, int): + raise TypeError('digits must be None or an integer') + + try: + out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD']) + sha = out.strip().decode('ascii') + if digits is not None: + sha = sha[:digits] + except OSError: + sha = fallback + + return sha diff --git a/mmcv/utils/visual.py b/mmcv/utils/visual.py new file mode 100644 index 0000000..f9718af --- /dev/null +++ b/mmcv/utils/visual.py @@ -0,0 +1,24 @@ +import torch +from torchvision.utils import make_grid +import torchvision +import matplotlib.pyplot as plt +import cv2 + + +def convert_color(img_path): + plt.figure() + img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE) + plt.imsave(img_path, img, cmap=plt.get_cmap('viridis')) + plt.close() + + +def save_tensor(tensor, path, pad_value=254.0,): + print('save_tensor', path) + tensor = tensor.to(torch.float).detach().cpu() + if tensor.type() == 'torch.BoolTensor': + tensor = tensor*255 + if len(tensor.shape) == 3: + tensor = tensor.unsqueeze(1) + tensor = make_grid(tensor, pad_value=pad_value, normalize=False).permute(1, 2, 0).numpy().copy() + torchvision.utils.save_image(torch.tensor(tensor).permute(2, 0, 1), path) + convert_color(path) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..e839ffe --- /dev/null +++ b/requirements.txt @@ -0,0 +1,48 @@ +cython +numba==0.48.0 # In order to speed up +addict +packaging +Pillow +matplotlib +regex;sys_platform=='win32' +pycocotools; platform_system == "Linux" +pycocotools-windows; platform_system == "Windows" +prettytable +six +terminaltables +lyft_dataset_sdk +nuscenes-devkit +scikit-image +tensorboard +cityscapesscripts +imagecorruptions +scipy +scikit-learn +open3d +networkx +ipython +opencv-python +seaborn +numpy==1.20.0 # In order to adapt numba +# metric related +einops +casadi +torchmetrics +motmetrics==1.1.3 # Fixed +trimesh +# pytest related +pytest +pytest-cov +pytest-runner +yapf==0.40.1 +flake8 +trimesh==2.35.39 +similaritymeasures +laspy==2.5.0 +lazrs==0.5.3 +py-trees==0.8.3 +simple_watchdog_timer 
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..e839ffe
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,48 @@
+cython
+numba==0.48.0  # pinned for speed-ups
+addict
+packaging
+Pillow
+matplotlib
+regex;sys_platform=='win32'
+pycocotools; platform_system == "Linux"
+pycocotools-windows; platform_system == "Windows"
+prettytable
+six
+terminaltables
+lyft_dataset_sdk
+nuscenes-devkit
+scikit-image
+tensorboard
+cityscapesscripts
+imagecorruptions
+scipy
+scikit-learn
+open3d
+networkx
+ipython
+opencv-python
+seaborn
+numpy==1.20.0  # pinned to match numba
+# metric related
+einops
+casadi
+torchmetrics
+motmetrics==1.1.3  # fixed version
+# pytest related
+pytest
+pytest-cov
+pytest-runner
+yapf==0.40.1
+flake8
+trimesh==2.35.39
+similaritymeasures
+laspy==2.5.0
+lazrs==0.5.3
+py-trees==0.8.3
+simple_watchdog_timer
+transforms3d
+tabulate
+ephem
+dictor
\ No newline at end of file
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..b759948
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,224 @@
+import glob
+import os
+import platform
+import re
+from packaging.version import parse as parse_version
+from setuptools import find_packages, setup
+import torch
+from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
+
+EXT_TYPE = 'pytorch'
+cmd_class = {'build_ext': BuildExtension}
+
+def make_cuda_ext(name,
+                  module,
+                  sources,
+                  sources_cuda=[],
+                  extra_args=[],
+                  extra_include_path=[]):
+
+    define_macros = []
+    extra_compile_args = {'cxx': [] + extra_args}
+
+    if torch.cuda.is_available():
+        define_macros += [('WITH_CUDA', None)]
+        extension = CUDAExtension
+        extra_compile_args['nvcc'] = extra_args + [
+            '-D__CUDA_NO_HALF_OPERATORS__',
+            '-D__CUDA_NO_HALF_CONVERSIONS__',
+            '-D__CUDA_NO_HALF2_OPERATORS__',
+        ]
+        sources += sources_cuda
+    else:
+        print('Compiling {} without CUDA'.format(name))
+        extension = CppExtension
+
+    return extension(
+        name='{}.{}'.format(module, name),
+        sources=[os.path.join(*module.split('.'), p) for p in sources],
+        include_dirs=extra_include_path,
+        define_macros=define_macros,
+        extra_compile_args=extra_compile_args)
+
+def parse_requirements(fname='requirements.txt', with_version=True):
+    """Parse the package dependencies listed in a requirements file,
+    optionally stripping specific versioning information.
+
+    Args:
+        fname (str): path to requirements file
+        with_version (bool, default=True): if True include version specs
+
+    Returns:
+        List[str]: list of requirements items
+
+    CommandLine:
+        python -c "import setup; print(setup.parse_requirements())"
+    """
+    import sys
+    from os.path import exists
+    require_fpath = fname
+
+    def parse_line(line):
+        """Parse information from a line in a requirements text file."""
+        if line.startswith('-r '):
+            # Allow specifying requirements in other files
+            target = line.split(' ')[1]
+            for info in parse_require_file(target):
+                yield info
+        else:
+            info = {'line': line}
+            if line.startswith('-e '):
+                info['package'] = line.split('#egg=')[1]
+            else:
+                # Remove versioning from the package
+                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
+                parts = re.split(pat, line, maxsplit=1)
+                parts = [p.strip() for p in parts]
+
+                info['package'] = parts[0]
+                if len(parts) > 1:
+                    op, rest = parts[1:]
+                    if ';' in rest:
+                        # Handle platform specific dependencies
+                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
+                        version, platform_deps = map(str.strip,
+                                                     rest.split(';'))
+                        info['platform_deps'] = platform_deps
+                    else:
+                        version = rest  # NOQA
+                    info['version'] = (op, version)
+            yield info
+
+    def parse_require_file(fpath):
+        with open(fpath, 'r') as f:
+            for line in f.readlines():
+                line = line.strip()
+                if line and not line.startswith('#'):
+                    for info in parse_line(line):
+                        yield info
+
+    def gen_packages_items():
+        if exists(require_fpath):
+            for info in parse_require_file(require_fpath):
+                parts = [info['package']]
+                if with_version and 'version' in info:
+                    parts.extend(info['version'])
+                if not sys.version.startswith('3.4'):
+                    # apparently package_deps are broken in 3.4
+                    platform_deps = info.get('platform_deps')
+                    if platform_deps is not None:
+                        parts.append(';' + platform_deps)
+                item = ''.join(parts)
+                yield item
+
+    packages = list(gen_packages_items())
+    return packages
+
+def get_extensions():
+    extensions = []
+
+    if EXT_TYPE == 'pytorch':
+        ext_name = 'mmcv._ext'
+        # 
prevent ninja from using too many resources + try: + import psutil + num_cpu = len(psutil.Process().cpu_affinity()) + cpu_use = max(4, num_cpu - 1) + except (ModuleNotFoundError, AttributeError): + cpu_use = 4 + + os.environ.setdefault('MAX_JOBS', str(cpu_use)) + define_macros = [] + + extra_compile_args = {'cxx': []} + if platform.system() != 'Windows': + if parse_version(torch.__version__) <= parse_version('1.12.1'): + extra_compile_args['cxx'] = ['-std=c++14'] + else: + extra_compile_args['cxx'] = ['-std=c++17'] + else: + if parse_version(torch.__version__) <= parse_version('1.12.1'): + extra_compile_args['cxx'] = ['/std:c++14'] + else: + extra_compile_args['cxx'] = ['/std:c++17'] + + include_dirs = [] + + if torch.cuda.is_available(): + define_macros += [('MMCV_WITH_CUDA', None)] + cuda_args = os.getenv('MMCV_CUDA_ARGS') + extra_compile_args['nvcc'] = [cuda_args] if cuda_args else [] + op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + \ + glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp') + \ + glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cu') + \ + glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cpp') + extension = CUDAExtension + include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common')) + include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/cuda')) + else: + print(f'Compiling {ext_name} without CUDA') + op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + \ + glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp') + extension = CppExtension + include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common')) + + if 'nvcc' in extra_compile_args and platform.system() != 'Windows': + if parse_version(torch.__version__) <= parse_version('1.12.1'): + extra_compile_args['nvcc'] += ['-std=c++14'] + else: + extra_compile_args['nvcc'] += ['-std=c++17'] + + ext_ops = extension( + name=ext_name, + sources=op_files, + include_dirs=include_dirs, + define_macros=define_macros, + extra_compile_args=extra_compile_args) + extensions.append(ext_ops) + + return extensions + +setup( + name='mmcv', + version='0.0.1', + description='OpenMMLab Computer Vision Foundation', + keywords='computer vision', + packages=[ + *find_packages(include=('mmcv', "mmcv.*")), + *find_packages(include=('adzoo', "adzoo.*")), + ], + include_package_data=True, + classifiers=[ + 'Development Status :: 4 - Beta', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Topic :: Utilities', + ], + url='https://github.com/open-mmlab/mmcv', + author='MMCV Contributors', + author_email='openmmlab@gmail.com', + install_requires=parse_requirements(), + ext_modules= get_extensions() + [ + make_cuda_ext( + name='iou3d_cuda', + module='mmcv.ops.iou3d_det', + sources=[ + 'src/iou3d.cpp', + 'src/iou3d_kernel.cu', + ]), + make_cuda_ext( + name='roiaware_pool3d_ext', + module='mmcv.ops.roiaware_pool3d', + sources=[ + 'src/roiaware_pool3d.cpp', + 'src/points_in_boxes_cpu.cpp', + ], + sources_cuda=[ + 'src/roiaware_pool3d_kernel.cu', + 'src/points_in_boxes_cuda.cu', + ]), + ], + cmdclass=cmd_class, + zip_safe=False) diff --git a/team_code/pid_controller.py b/team_code/pid_controller.py new file mode 100644 index 0000000..af43e3c --- /dev/null +++ b/team_code/pid_controller.py @@ -0,0 +1,113 @@ +from collections import deque +import numpy as np + +class PID(object): + def __init__(self, K_P=1.0, K_I=0.0, K_D=0.0, n=20): + self._K_P = K_P + self._K_I = K_I + self._K_D = K_D + + self._window = deque([0 for _ in range(n)], 
maxlen=n)
+        self._max = 0.0
+        self._min = 0.0
+
+    def step(self, error):
+        self._window.append(error)
+        # _max/_min only track the error envelope for inspection; they do
+        # not influence the controller output below.
+        self._max = max(self._max, abs(error))
+        self._min = -abs(self._max)
+
+        if len(self._window) >= 2:
+            # integral term ~ mean of the error window,
+            # derivative term ~ last finite difference
+            integral = np.mean(self._window)
+            derivative = (self._window[-1] - self._window[-2])
+        else:
+            integral = 0.0
+            derivative = 0.0
+
+        return self._K_P * error + self._K_I * integral + self._K_D * derivative
+
+
+class PIDController(object):
+
+    def __init__(self, turn_KP=0.75, turn_KI=0.75, turn_KD=0.3, turn_n=40,
+                 speed_KP=5.0, speed_KI=0.5, speed_KD=1.0, speed_n=40,
+                 max_throttle=0.75, brake_speed=0.4, brake_ratio=1.1,
+                 clip_delta=0.25, aim_dist=4.0, angle_thresh=0.3, dist_thresh=10):
+
+        self.turn_controller = PID(K_P=turn_KP, K_I=turn_KI, K_D=turn_KD, n=turn_n)
+        self.speed_controller = PID(K_P=speed_KP, K_I=speed_KI, K_D=speed_KD, n=speed_n)
+        self.max_throttle = max_throttle
+        self.brake_speed = brake_speed
+        self.brake_ratio = brake_ratio
+        self.clip_delta = clip_delta
+        self.aim_dist = aim_dist
+        self.angle_thresh = angle_thresh
+        self.dist_thresh = dist_thresh
+
+    def control_pid(self, waypoints, speed, target):
+        ''' Predicts vehicle control with a PID controller.
+        Args:
+            waypoints (tensor): output of self.plan()
+            speed (tensor): speedometer input
+            target (tensor): target point in the local (ego) frame
+        '''
+
+        # iterate over vectors between predicted waypoints
+        num_pairs = len(waypoints) - 1
+        best_norm = 1e5
+        desired_speed = 0
+        aim = waypoints[0]
+        for i in range(num_pairs):
+            # magnitude of vectors, used for speed
+            desired_speed += np.linalg.norm(
+                waypoints[i+1] - waypoints[i]) * 2.0 / num_pairs
+
+            # norm of vector midpoints, used for steering
+            norm = np.linalg.norm((waypoints[i+1] + waypoints[i]) / 2.0)
+            if abs(self.aim_dist-best_norm) > abs(self.aim_dist-norm):
+                aim = waypoints[i]
+                best_norm = norm
+
+        aim_last = waypoints[-1] - waypoints[-2]
+
+        angle = np.degrees(np.pi / 2 - np.arctan2(aim[1], aim[0])) / 90
+        angle_last = np.degrees(np.pi / 2 - np.arctan2(aim_last[1], aim_last[0])) / 90
+        angle_target = np.degrees(np.pi / 2 - np.arctan2(target[1], target[0])) / 90
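+        # NOTE: each angle above is the deviation from "straight ahead",
+        # mapped from degrees to roughly [-1, 1] by the division by 90, so
+        # the steering PID operates on a unit-free error signal.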
+        # choice of point to aim for steering, removing outlier predictions
+        # use target point if it has a smaller angle or if error is large
+        # predicted point otherwise
+        # (reduces noise on e.g. straight roads, helps with sudden turn commands)
+        use_target_to_aim = np.abs(angle_target) < np.abs(angle)
+        use_target_to_aim = use_target_to_aim or (np.abs(angle_target-angle_last) > self.angle_thresh and target[1] < self.dist_thresh)
+        if use_target_to_aim:
+            angle_final = angle_target
+        else:
+            angle_final = angle
+
+        steer = self.turn_controller.step(angle_final)
+        steer = np.clip(steer, -1.0, 1.0)
+
+        brake = desired_speed < self.brake_speed or (speed / desired_speed) > self.brake_ratio
+
+        delta = np.clip(desired_speed - speed, 0.0, self.clip_delta)
+        throttle = self.speed_controller.step(delta)
+        throttle = np.clip(throttle, 0.0, self.max_throttle)
+        throttle = throttle if not brake else 0.0
+
+        metadata = {
+            'speed': float(speed.astype(np.float64)),
+            'steer': float(steer),
+            'throttle': float(throttle),
+            'brake': float(brake),
+            'wp_4': tuple(waypoints[3].astype(np.float64)),
+            'wp_3': tuple(waypoints[2].astype(np.float64)),
+            'wp_2': tuple(waypoints[1].astype(np.float64)),
+            'wp_1': tuple(waypoints[0].astype(np.float64)),
+            'aim': tuple(aim.astype(np.float64)),
+            'target': tuple(target.astype(np.float64)),
+            'desired_speed': float(desired_speed.astype(np.float64)),
+            'angle': float(angle.astype(np.float64)),
+            'angle_last': float(angle_last.astype(np.float64)),
+            'angle_target': float(angle_target.astype(np.float64)),
+            'angle_final': float(angle_final.astype(np.float64)),
+            'delta': float(delta.astype(np.float64)),
+        }
+
+        return steer, throttle, brake, metadata
\ No newline at end of file
diff --git a/team_code/planner.py b/team_code/planner.py
new file mode 100644
index 0000000..ef3d4c6
--- /dev/null
+++ b/team_code/planner.py
@@ -0,0 +1,128 @@
+import os
+from collections import deque
+
+import numpy as np
+import math
+EARTH_RADIUS_EQUA = 6378137.0
+
+
+DEBUG = int(os.environ.get('HAS_DISPLAY', 0))
+
+
+class Plotter(object):
+    def __init__(self, size):
+        self.size = size
+        self.clear()
+        self.title = str(self.size)
+
+    def clear(self):
+        from PIL import Image, ImageDraw
+
+        self.img = Image.fromarray(np.zeros((self.size, self.size, 3), dtype=np.uint8))
+        self.draw = ImageDraw.Draw(self.img)
+
+    def dot(self, pos, node, color=(255, 255, 255), r=2):
+        x, y = 5.5 * (pos - node)
+        x += self.size / 2
+        y += self.size / 2
+
+        self.draw.ellipse((x-r, y-r, x+r, y+r), color)
+
+    def show(self):
+        if not DEBUG:
+            return
+
+        import cv2
+
+        cv2.imshow(self.title, cv2.cvtColor(np.array(self.img), cv2.COLOR_BGR2RGB))
+        cv2.waitKey(1)
+
+
+class RoutePlanner(object):
+    def __init__(self, min_distance, max_distance, debug_size=256, lat_ref=42.0, lon_ref=2.0):
+        self.route = deque()
+        self.min_distance = min_distance
+        self.max_distance = max_distance
+
+        # self.mean = np.array([49.0, 8.0])  # for carla 9.9
+        # self.scale = np.array([111324.60662786, 73032.1570362])  # for carla 9.9
+        self.mean = np.array([0.0, 0.0])  # for carla 9.10
+        self.scale = np.array([111324.60662786, 111319.490945])  # for carla 9.10
+
+        self.debug = Plotter(debug_size)
+        # self.lat_ref, self.lon_ref = self._get_latlon_ref()
+        self.lat_ref = lat_ref
+        self.lon_ref = lon_ref
+
+    def set_route(self, global_plan, gps=False, global_plan_world=None):
+        self.route.clear()
+
+        if global_plan_world:
+            for (pos, cmd), (pos_world, _) in zip(global_plan, global_plan_world):
+                if gps:
+                    pos = self.gps_to_location(np.array([pos['lat'], pos['lon']]))
+                    # pos -= self.mean
+                    # pos *= self.scale
+                else:
+                    pos = np.array([pos.location.x, pos.location.y])
+                    # pos -= self.mean
+
+                self.route.append((pos, cmd, pos_world))
+        else:
+            for pos, cmd in global_plan:
+                if gps:
+                    pos = self.gps_to_location(np.array([pos['lat'], pos['lon']]))
+                    # pos -= self.mean
+                    # pos *= self.scale
+                else:
+                    pos = np.array([pos.location.x, pos.location.y])
+                    # pos -= self.mean
+
+                self.route.append((pos, cmd))
+
+    def run_step(self, gps):
+        self.debug.clear()
+
+        if len(self.route) == 1:
+            return self.route[0]
+
+        to_pop = 0
+        farthest_in_range = -np.inf
+        cumulative_distance = 0.0
+
+        for i in range(1, len(self.route)):
+            if cumulative_distance > self.max_distance:
+                break
+
+            cumulative_distance += np.linalg.norm(self.route[i][0] - self.route[i-1][0])
+            distance = np.linalg.norm(self.route[i][0] - gps)
+
+            if distance <= self.min_distance and distance > farthest_in_range:
+                farthest_in_range = distance
+                to_pop = i
+
+            r = 255 * int(distance > self.min_distance)
+            g = 255 * int(self.route[i][1].value == 4)
+            b = 255
+            self.debug.dot(gps, self.route[i][0], (r, g, b))
+
+        for _ in range(to_pop):
+            if len(self.route) > 2:
+                self.route.popleft()
+
+        self.debug.dot(gps, self.route[0][0], (0, 255, 0))
+        self.debug.dot(gps, self.route[1][0], (255, 0, 0))
+        self.debug.dot(gps, gps, (0, 0, 255))
+        self.debug.show()
+
+        return self.route[1]
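+
+    # The conversion below inverts CARLA's spherical-Mercator GNSS encoding:
+    # (lat, lon) -> metric (x, y) in the world frame, with (lat_ref, lon_ref)
+    # anchoring the map origin; EARTH_RADIUS_EQUA is the WGS84 equatorial radius.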
+    def gps_to_location(self, gps):
+        # gps content: numpy array: [lat, lon, alt]
+        lat, lon = gps
+        scale = math.cos(self.lat_ref * math.pi / 180.0)
+        my = math.log(math.tan((lat+90) * math.pi / 360.0)) * (EARTH_RADIUS_EQUA * scale)
+        mx = (lon * (math.pi * EARTH_RADIUS_EQUA * scale)) / 180.0
+        y = scale * EARTH_RADIUS_EQUA * math.log(math.tan((90.0 + self.lat_ref) * math.pi / 360.0)) - my
+        x = mx - scale * self.lon_ref * math.pi * EARTH_RADIUS_EQUA / 180.0
+        return np.array([x, y])
\ No newline at end of file
diff --git a/team_code/uniad_b2d_agent.py b/team_code/uniad_b2d_agent.py
new file mode 100644
index 0000000..efc2816
--- /dev/null
+++ b/team_code/uniad_b2d_agent.py
@@ -0,0 +1,433 @@
+import os
+import json
+import datetime
+import pathlib
+import time
+import cv2
+import carla
+from collections import deque
+import math
+from collections import OrderedDict
+import torch
+import numpy as np
+from PIL import Image
+from torchvision import transforms as T
+from Bench2DriveZoo.team_code.pid_controller import PIDController
+from Bench2DriveZoo.team_code.planner import RoutePlanner
+from leaderboard.autoagents import autonomous_agent
+from mmcv import Config
+from mmcv.models import build_model
+from mmcv.utils import (get_dist_info, init_dist, load_checkpoint, wrap_fp16_model)
+from mmcv.datasets.pipelines import Compose
+from mmcv.parallel.collate import collate as mm_collate_to_batch_form
+from mmcv.core.bbox import get_box_type
+from pyquaternion import Quaternion
+from scipy.optimize import fsolve
+SAVE_PATH = os.environ.get('SAVE_PATH', None)
+IS_BENCH2DRIVE = os.environ.get('IS_BENCH2DRIVE', None)
+
+def get_entry_point():
+    return 'UniadAgent'
+
+class UniadAgent(autonomous_agent.AutonomousAgent):
+    def setup(self, path_to_conf_file):
+        self.track = autonomous_agent.Track.SENSORS
+        self.steer_step = 0
+        self.last_moving_status = 0
+        self.last_moving_step = -1
+        self.last_steers = deque()
+        self.pidcontroller = PIDController()
+        if IS_BENCH2DRIVE:
+            self.save_name = path_to_conf_file.split('+')[-1]
+            self.config_path = path_to_conf_file.split('+')[0]
+        else:
+            self.config_path = path_to_conf_file
+            now = datetime.datetime.now()
+            self.save_name = '_'.join(map(lambda x: '%02d' % x, (now.month, now.day, now.hour, now.minute, now.second)))
+        self.step = -1
+        self.wall_start = time.time()
+        self.initialized = False
+        cfg = 
Config.fromfile('Bench2DriveZoo/adzoo/uniad/configs/stage2_e2e/base_e2e_b2d.py') + cfg.model['motion_head']['anchor_info_path'] = os.path.join('Bench2DriveZoo',cfg.model['motion_head']['anchor_info_path']) + if hasattr(cfg, 'plugin'): + if cfg.plugin: + import importlib + if hasattr(cfg, 'plugin_dir'): + plugin_dir = cfg.plugin_dir + plugin_dir = os.path.join("Bench2DriveZoo", plugin_dir) + _module_dir = os.path.dirname(plugin_dir) + _module_dir = _module_dir.split('/') + _module_path = _module_dir[0] + for m in _module_dir[1:]: + _module_path = _module_path + '.' + m + print(_module_path) + plg_lib = importlib.import_module(_module_path) + + self.model = build_model(cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg')) + checkpoint = load_checkpoint(self.model, self.config_path, map_location='cpu', strict=True) + self.model.cuda() + self.model.eval() + self.inference_only_pipeline = [] + for inference_only_pipeline in cfg.inference_only_pipeline: + if inference_only_pipeline["type"] not in ['LoadMultiViewImageFromFilesInCeph']: + self.inference_only_pipeline.append(inference_only_pipeline) + self.inference_only_pipeline = Compose(self.inference_only_pipeline) + ckpt = torch.load(self.config_path) + ckpt = ckpt["state_dict"] + new_state_dict = OrderedDict() + for key, value in ckpt.items(): + new_key = key.replace("model.","") + new_state_dict[new_key] = value + self.takeover = False + self.stop_time = 0 + self.takeover_time = 0 + self.save_path = None + self._im_transform = T.Compose([T.ToTensor(), T.Normalize(mean=[0.485,0.456,0.406], std=[0.229,0.224,0.225])]) + self.last_steers = deque() + self.lat_ref, self.lon_ref = 42.0, 2.0 + control = carla.VehicleControl() + control.steer = 0.0 + control.throttle = 0.0 + control.brake = 0.0 + self.prev_control = control + if SAVE_PATH is not None: + now = datetime.datetime.now() + # string = pathlib.Path(os.environ['ROUTES']).stem + '_' + string = self.save_name + self.save_path = pathlib.Path(os.environ['SAVE_PATH']) / string + self.save_path.mkdir(parents=True, exist_ok=False) + (self.save_path / 'rgb_front').mkdir() + (self.save_path / 'rgb_front_right').mkdir() + (self.save_path / 'rgb_front_left').mkdir() + (self.save_path / 'rgb_back').mkdir() + (self.save_path / 'rgb_back_right').mkdir() + (self.save_path / 'rgb_back_left').mkdir() + (self.save_path / 'meta').mkdir() + (self.save_path / 'bev').mkdir() + + # write extrinsics directly + self.lidar2img = { + 'CAM_FRONT':np.array([[ 1.14251841e+03, 8.00000000e+02, 0.00000000e+00, -9.52000000e+02], + [ 0.00000000e+00, 4.50000000e+02, -1.14251841e+03, -8.09704417e+02], + [ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00, -1.19000000e+00], + [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + 'CAM_FRONT_LEFT':np.array([[ 6.03961325e-14, 1.39475744e+03, 0.00000000e+00, -9.20539908e+02], + [-3.68618420e+02, 2.58109396e+02, -1.14251841e+03, -6.47296750e+02], + [-8.19152044e-01, 5.73576436e-01, 0.00000000e+00, -8.29094072e-01], + [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + 'CAM_FRONT_RIGHT':np.array([[ 1.31064327e+03, -4.77035138e+02, 0.00000000e+00,-4.06010608e+02], + [ 3.68618420e+02, 2.58109396e+02, -1.14251841e+03,-6.47296750e+02], + [ 8.19152044e-01, 5.73576436e-01, 0.00000000e+00,-8.29094072e-01], + [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + 'CAM_BACK':np.array([[-1.00000000e+00, -1.22464680e-16, 0.00000000e+00, -1.97168135e-16], + [ 0.00000000e+00, 0.00000000e+00, -1.00000000e+00, -2.40000000e-01], + [ 
1.22464680e-16, -1.00000000e+00, 0.00000000e+00, -1.61000000e+00], + [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + 'CAM_BACK_LEFT':np.array([[-1.14251841e+03, 8.00000000e+02, 0.00000000e+00, -6.84385123e+02], + [-4.22861679e+02, -1.53909064e+02, -1.14251841e+03, -4.96004706e+02], + [-9.39692621e-01, -3.42020143e-01, 0.00000000e+00, -4.92889531e-01], + [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + + 'CAM_BACK_RIGHT': np.array([[ 3.60989788e+02, -1.34723223e+03, 0.00000000e+00, -1.04238127e+02], + [ 4.22861679e+02, -1.53909064e+02, -1.14251841e+03, -4.96004706e+02], + [ 9.39692621e-01, -3.42020143e-01, 0.00000000e+00, -4.92889531e-01], + [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]) + } + self.lidar2cam = { + 'CAM_FRONT':np.array([[ 1. , 0. , 0. , 0. ], + [ 0. , 0. , -1. , -0.24], + [ 0. , 1. , 0. , -1.19], + [ 0. , 0. , 0. , 1. ]]), + 'CAM_FRONT_LEFT':np.array([[ 0.57357644, 0.81915204, 0. , -0.22517331], + [ 0. , 0. , -1. , -0.24 ], + [-0.81915204, 0.57357644, 0. , -0.82909407], + [ 0. , 0. , 0. , 1. ]]), + 'CAM_FRONT_RIGHT':np.array([[ 0.57357644, -0.81915204, 0. , 0.22517331], + [ 0. , 0. , -1. , -0.24 ], + [ 0.81915204, 0.57357644, 0. , -0.82909407], + [ 0. , 0. , 0. , 1. ]]), + 'CAM_BACK':np.array([[-1. , 0., 0., 0. ], + [ 0. , 0., -1., -0.24], + [ 0. , -1., 0., -1.61], + [ 0. , 0., 0., 1. ]]), + + 'CAM_BACK_LEFT':np.array([[-0.34202014, 0.93969262, 0. , -0.25388956], + [ 0. , 0. , -1. , -0.24 ], + [-0.93969262, -0.34202014, 0. , -0.49288953], + [ 0. , 0. , 0. , 1. ]]), + + 'CAM_BACK_RIGHT':np.array([[-0.34202014, -0.93969262, 0. , 0.25388956], + [ 0. , 0. , -1. , -0.24 ], + [ 0.93969262, -0.34202014 , 0. , -0.49288953], + [ 0. , 0. , 0. , 1. ]]) + } + self.lidar2ego = np.array([[ 0. , 1. , 0. , -0.39], + [-1. , 0. , 0. , 0. ], + [ 0. , 0. , 1. , 1.84], + [ 0. , 0. , 0. , 1. 
]]) + + topdown_extrinsics = np.array([[0.0, -0.0, -1.0, 50.0], [0.0, 1.0, -0.0, 0.0], [1.0, -0.0, 0.0, -0.0], [0.0, 0.0, 0.0, 1.0]]) + unreal2cam = np.array([[0,1,0,0], [0,0,-1,0], [1,0,0,0], [0,0,0,1]]) + self.coor2topdown = unreal2cam @ topdown_extrinsics + topdown_intrinsics = np.array([[548.993771650447, 0.0, 256.0, 0], [0.0, 548.993771650447, 256.0, 0], [0.0, 0.0, 1.0, 0], [0, 0, 0, 1.0]]) + self.coor2topdown = topdown_intrinsics @ self.coor2topdown + + def _init(self): + + try: + locx, locy = self._global_plan_world_coord[0][0].location.x, self._global_plan_world_coord[0][0].location.y + lon, lat = self._global_plan[0][0]['lon'], self._global_plan[0][0]['lat'] + EARTH_RADIUS_EQUA = 6378137.0 + def equations(vars): + x, y = vars + eq1 = lon * math.cos(x * math.pi / 180) - (locx * x * 180) / (math.pi * EARTH_RADIUS_EQUA) - math.cos(x * math.pi / 180) * y + eq2 = math.log(math.tan((lat + 90) * math.pi / 360)) * EARTH_RADIUS_EQUA * math.cos(x * math.pi / 180) + locy - math.cos(x * math.pi / 180) * EARTH_RADIUS_EQUA * math.log(math.tan((90 + x) * math.pi / 360)) + return [eq1, eq2] + initial_guess = [0, 0] + solution = fsolve(equations, initial_guess) + self.lat_ref, self.lon_ref = solution[0], solution[1] + except Exception as e: + print(e, flush=True) + self.lat_ref, self.lon_ref = 0, 0 + self._route_planner = RoutePlanner(4.0, 50.0, lat_ref=self.lat_ref, lon_ref=self.lon_ref) + self._route_planner.set_route(self._global_plan, True) + self.initialized = True + + + + def sensors(self): + sensors =[ + # camera rgb + { + 'type': 'sensor.camera.rgb', + 'x': 0.80, 'y': 0.0, 'z': 1.60, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'width': 1600, 'height': 900, 'fov': 70, + 'id': 'CAM_FRONT' + }, + { + 'type': 'sensor.camera.rgb', + 'x': 0.27, 'y': -0.55, 'z': 1.60, + 'roll': 0.0, 'pitch': 0.0, 'yaw': -55.0, + 'width': 1600, 'height': 900, 'fov': 70, + 'id': 'CAM_FRONT_LEFT' + }, + { + 'type': 'sensor.camera.rgb', + 'x': 0.27, 'y': 0.55, 'z': 1.60, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 55.0, + 'width': 1600, 'height': 900, 'fov': 70, + 'id': 'CAM_FRONT_RIGHT' + }, + { + 'type': 'sensor.camera.rgb', + 'x': -2.0, 'y': 0.0, 'z': 1.60, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 180.0, + 'width': 1600, 'height': 900, 'fov': 110, + 'id': 'CAM_BACK' + }, + { + 'type': 'sensor.camera.rgb', + 'x': -0.32, 'y': -0.55, 'z': 1.60, + 'roll': 0.0, 'pitch': 0.0, 'yaw': -110.0, + 'width': 1600, 'height': 900, 'fov': 70, + 'id': 'CAM_BACK_LEFT' + }, + { + 'type': 'sensor.camera.rgb', + 'x': -0.32, 'y': 0.55, 'z': 1.60, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 110.0, + 'width': 1600, 'height': 900, 'fov': 70, + 'id': 'CAM_BACK_RIGHT' + }, + # imu + { + 'type': 'sensor.other.imu', + 'x': -1.4, 'y': 0.0, 'z': 0.0, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'sensor_tick': 0.05, + 'id': 'IMU' + }, + # gps + { + 'type': 'sensor.other.gnss', + 'x': -1.4, 'y': 0.0, 'z': 0.0, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'sensor_tick': 0.01, + 'id': 'GPS' + }, + # speed + { + 'type': 'sensor.speedometer', + 'reading_frequency': 20, + 'id': 'SPEED' + }, + + ] + + if IS_BENCH2DRIVE: + sensors += [ + { + 'type': 'sensor.camera.rgb', + 'x': 0.0, 'y': 0.0, 'z': 50.0, + 'roll': 0.0, 'pitch': -90.0, 'yaw': 0.0, + 'width': 512, 'height': 512, 'fov': 5 * 10.0, + 'id': 'bev' + }] + return sensors + + def tick(self, input_data): + self.step += 1 + encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 20] + imgs = {} + for cam in ['CAM_FRONT','CAM_FRONT_LEFT','CAM_FRONT_RIGHT','CAM_BACK','CAM_BACK_LEFT','CAM_BACK_RIGHT']: + img = 
cv2.cvtColor(input_data[cam][1][:, :, :3], cv2.COLOR_BGR2RGB)
+            _, img = cv2.imencode('.jpg', img, encode_param)
+            img = cv2.imdecode(img, cv2.IMREAD_COLOR)
+            imgs[cam] = img
+        bev = cv2.cvtColor(input_data['bev'][1][:, :, :3], cv2.COLOR_BGR2RGB)
+        gps = input_data['GPS'][1][:2]
+        speed = input_data['SPEED'][1]['speed']
+        compass = input_data['IMU'][1][-1]
+        acceleration = input_data['IMU'][1][:3]
+        angular_velocity = input_data['IMU'][1][3:6]
+        pos = self.gps_to_location(gps)
+        near_node, near_command = self._route_planner.run_step(pos)
+        if math.isnan(compass):  # the compass can send NaN for a few frames
+            compass = 0.0
+            acceleration = np.zeros(3)
+            angular_velocity = np.zeros(3)
+
+        result = {
+            'imgs': imgs,
+            'gps': gps,
+            'pos': pos,
+            'speed': speed,
+            'compass': compass,
+            'bev': bev,
+            'acceleration': acceleration,
+            'angular_velocity': angular_velocity,
+            'command_near': near_command,
+            'command_near_xy': near_node
+        }
+
+        return result
+
+    @torch.no_grad()
+    def run_step(self, input_data, timestamp):
+        if not self.initialized:
+            self._init()
+        tick_data = self.tick(input_data)
+        results = {}
+        results['lidar2img'] = []
+        results['lidar2cam'] = []
+        results['img'] = []
+        results['folder'] = ' '
+        results['scene_token'] = ' '
+        results['frame_idx'] = 0
+        results['timestamp'] = self.step / 20
+        results['box_type_3d'], _ = get_box_type('LiDAR')
+
+        for cam in ['CAM_FRONT','CAM_FRONT_LEFT','CAM_FRONT_RIGHT','CAM_BACK','CAM_BACK_LEFT','CAM_BACK_RIGHT']:
+            results['lidar2img'].append(self.lidar2img[cam])
+            results['lidar2cam'].append(self.lidar2cam[cam])
+            results['img'].append(tick_data['imgs'][cam])
+        results['lidar2img'] = np.stack(results['lidar2img'], axis=0)
+        results['lidar2cam'] = np.stack(results['lidar2cam'], axis=0)
+
+        raw_theta = tick_data['compass'] if not np.isnan(tick_data['compass']) else 0
+        ego_theta = -raw_theta + np.pi/2
+        rotation = list(Quaternion(axis=[0, 0, 1], radians=ego_theta))
+
+        can_bus = np.zeros(18)
+        can_bus[0] = tick_data['pos'][0]
+        can_bus[1] = -tick_data['pos'][1]
+        can_bus[3:7] = rotation
+        can_bus[7] = tick_data['speed']
+        can_bus[10:13] = tick_data['acceleration']
+        can_bus[11] *= -1
+        can_bus[13:16] = -tick_data['angular_velocity']
+        can_bus[16] = ego_theta
+        can_bus[17] = ego_theta / np.pi * 180
+        results['can_bus'] = can_bus
+        command = tick_data['command_near']
+        if command < 0:
+            command = 4
+        command -= 1
+        results['command'] = command
+
+        theta_to_lidar = raw_theta
+        command_near_xy = np.array([tick_data['command_near_xy'][0]-can_bus[0], -tick_data['command_near_xy'][1]-can_bus[1]])
+        rotation_matrix = np.array([[np.cos(theta_to_lidar), -np.sin(theta_to_lidar)], [np.sin(theta_to_lidar), np.cos(theta_to_lidar)]])
+        local_command_xy = rotation_matrix @ command_near_xy
+
+        ego2world = np.eye(4)
+        ego2world[0:3,0:3] = Quaternion(axis=[0, 0, 1], radians=ego_theta).rotation_matrix
+        # the ego translation belongs in the last column of the homogeneous matrix
+        ego2world[0:2,3] = can_bus[0:2]
+        lidar2global = ego2world @ self.lidar2ego
+        results['l2g_r_mat'] = lidar2global[0:3,0:3]
+        results['l2g_t'] = lidar2global[0:3,3]
+        stacked_imgs = np.stack(results['img'], axis=-1)
+        results['img_shape'] = stacked_imgs.shape
+        results['ori_shape'] = stacked_imgs.shape
+        results['pad_shape'] = stacked_imgs.shape
+        results = self.inference_only_pipeline(results)
+        self.device = "cuda"
+        input_data_batch = mm_collate_to_batch_form([results], samples_per_gpu=1)
+        for key, data in input_data_batch.items():
+            if key != 'img_metas':
+                if torch.is_tensor(data[0]):
+                    data[0] = data[0].to(self.device)
+        output_data_batch = 
self.model(input_data_batch, return_loss=False, rescale=True) + out_truck = output_data_batch[0]['planning']['result_planning']['sdc_traj'][0].cpu().numpy() + steer_traj, throttle_traj, brake_traj, metadata_traj = self.pidcontroller.control_pid(out_truck, tick_data['speed'], local_command_xy) + if brake_traj < 0.05: brake_traj = 0.0 + if throttle_traj > brake_traj: brake_traj = 0.0 + if tick_data['speed']>5: + throttle_traj = 0 + control = carla.VehicleControl() + self.pid_metadata = metadata_traj + self.pid_metadata['agent'] = 'only_traj' + control.steer = np.clip(float(steer_traj), -1, 1) + control.throttle = np.clip(float(throttle_traj), 0, 0.75) + control.brake = np.clip(float(brake_traj), 0, 1) + self.pid_metadata['steer'] = control.steer + self.pid_metadata['throttle'] = control.throttle + self.pid_metadata['brake'] = control.brake + self.pid_metadata['steer_traj'] = float(steer_traj) + self.pid_metadata['throttle_traj'] = float(throttle_traj) + self.pid_metadata['brake_traj'] = float(brake_traj) + self.pid_metadata['plan'] = out_truck.tolist() + if SAVE_PATH is not None and self.step % 10 == 0: + self.save(tick_data) + self.prev_control = control + return control + + def save(self, tick_data): + frame = self.step // 10 + Image.fromarray(tick_data['imgs']['CAM_FRONT']).save(self.save_path / 'rgb_front' / ('%04d.png' % frame)) + Image.fromarray(tick_data['imgs']['CAM_FRONT_LEFT']).save(self.save_path / 'rgb_front_left' / ('%04d.png' % frame)) + Image.fromarray(tick_data['imgs']['CAM_FRONT_RIGHT']).save(self.save_path / 'rgb_front_right' / ('%04d.png' % frame)) + Image.fromarray(tick_data['imgs']['CAM_BACK']).save(self.save_path / 'rgb_back' / ('%04d.png' % frame)) + Image.fromarray(tick_data['imgs']['CAM_BACK_LEFT']).save(self.save_path / 'rgb_back_left' / ('%04d.png' % frame)) + Image.fromarray(tick_data['imgs']['CAM_BACK_RIGHT']).save(self.save_path / 'rgb_back_right' / ('%04d.png' % frame)) + Image.fromarray(tick_data['bev']).save(self.save_path / 'bev' / ('%04d.png' % frame)) + outfile = open(self.save_path / 'meta' / ('%04d.json' % frame), 'w') + json.dump(self.pid_metadata, outfile, indent=4) + outfile.close() + + def destroy(self): + del self.model + torch.cuda.empty_cache() + + def gps_to_location(self, gps): + EARTH_RADIUS_EQUA = 6378137.0 + # gps content: numpy array: [lat, lon, alt] + lat, lon = gps + scale = math.cos(self.lat_ref * math.pi / 180.0) + my = math.log(math.tan((lat+90) * math.pi / 360.0)) * (EARTH_RADIUS_EQUA * scale) + mx = (lon * (math.pi * EARTH_RADIUS_EQUA * scale)) / 180.0 + y = scale * EARTH_RADIUS_EQUA * math.log(math.tan((90.0 + self.lat_ref) * math.pi / 360.0)) - my + x = mx - scale * self.lon_ref * math.pi * EARTH_RADIUS_EQUA / 180.0 + return np.array([x, y]) \ No newline at end of file diff --git a/team_code/vad_b2d_agent.py b/team_code/vad_b2d_agent.py new file mode 100644 index 0000000..13c09b7 --- /dev/null +++ b/team_code/vad_b2d_agent.py @@ -0,0 +1,460 @@ +import os +import json +import datetime +import pathlib +import time +import cv2 +import carla +from collections import deque +import math +from collections import OrderedDict +from scipy.optimize import fsolve +import torch +import carla +import numpy as np +from PIL import Image +from torchvision import transforms as T +from Bench2DriveZoo.team_code.pid_controller import PIDController +from Bench2DriveZoo.team_code.planner import RoutePlanner +from leaderboard.autoagents import autonomous_agent +from mmcv import Config +from mmcv.models import build_model +from mmcv.utils import 
(get_dist_info, init_dist, load_checkpoint,
+                        wrap_fp16_model)
+from mmcv.datasets.pipelines import Compose
+from mmcv.parallel.collate import collate as mm_collate_to_batch_form
+from mmcv.core.bbox import get_box_type
+from pyquaternion import Quaternion
+
+SAVE_PATH = os.environ.get('SAVE_PATH', None)
+IS_BENCH2DRIVE = os.environ.get('IS_BENCH2DRIVE', None)
+
+
+def get_entry_point():
+    return 'VadAgent'
+
+
+class VadAgent(autonomous_agent.AutonomousAgent):
+    def setup(self, path_to_conf_file):
+        self.track = autonomous_agent.Track.SENSORS
+        self.steer_step = 0
+        self.last_moving_status = 0
+        self.last_moving_step = -1
+        self.last_steer = 0
+        self.pidcontroller = PIDController()
+        if IS_BENCH2DRIVE:
+            self.save_name = path_to_conf_file.split('+')[-1]
+            self.config_path = path_to_conf_file.split('+')[0]
+        else:
+            self.config_path = path_to_conf_file
+            now = datetime.datetime.now()
+            self.save_name = '_'.join(map(lambda x: '%02d' % x, (now.month, now.day, now.hour, now.minute, now.second)))
+        self.step = -1
+        self.wall_start = time.time()
+        self.initialized = False
+        cfg = Config.fromfile('Bench2DriveZoo/adzoo/vad/configs/VAD/VAD_base_e2e_b2d.py')
+        if hasattr(cfg, 'plugin'):
+            if cfg.plugin:
+                import importlib
+                if hasattr(cfg, 'plugin_dir'):
+                    plugin_dir = cfg.plugin_dir
+                    plugin_dir = os.path.join("Bench2DriveZoo", plugin_dir)
+                    _module_dir = os.path.dirname(plugin_dir)
+                    _module_dir = _module_dir.split('/')
+                    _module_path = _module_dir[0]
+                    for m in _module_dir[1:]:
+                        _module_path = _module_path + '.' + m
+                    print(_module_path)
+                    plg_lib = importlib.import_module(_module_path)
+
+        self.model = build_model(cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))
+        checkpoint = load_checkpoint(self.model, self.config_path, map_location='cpu', strict=True)
+        self.model.cuda()
+        self.model.eval()
+        self.inference_only_pipeline = []
+        for inference_only_pipeline in cfg.inference_only_pipeline:
+            if inference_only_pipeline["type"] not in ['LoadMultiViewImageFromFilesInCeph','LoadMultiViewImageFromFiles']:
+                self.inference_only_pipeline.append(inference_only_pipeline)
+
+        self.inference_only_pipeline = Compose(self.inference_only_pipeline)
+        # The re-keyed state dict below is built but not applied anywhere;
+        # load_checkpoint() above already loaded the weights.
+        ckpt = torch.load(self.config_path)
+        ckpt = ckpt["state_dict"]
+        new_state_dict = OrderedDict()
+        for key, value in ckpt.items():
+            new_key = key.replace("model.", "")
+            new_state_dict[new_key] = value
+
+        self.takeover = False
+        self.stop_time = 0
+        self.takeover_time = 0
+        self.save_path = None
+        self._im_transform = T.Compose([T.ToTensor(), T.Normalize(mean=[0.485,0.456,0.406], std=[0.229,0.224,0.225])])
+        self.lat_ref, self.lon_ref = 42.0, 2.0
+
+        control = carla.VehicleControl()
+        control.steer = 0.0
+        control.throttle = 0.0
+        control.brake = 0.0
+        self.prev_control = control
+        self.prev_control_cache = []
+        if SAVE_PATH is not None:
+            now = datetime.datetime.now()
+            string = pathlib.Path(os.environ['ROUTES']).stem + '_'
+            string += self.save_name
+            self.save_path = pathlib.Path(os.environ['SAVE_PATH']) / string
+            self.save_path.mkdir(parents=True, exist_ok=False)
+            (self.save_path / 'rgb_front').mkdir()
+            (self.save_path / 'rgb_front_right').mkdir()
+            (self.save_path / 'rgb_front_left').mkdir()
+            (self.save_path / 'rgb_back').mkdir()
+            (self.save_path / 'rgb_back_right').mkdir()
+            (self.save_path / 'rgb_back_left').mkdir()
+            (self.save_path / 'meta').mkdir()
+            (self.save_path / 'bev').mkdir()
+
+        self.lidar2img = {
+            'CAM_FRONT':np.array([[ 1.14251841e+03,  8.00000000e+02,  0.00000000e+00, -9.52000000e+02],
+                                  [ 0.00000000e+00,  4.50000000e+02, -1.14251841e+03,
-8.09704417e+02], + [ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00, -1.19000000e+00], + [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + 'CAM_FRONT_LEFT':np.array([[ 6.03961325e-14, 1.39475744e+03, 0.00000000e+00, -9.20539908e+02], + [-3.68618420e+02, 2.58109396e+02, -1.14251841e+03, -6.47296750e+02], + [-8.19152044e-01, 5.73576436e-01, 0.00000000e+00, -8.29094072e-01], + [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + 'CAM_FRONT_RIGHT':np.array([[ 1.31064327e+03, -4.77035138e+02, 0.00000000e+00,-4.06010608e+02], + [ 3.68618420e+02, 2.58109396e+02, -1.14251841e+03,-6.47296750e+02], + [ 8.19152044e-01, 5.73576436e-01, 0.00000000e+00,-8.29094072e-01], + [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + 'CAM_BACK':np.array([[-1.00000000e+00, -1.22464680e-16, 0.00000000e+00, -1.97168135e-16], + [ 0.00000000e+00, 0.00000000e+00, -1.00000000e+00, -2.40000000e-01], + [ 1.22464680e-16, -1.00000000e+00, 0.00000000e+00, -1.61000000e+00], + [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + 'CAM_BACK_LEFT':np.array([[-1.14251841e+03, 8.00000000e+02, 0.00000000e+00, -6.84385123e+02], + [-4.22861679e+02, -1.53909064e+02, -1.14251841e+03, -4.96004706e+02], + [-9.39692621e-01, -3.42020143e-01, 0.00000000e+00, -4.92889531e-01], + [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), + + 'CAM_BACK_RIGHT': np.array([[ 3.60989788e+02, -1.34723223e+03, 0.00000000e+00, -1.04238127e+02], + [ 4.22861679e+02, -1.53909064e+02, -1.14251841e+03, -4.96004706e+02], + [ 9.39692621e-01, -3.42020143e-01, 0.00000000e+00, -4.92889531e-01], + [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]) + } + self.lidar2cam = { + 'CAM_FRONT':np.array([[ 1. , 0. , 0. , 0. ], + [ 0. , 0. , -1. , -0.24], + [ 0. , 1. , 0. , -1.19], + [ 0. , 0. , 0. , 1. ]]), + 'CAM_FRONT_LEFT':np.array([[ 0.57357644, 0.81915204, 0. , -0.22517331], + [ 0. , 0. , -1. , -0.24 ], + [-0.81915204, 0.57357644, 0. , -0.82909407], + [ 0. , 0. , 0. , 1. ]]), + 'CAM_FRONT_RIGHT':np.array([[ 0.57357644, -0.81915204, 0. , 0.22517331], + [ 0. , 0. , -1. , -0.24 ], + [ 0.81915204, 0.57357644, 0. , -0.82909407], + [ 0. , 0. , 0. , 1. ]]), + 'CAM_BACK':np.array([[-1. , 0., 0., 0. ], + [ 0. , 0., -1., -0.24], + [ 0. , -1., 0., -1.61], + [ 0. , 0., 0., 1. ]]), + + 'CAM_BACK_LEFT':np.array([[-0.34202014, 0.93969262, 0. , -0.25388956], + [ 0. , 0. , -1. , -0.24 ], + [-0.93969262, -0.34202014, 0. , -0.49288953], + [ 0. , 0. , 0. , 1. ]]), + + 'CAM_BACK_RIGHT':np.array([[-0.34202014, -0.93969262, 0. , 0.25388956], + [ 0. , 0. , -1. , -0.24 ], + [ 0.93969262, -0.34202014 , 0. , -0.49288953], + [ 0. , 0. , 0. , 1. ]]) + } + self.lidar2ego = np.array([[ 0. , 1. , 0. , -0.39], + [-1. , 0. , 0. , 0. ], + [ 0. , 0. , 1. , 1.84], + [ 0. , 0. , 0. , 1. 
]]) + + topdown_extrinsics = np.array([[0.0, -0.0, -1.0, 50.0], [0.0, 1.0, -0.0, 0.0], [1.0, -0.0, 0.0, -0.0], [0.0, 0.0, 0.0, 1.0]]) + unreal2cam = np.array([[0,1,0,0], [0,0,-1,0], [1,0,0,0], [0,0,0,1]]) + self.coor2topdown = unreal2cam @ topdown_extrinsics + topdown_intrinsics = np.array([[548.993771650447, 0.0, 256.0, 0], [0.0, 548.993771650447, 256.0, 0], [0.0, 0.0, 1.0, 0], [0, 0, 0, 1.0]]) + self.coor2topdown = topdown_intrinsics @ self.coor2topdown + + def _init(self): + try: + locx, locy = self._global_plan_world_coord[0][0].location.x, self._global_plan_world_coord[0][0].location.y + lon, lat = self._global_plan[0][0]['lon'], self._global_plan[0][0]['lat'] + EARTH_RADIUS_EQUA = 6378137.0 + def equations(vars): + x, y = vars + eq1 = lon * math.cos(x * math.pi / 180) - (locx * x * 180) / (math.pi * EARTH_RADIUS_EQUA) - math.cos(x * math.pi / 180) * y + eq2 = math.log(math.tan((lat + 90) * math.pi / 360)) * EARTH_RADIUS_EQUA * math.cos(x * math.pi / 180) + locy - math.cos(x * math.pi / 180) * EARTH_RADIUS_EQUA * math.log(math.tan((90 + x) * math.pi / 360)) + return [eq1, eq2] + initial_guess = [0, 0] + solution = fsolve(equations, initial_guess) + self.lat_ref, self.lon_ref = solution[0], solution[1] + except Exception as e: + print(e, flush=True) + self.lat_ref, self.lon_ref = 0, 0 + self._route_planner = RoutePlanner(4.0, 50.0, lat_ref=self.lat_ref, lon_ref=self.lon_ref) + self._route_planner.set_route(self._global_plan, True) + self.initialized = True + + + + def sensors(self): + sensors =[ + # camera rgb + { + 'type': 'sensor.camera.rgb', + 'x': 0.80, 'y': 0.0, 'z': 1.60, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'width': 1600, 'height': 900, 'fov': 70, + 'id': 'CAM_FRONT' + }, + { + 'type': 'sensor.camera.rgb', + 'x': 0.27, 'y': -0.55, 'z': 1.60, + 'roll': 0.0, 'pitch': 0.0, 'yaw': -55.0, + 'width': 1600, 'height': 900, 'fov': 70, + 'id': 'CAM_FRONT_LEFT' + }, + { + 'type': 'sensor.camera.rgb', + 'x': 0.27, 'y': 0.55, 'z': 1.60, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 55.0, + 'width': 1600, 'height': 900, 'fov': 70, + 'id': 'CAM_FRONT_RIGHT' + }, + { + 'type': 'sensor.camera.rgb', + 'x': -2.0, 'y': 0.0, 'z': 1.60, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 180.0, + 'width': 1600, 'height': 900, 'fov': 110, + 'id': 'CAM_BACK' + }, + { + 'type': 'sensor.camera.rgb', + 'x': -0.32, 'y': -0.55, 'z': 1.60, + 'roll': 0.0, 'pitch': 0.0, 'yaw': -110.0, + 'width': 1600, 'height': 900, 'fov': 70, + 'id': 'CAM_BACK_LEFT' + }, + { + 'type': 'sensor.camera.rgb', + 'x': -0.32, 'y': 0.55, 'z': 1.60, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 110.0, + 'width': 1600, 'height': 900, 'fov': 70, + 'id': 'CAM_BACK_RIGHT' + }, + # imu + { + 'type': 'sensor.other.imu', + 'x': -1.4, 'y': 0.0, 'z': 0.0, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'sensor_tick': 0.05, + 'id': 'IMU' + }, + # gps + { + 'type': 'sensor.other.gnss', + 'x': -1.4, 'y': 0.0, 'z': 0.0, + 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, + 'sensor_tick': 0.01, + 'id': 'GPS' + }, + # speed + { + 'type': 'sensor.speedometer', + 'reading_frequency': 20, + 'id': 'SPEED' + }, + ] + if IS_BENCH2DRIVE: + sensors += [ + { + 'type': 'sensor.camera.rgb', + 'x': 0.0, 'y': 0.0, 'z': 50.0, + 'roll': 0.0, 'pitch': -90.0, 'yaw': 0.0, + 'width': 512, 'height': 512, 'fov': 5 * 10.0, + 'id': 'bev' + }] + return sensors + + def tick(self, input_data): + self.step += 1 + encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 20] + imgs = {} + for cam in ['CAM_FRONT','CAM_FRONT_LEFT','CAM_FRONT_RIGHT','CAM_BACK','CAM_BACK_LEFT','CAM_BACK_RIGHT']: + img = 
cv2.cvtColor(input_data[cam][1][:, :, :3], cv2.COLOR_BGR2RGB) + _, img = cv2.imencode('.jpg', img, encode_param) + img = cv2.imdecode(img, cv2.IMREAD_COLOR) + imgs[cam] = img + bev = cv2.cvtColor(input_data['bev'][1][:, :, :3], cv2.COLOR_BGR2RGB) + gps = input_data['GPS'][1][:2] + speed = input_data['SPEED'][1]['speed'] + compass = input_data['IMU'][1][-1] + acceleration = input_data['IMU'][1][:3] + angular_velocity = input_data['IMU'][1][3:6] + + pos = self.gps_to_location(gps) + near_node, near_command = self._route_planner.run_step(pos) + + if (math.isnan(compass) == True): #It can happen that the compass sends nan for a few frames + compass = 0.0 + acceleration = np.zeros(3) + angular_velocity = np.zeros(3) + + result = { + 'imgs': imgs, + 'gps': gps, + 'pos':pos, + 'speed': speed, + 'compass': compass, + 'bev': bev, + 'acceleration':acceleration, + 'angular_velocity':angular_velocity, + 'command_near':near_command, + 'command_near_xy':near_node + } + + return result + + @torch.no_grad() + def run_step(self, input_data, timestamp): + if not self.initialized: + self._init() + tick_data = self.tick(input_data) + results = {} + results['lidar2img'] = [] + results['lidar2cam'] = [] + results['img'] = [] + results['folder'] = ' ' + results['scene_token'] = ' ' + results['frame_idx'] = 0 + results['timestamp'] = self.step / 20 + results['box_type_3d'], _ = get_box_type('LiDAR') + + for cam in ['CAM_FRONT','CAM_FRONT_LEFT','CAM_FRONT_RIGHT','CAM_BACK','CAM_BACK_LEFT','CAM_BACK_RIGHT']: + results['lidar2img'].append(self.lidar2img[cam]) + results['lidar2cam'].append(self.lidar2cam[cam]) + results['img'].append(tick_data['imgs'][cam]) + results['lidar2img'] = np.stack(results['lidar2img'],axis=0) + results['lidar2cam'] = np.stack(results['lidar2cam'],axis=0) + raw_theta = tick_data['compass'] if not np.isnan(tick_data['compass']) else 0 + ego_theta = -raw_theta + np.pi/2 + rotation = list(Quaternion(axis=[0, 0, 1], radians=ego_theta)) + can_bus = np.zeros(18) + can_bus[0] = tick_data['pos'][0] + can_bus[1] = -tick_data['pos'][1] + can_bus[3:7] = rotation + can_bus[7] = tick_data['speed'] + can_bus[10:13] = tick_data['acceleration'] + can_bus[11] *= -1 + can_bus[13:16] = -tick_data['angular_velocity'] + can_bus[16] = ego_theta + can_bus[17] = ego_theta / np.pi * 180 + results['can_bus'] = can_bus + ego_lcf_feat = np.zeros(9) + ego_lcf_feat[0:2] = can_bus[0:2].copy() + ego_lcf_feat[2:4] = can_bus[10:12].copy() + ego_lcf_feat[4] = rotation[-1] + ego_lcf_feat[5] = 4.89238167 + ego_lcf_feat[6] = 1.83671331 + ego_lcf_feat[7] = np.sqrt(can_bus[0]**2+can_bus[1]**2) + + if len(self.prev_control_cache)<10: + ego_lcf_feat[8] = 0 + else: + ego_lcf_feat[8] = self.prev_control_cache[0].steer + + command = tick_data['command_near'] + if command < 0: + command = 4 + command -= 1 + results['command'] = command + command_onehot = np.zeros(6) + command_onehot[command] = 1 + results['ego_fut_cmd'] = command_onehot + theta_to_lidar = raw_theta + command_near_xy = np.array([tick_data['command_near_xy'][0]-can_bus[0],-tick_data['command_near_xy'][1]-can_bus[1]]) + rotation_matrix = np.array([[np.cos(theta_to_lidar),-np.sin(theta_to_lidar)],[np.sin(theta_to_lidar),np.cos(theta_to_lidar)]]) + local_command_xy = rotation_matrix @ command_near_xy + + ego2world = np.eye(4) + ego2world[0:3,0:3] = Quaternion(axis=[0, 0, 1], radians=ego_theta).rotation_matrix + ego2world[0:2,3] = can_bus[0:2] + lidar2global = ego2world @ self.lidar2ego + results['l2g_r_mat'] = lidar2global[0:3,0:3] + results['l2g_t'] = lidar2global[0:3,3] + 
stacked_imgs = np.stack(results['img'],axis=-1) + results['img_shape'] = stacked_imgs.shape + results['ori_shape'] = stacked_imgs.shape + results['pad_shape'] = stacked_imgs.shape + results = self.inference_only_pipeline(results) + self.device="cuda" + input_data_batch = mm_collate_to_batch_form([results], samples_per_gpu=1) + for key, data in input_data_batch.items(): + if key != 'img_metas': + if torch.is_tensor(data[0]): + data[0] = data[0].to(self.device) + output_data_batch = self.model(input_data_batch, return_loss=False, rescale=True) + all_out_truck_d1 = output_data_batch[0]['pts_bbox']['ego_fut_preds'].cpu().numpy() + all_out_truck = np.cumsum(all_out_truck_d1,axis=1) + out_truck = all_out_truck[command] + steer_traj, throttle_traj, brake_traj, metadata_traj = self.pidcontroller.control_pid(out_truck, tick_data['speed'], local_command_xy) + if brake_traj < 0.05: brake_traj = 0.0 + if throttle_traj > brake_traj: brake_traj = 0.0 + + control = carla.VehicleControl() + self.pid_metadata = metadata_traj + self.pid_metadata['agent'] = 'only_traj' + control.steer = np.clip(float(steer_traj), -1, 1) + control.throttle = np.clip(float(throttle_traj), 0, 0.75) + control.brake = np.clip(float(brake_traj), 0, 1) + self.pid_metadata['steer'] = control.steer + self.pid_metadata['throttle'] = control.throttle + self.pid_metadata['brake'] = control.brake + self.pid_metadata['steer_traj'] = float(steer_traj) + self.pid_metadata['throttle_traj'] = float(throttle_traj) + self.pid_metadata['brake_traj'] = float(brake_traj) + self.pid_metadata['plan'] = out_truck.tolist() + self.pid_metadata['command'] = command + self.pid_metadata['all_plan'] = all_out_truck.tolist() + + if SAVE_PATH is not None and self.step % 10 == 0: + self.save(tick_data) + self.prev_control = control + + if len(self.prev_control_cache)==10: + self.prev_control_cache.pop(0) + self.prev_control_cache.append(control) + return control + + + def save(self, tick_data): + frame = self.step // 10 + + Image.fromarray(tick_data['imgs']['CAM_FRONT']).save(self.save_path / 'rgb_front' / ('%04d.png' % frame)) + Image.fromarray(tick_data['imgs']['CAM_FRONT_LEFT']).save(self.save_path / 'rgb_front_left' / ('%04d.png' % frame)) + Image.fromarray(tick_data['imgs']['CAM_FRONT_RIGHT']).save(self.save_path / 'rgb_front_right' / ('%04d.png' % frame)) + Image.fromarray(tick_data['imgs']['CAM_BACK']).save(self.save_path / 'rgb_back' / ('%04d.png' % frame)) + Image.fromarray(tick_data['imgs']['CAM_BACK_LEFT']).save(self.save_path / 'rgb_back_left' / ('%04d.png' % frame)) + Image.fromarray(tick_data['imgs']['CAM_BACK_RIGHT']).save(self.save_path / 'rgb_back_right' / ('%04d.png' % frame)) + Image.fromarray(tick_data['bev']).save(self.save_path / 'bev' / ('%04d.png' % frame)) + + outfile = open(self.save_path / 'meta' / ('%04d.json' % frame), 'w') + json.dump(self.pid_metadata, outfile, indent=4) + outfile.close() + + def destroy(self): + del self.model + torch.cuda.empty_cache() + + def gps_to_location(self, gps): + EARTH_RADIUS_EQUA = 6378137.0 + # gps content: numpy array: [lat, lon, alt] + lat, lon = gps + scale = math.cos(self.lat_ref * math.pi / 180.0) + my = math.log(math.tan((lat+90) * math.pi / 360.0)) * (EARTH_RADIUS_EQUA * scale) + mx = (lon * (math.pi * EARTH_RADIUS_EQUA * scale)) / 180.0 + y = scale * EARTH_RADIUS_EQUA * math.log(math.tan((90.0 + self.lat_ref) * math.pi / 360.0)) - my + x = mx - scale * self.lon_ref * math.pi * EARTH_RADIUS_EQUA / 180.0 + return np.array([x, y]) \ No newline at end of file
z$P+&7?sUBYZD)Hgu3Whi7cN{1j{e`-+;Km&ySr!m%m$)e+3s$)=RWyB{F;?-+dP!N ztT8$LnKz(HzTTK7eO+V@M{>fZT>GYZ&UfKwA0DXvSq?1lvneCr{3k7XNWJ*13Y!B) zJ!FLLp`%B!ywr?C^?ir^-Izb@#`?;F!rJB)FQ_ymNc-QzU8NqzlKQFT#YUVwweAf7 z3?$hY!Z2icbs?G^`A1DQE@WF{UZ|d|1dkgXHh^G97YLA^uc@5%c;fPIeE6P6;_rUX zPdcrL)gLJhpXcU<;Tdcsx-Qxp>bpubqlNf8WFLop1Q*@ag~J4X5Vt zsRDYB(O_1-LH5<~2>m~G>Xh2Vg7uzHz;t)>1OUKtRXSRM?MC^888SupMVKzwpKR|H zGEG9-r1I>^02+*Vk|ARtm5n8Fq#+DE3Nt?aX4L+i4i5V!)A@O-%xSoWUgk}C;Yo79 zZ()P^)9+e}7r*$%_`m<{zliSs`FP-g52~+`jw%oS5_3JgD27$Q$M#kH_67jiPr3!F zPYcMtPwC~mQEK*+=tGXxSkI_*a-IHF$a`T^!vG&Y>@a;c_t*#Kr2!sEpJ|Xo?Jsiv zmmlR9U<1G%cS~3=sO4qhskC-b9YzKr1iOAIKvl9JW&z#fcq;CKM1T~{&$#>K(_JOr zcG3dCF@Y!z3>01sCho|*?inES%+5uQUg6~#Du5C8(J;z~(Iky`-RK!3Ouothisnct zzxl5R20!FUCm$n>1FEt`2k~qW7}H53*3~?*f@T^60}90xUOQ>hIm~sN@Cjhz(t~8x zOAq6?xfyorD81@H7(!KTL=$iSvv?9AUokFUmQ%goiI05ZLVWyztMMcM>OaTQv8Njc zZ`*@{dYR9~=%JcP7c{|ig)5!k(4i!PO{iDZi==*o)i(~Cj{h{c2KT3zb8Y^V;RJXVj2*-BT+p3T2 zm8Ljf+-DrLl(tm)PcUgKD34h#91bn~x86dF`AJzA##$cF#Rf>nE0B(OW%4tSGR|kq zr0{yI$F=?p9Q-W9EgC%8omEKSVXPZKEhF`&Lap~0zv?iZt2BMZPh<{&nmjIMnqJXC zA3F_RG!RfGi~i|GKl0bKqEl_K#{1d$ z6;D_et6PPQY44q zKlP6|IJWC0tfnK5XD)xVm{I`syFCp$sy?2|p;4desR6^8@~fEUHDH%dhIf;ZZl`11-X#zsVTt ziT_skNAKK8zM#&>+{x5axu z{3$QDozd@xPQ;5}_}nIXj%r%tZhM~RobI9~muKN|a^ zZZsC<{IxY)62FJ)fB42(Ybd{>C7Dov`6DyP=o)Ki?Quw9r<<8mAxrV%NCQ`^Pl-pg zpAK-IyhP?b;7OO#UxIH|117$iJR3rXypSgN^XkvBnJ!Io~V<2q2G=_J7-&MzZtNx z(~bK#E=O;5HNNPZzAff&y-}lSjt-_R%BLI*qP@VkFPnPS!1b_Q`V7&p>d+$69_AvI zZ{ZX4*@n%l4XZsW6T?9K)XXhilfJQ$)I*|?((>lvXrMxS9_aIl1djTXVwq3)EBhMY zQ!WL0LKt>R_Sj&TE5UJZyzrlInCq(6;`o`vRbLe$LY0PzBt5dB#`G&KZDWS+S4^mz zMW|QSGi{oIJ{r3v-m+WYZBcc!AHcv_JcHk}YVdwb-;4iD61YJ9kcAimPndiXKKFyz zF@D$_fV3DW1NJd_42=6sc#Z9ny~$(QVT^!wQLfof@mz5s19$|E62?7Y05YlJ7PbhT zmwZTLpIiV>z*#2p#-N5{zU!A=PT?CocOLYSi1Y)P9L+jS@)@u6%JL=*oTWPWVGb|! zIk~UUqKDG2`6MUua4Lw(Jvx#h0j(DRNe{o0mw?NKmulw#{;9LGP{(y{S7|c+m_`7< zkbL83r9mD_%c3P|-7l$8nGJ-KOku=LeD6{b;USF0gK2OP}!tTza` zK9tumZn83goLtB}v>(?&L7*}nC_5lwdyPcpc>=ka3+4F#ylqI4q<0@&=-{3(w^}gvZ^}c~Q($`~evEzSqA%m}Y446l@0l)5 z@J9F2&JJU5Z2wGeRrb<;*Z|<-bq~4FoNYwU@J@PzcZ;mZ%bUV6%e!5AMj!NREG$L) z_-eF|9gE|qPsXt`*G1>}y4uNt@+?P}MG?`%xAOTGFJ<)Udxc|Ed`kF8Jvh3TqtjV@ z@B+rcd<@m6!b3?^MYEA$7(2b)81^|5b?EQyo;i0mT8$Ym0PkMCqW-)W@BHoGj6eMS zH^-{zJ-fCVx1Ku}PrK!o7;KTZ7X!70#m=JpPLA`({}emo@e7yZYHt+JfB6^0GoJta z7|pg~Q_uTqN6^cg&;5OI0DY3YZ39>wDRRuW^>gymVU|BU1qe$?)&58olTJ%7oKO04 zq?)QdpqD=BO&vCVvQI~#Oo$Y3JSU*>9y-#TlP@OzKufEM?NPF$p}N%VxO(|=Jn`_S z;w^9e<+yj}p?L1>#-{xHXFTg^(OO)IZ~i~OCBEugzJ;pA zrH3CBPY3dU=e+PeQk}N1?nZAo@SB{3UATNHdUCfOdgy`pzuxq3;`jgf50o?cL|gC3 zDQYZ;Cu)Ovv~^qVC<`#+x$3yz+mFqSop|$Gej{G?MXyei>f8}mAAKS&o|nIR`KreX z`@{U!^NxH_KXY_5x-Ncu9$C7_VmNayd|oBrToga&Q$KUgcHTO|TfFm)j&+p9qxm*( zmA1vJ+&sZTMRRG@Z}(HrIbKq#fI55zU{T%kmnvglI>RT~XvcG{VKh{$o__d>^t)(N z?IQ9dRyOyr_^3SUw5ZESvft@xeAH`axO9ZYlC3@IgY=(8n@x@Hu5N9|mVEim?akPg z%vo&dOGYfLFCANqh0c;UFTjIlI~N}P>^D87Yr3Zm@EvvNO8tdj(pAZ9M(r6I-4`qG zc#cYW4fGWl9)azcbY01v4*Z8c0jyd!g{#fdq_9d^o@RWHUGaea(RQ3i zrT4Q^fZrfFCGF5Ol09687}M!;6rTCY_(DIP`5Eww;az#00CiQLta6m1@J{)tBeGE( z^M3WDQ07;NA06R;CbIh3NZ)f+TJn#dff0388G3Y|UHX;E{{!k}-m4x(_T+V4;6<^4 zq^CXjEp7n5P+m#P)qVn59F}xbJIB@Vxjg9uhWje{a;0yx98_l2Kc^Q?-2@xWdE>sV zF%_G?v8}{o&$)8Soj(g8AU|VOS!4GPs+B65zW1P?^p7_Hs2=2HO$0b#!>8<$X;EJ2 zal`gAbRl!K6~=D9@Tq~O+u~wp!E-ZwR-ifG@)7^oCjM1h)#}hsiDvnd$O3+PVZZW6 z^(7DYQpu4x@4FDIfZp7}^jz25Vl|AZiTv1=X5nFuD@v7|A)vGa5 zJ-k7HO#!MmHl6+KY%1lH7)oI+%b!0`UE*lZFOzVRf?uQ+9V7W3xQn=-9ocbCVStaG zw}AhOKvsQdxq4Zy_f9M{j^gx*wODG;MY~1aDN}rs4zTDt8ghxNyuE_Th z9gPl~#6(-0%?2%>T*NcJBI6YL8!w8hubWk$#wL9kebrBXZ}WI`mQKbaPi(~pKJs9E z??3&|<{=v@N6b$IoAf0k`B@&5>3NnY! 
z>?dgthV#)MbltAdTh>+SQ&(1&ETe(^F!W(!;*Jjh0RQw!L_t({doTNG_^6C7SM{nJ z0LE?GmJd)t1&I4!La{$qavuG63X0vF#8eutJ;4_Hk#uDGyq~}|&$f45Wm9GkqPw%} zO?pqieI;J?s%OTJ{HuQuJA3Ejp@%;#+3kwIvQx}MxNF6Zq7yPB`ojqTl*tJIbdShL z+9v%;I`OwC+2yNT{35?7C;7wIN-r|0G^TH>;?$T3t3Lo4RJ%%;eSw+={7#qiu~jxz zZh3|BnfK1T{`GgI2&{{i*Dlr{$6C(N1p#UQ}r`?#@-;Gav@}Ui-D8 zWyPJq+*SjH2lLbQ5umHo1DSd9K`@fTzqKL9SDg@V1d5BXqJiRq1~YKPCkbGr zOa0By&jzL)sMpjN7LZj7Jr({>#gC!jp8#pH@q9Ye{7;3F-ZRudd1j)m8ZSdFcY^2I z`lSQ*{~rp?IL_5b7Jel@WU4S6KmEM9f`N&`?HJWE5mZr1VG{e9c&juEzvIYP>UkhO zs|^B^bOP76){%S@9^|5^9&1}rT$K+FFmxg@x0N}KP^ZusN5zHS8V3=d4%z-n-AdUN z9OnG!WJyN^^(*yYXSOYu+K!LKRXQdC0CC1w>{AY^C`VV*myRiAWP1>gb-~fceqj>< zm&?Sn`GMTT%LDN7Y-&W#gdRhY4W9$4y9cKMoY#b@BcFaQe!9Yn{IE?^FexhSJ@h|y zQu;GNZTHiBrre|XT)>$O43a*0It>|?II<3fzEa<59@L@$aFh3t{-WGFulR#rV?uzc zYd*GLx0W`tCy$=Vm%Kp#z@d9|q80`Z;J5WYk%BurN}}PDZfNfYkP6pWmgGquBz=j` zFwNg7{&`aUK%JUkvapfmRsI-1d7u^CUYO4`6R0>Lc&bje+Ru2kuyvk=V!PPng z?TUAtuJnH#p*vRw?p^>;eJ*cYis4{4-tqR|kDvNazYwo{*^AeQKpf2bOG7G4|tjKWb**W zDRWZ6u)7^g3x{$4C-04~|GKZzKwRlGc=lUg8gSBYciI|^Y4E$XeKr32w|{f|@Q?pg z>~22kK_Pt%Z=gdTWl)8Y1`^mzq{igX(%ND?`0-E02jBDl`0nre+cDaVc;>BV;<_{I zam#H_kJtaTuZtTrkc!?YKK$eVDL(QGzZfeEOQNe88^@Mn@pGROU;N$Q9W!gkG;&b| z=GW9E@usF8(Gitd`G*SU#7^=IgE8F}#*XoHd#rjsNoFAX1TwuRGu_^&0RKsL1)k3^ zd4^-eL<84hKMpwwU^tBZ!BCp1ywdvtM;SC5(LR1A8fQ*=Agpb2%ANH3H_^6gU(MXw^OnfJBi(T)D7uBty}!lMe~GzCl# z@d}4cegTUiAUJ;_QzxaSOm? zgJds+GaA@~kxmSc0t$C%qrp<8$ zPt6}{7p5!abXvkdof=($#M>5HBL@ zJ)a;+DEXx;g!_$Wo^=l&vb|2T@n7%%r_qKd^wUrW0uJaLqEfLswqd1O)V8mMKLwpE0dd6@KJ1BUfiKN=p{ zkuj6Zvg@9JmQL=o32I+g>>Yl>fW_5*^!JA9Ls?ux?jtYi`GmeBCOz3Wz~%zxKw720 zE$yxld^E%HplW5%CB5R^D%Y>rHR{f(D79V8%1&YLz2MH`=XfEewu_8|vsn%jr%c@x zy|t+{4@dY^ffUWKObmF1F* zb^+~fv&PLY^-r1$nlVmet+;h1Jli1cJ$1PPtRxQoB;2-IxXlOgH`C^o6GZCb6Ffh# zebG<&NuMXD5rlpxKG#P-{3Prw&6@>omO+NJefT)rjQP%T%r7s*((%)=a`t2_ojetd z)sEWAVnn+UTk_BL1}*84bVT@szSMU%=VtIv;f=!JF<1Q4`MG@S^?RO#B*QtdGR;D?;phNH$6QrUN|4?;(1p#h5B%uJBv)% zlOxeP81!Rv>uNml_=PzC#HIKf-|_9SvqM_+aXwa-SIpNYI(MWz!%-})F2q7-F*dGj zicf6pWkF{yx_jI4oX>kf{Ev6NJGQrn%G-=ycNp_U-|ZJlcly0upE$rA;>h+e$0MG5 zwSLQv`#HxuvE3zz87mGTbJU$jZ}=7hI)dm-A3`e|%6-)f7$Y{Hc;l`7%4 z=O!KO0yaeDYiyMZ{Ag^cuzbXeL^cw}!`6}6qkMyfLWZ1;;FguTd3P1%3K z!`y6`el_jP`KDM7R4SNjH`w0sOiYa>PO>G9bRq3CZJ1_Q?Wdv?&PKWf+Bes%2WiIk zlJx`Obo+*8$%|m(4rkQ=(gyT%9?Ev#o;X}; zQ_G=v=kasWPoD`Nya}N^+jmpE`LCq#6QHatCvn0DWG0FfCcQ)k`U_CDPoz`-M}LJL z!UpROc!kTfkWSB)PM$ddfK4F{H`A;GV;fk2{^Qqi!W}SPZ=q2NDxZYaRK!LGkGQy8>J0Y+1*>o{pF7$9uU*i*>1f(6wHfOpl&w0T?JCe_Cy$lUarMrU#4 zc1-`^4FPJ`@WAa`^&l;F$1j;GKhm$8(4m2sUdc~d+se6CwAwl5q|XJ2$2fVxUHXa7 zBJCJ2Y-2lG`jKB)yx87Sf5toxry3v_Y#S#5uz9G*seJ1559M3V(I@IRR2-X>Hm<1O zc;bn;^yG!uRDX2!(q%6YGk<}d=VeawQan47P3&`YzuK^0yrlt1u3k5jeAu*q&>y*u zd=DM_Bd^4KZcrDsb9&dq;a)T)!&UWzoJO;@)QC23_RH63s9kcr{YYi;-T$S- zCZA+U_LdhSI6<=!&DNo0v0z^C(hj6xgYmx_<>YYBNn#u1|I9TOCFd2{|{GMHqe5Q!MwU(XFGi3uj0KFKnq_4zJ(fU-Ne?|w7I0Zxe8+647 zwfAS-u^M0X+84$5|C7Hd-aHwPKJp>)mUU8%lhpp|c!A9V(2CoU<78}|?iGg&Xp7V3 zyU$cTs;`rFD92tNuR*&=D(1}iC<`3Z_Z4U5FZsv%4WEP`{UV-xz;wP$@7p{9K&U34 z01)Y1>kc@;h82`NJB=>FP9hxrZr z3}?Z7e<+1$aO{j4TvN_sqaz1Xws6#p!O?2`_uu)W_||Xx{y2I1<;ttp%#r2DDp@N) z`S_Q2(1fgJ>Oj;DgMm;!A94hRl!l4(eL1yFk#phv!}0dF{6_r6*SPwG5|w&F32@4S=`_u%i}RN{)G~1n$L&4iNy{&JH0d1c zgckZ@6;OG}^9lW?Q;>X}gnu@e@c%PGSs?i=ysP{%O{$M4Rz%oj@Q~%#j%Qqs2aV#5 z<*#QJc&5@Oq<*H139EG}ZFZbK%klsO{uI^80gB2KZIz?E`Oo*rGrgWggBRY4W0lpw z)+JshArOJf6aCqTDUOSt$5(o9>W3XyJ|^I+91P-%!|Ot!^-IrHSD^!Xs~!|Ej|#Y+ z_MLx+9SFNd%N(nJE$%-_JNik_T`2*oh)+gj1b; zEVw4Vh)a>!)SkeDOiCML>{jic`(cGiBTc9EQhve_Zw{(0dQ+n+g)cKwf0Ry?i_lem zLo;=j;whWKOAabW{K=bOAE3tL4 z7ytYp{@u9m&+dumKmQrAw6+@G{ty3QJpC148FBSO99?`oF8uxn;`iS8&*PW|f~StH 
z#=+uJ+NNqC=g9(v;fJAQqTz^*Ekg|q<;NWi`aX5R3o#l*Bi|u! z#2q^xowfCtU0TkCX5^;|9X)b?Y<}#capk^G#^~zhI3^!yUIVlf%SxxR4;XBt8^b|< zTVFcPpm1M0+*xeNk35X6{$YIN@h4-rdMv*9+rBO0?D2@@6^+1oWA|XJD>FwO`M^S} zyi5kz_DaXR(LgY%tM~~!L|?x3L%SfJzJNhYnl*e$bQkcXJ^V3m#0$5hA|L5TT_oeX zYEG{@5^VoPyi)6w@2RkY1Kx4e?!53|r`-<95vH_%@F!7<#}>dN+P@$-yb63Fo%k%C z^ZnGUn1HmT0|zu?Yf&)L+wk$5N~^je57(yx_?;G-0mbo58Q4hOV<*xM(mutzOgIL@ zFmtK5iQ5}92&~W0g{=~okP%!@5YMa3iV1W(6i?_6fl>pvS2ko+sJ>0wn+)UL_zF(s z3=jr=riq)(Jg~`F1-0H zdQ0$_PskTqlokDju4L;3cj{zL6|rEe%=(pbi~MSN6=&5Q9&1@S(NNkI`anAC6xW1W zZlytg@>x~R@WN>7WxV-h`Hg8Hv*ZQ8X0xqQmfs+Aze2ozLIqB8k>AO5mvadjUf%{K zhP13pXZFHBfNRmG8eVC^KXv;NepqsJ&`>u{eD;@2`~p;!BVPUHBKoNu;9wFDUU?os z^s@25e89g{{<;AQ!IH*^e6MyUp7D9US#(&>{;sda&FCU{RASOiKje80U6Hl>C1C}Z z_$8*PZDRjudx|5R6Jyj@91i)WyyB(XjI;S}%b)acJK_#bYmN7f>m_jnsEQHxtD)3Cmbe^+JElFD4hU6Zjsy(MtmJQ9tubJhmOt zbMmCfT}jdZDmNf)boFzTZ=bfB3ss`R?Se^7`0w_qv{m-VJMABNO`gMv*KMZJFViUA zGFNKhHjWH*m`fn9(pr~vpT@Gt0+}%w!GGeGcy!-|%D?3*@DbGL8|hUJw##-#arWU< z4C3bPWA3d(%8Tfo_K*cg$rC(!o^42<<`<*Aax9kCPsPf~ld*dCx@fJg$K29l#De;E z(cV>`PdmD@aW%HKhQgI&Ugl%?POabMVtz}01ArdE2kLqtzm8){y)i*)IN6bnfPLn4 z#Iqxf$C;n=ffMq#cpGwWdq=YB#&BmNM!P%lu6MjG{`^CK5^KtP!|H0BJAEonF0aN= zKF;3OPIN@)xpU|2rwmoUzRJDy|FQNTaJnT&eK%eco-jGwuzT<3q>YNJl>{guP(;QU zOb!McfiX5F2(U3=zZgqmy&e_?FjjnW7diq(f`|Nnt8{Z*%$M5{! zug4?j?vER1ua2d~*?7&XULCJ}&FkaIM^D7Lv#VZf>~|+)ap`ExP92U;XH)Y9_2J&8 z+MaLe@5N0w-4tieoQid|=}t%U1KEQ2-}nA_^2}p#=&CDXW_C8d{B2(!U-T7U7F(U2 zm^(BV7tdaBe{IdO(BF%>nR(0W(Z?T&Ey?uV?|N6f{k!Wc|K(zGQ=K!cewKCvG}HM{5$cz|MdH12jo9)bz)pTKTR_X zg8lzbP{p4jz}ILM(A6vS5j0Ko4S3&pl>XAMc=bJRULj8T)D8KR>MOguld&mXT)2Pij zR2NiL*jz^=F1`iNJ%hXftB$WgzjXcOi~@4bH{o$zJY3f2G~VMqc@1qm{a08sJZnVQ z;}KavOZ9W;FIH}0(1<1jt?}GB>5tmhn+E1;8F!k3B~lemD9od5uj_R?37$!ib&!Er zXRwQR)8M!UTN=C6%3PdsL*dGwsDX6kl>wTwjNv&eiFE|?T71{)@6~bGYZVIBUJ~Gq zKlrIPEM?-%1NBrN&HO={>nn5&*Q;bdSy$MTEs-rXf2^gEqZ581_|W}|cjzl3L(yP= zR61b%y7tX0;ROa7lZNU~q?~ZGENkU)(|b~Oj5oS)a~yoN4)n_Romd+{x7Ft8DDp^p zL<0Spxx40|lz|`KTtYpp=YWGZHgO|bMPbVHdfnGLrobDBX(Mc_?0|2&i>~RJxtN*e zttC@2)tr*fPR87HzNrNKv==wQo6YpI5dq)GXFOvg0Wxj2*udT z)y_R zh4Uxl%;{5c`q`85#AA=gvrj!8YZuOY{NuAV8!M;O2K#|kW1|2MuI|aVlwnee5J{JO zkB_;Ugosb5Uawv-7SoNHICAJ{%uI=HfixwPMak>%q3M`uX`C7CsDH?Q;3qHKmKU^_)q_uXjYGC@m6jLh0h%`S8b&2X0s%p#NIo4V=TS+wQ>CJ*Tl~9p;+1M z#2#aSeZCR@7@H?Op`X8Qs&Lh~pE_#^3$xFN_&&Os(wD(G*H{&yCX;D2k0~#0GhH#ef2{=7t>d(>`%_d`ewv0 z{p#<;|9Z!LA$L)}M2sab2bm={Z7eJz9xE$T6cGPvAMnFzHPnKc%ecrZ~YTr zV%7S4X}B~2c??4-@8CW^5{CDWkd>lCGzD%5U-0>ce99YoSg#qW=`Rg=Ule?--46WL zs!n%5?!0|2zUC`FH@@}TzDBY;8K+M?p!QEYFh0q61^pU(r6bsPe0=1OhI<@U8xw?G zznMm;`d^|d1CQy85=QxeZ}M|o1gEfD7HJ>rcv;I3=;P=c>E7rreLvU`kamVZ*{pNy zLt|h0RX2T5K=+W~G<8_&!I^fTBM^!qHi}YdR+PjbkUF_@MQTXS#ycsdQBYSPdgv62 z=Z$?j4vYww6qpwaHHh98=kkrZ9y~N~xciP8@{#G&K^Y zQTGSTv}kE5kYH$ZUTf&a)ntwxHXU{Lv5DCjYaWgdeCWaWlCSz90UZ^5SMq1anQ)g* zmDj#>C{!y$bP%c9M_>?>!9sCa$Y9qU-*WACHsY}dJ{T7?d3o)d-x|Fg4V*$dJ+&xQ zlisL+BgG92)2=!zfBd2o49e~lG90{qW@Tk1S{jrX42~W>qWW;?Gze0JS%hgc@ zE)r*kk})&@<$T!XmW2e66<D=u3zU2MemP|7)Nhpqu7X?FTN}!YB1V<&Bo9d>(|_{+UPXK%41vy9-_jl9Xc46e|5yzlnpz zi}I2iZQ~p9qK`D^Aue(?;DFaU!kJ^hxKiXJIq}p33j_5@4`}qweIa(7_AfpMQMHKi zbM_|-LK7z(4)WDT16I!!>i3RKuf&zOg--QYLOSDgv+J1&7Y;NN$+PE9`iAA{XP%93 z`Ic{uJMMZx+;qb=@tg1XS&Q})4?p7tPWvqIC}J8dzSkwYEH$D&#hwsIYuZ!;Ye<1$nzx~%SzPTEAzwDmqw#MSY)zx_2SAK0=`K4bdr(@bT)^{a$ zNnfIo9B5PhWLsxj5mFx|FL;+|;Ga;(&>BkeWdYqH;{YGQI7%O7QkzC9b=3E)+4nGo*;ZvsG1CedrZ1?E-r7bi z2$pZY(QbD1Suw{()dh+EZVb4yqyE&MQXkYr@xsPteDvWbV(xjj#LK_@OCzr2O#qA1 zg`RbUo8RLhzf#3vi>)}2gf5P506-7ZZVIT_in^hc8<1&ZRn%h1zGJyTmyp9N{<+Vx z+eTB3yJW^qHwRZKyzoU?1zx>PEi3}CV8{+^H>U8XyboUgns(-({@UJ|miP$fspx=h 
zkO=bPoYDz!mapAc+=4+U%BKi6M^SVwp)=T)8?RE~$uy3n7yCZ?Rv#~J6*%h3@{)7O zm)0hMlvQ8N#8MXDMZskl1Xe%a34D&pM(pxUL^iZ$8mFsazq!w~)NaV}m3MjI3p_uxou~ z{$%;+ns~8;{-j3fgO2-cDU1Qz0_C#P4e4uY;f%OYTnJ-}L7}P95HHiSwarzKt{B3T*@Z_#Hw~ATpPop&@ z|8p|>k{xS3&FPt#k{>WNyA;!hk4AHTKBkrqsf=oo&FzccoqkWpz&CMug1yyP4!&z^ zpVG2Q-QzP9v{YaICcLxZ0~igWxnEJViDMnA+di<$TK8B7>Pk)JYw+D+)wd>JWUSwb z^UpjU_rCxA@yw$S$iG~R8yBvOYYs2Pv7<+#DV}-IgSDQPe8VGGULk)(-o^4_taNtb z;b+dqSbHj-clVv~hBtqK=6J0*_3Y``+~L*nyV2|J`_=z^0%4-T`j^^Hy2*I3TlyFw z{RZS&73iSNfT?gb@Z_Nb_DVjFU(-vHaPt;`0ew&Pv9{O{O-`PqbLY;uj&a$cr_VeQe|rB1 zyjk|bxzlmz*cI{r{OFIy3qRv!wsXuIcKAN1hTQf{JDz^}X|=(c*V;b(;Sb06fBz2v zZ$Oa0n{`x^H3=m_p9i25LX-FzbZmyPv9` zFt0nnanPo9Q62rJKid@QJkXpiKM8@o5S(lh{cj-I_Iu2OI+FKJZkF2V#>TqN(u0kS zeynx|abfd<`x7@!dg>TW%`f=$hOHNKB+YGF?P=bq>AQMhOmN7lc zj0WSUVd(c7Q}9y=qj1g^4%^jYQz=(ztAV&}j*{qGx>OqNkhZ_tc;Y^DFwTHhF}IcE zK{;$P`e3{5ki2R%@c8Vxl1jP#O`Fa_xX&8^6ik~Em;(D6{G~i`NFZ+dksny#D;Rju zrSwZH&D1!`chyBg`X^aWu5slUNIq|(G8i|eEJ);&G_Zz#;CJF%Kwb4$Ds2GiniSe| zbWCypr}8;Dl(Ee$6&wnlTd25iND3z~4#Ecv>uGGR0M2@2vM6s(M0M6pSpscCVB?tE zOJ%{sx5sM$k7cIoT$?!)reEr^%zeCYx$o z$(I)oOo_g>?A!dzWK1@Y?}YcOvH`Wbu^PRt_2{gx#o1F&#;IqXj`h`Zv3~YKTwJ|i zAE3Lv5xikwU-lV1ya{VUz6I^kmmkU}5%~-RZwi7Ynbpt`9E;P9IC5wdE$%r-oJ=0m-blg3cqn1Fn+%jm(rBKShwymwVz1TC#P0GnG5^BX#Py%` zIniBOiVNF)jW@PR!FbaP zm*S0|adWJn{%EuYXJf9h=?6&mq?Z%TInh7u4FG(?plSao`xb%aA69g_JVY%0F`tc@ z*zQlpM?dy({P4g2UvX|DVqvK%nCvI4p;4aAmvkRCJ^Qm4k|jFM=W6Oks7W?u3TU+d zr8D)Zg7Rv>-jx6zm#e|$M`7#wKnCD5U?ZqcJUgwVmkj{JAUz$4eacYbeWqR50HCpm zHz(nT-gU=(e8b;M|a|A!5n({ItiF z&dM+L40D>Ff)VV99B40(%Zwi$*wTKE`a@PEk6nrD5kCF0}*Hd*p;9O+%V1tP9t26y9H4O593vy4Rmo< z1mScHj-7cOHgM|9xge-cA~339@`>@FybmQng?w%=HZyso8J*4({m#?1EJ|}MziYwd zoE=qm;>05Y;5E2>3m5!+f&jyuG$b9=m9!@PK8hgC2iM%-QDDeaOcyc~9^i3wC|??K zC`$@uqq5VEU?*6O>rRV8V!R~3UE`ARl#|kuQR=885B844jdy;jN~r-Y1&o<+dDs>> z?dGc-h>;AzdHOS*mnhP>ol{?nFE&DKZk>zCy=_fyemAba{`$D<#jnudrh!LJf5dcj zJA4l$H&eJ0@lpq27CZ=afPsuZvg~YkA0 zmxiPlxn`*6zbd4RKV?uy%lc9}#(B9xt7MewhY8ksg#uriHv^=VY*Zz~W&B)LzUKSJ zp$nrQPD$9pH)+XQ;XEb zlLt1XS3C^XgxdEBE;rh4wk_z&h;fu5Ef)o^uEA02w!YSDLXBg86kLUV=r{D+oaB^H zbR9#1&B2+mokD*#vjaA1QB)kHMDr%KUvWp-(gr@|oetN>EB$k!2pI?uG?;e=vYhN3 zkdihbZ&bje>e7J6M1e9chJ5~#P;uqtf_e{VA=vRRp{CV<%ri&_$P(R2KWU|Y^CCwc zx%vhIL2ZJ)tAiVKE48*6>=fuQu9a$SPPmhi7 zFAmFyZW0@d^HTy6lAeU&=s_jkYkB{$W~lu)l>Va z>30x)0vpO#n2c44mSLftFk0V`7lIG9o*~Vm-UXdF^ZEiNM61354qw&(z^``~f@q6VYS5aao$K?~` zVpUVL@I=z4@LzT1mGYhT;?LguN3neUjdA9gPZ}pr+3?*_>fsu{!1bzac-85JQ_sX7 z{Fk4N-}p%3*H;DA$!Pm#D7J0rk0^}vK?iJv;e|tB?kA=H;TN(f=2w+>G=XCiz@QgtgKK5$ktPv-!!ECbKamI+;Z6 zTPNTJQ%NsDG9J&6oeEW-!lBxey$-!ce;QlKRyrG7@V9MCKq5@YH|2vou`d;u0P1)_ zKgM1Nc1#mo%bPNil@DaC2_JZTgTD0p9A6YbJq$`C-{pm;`T&FxdojjiW8~qt8b0k= zA_Ir=if))*aMgN^Q{4NUG<+ZKI9XW?IP6}H0Vb(PztVPAH6RNk~( z2F0u1fwSaSe5M0A4(mv|3^jTlxy~S73JRJ6d8VEfI*09$<9LOo|GB1O4{2lbUGph# z85jAeJa`Z06y>aT9P*;7#v*T=AmfA|z1f%LGoIy1v+a<~NUyJ0C(Sw~erkEZd@gdd z>S;brs2g$kmNRFMv&y%RVf~~B(kDfOX!crw^+EG-;8iWM%0A3o0UW}OOpy_D4HztS z<40Qe)X%93eAwH@WIK6`Tu8^4-~kEBc}y#To4oMMyt}Ex4^GJUu>B{{hsVUPyKZLH1ba7X*^-=dZ2SxG4IIJ^GPPOK&AQD2 zZr;Se8?f*{X+v&u_LNS1T;s1dPfkzA)WYGITRarYS6mZQON$zt=T)u|d(A1Y&vbTo zVs&FZwmaLw_rrJ99`o}{(NaFPjn`$TyoK8fwmhk>_LM(nvPBd63x~U+6?-I2lm7Fh zp!BfK({6p~0dFemM!T&ZDW7GgIUbuA&&8?79*+0_(eK7%Pkl6I#pAK@#klV3tK#^= z5vw?`+bsACcVBubM>7sxcXdoI&BYU^&c*2qTk+!8zC2#_*WMZvt*JPBem%~gIVT#W zk6kvbWP9_y^+~_MDs`WSaoi_S0s{Juohf}ZH=HTS_8YnC44+mmdo4cGI@w31BPxqE z8*5JnZ`UX0Dn_2_S}$8Y@3uf`ud z_`bM(;#eFKpSM5n*7%CQ^<}YnVJFUI`&wuh~V&?FC zbmX@qHCP7MCmw!Sb)1Wz`yam$zx*q|ZoUbuZz2x9BhQWh#o3uyUEhdvXU;1(<rIC}Kj`0*e8k+}2bYyG4>ZNAM0<8Ie79g_}u>}GQy 
zeM1U|jG3#`e)xoJKBgIYiv=4nI@lZhJkj7!GN`ZcW-8_#1MG-!bkM6Uo`RH(QrQXR#(?0uWiY2S9yKES#4vJC-^sFLw0S8PZFS$s)7E>H=^nL4fUBl^~KqRV|$Z z3xT&zh?I;AhlUWxEhavn?Ar+Z+j>`wHojZ|1ANs11;1?I-ks1OFwP zh~T%ou-OFVP}v+xvtDoL5*&FH1qWy9Ah_@0fA*55^y?|jScz>f_8z=CCK(&WBd^8w zn*o8PpGqE-XP%@M*Pr}n#xVMZ%2{{d1-{7F7+QHJo$r7X_K=G+m=gn8>uQl@WXX@d~CCdM3gSTq$AsQT>XG~1oRr-kwByZRX51Rmx0dBJO#?+?X z5vqJDUEt-r|7>t1&YMZNIZ5lmPtu1R(hu=Eu8PxZ2|2G79fSPz2L1Wd`6RgbqMP{h z#)!DdXSV74+!$|btk4{FcRE%s!uNEXIJp+T_$$93AAImZ%Q4zVqOI|;!{hQoG(OdI zzvXQ}F*Y5$b63XfOFuiV{mfU#?!r}ZUgOig#y|QVb{@IY?}m0(H1LD|;r~s@Zpp)l ziEc02`<-a@&cwv#voX_sAZ}e+kH7yFpBvjN55)M!lQA=XkrhJGiwtH|zs70VKsJ-w zMq?+W@7yPBi+?s$Y-=26>}kx*OD40i(;Ev7_wnuD_iy5XhxTLfP+KY~{Z`-32e7k@ zPCCZzQ=)75ojr~5+&{(8@R8Ig*!s{UA`^FbwDW1;%!izL)U| zJG4K=E*QV^ENkelRZ!<&R-ZzR`?F!xC%+e8c_1GPx!iU8QheLDd~tmFm%Ls+&gnRN z`cbiutk@jOMhWap(#a>k{0UB_4Cl@S>8t6J-T=oMn&;|DudeJt5D((Ze2aeDWAIU~ z9>uQPn3n+0(84^B034gv+3@1^C1@xxj1%Tt=>*;iBAPxl_SIi`+Xsd89*h_ObTklI zkR@P5QvL@O)ddy5`d_eSR<%{(0&6^6L4d$3v4)i+g|f1sP#Zg6w=cyWkt1_pj1uMZ zTm1+1OvrGesN~VTQB)RQi8ouBhwidVsEP7QFuX2>SAzISI9}e2L?S6`o*tyZCuP_tMc?iKrIB%x!Rg4s zn$!fMw>ueIokrYp`x~P*DN58kF=I_N*CmxC4ddIKD*Z;k5jhU(CEF+qB_b|#bh%y#_)t>OUvs))H zeu}G|8MMiRKb&)Rh!13}x$$wuv7jZsAvsVedRNQ+)q(P#IwVF( z-r}nGQu(s26zaKDKTL2NDhv&nE-UO`87fWa*yZJvXWSL$IRrQy7{r7F4=@Zo$bZ=3 zYY=~=`;e1u=T!CB5h@4QV8clw?qaGZuMg53tUJn=J3 z2#ojT_1c&t6P^sW{d21LPX|)H*S~pFTsTC7+ia+3vLB!*^dIp1sq~t(%+GcomCfaH zIw)WHx;&Vdnq{YE1V?$I49CCHeh{(^y+F#V*Rg@3$4_o49H1GgSBaOnv;>$1`7`c{`j4mJ(Sb&7S zIuS5a!bcita*&5n@Ng*{wz&tv{GsoqE(g<8cr3wiHUHCuDysv%IS7_pwLeujnh;mI zvcBY%F~T&I@yR#rOA!ZPDX2v1ByUvTouxPvw9&wg%EOJs-h&G>}= z$kx;L`K>TH*HlfN;5Tg6W%5?I8;PdISR_v&^XfVlHPL6u1bgLz z<9_8-zo&HQWryzWcx-8MxpHCMi(8-6Th9U?bv$u$@s_r;G1HheMNls8(tS*_b-1!a0+WD z^G$K`2`0D^qlwhSbTi`Qxp?nS|71M)yMGvao7-{Et3NC5{*Ld6pZlqwh}VAp--v7e z*4Mfna)P1uwa$qS#3UJJduAA3TbGn48M<8+G7s52H;`9tZJo$C&<~3)S{D@0v4Ccx z&Co)0U1jl;nDFMLi zk31X~K6zgZF04kYtGc(k(bQ?S(N-T~VL{#)q^Ny&JE8&KOB0#>=DylzCeE&{$H$+1 zG8S&TDPH=uUmjiQU5mE?$k&k;x;!DpqVfnx<5Jc2OEv(|ujvcukux<(uC@bso}RNz z$psD|{S0N6*DUziUaKDaiRyE%54^6H_M|>X6rf9Ox0^RKs*jR3OyzYAP3eDR+Hm|r z-g)8Q8vF#1eE@S}I0tAg8_rT$h;vrl8x#@`lRKw5C=Izl$8fx%FI2wD;WYFRJm5qQ zf~jeeW6C|@2*ydi?B(c<5c;v7RwQcPptrPJS&;NxNqAwyaxFYsw@FY&j)OWYN^;^- zpwNiSlCHu#G?8By4k_ph{4aP;x(Kz6DqY4wq8Bc5p=vl@l!exVakk(t^{7zRU1h9t zCTr+NCExaAOn(VOHWI)67Z)snHn(g_ozyd0-t$6YgOA zCJg00Y=_dG@aB;9XM5qu51&r-67K zm+v4vi9g#1_!v+ROLoXo1<-%Bzi-%1lzFcoJEEM@crtrH-_;1mVy4>we*}ctK0K+7 zVvUdIQ(vU>n(vA6vOymh`2u{lz#A0AuW8lz!%cr~*wp@x758uQP(4t&;! zKV*?y9ys{s0R;1h+!PMf{$QN)XNW`(b3Akl_`ETZp~8WNH!A4YX^FQ4`F&}Z)#k3F zR8)1+;~)rRi#dT)V+d*?t`wI>DHK6t+h)?MXIWmZGH}$RDZkiJkqv>KgQ-w{lb6?)D!WMKY34_d-e&nPcLp> zIv&RkEyv-xMfqKMA{{-J=L4t=t+0igCX#z*+Je$h_! 
zTlIN=huAhwx}_6xoNTw$KII8a7SCA>*SOD`V0%(&l3|N}C0VrvbK=RzBl z=ITXF-`jEK?6a|Q@m#E)TaAD8J#UYX-uH<3R-bF+T&B(Yl@+?`lh$b#|K%gjsJ&PN znVO!n&d*67krOlyG&jIE;z__8uD?FM*f5WMCh+&JmUx9#-PuCO`D-@XSQJ~I7FKLd6Y zNLNIJanRPZCvzWe&~u}Xwr4{Jc5tda;~W0mDCdn1UjM-!PPWzd(zWs2w1VDI*Tm$E z^=4wCnf;hL6!&ZY>E~sBjZR{t*m&b7)KN9T0q-};FfUa+`@YWZa|gD@*W#s)yBM_E zUkO|phVms4PkVS^x9W{)s4;G2qflXFax&N^C{35l6@X2@PxBD9XAL4lxGJ5Ai~i=b z(z4!6fHyYG6Xzh$6I=yus3v_NOq;A8sWi0{bf|C3QsrlQ!0C-&;FT;@M%tuna?+Te4xo=^z#@~5`eb9}xbHmZ$qSgw@^##%F47Au zHfc0q?+LWIL*XC4)_BX2x+56L%a~Pem?v9SN_$MqAl~o~q&LW~2EPeIahHW<1W7u$ z9Wq~-EPwOr8V_&~D*V{E?5D2xJP!d@>DX5A3wP3pZsoX#Jkl;R5~?2+-C;~eQAh`F zARhrbm=6!QAuhaCf80mV3*q91E$N=4(9Y^}kjxs0O?=^%JbVn=5%`>6gG=MJ^7xc6 z`Xx};2c5RDB#Z>?mB$g$>dz#NY#_5gN1rHlRM`@UV;_{|@{rGs_JYdvF(<{gGsX{O z@3SdYhiwEj9q^u)xT0VlXts_WQ$R_P0n+T?(p|P(uJ+14-XiQC82azuuA9&-9?LX}&kuS`hjg{5dTw9Ct7tTg+W5b^c z+Sd5By{dFRCAq;vAd->Bz_xUPhf~HU`Y|bbcK3LD;+FUvkLelFFP%GjWG)uw+p#bM z?W(H3Bi{-CR&v4DbpO?u&f66D?t$7?*oBi%PETu0X~ZdwWu2X7y#GV@$1ncUZyBCr zeEEvlLw4LnM+Mkm!KY6AdEn`Yr7L3Lp4Y~)7ri{X(}!b)59_z+-!M(RG#iihsJ>x} z{Cu*ZNeOH&ZmEsO#cyl(Y_xmlV|wSonCm|h-}_Bppy}%~(Lei8%uietOC8Il*_ab4 z)80?w*Zj+0@w@~b(>O3L`L)C^{fiB}<9v!{Qe*kpRLssDiSPNLACKSoou^}VQPx)T zihSESn|K-;Z+Q(q%qqRy=+AzX{dQuaWk2bXbnzf1fvHPEE%SdBhU)~SpN`Bg2^CMZ zdu}78dF@Dne2blHvOfhq6^GB@z;+N9FVx2d0QzIUEB{N?-+9|YeBbwcU3}h~UM_#+ zY^ zOrCZd1EF|@a~ai9kIGXOvXuxy>)JwP;R$)62!NJ}IdEPukQm(|Xb%VEszda9r=;lZ zX|QB>YqNoa$}U;Tjmu$IN9~KA=4_l_-;Tpa?})3ee~Z*eMvogOES_-IF9VzDEl+5u z)^e`xm^*hc4(u#rNZ8Z$n2ehAOVGNeo#cq`?sg@U@i>3>MEu&X{7gLm#V?7^`m8Sy z4shX^aq|J3C|u$BsW&AW8qJbm4$Ohua6*&`AoVkt7`G zG$CO!U?63G3?_WwXW>|eI?K{OazlXB&wa}aB+_jHLuY*=2wIRwrOP*RDvb_F3z9TTvH&e3=?4qdMIM$34sg<+eGwX^ z4^Mc9?BQ`N_((0W*!A+ zI;m19$FT3T6>W{qdBL{OLv`hbjMG%!ZB%hPP(R}rWG7u2N_!528GUZB+yy86*g*8# zp|&jx6wr@85*?;jUC#dCpz(!iTn)6R>X ztdWWa6)L&`9xuojo+OnaZk$u=S&<{YLk2le2FWr!L1@)dwkK9&;$HxRGT~xOVR4E) z3Q`}hpQNAc61V>%nMe-6=pOKFaFoO(f#qq&cx|6tFgSes5)|E0h1KI8;RWZG*QBl zH}BFv(;>fr2^%^`eJql(*f25PitTR4-=|zXzosUfj3!ShwP&KKNti#MAw10{3tEyh zyfUdqzYx9Jm?a1nUUqgaMsH^&mNWsp^~S5>qaXTce8rc3Sxkwqt@X8-RK0uyc4K>8 z^yF^g?ao%b_|DtogMaq!oY07ct&R0q-&plq0Pvv*15FZEFPx9T)^^wfR_y#BNSdo3`!R_e44otZF70b&$Obe3HRft&&Cty&cxiUH^-~K?b{=cEZbGZwy{V}-y4U19lqHN z57Je1$u~qS7xAhyqb_M)FyTfkvM!4iRtowoZGcUr85EF<`y+R9i0e9iJH@4^y3D#6 zPw*qCOZ}kQ3F@@n_KjUtrH+C9=G%ay!Q&io&_aKLM$c`4)pZRpUu=uR<=4OyC_nT+ zl#hK!9>`s$+x1mG$XBoqAW`ZUPSdGS^@-rm37H=j3NsxU3~y+Z0ROO}E6bs#87Ji> zC*lOuH2uMpbrw|I^%Q;zkEYu)QCx7?narttCyu-!cM$44v4-mZ>&oz{RA4IeA=9WN z^jLvqn3SL5ED()l`!TOH-Q|g@+CKJ&ik!0J<^ueXPR1M8fj0+8nQxRfH%)F^`F*4V zZ#va3ZjUODq|<(auT`edUc(T^LFG&rezfo)&w?*ylz4}54x$GmG;9O<7-hsaH2NC8 z$S?ev9`Gy8d>r5f+dhxplyC`s$as`~%9>7OQqmksJIi*uycfBO+)E(6;^rIS)JUk-a@k~ak^?Uy&5x|FF%L%0B9%9 znuvpB&P_cpU8q2#k@a)q&rc<*e=vUVN?M%+&zb-@K@NKw6ExNiG-k6IptCdZQ>55% zp3?2{9a!b@xdHkCH}Zh>x92ING26bF@WLB6x2d0nX`YtF4@mzf>8<=BEBHaYm0$BO z9QF+*4|v7Kn(x}qmX)B@CitR?9<(>OrGNAz5vcO`f50LWJf@U=2I@nu(izE3(n|du zsG(Fp^>A?GNyEPL7}NcbhGYp%Z0ehyo%c8X+lxyvwR|)djxNN0qZR$}d`gWQ0bOpG z_V)c5C*NQcK5ld2`!TG7t6?*?A3Z+hpt>64d24|TO;BQLi)AFbGc}lOl8LMYb#F-}_j6ZwV@5jl_N8)(Q#&vT?yjgT{de%NnZ?o@L z!P6gRmKI{+%H>$sP`J`vi#5@)uH&s=_Qmmn&w5R)c6)K+%=tLKz7cEdn=d8=F}JJ3{+sl}{aZBS!&_&Z$7UFGJM)L*h1*m7HcPr>l5)Vm$(tWi@pJ_ms;|{_ zd$ibi>8HK3|MPH=`+@45X}4lpvfJL;aNgR5bFp#uWct7kKdj0F;-~H(S8=w5-Cu3*xT)gBnUlHH;ul{ZH#Ow6@ zT-<;Ez0p}e8-MeQz9ODJdm+Vvo=c2AQ19ZN_${Md^%*G;>!ssO`RvV^Xw6T@{QPn( zEgp*Nue~;IyY1$9-DiJx-1dUIVvcV#tM~^#_F(+(AN=oFxo|PoMeoY$x;OmLr_nL5 z^QsP-${PZB`dvEazMwXy4R{THF!#Z4X|{YLW_NPDY#v7bnxEA5S{=<+BWcw^`?i|1 zzR9&Gd)R0rKk2*Dx$)m>PuY&LzBwtHtq-b;kmC`3NA>Z?JdpnDjShn2MySF*>j8=j 
zi}i>N*XXu-i}UMzz1|@9H?IYa?yBi@C0k(KL?a2ZB`OaN+YWF8sF3=ZwxhMCiuA%b$yjNOvVOFTpN148v9Y`o?OlK$Fsn%^a6(9g3WI8ytZ;8cLG1G<6%(T$4B0s*kb(+$2kH6vrk}Z>7D; zai0g!RJN~3+6DYU(!V9s7@JA!3_Pp0h3@cC}h5BW&PHC5l27NzjbLHazJk-b@_703ORY) zG)`E{4TO|;J8A4E)Z#9q@g^Uh(%Eu^?yM90U_I^UeT#Yn9a?Je8=H%MA(sQ4S3SkY z~@=t!ARj;a`-|dMGXd$rC5cQg{zpNmfj5+vGrqgXJ zE`$_b*<_de9ncRxp6muE_5-+GUhl3rei31yG17w;PfAc!?c%W6B=)|1Mo z0(jf?6mJ%4O{ZKWC!Xr}dYt0e;YM4wbY|J#d>?P|gtu%cV7zeArTVA> zzXOdVV8zGwTdJq!a!U*Iv9vT3%k#5wc)k_0(~Ve~jc8LYDK!iJghEetKrArU^|rTT zb7dvAH!j4*^XFq{YfaHyOC*Bv$ z*|F%VU-4!MMmDLPKNXcWO!VO+N>|ZC#zpvCs3bpY zuKq38_qN;T;)j3W@5US6cz0}VpOQ|V5Z1o+Z@`-vP&oO^^j8{nPq>iX!6r4>qD=&+ zJZ!;m98^C32lB#(xIa=}e3>6bZ9|m@nb5>M#B~w3ZAre6KcA1Q_tkCxxh}HxJ@;Zq zp*ipG>YyGSs`^7?U;P!geo+159#3|Z0=;DV#)cvXgGZ@E!L+ymFr>NTJ12=Mr*sSj zc9#A|n#+mcdM!*zh=i1{_E)|L7pYFSO_o4yVD;z8wHRmD0=toztbhZYGIb(NbR%$z z%DMKrDFY{X46q%-NnYi|u6Q~9etn8cvH?I2A$F7Rv7qc2aMPwEom#W(IKkpTE;^eR z)obM!v+e_4Xgc7tc8n?m;+C756(__uz{;e3f}0sqXeNQY>S|v`XreJK=XW-?c3N@$ zt)Hp7Zx%Lgz+>p?z|^Oc8YOR%WCy}M$WYJZQ9lZhywnW<11p=_QEkZQ5tyuxs{?Lq zti}4~nfR^WerJ5D`dOLX7p$PEI;{AWpqKC|qogg7gHVqmaMA>d?ftF5G6C&eicPj&QBdP8K?< zenQ%?PK;y?YTGJ1Lbk8^I(?76Q)wv9Svuj|zyp>%F7QyqN0ARt#!%ijHQdiAn*&^i zZ12jq7t4K-Jt%2No#tAY5?A4Ek0quXxMNlVLp>pwnzf9^`8{y$q%bpq?)c zwMnQy%Vj+^q#Otb?gVazKRDvar{$Pu|BHHWx-V@aJtUpKu(zt4fR?9`C4B^gBxCXK4m>s&Ss^SA=LiL zrLPpoho%uQR*G}Lhez^@{YM|&RtYSQvVO{wP&O)zZUD%)0tk=CH_>Onw7VY5C~n!K zGbvwg$WW~(;HDpXbW%Np1C~?{`O?-7dCrL%?c~aaayF?2-ir;whrZ%GVVB5D`O&9U zw@WL^H^W3%?ay3$f+bk`E$z>@Q~l~D)yp?=4JSH)qmG6rKofGJdm4*#yuo(ia=h2kL}WI;@QYp_|LR}-D=%W7cKfO?ZVY@Ho6+08e6^Sq9m2P&N#4UJo{9GL*TqY|>ThbIyBOogt}yOB zbWv@__{%~G^8xDj7$KQij-uCnOLmi|mn<(fP|&9tcVQj*$tO-fIAw^GEJOMYixzGd zV6Y+C|HhGRvB!QgAK44OE|?%k2cA$w?n+NP({UYCUVRksJ5Zn7l(X=FuQ(WZ+g?~1 zwrSRrLD%Am_O3RFknN8SL0366@yVi)#++0RdQL)CEi5zyYR;SA2}Zr+gEC(kZ+pkMtb> z9C10<>wMzKxU^%DPYsy|fs`jN%OBJW-3NF$XGmHyU)6e*Pw^@bwO{qq9fkfOJ&emK zS4PjoTj|U4Lw<^%1a_!P+Nd-AEcAf|I-Zjm<+sX;I6mn?JaG_!M}3T4^bY4+&QHIm zwqs=rJS3xEsZu2a96pjK!X-S5pR~iZALGvz*%PY1m$EfrxAI1F$r<)T@|0ftd%59M zH-7I%+WLcqwMvuV8H-B0uyCJzrc9yB@|9llRwfpv>rJZU2YV!%)ZVt~q6&V-8X9*5 z#@Yc3f6QmV%iDqs=Q_a3l250na;kUG%NqgQfTVgavQjrX*9R}{ zCSD8QtUo~q+<3jDjJy7n6|Ew^-Q2oH7HEVwlX8QpOfmqs@-L0+Zof0OWHz2Tbvn*%s*ObpH&%Cdc4AU;MnBjPhkmi5$5ZQ!&+q}gZWmY} z=-M}aq^|??idA)ms?)SJZA24VkJToISNrlEWfrl~bsjRpPhz2ePVHj;dBeqiCz?`r zbb4BCq+S%~&zy*jwY69|eJcL+kNzm$_o4TQ?uoeN>YHO}`ii*w?z`j87u^*rYwKR0 z?1(pf<)+$~jSf5szaxJ)pG;&uLi&WrL?`o^>NC(E`2POF;f1*GqxZ&Vyy~9#+^_k< zIP>_^@xFKce!Sz?em*)Io4y(G1%K%bsUM3U|LLDK9`04} zB%2>v6ra8UBHwy>>2S2C=IEv|)|!gN#re4I+ULa`cij=!-*`hDyZ+iZbZ9B2X6NKL zZbf&y8&5s@)`QT zC40>VTxfQB@n6_nKs{+2sK3lxxfU+x38ilYmtebLY74K*ODQN|PE}_er)|^VP5KX< z<*q)6QNR){fyu|{12Dc+!-IB`18;T z35HcZHp}b8lIyr&=syjmv)Ozcp8$I+)Rq1NKg=9>YJd*#z>Tm;9FmRd2j2QTlI9I3 z9uj3kJUJj?$``ZLb8 z#~KDuq!5fyog<^BxqVyl=$&l@V zoLo+D=ofVp$1mWTx10cnU*J-bGxXH)-hEo3>J?=Pt_42Se(qD&e}p19$MGY@>xAf2 zRu)SgvR@~9!C`+F1+*4%D*dm>Xiv7EIL+@iLQ6qk_^>_E_?dP>Ad)G0*|#JU*(Jtz z>nZXa&Tn`-Q%`abn01@6P?0jfmZ6nzCp?VJ;%9tvKX3SwSL)_f;ropl-}j>Wmh5wT zD({V9(!6O-%EIO>^C7ia8YDdm|PMh1D++S<8H2%o`^JMz; zG;auKd0d^AUo_in3#;%_hsIw%$IU!{Y)>{>JdF*uMX&sY-7T4wuG)Aj2AyreaKm;- zb?}J=e3yyn^?7}}>J*=}KkIayO)s+0_=8>0$Yhy~O-#j!GaK=dhfl=szw>u2XWqg% zF+CH5v34|O7ws1ecGw)*ibiWz=P8X_v(ahI#MJStqj|-#SnW#=W5QWBkkFo-_tY<2 zNKG=Mtq6=ayU0*fU^m$SFo`eLIT^FD8LgeiV&$Pfitl{e=f#U|n~C#}|9Q;EisTg0 z-&l!ub6QA5DmF*uz0pB6`auoZkrtchWFNUd=XE=cuM?toY_c7PjvR~A7y9vE{@bs| z|Ni59`V|w|cd((^w#)}wX}@(nz^7MU-uKgmLh2-;_Tv%+ zugnrrD7H$e8HzpCMV`w1(r1;eaM*l8u088QIt?wfD?YS zGAgTv&TAAm(*d2Zq182MFr)(_JSh`2%w%Bq55rBu0>)&{8xWvbMhAS90AHh}gPo;H 
z%kQOd!(S5s86;j&qWNvKB)oQGS}-WS@X63of1O~sE+OUcAbUx`QG=iCFqKX(7!(<1 zC~_+u#m2w`taMF-7z0;BM$zAg+ntDh2S-&BYfP!63}=hQ@A+uWUKz7TUZS*Vl|zP2 z)eVKJzv3OrO0^CfG*yyNDFRk4RIc-T!~+df?4acn3>kJ~b7NI9n~4v6_&xFN-~Xfd z=70FDapcf7%A+iXV9$nz9l7k<9SBcP69isKV}mP2u@Y)>0BzjF;0f9jCr-x04?i4N zUww7l^1N$OKFC{x3Uu=dFch9wlo32=s>*zllYs@E(JIk8G$4OP81hy5RuW2#LuTty zg0gM1E7(Yu!_d(y_`Co_p^E>x^<*e?j?im}Ay_SUSr}=*45fbb5l02Q*3E*5euwUr z{x_-*D{!y#*Vkl%Q*1GfJv^2k9ek0C)G0X z<-futd=B1(8n!k^JsSMPcnRvQjx(w9O)!Bn3B_3P-TvemeyKwP42E|2#2o|0 z0d6~7)L+}J%5m6!)Fs}e55N(s>@JNb-!+&I*$X?j3Z~cjb)YKnrQMk+NA&;X{FZ4@ZDNkL_3qil>St9oeeFL8F!6deqM^Ym!VCDyJJs0wgI# zpP|Jx7kz{lXBEB_M)~Hos!kTarrUCtY?!bTXB@y8$8k`7IRL9XPBe&d9KeVC&|mU6 z^UhPQL?<^8QjSbi1flByi$HY0VOtIRJ?SZ5A1p*zq{$a)Am z!YIE*SjL_8II4i&Q8(o!1L^V5DRM|XNneCxK`SNR50oIw8p*RXsIsVAOT6lN+d zqoF!r72b~*5lTN+i98iOa=#-$k3vp>3w;g=M^h$^OyUJzCrC`#;KO#Cg_>?LdCEuE zfH6Nzx+@P#r#S5?Sa@Y2q}P=d8fd)NxQ}z>SK8{}Np?01uCH#y=Jr;O{~BlcE?Yy+ zKeA=<^Zfa9(d})-$M5}keBayO7E?#BjccyGA%6G=zc0S%EuS6FtPSGC$(7jL+Oo_S z7nVHEZ*OmU?Bum|*rV#GK{t*l6B@=kHY*~Hj{F41Sr#om^5Og9>g%tL=iT~z@8aIt z>3RINU&Vr^LXSASif`(*{#eFJbAOaGE6h#xW&ENs#hYrEoz?R((4_J!zve5Vb73tG zEwp28b=^S9{{SrHR^Re-Iyi|Nn_QKjW2g;q?8QoaV%|*=~xT?30f@_-LGd z^uhRdKk{SoUElf-tzOb;Y5G zr_RRHzw{gN`#FIlp&b-$zf=X_p!wy^N7KE$}1z6P2~8qmPYhPolRE@$EQ zf(K(7;u4-}XD3V}wkpry1BX#7*arJh9km_6=NlnP(hptS5#TsuQw$4jCDa8s+RFu1 zAGS$lY8wJy$A3Z{s|ks+=#VI6LBY07{WWitXBZtPO(*lCswXNt#El%w81Fnbj0t~* zGcC5ig$LD@I?R|NakC&!uy3qvjgt-%95~W{08ioD{%Vd#EIjcxH75(k^oeq^+*XCg zJgZ*XmbuC3O=HQ^W3B3^4lm?_YY0=zmx(&#Wnt|AE@4x)q=WZBeuje=(hDE15*Zh} z<1{A<4lo6$TY+jm8Ce%a-Ns(PE%h^Y75$~G%hUGATOeseQg+A(_VGE5i!|hCI;q#% z2vWz^6??!(veQbsW5OX-wL`3l~S1|jol zXJmn2Q)zb?`hT()_#d*91jd~Vl6{dCq9IOj9j2q!FL{%1nsqQQQ9iL++X1gBBlND) z#+;X-p!RET3{W0^yduIaAj=Wia3p`6y|9G;BpAwrlQ$)L1FWh{e@S@7pZy8fL;3Wj z@JU!uW_^U}@=c3I?-<`t%F zUK?nG`>sUtEYbuVZab+Ye<--_iKL z&W&Lq9bhXY2I~q7_^Jb%^Nn@FiC;I6o(umN?I&8O+@E=9jHTZynfNo8N}~)%Ut=SS z&gRqOM7H_XHom0%&MA%WE%|I3<9ob4Xh(i(ZzonyorniM^k?z#$37^ZGnrcl%N51~H{G+K>(@*110k5`t^x;S1 z)Z>rEZ~w+S<3kU8FqUFAu2?!2#||Hl+it%p?zroP!Dd~a1?Q~++~ns~_nSM?)&6eu zHOB4oBr0^X=&!mo>+pKnV3*HrCatvN;l-s`*xuCBzHb7y09ebcXJ z#-5=3(+g8^_~@0k1Kdnv-H-J_e9dtluu{7tS2mm4cSIMZ7x0D|Qgg|D=*ogBqJPf) zK|X1(-X|F`qNk4XMla?_ed#al>j$FfgRDED60|wZ-5BYoI!hx`UrL|F#?U{hgWxAL zv16(UU)(yQyt-ap*O@Ft|MlI^bQ1r2&jzpKIXbDa~FY#v20 zp^&NqtX)&3-y#m9VZ<49vS!eYS_9S1g?_jkgzhh#;sq)byB&!Mvn5GN`2Tr z+hY4Il&yY&@_}WYUQsyl9FlbwjRkwV*cI^#|DZ7~#Kji|lD1GDI^azJ*hppjb>hO? 
z(mae$`SP|oUf*vSn$Qf4TgZX~`R}S;QF1`rSbNyEvc%Gl9M5(oS2p0C}FFRv5tw)89;&0Pa&@XP_X4~c1=xgB1IBorLU8xV_8h?_P>{I*b zse=1BsfEv4j(Qo-2kL9UBX9VI0`lvIL+nAq=XzMhB zVOY^oL*0PsFw`sZ6NWmdk2ZjQZ+yo-xJ?s=zKX1BTLZ_Z1Gs4oU+9}@6){!L1nMH4 zadt=&@^+m`7iZi^$88`fpZUzYv2{w!^2mU?%Djqn)_uJGscY(GTpsYYLE}_EVEmUK z#T+T0!55v7M>}H&=*Mi}C$Jk=c=*P=9Y5}!wPu11bG|RuATImPI)Q>WUdX<;o6E-K zjivNewT)$I`={|*J`8r8bnfr?bChZe#^kBiY%I>rN1IIo8bcbg<@3`TSCogmiBC@S z{bc(juM1G$?+>;uA3h6UGZbyVxxi`YmU!Rpbz`U7iS9Nxc4;G7x*nTQc*}M}^4X2) zx#?Kfc)NPB6Wg6mv=-)LYC+@Vw0txkij|&;O7W!esfUEIBiwgt#YV3mn;O^8Z*Ilb z-cB?$woXbee748;K?MapeChn9UpX1`8;C5?X_*f@oT%vDb3D^Ju?$kpzvGsIb zlCQFK-y0wMcrSc=ZJ#%yt8c3;8+K?r0%MqE%7zSG^H*&-GG*NG zX3DYwI=F8s)P7Y1JZ4_8e+tG0jp!qng)Z|ntb?-- z#r@#9`hHKk-<97xwlCQ#=|wNOGXC37zdf!v-iob_v$3^$O0aBT3FfnD$)MOGWTzsdN%>e9ZYAB7T{Tni1ipqF24ew0>qRB$jxo!Y}Hq%-J zNFWXVd@chR0=H1bU((EhOj0fcPk_~cDSS09jf#AHGhZE99omPCFt0e`=07+58&gMO z;m9p=;;$3jjFcfek8!dvt6-c8Neg+YbeDE6wTIP@ay5u%w>DBnEulzeUYEv_dny_rA5m0?~Af##x3V6|eQQ=xTJM1tTi)l@YyF1-@ z{K+R`<^09C@s_LNN=*{SHF53_cBJgt#=PwTJa*8;8)?YPyQlRBC#0*TjsLFg3pen!Vv#+>&cKxI$Usi4*pv1vhY{ptq7EEgLWBx z#gVa*0aslB#&NJtCw$c(@JWLu6G_=96dTd~#Fw~-Ph8b}WH;13+DAB)Wn;*JadXgF zaWBwGmutX*qP)@Sn)9$~%l)A4tW!2Xe88Fj9QT5}b)@t|iJQI&app0Z5?$$NGT2*> zn6MBR-r64sSRsOWfj8N)8n??^7Vw6l@JQ9RZ!dGp-LT8U896`q7nyv)sxJ1P^gTzN=1DSs&;?wO6KvbWK|?4=(Hc46didt%DAU zb96%Bc*#G2%V5Wpv|)!*5Q7?hd^t+*rDY2*@C*$)Cu|8L`bZcqD2P+=@bmrUnqzOqHSqyvXS=US z%1%tr&c){TMm+Su!*Tb^UmC6UoL>*SwWaY=a^USc2YhB(%f{hAu55Zhoa5Xym1EBY zi`V(G5wanBxV^C&hv$~#hyLkz$M3%5f5!6ia_p)eaI+b2bN!b*6RqkO7DtXP=s%&3(|#48>^*vx z{=aAupB}TYk){6;m@-=rMFS$S4dt+XPMw4<>_2&IEVlfZ#G}iCG5zq1Ohgm9ia=Au z(55=n=2h2u%Zw=yoi)vYdWYeFM#HdLQra)^CT)JhOz~-l!8j=+z6!72bVfSk_0aYy z(B4FA-5#e?p45@@5J0t`L*qmo8j`=17ncPeWtC;w>d5f~xDqcL0IZjy-6D2^jc#%hnjnd4s;sQBwO?LkPG3?_MyJRS4_7Swc|m86Z=HF9z~@-l4kU#MSau8IVF zO&Z~#z5-wRp)wrz#+FQRAgz#M{&FE|Box1jV0;=Y3aOWb#I1~)iSAz#z)j7icI2y* zZ(LJOXXIo0U1yf}hP`5^(G9<{m5jm<5Y8XQ6)c0)6EA4{LiNyo^xYCD!|r~{)x+uh zjPhM{E%Rk}$r=iw=#M~#^(zeLErpJQc1ZYx_RMk|qkZQ&eyjrHBIT={8Lf{1NuE2n zEJRAi@^@;J+D@?Xs}wWMg7xARD}lo{8V!sQ3iJ(K9q?87De$HfJ5q}mKYJ*j!Xs}O z%j?Xq_MiUXbkT#YpdMe9*d(yKD_F@y zE#;f8-R_oY19pI)*6YZx%Z&v4eR^Lm3X4ngIiQVUnkKQxQV&wMk{5zumG88aw&}_n zkn$%Qyh?q_Px?>sTKU#&G+I0{2G7DFo>*vPZDkjIm)i01lOBu88cX-^8bnUgM>>addGuu3B35Mgi~l6FwFqyXeDIGo}_6qTd*g z)2mz2Y0Sh8cis_i{^GwGJMu-&Z+GJC+C|?$-B$bT$RGAXw%P+flrs`W_Z;M3)U`;9 zUixjBONX4M_M=~L;}xHoH(}Cb);H26Jm2tdOn5V@G%6-}^QUC&o7QR<^bCKVr}5YT z(3DL4>Up&RPZmyk4YC^>7tVWQ;>o9N1{*s-gEw@$F3mIs9vk1WNjU-vojhBtm*9KP}jzX5``Yh1jz8mrQS zZTX?-^W>CtL;8p;nfFggM$=8{aZ_HU<|fcTJvZyTe5Exv>eXK*F5aj{JMlz1-_GY7 z#poYT7P3*9%_!*U?!J6q72s*iU1=#BcB$Ln5Jw8aMLrAOhEpExj?93dtAZz={uaJ* z=I?^nLBKC%gM!y3@Ml%PH<#(| zta#=vVorxJg~Xw=ZLQE6Hf@CUSN+zu8Qg+ZPq5!_+aujg8)RzpLLtj5&vcQVdQO$h zU%0Mt8H#-^=^0X<0(Se=v&T!*nEbi_!J2*!@F^6#J30;~Kinu2E#k@MMYz(}&;jm( z!u>-5yEG}nxo_JNOk4h{xGAltBE=t0o{}a%1vw`DQPr#mvvC5%G~52ZuqdBl9J}Eg zj4J1wfnYU1;(a*Ii!bw+8{&3(V%Pi+DJa*fdzhYSDG!|iE3&B@Fe}`_bq!UYGE_f- z@}5@}f6lTTflFNQpXDi6>oC2g?P_@k)0lPFv-O1bsQznhKiZS_vHm2!th3Ooa&Ain z;$CN?ZpkwfDN8x&ff}A`b&`MMkn0csZ^6CmT4c%R^gr+C*` zfDL0!Xkub6c&LSb$5@xNX&g~;-Xg^~WH|{3pG5MTH&wkK+^f8bK3bn)mrwBU`DxmA zg14~Z7io?+F*#sQg5Wrhg@$y&3jaRitB`OLobqGk(}(g^ya`Pb!e(nYvfbbc4K4%~R(wXf97H#!M_Q}uFhGyPxLOO086>&_N&I^Q zQ3=3HKwLSVp~SI4hDR5GI(jGdECDXe98p*N>@~nG|p}po$28SKsnMp}mJh3Z@~*2J`F3NEhui7Wp(p zom?Vh25ff#_)|F=hMl$qCa)|=n5a^oIPirI5tI%+2RHUf#zD`?pD07Fg?Ks^9h%8C z8vr1ueu4lPcA6t7c7^fkkA`sKsPbeKXFdghqu=7yUW!l3nECAhx^4&;b#hP_XVVL9 z;?p;t73Zko(s((S0g{FySm7lcD=bP#Ba)>v0a-H%rra*hw2Tk0W92OX(^E&IKQaZTW;g!n5Bzi0MT0A-Y`@pl#Hg3wlBo(;JFN1%%Etj{O|{jAGF&(TGQ@xJ 
z7k?3}YvOK(9_U>yOKaJVYPF~)p{u*S2z(p?^7 zNSzv3=vx|43;O2-J9-Mh$$jwkXt4f*$N1D9vI9!B{?qR=&}Vci?dJ^u5Qafk2A3wY z^iPbc`?t~(1GyAeJ>)BH4H7=$&I3@iI-e}c%hAN=GQLU*GW9?uSTYruUz(0muO}^v z!#nX|UzYxuU)64YD?f&ze68fNP|GV+SleyIac;AmV}J}pmAx+VtWfL7l%$^~g(bl1 zO<#iLpy=gvB{`#QnRtp>R4Dung9b*Gk$Y1_R{%@@tMaQ0w1p7WQ{li5Q0;$Q({`8M zK&bLoj^pr^c)McAU@B%=NX`~gQ zInQvy?evk5_2rqPim8?#g*Tom9^+0r;V0t>;~;*Z&p2z^B_R>la>$!@s^OsD*7hm7 z%6GLKS3Z4M0tXW`P(CNgghDv|mHq>Mitt47NO0+*jCS0G7d@vgJH|TaV%>xsXF2)( zeA<4XafZ!-@W{Z~pDSj8A;%PojCn z74gbfygGjGoj)5q)AY=8IHm@;`V|jCws5iVpK4dikFJ z+$^su9goMKIvah_`^-}(Vn^fa=e+54@@w|v{7NTIom`2T`8kb&?66KB%JODGyXe*> zft{+74Rz2K*ttn2&uWjh$~9Rm>ukmqhY!WC{o>EX4}SMI$HKxfjTV~F%AVp6@olzo z(aMub+Y<4y#fA8)ul)S@*Z=NECG)x1>z>x6WzA14@kW3rpL!&I>|g#!{N^A0UbK(D zAojP;#b9eE8uA5Vb31k}u0(Hj&5Ndt6@%7TbTn&fwx`A0QZ)Gf-LBgE3wRVPBfbOLc{KGcgN=pJYDE9pvPw!K_nQyE7%+@I6l z8D0eqMn7gKlRPso%lMiN0N`}I*a4KCxXUqDA+6#s*CcXweO!}Ka4OHfIk-gtX~-XP z4JiERhwU9asVytbP)m9Zc6C+HFm1G69ncNAb!;Jca{(YaYTX$Me*|z)(0Fp3cL9e& zPxU_$qC>&~?>R@2({Q5f@ejVS1=0uUoySsS0M8UhHt_4JQ(mdZ${)%Ax}_(jKX60F zaMm5gEz`nNAH5kBG3k- z3D2n$1s@4nZ^2P$tYz})T&{DBH7MEjj?6Fp z#NfyV#u`kQ^+uuKb$!TO?U?N%+0oYM4{1~Y=x_sx(kMH$cPa~C#a|t^t@s&UmSEac zKK+OhK1@@UkD|W0Qu-K4mY)LshhQ1&TJ)LNsxu{!H`>Y~vO2M#lTq|tDfAQL&-g)owf}LQ_4r9e7C03u%~fW`1zfQu)sMY| zUgX816R^tnt99vr$bmL3XU2Utrnru>;EXrtg(mr**({n#!-FDKR&~uL`xzxBeB*|G z4{hXmNOHUc%A0lqo(9+WV8A8uJe&axH%0Ne3@-ZoeR5Pz<)D-Chr01S`7S$jA|KLO zW2%0XVHEtDW$MI+2#&QTMUx)Y4IIAKzzb^l1AX~++scw0O5VQ4PyJdBEb3WS>Bt}37a#b;@aoqW z_xWBg(iE}c+kQFvtf+Ou{3=Uv+Q#wHU(hG}^L=!DJeoXJ&w7w}LKgm(f1}~GK3>O< z{Jicr!KW+nX;qKpG@&$Z^7?zJd)=6wnN*&}#oeuV`tis71lR}u_`UIG|L1)%J=2WM ziyisMM`K|LfB$ph`s=Uv3!d1#+icIOO?P8`ZPQPb`R27)%7t0YGi0lID$jjfJh^Y} zvJFJ#Mf(x?dwizgjc#{-${I7kpvdxHNAwvLjjY6^V|{o1$Su zbBfiqExDBVb3E;s{SO`I3C{@;*_Ip$v`0Q|yB`hs($%(_WLwB=@6|}mr+6xnzzq@} z=3${(mPs(;UDvzV8^Pj<+8%KOS5;8H2K3stf_$C$COjP(dqp3~oAG{U6?!RDn2j$l~8>i`m*85cxav#_k5LtBLP;% z%Em~7;mnT8C^Oe~urEp{A376Kg_B2bV$&HqodXWX3;vIqik!VBT2*u;)H=|a z)WwuP+!9EGMDxcftJ3*$8fyD-??h+jtt#j@P&pKOXd{LChD~2fF3<_>j6WnNEnFOI zpui?WR`p>j1#UalpJS{vi9V^Z2%Q-B20-y6{;f}yPT63;xw7aD?Z8w2_yxejvuRN8 zC$4{o1O5v?3Q1q;4dXOtXi1ujJszf&GNQ*~a@ox1Ans85Dg4s6>Kw%K6Ww)VI`|DI z7;j*&vMG4D&Upx91bL)azez#$Bn{c#_-n)&=l$6e-=Hq~L6J&VS(g~rC7Q&Zg6|k* zy2h2p7`{Qz_1W%9&v{}WlgnFIcpxGDA84nogdhD!U+FlzZJmWt;})Nl;mtGL5pM|g|oy+zS z9>K7&po>4HvGUdEW9 zQK)+RoN!P+^@TMmIL%P?sE&TtSG5@~cG9VKrLUkr%p>H(({|nNhV`y5CAjlN$Ktns z?FW^&CjXrc0ITMgH=CnF-9cCVJ$+5TWdk0Bb5HW&!4XUrbhtK=%lgXjK7raI`zyMr z?t>kKUgMyEbW!x!Z{d)u!?-u2dWQ@M42O`jIv*`S>1KXHh@ns zADVdK3lF_bm0x8b;o-2b98b}57Rf4ru~ZTasPPGPN0k>lIV1=>7hKn7aO;Rb4lxdH z;whv&Ac!60l?HxLJU6KR_-?&e%lauRhBLr;mf|&Wba)Gqg+0m&E_VlmMUw(4wDDk2 zFl6YHP8bwV7zw~)#9f7O=;sGt1Yjo<%tOJ>Y(1^COtP>%IH(i;iS$*-Z}E$u-7M9FkcH(S-AOUjmVK3$$iAWets9myNg zQA5Eq)Ki^D%Z%tyoh%fhN}%@&U`RgkES)+>oPL!yf;Mws1g{R-%7X}Jr(t9`&@r9q za~M#iag58DjpK08ThT+s?Yb14;Nej3E^!8e#7PuF{e*@?_T9Xa?v(2cDo5P{Fdsz- zzVuZM!06;Y4E;_~T>YlV%fA*#9)_>ioNK*0A1v@zfr>`kqEY~ZUTGrNI!i$&BE$C0hy5aDh){^BV2qntqgN^ry^c zJS*|4dnFAbbBs+KFrX}7r!MKpA{n3`A{`ROPfX~bnQDC%sE%DVBu-zMe+x`$)d8y^ zr9pM#o=c69uL%{<44eTpd$m_6Rkx~)^Fo1cm;!py^Oje>fd|K}qp@%0;!3QXU5P*c$Oq%Me)*^4*safxTd%)0 ze(IE`FfJ)iYLjk^)&FRaDpPS@Yz$Jv7~aIgrLizwM%DYJuss7WCo zehtHzDjMy}iGO6uhRC`3g?RUS-zVpH5WQ`U@v5iCO?xrKH|&`l@Y=OW$^0W9yEoqQ z<~PLAV@G23>{>QDyhycYg2p;`3ks=6L?=zf?25)z}jQY$O)V32Qt z$Jf0iBpe}|AWK-sTqgC`zmmVz8HQyCa#0jE%>(M5fwqyum911zPL?KEHih_#lbkvwkcZ@l<{Fp=h$K zuhM8ZA6g+PD3cAE^p)YY~ zdNE{)54)1F%=frZX-fP)(|_tA7<^Y?d2534m$T_JJqfD_oEX33h=@RDhIdF!mx?otVxpca&l>$`@I66^;$kYvjA7|>QH{tcWIgz z=}MgMLVFGfj>;c5y;P@ZGkuIVoW-@yo+~(B0`%$6xK)>B&X6w@nk1DvsV?hJoGBxb 
zOa@&^$vRUli-xw}b#9|;V9gH)N%}EgoKvq{m{97l+aVZa=&~&4xdP=Wi#kk$3%c3m#SuXh+)>YuxgsGF_>bo`1`8p^M zz0~QntiwlHe5ecli8FDf+~9$-c?Pc5c>qsY0hTaKR~jY!2TqqOa3kfF?fznD^3fsj zGpdjLi95r)uelKY!GvZ$yc9TkNCn47Ut+jBB$bNY_>QhB7`sXULBj77@V7~z|4)Fl`ev^;lHIj-%e z88+u+{x-j1IEyCA3Yw>dxvA4r8*6lvpRwyWdqWU6PnNL`+m(;fRbTE)FrC4`im!dUGz7D}QRc^8=-$i5eo_q=xT6f2retRQN zl8M|HAP0@Nd+3Sk6I+rGZ6G;o+~4EI5_*9S>x}-6OAp48D{Hue(q6PVZ748WjE^iS4X=x9pZ|h5 zySfvnR@d_>I#J70^Sf$9w1dX%cQoe6KV;3#H%gT3L+xJQhywfQu5U^xjy@CMhd#o% zK_T9rIW^t%jq|qX$p z?Y-UIi3=CkOz61qd!wY;7N2wp#d*7_(D)4sd_TS=+Gb|vy*7Evt+&L?@`67bwzadR z0_qF+edYGnlKS9XIUTvlR(&?&y zs=c~h$)>Zbvg%7+`IIbj55u!f9 zbxS(g7EF@|x0KfAGa7!p?a|pLAXXza} zuQuR%Tz$st!Aet-+XmgMui-cC#F-O-gEaLcaf0~Mo_QXxGz>BQOn(&*opq}%>+D9`x7r9<|Ft1;h*PW+zV z^Y%WSIf!#3UutOohyDc~OG0hn7Tj+-&e)PHE5Fd~wMZ)=?158h#IgN_W=^y*sQF<% zrSg;GAeqER(#sFgoCm@WiIT5rq~3&iZg5S*0L`fbgo8S)NuxN>HRq#`Txaln$cmz~ z=_F9_pm6mE%qSxe#1-9D@A24Ry(Jq**XjCmAK*-#x^zr(=B)bY7v9XGJp34F&ywWx zlh^4_9{93mMJcKE916-a!|1zkrsP$daGI<{m~E;{>ypmcQ{TrSSLM@Zan!+UVw4*u zmiiRDuEu&(ky8p>G4XBijQX1L!Yq1Z%bWl$&cMW*7s}DWKuD2t(T7Uc!! zdl-h-bvfs_l;ad>wx5DC4s5fcrh&L@5l_-H&R9;|ACjR1j)O)}%&_j$3YLcs<}b-E zy+}HsvR;1=)1%zCPJwm#`Vjyezf1@V!?X;OyT zPqH8C+Q<^7o{_PCD-YNS=UM%oM%(4)7PTilu>nFX9lv@>{?l8d-&Y- z3Vbp8m5-X1*T9ejy*fEvh*dFfQNUR8jgei^*J!b0c}{dK#p0nGm3%}fmy|D>)#wb! zJedJ~mQ7-IIH)W6AUI8QXIdVf(8G({6FQZwIA<^F(XggponP(Qn4I$W_inrE_W1dq z`vrA`?YQ&KmkNd;StH*GHUNk|wF7uCOamFgR+F74$V>x)+6uNY`n3JR+;ZIe;SVd` zh~rls6C48&GGc;AC(llYQD&lIo^TQ^duSZx^)EYs;h?wVbvlE!Ljwgat_byfc_{dt z%@4Q;CA}V;E=j8);fuSYPWwz~-pU%Jsp)j!4qLqdLqM z1{9lsapl^H4#MKIo8FgfGT@J(-()ajg$C#5f6@hv>(W6Yct|U87b7W81%{frNV>6~ z=9vW`&IKJml^Wpar@&@V4s;hjlWz)@pg1P_k|g6SNrh(Hb+us@#|985;%tXuoCCaS zH`D1lupQ8>BOeJ$V1>M@agZb;^J^KgkWtdXj&572FP-k)h~>c1kM=kgP7taP2{xG}8$pf-`giF3bDPC&Gs;{Fwsy0nG44w*^~q zSZ9a%$V`!PX}cr1gT8YRJpTHiL0u)O{!6ze5B2#Y~rIZu1|nIw@Pa9C%cSGeS;;HwzWAuVzB zH5|7yLwiR~^FD%&c zPv=>_jwV2~9kU-^`Rd7`_-7)mVba=<9$viIiQZU0KKg-s-^>|uM1-*fS@mjo9$GO@(zSr%^GJGQs+7LaZP0cSiNhB#XRxXsbJ$Ym2 z@}K_c`+T!`Zeb?)+|MqriDeUmYzK?*Jxu~OHK}^?;fLcZzxHdQySC{`+{)SYc+Y$O zPdxF!gYlg|_H#1P=VP$CN$qND@w&Ml>t{~Hgm6qoOSJRWrAbeYxS2B{Ux3N-UVA$B z=4WDDd^WGWAuc@jXq>q3zF3%Aj*XM2lZJ71tbDj>b*5Mew$uy=}Fd zWB|L8htKE<1c=w15a;z!=N(3`3CL1yg8ZtkG4{C}K8~*I6WGaJK~b(9kaN8ftm@-w zGj157uD%H%e5~x#WM1e~2D`lXdA*?OW+Fb;Yzv;<%G}Iv%j_txsj+KeF4~LpF+VpG zYa8p*k#m~NYVtYg$BZU*+(cy)Wwcdhz~{20``bG)(eK9YPFK_0rFi=BCt~G^C*$}F zU&4+Ru`L}@8=y0OVncch*K8o5tsu9kmx6MblodG+<*5_mq3^kKlS~=gQS@F3G>j)T zI`}8AZkT@w(n26#(sQFrB{3D)j|`O1$@KLb{<%r@%P5$uc|CN<=qaz z(nocjH)6m^8Kt|d_}4VWrntHl&`pA5qYKkv*uL=VOgDENdN{)rIFW_V(ofUM{D(Mo zC#^a8OL+UizD_WB^-=iq_#>RGW1*+XtB$hJlJs+_df|^`?2xXgkGDF2ia0kLb#Xw0 z;B51ZC;1zPZtwvE?}ZQCE#H7tId7QKL3`#+PiiF}d{n@^k8~BX;Vbect<+;!rExH4 zV6ISoI_7rt7vBVea?q<7Klt=1JO39v*|!Xby#vP1{Nx4;lBh@g z7(Z>8=|#{bKk4Z~%&T<-+DV6I zqY+KmI{KNe(I@hK6j+xnX*RnA`i;jc>}8(ENdv9hLN;ZjKDQ4QDnAeLA-U3Dto`O4 zzGY{{1NEgJAP|9@zag%qClA4rt-;D!ZohI3Uf0PPSP&&0X&0T4dBW|HS8~a|tOGsK zpW7G4Nv2N9(0-Pc&b++?`l%B=MwGw}WvRIE^RyH6m@oHbWuRO7V_}f*=GILa6lDOm zt+(zeRR_3p^S!Q>&Vv>Bd79Y^FXh3dbftlx8-cu5-&F}G4>q8O{^MHpBXRZfPRC*jGL`nz>2N@W;)6Zd&_h18rXp;8{%WN*1h{&L+JvH-l(kuM|U&_zBsW4&-JR>8rWm$;!1J*HRm^o5UalJ9Cv3TfB zfvA^t|3vWV>5Gp`v4gYr(s{M5EWLXi^IkJy8z;spxZi zGZ-r3o0p;s6=VGkU7KJ};*aV0|TaF091q#~zORKk)uI z^X!w+i3@S8wG^|>W-QCMSe&1anMOPMl23bjI(8=~{UrGl=g!5+t(|BdUW_|m@v?Z; zTRuMyUvpjTwWead*Ne4n$yYRAT-y-oEaF3iW|6y%=D+R4aC+2`Ca~8pm-aIM|2ure_TG* zl;}nemk%$+^X|G+>4R8bUz5)CVrP3R8uDr9=ERfaFw>g!+YVMPoQXUR(^9AM<9)GNXw1ck2FY=t^)zFQ+AwMR)@~2aEZFWhcjEZ|Rt}3D~4&qJmLmTSf zn~Zr2EBekRRX)**E^|G}O$}XhQ;0p%yt%chxZm0!otPATE+-jM4Q+{g%ncZ*wJlW| 
z2bKaoQnSdEs%7w>l$}9^`1L%IL=mRzj4LYHSXL^K*`kq(Egu4~%2l4{SQJ;PUp%0`fVK^5efCmz=gT59&RqR8s5qLNj9~l57 zPzKtN1$oFVb;%FI0AV6%mpmtK#j&eKqXfn%^ME$69-uUmE6vo)*@7WWaSM_>j=c0i z9h|`D3sFOHUG1vh`Ykug;>|cIG3g!sMY>l%37z#6y*3}Bk1^14!oSM);~2`sWduoE zi6;9@3as1srlP=Dv*8|;*MC$tVI(UHLK@;RC-EiUqCkcNN|0YxK|i64^wJtTVjFa@ zZjqZD1+SC}Fd)37Cpa?IsrHASl=BKmxzvhY%Qf+Xx|##OV<#;#Ms zGp@=Y0!T0~TvH#ADhCWk6B0!S2d~&?(*SSCMwhYgyh*Zg#VoY@l;~QnT0X*_WjL-96q0 zq`042r?;yNZ;DYqYiE0MDrq}>BJ_f0+?C%4c(>P)&xT)6HmP;ku(lgL)yct_%ng2g zKK#rcZ~T#s{b+p)QRHpaSJ@v;}(7Dwe@ zt)4v@y{)yFl^-ypahp$-f}i%{lU6(-&bo{@7wXR&>e@}oq$k*Ix9^U87&g$V;rx_) zr#px%cuUY*-V)D$;q&9JJ6~j*+>o!gxVY#|0Qk|X7uW3{v)0&SGll%-u4o_dc23C) z{iQ!~q?#)oA#nDAZ&&)A`;lZQnE;25^F|Kqx^yVZqwDCf!pQaTJn~1(6Cmny`~Z%+ z!G})Te{q~r^zKYUhUg8ShZ?Y^01ryL`ljRYcfS4$V|(pn?De_UbA<|`U~Wzao~kvh0W zo{Af%>Q3I+6hr^cb#3~7uG1%+>lHXMvdJ17&kX>Rlap8Z+xyDJ;6Sty>Wp$%gGBH` zGiSwd;%WsL&I!XhQ*e@#B2j)f0?+!XCyQtR zCMsx}C0~tC0(I0wDPEx)MimmLZ^B4f?94DIOtwTHuhL(P>A7p7Ie)FH=hgr0K2jN1 z1`^3Yvajtj8j#uLVH9rImM*U|bY#i_c_I#(IJRH&N1vQ#WnL#(;(fAzri0PN0u`LrW=Q#EAVzudz>k{xl&QUE#U1b{OVg9^k87 zI#5qNYA7_(aq8iED%>an;sm9+&QDiwg{$Wpfd6!PBXQxzc>1!kY@?TbGDBWirw$Sw z4&t0IrIEbsQ;wB0;qq(xYsLsJL;oxEBnm}nQ`7d`K(D4g&KX!JA(Z$KKe%;rwYsk0 zr83u*KaO*a5A4`TvJvRtpg7UsBegJsbCIFYRUGq-qyLmmWI4Esa;wLsvY>r3{jxw= zs-P&itnVs|d`xh$Db}%16_SQwKJbMehtX?=16qA5+58-oHxAOM*A4<1$~l}J673BegdNL`Wu-^hxD#0_c5=Lqa2^`aBX zb9QZK;;OUOZ~o{*N%@gb+Jky=o^W(LPP@*7kmJ@!P_`$Wdzu&ms>$Mk{%SlVpQ}>L zZzXWJ6fa~MrpRa*YFwFi#C$EjsYCGAE9zGsI>Xq`kVU%v2Nv#h&5{1fc$_@5hE))GJ)qP>L8)Looxa!Cip4coc&&5ytiyw&}|BwGZrk1aX-HRtR zdDED`-HpAC_2^wV6XSy8Z9lxi*8w5O5%=2#cojYLiRZ~CH|%y}Vrn|BJ9{`W zyY}NxJsme3U5q22_og`g&`0C3Kl}5z=~b_dv6i%tiJEVir*Fm058pJTy=enim@=qi zngfbtWl9KtZ+bY6q@S39i;8QcJb{H4Q_qpXZ6I1~zlUw2z{HLIC%H@G?QbDt=4pUX zw|8)}2+Jk_T1V)815I-0o6MSF2Tzp6p+m8F{8&s+&AF}Eh0gbn_xkC-?eEGj6Yeoh zyz$eaeL}i4EBNM2GoF3?(de$6kHfd!lyaNW1Ok0vkz9Sj3wF{IbkB?T=&kUgXZ+c2 z=z#tj@Zud1wRz{?Ld$FM?znIu6N{b6}#`Ui&&&o!7w0PB3t`cWAVzdB62=_4Ciajh%bv(i%VnF7xB040|y zJWSG$(bmm69QjOQRm?c`?!0DgGCjMMdK{Sw+naQ*39pZ%!BJfR~D>97bK{3P$HW>f+3Thv6ww^tTSyK3sw1r{2#cVU{&f@UyeBtv`F~SF|U(@L0AU^~0186I& zs=(u&@ld(Nq(H^RxD|Y+o;ZPyg&-BLPnQ-5C71q8ipK%kSosE%P0-n6z;HZFpTcM$ z6e5EEn6bsQi-HtZ|Z13p0(~Yg|o!IVlqQj?-b~>@S zt^A#Cbd=W-%(lv~5V|cNrk3$r3A%mfca+}g?CZ>%3Pfj5b%{81fQ<+#Z_%ebK8a%6 zT5W>z(JA=BkM^ec+{Hi*f$qYHUeve71@w>ClORWY70^ca_E>YSmZh zf*jQHyxE{7xh%|1#roRC*jhUm9m(RECmxQU{D~hE&Aqtl(3L_n z9!rZ?#8p>Z6R-K~H%D)Ww!n5Xf8nM=Kh`!kqqEyHecWW&XHG+P4b~v(57GmbU~v+T zxCzsoneyfYHf&sd?KLrXcp=u;SL32+Nq;NXM%k>$=G(635|dNyxcQE|;?V~mi4DI2 zV?B-?zb2+7kLj6dwbLLjboOGk(~k?Zf#)3DfZK~UPxgs!+e7+=^|6jgN)ugf7GzZV zR<7HjYm%SRn8SFksB2zDjZO=~Jn!P?t6!?ysvP;k32wpDt}L1}CZpf@Z|Fq6iEQAp zQWFKip$JJ23X$>Dlj>h^V-Fj}G9Wik@KHsUX)@Ky<4l@AT_xFP(+d}KL;b@Z8)^yW zm!FhCi6ei~oMLhux4x<#U7I%7M4o<-5j|Jeo_kPE$zIeatpXZYfkXmHoT$VV>0fla zjsk$M4fccL{?RFUHGalVUn4V>CCaq|S$Xq{khFRInpm+j^^D>oXCjuV`7EW1u7HU3 zZp{v?57r4_I75^2hv}+ZCmrg;!iOcY|E2mtl{foU7_vzaPUuC4oUat~LOJ0;--vr- z9DgnYOv-}FsgI7He5f4xz*`rR9)fKG^yv@PaK*p_;IfH}b|nFRAS&|(=R(TkI>qFC z%ZFRnn?hX|O36?Zd>K@Iia-aJo-sMga?n2#kM)gf1%{~_vV97#C7;0j z)AtsrfVlbu@u7bLgQ{ytN4fUcDLl3TE}HlgPYTF$SqN4VsrGX$Fl3o9i8ygxLYMkS zt+R%M+!DY`yHHMup?O^TkGlDVHdH;tdueH!(W&|||GtKtF{NZShD`_G_UDE$4}bCo0M@gH$7CZ~%4Y+f*KBy@ zzs6HGw0Yd6jVp_!HVV25gM=m@K}^*vyauJnM&bJlZX9KcG!L^e{&0+GCj7$MDh{sgCB%8$euU4aV<%OBq({a@lCv`vO#Ct z_fdB)UWn}rC*svFd43!}vKZ&joQ&PhrZ)hL3yw{Y9@qSK$kOMmt6VXr%jcCp3XQhS z8v7fp8?uHZIkj8U^5=Hr!pd4aDIf5uN1uwv9(_FC`zL=IAOF}Vy}q*|KW=M#+y1wt zE#jd#+Z1-`>-ZEBy4$Jt1&==o;5bswd#v=mN@E5<*ho!#yZt#MXWG3S#0gvzF3)HD z&9k$I=?CR%*#bbF&SNc;jf;|vba`ioje%Y1bi}nc9gDB}iZ{i^#b+ezb?Hp{*2YUa 
z!l!1tfVJKs7wHQJK3ejfGIT9e=X#N^ zbWFThzP69>B}Qym0Ld{l%eW@B1Lej}PQ3CJSH5j*V(+T@%18_p&hGx$z>4bz*V#LN zqyXR(peoGDNV7ylIlDQ1vsXAc@d{mUAW->K7O9JcmJMJ7<^$qtT-YI0IH|t|8+zh4 zC~5@r&O||H>eIE$LykcBW=PVVM$oT)D%WrUDhmx;r9z%?DMA^g7`G>+G>#jHbW?2^ zQIU5#j!SW*;3*eqM+GR7ic*y?Z?fsqXHh^yyDm+RWDauYI5!4b>J%+aVy=wYu?Ani}uS(z1AF$|7p-B~{QCgh;W6?L=gvlfIt3RXImF1V!1h;*(n zvY`xOY@9M)(rN&Il@)oeY#35*nPw1Q+p;@G+!J>G+;*f_{CUm7J!mX%<0FjJ$r+w$ z49dDu4&dlJISz4@v`a#^Zyw-CN>f(|$en8sgaszE!K4wdr0cihfSv*??G!nj%h&~s z<%wKLNF0=P(}5KjMTWS<&)#^?vZ7x5ekxb8^drYb8QFd%3^OzAYs|}Wwzew)9iuL) zOvn7v06wKb4+}URXfK0tRQcfc8EI35IylttHQw9#qO9QXRRG6t$>RR?c*amhxrf>p%}}Gudv%1Rl8f^37|;W*rl)sTe9|A6LHkh4Rg1n>0D~Kz@wh5^MxMybS-N3n6H^5Ae1dQ{{|NL?ph%BoYOdNCnYiV7h&e-Dqz)h?4c z0jS;fn7|5#HU>9km<)Kb%Nf1J?`DFpYjDUTQd#AA?Byr{twyJBp53Sxy@HRop9qD2!KPfGJI~IepCJ?XH{dcS1N?zF|nGFJMj3WBQ(iI)EQ?#g96e7hu6*0(CCqFK6?u)Z|Ni zFt~fRMj6T)k%H3DJtvc{xXOD`T4me=B?mu&&Ntl2bD9o65vevJfZNx#yl50&!wEM$ z@9$!p1R}nu9$LJR?ujPas4~b&Wj*mNFwSF@o(YukltXpq#1nYn;6rtSg7UH-i9-}q zmthDh3!bu&@7?jP4?=5M&H_~$aW4dw`pkskl75bQU15=NmN`hX|D|i=1P|$y(c$)l zSl4M1l}`Ji9hB2Aa#7uGQ*gn9<)H%Rhk7_uo_z6NWR*C(nNHkgsZ-Yuq-A^gT8Hrf zp^L1Cd6II3);A2OmV6P!*Z_?*rFF$9zz$c?UCKC(2)NNJU2+YsLVLn;4J>i=9^a|v z(PlccI7U11r*gF&99*!}r$QY0`IwP*LY_)OVe(0s!YA~)4(YdG-7kz6d!RE+x*F_^ z@)WgD2q>RqVcHqLfTs=xXz>jL-s*vFx~>c^jhxGM$_ljDG-N9JqN}vzk+XwkPbtz! zfCut8gM%{!0LT1+OWK3_v0Yg<({&IafOeoB;^1?=Zb`EbjCm7G(yMgf(%`r}c0Xwl z-;jjd$pWvDB+a#bh`8@KT}RC~PSqpvV_(y*fQUM{Ag%CXda|9(r+~(6A85;z|3N1o<%z*F@*+p%``WIXhd55#m9Czx*@eC9i&UTz%_J zu_hnz^xAryS-I#>F08N1-&HxrI^>D9^~MhQ80ZfQ2VZpkyd|!Pbi|uvm29ddOWP81 zQ3lb%_jTLyTc+f*aFc&#ZZ_uTm!my16YZ%f``0b`I88rsCtt6@lZ!g*sQy1UM<9nA z|M*=zRk$!c6Vt6`Y;CNHJp7<1PAKH{D4B=h=z<_q)7%W>%V z5#xFK8&FCIlU#hdyQRPCsCeSQV@#VG8 zpivl?Jx`hdQ95G=e?F&7{WP?2)`#OJ#VcZj-t3i1b)Be^?3d{0Qxdje%J3`drB|#u zc}&FSDds%W4e~4xZ^+&>aP5jJv2Zg2Yk{FtdFDvjQYN#_S7~!>G}$0u<;$gseb zdMTJ|BE-1{R5{23Mhy!>LmQ^nYm!`t0enh4$OD)ZRF#BpDMWZa2oBk7Z&KGawJ}Z~ z@BlW&l)+70V^KLanZvv1eij6Jl}ClC!PPMj2A$*Jq-yg3z*L5Z(+rQ9TZG!2B9vcC{R69ZOESopLoglEp;U~2rg0h0&AJ^O*+`0zD zvS++g8UcP71!Y;Ka=@ug3OdVD_TMxH@!EUyr?MJ0l4rw9H-XW3>&<%d16XBa3X#H+Op&)Dsdwl+ zYqt1P=r(?guJ^dn%C&ewmW>Bu=1NuDjY*;rS7t_k;WVu=F4X&aE zvH6V{Y7g|2r|lUzcO@sT`@OMfPA^4gV<&#$7v2%~e(Z_h6I{GO0N;u(*pk2JcIS|+ z8|Z=X@rg|;-Q@uk(NYie7O7D;A?f5oJi1Ry9?sLXI#kYCIGj?hG9P`ePovDZf>!8*-E5p3i(i{PnlKJ~r2%jzMoj zkkTNP9Z0$Gzs$RE=3Iw0vFww~*K=M~Xyi;9mw=Xj?YE+t_LAp-Zs%2<)1YtoR08JT zA$dy<2$b>WN72ATXTDcU|0TbG86S#Go%+Yt+1AF`$%!|-_J+5$nv+-I%(6R$Z%0fv zMGTIs5KnTGoNF0dUVDHc<&_C21CAF0N2JtgBU%K_HI0qT;hSANq|?dm&?}??Rv9V6 zu$+jj-{ZFfCpgofdJ``=gj99HgLhDKRz7br@FWNZDeFVms*{lTgv(`xPe<;K7A>WI z59sh?`e`>B6;5bCQbfP9tdI^e8#Jhd>^3FStu4BgkPFN?5PCoc*5peOFg$pwG9ujk zZ)kEeKGu@)T#lv1n`2`3xJoXllTjf9HTYCkbz62+XsuDc(?M(G8Bxf%uXLpH)HGuJ zxP(y9ECU%2Sg-ZaNw^`x!VS_v620AS-1YpI#_U2P{`1fL*Lca@FN^uPmJMu6y$FN9 zJLnpJN<)K~I*w_jKDCGSOb0E3KHPlEP4V!f569z=JQB~l^$x+wv`J?Erk!||^elf~ zL#wC*eSvl_F&!o2IiaRsn(vqm6AeqO9wlzvoZaxBI*uWt$R$DPz>LCkUE`M&02k?_ z$3e)xp$jMT^PI{`7^RVLDW2zsis!F_yDVIepONSPDipqlhFj&i9`b*AIN%dKIMCx7 zFtYS1>D5S_2|$K~CyCTrgU^NoCz&`Wnd8%tzsmxdI>=R%=vtafaaa%!HsvY{YEa00 z>xX!yUA+e@u8x*;Ca&S4Bqel#(a)gkYV{T9pDs`Wu-K7G?hq&EN^6A&h&2DyM0%xy zjc0+aa3cp$h9vG#N<*PK3GKlBl z$<$E)kYB=J7jp-^5jo9mkyiyL^^lgScJc%|l43fj&vELa4NRkJ&wRs^k)>)`>UEn{ zoX|uW|7Cd)LNd;=N%xPgRn9QOG?gi7beAlXc#wf7W`@c)KtMG#wak{VT$c;ar#F z$V_McfkB2&9YQO(@WI(~piZ4>kb>X3P-r69O*B$YqDwU4_%q(w30IaE4K+5g_{eyL z-@uLP_4W1G*jSHepL{BQ>sNn0-t~LG8;i$oh+A&CBmT+vd`}!Zz8FuO>Bs8kn#VuB z@5Ka&-O0UvS9R?A^{L$O=7}`Ew}sQj6KLz}8y2=(;9!Z-E=%wdse^RFPjZ%4_|1}-oIMZy~Kx;iy z{{fCgeAPqWr7zK>>>dY}CnE7Lk+&zqI(%b@h0zJ@=dLD&;;WyV;l>viV(!>gu`oBI 
zGMe0Uw_{7~$_)zYWf7XE6={ENl#I#8n{I0|y5EbZAGkjjls0=M>a`A!k7h4By*#!hw0%e_^ z*U9riUEaWG9{5uzaF&;4Ay}uAR`PX7dzD?rNXf=GHYg{gS*(oPhD%(g!~gmN7x~1= z)|osn9PsB15;NcJ!F5hvMLTD~!HaYB3vKj2rK1O~pSlaa^i>m@kZ^fzo0eAvb~H`F zjiFFZ1d?)*S!L9!CQ(J;E&UhLGoIDz$9q!s+ct)vNGl7Ln`Q;6rK>d{}yj9ixe z6bfJ5w9J}yrV{B&#X(t@A!<7Xk`e#YmC!Isb~^}+AD7Vu`8rj6nMJznD`y>STJf38 zVR{0m%>$=k5S9g&2h_v8Z=#6=O9EI`YUzqUa3)pY5f$);-lXLEym9HEj1JeSKW^CN*qHPsA3A)kKk`gF zv9t*lhVk5sUD9Q%0yuOA!mt{rY*z(Gd9Rt^Z(u-FhP5E(FBS>q5{VDD3o-(fb6yxe z#Sg!=3_N5Vxqc+v#8to89F}+AL?+0cYUQ6yAaVhvO##2s4X1+e&bTIAuHId?Tyqro zT;tL>Afe&U4&-aG7|hxUK7<#o<%4dnU5uU0^|*NIL_B=oN8*W(-yidPQ*qV8;W)fF zA4?01apcglUz@+x*@;uq}k~*WP(o9KYqZn3$c9^IMy7 z;>`Kj=yqdI{@hNlA1(Q6$k1;;L)fB|*WU6O7hdb@4Iz>PeqMi{2SMZqp>KkN?E$`w z%8rYEeDKy(OMZ1DX5`Pz%;gjRyyl;q{rINv%}sV*Sxg(#gp>XR0xl$r)ZCEP_{g1M z$&&R!);cF8#5taflWup!N2j~)&mjE7PyE|BeeP7uw`St_702Vy@}ZcSUWmW`H@+y2 zUvqt|u54KEb~BlMNW6e`!(vwbFV!eEL9q3Lo{grzT@=em-uy^^W+^z4yi^9{Ob5 z^`g7ursv%l=QXEjD{ogk?n@>)KjbF;o)`G(iujb&$0rKVAG`?r#QZ5sNuEj-&$;-^ znMRlMnKBY0Z#S?^mBG3&uu?mxO$v}3?zz^2Pm85joQBo`kG#N&Lq)QF$`t&*SykiE zq_Xydr9*5Qu+Jl$^gZSb%w5Eh;E5{_`v;uMD5^HFMWl>v2d}m^p!F|-9Mh%X+<&Pg z^L=St$Tax`xk@_DKeFaeWaw8vs*9W2Mr~Ll@HG;qEJ?tTSL5_|r~8DBGf>iL03CJ| zsYlk-*)Hw$=gw_%G;moRnVUBt3ADS3CaP<1#48b#B7B8<(NfNgf6&E0Wn3<4NMGAo zLIIAf-PtN*AA|AIGKMd(GsYUT&TO928NWjW_@<@`=eVnXBNn{QlvGD6X_Qr6g7+I# z3_w2Rlt;asMU%${w*&NWE^-FHkqHj^&{tw4ITZe}_bGTFjh_=dSvO2L*-H0#&6#w; z8y1;lpCe@5(4uR@P=Yj(;-^P#$1iWXI7pZ`LK9L@q{P zMuO+D$P#%ckWv$;&%VeyVY5mn^Jr6DTGp~C_OVj8?3bvp54E>-#_ge!Dg#U%zfF>8 z02lQkd+^jcz~M2z;4`>kiE~Yw;As;wd?d|=H<1+3zc2a8Bp(GwMgh`U{Y)p9&wg3T z3=FVekVQ?$))|d z>CbXTZZuDw{9R(h2tCgw%elXvl^uwoJfd z`x@_*&hp2E2xzN(Q)BK#ZxCY|v-j6GV}egD_WLoZ{JzF+-bf<}L}#0|Hc-lESKWR2 zT*z*FZ6$hVPsWku>3GQ78|SEv9L59$B$oS`S8Xe&a?@BzA##ksK@@A z;;ajL%%{$xYp5>4V?-IBM5=KiYWFqzjT&T3ddH>~`>;BAD4Trqpz}doFyJofh81t9 zeTCu7W=}uZO+N6X>)M-1L;)LEZ2OF3EL!rv_)ORK4i71@`C&g^^0Hgw&2M~Vtekr^ zy4&lbI`3T*j8{Grw#RcO?jI>V=X-)dSM-;@zWCGO{;Bl5v5&5%E_*rzUPOdy8HQPr z&gix1@%WCnx^;=iTwPW}|VH zch|P2VyeYXpNVKrHvIb9mY8KDKtqj$Ae%JUctg&fDnk$uB!qN+#KIyngmJ@oLJDVR z4F>e%K@MVcA|wM+1StxRBHt!KFo_o;)tR_SY(+|VoJ#~a$wIW!eP0=ySL~h}T;AlQ zDgz&z2cW@9pfiiu-~xG7R#73Es6$9xQ3{;;5|G9;!Aeg}rg7<5nyw^I5H1Pu%2YcP zFl=1HjddUi6(mmqqm-|zqeEeQF@Q6QYjQJvJf;raqIR5Du_+z6w5pdngb%(!2)Rb= z0R8p)Q-xH9gf}sH5HlXBIgGn6k{8F~!jh1ofVpd)vdE)8O_aOjNEeuB<&PTx4ZnJ(5m#JsMf~9(yf+q?m*UXj z1sm7gG{MAHMw5w#=~orlKb_69`OfrDA4-0T&LPEEWl{se7y;sJz#1-#qXVPGYZ$^D zz}q0eH%U0xq@zzpFj@!ssq=)?ty)PvUmi;NVZE2)du|}V9)!#BB8N{==X3Bw|Nl+6 z3>SEe3;BQgVAe}OcKNN845{zxf5utqJ-{#PFpPU5sWa$GiVlVZ($y9!%Od zU!gZq9P|sz+V+@u9<|@5jjkbK4&ciH!cjP*E%;5m5kaZTW*qo(twW<+>LUN>A)>Rx z!KkLF;3>g4V?0WC6H*{ARHe?~RowZMp^$BFi8Dw50zFh+%Cay_O_e|6<+sX(bjS5O z><Id@ui>t>Uef_FHW91 zRjSs+XLhdaK4vPVF^kut$tM9X6B%!)l)vSh|7t&8FMHz5>A3GB_r}-#-LHwk_E?-g zb3w3dt{L}&pgbDUC*MrI7ZuSQZZax01Eue~T@XN~IS|#4qq;UYBdL(Ekk8Xel-<)L zh0O&#Q8uZu8(-_82Oo%)^XJucd-6-#hVKfl)7NBYrV(w8@7tT3u_HpJ~CF+-kGz}L>IQU;=+liV_#*t*^D#4E88{z5f$&_SES(d-Ack{f-w$^F=R?&c&7ZonQW6 zanGwhBPN%RsXs_R{1N1ZAU{>8vLg(y5hT|t%eX)~8#5gWjAdQn zrEOeV@3Sm*l4RNMu~46{>z6Q;s7Zy+_*!hT44(#MpXG%RU5F7TFzF}3x@ zIPvfkF(W*4x85FOv$LXAc0{mDRQITlg?Z&OF``c)LQzzBlrp>I(hA4uA=9=B_yi*%w&_ZX(K$dFPF#;+J+a*=|| zI0Z{0<*@HWIYi&yZa#&VRg?{Nz*vq(khoo5hj~?>gEr<*fEs*jUSt}{HBYBuIj%vPdg(ZHIT1efJER2-S31`|r2l|nrzofE z)Kx)|4m6uxTra$G)}cc6I$y)gxJ*AH3w|l9OY(GGAq*Z7(ykYPOxrRBV`qy}*ix^dCx%1#QJ)*`Qx4!z~}u526_$P@VS3G5%`Rc}_1JPOn@B#A{HpTS4niGyCxihxE;-_UgXl21MS%LaJ9 zg5H1^`wWQsDfn4`84Nmt9~eD;4>aXD+tGH0O!45?rPEIo&pDLPVMnM7J%dlzg)DR} z`^tC@K;}unQXv&6H z7gko|{QAXM?QTa$^*5Fm;@FMPi#zUqY213xJ#p-Lx7Y_>@ATp+`K?=g@<4vYc31wi 
zzLCk8+`T=9w@t@YFMnx#-!J`QT>njP*J!RBl`TO37Bzrk8Vz}{(BHGpptdhIRySg0 zdBtJt3d=2A?CbBpwIGCt_2+^ktV{7UO%i$DI$~7k~Ak55^X$P`SecEM@;b56iiP!7+HJt3Y|hw{_dAi-u0Yoh9m@d9;gT|_weS?DRJ0Bp&=tV+hZ?^PO)J6N!w!bHcL z37E0wSwC456hQx>sd(uJn-tuK1ByoX6(~IN(X;aMB0G7Oe;6hAyWA^W%qX$+mN4OZ zASdvI8cOH;8mBa*D`CFsA174cU-&aT3iHprmh^>oLcIzc22Sbo9M$PRsBg-QKEb#9 zFPB%pHW||L+{usjk^GWee{5{{)pW#@9}lIxrQhvQ|Chd1nLt9;=HFOJPuEWRtP6Az zbWY{652i5t)g-eVxg#&`^XK$#*w9jtfC*rS^WS_ZQu#Y6&*aaKF#X3+WLZ_$$jb5| z0w6r|$^m(ycOG=|CcdWa0QB2+>1f?sPq>ya<+CPzWU*!od#E2*Y+`>OZ_+RsCF{ai zMEGm)tbUbVLoFee`4+cu*P0sabL{iLMojJB7uUY(RdLgszBVp<)vdAPs>`E)LSrG0 zb!nHQQ=V*YwG0X;N@2C?9bciH%mvEFv1W|y-xC+ycwM~sjjxScue&+smKWmg2Of?G zAAUGa&zx5C+Ke5$_NYAKoe4kLI~MUE!<)q$TPx}!=r=Z^PgmX|qQKy?sfo}0G@5)S zNOlwdl!ZnmwwBwbp*y-qhs7^3$(<|!^{zNd*a7&@A8(1&jRWcHL$l~v7j)_~e;Lg^ z0z$T=Brl$cBn~LNnPv%c8bCTmy<5=wDgRVOAu?#tJxQR8pG3J#i+&Hh%BT4E1g&(z zdJbL1L6X$?>DVUYW(K;D2PA~Ygj5m~PR92zsik{zQ=qQCmB!7UA zjiXE?<1(UfjO{YlM1(*WJ?ImdxOop`xRdw~`RSH644?Y>L_guQlL%bqa_z*WZ2YYH zVgreF-r3`e5ten7*CJ2oV-qq)|9U-UT4TnTaggMXFX#SOhK>0V#$WwRyfaUr^b7Ji z7S;cb3^!xj=y)`x>jP@dfA^uk@%LcS{}}0uA@#4cyUimd&760;=emTDT)vu08_3!i~#4e~D$LBWU%lAJX2M-?e8&8*9 zd{JC_$vEr3YFj;V`ovP)efL9g&zB#L1J69|Z~1TVruk4KwvCU)^u%~vv~N#bvVT`x zb>aTF>Z09o{gu1ovI}=8d^#@LwLNxkn~0qgW3f;D`MwNK<#uq4o{5@6@`o)=4hVe1B0A25``t7!fPNnW< zyOkcvANGQzusJ6NLS`ho{ER={<6ZXN}V^ubXaI_lzLEHP(JX8DO6+Jg+=G8ZXED7=Y z<1$n%!t$DaJiy;JMg6!}`O;YDv(l)PB`$GHlRUXXvyPL=uLdc^LAt5^w$pMgd~9M@ zK~R`4&Le?G-ds-CfHWmczv?d3Ya!=_evexfDs|Cc^-H7^ad>bV>OnWm1Jkb%j<4e) zbFWA?RAM=?j-;V4XAI25y+)FDbJ#?~^M-VWH1N13f6M6=2XD~9qUOEmE8V)TY)(pd z7~h&-$Iq6>B!5EBg$}a!9H+}c8hkUx>hR5z_yEVhg0HxB=*pKzcdUElSALl@NW!^43V?!Zu zMegX8(@C#LWBni>-_s_z2S@h`CmwwJ`&LtUQaE%oKyk{8eD&lxIMNXAGLR4XL%Ya< zvOrV#lOa1Pu{J z*UUFbkUu;+uE+%53y*m%<#E2?!XNlpE?r%AaBazyjqt@Qx?`G>AN7`c1uLRKS)o_T z?1eXBAX~Evjzb7%p22~-hdyDtue@P5oSu8==^moghb|i|I4cb6w;{ULN&55Bt~sHF zOv%S>4mxTZvOfsit0d6T0c*`zl;YPTS$Pu6v|UIq~_; zr+!s7u4~~SJKDICTbA^Nd}_%0r3z;+D0Sm9=-F>=3y=6!<|ft{ccg!U?1*RHmpKFY zfzM5ivz;HlC;_5Y?HX$%taGgkf4Q|7D=p?a)TgQsTvOk=uCX}nj`23-MVe;T=_NtR zWSXMt4Lz_S{v*b{#?y^Z?TWUA|E#v;%`m!>(rNUw|A73dSL(FV_jA@E z<99b-{s@^6^5!o!<%-cAx|_Td#W6rfz7I9$jJQHxSK%{n+NW_eV@FN^7~o#M5@kUj z@P*uX;ZyZsa@JAtfj;V*YN9JRAQ-aFoK9MjdAblXHQ?xnz9ED1a~iIa8Fz4-Wvk^u z9t36nIrp7%a0UYFtf2Z>mT$SQ;QmQKsD00TOn%5-__S4ut*0YBzw%3Oxl@I`g(*nh zXlDnACvy5h!r-Y1Dlv`M0~r|v^28YU2r%BrmarK7kd8sE@u&nUMKpZ$nH0`%SNF7R z0N8nCeBOXl9CO1Ao|yb2zX5h0yKMS{W{P{1uU8H;M+dnh6))_7XAp|9Gj`1v!HY+V zg5f~W2&?WGO4A;wiH`s&RcPu>~h2Bi+BrH){|C4)I1 zGcy<_$x~^EhKFP3)J#17*wgXQ1NX(_Pd^op9eOl|`UhiSVKIh=Mq=mmUJZtuv48)C zapMg)=j6J$f|&K2Y7>jT{7LbZ(m$+;ojJ$L1}ZaFNTLl zef*j0QpWCm@A*sSpW|&`|IM+`nw1Xp`K>sB#eXEnt}x2WF;5h~kBuJ!@}|xybIJ?H z&QS;o0(oykRU-s+q{OSkOOP%(nl`7Kwqff|-vM;p)xohZ&bGV>X*!(iarNzYN(l+)i#GQ))*9~mp4 zEHy971D)zFnCPi)+w5?!_}p+&@LV5U74F~?ZoI^=Q13kfX27P9G&?@KVQDxFpgZp^ z7V{{3J|jc)s_qmsPh3e)r(#^f(mr`7vGZ0q(W(!}%eJ5%p44>NGjD7d4k3#LLQz24 zFD4CmJD($HXpfuLJB7=Rw--JUJG+a{qO zyePz+^X3rG^arFthtZX4mz|~jiP}e#hB*8Lxn%w(F>lXsY$)HEf)0zjiz}_(b%LO~k~Gy&7x{`bhqvCT|TkqgQ>) z+8Ud;G(Zs}95ufx=YCD?Za}_-PtagdQ3E8tjfzfDhMm(>F)=wBd-hJp4)qPw(}OWJ zJ+3~66AAM3M!eyeCuwZNlKMJ!yN)(TV!pNDPXaOdihr|8I};8k0?^cuFLQR3-vc@Z zOu8a7bhvK-ABr8S>8K)G@5EO+HlkG_B7gKkv0c{+JPWEWR&XDsILU(no`|X(eLk*! 
zY2j21s$A2fjW~7SK)n7HH$_u}owG-tjOOY>G_l>SMU@|0m3FJG>$+!WFTC=tf1iq^ z-^uIqABkoJoDIm{$hQ zSaXi_7uT*oPC{B*ZpHjU%NuFd+oyKK=(cTY4=XNZV?=FO^~`s1 zAAI1E`0$569J_aHkFR^vo8!XEE{w*?P|Tj4i8D{1iu*qQ_wo6U{#BfK_F$|oNDlK$ z-t~Uz_Pz14i>{7WTy<^ixOji?w%v)vGqEz?dbthKAIYX?azr{Q+eY1C z^NMbIvybYTUHWXx@0F}K@Zpx!UoEy`YgPFzuj;3^t@hKaK9JJ_Box()>VH#p<&F$_ zszOAUvP)79^8^VI<&6^FptbJ80_{ln?yt~G;aC4CCkqr0mczw2kAx_1M;S65;Bo=V zj{d=*xSYI`O;U8l1+4DvG)x{PV1L!b44D_D$^O9nK@Y_<#7;1M4A zweho@;?6n@bM6O(Ho;barje)ERPE*$T3`& zGoj?`ehx6xqRQzX_V#sB2`jGdv?d6`Lod)8jc%Yazk8y z+sh-Sk!6+>8pxmVP}XU$X<|R2$Hb&hu-j}Iua2AZ$~ZYU=~a51gw`+F4scot_69L4 zj89Nmw6qJ}Y+(ayYa>oS^k{ta!yk|-Pk5G^P$Nc)u6y^Yw&TnpYRh4DSO|5t9MFMe@t6noK>jysC zX#~|>a=0M%rq-pxs{XX~y2j;|=Pa8ajh{FaAoEJTD<3sOl}ACwMIb_QU)vu4m$p^g zU%P#z{1`*{xs!2T+GSfez4rJ^@$gaY(fM+LzWK!7nRd#ZH`t5=t%@qhH|8_FHz*`g z>AvX0qc;u+y4qi+PrPngfO-R$uv0Syy6QLOm{9qyZ4I5R_X(1_C`!AOj(hQif7+H$ zwx0y#Bz@9jj^o6M%bQ_+&6Ekhjg1vA6mLkrym2(3K6#A~&P!qGj~K6cvx{_ia&#m< z^q2o5rgl%6&IOlVsP@k1sQTiYe)tFD$g!jG+kgB=@$OH3Sb1*7KJ}>=UU6}}?k#VM zANq+O^5==>RUglCrHqM?|h%?7d$MM6* ztwy`|Y_~7Bb9y?)*_1FclBX_GU$POcl?D0b%aTc-%D`zHY(2_Ni1zK(fAYyC%A&&C zck^eVR8AHYptG)cPDk>hI{vR{@tt~Pzs_lz3R_re#ggbRo}G_X`Oc%mBQZHX5)%^> zF(ChXLw$K`aY?>CCoRdhkxlIP(Zd{pj4o{{4YG|mabh86&YX!uPaKG6AAKzD`1q$| ze)en}K6EnPaQkiXec$@_*ef4zWN=&DrSZevpZl`PAuW(VM&;iR%KsS}8uB=y#U~JC zqj>I9zWxkCZ+{GrkJ>I=boFJi`?6gzGcR9Cd@$~&yIPZdV_}150KKTgYx*B=G86%S z7GYpWb&t+TUVW*f1CpOV0U%;cwXcEomDoNs7Ka~rINtkT|4<`}-WVMnk6jn+k4vw6 zK{Pb>=~X}A(--{GO4bK`z`A&0eqeM$Ws=WGJ3MgYP`v0Bx5WO7b}C<$Q?(&f19e7A zsDB0S32nYT(v+pAxB=8p2G{o-26SLV+$ zcd%0LnG**nM#_SG!4V(&i=UF8kd>`~u<%Vz_95qvlg~QdOIcDD=KGR1AuLn1fihcG zDmOg3pH^M!KK(87D}1(b^4TDsd~sgrfUd=EcJj}w)9ar8gLpV0tnJrHKDodR4wy-s zr7)$|FZt~DSDj8iycc|HKiCHBZRVF8NKWQPZ4I4k^X`kTq9MkrpS;yCdN7JYfLdiG zdgthDm5*o}z|jT0$^0`Po{?qFyCqO9m2cz1gRamg>kP|hkdX>$j1L0W5&+)$P-#}M z=t{qubB0MjL3WY$rDsU{-f}JFt8!ui%lAdTRqj;|g)hf*+a>43Fa2b}ryeVGc!7@O zC;QgRRcY$@0F7cnG{+?nt%fr5slc`v`whsPWb(HdAF{lB4%G2ko6jQeVsmQysCJ;^ zKNS%D>Z_!zDG#ZO$Wk?kMNH!^8;p@Vf zpR4lP^x6ZShvJn_omVEF&6>nh8WtbP3vA3nZ_!o4SP+Ls#dlf2Exb`D&aco%4?LeD zIWafp$|qm!wRq=K+tBD!yP^zz@}npl_%65d&OB0|YCFO2>ldx^&aNaYeC3OsOL>*{ zQ?BSmhNph!f@?kln|uOXK>6Um`ia&j@e3|;w>5Kl*CkSEjWUH7?06qzO7;J+Iy^Mt zjfkrY!dY*{=KOp-^W+n;K7S^Tso#9$&d)|nPDGDpW^)Y<$4RPe`*;r&V ztx>)TFU?L355=&2oQ)MlY%q^AEFYrZ8%HtggPe}kB!50JdNziK7hLiEU$^Kayh*ORag!-J1}Qhh&xg4LX1_ zq+WC&b*AI9OVZLOFa8&NYL4^>Q(`Z2FMX_aLG=QbbRe5io(O>kyMBa{ZInnL>$&P_ zzVK7>5O9LK!R=DvipR3@VkP?PPf#@mVxXUsq?lXgrO6y~`x6|@A9MlP`eX{VztorV zv(5=nRxad)Jj!^i+Kf~}bW3yy&rsQhjAsrd<*XQNvbT&RIB(@epNW5+?I~q}?6TbC z)uq?^IZd%ERc>4}ZQ-4Et$V-VxsDHm{5S9F`H#Q&wmYSbw{Ro}Z|cz~Kx6FNICoUi@F3q-zyOF?VZeg{3FgnKK@ZKu)VV0geUan-C~y zzw$#>Tp%_hpa^QHipdVc1&8jkXrA2lplGaA1D`Zcg)FHuB^1;)Tz@N%nZiCrw_*8e&i$Zsn32o z9)J4rI6Zqt;l1(7m%lV#`-WG?H7~d^_V2wcrl%(2;)^epoJ*F^3$L4ps-J4|wOu8Zzb-zlCrV)bN`9-os13HWFj&(SgaVLo zo2mE}x+Y)~YW%r!fiC=a(}f-b7{dKVouTAk88j#@h6|Fku{I9~BL^hO8)-_u=kb|w z0y$_v(olLFzP2zjO&eFxeXj?hU=!fCpu?`qp#ZK%OB~}ngxln?LK{c?Vj#W1t!QjS zCob_iSK-S3aMzZB_@@=wKocZ;!wZh!TPCD2JtB!nIq}E_qP5MI?HzB#de@p13ttI~ z=MG{W`-Pm-E@4mY9Lfe@b2nCceGD~kO1XSEl^W=SM~-aLVj>lT4|q@rU&_lJjM|Ia zKQbUaJc@yo7y6!hnQtrN_#q?3$&LaZAW$AZ2`r~9|G7X#DL?6HdyxN?pz!L5?y#Wgl!$d7myy2wmLRAOzr+r;c_K}uQ=t`9z2Pz^{!-vogE%%#+lg} z??fFRpHyGUVlg{52V!<+Df)&+;^f(xSW^5+^(UYC>|ODmzxrUTJ^6UZyNhU!Mem*q zqJMf@Ozqqo7wx|^_UzjmyLRn~#^6w#KD!jBPo0TVGbiHA>9eu4)UpGPQ|ND~FE15m zORS1VAJYn-EF$uqaTci;mSbRQOx^xQZ0BvKol~)6+hp8w)6L57g4n%x%sXnSNy@ss zw&+RH*?ErS>QP^(^2jHky>SHlx}0xh#D)V@i<4~VQ{DC@GA2rS!-r>d$ow&IQTjUI zwl70syd)~Ls!yt4iF-cC`f?r9{)IQFGB#vS))r1hljpUi7*T!R{DO;P`Owp`bmCx4 
zG~`grj`Yc1`Au!?o$W{00-nmr?s@w*a=bY*VT-<$I+dTbh3(=5fTq$Y%&R#4k{Z{N zzUq+mjhm{7RvP^%m;1U*!v-ealBFzODC!@SVU_QSc|u-t=GWC`H}NSK7Gmw>sW|l5 zp*a4)m*e+;{^w&^gFNbONB>kzUwW|ycVltgt8b65{e}Nsx+VWcdB}RoOIlSO@kZ)e z3`%F4ThwEIgWv1U%Fh;8`oo6Sy0ONCE%Pe2RsJPTL0Vt&?(C)IH4O;YeXK0?PZ8?? z8vR#qV0rjnG%3rVWSfI6@x@62%PqIFrUsjMaZJ=B!tv3un3O*?tTK#@3`cKM#5J(> zDHJNv=ITl;&o8KLoOK=(lOxfX7>%X1{P_BCR9qd#tzPxq~_e0l5; z?bg|dIoXYQ#qE0Wi(>qBFOAS(G`0<^zGOqi1Dk#@VH+ze(oc)b_1dep*eCs8S36vn z9oU>-j;*DI=;bV9nkWk_suwo2^=#&55I(t(dLnv7=V?c5v_X9J%>?5WTL$=6f3m;E zR;CR$V}i~`LOlylB-?E*!-@A(@?lyIXvc$}O}5|`oSfiF+iP6Y?oONf!F~O~cV3HK zuY4+uUF6AG0p&yg*X}>K78!NYDO31HYC=fbnXjSBqsXhLuO&OcU746E@hQKA4h{Op z>@RcjR0-Fm&z4`}P!7OjFRrA8$HHHZQwT5g-JeR5)eop`t&%IynaHTugbttenh7h< z6*^_8Wh;D&w^IFryg4^5_~jN5zD*gox^jw`LH3vWgZl;9hL&s#-|lCU+oD(e@|wn% zvcrv$al0|sf7L7Fy08BlC-1Iks(Gz$09aeGAHl*9e66UhaB|2>OLW28n3JB#M#{hE zGiYqiV(A}F@FANT<8xf%&@h|j=#AE+A;!IV%OHKCdZ-2rX`7Ff&+L~TiU0cRzZUm? z=B~J4*N(Vi@5Qli*Us2jSW`ZmF|2a1`42yWHo3mCZky@xmfACbeL^2R?!^A8f22>{ zpdV%9P8koktFi#)t+ib@9#a=qQ?3X~17Suxmhj1AzM!pmJ6D z>CiCgnyv(=T=4~xKXe;XKGgy{qUZM2a#Z>BpFWIde+rFYaxAz?O@N@|E7q_y1KgLm zE_ndmI#78x-C{QY!V^zv+xArLAsI<1_6GV@uF#?PY|EMewGkGzp+}fnS1hwpWphpa z?uhkxMS3ghegOg^r_P*=3-@0XAOE|*iL?PjQOQypPs}gg82Lueh@y+{QP{ZEUKrFzr~9aw3W4umgi8m*pw-G(5Bf`(3BrC zy>my542!SHspt_OlY1|W!SU%>kS%Jd-(Hn3Gt}%;9vd+?$L0gRW8W(urr)1*#O5q2 zZ;y*S8G*0dkWW4q6Y^IoDCd|zBU?8=7x#SO&UpV{{Z)M7b9cnX{Hn@262I^-ekOkC zpL|0!)fDdj_>=M8cfUVoj~|cAFS{~^CI(%9oNm@^4*IP|pO&ZkWgSGuDMnPD9ea1j zMK8EARyOBktCt;~pC*y5Wf4>D!G54*P{-G)kFpQwxPJY#k^W)PmagfC&&z^DgVm89 z*+=0wRGIvXO0G zVX%ckfC;uSGC>3zn;;vLlRzMWBqWprLK=-UInT{GoE-KE`<(MVzo%C3eeOv3egFFE z-hI~UR9#(FU0vO)SFiW}_V3sa92*>q(XFE4x`WX*G#VXj++Yn(IQ{t~*+g#uz{iw) zJl7iRmY>)aXD?mgOWN`3H{2&XDpyW&V^~7NdU(K6`V~5Fdt<%H zIDjaF+F0&w_ZnGB2F0r$ii$nWa*AkTBS_uJ%Z)&}#;`7M|K#VwX}hcGsQ{fJjY71+ z!=9{cz5s9}S7s$Y6(p0o;)VBRsWb%u9I;~_o~lth=Y%Cg}b3Y5{zG(QG2~M z(@+j*D{TfZ{7gJ*vhT@JPMqNZplv}J2b8E1UXLJ-4TbA+PRMKhp`TGdfF6|buc|-C zU#^87z}~Q~Wq^;cZx7aUUz$1S)bwgfTo(bD0@Lh$1`e*)*ran`{i7yCtor4Mw6^={uoS&^@Gm2}C+NgJW6C-r@UE)*b*i*S+F zNLT*i`#TOi*l+wP^xv`r6n|M86|8U+oc4Xp@9fu3tNJ#8jL74)5aBX^Sl3l~CU7twFYI30-T8?w%phrhGNwo|}pBQ|IIK z(W9|AH6D*W{H0hrb2Qqoy)O2>;MO>F@5|!S`fyyBU5I(vGhWh|otsom>(STU6@409 z2jv&=T(z#b%<_7##@Y`137zuIMm72A?pTe5+0_`AkJjBkFZw6qz^-j^52u4$Z>1B?BO+S;!d9L>t^KPB_R8z z%4gqAag5bjr*yvbNzr6^O0m5LhJUX5gyl;uzx2*;xPN_Z`F4b4Q%-2c=V;t)nvyvL9kOznCh8Lcrwbh;_`QLdmp_e30J8PAN5tBOnnDZ^e7! 
zFXxwZP5|Ia8GweS@siPz;t-EOTGwk*W-&?=YTK>KAd(NDp#`ju7*g<>)fC7Ds8SDL z<1E^3l;qGc$@PTX3)RAmgH8QJuZ#BfJVl_kkQ1L)DOzo*Gj6;6jWM$6c4ZPib^QEZ zq3UK6f@n}B`swO{s^Y(i1j@BQMIRlb81a7+QmLAdbSE_L-9|4UWZY#nR?1Tbaqf#e zxTm|&kyh(%F+P6LZ~M>A&U;b3x4$nIXJ+ETM;?w3{rv~y*y)p+#P!FPO=EH8fve&r z_r5&tz2~LT(@*C~{b|y*k=6=OUJsU%Q?-F3X=i~;9YJ(>0t!`xnbipR-Us*>7gdKk zlKM;3H8H0%bZSES$g|JHfBM<~9KAj5@nb*u*N&hh1k|p{ie!~Xu z43g{_V;bTsY;K68L{!(x=vmHyyy?jC*6gs{95lm*WKJ zn;VN%%_3jv!`yEym-oq!%kaT}3DYPfr%dAtoaVRPsyx<3|EJkuk?sH$=tWUoJALS?zsXxg zI&VUj1Co!;t6)REqPMgO>U^(X#6ug1E8)!7 zzzIFH5k7IXLjqLw1JKY~Ex_vDHk~-vnG=RmuT@HQpebB|)c+hih|6+-uA&2(B6g>X z?!*DTsiUT~Z8`O%p;t*m=HpUmt*%ww4Uqm9d$JM0O}!iWQ6_&0qDN^g+ZojZ%zdO1+Od{X0s@0Y}QJ7j-@^|{5KJrX_;BP-1Pe1!yOv%|E z(Kym3N4=+iK;uehwDk?ff$Oh}gE!w2W20N6UA*8y{)N-$Vz#y%b38Cq{fx_eAKg^& zAXWO?CA-@xxh-jo;NdmLxAE|PnYW9@{Q0>#(Xgm-rsmCkJTRP|oRZ4Pan}T4pszPJ zZ5@qkuDl|yz4ofO^5EX+>FJPDuQF<6p3uR=^ixVSPGeX6K7buR9{=Bnrx ze-=+Y7adEpG0-h~M7Kso_j~(D!o6_=z*;MhbYwr|$3?evLVnR&I~ycrbG*^Adhp*V z-mASl@Xw6^Oe%y7J(4U?i|i+&)qfS1T+;s|Q9r_>f4yZs+at%CZ`z0S`Ae9_Cpdfup;U#wZj z*yK?*4^VDJbs1F7rbO*1eJ(eEVAJJOtDX3{JjmnpjeKz25*Ny!GT)6Vb&59#|~+M2xnco){SElfEumAM1a`^vTl3sA&K??{o{N?0ZHUi`F> zN%xpQI)a1G@SY}=cm;$BIT6gb=2ZnO-@FCCZxx^nI-U_(UUa%@@@M^~^D>;NA5O2z z0Q6Dci@2;!KstU1I2Etj9rie-_L0BLL%^5zIqyUBmoh#vUX+Pxo`9nZ3j=BwX~dTW zpawnF6+bucB@5_SE#qI(-l)H-3m%c)7|S>K0Y>8m1bi07ldnN@UK{mR_}!Kb@cjJb z^WQ}7a1#er5GEcx6%8Ap*_I3grF_<$0HHu$zx0EGV=_}06ero#)tCJ(Z3SDq#9!G% z4WZs}-aFPGbL!lDYx1gFZ;U&>^^L{@xpbEW9x|%G(C2wm6*_r9w+ufe3Krwp{Cx4k zgpa+RRv%olJ(|+oZ9(G{n~>C>L_ZJLStwyM_ux>kZN})>kZs7&(16C+(eQV9Jm$}zloU*V*DK~FhsP*tYjvfp5SG-c>KRlHiNn6xtu z*ucYt;2l%p*H!o>&uhMGb=kRKcJ?(0v`CEJ#jkDoI9hj{k%^i-~`3l$?3EyntLwcBdh$1 z#$eAc*%T~bQ8i_o-}dj8e+(aZo4>Dr*mSfh+$mppWasYKvio3+ZrLuH2W3Mx`Gk?4 zYCVmI?+bG^+cP%v;*)gCN0A2QOE|KJtIEUo?Kv3|Ulf-T!0dD?Td&;9qsPw07e4o3 zeDMA6i?b)s#z1Rd+0GBHO4rHtUY^ z)7v{cqpPn+w5-P1woP%xwO2=LcW2D4PS^)SA8lKhUt5plk1E8x8nCV#V45W zIu_l-!x}Gmc&z>)+VMI4;9)UOli?+*zQF<6@*e5UL_B-^cznnAep`rjF;icaPpNUE z>Nl(<!ED9)7-aGm-aJ=(x=u66I7AcxJ#6~DMN*%!S%V%B6 zSeY`Q*q-#y6jlSWzh*jgV!N^|5v}5t+9Uq5UShw<2#Wmh?Wh+XfUvX=8F7PePJGUP zC%nGR`(%bUFqf zmpE`}?g^S8i~YH-$ZZJ`YlUy1Kk*BC@gmC^}qk;$$9y50npFm^&$} z?R5H|{)skh2KiwIWvMy;HtToZ#(nBx@*Mv7vH_A0(BpDSFZEfk3NPWSV71zi2j5Hw zel+(&jM^0(w9S03bkTXp?)Ejo`75GaCw515B2RpU9u`w)|a3;xM_2V{9~CroE}3jk1mUD4=#JAK_B2 zl+Agn<_Ky*Tbt(G)`P6WFFpyq-_+--3L0gtx49le9&Z9z>v3H5XiboFkm96sakzq} zz9(&*AO^iqm9Mu+UxlZr-Kf4i5^iiH$4~YD=X37H6y-E#TGV$ZtjguLxZtJ^RKmq z^*HtHnfUxCJ{C{j|D|Y~KOd_{o{r(Yy)ii45&!zF-yAQxE^VHK>uR;&hCRl8nkN!yZ#s1}d9G<7H z@nVa^RvaFpBOiD#i>rHY9uRK-s<<+zZ(Ny2P%m<6&G|e1p0M>=I@Xx)RA8DKI-hcc zUN!&#g!35&&zG5UN(1rR+VlA`dceu8e14XfmFaI~13<2|cw}w#hXP5He$uB(r82){ zi_Qhlf-}-|<@lK865vPPxxLa~<^<%WJ!M07rlsv6J8NI$qfHGO^ebTAN*X+mG#{$6 z@c*)WBAU7&mGPSApZ3?2|6gkP#sB=q`z7q#{f4Im$v1WCa%jm2B91(Q2#)wSB0mIG zapleUWi#8y;UWOFHU*RdR5e*nxJUyhJ2O}mrXKdV;s_%&gv6WwFs!{J3Fk)|n0${C zB49P1-9c?qAZW~VC6I28yHMjwl${fl5PA%3HmKSKAWtiwn9m!4UT3?!H}d$KBtq@;j7ILY6|CCn&b*1yrc)AOV>n70GGO zhoYTMui96Nl_oV{WBJ010;H8VIv`GChEPh^FJ7W38;|$KP!?Vx>-zd~tSzIEZcBNz}b+rf}qlm49Dd;F@cU_9A@}vb3y^a@O6>b{3!sqI0 z-7Apy#Q-M2A8^&0VSqS+`+@+zG<7S1aOszJfT7FvrSWk8(QjkfBKHQaqKh`*f`(iw zI;!i}!jV8-3iG2bswc1Rov+{**tj*B=vx;Bultqws66Co(x)r&CETDV za4^uMF*e>R1wG+D@oi{ZOQ$Ex7=8~Rit~V2;UzbCsKF0QotE{oxPU#e0as49YlX8| z)UT8YyXdB?{8D`>_=YqOikyE5woHRL)c9eMXkoXZk5UzQ!GZU# zR6h^27!W*QDi+)8JW$Ferb?c)iN%~&)2?=?>CU6RO%ev@wAY%hF(`` z&l~73!^i#eKt0%397|7z3V;`y+0mzRrEb@0sBDMw`C)-U*HV82m*`ZrlqafLz6qqK z4j`P$fuUJU62R?tKEGXx`x<=m1cv(HhZPc?a>54wfxo)6^ZtXG8D0;oLA zHYOOS`6)u7(*y(1+ss?q%N2PSX!et;yydUoYC2ae$dFDGu)jR~28ZKv0CyjEpHduo 
zynrA*?&29lPacmy{?qrx=Rf=rA-xpQyETHv%`H1(VDt7k zbj>w!-%DN?L%a4wXIF2Wzj!fDp19<LuMn*tFkZxcRo8XjRuccOiW0&HUV2%+J#EHYPc;?6<5ic1sGdxL9+T zzK8$Y(b*%t?r>Zu8!g1Y9^u5McPpMr5B%am88YI8jW$)%AwQeXKCsZuZt08aJJ8qN z*B#q;Y>obrK_9U{C*E~+s_s?!d6&+|XFl?oXd51l@A|&)j`Iqml=R^hrB9x^7=Qbx ze;R-Fd%qv!lM}I7G~nI7YTNbkZ@%$OF)Z0kOwNg~OL1yqKDrL*9TVZXwp@=H+^5>{?Wtb!}PUBm{-UFMPi; z=F#?|FJ2TzXB2Gcmg>rT2@BgQp6BGtP0K8#6F>*hLlUca^rG0w#6x|`Uge=b5YLgy z#z)^Hm*V_uyn&?t1TG2)aL$1;&WoGzK*)9h(Cis8u%*M*bdWTCC`cI z<@SRx_;k-gnrOLf44@4PmpI#@@&>Z$w48BY(2KX8l0MZGut(e9M}L04-hW*UH?zc_a>>JsbOOy)It=gKrCWssd{MtZV|G z&uQ;cpW##+^_80Z#c7RMj8(^vos37Gd_2ybI~NzuT=G~(JddclRc3KX^;R3s{8kuD zR~ic_$j0N3;`{r1qqnas_U+yqJGO6&D`Y$O?zuXyzT#kL_|54W&BX=d;e3zmKK-Dh zVvbbhQ-ATk_{E?6xi~O37%#cw_UNst0ju>G>g6SfpNS7P>ZCPc1+$dekq0%qUFRJ`N*(g@zT3}8<9xRU1oH29-CSYW`L9t5F~68bccGsKQ)rzgWlM zTKS*UmwB^~`+<&kpU_@oXg1XmB~Z=(@FG;+dBmeU&Q_uoK7cYMj@oSRL<=VcP;ar9 zqQch##8Z}sMpY3(^2&4;`za{s>6Sp^RUWsybE~cLk<@Qkbc7Cn%14B^b$9xxS2pP_X#D%|2R|J9d6U0qId*K` z8sG8O?~OCF)A9R%_?~$8XaCkU?AdZK_TO-Q{P2&yJ-+T$_d4#9s5md9P?NuN_{5p` z`~#18Q!Hb9)u+4V1C6QAZreN-yS8lhXAg1BSdLnm61~%BHLg#q z9ZOM{pL0TX;?tl0Y&`VP!|~{YkH+`{8v?Kr>pt8=Bkc2}10+A20GE)r+N{i7Qg4Qv z-rnx$_8VAjn&0B5wZ+PMXVg}^V&}d+al`F*#*NqB6l2@AcoXl=9owQ){lB-5^+EYj z>JHQMiz=tNgZda(p7OJyvQ3TZke`FE1m6dS+G9Xvcr**$5`o&X9-sU47vsVu?iIgJeEl@m^))rTiZAR0G*Sb zuofV`c>`9vuBx{qmZE!kB}V$YW8%3J@t)uKtyo@IjQ*aX*vjc+SL}*j`C|i{H;E^$ zKJ5y6IvGP`&sP>#z2O+&U})2D^z`+{qI{)?o_aj)d(}(g+AFrjtn^Yv-G9+~xu~`~ zZSp6Hfpm14VZrMf^ck+Q53JwPDd+>uzR?87yAP$Fc;3#HyvFC~1~_X4zJFdGE&0M4@24zY-ff6#@QU#z?Zb}d7Ra@JOO|gvl1jQ0V`x=hi?v3ujuf|0`Y3U?Vapr zrt3$34SqZtDE73FU@nS@u1oje!e*?oAg`)zk5nhNzW8Od!~STYn>x@Fst29oJuiVj zGK^)8&X0Pi-(x!UvN1vVQWjhlMt;1Q29Au*`jrpdZnGZVO0S@an|9~e zWnMYXfAy>As>UWhsm9f1ML(Q!&JtJM`wA-C>kOMlITi3b3BAv&Dq|{;pKw&9em+vZD&wXKNJ_|hXvgJH zWvqA#SjSd1u2x7&e8J(n5=Z+tl*{@dvRXf@@D_ipK_7JxPuW7OWmCa=_P-{53AVG! 
zpZynH&2*o(WI2FGY1E~>Sx4cofINjS4gVqSLzY)!>wMDdIt0}Q0OX5ND&?(7mCmPD z=}PsUKTtR82g;&9bQ|(fS^u+Y}3o&)}LOgx{7vt&Ad?sRaG-CVKc)_b)6<6PNdpvsbVjP}a zjjn;gXp^tP=g*c`mV{q^18Xt$08j4JEiH;Y8>1>7JGM&$_MOHnnbX?9uI!zbbR*1pNJfn9>HyNFXZwn+r0qP9&Ont`+09Z8pifbU>6OkKcf+0}uYHFM zs&MD4!gDY<-I(#QstKSvH92^Lt4)=yt#`+vtM7_!dtRyReZjGX zHb^Y|vPiz3yVU$yh5x^A04VXqaW4ZUo-k2GW;_l6DQG=`(4>kZ7ilYD9{P`8ycDNT zpYf!tyK_M1vpqiek@v^n{QU>nUm3f`w!{l=yDh%?o4zCV?Aawumjg}ZC|2I8Y}G^u zx$`h6Asb=?&$WsvVltmdLk{pk31yQ)*;!M5;Dvtl5tXM77KU5JchY>W~n=9~_8f z$@|;h^6hct$s_Sc|LYH8&!L?$H9jSKzr}gkxnoU)5U~#K@s_KS{43B9V_Vaa5UC01 z%LcGG8#^6~T&k-(cZo|#161NFnKl++s&LimmEgZky-FxydWwMsARDO42vtU%y~Ri-Js7kS2EdJR2ls|{kcWDzm;%Z7G>Yzb3VVT#zFDEYYyq#r zv60sDqn^x{dCNB{H`18-N`0b_fvDmK8#PERxB!b&4h)k^CUXTh&Y#OE17V*y;^i>!*`^Ip@h%xfe@LA$D9!mB1L6M- zXj>((;$*;^Qk~IOfl_CwM^Nb6NNWR_zyMYID3`L39j>B^Y%UL{4$%KnCK+t@zjFO* zfFAcD2iU>Pz2by;T@&>L=r1^$ZOOXGkM02Kf8t4FL&VqE08r?+E};aZ6+Ugq2NYf< zG}FAeAUx=X>z0Brjzhl}j@(a$AKg$s#%Akr)qhj38MD&~Bb|jg9!BtnVP{us%+Aze ze6|*6&YX+){K;R$7ax4kM-^}1w>P?n`l4rGAbNXyWAom9as90~$Nno1#YkUg96ot2 z&S^~Ijh*Gywip~4j!h$Dv1ybA(XQz0jp(PzYWuRx<;--=$9(djw7sLU5n{V()L4dp z!}pO_4f)&TXfDZ_nx9*Yxw(aynw*KV7cRzyOXp*LY9Z=Ni*duX z`{OmQcxmk3w>?&B8i6$4aLh8_TW|MHmOvm8U56D&YwIyd((@Q%(?>63-il<_|L<(I-3ZU51r>^Ar@shp4AHm>N8!foiSHi z@W}w=U#QK*%*>1*Dsc4f?Cgy62cs-IY9B;px;TBRFS>hs{5ht9f&Lii>$ktgk-3Aq zr*C!lcSmo}fF_tdxk*g*!85LX-93_nbU`xU$lsay8S4)laEE#aqOYgh$Nthmn6!6} z48&jl`TJseW+MK@+ke=f2snJ?8E+^XXzLHoTzc%wkH&lc=uhJCS00I_$=T>vpV}k( ze(1ZuDRvBPjVZ~ZMeUtZ`zK>64&8fq?7ZW)Xxp|qqPJUZlm4<%q|$Xh0l=p^S{7Gg zb#Xpc=WE^w&?4Tf&q+W^Z$U5BXItb`x_?CV#<2AKYyC(|6=Snq$_v0|vFXMNMYyON zvaRBP^2U>dkv+@z2^;GvVJ|v3klI21ywZQF{MDG4bmd$NzalpUSP^AwrPq#^FKQZ+ zK#wO?`K-5$w^@I=8o%Vr8~fD9n-74S2H#7&Fo;Cymg>=!W?fv74FNU)aFh0)c^7uJ zyiw<$@g@z>nel3Yt{mYBKID-Z@>&*LfsN@|w^?R9l5z3qNL-_g$Ui0sQ?pHxX>1Nb%eb!pCc`tY;y zu7CeCF{JtC6@!B@ESyVIv$9(}OqJbT7FVQiJ)AHgHC$NKJW;qj-br1d#Sb=grIzOd z@lEYX@LV4L3ooGp&5aq9OIaCEd_hA_Ra#43`5Sl?uI3Tz)u#UBlR#zYk`!~C@nv1^ z-;9|?S(UH)TSM=QepP_ZZ45W`+#C3T3b*sAA69x-9$*>3S&jLHzOn&;Fi`Z{cEIB+ zi*Q+{Vu9&k!%W(oQa>jEpooA zmL5NWjht9}-ux$wuRA86^!&v6c;?8l*s^JJ3~z0Z>+ZZWMt6?JZ~xw(#h-ui6LIzK z>!M?5ByNAft?`ck{Nr(G_cm+nGh(lOKRx>6dZfL^|GV!w0Mw zH|DmgHXL(5jKg4T~l7mK~eBnd*`{fe>Q!}gazCZiR_`rML7rk})HnXeo=C6NU zy!HFORldx0oH%?c{_)eFjoQLWY?6HDXXc}`yWbDg+0=UI>MLVp*B03%KGP!qt)u$H ziqu6KDn2p4QO)?pim_{(s%2>CwUp2sK{fs{we`5zh7ZjKC&g=i!3h9t0O*pehNM%| zM^8030CWw+*1fx9+y33rDLXv0S-z8O>zeq3zVi?S8!3}+Uvo(~MmLQ`pL|5tcAq)+ zTk#G5 za;LShyOjq9n#-(+2-->xUq zX@jv;%xlI8_y)ISmRLc@z4M?8LT33?_!>a_l^6d#A@#1g9;h-9QlQd{QcojXZNPD3 zHbN!8Qhy1X7UJOtI12xaPrp*mwBVO3pLM!vAB>N9L&~q=2X3T!19$@t{G%U|r}7tD z;GaR`f$$c;qR?9CWzN45v$Nz2mV&pGuC-o0a`PU*Eh&t z={u0)P~ri6SMk49V`izxd=fVM1G;sc%4bXr1^~@B-&T~fF1~0EC7*^l%i1z;>$k_U z=3cG!mKc|hb@G|R@yr)KA9K$hi;yp}>6RPf&R4!7TIHWTcItdguE_uC?(?ZZ_(N?P zyX%^-wD4(PQQFz2d7YZXrtEe3RLi1=6U=(~WTR~J+_Zd{ww35@Uyp&|`Ul5_sM_4$$xAoeHh%}QCVHcUBW4!#L4)|Cik)dpxA?Q@<;ZArv3nwxBVAzg+5-u z#>cW+i{EUVME>o#fYKW_faCo|mTnVwG4YA`EWuWgQvzNj$ppOZuog+ns$N z>2&_8zclizoht1(NQN({VDg8W!Kb!~&zXN?*}N~%i1XhKX|`Fqs0FkK`dPCEAP1XE zFy$!!STcsD#{7s406+7SufAUyZeL$55B$?HDQ~>fQE~DC8q6Xf40hNL=&1q1t2fms zqaTiNDpL}`LIOKpNrMOk=v_B5z!0CuGB~?B5<95HSXJW_0aPa6)KB4E>TU&r?`jzY0nw zr&jt_MP#{mo*pDH-gs_^v4e+{RT3NPRs6~ z@RA8MBwI;Sj}7juCEuW}fw^lBh{J~TRify$9+K+%Uv%H-#lI+Z~Gw-18RmjiBNTG8da z=%dhpDeEJx;BB@majX7fh*XCc0o<@ONF|``+*VQ+%TO8fPaf%plzHO8IsV0n+ zx8+h7qu;XokUk?jgPmg$YI$+ReZ5n5nt>l)t|(vOv#D18>>t=FcF}=DHW7KSAgrtF zQ-8t+@TnHUU*-4;x54pMdkmF*a~qZCpV+}`fZqjA#d9mB;d%8HjyLi*@xfnk73ZNz zi|*xc7?b6rHWFvr;DvtGUWqeBdDdVvDr1B04;7fyd8=&HsET(RoF|NPyy4ZK^HGVT_W_&u%oj((orY56rU?j#iZ}Bmr+eUfg 
zNbON`mTIjrKf4~Yb9@R$zJhE%eF(o|a%w)NrpCnO7Y8{Dq~MTbPffWlc7gl5afZC>tXf>v)r>&2lbsZT36MUiXY&Wdnfwl%QNwHv~mo zXac$`1Bg$Df5#4F)Th2Q6J2U@*VqaOVa9=?XraAO6djag4L#);T4BHsj~1J;=hk ztVX6ya&-oq3*EbFd`anROPlCXq{|^E=~`WQS~RI^7w=m&nLF~t;W+liFUIfu#81YI zCP}w#zcL1PZI7{Q_r>@A;ya^d%TD7*{b65Cz8t=Y`7Ar>2j6U5aE9bRx{bcnE4^>; z>WtdTa!5R0D4(0D`2oQE^t^5E^xUlDXBQWQLjHv0J8^D2<|e13Hd~AFi|6Cq#j{?3 zK6CMOEH2ezPXFX?G%Spv$hClI*$9YuprbFgY~L1#4qoB@uvzyzcJGNR5A2Vv+jqs* z&6{Iza6o>Hd{xPYqrlk&$pfF|rNtPZI3E{Joru|)=@`^Rb?2VlesHlkzYvqN&sOK>VqM{N^=)2R^JWj6Gw~Q7)eo;K+fDz#&)~BfY1{D;B`EY4zi=ZH zW$YYobf7Hn#ZX;=Mjvze5`Vd1sLy^>8c!w5uA@Kny(-=1)}&V(=tQUTfqz1&Gi|S~ zO0%w4c}ltv<-P5B-WNK7;)`W|^&MS6S^Nh+R{?#^5hL>h&9dOD#_tm58@3hBjLa)( z_6-eHS_Q?|bpMqP0W|qk>bkrQSw|BZ;i~?IKZh--=qY)gSJc3Kj%Ga{uTos+#`wH1 z;f?-_;;IG6#<)%UOB*b!QVt&Ir*wAW3rX@{>DUa$!A{v(#=<4hu+q~Ob27@UeXa49 zcl|<>y-PeSTa2#0j#%LHCT%^@Cl*d=Y$EkCtVrpVKmNkcC z1JSf>&3tXarvxwtGFN3o7d{{3a&7=mzWLZ|HmPYW#E$tvpKLh~5;)GAFc0Yadxmu{ zTie+Yx7>U~+;-EAamQ`9#*V#PHD)C6;Gc1dO$J@)nCyMX4u0<&-W;{#&&3;G@#+{- zZa%x#BOjLsaf=H}G1%X4-thK6dPzChz_`pO3ZcQW@WirWPzCTFm7*>7pOBYTsVPS? zSpO?|xj+HS4V=iCu}SxG^f#!5!J96DLIU-;;-dRaC_2^9X)YVy0E5mlM?jy*Pn%1* zjo>=0gR*(0GaGU~*La|XUN-uKZa3gc(l_|2hKd&|X&YA2s@(cPAM}HVIEAYX2&MmN zv^49mQPh?7(KgCtc_90K#!*Mkk1~-kzY+%Q4|w4ve2X>fzPzqNW1&lTk#RT=FSIez`zbHsg_fVPVm6J^g(Ow?s|- zmJ@YOPhX0m;ept-cTc?Xn_d&ooj4Ic^3y*R&(x==Hm<$)1@WE#{GY{7{P5dc732RY z{SKcTk7H*p#p!eBQ#lkLj&FqvN$gr zDn9_foq5RaZNo7tUeF`^Mc=VYi}A^i|6}~0-~5fZc>Gd~4)2T?+;C0&@-P2nY}*p? zf%iQeAAA4bMPGNHtpL5tAiv^o4a# z=FPDxTUxC@0`>)@1I#v^4F$;uf=@{By_vt@64jTyk$PZYAcjUqeB#T= z3umH3bCy@U=DuiUoKYDbh@;nwY<`xlgLm!=O2bBaUBM5Q&Oj2Xq z%y4OU`P8IwP#-jr=nZJppQUJr>uW5^Zs0%oE3lu99<$!<{K&6LRNbz>Af*EcvV6+8 zq#=LbH`XV`Up4?#bv4@7z@0Xe^%h{gWiGF7C~=qTC}3RRP&8}v2}xg>eEcC$4(i;H2h=$x|vT~ zXHrf{r=aQ&K+&16iC6i2LIZTtZpAyl<(WLf>K34W(#c}-rT^sN6g=AFHF5CKR;)Eu zxO#Ype5*LQ>K{!(AAOIE^?O>R+m z12MXY9{Ii2W7%mXIzH!^x+0@on6|u36SAZIW=HZF|0&ml95l_zAGgqLw%<(|)2(z@ zk@Dg6Kl0<>LorvZi>yTzpI5+t3l7oYBiP;kHI|DKB)9Z=tLHR@9-7{uEq@+og{M4^ zG{38Qi!Fu@uUX4jXF27??zw%-s#*O{Y#!_%;xNFdFRXc^=ITmI9DDX?eDM>Xi8ymS z*5?k3jQM=|Y3sKX2tGk0alEz_? zg+Icl0(dzkHypG_yXaWe^sA%2uJODhw(gMccSTPO4{*|BOH5A6?>jynn}^%tsw?_p zq`SqZmOlINTzvffACJ$!?~h{j5uT0akPNxb9&~XKYE^6U1>khzp0S&X>*}1+sZY| zChmqUBCql4O1jwZ_+(xa=*L`?mwV4Y-EIYii#0BIWZgIY`P`l~+Y80vPct`mor?Dh zN=gH3($o5-mY@2GSKcp2ub&@}K_dt)z za;NeTZ-Yilt2BVJ>vfAM^42=h}2IUVIUvE=7U?+=y*;5;FN$gj4~KD zmYG(R!Zg5a7_IsdAVz_l~?t2b!?7BO|JMJ z+Q87Tt`U3p-Vz6|d9CV^b4>fN`NGE}k->mPY8x(v*#JPDv1 zwvFzI@A;1JiEsF(Z+6FW0hCw#B7Zujc{Cfk$wPQ(E8leF8&1>*cWGXE;E@|X3SEk~ zOemXeP{3DrQ#z4kQCT2EDVuG9q`LF?Te>QX&REtOfV#Unyy(vab!22HzV!HG@m=5b zJ+XCkQ@rEf{Ycz;@2xS^Hsm*BIA#~|Q71Oa5~CSZr$C{&yb%J>4f5u4nYcYgegztN z@><&bPlD>XEKdVpQc}_iD>GR2Zw1njQ>^5d;YNNH(arMeBg*15LG}+Co{o#bb(i{+ zzvM~TRrw7iE$ic6mLIz;4^zhg+Obh? 
zfWpt~KmRw#`cv-;5XX4;e0{R|Rec1o8n~a}z;G`asBSo7u*WQL+f$d)DabJGG0ujg zlK?LP@&cx(VtC^oJ6G}B6HbBSk!eN?caBkx%c!pjJb>`qPZeJaozPcR;XZE%rwT)d z%V|=+7F`-y&vN(9v0om%+xHwtYZ#N}*y}Rvv>(TbMHJqY zabHn;;05V`TI+tcEa#cGypgrCxegWpJ9`I|sVyd^CgX{xpNjEw<1sycDPDT-?eU72 zzBq1I=d$$)q@kxvaA47b%ZzK9FUhG6=_b3ZL3w=$Ce&7i- zx}!0j#bF#(9xn0Fzmo_2;&yw7_@nZ&L2STi=};M^+mBMa)nJ=U?4;?|k97r)1G-N= zM6Z$$=qvi<=r+kuBr$%OKk_g9W=m~3CeNRbg_*@T_3(r7yZ`>jV??bU9vX{H2lvFL zn{SA3{*`yCe(A5!u#B{v19wL=Gc;LCb)>@Id%z1ZfxS!d!?FG%{cvdeJfwZc)=hjAt+eTpkyM?!^9bt3zXZ3Y@) z^_)Um19t@(e|i1X;BgQuN_p82%3_nuGa4mmJP1`D((~SbtNRL{>~}(rJ*9rrvH?nV zAkopjW}YT}mzOKNY05dT=_54hFXOfsAS+y@L<0VSE~1a|0|VD8`-rXQ=zq_VxyMYw@Kod{H)J-Wx<8d+Nz} z-~0Y1o_p>&KfLF2bZj@Qf<5U(-2k~Zn(bCbbzLQ!bd(kS>-EDcZNYvKJ32Lj+ z)z_u5w(bX;HBRD?ZN@%nh9>Rl^6}n-!-FxXac|S+vDmkFf4u1K7sRz!Ulp6jhTXPq z*&yuPpyrRfeT?m|9X%HB{K0=0OB(-o4Q-1<`*y1juf?MJHGVbU!iL9wc>v~B;~#2k zo#BVAH@}PAFXw$juI_W(NT1Ap{ZU!oSh60VckT}*-$oGbGUle8gv%@uKGvusZ7`N2+R2NNlF^?8uu?R7^{rilzI7CZ>u=u zPX7le?sSMHEPW^3te-gl^?iYj8vvjI{ZF7=!I?3oyvV8`akQ*=3kK1=@~--e1C_!Eqia_CycV=m~P{f#yGHuBL@AK{nM zq}zVEq1uE^Us+mSjFGXi_`>~PjNXyq7#i-4+g^HaEO*Yu&;IIf#$!`w;^1D5$9wn1 zTmHpAkGFjHH(FKBsIHUeXJTe?DV{uhEKZ)jr2f{gKG_$WM@C|-r#rR`bjok(j7h2>c&-)YHkyAe;u;7Cn9awkf3_67O+BK7{9?>ho;S!57Il(D^J<&P{EL zs*UIP>UG%~)>T&IU$T*iV`+K7PW_y+)Yj4M2iIMybJxB-vGc&b*njA%*tU0942+K1 zzvP7=9$fbGEquufp9}kjZuK~w@n5j>Ts`@)7Zq)5J#oef*p5xLv+|SIjNVi>5rfnDw*Os?V4g!r!KU zo(H0Xq9OC;x&=Q{;RD55us^H|>bLO|0DPK{PXKhup7e-G{i<)~_{sS5|NdLD`E$0l zJNF%kEqixHhxjtEX;ZXICm2(B+YtX7S3)8&-)jp?KE+^oY}E9O&s_32a?K4_$Nnod z&l0&SbOq^eO*hN3bM34{=s$Xhzv2zu`o|_0{6)Xj_)l#BFs>||-9`m$`w}N0GW_#> zRhBg3Hg;n_sIS5WvMhC~A8y149^*(~kMy)lY0vVfF!Kc=X8z~#m_bc6Ql31#NQSI7 zo&Z2S=y*9o-S=$tfC0A8iWd$ZYf#Lzk%xPJk_R~oy^L$DpOfb1=gtkGR$V--gVqX< z?`5m-k)YjS{}?Bfi8vFeI#r9?BxuM)v6iDEATwX6VXnjpCg8Ps4+r!IXNv{t>`?Wqt`>jOk-5g2{DDEg{L`}B znx8|QTZsog@rgKd|ASu7Y8e=bz4zW52VZbU%u6;?@@pn)%dse4w8?jBZD$_TqxqS9 zJ=P}hjo_X73;qhW;X}imzPl?;;13 zh#8=tKPB(W)qXInra$N=`#ZnXN6$fYMYe02KNMRmiRdT6h`&XT(*8kH)}Qytw!*ho zeF8vqBU5z9bJGs_QR1_o+bT?+@?3?N8CipNe^9>!FZx#W2H*g=Ku5n}dForndp_k5 zL@U1PbN1tOO?#+#%!Be5`pSz!#$kN$fWD{pD{f<*1;CeWRbG57-Ih2ofX94P*z1Jw zQh5Am6~?b-9M4$sB}@JG-^3@=p#h!^0Bv{Qz3Z)O>uWppvqHF1|EW4C3%`o-TTmVC zM%d9IChBKrLLDH5lZFpt+^SBFFxmB|CMMz3sY+QZ<&6Z;^BocR$+0sq+(%I9=N&s# zt~%9B6GUC`H;)w}R?Jr~VnKOTn#3YM-*{)FrNO*Gr)u2^VIZXlKv6Chiy4}v@CLe% zD}q6yk98Q+tt-dWtEg$mNEnyG7OK))HJI~#kELZzZZx4`^3vWp8dqQc;^^qyDI8EK z0niQ=wh`c1xFTXw{2U-NRc^H<|{ z-~GR$Q+Zzh`qv33^a#JiR#*9U)q`Q@Z6d0+o}(lVi`S~V1;)@eU7Gx-6XrD=Lz(=+ zC(62${=b0pIGd3)eneNSXk4p|Rxiwm7sUx62Ex7}4}Ao@`YU5%t;uFPE-wXXhBI|z z9P|@5^5l!N5=R>M1xnd02fzv*Dv=L7$g$B@8U$K547^ZCKB6F0ywyJjmn=Xi|6zYj z7wtAER{BtWnIBy6N8wTrRZ~V~3+;(nSrm=k0ylY$UO)I-+HF>lUjaCZ7NGvl!-(Ml zPjSk~z{KJUZz1#0zFYdZQeUwh=4}jrUr;z|3-y?qn2qs?>9}~|QjCvJ$A$9~ieHF@ zrJ9Ini@yGW=<4o`fx#gk56$Zl{(h=z?O^d)17de~U(^>WWwZw zCSL>vM}~>2vg@ie-Sf5uk*SG5>T#DB^w6OBm9g%)?&;4>!V|IOB}d@&1K&l-}BF2A2%NyjgEf$4I9g#Wi|Hh z-X2%&AC``@eW)!~7TaQRVV&pHa>BE(=UYt5Pv6)0)+fF0>FS8C&X(vBP5nK+(Obp! 
zs86t=ptGGH$7QuDPJOVWMg1Y2Elv*LAxta%MdLl=hc%A}l=`vINu4|k@9FA^LkA9s zJuNY_yci$*=Mwj^9p*mX0hioxiFqDvRG#1673EPBDv1tJwK{t*zaa9^N$8n6M3Uj2Z$4X{I z?u$m9WrWjqfiP|19@-rzT)ws|8-=PLHf&)r+UMuv-0{fXh_PN7x{@jH)ee!gSUz&`Gx#>7_`dpkobu_;Cg)hZN{{HXdc#n<_UP&3^A+%z4GhwG z={66(R~DCJVR1ed)YtgT#fs#IPR`EF#F_JF;+f~3jr$+?V*LF_J{nIw{Z!1%Oa-6L z7#SIiEt`fli5!UfqWWq7U~~-&r27W-@NL>Bblv7>-W$ zyZZb>)F!7jQJsqOr_V;8`qaP<)oc9`KX{P9Ng3++t;>>@u4~XC+p)f+D-RaLzg0F& zh;KH1qM31v@Ve3%8)OL^eZ^_IcaWcSk6|nqD#;Iy%)jvmJpT%${ZpL#o#HhPf+bI4 z;f}Zpk7P{Himq}_Wj+KxWk=tcjX{g4h9;E}g)YK8jl5p)^8#AhS|bHDX$~i-sZ{Ns z!5s=(GDuf*e$|~vFOpV%^tr4LNPod0YpwuT>2@PQTRIo z%PN6rSc?^;-p4e08MmA=lHU9ZRfTds~5{j2Y8eF3-HG!dVEXH(COL4U-g3UY&H=1Ehfyag`~0er0L+X%_K>_g3d zvLPRJvf0#i!JCo>Et%|iM`s$;EG@`u@L&i)W6|`gd_jbLWR2dOYra$?b9Njr-&9@w0LC>1ShT z%htH=)*GU4a3D@xI4Pf{Gsea?#iH!sG8>9<_N9l+39Qpni1f{oqVy6SvC5@>S*N6Y z*a4IXeF6n<-QrW?IgpnJCGD~Y?aJ4wL3(9sE}nSc%c^rp^OMf#8yG>_>iL zyLhU}RoVvZmn!XOQCJQ4<_r1UZ1Cbl74)*EImwD}?cBLjc3N{Q(IqlsMKrOTft=`P z_8*{EgwujtgHSvT@dI2!{Lzm#(pbld249IX2Ho+OHfKCQcK0O#SM}Zus4y&|>>BM+ zA%~qr`-BbWg|Gr?Z#qvhEq5AAn*LaE{gT|DWJu!kAF(z2UzUL zoH`l5sl$niqwOA7gpYLH6}o)l1lj!${?c1X6J16OJ@6sNan=Rl1&K;gz6zHWLRZ-~ zHOYJ>v5d2R;AFX|9L342GF3LW&$fmmtdRe-xqO=PwbFPTg@BU0p!dBO%)&?4;RbnN5vi(8RZh4 ze^6BYcY^zXbva}B>%JqIWd7`@e%|IhToG-y*L_z1&}{1`@vs}*%8>tPujIvispx_( z{=>Q;Uf$Ir%L3F5Mcl@y*&gTl|H@(Xoh;(Jsvw$Ku??VoaXD5E{%it}lw- z$=JDPbG+v3Un$bh#lp;ySg+4{eUo+5Md9YzK)ZZA)=1zTFSEEGi66`nTG{NQHu+w> zF8qXt~-JnMkOY%d!ENDw`yQ8GXY(RC3{9Q`2+BiAZj z@#rBN(EM!J?RP_~{+#X2XHJQy{754|{gCH}rH*nfP+scH_6w!kn+JujwYlK5C6M_ z#G3-zlhC>i6SUEh8T5G3OGKm25G!)n4a_13%x00Bc!WqJm^^YVN<#f?E+7s;Q7*rU zMZFns$5_nI2MbxZ8zo>MvT{U+6eH8jJVAL=(f7)4TI389V-X@;jDUaz5_baMgC}X@ z`yt+enff5w2BW6V+#w!ybq&Rioj1kR*S|&uMr04vmNs~oG|&bHX?E9m03>WDB55+X zBPJlyN}H01#(i})B)wk^O*{!DUo)P1aRBHsY0nA6Fa7E- z$EWW9L|lFFQ2fXbz9U}pa-;JmJALebUHEi%`F6A{=IhWxBk;(KF}Q#`QjMN?}> zb`VACYE%g*EEh$VpEFc#_X8~{J32up@gjsEjT#*(%c9HZn{@yhWN_d+lS|&#TwCq* zH(waM`g&xDJCs9{W6CUOGWo+l_V42ZANfS|wyniq{@MHEii3NVwL6A}`h+9S|k~x+p`NG33RtbENq6WZ4?M{rz%A zmh-LGiso+7#*wrvzP5L;fWU^Wx(8M)JAASph-Qv)qAx;w@(8=xixPiBokR{?~{pbrB%%sa9Cl5_s%^Ve73P5{-BG=0Ruhc;N77Kz6e#zU6~9DqwjY{+iYJCfUa}=2?kfiAjiWB-%l~Jn>f>)7E1hqI z8l7&yoxBu%(9IoTmE-d&ya7=hE$gtdVDssp=t0@Z>2D9)aq5iO+M-am#l^Ez@wb2d z7x7nr`PUIWL$P!3zUUv+76u?^-j?*9`Ys}Z~L<$U`_pjv~mK#8dn@xP5@y1 z7H-BOZv;>nJ+it~I#+;t=q|LA2?66dic=qtoc;bmCOcTHn+Q?^Y(}?|7 z^p!>Tj_$75GPWf)Zyt+Hn&s`>zAH9u+7kW!z0p6AZ|3qAE{?`}Tb>rNywnm)nsm<2 zb2>~dPMh!(3NrDUFhTO zv48)5$-FcA`g=rIhjdsnRnh6G@p$gYv*MHVUwm6ynpeKAcwQXe=C0LSAI2m7uw>$z-avFKl|S3SrngZQ?aJL zF}<=BXJ;3qXW#a?^6uMW*KIdOj1Adtth1YYN&R6-d|!~9<`-gZPLu8FrD)NZ*RHXz zzQnzxYW$BFPMWG1r$a-jo3Q#TvZ%^7j(!yoN8OoTMQ(sBBRr}g$3A0|JQRl$At*TX$6@j} z0UKEPpH+Ivg-{DkDO^j(S3b9}w#pl;*f7nv7G|PDbB}KMYL{e>NB8fF*ZtVrtUid4 z2l>3M(%#h_i#3fiveh5`@F!!aza#b@x+YE^IU9fd!4Jg;-~YE!o1Tmz`FAVQgS0ET zNsX~$bZE2sEPfnsMznj*+TGV1BU{E|Xn5Fp*!YAF)fsyvTjp`gaxy0-C;es&56gJF zmIrpE^~i>>1#6WDcWUO^Nq^{YrF5_@HZzjS{+t&wQGI@sPSKA1W4@-<}7G z$Bv?p6?xI`ba}oUd4mod(t+&fl|Bocqbko4%~sp0PYF>~FXb|(S(iY#2v_dE@6&fv z9hI)x5A=K|%egpfD5Z`DtqFvzr>2uOst)xyQP#xW0O0@+dEg`U(~%_G!Y?KuvSMCillqo%CrnH@)o6*uHyL{N%6wT6}5hXk2mNrr3A&Rq-Ew<|pGNcia%? 
zqyp!r7vsXzTwK&R_uSFrzUOm3J9g}d?IT;FPrlUV-qzSGcc5EM;q>^0=~^t+=HlGB zbMik|Y|!H^rX4yVQ3F9M{{En|Zje%SK=WnE&$79cAZ%iuq8fSk3RlrJpJ_3aq--F+XhZ6D5nc>Y6#;$$JiHN2s_x$XDRjTA0COV zJNCqZE3b^5nm3GW*&e-pqIG062K)M>w~IM~v;vzZ-ml31bt$d`T@@Yuz1{M!BR=xs zhvJw1+pos)qZj0}ToXU_lW&hp7boM{uRIwCuGk;D<=dQ-U&VZ4L~|789d+5HmDM~k z6I%J90luo;);~aX*;mdX#c8}y#r%W$FK_>IqH0}2qz{qiJnJjbDZAe-zjL^=D`t+J zj`#lNyS+Zp-_a9W_&mZs%^xJgp3#x$laJfp+iO1a*s)b(6S7=fU6OAo-$-=!_sgG> zjzaT9ZCvuJ#eFY(actT=5DP23HP;$*%W|VRNsIZuX!g>8bDQd{rVZrx#vU?#TvFkE`OcT;?qg`RMalN$f(g z3B@nrL3>?(S>mqjpeiFjKgX-TP!7MwWh*JpFtSpyx`XL57T4w{hs>g;q zKF2>~Qs@MCSvRBoT)n|gSMU~aIIGlssYkeJ3pb9>INep(q#Ye{Y?ZI{8GvR$OCwK( zFT>)Sn$jpI;GP*S7M^DMUHB};sgypoAphlH}mtt0JsyY0+uJV zX0W2A(XPBIgG+FN9X1EZG|7W7?bMZ~5WSI>MqL8yfzliDA*^x6X;OatljKJwgh~d) zHApcOA!S{KUT|JscSQ@bV9ch2T?UuuH%ZSc4?vgaEEWHn{4IKxbe11}Nw$woaX{*S zOB);Mh$me1SZ%?NalOJv(D#7)tLQfU;FkYF*u^TgDn)iz`Kz`y`9wbKO=fJ~XJ4R- zChg>49L{=8tJ543x%w&S=LOhv()=v1DF@0D5B=UuQ2{QM*Isl*?)gA9$2@>CoPxs` z!^wNUH z#!kOr0kroppJH8ZUE?O}oT^*IwFomWl)?wb)pb7itNC4lJ-BHS-tudU3<=c~t-(T>>0%NipcF*gzM*q1NHN8j_iF>&}A(SJtr zI2Bul>hT@l_Of{OD-H_Z>1dgsaQXG6zF1xCh^}@%f!Zzg68)$>x+c2phx7c$Yuec# z;2pp`!JzRa`-^xi-uf7Pflm?;6y7^`5?xWt4d|hXOO~MbkOxcUuCzz=eDZ-ercp?H~O7x zNV3~1#+tW53YWOhL&XUxVItrfHHqf@@NuiT= zmN5do1N=-$yk(wT@}xZzpF|()*!Ak9(e-8JX>IvZ%a8oX-S?~L?S7LPB*v5o>DY9< zcG*0L=<4e5LK}`D59v83DUT|WQz$~!$12E5VTO0WsiB21zb%XEs6pP^7AdbRKr+GML#` zyZ)r37-og4tL~B&hBUYhh7!@tP$^JCr9cg!3@x(yG{Vhmf_Z@zRNKnJXI;GLRGSZ< zJRWnCb2327@ve8hE8g?ozl=S5#^N16_9OBA|MDHq+XTLi%;HbV6osZm@QmHHTuo0! zJIzRWiXq}@?x;x~fTP*O>@@r=7xOF=%Y7d4p|J5gN%}~A9yY?jaZmDSM@3NSwEE+CUpL6XbyQHwBvgzrY>cl^R8aVOITU_XLBZv?g}o8Z z2}E%Mo1oU%Sxa64;Apn1v@!Ype7L;6#9iVpqnTL`df+&8G5Z#}$3OPTc=q8(Vtug| zEj^vlyJd?XBCRg3NB>a27jGDsx-|Kpo}N~ifng;so}Z9CYx7$au%)fHJGO7%99La= zWxV{pd*bWf^bOH77%?+5=Zym_sP=c~_bd56T#K9`zFp)u21Ke@A=}CTt9xZy)gaOq zEYu`F-EO}TKwI4$_l=ZY6;bJeI(}Yw03v+YBIjU7G&d#O5TJb zC6Ikx`7GB`?rQ+@@dP@d&`SMw;H)!6@0x&Y8vu^#mT2xXnQ|NPInvc(rLX{ z-R2|ORxz`*uYl<@uS*|=UdpGv`=_AbGpPM$F>QCfY1a~$;<$Bw^%E2TxhTxf<9fZr zb{KtKmBY%0vbyXkJ6+p&gPHGn(?<2vx!FZK&b(oA>h#6@^q`kr zJbY%uN;PSKK5y!hbMDECn$z3U z{KMyCp>bwrJq{l`6DQA{iRoF54Ql)LZM)Sc2ZVE7W9520^4O#C^s`4|d3H9gyJ9T9 z{afD{H{3Yl<4|X3mg3Zz>6n~?e)Tc=OMQI4ni`Q#m#YQIsosa2D1r8zH-96!>LtbDq{_*MIH zd2gLX{{*Nu%_3EWO$QI@+ocb6*|PCdXQO_0BL3``-xZI1_M>s*HMd0P$fmga+rK_; z|AFs!DPU@TF_sqR@=Y<>y|txfjo%%yyqF8S_$K(J)YaX~2`af5v@kmp6XWCY_`{FK zh4WLI^sU7B_=Ih*1QT=fGm_u=__A#Flg~a^)s`^W))OORqcNo4{zF&9u0300*Zw`R zY1=m0=Iya<>z){vZSU^ti=NIdu}pl$N0pp&QJlu-NMn6hzv>k4I=k{)XRDgXtx7@X zYO`_p=+QWS=2U$4v!9Mf9(*LuoH`wI)AKPae{DsI%mxi_eypHdvbz4z4e_#>G;tg(<)NwkUm>pP!4;m-EWG%(Jk_y zX5#559*>6}eJr+Z+Y&clb3<(1voAtrzWlKV;@`jJt?~5a(Rjy;-xNRembb*|$DfLG z;?wHtLM*F|;~IO~M@Hky7rr14z2r_!z-0{Dq)!@?>l&w)#wTNaK@(clyE>=IxcWnj zWQILp13+%%rC;RS1DP?Jccu4>>H}T05)dC{6$Iuv$s5|YfLj*;J7Y*qjRRLKP=W!fi z-RG~O$C?72nGTf2H2`{?zbfyCu2p$lTqcf$@p%&9QP!RQVaAmD?W^k+8|^sBlIqA? 
z@@#}Mp>3w+JWM4r=z6)&x-#5^9H*OcE=e1FtReWt+eUKYboo=;OX>1(77{B(*|06#5-uJi+ zNAmOxZ;Rn=TTRlG?9xE*aCEfy#-81~v@E;77+g}eC637KOl8P_l>6r)s?dLhjRRNl&{j84SG&o_Jg7il^sF2y5H!h z6+Pmrky5z1&+;ZibeDNrZcTJbwaG;TDXgQhBMmKZAh*mig5#GUg$Y*b}e zi~Oid`T4_ zIDA`}h1@e2Pdoo@FOZM5YkT~!KmODB$caZ{G5X`>Z}|Fn_wTw($yC$yLzbU%Z#5VQU0X1Pr!{X}FdMr#VYMwVPgHhAG?@WC3 z@BcpD`&aLaXU{xgS^=x8x<_xXcpyJ(pm#`dJ+cj&^L2K_p#ulw)?06myY6~X-15RZ zqQ7TA!xTQxy8I;GM$^6Y1KS`>u`o9uiz}Sg0Xh*PKzH${+gUUez!}D1%R&T{^+0^~ zt;2)SDSs1*tkjm`(FY%mPkruF@tMzjCQcte?TtqCaZW~|-Z_reRoTJ8e)rcc+qcEw zz^MI1PA=N9dtY3A(+zRup{rx-w(T*tWvf5w$hYcQGn7lGcC2|$%SPMb(V^Hh$m}~} zSQ_&CfA{(L@gMuiU{*UaG7^9A$Nwk#<@cR<=D6(WoEMP?hj_6^dLiZUe4?H8N42uj z6I8-_;g*Z%l9AT|(0l8xWCwp3w^%C^V`R!)X0?;?W=%ey(o`-#0Z@-l`Ikeod2_P4 zfBjqkJ1${@YV7EcpEj~-b9D6g#?tDF$7`SBpnt6E)s_~`|IV%+ z>n5Lfsjn@?`SEjc(=FG>)z|D-Iq1y|06crBMCv&Y@)J4YHJ>OD$7D0m10M;DZ24gf zDjSh={w8>?;G3!sa;2~F2Aw59`9$P+-GYKE;WGRhHxUI?pC3M`ZBEl*R?;(X`hF!Y zeG8>2KWjXEBF_0L`WsjMXs^o?M;XT_O+fmS!YA30kKbyUf03@gtSyx~fwD0HAU(@v zoNaa04&4|BWsO62igd!MUkU5tACT*wYN`B5<7H3>q=1wu_OVfy$1T;h)+w6N&5AGf z$Dkvxjr{sKU5c6E#N{{0iPt@JSFaPlf$DEN_< zy|)R8Pjb*dgT@O+yj+J#`_6OKGLE_~@w{>0(8Z)7`A9nLOWf#_0U`o_AO2S9UQY#N zE8P~+B9z{cK(P($E1X)!CnS?^rOuoW@B(9l24A69i133HcGbXYrEewtRdS|WRhD?S z2OX;9Sj``j8vyyTJ^TRUE^?`Cn3^hJUSCR<@xN*(cqqedMFZ_@$}r`Dp#!heD#*Gw z-V?|?=GDgh!c+PLG_lTVJKNxM;-I_%TI{bR$RegL(9ax^{A*)<1iirL6!^4ipd5iN zpdGvc?BcZBkvacst@@vF7~rYyh1YFRA?rGZegmYAseQ~3b+t{P0zh<^2eIJAw)56A zV^ISd?S@C>t^63_^8;{F9Ci+0%eZ7ePI*#}grTR+d@XqZ=r5bg;eD&>^z$4+X>HlW zW<3%8mXGM=)a3Me6-Pc5%QlNI&_X`vE#*_zx>M=I-pC^~=4lm29_pf8{*;G4Mmn-` z8N&H+lsQYg9HRR2QY^D(bMd@4Cw}e|pNix1L1NnuWf+RBx4azazRdk6K?@jr!7Ttjo9Sm0iC2$}Mr_fq{sHmYBZ~@sFQ; zC?5XIXXClYJ}2DM(Oy3s9qVV}m)`XQ@v4{YjYq%og{W!H*59%<+ScULMVIBb*0yAx z!24D_Pjr&;1RvV{0KH4SwH_!h`sMy>p0?Y+O`BcGLG}yHU~XdBDWkh%kp;h09ob>V z7|GqLgCE^UQUTU(Q}+G|37bnz10WiCe&T1m0MCS#hM(skVF{S*MIRDh&JW#g$#IQO zBG@L;C!kMrN_0edx#sw*I@CVKRiAp~bcmJ?tNsQ}6{J1lnNF59?&_CmnV+?@x_oND z51L^q_zm<2vI%7^S4X4GMF zB2W$J5TOQ850iP~iE~}3SLw(S7)P(tb|(I$V_f_#G9J9kFqhp(l;<8={hbkhC=QJf zq%e{m2}Ng&jP|)bYb&XDS!Gq1uDprr<<#Ctcsra9Hn&BpV?Q%eAJJ&TCT(=X437*x} z`^6{vpPfZ`wt+T>0hbrvY6AfM+E6)grJuWPWpwLyK_t`5PXhx7qBrUqA#{8rm5Mp}lJt0F4-A-A+y)dY(1 z1EvAwHU7)x;dGpps;_xM!9WD<$U}0l?7s#S9LO4RQHJt)UxC6U*r>CBXmX#1=F%{r zqS=mA;fdR-_=*<(HuM-;1z$xgb)z?o85N`+tFDdtXs3c--OgsY=R@jom0r=I(N?#T zX|dOMI4nmrVG6=Yro9UW#7n>e+F2qPE0ik3JQ@ z_TPUq#*dzfkpsJ8%dUMfJhmy`^1a_2Z+hdMF(>rAEPZJ_zp>9)g`9i(`(z(jFrtsM z_@rsR#$Ar!^xOUDr})(=AEie&e3he-m5)s;9QWVd((&_Dy(3=t1K$@1-}FXGA#vF`wTE_ToM@E~Sp2)R<7yNBkx=vYEQ|Wc zgT9DNjy(HZoH}*JrzA~IPW$cH72C+xc;=aB;?c*RjPqyDN;g;hNryq%>-O%R*fhE+ z_U_shx88AUTzSp4v19kH=-<@mH*sZs%;UD`R^2N;p#`6LWo0=ZbV=^?F+NXWe^fGD zuBmS=Do*@n13;(7OpZF^4d0Gxb1VzsDi|}<3-Q#m&%_t+|8jic!7s+8^HXu|lIUJt zNFEAzO`UFDW5#IDSRCB9C*J(6-xjZY^~+*x+a|T6H{TpoAL^4&bneWFSe&cHxs&I8 z>c!zBC!$Zf`o?d0t>oRIJbm%Zk)!eWV_%7#JNL#=Z+~nZ7>?1cJL9=8JQ~0LbH5nR ze(~XW&5bw2t6ut|=wE3UKhMVex$`k2n{jEO7Mu1Th@ID783*sYDO&e#k9En6rR^1V z=FiPa2i5;IzO65@Vy&OruvSwC#Ad1wBQG{jAaN`cKl}pXtxw3>wlKkQ*cHVUzr*95 z^`z2q{oEI-dl@v_$8rLI=QIlE;ve`bsOCYon;GH1>wJT>ig*95zG?3_w0zBav*RgG zwcrO0gv*8oz;U!v^fQ{4yp+ecE_R!1qYah)VE#qFvP~yUof%^&3#LH&iV0-%04=7nh-vA}OnV+Rvdr4K8`tkpob!@0kb!U0NZ4lo#(gs+5!i)J(v&9kQ*DOcw zLI>kR@}lBNWoKzYDM9t*2N$K29`O-@@dgi2(Xfs`A{z6QhmBw?zGBy~gVVC3%hI2| z&7(2CG#@pI>j!=>AN~Ui>R;0{;!Jl1 zrd1c;H|uEVKj^FS1xz>VAmE|D0JPB+R5r_Ff3sXZ^lyey0B&2P_!*b&NZ&AJ4EjvV zK=L=`L*3{f-!P@7ES)wy`!_rnnEuQUfDe9{FXi5bCgtXNr?RF_%75tYirQ)|&YU{y zBj<0v^_KXn5B*(y^2k%M=Zfp&z^ymO&;9zl;=tzqIJ~eL=f-DZN`A%3OXI;?|1*nq zzxlsy+qSrJ_h@WlAL4>^ZgJ5YF}q~r=clJ)M*aBw`7`mj`rwB?^r3j@EBA|*wWw+Q 
z>+9>0D^`zzzRu_!?D3jZ-{6pRt2c%R#(d(>Yu@m6aor6!#OT;iEKbjQv(MCQ&L4|y z1~}H+SOW5?1)`qT1_RKkGQ?5g5EYo~QHR+dY zn1-sKp+}8}Bi;a@avd42Gar$j!|!yLHBVVzicYn6sJ%1hPoIjv`fWAa ze)EI}$jZ!1X|`R}gJbi7y!vAtZXH;XyYxrm{Qy)^Zmsf{_8>pwseaG{{_?<*Y?+pg zDdm)_iZ9`W5;jfXah|F%0_5Si((nyX_>=d_vsK=<5$_Cs;uWJD#QkW3UdV&9gVUI zS0t1%yFn6w@>KmHp(-!@9Zl(wNXEBt82gyy;i@^mhOrpv?$tbJC8j6N#nhz> zar)S)`0U?(AmY-+XxnpDtPXCCyWj8)(W>jY>4lip_|K94^qO|j(%G3WXSB9;+ODjw zqfd+lYHyeBB`?-<*4CH3=GNQ27F&n=VsJp?kLFiA=NlUCkKx|-ICXZyFH_Jq=3Trz z)!Q%MMsp;5to7B!Xj`6(J)4JO>$bsInUd9>Zqb};DxQ7(+4%BD{w(HBd?AK6EyU9N z)A22DzBAtXFJBci=Z^Y>&yKYrjSg%8(7-O0S#Mhsdtr)fe`{_8@_549yJ&T|;y>I6 zc);&_>lHjrSvKsX^%MTcWO`m@rE!-=^8Amywy(S+vEFG`($ zKFzaKWuY8dQ}1+DR^LN6ug2pa}Sjp7Ud^nRQ0T*^H|W{#NM#a;NX`Ij*AfDHHCCY_hI8YhD@~ z`Gl6&%$3JuHo_KA5ApaZJhxSuFSXtC;+=1m(c7thk~qa+79D9cG?)Mwtqd%hlaIQKYL5$`$5=+ecc95!BE3QehV9EZC5S0z0fL3m(=I$7MS} z!hlKV=9=I8W!uIZocKd=oz?5xe0fvn68u;U+L)nCX(vsTa0Ed0>pT8>&Cc2 z1ggJ;febaEQjUbC5+s!J&%>uO**-cM1{pp{@npc~Yco+_S&wrU$76YJE`IpOe^Fvvbg0=ZJk7yR($kaPC&Es@Dv8xJQ+|KzSDwks-V)! zuc|~cQF3&2idGqknkJs}3$w<@gtSW&M;6!DSC-=LyYG(fw!Zkl-@GqA{e}DE(MKPQ zfBu%Yhz|M*`cJ#ueDRicai!kWrP40yEA=}L2gWi~f(=`h(XDaje*&sB>z)z$e>ytN za?N@%UC#&QV}TqU<=##QJgExVaO6GZ3#6>F47!e_vP#7hN4f_Vpy*sFQ{o9zDD@Xv zQ#Pqoe(8duz3PUj zEzQP}ryh^JJGaM6Uv+C-zh^kE+_yPyy>dsq@TLQC^Bq^m%kQ~4Zn@!(xbn)Y;_537 zc`^B~KlIV~>%aYIJo?0w(bw4#T|uE+!A;A33jUO&fMfB8>& zr9+T$)9=90#NW6lPo?XUi1I-bNcfRPKH|7kau~C*f7Jy$13D;iKH8KC z2`&$A@B*UF$yFZGU6%e$TC*%E4JdVk%PA$5{KUaOQQ>I*Ko?iinbN`+qUnFW@o$}n zM-?v{6lMki(+=^jRrXlALA=L9-qw)=f^P8OlSMZ4g-@k)4fMy%)O38}6JLm*|H&VZ z+T;^4cJS8Nw0C#heDjU*u3!DBc*V^-qpmtnjYph4U5|6;F2ur;9Af?B1fb=md?>mk zJIz8MK1Ta`XSPI!x3{Z1270=qyQ^LIZ80{~6`KZnVnjMTHqsfroozAH-x_1R((CRP z9}mhY0DXP!(s}*ZmD|%1-JM<0)7vSXZ`!tX)yw9C74_W}e1{cH07Ppie3J_D{bY^> zt)vnC1M9JUORxIqHjPM~F+H1aQqIoI1fNnG8y$=u(FluAoj4y4JoH2yK5{lj$9Ben z1A}qZezl{cUlp@4Z{FVpgYPTL9AVl3tw2)06MRP&TVlP(rg_w*X5|vS?Dj@y9A9*r zF_OMaJnFqN7eg!%&R&Y`!-KJFbR=pQPQ}vnbPUQivQ42jI)}Ia4d6+UlEd% zu5l87$^BaPq6MC{$WLUfW}%R=7qY-n`I6*eq0yW6tZwL1(oQxP5RKT&d;sDk#nPLy z7o(GP$sy{ZNfR>q$^7}rSidk8AN`Z}#QWa&dvWyv*|Htm;#>d2567O@ZSY}gE7>+7 z=}Q-sE}l;-xrx z090$-|*&dj=S%@H?Fzvx@hlH z9lRl^N*Be`*?AtoFUcR{d)&N9sH=Q&-oE3z+w^h18QvxRM7Au>FVr-N(>=NhPgwY> z*R#(q38y!=im3zAx81vU#a(ya8TZ_CpTEIBS6_&e=T61U^o(?OMY_G@UCRqAwK%P| zed<%6h$o+XPW_`l4qka6Mn;F^5A~~G*J5~NTl8siIy^8G7tUW0ee-ep!o_&xp$Fqd zci$OpU45}>({Kz9^~IwPKN#bamtyC(UD3aJYm9E)8n1o%YvcT>qw$9y{?iz19gIU) z9g5MBO+LM5=F)UbUYzog|Ff4SCF9i?-MuaKg~fOAhgE81<~OsMSkWhCXVw(wu|xW5 zwNWy@fad`UjLBCl(^Oc2e$RL6-4x7q<19qAI$4?+K zpu#wU#UGCWx+DPT$R z;ppfYj?J64#0&4bE8g~lZ;x;K_BX_#Yj&s`wZ?@D({cFlvH0Bm{}{(lo$$#X{j&Z2 z!$Up=h0p4BYD~c2W0P55f2YR8KG}f2Sd@LNEiP!BIvVFrUx*!hcE`2@+u~Z;vODj* z!w-@M21nz}spBy&8#yyK9p}%SR^OkDPR*ruZ{6XA8GoWmJmWMyz9iT&q;YOowz8); z{`mKPH^w$^k4=O9VvX|1b}_f);i8Wdm487$XN|!Axp3H362RAn9*_phx$+|qNn~cF zj&51{rn5;GEL{2){X&-_G8e9X{;xkSMqK7s71fVF>Ngr(kTMzh3jsxkf#!WPF7NTF z!I3x}tLT#5X@qrc^f~%GdC_&+L2>*Y;O(%UsR= zNSl3Eq0D-e&;E83GE|jvX61n=in9%XZwh-uVlpiAc#hLRUaC{POX`MsP^NQ=pid(5 z=DSX>UDV{WF3xN0?p}*+@^wG{na{>Y4}T>z;>N8nyf6OrucYttHK$bm$hnC)t+DjP znG133^!Zqof35SyX=ApX$ zW7fO){3mbAc|8WUV)?||u1@%)v063|PA|)!y)bUR;pUi@ z{hOW`kBLi{up#2A`1B_}9>-3dkI~VAxbcpgqpPPUE{tE0 zEt2kO5SN%`E8AtCI^=Ju54myTgN=?fRP~^z`9h4^j;heNiBneVJvyy%gUA&y8>}D& zxv%1kh3s5ap7hMBi|#YlzkCUUJ7>`?9ML_f9M*pZd|0d)(_*$K{iWYFZLRLj{MLL} z!#HA_m~pJXfRwdnzNHTpwx^WyPxBUq?LQF4?rQP}dVCh!DaF^ZsSt;aAVT2LfOP%D zb}zz1Bl8ybAM#??UFfWM}4lBd$2O!Z2Dp@X#OKiF&IZOT`Zfm={?%g)Dt_{U|Jy;B&m|7Ufknv=iVP z@62X#3U^v}2{ z32s)&bWc(8<^Sx~#)14T1?5pb+Ej(r8VV&o^)cd*gQNLbZdE{6tC8EGDB{ew45!~v 
zQK|@J#OD3fPlXkS2^Gq!JQ!SKmN&CYf>K-z1E(AOHF$(RioNt}`v-0_yi^v2;imcP zvYAGcO*=v6O~S`GNL;psj7q2eiWWzixAa$CVJ_8^P7*ccjlvKC?TlUADr_92(Em~o z0WQQh>x92qr*UM#%%`8{e^lh8Y^U|rbo-8Glw!U?&AH{73mb@<45z(hq)yRq9%Ey& z95_oH?Eoz&801~{QQs=B`x-p(f>7f1vq<^?{1DUWcRpbozYx5PwYsuiLmr>tUDe5$ z%3OlaJ~Q4kW_Ix$PI#8)<>QFR$&0h`_kZ$75#!?`tSve=?T&-D-5Eo>55~mIQp_!C zrrF*LRW8SA2CaV4OG6O#@K5C!`&mO;QG0mSh(%=Grdx9>K0DAUnGN=~#b|$bY}=`M zj(pD3=cnTQg-P?x;|5$%o7tqsnoQ47Zww9gMt2*ZKkHE4?J+;m5@*jWSw2If+vJm7 zic6pU+gR+9PcnBY`g>R9zqH2I(cu`CpRg?YSLMI(Qfk%#li@Kxc+Za)G7%v<06Wn& zsIfp4mHK2|R+{B3@w@HL?|}HMi+;@W&=Ti&t16%@^5f-i>5Hf`4(pWU1RkFNuX0`+ z6koxq+MN%-oM(0&9qK*yN4QEyTqpOss+{%9B!Sl@^$$83TSSA`THv)>h~B8(^bY@1 zx=-JvUkDBIU_A|9WWQeV=}P1!4{L9&P_qG|TU#0-j+P zE_wcfJRqJpZrz3wuGX}a(CF{tX+F!&I=N4T)Q^qG{PYd~Ygs$f^22Yx^?qf!-8-68 zAq%#CkSkz^1)YvvHC)+=!b4pY7RGoNLG{2D#1=CB)q{&zE}D2#oJD&GE=<>dJ3$`w zWPrFw0Mx--PUQ3V8}L2_7Ow6|C(1OmqX7ImRZsJwGE8tjUCFxl)K(dXRXNQ5mKj1;N6!HYeEHU$s#6_Znh^2!h@A7T!Pv9sx_HsczC-!821mZN zwf0u^X4wo>od}XtKQwq1x&b7RA8B*tHt8==#TqE@flBg#dft?Pr#||4Lryk5&}Mi+ z8TdoHre~+b`??pi`8Ls0ho6ie`{|#EeVaDL_y5qpi`T#58!8Nr~Hbk#Sq& zVUNhHujaQ_TdEx@>{NkWej60}Bu_pMvGHU8F(pqB;9dh@@(6u{*OEW89zbm`dWKHX zO4F|FBEM==wug3GPJGixeD+Qyv5t)m#4C%+Nahk=oyB6+176?V;yY-Y^-)b8`n`rapZ!0;{sGxr%WZd)ggU%!{E<={th%IP1?Myp2IE+f0T&%^%Kk_M=gOzy2OJ5e7wrq~R-j3+)S F zh2UN0>8V-$T~$qTyE?mLd8r;NEj7Psy>07oJo)UY__IIx^O&6(kNfVtFJ5r_O)=O% zpgKi;XNMewx#;41%ssqDvgS8WM+YOGczPl3fB4Jsz=IE~>`L5r)4q7sJ$J{LaPo%9 z;^MUWJ0}AyATGD7=(ntnCli1t56F&tb{Sp|X^ky~wSFhb+|P(B(iIC=YGErwsN8!zRcD;iNBPimqo{vW^Y!G zfFvRNd;+vpd?--0*KMuZSaP@}Y^0!m@oHl`8~w+V!3_Yf1WkNZzYt_t*mPkGOVhET z|M3$(^P(Ep(PQ@+(NM$RV_yYKEpvPX5rm>T^ z{&^^sVbO)`THvMdhz33nu%>~wwNpAK{jeVh3dQ?jzWZx?I@gsRIWuEKkqGsVAO|$N%ml@$osVr;z7$~YGzvg(R$1+&8r?BAc!`FX=_{MWAcWu zeIdnh0)Tgfvrw!O=qR7AAdZs)kPps&oA}EEbmEsJBfhtdkL;b&(vh-(k_{xiqHk$s zIZmHD8xMT({`kj#_Q>erFY3k z>WZm8$soOIgSa%y80*^Q#gLFjM2q+@c4|4VEoni zmVJa2WRT;NdGFlZE0M9yik@J6{G=xd;xYZtZer=A_7CtAqAw{AV~xy6sXyxgvff6xjK}Ub&3KOi+?VmE(Y8{)nZFtin`Jga zHU4~6yxZ3V#t;010$v~%;Ah)rWwpMi4+0rSpME1|>aa4I4Z+_dK;L3OlWV;G@Va8BhB?Gs)kw3$RWIhQ10RQw!L_t)@BNs4q?{GD~ z3O{|12|Vwz*Ce|YwWrpx8VlX6vG3L!;|)LXFOUNAO7mylUc zPF=bX4?OZnJn)r=d=kKmUiOmMJ+jje8GE{V~j<(ZysMbd@|nsbMK1hzVu++bKNzurEkQ(@xtu9+$zl@ znJ-BW$klUm%b@s+^kcNFk_Q4t2I|GB%E%+HK-0&o6t9XewgPz~1MV3OxW|7$HyvN* z3041Tg7mdbICrU@wgb^ROjWIuq0WHG*)Qx&et0%`1Y&Gr|27tb*qzFUA$uT`9kDF@EQ--W%Hv9Ej1qd*c^=>%YY{SL}+%FD=G} z>A6^3uE+7SXXRf^M4S52{zLm>>-H_NeXK2(=2xRvKGB+oD-V1r z{_qd}AWohkY-Phd{i}FkTqLTbbKC858)0TMUt6vqj+2v2b=U$aqnxm0JfejeaWgDLYA51_TCVy1XWe@<0c2I}t;*04 z=PS8r1AVowD?bhd7&SyF`hn3x71Bd5-!U?Zd+r&@vIqj$_wboKM zpB!Ke_GT0D4I4o}wQSW6@^Na|s{3ThnM83x7yVUvI0#xcIKt9gKO((^+s#jK5>8zO zP34a(pXDp!u=)6H)CLrv2cFQDq##oZKI>4W4CIAo=yiTarwqyObk$M(n`U_j6V(66 zTl@eXLc>eKIiJY37%zanp(ose$mQUR+D%yL(839}SwDRaT9Rh;B=v+gdrDQiRBsGLBL|@w6Kq<(D=<`k{N`S+nyeaD*nG)3tg$1n>!L_rw!$ z@hKr)kh1b-9KV9Q13s&q=LN)zzovl((r>HhiL?qj*n9weY|^BEv}q*8pPHPVi!XfQ zgK_c$AMnxtt^I@1wf$h+@!GGCT9?Muwf0z8X^DkJ$w_5tlB$=VhW+Oy;??Nx?NV>3 z_$?#BsgNO_Xd0$qYjkTaw!S(Wd$$b5mHYb4;xiXkpI_XlIOeiElMc#Z<+aqYLrOgGZt!UufID{jqsdYdrgj_r^1y{2kFa9xuFg zTikd1j(G8%noA9}$K?2w<<4nI^|~k(uUj=3`Xx9~o;taxd-{kPsGm2*5F=vYW6pQE zulfP`D!o%HtDpV~l>t7%j-o?VUj*DwqEod?w&;d%lplQ@$_ljb=~0Etr#ZXh$FLzmAO+F+T&de^dMk)BHws) z>;ji_)7h!|v6`#msrbE8uW2yo^a7Ek+o3K=2NT_Gni!!X-8|@2{cV!fI_1_EWj{5s z(d42FV+LQOH*Lb*1%At31?h}9TT(vS!4dK7-a_5i@USF@4o$$!E3w2UL}HY{s5E%H z!pEA)p|;|H7>@=#oTz#%slMFQQqzQQWo0RPdU~U$w^uZRqcs*5R$^{;R^!1gbxMwa zTZ!wgKNN$#Bk{iX{bf9J;$*!4Z~i)d=->XR9XKBN;8c81&qJ>4W}5y`3Um{YDA$b{`{!LZrc)?2~GxLa7&&F>e*;JXO4z}1bzE%74{(1T)&J`nWe9D5JJ6?Hx@300ghA^B3IZ0(F6gUwB6A9lNPf|-j 
zJ&{DUlf2MJOt~sf=lvQxjd14lXa zse*@iQD4~%U5Pd?pa^{jKZY53<8zQAM@No1iv@h^m7m{On4cHl*JIn(A?cj7aIqe< zwb}Ti-~a2l>&_eFb>H}!*t&Tj`uf|{ze0UF*7smH%u0n7Nj@4t=y@Jxd!XC zu4r%VQa|g9!JhuO{id7Z+2d#8mwxeGaq`@`n4epU8*aVc&K^ACZG$uC&c@XAbo6%j z3tw9d4t2!USM7;!dhPAHUJ<|lyMGz){mVa#H+|EaL`!STFNf2qN zeX+*onra||SLOr#jDZrpk=-?qbVUnAfq=giUy&dB>;!a}t9Wg>V(-vP>~l${;`T=~h`XPWt8iYF9O$Wm(z= zEzpT#B>lEArjILlXcH%;G%0XS0J%%3MbZ4h6J_v8{x3|b{s;z^4==_>&aZB`fw?-dt4yj)Z_0fM9 zqqmRmZ}B!ii-#BD;V*wFKK8MX#TUN#xj6F7lX2$E$+)Ecaq-N#ICbK9oIHLko_Xqt zc;d0g;*l>u9FINtNId=clZrp?=v!ZiYxeDqwW+CCnVyX{rD1${JCn~TND+iWaVR(oZtnBc zp?)xbu*c0L=x!zSy! zs#Lx8^<|CU_!a1}2$Ju>B+nBH(SQ@ZJaI8r^n2$1FT^{4?kD1!y$55{-d*vHKlCqS z%S&D)OuW4|t8sVLi^hC5h|fka4lm6udh*rXH)uZMP4V4Z-iqSw`_qSy#N!WrCC*D< z=4YqHw&j?Z8jq)+dN%&_&;Bx=IPz3X&rOQXr5GL=im|bQxc&B<bB_LG@_}Sq^kN>CCjawN7YZJ zuniF_qiXXLL*KE0!3QneoWn%(|J_&TQ+EJ+V#ll;)# z_D!0?5BYM;o?6QZP59sVq3|+4eZb-gI*K0QudrC4UZQ@KZ+7V_-Lwy_pYdpn!4{^i z<_9aO6A|W!0NBq*XLZR3_Emy)JME{h*bL_-jq+Jb!@eqiPPP?Sa2XU3P5$Pr((ygg zm#M}>pDw`S7QTyq?mx2W_%0e4GzjM=4KwUr7e`UGY?Xj zU(rV|n5iL4>WK3z^RXz}4o_T)w!yx5%R7I@c&UkpQgzvig@yTe^wGyf#Y(*R#dpV% zqt8S~cX#~5XFnG|{f@UsOV>c`zxsx_`L;Xb```BVc;h#{F0Q-oP;A-OuRhWii_6UM zT4Zxpd^*RcKKqaHjQTOBI$eM3HF3lBS8Kl06?5}*ep|K;d1y@P)xg%-)+K7tEBsnE ztF1?m#^wHAHWhTlwk<=p)gS-+e~j@d&FQ)aV*8%G{$BT%ZCm2%TW-?axg(Apm3_Q$ zCYI(W zRnZOt=pwqzC{DZ3Y3vn!wdxc4@z0<#d7rxM_7P^W+&I<$xWWf_E@&4n`Uh7EIG#L^ z2p(vFGV11@c7RxE_?pV&zQ6*t`(D~iWCSXxfs_2D-SEZ8i|pM0mFkShrJ|EgWLc5` zo#fJ$zJ?5h&23kn^kt!qn!zug`rd^^x#-s|<0bf+Ke)YI-QJY>nr$cjG3Ar?PvACk z&&f!%rqlC1PQT*&>fPPF>bDx>Yb$;*>oUiRm{LqWD@$}K->Mv(wv&PoDUU+jH+%p(`EThaV#EyZ^7}eN`zc4;N9y61( z@z~=J#~=RgAH?tf!SBVn^T#w^s}HFqtkba=Ju)_?zS0>Z8Z&Ra;o5l1TfR5G`(L~z zuD#}JWm}KgnK{h^XQHP5zfzx*Z-XyX)3}M;@Uv*J`akO}im@JeV+M=V-Kt6aK?d|c z`F5=Pu++{eQ)mRf3Tq(w;6WG&GhcJQ>?gG)>QcKzC%C++Q}K&x3-f1ABgITs(g&=4U1~cb+q?oCt8~;>FmrduQBm-PLjI*h!xTaPi!kIDX`4oK$#S zCi+p|CZ^bh_lYVr^7efd3> zvnuo_kGj}gse1i(n)HDo41djY6_01BCSFiRkLfFnW9CEpuw1R9;(%?4>So-AAAF~m zO)!iXol5HxA6Ddh9ev`Vn4X*nUR3GPSkNOI%t-`&Z1CvSPddo>Lw};%EUl<6#vSpO zzUmX()CS&G#xC^r^lE%qaGQ4R+bg{5-l)>rLH|{sR$FqeD=)zcAuL=Jf&ZHAQUvW6 ze^ix{Xt-|NRwX!}nx;QIG<81)2RfoYkIwk}zStt&TMw|OlqI7st`nf2t6d)b6^C!b zTARAOb&h_`)jTghuk$FceyivvxK~qOnJ0-u!bjQk6pVvPTnDWIqx?qmVO739FO8+$ zX=znI5FzlIW1qbM&oJuzDo3S=75jxP(A9M~2kEef`j~5jvo0E0FHyREL+Td7!t}~s zK|B42^6wW(;)wM+Jg zHTlJw_^A0vUH7b`EVGG#xe@(OJ`tbmV1p!^*O_cHC*$-K{2xB;!7~)tLqCwVh#vYY zJgw%ks;RWAmCvh6N!HY-@f+%vk$x(>O8XVYXR-goTBsEl8S1LGnVlYQ@nzt@WN5h~ zS8>I@5PiX#t$T8caTHW}>%aSrG_Oj7F6Tq8rN8KEt+NbOMmhkMPS^Z}IRB)pUUQW0 zXB(AEVdcmy6lu#kkU2D2{!XAS(y}k6J}Fv1)9-ZQ6|c-A2|Bl;=6&{nG_<#Z3d zv@2_Zezlc;PoI_!xlbAosp2V`RJA4Tn=mN|XM{<(9O*Lp)64^HDg!_9(UDW;PkA|) z#Mi&%99%%R}W=4f3bkaPnaGJB@R0vt&iQ z`Y-tTd^cs#ZRlX!r5!EUHt1i=c}8nB?s4);TT8}KfhlJU5-olJ$^-ItwM?C^jcZq1 zw0H7(a>fvmrm-7Loo(IG+0yBafnDvak#}fZ*HskM)hFwkYshKy+T6v-nK=CPGx5}W z{zR44qitk!v~JoJ+YVhHTlZZV3+&cz@AV5l{$@No$iO6iEia`lX2S=cm27R*xUIT) z8MH(FxJ7oImvuVYmSVJTC2qNLEcR^g@=HX|9y=Gu&s_56%a)EVuLG_LM|)>a^z;nI zs>YUS;a{$IgshBs-5$f;Yq31nBL7e2pF9;$KYA=?=9Xh-svbLcZIAEz_Iu+q|L{nx zoIM`f_Uw$ULnE4nEXifSyD9YTlkU#!cb!}Ag0@*03_FE4B{4x=B~ zLTJei0#F3M;0x&BCG|m-a=E?GP7M7v^RcD?UEE{0c=imqb>OhwPM8d$z)IgoZwb>D6R+#;nLY*v~Y-EP6fyv6Irpb(^&tq5)2)N&ka#jE`%s3 z;oAr*%nlIV4)-8JB6KPZfj|?uT_*+zKo z4JlZK)sDtaPx9k1u{drz$SXAwO5V=m!8V^X;3(eS-hr5r^E|d`Z`^kG8xeyrs4W_# zEi5{eI+tpO90P5Eeji&wV^KmY8^D#H8|at@sNKGxfJ$=f-HDilfKz;ueAqn%Z&=(e z-`e+%CmBqR4S|-6;}_%N*|SlbS&WJCiTL?<{e0a2m4A$X_piP`e*HIoPe!xP&EyOmM85 zXr7;)^|#AsGy$5rG#(3@xYkz|WrW#?Ks(hL(E&OXJ9dzrgWM9|f!*@?ayv#H5@%wg0ADuZvr6z9a5`;DI>G1A@n%j5oag z>taq5v_4I|(IM)_$hqISUU@Zy+CW 
zjPe(bPcw!NNO^EMtLN)%$rdD@v5@R1($?}}>w>Q%U~2I4eY#Q}T&Sz4!ao_l*op8q z7dXTfdMe!~{IfCW{Lm!eQU@{Qx7KBCkUYHQj}5^1;8X5A*Ncj__U>MihDJR*Dsq zhX%&VyHl)bs4rh6Nu9*9hyUmV&^}@m|<|jN%qj-?F;=#!7pdCB*>EZ*|1KM#M#p8Ox9maOa5RQ>(BtqB* zGR7%|Gy2;_u4tcoIi(pE)@AILF@8Dj;2rYaHxvbD-h3My8;ObD4}bW8+B!RQ)s;5| zO!lj?Gj(EQERMfA@%)SW+rRlMwY9eBic2q3Q$vGBY`9cf;64Asv-;sfkL$ppJ-X%2 zS7~@;LgSMYnwXl?)z@CDOAcKXI~vDrfKSiNMYBq6HJ8}n1{pUu+8%EKf@?I^|Cyp) z8>GbfKHBcSFrXMz*@XaLGYJEg38EWG=>(5E;Ku|#xx`2OdYB;~F~~iL;NJMqiDU#T~D?Lp}Y2HuCsJn)}`OQxC@00uQD^SsZN8QBoFrV!~>4{+B(flS>d~2QE7p$zVb@lcFPUA z#6$+_K75B$h49-hD>OF*8~ z;BaKnjUdas{(-4Kp56Y5Q(u9cGB{wDXrm1E9otvMxw9kjoo{_N{`Q{#A|O?3u_(peno+uG6+d-v{+p1$+ZcluQP+82K% zM#jcvl&^`s+qcE_H{KABJ^DyI@X#Z%rn^n;;78;3_kX}AON@++_!zDl+Yi_hd~Zr+ z2*)|~X?(L0eaIgomD;g*A^{-_Y1uSYjqxgji{l>oR1a@=B;6z`=!TD@^VyUoYrt(4 znj`Xq>r2jpLwjAhEj-*@VmtfZlK79ekINxw$k;XjJ<+tpW zj~&B&CSlF$)oP!88^{pITg+U4pEQiz;%BT{2TU`c$-;Co(Dpj_;0l=Rx!agvU@T#qH2^Ii6 zurxhuTn6?E^2n(pi?TcBC#Gz_-1p7DiPgJy#2r8KiD=%w-LN>G8a)V~X2-|9z{KYi zO0s*#FZ9Hi0i9Hv(}N2e z{^$zfB?AFYhQWSfL6S7s7M@%eRP<545ph{}Fl~w*z_*DxmaHb`WRr5NK4DmH9BEuA zRI9#{E5Uy2jn}>Czkb8oXliJVvpqfDy*@b96QBRF&q)U@`SSoXvZ*$1+z=x}gYn`^ zFZ=s=H{JLi@07-#m@jb$Kha*eJp1_L3Y&`dmNuy%Ra9{Jd_}@D)sczJLPJB;3KpN^ zNCZ^f#3v;aO&AQ3FwqGwEbs)V>E=gACZ4Hq{d*D@yGxw`a$!Czp4+nXP%z^Gaa{Cd zSs}jgCtq^%scgz{R9+Sjbe7y*H}Iou=|i%R6}Lie?114gUAkMY2m=@XczaF$)b}Wg zxD|Km<3X=7LD&4kakDpNB}w)}8f27wq*=~mNBy{Ymql`v!v(MeGo?3OSGXU7^^OU| zRQ~dF(#IFW!3RxUP*(pGip%crC^vUTi>!{92bpr2dgj0Un1yA|yt)V%m23Z;$8vQV zV=(d3-Pz>{qgPKKjRxt`Y03VC`n+e}cq0~vCgOwdzr~+py3lhjzVelOVsfxQE>(X} z)!r%i^HFGOjD4408b9)5>QiriZ!|R*V@~6@*{RW(o8sty*-O%4b=B&hgrc;s-s}o#4XtW+)zLdJC>-bFZ@>Qa_5KCc9gP1+hx<=l1o#gOT)S89yV3EQ{z_oArW4CeO(cuV2@ZdloF0 z#FIcgd4&9or?NVxG%Fmz!1G^*zuT#NW#G~YvbC~6gQ5uA#5ji$k6#yH=r0tWszH3p z>nnHp_^TY2`DuTqh01j~%;y+Ta?FpE3(uJKZ5wJ|n78FkG~@vHyp*JD)nDxXXI^DqBJJp9Px zv0>BB_{2v)9GCCi7URP4t+!6c`LpL^%jONy)K(wA^TpqYfpaH~F3?x4S`$}Xc}3ju z(Ys@KaMTN*yj@P~WCJ3b0kCO#nSi5WU0)HRniBzP>up1M-oy4QxYI{so8Z@7^<#b{ z={ymF`d7-XQ`&s-A4DKg-p0~mwZfDyU9z_sZ)P?!b2ivZ>GNE|MbB(n&GQ4Bxgk$x z>Yl=qon2j>rFWsi%T!2ohcAgu2$t@8pv*knwvFN{rK9o_ys-6+k8r`J&vE96d5(l9 zy+2(p>l|zQc86lZ$YJ8l1c;GDekzmhpYH@MTVGetalidgdwHJoTz8ryf=F2A$-M24pxbm{N_Ob&pG123Bg;!pBNp|1!G1}J?tGhcj zzHE*9x?+q^v7k~BBmKiMp?-`LNEy4&&rC(J(Co42RB2pkvP~_L|4PY0X?jX{u1swr`21`pRgjRhtmKvCrnSO*Y;jD!R}PJS8i6;S)iGGxz2`Tq&{H(_A@| z?Iu>@ozqLkgj=$wL|B>|UObQ<8FYZ|oYbavm2?qTO6z!UETAmX=W#0CY4M}0C_CQ( zhf8tbfFwDX?!+*;rFtfP#}iBkMrvDz!<{VjsdzG%`zbcX3petLiViW3g2~OW^n;&A z$r3W;Ybh?vmbwGd0V2HxnH8#A(q-vcm-&(TS2$p! 
zKQsbY`$%K9f@G$>ttsXvC*sW0Psa(h;rWHyxcxl`qm>OwoK#X#6FatTjI-y@X#6su z_Ax5?9+G}I?*;!$FX@Wr)-~28^Rr8WS%^YyL2_9YPrr01hDOGuZ(3sK)~&I3&-S=< z@BUc3X0_TtRrK^txF4yntCWl`Y1}^=gQJsCQL2eIUw=D(`*;3bbhdZIZ+`JtV{K=| zE3b~kugib#=?9{6&n0ogZ6AmU(Q|N$Pcfq7Rj)aX$y7fC!LlYAc|0$EAfk+I6E&od z6i4@f<8C@tZOl5&gK9VY#ew zeN%f&cu=o*x;!M^nG3MLY%l(Bj+HLK&K~Rt;D!yIh4`*;*qS-Q986y&fnnUqLO&+r z?DTXj$yPz0koP9(-p_sZ15r5}abEiNum0qom_2$vimO(|o?ScQXMW}bl6Su0zZ4e^ zos7bKDGqMm8P{Fb9UGb>E}R&TirK0-rSZcbf9Xr{2fy>1@y2V9MQQw0)Qdii;!k@+ zbu_6hZeH6G`?jo)`g*n5_7?TkEwOp)`e^TJk98Y2MAzCi(V=k*n=#jH*%E6vY>73S zw#NExyJGv!eY$qX`gI$ky|dk$`I?(FhEe>YM80m6zX$(Z7WO8aGS{m!*pNuW5J7aHGqijO@ z0<{z6VWV6vr|8J`;DivdH^xVXq&4P5~O?6H$3CYIDV6gDd zi_?ZL8eW!Y5RW7ZM zfGs|7P>-)7=QIZQJa$E#uR3sG-4_IRomH8D!&D|JD44;2Vt{}t%*+v}bjMLZ0-QTf zRw*o>Q1YaQ;?qDASc!2oF48dxoOY@pD-e-qJvdwyCW?5&%F9mhseGt13|Nsb4BTbH zCE_JvQDqfwK~Xx63{5+O0iGCWA!E{1#7^I}W+0*RR76vAM_hjC^|5ZtekEmcg70vH zSIUR5Szyc?U{YlerD1YIVB8kW5EY<1>A**KUC_yk-Aa_0nCQwPfk6RN7mFWEfb%BP z!;e20KlQo4AH{lUPc>BXRI4#_>=hHc_kR29G1%J|S6q5&Y~QvehDV3J3824!ARcBZE+c4tjdC zhU<{3l1ZHW&=n7oMSb;%D9D45TNoEa%aFo_E}{jn_3QQz#h^d*FkGBGEEZ(qV0u~+ zNRpF4!{_8DE*-Ox7U$*0PB6Mwt=0ggA>MrJwK#U{cD%T5#N0wdhb19cEP?i(um0Lc)z=^2W(7YX-CIWJVDcrG)vU z!WEZZ5#PV>yE6P{#gE?DxN%c#*}Og2xz8saXnUvwzAayFUm_HSB;g$6=cBtXnlQqt zaOeh6$);O=S11X}&buNt>a)?7JNeV1e+o0nXv=sAgRq%%8YWPPqfGvEPmkOsy9xu6 zvu7@^Pm-Hlk>i|FX$j}9N31vtM1pgXQ@S+JP$}+U@LR}I{Z}|c2Bv}HxRXB7FDEf6 zbH9B0Q5rwz3DO<;wyp{d>Ls^yjPQr=pfh>m%>aw@F{8mJJL@Oa@ow6&s$AWf$GI~n3-RXLZ@O+Rt(}7385=fkiI2Sh1M&IK|48%=jmGJ- zXJSl)A~vev5QTi<2&zC)J6IqhmRYb;zC=$;MWz!y*|?mOjF|`cE8^tk(YsGqJeT>W zwqJ&l!B|-=7&?AMQN+t!vfrQtn`kccw_Jhbc=n)3&SIfULoDB=h#a19A|NG9eOL0o z(>c!apPb7pCnaBU*P9~;;2iXZ#jC*uRRUKz(uPsW)uXVh2CN0ahk^wibVd4XE`#wh7Ik}J20g|L&?X5dIQ?*lnu*@`I#8$?TIU;C${Wd8?!yVG1+@AT8oq~V^Ow& zCmC6wg*G!9#Hp`ZnxFPyL4-?hRAYOvu$t-A^!AtJ#|==}nRxOs(TJZ(WBNPwT~=fe zl!>gIUea&+F}&3F6h+^uzH(9JRr5A^Wvy{vP=EQ%laIuilP6;Ho}F>qk9|6pG)SYr zEBEOb)Vi7)>Z7WzK88=8jdLeX#c9eImA+{daAMd;Ey_Op$ zcxR#Ud`+xfyEeMlbi+m41LtHv@zzsoYpZ0jLH45RE?J?k=MzY_jYV%hc~rrhhjQ3# zURZ#uNmyJCf5?VyPm_X$^<*1hr;yY#BpNA;a3DZ;ZWb-_LO)%QPtRq3sQodK?r`Z* z+pw}Rfai8kgn=lU^5v+b2U3E=CNBCY28?WQAgytC*(o1`VcMKG(9-A>PaVM#-78V` zbNLhkU2O8@gn$|8gD%nj>1Upf3+H>HwWTTehCiQ18d5(}P??usb(J^2@Wx$#Z@=(a zFwIY%I2m)()3I;gzG$p(a-9~IIN4&xwsK2zOMK^Vz9!pkKH3`_qtZ0hCEJcF%&`eW z%9;}%s)_~axRzK{+g#Lul{(sPR~;4>OH_)gIv=&c{vgh_jKlPkJjo9)>@~NR;}o8# zOez{lMk{*THl_$&Xa?MNH zvIDXU=SHZun-C#DS2?FH@I{sqjyT{g*9H18j>{wY$tjS@lH770g;JK_IX6Vk(wFO> zm2Z2__$|XBZ+?}XWH802Fuaa8?BrJFDcc*q>p7<>XgEy@U%oFpgCsq7T~OJ24JY|q zQKTXj%QFbkwsp(#;6hVyXCoV9QBJ7gO~)bWtJSMF#0FjG&Y$y6e-^rDrH|e^c{Com z|H0U|XQOocW$HtUap*KvQ-6R|FeR0PJJ`f+2t@o)XRm7VR|x3<73f(>P+_`r=fN9~Ms zhx#M>AX8ZM!{*~|vP*C{AqU#|O*RoZlWaR^ZZ<%792p3KZ`P6UMRChnGWSCP?{J1% z2Ae;7)(v+N^GWv(c$RO%!N)$~q8lf0C~rBr!DRJyy&d{4E`#SgbV8ow&GKL55YcjY zg1vmD=iK?zAeoHYM$e*_t4E&He2B9>q&vw-lek1)elmY{U+HpqEIaw@X+1xb*0$!>pak0#KKNdyji8Y$)RhGGMOB$FGSzi zXzbX%E8hFj+vDv+$6{b;B);_Dz7%7eY_oMs{K~KWQR>CZ&h>NWm6KWUQtGZib z!@9Nc_Uo@uYuk3%M;w1o^=HKg^4)#+-IB+esH^AG2Xh+t%zHlR_(4CkT(mM=!?x}602VjO}v3tjjB>StkJzreH`EVpPw9y z7oL7XcJFi)L>so9R#)=^i0F#FjZIonkSwqnAlm={EmbbGNaGw@0!68QvO#%UY(_>L zZ=#_WeR`J^MkSj&MdyNSC**9?h7Ing_@v-<*IpY3FWDU(?d^WMvwL-?7YWc8bF(Ec z291vmM_=#xIClISO^bZk;~e`Rdjwpwx`p3XuG^(B&}`MLCh4Mwit>=#{_Q=r{>L$9bp zn@M)1#!UM2Jgvs>uo1mZT6xS<(&jp)d>2d{?TJ3i`2>e}Cz0cx(&G}h@=c@UOKJ1; zg2J)SJV-{o@Gr$nl+%z)IN2|SZS+*8U%qgS^74rAQ_`#U>Cg4j-HGzD1v4AZrM@h| zF|LlJ-aPY@;Viq{pKdj5@*siuC~EMQmmOPn`h?lZ$V{uMn*rnM8*-T6ESAYH0_`cfCjR&ucU;h{XL}R6h*IyrsfBofH%9mbX2+zqitZC41h+@`fiwx)#pGbs7 z`VO~(Uhd@W(j8e7XXVTICkK5!KeZwoE;pHTKXCIjIiE=YA3a`L$$z|?5 
zJWf8>%X8N&9OXM)e$vuz(vTqKT9&wFxUN?gztW|6fFzgF0m{%X!fV+PmR~q@J$Y#t zguP2S+1)fNBcHV`Z@|gI|5wfi87`GmPM&{)gD`$o$V&T*u{OF4I@+&3kiN-lD5_tL z(iLRPt9Oel4_*>o-7V2`wm%+z;K7)gUW~@>?zraiUGasVes8R5ix{{NG14~_9nDp- zZ_CD5*VPccr-$OzSC7QN*>mw<|NZ|P|MB1cW}G>B#3#12Hgv?6bsJ*G#&xlM^ZMAf zeNAlMvM$ziwM2#Nf{v~ZmDw5HYrCVRqunRgZrHRjR;^hZtJkcFwQ6g8c8pUv+S-)1 zV^y@vzq)I6tnOYNTQ_cwz5DmZfy)lY_O07u%jV5KjTve*s_$!5zgCn!Xebn-slGAR z@|j2$%0I=`R|7nPN*?DX(pnurB zt})tM+GYDxiG~YNqF)=GiSuXsqNZ3M6&eps4vz;f;LNMPipiOX2Jxt=7|pAiMC{6# zsVJ%0iSW#`@qrTQ8E@!YRDY%Z$?Kc6ZDdNx_{1o4F%6>EtZazxRjqNw=1%El4)Eut z7G9cA9_a?svk{0>tp`W?1&{TuMTH5j%BdFK%=dB%D;qEK`2*wy`OM{}l22n2&X|h; z$Kj)erG$K9kr!_KsYT>V@@MipdT=XgF zy#Nk!W`h2q4bLJtl}_KUfazq&j3uQ3DGR8=INFNr`33-kQd};4?2_ag5DY+Q%(K(8 z@xn8YNa0k)HP?MmiKP$_3N_^Mv7wlsnTf&k=i?iH@#nFodre%jf4@5ZIvExvA1n95 zOD{(M$Y>nA;wlY<+I)1_nhjfG`<{JK-_R^$Xh}xKd`wO=a7o{yU_j;pC<;L3Ak5I( zJa^r=;Ach)O9d{;fobh};TP}J@XMz!C=DHmzxjq=WvC#Rl^TZNnqTq@Mxvr{5&fZ= z?FW_VyJ>2$Htcu{l;`|g8?{p8$N_^3j<~F?sf?4SPfH1p#m;^EqOGG<1Cz0M@Zm?} zhU>445z%REd?c2tq$-uZLiN7%ip!%_okV|MzsgU1v_ceI7Mb9icNGh12FX4~ShC7M zw3bf_6sqFj!OP-1-}+{pI(07Ug~K=g=Iimx|Lj*KpDbXZ$Qjt7JZKB54oX09QS9Y< zCDO{J3PX0&PVEo+(BASltsTif&-e*S$#jQTWX{4@{wghP+=`j>E1cmd5U!iuJPM}- zUUX3y-n_ir@eIw91?CWep}pu|_k7Zabj}HEIsDF-xiop3>YW!+?l`~~%=K)w$KXrJ zT`T~3lI=I3TKh@A1_9+Mojlj&)7ylQh* zOq?j=o$voZoIH0ro`3ogAN&8;-}s6K(yQWQcYnx73f0Qsm>QpqnTc`l1ZI;`N$s}B zN2`x&Aj?Ld&;Yn#L+6UC4oXh1iyLmZHg3G`hIrpCx5lmSyE$&S_L{i*sw?~^cK0d< z=(RClT8O@ZK@FrQHOOVbr6=zH_V?nK|LxYHG=?>(=PhO?o@;e++6D5N^sJm|pPUB>q95(qyb!NUd(jTw`oV2m^frAHQOz^* z>Pn7#7Kdz^XZ#B$#ai#A{NxihPLp5!F=$Ls(sPzO!J>TNrC^0+m;aKVC>$GRR=Omg zgrUETjPoRy(kA`*?#^<==);+)GxsF3xLno~KH0MsVG3V%D%UWUX^B3?ktc6|xj(;n zLl9~+P6mKHAp(tbQ4CcH6p(#bulzsR#7zGJ9SUucXB z(nr;>NsUQE$+5o^uR3yZ0sxXgZNC8h33dVYRk8!pg_%V;(AJv-ShR+GDkqZymt&hk z7sq*0rs+x)>)?b*cUOsq^utUhAr0Wo#2Y6Q(3df(>4^b_Gbu7NIp=S77VGMwzPZWr zFg!A(c=h#c=26_#_*k4C=6h(h(JS3S*!%CiEk-71H9nY7ACqmCC{0JDEWjD*m3kSb zx8HJ;+G<6VF7(IzNU!*&Wb-BAEu5sE+!oZ=FG{CW2w#m7Vy;vYoLL`1t)5Kv6x~%t zQ3evYzNwwbQI8s>!ERCgme>Fw+S~S{AB1<#CkW72dD$_WPEO*IO(QTB>L2;sfD8@g zsfghVJu!XuY&`YEgCa+D?78}~*!zi3x$m7-KP%EmOof4PWYbh*eRC|$Rm9La$>)hv zlDi{u`qcU872VEBXVo;cMEj=IaqUg-iT(R8iGhI;>9j!&W~!C9KGv*R9gU5R9@vae zOjrhmz89Gr8=EvR!v0){iHRwtnNwIpG%L<0AP8UmSRQPT=r2upV+*MMgo*FiO14e) z>;VBhke|;Uq;4>^f2%#{lx-{^fVS*7?$#HADg4q&7uf`i(jn7sF78t|k#<_o4m-8K~)Bf+JpegWKpjLgqU7O{ucty}9i zyAHkiw)+*ngURQohWq>6=8&)V-~IvFo%PYz+vi=<9KUz!_^Ieqn#TINII#bKHz`ew z4tetcr-Rh1uY6Q`gcFojx3x<)Bx%BJVOF*|aD^W-$|fJ=tfjR*s_SZ{H&Q)0)yd-r z_18Jg!9I6x+C*e@upY?LK*^LQ4@-Uq&P=pYE_Muh+rt%zY1(5Ok`^5=k0Vv4Ny?8d zjTJ~wc^*qp0C*_McAArsS6FZtEM!H#9Wve1x@^9r!zrYq?Cql5UV6Spy%+*WJ_*%}A5OO=YL@R}^LY z!~MU)e3UKud9dS79`3pnZq&F)Pf$5+axTw7*+^A?d*@?Lm5{uC+7T)@+E?o43UJ?Ym?1?!B@5(ko;CRX6Fn zF7{t>wXQ2;huYfKU3;Q?!-i;5d##i0y6iNLWjwneTamY7CMRQjpeKgdFg44!G%KT} zra^61_Mz&*sXvQqf8!ISxbM5)kDvdwUysv#;!5>ym)hbu+=hyJ-5cVrTW*i~D8%e! zNwlEP7T?8dDC!ocXZQW$W$IIH3&}kn#v{0^3JZGhmSbwNq)v|C=%AdSb(3d05mUu>ono^1w@ z?qC5wZ3bYR!ookB1Y}Q(mMrcvDUH2P+PXruC$3>3b5+1&z6Th{0QL%>ZDhf}v%N+3 z<7_mpHpBfyRkli^GC(d1njoUWG&eipz3*$w8tJFSJSGBu& z5eB=1_5pq9ThwgSZX`Q|iC*5ABU$pP8ZyrO7i*e7WDF^jkCDZmKjxTo|I_EZLb9C0KTy)0XzSss;egR*j2 z25C0m6EI){3r@oWJtt)CoCQ9V0VYQtGfHvjJL*0^OM9${`l80*%xg);H?7$eM_zte z2G{v`>W#NTY9lU{P1f038Aau9maf^nZAToIJdceI#Gq`Q!NI|J^66)zr*}|orx-^L zpS11M)v5Ndu_~_Eu{|z7cuib=<&N03xjFW1YEU^XlKsYLZm5g)wqn#3Yhr3@JYIP5 z<@naU-;e+F-~Py(fIjz`PsY#u#OFPRfBxA+@x_1f8}Y*Zf2x63TQsfN5(m_7M%4!T zr{-cxf?HKn6rZ4NS~p;`Pin^t6VGM$>wB4A}@KkJf1^L1(sp0bh(XSfZ0WV0wzELWVtN! 
zTw0Z9!%xzTp3U1v!Xc9-_4tIX+=z@TmzlYQ*YcId4P42s#QU9jU5@LV=OsSRpDt*d zsFKA2kITwNcdspRCyDNEe8AN$a|s6}aQzhT2AqO$riA(uKY|0cf%1^cI!VvW7tKp% zcrk*-W?sISn=Zwi>>y5F*}ZvF>^rbMrp8P0*!LceV{aUdmX40N;*y>53qN;Dw6{m} zoL!3E^CMBLS&Thfy4?QiRFC6tz8&BC@|WXx|K-oc@uNRbseICW7GPk5Q?!w^_x=1@#q7OdgEks zS68$YDq^i*wRbo8Q$jP7<9->JPos@XS1h!4`DK^!{;?Q8aVBCy<198@wA4kjXj#>? z7-LHls=L~EQT@8cj!Ts@9bjhY@ zP@Kr1_=?c*Q|h@|c15-Owuah96+IPWBmL^fWqZk%VeP3}bf^F2#i2BpiCrN=N?yEH zC%?NBd71BUnEvx|yG7h`iiZ}+JSl0AH)Ngmha=7M-Fck{81fkqWvnbOh2hHH>~4Ek zph)ZX;BawN_tZX8IoQWOjmcwoG)cl4yOpNpIV%z>BWrcCoJ@ZQ(w61tMPP3n6iEE} ziz#dbW6p_rXxn3g#V4e_hN5TY^IS*D7QSwm!p*n{F5>|<)TfP`6slr{oH3_t-CPFY z;7xz4v}(Deuni+|B9rQa{8yc?y5y2IUl4QFQGj)!%D3TeWTjArSkeb+FoGf_+}}q+ zz!sUXl26a11q+ab;2@tLmE?MvlJbKouqjWlQ7jO{m2pxi4IrorAysiI48P5c7tIcXa^4Ir#IcinGZQHg+S4XG6C&YKS zPH8au(rd3rV{3CTgx59@TYe z2Y0F$^%Rn>Rx;DfM?BO7T4Z%_q|&)zXka6c6O)z>fimHnjd(6HWO+UL07T?KmwA}+ z6=yy>M}kS&;LaVokq+5)C#}XiJ2Mp>O&uC!oQVFuf!MuscXY2^6TJidaqoA&6W88& zjo(;1e&U1;K#aql{-L<`y6e>uwZzEKuvpG1103zaTb=4?G2kSBE*V>Hb3CcoTq?^v$erNvCLoba`s7gwUV8G*KjmBLR+d+mY5tr}Frl#?Z1e$l zMf!`JkV%7{ls_TIJ++lQP9W2@!sQDQxCB4Z2NJ!j1D4};&kP`Eb308qCOB!3g7c(v z^2+7$##H&F+oZSV<{&WqU65aLC;0FhzOfSvnV?KIY_MCH_H)BES42nq+BkOfL^L(m z#A`3V7+?C2{~=n`CfCY%nH-&n*7io1SCstmCe@aW8>6GERdU1I7X`zuD>Bhqdg7iZSUP-byh~Xs2qwsZG&dMOUi=Dre<$`thgZo?A??ew#!Cw+nplTJ-d zO@5n?J`RI=YHGqp=CNak)lW=E_o^;7yvBXsyEne`&?B*~yE9sf^|4n5`>J*8VR(jx?w#rIL%Ey8z9Ofe5#=_86+8H*G zjZb?*YF_$gTG!5fS44eFs|RjV(^IxnL@brL7^D4t8jx*N8`f~6r2h-2qGE1byegTs)*n$iT8Yee_?h;Q8O_wy|pBL;u8Q&XBm&K7l%Yc1JzG?ZSOI0E9Fs9qrp+F zjMu8#v;hG5t7IZsZ3{+ZIwBRSXI$2x1i^G*0tUmFu6uH@FV4OFdc5%ROHtF@6gPkL zuIS!>i8o%d_Z8ZfW~OZ-GQr2&78TQraqO*QaqP8MI1QBe7u@yg*^EaG2#^txfX$}C%L;)+#L?RiL+P!^lW~( z3?_232^?f}g-dym_mtKKKd=V5V=_Y?ZX|R9LpH!+-n`@R_y!Gl!aqifu=xl4!_;xYoIAuw65O!|! zOaPLVv-^XR)UlJsI=aKyiZ28Q71@nwv__1RgCY)_2L1SHnx~8sg%Hv0} z@ywmP@>q{3zo?nwoHms~h!^M~x6}JS!L}p)O5Ai89z5o0mY4uCZb`3@GQqLW59FuZ zyzJx@PPy;w|1vqUE;Wpl7oR_~#$N_6<;&p_ub)H&r(F@1obYrNbExAC-<9R3x+Ol0!OPr)Gy2<&LKbSU|e}yZ90W>*M zLtEi8#BnS8c5o4oZO8aueTVY$eMP@fD1A^@kbKI|=S@zYJ`+Xt1(#obbqq=$*2w0s zkxcU@2H$sm;Ki5X)t6q3og3CggKWS;V=+!2KNqjR^hyk!J0DX6eNh@4jd>Q~r__&* z_D99!i1g%8Obzwuxi^-k$D(qY&0Z4`6C)8*>UXBbVqtt#x>h#i=uk|LPYBMiH{VPS z568I1DHH0S#)bxC;M|#*ni%lnf3@^-X>#0eZw?O)#PIM~j1G^-51xKL{_lVOYw^ve zp9s#Wb7AiF#`S)#R@Wy*GOdKW^bgbVoR@vh4XmyVIPFT3-c>STSAxpMo~%|7&{5J>G> zF0XIdrDwx1eU|I#%^=*3W47?ccwOz%)m!@Tl=r3P5w!J0!XJMR|Ip|ku9W3lK*T8%jHU;x4 z;ElZt$^0e~6Zdq~ilJ7F$Iz=@sHcBdc}(#8&3%>U1%1)Myn@E?Ua$~t($c4iKA#P(N?_VGYQ!6D$g55 zl!S$+IoYYxe2<)s9>V+7sgttFj>hJV8>6T(+PLU3TVg&?eUj=_(D8v;ZJPLg6C+n)rm zUZDNZ%AgaI_zHI$AzqjMXm_5c#1WULS3H0ALudVw%xW;}&r&PJ>8|v+@}0Eg)1zBD z|7r^s=#(P;D#3zbrF0^J@}W^uJncHyhiUUu(u286c*>t` zks&W-8Jyc=a(Q5WPvZuITbW+ZP|xB&siJl96F*zanR@6qb#^8J=ZhUwuh7~{FH=;3*baZJ#x_`!> z(fIZ^{x+U|<$1O5rFh?s*T=8?@-N5r*IX87PoIs$M|$E9e*gF5xd*=z#f=A}R^!)8 zZ@eisAG|zH4Nb-f!KBUE3eodANtq_V*kNCye}N52F}FcI5>WuB%H)~vWN@J(#=;Tvq>WevKv#GW=_hC*0h#liQ49dD5#E8F&Q)Zt7}%6^oZY-=Y?dkrB>}wMyYrs zHmd)tlTEfH!p(_)yhy}};5AjOnaxL2p(?K2za`pb2gy5FkO<&=|a|UC>z50%MK!F~WqCj4&Y#G0AIKCMLKlRthZt<{8F+`4Y^WQBG&U*DHLJ{K`N9rIFh`QiiL`bx^&6yhc)fkr7autb>j zNBK@eo$x|Hr$y+fS4KnP#6XMQqNoHd^nlQ%#l~4NO#&bsI9~b6tL{)jXocaShN-~{ z{OLP?Hoo=OUynVzxBGkZ7@tgDkB?8qJzx8)sFh+ncj;Wba zw6?d!)mL8?W8>pekkh8qoCawHj>VMR`0YgD%r|GjuDVuoB^jPs;0IP=y}Nj$^m*&8S5=*k=izyO>70}0+?KWEF|<#Z8LZ93}A4SkF%NByh(-$!0vhv=|Ilu8D0R~Kxi}XxoNh7!dWw@?S7L5b1tX{NNb2L7^W5NRHJjs*TiZurMz_vR5rqydHn_r9X<_`_I20gEEF(Pt}hJnYPY0 z>B6SCM!Mn8{_MZYkec?SRKJX+^rk19TjsozI86#MDV5qBZQpzp8Tm{HZ6cA_0ZOlD 
zrS;qR%9nJaTpIc6pKt<_UQZ2P0r_Q@?z!&wbG`%CGKix-l+M&Kd?PN4BE{7EAST$pa#EWMGodAkLX5@nr~}vjLxo(ICtty{N#^)Hfq`$qq(Iie)1x)NfpmP4a#t{c!5Xd83E=3cU23=q|m-3c+d-?Po;0 z>C!~hsto!KCN|Rq6fr#>x7~PMH1Zwx-o7Xe z^=QDM`bvjX$X4(MM;ZSt{;N}hIR5t8;;j0A7GY*(Qz)!j?T+t|a?}+A0=IeSDXh6< z07XTO_`+hhu24VcZ;^UJ3K>>E0B1!2KBGgJeCCabf{mem6XMdBd68jmF~*06;(@Pz zC1!BslPtU;LW4RcX1SQOZE9}zLVrCIHm-|!A-#hP z@kUoc^2~(0{soW00E_<|p{BGF9@5$ZgwLW48@G@Lo|7G?eqA!<{*QJqTH_}SU7GI5 z9(Txv{EL1`n0(I{w{zX#_K?9^w#kxgl1e~(u@^RdF!yjdrEST+i_ z9|IpYI{2gv*=V#sPJv-V1AQg(PJa4R>@7An(ns)?0Is2djSm&^`s=TI!x*P=G&I!v z`>Z7mw0TSTwp(wHwd>YtpnOs^Sc*nyE}i!Fu_JNr-1)dhyj!<;W$WQ8v zbfHh7y{Z5{xFq?>S$E?UkUqggTFT`jBFm+8+z|;Yq4bnp@;g!1M)J%2gV)3 zXfCDI#byTG>EmsCawkz zvU2&?9c8Y1djm1~74IxOGMEIiKzy%gAm62#5XI>@T zSp1iY^cxitSM_@}E~BJ&GA|v+TlA|tR_W@B8Oi*x^#7#vQ%Q*jRrDE+Ngk0sH`ag7 zr)o45YhrA4D$X1`5hva}9H);QiovsIV{&L9YSbr{M#uc!$zipl;ob|T2g`_jTY6z) zTsn19eZZ6unu%HU$xCxnx{t=p7@G$AV{D)&M$ey%-qXjU=hTsO^ncHp7*QWH(9;`( z7y7&z>+HF{`0jTfieLZD--@q1_GAn(O)h>YUyOTqvr;0zR-nBs6aop zyF`q1*-vDGK{^+v=hS9bI_x3wn)Ydcv>TNH2yiC3FHK|pJFwy6i4P-;5Z($2A zx>WFy6Wbsf4>F(Ty6F!ww!RhJ3Bhh*T<22;bTQ}Q69KH&L=+K5R;yV`-_K<`!8QRW z0w_#2gl#CX!bkQ~2+yXV{wP})dHo%2{Hhy^_RN7`pC~BhwN1_Z&!Wc}_8WeRgNL?f zfWw2zuw4ajC=WcPGip>P>}pu;g>u<1Ubqn4I?;{|h0_|dG)W&l{=}oE;lYD@qoutg z-gr|s~Cz?%5_if*?)yEI>*(u&!=S7{w6KpRklz1jm$;loOG0ZR3N#=|6<$}fn5v7rV zc;@j(y}7z5TJW-uRfYQ?)sMcoQg)em%Ef27ILQE>`h+s*raT_AVw*FbQF=CUR94HL z1q(VNL*tE4<~P)D$eyc{%=2l{vu94l{(bv3&TEnlHyNeUocfIg>Fs&9E9PmDOJst# zC)q&T)!7~!WZ!jncg7~QgM<5b#8sE>i|&?&Xs^|{Y+*7+CkABy9gEjqeJ#$NIPH18 zg9rD==8cW+t@y44zg>X*v+^n|OR9~;YZX(Y2oba35U8}l82qwViMdxWtH|@Hh zbbP0Od|G%)-Wc)>539WK`8cq*+uvuNnv#u7F|`Qb&i!@vS+efPM_;r$~SC1 zD)n-ix;$r&-!Q?R)(w6subYbeX?_n}6ad4PG+v`3E`u<^q6`NjE7>z;Ig5U%SDv5w zZaY2WsDt8Dzd(Kh$xI%+c^FQ9>3t{-?SRysDlakmL=Nb6Ullrs#N?gkc4UY*Xo#?Az zVv?OfDey1xEVtv!o2@e#&XclV>Yx*)e_~t;%-!u0tv(zn9CCOLZh1JQbeFZ;Iz49JgGrG=7i=e4HkCN^5%w zmxqIEUJh_q;x50zr?_+>C+LH;mXi`7FX>MCUO$lz0A=Qr`2;BQL&ah>OHQ$8$0qfU zt0enp;(b{@L%u z^muR7)mKGVSCe3Eh^w#KAMgL*`&8}uxbKns_4zSKK_|c#m>FEqifyT zs8C-zI$4St>11>^i=Xs$jP)5qxOS?C7gFi_HO^X4Up?bbc%!-1x6N?!?UGMAqdhPW z$Xq&oH~m3pXQwy3ZP>UeT3VXb*VOuRyU+p~W>VuMUhW$l>xr@9o_OlvQDMX$aHZPJ9v1l8h=1c#vNSQ~SWBd(!v7$)D*j zNQ?aA&)T1^w0^9h%-J*~j(9XxbTOIP*|{OEy75khO~j5pastk zb&P(gCq@U#U4G#SUD$v?YR5xU<%KC?yOh<84f&&o55>+MyQ6c}dcl}BpD=#LhK40P z^YQR~--)ro0Szj*dO`5q`STzVCuP7NIet9$UvgR8dfOe**s&_wy4Q*|wJ|)p@A zVxtD1Qa}*IIU4(w%kGYfr=uEjm0A0knbD_h*a8{#X zBH9BU6rK`$B0KMlp^=mX$}9sR9wh;Ao6h`6RxrlJ2cb^S~#MN)bJ>eN&eDI zNu7*1r@Hj@o>603j8*H_xQtVx@xahfeBvW_$FtAB5U2ZwqP3werYELVaKts&U6)jZ zKylNIeP(a8yjS-v#R{<$Zc{$d&w!lV#xw!JmtKx6|`YSOuG8rvRb@8{~ z{BC^uM?U2vhM`lV23j6a%hCBx3ug-E*c#-U1tJ_u#WYp{C=G=;KRL~p6l8~p=gu28 z?BQQ|NbC$a(OfVX6#0#=cR1plF>~oz&PwZIC5$V_|F0ZvFGDaNu>ReSIHkvzCv(}9 zqf7tGx#c4f)}$kHdL{bhDGkTvr*=pZ!g4%vKYvV2*H*D}p2-DuvQo@AHkUB?Mdl_a z#=TgFqGq(t&h+u|v6v}M#DRT#;v*mahzy3&7#SXkRc)ZC84tG#NufgVED_!QWLM92ku6f9JmV z;&1+|_{!J*GESX6WjuLnvxOa(GHzJ3a;qaW!azsEohh_OP{sGi1t_TIbQf@@=g8x?)UCPsHidC!+5{Pwd^dSA9rb4D|NL z9|&Gaa=%JO|2FkSUEQnV$p;?~zUBF`kA5`Puixm!YYb2Kd)4aWr9&BTFyV>3uo=L| z)vIdY&caCkjizO~lRp_#oWj9d!`?xu=lrs=mh&J-^30znA=L&_-y^xf1`zFF5j*8g zEA*pzhJ)AoCB4$wHc}{bg{By7!1qQp;atlEsp?Aq1Fqz;O$B^gWa^JF_T=R)eh+lj z-mzi47+qSFei1#CV0>gOCS-eXT0>=RA^L|#VHR4Jo-VS2{?CEos*&Z%ItkIJvYcvszoz54*< z4B`-mpHCrF)hd>^hr}ltVX{?Z7nwdR3PUo{l>Qvr@|cV<(TE8{nWVbNH!m1f=tvDx z&|Qyz_gh}DtzX?8H-6y#QK>=Y(t`A#`r0KHm<6t>p~;K?lVj3@qoeWSGf%|mK!40E z&BaTH4#n{FTy$(&A0Pbq-JX2KerjV;zgUc>rsimAY*DpK>U+mL=$jE8IPruXxtJ9g z%ah}iiktL=yk!Qyt6nox37+2^wEX0Bk?}43ur1i^XIK(GWZK^r6>Uo@E8kE_lg-<5 
zx+OmmS#Xja1TXXBki8^J3}6WNO?NT@RKKD%5}M0*f|K&4iGSY2!?we|mmSTe{w&4w zob0k>Kk2UUw7E9LG0DvdAf#tkar(Be7$bS*EiF#M7$4O@TDbK0^~O0CBSlL-SwY`8 zE}aAIue|EYxZyq5#fg)rWM5B3TYIOEZ9Q@7luv8ew|7r$+PqnPUcUyR1@URhdT3Vs zeem9Uqp7+WRg$L~bR07hZeUo}j038D@=k4IQxsZTqNdoOHp)b|$V5B9R>!_Hu^}RN zyor~%^4-zN@3!hk0A-Fi+DHEAAHfmbQ9+=Ua%FcjE$jbsm)TjV81&qV^w~WH@uMrB z9LnN-qQHvhbYHFu@I0uLOd_{V3Sy2$xg zGe`prbV(MUJC}ui6+Cb;%?TG^=W`nPn93)_;!U`4#=lG-Ii+#D5mS5$QvC#14~jMX zavR|(_ve7ij`(F)reF5s64s+VLFUa;t9Jk@59 zaWC{~>@qMo=!t~w+qSq+bQI%_D$%W#K1;dJjZCX;_4@n!#X@87t#LjpGapapB~N z7*hY=f9`CQly`zRKc{A6Vq`St)L)E^$TpoU#T&05j(hI;oA`^r{7O9Z`WuXdgl*l`Y<@!)`BXjgaXjHG+ax^o3K$V~ z&^pOw+2s%9!(Wa|@?pl|^6F>apwJjzjw}17Ouf%gt zJsxYwiZ!KrxP zt+z#c*DBd^GctE;m3BfjtBeO9d>~GoI2xTD-O=9CCR?ZEH`v#2+z_j~Is6y=|sEVqH~C4)({3Pd%wIK}FQTCygJ{MoVZ6y;Zl| z{uXa5R9EIHdFwEpq@ZZ=%9;&QDQ~Xj7!UFZL*@@FYuK2POsQ?a7ZFFcUz!Y`l?*YYq(1X=r8p(qa@)pDv1v^~`Nb27 zk>yS_;Z1*RMr<}d<>olw84qHMAWM>4(LKp3d6IUd|FQhR4?<{c@K+h~#S#=#5?iIs zc>zG@&+g=Xhs&ZY=YC^d0lrdjmKS)$Q5QYZMs%|_$N(fgC?`BTzp8MfMM~ymSEMQ9 zqi4o|hL3Zd$isX$=?EttU4lR12M#-+D#Gftj5?v3LGwXLJCyc{pQ`f8jy zKOE~cCYfDArsut3y1TnBZoK}gc+Y!oiPfuCN2_#tv8mO42QPHZOfALf)2HLq2|jPu zE1Rodl`Q$xjJ^9d$HzW&dwlNGcgN5C>`%m}KXr>Yw;ws#7iW5U2r za8bG}EOR-od0OAO=qFXyvK-;pZ3WVK(u-s%ch0*UPJLEB=PsAaGAuhhN`H|E!@Jb3 zaquc_V$n0@lbATe;CW@cqRlzN{)-*)`K4dkNd^P_a$4XeKjkxi26{R{Na^UO=jJrd zu8cbDamh}H#{UNn?vX7}5%>S?H{3sNUEdx*cGop==QWaj0T}9^iK)?X$=;O43?p&m zjhEw(e(zt!$=9Be{jd}}cWjB4=Emr5?~0plzBcyk-yMT}eevisPkYXrxxQ|XN8Bt$=?X?ur-NMr^p-A4*ab9>;|Bl^4Uj#|rpQ;b6t0~ClDtLZ_jf};{ zI?q*L!_XF>G4)NhiS)X{_!JQJYie$a#)f)t%;qiZrOLTz5kBa)Rz4NU$s4jY2KtBN z*%zOQb*nc<+uDuNBN2OduZ@bisyK7}oa!LEuMs(kSk>MrT#D*H8{_!tv$1sUg8C{& z)T{rJ{bGJ$QnC16ITMApX0>tkhN=(zf~IB0 z76mJblhfHI054thvPZy&aNfjc9h9}nd1O}UyeE0oYzwnEQOCn^UC26+a_eiFNJ8d7l8#@dedo9XmSwRy^ejc?>)Nli$aM zD#AOA6$%dcg6&IjX-9_Mm!QS z>Vp7PW`;V1fPT);C1JzmLc_!-rw|-Jc_^;C{tl&4BVQO#@Kv9Q(cu`80(s(r?|O2) zMhXsD8{?beQdmzt`&<<3o8sV=SI5=YULOr@tD?5P)yMu%m*%6sUIt6C7*kUvFEp^r z8CoUUF)&Vb@Z!H@!91kS3iD1M;eY~yDoT~&vnRhjY1S!?>IprN14IhgdBgH3MYOS^ zpkji7f=T&B2M-hzOv|7uNLmKY2)DlxtUF~hp!b{ekWy*L#sUpI=kx|j!~kQUJ+8rE zUte!DHmM`h!2H;uLouL1%8or-qrIsmp7_BFY9EV!D;gtl^M(yRU7L7d09FI?Y`ZFHHVW`c{ zPsi+(I?*ZJWx#P%Xnld5&FnaWL@c(LN|s4vh%)^xL3n1S;Z1ZyB=d_2ll&oHz{TZh zaenKB`--r#V+XMvd=myr0u(335d2@{C5(Z%@pHz6OZja&@teq$?&AE(D8l9VW(pi3 zTk#AGcQ*36e!|h6mGg1WVJA4aR1|GbGNY_q8In#*Jn-2e$T*XMv3C_(e5FTaXmCxA zPe@)S#B0g3j12fUBYo1)&|o8?SYL>ZYd6LF-gk3s(O}~KM;;ab2gL8WIHEzqckcUs zeD_=5jG2-Q0pU<5m~AaBZWc_wpyH||le2705)N$KVvx&`>{F7Zm!Eq%{_M~1iQoI( z{}_)v`H1wu7!`9psCR2iYjmyJpt@8?Q)6TNvw!waD+LLzHtF z&McSb1z<{_1IP_v>ynfyyKwJv_k=z0c+ARjAzZXsc&wowDgYi4R{adlFyCV*t9E`z%zL=gEmyGcCk#whcQ>?9! 
z{4}Xgob{X8&p-DA_h*+~b*U$r4j*|dzV_FD?Iv6;8NK6<_eIZ$3}YGfS6*{@{Jqb8 zK8B?0rb|nG!cQ{ z`bxYajJ9fWs(*0Z)qlVij8|_Qtl|wMHi=1I)qZT)2@o_d+Z*N~#UazMl5(*v7*t?1 z^M)3NoGu!mXwF+lEdJ+jOL+qtaN!m30LP{W^$F>Oomo#N%rbeUzKMZ)eO--D`v87P z{Z4=XP+SlVIZa`@G#T4=Zi~yWxjOoW`c>|H)QZl0SM0hg4@7H|`k|q*m>nGwF49#Z z7k!?X?qiv`h#~$`4vT*@fejY0Ifh$zf&V58dx9`+^LZn!HF0?dp2Hu@#UD*K=XWmZ#?nF zD^cCp7T4Z#L+rimV4RRXUDeSZZEfw*-qsl%&27m(Qaj)@jHJ&+9yU0se_+9XYI4eb zJKqjPUa&EdBaRszpBQz$_(Tx4BZj}s4$BL><>?cUefJmg*w$kb5*e29jU59e$dPmy zcU{=cdAP4ERi=A>_)7K-dd!wmmW4Xtko9E4CBJf{^eVCvTh-~9m`0ZqOCkeIxU0^| zHbeH4jh^$9O}B+Y_Q|8gS<80~Wl1L3@WBMrxcWdQ+!;W>_`(Zn*OT6SfxOQv&-mDc z-wwO(+H3u`%(!$3p8zQoYvc7d-i$LR&qhO2LtJsi72*@JIV(9ww=YDUMCh@5zvmN` zswJ~UwKv|bqpem~i{5y|*L*^}mJ@ZFTO7|DUEYAAdZl!#cbVqgsK2w@4@t}e9(mSf z`(Fu6L-|vyCZ1olk(?QF4g!Y$pt2d@P}%~K{*fab81P%33@)(zs6gTj4EUCv9G4VK zG;jA&PU?ebxAeN1U%##|05d$0+Z5qIa_T1mw9ZnwVyWxRpKhd3o=gwx0*BmIH1gs( z2~$WGE|=$Z{lo)r03a!Uw(In)aT)?kTAbg=)}8Xx!cu|({qW`XuVFeqOQRb&CElg5 z)EI~qPW1oKkTjw}A}w%;^FzX68T}lJ)1~s|Qc{uAwU8gU3PuU&xm6O7;y zlPZ2IRLM9#niERs3()HY^#Qc4U0b%r`ZcR#cu0M++R2>oE0JE8RA8KuefjLE^KskFojD)9=lf!Kcqm5IZw&YM zdqct4@Nf(d498IKV2t$ey~@5Al)p4J>5Z&!3+^LNJQZL6=J(>E7ha3k28Lon)tgm+ zLH+1AT5Ae!qfBU5X&is$&b{%$8*YvI>57;b7>&hAPU2vJQvIZfmM)8~XoYS{oYcTlcKa#2I(|21HzL2cZ)oSi&c_v7LmV ztU9@awy8AARwT(?PvEX}*>ghtB1Z^EW+UC{Nta;Qm$kNa z&Z+Mom>Q1I82@K~;pbvTK>B<8<3IhU{~Uei&qU|Cjj>s6^b?=>h{kb?aqR8m(Y>nE zZ;=%=UjP2R--&0Ren=KkF^b3x-NhWhG{=?;pV~SWca=x@@zxu5rgbM(rjCTsepGq)@eo44i8f4nQ%CG;+8CPx zQr}G|^I%^57Y?G5<2=`5n2d94bjeSD552)bb|20QX=NO^Gp8fmm~gG8*QGrRDjRy~ zqv5YEY+dYp!my`2$w`AY&fLT^KPR}Hj7PbogKwM;ML3(Nn1h*=PQ>P7vts%NcCF*t zXjLjrM1%4_`q;y=J*Hyw=C!)E`z`ko+1h^lUA8i(L3OWM?M(q}CRn|0eOz+zGT9>< zb4yn)E6&3`D7HtpIVW?8#lA|Cyg$e>yoT4>W;zBc*dJEBokG< zsGxFqhWh%^CH|vvC12Ip!Ws`Sj`5qplrMhQ*2xyrI3z8S)WnH1$793#_1?tUuRbE3 zn86EJDqH2S_=xSX(by#{n-W6_TVL3JvOqgf->@#G%le| z-cT+Xf)?}Yhv%ik)4Vs6_1I*BpfMk9!<<5oSTx~h(yM+Z+-Z{jG4T}v-;$nAWs?@! 
zK)!Q)kQc-vZy)Y93@AaelMc)6G7GnzV#&CW+J|+bZIR^iHlzrbl+p_(eNY(&j_0Jy zJsuWJqWz)}_0pKr<)TXgX*__I^jLaQ9OVPYiSVaz?0DO*1SwZC{{%NnYaI=4WtZnO zy7DdCpH4-ceNSO|UCQ;vr68BZX{AXoF<#}w2gcxO19y8%Yph+nK6dQd5ih)bIOa|s z)xcpOMg~vEGm^pYJ@IU8-?}5VZQl|djcNme*-%>%9ZeCNwzkL3S8a~#uG$)0a$Ciz;fA{9N^Y-_|-}}r*;$t7WK5o1D%DDVe!P?RtvlFw@HIp$oG#szL z`bzxrKmMh7;r_43+~k;Kxgi!cc5Pj|F>d_GN20c4U7Q*ii-GaEsBLWasiD&vUoI@B z`3LwbVYS|7{K*Rv^mDwZ#v&1mxGNnpZSpXgiBI_~TV;OXARNEQZqjAQi388xBe4^ERio4RCb1;3wd&|^{4Le9dy!9Rvx|;ndMf>fyGYc zdJ~=V5ts0!Y|raaFYZ}Ayxt+NlNedi(DTa%mcoFs0*}AjW#wjY(lbu^^Dz@hamtt79(uqTF z$N%`xzY~40JrLD()v>y(E?Qd};?hg@#4UH;7VFoo^C_VZJo>QgoGI!3gYl_Pe zzf?oTr5I3~7@M3{ebgt_6{AvPi>fL6YSiDf_W2N_Y-DiuGc_Dw$wD@7xT zfNZZy&oQCn;34uv|3tp#y4t9f|JwcQqd|2O$%Gwix@?+c)7+fj_GjZ?t@MP{MT|`h z`4bPwQGJm)Q|vjxUeZWUvPMhxx+-{Cc2YY{8+CQ3-*6nUVwUpeFNvl(3LLqyHH9nu zG*o%OJJVC}W(a3~iXrGH?)aPc@mfBBj%^RTQ> z=P$otlQIA27s;`+oge*|U=!zgX8hm|9;%ydI}w>WAa}?ob`~mSUJYcf0vkek1fRJ> zHer}j=wj;S{J>Jx^vB{qWq8g}cpGam+VBM%8j*&%18`)1i%l~#k|*ZfeBvNB_TnNN z0NTGG0TWi-6MP=?JGIPjBCg zkVFDpoam&yo^AO_nMxXi9P!+i(UJ#SA!gj1t|$na0&*r z;thKSk-!(Cy3(Qpb+F)3pr12((K^l8Oo04HazT)i^$r~2rb5g(Hxi($F|(LGIoe}o zyl&HeHPmszP-7^~MTHve%*0f@{rvMWGBgkkjZGF{zHe1oTNQ5{I_}By&hFK5`yC&S zH5#l=tAm&di!pC)%hDRZtr9 z8Q!2?vU*b|KZ`!z`7PzepwT;)rHRuHc8oHpf-I0fk717Y#6gzkA(!Cf{6XPlm^H03cfEj#^wdg^I>GD(+(@5d$u;$3l>WUh5 z0`;+D=dS4IZR_!24H(B_Y;-)vM~367tFQFHl8L|?b*xNUGpObbMx=tH&Jw-EBbP3x zGNA%&Nk%{X0i)!SA#TaELLq(LR>|kKW*#MYY$hOGo+fYG$U_Q%M|m7@i9piX+f)*b<-k*r#L|G{?yJgd~5-M|h8qm*TN!e-MBBt#8M-zkOeP^IP}+ zR)u5$0RQw!L_t)>vrj)4FKb|M=x;Ux@GB`(XU#UwtM1_>ccb zeB;}H9mh@_SDQ{d2^q9ENiVduw8y568)MD-jnPzSjl1vqQ2fi^{&%rz&6?=z8`Xfi z#|yIz`Z*qwHkCSj(MGz_-&I#Vk;UAAL}Iw8KSs3;dWATNT$#;o*ZBp#GlG)avY7Fs zEJOJ{cX|5!p37nWobj#voE&gh@)5X}W#gZ89A&Cj^20+t8i&mPPCW8Qm!pSrbb`C= zc=j!WC$DedB99CL2jmyk$#6_~WDnq;GL{{u zPzb(BS0NZxOEXcqFe}@O$)d^_pB#w??*Cr2cXY;8*IyIEqr>s&qmRbJk3AVpGVU?- z@A~N7@!T^{YY;FPKl$T78T&5TAIFcMb-O`1G7yA!^h2DIf!tviVA${`HMq0w=)&CbA&j2$y+|%|vHt@PY|fqqfFo0Zw+Q z74EiMWDBrBpKtCN9vzL*;o+#Quk+Dk4b2U4&5hT@1iPzerlKHusuHiR+`BWXCDT(q zXJdM7NbMfFh`Q=`DkK++oFbrfm68croG~vQw5UFet;$Q%Mf4{0gNzy7ny9mDD*C7% zrYE$;Ko-rHm@uUOgXU}oAd7XZ0KhKGbmE&ciw->6c3GHCv{&%l@JP&yM`uqTju)SN zDjHXc&zH|iftRWM+*zkDhIjd2v#;d(sq))l{Q<_=Q40&^1ZH56ZO*L?0V)c zcQ)SHrd1tls%mY6fS*52pg3yEj2t8N{2%(({5x+NP`nSSzc4Q z(Az2*ypqTAOy7py@##klC^C6R?OX1tm^7|)7MHSzTGKd=n9X%Nl8d2UAer>3J= zUmu_S>}O(HZF5HbPESvNv}mw=p}#+R2m7L>wK=Z5>T>H5PETMH9Se#@(e%VyN22%i z8QGDuQD0l|fGTl7xQgze%|>1hVXDzUys@d(ItLpQS{PyKZ&Q(hk&$O1;U zw1Gk%CCuZ?I*J$vrgBp_eqGSpN%TV*q|L%pK%Q3R=INX!Q9Y+?N`p)383WlV6Ka6G z{(dfe7cujGiiukHhusTbq1r=E{TUppL!1Y=C&WqLo}Ija*6t(C=S zZ)l3{#+GQQDMaI9b*yjih&!&mA+~p|j|vv%#wMdurYZUy0zf)+F8aj-7}3q+K;#mg zLca{HAe*VPOx0xvee?UuSaK{sTaLKw!DSH5KRFAqTsb;<(4)zvXWNf*dAQq#>f*MQ zgi`2}Hr*{YxRhN8=dNC+6vF-+a_%QpceiObM4nRDQVl_iv0*84bQ z5SQ0;InTu|Jp;%1XBW=D=(H6Mye~#kzsd_KF2*r;@SRBf%fx{W{v!?@LEcOjoR-Tj z&!dtRmf8p6j^DmHD|7FS<)mFGZaWZys|>{m|7axKJoHdEHB-Lr_voENq-b5^9|1rq3K z8|9E{niol*$rG6<3O;*7|t($;ac|nSt22 zX-yp5voDUHI2FVFoTkYMM$=K>&=9+K?~2LNw7*^4wPtnPbkqA(mj&53g_w|DW^8{_ z1FG)pJXC)-frERORn;pUd+XyTKTC5=O5KY5d+s%Lr$H`58qS>82{0Z)}t zy*M95l{30-4*<#2)< zCl1iFKz7gLNtO%+<640}-EuL>Brh$n>HqB`M`W9^FuEqjrNb+#Yi;+lU3f{b*#xjC zyR;-8FR2YpXq?0Q3-hu|7;aQ*yu*S`N5`5d7GwubPe zv2EM>cNS6Q6@RW!#Dfi4g zHY~%GT-ZkQv%=MmjT*Q)fbzkMjK_1QzjC%CRHor%@gPNju7xGF4Z(0rGmg+h?L)eran^;t{uq{w-u|JxV{m#- zGA6sMbAx2Q5Tob%;_H9;<+%UJ$Ku4fA=RxmN>LsCgLA4&K{72W2))%Em2vHrTjJK6 zcf_Upcg2B&TVwy8P0`sVU7)edK>t__stueu+Z$)j_r$5Q=i~Y3pN}tn>5t-%fB!$q z&ePbuZBve2m7vFOK`kJdDy=xF^= zFX2FpJo4%e>s&9z(S7RK`oq9TuP|ulXL(hmKl`Kp0h#An(qVnVo9>jshPevq^cwYB 
z&5cF1`-nBtt=C?$C(a!?5#Ri)uf@8Kwzzc5n)uyc`H;8e^z}%`GBy|)jq}Iej(h&> z|BDB{c~8vsp0vK)vtxa9wKvDcb!*~|yOc+|>b2M2jyDb+jiy3V+D!m)k#Xw49Gqlb`17ek=Ks=XrJ3L3#Y?48-Ic_5$e|#` zsUoxmRR~*yO*4EJ)cR6?-Tvk z?L&S1*yw0H^TLa*GGuiy&5Laov*iVhV~tZ9l@d(TM% z)GFsDqq@K*ow=xN)cC2jSv2D$n_17x*A^Q+C(2qaYRkIaycgDL-}Lt)M2c4)HjOeT z#2ACO{%6NW)76_mLuOI^fL-R#CPt;v!N@kFjPosj@Y`tK(&9=|S5J z>0;Aqn)8RK$f3&Tj*X(bZBgqA9I{2f$(=L`k$eG)JN*Jg2OqCH>v>*OVNR995eVlm ziLjnjC-_7AwC$_hiET3ykq@DBX;FyP18Wk-3C?BDcBv=U2 zM5pB2#&2~^^8360iXs`3UQ<)=iOH!+79k`Ys`EA1-<%fQr~8y>PUR`Dgh%WUw!o*8 z#Yg>>1W>zr%N#@CHd(6-;Ub4iYOE+VwU4=rzc`e1D8jic{Yj~ zm|S(ub*j&eqG_`Xl!cgCR2dRzgc^FWNCYeR=9ecM#ZNi{cw;9f@aMY0Q#uSq!!qOq zfhO9WiT*smqY=3$Lh(Wmo@-=8Gubm2)lw?nfi8M`@B$OXXCoqK!?3*52R=e+j-2JF zvUZ-@u)e>jFzd^2xBmlkBm9gJ=bgvLh4{C)SCX?Et%c+s~; z^2s8c2lG=g3AfID(oF&Fi#ZM%zxwkp$HbT#sNmfD(1UTy4Y$U& zom(|HFRGq3o*d`6W|aHv?2PII;b931g<5g3S%+Z=16|}vZK?{&(Y8pJ1{@w3So0}V zca#KtG{34| z4IepCKy7kqk*Q4D3%sKprm|VEFsyvAic;`%sv&t$%gByexlqxw_-?pvca)Xt?n{44 ztUuBKOL?f5ad9>5(r#o@LR;{rOi7`7B*fJQ6uIaa1P5iyVA}(3=%)*vT7mombV;2q zax8k+^5(NT`$i42Z@=UAxa_h^6}}K}y#1Ds{ufwd$??g3gM)GC*wJ`FgW|`Zd^#R{ zTCo9v0k&^vADX>6*CmgZKq^|~mPD&p4n-Wq@K zrT^@~_Nd@aPL*Pyug{%KtvY#j%(Ej69nM4sX!{5;^qplcqc`-D?o0zi@eO|RW)gf; z>F_@dEOcQsC{#U+{NM&Pfk!yR=1M$D7yQV0XE}h%gu{JHKz7fGlF1+WyM;aGrxh5O^Km+N+ew-Jx3WV!0|Q;-{VBrvay6P^p1~al-$t-rb~P; zYubJ0`Za5!*i;lbs^ZJ{{6#!=-?wA+rX6v`ccRk*h-TEJH>*n~9=D>1`%n=B2BdAS%>0+7_`cLO0poQ2UZDW8syv z-!YCCGyterq;I9IC;6Zsw>=}G`Q!llH5M0VIc=b%HVMzgEy*?){cTmP`UUwHRmXIc zzvM(RV*7(jwYCBPrtPM$XAl+NIs{h#m4binwW^Wv58oe z?9@PKwIlV$?n{&Y6^_Co?5?t}K_FsxI=WPBz+Q zM_GqMT`JFEtc{`m z-gx?n2V9qRnN%OR+ zxxtg|*a?0^VwMH%8R;RC;sGaT~7;DNW!IBpL3 z=uqSrS@w6?R2I97Z6Bfpa{Z}iQQzUnnq>=GiswGjM?U12jSGab!x-&RQzttXy(C&4y&|{SvATuaR6CG-Uep{6=!BU-P zO7o@-r!jzMrTTFOLD;{@aB+$7^!W=>udvCg4uadr}-pHHoj)8!rg$~dx zw{)?}0Sg@RvaRqHqJhQwY zQ-&yB>KQz&`kY@kbdT>zo@qDfb51IYwzz;?gP(8(9ykMsvU#T7Zm$$&NTM-zCeOf8 z*>2N<=V$swrRUomgh__{Oa>fvBM-2t16b;gbA5mXboFak>N)LQHZ(L&`5ioki}Qe| z?yhh8GiZgG^p3(Wn4~RVv`gAtY7acq23g8Ze#+9tUF9X)N8XJ8J}m~|S$XM4m*_|v z%k7o~Q##cBRN|FSds8U<_1IcZj9F-h=C|=myrD0tLPx00v2kzrrcJTBy(MZE)EjFo z!lpS6#GDbdIRW6Uf^l8Li?h*F8jo|s12H-}7*n$oalE%L4xQs?Qlg|0@q;cX^Ovbf`^rC?Xwd zzGbpy@~D8^_RRT*ZJ63iqVF>Q@;kDo3*AUI%P5}2KS0?gMe$LdVZl?{68^-moPXFD z$)CjopS125^-SaAp6NpukY6fy8GqqJy(}N9SSHU(m|%hf*+d`iNuv2<+`wOIS59sC zv=v}eFZ|L|nLp_&a1orW{a7}sg8?j;Tc%G2&%w}|Ao~+dX)n}qiTM}el{S@QEv1{D z^Lp~^OU`hWMnxKL(UV%Fu^#fSJCRGwKcMpxp3a)(Nq8hY6YS)tT=-{rrk@hRFWOR> z#)6;G+U<~B0_-`Z%-*D2zMVcNKYeIPaAzckbH!@4rzwAW>8fk?$62+ZhrfA$JoWT5 z(b?4*?Oh%5kx$$m>$h);=U#j^>eL1{ZCVvm)1$J@rsGfl`?_#X#yX9 zfV(ByQ4k!%0?_rOvU*#XFXmYp5T*3b+KdHCQJ9JGbf|9wLY%7_A0+2h?`Ma zeDi*ww*-Z0k_)oK;!O|jmmAJGg1!rHof=Y1zwM zRg#H9Q=?!>4rKq%$+qBdmpREqMMY8RBTCca=X_0cty(X;xL&p$aBAY@i3`FK6N&l8&^fgsx?v7 z*rMTVA;xFt;`pia(b2U^ZLHmkF;mmy-o##~aq{SJ`Yc*?&Ae>Q-njazy|JOWCZ^SP z{EdH&i+nhdJn)<0l0`lxfG**C)3YMboZ6Ykui^zmLF5}+d;VEmUc`<87SxpP z^H?x3ME}ql!OpI-?$r%AN}C>tBU~4=!Jmx8YL1x+)^->yffcdM?ID7#(%$uo<)b^O;qQAvYIbeyyk1Xj3TkUjO|kE)D`HY@y8o?LBbb+)nN+cZ(Rb=_ zJoWJR;(z|hAH_Gm_o&96Q$A6zxuGW7HIAVzGu0xnaKh-$OsLIB&rD7aO6I0wa9|)_ zc=5IP=DqjFfBMoN#W(-QAII4vFPjfh)2gc_mKr*uYtO;B=7S%Ls^&G(r}5Dc4NUEm z&G=A;(+j8}IvrZ0JCT+2@}nd|`keBuGOUk9bMcdYBE9`j?i7w&ws)bih)5+Yiy6Ib z4`npX@|0+5oeBv4D!AE}SEA(3`MPb;1w&Sr|ln(inKQG%*3@|H0vO;MBc6@|GB%`3Z-*nSu z(N=7X?|<`KG4SjQamf|C;&;FJBeAxy7~>NQG1}K3iyD{oo;w!*^*{Vhy!QO#G1Y%I z+FFXy(o!Ftt(|fC{ylNa`#&UoT#9F(dnI0a^~Kn@c1?WjlXu1S*Ip~zC!NkbF~U4J z^hu}E)YUe~UXp~!j#yd{pV1$xZ)HWD+OBL7u1ai3=|K@G(HXuVfBHlI)K4zVNuywA z>7wtcsS=LzXJd4>MATN-3&&=E26*<= zKr9v&7`1BpUD4Fuu0ClgrpL#kS~PEIZ%+C@I3blWpvF_O%Y34`WWTzG_M|%Lnw_1B 
zY3U@Wz{xml*PELhi+!6{#ibqfF*vDyoX%NdRVyZ|HxS=h|KyZM`h{89dkyOQCPXjM zTe34dQ$iwb4w93dFnqErI0zL zW4_FvL}dY8ZQA2CUEWZ~`i|frujyi}%v`1LQ2FW92hl}*l`rWS6}wa=nUUXP6qSiu zwEYO4ESbLuE*k<5Gf+y17FF6IdpvB=yRcj2C1PA5E-A%vrbH2`>^a%iheK{Ch4 zcz{g@vnBdKXhqwQPho7L&0az}Uv=O>=NE+0I*!VRiK;m|+Qi2|B*+W6y11iM;Z5E# zTPcx9myLE255t*fjtIjbNXCW8rCKxT2C^v+4udNhAC!ra;6?i!K4D0M^5=v2v>+`! z=}Z)mx-IT$0ARybmGuR~<=``^lJZsAz!9D{JhD!ay0hDQLBs_o6eHiVy8Q!pM{~z! zb?U<+o*J6+d!WIeV1OeaF~XrCek)b`2~oT|8|ski&9C2@V$`L04+Qjw;q6^1@WZGh z80;ui{RHBz!*9nW2QC+U4Y1TW_?GL`*hnnO=sk1%SR6m}T6DCu8y0V?bM##A@NgVH zek!(X*&Mgu_I@d>HYu?}%pxc%B;Re0V{-(!o4=AK*M&G&keeV=6+e*+@g<)6;u0L_ zj2u~B1}(}GmbY2Y-!kiPxsYygGNDw~DTw75d!rvW8n2B1kEpf$H7)NVMw zD-0nKK;*;eJn9g=yGjk6xAD8$TjKobld|ciVr}Q@s4vvTGfzDh6KX&^ zH*bmk2M)wve)a3YnoNfVfHa_e`#H93V@$G>SRDzCoL&5N)l#yxQXaz5n_1wU28i&G z9d&6zRc%2$!{+4p+*T?0J$rY=L-#)v4b81FJT?>$J^X0A|9!WMpH1Qyoii1%iKKc9 zcX*)=Nfgm@YUHV{svW2k^T1lhZQ^C3i%*y!ztK{-`InsAMRw;(bWG{;JBpoT?5cy1 zC~mlmECbstgGZ#L3d;SfL#klRZ+v3hUJ{%n#T`@W_84dN#G7xH`UIP#> z*of8`E3^yVG+;r)yBEFKR1@{}4KlKtePn-oduOcQuqoPPaJ4jb#CvYIF8=NR`}^^s zkK85WYBbLFT<~`cX$Kfr6@oh>!-SJHTrV7LOP6KLoqnpb2M~3qE+1(-8+S@YhX2 z7>8-#KtQ^3`A^SAfnWRc+j82x|HGyGJM-f(5Her-rA)~V$?)gtmvKo2rDr{5{e~@m zWLU-kZ^pA&$tI)v#s>GdINBloHH&Y^0}~+Z94!?1UjMi!IaYP9ir)U-c=Po)RV&@a8|IbL)m1Y7YIGF~7@bqMsWdht;pc!xE^(noV0{ z_s*TlHy?EsbFq1~+IUN2XoL|Z_4`XoSKm;SAul~EI#j5P1&;D!gM;K^fkpo*4RR(X z{7C=?p-|USC|s!rbP@6jfI2Ev^>KaK6eMqD9Xw!%h|){maF%I2uB+&w{GN=2grXyQ zfPRlY()3l_^ze8r&CJA;k31BEeZA4Od2?L*p$|lJ_i7;d%_+V=hn%p3g5BT@h)*9o z5pTZniez*)#xy`ZapruCsg76OcztZ%yFXrjs z^Hz7^a38JyF?Z}gDJ0Nvpw_KbOrZ6!L8 z-g1h3>sfRaazMzO;uxqvGwEIQFRTDA3-g?!h5V!cDIYkz9S9tsMj*LhGeWZ6kyUJ0 zWK_J8N`em*$%Q{0ASXvv9Hl*x|yS`B=U z96c82&YtoB7`spit6ytUTy5;#zbDph+HBcBb^45Wcs5o^me1%uF*Y7sw{D5m-QALb zshFRhj2hv_w{aIG6Myr~`~2oJZ`9PO@0$?sDW5tZpe&YP*Rv6@*iavh&CQZs+4-U= zV-@QP)rnf+ka9o37S+*V@W@VhyY8utn>cnUzmG<>OZqDT$?pW~$4MxYuspwroZXEFz7=;I`LjGZow+a5C%;pl%!^YR(xp6pyvXsK$EWM! ze))&*37@>}Bzil5yv&fa@ECc1YQOW_NPtRV6QM{wLD_jz*YKfa;KY^{EAAt`{gKw2<7fS>Wiou1fk(_CgIy z^c5QC%*D9Ix?}2VhNc%{xU>)>N;|H>2A>(2)g?6(P12(cYU3>T)d;m|1E(^6ZA zmbzlBmd)8#+ZZiX_0iGP9M|l*ByPU+%2?Cb9!n+Zy$R{;Sw2^zenR=#xM(_MG)oIm z)Hj7eccPL!L5-Ad*`@RurQYS9(hHnnLiAiV{fzyW`STohDJc=}fOt_VCnCi8oi@p- zb2({_Z(e4`?+gc*uPP{uf`DaDiq69`IJy6#OFAu)?&5Nw+r=*7Md7|D6;y4pKbBtY>XMqNum&m|UM_S2(bRQFv3u-*xSW{aS=O>2ZqaXidY~8j#PMVL-M=%_#IL?6uO zh_1ZqTJ>$)-0xwZ(w}lsC-+Ul%eE7B=U;JU+f#imrx-F9m3UL(1s`{46WER-ETems8g^z*T0lkkTLQ(r9J66pEola1aPMi-CY)5g&R*h?uoA!CY~LY;G`UKnXG_KidNc8{;VG; zGYckFS|lbITtu6MPzF(WiCEig%M@gT7gX1)w05*JL(++(r?SUnf@sW`j7#oSrJNLwX z>Fg1~7}vNYid_+nt8{M_=t4{k^~ZA$KOBGa)vv~%{qdi~pZ>tFOi3BS+%C`|pqc^56bj{K0?yv{^`d8wO7oUyN(2(FYM`6>B zD6HKavn`!b-L)~^`_a$D7THpz%6i#n^D)k+DkS%8oS7H;)Q1-nO)DN!b=lmmpJ7S-g1>M0j#i#VkaMQg! 
zPC&0YUv|%)6T&1&x8;1k$>pAOcbSni6XN_nB3)0j}H{guPWkmb!?PUzHNohLc+XWo@gHVx$^S}K9) zNFRY*aB+ej{XO%AypT${=+Aof*S!42i#N_wJ65eB@*5hj+T48I#?_r)cRbP#$df^~<6moD~AD)W!A=G9h}>-{aKf z^Xs;q?27E-8T$f-3e6Qq|6zG@Cg`GeqJ99F@Pu+akEpR1d6~By8ySyL;Z|3yi`Mq$ z7#LzR&urRcBl7bi4>)5FynX0sTzSP+(Y2}}hKCno!^V1z<0|6(g>(KSWudMzO4F6< z4_Cz-Zyt$$>42(+##qvLtU|P9E_FdT&51wN_4U!vTCbW>H@ApXuKQ`#P2uUqOy)7A z%VwuNhiEz}ZH?+(BN}X9*AbVkYl_hl^iguz4Xg*|b5f9_&Yw6{nS6puwgxXDvKd4q zF>SnV3=JU|q+Lw%>}QU$??nrhbQwf)D#X&*ddX} z6PA=hVloT^KQG9Ue@=L+oCV$tm%v((Yp6$BK=F|esuy>5mDD$?b0<85#{j}Ot1z)V z34t4&oczf_;9MwGZf=~2mRufq3K{SPmc%Sw=AddMX4Aps@$3Yi7#WV5ng-z|qeH1# z@Bst!Mod3<>bM6Lv%p|cMevxoVKTK+gP1)B4#c)yJEF2qLO>pim*}8EFzh^Gso_Z4puV|boPTxg%gp+Ue`z=c;Rwl>i%}*xDiAxJxoJK$+ zS#I1kKyYM$oMZ<1B0Yn$xh3UOt>>kv2q%w?9&4+_GqV6(MVqQZZ7gbJQCDb)miF#w z>s}kv8ohJGS4ZcnXm4K?^BTOBq+GY{*byD=P0Fu9f@GyMGZlxA9+E6+5XtBS#iB+@ zx8!4kvi4JL)ZoyMQj29RUkii7^_4x{B#FCTrw&OvtfNiLupJLOZG$p-u@9M9zo z*%Z0B$`1M^PgxE;NYnt0Z{o4rh_?n3pAy~6m+DN-bNIOAw^!xBSL&DWN@?}XmA4tW zq=S;Yf{k&IXC-VO4=6a4nJc**4`9;y(23cfU3Bce*s;xJ{PCl7Q|DtCE<54l&WiHr zSR@aA!;j~T*5HkE8u5S)M&!ZKrjbeVA-8;+#qp|@`ByF7Q(JE%oo@~O%CG!N{KcRD zMf~G`@=s#dWd~#cfCI4A=}V8IgW1%O7B}e_=iOP)s?(UB#6JfB*ZxAM4hvis8YbICG&_M%ILB#)gq;+NbKCCQZ_gVT@3A2V-=hn^Qf7t1Fdg zlO`d=BR!i=R1{gsbh)3CM?bot8)5EaO(hHQ!;$n3j8*eh3L3nV{ZmQJhve1#oRn^ugtj_JbMtSIG zc&lWJ2{iFtcAff~QcO)viLXM2L5=h~yZfu^>Z7f#E7q@HZ{0zk!|_xn55FEYi&L>m zgR!RSrReHt5Rma$m>$(2Y%Hop4{z#&k0M6}_8D@b`qCam>{y&C#bQYYk@OukxANqo zy&!8`7;5MRkjO20GpA)+@aVWuuM4q%f34&-2nx@Ju@Y?YCZ2T}ysbi=6sf6TlZ$z~c}cv~mAUnBY*w{Ep7>ra^hr08st8TR_q!$T2)u!mJNWyt5pr ze6`@R?TH#%dEs0}#o_2#-{eb6*WG#+y)0U!_9?9KORkIR%j=|eRu$^wmVKAS=WqLP+_Y<7Y-;O_2Jx&YovDv zJT2*fA-~&Qel5E)?(gE>EiQ#+a5KGQ8mR7=1@zzUM`gDp2S&)fBCC)(a^_*n1(ZcY zZe{qpyLaSY!L!9`^&`9p09~?hCxd^Sbg6>C&$b^NVPRD?>F8diwxs^0G#zKpo{f%< zHrqAL4UNHD$LG$S_k>`v$XmGTiqFX<}_waHfWJf7;}H{$6x zRHIpq-+2>%dS+VVK28ne3=uZYiCy5NwppvIj{M>Tn^0$zmLp};1f%enmo};3ty7k? 
zg_{?Hr=z5?0^?`DxF8s&zeJTgn_=r~t8ACro)_&SU*{TfbV*Gphfj=YA%~hgCLjF%HNU z?Nb)`2CaChM|H^MQsVAOWa_F1^GbDLtd!fO^P)fLY&Xk!UP)~!jmJ})UXpHxHjcyg z6fSITU6c(@(4`-_h5j$UaXMajWlU`- z#BbP{ZETUNQ~P7{Y61PV2*0EwXhX{9c_X!B>Yrp+EnuGK?IROx^cVU}_?6`%kI$T+xeEj`m*xqVFKi63 zb^qrwQXa~}E=px0Kj>-7l&ly@V4-7D9CC>Mqkdf890A&y1LR7)RyxAL%@_0{qx+{c z59KSTG3iZTJXxGB*E7489ry}Q(YRW5trFg@huR!L*@85@g_z4w~9>%ALeLOiZgU$H3LXmWTc9=z{6 z@!-ARikaE5XslU?&W<{tByiK!2jgQO`&gVmc{cv`-tR?!&tTks%kA;$PyJ}@-nn10 zQWL{N6F!L>`Y?Zj-B2sN&Dx{+DBReLm=-tDSI9oPg8nb*4RjPbNWGr(upY%rH}p-| zC)Ry#GfL0;6Y;`-R_&NOG65g3C0wR>UZK806jNM9%qkE4+V-v6;TQt>GM&Fq;u`mm#g z!gX5q;`~s|PY%Y+{G@F%ubAUB)~Z^n5pB{u7;Bf-UE)7WeiY}MKkF|NWsQPC&5}Qjty$+tWx3sFIQjF<%Et@0>1A!8OQuu~ zZ4()xy)cGZ);5AEI|CTDvjmfiv`GfVBh`&h6me1jh_OBkUEz<%N1}yKrcgB`p?*nh za$-6r#!JD8e;fcl#fyE&DH{f6l@DG>7W~4P&b(4ZjP(34q zidA@(5QDu59+PG~G05VLe2z#5mW3p9Wu>ifL2wy)80<*E1qI=9D3gx(=wyfzJfkQ) zglobD6R%265%qJl6y`HloTbfF1}IMTyPMnX0)*eheb7;DyUcN}FPC{yf8 z9v_Jpo_<0~LB?yLHf98CMuIRjF%gH4orq#fYuxev_e)Xj^6p#7FKtgoJBpF&=mC6b zfCRW9>KO?XFBBp_NE4j|8r<2rP2G?SR}-}KL;Ds)YHiqj*VsGL9j`hnA8;xw*&SW& zff>6Oy<=IK7#xbCx=Si0KaB4j0)3Uhu#gk(iiEcaI}KUCN=L&L9;79|a3WtC1cO+r z7cgne1r0(*2m0gan}=e{=FPFLdwo3e@cnV@*qPY6Wpmtm=ZE6n?|v_ioa>8@mSPMI z4@a{G?^j-ZxfGEkLKg#H2AmuVf>LpVhjuO(!bw`Ghww!b3zDyz>hz6&@SH4-$F{vY z-6mds>5XV^?1;X>o_OHlhvV*#d{oMzTDvtI-2<_8x1oA9-Oi5GANU0JmQlwPeDF9!R2Zv>4yKsf4 z1nEkK7G;Y(SAX7#kn+q!AtYfqnbpt`FW3AN%NMV*jO=MQc}gG`6-!Yg#q38pZuBl*`NP~`264diMZmjgC3k;=pBr{fq@tv zk%0lFrSB~dYSXDA2~+9_P1N1_nTARIsjqpXIJYdX z%k<6ZP1s78aBxyNcwv5!Jc&;NkrYZ+4q?l8(_ZB!TovXkFGq2faRB|W%Vo!TSGcU& z%Pud^?R#as;OB8fWc9IJ$eYp;u6rKsvK&Eh<{aUB>T6RR__5&G*idg(JkcP0cT)EBH?)$QHw!($V1@3;Ooo_hE@v1Rvxxb%_(@yq}8pG9?TA%=z| z8(Vhks86^1e`)$`dkI_IVqU0zkNi8?tEN-%X^}!qbnYN^s#vO!TY1W zu|4+e*&Ca;Zj6_ne@=baeEj&2e!^{iY-~g@>pa2b?;MF2)C_%$>V`M$Kz4fBfPpd)g@Gc*%{Y_2)!KSgX zsKLidJ?FRDX&$5#eztu8?)1`wD$AP-nFw`W^kSNrtl-l(g5fuTl%DUaqED$epGxxN znff>0s7Z^DG6JV2)ow?kr++YNI1XQZ%?;OIADcFAi2Ay!XzOT?`QgzR9~cxq3sEgx z>SSC(m?b6wXQyRf&&aQ9d?Ka?hc#H6lqIb`2$Es1DNmAr>O;@z?5dNVCBJ-U6ykX? 
zk+DN3Ru@Je*V6!&Hg|Iz124~SscIetqq0k)knDxEYxFcJ?x`f!wF z7_Zy4JFffqM=;KmPIAHkKm!4AfG)$T=Uizve(=O2F|WRkN$;aak45j$P_%WfitBE= zK2G)Yst@gtO`EpFo}D{_lX|906Dm{kAfuj>A;8KDz$%B+E;!!J6Usu50Wx&)X;J7B z-Fb_NxA^I6)v{o<>O?)$j&bNGLMAz)o{~i$A1HoU9GRR|UTBRSMFFOTl0Xp4oaG3( z3bvs7-{DZxHj(9peuVVERat`NeAJL<;ebpfegY4<)Lj<^QaAYOOq9(Z&nm@EFsNgS z!x>2k17Z&I@olP`k33T~XlEQD?1_05#^;KJLYx;b4j+0;0~9t5PJ0s(1A1_4lzn>b z4cA0-TU+$^^~YO>j{4I$$ZVhbtGZ%MTz}0qYDX0@aG^Kq)RwAMeoIqhJn`@Y(K9p= z>sniUDg?G8liajVhLV-iha6RmZI4E2?^qQz#e(EOMliKgC1|&%pV^RPA@v`6hCld7 zr)N8Oa*@kp(mhQ^N@9Sgq`=^r#m|&ezly^a@UxMSFDt{UF1v`&sgGM3m+}CUhmgN=%yVFzmi|V-2zwF9=TwZKB-8<7~_~hyJoMbX9=kLg$ zruf4I#eK#zhCoBz*ul)TrH~JEkXP=j-Avod)Tx9qTs7 zC0lmH_Vt^it+h$Ixz?L)iqg;Z8fz3a=4h;}i{_ewo~xq}3fFUitD-iFvPbKr)a%th zHS4)veN&_Q=?0~5DKtj0x){xM4bfOzujl&cY-o*Um9eq2BW}9vin#gmD`L;)jnS?$ z=clK9!W5sUnU^hrj^~YI>pU59?kk}IbSsnLVjAM0l}QiHG8!-QHFx+{>RHA!iz~k55SfKhjFDLh3zxSy@WRQ@^9p+SLhz~lJNclni~5BeA#_jX&5QIipm-?P zf7yXYy8Pt7M8))+&`;&$Sd`m|8|XXT#buDv{ds!KT$aXl*5yQm*`r&cqsJJ!Qwl6( zhJZ2qg2Jks>Z2t7-F5ffQD`W}u~*-SKmX%D^_%>2vb}fh-V=9x{KN75b1w+oVrq5!L?a z&xqztw4BGbu$V_uT5rzrgktK4p_Rj-g)VCA&t?e-N6&Ji9CH(Fe*Ic6;Pcrm zWVc3ggnaS2HRdyrea{^$Exch2hh3&%$q8*9+j%}38f&Aovokt6TcfM1BZ|e^7#!}8 z;lAPM9q5f2(QwDUJ<(Wbh~fU;7#y96mtJ`(HtpFKS6+KV3{FhP%dfo}wJmKL4;96q zsc3Cq5?W)S_fzSZ(ym%XdUm4dZ0y%%B5fQ_)rk zBTLhsSQql5CvrRE4dkkW;`8qo(__;PJhz#JrS=33xR6u^G1(wF9rnvS!H^<&eNbP6?=$U(XeljT$q{uY47pAkefrqHS#y)^Cr>?zPdr zX-`~t+Z}QBd+&^vHJg1B)0l=U!;`a7V$n~;;AnQpK}~Ikny44^4ab}};Z%?I?l6+t zqr7^sJ)HvlyL?wR!^>boG`svR8765~rsullB@b|a*eNoHfw#O^n1#LLp2Glq^5^oB z!+uA;a*=s>?(&F>^V5H(`uh>~TSQE1VQj!p5?pL}2JSlg(iOL6|} z*%&(CACElpz4-E<{i)}vR(G|>x-DzeZ`H=WOLoTHKl-USark6><8L2~=3-skaPy6E z!wok^v--Btp^=!F8ueTq_5y1QjQ?y`&;a$%+zUd%T$%fI{dq$LL~$LtQoo2TAY&s7 zQ+jL`ePp*PbyX-*vues8k zPa$M68tuy3;KJ-;T-~bmT!AEiTQ) zbysW_9V)Hsco_-3hHaoO4kC*#N=uv8AD^A96<|J@4P2j$DLa>WmDH-RUEq_-Q6A>i z&&O*W)y@fyagL>o_T9U1c-p=_2 zhp=@$SDV?hoQCIR{W1E4n+l_JNQEkg66!W|CBVP~>yoICNJ^#dk ztuoPmNF%g-rTGd<0xSIr#;oVPu)h~Wzp4-R!~f6Pe?ZxqUH5(9u5hdB-pa3D&fV{I zPUuE=1L#IZBNG6VC}xQxQDb>zk31e*(#Tr2m)F?xc*e41X^{$~Ey*H5i4?_bf<#6m zb^tnezntp5%Hh^cRky0<|NEcs+`9GpH3)h6RGs^M---L|v(Mi9WS|7QCF@YWdP`s6 z1;jxkEPn82dh1%!ub6{|uM~g418&eUIYkGgP6iPQd6R$yW7iG`o4t7*v) zDpn4Hqr_lAWk`x#$x6Kk#`UBDk_nAOXtbNDi3%5@^W+~0&0$@Esvs2a1h5dJZ&$R4 zb(5XA4A1E^?H<+>KgdSFutU$tmJ>F2Vpz?i&}tB>a1({{P~npzggQce!Y26iDmTU3 z>=R5PHYWMiAk(+q97LJBzDydBu>2)}Hy{jJney<8Hf2P7 zm{5KaLuEDL3*lI`JY|rcd>f}?SV<73N(aaMGPwpGkWQP8TCA-u#r}iG6_18aWtRNT z?)v%5m!#mXrG_uVG0&__y>NM2<>*zX=2)Ci$A-Zml~{j*fmJK&Bp{&N@x>OB(D)P@ zTrL~xMz%*~QTfbpWKr;`dyb-|@pEiAaMOrVhfP*73EQWPq;HEx6rBd9r{mH<1q*m! 
z!2?Ve8e~!ieR&N_n-KlQsG*#l5aOW9PbN`DR;BZ zy?bJ=0_ed0BW5@aG4#slSnS_(IJTOrQD0jX&;#+dx4q36udQ(e^JZMVd?n_tPpk9H z#DTCQ!AYkX=IB*tqAO9DkSKapV+W_OrC!mWC%jco%D$|6y?o`0CsDW;@MnJZXX8hH z{Kw zEy}Js@*UOGA9d;WMZFUSrl3Oj2@ydDiVtJdllJE}sJOJhOao5Ogu)^N(v?v-oH2en z554eIyCXmD9&&r#hFPg?cqaz$v*9(6w#I0WlJntoHZ>1EK3ak{uX_%B47c z*X^d~i)YWoou`h*snf@NCu*&_BAOH*70=|TM|P@gZ8v50luTwicB-cEEmg+F& zre&V4Ds3vFQltTq^C?wPI|DU zE!Z}sD4SPqsyupcZ-_UFj;o6+F+aZ;ZFRcH?$V{JF{)10@e@ad!(qwj%ifuH>hx`H z+t68gus4R(nepie=o0E!aU|`aI;v^nU$8Tfxn7TEYfXxET{>bj)@u#HRuhk|d)04` zI^W(kC8VW6mZFT}OO~Q?m+GQUlkys5p@(>a=?9$$`G-sePw>fvTk|VDlTJvQ`qZPt z4+?tbJGTYW%6!IpoRYF$sTaaPrA~^v!c(w8?hHmaA2~Z+o<;STe+rhAF1n8wdJSu}8RBx!1bJffJ zJ8$TlP7{4iD0WLa;aYt)nj34bFVnCLsKpvHddZXjmIr*xBi+>vE|TT$Im(~=CA%$k ziR=NPypytUZJJMYXM74q419?YxWS?VJ4o_rivVspcg~>bCQ~rgOfATBR z(0h6!XA-@n?-U-Bd1ZcPKWWkWNY*k69O#+SNb(7rzwPF-l1|Epvh5gPgWd~&!-V@z z{BE1!+c62J32(_5W~Z0Tt}*~?0sk&rz`@P~?>=yQNNGF$0 zw;engKl0#v;;%jRWAWEM_#^RiPkbake$S)vwgV^Q#K_***Ebv!n?o_aS#tOhfi@yu zJgR=msPY<>+#eQ5quc#4s{YZ)_CO5BKuk!M?G<0WW8cB}-~*4uPk-o>@rytFQ}I(D z`H^_w#Jy2!^+juOJ?hJ?Xs$?qsLQfInMoH0CX_ZZ(tK71%CbpJD81iga{b-14vTui zn3tg+Ii2iY*f*u0>aUAjI;PvL6Q!&EI#yhTwA~jf@^QVXR7}Dqy*B5sZ|kqwK-0kg zW@GZp?V$iBwGCb--(|rSwv!&2blJka+fWq!+9v+7V{%BIlsWSN_Y>UzD0pYl z{!&Q#CwedPhhNejp(h553ByU5=sTRiG$h>&*)TIZAInh{ji~=Geel%jlM1>WE6dC3 z$B!z$%2zOqnVE%HS09eL;Do-CbnyXBbCgVEqlVkG^c>mbco)e}H!epwZ2}Ly5APCRGduQB~%FK^D-Jr4ON-h&$a~1(`Dw2293-+xo7E?A9SLv0&gYdegu$!DS(o_>@ z3eOF@p&$CxGQs3QX`v?9QL;s!33?Sg=-f$Hk_*<~;6Qp0gGe&y7g+O9V#s-F7MtA3p57@nh`i(Ek1YrkqMLT8)jkc;>Qjf$qtv zQ)i_MLO7-0Fr2ejO%$SM$yn;dD$qQP|8$Phx(yP&9y)c9^1zBjz_;9+k zJIbK2s(W;zq;u+BX>$?>7X6knCHF0dD;}eoXR)r*s;DnWdKkXhvB^XN{T{*tA9W7R z;lCZNq%%u>qOaF#)m)cT`}V{**8!9VVsvUE>djhQS-2KU()DWfrg!t7yz^*$`bU09 zeS>y<_xW$f^WS(zxZRBRJ^r?M?Cw)>?TzPR`PzlpKRzg3eKE!>1EOnH9`v9`UHyt} zCQ{UI-Y!)XvlO+Ka4Z@3lInAA8LdYBO7-8+Pq{u18eoDSI$`IlnxN7wXi<1c=z@pv zKu*DL3Vti&aX#@m#&Uc&vG}QX`pFWWu{3IvXqI!f(g#zLf>-fnJ+4h!Tkw9-iD2fv zV>+^iI^;o}@SW%fK*`Mvps>JU8YoQXoji;8j+@I0u0&7JhUm=Wbf&+=Bb`@alg^ZE zqA9%zu5@Z7=~5Pz!|#I4&MDt)vq`VOtU!``$i5v*PfO*Hmvsc5LO|-PbXziTCPiMr zDcH>md8#xBS=A3bhdQTC=C;CB5gfJeJHSOhO|^Y?{%`aT$M)3Gc>9n1Y<&8!|G(p@ zzw)=@=sQ0aJqPcKW_c>=mC@K9;BN0BH4yPPx=U-ylLe6_xsC7T*t3X($KvS2?~1p5 z=o9hDzxW@-5C7%A8vE~hTdZsi#GLZ3s!i3!1K8OM?E$yjDB}s>N9@pjnl8LyQomGB z)f4BPF!$i6_(*3g2|v>a--O2|znyx#$*_RQcqN+hE$H;mG-?A}+vsP}uiRx*1qvq8 zN^adUey4mHu3N`XvIP2~Ehf3L+bFKfmeQmQ9?Os^P-c4V7{jf!WZehUH%DtbjV`>w z>#?NIGi^|UpJuoVgKkyOBL)~!yjvOa5aHy$U-6VAvTw@2%i5oORi~Z$)F=A-tlW}rO8`{^&!M#QfN2g-5xsiQGDG88X8|DA}{rFiJ>ZJxPXllcJwQKYH{vr{OYg#!}#hKz9RH)#Ykm4?mRvyTn@y{ z<;%kHdW=bxIDB*{KJ@;_;^@KQh_yu}&Q+bu?)#x*a8dy{LRVzLB~3S(cR4p`Ubv=GvUXF^TJ2uDNkA(hHcud3^%Rdq9zrj}l_}Nbr1Xp= z&o)_PD1HRBdoEjY-y4KPSF|7ahbR9F{4{Uy^7To; zFCCA(%@+ioVKdc;=3Fk)C7-mwkbIC8&Z6`^;cKX3v-y~-(5>Sx!QrUf?pHX3@bv*@vF*nnu09fd55IliFP2LENARGMnFu8!fk71awo^Z>5>59eFc?mWgcxDM zN{kSN^<=-0uOhn;RHUf@gh6?7=jl~7ipylBcgQJS6d~thFa5x%9JO%dDU-;BN>Lob z0IWKvqV=Mp2n3=*76b%*ia~h0DNKBqez4D`MCsULNj+N~8e zHW;HhqYR9K$VGxm*nC5I(P1VZ7sHO}y@=>X8a#wZsCFtGrCVM!C^`WmG8z~6{cbf@ zqADhE?9PXT4s}XYHYS?;!~|+9OL6{t&xj}%)v2TNu^CknMOA(;zxG=6OVN1fk%!~) zN8TX?e#8W2g+`gNx`$m_tR!ONo(HpmS&B2tZW`RyH$<&6vMR009AbDrW`>4A`oJT`11?QlVv^+PI7)+mIxDzg!5@=HtSkq$ zMzemHR7m%bDR0psduWub@Mo2^@9AI$K>u{$ z(orm_zG)$pltxWEbEgjHo5Cbc;3}^@&bii4%-6$Y*MFe=NpsCn=DG2Aj`%D`KKJa=mJ znQ-Oslw02g?SVmpC0J7UgjMJp9xKH_G?NB-?d{2u!J)L<1AzvdX6zDlO+DADYq7Mn z9JNM0mX}wf(X0pe7>-Sh#j%seJ#oki#YU?h^RtU_eRd`mmsg}vty#HaCj+HH&>4pU znD%VAflaU^UafENn5*6J=||somnD80RFhHp9aIrg8B>AMDp!3M-V>q)Z^6#Vv-GP6sz8%(9>FSyJ(whwFWOx-<0p%ul zaO1S`PfrIU3IPb?^hg_5K6l-Zq;O{!-X|1n9_!7kJSw 
zS*;EY&;q3r#qv@uCdVdOug}GQ-n2$@>XGM1t@wT@=8g(h2fAmMc7q7kYQjCvt z$LV9nDAStsrU@mv3qG?Nb*m4B|oJj za0rujF`@fp+wWcXxD%FL`#$Eh;7O$E(`lR*6CLd2S6CJZ2o~h07ZvbcEOJ9;4-A#1 zkg4sf{rhTP$g`lk-PBg(!oddT72PX5PE?gP3Em`rIUKajYxo<~xVL4uW`s;CN`bP9g zzCHS(_r5)1@&mj#XJNtt$)hp>JT#8ION|W>p9&cdmT0Bz{vgrkyc2xQ8=7aD>xF>pq z^mvM{{6Goe+Nw{Q;=sV5k^(02pv5I$7qYatsA9Bzk_dQV7Zz7u{OC{oSPWK1%~Lo9 z;5yd=i11sY|E2lGc<0;S5r+>=#rZeRL|t{if6ru0jE%)>FTNVjz3@tmNM{{kXTLhQ zZi^~6vW(6#i=x=&(v;paIySDhGbSD+KNyQ(pavny4o{{W{>$&l@J#wdVN6Rha@y$& zkd7jeYD&(238Q=zjyY8tw+z3op%kcNNh7D7(#wAOEi%ZvNAdtbcNh~O)$O1u{EjKY z+`^yTd(`G#=pzv zKRkL@+_l=Clv3>jEf!pth zNAA5h9=qqhc>Lb`;*opqi3jhzGakD0j=2BM({bPF+vDLo?~1qGbC<&17mq6Jqxant zk3aZeJn`Ve@#G^9#pCzi7x&(NDvs>g6Mf2aLv3+wc{%FRM?Enh{j*PS(e;sJ)Sm#> zcRu|F=;4b=x1wyReBEXj-PqjxT^Q?sm{hlJn+|$Y_|E6ed3A;3^;XgR^JW;6OW|J7 zTkiKeQX(=Oi!?CMH6Pibeb=32sl?xGl)di4@`dP#C%449o>+ zh3QVOh+3FkxVs6?Wbn`YHRFc?K(L{231C|MhCo{Hla8V`cI>`8}3`qJ)LhBuy)+KRcJSLqMMVmz@RvvoqRa@>;{V)lPT(_>W)$$la zr7T?!JZ(0t%d-iEeAs19{}-Lr>0AGV=TZOYLxQUh{7OGU*92sbAEW_POJdP7Fs5-k zxhXDg3Mx~)TYnL}!V7wvb$`li-3JT&+v=~Bl(+Jw{OC)vgkju|#e`h-u+dnHm!5w< zR>i}HN<(p%cwxCzj#plL)yJ-);dt;=dJ0!Au=5Wc>q8%WN`0i?o{q15 z?Hf^7zx?371Hx6=1NBUxbKm{3lefpP`f1d_dU3TQi#RdJ7<6vRg<3*A_=slJPg#9~ z>9c3!#b>`Ad&kFPK<-=W4=@pE{3(aSv&05q{A0_pN7=LUuTGr8MBg2)kxcC0Ism{mZ znQwk8MyB?8(q;eAL-Byx-It#JYD}vR59~h_b=r~OE(_NzN^jL0zNUe3FVma+(Yxs< zQ+5_tjE;`PgLm(b<+^aIFii9SzE5Tl)Xj}8x>p`jR5L0jBZP64k9L{Iyj(yV+M+N( zn-qF!10IKV6wo{Zol{{EAod^Or+A-E3%E z`Yi$`liJyiL;lWgJl&4<=?Pyu(hm6+o#8IP0Ttapi{c~(MGJT+kl_YR1j@*AVtVLl z@+nNpnjgxS^VDxAF1FCK#a|phdu4ZQ&WEQ1@eNM`E`8G3!UbM|A9W_YyZn#eyWaJlxa}S9iHAP?iFok+ABl(Fqj2y4aJ>C}PsOPR9*cd)?}*k`e_UUz$Lv}& z=Bv$EYi-7g=y!uYwb}xZvMGToWD=ccYHg_v?>6v3o@lCFrikMgo5GhmvPsWfaWF=% zUyAo4oL{@%d3IN1#~^WJ2;^`%OcAMgc1O+WxUSw*#y7_|ofgh-hM_CIyzq9qUExgI zIew8WId>brO!`5QCw+Dos*-~GG)P4tRMR>a4-E@1y&u4FtM zKl_Wn5HEfAh4`J{{}a(=UwrZtKjc%8;J@p#YznHYZ6T*Iz=-TcKl2GSDkI+%+HIS$ z2l<)r$bpT4v|HZOH)u)kMuBj!Ge(FL^Z+kTJl+dCl((d=!=$|At`I2=!l09ZKZV0f zKbw*xEf$yw*Npe=RbPt*ck|*Apq*2>xu$h~c3$nd9z%n}ar(~NW0XY~(wQz?x?%l- zja0SzM%;FCG%j3Sj8~YU2NwwBq2U;w9QT4ju3t{qC=Mol4!$KAeR??ZLik}56pR1R z52-jZGhQd9>ZADGCTH|1*_@@_Y9x)mG9TIIuhR925m{hGQx7(y}>NrJS z*h} zd*EclOtUz(GU=)i8lssKL|7mzTf<*iuQ(tFH6T`J;0Gk^_-7ZP3-8HZg%v^=MD?m_5fMrt zaowOK&_GCDL5iSrVNgsdlET>v-ZT)K@&`%milD)=l8?KoHrDms7Mz>ChQVPr)L6ZW zmkC-u9S0cG3e^M~VPQHwo>bup!I7_VMSEj+E(k~ER;6Z>;M-wBJ8s#g@M*WItPJU= z^6Z3#hDa8|<9l^wMffVkVDF??+OT?gtEEnt!ZcUvarTWjOe84y^@f?61P|%c z8COnpK-~D%NrxUXb76r(Do3IxR_pU!7gOEtVK;bBOpTAlfB9eit@z;|{o$CIn>MX} z_q#8}Lk~U}&prD>jH)gtC#PbM>Xti6XijLqcER0vBGZTYGRU}*7c3J9_r}%L(< zx--5>F1@F^Q`$v1+oFygPiflbcJ2IuEy1wkpWrpj`tI1A<}EC30F%D;ZW<&teT!PL zE;sL9eXbTx{ZWr@%P9)z@VCNYJAMfQ$MFI#eWjCb$b>SJE_EQA^8p0>aZ;#&bnTy4_#W;ELcnpnKVr{Jvzw<}G7Z+ZAE^a$WMo~pY&brEFh?G36|T8__t>dHz9S)B=dAiuQm=zcC6fxE#Cc~ z%dB?6JGAR}2_hzQZ8*$@+;PBW+GlDh)7spYFLNUXEMhj!^u(NUIe6gPf$G z`sy!^Xci2>j;=!;vNH^Qi?qs|1R$7LP{2X~1|55)f1r#r@O*UtUX>0akr6$x+X$Ho z_r@JLYMCcF#66=N>$)YL&|F=KP1T<~O;7B6&h>76NqoiaLdKkoc;e^QTg*fBl$$&p zCh-Wp^PTtN&oC! 
zSG|Z{B;t^Gg8~vg27hqGDqi?3CnsQg)Y(A`9THnEqnoswLWB5FTmIs^?3(i_3!8ej z#e3BV7oaGTo>IOSar8!}P+!dOQsOgHvZ+~npCC^|z|df*oQCCOZs5+Dhy za^s(Nu?iN~huo-R+<;xSyR6;$W`kd%%iSj5q0ckH1pLX(mTzR&P41oV0uIto_NK6W z3!v=A&V`O2wup~`7Q11$`0qAdc_jm065I^3`JO`}6*x?oNH(3CP}uygXE*+^x55$T zR_=y3=cBr#zjAZFz_NpzB3~Q6xnZTgKWW{aVQP|-$DK;ZEDKY;rucVF*|W@o&Ii9 zfYxQmtIOv6vGhp{i!Aj=-$8jB4xBN_vNtE1>YFg~yA@o?EyG>#AT0FI)kb!=I~l&o zvtu%?%egyjcRCQZDGXVXo}D(7%lZSDTn?TdP*wWr)Ps~yYKs{j(#PiXRe$;Eypwx| zxf?~KpB6y9*<`qP;McQ56Qn6x^iD|HBj4#sa;_6(EHjZZ=x3*IKPjCrqAor9)mLA) zJ9-?~MUIV4#N6z>2VOami7e=kBfiyiJq~!FNZ8<;+}CBj<7DN9Go6du9T$fvM7~2 z*QJ}Wu##&KBgRHcF*zaLFfkOP($PnT`(jG(W8<7ECOmAS^VDLyu>g5MhQAsm$+SlokhW6f&?WrdFYL;XupT#)uHjwW;*W&mv@QAz?D8g$ zl5}nQKrEC;pWJ)sKn#^fpoBQ2clvO|C!M2*)R!ua#q9ioVJs=lw4k`xi>yWE!lrT- z|KrLP+CK{=SC%#LcHww#DlhZ+4x+2dOs~-uMF7 zO2Kqs_A8TV0+VQiC-L>J{-61&d17i8s!8Rk_9V-qU_q7YLC;(dPAgTBjdjc79b-6T zt!fk=JaW5pKfvWy-N}0M402EY{3YJ1e0d5lKmqTxzR8zpc# z@f1=V4?UcBa9iNnZr}ue#|8HaR-|KV7zKqK4XS?ZuKJ@qo-`}ywli_0Z?_Z5EIy&8 zrhWuRX7@%zZLXzqt*YO*yxNGlrDmMFz8L3bS7Np`5Sx2W#K`GKV(h-h6iX-+_-wi@a@?<>C=$zx%=+;sh|D1xNzo`_=7+Gqd5D4^K4 zK4aU$jdV*frCoYkYA1q0nIqSrGu|x&B%dfZ#(b5rN!6>bj+F3B3j%iLjL~6oxOOY$ zxw)?N#qYCRvdmK+Pzme>eFnXPZ{#3x4V!R8{TW{cugc@+&s|ph3d-dkEF_ zE4U4>aLttg)aM3uDOc(TL*L3eobu`)K)+?I7~MC)MwTzIrN5s1_9o8BQ{Sz$Vbm4}rtE+&lQxcO9ng`!Q+-@Q6uEE2O+Jc z6!9oeHuzGRz{c^v7!$VA;qH2vZoO7@rwWJ~wWb?Rd7vz1WYK9&O-;qglP4XQfm9X& zm>KHHDq^ql5&^S&gZI83Ry4QN;Zvh$fL9%rK6OBPwwlqa4q%^@_da#x2GkiHQs-_! z%0#c+IPQ%@Vd?PosnPb(S!AM_Bj}2bCih4W^vhqC!E{!aao1Ui!FQB&xig^5A_F>) z9$c2?IB4!OPIbVf4q-zu_S&Z7xP%w9Z`Ca*2A|+0(N-FC5~b;>atkjkMu4Wl1q%>Z z*>wByW3k?B#PsFMF+M&V2c`~0qq-W`u3Zrfy>Z)Zw+Uydx}f!I*RHAYrp_jl?mU?^ zfIjFH0!w0KG-N;$Er{kgiaF7Q+{IIv1R4|rn4h0D-Cn!NI!G3pbc%OdaKkaq zJYiIblDAeM*`zGgo?v+{y4=Pq?R(Z-$PspL-A zyi7FW9l$C#JdJBl5T4xi!sITe0Dz~pR@G&!i5CzSVct-OL@I)nIw3vbQh&VDj#vRm zC7RzB;HQaxS;L4U?FcNW99ekTJry;D$)@|g%daa8@&QbC34|V{uyS7?iR(shH&lq!<$;ol~S7PDDVtnW6r{iD$o6kmN&mD36 z@R9h*pZN*zvbcKrl9U+nXcQb(?%3Fv>6aCr=4n#Ipe@=Jclu&z=hc;E%b$_)5i3RP z@t*zhWilJl+ z?^#J?@d3M8JgBI4J0wM)dqK;>Z(2Z5_R|Xol*fSHu|tA^umggpG&JZiEF{3P@W&@C zi6>&AEAnFzq=lRqij_O!jgH0W*o1LON8`xh{c+bFcN*VY>L|2os~+qFpXO~vJ7Omn z^o+j2YUPd974E-rx13#J&=sTzFG#eWc%3P;iw@zbBx%Jv6MM%$j+Se)Xi7TFHs4!yxi_Lud=?%3oERjp>ye7-{>>h zA05jBJEW;+zhnkaCJh*r@FF{XGoXxBB&9_MM|GC1+^DFeD~SYEXXt3w%jCvlP=AxZ z%E>}dY4=K7yh(h5g`5ls(^gDZiVvSdPZ}L7t4*PIw_TseHz8H}YddaSqeHB)qV;`y z_tGVg>sP16C;HvK*@Xb#plz(Kt~pIstWWOSj}{Vb;cu;`FluLsUy9CD0Q3cDrEPi) zFPcLqL+0)bRP7kjO>F==&ri82XvfN(wOZRk4k2LSJhMMkpJ3Q+3B|*G8Gq}&cw%^` z7H%Gx#gIqG>bpB0ZrOCdV~Sesq_>l<%C8TabhqJ?{BJd>mUV^vyp{#oW` z*x$HlDYid<-|Q_*X{0flzxWUdFc5j zn!;T+F=AeJ8|w;PPw)I!-`PD;r4LLSjnw~!&-;#T<;3F9jk@}U91pl$jrzi3tjx~F z^2|)ktN(K2!sVE`cqL}eUydv1F2v4k=i=HM7vk#c>3RM9rMQ0KO3Yrqrnn1H zTUv?cYE_L(d8E#R+dcf5!8_VC{VCcQ?KKZ>0DFcP<2ozkNO%45eyjTYKgED(9(s1u zM>eLVn=N6Bc$oa2;RMng2Adb|d)JcU``hW{GU=l**)6AqF%K$efG2d(8AMi?Oat9< zFoY!xsbja{QH4!uq_lHs{yE*9^mm)@r;m4=ZkRA>F+egY z?VQgKmfoLb+$OPlcik#K^bYG_GW0h&M)US#C!&s?B|Vq^H>U=2FDi0rY-}W^uU@m> z$ZliHLCHMYA`^t!5C(xK3k_4yktZ&uXm^`wr}niHV6=t2Sf*o+<0+j6KXP zFS<`cy)~q7x;~}j&`%&OXn;Xt=!k{)zM4{XL0Qpr=jP^IF7)$~`V($Hf&+`m?FLmv zwdrr@lEK~vd=Lj@Cey7K6ymRUPC;N&XJKK%V_@ic^tl-0VIfTFLvaV7 zzaHOv<~ixwtMSl%Ct}};<1usTY%E+o7Zt`BRqtNlqWH8)+9_8Q^r$bzm7Ci={n87$ zqJ1n@s;lwL^Yfhk>w2TUK?|GYs08|bEV%ZfTh#}$mqj(mGw=nCX8chUTzypkHk)6h z=`+qP3vIZpddqlGRr#51t`ppoUrN(*gbI=A)W2N1S@1n(!CTxIu zWqeCsw*FQM&>i1S4;iGAf-C&81wTwsn`e?HN(j#)T#=P>50t3udp^s z*ZKMz58c`w_`>tA#uuJ>Ill1i7voFMy&7M7_LcbZ zcV3OBzjr3S`SN*%xf;)(y%x`&yArQoo{h8D=Hv2>#aIyEX$si`DDSdq 
[GIT binary patch payload elided: base85-encoded literal data for a binary file added by this commit; not human-readable]
zNm~Ea7tnDS`%ikRA2K$eH}Ru41dD&Q(com+16TU_F0Hpx-~D`oDnb0Som|=g$g-Hf zGMpV%PKHp{i+9}F?!N2Z$m;s~dgR+U6Vg@v0Xh6;wWssw@2ONS#I=sO0QCCJoA5wg zYoBqrO;P{+?*HttuV-Jdnc0fgXSQ;^kGsUT9A~^;G+_&pnaj+b7i_ z%7t){pV{d->Ii+rv8r2sa?E~tcEh35t1mN_Nr&ml87|^uFS}4+mijgcp2nGWBGJKB z>VcQEanYpD+9573P-gvJ?Zh;?20iHR<59{nHZj@8fIZ=NT;0LW^W@AFdT6&j|H8}d zqaXfM`CaKjNdO-f5KnbzqR4y4*dn{r4jRq9`JO*tZlnHc=e;@ zj0}0KxC_4QFR!Bu?IXAY+{vfZV#<}ahm4*4hn3;uIP&4Rf_fy2Ajreh`BEoE()!ZV z!HNa_)Q?U;TgPgw#5PzxA&2lw1w3{_E1R-+#5}Y`{E1WK!F$Mr`z1jd`_9%Mvv}d*Vy;;(B6BnAps_b4xPOTgI_2aiCSIQW1J3A_M&v$8!{M7hi(N$+yEfnBc zevzMaOCq0T$azk8v8~wg6E@iCb`Ct*lXrLJM{fLt5HwWIg{{uF-qa^Ql-a{Bg~xn*672w|s6yupcpttV~W|7szIGkh*@Gc1F8T zJ4W_T*@I1+B(=%+RK(h*pw~yFLznN zFWkWv2Co6Ky|d>y=q@&mpL<$sUwr<0d+Nn?>|N>v^mF08X!+?uPbYL8z%FUDbsTrE z`M<(HwpO>usfr^_gIeBABd_{;j8|}v9tXL|n7TwIibSNX!;2V1Q*UxdCXxocQcwLO zPVBV6ly<)B-@%B#Fw|4<$9Ka_FW*Qj4w5sUqzPIjUnJ|=Nc5=uk}t7MwK;i44n3JE z?G`>sCk`1u0d@*WI^dXraUyR7h=JbnhexU$@8AgU>kGCGRe*CkZeJps@#>meezUARLxajA*X=wWbtII5W@Lp}6Wr|J{8Ny;4hu4P5 zQ~NXJ(r50`;qDml_MkS#xyQjF5&&K(Zu%V$^0Dw};2>>=EhNz@;F>qrn1cmrlV^OZ+A%OZDiRWXX z0U7CR(JH9IOk!oQBc?LA1%_7(d2>_DTAXheZpm;GC!BwTTF>^O*bxgWQ zgo&MTG(>us+2y2=Mn?sDQXJ)JW;&wyX!(1;qp;5T-qY5|3FlIo)QZ_Ieuuc>M#-}o z*x2wVen+v7o<$J2qGchu+cc+vG9Ip!Hy3^yb9PvxlQEdpV;&(Do9evn(o1{SyV(mz^sW+mi7XI)hN~;<>}KDiT+h&dzoQhvlWcuDT%N(k^ct3tSqIo~maztfY50BTIV&EiXNI|J z)nlyeSoy8m`Nf4cGq=zdPMvD!&R=Y2&Y#Z!+Q>+^qTG%QcUh;QRg(hZrXgF6Db)>Z zprCiI49biRx7k^b=J&|C$sFH5H8m9+ZS=)oe(&wL8x5XOeqr|m+kR7bV{N_t>aYH0 zc6+-ze{S|v$PJk-i!SvOTDm}@bW_p2*KM~u!>Vo_$C2*gQP`zlcSWaiFOu?8^+_7a(bSa! zk$3a9Ov+&#@bkn^pKry4$DOfQ4K6-i$M=?oiu-Xn`nx;>mYpTwIfx+tJQ~zNPrnx* zenGDcXu~5rRT%)lKluY!qdz;Qcs4gX0=)|z6^`H`KII?0D5Jr_2bov*cv^#vc8=); zr|?D^h%)c)R=($d(Ewb_nX<+}sH}sVVS05q7{|J6c>jJJx6*|^ zp|!5sB%So`pL>8QlP+?ThhsmSPpPZ6>AcuhCe(IP-E27|tGpMsaY$c=2(RMDpV-uq zd)yPT_?%;O;fs{J(f_XDRa3VoH8<)#CS;Qd2 zi0mMg16P~K#~O9q1pq6X8*LNA&a~jGWjh$J^XQy)xLoJo@%#38mG%|xNAYiWeY`)%ec5PVW%@50@5|wJoBRK3 z{xZn;T*_=*AFqE86LIzKZ?tddY?wnfI^-2^KUd^nEwunIWlsBRf*U$)1X)oZd(4@W zqjqky(6NKdd+cOoC^j3HVP@w~V47QxBX{kEJfU;ddDP1c5l|m%qOvBj^9@GerZm0c zOB^RWsW%Qv>TA23`q$|~^G*PH5mzOBu;;=ocb*GhUBCsbTv6iI`DznV6rP;`w(WM& zrERbiG~-U#*>;d!=_a~wz;J*lleN^b!vos@+V8zBP{8JoU#;RcfBQLJ8ORXD1N_kg z;^JiD*u+%y&kk*cb==Po`RSUegpIbr>Xr7=^Dne_zWwd(Z~oZd%B0(~UwkS@Mml-7 zfo{+i+(S>8fgM6W)1t$>P14>@!e&32^gAE;c>7mB^S<``ANalY>{HL6p9YT9w87R+ zTUp&`E9;y1SvDw&ZL5r^S6vvA!AzbdhR2{!OrW>MJ!*IYy=S}bx4#`+BU3ub9)sI&Yfi+P&y5V%rG$vAD=swg}OuFpzW``xfsQ^swVlAD_aCye!}|D)roRtz;-}+-9ZOC+4%b({DTb0 zzWUX#%gHp_2KFI0)>jkXHgelzzKO5B;CEKl_1gAMNbb8hguLBR?BsJM`H^MY(JZEx zmEGO?V?F8@%cWvlSHRCtQMj|;PXpvr7da+%Ki>&SZtQ&U0+^T>FM}k6L?@hZeVf|**WE&NSLd|T${*OBZmGdd`JtcT zymw_KMi*||tmjErVbM#5Nw?6f@{kwOwFY&OM{!Ct^Myv#_3H1r8^)u1x7*zOWbE)W zr%$yg_qm4m9=*H z`f?VYc$ECwV5hCQ=ur8h9&aHJU=dpjRmd~;uXPL^G}zh8=ZO4#-C!5_M0U+<%Yi8E z5HH}kplE&IQLhJW4;~$eKX9~fJk?;p{mS(l?Xf4HX&?B|$J#Ib>igSIzwcMt&%E!q z+AsgXC)&pzf3jU&@f*oF%I4yAT9`J4esjQGJ*J)JC-HW7)t|^5ayJFtW@e|`0yffx zrNws2@A#jdZ?AdPeeH+8_ucJ{Z+ft;Uc1~r^O-+^)`NCxX_oNQZE1o2SM>WqTi<|J z=nxmaY@r8sfG>|G=!?61+J#5%jxLc>X0<;x71jN=G3YiIC%DLgj(0yU;o=S#YWgkP z46vFTaNRAHomA8*Cos3S23#A^XcPSakLW|j{zaEMfip`VQC&Jm*{1!J1G1w4?9mn= zP{^qEXDwK}aTEOqoyB))@f7vAT>no-+Mfco&Nomcj*Uxx7OC$hko_9 z+u~_I6KK6wD?vMH1b(q4h?f#U!`K&q5B)M;qa0O!8HbT6ofFj#-QcZ?DxIk9LA^xp zsF$;gSG`SI^UIBK+zz~>r}t_-{f7xUsTytNJxSHF}!#J42%u1h(( zy5iMAOL)(DqXu_?>Koua&y2;h<=Ai7QTSNbpj!I#^3!9}ZMRp~R@;r0<#y%em)i@M zF1IJ2zJ~65sXhDLb>6R`mo}(N*fMH&CYDg>m7|#?S=K1_ux|41X0Q( zjScgjg+C~T8lRyqcbh%J3*fjo%x7Kb2=BL@fRjJr3m8zAEaIl_Iyo?Rda=Fr8{gT6 z&n!hB4*{3P8iHCW$03nCAYW}^U~SOP-9vAD?$2Imzx?Z$BBC?22}=u;*oPx+ZFQxs 
zt)q#rKKH&xK@)23d=@Rl@U4lGDmug=@ zlPn@u7~pMvbDDQ2&O>ir336dkeacly=_I71b>eq@2>aK{w7!XFRB_B*S(UGq-oSe9 z-+2;u@~YkR$8j@>hYol15ih{OpMUx79d@J}gJw*ft4T@u>nRkp&&Yyz7YXldtP|Ny zOPzf%f_+TG>xo|ibKDE!#s<{>6OTY6t}--ATNrzbxGv({Qod)FL;rRcLFbX)EXE7SNZr^)->?;yyoJG&(VoNxp+U+WO4_b)-%y+NQ1A*jz7q zQ^q=Z@UZzwQa?eHQ!ca>ZEL_WV;miQiW8z*?=>k%u=3owv>mv}fEVe*SDr0%e+i)H z$4OVbEf@eMR#)#bj>tt>#lm~pd=)jG){EduJoOl1JKAO7kgV`>M?VZ~GZW)&adx8J zdGSoU^THWy4^Ku@e_FrbBe2}8A|2FP+SsV+iqxRr`>(=V(7g`-_rJ@K7qEA_+zB^9(#m1%4j}8ik#WTD_{9Uk#+-R zlb4Za2I4bz=tLq| zSJF=H9<;6P{kAzcXsa9aFW0e4U9gV)*QYGTs{S}WM?ccOVoudaTS*zQd29>DCnlf~ z_77nhmj|t?yH$VHH+qU&=nP%dNht>R!~v(2J@TVa7|&-S<+WfbE8YvP8Q~B3EOP9^ zG)B_SgI}eo_FS93+Ijd)+`vkj$~$+{T2Au@b}d*^W~!D*>skmjoHCOHnHEmaEZF+> z@cc1pN+%2n;m@JK{mIE_ttOxmUjPJzqt)<-hhwD z6n#d;S({h+P_O9n*PG?Ou+Y zbzjdupm^{5F^;AcVE1nRc@#J3cQP;AMsJDl-H1GkXUgKMVc?&hz-83WE06v;;%e8f*inZJKF6x_`_ zl}-$z?7Daz%v$VF0%3FqO;fSEvI^UPG(f(mEash(>C>g745kq_uQU<$s^*Db55p&2 zCyZu5xc)_=!YkjdL@348Nt(WFI`6J*icyZ2{npAg0J?Y;x9|ssWde6yg*cdJIrx>1 zDb6vBluwQ2>KFa}uY^1*yj}SWh{MmM8k)YPq4J5?8fE6MBIPeDp8*|3NFFNP9K}Mo z6N5H*QISdC;p}tLcldEXE+?0?wH)1ZCg?=S=)N1&$)zg^bA+Pbfkh@f649|~v^6+R zw&|Hu?ezHv+Li0q+rRs{Uv9tnzF%p(7$)b>oof?wvov5cDMP>epo|8UFBKmpY?=Jt zf4)yo88rs7s+W8<7$_lz!B97-2QJ|0Jv0IDl0W%sXR5AYKBb9853?fTx$x1=DtqGk*jI<> zPp+bYk$ndyG&kNcd_&~5vS{LRskeeo9OEiu3F+Rst3UM-h%WF8ZeD|%@$t2OcSe?z z|M|=Tq4iBEk2&@DQjcP^vjuMrM=q*k$3Hi}7Z|-7q$v-ytkU;56m8Q^L{IkmNcr^I ziF}$yc?-{wE7Rm?yzs__9CX$O+wI!5Yx(ZG`DY-Yx!^^Bb=9DR&&pfTC7nWW)3EM& zaExQ=_H@LQ8#^#K=_;NYrs7(caCJO)`q*-LPZ{+lQC~jW4%ug0tgI^aPRz;2?C!_^ zaAZHaCjCJ#_+k2?ed}`R9u4Hkjh!+L+ye*glwHdLd^)-Aezb$@DGAE4pK%a=c7`V2 ze)u^{$9HdUr)_L(Q0Bwzc5x@@%a<>qGY9D?<$HvLjnc8uNZJbz=v)EUg$31NkT!Ih z+_eC$e2%V(tQo(f6Yr^C;c>};frj|w@#xwxVHC8g>?9@qo{nfR?E*bE831$xWlQRG*4UHW5w{m)DvjnPN;5{B;f_b=ma!JqhP zJnJ!6;Gcvc9Z$SxV%>+6i8@#(-ebfg4@E!yde_71{e;g&UGtvKszFiW)I(szmL$r7 zb=3L~4^dixA8!X^w1*=#f#3wK3jw0v)q5r6{CNj}?R~^lU7{QrMm~6}BZ6;dKc~h( ztExFw?-5>p_7EBBxPzn_%<;lMx{umOJmp>mW4$ch=dK@Gd@64ATgp@A6?`2@kA?xJ z-!-;Fd$qMd}moK;F z8#mhNGZ)ZJ7g7J%31ZL^62D>6Dm8ua(m+O=i5g=@u%(Ae*L5EfBY98 zYCrLNpKBj`=0>|QIBZMUai`8sx7WSuq4s@0^quY7zvnyJ^%hj!t9KP0qt- zWPfY{x?n>>YrnCmmUrOF!Re|y$UZnpybQ=&kKkP<&k%kGXtp*tvUBI^<*V&I|LiB) zgZDku?&b26HHxmgz@0qKz!kcD-BtXn4h)yvDI*RlDyQ~kYY>s=$gpy#uC5*V`{;59 z9+f>8%p4xp!jtS!L5?#~0d83U;O8hzkL=kehoaVGevRwkrN@oaJ_L?wsvaLtU@A-A z>(ni2lsX~aoork8Y%^`2Y^N-T`4cJl^lBixyoi<^mX=Fef|K<2y%q>kPH@1IqHN_? zlM6l>soZin~P-Hr**txyB|(+Xfz&*SM~4 zwHIDmZC`xpTKoLdFSN%#|9pGw=@;7SkMa@_y60gl?hV zn$L$&C-$iWO0@Qq_E&Z(@vMReb4cJMKXr2tE?Eb#qgE*Mi!Z*^zVOT!+vgsCvVG!{ zpJ~7NYroyz`*Xk5e)j#JXutN+KW%^X_!rw#mv6SK8#@h)hdgxN?XjVk1f{&8m&Tym z#Dw&db;iU$?SY>|7|ZVcX=pV?d_Rxy#@9X4e)O;XmG&3k@s4(|wb`C|_L=tli_f8p z4%+;}Ty{Fjk3(o?UA%td8vU)^Haj~N9j-LgaoyVI^c!8+1pMSnJ@fjN z0}QCi9X$-kC-2r*sd&N}9s1eUCvF;hJ$33{>Fu3zTQ9r64tSgzBrbZwJLT=7R4nBv z1n9!+wNNF@_`tRp1*uXcQegJ@5FYZ`A5dmWf7;frUBBKQ|Jd)fg{66DXuT${l$i@; zlqu`Gu+j%brcx$gwEb%5D&IoFFandd@JR0d6t2<&*!#%V_Ku5zorJL+==2P-Y(1{L z>;t>NgW4+`ikt}xaRjt*g>|%$(rki8$%wPGs#j$4u5S2DY(Lc6UdpLqlzC zooATnPSWL-Wy-sezUS=16!CZ3CUtIyw$ipZ?W-CH-BPFdl#VG`u@7gFsKdmwJ*~E{ zo~tdi9V8azbev%b8`{@|cXio`|2^7|Y}$gh?$nfT_SG}lJG9%*-+7@u{7qkv4KfBk z{9!u+9J>mb@?M;{?}PIeea3?=bjQj{`+aPkk37597NG@ZVVg#kX)|uwC)^@0XeV7K zF;SQ1#@od+r?PNufyj?Ma98`z@BX%S-?w~yo0ulTmU(KMmc8MLdD;_0XlwNVLLQmO zBm;7j!7$|B1(1q}T3kM>OH|p?H7A3>LnrU*8|hWYB4#Egf)2q&$u z^pEVsb+VfH@=I5?i|_g?+i}84lu$M>Oq-yZr7>OLC`!_e&kPj!!x5-$W+4jV-jO4U#KQ z`}Il0=_$mMeD{ziY`+1r;7M3B^Jm%?{%hB-rJj$`PPlN)vO3YP+|11`q%FxrKWubo zueM8_2s4(kmaVnzwzjq!{h^#MFW=0fRL4u~N6pMkwS|R6@~qRpT%?l)ZMK_~d7nC! 
zep$$)?C{`!Xv|$j8`_m$>bmi|_gDg9Z!aIk>m&g#De~Ssb)0o5cAiYoDls2zmP8P~ zS0_Jt7B%w(mg7~(th^%?|Ju9QID~uRji;iyxH;d~CJ)<6X>iAdQ|-dJg?4svmbjFg z`sMzff(&g9$AfhlzY{KcH981ezZQn=aNol&uze3$#ghd5gG4XuhwfC? z78Vb97a06SM+RBS2G)jK8}#b=DA0UH$-l0S^IsVdKYd;N)BlxT6mXZkvd{}%ui589 zpMo#HxPa|x%CSFPh$#xd!#nhowtx-|;s@QT{(Gf(MGFv7UU>=aQrEeZr-)dB^urb+ z?;01Te(Gl$!;P4GmG`84rtTBp_AlcQq_1${R9jl(-NZ>jg(L1fBt9@*C`JCJ&8MQ! ztxl>t7CJUr_MbMdn;*3|DHS|IU#jzescs^&7D{4&kF4sH_{&k!EE9Q%Ljbo^vJe z6{>F>K0AIs1&P50`?c-X)fDTW*TQ=hxS}_U^K9=8C zyfe<*@zJ~`qxvkbN3LMxsZd=>zP5{_)lL_*Tj>>qJIxb+rxvO(0UGhqp7muN}+p{1nUv0C&(;l**7Wl^|Y_H~zY$L4jG|BcnQoDu{^? z;h6x4qF0@BwdRo0xJA+C5T7m>lFd-`X4u_g^PH2KXzSyN6yMq;SnmHgKx)!tQbmOU3nb9j;pR% z9WPVwx8&26N56cxvVZod6XUs4NQLP=4kbx8u8hqQhom=ek9^6l|KZ&>=}EO?Hi#P4 z8aH;Zx{gmexD_|hjw*VG(-D1bbuDw3Zxo96jxRXr08^{nLCUn;l`rs>&(S{(gmP?+ z6U**uI@jj!d9=0d>2`g2z5SDa@_%c;@(aJyE?l_KMl{0491L|(#T{T8CQkU&bBr|& zKpP8CI^45N$^aQrN5)m`7$C#@TWyATlNc@Il-o4!RMW_DP$`WjGN5^s4z*+e|Kfu< z*EgOzF`+CrAQYI!fY>~rs5`hkn;!Vqm3=D^uBU6%9&{;Zi7MWg!07MDj{~lS6Y2YU zgd#18A`1RN$%fVPV2sY6TEb(Zt?zDP=)i{<^57d{*#V=o=jy6qcl^pRbOalWl%}w$0OVJT<@2rW7(e`WPyEoepr<4^S|`ciN4cSKzY);Nxwu zwho=g+6$LoYVZA-_tDur%(09%=8rt|V0-XYkF+~4T;$zv_U^Y8I&*$gRMAszGdXHK zw;a+SddPtp8-I!gb*;ZKvODJlvSra_7q)aGSQuWWD(5?IdG)8lhpd0{3HpTZz%cI^ zJm90c6<760@+K^kq2ed64$9CqPNg#sI|f>v!Y@r3{YU*aj`;ySdeWVoyy%Tqw z@~-RZ>v7{&GOSUhT=-2-aUL5x$Y9y@>?FpMofdaHdYs)T{B z{1S%zK{{vqPQsBkg9F5^`X?{cdDI^8%rRg%2ln*q`CW0!FZ9Jh0e-tW#HE3mg68UI znP=N#b(em^>3G_;OO>{`Abs)d@FNbqDEjb@{`6U+-LSDyJD1SP75;%&vLSxzW%ZFf zJu~49$fR-YfZbdjwC{P>UvAGl_iTIm$uRd z72fVzce?u8T~6|=f0TtC>bKv!-56|U0l$#nd$2xzFY(-zj@MMnXo-C8kOtsz3g@O0tytulYd@u zeZ0U7j7~ zZ=30RpP%8z@9{YbKjE}VEPwKGw+(RvuY=pguK{?&z=bWB|iW9umi_)X&r;8}k2J8>&6B~zvlyzv>iA8bgY&+1 zNr(1K2gHff$u>FTB)P}BfzSSMKIj`!}!;X>YJEHq|{_`@qu101U9*aqB>glpAsVgyJG;`h3A}5_y`P z8E$vpd1rgw>tBstd3Af>fd|^fJ5GboB>GZauRcF$V-CW&8yI_Ngmg|oxByIfkXC!} zRz5ok;lfJ@2Ys|V$lJxUyh~ekpGQ(U$lzd?yQVkMRm(S5+m)BDwHGeE)LwY;YJ2wi zm)q*fcDu3M+9tBPG>a8Gb=Vfq-Pxw5$J(7|PPMOj(}T1N-_`DV@SbKzVJ9{sGKH+? 
zo061J+r@>5a9k*@-~Iu3SvHH= z+m{yF3E?71VCNHOxC*cM-W|yA?NQq?>xz7=u&Oh?GoCOjJ!OpzPV%kovQa?vUpClXGy8 zUw-#KxP$KmcuL!)Q)k-iU;SWv%bVZaPR}p4wN-b)Uuj!w8_1u1X7VMmp9;|4w+?J> zY(>^A2cV!W{*WoV*HAgqLuAF=(^8Zh>!|~=p_O>ko)Q|Faly7b471w}eP(>~R4#d} zK~l>mkIlmYRRYZ4OL^0!&quu4vF%tuP0VX^PWcH1-ZRmXG67qd5Lw;t=7JsS{RTR6 zZEX#G+1kv)Vq07~)8=LuX=CK$BzkeS?bvRU&=Xo_kXQ5f6A(J2wD}nW`%F{F&l&pF zqvPnx?d>))PBDCCPhd4B@0*CBn|$fIQ(B&%!g!gr5qUb0G&3i=f+2SFW7c z8XB?fD;-y3rbU;IOFWoa@w1m}oid|e$^dN)i@m89CUI=VJalDo64UVud6D-@*YoKR zTcPqW8D*3v1PRx8m8+Nzp&LETsOY|zc8EG;-Glyz2iUE)0WNlPvGmwpzUMtYyn(J* zXs6FFk>ya^Sl)mqw6SJCiq0_~a8hSjCoPwgYpKKJZvNG#P_8)Zwq>!*&>x+t%MMO+ zr@hU>Q|KYhB$MS+$EbTOqwV9=^dz?00(Qr6d&@U`eS6aneh)^>Civ`9ueCF^<0+4L zqK{?F&gy#GzImgqT)xyk`>D^ipZobgY9D=Ozdd|vqD_zZ$vf9$q4Bu}JJ?)!j86=; z^GmaBX<@3Jb^{7J?BPcqYG3pAH?^_zry${4+q`n6VRj(Pwhz*kl0m=hg4Pa0iBwxb z*_DIT1?T}0(-(-0sxwVk@*;0)2fw@tYltiVp;dHb!Al*}IRVLgWTd-%cl@CIiDG=H z+=rDjFKNuZmodYU7@hG7pFVw`?lyNg-oZ7!X>3@ljf(V{7TU!>t~NW!_Z@8sc?%s} zv^p^}-Dc)bVLD)2jbPsokU4DdUD|W|+oCo;Hp7MONuC4R3GGlnp))=;-L_U&+veI0 zc!6#P2ao9=o0+2BUv1kP)z?@47Z=dc(=%;-W9{Op@e(62!PV^8m zpiSZ!(~aeo_W3Wq)Lyy<&j#33$g6Upja1`+SoRqQL^t3kJ)##Bf=*sV!@49@w0S~4 z#0*i0d*|C8Y;Sz+tK0Tqt&LKc?ZJB68tl}kdQ41#M$or8B?xC8o&ewpP1*#j=#32w zHWyE;iigLLZ{#PsI=S-fC%8_!j(^fTZI(Pk#xrh*qvgo`$PveK<&ik4^8roy`+kWu zMDXPKp2o1{#NTv|t(l*T!I7Bo)j2!cGZ$c|O!nc?qa(w$2vQk#VYp%TY1O)+XZqM( zoyA!%`+j$iV;u0c4@AgKn~9&k2xxP+%=%TE9q-s+DL`Jj8!!)PbAnTs@6m#c9*+RoRmkly8?h=wh(gG?6E8Kz_ojj!!*`=}+0d z!Ld};M@Z}To5!_n=Am)uyZl0WIzceb-RU=R5p5aFNx)W2=hul~9=c)n&e{8Y^9%6ePBxk*3gX1jd(O1pN0_8=X= zaryvNesk0IsLd}`#*?(JcYW#}*Xh)r70OF&x+uynZ614g(#0Fk#yvdz#K=GTJ3sK_ zWN>E;K1;?0iHK!~UE-FI$!Lj@N(BIY9i(sq-MIRDw?QX6_>E#vqeAQ)$7npmXfWzX zoJup?I)gTa1gc2{t8=u*O+(C+SKZgoW=b>$PLD5cWyP)Cnw z_)7!xP~q8$vQgl0|iN4Y168LgEUK+0U|`Xp{WCX zp`Y2NJm%-KapjkH^LCdM`KJS?vg~k_9_B#@GrLRecnZtn8pH!+YYn>pwgo zX$EzPuMJ`TiKDQRXO&2L5hrx$#<8r7gdXaL5kQ@il@f*t_9^z$3*qvyum>r#PC7T^4zkNB?=H@+_!SBv4w)xrlHaR*D?MaWz z=N@3JE7I#|M2}URk30>w58BqsR$E#+)2^>4b-u140hY}DN z+=br=ba$}F@5m{24xTGlkw@>yLneR4T?s@s(_mR9{HtN|UVPS2Flnu9)K9EC4IuwQ z2bH<7N(WX0YQ#QY9a4u;nL)(B;z~57sPZU&iZ#x9tloqs(GBUK)3}AN^53w+ClgKp zaJWUC<1x3{k#GG|#4M}j2<-AmdturHmR!37OM_h*@m(DIBcdX|uH>~Yl26i*R=#J) zzq!K)b+f$X))~g&<0n~=TPI?qjs6vfpWujmLId;7_w|M+3HSR}>Llp|VBqA$24$8w zPWWtNluHYFzp}d4Tt#jDb2Y5wPdh`IQy(ag9TVTf8q(WpS^mDf>KyS%ha66oq02LI zpjPBm{dA<~Nhdj@Gx4=;vtEm5)qVMP0O!!V$_4sYy(cftGv9-gnh1g`qqn!E12{-L zZp9NjBO092#r%*17dN>o*~xXvBW=}9u6P$m4gDA*z)-K%cT>@=)-!ri%7k^cI%AD| zH|YSmDtn?0HrLvQJ_ciAZY+ZbmENBEH=2*J~|k9w3W5>w&R2(^Z_mMaMz#Tgq#MJaK=K8EF=M-x;>>P6&CB*!N; zzLDn%(~~85sXtGjUTXilzx%h_`Mb}$RkjUQu0qq&8+NeVZJ>d-hf%5FISL(nd5x@z zQ16J>x9i9EJ`VB8wF=ya>FxbwSO{|B?o}?)av^__;j&jUpz9!TfA8URaATkzOs)$Lk1B;w$+5b+Rm8x}&n5jN7~Zx${4fKAp%DWvc=3AVC<<;Gv`>Uc7HjU-0zD z&-$makjwXK2g`NwyoXl4XbbRc-Hr{Xw-Tz|N;)T?hmqO9(Ow20>VD)a_&Ul!=Y^!h zqYBb=8yTO4l(Z_*fn`1O}s=Gew$XHJU@??@R?6k27aoa}+ zd5rBob|3TtuE(=#_nFti9JUO){QUV-?c2ZaN7}c%?d|R9XTH#0y7WTZylDm_HQ}Gd z#^@}5QHNpA$oLTY$~M-8a_B#4L^xDx8|cfI>2!Yju`jgufAFL2XMf?>+E2ds=h`2B z?2pU{mO^iFaO4e+OPliC)@9Q@bm4X zpS{$ccy6U#THbHhP+=R8con+OQI^@$Bki7h&bGhs&NsEc`d7ZceeVx`f1AJmq1F}_ zX~OJl(6&s^QC{@vn7ZF~h(fFTD0A#5F25~reN}I!K~TmBw>ir|qB6pBbQAT?g)1IU z?5N+TKKaS^`ycq-_SUz&rTGb|8`m#KUi<`$w&KoU&<5D8NM_z$jdifMNxtNR>}X?n zyrTUgCwo2NWp{I%_LN{Uoz$M-K4CkH;@{mGw5|0)-W>=`JGDdGqAfxm_Cs74>~7=( zo03>tu;#DLrK&t#1x&`pGYF;@J}t z(pQ4+T3R_yeqyxUd-r|qfqNf7#^*Dzzj|#0JVvOe$R6>GzeoMvCa=M+cEFHd@n}0_ z<3RNm<>5Z_C(j%iDFo;i**A$Lkxr?fT|!yRv@JF0XpBz+tk38^T1~V_e_(FUBxhLAj`c3%g=?wOz zNEbPPCG^wGM_bhXT^4%NKFNpJue25Rg;j*wcglWt$xx0F@TuK^)DQ2Ol%ZBpPu)3X 
z|10X(hA{j^?@`aT9c*{>6xiC8brh_9Vc)UmY?mmT0+YqPmfN?;%P#4++ZOzAaYz;q z(N1YUo1fneSC=?(v7ghfM%wJ`nVdYK-dJB-N3U+81NLGgTM~7dxw&Y-&!Wj6C!q7` z0UK{@LaReQKM1YVl}-RnlHSb-WJwz8A+jW$vZD>0he(%a+CDR=1zqqMTO<945hp&a zckmgxmMf~68Zd=sUbfXKy>jBSfWX(vrhjZA@>S>LZi%{hrb%1hg>}w}g@pM>z0eLT zoozawg>8Ic!EX&9!u@u3?38Xis%%iM?m6fMKX6i>wPWye0)Gd7otm0zfB4Z4qvQAB zhbKEuphG6|$v^A(=3osTRv$GxAbY(Ud95-kd;8e=w#yn;7R9wxy3HNug?{{}Iu9U?>PTbk20hayWs*?p@kS^CfTDLbU(4lkgqWfiJ@@4kLm_p$>Vo#w=P zx;@4sf7=&z{rtjoo13Q(i7bzd??P8BN7If>U=Pi;rMabcX7*IO>--(<^wQaO`s`V{ zP{VC=&y(Z!sVD0c!?7Pf?>LM;s86&+M~i#PjI?oCPsgFJizTIV>{k%?L?2Hj7z_%g zb;i$BrZ0$WSr+-7zB{J|FSU<+__6lb6C23XaGR5#&|=Jb49~{L4%*_W!**)%u$^8wY-di>Po5oW zcig?u-ti4@YOndmuW7@lPqm?qmA1dUmbPWOsj$?kjJtNWQ^q2skazT^^jC%>$4>G!`XjU^ zhoq}HFv`=I7*{Rns7+YN(<2Vto;sUslDQGu49h&+HF&CRSF)KL73`Qrq z#a@2$#uNFFi;-z$8GB%`aU;6ku_3=}?*yZ5fp*=<#3bhIS^Dj>#BFW)`t`QGzLJGq zv)H-f;0dI$wfIvux3Kv+zS7KwX-@HNjfmPRv$*lYwy$wg6#tk7~4+B0FHP_E0C1Vp*NoCFG_GP zb+Ar3LrVxZ@l*iv!uYl^$_xHP^VGUpRH8oVAdhqO*WaJh}zp zwa4Dur}rvh!h&ipW;2RNt;t@Nuv%_3{(P{$5V(2cO23ztgk}aQ|Is3NiEtS zu^)k(I$@hce9Np1z!m)DojS=&T5;KT=UE%g`kVGAxp(ax{MTL&-QaiiNlO1mKRJGa z%s4zIt(0ryXDk5^`5Bo6xO&aJNNB`d+VhUxM@G3VHM@CLs zsX&gxJC_C;0ql|UJp<|xQW+PC_!+NDtKvvrA|-+-t}@&zygyop9MggFPnT7QR7wr4 zU6hJ2<1pY&f8YSWd2-LY`GT|hJSPnhsB(8Fc8-WtsY)CSKjZElj(?00SFewZVs3z!odEsECQ;PTdt_WZgh^WNlD_}OIKYFe3aKx%G|W@-JQDIQ zUjHtjPNotMdhqND!b4Xdx&s3v_5i*d49~R}F0HjUe(iU)*`+h>lb?99eeZYwXghUg zs?9Ap8EmI~DzamvTSsQv;#B_bKoEDsoluQo3-MiKF(Um?6vNwehUj$oeMDDndR)9G z064*4E3?u(rd*>odX_{zXFdw5O7C`uQYrcG-h)T)ipP-`aM0L@1Ls}_P1_q(4wn1Q zL*kg;*W-I%FMyTgz~`=Cj+(?7md=B%DR?`+;`b=+ksV^xI+@rolH6%kyPdiLUkvf= z)_1@L!(Bt&33qqhN=r`w7^VZ`gunEfUzlx6ON;SeT3X7^`$^K;Sy zaOJ98Md#!9ijy)0=aMxqPj8r|ljUSf`%NOHvIQ?3toEmlLo!+aH}qnELl{cQh-5t)uD|7pRDDeFlJfM}GN!>R>PNo_zY5_Vm+F zx6Q$JSy5oEs>?NoV+-q4^+cG3kN_)Bu(8eiATLFjEA zLVrt-ECQl_7+*G|&WQ(kNGk_H>x2ovduN>nR_KomN6)CUxWFZR?p$@oD?jy}`l63x zaiwqILob~&VLanoKI6H_$4=0mJ3gR?I@9`N`0~vG`A)Ytzx8!(b!)dh_4%jTCY@ft zX_q5lssFp%eixg3(e;~jn8%ReQ!_JddW1T?v)QJd6ao(imG?X{AGi({PfyP#U%xr+ z0IQwFy`%QC_;s+0ez5#qnRX8S<}3OdIatSNU!&u^ys_49EU#vlK$UM2U4=Yg&?`4i z!elZJxYEIs4fJkJpMHz&-%By73Tt3}*pAwT0=lp3pPa?PJ&1Z=Cx3mN01A)pyJ9U3aTIo`9>n zHIPJ|K&p#$k#!$BU@5PzgVWW+_(y>H^k2?>wfXbdXW6-mdYrz(Z%c3Mdb`&x?v?ZJ zlhon|VOKn;oxACZu0j{& z^FE+`_frSdtJB z+cq^)N?IE3;DvbY(018I*cZpt&JMI;+f8URk6yJ6_FLWBPvZ!Pr>>~KcF^CkqsU~K zjo?dr>=-v`|-%lO0$G6h{}t0RY=+7(y&JWpwJDR8}n_bG7RJBhFz zlV8-g<*U~-boo>7`RDDIf8iI~|KdOXG1|-NcKPa+Oz1h#dw`wpXKUo4-~YA~pB>`X z1Mu5KwpP|w+w$7YwzljpD`hPZ8>E&o*Hw(s*w11FlC%*>8>-lHQVfdsU6=#PL5JNctN0gpbd z&btE=p&9w>t6yL%W1`jZR@5wquINm>kh%v0PVJ}(kT3k3p{a2S!e(J9>UUYyu=&$9~&35_n<>Ws%f2!Sa#~s)e zw72x#zWv+3qkYRae{0(%{a}49I3N+RWoD-6havkeh_Fl6SV!?`)icgGO0JHLsaUN znM&{}LUv-^$o$|Iu>gNP)aX?ZPIwb$}S{XFR>%S9h`9n{1M=#Ir__nQZpgZsA1#K+b5@kR*9?Nh0E*=@ghGz2l z09s@M0vZ?=o!&jicF$n3I|!AR(M-&1j~)Wo;l;5@+G_NSlbGAIZ7b^=;EL@CzZaHH zq3f_;$EOp28XJ0kehC^+XUC^>5~g`->!Kc^?>;(x54xXi3ybtkkiU@w+D+Qd!vpj# zymxnS_@BC_d?T;Of@P2E+j+!5R)pkno!ycNcJI(Df>k;$UiqP}cKSd)96%>sFRvt# zkEs|(uH?2Hav7ZsEd%FEoj9L7aWb;5e$TyditJjp4BG0UE+rkob(+~o8(h8I_OD)T z|CfL8pSLmkgMQ-CzHAncKx0p_*`Z(IvweN_s_mh1oJi$IY|Gzls2K8H8^OGbwpBLK zl4tsYYqY=k;d1NrP=>hFNRuD3sr=m~z&14s68NB2w^ zAm`A)aRX0IEWO#`sa)hPoYJuspJ(~uK%6dZ4t~f`;`uC2UAvoS)5MRmQm1rIF!*f! 
zwa&XsdU0_v<1V&+8ylO!YsTHTw6h0$>pV}kvv-}rE}3mx+iUIfk3X37Tu8=VinS)edJ)Xv{^ zzTNYxhuSy&)xXjX&s}Kk%H^U1@T~I`EH)rBO@0F9(f4w_h-@BqyxwP%xRR}76WjQ>q zHky#!mHFs~$elW~XkYELF#CE~vY{R{ymYg5-nyxbs*_805|6Zj4<3~t(8JG89&vr% zI@9Q?4V35PMB9)>58Ov+kCf%T0d21QMur5d&L5kdYeO^IV=fMMB5%;PS8k$Dw+Tl^ z)Zb15q7UeVXQEHtaDa|jzIrL`wsluovJK4%f6!r^JZI+4kamnbx7&@EpTn$Mj!ig0 z+pxK|O?`xB*nzvWS(%i^{+ybf$=Fvv&f?B+PX-Il?qH_aPEt;IG6D2}Drw`)+fR)6 zi6i+Yy*vdebb<0(0NNR{^r)W+lZ+?dLf@18fD{uh1CPdkX{ahddwtWX!YN&lpc z+>r>Jo93E{)HRuDxDaksq6JmUg*t?xkv8BSDQ?GU?*@7sEhbe-b31G zSb5#@c3EUx(iV z?fxoDuh(+&uC&Zs`PE&S$3KLF7SO4mH_D8b5OYT7n{#cv-y7JNGX<^oolaU+x3@`{7b z=fpAGG#M8$AMqkw{}in7>|q{_lRAFgx%l^V@wxLmN#iUG!1ehnuLhOwk3du8D8Jry?Scycl+I%uOHOo2O8&jfW&*xYaf|I;e zSC*}1eUQ%w=O!arJ|fivu2X#W!<|L||U`^O2sQ<54vM39(W!ZZLr zE0_W$zg)!g(vYlp9deZ{x6UW`Vk8$KDvKy65$*5~@?JdGY6#;QU?s3>!4z?^!3UQd zM+3o}WE8L9NSr9PXpj68wUmcwI>?p54)f_X9@ek#HYLHwoKjXZmtZ_7EYuR=Rifi7 zf|s~o>LSmDMc#v=xQe^?C^H_#*{}{!;>Dc=T+b?>qIJDX)1FtoJNaI8H(l>%nSA!H zEB}xXn8rU+LS~U*e=H|yeB@pgv5dEqP9rmUo`jRT@%1B3F_r9FmmH*o5FFkpV zEcNBCyGEU_wc4)2<0l~$X5ukPUD{MN_xy5)W5?JE&&oNWNu(0r|Sa%I|M|f9KzrOpa$)A1)S5{GB=D}&}%-b@%V;nI5&*O6Bt}T+ z1g?RJ?$l7}D`D`U!QviV^tD{6MHb*zS2$S)^Qw1U=`Y!AIh6A%iwimATXqqo^NrC4 zpN!)Kb___%%muXQa&eQtKF2sQA7#}TQDBxsZv+*Wya~6yX`AiwniY)+E4mnNUOsA+ z#Ly-mJ9;-)R@#jl%gBwB%#-9V!8^EtAw7t$ZjcXpM_-DU{?HU1W4t6Gk7FL`#DxHP zf`3-T0w;d{a*O-`YzG5#8+k2*8l0h(`Xj3x;hXior)^G1=<1*HCg96d?=eA5UW=A@n-lraa&DW_%8_{ktE;U~+JlPD|$WzHYCxKh=l zj>ktPs81fzzujJX`C5C!>t7$7U%GS&Dp02Ce3le@TE)1babR%|*$I&E@>U%oId);VeopjnQrB1Mcr9-ZGWoyGy^ahV zqb*oy(=&7Ei`}fMSFd?$fG0$3?-GyBnj$p`uBpe;jCbj)zV?$bP7e7kX$OQCm*&x< z+wI4G_($67zUCX?-DL<5JUZ|03{{TA+Is1y5YSqbmx5U@RT6R1oGSw7^GVnFg)b+= zEIT)YeC>n!Xu0^wMMU4bfY^eaKTifkF3NR4Z*g6iw$v@|gx*NIe0#de?$Vmp3C*T=QKs{hI_Pn4gU)V=q=x(d6+$9(eAm$$-GUie!#TOnLJJEpBV zQ?^veZsya7d}tdyGToZrrX8L{#klLn)#G%CfoEz_2v0(C?qrR)$7a>Zx9sSYC%NFI zK~C0*P@HWtLBc_ZwUlF<@t(ed39@La+6_(glRVZh@=rj%(N3qyIvzX2cH9Mi)QP~P zu0*#739xfen)s}^L)p?XB`iuIj9{T^X_UFOAEm>Y4mS>*Nz2{DExUb%;H`LTM^{eU{n(7ubG@}3m zqLI~Z0k+)O$QH&319AkRKdcA_%NCYE0zyG3Z2hAETUdfNqG{-Ps;;i4>UCFj&C?t1 zeD9g(F|$o#*V|-aChtE7z1ObLY-|sDG9R&*F^@!515!3^F)&F-$|} zdQ*lx@Xh+fTUgq^yd|LC`Y*lt&xA5~OFQXNi(_dY14Rd(xHI5<+SV57>&SEU*(ckF zzW3wp-~7QJXy5hzmjI?fS--Nq=o@#w(H`DCY8R{PrR2kpzZ?zgYpebl~scdLE# z;ZA#PYroyv+;6XM9ksjL$L;R!al5yF+U^460d8~PP}erR+T%E&5*Hdc0h}F?rs|DH z`=fWf_+0z_zw4vz-}-^y*Z$3a`-j>S@B3hzUc27fJb6VpeFePEE>8H2Gb@pi>^c#f z2)%*j4hiu?LQkTpFEVJqYeeWx8qpyTLGbGC8GW4-z%I7^@qhMD+PypX+KbOUPyAHd z-LBoJl)atJ?a0R7&cn8kjThD%ufN{D`qeMAFMRQf?X$1E(tiE(pKo9P=Bw@1yLa2` z4<5F=^c%MxZMHY=-EXhH_C~w?&2Qu(@B@z1gZ*~>(xrB7V?E;mJ;6)|*k1T(UG5sO zzqi#+orT2i`=$vZ#5H}b{hkfAYeT1e(=7c%8fi-DW3@S&PB|6Gb8o*t2uXZnvn9U? zr03ea<`G1{4;?FC^j%m;QuH@KdTnu@;JXoC%P-4XhNw&&!{02Xflum9ExI1@mx7S{Mt=+oyMtkG-?RNhWei|@il(cg|(Pvs) zTW#-p?{~Bpo_{`mOg(fGKSfSovwTt`oRk%Q*%K#^DyTow#x3{_`cKM?yM8F=js^2p zJay|MS#l9O^1|~kv=?7|p}qa>Pqy8It+s_upB$tA5Ym2q>X@A22jfur4C%5xwREG> z*kA2Pb{46HG}==iWM#(_(j31@8{42Aa@P!%Fn%>-DaTEYS!aA(*s`r^w?Rg!3np zKHm1B59#NDz)fiASzgOg>2FkZ7P;Usj69{!V*>q6fqV7UEICrX2MH?WB9h}IatrbOSKwNAJ@|bZo zc@6QIQqnea?YxHVw+Vq~JI$e{#Ny>{=>7JcY;yZvyh-M)PbTe*$iAGEdARcz~g+oFFyKB?Va4ALlXX4?+w zL__Ugf)j1j2ar#->jAArZxYf!l+Cw~$wM1pWOqmGq&!z1+;PpFf!r$o`FFmjJ@L+W zwkdf+Umfa^4OgmZ$vAG?FY{rl~RKISvO^7;1ZPruQ2iC@NkURuRthqfzAQ*CW! 
zrd_+b)~;PyY#XbKZGCO2z4X%C+ed%r_qB!VSAmJ|y1R=F@rh)ObK|?B@3BM5q;0Vu z_R6L~?`aSZFa7cG2OWrLP>hxB>WB{pM3vQzJJe!`=zUKEb%SGbA{# zfugnP1$SBzJ$!k51Lp!EX6%A}hc9T%EisOF@c?q;&R9NkI676<+!aiJRC&gC$wT{n z`ig}t7Fk?e$y|i~jEm&_RHC1(I?(>_@3uqw#6{;&h|ewqM@RX@MCKMj>cEHm^aJ+o z$G~uR0Cy|Wo~uzVC|z1!3NL-z-+t+EpYaX!)DPG~e|C19w{VdNz7J{f%jhFRD+)%h zL*~w_5Vh-ywi12|UokO7nM-yf2(m8m;S1N{l~+}6axNjr{W!)PV*r>&-QhFW%317C zfSZq#Kgxsvy#{7Ms^B?t7a6sfF^n6ai#lTY*gNY1hM#&cU7y>R5wBmq)SkO}9llAc zJ@|y4kXNkT)kL{MqoaH7M4Cllr0S!jW&C8J4CW(aJ)9#K4PU>q);88I<8Sy9pq7GQ z-wS%~PL;Qqx$&HFwDF8-+?jf*2~T=xW{1yIyeod1tR7;cpk2-5 z3Xc@d`j)kTo_nvcqouRdZT-NvjhQo-R>Gma^nY6W#Bp#oPwFu)g~;RC0|cd_c-l67 z$5|ZNJd?6cY=Uw6f%UAQ%OCq#d(UtAZNNBe zds~n4GSz}MPv#&)emW%ebDL+G!Np3!x#L>2wDPW>b;Ds=oejAXQ1B)nZJY2RL0e#& zfH`-$$#sWU1SuIC&r{9=ZH_z|3g;oug};{_P7jlNGjM3@q2hZmf6n}d9=0hb5$kbg<3=W$m<*gQz@crg&HRNQb^*ZD%|R%W&k-Te znAD@=MlfFQC)pU|kl;PvDe8Q=kBl zYlBK2!3%)%AgYGo=(&jT=AfeVP>RAm1=2pU=YA5Z-9=2pfLD1&-Zz)2`#RoyUm=aD zOM>mT;-XwHljNR9+M9*;+V1GJcvfnMO@!?lybS@kHl4qk46U}fwO<@r>E~)FY>{!k ziDbH>ko;6qa$!DUsk|ja=S`{f1m(rb6RK`Q#Z~Z-Bp zPrRe8UcT1uy>X}g&A<6~+PA)Ox4q-pn>ce$LQb{)y?WTMN;pZG2VvYeapeSqlZQHX zJ{Zfl-bin!zU|40E(y`OAY~(N|XNNIlzDBMC!> zpd4qN60Ygks&fpwHZ63B-RCcI2JOte9wB)f8;qvo5zc9b4&`{*J7J7l-wvL1sll?PYrF3@4m;UN3Q z+Va9;&g<)ITrbcrY?pQ<*l5R~Auxgy_u&!99OR9<+P?L**Y3BU{Mnyt-|^0Ox8M1D ze^(}5?%ui8_Ho!Ho{oWaYhN_-E(RE~X&Vi`yQ{p;;ODVsC0tWEeEZ8fdQ~OtE|+#mUd+( z16Fm`L9l&p@bST)gJ@_VT^*2}xCnqadz`!XAGGDg<@VwW&$d@zzm*4d@?f6PVcGZeo%Sv5!vw5$bG4Qjmu#fWWcGa!~*62zz z>wIhYHgO7k21}I|pNjUBJe-&CoNIoPHWCtgX2K{p((7cQ^Skr&2&1y%k)G z%C`;Y^}jj%M)@BPl>atazda4VU9f)bRNWcsa^OGk8|{3&IwPVabxS#6sXK2K!HqcsN2i+O z#hDYF?ogsnb?A5bcep0b`fL(cP9W4nmSr5bIrLq77R!fj*5qvl-Zy)*S``iEa-}`skpZoKFx_#&Oe}B92 z>{BiKaPDlkQ@3n)kp_0r2Lm!(d8zM5>S!qAw1c4ToMi{4+{qF`$!yA10%4rJD%nha z(p}xZ`QTxj#isx0|L~8pUEkYo-h^LM+5N>Wxi=p@Y;NPY1wZcHyVq_r7Wn3?-)dj} z(pTH7ufE#0cDCp{T$F`gLhL#4a=LJteo~q(a4d%SQ*Cc+zumk0u-& znr{2M^kb2e>D1+LI`F5uTHuN6_4D^>5v|+g^R`b^P>uZ3|tNE(RgD8+XP#j&tmn zPXjnMBAkdfV&0B__e(Fecf9k(_UyB7V;s2JR(#Nay>vXdichezG#hbvSW(6O?%zdL>~j8Xh#RwP15zP99#Jbr+iAyYl*Eb#h36+Z(&S>Yvy(+ zlWnqT+k*OzE_-&J!Ri{gEY5=CVq068Z&%hA+Kub$?dFqL+6UhE9`yO;_KA;vxc#P& ze5gJD+%xUk<&EgUF8X}$&h7T#{{6hk2!io1eXu{qYm4y7I$b=G$>XTJx+z^d?Y&YK z9MkH9a;&G z5AfYdAa8+#uj5ngj|;$s?}Hlcyo;H<=buBIeK<$|G!HLlEekytxVGKsw0^L1s4Un{ z*|Cp*>BC z-x}5>pu*rNtuh`~R_$!`#guE(bQ@}}Mq+!Gs@@R7f_@!&CwQCu$dvgX3yJZ^ES#p# z=v4yqVlc7CX)MwKj?fF|xES6H!{TWVw5JusXlFeapUy)sXPZ5FB%Ssg`u6uy`}v># z$@bv&op$}|T5yr3E)+43ywV`t@sTdJ*kwBfnQeGYEL zUHhGT3g|;DEiJ?IwZQhlh-q3!|AnJ3ShSRB$U)$FTW5ZQL*`^Uzr-haWZtFliMRr} z9{vzx`F^Cb71 zMw)iOw(HyF6OVMHdppj-9Jlo~bOf;GnMcn=*S^3vAk(MBHp(NK%1>;weyEsTu=7PH zapZr1SzW^ZUD;?K_|OO2;!XMw;!oV-AGr!tWtjG4_gZYP zAL&109C_>OueHy7<}2-&U)jWVpwrsbrPFp9QM-C+v0dF*ZI`jH%h-v_8<*SrKKz09 z!pC1G(<%mHw;egp2p+z%n?-`)m_<(LP54K(X$I%Qqpt0rhg!w&gQWyDCj-Y+2AxdE}g;e8`l5NJemQYmDuzoLpNc!J?U8zEt?sxa6b@NTz+LEQKH_Y* z+Qp@m6piH9mzK}Lgf#o4@IA0CFP_$oW^zpQQQ8M)pjCb8%eWV~I7%t#qF!Z;YbN!@ z08PCXFY60_#0h^KLQlH)z?h^>;=2EpD#<_MZ3Aap;BOXbcn^(-FK1pCc21sM(4HFq zh#CNmr+o7Dx+h~3>e8PRr?Ftz-2{UpaP;GYm-N$aLx;TJNSvGr3=LLzNFCBrdULi+ z?5QO3OuC`U^|D5IQRpVroa8*?)LnQv-Ig~t+Q0sX{z$v>u6L5S+x8wl0%pyt%7+;{ zQ1Ur8R3N{ z81xX|#1nF_z4e;<%eI||!tXrvti9@S1ERsxGocyRgDweHpny*dkrmOn5C|B3 z(!Ezv@Ir*#!8|UkXJkk)zLe9v!ka#GRXurpbyKAd5r=Sa6`aT~^R1t3;u>5?kx@kz z+ceHeNZqMEWl}sx<;ST^f|+S2@#%oL7`mZ8n>7QHi-CJrd)TFlZy73a6R#5db7A#5 zr}OK&Nhn6&PiNs0&UD_D zvGUj!%X_PAm~I_OFCNB`%9a-jpaUx4%wiZpNrB9FS2*%L$gtH;+2Ylq#j1qZod&rU zzQH={PE1C8P#}92bI_iocnu#T=T3t3K^zzavV&DSI=`KD>FU#M@uipX!`Irz{FU}2 z|NZ}`-MDeJxgBaX&xiluDXpgapu<(eobv`ie~3LlyO34+P6YT6E0d>mh`uG_16{vm 
zDNX0SPp9H!jPy`ua1N(Bj?f{K$#O$kB2n6=bL;1N!;Z7%fs?mvxGx=$%ceP4NA~N>*Oyhojk`OAOSp;58Yaw zabZ|vCapw)G@XS-Ctz@*(+1_C>#nr&Iy(g5I4$D{udFP#m9>?24aazSX$ksSA8qUO z$y?VpRX|iCF203vN{4uK;9JzK?QU+jAOD4)g4U<)fA&K^)NWk6ffJ_D-pu5T--olK zuZ~uype6vKA3c;D6(3S>=qV01pzFLe>i16SMOVfF4TT^*JWm`Q;d!Q9=2W z2!qpBd2TZBMX~x6=_G{igcAgj4qUs^aSy+gH{m9}+AZy~0APE!-MROm9qb=vf=?Vq zNLi~QPAq$M6`-ka=*xr}aELz_~A zm~(jM7m#=eB9mpf;mDLp�vvnKzNIxf-ouqYQIa`k70Q%OKHbw@? zBU$1uGdp`H?Gf^{aec9U_=7LC-}MLnC|ulchr74h)Zs2{@~uz%Mf*s3NuT3Hm;KA2 zGqhA#`2P(<(vk;$My5F;sD`(eG)Ay*3X|719y`W^#^eWPt_N(-A-Mw57@g+!8_U%~9ZfP&dW@rZtnGgspWsi6lSh`z*-z7cqLAwuSuvte3 z?c`vuo#cT(*T;vX*Mke~L~JtccfsB4%pzo%M^vy=7@p`9b!mtE)~0Vs+4rmS`K~&L znVj_T+rIh}($d}L^b569S?nZvG-?nq{Ufp{Zl?R)PZuR>i?jpTzMLX6_;dOyIn0N> znFF`*AFw z?U{GKul<(a{s-D0|6~8t_T4Xkur063dk38RdFZx_P4O+=Q%s;fkL0&)8EOzj4a=A} z)G}d7fK;HXEwoRb;U2n0MjQ*);8r5b=dnA2pUZKMThXw7`vn(bxu8kk)d!c0oNqpP zy?xI|Kiq!s5C6gTr~cfZY2W_?f1Go8jf0l0@JHcdo zZG(R8{@y;a;rAPrTXw*w7Q=?5K;a#B&dxmh&W{QbZn?e^!MU(1iZ-i}Ir7*r2xamuA+{?Sy;_-s;Wpxsh`x zZn@ruC*SzzY%zE|o3YA@9{>7$tF2f1hnYPG9Zue5i+>z(cTwX1fn z_;Nm+R!>89$~&P~_u3!%S$3asVK`HVZONy^z*9TuId7Cht1M`sPS5dQ;E!A6M}%R3 zTl%lT@240`zVO`J+WWubz3p8uz0jU}=4KuQEz-x&Ax}OeaF>cKW=2=7S#?pbrKiu$ z!VhlzR7pK}@&S|!0Z8QD2mW49XhYLY{&_2dsS$H*H~_q__RTn81hZw5g1u--gy!$){Pr$Bhil&svTMn&rX($2d+lpHf!e*u_8Vmo`XS z$>$(s;o|BF?OD!vH4FbCfc0m4c!a?>LdCCa5OrY#{r;#s0A%4jap`xs@U2BVzNsnw zWAU&rR;siG;HqFuWSkJY!Zq}*gZsppNcr5S(3?VH%+5)iGX69F#ly0dvoih5go}P? z`f1z4UGOmod)#=gW605%KXY6iA*b*Z{L(M^=9jdF(zr_A*z#g_z&bw6HMrE9%EptIJXQ6eRc;c>f75PiBkI9Mk!c{d3~*|xEKkWxQ?D*zH+&J^!vWI-S{~0@pZJ@ z`ogg!fRzo>+A;dDg<*k*>6~-vkDt{v|hZLa!1Y>f;%TZOQ}J{G8iX3ksYJ@?0R{J!kz!>k~oC zP^JBtx@iau4)Yc?;T6olflC!i1|VACm9CL5IB}_H7sv#8VMNAYk?X-1=d~SYU~lz1 z;7jHrg-525>ClCu5~- z<^X9&&82pV^>!C6$OB(X2a2afOEth4Gx=ZrRTiQtF4_ff5MrQOkNmQvv=O*;_XrGg z0oN$=d#&tCYRhqCPMf|+`uqGRi)I+dM;64XA4!i5K97seTTS@q(fcuudiYKVz%>uz7t z({WnTB|jm$lfJ>`n4daMz!$$rzuEHjyp4GBg}T(^C%1~S#1T(amG#XK=^I~;bmIJs zl%KQEV>me4r!TqM{*6ET@8jUo-aY!(-5q#bbEJ;->V;Bpu}(=@&`Gyk0Qv+dHG(hGJ-c3@dpZKn#p}^c;YN4e0k29s2uXrvw6fnC*e{q zWh>lG!B^%L;P3>Nb(0> zPab8eF$5f;3|!JdneQc(E3`S;m^kAeZq?&iqK~(IgrOm2P;95nz1tI~o)mT9 z2@_m-N*NDQlCHyVFux(N6a*CQVrOTl9Mz6&yG)48NZbqg6u$aY0T(eMRm$@DCJXdT zXO>WOt_N=Ca0boG;Gc(4jmF0Ij(m@YN%<&pZ&$2S0r0*I7IaYI1{~Tg{#hy4^QpXE z$ADVg{DK==md2G)Gf&`h?beN6Yt(djv_o_Abe!`ibf(7)RvFaK8;4qw=0guVTkEuw zEkhTT3A_v_Fn9??_=Lip^fZAoG*g8)M5!}-B8nx?S&ZOdfV9(7J0@hR&9|xf^>%Q2 zxm|wxoo#d1709jq=YRcgwqN*}PqriEWNCes`b&Q7nCx(~1x@gSUcmGLTX^Gw6+#>? 
z9J6`k-n|Cs;oAcEE->Km%@5yPU(7^_9SL-hPP9S87?Udry!HZo2hFR^M8&99kZ$5} z%E)2Jm+7`yxRL!+2EV|KjH(05#RwYo3r1WJ+Ba|2)sQ~}a`ln~IzS)7nlIm~CZorp zvm(@Dz248v`Vfs$I6Y2?qFE_xec26Qkpo)fTaFCAEaytr(KP94e73+^TyB>yt+vZo z*V@&qSKE_MJl$M*t3h_XD+u_2=3nTcBXar?o&wtx)W-F(>o)Bo&mwSVW2 z|FQNxpZLD^;K76b@ZaxkK_}`R0cfH5MSU7{N_^+XXfv_7AE96oPr7;2Arh)nu4B}9 zQR9tECuo_}NopLuu4hU~PqXy6KBgYSnS-7fY=ZV~$c5Y|A2G3m;;l_gn)N{Z!pjG^ z)m9I&6U3-@wAVu!GlYN0S(QJhLuA{-hdManYwIg*ZEYp_)px&{>4Vf^!!2tY)dll- zA2}3$$2mHo94cQRX1f&4L8Hkc0c64ndCT}fSNT+5kChQ*%RHuKhk(dXYr8uK?W^DX zR{OPI|9so#+D}dlx+uGrtv;rBVeoncZGrezT;U$N3!@9>C@Y+c^b@~xSj9^%!{{5G zObyfv_MlKGIF@x$(f9^rO=?r`O!Yqo<6Wc>f5{rVF(+w<>uq5bM-ezm=E>ost})9$+C zPAKxxiE(Y>!PdPtefTKbCDKi_Gn`&+sGnKcV*owJfLMGTq)Qk1Esu@U_W7_Mb;z|c zwu6(rv-_y+?jPpC{NBNl?m1@>kmn@BIWdY_{XgZ?@C@oi-Dl0cU4^fR{I0 z@E3+$C_i>5@+ahm&TkIjPLw9vB(J$j$|UH}{!4_iCZWJ21d`u?&Gks>@SxMT4Nllz z3<`k#Oqb~dqm3(jmInyARu9N0>gcU~$VItxK|$I@h<&r8 zr<|>h-)Q^MiRcCTb$GPXX7LqffpdCHKg*y=`#u)9*!J>iw82F?AMyxB+QcgbPwC}W z={^hz%?Qp%rZ4FD0Wt8x8(})hhs{WzK%R^Pi1R$kl>U)dB?49+=4s>T2ep5rKQX@) z^4dI}kLeFQC*AsKlQ!71_u2&)091YE!#>W|H-Eh^TZN6ZjgbxCVihL78!)xE8&{re zQ}{rQw%@<^uzj8W=}TYyYWv(*zS6$>wQsao5v)q#B?opsT;E#iY<2~gl{DCIZrhBan zNTLX}GFdWEWx9-V#7|L%p`lGVp{`qB`+7UPeYgFU|MUN;xf}a?-}Ron75ur+eGb}f zreAn?|53j2zxnV{`hq+}L_d#^BNwr#NAk=GJ4I{aZ(FDM($S<<#E&@Xcyx@sg2$Eh z<@Sm1dbz#u>~pDG6u10#EJ400(;iMnI)Uh$=1wFw1?_j8 zZPMhxF#x!H=gS{yzx!YRz3sj4{{ZoGZ3q4E;b$HMQ=dB==||Yl`e5~V=p1b|h`+j_ ztT`#@!`6If{?xZw-2uP{?zKZvb~T|+CxK9(%sI?C9>dO9<_Lk4Ci+D#T!^n3+PMS8 zR&;uEd#gQo^swE%f4@C^_z?c@16!ZIJ}IZLWK0E~E|$r|Eqxa5%O8_RotCYSW1zooyxi_uJ>0l_1)Fj?52MysFZJ(HFX5{}3(B3G9t4ixrIkO*SL6m>7$){mU&Zt; zACAYO(ZDIX);Ee>NH0Elcn@4*NoN-ZL{?~H9+)Hd!FvRl;O*SJ-)crT_;a$?9Rl=; zr85=M2he`fglWACJo25+9^rE zEp`^4O&{GhX{&9IWqpvp?Zaj4yAOI-pfe0?GYgInWgRU-Q9HD3brOd_jM_$ zFS8H?DmF~~Gj)sf?$4ydP4G+{eq&8M+UpT$TL?rbo$vX2n7|=e3|5is&w%?c876pG z9e}2MIO@%azj!{ca}sD@x(M;LPc`g7i~0HX3qSf#+u!)}|CjcG_uPQCOVQ7gvw|6( zIv=JkAg}7EagkYY$@kyUyOCG;(FO7aSA%|pLE;nC%M*Po^69(TCoCjuJK!X^Db-gcFG+(&*|9CO=r#fUJHnnEYdf)zcBwTN;39|Ok-DWfs~ufM z-k*8;a(m*+D&wab$IVSIwAWsHBX3OC7*D@~N~ud)1_yD~4oF1kf2e)dMxuXb=!tps zH+_Q>q(Z~YA%TQ8Di3gpyLSC*TV0!L?|J!s?K^+pA8ZX7QT|TBU);kV{aD&d&Fye& zr|mwv*Y4tz{q&FhT>Gd0?5pk7H(I;-WNTM(*`C>$2IkXYr8^(4U0Z7(|Gtm6^>@68 zL7uMf&I{{AImAu{KN+n|A_ww}@q2_Ly4Z1eJlJ=JN4d#w+6>OpVB8xbha6l5eDZm( zfGN*r6JdF;oz$Krf63QGzI#0t>RR8c&6K_>a}*{j6TM>mAUz)eE#NmcfXw+;QJ%5 z*1xpqV(!{C@ZsHCkQaVI9?*w^+a2_bcww7k;LO0?$HOI?4Pu`#ktI->~Oz> z&v1-h?IL^9VbIID59#3p>^uu0E^{vMFLb2;3+?3s=P^Iz{7ypp1$j^~6ScH{>eC2zm6;gqk`9t`RzY1 zw%6_QhaC;B1aHkD%_3I>=OLVZG0yRw4}{zCUdG$mb&EW~laLpB#FyGK&WUV_C!V8t z0;4j@XXQ+)gP)7Jt#kPE#BO6}=I7Sh-Fx&K(CMZ3zuf-(|MSQ2Z@z`C*=_gl-=idQ zX@3U1dE3+d-ftfY#|rX#&@8+@>H#8_#+-MNhpKiqA%?%ZqJ#2wkk;Ol6Y&v7if zs~=^s&eYql#nU{YGof$-pNY?Rj&C)tLeg`ueD(a5{>iC3KmE6A0YDPZ0pz+XkJr+t z(o67%BHDqtx=i|yh7364VMrZSE@@tagn}zFF@zDEk@uW|$&V1kpkw^RJNXeVh1eBE zfh!XrYJIaytt`057-G*dr(oH?n~$TX9pm5vJ<4o;nLOO)`Xxv-LfXXj#PFR}{nd1E z?t(blFs^G7lM*^^Q49?<(5D%ck#88`#P>E28VooW!-(ol3N39*XgVT$2#gN@q5(CW zP5X91<%uEcicu2>MgKf(rdS3E9=-DX(uUx#fOst(O`%Mb*DR39)yOtixIWmI4zqK1 zv=~c#t~ne-CnM(O;V5I$jHM{xePGdvibgGfkT&wbv_63C@|6G&Vjy}*J7SH*$nuu0 zwz;BCT;`@1xfXkz0as?xQSBekx9vk5^DED{#g%LA)dvUd8^89g_GkaxUuc)FY_yfj ztCZPGeZD~;ywM>^=RB|h4+juF%vWq3(75g5$}$EXSvuU^YfE%C3p$K6dJ#G;EzGwC zIxHubq?cQYPDA%hJU|2KsGQkBO@i_{bWG_@8Sjyc$S&8!4F_HPoVYqVbo;V`CH}|o zG52Yo0>ao>z6M<$BlOR(n@>!nWxkWk z0EW(=+2MjqblrLdayZp(0knX&N5|XQ#?|k-{eyq>4{`SP+yDMg{RfesTesZ$f3ILk zciNDJ4m}i~iwCAVm>)Vg^w9b}dl;mn*HSzK-tJiYfPTQU7N8tCkUV1Q_}llo@e!@yPVd zLfbn$%mRQ{KKuE$y|>#|mR5mjG0Os{J9yS(IfK`Riz|?Gj}t5|Cuh6CRaqFc3NG;L 
z%`l3OX;gwv3%}&4#TB$k+>Ipo8H#*LLA@SvI^wn)ehW*wWC4&i891>`)+r92;bGBB zis|%}JbAck`sh%{2YUy-6bs9{qlxL}tv;3n+9sH^4f|?7CojcCo0iqh;Nm3qy+_CG zw|@U8+Wy{Q`>CJ(nS6`i&!qSe-wAep;#|+8|1-x7khWiM^T1v9A(Vq@s&m^kZJY9` zf8bVm4zT^cq&&*x4mdf$a=^WN;J2B!GfD4c>h=!#tY;ouge?l|9zMHlIb>;m6`xPuwRC^pw9k<~px2rN z;g~HC_0SOqx4~cgGqLStr!5(<=FYsis6d(zQ1iCd?%gaTaNG9`XlbVlZhVNR&D5_F z7muP7#Z5=Af){N`y!dcmiwJ6xaXf6Yof+t2!<+!Khcw2uSmuL6qQF<$fd)6;8zK?5v6s^h+P1>mcG^w21$-0tTGr>HcPW{j1+-U;N6K+n2ud zrS@yT`i1uT>#xV&`7qJ%ko&MqxY`qO)Rsjy3bCXOTMdS>-;(^=)yv>;wLSm*^X;XV zUTW`r=eyg>ANf#g>rYb3H>sf6)Hdy-5j&6C*@6C;Z{zNU{&_=h!R<4hkcBT!;6hhv zaYnjlEPdKO#h@Va2_Bk?#BuGoBK)7&kLuTyM+42MFw%Rx{fKoP0q2dcey!d8@)z1) z{Y!s2lRWQw=^fDhB>nu>&Sq$}yX&#v4zc^bSqsIZ2WRN-fjU568(vRyO@ir$DeIvF z%o{u63C`+`f2ssMRdDn@f*bSG?R!4<;r1ObKHn~_uh*8x=&=u7(9&nS6=2-oVmdAimQ-{iE`X|2F96Wa}tbdV(6ATr<0@le3ge|E6dkb{q?{;ZI^=HLl|Y$^GQeEPsXe23Lpq(hsP z^BT`pUn9-j?a5u}M*FQJG)KpRJ31&WhE4S?P2WxrUWD!h0H?X)G*<4+|N_!{^ z+>NK6%0lCO&vj|7t!-QdW<4ZYSX^!!m)G0I)wMjRaxtqk)#rD?sGn|_g-Msz7x7c) z+PuEx@g5D7AI{nN;GFgy7C1K#u>&O30-ChIRadL$^k3Ot$6%0Bn4EUEKl=T7b z4E^cXKKrZfFa61XzrFC(MqBC&MU+F)oIXV-I^XqA795-QHrrpT8?m?AcW6+3zOq>I z2xf(6@X}WkAN$J?tyQnRM-VpK~#PE${>{+ExCW z*JQN>bT2$;v4V_Tr=0ask1~DQ{oi4q74za3Fsw3tmA#n;#j?str6-E~wlQ>!U+I0- zYe*gTKNsV$E%~N3vVVNC9s5!XMX|=%9_>`=+eusEdIcJ+FRir8>r2r6v|V2Hp_z@jGU_Z6_@c(xe+@v^}G(S#Zkv z>h(+Q%8iZoy}$jpwClh5`x(4B_k!&`*aH{aBjG9EWp(@R?RJ9y@bxc!rTzH-@H6cv ze&!B7S8LBd*V+p=TDy5|4f}bGvF&_&>g`Xr5B;W(w8igu5oZ9IINYUQ+G-1ass&wj zyqWgUGK0Kvbf1O)>XPyBXYybJUK_gj_dF%<;j4I(=wc(9W)4%6o;+Pv4BkhmtfRyA zS{SG8!u)kA?&XlV*jnN)&OZj0xwJQ?b$N-cj2@L7cyEnw2@^X|+RWukk!!=j!$)mr^C5Uh*zINAvW3`#U>r9~k-y;<>WCmU#P# zN5s1zW_NEZ^f34)@1?6(8Tu^)f4}WK+-&#m-0?HjZJB;yab>A(Z*I0n5B$dWVf=*n zx1E>L!1*$5R2D&!MuP4Oc<&}Xy90oWReEnfXYl$wls=95q|i7#uTZ>IE-F3u$_sg% z!y^V>Xs;+M4#Jy34ih6TgkD|hg>U)-c>@@lRwx_{!5e)KPSojn9Kp?_1J?#7Pkix# zzj(Qm7MXMvz|A=4>&=gZChx&MzUbl{zSd#;{1?8|e*Me08{IN#kojOKK9zm)GM(<+ z=r=_PyL)wP^4g0Rn$38_T;dA;+4*L&vM`a-h zf4q#A-6Qa<})OmVhlBIOGZCg=n92jLhpdx&u;feecFWas>;_K7?IcSysQ2F$w1byAiE7;!_-3+rcjJYW-ywEpo zEj@{!Vmm|~e7f2tRy0BbMvnTM_IbP1feC8IwAD`<0FXd$zyH2J@i3;w6_V81kSxd0&i z09@J|dtCljKA{#K?{H^dhTjq5JAOIN0)Rcn?7NIDA3fS>kM3=^yLTS8+jk$ey?y!w z^wYumvZY?7k}V-e~L&gba zz$jR5J>l5`;^>5#_)NB7Kmn6Z1MS>}p-v^`^x9N#l80-Na2B^ z(cI*rZ8AKCR5DB3I2UabeIAa>AXg{T%NqIufE28Vel9u(57T=X_vcY>u4t4o zjI&`F&hYJGz=1NDHLLMb#EFgSD677zQ_NJpbEw?YrovC7$Tg{5RC>HE^1GNtr*wF9 zit@e|036eyx#E^%VQ(=WEKeD2lu7ysOU*gpT&&$nX+gl@&O zxJVabr!2C-8!Pd~14-?UZ5`Ve>KJCrFdVsQDm+65R~Y21xosg1qMrqDg2rp#B(*Pc z@@9^KU)04P^iJblye!!*hoeH|pMo*)w#iAp=qqKy2XQMM_xnW=lQIn>+2btw-(u^>_bX`(r=!$J#SDpKV)P+jN*X>75=~5lXpnKiV^S;N0=E zW06u$@_0WS>GNQ!ZA(+&M_ntAUJ1*cGR799l_re3&Xz``WwHR+C+UD4tM8HJk@NA~#^A<(NJv79g77jI;>mWwhdCRfT z%=uWSd2yjVdbrm%Ha6OKf8TFvKl-CT#r;F%VKur(PMQbK^z-v`v=5ox-?^P_!&k7M zd%K(MaCf^c``{k>?7DiN%6(8jhpqAR4SpBTog1vnEhl$&_wq)a{(*IDlJ4qE2mB5$ zklJd$b{jeoL0)Xse)XGEGY8oTAiR?xl(XFq!sU?PmenEH-^C%^+k>7u=iA}g+U3Qz zdZo1=|M4Foj}Ohh-i{v89{L1#?aIJ`wmLY0LV+7A5qSeg@AZE|@{*N=q!UDksK^f& z5dobO!em0|cq{$QE8`g`~n zJ-~lcpq#=s>Kj3(cG;eUu3K*kkGGqUwh>=3qdgr~r|$@lQ#JaOHu(m#5YUwXqL1n{ z&_+bAGHUbT|2*Z~b;!4*v77W^XVXsTn;RKbCJ;+~5C_^WT5q zmlOOUhw1SfkUizC%656tC-7`p2Az>fXb75-zp{VHnzHf7?RVv!3#rIBy#RjDIE!?S z>P^ykT|WHv^8lF$p)}RTzt12VI8GFI8TV{_-jpGjUo}x*Vu!^GyhA?-oBob=X@5q% z4|`o$N><`JTyVA?%h$Hq)kgWLou)-pCePuC`Nc@T!8Ug3AZgk^ZIAe-4bT(}Vk@1b)^2L+GFh^= z*3Rbf-6|)CvceeaXfG4kzTqU@_V@6$=4^*Fmo7B{NQ5_tzp_({;mUupMrB@FN@M+S z=&fv=(W6Pz=x6m`+UXjommCZ^mDimW#iJf?l=rHiZ>%^O^!iu7-fsW;=i6WXD}Sji z&`&@A%(L+!zx?H|)`YBbes+*Ie)gPPC6B;;;JSY_-45YZR_k+GkQei^9C 
z_8~%V3dh(l<<@LD`1$Au-`hU$&X?LU{>9;*b=1SdLuA9nWLbPeJuU{wLs?)OjGM^) ztoFb~Dj=cGgNT!I-s;zDd(^k7=_+qN%a3Eoao%j@Jmg(0C-y1uC=W5sGs1u&Oub<_ zdLGEBuMfT4eL=reJk*I89fI=b7|<;~ebepEgNN-~uf3Yx6%2@Fn`a!8g}~UBJb+Vk zsn5?+`00rCE9<4}kriYcy6Nvv;~RYVLmz4XlYiq6w&$LIk@%^$y|s&;xCMMYjMmp* zS`faId$ZA1@%BN+pi3qXZR3e=_?PdcEpl;&3z|-jY9W$4d>BrsU*FmV08S+NP-hOt zpSlPez4O7X_2hvGHY{`ozH#C-{Bwi%wJ1<~P`vVCR(LI*vNtSNhs2d1`+shPBT1YP z)8}y^f%G{*Pn8p8-3QwG*4p?iyv##<{chieg+A~lyv0_Gg}vfr9x+fKv&a%#bbNvz z1g(5nB9CKc!Al;CYis)ds#DqIqICuBpWE_hVF2_KKgWY*M@!CJfXXR(EbB33Sw8A( zdhcgoGItSKcix8z?8ByN{zVzgE)03=x}-MtkUHI#Us|Qj9kv~%Z;30T2J@@`>Rseg zyUo~Z?22W1eL^S1<9|Ze1NyW_+m9&Yo22-V*{RBhQR{T|l$&FGys`nmW-^gDi_h#9 z)r;|+r}Ch5X}NZylBP%QHw+z?ma)+&iT)#9L%XuEnt2M}R+{kxB&P?YAAvJ;I9w_G zwOL*`@k^U2tXPnc*bAX+I(ZbU%2fPIfHqR6?kep~`A9Y0{xsp(@xwXKzpd|O&{s-0 zPcOLj0z<#)`fjNF_Upy3QNH4=-*|cB+vk3w;nvr_+5Yxl{14jf!BJa)=04oZViZu7 zzv(o9?_{&(02FvNiHmG23*adV%85K7x@3CN5uEpb(d&kcL7T`kd5oJ!K4#~Uk;sIj zxav3j45SOWG=MfYI_`rz@dAczKc7!1n>g&p;E!$2LEfCr)#IXrfbx3ewS4aNvs0$- z(>{GN7bNtuo+oL&yw`z2*`Yh`*fAcI_yF05c(j~!?O)j)gS(JiS-;ed@lOsIt1U7p zI@;f9ON-NO2|J*Edn~W6w`*6fw5`3}_N{OF(#Af%Q9bC_g|J?C&~m;?`(`|7z(a8F za+o$)ZMX5mzIe={M6|9w^~|;Q^z+ZQ-|~BZcbj_G``XF9d*BN0=v8_rDV)rU zBNO&9!Y-M$4d*ySW`l$EC|f4br%Iq6^m&Jk7f#|xKl0@+%4mOGOnP!M6TjCtH}jd% z*_pPqakb4}zSK^#o8MeJ!N)#;e+~Pu4CSSTwt)Y=(~b|2N9Qk>SAv%h_YROhdeHP) z^NU4S{Yc9$ui&fGM*Y|A&4+D|!_VQ&Ei7^@#Shrse2_T;!{LsLyk^^_^~-H(Z5@2d z&OF@SY`YI1M9!Q;Sy{%X@;dBY@T(*OaVBdJCV`5vYsY=Oql|lP-z}}P*c&^u z??YSdg!RCPds{p0^I!N{`}*sf&>J6PW-&I$J|W+Jr>wknae<<_yflk1f7(9szIS97 z0LNVJXltMe@n`L~^^08$Xuy4JTRYppJ#2UH+-vvlF%H3>bpe1rO+B-s;Ori%Edfi` ziw3>@;@kO@zJa7PKRhe?+vgbcZH04@c7v|r9etbhmPVF|JxAIH&9m5m?BXO1Qv`Z= z#l83rdALYmOuouco${ME*^>l;0DjdKDc!I6;!fWI+jB+00 zDGnq5XoooUqLuFA1=5_?=ZDy$r(INDvcg}h{K$BoFW3)5JLl1{LG@4=pUXK#?oH2v zfnMJ<_N#F8kcOjv>gF8%@XoezpWS7|i`d|WKj*#e*~cdA@6aEuEVk{vg)@5uZ>WU%K0#E_HGH=Ns+y05dA&v=i*B?tYVbTA~} zX!{7_o#ND;_SnDB@40XjUY_{^AJ?&&jQbq>9V7pa8MMc{oBQp-=2qK&xYKUkdC>0N z-Od64KPT)009w`Sldk0})$S`*uXPxb$M$Jgxfi9;2A^l^GigEwuS}mb%rZUi2=RMP zr#?CJXa3ml`C*X1X4*cGL2v*204ah&fp%vV+0+;*IMzaa_v5*18D3dTUNxX{%>iV1x=fQhv zQ1ShUVsf@(uP&4u*XQ{SqE|!A2iO)Auliu@DZ{_za1QWbH zhh>?>6Y9HVW~p{ey8}V5BX0ze=$rr9a@ICQ(F;x9`Q|Jh&rJCdgPJ=TpY z*D~17mSy(msSZ4mE#Y|7+qKjUK2F&A9YVK)$%9)Y`? 
zD`}B=+Zg@mX+!rjm?JLRJK1RgcM@zDaGj*~;hqyFIv9p*tB3*g?Lpftwi-CUgXMQ= zrQ!S>IL`QX!&_1X4IjN7T955Q9u9Ey zfA;5pp}p^Y?`hxtvF~m7ai$OQP_W)S8WV$Mk49VToHkyRb!kD~iQMvMFs&RNc_W=j zfj70V1>7vB{L3%?v-5(s3fVdTbOzlbg@tG*==YkO$TKE7Mq1j3EtB+NTIw#`dbzaSzIYHQ*b@c}#@sR0fWD(q2Ka zWOd@g2LN~P-A68tGx?A{FIh|O3l|*U-ll@V89r1WibDyF$wdk17bk)|^4|J;nNY{e zx+Q{(WaM91=I|EUqHW1y-N^@hofN-?8s{i7;6Mf1iZf|$CFi%R<`>a@<&Xidwnq?i zOAC3!#mP&zOci=ya~vMI&6JC#7^oWsABrzr65*S>)wk9luD;_ec0~_yHBJ5v$4*(y z7HkB!^xNENdk(6R%s z9&K$0e&(8jC+uq%Wzdq=JC+d@Or^5-IBD3NPT=mX} zvimrB+q+xPbRh%rLuldv-a)@@cj19DtR3-TfHLZ8a#x4m-?`VGe(Fm5PyXY-(4KtT zhe2nj9d5tQK!JgO>>F}&jB|}{fr+dR-Oiml?&URhFm$j%BbYJ1N^vbe9Ch8xD}!EU z0b}ns43>S%K^l6}Graj6$9=>Z&eKKjHx4Fp1TN?F=bWwY+lHQIa*pChuJhCJe>@=j zm1JJ?r9eOTw9!Vxw{zYSE|xQNytVV+uI>S^hsU87DybYC>a3n7Xm3(Nov|MHqx`v~ zN4uv^LAO&nvKc4O^>>kZ7Z&=UQlDIZ)5SP7V7Ba}8n1}6f3T`9BgQ2|$6LPUo=fs- z=bYGxjU=wOo_l1KbiajcQSD9oAblY4$l&M@ncsG&liKQ4x%MIZ2?)%&I0s#yU07~2 zoTuh(BerlsowMCiw|oNI>jYs2TIi79-1H68)I(iGV0_pt*#AM|N^oJ@m8+C}3%Dvyf^zzrPS zqTERX>-5bn(2P%DUMC}+Sj-L~v`vlHF1ypp;-U|O^#7`Ezd^mewjO>QAMUmTY!E~S z2j6P*0jO}Xk=QSY#XariGCDR(E4^NxYjdlY+v3KJws!sL9G9Marfod^Y`gl*+jCsG z`9iyV^X+Z@iMO@Y>(8|LOV``9I}R*g0tURA+J`SY9J_7m5FGZgwR_lW+i7- zmTv~RSZC$A?lhKT5- zwf*u>nbiMuo6h}An3m+#nHsB_qVpu+i;$!)7Ou2~x~Hcd@aI;QX}es%@!B1(PukTh ztC{4TB7=RBvb&F*s27e8K)+;KABMIKU(Tp}UsahECw&wMssC6q)amP4j73@f2L7-W z8GD(N_^N|z&bBewk9Iizkhh)w$(ul(KsQ$0M8P~J`)y~P0v~Ro~f(_E9 z62$?0aPuJ)1{d4tm=nLoT?w?8j-`(F+RpYS{?+}YXMzp8tau+`(`KQk-|ts{-TL2g zX1;408pw0B8k)MBN9{gkQ>Q3{j|rWny?&xw+v+FVCpmUI)TaS+zkO`qELy#%hn{O;}!BfzhYP>LERlJKt!>84FSGDz@4QH3mtvV$O5X7hvFLD^o7ujqKCMX3`cI_Km^GYm&~dyI>kfNOszk3(C~Q zEZ52de`05y6|~2kxJ^uM2&c{?5HtK*ufJ^=@#9?W9WSs{eYpU%*CStt+wWE1%+*OX zO;m6v9*Xa6ZM8*k|Mk!O3f;vS{j6`?s;|nWqW$QWgMPrZ?y%pPMdwj0095=Nklpy<9f$aV=>6g5qjvOYvwi6cUvHoL+#BH7=+9ew{^_;$ zwrk7n61cwf9q(=L`>qeS*_Yl99{82eRR3vKJkZ_vbl8sZSwj6MmrW?VxewW-C+(7W zGSGX+g{Fs~##MV<@LzrVID~6zrF$eOlb#_^#w8Qom#17SY!NueAVs?Y(frD1q|EZl zweYkL#GcE7a#8h-wpe}g2M;p#%ozm>uFHKE!gx;KV_DP7Zd*QcyxsrzP|CSre1R`L zyVPcuS0f9@$dA4#PD7h#EHaHBs`ZLQ`f%C45yolLwa3o;`0(BSCcD4T?~9mzyF78u zMPF7uAB#=Qx47hP9xhlsYkrH{{&01Dy{)XTA(yB4n)mbFes`P6!m)+r_9TAF@}&(L zPj7`C*nae=Z9Tlt*yp&dudZe8*-z2lzw?ms!wzti3~W5@)*msXALDQ#XXpkw$3soq zpE)ScA+@wV=La!4$7XsiRwPL8veoiRdP5xL#Cv}8!ky`gM)H*$<x7~Sh`$xD6oPrXXHa4c@}+;2B|;w0u!0C zK23r?k)L`{Z>-xzq52Ka-MmWTetU55upPi-rQI<~cA0?BC%_nU^>dVQeo9~H=^Iy( z2l1MUZ=hYbPU#f9_!D=;BlM7`yL;|j>0BZAjst9mO-y^b9Z=2_JR28)IeR&yu8K^D zAO2Yzak0;_3ED$-YUHWloU?gDpXBA5ytVKJeJ~I762Hs|b1zK&zAUUn-m<%a^w2IS z102e#ptalSLqXJXIVfY$542u-L+%Q%;7R%VgSGHzL++*vd|XkpMLO9{h8L* zF13@LyO?SELv-cjWSX&)3uEU(s~S53#P&o6yyh&u2%&wduKFz!R43_$MRE==ik zOj7a`>;$sLp4foE&~|pZk$jpRM1&_biMOA?4>+OEIP^h3w&7rRzqu3L9_QU%7ks-g znli{{V22VVUxVJMKUG>w;U}HV?PVI}zx`j*z$dh}toJ5%ItIpLK?Y2<*Jl3o5B<(8 z0EnWEj?M{i{*E1h5ud}jH=SuPGUH&dkf0KUp0pxbu@*WIEqR)?`?;)@Qs6ygs0V%r{N5?Umb}`CY8TG7EHmzSVHz8yLq2&{Z0rWkP%wnr&=cYEM1=OuKUZ zTDx*(BMygJ>EJ7aO7ICv;$z#q$8NY{TH45y6P&nN2Ko*lKl0H}G`~yyX!AkxN`D{z z*92DCmB>H#qx1?iIoqNP(kiafi`JI)>?aPSnMY;Q#ue;gSEZBSzyP_e zwm?xKbH)g(0-2nv2?$f-vr3IsuzDk>74mJ|E(sn$L#|`v-g2J|}k3OLXr(1I{CKXnNO+Q{Re*EWMe&Q1pTO+NdFg~h3M@4;q! z?(I&B%(RE#kYPo53rzZlbm8!P7W*e{vjrOi_=k_S+RpZFCj1?MKiJ%)&2F>0(6)E? 
z+N-bLjx7h-yhXc%)4aQXM7#IJq|Gk)@WHqCl~ecuZno2jK-=vA%!m35KD@Gh^jS;3 zfkkNtbO!B(;}va*4{tH9Tv9Y#c$^8t`(qoMGe{Y5oXf zwEsf;-gG<`Kj2Z$JA>7a>%h|{8R0?VNQpg6YRO!Y#B0wEUX9>DCe7#r4JU+S zPp~8U)p;xVXs^v2`*4^+CpwXbQ{Whjhi=TGFLNua`TUPM6dbX)DN8@^gsN}+W5Dpw zxJGB}=X@9x`$C=hya`3iJpnQ_H7$Le=!vp;aT5Rdp=o3J#P%K9Z5=^({HUFR`a>t% zX8X0PD7fcY^syF8I=NQcbF1f0w0X{CV2_iwP89m^fA|g3IJPh26LBh@N;6T@Kk`ka zRTo46YZ?1$7UB4SQd~<6PGjrzUGxcE2$db9u=g(7nkADq(X!T`1qje^Z+o-t+XuKn z%IgOY+U~=9ZSUc2j$6dvZU>vMbABVo&iz-(^IALFy5064zK#z}8uwoBZ<2>Hdk^ll zgRQN$hmGCadYFk#{WT}MAXRY4=dZxYzFl5t+{2%Ac01$gDSYpYC83YJmhN&%e>F!< z8ntN`1(G7K^?`&ECKs>DzcC__bv7!&YkAKpg-Uz8mLFRWAAt8k`^P`>4(~VVv99yPyKySB{inX{N3S%0dwks_HOmLhjsGEjm?P z`VVz4%HlnGWm!>6dc_B#g%ddETHq!NyC%1?04Ffui)BM&f2-_}ciSNiq-7S_5q(HM znRbwlCUSk&KL5GTwXeNSpGX|Naq}I_MdUmal;9x$(iZIw`K?8`S)?IQbU+-HIos<3 z!{yaA#!J�PunD`~ZA)i(Y**7j`I%rQeZNif3ZbNg8pDov~JNwhdh6Jlb0IM9=X% zs@3Q$f6~K=(k#d#zY^d%FCJif_(9LzNq{);5l7o0F5ww}PU`zmx%_8+CTZyd1}BFZ zBUxTJ_;5#zsgC=?qyub+Z=<;IDsKRi$9ikQz;Rnd?%c+I0U^wT1Fo|JnRdkXOGe9x zTX?KrSA14{Om~rz-=+8a^NwL1lS+%7o$V}YQf^O9d{fhfKBh}!$NAb+IcxtkWJJ2x zST%uDXh8kB?f254=C&>_BoAf4 zS6S}7IAn~@c@fKXmUQgd_~R3Vp(>x|_f^ zq`VRSR0d9|k&5X1<0o;52q9JF>S7o%%0Jf={|S2xqP<3f@mvQV&qP^2Z8P*+lC94> z>-LOnF!otnU2i}8WB(*O09@I)480r+sKbtRkl8Ws!Jk^+QBIuLoI*GGuI)99F-|O& zc-TR_DUjbRmi9t8qNKd0@3S4ESoTRg%_gjR89>Ms>&&WO?<aFY1?962n#MQJs zP^qXlh4Zpitt%sHHuO|F+y$~H=i;7w2`OHb?CkZSmG;r8*VzKjpHN+&aK22T5K zWzOMeclP&ovjc)V@o5`faP6WW^~LY3OPfdN?XBDRBZrJdiCd@N+;sOI?yC(&%Pjs8 zKal|5mk2_qLVioK2NmF*VH2ik zpSxmZ-fC~J9pG2&+}bmB|ubbJZ!S+J9|Q$pC~m zHEE_M8^Fe#d1At)ruV;~s>DtD6 z5lIxBtC+?gMez#@#3X(`w%N}G%+4<4r2@w@cn`rp{U`QYS<7cF;E{6Vr%io{ATO6d zsCpCsB!0V#iu5_pu=nnyW*@P*u+rAo*V+>PN){9&5BjHDo4y6^&Ox(zz~_YO=DqE< zg+DK?d?0H11;!83A@WNb59l9!7*%5shgIrWQ8qilC4{im_VBtwc*!}r`6-%L-U{Oy z-pK>{A!!lt$eIssHCurxEMI_;*UCqVnvq}P>fB{704Sm?BxA3q3#H{NUo8O017nk- z3)WkIk-rWrVU+ieD)FGw|BAbF+CFsoMF-CUkw0~a9Dwr9|2GBhEuVU&F*njN^zkHZNgwp{EPnA@ zur((f%FyU{u&Pw0Jyq_?kCrFlfx1YXI3<60MbpswfJ?l2yp9d$!ek=?R(SAq(0RbNTbCxc^jEHk)yH(v zn|xWncC|hG_UGG2fA9wb>}2x+xVcaRt8-L7a{7TTXPK|(aL!vFxjILwVS`HE3M-@Q zoJAtVM3M?l5V4$ijRpzV>s~HCrd7O&=L42!j?|g#ZwltkAuT#S<{L0w48Y6oV=-_*B_`{ zdp54_4TA<355t=bfAFk$YpL>`ZYoX3Z2}ger?bxXNY#bs z=X9uefpSV-j^rUt#zaY~?KJ@_9GMKPC50`u|>X&cXHT)N=*v;wwVs5*S_KGK+-n*O~kxe0Ui!C=AE#>W0=Pw z^-cFUB+^UTfcA{|gQYmjiP1?^>ujgAvcsgv=qzMJ_4a??Ni}POJuN4 z6ugTwMV^|_J!?-r^$gDaLbm?j-u4Y%AM`n~UvGII^JHSBJwE1fmIdhtowFPz2QO{Ftk$xcpMbZxy81q5QJg#V?z}Y z(v^q8%>g92!!9SJ`)WUex@%{BNJswK>u*K=}sHV zZfA)u-G1Zs_N6a=x&8BB{#5(Lpa1#xbHDKO?H7OH7uu&j^{MvR&wi$T@z=l7Zr#3> zt&ibATU%IaSA46&x1+FCZc*j{$!$!tv-x%QGwq+y>jJpd**F7+pDj>+8#c5*p_`8QZUjiu+X;95s2uAj@8!o zR@-vY8GCV`Hr%=UD4!OYUqZLh>ugjS zOH?3#>S0R_G@J~yUqaeD?yl_Y3nluX%*pb@!<{T}x_j>qO@}x0Q|;3FN_+W(?`Z$w zU;b-StR3%v4g0o@{lo4Y?6>6=X^eBoz&*6n{%b?@n~;&3w5rL12!#Yt`K66Al(^Er zI(i)E492)FJHxg4dRmU)ZrWof45qp_l(xsXt-F(N2ZK zg#bzF<;L+FmN$4kXw~z?53*c85-yaxP=}XMT?6M_rd(oRofh~!gxpE&hbzYWqe~&npzM&cJC9y@EKp8+;{!u7O)`7qE%KXlwJCL{IrjWq zXb9u`5l4tWkoX<}4o-MlMju>XFI(L0Bgebj$o-bDI?{*N*4GYzeOtzbGqcO9ZJI-j zmB)xShnRqi?poe&x0dpk|*{||4)C!2ROEo7L`m&Cz;p%hse3BsiXz> z+S{?safzm=SKQz=XYHqzcyIk?QGU6^n`_%!kJ|p`e*5{K`H5_~|Kt-loG)p2 z?%d5Jojc*=O&4V`b^#(&)&*-P>NXYWZ=?#wG-6V9AP1?G8}Roby=g&#!~Wm5M&{8| zKLg=IkUa!Vvh0%a?i+X7)s6K$d|060wf`yqr5+9hAE}adsn6n4ypR^P*aTgoUaM61 zmE$ZH;1E{wcADg#Jmm-K7kA&W3g^0LT>0v{!wwwJ)Gs~EYOrqN#jYmAC7_t(qLtM9XQkN`Ip|&-v5CQWUR0Ml1VS?fCq}6GR#dLW&?&+R==P2k`!`jw}1%* zvi9~O8}WMqCw@w9=pP(uFM1#Yq^EDnYuk?ZORmaSFMK`ZW$45?_APo{_~ng!2?;&a zX$TSgozRO;AgkPEET3Illmi!4ho^__RKFQL_H%P-gGxuj8NL4_k-h2*=Y|poNV1#&H(a%R_67 zUW0G^zZpYX!c6_pR(mKtEU&M5bf_Fk 
zx{}$&rFm#bpG6z}7W>fwwDVy-<@G(oM0k57pK0g(l5+_7@!D?gvnXxmw9SK;y0DK; zUYJ{st+3v_6KWBHsz{j6d6&lc5NfycI{@5(5r@1R}iA z4;3FPv+3j7llT2P3s5IO6Y5I%%7fR!=9GK}@1r4{pk?nLq5~t#1YQaz|2U$LYEYC- zR4H#P{JymDMEk`b|EF#1;a0nT)fYX4=>uwf80rXblvi}B46kq~3y&hd=(i7P^I0UT zrZ^HHi0HVqFA*;p@Ek3PgW-siR?#MPsiPG8Oa}X(`&QT_9v7*$^37yY;R=|;` zABLQRlx;>1xG(!_9*_!t#?vMaQL4Gr6d_r?>3IpEp%`n|h#PWMd4f&Rg(E?{%x5+Z z!T7LC`r8H{E?K8LI^h*yN6#j$%%9HT)f4f{4V@<4sK;-pFFyBVd+wbt zwCCUR?l$}E3)F*-GESsqoAs^p*lO#?wkQO^_BOn-j`WPuPuf<0D@i>EdyR+W2XOoX z`C%pDn}A3UkIYkcwhD~1p!!)_84`=F`b4EZWLOEPV#^8orG?ue+}^^NBEFWA)?`Xc*i-yFx#wkhp0 z`Hvm@Ae;70GfTCQNDN&RryjWDjP}?aNRM3P3vcJK-_EV9thwXMDYEYu2=?2~<|E{K z3zIn2*4EbA!pb6X+wH;q?a(ECB=#NfX}7o==Uu(@Vc%2!UwA6M>NZF5$x@CLrKQJt zNEAV+mNWmL`9!|Bt?{Y44qWHYlo|OwT>2Om)bA>)ZKdETd;#)!1!Zk zk~w_mcp)M*||cx_tw*xXhCpZfHvHb~sL zMjy1tTqeJGd98nC!IB4kz~UE-fIa9fK6~|Pq$zYNAlF3?^9s}ag+uw5L%u5BQ%s9- zQcU3Xkkw zAWxFa*?PT^io!@*FPnJBLF&JAmh$7cEnT|YKJk6Oy*>Bc--8j_YX^*P^+$XWN`TH; z*tU8$Q#us>8K#lP0h%z*g^0*|BO)ay&ez$LYM6Rwq=zGV+iM->=ikF3z$v;`J|iPt z#4BCVb=#>7P{^@Y;Uu0ys5&waAWoK!kt&=jigL+Y=c-fLGtt0V_7lZL`)GzDXqn#TIDQ>Q^750UZp5VnzsbkdYcv0wANXxQOy_V@ zgeR5J7>y9O3=~0>&LgxnjIUrS`A&O*qnP9|3c`WAuiHd@FqEc@v zlr_T3y(KF{?v)#0Sg&d6%z!0Wo0bPG!Zp9;k}i%iG%=N@Unkzfw)HYYRx}(VU+_%O zVNrB+(2NEb(nhOozU4g^DS+WM=W4mG02w5VcU=Gsb^Z8#o!@eFXbGZb*3hK#i+AN6 zMkXmadbXaY@|l$L2sR>o!&r{8N%tt4Ni`ujK{keQNf;Crs#Y@p_yIVP*gXMF*ErTF9Tt0}N=k0|6<;*KhE- zl5=@wqfIX?wQYRzcYVjp?bhqBwr_mx)po>y#5YKEQVe!lwQ8_B4m+H2I2mAjrLzx- zbjD6F3QnLZmrlm$Fo}cUJi7|n2Zq8;ho4T%&JpzqXGL#(D1J|N*WWUJGP3fBEU6RVqs*`#G!*jtfUgylS_=K*x zFBasXStgv1vP!ic=t2+Q4n~)!xwif06{c@p_~!Ao8#mguCpOyiFTA}y_4M0urf}rv zYg_U>j3BQ#`%o{Fjs)-R@cgqat_(J*CcNDeeLiohxO&qdo<0~b6wj)oG>ioRp_6SV zcpvI^eIDuOp{|py2a~0RLE9D6VmZz+)Y8xb9GY39%E|6c!O5)|&OVeCfzlt#b&{l`b)q`;Q9kybgxL21Ui?f?o#(-OWjB2C zk2=yWBIRH?C-yIu*A?Q2N5{zMX?yk7>uqOmKa&VH-m*hJs!z0r&i{c34bMQu+e8)J zW#FWr@)R`B=_7t%)#Ky^1C$On4~n3VZ=CtS%J0DW){?rQwx9Z`pKU++ zGe6aS>KA{${puIK(7yiax7wWt58CG5Zr&d9?KqvoJ>-9LYp1>b#;x|HuYSG#%4c6` zzxb(NYM*`O*V?^%ce5(p9V>j8^Jr_6_SpW}I60Yo0V(+~_-UKHwk5Sd5V=G!b#j~x zjU(P2`^YYBlN3%=x+30*WIyfkXmgVR&|%(!lGb^{kF)7nI7a?iY_Sjj-5Ps)XBS6n zCtLhK+S~y}@BvpB2KZeyWlCO1M<4hL%X{taF>TQ4)2&l7q@y@VqkJ+(Cz5asz2vWN z+-k?1Ak4%da$^s0p!os!oPC)$3urVq0#qp8!GCxdJL<2cvggd}AG`Lti3nOmYby5fY8 z)p?~4NO|?d^2GR=9T)F8f$lc{=8eo! 
z#QxiF$S*9cwz=h%Ha)-6PStOaIyqpV4EE z=EEE(-~4tkak019eDaB58cQH1XM;MNw`|C#t*W4&bFv)qK7iFu7|fG4Q~&7e)%WaB z;K0pzY_7IGiv_?bJDh|+;_HO+V(`@d4SQ(6=0vlHX+EHlZ^DUB#o5J3`XPDCpE{iA zv0tqGKESbGl-C7QTw;F?X`gt@H|meQ;96Vhgr;%6rMf`By0Em+7V&9?x!|HM_~V3*WhIP~~7d961;k<&^GcIaUS z+JSRIKe^lH!Q)e(`o(ttjr;8~{oGU6Z?s#t?_k%D^2VWhW}Wso$6UK$USIO~m|Csh z+NnanIMo?MS3Y=2xu6B2!H%D_gFnX&zR8}&0MNvGmT9w-hPUqAZqGb*qpd(|CxY!G z)6z)YoJHofo9}$byV|?n^Umx#cIFO#!Oclp z^;q$kMkM<~Tn-At+?Uhu^esJUsRs^UF`(0FrFTp}vj_ z4qWgZKZdh#^WeJcc@;xGX6q)+g#?AGi@vmt>TQ*yIaQvr+Mv>9<%%QMPF`Zq zuxqg)K1ibuO=xJ#wKxp>Q`c&cu;q|+*N7HAv19trIg2D?Sn%J;YuaxJX&(-_XRX%~IF)Q6+uSr$} zVcbm4Az}|8G|Con&%?+kX=L6=cZE#~^m~0K^U5OQ1J86KhCekq$X7~Lw+72f<4MF@ zgRQmg(Ob@;1GaWXdjwy<@`YF08(;lqyS{O`7KB6&krB#j-l|-+wd|@@>N8m<@yK~z z{E^PYl{`Rq^3_9h3plS4h~!5$%*tQpJK%Tv8u;dDqI6BWgMQO*T4k`|Uzsz63e+|1 zMMyw^ow+tH=~l_vkah#4%9TD_Pi5(FyjG9W6!Hm69Bo62gP#}SY1PFDEop<5RgZ_) zWj|{|+M{f9#(d-z4e4S#jBru*yboQ`+bndX|ETsm{+9~chdf%zSk`{@5Ic5qTnjex z&PL3qp+4~yEq4cWoT$B7MnBir7TS%QPq$~j>mvZ64cc?` zD+1AZaOT|6p*u9~wG+mle#`&S!~5-xS8uhQ-Tn65GdJ4Xo?LBjf7kOIFSNO9SEy?~ zV|G8u5_rM|oHVhYkWPpK@?bwhT^vPE^GG5oL0rY;NHR1D4SPsG-|1cWdhf}x#mHA@ z<*(?#xu?|`e8PLK{7C?>fe(M#m4Y;JobWxTepZljVLM9pk|p|NaxuWmD&FBJH^x%G z_ff&56^tx&7Dwt1ejN5p%C#*$A$=P8!LX7xjqKPbI?q}U=6zsyMnC9^Dg7=N0%smc zJ%r!c=|WjT4wa4U=xBfKPF)Zbe zi*vQm4FA*d(mcNO0^^!#`UF2q=*vh4`@3y_dz&(cZH_jquCCy}ue3eNJbLhmcAV6F zfz+|m*bS~BvvYFCv}vER)sNC9a!O%kT^f!Z{KmbuD)z0%RbSruq-Jxj)Deo_3QT0g ze8TGDm3z~}cLMjyR;7z%=ezNdv8rp}a1oyh0haO=FMQ7RgX zJdQ8c2@g*pAo5~dx8H?TEojIN9cGw@kLdf!5&bf@bHNRR{NjjXjR zCw7MRE#dE8xpJv3FTrE&g?OaB`iiM8GFyI|xZ>*~YmN)`GnCuni)V=_|B5EIWB5GE z?eIfP507#clxPcbLjLHhI-OKtLQCz91qR(MD=6(OrIPN_)VO}6nXhsZC?DHC^cZ~+ zr`qjHTT}f^K7VGPi|*7ouW(mxm4g}^TE5527edC1Ia6M^icnx#(B(|Tk+tNX`h*jh z<%QUFHQPYp%$IwJH$BCf<4|#-60;|^<7qR@6MSgufTv(OMy!Yb(gOJCU_Ct6 zP8V;p;1*cnVPJE3FMP4-cydu`VP#z>06To6F2_B#!Eer=;cG5lz1sfhzx(gC>C5yl z`o`Oj;`2LSF20h=c4bGe&SOL5Ke|-mV%iAmfV)H0P9LNz4C<&v;9eb2{zqBGAiK0k zL+KLu)xLTb=&hORGV!C$(j{~n^r$+$ta~rc(}a6|l>A%_se4jCCyJb%mOs%kF(Rc4 zhBa{Rb>=yngtmOz%HLT%&#ObaxcCD)1JrSeyT&?3*!AuIt-ZFN7h3fv110<$c-pp* zqHZVghH?FPn?fiHagp{x)5_D~n$*)P)LGmSn%8Fj-9PkwKa4TC>7XhDkPOTa41{G& zCe%Qdv+d(r29HJ;>8M;mpu%bZl|(0xOaaPzFYYO2RB+OvkV&b(0LEZO z##1MGP7qK9Sw{A}0V`ZfLp0GHjj{D+ubTXoVP#0HGe_<%Qf1!4^d5{a!)SWPJ;-q< zZBop5X<6+n-t_iskWyFSpMFsxBGdyg^Lv1kS!`2i-|LE1>a8YS>?^JtfaJ3d`EI8w z?=+_HUE_}-@mnfxNtcOAnv!%fD<+n)lNb*d$(%t^jF4@y#`Fi)>&g16qewzXk%aRj zjHEo5ir>i*g9HZEPQW?eOZt9x99 zd4>ZW8%{?jC5n&MRj>@wT-gi1qyb58z33bN3k!WKX&Rev4Ou(!LU3ZxNgH)2leo4& z^eZ|^79UDUsEnhnIW#rhdZnNFIqb~1o}u2r3Ad!7@xm=W%bS5`wV@7Sl*5r)LO(kz zw*k(BjgRlk2EVhmG5e26;5X$CD0f3 zH*aW!G8|ln#{5Yq(?b{KP5yJ8tk5A&ijGezv|bBWyK`y!;AiY6w956c2~=m*;*D%* zCu2|Ghr#k5(jbm;uO|h`qh$F#tk@OGYV#w5I)la~ui3@H_-aErzGu*XKHoQ!{x#uf z9T#M(kc;ysMVve+nJfHE7Pq>VMx(uD_eOm61AtVA#93ZI3m1o2MwzJ!&7%Vq0=L)R zc&$Brw9O!HzB+QzN_&bo$?rXFQL@gNMHetH@R4G+ zL%wAKE4or`wQWL%!qCl$7Y5KE;G3eE_>_mKwc0JI+%k1-eT{*zi!a<-+KJ%~lf3HR z5`*GA7(wr!dG48Z_wL>HwQqeZ3yXZiH8_!bb89O)FF&+F;^%-~9kS1k3c-H|@|Ka# z%1G(I`Kv~ECQ&xzfuBSW=`1eNz7X$Zy7tJy`=c!e>G)HM=;e)-_T-Zr?Qi~_ztI+# zUIfys?cl*TX!SumJ=l(}%}&)MgZ9JtXYlE4giUl@&xTcLcf1gATHoWn&(lEhx*eGZwF4nixW}GS&HYzVEx{7nE#{d z|Nlq%9x{<@yl_1C<1OWSo$y(kJ?wTMnXfJq7vbu+3EfU5Ogn(5PQsW3_AJfHwii9M zjecWxVl$DG)Ou{%Jea6Q%N%ZPxlGE8R+H8V41CfY5b!op$PBVmNTtG&5E(A}V>Yf6 zTRLg38XNeQsdMiS!%N@t8JQljg1d@^H2>yQ%C!) 
[GIT binary patch data (base85-encoded literal payload) elided: this part of the diff carries a binary file from the original commit and is not representable as reviewable text.]
zQ|&i@=XcxZKKpEY?!_;)YqxH;=U;r8^oQ;J@B5~K5qr@W`I-paI>8Ty` z3}3i+0fL>z=bwAAedvR~-ahneztJ|flmm=Ed1=cNPF*+I^{ZFm<3W4oZBMt0_gtcLv7NH&%^4gz;2d&xSBC2NPKJJ>sf@Wb z(eAEvF{0i!tQ*h6rDyT>hj_Bd!o+E^l*Q25=~&tdJ3$Wm%_7$>0+^>0R0E-6Mp;oi z^`-Kat*-mxqXn3_+Tq%x=; zQoDBR=4!ik;}(v93z{5UMD~E8gB#q);{cv%`vL$D<;}NnGT?`N=bLfJ;o8P(`}S}9 zj!a6sGe9|#W08G3m(s^K|6MTi;?)=1*4jq5@?`@nzn7uqlV(udmG>aE~^9J=78 zvUN#tBGG}%8`o~M4}S1pw}0_3f405hb#G{Iee)Z!KeWv=Q*8rW9E6^m=02T9I<5g{ zdm1oRKWJ)I)1lDGhw1s_StbZc>?aB2i5SeyPPa>!&bL#GXWKvjnV)Vu(DU@Um3Hym zg$yi92WgcFLNe%U*im(>V*M5Er(41~Ko}_c zcC;PTFY-;0_$fT~jmivCT3-F1ILN2{RNkGOQf}=q1vdGCpViH#jvWN2ad4wN3Z*VQ z@f^5zEGbrhhqlQ9vC`w3^of3JhdB$*fhFHy$pArWL6vz#OyTUC1kj*l4?PhlP;y1P ziwIoortO@Zop00g%WZ05sg2Jq0n^Wwl${>m+ir*J8*R+DTU;1O2d!4lf_|?HZae3+vK0N7mX53KOm9;>%R{e2(A3?2R0jC9X_TXllO9eIgf3(%Kc@yLYr%m$u%7PlR1?(w zja(<=q=Wp>&$I1||0DzOr_-Ln8u1sNyl|y@R;+K^t~+^G157T2Qr6b+Rb9pIcjsSt z;iW7xb1~Wd_dnPkdgQSzpqhnGZWHOY(+*lprmd(?3i%D?EasxFwjVWMR0{(Rvy*~t zB1qw9#E!?t5e==~)g=grn$naI{EhQ6(rZKo`7`)-{a$*Rp!|Jd8wb3^J836flmTfI zu4ND)0!w^gB`vyWpG17It-D(;2Hk|lJ`)W2_UQcM_bSVoG~pasut;bseK+VMY?Dtv zWM5yFe71KE+!d2+z)v2?+xf+%w7E`*o<4WBonAhLyp%l8(w8vk6F6Wb|1CFVTc`*S z>>n4J$SKuvJX4tJLFC7}*`FXqy@@4#i%t%NOB3}ti#&xAdZS-s({1_mO1pI5gYCfw zA8wEOnW%s?hP`_4;#IWpD_#}O%2fFjPbPaivuJA~CDZMH=2_Ep?$>1z3 z;(5K5Zbj8aer?2|l<+OmJBcY=&2Q;yVoA@LXPG2`zmc^73AAL6y&-R&aj4p%t1<^1 zam_9P?u3G@ZQQ=y*6{^hgzO}*{7??E^MN?T?@>$eDWS8^T#%Eo8jV%zQF1R%h07F2 z)xB&_*_}$4n&iHp#ZSM7(jxcn43##)ek*j7p60DB|FwlD%G?9Dp^W;Jqw&6x58Y=< z_w8q8;R!`%u~7KJJv2``+7#=febdJGH`$z+v0akxo_Fad8oJ-9&DPG&AV<@4PM}Y; z`Ft9W>*>iX&exZo#di65JQ?KTiyHhLNABDOL!VO}c2S0ldYqVn570%KEqrP#SKq|$ zlL*qpdgpC?%5jH_X`)i+EeUP5>E~KOW08XF7@|`42A4%RR-T-$Y!Qo*obZ|mdu{2NF z6dS(6r*?28-p8aZbK_ZThCpPO|2zkVw_FTIg|wS0-VnSxzEw}nG@M1W`=C{5#rHF1 z-35LKl}0P(m3Y+HWc3;JVe-_rhpSK&y6jtbs;k_~A9sJTU7I+y*rv&Hv~#n4@?)QF z8}uC>xO6YJ6)GYRwgt+8Hp^{d%kL6~IxzIvg^T`b&$Y+Kg}fl=0u;wI@`k6uc!F=Q z+t>8d64KUfnrG(G`Lth@EpXCGzpl^HL{62T$G`lbj}x2->CZ?9Z2|XYiLO(bjISC$ zzzIxw?0qs9yrN~57hCL^zl;Y{NBtMG2-nBzyKnDi2Po(oK*6Wr#BLC+E$Q|}x@4CZ zna7d7CN{|S!U`K*_-Y@~Khtn_49a5mp__{t^C=$e>E5Qc;;^lpU1@K9&pX=kLl0)* z-`EbaO6GyiaYWX@GH~3MI`|K-4~{i=3pAN{ znVxjQ;bG}@@zhL-S1-7&ev@^VpTtwX(8AP#e~DMP3Bvd2!DlJU_=>2wU_%HV-u660 zJhe+1oNMfe_6xnC^6Z~nXM=7N!oTm=PiEXvRFX61xMQo<<+sC zqv&ux83TZ-k9cd-NkcjXbPsvj!>7~#vL1d9U7Tz$)J1jLwq#??9Zg-ZHUmtO;J@tA zmTYdVhnFk(P8ttiM3@AIdhO?bT>R&!1;k?yxm;g&Om!~{i%y+#A?8BTO0-VvePlKL zpgsK|>McQIGbL~7lDd-xc)``aLiH21aS5JHkBo*N-f%=F2)5zHx5VarGW2JXHKZ-! zN&D{vz4Xl38u~2+CC+%^T6A`GwCx(`u3XeupMf*=s6H;Y);p>*+=s!YQZ~vD$U-*z zz_2^x?dlWB9QQPx_SdqiF5+MsK`BSX*+=ou{+emVir-aD&jW6jqi&d2MNd7GOsmZjckqBxr`QJ4Xb+@|!7{Cf^dM2?9Vn-~`Ya2z1G5f8WdG1{YjH6? 
z(K)B>v+dQ6P>(4ZZ_CuKMR-V5i{ybDVInvdh!D8AFZz-GH2pSTHrVkK#e4Cy(*6NI zbBUz0p3=Db?C2qJ_SeH9$`qE%#H_zBHFMwnZ2-ZJ4(q{ZgzH%*1^O&>& zYtkj~%tXTX@Gj|mp3HZg>zQY95l-D3f=XZ*2D{`HH0gLoCW#lj3I(Sh<7s~YGj2af z$i&k(H;>>fGJG{V*YO*|Qx2}fgKjqx?{jaA^BBf;egeQaj-+k%GG-m{ zcb9$;*ZG{02cqHv1N%vG-Y$MZ)mKah$6m*9h8(a}RShkQ!A|CZ90; zrm^`858;r_oWr@&Sy(_PgrFWi3r52;qrJ}kkeJZRs;3@iud~Qj+A8O(mpfehOu9%@ z|2YrIN1?+#nYV)Zc~*oSLdD59(^dUTLd}-EDbM(V>!e(-vq7UzY>q0cOZ71x$2b(+ z3dVbnp&kWcC}*C*Nj_6PSDeKYE_ttGO5R+PJd^zxt8VNN6llZ4z~`qxSA5-_h%dZz z81LWk%qlAv(p#D2LEH{E{Ab(cn_F#V zX+8rEQ{z+3?|l09xZhBpn5L0*fLaI4MmmkD1HxSMS1a`$=x{K+2EyzJIf%HAQSrRH zyMckyv2oDHH)4Dvxds!Zi{JiK_G{}d?w#->uMUxu$Lieff!`?E$lLns8#Hp((@rR7 zS1dVkugSE-xw+*xDDA*XQtA)?kLXO8HjcK&6WT1zEw)pqPiLFP)pfsZ?t~egJ>=(e zU;I+GOy8nGefIe;gyxFf6R&(_doPXLQQM-Sa}ut;@o8P+jH9_4;|k?m$FLJTMSf)w zXX7*Z%)i)YqCI5RWe=HZcBnm+QxqwUi9d)p8Gr5|kn$=~|V!e4{_%HFP>HytV)hw9h{ zlgQ$5#F0`SkdX{})qOg(+Gy~UE`D>yH_$WbVP}SBCU2hMeELnp_kY{Bx2M1E4ej$^ z_&hqX*{0#w6fi#XxzD!8AAY30=G9NaODF$J7HqT}Ow%p&EkG&Zn9AjtjT4f2AMM{lS6i<(j1BwnL#u4VvEemgL-fG|a{%?i{zRkKF z-H$BDd+gsdox_)2c)mURr7vN3ZnvNK@tTK^|MjQ;Y5SF5`;GR#_r9ww&o4*U-3D}% zPN+B90x^x;S}`jz1nVG1=*l;Z`C|CYZ!LM{c z#<4BCW9{;lYwdyi9%=K7v+YpbB&l!x@9k~0*%_`UfkS<=Fikvw;ed-}bO+eKoOz~< z)KUC2#4aWDi%|rZgNzv`{yn5F!!b%P+!JBce%=kY&Nbu z^yEu+FYh?`^`d@GoWE8W;C1}q&DYMuGuw011VIkZMdGiKZGitZL(eqSe?*TS{)G8Q zyz4={?nJ0$r^-<+I^uS;^M=6KG-Q}-hZD0ra?lEx*x_s?NW+-z?A>w|T7g40<`>>L5+gBsLa2@|h*-Ck%t%bang8z;OTqzhQ0}|6zHh zO*qMcy~q}7bh4a0-SvBzTWf91c2Aq+E&v)TX)S!+rWcA7W|sSR@cz4V6q?12YuuT8_@n3M395B-5BO*3HM7b2K3MC1W9(YMpD zi;w3(qzfc$t9WMlMH3HT1GoG?2OTW0>aQPWhqKD9^gWcE)xC92+X&wBpp-tT(XyV@gebn?)_Lnrq#nGQXrn*%sv z?E(tXjIku15eg_`17yzWpmnj*un3A`{@ZUEW;%|X~XX{gMFyo|h zcCcAl&DQAAi}AK{=5%}H;a9ZBpLo2TI(N1$EG@>jz}lovevZgpA;jH2ciz%ZbBR2H zf$6#)jnT%c6TZ#n;G=ZaNATO`E+%oH((idrj=3Wm_dIu?=HQTi!oY7Nh;j#BLRV>9 zL3(Q9z?(eMAu=+=h3EW*9_R)fp?b=RuzIin)0Cd(T;$5NbkwdWH?e;uB$coJE0|^o zxW;r}}R<@M`V+RdBvn>IJw>aE*t3x7=c@_X;uk&D8Gm4(ca zxnq0M?aN6U<8>xcLX&}RyZU-*B;8Qhjhysp4oq`e5Nn`krvX@g)N@Djg$6; zLtXD3_-3PaNIAn>a}bR>Xt%PUG$h4olv&V4dnI4(uk^uXzmw`)mfFbBGNg2ZyB4hoyiX)MC7{Ki14vN8?Iy5Bip1Ld?KQ9o2Fl&6oG&}qDZ!(U9AAp5`2b9BKP0C?sXYf0aXYH(?=IM)tYG(|}r2Ooswvr~PH%>!lba*kg^xq*}Rvik2CzoDX$y;QyKLnE$_ zX`MuM={ZN;*16yKsCbYATcq7v~q+{M>XqfAMU4_jiAD8zW9R z-QV46<1VIz&mvhl)Qw%zeiZ8L~3s zQP(b>mfG%$p=czZGC$Ng^+q?$U+=par$1^x3%sC#?MYrXQ2sJbTK7~6I?6L>Z1L!1 z-r9xs$38oJM-KO~y$IaUHo2&*Y^Sz9lYm^iQ>3LyxiWLg8q-2>>w=b4(>wUWDHcSbAkKEy_E}!0{#;w)Q9dEzS)DFC z&FK+aV>$j&uCmc7ZT9=`HL003_s6WVTd<|`eyrsyZSEcVqZ*YWiHDVq8NgT586;x@>)aom^QTNbz} zf8=u!Tx5^&kK-#yBUef9(V9IY?e zNgv4cwLP0SzY(Dam5g?yP`!wEhzEi2cSg~o5TF0Ri(P7ar`XD zyymGgV(g6nTs>$#oKOCQ&9Iy2?ZJ0$fo+vrt4J z<`w@~(PUS)PSPrPgt1=%1G4V=jM ziLy*Hi393DYUCQDurRmOHaAw=7r*?O_PVED-9G%$KWg`!KTSh3RtCb3AO^yEPvR)L zwe5&B{AF;M!xi4jln$DmP4&feTU&HyFgVJ$PR7BJ1`r%=OrTv2dbk3q26s)XtY!jv zIGp^qk4(FKm3BL`8oclJ}JPP44ubwC4n+c+3HiTnEw+fC#fpypdzJk#bDe4Akc zV|SQsNgsUZ!3+-it)N-AFu!`O?csgfF}rg0Mtkw)%kB2<+wHmMpKs58@r&f$Z_B{= z^FR25?aYOT+t~Jvw&fN#Iwh`D)e*u*Rv+d`8@fX`mYtWFi+mty= z+S$dacHaXJw#OfOto_)J|D*QCr{7w|5%!?%=7y7sc3K>4f|kIvo*ge6r7T?VdWcLz zA+E@eb<<9n*T%w05$RKt88s=C!FLW<5vw(iKK6?C-QV#Y?Q@_0eEa>6|85p>tRaWH zJFD#tuYO(LDsvKcePbObXTMf<(z);(hc=MP1B5^xY#58~wdJU|)QZr;m@^msP@a*v z$gwrCKH{G`aGgn6a{AW01H}{Ql5e(qIDnN&cI2vTTjyUdse-&JG8Jzv&)*@E1f};G z;NYFmN8ZrV$iFK30U6Zn2}Egfh6 zGDtC$uhS!jo~^qfgOmq*SrDYtu728CeE<93-_}=Mn6s6M194JMSC*HXZx}uQ@{3t? 
z^jCi9ue1+;_%{Q~;I_C63v$c?}X##|L?4{kZjOLr%gS=Vm{2EWEX5`2QuXt7 zgx&$|pE{WQ_|n(4t$ zPdI7#8R~M3|K0fX@U`N5@Ht0_^3~G*3G?@Q^g{#gW&I#Go|U~+fY`DF86a@7-*z3l zH#W7{d}A7E=WtMue0Ex}Z%%U6H|F-x739ySj$&7t)J2XkA|5!d`)vbI_Eif%2 z`j@QO&q?fw-^$O{thP@U;Z8=_r|s>vVW*iL0kkn})Xd^?n^`{F#+_WA_l+2LQK5dj zw6%L1t!=Nj!;M>zTHn1U_$@2n=Py)Z8?2XYRD8%@h87;g%kPBnLcao=#vdi6oWiwt zF01{qGb;UT_w_NW6Y1j4T7aRfWnkE}9KuNllOq%E22c}UeZks}*z%;6Wz$)uz0bsv zZysQ5E!(vEJGLLTp7P#_N86x>?|-Ph;SH~EPrm9^ZD(t@t*>o1-=aOWvQ+kd3I@o%cX33JDV8+3(e8`}Zu%K5EE*crEk90ot?H{t`-sizqeWGR# z%u$#8DLaBE1csU9UEmFqTf!mz2nXP&SfbLm16OHj$N(jDk36G5%7*Fkxe3zA|9nrD z{7%xjmA^RD+bT6#Clq;F5->-)y#+~RzE4kzrLw2WQ>(@Dzc0f+pOoSyZ0l&vhH04M94 z?>2Lv1rzGBvXr#?=%qK(X>8mD4%Tf*V;zMre^ZGHC+W)LHZ_aCNxtw!o6Wzu%&Sd^ zEhA6mGpLiLmtL12S}<$CM!TZjp({;lYY_Wb{3DjlbVA}`+M};oNPvp^xf2B(du?x?&e;MsBhI-yj88DRmvmI>pm`{ zY$A6A|&tLx$ea(&%@-O@#-%yr5U*t9P)JEw$(yxHObMy9n!~uLrudio* z2e!3$@?06L{AHi?mv`w)$q)OKpqzAj(8a|v+I2xryKoxW0zL4t4C`n~{9rp==;ER< z$st#jZzpxtL(4XZqdd>H|KxX))rtB{!V_$wU3d>=s?PeS*v%RgL|$A74R8l|_qMg& z@;aEL-lCIT6y!D6E@++1L=bq9&c2q&(lR>% zAOmCCWJ;QtnP@YUGvKz9x5ZrupskQ5eo91M?i}nyE@!4gD011h=yMc}gA}swOAq{V zt$$o~37Y2MxpCsF9QUA_`8@up|LOs`wBL#Nw}yi5xqPYs(Rk zk{jk#)+T-`#(|+nFejb}l9~t|5X5}36 zGoHm@zC|oN7cX^LQP(1Kwi^(~wsQ;yJ60sxX+M=I^5xSQ;Gi!OTLT~TeY92D6U%dv zwF^j;&n(gcMR%W!F8~}vwQH7NZPp?D4m~KN=mGyyH#rChu21%Xi&qs2&H#6yv$-M* zL;AFh-0Q*<$qza#T{Ui@HP`$*rlj9kG_9mrRB1gqS1|>c=4si&Yiof11|IRL53!?NPJ!k4 zV7+{WtPvmnf|S0tXyTDl4ynn4q~=-e4ghrGqpQ#&3*+UT_UOd9F!zzyzz{aqC*`W1 z5if>m*tk`1gBx-_w6_@x=dpMRH!m_#j<6(GCjHbY>|Dltpn*Pa7O-JUT?jUgT~$2g zjWo9{_eGlcP~@$`Z6~z>+VuJ^xP^BAC9)+wqQ4X9_YQ5ikFf z!Ea@f=i}HHKm8&MAMS&{ZF1Tgbj9F;IP*pp4cZ6rvTo7I@I({=jzSjSJSts(ZYUV$ z^SbW!IWs{FT=lm4KI&MsBeW@)T^3BIXqT?U1d`BV#?uEPp2M_R@FouOk3`Ww4#!FL z59Gtn)^^6U+mVZRr|DN6(_glD9%JlhvJA4^R=Y&ZGa}9lr>4di?lrMRT20k#}|LIxe9?|LX@7SJ49m&gixWTe{qOq#87@g&nPX-{{|#|dXdV95 zwZc))M*_lqI$IH(F5tu^4T|A`QhH7$0=Jylf?3XD#a9EAZ)^0liLbn6RK=zOMUdts zQB^va;uxww!lEvq{*8@5`Ksu2s6TCxT#YvRUi`ipHux+AjOdHL`=ac5GM*y z=!uje-g;eEC$1}e#r<$LK#>9d0M9ticV!usV~!yeX~?T$4xvyhzM05$1*dY9fENCq zs~i~~Xca%y++^kruLwyS<80ze_bfaQ8+z}N5x4yiWby%H zc%wn#mo~?t<6CJBb${m}`Z*KIP$(GQi=%OMwt`MjX2t*3R-2mBaic+)bIa4owuQm? 
z_V4(P_QFe_gQPlHJMHx1S@Jo!HPeVaO5>-l@XVD-zKy@Rts%rYot@A3 zc(?8Rkk2uv3jx0L{BwDe?DpDfyRo*><`*X01NT4NzU3R=*S_Jq|HIU8Z|g>Ib5K(UIrXQ0 z86X8$Y2?6Zm(`NVYFI$1jI2%}_YQy=Egz+&%&YXuLpcdHpmBb5&WFapPE2jd=pqPL z(vG)l*KTC%t!KaZV*95*{V&?*udWl1vrdD4;huBt;YS{54?g%<`^I;^r~TLe?cd8F zqrrh;?U1mX+%7v-WxK#Yhak=h6*W!jl3pD*X^fH0*w|zHr|j!^_`N|V%bnnLEBQFl z7`eQGf1Gx+B9T@nYKlPn$dTgrw{>ML-Rm3xshxtDITc3VYTfXm}wtag&wt04L zHl0b|Vzl9@PVN{q_xqO)Z0W35Bf5t}6g`pH306S7y&v$6k5haq+H1b8M>$D(`zr&9 zE_|7VMhW;n_tL{q&ukz|ccUb5sdPS_Jb;LD$s>1F9$c*gz0!eV>LZ}{F`qi#5a(8xh-9ajf!q)+h=dnpXj1ekF; z;x+IMt~OA_t#7ThZ}|Fm3NmeLb3L8KnYoF`&cwuYTV9%POG|Dg=T?%F$ogjc zAAjt}+G}6;n)djkkHG6O>N%4)DC~UHz;Edk9E<^THF3yCS#p4y4|G)zLJhdZIFb3MnOC?r{w!HcgFh!1f)~ zc@Vvct(>I&ouYlgo*#D_kZ(JZ%0783jsk3F_|wj&`rxBoBvsp|JUOf7OnMHt;73{M z(A4<=o9|9Q+7QckVli)f`(^=lB6f{9{d}uxsbxPY+9NbU1JPY497vTnwRU#4Q%~D6 zj8>bRr`?%fXwwTTjvP}5fRSh4&n0bdwf)W2cDS+L_IEck7$&Y`d4q$$p(7nV2S@4Z zAXaE^hg*_R7h!6%v&a#go%oM!0max#>lqpoXI|?D+-w`J4F|8(mwNkl#l+NM>McEN zk0o!miN_X-4eS6m>EVHIP&=TIK?Pt)`98Rw3E%E}7%0o@@XM${Ooc0M<+qgoSTZs6 z#|Bxz70kp!7VB{qem-@iEes*`e6Z-Ci`!YkW*^@4o0wYVbfqKo?GtCP9Ph7%LY!W6QY-Mc7%3Rty$ zpp!(Nt8XB3ku~@(F40GHad~m6t*orj-Z?>BZ}v%e-|iK0@zc*Wz6MO8kvNjy-3Qc5 z)9}ce$KRWr8q2r-U7YCRxYK9Owl}@$P3_V{4@CZD^$vcyh)9nRjxzo1m~AZ55V*aM z;{>mMj0+0aG_G^2>D3wpnfeZpdU#a-Y1@=zjTcrKznVcb#jaJg0Eb1R;H~G_zBZ86vCqe z@Ngb( z&ov-hI%Mc)b*bRVAMq#{uqb@;UOzB$DctyX_*l6wIMFrgB9TNZ>5wb!Q6>pJ^QS&K z=3_o>ZI>nM8!qsmFGvtO^N6N2wBKM^>ZoUNRbCCYUC~o)s`gQMS{fl(n7COU*o6;% zuX%H0E9E$FX_?y7^uuXSj3=R0gva*TxCSDW&(3*S2&L`tOx;{OH#sv$c`h;lS7?2B zSnZ_4FzLq<7e8A$0+#X-S%Xgwbh?$iw!(#Ewp-$`OZyK#_+AwM~n)^xo;rn7K}BfU|+KtFOHbk!>0K&!a#0!wa6pbk!==hH?muz~hzqoI%br*39Y-^JVdKIoev zlUcMYeqPYUX)c0aUYx_0&d}#O)h^z9u3dQOq4v--Z)pdsxAA$m+7#`m{;5c1mrd%R zei@_>RU_CQS#$70@8oo_O}z%(!(ZwoA$%qUa7eZ3`my4jaSiRX6_z&srSA3SFV!1( zS-z^l`}o12KnW_N>eX+GP8wiGd$J!(d3uzJO=McVI~OOj@XyR5GQ~=ctJc z?(z3*Popz=fll7(FKd5GUW;cSqaKBymR|MB-xo@)g)!T39a`3=R>eh{Ba>dNCvYt)^>w?6q;a^z%iwp&+LsYg^W z^{=#s*nZMFIq4H&CynhXd6Wamk#SNE`6x}4a!a*cUt6b6$T20tdwrsM!Ay8NyJcVc zXQG6az9uq}d;7-3DG&8Me`Nq)1KLhxTu`KyZ5JS$e(($NiS*mMpExo2&yHiJU63}N z_rVkFh#$2XmIGb<97e2yHcI%BPift=TMHrZK-zl-7L16jICf{+!1PFZ?tz#7zf>|o z;yo;f!zbDYKjA=Ze7WdJ`G$V_R{za;o86sFns;~Iz@NK)llu*LdzjsRMFnD#kA6<@ z9e4|VX{)%6O(k8%AH^}53YW6yP=K=BP*Z?X|s~T3}dj z{!PO_H~^3F2DqYo#?*{XEybq2`%AN@gY12zRjqcax<43G!H?4u>(r z`53PEonK=wPW;FJhi}XRfKkH+^|(wjjZVr<2`aB~BEuQQErqT^VxY)W+q0K28cbtH zqb7VXbQYcm)fn|M@=T*a#T1SVqH$-%V{nAW;dROtij^m>$|4^`)Y6uq$XJPtrG^_^ z4SxbPjd_wJ*QE>=vM;|o^e3U8yT*$4Feh$ zPbl0f40Y~lxh$Luo~U{+O7gLu)xBG}V<%FU4j}4J2ii9>*4wF-Q+ZqWPksNNYd`fb z{(o)t)@s|v`EYQ;w{p*&JJ&YpWNd6~)6lqOC-p?Ob!aAaG_5dYS0_!Qq~6moaiOlN zG}W1^XOTP3F)-j>`aFSC1kPN?Q3lt2&&8e?G2c$gw+t~{I#A~KO#;8CwL!dExV^L2 zZm(`-qJN7{(IgJ++`>Yf=UX?o+RdA{+v@r{&Mb}yj@J6xM!R+Udb@Gsa=U%?dRyJR z-ClU{rFQN5^-TW1bos^h#b=+#IRgRe{MuK)w*8I&>hFS-&cJ#*q@h(Bthe-%r^>Hy zf;do8&Z&cr)|+&x&>&afol^(~{W!c#$g|Ev=!34&cQ_F37aiTqN??df-zvsK!5C5&dO}vE)%YmSc(BDZ* zVJ4bGG%`L+M0XbCX%Y;UJ<_I=#<^^-{H<$`07)Va0N819Ak9Gub<)Q(bBpO*C{k~K z&o{I$eCD(5lYj8>3`YI_CqC7_?OVU8J@DAWZENjTa}{jfZUe7O1mMucF@tV8&^ppZ zKb%XP>>bKdPof8r$tp86PdlJab_Dqy@^(G)$V3d~Oyh96`qVd9biBh>o_BaQmQ|<9 zXIY~O*a2>`kS1g#h=#PyQBOyqaFm|XvEY^y<5T#Nzp{^VK$~iwuA4O|jfT)+o1L9% zSCIAFt4@g2V4>gWR1eJ~uHl~#0T}tz9uM$sAC*0~Ve<`sJAUtZ@4K-hEA95po1__Q zi;K&o8*A5ZTxt8z{{Q)3{^#wBpL;=M+x&uW(jR7E!%1^_>vv<*v7jthq&m3n;EA%4 z!GHMT0KEF;B*6^li|5a$6M60GZTP!M=V_z;+^_snd)G7XY_ERu@wT)$->#y^w{NXy z(m=;KPKpJOrzU0ZzaPH}cL)v=w z_D>W{Z|HcOsPic9#93rUu)X)-QlDjX?J-<;{HhZCC-OwN`@Wa|Ck?&+L!Nw-v)`XQ z44fmM(iEGiEl>{gJ@kd5ghubs&c+tJ0c0xWbx{Y(e=a2NgEab;J&gK7sy+D66E*(rRNsTto;q&+d8 zx&RmddAyCR{l=GmuH 
zyhEqgfv_zO{HqLzqT{i{horN8(KcIuaI)@Yy9;N12Cs_*P8v=(-)6{yKah7YMEb^u zbTQggy@8rRW(NbXJLdLn(fD40Q*BmYLSKnsxla6~e)}RJ{h<+jl2+m%oZ!b#I+>j7 zdKi$QPLL*TinyE4LX7t!{G^s1rOV~(6-?ynXfgx;d8^dHFs@TS6Oh(+FyEU8&+O{7 z4WDfb`15VmeRn@0tv-Z<5bA^VTv<8O{8shu?m=5yUrYXV{M+lR*MhrS8P813WK!3n zw2uxbNmpejzBACOzQ#GxQGLj=fh0;5y(Vqs*3zXHc?810oK#kQ=8-`SE>w3D`1U_J z3Le47!s|uUwP`$SGvuvw_o!r9EN8m~-IUYpT9ab(G)Lr^=hE3|xIip$lZN|4X^C{3o{@1(wOZ%0B?o(oFK|IaqAyADh`p$;#R#Vj(OnGG-Gi#2gD*#)_~ zz?z+(fe+JJq^Ext!cf=*^zVaD=|@n`G(3|Rw*6v%oBqijeG)%qr5uREG&1kvX6ygp zgAcZ+-~8q_ySNZvBK{%p=4dPRTb)pM7XkIrb3TuN?5kJes{Zu1lZeU$uM?fMV4`$O zxlbQoeFi6W#2{6LFAJrCa;)qqcd@1LtaQT#HqyKX`orfe%(L#Mkq+{jv)4Szn~uh% zB&!N1foIY7#q<&7S^8c+wJs4Xb8sGfP;Y@4*a};h1?_-(0FiRmHd1uzmhVPtpLAHo zk$Pw0A!Uer`MbyjU}i!;C$Q$*;?fdy1ULGeS@0R0D4n{iaD{)BDbMpJC#ge^0WE{| z5RP&>n%n{p?Q5R^FjPAN9CwPV$Mi zE3{N+wNqr)$5ju^XF7Mq)2<52#RbCLrH?Eh-B!O(9;mNQ{5be$yXaQ?^5K9s-EWF& zf88leSjxVG=-Fupe8w|jsc&vS2$`VW2OP4Eu28=$M2pN(m+8`1X(Am`7gumos&(Ah zq&-K578e%V9__n&SvppWj}P{=J9}+@ei2{XMMrztbqva&v)VA_$#FDbf=k{awEoJf za;*-8!=9mOjxca^7hazX@S~?AS&o7+q)Yk}M-q4#PM%Me+2ai-ukQ?LKRBnY2wdAB zWzNZ4?T&cp52!mPrj9BsvSy9U5^bXB*e9OZcbg^sqi=bpefqb5v;D!xFSn&-OWUJ;rmVa%&i|zTkmfeUQWocZeF|tVuG-Sbu09XJ+)f}l!L7_1 zlLgA!QSfrnNASnK=hH|2g9qX;>#m6||Q3j#( zR$C}mq*F)LFYd%SHi`5;F81={7VhZfn3H8DOrRY4h~!S$hTexiLRO`@TkX0iBA=%) zp1kVcc-pVug=*;xYSeu`lYNyUEwNvNMwx_=S9c}5shuXpF zjW&s%j2-C*tLMsulk?gT(FQLo0CUQo6ynhrWyESpO6a3cChy$3V{BYHaqXM=<`*u! z1xoV9UiS$N>qy=9@V6{OUrAZ?6HXFP9Hd7k6`&KR*a|D?qJZp_$#ZF;sFW`WEkv90 z)83gMU9ZIff{k5SYJP&k|mj*P2IL^Z6 zneka@+XMJg%j7YyybDg~mKg&shd%ES^wE9}WBk%Sc8}7@C|%5LTW};GuJv)1du_A} zF#Wc^JGf{EBN(B3$xZCPV5cWab!(NX?PfAkd9XcPICZMc%`YLt z^c(Si^rf>?6J<$?jMo7x?U@I*NqP!56www?A1S@Bd|{Kc4}jqyT`o*n_oFjM_y%k; za9?sG7jrLs=r8}mQhC^qWaozvzaVV#hw|kI&uoY5?RhhchhmpHK;L&cD4j3OhDCXq%Iyd4Od_@%eNUN>%T)L1Xu^IB$y7dsQkfz(Y@Q6#zb}a-e z9{N0dr9dg<{T_U)T`c$WvW$zpH>tO}E-Kc&{IbH=g;ClVX)Ox!!M=_4N+RouY${ii z$f{$o>V-R{xb1&@2HO?ef8?ijcLU>LPR2SrhgWqZQ2*42_-a3j9-)cprIYu_OtmdW zAD^XXXe_A2^S}W3x)g@}hN@%AFdt7;_j=Rb#2m7UBkh{{7N9OwLn7Y zK=+Nq9!R_G_0_gZzslWNAzEOMW7BP8eMw8XPo09Fesb9;@yyE+z@7}^9N-ZhC7tMP zY;Q@fwn@3;_?p|mcUQH){$u|*xbEM)(GIWyc!9+)Xdj`X{?#6HADUPP^%N?$t4 zSZLA{7fq2)%Jv?^7RgtltVgFU>8vVgyo-Zjvby714O8~zlkFY!5qBZWV`!Oi{k-&I z8Nf+ae<)Xd;hCSO)bFr8upj8+>byt`ICplg$$#5jMZ-l&g0TH}EXiE})x80y)IglE zRVq67R@=6{%I-8U(LgJXs;_Z{ogvR`myF{u3wP8`d&+OmLkPe8gG6Q{$%_+z<O^E`k^2WAJxP^=~2cgdm2&W1_dFn&mAlho~5Te zl{bb10UBLXhWC|_@_-d#6(3`&vZP2E9WO%~L+?Ek6@dwHA}u3A_4GZ6bHZKm3C8)K zXH^f+Ct>BtUB&g#!Bt~gX@`qCn|!#w8z;l447tT~lNdKjduMT@^l=~)I_#WAA#WZB zUdMYVUo-(Bk9!$aE)QUIhhM??!lkD*B#FdVS_zQ{fhLJ*Js;<*qikI|KAurl$&4tJ zoKUv$aa5&+56EQG6Q}|hcwEP7xJq|<23AtoRx>H zRPwD@o@anzYL3R;Z|)vpxDMKl8&2?V#A(cS;da!qwT}L^9h>a)*KgITv}|8KK4+%|G`Jv_kPEB zwZHh6e>gA<4({k!s55yxgE}dS`!ohM=*KPm?uJ?%t&_GZgL&cUpkL&3A}Fg9ZD4?5 z9_y49Xm&_Qq+IBj&CSkbAj}SjgEvxR6DM~ZNBA9dbZ=h2*}nAL7u&5{eqVO2z5m_s zZU>}YURcVcr?^onIN?|W*DVaQs34A#G^|Mi8$uD6&dHN{bUK2IvKo=BID_%o`iBSB z&w&XC*&O6sm|x5Qw=|P32I0!@;;}(k#p9CsUcPZbhjUqgdInC~J^y`Z?t#~7EllI> zU>gfg)yEJ%P!H2|XBI-FU5bwJ=OTt3Y{BhYw-OiU!Zg%PItt5t(zDkixDcmvSrh5- z510o!MsDLMxtmK+Qte#@G!0?I&scpseCfrd3Q2IeoN3fI;~EYXXyFs zT#FmL;2d17gQ4f^>3bON?~eYOp(pI{?#8Ewp6;&ms2nd&9){7wAygekA#H%X>)~Yn zeta!_dN7S)NGHru+{rM+QJ!?y4$y*WMi3u5+1hFCy8}jaINUbUfgielWYT_P7p8nm zb`IH}p;PV-DGuVmlelbLX^Wx=!ZeN_WE`QVTy%%jC6nkz+QA_`x~OhMSBP`?$^pxC zGzrQoYE`z;LM)`9@_UcKZR+_I2i+6j5={p^AT zMLv%l7~9@xyBiys{NHm202f5i;ZhE3!pr`GeUTctbKBue@JMFrs~r{38npCNA=ZJj zgBvbhkno}({A?Ye4rl(UhX+1}XYkd_m`EOMHT6xKg|C+Zkb$P!ru}pP9Xz1E+J5j! 
zyWi82KWzd?)*ytkQ02~h?xwgDQ8$){oqM^OMr0sL;C2^k1mn|FWVwSR%%8c@)(0W^R$zcTlQaS@o&@O3% zv(KcBH_Ea}i+<9p(s`7<%O%7pxRovwBq|UwotIKAZvjCI2MrRSUk+h-3k{_uR1G`_ z)TO0#4UL^7947nqHQa?pA1ym2ARjK2$RG*)wtbpbPLL7#>$aRT99c{fSrWIFExXfq z=FWjk4+sAyp|O+c@f-CYfVFsPIkG3ceZ$LI_-4J39N?}26F59^lr8)3z4v1Ct>8WE zjsqJ`wl6QAN}U$)37>xBo7yX1^He*1_AEaCR@*{H<#FwxAx|xvc6p}0MXe7J5`(yl zwv|CAy+Ti53Rf9$QGjVeck1qfhit7#x(uu#9~qQXm-Y2H^fBDY$Y)tZ6d3}HbmA!4 zGOlzhvLP~+EFxocB78=kLKgjpQt2Ndqoz{_)qGNxpFJ zSLIrU-*GLOAm6U#+3q>cP<4wh3~aB(#hoxZeGK+{jPtp2>4F;<-MXuVzM}Z{zK!|) zw!Uu**={+hZ+jpvJDGS`5AOR%_31F%*F(ON6G}WU{Ub35_cpUv>YuLSZT`4Ul;VC({r{w>?2W-<56@ zzE(Urd6cIS4QoiSVA-qgflJ8_b%CBpZQ-SU6CW#q9F>eXVc_BqWW@F!Ie5!6Pq*Lv z<)3R4%G?gN9@`umk~eZ1+3?=-K*Q$I3uLku@|1kXrtq)t;X%>V(J?xG$FVjyKZ8EeN1mT<=kGn!W)^4LYv28zHa1UtbnAK>$H#Gz zi#}%-N`QxWT2_tjdtGvoNq-r|KZO@*bMParP49#TI2jk8+Z_5GDlhkrW2(bhEbEI! zJhOl7_+os~!JpgRB942vUe9y%HlIoe<@DYiU&5YHuHK1nA7)JK0G)=~=`&ECexp8N z78y%#V#KXvBee;?YBHG{8OSvj9sG%hktWsePPy_gKCd!jIfHBjufmRkiLXOmo2E#S zrwL}w*}Q@Qc8z}(?TjqAl&KsgKjmTzEjJ|vE}7xAI~yL2O=bK*`tu#$D zneG0~FMj}}kGSQ|F|;H6MCa89ZNS3vQa=0Sf=SW04RN<8cQa5o@~IHfkU!uUeGoL) zS+qhyj`#XvnR-MO3T6g#YCz*D^EH-f- zU&;R8Ea&nkT+B{i5`Oz~leDy-6Q4jIhH`Do)wzrz`63Fqx`4y}Nc1|1f@<*3oR~V2 z`jpHV{3biKs2f7)ECURrHJJ;cOme(qhg4QZ%P-3=e!w;W#z>hd;*`(U?bIgeaI}Lp}5S$Soi1_2YJ(v zA>KZ$`QrQZ=i;mlvAm>{Ce~|!Wgmt3)Ya!+=kUMkZ~NmRp2ye2J-CGM(p^JGY>&8_gRDz+2@gM40hGUn1>uYTbpfj{Z`xFsKq$5D6KCg*~V)7#m9_b zRPAW=#JdV9Z{TxTT9eEBvcZm*>L(xC7n@sd)A;m6A<0B?B<5 zXRiIp1;Kdg>7qX1Mca1o=>$&H}mMY+Gr@JgxMWS*gs#{SXJAq6?mFVNQcZb2ClYaC!WJ?z(Xb15Fzu#SWFhYS}-6CjkjsdUzHMOIl$fzM$nEeK&sNlLn!`w`WPH*oj9L( zRliD2zF}ib91M=g3J*FxfRPXw6}%!vm8r0pmTMy^HEC*qF{u(KoWjp%($QOS>7abMRMz?9)1aBTfaSAz%|cflEF4&w8;1Uz8!4GkhqPhH~N2`4xh z&$$u?O%=#;3l|^RbcASJS6tGHPp(Z<7dDKQ$7i7me>SoJ9`0<=k~c-07@Xk0DTFWY zt(V5#PDv&hO=tSZ0Y@MVGLf7d!tvgR-lu*(6aV2pWmfqy=fs8W;ujj4PTHDJSmSMG zb~;XY0`+rJXbR))py`FP7upgA{nlr{+}{4J-`;-z-+ZKfhUvuU*B2##@$u@Gsu zbBhtP%wZ*^@$xU%1Q!`7l0GeZSWodf=355mV|<|kjkh$7vqyXufVgUm)OI*rnbf~& zdH{@Z93ttlvoqGVwp~%U*VfmzF?Q;u0}yE+F?feKOKz(~M;KidLsy{Qx_!01^x{kH zg%@9JFJHgbu3Wp^Zrr-jR@c|si}PuAqq5!} zdhq`C>L*^+CYIp8gVfMVER-}miQ=dOnF$I&>xA3+aBKQ>f=sJ^q)hXXOgbuC103U{ ztGO%qs+)L6XXw1l&dz7hYNUsN>s3ytZh)V~4uF~%_w6(859;vR?^9~`rG=_tdT}nE z*Abb?wH-L{3{QcLJ*zV9$XgaP4P(H0z{9w}B~H3Ki0**B1Fbi4#9icLc`@a{$eUx@ zFy*b64Se-D_2+Q1x!#m@d#A~^xv}2f`1;o&TlZ$tUsT)@ZhCgCJ^$R7+F$#TA1zcs zVhY&V=3d1_=Cvbk;ptYfr%x}pl~aq*i86sb=O;>VUKbZ<+YFtc<;4}^k8p4|GjOl| zxINmb<;868Y^Oq;Ze6?De*L#T(!Tdke_y-j;`w&=%&B(m>dn-@Xi6In-paWX4$4C8 zigt--%5%s`$mD>;^O|_y&X5OlQ&a89#~*LM^RbV$-}}g~wkKZwx_0s6rQl{KMEfFs z*=mn+)Nce5ATrMZx#CNBVn>NTL-6AqJ{mOnCOib+S>&B{;v$jD|k&K&pe5L-B%dS@-6fl4)PYBCg+?>evb4%PLw~?qd)$4 zhTtwd6MGDGwT|M|kAxFv(+9p2&pqu|3j_Wh zUiSM=*SqnDc4^PKblcV=2SAZ!{X-`V_jlHj>m9OSvr{+ii+0{!0_M=KS<+UUuB7P~ zq@CcpzO$K2{>%8;8Ljq%Nb9e^CrauHN%f!Xv`Wql5+madHrn19+}eNIZ^};NtAJ;A z9WcMeI56hGs|^z|X-};iIA?JV=!|1`#A}lB9Q<;SR@$q-V{=n&dSQ_^k2Xi29-TYX z)@`pjvegcDH`<2pLHBqEi0&hzR~Q00c@<3 zDrIsm~@Y3xF9guL4(k%+zwZ+A&pZNG6 zW+$3^?>pBXxc^c+b7rN@FSy07+mb@d(Az;Ws{ao-NOQq#`!qET8zi}fyMsK{$pc|EwuJSlKK$M5{@!oz{Xek{?4;`hO=}nf9 zdxFX{6cPV`^2W85AZ1?Ps5;oGMFI9lGT{k5YjNy=1C`Lf)gSX)o5GWC*^-_@rkw19 zM)JW-(chtb^Hu$WKnAs){LigrxljhCo}$e^LmMssGME6r-PuQ)_}uiqQ9e%Fd3t5J zz4N{AXlF0n(?0vhpKdeCEpo04dFJfd_T;OdYOj6coA4X_PB?Oj&TXNewvF=Kc7J@@ zNqKz$`|`)Uv!Kz~0WqO97e3Dd0te`ghIS5`r0phc>@(@~736sq{ehQtRWAD{7Gh6J z(Mc|CD4+7Qi}AVmD_=I?sIO&-6HoL@A}{hr8ta3Llb;h1ruL=u#zCX*zt!8AsYKGH zU5z}Nr|ObA!Lv(KAR=bJ+b?eqO^v~~8 z`{^VXWcy8e{Grg?`kT*nRO-rjXp~8IbWa{QDee(oq- z1y1N3qLVJR@^gC>5M1nI*=N&#wyn>WzVL4bJM2zw5?0wQ3cEuAzsR_4pgyo;57L*a 
zBYR_Q$v$1|kVQB-to?y!wDbDs3v>3jpc#1h2KD@M*$n*=<83n4xx6_{iLi!iW!pA6 z0t5Yqew1>e&Lv*^KG@CSw@?0OQZ<*tD0wSAD0m(2@nPT`89Z^XJsHZ%amVwo9wPFF z$T-APT$L^IIhj|_qO`z&VgNUK2EE6hQ|h2L2y5LqOR0(V~$Z` zofTg{aiQ;G9VyD{rEckKsk62>oYOZ5pRJ!*8myBx#C+0*dPE^l1fna7`pLHh5L1Us z@JDCU1gYVOj6=JU?RrLuWI#47D{;1Yl|SrBUgeB)@S$E@?jy54Q8#R}c4&+J2EIG; z=2HNgH}cKSL94k%vg(JEp8C1ST0hplk9>1+oy67N=s!po-{xK4T+4zw@ldJlcUo%R za0gFi!?sZy7#S9hjRa|uZ?DxqVWD593mo8aADV$%#@C5*oJ^fm z=aN;Nj*un#9GlylZQWfA5S?0ZE4KP*<(EnG;4F6PGdd!hg;Da0Kb3`y`q=nY^R>ID zeP4YZ8&-B*zSPu0&lU_jb$v>Y3UL}!;eiB4E5?{#_e;@zNozS%ZPX6z%-NpxB zLnXJ{3}wzDLw+jEaT;YdyZIoP;)o}hvcf;FhVqmepUcOv!!+Q#pHJw*Bid-;tkK7w znptXZeBz2QJ+1>MAT>GIz&>=kQYnMcnMAx?J06|Y*c2xQ&^`T7ZyyIvg#r%`m3Qxx-UE3c zne~Z|CR+IB^%;ECj=(2rs?G8dE&x0LN{S#Kc3YmrW<#@#=g0nNSGmVXjLCP~U~P{$ z0?0Nl{THrnldJzn8>&A5Wcwl(CLi4OYj;O^!RBzLiNH2-ZFY`B+SKg$#Emvp_<7mZ z=b|f)Vr!g{L;baJ(gVy>cVnZhl|1yUi`*#nm1g+uVMv}3z9pY$P6}dk2Y~PY<_{ng zSpcBpR)Rw!n41QlqZ~Sqy5<3SHB=%!2n#`R@wW_41+SAY1uLkg9h6psk;YmARC%d{ zmFC*JQka6L0WqWT7M6h&2a>$^2c;X~&ogn2pm2W&P9*lZadEH&N`Y zP8$CEN|Qtm^)DFS_c(c=5iMXLM~7?K-tf%l{iwV~SHe2?O!YizgO|*y^ai)36psR1 zX%oDcE*P^kG9Em$17!nfoP2R0A^i5aWst{4A)o}43!mkLcnqqdv8)vN&y{#YHZ@57 zX&D#KqHSzohIWz!WsT*5+Q<>HG>kJDtlqe_noj@C8*6Q2jSk}WMtkM!pKM=#?o&AL z+wH=+GbwX(L&tI#qd!Sw;A&h8($OdZ5-nv%Sy$ciR;pxDUctyl*H8n+gC6lmO0G3< zICUCA@ze;V!xH2O({57X`J*|Y5xk3Zg?e$yM< zkNow&+Qw!sfbV8I+TFwvp*|XajjXkpnn_2`t-6YjRbM$7w_)fgmT@-&9HyQboYBw| z89N~HhEQ@Z3CNGb7w2n8Q!;}gCw_-cy$+P$$$#nj%k5wOtDkGnUR%v`w?cpXiAUNi z9(}5P*EfH2`UW_q!_3t z{FzQ0sPp9jPx0PK3FXc=eQ@-lOw~mI&=6XnbSkAC=9Gm*cu zv)bPEj(4@WrG>V&ekc7CPig~TWkeyp2y-~i4&ptOUnDEFbWxt1_ zt2C}>CW-^E-t(WuWfnW+?IAoXMf5IlcJ>UGrf#~57;?Au^|f~G`b}u#WHGQU!#n_t zqYORtA@>jJBdW*0lqG zb&mf44IJ1~_Nqf^8H0QxGtxGWJY{cSOZ16WPOY?``d|NPTRwBK-FMGD84%XdRJNT6 zu)GYY0xfSr$zy0CJ#};wPq`%`r8fo_5BavF+W|(jsA~o~kMP{&eFljfoE~pCZe4FX zbg0f>xHp}6o#+h20$rWb5lyGh7@lb(Q?}PBZYYx@1HXmh5aU(dd9N-AHz7zH`q|La z8pz*ee~3Q$^f>=l2JsuN>eo{UVd&3FhPs~ikP`VR>JqAYgwe$i0yIbD{@;R6g(3f_ zxB;K9$k!9xeeU?*9Yz2v^kmU>pAAQ7G8~;rIr!<<^7QzI<761{H}A`mv&bD*n6dNV@;KQ}} zTh~$0=9I3IHM(izqE2T0KRWAMY;JL8oVuLt{(WO{279dUo%X2mkPtsde1%;)7Cjqt z023)9y_3FZJ>}S;?r?qUbGnWD*8jvz1{THNPn!6x|2;a|`}h+yD6v6-4y|n4e8V+5 zP6hO5;=4j;t5R0>m3( zp8l|8$g^~nr=bmHSz6jE@E6Yvo^duV_ks<5X(Qc^*6sYEvD@nlQ{TE4tU6JL{~X?w z9tlIfImoN8CI6KnQPY2L!JlcmZUx2Qqg+%unY;uiul37aLY6 zftT=wG%j?zz3SGtdszTDJ3G-HfPZg#>(lMlt()!nFFxB=R#vjxz=eD6ZBM`DE&1O6 zHooxM+U>SQ-)4upN=O%KrY+~sGG(!4SXbL*2fOtrGI`G-KjI7EJ7qH1!9)kXBZEG3 zaNM_RoDjCH$49o_*1tHB$~!RPL`Y~c@P#LY;!W{s5Q71yBrqO)sxM%I^8ck}jRTlO zdQunQhVO)Y7L1TdUY;c+uW-5NagP^vmyI4q@}^K3l14mSZMs8@6J4>J@CUdqP%@nokv`0Jaf@Iac^cA@sA2>6Y&!})5?MNz%r$04YHR#00++EW4R0c!zh!oW9ZL&7!r)4dxoBV2zO`uYJqx; z@Bpt&zS<^3(>zvI;ok_);jreBW3M^eS+U)=&6`_59zbObTY@mPx#gAi`mcL+``MrR z5Pj;Iw!OB|X7C5}57YrV-``cI@l)Lmzy&-RS0Qc2 zwY(;+^>LEM1^LtT<%H{`t9B*3W8jab%|1fE@niio!?{Zr+5@k9J%p$1wbkGkos{nI zSz4yu08y{)r%NnbztqEu23=^-8(l6R4Xz z0{8~+u3HW7YfD^UWFHv+Pr8yjpY6cL>~8t_4i~jj22Y%5c5%=?`3}J00NbOVV5Zo2 zc@M+mH_CSpajuKV1cDXZ%U86+LY%fFvSd1a7M>^GbeySw{1EF(F_By8XTcITvS1uf z)TI#5s5;j@&qG5F(^*F1Ob_LA2~H7M{s);8F5Wx+C2ZQ%9QWmaJDHz$p8qUxv~9s& z$9IXWS&q79HI!LyZR3SWh;61n;o%}p>ASNDt+gK<>F}&NhRX;dy>Jm)(bwx$hzUn>@G^p4e0w zz|3>`39`9pdftPR%Z~Y5dYj`0m8D zJM3n0g$sYCXPWC={DjXw`mgWOj$AN^jlx#Adzp(2vuGJuoA?L$3@(M&7?w6x`|Z7p z2uSOn)d<=lNI!{?KY2dp%Sn-K59`iV>R$Fgx?~<7MBX_=TFbJn(Ql!29uq0Q_|s0f z!$tfA+HgNnySb??Rrg8b4|nCexbRY+y86lDpe(vb!o}V0I+i{wbh3;r>Ls7`6Hjr- z0t9HIKNf5_TeqCS+w{^@9f;5F#SrS*F&;gA>T0kJQ{J=%J~LgO8z*I|Ea8^?REUfa z#M7WocGx9%iA*|TL8PZ75U<}>={T^*`a8aK&}bVR3)xw{g^X>sDcY?WWW=^q{k3h# 
z0ulUm;fZ5xhD;M@coa4YyDq9o_-(6^aU1IF=S8#d2%76Z?vBm0l?#{J5B-1tKDc5N zum}5FYuO3Wp3j6YGYt8K9eD^~v8XbnMvhKf>mA~O7wJPk0Y!#G$`CE527QzoOXfy36?mG^SKZ1NX4&n=tSpa}x zk|$o5-WgJA`XE_&$%9(tZvD!pmcLgSjMFqbZsNGKsD(uY^CXpVsc&)@-Hi?m;SL!H ziIF$Mk+>i?O5+Xy@6Q4Nvg;yf;8#*1w^Cp1~DA&lAXxOnB zA9H&e9WS3rFQI6J2mA*9h=;$%jgYZcNn>Og?nERYueml)r}1#_+50G~Gz1ccN!sWb z`UaMHeCD9(%q*Skqs_KRr~2E!>pR=O`N(g!Ti0J|8yJwS_1$)A`BXX~zRBPgoXVmG ztPido^G(?nFN)+I_HaQRpdJ1UL4#u+>q0tdN?r3-k{u0jR_4(T0q_Vn83)mAbQczu zf#+LCV>ni|nn@V0s+^u*0JM{W8oGlFm~7iP;vB7QZnWFCZnrB}UurL3ez~o#-D=x( zK#bqqa(lyrcH`z+yL=0W7+lwBTpxJgQhVL2pK5>YM}N32UU((tY~#soLQHt<;0$RO(MkC;0kPhbct2f%O{OWJEU;6D&L=V!joSkfsKmJ5}-@D({e&lcc-^?-uW!FMW zN3$AV`~Yhi@;3KI6v8C6QBat;4IGi|hz~Li;>WsR{^BKFyufoYfdB#*d9lGUozhYr zY@j4R4q7HM$J;!ohwr_=edt#{)PCzDA8&vD|M&xK`MyhS=k{u3(ScEQ*ntCSlkc5@ z*A_fC)k-al&2_Fo)#t)@w*&|O78g#1PI1oVH|2&5 zLtf(wmVCBu@=KUSTOaw{JfV*bz6D8PD`98ehvuQ|JQE)-rF6F!hAKx#t0N&^J}Deh zOVaDOISAx#5$e#jTi0{#Tll{DXIs@Rns3xfS26+%nZvHxDAxili#S5Q{QSm#yKwPb zd(Eq!!UtT>R`{2%zMOAR{pgSW4Utgn(Sz}}Fh579em8?lu2P+!pJ~er^KEH%ww+m; zZ>N@~+p8XZpgs2B{q27Ed*;-cwv0~i?CrKIFWrn&vV{ICGwwFfC((4q&z?EgR&S%z zI6b@Q{j<-0zMVaHzP^+7n?!*CCAX8$x7Nokvb-hw_#? zGA<|wa&5^|M%rrMh@9tet4Qpkb`Bj$=Y)=k?W)Cxx9So3Vh@31`p6~qET2$4u~mR9 zc4RpKgwNt+IOT(j?Tl~yPvW7Yg?9%-ZVrd{Y!lSi2#$2ZG_EHE5woxp$0!Y}@88*7rU`XbU#y{;{O zD}M&op}7-~@ypDQ|0JGHSh+yciMG7GslDKgH402Sl&RamYwcv@9lm%e-FRFBim7|| zN#v(K392EU%o( z_3F)AS`{LGu%g?lcv&wu8#?ea^P+x*f(d)1Rqw%5JkjqT$7_r-_3 zjZYZA06Djx<&H+~IXCT+q;(zum)>17T6O>iF_wT-6M@l(~;JeL;z4Bx4b z{$K(GjV$sf9i(SIBLM-UzwsH%Cdi}k6oO~)9oqDTeAVv-TV>LPOxjT*;y>oBZ|0Tx zK}4wJ5oYrGB=Q0f;0dSl4|sB|e2jFB=nRzQ@HsLLh?~VLRcX{KdZqZ}qLy!MF9@UcV;vzymhxpX*<12Q4Sx zga(d_coieSzg_ zD|X!#54N`JLfR1O ztSopo4vGY>*HE0_x=To!9!g#-ouTWHyee47&rY0&a=v!D;pkC!g+vDim`ZE#sKqfz z7w4)ExLJR3=Tg~D{UVRP-KAZDkF#_5N%Xxow%WnkTAOrn}WSY5PF16dT`+Fb>^K@EKCdzP}%aPr&!I3D6o zGNP?*t1$q}aLmG;;?jkC{1NN~O8=I?bdv7!4H0!eo2wsmI#+S3HV2 z2N(L6T4MWtL04SCOTD#rPNLR0l=X9rr0SF(?P189hy>Fn5;&@keqgjiTZd}N`!O!E ziy`OOGI8a+yH$&lv5Ew-CqHXj zdbhZiCrAeewE`D=AD|fCdf}VmHh;>8jLSkVcr7g35#eEV$c_AwCw!LQ!h;ObE|WIM zad;Fu5c1Hc(#kx-577vu%`@*!XDQ-oI-`8X#gM&ZY>48SJTdrLKPP;Tu#@A1T|5+b z@X5k3>ZbmTBPZG_+trM>Dsu`Zz}3a_X-P-jEYct0juG!YB$kXZL!}QQqMQu!3GZxA z)JGRJ>T7Optij9ejCW1Cs~hcvdLp01U;66n?`&=X*w3~&xeuSgUwdF$gZ$S&n3$Fy zwYYT01*klmn_Fzt`P_{0!i2xTuSL)B7C%*9A06UP_&j8zTB`)dIx1zv6 zp9>KgrXX935j_@{lfuQwjPl}^wP+g5gz4HAHBwZ$IR&r9oH_AxK9NQ^u$x|R)5 zUz8{6&viE;$){j(;|_r0X>V1bE)jSZHu$K%L=?P2iP{(S7Q$BFmm`Pt9R*d9Op#DGi= zHa1{_go%<+K;3Hf?Qr|{jg|kEtM2=Lf9ss8>e~`9v;TGKoU_l~Yp=c1UOSw9V+nN9!t$nbcZz;AKU2!@4h>VWM7xkE3@M<-$jY!5_I zc_f{EZEpK&Cy#w{F{Qk!qxxA#Yz(l6MxhV%w(ZTFhWJ`FXlk6S;il+kEhtNx#G+)( zw1;al(^Na^xV65Q@|bZC@Z6^oGSK-+>m z=+e4Fp6S&qd6(U`LTTV?-qZmZBz_sgvn*(d?kme;ueeLgon8BRzR0xK7MBKXbZUbB z*zNY;{`LQ{9een#80)3Bhy2~%q`$X{Ujc7)B!!}`r%Wrzxq@}8GDYTqE_sr3LP)G! 
ziKppGF6()ph-cv9(fi+q(6~Fo5@nhSiT=0vTf|;g+4)?)agQ8&DOIZN zDW6RoK$F9Vos0`ie7m!8@gz*{^T6<77mN!m;YfodOnHIPF96`;Vwa9kggG$sW=x5> zLj}7+I^h1LAdpN&bP`cs6(RAZ>~&PS{=Np=%K&EbpL^rV(KZbXCB76I_+6QOeuL#hF_7fvUpYzldkS5q##)*;uOik&tx5}7li_tXwm1ND44GRb%0-lS56%ARzdUV z>qt^@ke?>JqrC8;uS*^m{GK+6qfRCc*FT{CVvz#jyHHaE_f@XXI7^%UG7mVXVNqUd zkj}ERa>_X5N-(ahw?eKwUSD5qn>*|6oBrH?*QPKSANZADZ6{9OMVg7WG(X?gHka+= z3z&8U;N6ALbm5Q<^>2Qp!K^&jZtyYLS zvFEDctTx^W;WMx)Op6h229&HHuzed}=W}CwosP&_TUlPFZZ!}yiZdz=*SL3)w5u&$ zq+^Hd{Kd2FuJae#n?CpT?eG4BztfJLe;DZK;@}qeKwpfljcA-c47KLdRo-rWW6f$H zP+cYudRgB>>pXDz13P4i+oSP_aKPu16Yq1Ikb2=*nYW*m6}Cpzu(YzA$=6SR`nmQ4 zKlnp!&B0=x-S@yf?bMmm?d1KVA_XM@) zsY=rN$l$XKbkK=%aKee5NgQ+st|upTMx9tCV|3WEp$_?T)Gof{kNYPxH(xq*P$=rPIz~g|B@g;LYI8M-Y?15C%EIhUtS;f0d;|Uqwb9E z!yhL9XAL}ykHa}M@#ye=3EVz(>9j5UoS?AIu?^(aQpXo2iS6c*HqnlZ&7w7oegCEG-N9WF+$A4Xq-Bmv9U*$XP(gYa8_?eM#+UD%0)9a14 z!+7dodyqKFiCy!WBlv=YHYSS-I-2;j?38{2aq+z+hBSazc}{trI(1YW^?&y7Nfkdr znHDbiMUM#5D>8xn)+CYa)5oaHHCP3o$g6CGW@;XB@*wSrhyF?I0qtx0CM74*+PC^L z*~fF=65e+5L;3(wy;Byd-&1~ymq2BGXfr5s`{hJWtbu!(=JPrsoY08_pwW4?Jj z*jmFs8MK+XWBBFc?d7j{MSO^x^EctyM0?fiKBry0cy|`x#=pmJux@?|MBE)r?SGZuIxW4c-2SAo z3kov87&#%Ik9`XN>g`u$VyO8|bLkwfUik7C+~OaIZ)i(A>C?XeUi4+J<0HIN-eigQ zv1gV=oOwfgRHg>!#I<`Z8w!26>8L8v-c9sBW< z@XPecL#3=(7nJ$hK@Pas+rah!2M5(1d^QeD@U85JmaUdP1-mPca~2A7OioQ;lYnUf zJhMJ->*-s*`sf?(o&mkpSAYZu^N0Q+yY^|K&CVQ!Zbe5YMO;`T|K*)$`#bUPvGK}> zbe1Oiv3bcKd$n1i5oY zH-sI26K7ju6{@!UN`1& z{|@~(HrLWV`dKVtl+2(b9UYZ&adlA~lwFyY~IA9{G* zc^}MYOV@=vi|c~X%0c{faMC99ZKL|G++;x)Kc8FsfI0+TdYH#CO9z*?Xy08lK}17< z$$z`O>Wy!0*RDR%o_^xFEDBj$aWVHqo0u4FTO0Tz+IxO(PanSzd~A=mZ9|b~@sU5; zZpUA;$Xa_u*|Ng;m=)6L+o=mqfIA`6;;U)*%7+t{1K3&INa`9p^2H8ivFe^9b+iBL zec3Q`X#3(vc9RPbibDsaw}nP2of4L49|u?CD)=Ig=}pr$)oH=)-e&@-{{cOb;0LIXlkrCUQp-l9vqkG%S_ zK~_G`J9_!A3z?{r7Cf_^v5kZHK8bA!N`$ogq;cHEPncwD@Ad7Bt*z1TUAVQ-R#rGK z4%&_R)pl!nyrPQh{%1FSWMltUU| z#Q%*A>-?ym=2nJ0)piO)+|eIxBG1$x{zXsY`{_%>KOr-6Lz*45mw8PMRwVJ{ORDZ19*ms~;KfL~iZRZJ-nD#4TV) z*W3X?TM1e_*kLWv;$W+-Y;L#Jjoaok2Fh$1%(Fd->VMrV7v@ZGbY#FLigU ziEEFNv4f-99j!NMk=H1bEe>&x3znSgXCLSZe@U-CWE>W`T*Jo6-}K)*o$c|7wSjK( zUftCno0yoU?{<_nev%?xFyjj~`jPG?>*4};5uIjUVMI@upk&h<6Y z7xgftF5r-Q_B8b#t8abM|FhoI6TJHI*OE~J|1y3-oR#j|YifsD+NRk*wFMz*_+b8+ z7Wg9sa)L|so%HPmNIK5(;k+i_#yUEJJozHT>dHd9b?s_fcQN1aUYnwily&M*F}o;* z9`eOL1F*p(Jhrs(hyVinzJ=+b(}6!W(zYoxb0gH>MIV0J@#u+@?d@OkmF*4R@|S=` z|Lw+=cI)ypZHRWxg&Vc#0RCyG`PFV}Wf%HDQ5?afBBWdTNFMS+IWv^3LV#S0V}&~R zc$>Fi9@ORVp|y>_+vho)-@H8FvfxnHkSF()&LLOidD$A`v^~O%Fi4*yCJia6SJAX^ zj{Sg-B{S4Sqvv=$_8I1Kk8XP{T-rd#rc$D9uTRA`qdX*z>WH)PqLr~1G9+K<(8)f} ziXIS*Q%|#^m9^j&+E|VuXZ26I5N|o3Iq<;DUJVUh8U9P({EqiRk&DI(BW0S1g3~Z# zkW?~beI|p#6qu3kd*VQ(;SHm8qJ^Wtdtc6j&s0cE1lOuA&oLzZ;9JV!Y(uVsSytXY z0gi)h8X(h{H;rW!K`7osNJ$J~LW95(g79i^LA--Ihm)fkY|l2-8YT0U(iCyy_-AD& zb=Gv6zbl7=>fp|&)+&vmwGt-{TOI?KpEre9sssAu6K%`a_*t&-bc|JQx0P5CR^B5LW(Eq&$$?JSShkQ)o3b3Y+PLYq~?c?eHyFHm5jj#`;fT z;}oYiICOZ_!{Abl7k`!+3G})757O<^i1amh(Ba?LL0C!8v*hP&qr)#NW5hXhvym1b z<&aw^?3BmUiEBCoDN#K5^IMnRdnAyja5Wt_c@w9emRwanLn&Mh^0XuJ#d|wV+(N(V zJa%=AL2&@V&Xt2%I9|6m+O7F(?fA*#?cDj34UfJ(^m(7xW@%`D=4XBmXXb@AcXX2m-zD|YSV!EaX_ZEXVImBRUM+HD*nIzbR34Ut9$H)wqg=V)yqIJmfA z8z(WVOJVi!&=eJt=dOx%aKZs58$B98oEmk|L0<<6(haL_$f12?y+b5ir_)fd{$ z8#k!RQX3n?3ec$?#u3Jbwsjh|&CQ{tzB*B3hiyTw#3nu$CzpOgn>tXtHD5jJ2||)N7?Y&0rcC@ zC9A!FX&Rp^*JZc7CoiySTPIiEZqZ2+4+k85!xwt(uIC%KtEWO+7nmi$qRh| zasIAz?dc|`K)GO^8GT>Vt+May>9{np>0aol9hdS>ylyarM-`aB$XJ}Xb!?w=Mwkc%o zhyTy_xADo5_LX1!N7}^Xal}o%K*o?Md9K4#gR0snW!wP@x9{|g$ZBlmiMUGVY=Nns z_$)^?uF<2ALbyY1d~_O`PjF1P+1a@^13t=v_9)-_u{NX+|GRS4z1T@`F;p22+!2qy zMGv%}DXZ*7^fc|2`~d#0D>A_%a1PP8yHelk6Akjm!jUtP==f$wIpx9+-vZ4b8tKL- 
z$1>=C_1d-Ip+32SaCihKn*w*XkqO|Aj7)`>#oKN*J&r+kwINDpV~o!A8$bUIW#%dW z`RAT%|KuP4FX`kHQ@%50WReg5A3t_9lkyMVbFsbUjjwC(eA^eb*T434ZJ3Vc{HgE{;xx3oD061+CPXe4qDeE8e}uadehL>>sSRU#;{ zUt$s+|?|HaHnV|;(~Wq0`OUw83=M>@_}K}drCsZC6H@$-YT<(psl z)|Knva2WpK`@_c_ar^nY<0bg_p-*Etl)uNvOLl(J~2=-FF%)l{v zdK>-Fp4thEFN1zK(11$-41W%`iktT}ckry$d~*sgjDY#9f8tlXMO8mJK_#c zc`FEeA3rhTL_!YL(=@(mmI+6QmG(R~n)=ZWR2t&QYFFGhSB?z%$(zx%CHnQ+qD(%4 zbLtH~c_RTExed>>%?^H)4@9}ZQD$5;xxKSa=X8yB*-mSOQQl+Jn?e@w)cJKH?z6QFX__$#=eHoB!Whf!FpDHy>(X~xW-SB#u*G1 zjyL9s-cW8j-bF9dyK933{gGQ}A?Kv0K5!7soV>NHNIN7XSvGhzRI&hp4p{cyl_Q-E}f^OLFGIh^G7v-2X z-<*}t9NNFJNnuGaNl=3>!s0RYaI$xbcx_l6nY7}W{zE^Rqx?!!-+1)M8?PLhREyuL zOm0Fe`$6lp-4oGad6GBq@Ezo%_DUZ4ZC%UqP4pt@;d(t%eq2OR6WYR6UX^*zeVWe3 zb!3)8u^SOr>CV&Op}bZi?V@SQp8^jTY5+fd5i1GLw1+P8@Quj4Dc{l7dS!7Lg6lV# zT?AqOB6Nq&%91{Yy!Z1Yq4O|y-+qjC4f%9n-bF{FPU@=%Vg{e{$tVp6dgZ6~Yy-!U z0ViTb%g;W^KkcD9wY9kl3}}#v0qg`wSbXm*s)yRSJ^J>2KSx=!&z|pTN0yWqKWpH& z3Oi_6U0p)A){w_Nd`h?B9nD08RI{Fbf26on2wc&>=oA2OK##w5 zc3WuMK6zp~_0RUs0p>=(*omgGu}SQE$+({eG0rW-H#S#*x0AkCb}MV+`0P_{-K~W2 z4Tl>18N#Mema?FqAP<#2Wj^wOt*{TGVHW=o3aAOBtw}cTJ(sTJ?QZQ}{6Jt(kDPOa z3LFO0n72D?xBK#qvn=lu?|paHAN6`auYv27V;*E%BSRW@mnmP&w9waN+9#-ywu(H} zesEY82}+jKab*`O@W`#`k3b6tjlIt{;q=K~N89*Yzy87Y?30(<+WHD|^V3Rx z&v<=p5ZkI8E9=U>J4a3AtwQR8juZ)`fpruzxIjW}zW!!NjsBG`Ry!$;^0EkcTBboA z&aO}Twdlufe5a97Z0)Fa%RY0potWsfRZikNVY;Wjuj4y%b}RIa0k+XMnvv5r`e55W zs4X@3gl$02VYxmtsdDw)JyNEVj(?Gi+#0Txrv!iPxS> zZu0it?P^;}?({Xv@5;im*bLitWR+)z-5qxU9l#Ivqtmw* zH`?W!gZ4X*Uu(bg+n;V9ee(JC#Iv{B)0c0yXRZv|rRSG8IA30EHx?aFoQ3z=#Oc^O z`8z&d{dy-i9P?6^^(!+@2=2DM@;>5#1b}TDaTmO)DQup&)4$htVte=W!ITFDB>s`@ z88+52(&(vn!ay2zE&<(;PCh4JjrSAHQT-S1rJ44j+7ItDmWCeaKWQKRhI{Z7hT34d zx4;2Y>8GH3-+De|LBl$psZC#*PIe;+Du{ca+KgSMa+d(;k9$RmDB z-S_=P`wjqXE$IRhLMU zF?skATd2-~h2uWbbIDIGkj4q@ZG5QlN!l&SSYO|1PhNhZee%+C?W0fKXdij*h4%65 zH`)_7=G!N)-)tXy{#yIU<*V(?iFH?FlkjL(j`hz*=O?wjy4_`vwPgROSq{5@@E z`dC{*=Wf&Xt*@hh>$a8l33uA$)L5Gw2UqN-3;0}!QFNl_$^bC+d90|Gf{J%X1@$Cp z!8`pMX%u?)w!>W9C$DGILTCF1+G=?eo*<9VyZ1kRab|0GowV3~{1|!aGxarnA!!bc zF^TlkkT~*gA!ouK!Oxj-rwQ7k+1aD`$oL5O0gmCpg4j{#0mR##gY;8f_@e);or~WX z|5bY_AIaoFvG%X!tMcxSBl?-??~+fuvIlO;jcC?*y%knY%`Qsn*XTQpY_vX=` zxBXV^T<9geq`iHS$Z6_L!IW=*T6!qYju+ckw#^%?F4ATU+9rLS!P3pPx;)=D@Xv13 z7L41T;w##A=~wIP*`G)oEPj4#BevT*7j3O;`p?wUewW;|y|WtXoXvslqi_4)S{Ps- zrrQRF<>LK!w?FX@zaLomzH2wy^7YH@_VQKwNR%+?Db9cJi8<`SWakyfa-k8B6a5Px@VP6pTfC6V#u5E4`fDvsCGfZNAPT49((C{S9}-j ztNpK91fX9nI@%u>g;d{M+nX2FkdusIsXM|^zd77%m z8;Y)G?$B#Vt-V8j({*x8NkK@x_@7gMuZ+nj?xl4C_puM%VU%^}q)Di857MefW-9`| zBt!}YWi>baonTy-iX zGFWdz#rFdAlF;Q77wav_A|TdCqp>ecMYo=2W=THr^W4>=CK9=}k&a=Ox8^@6|3&G! 
zRuSrKT+g4vKi8hczp^B*8pJLFnS$FE1z)QXkc9kJ)%K;9jfYT5f z-bd8rw?9gXW66aLM!#c}X zueUH*X*{8mi*6h&%HR$^C+ssA0A4Y87((ABkO(6f!8i)Yy$wpXHNg;Ot3n$(cHGr2y7>+7<%Ny5xU}9@R_P!d8E?l=PPTLBPqjy0c6a-h zum76%m;U-+Y*WWIHybpv(80z@l4*2lB&B9nZgs|0GD=PB7H20$ykt83!=8AbRG!k{ zd%-zYzI)ckb$s+*UC#E2qd%4aDO)h@L1@2Suwo9@0e~ znUQQbzBLd~+f6EOzU`9lfzg2&3|8C9U?pdFVOU;r7X$8HJx@9ZLTzLx#>O+yI6ORP z6Eib7JvZC4m!G9GJ=|XN*kf%78QfS|j=aHPIwbO2`H?SDqh+8p3sNFig`d~TOejy3 zX^qPu36-*bp^xdf@jGn})=l7W`c2B(R^1Yj0|5~1bmuG$?Of_?bnS+Eon%QW-Kwn6 zk$au7z#~p^3Qi_gPDrHwbcquJQz_&u+q_MEfg`O`SMGhTt;!++XcQT8z!^tP2+K<= zZE0mCvYDL}NVjX9eM&y_7B1*j>Xkpzq60XIP7F9;|L9{6gL~ayyLPSp%kTe}qM6_p zha(&(kDq8K&YW!L&YW&9yZ5g4#@D~PJ^1nmvVi56Kls7+<3IkN+7G<{C)+2Uxz?`D zueHTZryQvg)mmNMY|DOoU*|@ItT*Llpc9^IpT&6-C&$5~P2|k&FlZ-t1(8m!jOt`{J9t=sHULj5~qij zHylYkF);%k!x=z!#pm4I@eJZ_Z^BnP`c76>+W`OL>;f_O(h(TSX9}%?os2P55(?8H zkBoM~2f^h~IP^Hz9_Gm*+&uY12Jy@PE}Q=g-wJo0J6!r((J#f zDK+n|slQoLXCL(WK3lls`Qh|{7@`AH^zPz6E4&`h{(K0z-#2vW_QM_b|99~3d|^?^ zw!i!&M`V(VTCinDdJ^Ac3OU6l4mklwe#oCQ|8kCenOAugrg^<2A#$B0`pCWiL`-}r z?(GbzBkHvi9qK$zLv*H>j_50Q>5n8h99%w#V-mtR68nq~v0dLn*0A`^Mc<5f2O`@n zoD}fE?yNf@KghOrPP$fGTZ7LT=(1ka#epg5p?+slkV;w?znwekgocy<^o-r!({vfA z>-?13rp?8d*wWZAum+}nQWjs0T8<=?8v2oTf@=V@Xl1?O0Cl%a+dzHIQ9s3y$+enX z5@|bfwOf+c*gEZwP`U00GqT-WAjubK3H;bD=;pwfG^zSWNPAuc`&*S0;? z4%wDeTUve|bOO)XcHd4a_3rY?yR=d8O1{NbDs(BoaFFh{Gf5-u`NhBBFu#eVuQ>KN zt*>oT&Q4pW{lB@e5M0imJ=@NmJ>918C8S@r0f&d#9l=2m@aZQAvd}``URw}*zv2MoI#5(cBWss8l@|j3ay!5vwvY_PC!@B!SS$BY-=oP|r zLHal`AsiQ`XOi85N#dJY-uJvY0#mb%ZpsFil;r?&@gr}LA?y0ulEE4a-@3*(f(NDh zMT_x?nw*s*PSp7}e+DRkQF&V?NA>v(#Usk@@K>*-VR$ai;9{J7o1b53H|KA)8QRhN zU-nQtd+~hmu>JC_NGFcm&A~QyY)oBq7mf*JLObEWmNHQ|qz|U7s9)%nbW%R6hCVMj za*aTlbVA0Gt`F2R_4U#-S=FD&W@x7FfQK}* z3~grzzc168*LTD5soM$OD{`~VMt%= zqRF%zb<#mczaK4Zx3aE9MAb)~obn?O$GvnD2s8+k6inbaF| za$kSNiE;EJ5T!Y=fv?Q;I+qD;?20}h5|nyro5MHYs~6^VY$h;Ze%XKNA}vfw4 zDssZTTg{83epfyTV}Zz;1M4n`^w?RaO`;9kbJBEsEk4}##vqd}d%Lu6=&OrKozU0h z?cy`i+olfCfx1rMxB3#q*2a3<9FU)WY!-%5PbZ?LCdb13#Z#zj^oE>YT?h! zXgi6&a{8`|ZSvUBcH)5t@MqCG@Yn;^ZQE63Td9{U;_i5uHa&ICz5K3)p|@eUi)5{@ z@gC|}NDhsqchd25*Dvok*72jT6HbKbzirVkM-AH={qfD!^|pBJdRtraE&i2ueQBdD ztnIdsefoO)^^d>M{_7`hwvS$0Z!c^PwPjGfwcgt0rQLRkxF@d;IM&-!9M={$+g0-4 zT3Ky_E#M=k6rRO>(s7*rg`ey+Y|`F4CS+ecV}Z~^d2@kv?4yeStv5Q+aUY(KLmqhR z19_}*=!3GXtdQP1>SG-&0Dv|KjOHU_00F>>?3v8(lchug>EbI&VB0X$5#??LPNHW5 zl0H&%eS?#4*t!wNl4u`z$bH#$#wgzDg{a4=e~bSY0@@zlLby19s9QC2-ZXRt|J&zFq2lZ!*`t1SBxBgw^zU5+| z+aqnwaZLE`Ck~!pSZu%h_@(xVXRf!W=Qr9sWY(-ONUySbnNc&u^cZ*BXyx+#-oJQnFEs- zOzaf^(&<;8pIE4%(B?mgq(H<4`D`9`K%UW%w_C!#m&0{l$H_ z{X+<$Vg){xsNxIdS(n!5+;nMmlDy@P3l+)9Wp1P7R=Dy~ zrrC(D21Hz>l4*U;WtYw)*T9oOn1-Y;;wk{-1(@3u>x`5P*pOGbhY4(`J0EZa*}jgJ zWgY8r1&D8ks#-lG1Shk4v+p38fYV+ysC!s+2wWp9lcT4#A#{}nK1(P8Pq5yW<Zd;vS-zaq_70c7-Rcab6o&67GGy z$!5IQ<`=iZCFR5*z?)$QO6odqZFxpUYD=2RYbV$IH7V@@*pw%ny3Q&8dz`_yl29Ib z3f+q6Lgnr1A?`^}TH`F=2rjd7)l2Y}CgFwIK!j3agd9xU#i>|X@*DXpZEs|!ec>0r zyFL5dQ|;0d&$Q9;-FB4D_YsVcgIZ2*Oik3=B4adgPyv?N;jD(&GHi&qaULyP$HRut z0RRU#>`aJ@nxIc-(A2kw2a{e!>% zPXsF1g4e-M$G`$PayCJSa~*_U;PPxVw#S7HS&Mt!C%|J2DD&kqjm6~0s4gYhkicN zVH*Pv2Pb_aWoCxXIcHnZIEf?+2~(z1*5uR{sAhryX5O0cNzy@&=wNq4ke234<81Q? 
z?jDvOI!4zIf;2XJ?3Wj?zPGh$&^vf>hTxSGT-LolmmrU5$8>fnzzO?xI%~caG+5tg zZlN`5rxeigH=v=HIv%+Za%9$cYz0o`V~;(G14tP>eCml$wjcYkAF-0bi6rFPYe!Ga zwlil=wudjCZ~x67|8Lsd?II(h9hiogGL@YAm zS(%GIxI>R`%MbZx-z;$sWYPA!Vi-B}7k{GTy*48g0$khq$s26}tOd4HvmJjaWm^*0 zPEOm=!Gfi2@mv3H=P3|5EX=?aS7lasiPtqR92}UUf#I2NYi=eZD~M05~~`R{lTuVk68eOb0iuPT!_lv7Os?_Q<1y1kD^cmO=ViH3;DZ(`4*` zlTA1=c*nujo=}`AA#Wtx36>Lr_Z(yH3UClfhtjs2BX4kIQr86xg^vSs z+6p_T(j~O8UzNpGpu6jY5O7GRKT`uR)>=Q@HbwlBlwfreOzn6b&J_J%jSt{pvnF8AW+Lbi#?Nix;e-ZhvS z=|WBcE<@tua#E%^G#mTh6b3Evx8QU{$1DqgV_(@COWUk5FL?b`) zk38UONg1F*hJ@*0w1vpC_=E7#!QZ@XhFmzwXn*_&a--RZC;&jGyr<&V*hO#=g&;!vly@H-{3gLlfjvXbZOsWyuGO7fy; z`N|HgDVJHuU}qwdiMF&8a?tOjMFkLqYYu-XrPN(alR1p!_cO|`LgU44w$2A+%F;UJ9< zAE{4Hb)PcAN9*LIWetQGgqs05h?}xGtkXUa4*PNG&qB~#gvGJ;B-n7(4Q@(Zg?4Y`n{08Hsmxp0$YMSdI`pxV4_UrWA zT$`MpO55o|dbb;ntk`}~x4rPz-Kyw!J5h&S7^h!g|H*P)B%vL!+#1Ug2kH!e^z)n; zmPSrUxnNv6Oihnx(e(y?zuy;kGTH?|+A=3UWRhF9N9Liy71?~UUgLtT8p9^}nK}E*Rs*rfTmOE~ zUt42)w&z>GD2wAq@n6IeeRBNBHq*r(`U=XDHZGHr`Wdvd^2?n8vbDY}vRvA3x7l|W z0^82S+4jltI<^;bubvFrdq+muD2 z?p}mU`c~%J>QdWAk9IZ(ZFi$4t$k@_C<`ter&?{B0~eEShBi+APmPVV>8Y`HY-XyR zIdz=qp$s(Hh$);%EBXTn&Tblm^=&(ZJC16B~Q*}@(7&{B@mxBs81gtb&#%-c<%-OfRp}_ zm!!`kfs8S@NE+Jbz?V`lvCYHd^v&(hxF~=&Z)D6}!dxt?Ps4NTN|{xxI9QD3)nVP^ zUmDLp+W;3pWPvigckf*8oF2nOzKOyXGRssqCz#4yKs+Mos|fCn;2UQOt*3yQ8jG zoxM{gYq6;I!g%=|e-s!8WB!D&L_YO#0+3e1;@^J!LFnmQ1{Rv;S{#%spBYcvxn)1c zcDDQ({Vc~7^|@@HeN%lC8dK3Mn+OM1_{$?y?kePz;SDRr#=3{QhVysHEhCW7MDKsvip&jp?2%$a=W>>8rr+N z)-JZnod_Hcat91|aR!;>5M$mU!-pGj$<7;@l#Ry@0-vK^J>f;v56_AAahQFGY^yC(B{e%dd<#YG(nY`~^SpLjLO; z@P1xtjJ9yJt>If|2NXru311gL0M{-5T_8FxcX-V{Z9R(?9Mx5e2j|cUdwtLtqwg`dXJKUGJ-lx z73y3AW?myuoXlIICm|GSB0RbVWgjLe2+M03Uod!1G70Ie4U0h|q!5_S3sXvb>tU2Z zCOQxkUpD!Q;1MbX#G8t(vb?qYoa+$lJWBx}l6e0SB!YNYN&oW92R-fuzprN)?kQ=7 zTjxH+7+TL57~+B7)A_T7JO*fW*v)VASdl7p;=1f56o#tr;qpCaWs^@WG!MJ_?h9~* z$|sL|LJ6-7dIjUU_}r6UT9dKLEu&Uxj5a?9Y3wxk_L_D#V$$W&C>s|e#W`hzXHX0< zVFVnZ>SkQ-ypc!52jhK0l*}E34nSv^qLe;I@%u80gM@wfmZiekX`u1q=RlrYGdXED zLI-JTl5&Q(+qZtxd)vF;{?@j&ajRWld@fs+jgQet(^<#Nv|;MANykV-Xanzn@a9Hs z>n6N0oDt=NrZMF>IO%|LzV8NI+=42TjT%lmPdbV9eYG_@gzLbh8^LosXgXk$aBHyE zHr58vTAg!}e}kslhIa_OM>FWWy6nngIxWb-v7@tX?&w6jaMy`;-(4r${ha^QpZKcw z-GA-Rx4U2YvNp6b56&yV-^}2!+c-5GnBa}U6?e&|ZiBnUH|=ewq+WGQ1{>?DV*B#DyZh6}`i~ld|Hi z17?lLRCI__r$lN4O1A=s&Wn@H(%w#sTdGgZOlQ(tB1jkMC+rlD?b42^e??pA$G_fw zk|%oTgYINpodsYz;Z}MvQZF+YZ@&HD^STe~0I+`~R=Fv8N@opyOnUIjcTNNENCx=k z>BKKB;ppM0?&2)ifpB{{=|t|5XLmFJa1GwngqL`#w<7AQZAqTy~o>6{nStR zIAzW_5HvN>PMkW~E}S^lzT~UE41HW`KmBvR*#5=8dVjmTw606iCO~$S%AmH%tJAE6 zSqFUPjOADjzpc8nQEysl6CEr!-fyDGE8i?}YiVU#XL`(UV?l#Ayy1=Ta3T{|*>V`b z(g03%^(mfZu$N!yu>JwG)_y{BASR9cmJYRA|43V0SZ=@dTfalOgZ9ctUPb(922G`1 z+BM|MywXaVSjGOZvgJHgU32(kUxs{!T&kZkA!pNJ1F465b?p%6;93R`Ajay-TDyMB zMNm`i*zq~CVG$|b1w6h1V&}>HnK%tN>(&KjC~^Eq19BXe+dVe~UxM+UF#zDuY5)A$ zf(05r1H6N*KD!g(9f0?5sjOiiUfHFy;gDkWuTPWr?H6RSiy-{n3#e ziQMRMIPi_G6RnR#A;2M(Z(kK<WAdwQ7(0`ozQlwM%$Zi30vEyOH&t# zmS0r)0GFT*GU&UP|4ubRJPa_dDL`5x6XMYKM@d;c0FG}MfNkJr5i>Ot|Looc zYzK^`m-V))(IenGK`8pdQI-supfF*FL;2JW9&g5{zjp0@S|i&K57Zb3j3^@<;zzGyLx!iRT%ZHexf^itBW;w<<4c&wfZcX_;NH3 zW`s7DOPOikrHtk9FQ3A&-vixkr~7dMVd`71tqwAw7Tu+6C(9W8 zbID`qpL_9?<|Xf97#@lf|BS16p4@R=q0W6;Lu?I~eVig@9ZODxQTcmbCLgruZH+!; zCjRZu@>6E5OLlpo9r0Sb8@b9eucd1(klBaBIW|iB2RwPFzv^x+S(pKB(S~~Co&3_- z0X-LP5=~ji33STaI+j6wQWi9fA5PGAXF&0++JWn!$p3+;6#cGW2- zQBf9mI#XuH@t38W{W_Cm1pq4jc#6Lqp_M+im+`-_xzt^9erbPCGen6Nw&- zf>ZQTUd!|FTe*Os`~Q7RKBEcc|CAp0q4IbR5U)7;>%*bT6c~p7Y&%Zq$~aVVckV-< zpqF3pHOV3T3_U&xUZ&qjS}#qnKIhJRBGCV|Q^?y>Kyi zslN-|^zqb)PzSo8ImzQ7x^VRSd{Jc67fhVYcand7khaM2ryXBL*;s9>_!O&43wbkj 
z3qL~}DGp<)6LnDdxp&-ZNIG#nIWgKMd}9+EKRG_ujvk$9bJOE(nnT&v_n4kRwoI=}PCJSt{Y_IHJQV-Jj7O@l6_OaIo8`OETE#J7+uH9H{FU+sEkA7;U z{g;njZy&m{)ov*-ATS1%os%D>oN@4z{j!-JgC8c~IemA|l%Aj7&IUYN8tk^`uF>}2 zT5d1gT#2tdfkMN2WDp#{ZFX)BDEg;PaL`7=vub0oMfl+(QxlO@3)S{G@lLg=3MEMw zj#M~)m4qX}GyTqB5@jO|K5a>Ng{Y7>lqLOo9j$}^K1X)7Lj7Bn;(@geAm-{?eX6?;X;)7{M{8EG|IyEK5J8=cn-dehiTFT>nE{LTBPVj8BrprGopx zI@E>F3cubSNr(MQc~n65L#3A;;({KaE2i0o=*G$0E$Xy}zOK`6a#DMBcep)+Zv6bO zJ<%4|c9CmjRbZ6U(_Z^WwPLz4q0j<4TORkyP<$iq)sJ!TR_+`J+e9X=4z}Bs7v|fO zm#$zFciY8#&bLw8(RFlX4ZllXird1Se9+6^a{VrZl`YtK|%+P zV6m?wz1)jgm(EIgm+MNG@x{=q=r(f1onTzB=)wv8$HLJzq=-x1)K@MjA^wFs8u`N8 z7=DuT1}+S9A)1R-Oz8r_<(1{gPZo(Gd)k8X+m&(QXoZj?^eJQc#H-`Vl`?O8Si3Tb zM#)d)iZq_>7iu@M^Og1P>5*G?TX`bh9|>#1yJ(n&IjB>7Hn^X0$4%rGJ+*ldzQO!k z&V#t?6YX63W5~Y~|7&YY@ZVim=*KSJY%2@%@x!FATfN)fWE=u`>V$o-!y)Y`bO&IS zr|xm5i8=pSfzV*-jrFQoh{n6IeuC&cX+P{bB>VMSa?)?O@5f( z#!BOD#fUe(=LlcHjM43kZw4R9?-o&#z?3GnGS$EYIntIPkO1Nv=PXam@0G+7kE&pZ zM|8F%RX|Kid=!+jv)!i5626AS>l_%P4suU}KJ)}rvBf`mQ!jAy>=wS1RR|I-C-(6y zN5Xq*UC;TK`hs)mf`+B=FB%sPg@zdp9nw4iMXJM7`s8_HE2Tg@2+}Y}*diuahRzxH z4lHtSLlT_enKUeZ%4gH_g!`mVuK9= zSI!8qosKR~Rs?az=I5DlI$0UiO?`>0%*65EsNiiJS(T;!eqVk`82X7y)uo5k-ya+u zpy1)X*BpMoJwQ34facAE#0f?tn{7gf)9|PRzEy4e;hV0*7^IoGv3BO+m$f&&`yFj` zFyDUVzx)ygWwY&UuF}|!wkbOa7^sbPSFFh!S0rr!XV6X^JJ!Z^9#wivp(q<18$}xm z8&AI{*ez(oe%(MsYf1u5tzv%Az?Zsw?S?HcI** zvYEvWugl)~$m}U6g@}hZ@XXbZwppDFW>RX%ZB)NuPmnh<>XB_s~~0>EJWXF5!C z_`Xk1u%ML5;Uhzm_|JSVK5EAr>#2MidRXGp!RU_{g+79Fdfgaf->kz{npOOq z_`kii&KY0Fg#dJ{twDCEvHrGKR)}jS)ND^|AI8SM(0Uw<5rVXam#hG?GT~WTZTc29G zjUHygSv;g01h>DDNif^Q(9D)Z%Z^Rr+DR}M0a(Sp-78yL{(GIv-UUEc2U4U1sEu^O|F}EJ#ELnqo>Lo7PE9(p$NM4gKW%v7f z7>W+?!!z;J*jNthFHNW;GGDY2a6L&r+ZFqw{K6C3!wu>jf0~~@vhX~zAlba4!C`fh zhx%p^4jiPj`jWRXEzX?aU4sMaeaAoRz^`xz%g(Rt{RP`G<-=SSD7EyvOq;r!n{ta^ z#D!l)Q{^DCf^0h3bM5Ml4En$NwXYL0Fib}&WKF$NE^+)hk?!`o+AH}do*ATu4*E0I zzJis!Ao%|_goyk}E6)6JR5>P<#@P+Q`k0?QnbfvXC-m|w`WbY@?wL{cO4e;B$H2vj zNbQvi25ramW7L~Hbc_lFXSdjN2LKme`#A;u?)U-fhPE*@BD=_NDSpvi>}bC4ns{-e z9AIbAob=LMpF#YkSK2!3s4!SW2e%3&)9ZCqbPoF;4qgZ?(Clssr`3M_bw^%7<3Ixvih= zclNc<`CJTj6TXAC+V9Cp-@Mj;_FKUASJdC)qxE!BR=IK@+A0$-!u8H8wg$ zovBOG(FW7EmK>A#`%_2f@Vh6{54OC#FaZ29KU;x6;2U$kkt;=fGtx24 z;GPn!afz$ zD)iSLrm5?aC*{#RTzT7tUgr1VP>){z>b~T>FW()E(!V3_$dGRw>lcs?8FHdh0y%NI zGcnO7xF4BwCxF+qkA3*V?Wrf8Za3#w+sQLCZ5$b$K%UIvq*8Wxpf6T$j3fK%s^c^8 zPhG0M70+zLz(d<9&6R0&N}HWGQh^o9fDfhDGvi>ryHo7$4k!yBDDBloH}Q?nNG`u; z?)LjJ(zIa?tfL7)^A?}QdLswJ*>RIHlr!5peH(nMtqr&DuYq@WCv9%;vBCfoVb zXWEJ5$J&WgC+IIvW_N^4LPZm+5HaC2=CdZc}T-||y@;x_{`?UZ&ZdgYj;Z6bV!AKG-=br-@N@nr^)#Rl{f z4lamr5#HYItu{eFIg^Vkt8H}wTfaV_{;Tck^_%U9=dQQ^@axaEUw&$}J+-;l=<)g* z5EKUkXuYL=8Mu}sasre&=SpVe+8-R)-zi`f{eEF(yFGV(vpxOnVjFDCx4X}uZRgLO zMaEo&G7`OCSzboAuzC0jz9ix|zmMRD59{AY2b6E*>Bwa2t4`O;fXW-!`tOGoi)(F0 z#gW%G5xS-SEq(nDUDYjN@9Uuz^m*bHGWY>sBKO$6y*>RB$B}#!8(PaVcP1d;o^3OA z3Qmwj-zQL`C*sHXh}0{bctC#Rxdbmb2_L!Vu7>?MbVA1sU%Bg&*?5DAU#R zjMAdO$ly9)h%+63xN4DAy#u**T)Ti;6uA0E3{O1mb0`grRuNWyk)?Txe@e9*_A zz1n{HmKcL%9a=Q4de1P@UL7TsEtKC|-*#>K?%}>Vqa#j|Y@pI<`=Lh^G z<`8mh%i`h(@k_l0N?K*ig$edmtz+u0PEdwE`{;-h+$2R7tZ3=EGJ=jH=k7XUU6t`H zh~s%)jHABF$=3Ed=YF(PNXU~u44%p~D;5^530w zoJ{uY3zz9j0XUQW#UKjN-?0C!emg#joRMcsej`s=*h6yZo0k!w#U^d2zI}G>p}#m- z*=!3d3;24c+82MtSF|^O`(Fko_IYik?JO;}q4deOX_96$7LlFi6h6v#w}Hq3WtWr~ zFX_aOpb{t_-gkC7*XA(;^eMUHP;Stx+$OXCg@I}mnJggO zppki$M4>?@`8nI)s$JKV18r|th77Y@aUz{^lX2bTBadxpU_t|0|rR9k3 z5N%G$Xz0_2_FV77Fp&!Y{=2XLqAUO~veMSMH&%(^Tmbs#l_m$OZ@~)UKH>^Pa&~Dd zW2J#oL5_48oXMqv;gt<$!RX7ZNgxSUd4(eLlyl`sW|vW^e5T0&jMv~{rzngjK1>VK z$xEJ|DJ2L6JB;iAmOyF0a0>EWD8qDcZz${)W^Z_&jKulZo$Z8Is5%*P_A=-G`fyay 
z*I?c1zDhfIU_#Cvo)(?!F7$&ksYjA@7?#eum6^B#Ln)u)922$!S))BEfR`R+S;Jrbhup6 z<45P(V6fa~CPvdKk_S2nD=SOwI*sP$`er+Ne1ZHQ_Yx^Jn#$Rb~|B64-sA1b$yxwlU@QmbwS>x38OdFdymTmu0)G~k)23*v`OfYck zeRSJx*@!58)CSabc*1=R2CMhhk6YsNEDWoVr2Ok`LKv5L9?+osR$kt~BTjqZXAX4o z{?+?_BGPg8%&B(f%z5~7Z~I68^#7H-34u#PB(8y}j0-c5_Cw#vDZ^Kng(K%U^~eSV z#n}&`e4rDd~t{@3=m|K9)He(QHX)UMKTcA3B& zowO0MPvJC-2`R23riWg_QH+!(?9uB?RS3bH`_fI&a|V) zPPC7F_@iz1=xlrAo8L}{bUrfZ)>1lmWXhZSHjs80+%_(&#Bn;ct+CG%tgBn}uM44l zjCOUN9hCD8?+N6~Eof(FXR|U@J#~Ou0u;}LrHpj2*EWX+1E{G{>TDDR;SzYO9h4f5?X;Od(=5mp^V96ac#^PTMxtsA(mZLDEy*V@e7 zbeo)=z*cET9C)pP07E*uIEd7oJ7|{jdDi;{%2zHcy>U3&e2!{P9)WxOO` z7n1=HL;w8F^PQo`uRoWKI!J!U^Yjk9?GmIjRUsnK$wATopiHBy6T+e0_zdVIs?W3k zyeoJAd}rwBz8^kYx;w*XOZOQN`;4K|*^c(GO0(2MeFJ4g+>jk7{^(#2WztPOtM<*h z`ct$bx5|=(ub~k#EUdubKC}~V`M$M@_3jXspc5pN+?A6H>Z9Tva+^nAiwzh+LQv%!R zn=9_RF+6Ukxh6t)u&aK*&$`^k#`xygwr^|MQ3JkkY+oE45T69@>-*p6CD%>}Ie2Ir zQyuj3-<^obck5}Fob>M}&a%ji${yG<4lg8A_e;A6oN!QE(ZTfENfm|JPNKd=Z7Wy( z4hQv|jFBK%8|v=_4dv5DgEcr=ZzrRCTNPX>)3Tjpa!^k=)~^_AiG^HvmiuZq#XY3S z*=LnOT9(ft3J3AZkpIo(U?;iKre$FwcrW6MxSi?soAd3XAA7vbFYL5+aDyrBzI*R# zcU`>Dj-5OaJl(R~w%+Zk1D|7LY_c`lHJAj7y`&X>(R;7=?SlV}?CWrd7l{|9gt4yD zpG*51)IIq)0%AYZU1(uF)q~)`kNj0E6o4lVin&ek+Uk0~!P+*Q5OG4-#RsM_rSjO} z#5p`6)_UhH;SN{jO!=061}A9DvnfpUsb45(I}zL>L)??S+Dv%h?h!=y{*%EsL6hEU z*{*kKL9O|1Z*?YhCSE_+31zpTymIA*HZe8X9)86mS&Uq}f=EXPG)n&qA8@rjq|8kI zE9Jm9P4%^~M3#>nnx4{0hcr_^{U7rgq%TO2P7wYpJ`|4lkq(ljjjj4cZ=?6Xvd=`E zdbqn)lk%&}gVgc((WBTE;(3-yX7JG-DYsd;0Kmx^Cr?LG&(%R~!Rec!4${d>`5r%0 z=5sFk7mA^UIOuO!SMX2#92m5L@x)A#^a- z!7A;e2rMlwW~Yt+^b7yF-CW#m58QLM%}h^rvf@~Ya-{9mmKNQ#by+k7tGpM-#8X#~ zJ}&m!d@1Z872nbU2yNDdQI5)859={Sa!*O{6p~B zz?RsKt0(KUWp2Z-Ois_tBA>wRr8k@R$rP0&u*+mlM1% zo`|RQ>#sZA@R`5Fb)mbi=N>(S569PC&iz5v`)6EnmfqeR_>;L$yrDn$AsKVt$H()5 zDF42p^xbn0rw^q=`DqWKWp=&s=KwW!O_^|--iZKt*P!9MzVfTurKdmLu3veEx{b6A zY?E)y+deo*?&416#04kT$)9ECj>Ig z55;MB7rFCIeERr)f8X}ZiC^EY_q)jUZ?@^f?rqsd;B(-I?BH{4k;m&HY@Jpg#M-uR z8ar7*UJ_AvCzLkY?aht0<#zY0D{Xb*W?P)U)o$Ion$MdI)9&a3p+;?v_Gs3zfSJj5 zbY==$HZGP;S(uC6b)gRz~Yvu&(ww)v%%_T07Q z_PdYYY`^fK8|~6UYx7Vy-?qnRotQ8mc)`Yu4N-P+y&&h<7td;!49ccsczO(u4Y;1d z?3kPO?DL!L>djkidSbLqjt^r~s(qcr@7B&wOt?Ga6ga?T4)xWA4KDCi-Z3ha1D(_( z(rfdApgtI~7@HRr2XE!XgyM+Ji~p4y@2RKCXx={L+A@=Yx^X6*Fw_n2?ZIclkn%G5 z!cTi|KRk;RDJSg;?SniDKImh>^ScdS4|KXH=Hd@0#mNwTihbu$m#n|OnX*!Ss*(>U z=+r4~py~4>2<5tfB7Tc$DQ9AQ3foS*Ly^1gib1?KL3=DuwZE|=mK&b{IwI%UEfcvJ z9(AI*n=Gfiwe?lRbt8*5+`&Uzps!QAQPN&*=>LTv!HbL3vF260kUr2;zeV~<8*t0> zq$G4S5KfK9b>Z-JLfs$s4~n)PLhG-APu`4~@+Xh1lP{Inq7lQ{Z~kx5_U*Qn^{uwP zbEN(D$De8+efm~J$O+m}+9z$C?OH^@axrSVf+L8A`Eur>d=vdfbb@%Z^4G@o zk;sg6bA7ultn9WYp1RynkM^opysT{k&>`t#$LQaHpSy`c-jSr80BHwnl1W1?udJ99y>)lq0i=mwCG04RbSjq z*q!(`)l~4@#!l#C>38aryEvfePKNj_E_5_$cKA&-p-tq@{Io+6*U~r3keS&vN*Tsp z*J4Z3Nk`!J^vPS?=%a0=bQug*(1W$;)FgfDq1~-^W3wNaRo?};ky67x!;vV}AA?pPc(rT=& z6KC7Je7=u7rnjA{(9U4UyNTS9laoVeXe* zg?DmfFDS|*lY^$a=U!{0o-~) z4Z`$SJ4<8$~HbONv9E1597_qq!q zknlYBhxa*kK?kdofhsm}wH>BTtP#TXD8p}+w29U&9Wm0Rflp`0d}ZKFThD~VGb2UZ z*Peq*51W(sfkkY0pX;RS>BW(WqfQRGxJoZ<>!NOWCW^A8Z>6(&6Mq^)9c+xf-!BVo zT(!2l(FQ9E?eqWG*R(gh`(5qLk32-DWUc+)?|ro0y0O%b&gsPC5RSnm8o>dboh=%D z>pTUVrG*7LZiAu+vTx_bH`vFZ!C)4mdLcH8BMxvuZ~ZG^2>d2;UGmEUMEnz zv;J-kx4OFA78h39(#^&8lkfkD_7}eMJKN(=eu@;rt9KxL??b1-Hwm0L9?yh^PTyDt ztL$Lda0PRK+wok(LE3;1Pdxox`>CJ!;dHLIksDXLzVnN}qz&)jG)Zq7(scH4K#q)# z<&C(k=qGOm%sg;)d`9zV?f=NL+j~)t`kPSl`{djah&F%HchAB?aai#98{^B zoLm?0L&q-c)0DalxglP3u;;OPb>MimOZlXwT_^ScX8w}DY=USOcx8o)&Q=)ui4mHO zJgc_M^u6u_KRQnR;H4d4>0l%8AkOsUM7wtVMppgnR1fGd9MQ|Pj@nP)Wl>y^uR(Pk zTPIv}##dHW+G`$rb(@$SYnMLtsrKVP_M^dHG^eL0+ZVp&t?k^sr`tdMzVFv9PC$I} 
z_J|#Wojq+Nb)-zi&u{vJTCR0&?IcS$Yb6Go{9A{w8yj00pm9~d1E6jZ?h4i!99O@A zwzRUA6~V3!UR_;m-}WcIr7f)!NBVMh%UKet9e~u6a;lxCkgolaw*8_0Nd^Z&-4#OI zX6KH!5C86m+T$PlXghY|YFEzx&4i*tyV;^*sVfT^S4(!lrM-QApTw1&Z*LK^96DQl`xRYEC#_3Zy zK%gAxm;zgjWSW1p#WFC10{(pHIqi{HeWqZ&0_Bc_^a|&lL0J8f(4PyAtE+Q8r2R}3 zM_=~g^PNW@`fIJvp)yjLL-+kzAIGAjhaLp!Ep4Pr7xsBpM5&O=x|hG98NZTR^M_&)G4TtOW`-Lu@22ncoM^)+%5vn}A82*!otmVYV3c(Mr%brNnVdSg1f*2d7 z-Hr@F!(rlvT(wOucMNDF*dArmMmvfiXeW`pKr&ImykQZA%SsI+E^uDV$trQ0*4{la?vF-qHvRAyANmioVTBWN|Qdm9wyNhL)WiDNp7t z-7p{*&?M*(mwJJ$q!G7ta!qdqOw^x~&i2fKtCgg#RIJN&TtN5Q*rrz**l z$spqQkCsx?n8?Re_He(Zk#dhW`LcatH{vu8?2oF{&I3XzWPx}&_}#kz&&T$6#iG< zf(Q2u;Odi;N1sVr)gWtEZ;q6Qf$0Y+rIBR~;U88TVG{8?2;!PFUA$*6>)uKf|4ZIT zm--eB%qIb|C@T4kgD&a^&y;oFVw5+b6aQj=03{DcsF(cGCfio^q3j(tD7M`GgEC3` zIJ5x>bY%y*0lo{3l)c{9D!PCN_cbOU4&a9zW^#UfCa~oD7O)-QR=;<*Hpo7NtZY*5 z?R-K)xDF<|kYop$bFr(RFVi+S=xln+bF#)V5|15mf+sutSdm?LL0c)VPSPsFR&xNq zl~3z9H8Yj(!rK0iPfTNPYrFUGH)W0|-xj*6PCziqtA%)$vrYSJT5;4j7{!;?wpmZd zQtW?er?gMw6TU@T3yJ3D+=Xp2Z*H$Fukvgd|9d)v&bI6huG+82=K!!haFuK8Rdz}y z*4gLU1h4N5#(5~4RQyOSkgIU$ESx^6$9^c8GEF5>|5T`YR)=!aos%{XyU^$F!r@1U z!i&-v^s$Un|C7mUA4nPHP&udV)=p|mc~%P_wMC_Wx8eI9JaFsfBiP>8zvh+g$;Ut0 zo_+dKTZZ?(eXZV~J9{z{-^#5P%JCPd4sWp)$hvPNy6_}@T;-g?VvE30U5gz=sDdu> z=F=9CnzSW%Qb_x=w(hQNzP$|2$oAIiMtm##eoo%I5MX%7?~zkQ+XfeQs`upCbuk>z zn{PjI?zi>dUR!RPD@$!{u-w)b7u#TYFiA zI~fO=otWYs#xUTN_X{ zbZVEfz%Oh@L58r&;eY7rq1}Ss&@%lzbS)vY*0<1qB0aL8PP+?1cA0am{pW0cn>Xv~$n`SkoAX=k6Hi`l)04yPuCpiF(V1!JuYJAUrluw`wkV7m zZ-*CCvu$*0CbnqLH)65J`c*ozHR(ZWXroAh0sqQZ?MyPy{I70m11d<%lI5h>57pkH zQ%S4TmQGtvJO%8YZAxU@w3Z1G;%A}{d4oUgh|i#z1S4n0Yclo-Ze6}=^LT0=bJ$*Z z?@8y&s!$i(o1H!T9{6~^y{>IQzm-peaE9^YZSx{B{_EpyyKRfL75bq^^b@h$$V0VT z^jEf=r@(H2cNRe_hrXz^O}&uIBkr_@j!T!#&6V;WfbU|$?4BxRz_mVKQ$9r&ibI#w zo`3c$piNUWLii+a%~%T`>)Pg|jGIA$hZP!cOccauXnmv0TSaOo8^bX_yj1nvyyHReMUJglgvu`|{a6 z(w`X`n&L{kBJah2E57tPK7$KG7MBLt#7%7RM#i?&7Xd)}rj(PH&j6}EIc7YGzFUs{ z4CPOJA}iRpt+mZ8gkGATZ#S;qjPK!&G)}bpe1}8dS$e1nh+GKnFkSDD#{bj~DtkQJ z*yNmR^-JnluSzQk#Mknrmv&Hu(w{-M^g+~b>1}_@7jVQQz6~%&L1L8W&aH@xV+iv% zuD8KpDSacf2!H2D{2g1z5z3E_aHPM(*%vA8V}Vu5?)?Dg?Cb;Y?ya{m{43k%(3Q5s zzKi`NckD5*i!BDUi({jcZISk6WNfCr@y%~&U;j7150F!BXW@mm`oi@#w7pKf@v+7y zprN0XpuNYiySPZYxWFXi@CND|YN^|YLoYYRt0a9$Iu7k<_)0y%S*gn}g5@)>Ng6Pf zC-{e+@CRq?$ib?QQ|frh#;QQ_AvXA&RZ!tvL<++bR+vl9{U%$LOgmV*rJ?cs~0UGtEi@Hz%S z+zVfijF%9vT~0q3dzLw5#}Uxul8K_D`(b1!d~Mx+#fo-NEObm%=#$HwfgJXQW?ksp z#H8h1<@nsYk~41wnRd%^gmI@_5Y+2Kpbb}s|NI|&`+EU#v5>K1p(^3U0ICrsAS4d9 zmu-+G6)yrJ*W0`x*v0j*`yeT7fXynoG~fo41a^w{hHEO6ymBrN3XX#);u--W^ksNx z3>`elwZ?(#I$&bb$m4_OIZ_U35SR>dn8+X@9SF3TgW@?CGtk<)SD|>eub`|<63ZIO3637}JTw$|9g$0~Et9JRk;vo1tylDV*5SJ!s6*g7AW^d5 z^B%|0onQYpu2pfn5w`(z<>AiOfW~{7PSiqMefp`keEFI7^s}F8_day7ed(8fNhWEp zJ^y@r*B^Ovd)1>a!y$Qj`|>Y&M|<>D54DFMzK_P9&KHN@y*Yd4WIKLxw(SgUwo`Lc zZFO*?tu3##`|m#ACMHMQfHE##IMFVgJb@Fs)$X}?uAM!3ti9sZkF+m(_nX^mU-fX? 
[GIT binary patch payload omitted: base85-encoded binary data, not human-readable]
zq7QOubB#LNI>*XooULn|FSgy~^X>Z572sbc>Pp+ZdbO=yyPA3T)vH&~CH!(1_}C5r zl{szSUfRpFkfvZh+E}*mB@W%zg z8HZ3LFsNG)!l~FWBRI%=WBMxW5A7f7z9MCXWe0d12%RpU8~H2W^2Kk4YiBIvpM!@K zAieU)G+1T-sxxy1Y^B!FAmykBx?Ipz;-;m9bF5^S~s@!L0Og6I2pew5suO z420|1GW?ZO#W{6(=2SoX=Kxc&10Z0fy>Jx1d?Y3J9Hf;q-qT|#=@e%Lrc z!Em6C1n>K6&bd5z57Wb#?ii%?;pTfnSy<|}JopF4AOcz86!*F(wtFWiT>!u!Vc0iS zagerYtD{qG@#KANXyI;ZphKp;y2?w#7bE6~#HFE}YOtr{X=g*4)!C}e!jGh<4SQ|< z@V4GWGauK2(b0rd?gW{#%03eb8i&wnqz0aoQAw|fO+(@Ul85Hh<96KddFOlEk(0;U zpZlwSD|rlg0~lQ}|1;@Zum8bQI+CZ$mr(aL=mw70yp+n*8)r&Y!;JyJ!CuaPPu#-Y|RDIpm#_SX;DxD#DL6>;0 z6Hoo}ksn>d>;?dcG4OFkn%3aR;#_;-bDz_``VFrK%w*f%xE#D!aGoxlKbN;(1g4{D zyY&Ig^uj`0JaV)hKY6ANxhj+TTr?o{8SKh2TcA?Sp!{YD1r$UEQma>9#_Q5;=a zzPz&0KJuYIY(Mn>{r|N`AAKwjo^LsOdpmyW4Ez{w*OqWNLE8SphaI?P)CYJ8_Xyqo zlnjjP<>z2&cXtEx>I5T6Vn`Nss{j0eMWbU0mX~+7jJmD?8a#JT2A!xl1XX(+jyU25 zk*dz#{?RjDM7F%oN^Az=(G%n_);(x)i%#JReVOY|e)7R~6`i^Fp1a`nVw;3lCypIz zXU?8#tLtm+6QB6Q3?T2k^>kZYJf2-0UhurMi&a~wq~)9S zQUilns&0PQ&4=Y!$GoBELv0Ti2Gj#5AJR{@sp*M!=FI7K|8wtd54`wA?d30fWqaQ9 z@6UJ6GAKqU4!q={);BQF7q_;ZqWm~@7&kROo$rwfQ$2DLbwss4DRjZc<5 z#vR-s4|hSxws;x`?!|8wy*td{Wqy999a=ctrl8gDtxYkwagxjji_)qbBT)vS_1%2h z6zPR2_v!^U!U-&O#+YRd#mefTlx0?>iT-VC$1jpOW9XULeI?HG_(x2OP8*| zhqY|y>Z)%SQ`WcBX_w%tij}O))kqc`}O2YKw$Fb^$-IY`>^JjQGZjZ}nkc z(x;f0f_dmj8UGZw`W)Tqfab)#yYVpk%^Q#Uz~=M8h;P>@)AZT+>NW6#9(^JJmafAy z+98dD|C-pMm8M>CEc=GOL7)qMf=|TU*;U&$?ZtuT#`-e!(0;KC*?o!5&7FYqMy5Dg zkn&Dw_z=%YFDEU0c7`ENmt+9O+dcnjhpQ)(yQO`;o>PteXSXi_|}7M%(}>%hmc6Y zaRK@2uuQ<3HT9`4@=21V&jj%V)7Y5|Y?UhdFv4FAo{i@mAO+BG>}tm+CdT5EjEsP* zeH^wg18N{nPEWSQL-mHUZ>%mHKGdeBwD}Y5-h1zDk3Ifm``D+?wMRbtu`E7(;R7$j z&`q@6wN-qLjZExm?*gyG+v>$PZ`0ZT+7H`C&^vXu#x6A`1^bk9!HBAyiBUT{=~jQ` zG_+*k?}45LwCT`qeW@$-+Xj`_^(-vO4s2wzO~g)9f3{Vn?I;W3)|i8TK)67q@BaaV zTury#pkLtULwfZ@UO9p6gB^qQyT~i!XM%YQ`{SnaRr-YsA&s+gbYZ>R*w$~OZuR!F z4?WVJ|Gek4qetfPp{_zE^;5`+3Wo$YKxvT~Pq=8;{# z4Yv({V}_LiaHYpQ(h)l*Yslb)v-*`W8j)oZ0U~ddopO>uf-w4T;b)>c(bWdD=iw)j zJGMP=lkfKBv5izRh7D4V>7zkDI&B{X9@%w;AgoNRrGFE!v{(Bt`ps@Dg)Yh)mkAwY z?(QZ&WHFl$4BYYO-M{nR_KWZOos>H|J=<=-^+4@k5h zDvWjmU(FqKY)2Vu0Yv)ruk=;P$ba<@QsrVS7eR@C>2ui)+oZTjzcBo^uKbovcMCHu z4>HtCaIuW~E{*!Zj@06Rin%mI=-`bkc9aA7_<+MR8Kg9H*k39aU@1G?E|6FwEKY9HX-|K3BbZMS)+2_46 zBqz!+>^w*EsNdGbfsA5zmBx zl=N1}8uAY&n&rwARuIZLf)b(*}gSLQnLtpAk2Ph>8(gg&42ga>FC~oNQ?&f;ibN3zm zhGE}OcL6|rOb_ao4>ChMWB;q3G+cxC$>SyMk9J^^{=kLvSK6mP_hfs^+dtJd$#eYl zk+v{D(f;BazqLL0C9mlU=@U55T?^PV2N!@H(C~#{j_SuM*0y_P=~7#{y52tYf%muX z`g4D|UAnvy#@HSBINHVS_7&=foZF%fx01)$1db1^vQQU0>v%VF5qmDOBr*N&aGRN) zi|@U;g?|HWs}$UTr62nA!|nX%KGDAHD_+?ypL?WT_|(VSOCPwk&7WS#4ktq+jC&{N za&|63n>W~v5;`MC>fLi!Q1mdO?)4pP^Z6c$Rwul+AC?a7V$!r>#K8xt6kq9b@oB~q zKp{3|lz$#T^RLw`|3_R(qmoAiIJ?72@G{N%kxk}i_&2XR35(OOF<6j-#QG^E@H>Dq0mxxfj-Yg3cy;~*lX9;H`>!zuC)zN z``+*UJ8kOl@%FjTJ=C5$ccJ~r5C3TUhyVB=wNHNHBPmyP^yd-yq}GdraK%;KvOfe1 zb%Q^**S0-u!E5KG#P{5}C)?r*-*wcs z;n_tezBD2~^j#eHqCSz$!jXlx0v+n)ZTH>V{^DQ$8|@`;cyk*+a-8z;72keneT}jA z8e=FI^y&xeV8nOTo@zsp1##N(ob~T1wMSc^ zQZm{G)n!YW25xqTIa^44iC0It*DuXWW@e#$#);f`1{3%PZTwyD z76iyQtXbJG4`mtoSr^bf+YVyT>OT0%_l~x5g}mqNk9MBF*y7-Cvgg7#xrW85#xmJW zNE-w0)=_>Ahc;fDlzJ@ZL59E)UZtB9+oO$+JmsC`kO^m6ZeU1|5aL~^yLnJT47^j0 z?q;H2t;&Su%0SuBw!%+wRp#nKcAlv@5bKm;C<-jc7yWZ2t!ecu)z-7v-#LOUc@GVu zg)Wqgge`{F>$Mi*z`cKUTOm48H=2s%piZfZFAzRe=&!pv&W-$)Z-4EZxj7@_6oM-! 
zvXa5dfYLt*DTu_$cvGz?z)G-%Tq}Y&=S>tWj5?HWd_(LG_c}@$Wa0Sk;GAr2+ckGg zF`jhc%5dn)Ss{2h5K|$9uX5Wsjk6&eXSx1i-#&K?!u!G_L^x5FBE52{bcQN#wmNH| zh}9@nO!E7{&5#DbeJLY{aQdu-fKCXa-Pw5gd>~^-zoFnsJn#f3-0tW@CggL=kL9AQ z(pTp}&cQ{&-aiZD;9*AVs5Gzpn;x%%U-b7isQ8uM#V5V;TfB}w-kai^{8!L%t}3Pr zb=Q~eZ@g5=jT3s3ybs?y*^waIP~^Zak#+T&rVfXnnt+mP-Z=0N13dz)YUH$sfHXrH z=;(`hm?iKuFdZ#~qS>QM&pD^u_i=_?_dF6Ieg`yYxB|zsb#q64Q^9~}BKq?7Ovd$x z1Y1{sovc-N4s%&HL78}lE5v0Y6Q?^J83s%Px}z?UZv^Mwxa;GyZQ;-@ZSwd%z@KAK zMI)Eb*om6>kf-v5QbAvRaRR}PoBRHt-b}(19~?xr)7$>6?pGlbJDAc?LQNXE<6Nf= z!lBYbJ49`SxR|$e)-4%*qt`bwE?v3O-uBkFx7+W$vwg$2eMi9c12pwPxY|VR*ABMJ zhj%*G!qQ$D2I3Mw;$cY6;?GaTOAmi?-T{RMU#DYzbrrj~(SGAMf2%!tdAS{3m~L~k zvw0)V!Jkg0TM6f3Klamy#I86W*I@xxVCI1Vw_&K4y*?LiA)P}%MwG+CE;za6n=qDj z0>9oeY3<07L+!;cd`|naulX8^x--D}=2pv>pL()gxpI}fKD?}L%YEa|LCf62;dbQc z@iwt=D6l0xT_F`qiyfXZ`2!ziVqZ`XDsYf#M-En;RClHO=bgTF4#4;L7O?ZqT{3rSL|4h(jjT&!~&lZ>T>y zaAqsL(I$jtx<^^8VGosUmxzmF+T~N9b--#;1$lTbK;{e#9VGG5S&b;bLA0`UEs&ky z6IQmZN5A_1&j)8U8R{0FV>sv2ZGLu|!O_F*`R)Gu?rV2a_O3hbXwQG{3)+kGVE<+9WiNecd-W?{*B70+Vh|XW|cpJO@^=PbG~_ubMH3E>gxh+V*47f8@bWFEVY&HvWi{I^~OyU>cdC6P$JQ-0fmQ8XV|Dx0AlTqve)* zKG^fS;ja4EzB%FR*0J(rk^!Y}ez~pq&bBsT485Tp!Y7r-g$`a%GBC8R?d`(<^3tUY zFm0p089hdwQ;9O#sdB8xajVYT^` zQ8822oHB;AjC#z^wG2zV=W4fsr40@o^BI?n-I>FDCLO%*F3YwXyfGvrp}+6Xsg&F9 z+=oH=E?@`5|Ar%>KldRN65s*)mC)gJLlrj{`+{&&{euF}Qjj|0?|E2{_;am>X^C?U zzjHuJz2##5it_=vIA?OnG>T^sW@)qSIw;Y$)Iih7l2Zly2j!ZFVc0+)tY#rgH-M=( zX~+l@EY?48$)GBEOV6{yoFn~va44JVxX~ylV6t_xIG}*~>lrXPp%A^=MYm~_>ua5e zjOWD92KsGZpe@t}ibEd4*oM)6ahRTRlEpV?tL?d^Wcpm-_-UGP+MIcPkmBb?tkcCl z?x>>uUtM0!q?nGCberF@zBTDiB0l`LAJ%RNUq&kq5@LKK_Ey~D=GI6(`tT#JK1^})=QNN-gF6K{vC-3dk z($7{9o;>x)ZkD_=|bTE+t$#KS|}$?pP9`$NZks?gX( zeU`zq-^fl1h1{xvN)0Y5Z!^k~5#CwvK+{G?wY&ORl)QfJ*75uUghP(_yS>ZrlCk_i>A zT+op5CVh}F)9h2OTREJ^ZC=4;x$SlnXMF2A<+naLkv(~VTqL)BZiSj_v$r< zARwHGb}=3NQ7-DSj3ZOu9fneCz2W?WHezUORr`NSnrw&`+7@e26@l9tBU%`h(-6lQp?S zo*J9=l*(J^)D);2@+rLR5at|{1l~Q*@@t%J+Ol3lW5JOc;1DS+gZQM4b2jwyFqn4) zI^jUr7&}4Ue!eKMi8s6Bg!nSZUEfQ2Y1a#{lC_g`o3jbID?mM2EWPr)P3pljm?@@Go8MGa#E_y)L{KAbu|yOfFXR-^3aQVwGmmQ z2E5?nLrvg0{!wSM7+ZdIbXup^8K;qMUTKrod=`khYy*z}q$~Le)6?!KGlZWy+IBYA z+T8q1-crP4MwQAA^@lo|t!)*y^3G4`N`=aot8SMgsc z+r!k}MvtpZ!K*f-s${_s=fp`5cvagg8CpfkaSqI!Xa~S8n`^yYQiv6nZB3oG&jlBK z7-^M!;m7XN-pI7)ODOcGdyJ`k05oD>w&xqWzy`kUCS&{XNBoP&VWngnxX&E36=X|$ zzaLFMj?en!#ntw<-}z+w$Y-ARkyE?luG`xvW8Q!CPk#_wGzZGs2pRX zs^AL&-;4GinAGFqdB>LV1>x0(z6I9DZ=SmVSNotn5f^vJnxL<`zCF}tus1WKyY2kb z7u#EY=BL_A9(aD+y7K9E@0k2ocHsxs-y$>|{+Yf)$1r6GCMeeGoC ztE9E;YIK7-s&9ZV5j0SUvu5#he$u>TB8^L7<*a@{CzUz2uy6CQzZgOud)V+kzkRvw zLElfvE+S5ULw)HJl^Ok(cHePBs8FkHoK}(PG8IM zOTj4rI}0W#FZ-oef1jE{w-0-Jd#VaLCM@5QYsfdhJL6<~a?0B)w2|ru`-KD9P+@KqoBYU+{%{st z+E>%O~6d3DByK?VYbaqO=M?)v2pr!aIA&C)+KnE4gVsCvK8{rds*qdV}h7KB6{65 z$b}L5Zta1K4)S6YHgI-srhWeNkF}+h<;=BBOiZ`M<0so8##gJb^5iXdv^Rgx_p~qi z*6(Oz;McCNw#}S|wg1>#3Aarw_GV?y< z+W(fyq-=Nx2*nbQC+z~ z*({8Sjna+^Ur1!x#TT|d<^*Xu`AEe@fkT?Z@0s4%k zbTuz_2$%aTFz)CIE*|Dji>u0d*>Y9)V$LO1)jd3`-OGu$PD0vT7pl$Oa6UKk*S__2 zZ^j^=F;1ji(QS|BD&TCx%P}2@OP*lYlyfKhP_AHVh9hlVBbGk_b&k*~n9;e<`bI zsMO_6e)FjNrb*RlVQ~2;c6`#KY)q^SN?I(c5-*eI1(ITI~J;tiX zBY0Vfa+HtaEx$`|s(jTEnrbcUfphUTfcyuhwOfy?m^2KL3y1pYfT-Ro6i@LnUXqX) zd4~P&4E1N=;Gw?h7W#0o+mWg(@(j5R1-{qT4F6KSKk~*) z%_r?C7S1^Npz$Ji#I6&yOS<0&0}looaYjaQ#1@XXxs&&{z461K3ztA^bZojh;Ls!< zz9B&y+yxJu!wyrLfVgX*d?=tnsKC(ERrwz|Fo{MN3nEw^9)ufN{D_{+bveW~9#QfIJ5ln(uBlPK@R zIA3oQ<4)mqe1VRBk~H$8&IE8yxnaMZY#UH!waY}>w^@`4I=6alS#Gs=z31KS;YXip zb2x5?7ml^9ZCVgcqzf+eaeT|sEhm?7ls7P}<2doYwQkVD?P{N+h#h=+*0IcBCsTYQ zp1LL8H^rSS(@F4crKyRTc5QVXUv8w`a{5I3;#a?EN@s`X 
zH-oP(ue3`SF126%wg1}w^Dq5UJ92og-F?rUZSm-FoZDe|u+dgmmf9u*dE0TE=g@B7 z;Dl7QJ!M{WtLbGOtkVvD7anKP4+FC}t)i45?&PJs&YA;c?Up)`I<$$lH~XCMvh*M; z)}e{JpE@<=+e8dXUFr|0Vf!Z zpY%aDGIzIukulmEvbs)twci>accMsp>fqJ4G&b_^WDmLfZNZtTsdnVhVmrQgtew5( zmik=6^h_q-wUK*!TWxCx{_RlKZ-4oahjy(Uh|K`!@e#L%bK+Ac*@sW{a8X<0+g3H8 z))qQg9A}VNEBCYY7k1Kh8~OBrw9{5Mwl=a=|Jv#bGFojr*i9EXO!~%tw`I5wM>aRM z+vQ7F+m$N}tTwmX>L%yywYIXk+Lo8DA#-^$P8*{Pb?u>}I;Bo9_&wLPHFvy#H}H$$ zQCnSNkk5c+Wo?7{rrOlZe74xu=KAp9`j(&S7>}x^ugn0wbX5OH-3U+AMeg?CyZR9) zF1CxaZ~3S_(P1x3mjlPN+?&^PS1$3!bw{+90Dab#fuC(D!Lx0~I;y3{c8beT@5h+M zHogz1u0@FRb@bXe`o)4*{a@wbK4lG3NeKL+BXPl#Ji*a{*N%DYEB`n$CqCUt6y z@@AC2j<{4Eu{HRWLLc%?9N?D?6dZ7@UjJXdkd9_ksQ4l#>aRYwJ_F50DN!GGoW5|^ zEzN;HOq+EKF*-?_tbmWZyY&tI%((?0c(6N?wy;IHdO!gl92tPop3*mjH;gNM8)N*K78|`vp$TU%5XuSeV79*A54n(n!9VzUbOoz${C~G z`2E1mb?p~z4)k2eB!2p+KvTluoBogk=QY1$PhUJn8hOP-poPWsiEgwiq92W9p4!@^NGVA#}d0S({@o{0%*THwc`McqE#o#MEe$Zk4YRJQX zai;K~q@Vm-zBT1|lj_mg8Uw&#^jf*NuqV2$-R;K^%Gqq#iA<<@N^KEYIXpPr=V@{jpjt(`>tWR6ixXDFM#;zwsm@o1uk_DWcwJhc zHTDYqzTP&ecat$YV>#;1w%&?DJ~^)ekuHLFhb!B>i)FQY(x5Gt|MoRL81;MqGqa5M z?RVs%eW^U*sJRa4Q69)Ik^m9q5yX^j_~*T6gV(~YGIbc^eSdW2Q-)*UM^?T2*VbV^ zgZS7!Jd~$rjQ7md#L z`M3r~-SspWjSOSgJbIkPpZY@^G^LdE7XHq^rhQ3Q*VcO9x1R)7MD=4O_A$ku`o3^S zXUZTyvuHcK2p&Ce#nJp+A-CS2tO(B@mf?u~sq$$%q0ejiSM|}V2iE}XU|5$l7JkV? zd(;jZsvUYZUsqpv48G*EUE5EG27hc5@WVdGmlN`tEcxT&J6}+9&KcbwrGBZg`n-Kd zDYsk>%j{c}i{x~?$eDWJAlH-x75xXtUoOr*cj;RD;U9lnTcM~;_Rf3GwiiF|x$SG; z@ST>9T^Si-Oa{!oUD~Dgn+hCr>QiVF^y9DZY8Tu|L>n;FuFzj^tgg3*KK+UIXWsM| zgUb|kA6=Ynb93|9BgU`mjyGw))Z?c?Fp+JJap&atB>2%6f#V!)eQtU_w$?e8_4Iw( zPx@l}bnOrQK^E|VnmY*C7w$3!U#I*sw(s29Q2X3RKi-~y+p+e-`)_TFM-R2lHS8$- zome>4hUaJ74rAHtJImPYP5LfmpZ*dgl?(KQoA%q-x*Cs)wK_-|d1T8TI|{$Lb+u-(SgPj!%g z^ryDk=IkGM#21VsZ2v58Y*&=C4FFl6!M-UAn~_s|=g=Y@Jx=;M>bq@T`|HkRBeg&f zKPkJSG7iey+Q8b;KSDR%-7I|&$H2G3KZuQ|E}yd~Z^yjj6Vhbg2yWD>z0gn1vlDc6 zdwYYL!4dnSUDwX*H*3#h>+MV6xwtrvvhT>ESdKgck{7VM9wh1KLkXtqUBS2O9GEk!?sDPF%*SrpiZj{mRNtyKrTtUER3ezUd8rrrmn?eeJ@9OYL9% z>wnqa^}hF{jCR2nude%`lIw|?3Hmm#@fE<^MY0CxdB7laLLu;r2cr}8b;=d~)H9$? zZs>hJ<4D_h+dF={J@nZ}+qZnv*SAB57uxjnA?TiH>+slJv-C}UXs&F1*ylpV_05$w zG3lo)XMpRx(K>aj9`w`5WM}(&o0=SMU-IRzYyaxs{!8Sw-6oNtymHJrOS_z(nr>6* zgFEF-j-#`xWOPrO^grQm#(Vnn@Y8shyo+8aV?*ZLZC9j~kH$S^#>Yp2=bV<~@!>WN z-&WU_GWRq+yTI_sc~sgJ{>yj1=`XaeeDmLKlSfZc5Bj#d+O{^XwjGW!+VL1RHtkQH zLaz;eI-(jRHKr|~0~kFYxYi;x8?lFZ)Dd-)1Np?Kqn<3{O#h{h7tPXYdx@_`y!2#F zH~K~X)TP}LBG-XyyQD79)SF$!V+WKuQI;W_G8dkB^y9kvbg}o;LwcSqN*cMGA8<@! 
z>kaDAGNfS_SW>K0EU?o?5iI9#`Yzy??yG+aS9;JVY+-y8(^HQ7$k_aj{kms9!~>ao z+f?O~##%&03JN2~VvD*liT>Z7%g?T3gBb(1<|nG02gMH4#)CWUFgC(*mS@=jn)29% z@TFSv+c>T|8P&b?$eXe$nYVxrOsb~?f5O`rby+8*CID`yXFs-P{}zj3uLToO!@ap&@qTF zgJnkmeH7}AuETfxw0?1}byXXe9O+4{cO2Ys>h;i2w&Xy>fl&6mog4Y9Z~U@1lj)2D zYzHG@SQNDfAY2hLO@SpTXD{=nzX+#sae#pYRl}3KVx({Txb2_BWR+niU?@_Egdh*o z?cl1c2}QL1s76p;Bn{;iISo^?`4b#^qD7qh2-f)IASjHYE7>#}gqs-^26u6u?cjTc zAIw{q{e8iW6#P$gUoXK%&`o3n(_8|ySRiUdf}cjSLs_oJanhqRxyvj`g-6NCYzd*2 zkWM5QX5_!Gq(65BLTB;50yQ9$}=E8<{&z4Sr+H zZct}~LK4T!E^- zxMHvzz|vhiO?g!h2?c6&Pe`Tf89{`Rp?e7cRH3qDkh ziNNr?de;eJC!D6Ut-f|}HBDarDlXWWDx}@PQXfoD9uCu0wY;&Pi6_f4JOaQFw2tEh zIvHdKH%V`H%bDZt+rIhh+pW*Jr(IvW*p{wbY3s`?p}QVrj%Jnew*GRH61Q^)Z3~hU*BT&`*F(80r{o#7Z^ydwo6Z5YQOkP zZ)u^`XK!uy-TU0OaA-a&)-PSV(pFd8a?bYdz`%i)5Y@TTDIE&$5oZzaukFq{ zb)a&lyo^xhmP^NCd-l)ao+tdzMogPeDxaowAGHEIE?&u+uHh4J96Yq zd(n&TZ|5I-yd6GrjIw?{;9B%d80+gR^u^ol{Ka$a@|8>3YVG{_^X=-@OYP#Ni|yQ# zPqvE}FSbjUFL1t;HuCh-=iAfgpJH%!p*?-U>kIAbm8Xa&6!;59uxorNLx z%f4;fzy0OJy0RHe{IfFnm5S3=4EaJA^3k&n@1cQ_lCyusLCr-8b z`S}d071q}FT6~cm8br4IMwYgHdFY|eic2OyX($c~`YnK!h;eztYSg#AoY<{N5%J8* zW)*_7FoQoGkacZs;%+m@wmn#{bh?$QE3T&|r)kgf!Y!q+JLs=6uL_{Zn}4Rm0G!v#;si7 z=E;KzaCAb-K@fu;`<8@`m*VFGybbhIhC2b4w}gRZJJCMb?>f1q?q`vredE|@7R9*p zg)8|T44OY<3a-V`Nl+;WM(zv!5gY3}Oz%Xx;j8DlYVL=;+`H;%R z2I63lgZU&2{E4lgUG`_&ur&F`hh@B$P5KYEb7}JpO!-u07?Q_Mbo`LH!L~NlYvZdA%iBrtF}M<+iB|HPPk+aU-r**Ebis#o`*wT22fTf~%^y0{ zPMtp97LbPvsowh5x8t9Tw&&k-wyk6DW)2^UO|*}(|8qf@2s&vVy1F2*%S;u|qd%Ot z>=Ov`Df&-8YdKlqx!PJ?XE9qR3+;*WjNPZth)-dDNHI?)BxnycDOGX4DG(o58w>xl zARzsbhX8CtC98a^FgCW@lXb1R-KOK^=$ey1HL0&YxH!$lEY>AI-H=MYs~YNodhgcz zE*>cPDR*=jKBbR=o~!g7pZ)w(?f(02YbQ?~Z)@~3?q*{j;pD%Qm@Zy%H-jt;5CC;) zPt-Zj@+k{HlI#&=(Qpmp05y#eZQc8*z{v zc_^QYr`kgK*2_F?q*`V%BHR65Dudk+HctyDIhuc!ducZ0j~~_mblPYboF(q zSJig4Ew#&sJ3U;$M{@G`k;fis|Ki{OOq(V9%=mCSxp=7Ee%skBq?{OYH>5GnG5#Db zkWvo{x6C(i7FXM#i*?PIi83-(KOk=;R@`lmp2?P+2ly9!VTh+bL+EhK%^`oom(oe| zTO{XT6Pc>_0y3X`AVfz2)6c>i_`jWSVUBqs?AWoS~xdR1y z2jk+m`>+?9I-brw(zvx3Z98GPvW|>NFBtXyAKZ&Ek6rZ)vx^2gTx@) zgYvXM9R;z!uDiM~_`=Z_QTGhC)X1)DJ~BkG6$lC)%67{X5!S&wU~JOI}%&)zfA5yPM6oXWcO>Z!HQLo@)=>>F(OKYwgiT zA8qFz`E2{Mf8kB^$9ruC5OiB<-;U)r>7yNU&dn__?wkdms4)#q4V zUTZ7sE8&mx3C>MqS2_BvtCU&OKLwr(;x&oKipKbIlIztgEA3^^JK1i% z=V;qpV=Ov6)h3UeZhQFDJHU{yn8r5bLuW+`y_tKB;Vql54I6YT+AQNc9@*Q*o^kjf zjuCLmhJVuC*-`W70hoE=MFBLPgSgDO(9i3i>XXKxSqO;EeK`-HytBZvvuXSGt+Eay zi}=_aeY$TW>l+P8r~aFGq>Oa~)4Iu5&qv^W`ZocRhl@>kl*MP)%Pz|+%j*LOud6=i z@Fp;98}bvqvt4p_uGKc0zKgu_EPjOjLlrJQc6jd~Yv(=E$G}Vb1s{0XN1DHU00Mb4 zPA8wJRNq>M`N}7*`hrvZ+Tx{gh*1n zzVd^={|D`V{dfN^`5imC=*>l=eO-QHDGDV%z$*XC{?do^ewxQ|2WVS67nW5X^|=6H zI?hB7X~X00$T&6K#z68DpZrw&-S>Z>efgKa4j7YdZgw&c?H#MRW99ai3n^@`_~q2? zI3(j|;KWB)t+MP27Gv7rX89^_T+WZdOi2* zZ!US5O_>7-uuz4%TPXX!MbyZSm0=VvgdX6D-R%6eNkailFn+pAyu zCGEfafq&Q*PM@L@^zrI>#$u~&b?HhQ!bXmv)NwcT4@O$eR=7)yH8I2UxNG!Ty!EqIOI*c zCMCSk0rNi~;3Iia^Ux45*2Ou`ljLteSxfz<>H62@vU7zkT!@-K_5*k-kXQ9QcRcnWld$1 zYGH-x!Y{bRBJl}w9Zc_uA!=Y7;{%Wr{qN@(Fv(Rcx8$QhKmN)bC08a2zoZL~fm88t z)AE6(y!f>|e9E;+UXWWHdOyKR!_O41b6ppH0+0U4ciHD&P8RHSn9}U0~NWn ziEo0onR48^06er1%c-NOXs*~U1jqgReOUY9K-{^Jzxr)102n?Kp!qWZ7-DMhK}_m~ z<>`R-D@~`^ty~dt4PLe~kU%uf)LI2+T6H{5{AC+J&%hscC7(({Rb(L#c~zbWqd$aG zILM6%$vbXqH3lIZ_CgB8(~tCEiHU zq3%vuN?}wE&vwj~Gg)Lf_KJQB_Ji@Lh@EK9?$kjb%}`42+50Z8&y#b(?$F_<2h^Xt zGCJL4lAp0q33aw?|KQ&oMO&S{tVU4$lO|yX2k0V+&^J!IIA{#7iWgbIOj^B(XC7e! z#DC=7*I~%Dy(d9k%Q?6b@5H#gHFL^x_Fnjjr#{mvIxy`w!w!uPaI(#zFloGXWtug&|*vC z+=WZ+*WUV`_J(i!#`eI=UY+`j2Tq_~Z@#Ia{G(r; zoe&p8IbRf!cR{K@`BV3m=9%*3FP_*D;y{t+M6GWCedfVWwcmO7``Yv*Fu`%|&>Rl! 
zaNF8g%Nz0HjAhMst<%%v?eb*?c;vGwh*#baj2y*D+iR^Af=$pMrw?O?oe)y*Tbmji zVIWz9fUB!3`G&gV<$LbCyM52!{#%sTVW4-hU3mKGY=^bFx&|*t>9lYmXyY>rhug`M zXWQW;N82a^zjUfYzZSCODl`lefsuZ%bWWUTs$y4E)4T{zUuG zhd$cwy6^UO_SV}o`MI{b-j);7|_!0dMu)@fdnd*f7qdMq!$Cj>LZJ++^$J;UZf9u(^>399K#F!7D z4=uJu;!oakhNiaFwl>|C-tB^G(%lDE?r7nbXglgxoDdwt-bsPETRuzxrVCNbW22Ey z2QjWPo}`}X>HgdO?gmj?Z~MSsJo3g8Z7+HUZ9Yt%a4-%Kze#3&4x)E_8y4TdMIhEC zKZc=u8l9V)pKT|O9&0C$pKd3PooL68AJ0O7>6vNT@mAaL&0uiPM7WYgHW`q26Qb%v z&f%~!sX}#uE!PM3nE(JM`x5hxlGnunV`C2fsF!vL3yFhQ#Q6Z`8UwMFm8)%iYn3*) zt{dOB);HVg#s*hD5S?ttj-6@?hYo}982MM)mCH*-8*R&-5o+5{bq5ecA!|&sKjJS5XJ=tACYG`}J17 z&KA0IJ&1VfU~_N#7f$IevV*R08{JNR+NW#o=e)v{M2DtXQX3*aS z7gaMby&yQ0SD|Ynl>^)2M4^3kWNb1i>FM@_~JCkMN8b4U_x9j02Jjmxlx;&fM85gzXyMko& z;lF(JP5z>#c;c=Aj%}Qf^Rp;n5FB(O(1mf?k`wymcar2gxC_gmo$!cHLp{c3p#kZ_ z^gES?TF~TMsrArUyRN*ow`tG742($6Q02-O&lLx9P9FKkZ-;~BO;y%n;I;fy|HP^K zJNTwPDP(jrS}L#LfvS>4U)Vd}I&zD1@z7rQhP(ZkdQjVVyAWeGI^Y_C=RD`$cJBP; z_Tdjd+CKG}kG2(T;8%Rr*R;L4^wK+-tXScEiSW`c<-)7HB304O-*av?}2 zTe!DABh@YYK6&N>1}EaP?L0+&*leGu4rI~8iyr0 zd!?@C92wZ=i~j1r7=2JK@VeZ=h4}J|!P7p5pzJCiGKQuJ{FV6EIq;33U44pQ2#C*Xnh)3W-omL-*{-PwjQ59RiVFCSAm`F68(dDwo7F7Xcx$7BNF zIzBFavky_E(M7@8t$=#7I{<~QyHIL)x6RH^X92*F4*;>*r39i$z0}t^g{kLY@V?qt z?sD%OS>%(l%0Cn(N8NXq&lpS;NZk$Y2S?#ifpdTzxbKeQ3Flsa2YCz!@;^)Y{kWUT zYg_uH12k2U{@&1C590c>^9nrx&RrM_Sp!_;~2j#sWPgh5Ux^`YlSQW?vPCjTz zn>BxILzkDf;3Dg4-}IRqt8I@l?H*$v-%6)-@<*SDy(t_x^dl^+9pIuM@!R!H+A}N* zw?1IA{f=e_oo&YF@BipS?HB*xp?2yB?QCMGEuOxk{rw;K2e6n902n^pE85HF(0}k7 z{I)-Bfih4|rtM%4U99iI_VZ7lYhU-LznO8Mxa_qlvf>7SHn?ZU0Q}^&^>*pXwf4l7 zt6Au`1E6L6o;BL9+f>VEEMLY9>lzUu;g@A+LTrh%_?k2A)i$(xDCUtYT;d+TLRE9; zs1X4%A730eMUFCB z#tqb$3k04yO8fuFSzS=($Z6Zpl0g^z5>onryd=apA-ol)@s-C+nFk`$cGo`#aj}Me z+BV|`X&jxXofrD&9ulLUXNWH^ErQ`*GKE!sHT3i6&S#-Vz0KT2Es7SGsv922OZXv8 znd1oql?DGhLyU>ke`3M{BW+JWr(DgQg1h}P{HopAIz1IP%TXr$42C9f4zk?Y=X)mI zF>1zwz}$qFBNNkYi$3h>i%abluX=4ef9X>D{_p>Rwu0Uo9OL-XMB0Y<+do4?`4iRe z>o?iw+J^j$(jMci%=^Kc>{Lg-si~=qM^q7?JLr?!s|we)vAT&}Xj|>kC(gBZz3V;g zP2csMZEAKLS?#u^YuBI?9*&m(;#{`lUSAUNdDyPc8LX{PVhm$F*Qs+d^QHEE!$k^1e)!x=}8^#B4 zk(8>i+qF5{o^{4<^BYr3L>9_c)kG(Qsa-^!O9yjFQ1=X^Iad>V0L-Kdv_BV=JYWvY z(&H{;;bOa1lDQEUuo1Gduyp)H(+KK*+9gL{Mq6%$q9MTKwCY@wFWLR6v+c|7PT1K^ zEFq;5(#|;Sk1~eo;sPKy-WUIbU4(gWoxXG9rd5zxKmTTb}dW8Kq^t`!F|07(>dZcL`kWoiQ@*CuVD2rF|&;>VUO2M8SFF})= z&j0M^EzDQehAYd3j{#;(>3webi9I*k2=a-eXO14P@Jud{UwHi?EJNk1w3;X4E_o_G z^JW=bTR(V;9GUd{Gj4bMuGxWi^4(^b#n<*t0Lf=*Ali1AcH7y@$RQ95wDrc9Cq83% z?LCLj%e7Nwvj;>K4CUY1SfSNiK%XYijz%N9qM>siOz!`PaBk$UecPA4nOtY0;AU1J z64{kYMg02!qAKnxA&*F3&$bc}r=pQ~-R-Mm$V~IPR-4pHIO5WgKs;&Ut-+GFNlVqj zF}{Nb(=y?a{N5K%!HbZAy7ve`!UHvFr3jTzVk*Bil(HME@zKB)%!AD8@B4bHv+H3M z59(~3_>@wW5UWh;;HQp)Y3O06u_V7oG|$Au>&#|gtAYpLQUsN~>h>^u(Qi~2li$FX zG!ugMe)$Td6OV*CysC8KQx@Lj)=VmucySn&`m3HkzxVq*A`lfgIoEwc48CU*$SZjR z7({m+s`PeINh>}ZPyj=B-Zn!NU1T~e3FdxKN0sZ#55ge64;_pH@~czFk3ss)=RSDP zVSbFPl;`0@90qx9S3+Ba2XT92;QZ#GZ?O6n>2#Z(J=127+|}CT5nxSbM;XzSXWD~- z{Ed{QbT*lsO{c?ILyN(ao&8%$il@xYBRqbg6htdhTJlr4vEwm`V z(p8Uxq3YZZ>c)W}frM(u=oNf-!ewF}r*L_>z5o5c(>6A@+X{op2_04(!o0zVgYTAQ zYYYbMU{0Jk(ym>*9EWal42RRf9rd~;S03=$U&_M=@5+fG zL)5|tyCWml+nslvZEyVZf2D0LUux@X%kA>z%UOx;M0y_bf}g9+j~qYMj-Nc;h9{?i zR}Xg5PxuEeHR%Npb59m{84T4w*};#K&6m!f&v(-=pTE@p(@*|nJNLx-Jm@`r@)me8 z3GExu=fffJq>KwQZ0+Ufp|{c@xNzLMY*1&I>VTI(goN2wTX22J$3PwNZ(~fBK z4(g=v%q0#PINKR%XROB`ZY|@(YpY9nBBRG#o24db zpiZ0)g98u;GD~- zK^g6ZcsgNP4_Dl}n|e9>J!QAD^dI^e+BvT2*@1NxY^EvY*XEqttx zT-_E>xyqBZ&Gk(DxaHdPjGt4OfN$Wj3BRCAxZ>qRf!ieeKy9A(cocp)x%~KJPtk7I z(TNG#Ds2#KTy>w_2hs+ew2&_yrlCK?*EvBQl?G%@h5LSzWOVv$M5)4oV7V$Aqf!?+ 
z`W*zk&EUWWV_Qgj=tE!jV4R`Htgo=Uf2RJ=%dVcmVcjzB0Q_WeBHQRbboGYDgR~4X z{R!&t&r}e<8W5VHBn{}Y9FZiXPwCmAE*UR!#&_rVmCWAru%oz{lM-?JQgAv?59`r($WH&RKmf3!d0$OB>YKNES9 zhVJ(dUd0*ob7gySv5#%Q?=Jd{c%i1PVNZl(dsbha*wh|POrp!Q8y_ZXPrxL3v6LJ> z__j^c-oeq&3TVf((0Id17;PuCNh?Tb$DBmUTO7JM{5ipLy=&*!Dd!uQ*-e1>Ep5$) zivo!Ep%UbhobNtwtop_j{UY{K+ms0$>X^X3C>L!|26);#ZD5o^8&fjTrqvrid!b*O z<8^EqaGc!HZ`BV8j`%2L*J=?&jj{BxO4<<@Fywp2OsUQv5OyaWvuF&6^3}ZhHg-#x|N z;loVLPPN6wL%`l`EBHlk|IPPPZnr(~+&kOG(ngzEn1|o1ZE6x5iY#2vq0Nh(5)as* zoYD`$1F5j@(uU?i7kULc;k8?y>heYB`IB#k@RjSqwkdfrDx#Ndh`B_&jjCYJ% zW7Qh7I>1L!2)RzYvegAHdD)jowxku#_BBojRG*Mfa`9&weGcW3ajrm-qcnI}xA@rT zGC8aqh%5Q358y_9EBFb%5q0j|lkJfwueRGxA8q&Ea}Pe}O8V*@6x)ZM%GK|T`>74v zNG+^|R(T>H0IFXX+W=gv_U#5TkRX$YK@jpWx|4e-QT8Nfw=m*ulRMTYp491md{Xk| zVWuRRuNY8gDp%(R6QblQ{gq~d(Zb*`Ui}kCKyxnty${MV+wB4<+uu`fcDO=E(#TB_ zoxW`dgoygKrl0@7Cqk$6N2LAgwX2co(`z9qmd~ygTKv?4ivt`Bx)>lb1Fj2*4Uv~XBc#&VJXl0WQZM{5 zPJWM#6K4VF(pIaBey&P(QH1t>9e;8fJL@NGTrjHq3c`+yLh;aiSa zA=i1kE_M-GJSZQ!3p&Acl4&o-X^J{`fVSS+wdGa%fbI6{Z~MLWu}3bq#o3AMSopFB zUeUh#>%KJ*0)RHWz1{gh-GBfSLK`cbYi_*!Ip0El^wCG!wJX=!kN)_Nwhw&pBbq2+ zumUcO6W<^`;beCk4iJOUpR%#(fzUnAf5PO_X6 z$D$zP2hQr2K{%nIk_#4ysE5j^7fWn9|JG;UB`?ieeU=H8F_(nUotliOv%iD!1kv(I zt^@{+$2kiZZ|_j|Q5nO2`h=d4U+QY7>!MToqIb}( zjHO$B-=YsadHiHM_wYmQGoSi&TP5Qxs_L#9#}?+Y2*9zo^U;wB1^0cv3ei_(MO@63 zGNQ(0QBk_QB_A1H_C$}zGV*SGYPuaie!4yIl9#ow{B!?(o4@CI3}Ba9TVHL zuk8;~u7lAlXmXQjNK^o_eM1qKX4k~>V z7v*<%c-RgDukv~D%Ne}tCP6$aC|lD7Lfw(I%!IGD3-L56Ke3wC(Rc*{(SZm+c*c1` z!pIA&&UKZ^HuH2&zPu-0^_IVFUzs_sBj?8Jx3$KZ*2ld)d$NBQJ>PjCD0Ov)$x6O)j#4UWw za~!NY{8d!MXHI~7=g(bKwN2lj=jrC@@JNRbeD8~i9jpW@>tN@%OX|$ge(39e=5TK0 zZ+zQVzL{KS1Q3#ROw@MjbkLA`MV>#??O@9Qs2~d32m5wJPI45JN{L`1jLc*m@xD>( zpgr5^Q8GkwwY$^3X`z0kAs7_1Oke;M;ncNheMc0gBx@;it-K*VeCmt~Kc)?2fOIs< zVIJr!Kph0bK}9)u?v1exx9UV;gdc@0BUO3L=T(Q7lIRpwt#P#|2#}{cYxpeSw=jZ| z-0g&|V)2`L?SwlcZ(15h(){i6n$BgVQCCVAFA}djs{o4f67uCbE?CT-Uj~X z9v+KJ5)7Sk`<-_($kyUh2cWMv!E*Asc%48Nv--UZQ3?&=r}5Gydeq+;Y>l$%`@#HX zws7%l@Iy7lGt){>rBxj%he*0Hl-Yd%+&F|@x*-jwi;MCSsQ9Dzg3N>Qq9^=zV4qem z1Y$VpS_pT4v4i=nhqEvuW5IAXEs=@wr#t^qhbPMBP?3e##U~igX^^K&FXUuJ-G^`D zh%=1~kZ<^Km2c;$r%o1(PH{cc#;0+#j@^rce+zl0X^%LxzD+%52gA|piX95=Ufx#c zqRhCrqv{;iK}RDRFjXGsnqRL?&t1o-LEzTO;MA2vXUT_V#8F0hStjL3tGWwL+8uUA zJLJRUah!ACG<*9y-`igI6<^U_`bDn}T=`LxB~ILz{mNTy92I&#_vcQ2eaNLh(4`JW zuQ`-`5gfeK1t+f!CzQQ9v zda7*ddpS-bIw-WUt3#Xtm#Sodba#u1qUWL){u`%0$QES*ekpGnKeUl#lXV>UOBc_#OII$mTh73@Gbba{cQh+ zy+C`zGvAi4N!J=&M?N6w;Md7{`550p81mmiF$}0JdmRJ{)9-}W1h0529r-Lh*0;T} zo`)>!oU_nD95iqIxp-l6i~$^VS!Z@X$k_)?@KHHZPvi;5G>iHswM{m94BVCVE4T;6 zr|sP}+TdneUcK5@8JyKO+~hI!`&~uwQvb$BrjQf-8J%u!*}S~G+OAw&A$F)0vDEF`e~#t68JSR#;EmsMtABpz+u*L6S<2d0>zO6AtQrjb4*(Umsy|sGEXIS><9I0PLVGxT^{< zl1A|DddualZei^DMg=;W2a)!l(nK2a)(^1HiVsbyHqP?)X+Au)eUFUUFW3&Z+Q#Z? 
z>afoe+l{pq%5TR`)weG7k82WhXA>B}9>G7cJ;z>ZC*g??@wGXAw#LuwMD+wnmf>M{ z^|4JVPvc0CJQo{~x89K1&P1Pexl@W0tb5St!n2)?t&|nq+R`etY+zH%?+}g;SPp(V zB<D)ruDiRW?=KwQQA-yj8ZI9`2(bwi^y@amrI?O%9p``wkC{&7={yJ74Q| ztJ-2GpX9&(j$^Itwm_&I3?}k+j{TYCCTHqR9s8o&Z#ml@e*9c}@N-YMkA3t5?cs+% z*S_)V-_Y8jV{Hf-?V%UGY35=`@%Hl*^2b595B}wQct*&Bf9z!10e|{{YwMe#DGOO3 zReCL(iIDVD-5EZm4@tYj`(*rTo42|64bj2YfCPD8b&`K@y&rEjCoS|{!g_+AYEfP; zD69IbuT&?pID)h3;z|;S?Yd;)f;7KHZa(`#7c-4TuhA*+_93PCZ3B08Wj)tUGCca| zU8C)ZCmw76>W6-$tx($pbsSwBZ)eY(X|H(cOWWe1Lz#edgx1nQ8BOkvcj@0ojPu4A`ybETqEGo7LdPUp zg1NU&;}epXYw~In>K+*KRh}iP^j%pb0~fI?;4>H(1Kajra;gU>V&2=5Jn%P;wqZ)z zO7sEV!}DqnzJa+7J=UZAu>t6xJd-u?J82-|QT2#RU55`GPPD;k$}rXiklfV{nMXa% zB28d$E7r|V2*?j@N%})z#9nhBmT+i;vm=@P1@!3qiT~#MM&QVBcWSYX#FkRVH*}@f zHnq7?+lP-%&=+be#Rq-RFL!4uxtmX=04x2~p@ReJm!_%?}cnE_d(z70}MwPAQ z(wF(ki}5^U(k_evXNs|#3l|Lf%j&i}clr6M3H(F*<4JWK{DhN!K*s~~k$r z33ZXUy5cad&j26ob5%qSU=FhWk->V^{bxE3*43Q@^x)A4^BYQMl}q0zmOh%>>w{%I z`mkRRL7|h$lmH&Og1839I){X1qshv3Q$|jo6`nit}UbT0`oX1M=>Z{XOQo8F4_#4-QWGa$umb=y^8}yAB z$w%7^{`Qs1Tz=>S>=94CvXSz{J7i71ETAB7`QF4KC;KcP;uCFlU=kl+Q~E+$?)7I4 z)%Ib5{WsUOfXulZ@|ZocG6UspbLwgGdq{!)p4axt<#&Yt%EXf7RaWxB+@ciOiUW!C z-0)A_hT9fn%O@``wa1>k(4PCii`#$vsh`h0<{0ID`0oopsRJmJjJ?Fc9d8z9XWPl6 zN7`3^`IogfzVVIii(m7~_S)CJvi+&A`P%mF-|{W(>%RW=?R8)Ey7ux{ys{mgI}Dty zcI4=>cJ?g#39in6t^3V>aNXT=Yk&O-Zs6DE=5qVgXFk(5mX_K#eZyC`>G{cwC*8SP zy6sC5b;cOVW)~TXpFFY;Rz`>=b-GA-a&jiV{jK=fKl~H_Ne;DX+L1f$EY450lZ*5C zf$jj==|VULFt5UB=$G%{=J-mP$Yc9_c}Q$`wv6ZEx&A^HEWrDT$;r%lA31icJ?E>w zqP5xiRC9fEt!*$a7`MIPAG;Go7S`HsJRB>h{aL36?WB{T)rsEFuzpzKTR8NGY2-66 z?XK*#)q1Y$ocCOyWw1=2rw^4^@O}A;t2(sw`tqs6K++I|M>nn0eCZ#x>9&)~qf?c3 z)1jAOqUF14Jlg~f%Gm(-ldl#tXiGfHbAxT54$WCT<00<>ymM{GB*5+%u+DP9g$VzJ z&7bM}{3UeY#61g<$yaUGGm=sH0?0VqPT@HiA0c1cp~3s>d+S_vJn}xU>@N#m{P0wE zfX;9pI2MclB3ar3_0#g&X}M3C!1J&#F~;`7z47oay%;q0V};P-J!#SP+*`iz)#g`* zo<{NNakCuwQO-ZpZ5t_#Ff$KN)_(3n{@Dj|A^D_BleV^CXM9RO=6n;)opcZThZ+|| z_R`GRFQzz}K;V@t**Ge&(k1M2?A|G!>0z zC||+!WH%wz+@LQ6^Bjz;@(Jfg{`$9l<(nBKo{`BitzXq5frcoKg6=BM_5~R89*A$K z)_^UyoE-p`;kJy9jKn#~<8~}6sDy4ud8sE4K=$Yq%8>XcybRib5z>oqMVxWo3_{_e z(@;jL)FJtW=b=!D3(n>>9flSIi_U|gr>)@DwI#~o?eD)Z-I}d8Q5=F!dLMBhq--)* z$&#->8Wl5Cn)#C62WY&p-Y#9-x{e@aED!wT>lG#80KI`z{8Op>EXfPR)!W#;>aO6u*uOUUa$$gJRFS7!a;<--f&I5$| z^UdL={CybUbaVPm`MfuZdP)w~Bkjn~Rj9^jaO4R@=@?FoPqphf0izR(ZT9FLZFKHd z*gapXm?)d=HQfST`ePrIy>MvT{%O!$kwyd7CeWzMo}`^>R|*G1^uRL!JPw9~_cibh z8T^Ex7;5Ws5;?R*w%vp17;|B8kVi3#id!{3^vGlFH{SLB_Nv#sti9$-Uf01CAF$!Z z&aWQgISF5IYy-lVhJ*TSnH(K{hCU^sixv;dIT@7i7LRPTEhm4W;lm&PeFl7+?a;y$ z4sdNnK000wx(=}XUR}AC$)=gv8QAJ}vEW6Wn%lgw2yO+x<1u*GYBTbHy4|Of_R)t^*GLL z;B$n(A+BtJlUKDpC9p~N&Evk!*QEPiyLS0xJr=IXjebw$O$P;lWTx;=@ zD$g}>J}UuPL6M>FbU1(;v8n<(a0$KqGjvWiL*JjI-jo40{O@rvHCH+zoFFB57z$nw zs7~d)p(jKucO5A6_#n;6`KcKnc2Bko=g$kE%`@QfA4GbKgP=))P`%-n`;wU9ujD=P;->bHHIi(Bej{`OfNqhOeM^7a;Id6Wl*@mFLr zjwcO3uG`nSayxqI!w2BQW$N6Ep83Y_`bPI%|1H|5+kPW^2l3+I8x?t|#Jyz{tpmT< z2z4pSpifha2=d@XN76|sviLw@YjZo__}|)G%{LmCR+iiH@>0e5W-5B);(>eu1i1N@ zKV{c9)}Tjy9Inq|Ty=rMZkw2yq)m?kXEnNWXkiXI_u9pamq_1fbF^9 zV7U3d4_$qI_!IYk1fvds4J!2{g>Yki+KwB;v%r0}`k$@13E|02aO9&l!+4X>&#*+B zDa~GU4BnbC(W>>=Q-HF>OHbK6CnJ!peIjYHKK`_zEIW`fWx0-j#aUbAWKe8unI+;X zpMUB`>Nkq^c^%!_rOjU7-4s%6e?9!Qptj45X084{IM`RG9c#ag@HNlPB-+o@eid}u_P_1brgjny~fY4zy7{i~C> z4#YO|Kn9}`+vq?u3vcY3@mn(SlP%Q2ADVnyTYP%X{Ic-zw&I`SgbhRt0F23jHMTPakhz;~?3GP3WJH{V`pj z)|tF8WfQsk?P(u4=OMF`;NX1p*ztDaikmvG7;MGCHGu&;#}j=q*JE& zXPm4l-ejUx_}rT=Y(wSszQ(?$S3LQUOZo%fL?>F@BUi^|6qiqy&B77s%0fs>mkjd8 zx;&xb%RVUo_zd?C)8EK2^mU0mw<&0MbwvyiO z&qQ)3Lt#@QZLa6B!FUh(EDxdwq~*c7@#4%qm)HO&=B={gT!Ww|*=KKUT 
zNxSyOhp6it9q!|!^u=54;~)EY`L*K73lz`{K^9W${EB>p4S3iW5CHs+bD50G?w0a073~Pa_;YQF18hY*5!@$rg3{$ z{LUpIMZ7k@XCg9BlA8_=m&9=oO~6YK_2YT#{GtJM5^4OKXJ-M zY07^x)_9?cM*q0CeqC^7ZW~trBYR|A<9K&s>DsF8h0N(w$XohHo!GBj(-!;o#SaPS z1T^Wd>3{lA+&6K}BOU3pls(5V;~{q+5pUazdSSY-^bNPt{=g}$kXJw^-iKPdSd#`%!mHx73wPZi#Z2;7 zp+#@G)^3F_M9X6rq&T+R#qS&&+ry`y&H{kVwWVxTEL_`J1#U8DRQGUFB7$Ge^+P)M zb;VI}{q-Qp0A4rTKWlilOnrP`M|XZ!m^~eRc=qzW{`N$SyL@^I^j{b}94{?_xaLb}f1*CT+@3{@|lxhpy9EY+)0%doDV=^X|LbU3c8o z_T0^JWGXcJ{q=F+YA0P36yC;ea*Z9!&OqR)|FX8u{pMafu{hFR|0Q>|4aS^q(LZzS zmNqoK(0120V`JP&#PSm5gF5|f^*xL6fh{);*QGnWHr;BKEqUk<`N@&{7%b&r`_DZ!QWti=$CZD1ll~B# z{fyYq=y=;>JhCCp>LR*9X*D_JmASsBFn8%MQYZLHYsLf0Md*bpV+4Za<3`LSBzMi2K`RLeSgkv0jIRE4h0N?t?Z)x}5cTc-^^=kXvXC7?tdDlDJyWalx_Kvr`t^LX` zy`}x~FZ@D#%Uj;k-uJMjSf>*G9f8`c<3xXxR}oK zCN}5z(WC8Wf8i(F^3qy6G=&eaeZ8GHGT)9Y%;xR?$$VxsD{7y&@r%RNNGFAya1&4}QjT60f@K@_H_x+^TL2m2M5<(Ln-K zwMjL;Lf8LUm--iGNmIv5E+dcRgO`l&J!jEZ@)vxINk-K{k{7n~?mB5fvf5D}iU-A$ zh)+Mr81h%}esE)f{afnqWvOiyPqd7CudT-{%Eo-Ob*_b*d9=t(KeuF;2icU(q5v%r zXVaW>jK4=2@3lvw6kpr8J^DuB1$l$G6g>%{XP-tx@R6}$PNg|UFMdgb`q!O3B}?%- zZ?;PZ{XkfUpJG2M%;-d}i4P9eWp?qDK4s5|x>J8wcOCr7#~fbo-{*!B+zs;aOBKce z2fhR*RfM*VPwKqpd1uS{1JcbZp5WBOr?G&O{VtcW!{TObW~N3HL{&Lj4~ZG`n@oL~ z>xRB<7n)0_asjUOm|1(P@_FaafTp?$zJocMihb4)ITQ9_xC6kqf5n@*JtGi?H-kj; zh9OlVf{T$%jEF%1RP3QZ8Qepl=lIk$OoP!JAQd4~dBvwphpWFJTuKIgNiPGNj<`$f8Po^!rXx$RET=de z!mP6n4Hr4Y!94v#raDYB)VX(j%5bP094fN>CvWlJIH~ThQ?fgXrh>t}6WA(=|KuH% zC*+M6?~`b;{B(!cmcdB_MBefW{S}`)(!-T2BNIbZ-}zTv*3++8v=-yIBVM*u-R9vO z5PjVT>Y>xk$qbgswc>@p@VMZapX3xvj@(x$Jn~J%;6*Ce*`m99Ylw%xsaE_H9I$<5a_f4tVcE*QY+pt|DLffY8JCGW*v(%?u!48136A`^^ z26+hmSp*{z>XLmyU7fSTP@YyFzIrY_EL|lJ_mMd^!-r*A-~fQfo_MbrioojrS| zeZw2RC9oZA*kOh@!txJb>SH;!HTjtoD=DO`w9W}!=f98Z&qYh&sckWn-w>v`A_p2L@DJapwj1-jviOl?eXiDNIOa2k7m`RcW{wX)m( z;3FSszxpe`*^VAP(oUT?1zmmuVZ2?wyp&G^WC9EE`4DDmbQ*lr8{0TM^r5Z;LFkT* z2|a&H54i4lP-)`R@kE`njS`_4%XzG_rFWkDvc`L=^w`X>85%p@aua^%UYN0A7KX?0 zAYDh=p!M+2|C(VeZ9EkJ4WA~ntc*SCabrf1vK9LZ@%@BOPq64s3cNr@`gQbo7p>IQMlC7}A{w=(L+^9sF7}!5kIzc`Rlf$(YPl zOMPlan7~N;tF&i7x?sH`{ssM?!SjK<2Sc({zM#{+aZ%a(G6(1x99vVDxS%&{B>#Jv*kJ-7@2tExCj`^==?;N$pLINJPV6bmjybw@KZf=(4RK}Z4;!& zUZQihoz9&@y`$a4qoe45zIBZ)v7Z!|_>Vy#naOAS!%N`WH*uXP@jrLZ`k`>PP)x&_7q_ZBs~W0SoKMz z6Pf&hf7YR#v~QV^RpihdI}@L(P%*3hR(Q~zt1pRGMK*y8{EXRve|=Y6kTYo+Y$hl0 zEkjv#EoWo8he7H%)$xuWimSf z6z#KfbJe%ZPPhA?^PKjnPk*j`{4*EY6OVnq{noGlX8YPd{dMiwnOjg5C#&(Fu;F5; zf23^nEtF?pf6xn$?dyX>m%6T9mH)N}kHCSa@YOo)@0_E6d*NsCz3TT|ZNXZLjylX+`fso= zuWcR+f8*WpC^||V$}nC(fQ; zZ1>!GXM6ceUy=oozBL)HK+_~T=&nopgg&sy+yALATnHYvNf&r(J2Od2dH;&l;+m}7 zpa))u%%U}UC@&xTBYn5+7C4bt_@acctyPTpYM+^KlaHypzvgK zXpEdNEx_b5h z`fHCQ_jwMUZ$A3Ez4lT#T<5lnyWzZlyCYxT*$}p-^k)?El4OEIH)s9Lz+HhLVpghgd1h{Ora~2I{DWoB(C^yuzY`6 z?XZ_`M~k$=^YAi_gt)GNzx!iwLt;1%@S#tW#$0#zgx)8zUf=8da#%x+qsIMVeQAq^XZ)z6JIX@k7Bt$B$hz^0Ic>uqq|pwD$N-TQz4lkN9D z@_2TBx$U;w+LylORqac^`kMnc<9FJz?9j&b{#F9|qaCr`*0^`5UAuO*UA}m+z5VTP zZ@>5}zeXHs9CBfgF`>H#jWRZOfu7sr`-v>=OYjG-eajuU-_e#=R@zk;kOO-gpTP&K zvy)@(dH3Ga{^bw+llI+z^UdwPyKis*>1TdEwosbN2h#>)i;$Iez2=@AE23Kk!DDmm5JSZ2M^byEgyr-;1>GOB+;~hus?lO*n z&W!8HYu~0W`3WTZme4Mm!;Hc8aos@=B6k^wyMR;M?tIz?V~NNaerLx*pvcdB3X! 
zy8*0~_3LeFdaiv2pY*Ydms7^iS;#+kKK1Q&<-G-;r>7<}R(R=4UfNEcIMM#~zxko| zWB=)=+Pi=E{q0kq`+R%k+|%vRC!cChUB23`tgg3BR*{#D7KG9+(te4C4huV{=bWj*&*RM>S!`!s_@cJ6wnn#s?bu=r ztGr|u-`vAEv>p4dkxX68IDIM{gq@2AMNVWe0 zS2Rb5fZ5MK4;XgkjnkMAzJux+#v@Sns)xpF})IAxc@#%7C+V1sDEq2&VOy z;xuUglud&Zeoq6JoO--Xx-p0ozvy&5^OW13IRu`gE2QD{K4rRc;$k#7_-`t`4 zTc197(OwVxEmMBEyNKpltC0rz0Fnhu-fUH!0_DJ3nyITq;L^uUo|knpguou2p8%-P z+Yajv1k#8_pNhx8LD$0PFtX#lxJ0)|LI1_fw4R>$iY6PaUGdUu+KD<wN3gNbr->_DOntx{)j-G z`s3-hu(6KW<+u7~9m3pkcW3N^TmO5nJsiyC1P1xMNxRXGhZ~?P&0=Rv)hCPOE2msj zAn}IW*5xw=7XWWhBDp*VN8F5PG=8Sx#Uv29`6YzdbI&zB zUwHS|RlWnScY#p?s<{-@Ev#{$~rsSrGL)^b)Z z^1aSoj+Cn^#k1mj{ByP;CP^KsJUt#&&R||+aur?N4;`G2|AR8As^U!-k%7!j;go)o zPS5XcYXsBbI5_7%P7R_BRw>sX9nMr|n*SAw|NWaP{k>(XRd#afL-^`NuRmS61LgOF zWdn%l?z+#{55NbA+Y#+r$q|9ci<3ceaVeyRmhL zacIe|oX{;N!ev7n#o6bq$Z&I$7DMhi?HS;9_#L+|T+u!(&I7u@WB2PK* zYEM=}!g3!nSihZt!NE#}4*#5l>_GrVmvSI<`oOtJvW?bi>7z%EGN9XRzyJRCGDaf_Kok7f%0<& z{1DFe5DxfMJ92Eg-Fot5`?Fv7`gZ^G?{AMk{#cv`zm+w2=ukU;>SUW^a5ul;0)Rv5 z*zLeFSkAx=or0)xhOHeeGKhj_oeY$vAz1}|m@Iws#@5-C@mo8mtQ_bK;$=6pmD6`R4SJv>soK$w_k16nW0UJ0Cj|aXBV<58IF3&jZ#kxBP zET?_=V0Ih0M~@yQ+elkoUWSX9Yw(|$naG18CwwyqQu#Qd(+bzNW1Q#ikOeuD(X$b8 za=qvG;6&tpNJe0JzB#CUluo~2cgdFH-O2LsKq5-#L3XcM?sWQos%5bRi+fIklUouJ@A z-Vr9*-4-t)1_;@Q2V>Qb`-4vS(A~UQcw^bF{<2BfE8C}oBy`=0uk?q(#fK%-Z+}A} ztI9*m&e6{#mT<}AVF-MD1=A$N$qENjieksMNn6gMuc>LyPJD<1aKY1HJ00=?DGkht zH$O#^{*p}EtUMH~DE!93aVFz! zG6>&TYnLvrWVfJGhZfu6g(>(rmW5H;PFeu?)Yka%^|+7C=E?;I)@7ezzqY=*7Wu-# z(hvI&j_PYg5-!tU=;!8*X80*fqkG_8K8<{NEIBi+o~&^gJ+3J938~`!CO9>38xeCHmg0 z_`IKb_!0C|e($ymPe0l2xaC&*qO~?XJp-SW?OvOh^z%R3b{D{COW>6CBSW`9cc&hR zA@BYIfNI|yl#A++p7s*GQa4H`)RmYF!pLyv<557HVf(cH%mI)|KIS`#)J@jFwjyPr zyv$n|%iFpaEp@GLSJGj;_*Z%z#=8T63smB31rjo}{cm9x;HeYw;-+4^JHmD*{CC?7 zKJzYV=gvLZe&*+XuKmJ0-`Cv7Xqu|doIKXf96#D#{G#W!h4}?&*ouu%H)qEEoC$Se zPl8W);)x&efwuT9q5?j7Ag1rq;q0L-NXd0SF}L4QRGSJ6c^sQBLx_`g)RA=J(%*OE zck?7ISMUm6=B|U)2YGIu^f}fwVxOVS5VrbWvdfMs=!rOH-p9D;r1L`hlzwX#C{BFi z=ak(@6$#P*@JIZ`L7c&eY9&h?(^nur^VNLSe!EL9dBVT?mlp(-acrnKI#E^mQXYCO zy9XS{0=^tzyOt-m?HcdZgZ!YSwuryuLxMEvLuZ#N@_FqWbv{r6G3=9VTwSd)DVK-O z+HY_ozdA9b?Z@u;R&2&QkTEtn0X*_TgS*RAzwWgQ6ZHG35mJOsN$^sZqytOZqBFpE z%qu;%6L*wz>)}xsZNXD80PZgOith_7ccyY)q=%ITO(nTb=+3?W1D>*7dl*c($#u#Z zxO+5Yaq0e;lxeHNt!_DVs3w`-cxBpwq|3CHr`p_r$6X=5`W`akxsluZ_ z4#MoNZ6CShCp7eV_VcFgc&lw0+~0VV*s4u?5JjkaulMh{?sUL%7R(euZ$g;Mi3UIH)1?b6Yb91r`qdZeLK9}ZR^<1xy7?>XnwKnZP|zJU~@VhMrO{>X!Fv~ z)91*H(ii0=uSNlaeK*neu%)op`qqGXZVW2j_XHc@j@rYkdL8|I_OT2Za zU6mbFH1VOi&lsLIA^-P=u|bRr?s(x#+V<))^@BflWp89M^Mlzv#JZ7hJ}rSBM0eCl z&Qt?PV3NnPI;|ufADA`>oljGoFZ`Y@RGRsV&IItvE-+sDqpgq!ZROJH zUHbOoYw(xGSGPw)FDtr_4(;_qAQkT59f6WW?=z#rpb{Xih%f(Xh z!auSUq0lNAuFW7V5gc?-fB94u26cC#c+>MP>E}lN#&>+>o9RH$NQ6YCGe~9*nMfr* z)T{<2lT@4{z)E$NERb*9iyVUMCsH&#R_Z_|Zvc1Y(}@)V9K$g<86*-%MCgig3O`5} zBl{y28XqMwjuZ9C2dIdHlPpR^8p5=F#^hQin1?@qEB}5TLtiG@!(>C@)QN~|JNQ12 zN>FtLPaW>$B`3GWMLCH}9xF{88RC?3;Vefu&aCr+tN{H;Z?eaq*NZ#s-V9& zgyakE-gNRLcLw*JatLU^4^HNF0w;RHQ7}yE8WvY7sX{O;RUXwHF z)qpy2QX4$qto9gG8dR)gst=K$&s z183ut*2fw8JUx7lzKT8oS3^-o&>c92XoKX#nbj$6!;@`qVxi3}+}5U!JrB?7D7cg( zJL1YGoT%E)OP2cf9&I`ujH{|`-x|$r62T3cJ%@JWAg_C8Gy0&;q^<9V@J(eP^g82T zMNm)9##?8nk7TzTQ6_O+GL7>?gvPxlU>qdvw27(dcJADT_LjH4qn$mqsHJZYyyDeD z2KTKzOwhs8L5^Iry`Cd6Z#9|s{M&mfAqm(`~nN-Y0qthN89AgVe;&> zkAL)|?TIJOwWXDvHa$7fmRHu>47j?QTIV!x*ic>;=D{oU&(2S`nYpQat2iB(@(hm| zSm4-Ydr8{TD4lh-2qFy@kf-HS2!{_%wj*=%?cTfZXy5mJf3rRG;OE+7k3NQD=wx|o zr_Y>jXKuZ{O}JhB)I2c9gM+*no51;$@6;IwA6~hdS*O%`)C}8+*yLe0#QX4nb8WRf zb?&M5AAkI(o8N<9JbX0U=Gj&qJg%?#p!Rwu)STSbao<6fzE!DH>%$!NF$+tgQ}R^3 z28ZBLOv>3`*k5=2nV%z~(}9T(YCe=G-lTHl61|Ge>2$sBOt5?kr(D@5?g;}qbM`uK 
z`>3DYeV4=`Orfdk;0<4-!M76nwA9a`zS#dJzJn9k{5eqa;kw`98ylZ&mo8ptORG!m z{`>B4hZYyo*ZOw3^yXYerO1H$4@wZ90cqmgzS<8ArgVXboJwGM4w-mInl<&7%`Gn`$Vst_HRbZ=_WRTbvzex|@J}Tycv?U`|iXw8IP_wd1~(pbd6mfc56hQp!!w z&b32Fj>c)N9S5-8@Nvu?4WKO(4B9I6S*mQQkm7G-&<6*sN;eZM!LWE71tIMDEzADa zvt@JUsGsObwHfZav@DoOpvGkD>I@t+Ji}q|Qbc=rgV0|ej5`pFPLle;MPPPyCLFl# z>Pz;UgON#i_WJE(Zzy}iy;rt-_0J^k!K3#xVgkUgu;Dk%Q8qpZG?u*a8R6fsI$3@B z(3o!+(r!j3BJ--J+JtS^KH4@Xj1gd_&EmtT=S~#aKZvE12P%SX$~KYO)qC!Dee=k7 ze$*Sl+;1SlqHA#Lr|wK+eEgr?t%{|eE8#AH$IScqAgqf+jo&y9hN6k^ug0J zvw}%GFlYyR-*7iO8#s`1!5(mJ2j(NqH>1>J`yB1XIQ^h~k@_!U!gB{3^>|Ev(H689 zJ}4D;`4HQ~VY|-5V0h$2V{ESvS|QbO#u#{Nuj~h1_!PHB)&g7nTo9zal9#@HuIgqS zM1J#FZ=bGTU^_Ryc&|Qmwki36-O0uYA!+g2IIsJ&<~N2%4-K>|4B~&;TbuKNATBXM>bCKI_ZC$joh8=b*Pw{hNo7$kUXe}P-58Ee+hi#)Kz~VPxCB@seXS_T3 zC@UY-_lsPWefR`jnQ-wS&b~rXSU&gqH1tbiV1l*B3UiVAxKAIlx`D3hTf1O$D`{aE z7q%h&Bim7xP}lt|0zN#IxR@-vB2ac^X`?;(xd+?h=dQL5ayq$m?b=3r_>l+mw)*7c zc=OX!(&1Q7-19(FeuO>>=`En2e!@@TkGkMUyCpA84h|jt$il;Rpbe-1Pi2C3j(Fcb z4nItf5RxNx85fzD#v#l=pxua%4`roQU3F64vRNQNw%9i=v~NTMN8}5~=zeUWmDK`< z^mR_a#P{FHwyi*+L_H9moWu{Io{>ElBu%uL!zbEjKJ%IOga7- zPA;~)@4UUe>?JQ~hvpVC=`Y^ek-5p4=*`%K4{^hk=!k6xb|hqym%}#=9fnqb1`v-u z;jT1=!+Xc3;4hmhR6DCg&nA?vc%3x)>N$epU)=*h;3&(!2snCOe6kp6`~^R3{|h)0bpeK=jZlS7c9t;P+u8FzWmZ;`)_&uVdXDnfqhgtS>k7$kX+j&=lQ ze85%{=iN`@dz+PoB^|ygtcs9NSo%5<0r?6a^uzwmv$SxXolCH1w!t0x3g14oytr#O zv{&xnU~tSB+9{)tDF3X#`Z)R;+Kh6Or?E-IJLz7I6|@pBcV(n7q~Cb+ySO*I3rI%C zTj|l(jtot}F2~z0K(c=nFZfD6Lq5OBpR%Z35v!SDd5Pquj0Ej?$ zzn*g#S!sjoF4~|x4_=7FzKpBw1;5;z?mhCe{_D;Qb@+E_h8uK*U~1Ak{Xv-$Uzv*5 zz#Guh*{EDfi}3|xe_=@ngV47LUj!4(=!E_pO6>x9+Qr`18h-0q?)4pq9e--~IO!$E zj#|+1Hy{KbtWqsk>HDgFKP%__N#5Qg=D|mvZa@B0zum^BTN|IAYPa3c715(+}ANW`?YJfLnhzxTm zNAWh&_MyD6UBlzpNrht`?vby)_z=d+oz6q(1P8j|p6ND6!N<>=EBb0Vt8^ivnwSAFg4+CTVvf3Mwr*PZSE`VT)IkaPHF(&}dwKlSNP zwKsgzx3_3s2lFTfCPa`bue4+$($PM7E)my43@x+*ZOxVP5sBWM6oCZT>zV zQ2|kK`jEVy2mMx;uC4G=n>)A`KY>~oFgtj1!sm~WlX?Op=<|~%D~02J=~`_hf)i9ok9r8qdDtN#WM>Yd?sC?bmDaK!>c!cqet3W}*m?PP+Ju zv?adBM*2jze)fa>8H7qxH@&vqyHjuR&=dhyn)i`u@GKayRmmfqf|cwkD_*@Ej8HGV zFZ@i)eegCO$d&p3Chb3<{mZWVKJ2{S%fGkzPajMH2#|zGf+8u3U>7NgDlD>PCy{~` zE2ON*k+T*fX~ipYmgP7O9Z8WbOIEXJlN3o41RIDp17HA5!3+k|F}=_H`~UU+^7(xC zz3;vGV+Mqr^WOd4b5Gf4pMCb;XPoam#lL=p1liLTo?BG&|m+#AN>gaGkuyEC1ENY$cItryh0dq*HtCjVD6Ys z1tg|4-PAbptpym6v^7bBLbOBFU;>Vl2cX@#fQms;@ZeQA5hmJ|g$FKKi#WS=N5ZDF$(-OUfcUxfoLV#~zfQi0uy(G* zrZ7x+()aDxrQ1vI{UB|68t(<*@Vfy){@Tk&SSnZEwZx;WuEAq?rxeStsnApH$PF{<*z9CG>EV%hsga7%{CrreosD= z`kA~MY7ai}#Wp%V(O!D(rS=#9>`%6#u_?-{#s8Z55C;`@6)A6Ph#6442v1c3^-Rh2 zz!f$zQ?>biTk6WtE&~7`ayU6L*^VAP(Vl+diMGmM?9kk7o1U7;cidbU7Nirs9@#F9 zCClO>2J!Mbcn!7jsi`(LSw~FP0#0=()|>2E^wh5{AdAdRPhwzCwbLh0w;QfI(*Dlh z`CDyZb+x_x+RMPI1&#T`$J(juZa|Jl+ZK5TN5&z`2;zWYYu&-SjJ~3Wvf{#1p1XE) zVB=)6)%06@SFT)Y%Zp3xqrdafw!X66PM$c~=H}+9&u$j@C3kk8n;+*c>b&VqefmyZ zTG%i7=Jpme$zru-CkM{7q)S2ry$YQ!P7imCWnos=X?i2M$)_eD&alKMz?lD27C+RzuS?-%n$nZuj;xM*F5TfY$Ar>%)x_^PuvvUrC6G@oVE5wlfZ@L?b* ze!kTkndjdL1>e5(twa|cGx&xS!tiu~_06@EvhV2?Kw6+k_|4iw1>y=PfauE zoAk)onp~cmm}=9LQ*Hjx;db=UG2*7%B)Z5?09fDERgL$S1^z`(2cQw^(;MKgt-!PD zY7CsUQ5KgMBOl@*zf8|eA}8<+crLFHhTfT;8g=+WJUk37%C4dioP5`}Cln#1{IzJF zwD?Y1M| zqK5|u>9=6GylHGgWx@qgt4UcknFUPSSy-f#U-C^n>QeVALcG*H@_zIeI?+iA7aX1B zQHDoHU7Uc6$PimU3v=QD4&)I=WENg@6Pvm`lVjAYPR>{d9j%cXfcZUs7jUJ8i+e77 z0-S^xg3fhX0KCZ;wU`S;|x3y<%Lz%^8Iw^KbaPZDQVGbWH&T?2ul_9yoxn^PTv;6B>fd})M`yfa z92&itcJnP~$b)?fpG;!^Y(VdE z>^C>Fn%5068GAws7sOnQ(NCggjHy(}k0!r+4-FUQYgT(PgT?OEKYx z*GtYiS?gRTy38-l%?B^_cBNU%R<&*69okHOk+&?%H_=UFpDm5j9-V^p+ku61mEs01 zH@GOri;Ii-%#X*Xy7}z!=bmp5Jn>@M#%XHXV{C#kIPs5u_31V{IngGt1q#3V6oEL~ 
z>@L)cqZ`}O7p5I8vnDJHC+eo1D*Y^kaO+E8$)f4K3`#xZdGZ=W8ev49#Lc>wxb5+U zF6H2|m;w!I!M_I#ABr34vq6aX!0d3TJOxi2Q9y0mgA54Nj7=(aA)xYxHM&Jk)&?IP z!mhe?z$@rskXmm$G(A< z(opUDmhACapEuZu)pFt zI)cB@j_0&v*Tm{@`LaGiP&$2Qi-^Dzw%(Gxs(Y1ZW*1wEwq1RZztVHHAY*xc7KxN8 zUtsdC{f}_$D|F&hlXSH~b%gPde$!gM<&KWm=d`pHGj1WQCix0CYEGXv)Zr8#i*fz{Z00Lir% z#|2)+KKLU8y~Ce+c45`IRQ@inW71w`oWmMnjH?j%t@EAD)g1pnu;XR`Y{T%hGLPKh zt;H}r&>*&CXn_mu+N?w|3o&+9Y1lbeftML*na3w z{Y3J*u$IN%J-x52mVPQ(wJwy#e@=f}Uv00v{9^kr|K5MmUVZbeq`&#*v+d}SlkKnn zM?c^GH-Gi7wKraSh4CErCY1V(-~6pMIX#Bmyv^8;wxF!e)CidyUKJ!OD$hO@XXSE(PMDGB{-Q&g~HO`NS^)rsMp}B6 z#8-aw9`<72FK8=iPdV<%JY2^w1!^P`VBhSVgE+WIT>X}`LqC(w&B%j}*QkS^B9j%I z6Uz%3){8`*%9r^B@`_6~grNVO^K-MNeWT+BHz`Oj+l5D~M!bDy^+WcL+Lrc}dE1xr zo!iyUsRer-C6dp2iieus4Uh!;&vVf!w2&@wma*@5oMT@Y``)%AtTwRIdLY$brCX|h z4DDzy=VcaGvHN#N+M`cC+unp{CP?MRwb9VSW*^R+=+v0?oz;|6_|_NjHKo=8kb zpqHO&&`ArO_{;ZR;t;z6nn{Pg4t~brU;g+1ynXj~e|P(_AOEp731fcp?t9wIk@>c~ zun0W{nxALAaP?~Y;UD^;w!FGZStD(QyLB%L$sJW*`epk}+CXPJ)k%5q5VE<(SmsM# zxCc9u`atWWQ^Rcz8{o*H`8G8!PmoSoEA9+U(++}5`%@We-Q!>N*CUNhX89#Y+7j}< z`9()w^3%R|-t|6ER&KEEhlpid=2+GFPT@-1UPf$%+B{t+H&yvTCF!i^{sMM6sJ!p% zyxtG;L!Z9xr3b2#ICwPm$|Gs3NYZvN2=8IkC!6I3@E~3EtMXv2J8bS?R-adscBgO1m@ zAzQwBKAQkSmuf#F#W`|gqQ3DhS2)P#8gT8m-JoA5P8w%EF%(sFELUhFX#5=~NT6;2p9OCPF#I-e8Nr|ta*oWfs6?^(Hm z9|Xvml;T2I!VN{3YI4BmE-dl-#NBc_9w96Pe8Jf!Dh3%OZDqPD$4dcnBG*L?+b?ef z6?mR|p2X}~q(-n4%IjO}c}wTP2Oew>f8~kxXa3wzw3DZA1fF;?;Mk@U zRq+E*e(G_n4ox2C*|@H()Flhzgmto?C?bri?DCdD+?8q4Imp?@*mP3*&b#iy;2LR< zKlW(8k-xmMSd-doYZ#y_-6E+5v>mx^P&-bSR#w|O15$T^Ta3qyi@0=N_swAqHgV`^ zISyUsXQ$fq)O@@7?Dg#*{#SnwR^Dn4J#>EtB7TSW*r_w^>@Byn@u>;&GN{g?=s5X( zOAkP)=K(#eez!f;kKDygd3Hb(V^V(G+H8xLR@!Gi`N{U=<4?9j^T*oZ!$+Z;n=?FK zvolcAFBFYk)O51Tc2tHP1g+(*Ym+;xYW*rPZZPx!W>11}Sk} z{NBw(l|S*eyX|IJs97!@f5N)EUUP#_Uy%G4Cp}0L6bUbb#SF$5&gSi9A$2y?JkX+y z5_&)TW!l|7)Xf<4bMtMLHjm+ttUv$4i|y8%Z_PK9msXZAGPbhtud(j{)y0t<9mHju zPEStPLRXHj-^^(ezS(O(Zp-P??^$?PmBQc6DEf>4O`3@k=m zT=xxs<+t+YjdbeoB9cY*t_BG3!thuBI{0NNElHq=in{ z8h)F1bY#4ZbNNP_i+<@B(RUhT4t`);OdWU*+IR{@Hk$xMz2icmWo-H>0T&gap~sOA zppV8G+~sXdcz14owoOmZP*(>GwqZ?Bj3VpPlXICM7#$mH^Ye#EH%mUh`M=fHSN+bq z@UbiS8yXsG6B9G&1W&+dZGCgOt*tE6_6u!eWsUMlH?)&)%{mcKrw(joLG0Mk$de?-?x_uC|y$3Xzw>VZ;zxeJLpW%U%fl%e`mL!LU;$ScXyHfue&5re?iZ0 zqU?Pq9prfhr{G0W{4{}nVEPx*|0RaPyfLi;<`J_l-*zNo;?|Az+sBYa@?Shl7z{3-L|r(h)Tw zm2b`?Q-!njr2q1*F!C=U6TYSI+^co2&j>IeTdPw$oLJB<_Kj^f7};kTUh<#}+PBdo zq9ew}YlDJqxVh!q)=t`KGk`64d&Il2t@j}Mt`?AikAA2(W~=_f6z|Xmr;fC(!j8bQ ztz*ORCoOFsA_!A%T`cR`8^VEN%1B;Qf;Is@L^tamIpJ}g zKsvrrXXmXc0Qg307THLbg@5l(6=wVMGvZOCzn3&+|dTW*k&*_1uo)Njtpe)O}wnz2815=BDR`#3gNx09w_)1$^ zxYVvYbD~X-PeH@se15~tfeyE=$0&V<^#^0CR{X)G7?us74go{!Bz$B?9K-+oxv_%; zs3+T4S%Qfi_entOY3nIx!lsDmorG6eJwKWzx>4Se8ur`7$;^Ym>tP7bw@-o_vSvM{ z9yi+B#a#3pJR&a99d4+jvb)fBbkvQ2@(H|(J{_KbCiK6*`MbZ<{@Y*wSVQuFzugW` zPqm|mr*lN}2j2HSY_S>oa>te8+MwY$d`B75GjC)SkL1$>CU9}aw4iKV6cO|4#I+{D zOv5w58Fxs2pHt7GMPKLei}|DzNtLCFFBLiSe(-S99bpSXZZ@nBV8$#4&V+w!L zF>g=NZjSksWhpCk>!3#-?2ndBI_RrhI4M@g%X`_TG=h0(12>4)_yMwjgXq@VOC+&R z^AA{&VPGqBWm^kF-m{+QR&->)I1$`a?`r#<=3AliqWa1x{}m2TFh ze}liMR`}9%o?O;DBgJi=7^vSru$w+BsF7*?LHiTO?0)mxQ=Oz)NBg`jCXZ!o#ZBA>)r-(e5?L=3OGfBemzlkEb(e1C zjF)EmyL7Py_aFDJxB#wu!oABIh39^}?x6o0xWM@4^GnBl{BiLgyn1)g{|#JMeu0FZ z(lB}7x%?!2TNE^mfa5)g7Xbz99o)fWNQ2p`U41^-W(Tgct#LY^y;u8{>(W`DxPvq` zFSot8K5aeir&YJ$%a3{b1g152}jHuL&Z4X9CTdmJQsyf zC-tP|n1+5?Byej~^Q(_P1$h?RPyFd0Za@3eKhr*a_ucLCrA2DLOZn&t8pOU}8=CkT z$_dJuqE0uS9%(=J;aj3pRyWq#+_4+lz|66>x#V~7J?Tb0rtTLhXc%6x&bAfg^oFi- z{9r&9Y>a@PwRh33-1P$kI!P=2E?}mQApM~J4z85739V<%)eEX%QQZ?bruv~t6zI1VTjDbFH`#(iI$ z_8>fCAD%t6)G`)UHro;g=@U=C(k_!9>$MwGjI?paYEYnEynKPNmz(bjU;A3`Wg{4- 
z?Ey`pr+uI{wDGA<@agoR#6VG`{czfTtIM08{VJ2#K)je*a4TUlKP24jn{QEX_0h;kr9=;-{Z@=}X`wSmTd zdUJiXEnd3VzVxO0+6I+7HZ$C&v8RvBO}ClZI?=RFd634?&StVM(odX2PpXl=A5WN; zwEj8&OeJdv-S=aV(*LhOAft%1{T(5*7mGy36Q9`PkCuo7>aFnJb#ZId|Q=hl^P zQo>@#!#i2L7te5f=j@Y6UvKhDZ+XnRILA>u9vIlNFe>*hJbFr*rY(8)p7EsXjCr<9 z^NFJ}T6L=WcQiz^9Jg zdc*py}S+f0U82W?$6rDM6^PP_C?O44d8b%DR6E0m$TZ3c}T z?@MRsEo|pbJ^tU9)&0DnpHL}B-p~y=fXvv%HWg~x+%SIp5C4!*dhQiiG*V9eELeR8 zz;VAXL8xC#kL#+H`$`nd`VkhtwhRB(GxMUUk1_dqo3l)5r3(#t#=SoD3qSh<9|2xA z0H`pm3g{8nA*zz^i0yNPhmt}pI*;06RtTwpx2Qz&rLLrRkk&CXKj}0yyei&YAT9&o zEZtw71nS}HM7pr5I0@c8<=!XF?ve<*^)LWGc~f=n($=eoQOujj4oBk+FP!>&pQ_3; zzY~zarN;_%nIidw)HPbl+9P(kPB|$Im43e*`8epLF$hY2>89g7da= zI*j}H-tGBFYh2G1x;0a%bPnP`K^dffGpc~ zC>#n#37Cb~bJZ>RI=IrrIC;@luJQsn{&M}jaE`L;@<;kpq`c89_`*GMK%yuu{o)yZ z>hRNg6DII7*=>aB#RJ5|t;b;>7hUHUUims_hQhI2`2#0Nn%b*RA* z`%YZCFelriAh~;l;n>(jl<@HAOdFd%*`|;B=KmQo`SvO8!C&6&r)Z}i)erWwc3uP9 zt0=E}iw|XhTrj}uIvU$XWgY`idV1%%=#0OA?M5fO&V|k$^6`5~w**yODO7!BWYt7S z=ZNcaXJW?5EX(%omE@oeov8PXKiZC8cRl4oJB-K?CxnOH zyx~Nj#)dRT{|q`QMxp7246BN!{j0%52>NZeOBXJ-WoZ2B%P+Ox`1N0FH{N(tJ979; z-fDKy)(+RUlqZ!e@z19u03+S~9{T9$I0M*?9LJ^pNc-AyJypxJnm(PueblxCK_H^b>C@r?9xf z5s&%h-)^whU+F_AM!m*k;5)(Sp#9S2OF0GL&f9O#pxr@(3oSjrxbr-lTb!UL^YF+- zn{=@)?OJ$>PX_QQOHSx?bK?EAQ0vK^_|%y4bB_UE_?yK6qJz6E4xoFK$yBVXul=dz zxS--EEQHr@MlAb58Br5Cv2OX5KSm`Wv@4V+`K7+6RCY<9uGc}<%d|B~>TZ4UajdtK zZ}Q8Fo|NF5!1ALoo#Zjx(dNlCVRGXfjZqqZZR7y{y-EvVg2O77%_uJ2M)nk+uG9_=zds@hIo_j(uC+a!s@8=^1M)JkvL?ts=kMS@iI@ zZAqL-6>>X>b(50)iEk*ofasecrK?ni{)+<~4UBbQX{+RjQ0OEab%Z)fxfN$4lw)>S5jO0XI(e5Y*3W)Pf~HRzsYy6@ar<_(c11Q;LCCEf8&`U0P(33viyv`r z-ZGC9m9Z&+vZc<-;GM_5-AEhy6H;R?8`~#n0p1zpgByU@o(RF8@j3a2cH3O{6E`dH1?`4jJTy1gKK$VyhR4v^=U-@kHsH5D`tkP6Gf%hE zNB!*07I~m2u+_8c>#M<6Ixa3Or~NG3HrN`FcPKIK>ZUqcApMAXQFB5YMBC5y>Fw3( zlfo0RXTeuOkk{ONY(;+4=&-25DqCs)`enec^}9^EdkyKY=1Z^vMyvHiK| zhl)mJE0(Le^frcBAVWM!!+-2Q0QB)?R}@cHV7c%yP8w(OBNKl`SK~5oL33GF$(Oj; zwk3}_xyhzre4|eBoBy8dvoqi(htW1UHdY%`)|{9j&(LH}RQPBA;+NaU?tj>l+wtkS zcHObV?fCq3yX}@6+U>XAn$5cQdmim;+jx4=B>i@H9uz|p=%x;EQ%k>)Au?HL>12so z~q7T)MLf$Ii<3G~7C&Z?a@byC*DAfZL- zXH94*$)5V@upy)Sbb>QH=or?8$5s35Dr*vR*;YIW)Q~RC*Sj9pwfA>R|8C*ex*pE8 z?(6gK<9Hy?yJdg(h-+M>6K27W#N*yxe$j=rUyoLW8P(wzr>x)-rSX+ws{Lb(`CQCs z1Wo|vVUQMQxX)c0%TGfc2==1=_WXJFr%&&*WtxeH81rW4vg|H6cYb6elLjr5b*>h3 z`#|BUlZq#I)Lmq76Q}jJw{U~|Amb$%Ieq$uZ~t#D0Utf0JX?SMVy?5W@m);ZOBklQDUTOd8-~2}V z;{ErxuRZlVkOp!Z&iTq}VXNWp`P+esjbPVn4vkJ|{|6TNK-u_1qdbE1xh)hqr zSZ^2Y)mMGLm9|Yxx!~a?c!K39$Ie>TcEQHi+!MNzD;p1p3qJ6RY23_~MP}N-BikL% zuPxJll&K33Sug%#V;a`Azncv03-Th1;iPes>ej|e#{=qWbarCJK|NbGs4%s|!ibdN zxWO_k&yB63n7Ht@`3c}IF9<92z*qe^{&X{@)2k~0~77JSKn+`7%NF}XF6@iad^@fmmGV=!2_&qpqlcl0BjyCpL z9``iHY(S!vI+>9^!L-O*vQ~~RFJ(Xva+g2!qu$b6GLpnT+n3(*ayoZkOps=^Nyd7T z2qCtwCV( zKWmJscXBk6ck%Di*~z$4484@pL@q@ly{Njg}pKm9<+@Ls!Wh*+NSi34z@ zsY1$duPf1k!6UBf;iM@v(Qm_ZPdZ_mCcpcx2DpVs;J}PBSn^vImx8~SzbouoIJ&^d z5st)$bAYX%I%l#{3Ib09P-mM<16_}U_@=y$j-Zz-Fik2-U7p}aI~7h{{?NN?L#~3~ z(JJwE@4Ov@cX84(JD{k0Us&K9U#*|PgT`h4@SCorGs$CEq-?`0%J!(;?gDUtOWk}{ zJ6ng;RarnbJn=yq1qYemc=0uJ)hfR3Iegj&Qki}JFnwt7lMMUxm&|f$#-1l3sQI9<=5Wt?I}uoM7b2X5aN4 zA8OZKe_bYcoItyJ^-7zbmQN z#LhVBZoTDf`>X%nPq)AFm;b%CzOdS!efDX}-)%=voNUKWU*AsObR#Mb-Dmx%_xPwg zRKcBp2S9ed7M6PJjFMP%@TD_tZRkUN#KAWyot)lWUTweodmn3;FJ5WapS=YT7|?Wl zZgP-^!}IVPTq;P45qNv9?x}7xZW_1lQs&&G~HE&b#dnG@^*X?8pf$BtMe{H#Je& zZupOOQg76o1#WK9P#n#h8@@H{8=NaSsiP*NN5`ky1_x8iwW@b{iAx$hl7{7xc;cj@<)iFtaN$eI=+P0 z(`O>zk!|ZlSP@X=NIFjm@HCd_;!$MNg(mdAG%~Lm7+9G|qn??lkUsFRQ(uyKKv_$D zmBqrMuf6@At{(#!>Y}>ruNk@wsIzPP&&$~ z8vsU!>E8h8=^AY-MO-Y$6*4uV+7%pnO_Yk3|)C7X`&HHmo|CNb#_*&Zj{r 
z%L&jmaLxjhZ?`xRBhD`Rsz=jq7Ec?Es55~NUed@D9DLKdD6g$kn~bbW4SESCb6SXg zSLAvT$Yf+r?oh6^&BU*lC+R^OS{d`mn(~D8;5p&MOYiVI{e05sSBK-uF78~Rm+0ts z@@AsQ30ZkDd|U7h$CrBNp8mU>ak=10QP{>l937cxqu4JVvFc}3vVeu1BCESm$>aIl zTro90jsEjE_Q5uXt~)e`9z#by@zhi8&C9#(@uy#Fzx=De*Nz{aY9IQJ546MBc2m=b zf^WW8y19-n-lV+2=-zEVp94dviIZOjmD@k;nLu)_D6b?Y?z*1|ZNs6E{vcfNwUYB%SS0H1%{Bbwb{@rp(YE9`S@9 z`zP(5<&}l@+N-a%uRi)%`-6MF+#Y@7qNq2wSBwE^)bS$a3 zZZ97__-#VteAa)GyxY}zV~W{dh32jDCjSRC&JXW_0IDK5R4loi06p0nzbp@g&5nW{~!OK_SA(74!0wp^ag zAY7cZm;Y)`hnu=v>Jx6Vq#?d+$=V>P{U_aRd!4YA5pshvWr|a&p41^5V1$Y7X;tKj zy;A~#m(!^j$7-+HzJ8nA_S2pe(b|}zQ?}%zs*AGef`N7MZQ#(BxbQLQq`URd_4PNg zWUG3oVfcf$kU8Pm|MZ*CH-Td{l+)Uv)8WZ8MP*}&{e%25#+c4Apz|=2B5yL=h870? z!trh1yhRLr%Xbs3)zP-})GFI=YiBLESb%99E2S{|g>-t(X<%p4FS@WeHY)DmO;K)I znHmFTP9bqaciDsLAgB$$+P0!pvLj6I(rnN%e|Ul)qavsP)h>!{l$`v_+x48}I{DP` zyLC-wUG**AE3CT=+t0Xv7bn~G+G{`YH+ItIo6O(K#oL{2ZoT?T_l?Vb8=SuU@R^tV zM_6RJTx7F+-Mx$EQszQ$@6tPpq{Cycoc^*H+tT=Yt}=Cf8siM>{K6ET!G`U@?tXSh zJAV(y`t)sw>*KuJ;bgwbX9CRb_@Qg-@K{SD}0%0uFzPU`Odi6(k^6uCkm zLB+lZS>Irc!&t|0jwkL&6X~GN7*cQfY~Lkq7(*7X_PlOCO?Z?aWkeB`Zo2Fkdu6Tt z!Kd$S&%L$Mu0MOS-AG^gm;Tz{AT2qOpKNwQZj*hlQdO6-7fMfPw_mxu1UC(~U;3qA zYA?L>3UcWud4}50{>;y`Kk+9&OrD+g@(V8l59^&V`IC=5+5Ve<@c-Qds~PELao<*? zri4gYBdg5{m$VQe*g(|58m>G4qCUi(KX);A^G;6AqF=^itNLkQo6qsy47y@wbg)*+r}dFrdazy)u?uG*oFow9Z>!x5~%d2jir;*3HfZS z{gf3NU@VP(&v?jsKqvcy0Y7~LEEkh$(t>AZtLu+4l=>1+c)fUFjDIAD@bcC02($!A z+c=wB-AIyn`xj`&IGmagX5WL)^1;P16Ed7G3tUl2(9 zNCMj2(^EIs7I@H($L>HUYoj#!FgI8__Q)s8;9>bE$N1xk-6Cx$(6Nyuv_I5t&PIL0 zYXiTwC-qAGq=)s&jk*&rqKw&KzvxlSkXRi`bgjo3^ksR$}hvNIzdFX2mvt{}28^yZ@eh+Y7J0 z+OW>sJht!J%6j{O@BhJe^w{yXxVX?}rf1Wx>l|gxT<|hD7Ow$*WWPWhe*d>K7{K0NS*H0J6Oq4PcTrxwY&r8CbfGzLvPQ@i z8$LW~Ix~6cuYA|$*QE>S{5spOLh7=0YW%sMH&x{^AzfyRRMwR%yZ{*A!Ru@p^O1l%FjAimCbz1G^pN)|*M3-UzjO}VtG|-Vy7Gh@*o8y-8sS*? zjGb&F^2y(}q4sN+UK!769v!TzgLMLy@||u$2zLZf_7J5ad9JrVe+K#EA2jxiKJBTr30uVk9ysByyGWf zc(4s^l@uZT;AR=-R;~&MUvP0O5kI=d`4^M#t&1voxeLfJVFwcTYCq%E8B%ayw>I!- z!{@~*Y~U^Qtrol+=e9SXr#1w2D`c#5FbbK}oyz4f{z1mk85^W+B#NaK-oX)go;GCJ z_Wky&p4hC6x|Z%n^Q?bryiFUv>1jd{3VmczdK;L&Pd|CmhCHU+{YzK;Q*pT0 zhkpJqeE&xXJ0p{1XS5L(M7o-!C}i@sAkR8M1dfUbBy?8UFp-j=IuWsQ!fzIhGNdWii@k@uEO4X#^~UL!K?5tO`o|VlmtxL zO5f+JOufJIRjL}`)w6l=!yqNp9ZZvVa3TcK%zLg58Jwl3(UY{&U+ebZBD-42t)a2cqG(tu0B3af2G;KDqT&$)ID{qlt~jXsWZZpbUNunP}2Ks z+6ph%+y>)CS#tstU%rJW5-)rbBp07E^o`z-}|eaAjGSX z5tgCgFHA$kL42hh^|X7{n8k!dUx4S(7>WmlIx^SB=dNp0M{aL}+}q$Zg9!1aLvV5u z*{mj22LKAA5zPg>x(LXXg&Gu^SeY)nt2EFF2`-`~N*bEtN!#{vQjx}Y_x`zaNg^Qg z#o2}i(%u^Gcr8O6C9ZZ{{qZrC+&0;18w`%NsFM>RzU6BtIWsrgzW+~re|z-F$J>2h z`ck|3=G)qdLx*B`kBm=3UOM3D5n$o3(*W!UI-clokyY)~LMw6YsvurrV;%a?331U9 zXJKqEFSmuOm)pw1<@WN6FSJ)*e62nFm4_hlR=fVX)9qW{|GxJA_rABCz3KXP=k4!l z@BfzfwVQ9bp`E(!Y&&)GXuIY5)A^3nA?k4iyiXoK+KwDP+NLHZF;r*T(L=NC#IYl7 z7K8In@_pog`?KvY{Dq%r#}3W3ix)4ox6Z!--n;Gi>C^4>4L7&x!^hf?Z>E5s-_>=Y zBS%fs{+X<$E~VnB2ZI$mE%kgg0{YzkLRxrH(GB0OT3v2yi%ab*Uw)uH_`sLjnKNhF z)aW!2I{gW6I$7poPEDE@!R+j_pt8A{|#pE`WAE&Mhy_E4eTtdZSu3{ ze5&@rmm;d1aH;LoJ*Ry6*?Jlz+=}Y%Mi5Av)YEwX%wtTK*1U>m;)%0pjb7zEY3zT* zJLP+q#|Spc?9d`f?jdtkB9H2MbhwLamC93y*3K? 
z{0Ov5)5JTO&LD&hxs*5t&WQXWh2xMNWvs-=X~Qe#*%XDf*g9s@3Fdgn0)YL z7V5aR-~$JxE<9Mz(2=s$1g2>!7V(`_ag$Mw3KtJ>SXx{TuHtF9ZR6(w#80HW8|UtDv@h`MGyqF= zK)LE0&KQJS8&1q@wiWb4E#x!c%;Knv{Of6-;SuOb8sE}fSzUowMsnJ~+A3wNx)?e_ zy{3b!ytRl7`bKOFRrHt(xlW)v`A~+nbWx5|%MJpT<$#Ac?ZV^o{zfOSzHz^FbrBg@%_l4-Ca1E<;ZbRkV}XO4{j%^z z$J_u={Ac_Y;l}yBZ70M$$-_7J-Sjq=PiAy)U2m#SHlQ>}s&m0xc-G5)rN>=(I(1s# zAIX1y%0RjD?axhYp=}3l$c!|St{xF-a^cx#=2u@kX{TOTbEA=c0PU6yL+BIbUH!3* zUQnx%g*bkIBi^+u-H~8NKcB(R_O`IdC=}}PEMDPRXy!9onv*ZI=L%lX#&kV@sPBwR zeqja$+p5whZAt(?+9v5b`awaSjggPKv`JsQTsYN5?!@`26E}*itu0eu+7uldT_t;g zhl}2RzQk{@%U|lj_4TcG@zN#q`B1z4&O6(C@4BN+jZ@Ff#Wu9F-tPV4SKDv?=5My+ zM?I2vtSu}oR+#{G;(R?PJk`Qs{|P&fiQl3;%^2UJi44K4c>@5F=Hv+aAmys3pO@{e zt8uO8 zK6~x)gFFE5Sl>-}vuM-P6#{?|$#O*o@-K{{jWSCf5vKX2pYj{~MH)h1`KJE{ zX8JYaBL~XtE@L>xK{GRxK-oq%H`)gM@8tA&8^(UVc=>AkjgS3q`$xa>J4#yfV;?7v z9BoJEXWR8>j(~xi;O01$)#7%<6gSV z|KKkhulU`ZeO%^NtS#^j)fSc44URp7EM*);obb!;GPk+ttFYayr0_Gz?H`3iuL47O z!pgV{k9t@6GJWCcI4vlWM?0`&Q5TfCGSL{A>UMho>`ND7=!0Y)am?G6@P(&1h?#9> zSl$!@ws;AvJ`GcLcj@xfI+KP@7)ncZd+J1t{u<+`OWcg>CQskWQ${Ra{sceMqMwGx z>lozBKR_t`fkjzT$G$i;x9-r~g$VmSOY^(_;U6xagA4iG6fr#wuexc1v6l6po*Y4c zs;AarQE&(zHAa_@tX5$qILZUZ@QV)nFY@n|Aj>t04|=+*TD`&&&MiMt^xai?s-3K@ zcyo*EJ=fd=`<>ig#&7K2HQ6_r|LfAdGd?9l2kWEjXm~A*fFLY{=jxw(C;!_9hP$A? z7X>`R%#t9DE!w4j&Sk!Xet;xCmr`gSZdl>E7jL^H4?L0l-LrK%`0PSPp0B;Sa`}lL zkA8VqUE(;Re~j~<1p13kJ@45sn_oz^5XQ57stz4uOosh{C;aWFp?2NewasP90qj0x z#RAk9(XIZ3-wa2mNVE96%QVzeHB>6aesic@zPi>Hm)6>6zwlr?e{r*2fBot9p$~m` z`{N(}5fkC0UCFlXADp32@D1Le37$jPoTh#C)z@+Yz~B1CznkY{M-R1`>FM^j{>I;I zXKuK$z4FqF?b4--?fjb;+ufi1Wc!bP{%;6skBfA6=T^_K*muTTtg_5=Q1A{uqxh9R zhk^{(Bf^r{7Xkbvo_flSxo&_T4$nDn*4mgvU~GElt@G{2e&nup;bLtXIDF;~BInx{ z<4()S;tcx9n&uun z*+Nf}g?TzU!!vF&O`(ZL3lf&iP__kSJC@gO^MpS)60EIkVOwotORc6|{iNd6%S-LT z`Af9*Lc4U4Yk}+1Lc4P5a=Wbkb!DBlztpZ?U2azvmarGDB>WQh1#Gg*SILhJ;YR<( z#npC>@D*sfu(+JLhb{UKPc&0{KqKFXz=4yX$>$S1_KUoal? 
zSwgB6yA@kH^Izn0?x_03>cgyem^sh*h<6SkE~N9v8+Yh4kUm&`Dj3qV`p+b*^1BZq zNn>dpy}?uef@k&Bq=4UD1M-CzB&@B|c1zTMb$z5g@yzq>^`&+A5o=L>Lmusg^~hSy zafC-~S7>1y>We+#JX42H_?%0Ya_m+7u1<~(?q20N_V?BV7*-@LzY{(SrFJ)dkp z_M_k5&c5f4cK63W)z-jc3cL4}SKeyh`fcCd-uJEVjr~on+R6%jKd>Cz)h1TSM&ZEL zF6kHN?0?6x1sASdZF7_3?SJ~UUv5`v|7mcapBQU%Gt=$x;c5D0c$-Q%*0Q=*EN#?P zqvmJ2F})%rWD1B)Wq8s z#$Gj6DgNlfvZ9A4|p-?@t4&#UtZ2>Go_77gD4Qt4@6)cAuu zY1iUa+b5@Q^boa4J0O5OfU|jY@|6iFGo@L!P|BZr`;;_XWREV}8{bqp=}Y?O5U{)F zraa*>1xd1ioAH@+_Y+yP_&@xxJThL=_rVT#a=C99>^}%`H~a-gW{H zcsA{!KFMWE6#wgD_lN)Vo6k%5l_p`{qqBf2D3Vuh(IHTr$S3Xt^mX2Si6ckylflhF z&QbYth-3fc6Tb44Kkn2ir2{yhA5ebC8~XDf{TG}q4;WmuCi>m+3FBY%^%iA2$Cb}V zh9^jivjiz63*|X*`OKwaVvrk!qfl}IFtlHd_kkr0F(7{m`Tfm3kF9KUc02bjS;n$%43n?DNBw7^fI z6z`xDm^@VDN*Ztp%xVl1_HS|+AJ|<*%T>NwOgRwV!S6hH`}l%oy-G2PXcxwRO-yw4 z){!T8_%@u6;?e(w36dlZtaA^D7$ik&%Pl&aC%675?(OyITn1ncvZGOK;pSfHBPa5+=%e0m>ux-gAaxd>5?v@E0VhT5fWxyS~eLYQ6ol^KR_x>t)3Q58DPyQJO4rwz% zLRjixda_fz{Nlhmjt*O4%ObDg(tJcZKpXg4F{sW~oIBis7d8$+FVh#iD#M8>@t58O zm;bo5L*P`N;=S&WbZw03<^+LnWckhLyzvGkCq}|+R7D-90c*KYA)V{nV<-4Y zck5u{9>cDF(zl~k?&=f${3beWE#Fjo=B+x7W9VUd;%aAa26x-0?LvPHW$?`;i)}&@ zC*d@jk79sbcj835`Q}^N7a#mm`}D_uzukQ64egdY-bY5tp^a47@~Y`w^z&^AkM+uq zKkATL2viUE=)DqV{NTownt-?ZQv-KsXeZpdxOj;~+ieYl?B!=(Z2!Z*`e*GQ{^S3q zec|3ux3|t+XbYDww&lgE*(rL<4QI%|(Z(_C-h0Pw?YqD0TiXZkx}$yI19!FezyH1M zz3;uVz2~-@+buU<-)_70ruM!&-qXJ4yZ%`F@DF@X`}2S9C)*GH;PHIzPHU?e>QyWcdZ5~!|#_%@4P7lu73B*E-d&f%f#jFIyy5axBNzV z?IN~4ZKL6~$)MJSjkn%9*Z%dt`seNF;bZOCq2mmEHYtBK{4#-Ynz!b-Y@a# zf%$zjcSF0dy12ZQH>eLCI?P}c*xT!wh;t#iCZjvD)5c+6aFPCH*b*mCc_*&CP9n-{ zwo}iK@|Xie`N}XtxYfXeIOU%(c`co@`MKA`gr4HBOft)F#s`Y!7XRYo-~7HU2X7@o zUw)Mtx#lr8Mff^MBr21zIOVE;V_%t{o}S9F?`vzTu+(mQ5Kr6&$v41Ktq z%#{~}m$m|)KAk*Zxu$mm!N#UXpxIXPhix>})-n2EC!DrQa!G99!bj<+7)T)#@}7(O zD{D(_eFJ*0ue8;b74p_wtZuMy;ol7lrmOl6rd&T&VYqain4D{4o;EP#TWs?0F7S4; zpzk-yeY4mFdwH!k2duO!S1xB_%SqnxNf#oALK8n-vbrK&hT81ReA;Yse5OrK&IGQD zUoIGngWtyC?F8^ho&41o*xh=U$4tltPr0NN(HY?&$H>}H}BAtMj^DP8}%_?-tPCg;QFUr?dcAn z%1;CIfA-f7KC(y*^SYtdb}arSEuhsCp2kx$EzN`_$c@NsP1aQz$pgQn&4D5BD);%G zvpmGzosYZlxV)J+ebF_k2H^eXp_Nwm+E+TEn0}eCyfu)7>LG0?b+~f90q?HTANbju z-8M5f(&Q4cF{ZET;$2T!z1bm!YE6gw(Up9v1^cVKk1=uVBaDR>X0nf z^SA5dFtWahtfg-Nml5HCy9=|~po8w|-y(Ihj;!Rwk`pFl<8}11M?_{Z38qw^W)jY^ ziPOw6ZkmZpnk};3-s#2Gz3@iJgK;frj$)! z>A!uQOIDj8ZD+bD1DtxGzibBIv@brO{Uvb5Zz-h0!;8lG-0_WR1;&pXj_TPo_5tGV zK)Bl2K2Dubo5-B(HdxyL7J}9zLlbQUy}5$^9Ge_##o(b~Yq% zaK4H@b@1!axX0%Yx5?4bcI)l8pm#Uh)6YKBE-VkUXBRfx?|l68?Q4%d*go+7yV@Oh z+#Vj781s$WAl^36m2O~B57&k?kb-}5$^~U*tm||9{Fo1Cd&t|->9jMH1ZU|l-D*H@ zACq)oTJ~ODE_kQSph3akh)kHye!3R^!4NuHrfra8$~U*$`l_~$DdoeK{^pg}UTyc@ z_dxs97angfUR-Kdm3>HWb4zMB01VLvZemp|h37(Gy^V~`=35JdQ=PcH@jxFcP1aX- z+6I8ufc4~CSK2dAywJARF12Gvk7Q%1eTJK1+)S+fA?|JpaPhKOQ9F{dxo6sndP;k7 zlV5yed&3hrtY>&*guL*4$Gn-~3BM`(#pBjKV_j)t8PL+_yk#ig^6#R*6E@kD)6vnC^IOz7tu95?OmZqOD`BQj4S z>Ra*;AZ0GpGn~BMx=UAmfjUOnOJ**(G4?4iltqG)%%2WG9@7@!MgN!a3^a(?gJUcv zb$`+d8y!v?)R@cM(zxWs$z~Jg6EN1#@<~^V)0IbAu?=M0ylDdsgyVg5O#I1}IPj5q zahXQ|AKN@mz9vspYEFHlhQJp?@ld9MA9Ry1s!Z!A&cab;QFw;E68R${5XEGy?ii*IG~l_DRpm6N~)iff67M*jOX}j z>NmWN?i_AYZn|<|l=MUL0W`^YFSb$Xpx9zzkZPY4Z37t=5>-f&B(|QbJ^N=a$|!#y z-rc+K$gq!j_HLT;r##bFA-GCg@rH3(o?-nJ`RLQ@EbqYc|G)X^|0*3nS&Yv^J?~w` z8~RI5`}~9{=RRBCFeYJ%Hmpn2C*Kq1&V{xrb-YQlFQ3JeB_9EGAlsfxUCl_Ob6~wc zuD@HxwPD=j^~ZQ!5Fn^;tIqlSHn_MwF!Tz!`n+7qku-_&;fg)tC0zKpGybh->O53! 
zr!F^-n?n7*wQ02%r4F%Q-I|gI?y)I z5sOP(?c&1K_P|45X$vdc?bO*5?Pq`HFSYA$zcZPFw)TOUESqPGS05yfpCLc(p@T18 ze(B}*{Ik!tU;XvpNdDu;PPF+$v+eKv;@_q}8EY@U{6d?;Ui#O+{4d*o_MiX#E?@pr zrBn?XSO@Laz31?pO`@SEzqg^`p2FbdsZQ_r@?AAh=i_0gx= zqhEQlJ@)8x?JEyG(H?r}k@o0UA8(I5{8)S7fk)btPd(FKeDSsR{PQoiH(z_JUATBD z_1oP7e|X7-{+X%iw6F7Q1K3g0!xOEBLHm~y|_F;SW z01@`(o?0qc)kUu!O>f*5?RU^Ai^0*l0wAYNMm~`T>g8O^^RK*#y*=7K@*n(_ybSjI ztFN^wZy!cTcu$h~VXY-br7XZiuUaTF))n0Oy?6D3 zhDFSM{l8zP{H&d5T!$xdy&Ttg-Zt^JA1;bD)*+4MkS3>KUe4tatHwqT?kA9&%iz(f2|4{zI8M#P^^d-Cyu;j3ko5J#DzA&JT z;}i9pHu#owa}Jj)FTACEfh&vT120e0bG^hdbmqQ=Y`XBR?Es$EgTjGcIOPH(#w}aU z2jy~3xR*EK5Kq4PL|sRlR6rkh9MGVfgR9w&-r&^M;KBDfEOoz#~+n3usT6Vt3 z+S=+OoU_}$_q#sSZoKt|_R)|1FYSeA9&K}z7*H5r3zsgpjn$>LwZ6pjMvnd7!I+9U z#yxy!IaVbMp%EvxK_GE4vOQvEZKbWRFSM(dFSM(^$p*b&dFgBIxBl(FZJ+w|C((f` z?Z)d)w(t3l?{2r=cw;+t?07qK=489>%!%ea(xJn1?dY+iZFYL1O=Eyg&-iWs={7ez z-A)}j+HSt#hIZ?%H@7=(zqQ?R+l}q|8%`59Mcb^kH4NGF=ih2$<3nwFjy#7>wA*6XjgTW`9xjnQK|027WU41gSPpb=S|JU5^1W?TBklx^pvnuhfl&+@pF>G@_b z?USRYe`YVL@3uxf+1nactHkj#j&RyBtrRvD&`+`~uNwHhAb-WiP5 zzv|;ooO+gQb-@Da4JTk-ob#SgMEww4aiJpNQAV5^j0U@7Rou$;oG#i3tZt_ zOD*1Va#MpXZ{kf%F@PR!6XTOP^cGuUb>-J!BXU@ThIK=L}3DuJo09 z^cvn>2V7*0puNk|-j?Lr^qofsEYs(1UN5mRP5Ka z7T|p#cQNXp(+~T6z*Uq9lbFl&dXNR$IBzIuyCh)}=qVR3{BCorAIO(a?5MMlm%KFz zuh{R%_b%dP!JTx`X8|Y0ln2`eK( zdHOd8*kEjb=pxR<81hXWeY4jSLe@7|qMx$)0=e?YcsJ&_(Z=zMpAnHJv`$X#5GUcP zd+LoJVo29-60%LCUswXCE;U^Gg@!!$?Ud#O#Y|M_?|yArdC$67zf5!+)}=Flb?d-Z z#&L=107lOHem_dxyo6^_$t1?BX9-LFc+ME23#Xq*8Zn{03uf_!dS5h(|Ku(G=;8@= z=UCBngMJX(Qu5oy3e!|K&ITuzm3hpKsS)cRHU(x`b|23bY@@ePhEx{Ak)socs0^xA26Nux+J3Po}D| zDRK0hj$2*4ckw&zDbGqb@VAdmJo4aT9^SwWx62Fbby^eHL9RA~e|8M{0JbkN`_`Uu zQoQFCWl6r0zFoCshg_!cXwUSW1o%0rX~L$a#@bsKueQe?f4)8b#N+f+*u4OD?6IHi;zsQqTxcbB8l%0wF;AlP6Rrq_PNL5EB%j3q;9>{?0nOMqFj~ec#FzrWa zk;&~Kqg`jbu#t1}X=9Nwc2_XOFNzQ?Fza(O(oiZ zLYvSH0Ga6Y2v%gx)Sh-CBGC=hcZ6|~^9`0ISwcJf@|Kr0wA`L2vN%IR>DW)yyJ1@u zY@0dGnx39PAE1S^ATZDlGnVv|aNFCf#S4Cg*}||3q=&A;t-P+|x9&Ngh>PpuRR~so zFMS_Q1ItIWOTp;#6}%)%2=0&InN`=7VtdZukPj=Sq|6_#EeRpT2tr zp1)mc{(ROIymA53xgJ>v7%vf)W%=RAnsC{I7kW9&*CmBkSGB&cL_r ztSq+O-18XYw$@Iay}td8zxkiG;i(xiAjkCA`HUQLDay)diKRB3b7|@r;95Lr zz4q#>?Mq*}uYLONdrZ{krpMcvvnSer_`mxrp~G{}eyu(H;Dhb|`hWX-iSOCT7o-KN zd1fDBi`cJGJ3@UnzUmQOYaP41>I&1@Kgz=DUpG=ZmK2tY&ts&K9xkPfQqc4mwgA@; zf6s0019zOrDUyR@6YcPs+glr-j(t7o(fKY)N9Juu?Y7cO_R2o*4%lY+hesKA(r#EU z>Ozl&ZafRaYhcLF54ivg{ZkF|(&q|hhqe}RBEogl&&pX=5q2QIi z{AByubFa3SUOU&`eDg|s{jG)e8rLQAUcgQ}cX_3~Cd|wDFDU4L|@5Va8K-?ya(cqmT=e1_v0)^eid)wgw|3+9miGeAg2;7jgpQBZiO+<@?}y9Dcr7d8E1yq z`J>z_Qh%cNo^6q0EbA?;orkbRy7EI;>u4HrsBw&7@>!W~Zh7Cts&>~ICI*3TTox{u=Yh5X zM$M0ut31{WM|}EKGWY9?+Pk5l2?a{^ceiVNVbg}*ra>mNzCizA9_30-aDz?cjK082 zC^?QFc=l7;-nCw#>&vOi*;lA2f+OCnG0-+{9Cr-jiw4fm=ChEf0qrwoHdF`eWQ7crH>|_R zB4uU?sDVU(gyOJ}ASuJc*ugB=Sv{GFzmf8*B(j^idqch#*zGMBFof*@NyROUs$T9X z+xi&-yhw^dO=5BJk2pJ< zq?2YfXfvIuVQSJ^J#-9@g0g(-lr%joX-DRvqTztMY7@?nb(aQtCeCM- zW`BuyS2yVi+a#S=@r41r= zwv*v?Z@cwPRNKWiF@3K%GpS`=zzeQVeS0Af#kM=%jL{`B!UMK&Nl^kUu2q-lUw&K! z-J~NBz8#Romz;1%0jX46SjyWEz}*m^(Wy3l^rkj*;(hha|J|v`qHoQs@xmJx)5%NT zcG#qI;a-cPbpk*p<=5A*sSQWhTBaNypq!EwsnmpsA zqDndZ)8+RnTAI!~b=&bMaPcV~)1a%42E3uY^mT#Ei8tSLE<@S+G0?%N^GNxzj)z=? zJv1}lj?Nx!cieJY``S}4v|s&|Uu#c&^`SOOo>NDUwj=X1*y7M=l(yYi4O&2~y2vZY zdkva9egOR#G0?~F3b!5KQnX&HZRNtn_Sy^2xBvN9|5f{~-~ClmZM55OyuQ8v&fD8h z{`e2K4}IGQ+Iw%mxt%?Aw4LH|r|9IwSeu=nZAXqCYDbS8Zii-%wCSnoHa0fdW*B(R z&mCe6=Afx2vR4@dFD)*#H{W=*tu0?|>nm5<#Q0d7J~W*H>$iOHyV{|fZfYL=UNWsr z(dkOZ9Q$v*TmW_l9tN_5GdtTX7#Dr*sOvb{;*%^Ct0!rb9gO6Kh0E>I`Sb1bpT8S? 
[GIT binary patch data: base85-encoded binary blob, not human-readable]
ze$>+iEWb}^T`J50k)MPBmp;LgH)~Y_I2ZACDHmg3W$pmVN&n=ZGe7WX3d_&JKZai3 z1cXiw3TmRKz5`h&2q+)m>}DjDqGdt!Or&VYIpIPc0s=P^hPdbnoNp=HJmO(n|e<)sWe4Lc0!?8H@dK;b&&Lj#+1noGJY2H{*N7eDg=en5f01V=7oE@&IW zGpMl`@Df!aSbQ1ccHpM&JV$~Id;(g2)OaX-&1;&ls|k|Eu=0c_i6k{o4t_KQO;>bD z1`Qf>gZrEdmPDl!(|82Q$g6w{zS803^aQ?ep=Jd(B;J050iAR|2maR08}0L-`+R%) zwxb4t=UYT_i#%$VBo9N@epT6caaI&O;CpLgUSkITT0@_g z88|G!9~$0kE6Z)&qs`zQ4T!uEbfMu>5QX58edbecgd5Uy+8JNh2s5Sn8=vjVp9^`> zK{fzU)wU)jA9yH-eiBFCrvYL=T9Qiweqt^|f8ZO<8dO}c)c(R>_>1k$M{iQzReSs0@3kk7pQaB#IXp=nL^}g9XU@4QP^KEnaBuAzmdt!nUqAs17 z7kUfdRu?ZT&eQ1UV?@Q#CcPrw^@?A~U&V(nHIxT$D97p~b%t_9jBjW;z#JQueB&d; z#$Pxof0ol_OyHdt3Nzc5U(%kcAJ45rwR3DH2azzYZ!%=V18K71KzhpuAg(@Bw}md~ zY16AqW%_(yoASj`k`xW7ix@@!p);L43tm>U>*2)JVw(Cz9hKwO88o{n=pg?1K zx6=S9EH`XAg4M3}jsNwv4RAhb`}+rNcXu}nx3-0Y<4upzgZI|fmbtIDd-v}&KXcd4 zg-g2vv)UAT!Rxh2j&@OY!2-HIGO*C z<36T*;ni=s;ftXz_(^~s_$+sV9h9lheD+`ZOs5}iIu83ePUDt{Oo}Ff{4W+&Ke<2T zR&aEc1m|)46;`<67Y00p_786xpX1!(!>QwlH_hOE+g!PhtidmCL~(&zXW8lj!(_ec z_kCj?zDHi=LuJ81unWo_8SZhA)wi5cHW1`Na8nZ8gmQ4O8(f@#w4NT%=tRpBJgjZ7 zgiNTze6!&YSuB~a`Wznahwt5dqx?Jg%0wWzDZ{a0_;but19I`Nc2>sR#A7{UBc0X& z&-y5n)=9nLq>A|r7x&N$mwK4a61ZE349L_$+-$Mvxuk=x(H-){UT zi?1-qQSkb6@B)T1AKamV8=iXO6EFRGS(M``I<+|F<_h({d{gwV^w5cP*2i{s0nM}| zM&CO=tTf{3B%O9m@CB|sV7XP6%8+XMwx4Aty!5Jf;dn1san?5>hD#%!EXPOnS6~}L zTsBG=gx|}UF}WXFn>+BUUV-mDgr;85DU+k7M{v}=v4uhl`M3HMkupw-tK&76LND!G z?sF(e`*!)qH`uSB)AsguyK}o1y0>p_VmB=}zk%<#dH?vZ9m31L1#kj?slx$U^cF*k z6mC5mvlM+FUPuM1PNUws+_|iyc-tP@7=~XEY67ZcwA!7#>IBkQ4v~2Sgh=Y` z-7sa1IygB?Ile`d({qqj`^i0IaA$w7eYo?iJ$b&{_6`o(3}cQPEp9p%A;0 zz%V4`lhPmIPf_$|xO`LP>+14S0)&?BMBig%Vo7xw7ddtN{;qA#6+Ln%iq>c2o4b>xJBP3;Sd36{ z{a*SSKNV7&J00uIw*~k#pS)Oy-^srSdP@twEHK}0ZEduz&DA!$1fExnUvlb#m{&Wb z@|LJKjXLFwk~S5I{HCUFK^JHK!K~3i%0iv=qC4|#a6d7LOwXPjGIl;f2v4H(oOjFomkWs4 z64^}XW-NGM+-uXs*{O%vN z_rLr5?VWG`PJ92`|FAuN>v!9GfA|mE+u!)zw)^3?+wR+c*q*)pjkfpJx7vHZ{de1k z-~Qe9y?^w#+xy=n&)5G^`{wVF_S@fX-~NN&=lX;8`@i$s?e~6%@NfJ<`|fwY)!zEn zx7&BW^PTqe>C^UP_h~!5JdKXJ``~u_>=!=UzVziUwJ(18$J(PWe4*WX^bb+`ttG~> z2YW~D!}mXEU;EnMYJcZ9|4zGm?|!?neT(iJdmr1tH3wf-@J(^;MCTe}Z=%b`xcIU$ zRz9uq^UzzdrPbx;>-~`ZU)nf+J@OiN!tD!&3$NMAHaz$ESPUdAy=ptAa!a9Z*K2wVMz2mW7#tjt^ zd&fxgkS=jXqz;ua{W6!IS&SVE{Vm(??B_&TWeGS^MmoC*+Bd`3x7V`Cb?50$dyh7K z_4PNy+pC1t=hx~KLB|%3;cC-N*%#t@e(apbX}*(fi2YnPk-Cw@6re6m&i#Cb@sUCK z3bq8tY;psq1@4yN2JS=n^yqMhxJ&qIy}fe(UV9B*e&yb+cE^pjYuJjA!zpSP<1HW7 zg5uBt{#$S(Pi&D3{PYBj*)Cj!1NTexjP>#}iOWlC?GQTLz5k&7<-h!|wV(cr|3aH# zT>9+Ub5K=h!Ow0y1X=Zj&1a8q8%fvmgG2b-X_8&&aM=Fvw|~7|J^o(%g`fV3_9I_- zy}f$(M!U7S+_v55O8q?LBx6-zP#!KXX%gaZAE zbTnw>CFKSM;=z@4@=oR|+yG02ZErl$DZlu;c+ls_$Yg^67y3ert>{d)e%apMf{UJT zQO8j@xp5~l>Cjs~UtcvJ@1~O~8w-=WbDeUoZ8G4AU$|LVwuJ?z&LfOi{nAu~!ugIX zEt}lLTV7mHllF#HTkEdx9lm7-ao{Oi&f6x=tNN>?!(T6T5gr}XVfa^%IAA4?cBeD6 zU6bE73_R(?Rb;EUB%gvg)!w?~!AZ9`_(O8*LU*~1`=ni=XZ$4hv2ZWCUOVgTv*mNi z&{m%eIYCW$02uf=?nwEamcs>OgK67y`j#3Wc=x18bjq1+4L*Io#2o7Hys6Jqum_&y z*sccT8Dug(K_>F44qwKhFfpb@f|Hld_RZKMV=Cb@P65Ukqj)l4UXTTL$_)O}OWwq1 zpFx_eFQl?HZqbGG;@ff5iC^x}SXc0j%k|FO|NOuAvtK34!&EUeu7)wAgDWSmee2f2 zyu56}0+au07%!QMyj;PZIFZ*c<5;30^^CU@D1-JX zI94*C#gWF#!Y0Vq)yv|KBs5-CJo&l;)8%>MIu05=T=5H|r|~YXgQ>mQ{*?xy1)nOd zaIy{($uu(1H9Skg36^x`foJdwpBR}Z0wugMLQR{zq%lmF#ut@deiUKS5k`42dP(Xe zfO%^or^=5@Sw0KXj4B>(DWfM6Zji-0t`4_S^>jsL{%lF{pnL@sSMYnjejjnxcZ6I2 zs+;kBVr}9AAz|aPV+jvYqL&>?CQy>J=xz@)nMhB5JfIq$#BWrhM_Jyv0w>R;6UHb* z+^ru`4(ttea>loqlbrbJi;yaSTuBY=7*>XHk1Qcm$HL+)bx4Lu#02bmpX}2CZ%mJ@X7=%^~#12Tp1kFSQ!m< zF^-ALf^R^jopk!GlZ!}h=vcB%PtM!+#ztFP^%Mh4h$}jzL`yc)f{*Cs=>z_@y--zGad(NQfDD5omG>oLF z*DxZ)UuXL|d7&P&Z5{k&(wIc~&Y_hdz4g+QXJv@H>8h>{;FPgWFA=7#EXxJFY)FDH 
zlx@F(=0aZvyR?gXRl_=xNI7!Z>}-8g%65sNXB!JrzwychVa6L)2C#Lk`if^Z@uXdq zA;ah31$53*O)SahG3-)bb-0ZK&bEPBLnMtltyOHQ!Q&(f?joZu zBbHRAE4m2M1_x295nc?P5 z-&$QjhL+pf#v*OG*H$3s%^UKM-(!SE)G>CP6?HOBog+_Ew&e#`zbQ?Qm~De);bd+hKEaQ!Lv){Ocy=4RpV7PL7V-@uA;sES;eYp$$NPM z`bU_-(VLydq&I)Y+A>&&)7cX`>7m{Fv(fd23)l-T5$bGp$Vdciujmhb-3B17@}aZL z=vrbbsLD1Ept@7vPB2fN;A#xKl(`dRE}Jm5ZvsvPp+V}Re3vbwo5*v@;!auWAoZqY z$G%h_6rkXt>^N4fy42&8b9DurHTI3#nBz1UgxbKs9 z$|aR$k89$l{oRG|Q)N`xa&0dV1J+R`|JS6XiC#O8udK1|kr!+rXi5y;?V6UAqrqw2b$FpJ9wniS$ku?|a=9F{eqx{ql$^^;O zJJv<})4MnIlyVYQB%gSRw>sQU3Zx%#>{U1_ABDDaOMd>&QwxD_AHTSkajqMd=8%6s zQM0mwoYI%ADZ}Wrjb*<%zR;Gj2Ue)#3S<5i`hUmXD=SNFU0Z! zJKBBP_Mg1po`3k=_T;_qv}cdsZjT?o+jgIS*ba%iymHa;1bn~+e3lkxvO#DSST}BN zwpSk9Yp=igsD0)$ueHy8=?m@Q5C2fR_4=c>cK?2xzq#G6v_*WI-^wXJiKC%V1Hue7PUpEjy-K=l*0 zh5b#&*yBvB;%v1tdsqL{Fdb934I1weZbY8}P+G3a5p4E>ETe??KpT zQLGPKlXOM^zdntjzhF7G)BbaTS$p64I2Thr1;F_xKL;UCWqyDxrd@(>ZG(5;db|C> z*T3F=_UC`Pt#7Tj8SFeyy0TAl!?k_0{hl)7r#w%NPqJCgx@af)vRQBePvKFSl<&I_ z&V6JoLYafsOUREW3S?6x1k@(cUiY*QGzHnu+bMRli|6aBi|xkxR`~wS*FV!f|K=O* z)%&ls74l2-i&Hm*Gv2|UlPCNfhy5tBba8x)zBxxOXCt?p^tns)%g)!%p_hL8XMVo@ zTmR1grp+9lBZJSQd$gtf#HcpRoTuqQg9X}gg*u)e9kiq8PvLXQJKAq&J5O>V$l3n; z?MFWUM!UU1pT6Pu!&~%2Ha)_M*^DkP%9qL@e#c1slxp%PHjYvwC8YzL0M6RL_T$zy zd`TK(Ll13r#RwWG9SN^G== z#5W~g>t+j}{VdPN1nDPl!qRrS07+P3RL(gA@l6;!lglURVniXyx65Pj;{&R4v_|a60cX*WWfig z+((YnKE~TN$|!QFH!oP=!Buw0-?lGpNj7nX@EM!=*$}~w@mXk^wpKo>?PU*L=GIKp zuapt~p%5R{HF%V7;j2-?GwPqfPqm1%Zgn@%Tk0KL{TbJAolfBg>5D)2-rk)sHCJn$ z1YTjff?KZAQhuU^18mwm<6+vS2v(Qglxwm44Lx0XTyv`zQsw3JACUi|pGfUS^)bc~ zfpjYr8vaOK@OdXBZOK0|8GFpnd#VVnmbNgBy6Yl++_f&&xpA=b70E0wYweB+a>?rA zBW=7Fe~Uv9N@I{QxY1tRb;7gmUIkBHAgzUbf};-a%>4cT@Zb3jGJi!vIsxZh00I;# zCyVuqoOh4h4zi^6dkIc>tFIk;WkUeU^dfJ?sW2+s+Ke}0rA>9F+-ZR9M9Xv4M4}Dm z@Se*1*cHHV;$nChd@5?>wE=Xdn>g{{*)%ao0RVmDuFHI}oDrV#XoNhIZ@^G^5|;+y zS-%7!tqM3{xnywboEzzCB54-J364=N+%T}jA?5fi^Ejv(@eu-bXEn3 z!>DT=Y6MAz#iePv#5S$v6+Wpwd^&UDhe(iHt`P+rfvfs2AM(v2|h!&tg@ z*KMMEVHZ>4rt(%16Bj}25|ATg%xAr=$AAa^`V%I<^>pQ`eQFVX05_gnr?;-6EXS}a zFJ*Df&iRIIhtJ*VN8O8cB3F*+bjSr8yHJ@o?0|i7 zLcJ(oT-6)dc?rJxh7x!zEiC3uoRj!=*mZ1C;bFQ=t|}_QjXcmv-O`|itVbY=U7ac}9RTj;t z4#6L~D|7iQ18L!;Of)b0W=KsW3DA4=hU$d&yyVI=hJi2wCxCn70jY}WWy*V{tO#=w zyV4owF<^xs90A@=ObIu-0YgRl+0Mpmtolu>ryssgn|kb3;kt;SY?|XE2B%AlS%`6S zii0*MjGsSw+J5_MzuA84w|=Yr?Z5dq+p`azW%BIo=#)X>aW=$U0LLA-XA~v zYTy}^sk<842?U&NV&|KVkGj+XPHNbeG5(h@!tdX?-R{3~)0NTY_s`3~rv12EZ(ZI9 z*A0VGqq@^5pPv}|grKx5vLlb_Y_r6LN3MB4cMy!MN_t>k(0LzGyPf&iTTd z_VjHg&{tOcM*NbK-ZA{<+5-kNfBGvw*?#DYUuyU7-cP!B-+!;|?CfO0%{CLSDy#E` zcjz%Ta5(wzRlC3mx0fD#i(fo^D^Y!^uF9f_eAw%$GrbEn^KRrJz4|ikgZz#3ptm z;1{zb+-ty-w!>)mErSerHKeG5VP0Vi)PnhR>L#zqcjzJB$~rs&okMSJCjz8p^bBDR zriA6BMHXBZJr-^WeO$UI-#&DbOB`vpwdJ+IG_U1bj)PC(TKfZ z=F~L~%*0a?oPv{YH(R#4GzN3(cbQ`ywdMST&HBcT(#6ncb#=Y1t*vBY;Q7v8d%n*= z7i1jRZr|8wEA(Su{;@B#8{13m!M*kN+N)bMIlR}7svqilvdy}apEh;ECAI|gD)vi! 
z7OB-y5`NlIuRosaKqs!+{nNS(SU=W%T0A~o`JqXi`hjP%!FG6x$F3dvcj%sOlhlII zC%Ko(b3o~h_M3LXkav^bdnr*8VlOx-1t~ZyK@*#c@c8MIVF$u6q;4$+?7AVTM_C3ngz!P-D;o%`UXS4XB&f7oSYkPZpZGC;S zEyI_i+~-f9wY}Y)wz<89j$CfX2Z!z8U?1LHMGlZ@h=9*4rw)qU zK@q0OW-(!4?~F7$*}`(fr|6=Mpp2@3h}U)_eGT9xRke+MgW(r5$0D3?FSH`>HCosW1X}T^J{vx)P&QA%@yHJ*WXwhh- z@rt&#|AJ;C&OTIHm{YihB~Mi?eVF!wapMH1kw&_UQ+OC>zu>cRbua$YjXWbx{#T+L zvnYF|kFMn>dX;irh%cIyte9V+%^f#j4xYD;Rko3|6@>&ZwDtLkdKo%n&v*}wNHBx^ zU8-}h;D2=H3HFb?zWZz^Z_W7$pF?EFjV(^ln#M90wX?Vjdu?s4EzPtQ+!r)cyK|jfZ{?LPKxY1r&5bn=ar}5ar*1B> zUpr?kaZH`h;r$uyeaNYNY7efgt+q8}+HaxzHnxihwp--NiA6B5j67R8+N9pvt~!ZZ zInvR;WMWvF$osaf74CYOkle4`Xzv$kPxT`gCdEXLv#so>Bf zb@7j9WtjAl6LIS0V<(DH(YnekS7`>!0eAQf55L;>Jdbvt^ed@$hbiCpxew(|f9T46 zw6iW@;5XoTAG8+Ez-7pSyi%JZJG|&EbWgXhEKC2eL6i%i2cG#9RZskiU5y)MXlv@u zPSHcS*g~XrQ9}JGFV==iaZy&)sV*3uH$RP~o*Dc)aIQOi%78+@c5mhbE)PPVoPb5c zT2|oNUz(3{7Z*JxMTsMvG}gtw<#7Kfb6tMBcyVbZ{H>neKR!xXo?f9YgUq3Yw$=RN zQtY@(<#++v^@4VeiI>o~zNx5OtM}&GDfa&ycGr5l$z{Bs`nmGNCFF2I16=y1YO0hi zzQaFp>VxhET~cAw@@#D7(oN<2zD{QQIMtu;3^>xN@( zO8XypIGbNf{!qDVW$sxr%h()@6>(HwNS^ClY_RjTs$jASF^;QVZ$Ep zwE;@uqO|pGaxRoSF}~J-Ii{@+UD=Sj;L&d{!L_eg>)kjFVi=;5zCjR9BXQKsQc1(@RxaGi)!OaZtFr|KEc98 zs`P(DKMsD;B_00Edq_{5{Nfl~8b|;4u)#+|kD51^;l^9eRJuxYowvf#4WZ|}bScKd_h{loUv|M9=l{yTs67m?Gmwt^ZxIiy|p_dK=-R-=43#-N{1 z4&aA_y~a=hS;`p^;3Z$mFKg+yj~RDq+s~T_`rGE>Za=s}pO~@0lAm3>XlKVK(GeHq zSzMw|r@ZRRZF1;=NoJpIAHT$XWBW#1UeeaAF_ZS`;Q`mt9{1-N_c{J_{_y~QJ3gU& zc>fGKou5IJmCZK4al8G`{4wyt>DXW7>F z3LkfVCj&q7Vez>9Tblq%wqn1cH*Npai!kSvg)#DEZ474_H4ZEq=yLD=;es3Z8s{Zl zV$V`;^heKMas@u%I?)U0(j3B&^|~JJ!o2;ETu7K2HDws$dQHO1wl&tNvC+QMc8fNo z{fKb#3`{VJZpoZb*H6J4ZJiBvq)Q!ji7$WFM>GFccw`>PwJ7@qY1d(yCbnXyN8A)` zubn)ql+_$(HiO}=%NuFC&hgoH?rmdOxvql`hn{bmI5B=oJcX)+?2MflAbIl1bLkKznuUtX z)F8wtTW}nsmjy;LSLF)%ssQZ}CYV+zwhhlPN-<9{d3Hrm;gKwFlW-RgeFKhZg>6TUJhJ;RX@p}K%!tPwlVTI5#nrBC)A>|kLkKiFDnF4b48mPd2yW>* z%v5y=Oqstz@S9h|u-Aq1NQM)4%1I+jL&JCs0?;x3`thbq(xM6vC0O;Wun{Q`+N-h+ zCaP!s<+Dx#;vUlIH=Ui5xF&Zl{qXmRrdbmr4+T9Qu4Yl7mM3im zL0UVZp0(F;^2ogdi4%Yd%uvHk+}kKZ1pe@&#fNW+H%Xy`$H_3%f=-|K$Pd-d)+Gq# zF6rb;-?A*jYn}nmDjhK$zPl$es5fstXm5Vz%WdWE8|`Xs8)F@gbbM>)cj06R&vYQZ zDLS*@K!S4YsFd4t21{-Lh=vf)njFZE%G8OnugHt>;Dkxu+P6;dU+|^Auv7eAH3@A# z9q_Cxb;z#KT-M9UZ6`Nf@a>9|GJP|tj%@PiB^Qxr+w#hC-g3xKxQD2srVwIKLdY9>ExSAdek+c!JX0k$R3I^-W!O zfRQ_S<@BpeIF!Ee$G4&4N1tUU7Uh&qtASMMCmUL&>`i6P%SPrzH~RA2b^)|}5-&I@(>zEMUF9Kqo=mCp&^Klfzw!^+$wF5Q0o@3bHQeB=UH~{I+$Br(73s3hkJVr0G~q|>gSQ~ z9i;^ZZ)*$$-~90(Z$Ir6X#oQ^@g|uJn~T2d}G!-s4l=4UW@Qv zb}T|aYh=e~Th;|`(9H$!48Spv{C4Ke&2IN`^N}b^s6qvieclUSTmxC~ulPTGS42?1YM2JRgrB}9z6IE%P_VB?U${|i3)a8u; z=%vi3?YiMpyUMR+WypZexFLIKuY7}7e7S=|`d{v~=|(uVo%}BhF7S&URFC5?xrc__ z1>^Ud#83ED5B&wh{MIMm7U%Jt_Lmp6o1ExSkQ~IxzfL+pVd5Pi)c541tNG0*Y;jsz zqFq2!eVd)j(C`eMD%{94=`(l&b?d7xQ-0l8;U<{VW7-LFs-u-p<=}t;yX~-bV?B5t zFnITqPHwhY-B^bg&fEIhQoHxyMq6K>VZ6Ts-%3wuW4oW?w>;?@4>#_>D%~k&RtI{= zUIIULSQ>d;N%4c7vM0Zd>r-YMPR_k9^B<%9>9TW~@B8H?&S;8|+rqOlRKL9Wd8UK!~CL)II0Nyp#^l7i!heu z!q7=$Cl^dF(&@Op5a2+x+_nhuKh|?cp2Bo^;{qNcPdmPyc7XXPK7oQGLq3 zeY(6F8%dgQS#R;F{>aKAi*CHURCbkC>dBwG$+?9Lxp0v~=_jphUvBD%(SB8)aZUzN zuAdO#PkB~PkGHm^T{a0MoC{b!iE=3 zPWr7fJD^#OJH`a7IH-Sp3r;YWmkBHMhWf!kkLh+Jxb?AhLO1@rOSkA=3NBooj1=#( zn=a5B>dO>ree{5rY)J0pNt`?`@~Ilg5;$zKx^z{~;3z$G;zIehU0{(0l;@Aj!~$Z*4n zlbFYu99X>4D9ahw>|pN-GhU6#Iqa74o{9Uf#GQx zCuc{Ihq7ILGvB_}3BeijK|wEGEW2DW4_4R#&zT zxONYB+q2!B_H^%Ad;Iiq`{3D!ZD((%9V26xG|duqx_S3jyL0cA_Ufb8+WiL)+MWCN z+xm^|eA~axEI|v8I!C_K|3QCf;mH-V=m_o1;0UckU+N(J&*=lhQ{84UJoV$MF&TcY zN!!OoIdx~eik-}&xp z{Kk=&e2h#;O5G^mGR&*rcFM$=04M$Ai#o+F zC8zF1uHtdwglCm|fp@3OTDtLL{9>A#H|R2BI_(Y#LIO#wo;TjI>y3Uh%8T+QG|f21 
z{AF7OC!F`v9g?7EH}vq(cV%mV0i-BY)hl|0Su_-_st2-&-D*2tU<2talOOb@X{1Z( zmHd;@msYf`Gkz!L2>Dab**C4Mtz@Ha(t(GkIz?AI9zs7lKcr3V=IE3GOgGNXh36T& zF3A7L0by&%%kqk+<6WW;7`NP7Z?|u4wMFbL?dBQ!8udO{a>*+^r`@Xl@<{5;U+Kgt zCjq(>SCdw|$*0n2`V!8kaYf_nOZPE(r+F~g<%YYKzqr(6xB-*Pw5gkJ>fXoi9T-kn zk>)rpJS&S$CXsi{0mzA7I0 zfX{|~;zv2PpsOwcHsz?<3YT)FhY1p%V`L}mO`|RmGIwIblhLzTrKu8?6K6^sbEsQR zj*r?2_T>V=v^VqiA2LeMWp|ab#4QUf=#2`r{zYr?(_WgvHg%q5c6qJMt*nJc3rj0) zW^t{x1!y)8jj&OGBO=rTj9eGKIHnKJ$d>8Y2R*#hkMYW z>>p1E@Kl7Ql?7~aaDYa0i;Hau{;>URxGdPX0PI}H1^UA_QeXR$i0$tNRvLy$C1ao zZDd)MOq68E@y37bg80R^@>!Gk$&@vZ=ygJU_K&cPwKErGe_*L5uQ~-k!nDZ~<;tc1 z;YsP^ybF8_Pr1l$-{YKyC;#mq?dRnI#{kY5%RBP^5pDK+zxVs?bDw>q-MMum_U!)N zbNV>j0M4WBT(p+Ev@@h>=x0A*JpHhBT%gRd{ozlW(Rx}v<%4{B-H2eDEnw^A6i4dl z!kRD0 zvDF^jyWQS=?NNK(PY7V2KID4z@KJm7jW^q$_|Y%7pZk@6zHPkzX2Y8z+UiM z|MRrP3UVg?sG@35n?;_0FXb`kp`wf6FF8ux=BZfOluun_XNisN4GkQ3lOeQ=Ef$*l z6v9em@U=ahGs6CXfa+0gG8ZttoA%lM^LFpfy|%Hv4MGd;Ia^@GV%hm^j67Ct|XNY#6o;~T`DJ! zXl`H!z7x=0UuNbw?TMq@S^9W+FKtBTdSP3BGXZUtXDByp)6m8+%PJZMGwx$9bc`>? z`diI|Sia@x2M>vueB_pwdibOhC%^L~A;0D2-*UBwOYdv$*Eqnqy4!Y_r+rNbd2#IE zhIQvK^64(%#@5c*$MlAyr~I>e>DYM&u5uIEr5ehmgG0$n^t0{2 z&CdwsQ;B^}_nbb>wXpPU;t?8=)3VafC2Q(wlzvLg6_|v^>o{jYS7j3hoU|Rzv`H^bd!CWss|qWZ z=kA0t@Y1Cdjk8SB#<;^}U{ZMtUgeX)Z7g?cg~9s{<_WXAldG3E@_MBs=N{p+AnHGG zpLUbyL4O^2hX3XCNhkd~VJ7LgrVWWf?-4&}RR;gm^8k;cN|*wXtc6b|R9|o}KVRdj z(?6bt7aH^IoBVOmInO*635$+~nKroh=iyvzC`Z~TAmX!dWxC=6mDosDN}cv2AaCbc zr`!$ojnkw`87flKMbV1`NSWWae#~P1=Iul&`Vy_Ngu%On(R}mX8}0Uk&$rF{pQ96Z z(qIlbqf+5_@-K47?=fk1IvVnR^ZycKM`2WyRf~Supbl9(yVBw^6THC_1F%#58&HtII(;B^ewiPT8?V9hFmQC{8Y7F3dA1iNS)-0bVBLgBywoH}FTF z=APqS!A&{Lw_frMWR!UlK$HGvq%x7Z*zVqKd-CVV$nKm7%VZVtl~z{gTznK zkaDWCo8}QG@`&fzj(iqlyWWJG!}ih$Bp!=59rmCR_gr*5fkRwqu^sb&N1j$78ly_-cbe>`_CS?-~QS^Xut7Wzux}Q@BV(6wZw>gWiU>HiSe*a9%24@~&IWeJJt7gM{sUulzQtA#@iz#I;q3>*6WI++o zAQLM52cB7kd+0WnZEE;eVmI2%M2Wq%lW0{cA`w3y7}O|9i1GKuIn7=R!7jz zwL5ojrQLR(?Y6V?1FCu6Zr<8x5ALtFH($TgzVc(QgU9o>Ot|0c^gDWkzNw#18rv3K zyhePTOw{{~kGP`$4|Hj>DF=W2ah!Hy^~?I}UhY$tj|_agdl8RO#tTl|U&1%6hO5bv z(kUNLpK?e-$Fyg}9x^YSRL|WBq6X&0TN7UQ4b;x-Rp;V=c|K`P2j93+#{5^8#b>_3 zoU|gW{3l)M8T-)KN61F12mjkwoU4Bj5X0=d-R$R)jE0-mH~F7Edq#$i_d+lBHj=a3RbrW55+MYk#X~#!< z+0bmhCG08RD9PImRK>c@Ayb7@=^k~Bco=qidYpV-j*I-9fO+#4=sdO=E}ILG-Oz%% z2(u<-sx6}*dfWQR1Ivgl27Jd4K96=ExC}<2b12^pZQ|;rq>E>v1Nk!mS67NV=}bH7VYqH`LK9BdMdOkY(>oCn ze6XF|fR$4bw2!X91HDI%;5^!5f?xV6>qAn~%Ii5Ul`wIz{k+%0MB(7G{dvZQ^!Yxk zbJgwh&?s#zf8b^>7O;^|kP!+%Kl`SWJT1RJ`S2;W(bJNXCr`NhrX)RY+PF?xg2}5q zXj*ub-J$K0H|Z!wTPhP3)>YXC_iV0>J%fA_C*Ra@|J~LHPToq;Zg;#uKo*xNK+Nrj z7FU+@R==|0w_%r-SHmC5pHABpiKo13BMO4#%hl$+Kb&#ngyouEBJ-aUdbIhf34)KV z+JuzopsFi=dDwe*rSp2muB%Na3)MV?ZD;z*;{;^zAk0#SlU4&x^%I@vy8T&tRT_e@ zxDfAHeZq5wnNeJ=2=|h08F=YZ?aMPZl zC4beH=8v6eJ&7$lw#p4l40(!xR-JXlqqfN?AD%#mTW&UN08?1z7k2a@{w(g1Mp+Wx zDf)Ji@j_0I?0QfBRTtNWv}#Z5c8pyj-tw?vj<1z}?VrGQA&T(OkviIb;-Kys^hmkl ztt|!LrrzigrTomdi$e$K1KbPSFLf&!aO|1>2mUsl_NE&o_Ya>(Mwj7n-!Qa{$O-c3 zc*V~K_(rpvfoISs>+9?JT!(XL$Lbmy&Evxtm$v#i*$vdb;X2orRu|gZ`eM6vbFFP` ztP*b}q(5U-WUe+~=%AbOl(w{vy3S2BWcBg5sW#Mw-3^P)U+G8EYge@Ueoi=Jj_BHb z1kknXbR8~bI=Kv8hYeoyW~O<${86uwzuzY(0F3yub*IZIH{kWVcFXs3lX8BL`=oR~ zn0|4egvURT^3D5lx(=_)_eb1>mwGzw4b%2|D2H*#QkqwpjXpuf1c{!|p$+IX>Qhh3 zxs%_Q=o04=F3*qK<A++MInhdMi4FznlydnXnfj4m@4#L!m)Kam5y;UeYrgMXj1Rf^WUO zZTaACKLBl|7g{L&Y#h7=Bv7Efv+Efj3&Zi7Z!D`y9Cf0E+TyL-t!><4u)NvYs&D^q zP&PK+h4cX@V5&*gb4QF3wT1uaAO55EpZo{^zMVt#;riwl-t)F|c-W30hZIIqmf;?5LUO1Ar)h9Xs;0AyrKRHRhnaid01KGd|%9qeXzA#Mn zpT51ffwZNEaRuNnQ4e0eU4U9 zHETU!|h-;3m97kK94sZ1DBfrl3^?0seUMdgLK=YFR;f%g;x_p(c zZ7+lic^#e!+}Qcr?~dPEJMpug&`7wBJM6c#XIxabA6lVJ-h1!ec6#`v{U84i|GW0! 
z=8d-h?8CMQKO7(KGNwAtsQ_99XUKB9VvL0y>v+jcGI?7SB!}&xEoM3Pwc?xkTliSn z)c$wrTeGpof<=2@j=6CD+x-2_UB?y1c|xf#RoG72_l_U(t$fm!a}MbWe1Ty;;yi_S zb$z?7ti$W{Bgf=BI-$jv*V_Fr{dAk#zSmBXnZx4)$mk~3tG0iz8~gm|c!z5r`}}b` zz&77`@_yTW`nVnLK55JFuqP+1Qjaa{g>Cp;yFwerO&02z*l}Q`-KQRiy#}9Am8vWF z)&uvvz*pmO;pB5~=m*!_#{9KREIqVQq;dEwj`Zp^uG*BOt=7XyySVCSOqSY0=7+A@ z{YS60KlA7Rxi-V|@$U1?5k+<)U+}P-5IXR@cnKWMMhM7dJLxYs(ZRUM`i=ZKg2VsR zSshJXY%AM8bE)zRjA@wKEBf2ZZ2nLm3m&in$2x*Q;PlJq#8)Ezg<-iR2Sv|%p5&2U z>Xa$`b!nG;vQ$vSQGZDaajI;%R$Mb?L|#mi6Nki=_Ac1fuT0jV!UxXDizjeQs@!$m zkQ1y6P^WX@k$Y$~=xQ4A@*+HVz#k;F_^gXkPcRN$gNKImlxw|=SN6>uKQQYm zxR#R*|6OzPI(5$Vgt5p)ZYDes~3l3elNzEm^FQqj^;eonE{$Kq z1~PGY22Cs=t^~|_RSfi{yktv$f(ld+gS$;locN1V-P;ukKN=bephbM6(&y|%g=c-U zb8&ei6K1z=zuq2x_RDSU{u^kFE#%LM1`U4FV`Sv`ZH%K?6inU>M0uQ@V630})<5_; zfPfZJ6bvjhSTh&_PWS+vGMR}I&4P8B$Ky;q@)koH#a;{X8l^5K)p1MNA&sz{ui8StP@~D$KG2jq=1CGeQlUnYeUS8Ii zr-Ou~8aQ?+<~cq*LNOjs^pbdJEPAAqJCEG~=_IH)oSu2?xC@Uca@&rEQYMAxo39#> zHmh{-ttj6HG`{?9AW(s!>l^^|Hd8jLQzKvX@I?#nVNS{+G;iS2Ay`-6G|V^5a%zfg zqi%KJpg75jM_TVbd)BsZZnuTociP(KX8X(!eYXAS zAODH=!(aMByZ_3awsm`>E$2J*x3AKZ_w=izAERwY<2LH~lxb z-`c>~USG%fUTfP=9dIz1_E=$0s+fh&i(U+(Q%6Z9$~bZLjsLve4H}tL2cL8B zuzvNXB0MS1$}@d@ZSXs_X2{EOWM>#&ZZgm>M_8|X>PJFo9L6bga0_Uti*h9Q1$?!? zmlFV7Ko~nn<%vq~lkg1}KW-Y&@NhOO(6(k#1`Ts(X2b#d`Rz6Jk$IIXamq;|P7uML zL*62nJfBf74MFu*WJ7%j0^Y3`tp_bE47#Xq(*8cG$7=yU6D+`#kIxx!yNSy3=iyf; zgM8a-!QJxK3VhYU;j;*4=|bOzQD!_c(eK8VL8;*hjv9f->PKuZ(^CEk_)MpA*}hlgkAC8Ba=cy!dcKKP6Zbj6iS!rfWe3^H)b9R% z^yWG;w73MVNOyR!N19{gY^A;W$_D9xdVbWF7ObD`3Br^qu4zo`%>$wJnD#GQC-#Fe zktf5G30G;ZpFhc|=S(v$-PH34`UK~OuHeg6x|g;}%%?0L{;xFsN*#0~tlU(J@Vnhc zp;)iRfd%8>S;I15$b4hIstG0M|=BbxNkuj9zE0LLR&;ROdqN|RT4lb7xcsG1lrL_H}!?i z#p$zC`ItW2w~tiCgy(Gt^2+lE2Zs#W4)ZN_Wv+D1W!s=Xk*81TkB*K#!g8jqt!^Ou z*d6H31N4xxwhHerF0Kd7d}E#RsYY-QA4Dfm7zmfmQ#4hNM-Pzp@Nho^NZ*#%wh-TQ zbgS4#?zz;d=_(}}ckxiplrQT|XwqrR+3x9UDXX5Ltz!h!m_2}?g#XHz%7F-Vp&Px7 zSGIY8PPVw0_}5#n%A9bbvjheItIM?Yte^D?9}!<~TnJta%T?b9R+OXPjXiGmuq=`W zr@T!jo;E)>!!t46^Vse%LhL)oU#1v#TJ$wxva#{<+}OYt+{Hot?*dA^a(|4>n={ckEA^9 zofD5P?z-1HdlHs;+^A(8{dTy=*)A?a53+eQc;0Su6V|f)j$GUK!TT3IknlS>m`W~Oy$Kwq8G2+tXgFb z`z~Tc+My3DOq!~X{al!ON;@u_kf^u%R~_kQ6BiCVvRER;jsd2hDY5OcXehhT5YkY) zMi~{i)R`aaX<1IBkMUnl%gZ0S9ej?Qpkw^L&e+|JnVz!c*h?L8d?YUPh2+TzEY#Oc zQLA~g*GYQEn9z%MbDX@gjQzw|*?KK=d8F|wW3K!6w%U#D)i#HIu)ejC7`*+Hez@=% z{9n8iqV}ubLNke3LBr*d^wbW|53*mz^GBVr+Iz_}t_q#HPq`48dw9!df9#JE=)+XD zQSP|wEQ8wwN1vsOZe&z;bp5&Wamt>gnYs`Ab8_{#kJRUr+{^Z-d2}y7e}em`S-zy3 zrH@O}QJ&8Gcs6pl!Apb2I{ZmidCRufncu|7U8KN%jxhBCq2Yhua$j~V;+!&iKwWTo z)Gm&8k&_d2nEZ|%MH|Zhnd?N(Yk>&4bu98P}EcJFn+x$Fr;4U51m8Yj3O)ebmEz)T3 zfH$!uk=Mb&IVZ|qSfStcrGyjlI0*c7L!odZbOnZQFdQSfUC+Rs0UVa08PT0VV z&Xf+yyDZ@|fpy{n0XBy}{;NxT#uk~Gt~6Deqhu*-U#dTC-s8!pLjY1{D#WE)`vcd#?;<*QOS)K;^bH( z7x+6@n+>!9TQVZfE>2fnNZs?&*HMoVMcY>*V)TNyB_I+%c7NY1Yui zFE^$hAD+0yA8BpnX?8xky~pSHN%{US4&MadUH{ z?d>15*B-v!=5K7ZdG#c4)M?sWwrltb8!#Bs4w}2LJdU;qFONILCcIqmP&Q>Ek>ojZ zr#fxf#8=-TuBF3!dfKbtDreLg`1NOFjzW5v`1sUu(8X>NWo5I$Um=dExb#v+#+E(= zY48x&;9GFY_OgUp-=OM{Ra^{)F0_-fW_~wbr9JqId?#)n%KAp;smg`7J3e|t zXUdj-g0Hb7xJnY~4_L<|8H1=N%u2lD@H)-j4Sg>3OHblFUB^Gt2n!OWtXg{}KLArN z_{UBBZr*kAKkW`KvB|}s)TyR&Y#xx6mov8TS-<_7eW3Fu`V%JV(%N!_SM6e3P%J0_ zpEBGyRrGt=fhXlAW;&WOD-0l4SMSXH>c9I}euDwtSFDPKX8|CKUlIksmDi5~7wM#d zfFdK3?jj^Qf~d0uJV@vT!$xZg#Y>)+4R5cl3qhCjZ&@#lhC=;mGXdsddXE=>lIPP{zDMm0KpnP-SSKsvyS+1ma_{GIsJ( zO5stsc${t=aH`J5X0Z2Zh|-77K18?REa1mvU`CJBBbNVWa*+ock@zud^zVfLL@quy0sJ?EAIn(i8hY528c9T!88SQMJL z_y(3vw6ge2Ctm}G+2;4Z=Y2zF^G194@XdDPmDk$h=6yPfb!5r`KQwnj`V`uNo|A*s z(OU14O}`uBvHyN3{Tv>Sa^rFbs0wk$Ng7-XS7{*bb}nhUs$1T`!|<^l)-9U^9OU%+ 
zojZ|Dy(}+0ge&>KKsS_O=0u5nC0{t%lkYfTNQ5-}s6X;%KDfF&Sh{+$fv{^~QMejA z#s7X@MBcIt-`E>(?FxTfQKsB=<9ph9D=C9GJ9O%TT)5DsGrfz^*16Br^tk<0y05N`Qoh`I>?DSvXhh!uims9@*me7>D+dAcVaAR%s5cY&R%%Hx9<^!c6sa(;)iX{_M;snlarb1T;B%u8_q`MW#2}038?;{r2XYueTdF?zHXg?Y4d6Mh4eTI36-E&iC58u5*IiwwF$47sV$!KMf)e zN^fO6@&b_^h>2_ADMrPw(#-D4Ue z9~aYf;%{8_*}|dT;ANV?>-7x3B)xazijEpzdAq{0NalBI4HN?@`Z14+8_9qt#2(~TrRY=wT+bRcc|6T z;%Gm$ee*_O9Ut#!Gtj;J+sMg5J9zdXbv$p6UfXQH^z%Q`Zf;6z-~I=-`knkvV2O{% ze2U-bhtn4+wCHJ>ZU~5*${%_|&u*HQyILmP zH1=2A2R_0K?cwXd5H_vqNdUB0^=p-++E1BK2Ej|-2BwpI@~ML&`!4Mn-^w_bui=r% z5&S7%I^m-pI6pnkqWY63kJHzve{P@)7trOheEMYU&y%C0_Rc%+B<)AUi=>-JM~3-t|3-|Uyyt#5GYv{IME4vBnS*IPUs2e|>k zy!JsWD{F~2tqyAexkaZ!4{;X{^ZEw1G|Q3DZn~f!b27I+10uZ&cb)de*!5km?43*+ zV9P|-tgGV?^b9>SH5hnSTX6<{S)OI zADm~aZ?zmaFYpX^Vm@VzdtX=(e!-?5pf)=vjfe(z%E406Hi}Fl54urS!U}FaJJ%c)xVM{cvrRb0b(bPf)0JbBbB~<@eYrTz*5ay8EeiNFC*+ zZeP+4w#AYQ6B!Fv+bBY_OWKB2SDj_q<|Im*^!4F+Xg7mBumHU~&8cK;h-)KDdKH{$ zm$bOciT)mTNzpm#3m4p}hv9kd`O1+8eHOWP!Y7Ki`Y@^a(>ZZ0Ty=bO zL+^jo-#Hr8ehoN*0iMDwIyeDdyyMFy+S9U{G*chP=At}xg|zo{37_R{PYhUG@H^JW zq2~g;bbfZwmR9H!u|>ph0Uq-cV5jJreBzA$I-eha?;Rr?9PS2>-mP8)aR4jOZ1z-AMWMnX$R~H{@s0~ydO~h zN9@m0&*45={%Ff2%?S$AWxP~g$7kr^YrRu2uFIz!7$Tpe2>f0yN>5%LQ3EFc;F$G0y4C%q_QzlVeKOrb8Cs2j{Uo^*}vDCg_AspghDLD43#H z3q-YHR%iL*;n=Rqy>83OLwac#p0zEO2(Qz5?Dz61PH>w=->u$UZ?oIC{2(|<{oX%) z`}`U>`q>`GwM*zMHv^ovy=Tv`H%{9A)5q;^{J;N;_S?VtciSSco$pWwTlN9=#}zh! z`qVcswGG|0IERk&lQH<>74c@qd zFFgGfmN^I#8aJ-AC+%0T?N>)Rl}FdtqvEvx!UyQkl!xEB)X_HwFZv5<9=^wUszKRq z^ilNBwwKP$rDZ>lde@w2U=VKpVQ&xI4Y!}8B>GO|68i1#zSnj>+-?8Hzy2?_Kl`&k z8@}E7@O|Qd2@L1lu++gFxjy4MIn3Lxmlx0-+}wa8O`Qw8vc0JPq6^V!-@?`Ma~c?I zYx9WP9Q1O|ba8$o_47OW+O_9rej4Ho*wAOxmp(zeMCX{zdZ{c&vm7HyX6*v;_jCd`zxert$U6<`$z8GE zsHb$6Z+rt>I=fzA-DjHParI+==1Lu+PiZR`tkqSNnYAiUdjL@v`DLeb$ME2sH}h#% z^QOM)ap?=q)d1pd+p2QX{=|ugm6)4bY=?|#H<5ie1HJObXWN&5_NN2OmyMz$NGEQt z-wb}O@iDMse0zz1!PkwuW$Ut0W};7Z>QDU9tHc4t%!3vAlK{;cXb#z@MWW5iM**28Cc zu+or6Js=~K-|@ZS@>!}di%zyc(nDdWSal67G^EdJpNWDZzwz3#%7yKm_9a@lvYT|i zupV63>$1jssSkl6^B4tw!gn3z1)6%PFG(AGd)tH`sSi&{8a;%}>!hXib^bI9-lQMA zq7%-r|J41;LuhaK5(YTdDOGMAy_h5zGEX{xPGRLyOCyH*y$;Yubz*B#0Gt+Pz z_oVk3m-O9DeCBKIH)sC(|LL#%27tek&P5E#twId3Xh6RcTL|0PimU^-BKFzo6$tkN z+;JA@qk%!sLV}g*7D|AeO`T*oV-%2brsYzRC#D$9{MCTf$QmNa$2~-Z0TV6`IC*v) z?-smOB*t~7$uTy;tJ04nQ!K|utAV}&;9L?}&wb$LQB;xh;HM%+u&Hvl8z5gVrydJ; zT=>T4Nes{SqP?CY-N0jnSytg%bgczQ;dBKsH2@eertf32hX-YwTh+a2Om-)x@Ky(? 
zr|y$>8N%kk3VJB8=64_+`f7kntC1gKC+Y ziMw=>4nv8hoDN@jJ{P}?cGGX&Oe+n{*Xb5N4tzdZex4>_!|5FcA{i@gY5*F#q8L-W#?Be=tTidwV?mT>>ZM^b&Tfg~`tV_VbAVVQ}G^lS{E4>b)<`!4; zMvx0SDvHnxIXu5SO!|w{-WfU|l2*uRjv%qEJ8x?Ykuz0yqOoXL@cmX&22b#n9hL1E zcyuP}03A;#qFTIFVH)qi$oecTYdH9p2Syw?+D6tnN|+xfD&0AHd9IN{r$XBZw|F1i z7;s55-H8j#=pN~6IktId2V9S%7H1w)uANjA=dsGXc(2-V$WQ4RcUJ1`$X(oWQ`0#H zg*%axe%^94xNM(A(y!2NPI_aA5$)uaMxJ%av0#0`Wj$=;(-ZZk`q4DK-qIDAb@GA+ z75YfT$s?Zniw@8jI{Nl@Itg%f!PW(0`NiG4<0xTy%6R2(X3j;|GT0m(I|-#RTyUWS zy4W_BCyO7Yhi`yObJD1X!yw3)6Loex49avzdwcEO?|rkq|IWMZC%*ENZR_>Vsee87wICMG|b&)=Y}EMM0s?ed+NkAd98~s zyhp#2NjGUiz4Mk1XzuU(wq?OPI6P|mzNNPFn0i2e;^iGD+MS5IM2<96pFe+|Qwh#a zz>&P-<3MtW4*P=--fQo^{Q<^2#yR!34|KpH9T%6NGx)0~)2>|dn{PhHfaAFqK9;CE zZ4W=1Pa4?wq`mDgpp_HL4jQcE)hTk2W|(hlTQ}N+N3XPpk6vrrcW<|iyk$M#cf8kP ztj7R(S2hc{#-z{@}jEz{NMq zi(g6?Mux37mu_Z`x>|qRQF&*eFArATOBV{abRg|P+r0Ii@3!Cl-QUe3-Nxz$;U(v* zF2Esw!BgC-Vl|Mb3}sk3@yencxI5u#S?UJyoku^)&-VE#oA%S6d?AK5i@2A5TUaPL z6L#o}4U-dIz`>1H%94xkqaTcchS5mw=+78%K$}AbZ4Wo#oXX!hi9jQ>7Vd+y@ccvq zuyRy0cxd=r_iXe69$EmJ7X3m`DrP#{dIC)Qx(Jqk8oGtd5~o_tuej z=sj@EsWCuVSn^~JXtqFG!vhXn_YV$o)YWBVYXQF73T)#~DO3589vj3@EZO^#gfB%5|wntpgFP*c=6Mh_S;PnE& z6L$?a>AL+h?2p)zJ}Lw8^mx)2yy~-uy-_>obv<8XwF)+8>3UtksWJ>3^%jQD!ZVDv z31TJtF=+d99OYJ9QEuAR@=4n_^q4mG8kC}`;|dNJWy_yVXv+I;B=hYDM^g@N9O{w? z8F4aYHpiX6_wHNtN!U2Z&HDOg+d@}*)V!0V_V*6n-hco7_Uzfywz;|4Zr{0?GM#`@ z=PQHTH@7g`U6?sRR#(E;hx_m{G9a!_Py#7@U$nEItxG&DTU$!M@~pG}Ful(DAx8C# zZi&vvslU#(U!sSk6=9MS!l(<}08krCEvxu4;6v}quWlD7C!XB^V10ZRP45;NJqSGQ zE01kdulc67@v2@Yh=VunRT~bk+A{r%V>d zgeFcJyD>qV!lPnM?*dxkTlS)Lv@CMQ@1nHNzO7s$RZOW@R4mJUPP|#?Oyu+IF`AJx zl9MEK4h_Tqz{UQyUneFoax^cl%CJqa8%RFiZCYW9hj6@)c1<4z@6%GK01G0W^)(Iv zIn97FtgC89zs^tVq^Bn^8v?*vI+{;=%t=Z=;pAk3N0l39KX-a|h^=)Df4D)I-W#4S zd}^Xmz79R8Q~DqL<;uRyQRd_`eRPfVvfcXlt@5kOB%FCGd_reW_QKV|PxZgjQ9E9G zr*^i3^au^AkI-KnL|VWGes|I@8_aY=U)sXx6+g|V-W;2qrKavWe}xadtFzrKPn|od zKX8MOZ8h*RZO1KgOh(%^$tUPlzPhMm?`N!Wa6$)Mz*ATx2_L6z=(MN8&+x9g&U#3D zX<22KU%pk&4Ra%9#cL~z)4;FSgOY96u`%6E+IjQR??dB)Bk65N?GRn@f#ZCrMI4d| zn4*_*X8Df)b>>NW(qzo!ro#nw;zIaOJ)#qLb&0mPV;pr#PIUmU;z|6})iQlnw`6gU zaf-*N&dt~O%{P=h!DxATopP&BaqR45og1-i7wwPvwhW!IRSpkG=R*Hls-S&zc6iD- z;#uhC5&ghDgwky6)lOIG^CusU)w5Dfk`52|fdU zhdatMz5YqQUS5CJdYicL+%>Uc9`}AY3k}#6&RygU)#xAe#K8_ce%NL`_TTvd`5hUC z%gF;gV3x`Om^zs8k_&3iTMauheT+qmX8Is?p?=B+U*Ghje!S2_TJWe7TCH30Q{eGzx#c>2z*9JF^64!5(L_|d^C_fJZwe!}2=xza zan{~29euqnY0OIeoCKb@-rrz1xF9Ki0Vc3T$~W9oKKT`y3-v@63s)4(_1?qyw?t&?6%+d+h1${*?;<QZpPPkAGV37rLJ$@Xt!?N zVqCga^__(uI`7*1oM`8$yEE4aP3bc-uAW;aOnMi8T+;U1xPI0y?FoG>Tbo%Pk@xbM zUhO#2+g{=zA5?z(MPH0Oh7N?2BpVJ$lQs+-+q3v1Jdk@m8|VDj#VNRl_sB6~9M-#KObB9G%6 z{q=$;EU1zn`7EkdL3s7Ro7Cp|AC%zR<`Md!(7| zY~P*@jR2SC+OwYY0dYc0<3lG2MO(-h%9bx75llI>xfP}EE^IC=uBJSlddxW_>mHp3 zFL)PkUmCPt&Lb6Wh_Uld*it&6CB1gKgwUoEUtv3rwLIxTj=(ubkH#U-DKD}@n?u*| z3AWqp;!;~%-)JZB%#VHLPqq7RJc7Ur?TGQSbw7u1wATv%;txwGU08Lp#-XPd^oafp zrHkU@SbgncX{t<`)=i4)V&x_CvgFGKfOGXHc>;?&>4&TG%VklU=YNm2iO>`Wv zc|sNpEghu(O)3(b#{95lXp&88#N||pDK%(d0!#kPV>EW07zIr*Tqe27qJqBe*d}a{fQ0Pg&GYeib&jRXc+uCo={r7+QC%<}ee)iB~5Zobl<_-x8&0;?Vc+}bS4uo?@NEEN};BzhPa-|G^ zsxa?q1Qij{WdIUKgVG#PjA9yL?yis{){eA-a1t9BDaSO$482bHg-M!TIg_Qrp@Q%W zuE_&mP-f)Ebh3`lj>u9caD5h*_|O8rX%HddVc==QCO?lAX2t=r7-)p|*eX1gF98_Y zf>=*)xH6a{M3MnNUgL&hwO$GAbkR+4z)i{>`J$A~-{X2k!6J8qtpA|bq+BuQIqhhe zMq{E>9C&~u9ZO(w;FtV9M!ENJ>l1&KHwo_wKxZ>2t5MdIx)pzz6YLV7!mEt9m*FLI z;;V`ko_xt$`OB}f4Jv(I6gPm7_hf* z-)Z+AyxC?M{2!of4);&n`)|MB-hJy^?frMZ)eiQbv~xRHCj}Ok+REm3I^yh()If#K zP9}L=$mrxoyX8(<9xbTRn~665qSPbLUHkJRzB%FwWoB6BD?wD4DwXl}mUM~|p)I3q zNUpIU`~{4J`cwer*myWnMn}ZBRiB_}oyZ#e;RK{*dmMF@2Y$q>XFMW2iV=K;?*vIE 
zP*AM&E##A~;Ej?MpBQE2k0A@qhcmzRtp@UYR!)NYeU)SM+j$0!F~Gn{1G4JkZp{^F zQi?tF;_gxC;_S1XrpLyKdx4hs>u&k-Mi&05pE|Snsg$r2R@QA?ZlI7((k2sr(pjO0 zg5uzF^ce9v+tEK;x!%620jSlsyxMSJSfw_&xx-0}Z}4c+U=zbn*c><``V?&k?@TM(bK!zq62lKt{$&r z|59BVW!ujHQ~q>O7`(1BXw9VBGKTBsW}AUm*4I|s?K^j}Ky>@=?RNYA?e^f|{r2F|gLd!! zz4qFpH`<-MH^YPCmI(_;WM3$yq|eEj3&jkWY!_V7y^M4FN@?uI4cpZ|!VL~4_1JwE znOwwKVBnI0*wRXS^|eRsi$Ckoh06&JfMkx5A2;sKxKdwEVH)V?i)1ukVb-{>YC&5}0MMHYzlJFjju zsiWshhjau8Q#X}gXBjcLkbf8HK6vkg$g#)Asas5|?4c;atEmr<#iiyiBhUd|WE;>( zSn3sC_L-k-kP%;Scak0gJv(S#m%}D-CVrWsV{sh4&bHtEK9xw zl&`KJrNuIaYL0~jFHJ}JKJBKVivCzx zsfhwl4NwNC9r$rU&w1#zja95nd+iB35DPOhS7IrA0m;*IO^FQn7;bd&qY0Kt-$yLlA|<6h36iEa_7 z`glphkuK(DcT?VptE0Mb-hp9V%Ds%wOLx*w>TOWy=iRvO>GYe>@dbLm7?(Z)|9Gy+ zE`5fZ+6T&@&4Y39?%YCH=p;h1%Y1|`wxhv4eL(01wH%!akJoX}T<}!l$>8ZGR>M9% zZ)=BOk(drxH?86-xx@o55 z0h=rftp0(MLKA15sB7dX+K^{2@ag&^wFD1k#5CvmOaZ#yi6N22WfBoo)suOSd`Tu? zN>467gJfORKgwYGNzw%;%C~;nIKv+E3R$Iz`WQW(`%xI7hE!@)S6>i8hk_QuP8io3aEOPw|6!O>f@N z5JFhD5CA96(%38VA1zcmNc}n{NKKJr{2;0Jfk{Jm7YwOK?6JTvnul)-Ea`$>(t9sg z_c0Etvc7;p1~5mCsW+VZl8o77;IfwGTXj!AXhOVx)J2S{5g0KW!O5OXGb3KF9*< zqO#&5y!M`bn*EXeQuXWU`|LBh({!EgIVmxjZOd9Ta1BA3TP5hk+h$?l?RkW$R zftiIfVQN+QEZu}lnq=Bj(5?+PsWUeynQ%ac zpGMh+X1>8|#2XHM2i$}^b78v-xL(E;z?5w&ujG+l#L4Hz*~CTX!q03a6UFzn;R<~K zPicqR(T%ZSWEPqT_Z)uIflcIqk^@5anKG+IP&C4Tl0&{ zp}lQb`qr|w7a8L&FXv6UQ#Y=t;WsG~j6sGvS_oO~g@_&re zs1nbV&2uj3VYUGy@~0aCFWo2UdOarfE&5n~kNJ2fzAw0!pF94eUeoo{rOs3L(5FA^ znZWP&m!BuV#(mJMKYs+jm)%S8lk~CdxvSHmiQbM=LGJL!{UyAv?eOdNQ*Rhw`m5xw zCh%=DWxtQt2#c(q9dX}B6~pHjCzN5I%Gk!fO8h9IJ_W<&JQ6bPLP+{b@&~@7Lhjbb zXLV*yvI%On`9lW2ESIoC=8cl!coYT8s-k9k-?c(rTue zFSHaES1snuqbqHvd32N;05&kl{p7%Eo8P?Eu9jAzHJP(OW#3Kxv2`&La$LWsFdRJH zX>a|}TkXI2Ykw_xT^#vh&Pl{Kr{iI(I{#HK-(Z8I2h(1}Yx_E0JvgX24>!W)hPIQC2G|JZHlbmx71J2Uf!;ORnnHdLz1t(*23_$H5cPfcbVpSMi4%d`RQ?_5{W zW^=ww+fQ50F;Va<8_;x?YhR+hr$3*nkRDg|d+K`RAmP>DjfKl>syh;|qs+Vcmc4yh z+5gtfAK=H{ryn~;_IN%+2CZM_HB)b71)Q`eE!R&BWGXgl}IQi`FwM*TAo=UgLare_*t6YAvbAd2%&e9fF#&Yne?R|E1l<}52DYm%tL*yBAQu>uE z>nCm1llm>+_&M;j-K^DkxsFnjPT8{N)+7AupZCy#HjD~C%=hG}+DN=CJ-1-3QVHmDQ2#jmNT9i*kz&lE%WQ_Vo&1hwc_9-No7J+8@}@0A9Q- zpNGV?k|pVs`Vwh96eH;*oP-jV3%|UO&2Q=obw^FjdV{?N>4 zYFFh7qvrv(FbbY&%~tI=(v_a8x>VX!qWD0#y?sTec(Po$k3^xl&t|}xIyiOtDSKG{ zue|BoT(=Ol`Ux(z!o$zU zq&`XA$HK^8N1xzp`UR70!lNk2o|6XB�qqGs+Vmv*B0PK$}8sBjc<`;b^;vP232h zP~)ZDxKGm-_s-m3{)Hd?>iNax!_iPqHh4L)U79H^}#xt$xDl}CW)0QKGNMXd-5(YS>lF4U0X%j~BFoVP>LMI+vGr{VB z$Ulgm%G)r4v5b@h9I{%zcvN|UB1NK2Cu|d$h0MZ8ZFC6?j)hy*QRCCQDTgbXi*pG@ zDG}1-5}ud2Dn8G-8&+v5TxXgdt)uM1S-3A=I`IpRDW{(KO}@IAePC`pses*=Am*Qou)AQ<{Fj^YS?f$qRqk zT<)4B|DbK<)x zbt7-Gx$Woe_djTFzx}QD-EaS4`_3Q!L3{t5Z?xT=ciS39lJzj%D%b4NG9B(3svlaw zs5Z~c3e@sDXjatjwwd-34qwEmBqI9>>j2%XH@&d%$** zMt+rCwu50N#0<)0jFZ8ELp|k#i{!0)m1kRET*Ct+|1hlmY{r6b#zyCywZr5692K7L zph-7Cx|o}VGUzEsD339Mtb--m(eO7nGC}&;sk5w0|nD&p^+ufUQWQ}jT!tJ<`ycSIe|NIw6D6g?s&8oEGxz8`ytNpU6og~5L~j-><<4}%G2Yzj9e0AOQ+@k23me4y9Yhz; z&K%fN7v~&;r2AJe29cKSQ*vcXUOTzW5Z^6Un zk|$yuURaKsL&P0^RzIeE^$>bY9SC7;GiiP7h76uXEczR4)_&G;jg1;1& z9v(618wDQIczk-4v5fu+e6+B zRk>)B(n};Ed7A1YuBV+`y5Tpkx@0QcGCq=SI;_gOewN<&C;5yYaU*P8y6gOrdi4Ts z;BGx$b|ssp*HiH84SlY}jZ7s&M!k41zfN-be4S_DQ?BBso$0Qx>%65)YcOFx!*E9H z_i_01pyO_FqmsEve;nzO)O-HQFXEP&{yJ?hzs|=_Q05}j@b_7GQ}&?W~)Jt~cHTza~Qd=Ltw=PQRbP>0{j%i9>K%WBL)HlXH6-HUw z3H1n^RG8-)MC+&2Db}MPVh|VoC=+(=mQJP9iVsS!(Dvd)y!vlAQ$=r(s`6JJ zZ6I@Id?IZK3$k%jV?V3T-|$<2c~elpRvsTKzvbBW+Gx-=?Q6Ngq=)w~xiWlGc5zL> zn#TGPM>x=`zm`{ey5vt9>0fO?tqVr#s9i&wOkkT|T!;(m6`s$1@TX0ZHpkbi)6g4IlB#*Lf`5Bwg=JH$KOen;wZP6!U79|=*YP%Z`kEjIfJFHw> zYS8`igOKD8V+KtKGoNXti?Q8SoGc6^9%s^&FLiquc>+dfIxh2oJpmnV5=deBqx|Nx 
zy%Xo7Zw-z5C4G+&o^}*>+c;r3;Rh|@bQ}h6YxgB9w2QqN{veHVZ=Pf%jbYJQ@Bsd* zi*{=1>G6iBZyd@u+E$LWV-smp$j1o-!UPt0&~6#i5iU+cuFugA(MLQNj!?upm)8=* zAHvlR5l3wk=~QpaB?%WW7~7>@(9KWV)tI+zm!JW@XBYK3BXyg2x{pI0l|^#XQ-*wC zYigqjTszhc582!TogAMz_KW^Rf6p>jaARuZ75(cePE-LNF8+2bqWrn=Bi?Hp8(HkP zHK|=2p9Z|0Xg8F`}1c ziqa`_(p>`C&hm#`V!a_q+Nns|T|M|$uHSPTVbz}3abdtubn+Sn^(USF>1*17XM)r4 zxHN2CFNAd$xC5V$af$Bf&EH|z!d?|dD|`gD;beXRy&gBIuP}7qr~EimOikL0bY3G}f6hO!$ltYbxlT7z73}!puP5cw zfDVsw!AMd;>H<>fx1WJe(FSd1vBu)z+bhO}#|PL-`)#2e!tNg1eFCpvc)lnZvER5^ z(`+iUf25yuAQg-DoL4^4h<$!~a@ssC;PCLMZ>U1oVjH>u zyRo*~R@PQ1(>YI1rrK}M_Z~OD+io>I9YH%%7*;Fa`j;Rn2LNC|pTE?(kN%FzQXk4a z^>dN4SVi_5t>Hw)0?2eFGgx@BPQ^XMgsmD2ILy8rt@@nd1ZN=^P2bBdgl>zV$mcw8@uHJ~HY( zbLy1i{H|jdZ4`@i-a|%LhHLYHPI&+>&nZjAAkX{f939WGQNT%?z>SA8j`r6Q{jg(4 z=YY&pwgvJ4ZW-4oD*DOqOLMESDA1_ zi|yhjLd!8}=G3~55l`>h9FcRLimh(Pl!O}&6`K|0a~cOGpexJF6@ zmPy9=Hg)E=o~56c`iK~R5@&l@n04~}v zUpj%W&mj?iQbpPg8d|FJuIhnB_@ho}waOsr#Q5GgcEs7E6Bt;gzm2=uJlSJZ4xMiyBTqG;F;Ok@{(5j599C;Fjgpyx& z;DIsb?n@U!1mDuQp{HdJqd0~VboSXYj*l34xM)nix>I)Wla|6zcb4D#z%g$- z+Ooj(xjJ1_TMoZGt0xUzfbLG9&On-x4x9K7=OCiTo4gS|to9mI$2Y=4&lgl}N_W^uLM?7VMtx-jA(zI3v0 z>z|w)M(0{y)f>9V$CgQ59i;horh}631Ah+4#8)1&&6OuW0>e(VcsTfhyT`trW`I_- zbntO>?6+|lOyaLiZN!sSI1bP}x^sVz0jF<~LW6s++;4Au=8bm$l?QEObAz&7psF3Y z8X+|x=U;rmO=>wvapQzC=%SPpdP}R)Vy>-kY_!|RvTywRrvIIL_uAI>EmVZxBt36? zhX>?wlLbvSj2PR)fsOhq|8AIIumHTYFR~{Z(j;=Mj8)dO@8F4S0gd5tl$5-ipNBxJMakL# z;{Y}T1@lWc2<1IT&USp&A;Hx;QRmcGd8e-7eJZQ0WC4zJnWUB%_zjypiW{mH@!!9-mCpg(zP(9W z>U@DN&g6#^*McuxdV|P3c)D(~y}c!p(7JsJCye3b7c+YG@Clshbh;6tuGEd^;)i1SQYPfN%CFt>^G*_z|XwenBrSugR0}JvNGR=_e?ZcX>5- zj=a_T1zqVM3d2scf0D>c=ou&7PLY9pXA~r|F)w_m969+}{Q_baT+2QnW#qDKDz8l5 z(PvnXEZz}jnOOxQP`&B1dcaAhz!oSj_7Je_)5l_9ebxpUK2zt^W;AI5H-(yFIP}37 z_~n`YQeB{5dqa9f{?QYege0AIn))f5p9#wXS?n=+3>ca0vyQeyO`>TR3e(HF#qt1W z|1u#)Uh7bHWQ9{GSN&27xQT1w=>)D1*D%Fd`WR+?Y-@>E@o7il3)lQkq=~Pvd5#TW zJ<@LCN(4??>aTLRlTHvLpW(w9b}c{VO-OI2@|!%*@Xx@#;_F^@@LoFKDBHc-UOn!_ zY_~P~Ri__eS7C=}8^*r0zaWq4b7Z>uJaEdc92;;8CgH-tpGn*F6;7N}jwe0{gS??* z??=@0wrzL;xzVK^sfTy_Ul(RQ8a5}>z(1MrOt^iVX=;+P(l3y19vieKFl$4aHmMUJ zRgU(CWQ!!xj zVVFMU7%9I&C;1?6+xPh427go@{61bsSK67WzP{J9^7VQn8F{l`f9|INqkOqxNc&e* zhFzvhdA%I{(>MV%^a!jJV~fhKUZ$(?Y{W|&5T+q#i3If|v6b{FS5-e!pqq@Z%R1?_ zdFEq72W|>2cMeaQFXiKM=idbokFj=xh_Fm2j4G#WkK#{dOqhh@mxryl>Q3F@!h#wg zn~p6uiP7`XccigT81`otJ+P2Og?i4w(*y=ywF|nAOa+nhv_E0gY&sWt{L%C2AAmL9n3i1=C6j>v~QDOu;Gm8ka^B3*ERIqkHjL2o-*9ztqigKHd$`$={QI zvKt9rzIXRAcoxu4VZWFR8kC!Yt9OF`%kkIgQuL%UBRxEj`==;>+D+;)+{i!RkNB6< z4Sc5Q&ox2wrFHMKC)cu23 zue8VHS1x7NF!ha#j;DwF?dtpxFutjL+GdfT^sRu+DLl?i@vm-(-DDvt$26qv_JJGn zy(`nD9!l0@TiIvQUnasj@;?^D`LFcioifmGRd~{+em?7x5ctufdXpeSo!vHhwqp4i4QAbTEMrgzC(bL6J$<@7XFHW&A3Akqh}#6mX-HGbjR=j-cjc5N$f|2O9)7FP16ukBG|Z)`a9tvsS#{`7;#?d0&d z{jFd7Z`$Ae+rOEOQSZO=B%el6zoqTYud?y{oVIYRq;2lp*TL~oJ0spj5A|lopwe$s8b_IDvzYC z?KgZ|zFt%azu0$4uh7{31FWQ}k7O1{`zkm6TCDvzKSXmkJ-3wC%O+`HrJaK-;bzo7 z`X~8b`^~;2{Rwp-PMi!Y`%qgCJh>d3u;dAG={Hz!Cv)&n-Q2wwn6W;Ak%aqDP z2E@;?RB*B?_NU+$c@RZ$rylW3AIi@7fb_0Kn09W?Pm3+I4lF+*1D0WgJQQ89Blu|oty@8kjBK(Jx-4_9H)pIj+h{bsvakZl-#!yjJrlDCy7;3%)e zO*Zp-GgnWqD|=}aOE}3OHDoM49=N4!eFQ2Wm0uk^V5l1{Kz#YNIKj#9_y_K(qJ)ut zoxd{Q#a(Bzk})pmOl~y?@xqlyO|Kk@m3SIQULSlGw8Gh4Xkw&E2+l@0!YHv4L%#}iz^R5#tBV%-R zQ|H&gvw@-h6{SK0S9sfx*PRIHY1yeHuKLKLpM;t`Crvp*qQ9(y&b~NhQcr*;q}r3( z1b^v4J<*5CSgmWEJ3r{uAflh5e5745$eX!p;U7P;^ez@vu>mV|m6u13Jn@ z1M%o~fH-tsuM;hyAMrUY$3B2ElRo7Zkno*&2`jLJEtm0CXL(;lB10Wl-JqWrpx>GM zOF#3aub!WuJ#-*r;UR(r=d$CV!Rf^1I(JrCsB)5u2+@f(pHqg_^(JtTNFK$R61bi z*)$2}UUmR)y*6S&!``f7B&Bm0p`U3rTG|jf*ICd5XIW+1_^g1eE zReQ=m>OZfL%E4@>iRJj#Z|*8;{C09R8KnM{j{0lzOFGrWx1FbPbr)ReGo;DH(wn=w 
zQ&r>QXa-)Pjl2m~&_6pkF+PiDOU5z*OM7SHpFGx8_>n^mlccpRM?LhL!2AU};TFu@ zdN}bl3}#!D-YA?4i5RNt>1JDMx6?XRq9K!-E{m39vPG!SjK+7v+st{mG> zdLQeJG-*g0_`=j>7(ver27JqHxovH3wnvX1wFh_a=Ubn_&vpc#>{xfBh;MR(fZwR| z&35@{$;ouuMOxgxd$&D!FTsjAP_&UJTCQ^ss~A^NbcnS963#sKd4UQzf&Uew6ZKtxZ4rz%Vs z!uAqe7NUXUq`!kz2hlaCDO`nN`?^^Ct#5vlGB1&B>WM5m7_yBpxvXlO=mZbzM42&= z`K#T{gz-EQTfZc>;skmNUDUyPa03rr;a&1E`VV*N)@BDDTNX!vGz;WRX(@8e6Kri?##3$>gf*n^Yg>(VBgyC6aZv@gMsEed`2Gyg4D^426CyC zlit-%nheEh;ODb#_-P#NQvBNUB|mTs%iVav@iW>hu&1x#%%d}*SMfu+k&gSP%CCUd zQ<8s@BPkf-@fzjztJ^QZeF7tu==X7zQ+Foq1l}amr*pAiM5tfs>qc9Z%VbbM7Y`SL z+@}-ieD2?{j&R7IJyF_3{gO7S+Q;!6J#RSa7knn~;9d1!`e!FC_)~tw!qYO61v&Qn z$%?n%-&~kqp?|4Ok=j0ayAwI|yQV8keqXT`X)M#XHSKpJKe*Tw$_|O_qm>(FS=_77 zF1;5$3NBgymlPsG-Kr0-0j&BhwwVfuH0l@a5amwYrF|l8qyv)9Z^@=|u0&Lx(iNdW z$|MJEAPxv-*D16=mo|;Zh|UBYncC}A6M*I0#miX zw-UBxm7NJ3=$O9Ul;%@*q+2JD?N|J3+m80Na#C9v5U#G$4;+$S@+9qOgGw)adAp;R zwDL$cIgwU1=487zl6pZGy{$f@p2}=^(uD`>2JFDKOz|irN$U~9+WTd5=EN&cNfCDX zjv8-e!Vy}g2EdnRw5i0$w+^-Qh3B)hN^^j)x)fH9uC+4tnXbZRq|ZGEtpAQ@8p%by}{}}Y@#BZKe_u? z&*PeQJk$B6ob^h+?o5_EmIti4 z0kgvmsF8m9Zrpgr4V#7xp;7e6RQxBpPUqe(FJ6Z0rtb0jGp=dZ%d`I0De0%vP2Z=( zb(V*l1b-Ubb;`vx&fzJ;@=arHis@2>*`E^O^C+k8p`C)4dnG9ykZ7V@NTeJW)SO3v zf0W%T7h6yE+xhVxO?XUmoRSwDki+!(z<1NVI4P&;e*rqjm{Dbxeg(NRMHb<}VdP6a z)M?rSxeTwkTIdi4$>c5SFDz6*{KjU!#eR-{uQt!yHzI?u%&1qsbiBwZo_0;wjCw-MBLE zm*n2HF9~GJwhkJ+HWrQMzr1)T#TlQgssi=C@2Nqrs1$L0j~DdU)&Alk<)AVxvJ zqfaq?#`J_~^CvFUAU?QjpI1Fk>`>ZQiR6!(hH@?JMfAxOo^Ym$9atUOq}yi;z2lZ!g4$W>qXu(U!$Cm z-?5-`CHCoV`deP{L?8Nb!&laTw?e<}ykX%c12~VJxv`YFQ2Cb^+S2M$TU-Us>MGAm zZFzM$FT5?UEVmUdWyUdY$pG?*4Y448c6U%C8icl@YQ$@U$y&BkZJ?p--#@LlWDPLscVflfZSx|nTu@4p&7ARqco zcUz+73FRHj5*`3(_h9c$ohwLdVw=IH(Ykt)K-uM4c~Oo{D=^C>iGJ}gf7u@LIlQ7A ziLXfsFB`iTi9e8%-W%}I4Z=(L;xXgPnBw474;bXxiH{!@fKBa&0RVuk>cxk z1PAUeEL$y4mCcw;9E>9YPh33F=E6$t4G+Hj(+)bz4D8UE3w&x*0f~8z?$fJAh;EQh z;Ho|{m2wKG;tgS{Y;eCcV zjQCrr)B{?Gi@a%5STF0~_`&`mIw@I&*l{tP_=_J88Pn1ZH3mdpGw)ln>*+Jn&@|c* zo-9_U;zAQo8?vv{W=NlBol5s#Fb;CgwKjga=^wv%kUB21)Lk`A$Youxoqo$uGfVI5 zxZ=^ebQL%A&fH)6=`Z#Z04%tKEDBpfkk(GsvXk7n4%mTU0U|Omdto3n8haGq+l;~s zIi1hwQkue>@NUP$*KLiZm39ky7AdhjfhPp-=;;&1Y!0S)_R`)Bcnc!kh z@Dm>~DS??Hm=}2H5=L5?F-I-&Z=)pXRC?W1?)Y>)N#Fz&KUS%pi$*alWxj+}hPn%9 z+{>jsOeZcqc;tz*9wT{(!+**%K;-eumwB#EW|CefZ12)==}BQQNGIFmtiG~(`CP~#2!fv*EjcVqg7 zQ>6uE_#y*B=pvqDfiQ2gqx>B}SWYd{nMPK1K>+1$-DnGq@>2#24ob2q0l)fE7k!zI zGLH!_e2Tu(OlR4Z)-d&Cm18>FD{T-Xv=%>{fU2V?3y%d1P}@RdS3Gs*Plv-@7N|%Y3I9!`W|LiiL%p93Qn~((B3?mlE}N>Kn(F%fKj$_Z_#Y1AgLc6ZApV5#78VLxPJsPi=>xM~7ov}?&xjC9*2&&1h2*1y=R9N`4=UxAiea#=he+gzWJ zQ+=ztOW8snog{4{PvDgo`A^v<{j69|v@@?IXtHqL% zNoiB@r5|$%7f^x8QyGIb$jk38)nO6^KFu5_fZds&3pC&e%(y#o-_ zX<%lei8||Q^1}Rn^U$M|9n3lb4({i^HBoZl?q21M+!jHbSq`+g)R+|ik~4Kg8Jopl z%Y8GNaxL$PxBBzytoX@|2FjY~fJW$G0b_z&+HK$1$XnGb4tgnfYip(5yT1vZ=WTV_ zI=NA&`a@;PiKZCr5=>%sXFbc)Z#<8ycou(e$GVcL-=*Iai~&SqOuG^M5!d;C;$F_* z@1H7vPvouR{gKNXAPe6Y%dW6%`(@m#&M)Ixt_B^~?vv8#3Qk=UH|;g9*Eev8OEnVK z;ZDjkf~%Bp{IW<*BqtNjfbJb0-p~Oy;5I#nr|9b99bxJow2MZ3X4n2@ZK%m|5JhTx8KVjO@dU-_-;5ALt`D zV*xKRMta};NM4#rnYTXb!O|n#dta;mlke4u;%Y(KB>4sr#|9Oe9X*$Ce($WzL5?DXCG(ZV6){;x)W@h(xJmfEIsbY3))qN z`^HEH%}%;ek;uKU_|pdT?&PB`?S>Oi`#t+-C-TH=GAW=Y?X+`p!^tfB56dW9smiEJ zI!Wu&wYGy{>MM1aE@j&%qDyL#y!v?nYPrXDG=F&6V1IC_2l19W?J8VP)wlRrH}d87 zf*bgjD?TJN<;n)$D-F2!iDMo@7wa-?z`WVUbA?rV)WpNsOsefwX{6PN_cSIaP<^hn zmB;bL$+3QUx$Ic)!yc}5_03v!ec;jVnPdnag?H7%Fz%KeJmFXG+U>pXGGA?E=vVSm z&%hxcyjpqX1;Qkk{0oh9>I8Z0w=C1iHQ50Q@zdKP8N659Pn=ByolQ1z_|pzqz|x-^ zt`ZGC2)rrhgi+}%0h$gDe=$kHeZh4ep>C6MapC9iJx+nOem$#cgj;wdQ^MUS6u#$5 zykQeuyT3mwXGN>)ceB$#aQ4 zDsA}Z(fSLU_@piP7w9fVM3GhXj65bj^=(^V@EqMk?ok){N; 
z2{?<;eBj#eAHjXNk>2~bu5TZK^AXn>b>coL-OFxLKu66V!d%TA z{>i1A_D{QBUi#I#%2qG=&e(@%seF|&Y}r(F4*6u@+3`WUIz51GPTHLILG*FyL#+<` z3uRe-kQPiE12Q@YnKvEJYA@TrY!Ca*ZVRDX>g{=SL-Z0jgm13vBA)5*($@kvHdnz5 zYXv?(z*ZPdA3Gt0i%?dW7)C+r1!nI%i@pT)jzwHY=T&8B&8QdsQ*Tw9N-MK4#O4P% zOP+!;(xhIdvp$ANKc2Gy8HE8{qOM%57eB*zwwmg;=u6UMBiGz=n_pgTvwm)CiEFhM z|1WGW$F%eHc{rd1=PNf10ONT7s9{L7qoafN|N6iEKil)CkK6jnN?wN0E)@TR-2;+< z8-LQ+(+;!Q6^11fnK@* z8G3M8Q3&X<<<3*()IG|O{_=-5iaNVCjM%*9*A4+AH=@}1@{{(9I$55R{7Ekk#M^e1 z2GAE)uxh<^Z5XtQ%C&1{hu*}IFq(Z*aSbF zw6O|lEBTi2Y=j}s{>#reN+s=t@G6((I=5lp?OmBEnGg_sqy0y^e3C$04z|#a6*o`M zaW30>u7`-LV(kFyNe^d^+p`-@JD*&;@^= z$0h+!1u~x-1-!(|(-D@JDt}&vqa5pDxnxTlD&tba`Bv#5o#nA?1c(l{o#&useG<>H zuH(7WK9&eQ%n$KOw^&Buq&^q#=vGRxjnyMLteh6tZdG+Ry3uF=6eU9fNl&!@ zv`I=4_IK#-qzHw+fJ0#iElc4b!)A~Qn?cJY!2tw82ownr)m>fV9nO62IewnsFY`U` zy;T?8l=GbZ?%bIx*IK!9<;s=0bMFm4?V&oEKH?eO^X zbD&GgIct~l>Mz%ZtO1ka7-m1UwM7y+ahZpCQb!IMQY=|JR(AYpbL1m7#Xdzpx6Xhi z#NQlaE~!x);OzTzz7YFIJ7kR1PS~Gs+~(ew zbL!eY=iSonLmkS)y1bpe=Ayj-N&)O1`Qg%s2~Y{Sbv`i;B;F zqrLL;V#*=+-$`=AMm7NW7x@Y&Hp_vF%dCaSk$j0W)C!Y&t{u{hz7Y?efr`kdTmR`_ z`OTj~;7Kyc$znACUZIAtz#u({OH`N$z>y#|k)YRRDKLq8g82bI@lGcuOlA4TxM>3? z`7~SbWG4^86Ni(?+?+5EaGN4^CRPQ9CXJe)5*P}Q5ZurcWtG7^4tTmC$cTDFnD;>En; zTJb@MbW&a}?(%Y$ztNHGWvQPV4EVET+Rbvdg(&({SZT?Iatj>k26cjTzhQ33jT&$b z>4mQC**3TRg%UI6Lp>7yF0l;BSs=z8FR(-}TL z?V_LM9B6g$G4v}<;Zdc*3< zn@lbqyLi%9qKeyvNzdNoI$+|lcSt?`$MHEhIAYG>uH(Y#uxK^iO_o=vg2(=8T!+S2 zR$urD00yB8FK!Eu~d@#dxs?R{~6itfD#F6FB}NK-i*@>(<6FPl!(O?k6_ z$s%!`3VD+M%G8@Bluz5xUf@%{>PZ%~fwAUgxoz(2uQKS*-ot>MNg#E!`pRx+$0zf3 zIP9nDG>*IuJ-UQobJO$w@%`n|TaT8eze9GQQh4$4vby(fA z?#kAi&<1&{jc}rRyPTmT|MZ{!o-8a69^RwvqCuNQWNnx(IPLC z8EIj2fY0Pxh}p0BbFtTS7dNt*NIjc!5gz=pSCNTnI(oy#(Fda4W%U!*6}LB1?|aI9Mb*DQNjJezz6)Tm9nDvN%UAPFGj1^53xzcK??Q_GTffsjWnaw`SUK#A zs_PbduFNP3oQsa>&7_l!i>$AN&w@YDZ~g0A?1w!U99?|YN65mCn+E7aSrEDCrp)?` z((-I+?T~$>Y4%y;?X!h-{B|RtHhXiccFB5Z^X%8+H6D9uAET6Ivk0lp0bZMseu`nx zT;%gU^iAc^v{X3oK!)$o2dCqwHrGuL+C}+|KG8RxT^n@~r&UQC zyK#u|9z2$DkxMv(^>6`Dx{hC}L4B9X!8TUD!7WbGt%t=OyZwHrNHVc4O4_71yQVy` zT#37=+4+z~I71>n6!djIDo15f83`#^7D+fFnzUJEV4Td*SOuLJ`OY9|n}G2aeyj>f zv)&<25Ow2s?J7DCZ&ub9?s$-E#`G-SjUe5Rd0(F6la5a{*?#eXJkYajRLU7c2UL*GoxrfsM<3}kI(Ma1OxC4>FmEW5V#7NK@wVLQKdzcw`oMQq+TylU#KuboOwL?>I5?&@qFx-F=H!le!t@>hUg zB}}*#p&mo7p~ty&rkua)Zw6!X(D_&c=YgkT#@7$aco;Xd9=|+5d+^q>qaNM>iCjlW z_Pm}n*25^Tegs!ceB?_F>=ZlQm3iu5S_0kJSH(As+o|j!oXltj*D8 zN~=}!dfMRT%}nhNtV_=bAve@_y0O|IZpVSn&y3qgw42g)ez+AITC z``96D)#lz22JT?F-8lqKzZSnvj`W$3f%9Cy`;AnV8|T5;$j?6gv*o}1&;QHi$eWe> z+j*eF@9=AzZ7+RnKbxWqVLLK8^=W>55#IoMb){dq1=&5$exeh8&=>9LK56<*-u&}( zl5YI!jZ^29^csc~{U+Z4p!{?hLDM-xXj9H38*BQLMcIm<)W zOzXRK!@qsTIz{K?U44npNEbS!$(=W=v*loaXLXLJb zw;sK>?BUyNZS5gL`V=ASaFmVCawvRR(k4gl0)!9q zbge?1qp*B?GM_kNEWhSMAiN4sT6jb+`?=TVY16mI7~uTO%`ixF*@g$-6lI;@lqx#! 
zAx_&`z9t|)q$%Ipi#e8=uI8yT_8<8XJP#O=ADe-^152=6H~%OJWyr5KfDRXV4jW@L zYs+e*eaMKKKcJNvr`BQgmM>(7i%WLM;%-Ayj?jfD%inPT!p%OS|Kz90;Ys?4S>^+zpt2icqzjz|lOWgldJSB; z@JdSjnpPtIPU0R-5C2gF!z>@BlHGFY9P(j0f8rLh6)=Bx$fk+EiqOiJx2Aiv$9diS z;^6QV?wmEc8q2%-sF0THbu1VK=4Fh65GD&IpXLn!YiG>e9U(LX*+ER6a+&kw$B@_G z06%MKSkG^0x*=_0y++-`vj%3B-)mxx8`Z#?VtXdfAP(V)_GiUI98z!_R*NE5T6hso zBW?1ir`IdmA_`9h@;728eLBy|NA+kqJO6CI;<27ZWZc5z>;%TE;zLaRR(i>h3-l2afMQByTsn z6`!AtXT1@RbITy^ch_~4 zP*^`v+bRUC1+pf)WHQuO-E9PLtSuE zD?ag4rZ-K*qYZ4ky3qsDIm`n*KP&xDHgVet#;l8DfZx0XVH|WFUHgJ`B&Dp}+yON~ zXh$b(8_R>Y9_1;Q=%0faGIAp5_rldR+um1P`ARHODjpYx5A2HL?l$ebgK(^GYZ zdeNS4aPcO*?U0QsE|83ndV#cUgk1QIU1PwEoZxGQY43!Z@-WQBgE~Zt5jfK3pJfaY z2|q4y(xp@9I?t7d_&vwjBNz`ZLiiiJKq@j;=IX6uq6@Na{P^mtua-af7r(zeeDK!t z?z?YCmdeGA8!pnPhT@@q(fJkL+B0Akfc|xH&6o$@+6LvQO&6yyt-GIOaM8s6 z=tL6Pf+shH;7d7b+kM-@%L{j$7S083+tSZZIR4(dcW*g5I>z>$_mdRx<)rrN{B+q( z-?~{o`TpC)l?nEkGSPd*9i4^q2!iXVQFYe-Z4!mTufm!O)Nejo_U=3m7yLYX*`Ls? z{JsA6(R_7^^Wbs9fBXEVW0W6o;`MkHX8kC9;nu=j!48GD9$Wt3hI~G>?8mc=Dqyr;p`;TxXk;*6J-4J6xa1+tA`P$lRe3)m?pF+>4 zF%O{UctSr@|FjR=($IEx%u#01QSx(^d4kdKw=jWoU{7VVX;$HGOwo45?~093QrHyo zd(=*e*YqHfjy6X5hPLC7)~1x}y8&%E>StNN!ch1n8b1nKpsh1sJ0?Eut4Cj5Dn^HT z{R`?$nN05Gfl%A3w7VeW0-|;9%~Qs3Z4g19OB~XcPH&2qKkZI#WP>-~j|Wa9$|Jb= z(H5otxv>rR#NBwfs*ARfpnYf)DbLAeWgTP!kL%zwZ6Od^l8?jyfQx17r zA~+{PX~}Pn>T%jb921`3;8xC|F%Ga=PW-v)?KOhWA~5NBZ7aCCQP?8nd&X|Z|0O+d z6mTJfGU`eevq|^qI(^YBa$3%Qqz|J`fIIU5(~zq;Tww6J^XKxCri?84vwq?kb%+jz z8TtnDNF6!+O)^L2R2`dsfXNC&is{FU{j`cdBvmhF>0OCR^l*i0(@U zu6>lVG%SOhx`CQ@Ce2q6D>JMxu=+jvLh@&>_Dp@C9Qg98X3jehNiOi#zkOL=ZD)9l zJsQ3~<>T)lR&{c7v$`y-BAD%9+_zOIm-vCnqkhh#u@53nyLrR@(i`dCzdA~~b1dgC z(smPpc`okTf2@QNCD*y#=!jMr_$Mz?rF%7Stn>Wtu;adsc0h_)zgQ@c{^vtX%=T*A>Zs_!Pg<0wj zr2S-t_LX4bIKEZC5tKzh27-2T!nh{ft?7Buc*e0rE8{;etd(9nH(1(~c?{xQyvFBR z$W7`MnvQRG<$79OzcEa}Uk2Au|2na89zp*zN%ST4Z06hcv7xVp=%9yr!WLfrbdKT8 z*Q`Y{k8oYZ4Q($k&z5T+v|t|LM&Zp3`=>YcfWsa+rh9|#ntr9hiVI1*`a1vGYx|^& z^S!wo-^YGq|0TXGzFz#!Ux^)?A{G~s(7*=7mc-}hTAtyx8#!AC3)BWxuBO#a*M4?E ze&$I_drO{UxVFQ74X+&!?{Lf@gn3!VGoN_J*N#K+KVAPI#hSZjpiS{f`K*;P=M*;i z@r~S7+DDAN~HnTK=2=>)%{{?UNs5%=!B3Z}Q4_=l?IBpQNif z?l@;Qtxx?k_LwfO9InefdvUsa^Yq#Bi!Z)ho}ZpZYTh{7KiFCBGY8f8&v()-5an3F zcNQEQH~VmAw@-x=^WU`@Z!{`r#cy|XEq>~^%@bb-B>veZnRlR98TX=NwmIXIH^A|C zP0JkIF#+F8ee{i_@#DNX4$t~$jv2mgKW~W<&P@rKzh5OkZxm#V@_ci1nsHN_+59Pe zbqU(lC3#TCBbD?M=h;@0ridnFT!(+_FvofGT^CTd6o4nw^($Tf3?BN+)y-wz6m}tR z@OXAcUxkm0@TMH<=SxgIif)GGPOTc*ZsF&2x>Xo&|U%-WkG$EP5P|7@#`Db zGCxcI>fEBf07;Z|U9t~a$P+N|W!M70?Q6TwoA|CZ>(kih+D7H!oPN58FXJ1> zoM)@ck3W3kO@h=T^6`&0P;bPaHXCh{pkRfh&xzAkQa3^)k~8qDj=92Sd2q#+rm@NP z9J$cWg5<)vY|e3 zB2uSp2lA{3>1_)R)S(+6nqE8|cVfpqz^5TX%fKrh^1y3O+D>}fJLKWYYIF!x8;qav z#YJmf0a1QNI_Iko-_Hk{FAnnc<%J8p{i1Tud^YgP2j3ms)k)|4*d+90wej4sjy@Tg zQASc7`=BFsbV1p#Y;R;jJ;-ZZ{skN|B(%S9&7n-!s9Dc3k7n&40n6Z+Y#8Pvt8@Qj<5^+y@xk z#HKH^ryVpsWUX^;tZVG42gg92bNV*Cb$;f35q%uTnDUkl07KV97D07=&9WwuN0p}> zBRfT<7$S>?sgs8EB|_x5vGM8F-}v3%{wd|3r~?`F6pBg_qEU|!zH_2=Hb9u{!h{w_ z8EK~!APc>NuwnD2LMcPQCQICHh&oUz4XFyz82@#U8di(PlqqTfi+6_Lk{26;Yw3Au zS~{_58aJhfLn;bkgTd+mpM|H<$O$X-qkdpXJ!}x_qI~WK+r|Tzf6GuZ$}!-A(8IWg zMVlXmCvBx<9N|6Xg!5O~&a)k+d24J2K1O9;!|Uvf!Ed?IMz;AtPj$TJPhGK%W0=ek zzymyyng8N2byc`$3Fc(^6mEP5tpt~opJqu=ep=UgWFbKc01DqQ>2=_e*R=U62tQ@! 
zA#Tf$a^^=ivrN+qPTo2yLZ2!rZ>hMy3jwy7*dMMdcu+GZJ=@jZU%s zS-;6$ym|ebyn`z@?a9!%X~24;`vxDPH$^XTE>3ZPvNKf~Q%Ifl2Gr^4b8y#L(FVE5 zlD7{)SVOm-+Qp^A0B0*w6nRl{Q6IRefj>6zQ61q_q)^Hr1jGu#y8P# zH$V7hflYT1TR(8B0oTfcddAixtJrJnCp(rM<6YV;F~uWX+dHprCcQFfewsbe>O*29 zFFl1gJ*8zfb>_gMh)kbO*5ISxejbaqmJc7r*S=8wa+8l|cd(zGy@<~I;lKLB<>x>D z+49N9-&^kAzZZC0EjO;^4O)36n+J981Wetwj-{szK>JFaOVew4)&3UHJo)p`m$5B9 z(*J-lJ>{UagWmXkLl?252Rh*B0vLd^zap1WS3?%nCjCk|y7;Bdoc@l%bSKGH+YM4y(|^YZ#c z)bOrkp$ovSH|5|f;{m#*?INrE$*X-&0}a#phBSDuz;zCsQn%lpot&kNuf6rvj2SmL zc4@ogqXT$2;k>gP?{6+ghrVqE{QApLH~YRiWZ$BGR>o?YM_gXh$o6%Ul5r>N?+%4s zowGPO(+C-t4X>Zy6wLRq#)SXL<$u-i-TY$#h_BV-b+|VM^Eu=@puY^h{UbbQhK7E_ zLQARhqx?Lt#5>9)tkDuz{AZ$pwMP>)B(^PAX?pB!$l6ckHTBkR7|7^v3wxgStUid4_WxKEZ5A@HpRaC0XukD!c%m7ml^1nMp6!sZQPD!X{6 z?wc=oe4hGl((XBck#7&%)5E$r&XC4AbLkQ9_@G`6Jo0M0(w4>H892%aQyppi8aX-U zOq{t!{@UX#(x{v8WZpcoh)f$sce22se65)Bk$q)F{S~)r7x9pmN;hBF%=3v&=~X_; zt+pifquKA|t3u8A2*8%MK{*fE3MaY*aq%0x)&Cg+PNx0BkTzSL4jDVf)_x009!-}cYV zZPV0MWkTVIl|Rn0v$x&s9eI(jzis{8@U6Xc^Q5w=|5=+Ne@f2kPTOPod1K!rZzTX` z;HGb5?>@fy%jLz3Q-QG07Tlm)DyF<&((&Gx6 zt?BFK#`Wv?WG|9gZLt4ua(A-|1^wZrWzg z(u!RRzS@wE!}VvRsx5J?OS@#K4OdsWhG#cb#jhYQ8@#bCx<(m~GLb4x3?TARMjS~K z$U^*(pSMV+{VFfVfIJOOU9_#%%KG=l3GJdU<=YOiTN``F_}2F@V$3zQu}Dh9UPeXQ{(YNd6 z6GYlI>T!V#gn4lR9Ou)^v*nxT&zCR0Ia$7bezN@H^RJh0UOZjSF0Ypd_a80qzVl#t z@ZiC6e0-Q2kB%>XLiB=iieZ26o>E1w>IQmZtH+8@}X;Bjftw zLo$y0(bn?7pY&ykO!B}%Js_4k$=5tb$LOE(pLNXCt$aFu8t)BS@bPDJx-r*p%s+ea zeEI6DFPE>LeX~4&cEY^qS=#QzH+Nq2&8Yeeu9do(!G2<&kc@QY(fX*nj=ip>xQ6i7 zgNMui@-P4H^7QF9=?B_M*P2}i(Km1nd2TLIruKLK(N*QHPOE0QMM;SBl<XM%^K6FXMC^F)@F4M7@FvXLbr|8C)>>R zaxOV4Gu!;xi?e*f_~@O7^$}^CP4J>Ew1xJO0y99uY6XcW`wi_In+Of%3nNlRqyn4Q zK4>3FU$l=9_iXS!Nc;jWGh7+5t%VK2p6yF&h3#dZ$h;k}d}ac<-!8t91{lRoUgiS9 z4%36x-@zaMfwacE(XFPXYmOo|W zi3{I)5?jBj3%@}u2X2`0vovl1fDhRl4Pd*qO-a`%C=+e zg4v`N?jd6u!Zs$yAZ}1WoQzn#@CdU(nK5btN_<4g)UlY%&0n(>UIo>&#jWrFN(lYn z6RsIil0hJ)MHKvni-vI7JaId)a^-*&94)(oHzxi>*UvLAr{M6ybaikpp5#I$FcNxZ zN~iozJciOK{{zb^eTKpeJP9*W*j3=Nu+0~5>KS>Epk5I-zk?)(cEWQR$r8;?BH=AZ zaFCritw&x>(E?2mu<>5!kVlJ*cxm*U`lf{CsI}V6l3pk))rd29fv{51ylG@>&* zkvm*><%#o`swYX@NlyBDNE!cAxANJu^|!BZ=7^Dkegvw zmbZ3~7(m}!jvhT;-hTgk%l7?u@y-qzbX>r7QGkINDp&y|g52pZHAbUX+b1~folNt;ceqtbrJBT?j$oE`;k)S%6@6KW$oWRww zkjAo{%(-a;ZxqNpY?ii z@_e~GJ5AmF-ikc9NTdVf&91ZpKfVdT0ouG6Q5;eEzRCbsrzvfuBSzet>psDrCv)?5 z0-PV^G1^+5T)3LM=%AAJ5a&D$9XOPqPQEguI67^5VNx6FRb5k0Wg`{QS_JK zW|9oYebluG#XOXv)IlD3b-R;s2V2_;X0Mpg zq}}Xe{B<&3`t1|MZ69RWr0-PFyCd>;*EP(Ds%W_txn+eNm&vpC@~doS-ujqW zw8UvU+x*&B5SmC@)N0MfrM$Q}R*vn!XHeg5m%M=kfqCtlA96#3zkm5J|3%7w{Lv@D zt8S_NSy+*U=r6VczDr-b38v6+v&68c^wmx(UOhyj0}6N!_1Sh@_fKZM?UT9o4|3K$ zdJdn3k*0lap1VN3O}VgW|I@o8lyApP8m3&U!%FADAFg zD%aw(jBPgK!h|obw9XV*oTAYy8ZL@;^E{L=Lyh<@w3-(I=0WM-T5&4tsa?VtMq={pI*z zH)D@5nRFy56M~gDo=fCEW##3njdJzpgn{c@=Mkc}hS$%(EL2AS)64&=;k)_Y*z6?k z?r8hx@0IicKhB1xkMnEr>tVt*&QA@$|ErKPrq9$@a3~9|9XD1W{P-VSCXna}Jwxe^8(83qHl- zroEm0U3>>`Hs36V2X4%&O>}Xq8@$|<(>~8=Ltkn?ntf2b_ECFQSS93%D&QT5Gat!j z7myM%5taZUV={2|Epgeu#Onl5+pnz_cm0RfIrho=@qcxpPnVG|fAX$LCgPQii;?o=VqQPBP~Sj%I^X;t|GuKt>Ss}6*b`+Gyb>i% zeA4Q?5GJ_Db<&9tS_c_yp7NGQpFR@8&z6M!#KH3t+?Me#ZBzO2o4@d9{-!B1SiVZb zqSR44KjQOmp%C>s#A<;G6vXD8_P5laQ{||q3>gt8poE__9Bup{x;-i`VSe{>g zR>!P^*$JgL0~$GEqudnAfQ!yhmLBaz&Yi=WONMk)&QpGb@YMVf;#tXyzIp=ZIKGt4 zwRi{3`5V0SCHo{PS@b3*+zOVnw0aCo71zK+dU%{Nm)>Y^?uDT0$ec$rP0`gHgL)aT%6KO3A+PkmkW#nSKd9w4h>lc5oRO#2DlqvQL_ z(Y=1lcLzUce|+b6|7dS{^w!(U2OmFK?!WaQ^7KZPoAj+spQKWD?IWILU)!pX)Wg)r zDse=1hH>2}u*Q*FlYogo-RO6PS-kM`cMW*x->ac;VK4w5>WTeNK09WXpYpwan3f}C zgd05ea{jy;=r0YH9sMqN8a2Mco$IDGzkE!XbspxmU~`==ee4av{Q5aPd)?IU3IoqN 
zzEwTGE4-GsLi=WZUJbLrcf;SYG2vc2YoA70YZe=wyup`Zx9$c{k%KmDw6_QPn*PL} z^XKTV;{kreOCLya9pLg5{l=d{H>>9yH|UNxSLqY_7IVI$4Z}4^pUBMz`blg_^xvDf z*b4H54Fz22L>xQB<#6x`hL+f@OD_8V>~p5`isN7`NY5L*WRSdx+ikc zpURCBvBDFc6lvH|*G%@k`G0@eI6Q>mqvdvYZ&{d!x)J`yPr=!C&O5!yGIX~3&~-@d zt)}JkpMAbupPnuM=l|;ewA?#@7jWImPE?&FJE3! zFUt1`PMAafwnJnVxnW~%J0${O)+W?T+DKSm1#bIh9Fb3Sv1_Ha`CYn9YO~ZT29?4( zzIX+RQ-#q7Bu2o!?xFrFHR)%aRUJi-+!&IoAbDxjnTQ?|JUmHjMv={QF%p!*v zHf0m7Sz>#AqbmdzPe_+PJ5F&lb`!Jv2}AJ+Mo@bHc1~k^IOV`KjmV-qyc{HiUM_P zcKj8_a@kA-tad@VS$9A0zzXlq8Q{l>(rKB#4{&1xr$V$JrTpC@$Dx@IDIR|AlxQ5wrsx(|fKNaOaiZdz| zG6=YhZ;4j4XynGOI70w(jzG){EDc0Mp;6??BP9fd-Ygq|jq3`pIi*7wtx=YM8|{=h zjpDyIFm3r<#~4Y^L)lvzSP)juc8W=(Y30?UWuzV6?Ic|HDB&m|<;9^9k0em~*^WYr z-Y47p4QJqb>S4foXJRjoLA$t{E$`AH1)R~C!r2MP?Q6(6WG~UuHhpzNhx4p&X^2;D z;nhMiHFTv<;Wz(|N#hbQ2uC|6Z0SqH%jFwj(MH|SorF$AnSso zJ#NN&lxO;p1{#CnHwDG&H~u%>X^TUC_||*NCqMe_<@kf2EStOUf@YVFuj8yELV5bs zCPsJDoBPNtJ6vf)LkvT>;146xc2~Dd966lb!8GHh_o@G#4EVN~#!n-EjRBJ1I6j8w10c{m=iJUkh;N77Sic&PC(^LoaIj_LGzm+u6Y$W z{*B-59v@5f_ z(916O@UMox%w~_57w$~e(N@>WcLrB0g>DX1QpGXv)Ni94}_n8QlL zQwAH$j>SXTPr->=g@{v2k~FkQ_JiU_|CASTSWE5HXL(ZxXhpLiAu$Gg_;7JUXYBC@ zAD~x$CrkEi1MsEJwnZmB*@ds(C%<}RT|IeZvO+uBx78t%EDj@LCD?rux9!B^I9(?{ zEOFf@0`BxNxK(?K>9|QFas*aqF0W?gIujCPokh@rQ(n|BcQ}SubHv9_b(Qf>ll4sWWeEJzawAEdYnfE1_*0+3m)Co{5(MlF z(w+Sw91UKSP2egB5^|<{Dxfg&@5<7XEcjLFp^ilG45?_Lt8%a1+vqcw8Mj= z)X81tE-vgK7r%A))`R=Y4}a}@%hB=H^78z}^6`i7E&GQcSN@yS4|`A@sR;EEl$Z1k z@n1a5`RV|`d(UDEt@`7-h4wD`@4Non(DJQiG(ad`Q`w1&VJ-m znr_lM#V^tB<})gx<)CqENh`jI#*Bo2uB_7Gv> z>zn2k{m~ox`URHNW;Y*d+Ir-X33`xON0}UacujxT1{mgsgw~fj2?DRvhcPdFa-B&o zS6Z?xT0qw)aZ^iuRnE#NzQq{FlwT{w$?=$waUl8+W*LEwyjq7m4MVy-&+*jash{ck zOe9>^cqg5q2jt45wmx{MbH@t#q>OLz(T5EFL2uASS>!Zl$M1Ql_rbsTG?gTZ55V9C zY2~SW0dNe-Ie#)PU(zhC1mVexe>334SV&2vmT9tXj3q5P()Je0-vlyKCh0>CX>R2t z92Y5%&Veh!41DqBdf?8P(4+Xx?Zwh!G zWv1a{;gVOoGTN8Ca7miQZO7nF8<6Jw+-TA` z_=943Ql8dHexR2$#>16Ol2`N;>M5h%$Vlr@K5c*fcV(K_abx$2NB+zwT|!jWD)_zK z;8EuRv<~)3HN`$;v&NUipU42E2=F^~Ee}K7AA$48_zV8i-%7y!5qf$1K!p z%ajLy*O`~yX0h9|xcDFO+DSv|s2;#n=e49!C&!w8HcsEpGR}qdId_-C!^7O{_m$Pp zo;}SR*w^^S2BBE;<@h6x(A_&&9=-DpzR(d}R6dvG-o2yT{D1iH!SdlJA1!T8kKhATfE-PXi6X3yzHIBP5?g@OjMa_4L>R4%1v? ziSS=L0yu&SnY0;RC$a|ndPpM-7~?C3UMmxy>*fEvFbhotjzw#{kGeE?9`FHQ z&ocp8j``mzgvS{gpElk8wO-FH{IJb!<{PHluNb3q^Z)uBvFhhJXX@IGHozNkc_V-# zr0;mMGt*b{v?maVuaq%Qp8>R)`(ZEqJfYv`&JAnFchar#!aOW(q8$aNI1KTHos$8h z+NCciqD>Wd%Ty(`jU7*~T|bci3XcGSnBTIo(WIL-?YMk)gOKyB9>RE&!!@+PBkFB_ znqPmt8w*Q+(u^GwH?me+ni@7aux{Sqr*8RdDA(pw8}KO`?40y=4wtR{Lto2@E#mOa zpSuSxG|}wRh7a3i*`=U`)dLQA%3PIe*RO9bZo39i&9n36$@}jv@4S6~xqo~QUT7EQt(hz9A7#AL|8LD*gVSBkSf;#` zCF%3Vk8^5$Tzy4tZ`u-i9ux+zH^9_8+Sm4XQ+;Ttqiy$jtTqb_9rY8Lz8S|c4B8kE8|CaD%Gx%Q@8A$8{D{+*j1F^l{UAP8`kVTp&%hZe<^~>p$8oW1mhvk~ z$6Llb^kVOTe#zOlss7^2&zGNl^?bRck2=Kq_C;~UZ{=TZVhV^m?&)jFm-us|n)qEm z0Ka;!esEl0okt&j_jmrI%+cI9kTofA`QX8h-_A!r#HCU@m*`W@jD|7g)(QUY@8GLG zS2pUjv{Ogw>y7!{eQb`ZN;*)WRBo0Y5J@p$p)a!G50cW#2NO z{IylO69oIG{dvc?#$o5c7#a2LW#F}+*ncRH&EaAc2lV~)M05~-+GYgO(&rfm`UJM} zR9m%BB36Cm4PLE|wv!5|ht|-yEx0a4HGlzU{5;wc*{7D2uivIhtE=*$pT3!gI($26 z*I)POOUhQ6YLlSj#!LMz{%jKE@56m^ChyM845#O(=oLN_b8FYT)RDeH!1jVC>Z}gg zKF)pI+$j%XDmr0YxlloMKG;hyZ-23Wm?oTv%#L1&*TXdH2;UK(03H@;S<{Utf8=F- zE1;E0}a~XK%zBiP&P9Y_z5eoo<GSrU-AEgNB2i1C8a~rFM;joLz~O*NN~3vfugl z)lp84#OYJTIyerXzPic<8G~;j@H^_(vrofbX5mKXUZdQF9&i3Lpi%Wap@D;xr#Em; z9U%N9gN~W6+V&>4^|{vR#~6ELp_$->MP8I~!);#ket8AF^ouO+63=V4 ziKnL^M+YU#lInGRaMGb1_M2z;|GW8{&0>ZCX2Ac=gpocJNz+14~{z|-#bFUlDv zj6^-z7UCC2bb)rZ4$q!_y*&Nm=giu!^3?Gq4&K$-3+jUIB0EZl&$Qp!8Rc{uFLQ&? 
[GIT binary patch literal data (base85-encoded, not human-readable) omitted]
z{^6Tmm}>d9xf&4Gz+HMurT;dGEb<8FcNyyOp*$yM~NuOlx%8 zUZmTOoPkMbkZxd~9zun<6<@Mr(7tX)n1p2^5qcsA;w|AdVVXP;o$vo~Ssxl%_IE3b zt7$*twm`ncrIO!#zn%7y4MrF{$A|Eq<|4n`D73e`-M;aSZ?=E+|M=JBJ8O6E-E05g zAO8LJ{PQoQY!{mIc@KbDW^91eLG{T_!04>Epi8N#lTM$!HZ4K@Zk!V=NHt;dD2Up> zOq_*P`r-#^yD?~;$JqqJnks8-j6ur;RPx$_1VNe-L&M}@pUg2G&*DD#e&LG7{DlVL zcqwbNIl^4rvyDiqVpcLz^p?E>k_cDoosLNy>!>Yc4t-tlEJ=B&em%5MuVaIvaa3?{ z%rkftZF-a^@}@qlt6o02B2x+n<<(})%5VJ%OGDG$-wy0nmsd!?5t(;D=%x{mi<6(8 zaVBr@p4U^b}CLZ_^h<-}1i zI0O}%o`fq`(3>DrDjywJGl6?i>Tf8H9*yfVs9lzryg>&H!bW}NF?|3J1en#}L?VB= zTdbjOM2t&{+)F;k^JQnaR}$frpD8!VHsbLqJ3;I+Oc90N6j*V=8*h^P>^+rV@G<~> z#7!8}?~idKpUDPr+@ro}7}Nd_b0dA#oBk=VAL6FUj~n$p79fZs&UO*`o#Nkz!|iE4 zz>k5)$TZ@6`h@o|3rqc(M|_C0Z267LO<>0PU{s~zvd#L$#rQ)-V6)fe%|y48$mrhH zHSDsJb8NNRFy@I(JJ>yko}|>}RoM^Dl;OZz9aXT6Bf>ag=UF{r|6g=gIwNU+gvDOh z?i6KXXw&mS%r@5hNOcN->Kdc8or@G6EJdEi9!iPHXZseeWt2@yIF}264pQ~cL7)Au zr$Jes3vZ4me6ytFpH_?BjD7^Jwkn#8zC_ytJ%~=Lx8llX6azJhn;_H^_Ggo$?SP#H zQs^u@O24=|XfD|jj&_p#%%GZjD0-k7eQ4h3(6Fb*Of_L=8rxy8pJvk15kGjq5moAdh9|3sYJO57Sin(lA`w zQ;?M5$`k?Ao!~+jFbWB~K6om8{G+%>%ElR=x8_nO6+X)Ma`FEJ*&#IaPTQZg@FsuF zXPXj_!Z~@7MLZoU$s;4yvosrEnRuWf&)p5b_eW?KmNWxM7&+yFdeLt7x%w{q`^sbf zL4O|brQK*l_$SS#IbTR~bpl$aypPGM*acF+|Bx$r;J8FP#Ek{o)6oOUy?8l6ZZ7Z< z#n`4iD+@(~e%guSU7|dp&9^yf!engW)>I^ z9<+x$JHg+@I={W`8)U&XR0D_c=`lL=)i;0HzVM~5w(tMwT>ztB_YT{YYq#19pLn7D z&hPvV`eC7+Vyn0j*W-$VOR*w+RhCO{cv-nglIO@di6tSy5eqZ-E6%%D(OLfAz;(Wq zoa2xEqU+(4_>Y3y!;^kG&F}~554r)?boLcVKMBN*{L^6{=8Wf>;5&`Ohqw>W)m?aM z*xxXJ@=fO*{*Qn+^4X66M&*w?viwNa>G)no%IV?Wr(un<)qT;gz23ZyzW+m=aa^O1 z8P#3Q6(`)syV%*J$s5d*^m>qc^~SvI0HEOGk^Xe5S)k7gTQ2O-uC({fmecs0WIsJZ z7xWVWfnWXUeAx?(cfHQ_+a8HKcm8PJa)IMw7UPmE?2%^PmrGXHn@9BFXaf`2U1 z`#tfw($n^*;*NfDv%7B0FQmD`qkQ`xxM3-}%?$vDd;9Hh*V6=!VBcBIFJZ6wd69GA zWMPB!bA&@g@iuc7_TfBIcW6b*!bVXqC$1I<)P=r86F$0BOd#=|CC{U5UGwPs-7V%HVyrsX`}ObwqMcSv<}8;05un-PDju4Crz-#FGLu)f1r5e)5}8d*S(B$-kQrz+aN^ug9o#hIrZ_b{OPM_u~zsXXCqud~Tz zyfPZw%``}$?Zzxt3Gw!o-mPE%>c86o+{KgT6?xlz=*wZLon$ws`4>s6{V#78q1TsB zd`o}p%0_$UnS1TyFTK=0_43Q@<(EIvZr#4su5De-9Dq3b88bKZIp^x!lM7th)Yi3c zax1sWXyz6u*F|aPWSviZ^2sMNLjUo1zTfUW^AvS@qa9Pv$ESypao^09yxAE~z$iLn zU$_7WzFDK79D$CdG>bpb84OefSL#y5_)po_73^sY{$mjCM#-9h4X%l4_33-rDsWqN!tW|K* zSCB^@8K<+X)SdCz-+>!}!r3;^(?dQYN6K-a7hfuhXXPl_#0T9RL<>$f@{l$Q#({s~ zgq@8CriCZUc4#(E97ow)(iPexvxZXd(&~u-)HPyb2zUE$+cJ2GniWAF`#R%wmf_s9 zwz`Y|_S4RN*sDM^xO!qVcvH`{=&gRmIhP<`a#>&2Av8%B&$^H0n2YcGs?(IQFLnkQZ9uS=JasqqMonM;#^K zi#}a&r%oJyxvtR27=n=g)OWe6d(x%Nn;pRRgs6fEM85z*? zB%f*0FnQ`%+@!bnS(wK4=PauGFe-si%Qwsl=3g2voPaK1dmDn6yN0%kA~YCB?%-r2 zvQnq6VT^@N^LrGfrf93ZQtu$1^A}9Kd(=R)@fY{R)|C1*0 zeGs0Wx)lwH2)-$+ZmElk;R00BCc=Q?gKsd0f5bcB&>%0xrwSFfh4$35&$NS`llHTp z{*+F0uU)-zz1@1^E=KZFTf4fB5sdPr^Ua$cy{$>79j3U|+Zz~6{!y0h@Nh2^sB^O` z&10ZWJW`!b*)YqNb|-CBNbbZ`8`;Kpc6QtD?n6Y|iPy!n15R5d$*z#*L=y@N_uxK-~L&gbY8gNe^z!oT2(Bl`qzi|p;CoRPxdGA>6T z2ci54SAC`k1q446**23&edd<>VZfCQ0EGD+s#ONfF4`YD5Kr;Iq67NJ|L8x$;96^c z_SG-9ul(uP+d6~XtJgQ_5nJ1%{tl2!I~ynM<&&LOcBR1wad9)qiIaWEXDudbREN0Y zSHA|SL#Lz;>~N8r>X?hy=8GPY52Ul4fJmCrwkv=14p+)CU)pjnuYd(sXKlcDPNR3C zJC!@~p_jL|*4pOULc4ZlwY~h}GwtfeGHJIlMBfLOBUGr?vpfIc4|Nh56+;RR-&M)d z-+2?}Wm>>2l{O2D=GUJv>Lg)aII@UqfaSsb6(3%o`~5rprs*#J3ak3B+;LN&o%C~w zF^xAHK{}9Rgl7W_K2`YT3vMxKzE;0&_qH)|mnEp}M%bg_a&n{pke?5-&G>QMUE(mj(d0d7~ z-!EUff^F?(!6%Ikzc=slN*N5#pii&CJ$po%pNhp1k-@OxrcV&m*I-CcBQlVM_S44PDRz&+5C+$t{*+|8S3`%Gv##r}1isI6N41N71NEy53v zc@(JjtH{bfZ(?5Vk{?+Hu4xhAb##=GjJPtf%pwe`P;!?+4h6S%qzv+oBK6IYP?nr#MS$f_| zBiv3)m#Nq}rU_dsUjeFTe(SkAQt&JN;w6ry)1fr}x^&=5e^5eO@G~i=eg;>|4Se&8 zyKg3%w3kPY;GO*9CS1}b)}M?=fn)!hz^>1GU!^O03ZKD;)E~D3q8nw<{_=-Q-6v6! 
zbzj`zPk1)2L{LBYwFkm??nX?Wk;h6apLFJu2Zh^3A+&+?)rN_GmFK-Ct*XpwKk~Ek z%iEDIzT~yqpD@b?lV1KKj|p`C1vl&rgVLzT zZ~fGwLXavyf0194n3Q5|R~ND`fV#9-%$ie)dYZzO_D4JOZPpz9&!4tpY*OO21(Jp< z=?w!;yMFUdd-|ED+NVDAN?TrC#rEB9zImwZ&qevDj~#P5zJWv>7!|n_ zH`*0HWQnVQ4{;;_b^&-?noG{gi|hA~+yL-6*UL-!({A8V?e7C_8f@C=W72(O`Q1(M z{RsLWOV=p>Z<_ydzVZAK@P_*j#@_{uY4@0Pll;T=@?y_==dvo5)1OSQ-#JvW)Gs0xV?%Jb{%r{~bSypNI^e=M8fC z%7t!=9`-Igc~04}6$o?8m;Q$I>T<`D_HoYHpTKv)#3sX|UtC>i%w`12&bQ;KS4*@0 zqx;}tVWJ&uarQULMGzE7xgk%LoJaMf7rL9js3Bl`>VbU8=YqlQ_z+wiN6TyC*ETm1 zd1yO6$fDuxTQ1^nw)fwC54`dNNnyur;nesC?p`tAL0MVL3dJNYuC4cV_!~N1}kxM0Z(f|*^y6a%kV67u(YpZaB-ed8^N^BQ^k>Yjq+@Bdk6b% z`@w^@M0?oW+&~tN+V;*4w4G&>fpk0nVtL_~d_lf#qq!)fc*Pw)T->$eWwBBW&ZP0# zcw&P?U?x92BTwLahmmfc_Br8HXqB(iKkTu3j@h8syYNYCeQ3KOuxW=N8d%yY&Y}7? zziFJ~cHX{(!Fj}^c5t|h{5!{^{)7*6N-xUx zJe_Ua_BhuL`~)w$cnP`4%X7eSGeFH5@lQeOH|Y~r=HZdwMP#L3wxe!=ZCOjpwLavj zzXy9qZEMSuHRf{?g!5bbjB%xNWo_gfgdbmD6>S>dUkqz}+ zc#FMTo1;NeX6VwaAW{Wmcp9YKskUMQA4_kk!=N_lO;h|kSD(|!9WzH}lHWY?%S$+w zc07kaCL>S5PL{x}+~y~b^=DeqN=(5SZshB&eF~O%CM5VLPXdUiQlpRg^FR2e(2k$| z4|Scffr`PD!P~TgF)Ufj5txnA)a47E1qut zsh7~~|A-flXjbttz83$%h^uhDIO<=U-e=QkxENSgl+N@PUyWOVwZovkC&r;ht&21Ye|7#YV1j3GlLQr? zFsl>D!V1i&j#Dt=)5|m;#X#@;K+{NS)fLQ6i?G5E0}GD+JZg~fm+y>K zCc@=Me7%mniU;zfVk}#@PGZO>B27@j7#>$$axd{t{hd)>*D{n-+mYg9-MWj;cyTjq@~IQbKMo&FXj0QE2g*Z#R>|Phj20}-X{Y{)O9~XD zy7H%7C0Kb9Upm9Y4~zkxws#@Bc5X3VGRWYvjO0ZJ+ab`g+aawnV9uK~(By)z-vwJS zY<<10VhH+ev?p)fMqU=$SHAk?_6L9PKVXP1wvRpkRJ(Fz1=#S2@-{cu+rx+3>AW;j zJ$b^m77Cgw6O7OB1BV76k zH`p?SSChIXwwNlXPHyN|l!$SOE;y6RXK>--sS@d~{KaCLT-nM?qz(JX?AGO=_8%1ynqr`z0at03XzV)m2 z#XtU1`-4CH!}haZzR|92UTrVF^lbax-}^n};Q7#BZL9`4e&Rzz$9l7_<)H(fOeSzy zhqj;6{l)v}+FmE-Gq~dP_jIz}5aalZ2R$0YrL#{_i0d991P6RE@Zy73=rl=e3G%A< zeV+6?z}CJcggO8UiIf?4g=&=MTW$RLD-W{>rMxH?(%0puC}O}wc%G>rc_2OVO5UUt zE>uhQPt>*GH0098q{V#TAbj<&^&5j; zz%og>0bu%73&Pdi`VjBpv+3RSJwI3dho6I)L)cVtvoj0~&JNqw`eIvMnQc!#d9A(j z;?smXVfGH9y-g)mdU9GscxL6Lx`3bO)RIKtwcYBGf2PCh1R`7seQ^Y8ciA^Xeq;fO zp#BQqGDED~Fbh95qO9pnA;wW&1$1XP&+?;u1ruQMQYo_4$Xl=3bM>G#?el>BVXh!0 z!MNoDBKK(*neX@EFDHSf4`K1s0`*+Fq?b79HY6bOb8$C5`A7H#wHIOBWxT@XlMG0C zm9}U$M!NMEEp=VMo8TKa{K+`w9#j6frywuV4cr4a0>t&3xM{dk@FwApa~*zop#!Cwip#fIdVA(-_ibHG;Hpv|-dAzWLz7)%xOE``J%_PLc3W8x@7` zsY$kl?1Trn_G0yCloi?2HnAOF*aPJ+8Ko(4cTipJe7ICAF}9iLh3+mtTWIVN;HU>v z1_7$Q@wPlEOqhIG(ovq1 z*p?dPUsgHl*UDo*n!G$;=xBLjVFg>ie|RGc!&MpHEw3)q zTW;Wx!+U}B*>L+C#t|}1K1vP=Mt+^?lRb_y;+M99;d3TzdZI~QcvV66kN6`rJv(rc zP6-h2lpiXKCOpO#zIBytZkT)|E%jwp8plua`jZXO(guD;<>UtL85+rR`a#>Uf#Gqd zd0YVk1ikerp9G%Jx^t`g5f|H=bd(;Rq!nN3CSbHj!<6AFLq1iV=?`xBN^T0yC`XsJ zMjAId>m0Xx+1_Ipr`&-{@wa$GUWq5WV?_IT>#o|9G7T(kB68byYCI^;v>E+``)u(6 z#(!kA(_(NIm#{B=u!W>b3!ymcIg4MA;$n(C^)pnKd)5~;(?)TUYyElhiT17KDnD*0 zmPgr4EZgkcK>o}PA<%aY{|~W|52^oS`~TUcc7JETz4QJyI%c7*UA@uXeYnS1WuNr( zZDnI4-!OLb`QQHAzunfa-fU-w`)zr3$-yyYmd)hbH0rT=#?6_;cL6T-Cgon>m^*He zT+I4#mv%E9Uvl%oOE>kLpy8L-R)nw=s8R56N&Mk%8qOo>r0GN4H0X)E8t65h|1u7b zalyIYAK}F`Pe&N{c&DC+f0F+r_!sZg^M{tBoA&=2_W}I>Cgq!NTpw0{I`dA2Ps+A3=My-}mJO_2Vphz_R9+ zSK17=((K|&JDYcm>)XAK5f~Hg?X)At&5o^ReLHs1g(G5oF~;(2%Z!`O#HZU9o_;s$ zo2}@k^k3lQi!bT{%O-&F4`V1lPq~DiU@UctZ5I7H7sUlU#)MqjbL!GrVET5hcAaS3 z=QE6wk%`y$4>Z~v<8?L5o|fUWhlwuamsP{!MD zy+wLwK%7SaKD0Vdv+rhu!=rra)R&F+_mm^;)avKUo`G81*2G=-<5NCm441d~tq)}n zul!M-49qk2W8H{htR3ctGBQnw=sG}w|Y9L_L1YU{e5sca`Uz)i7tX~eR6nh zbsbuu4_rAzWrgzY?cj zkDLQ{@@BKu82uDB;Aj)-{}B{@)YJN_AX`89g`wIrK4~&%q?SPcMRo&l;*ySjD$+t; z)AblG{p2;DyonCQpNTlMc?HL0AiU=1 zbv3psI^~n#9A7w>kv5k0=*z9u|D2-tOl!hyCd>o*z!Pi}XM4#@kMfLw`FT$^^krH4 z7UkE?${G9A_|3a)vbw4*+Rw{7>#O9X>I^(RDRl;g9UY}k6{pg1CtX*nZ{1KtOz`)l z;;OT7P2{PMPOtn7f2E;cJQsc3VIidw 
zgNcTNlsoqvaFzf%;UV-APOHZ!m0?wuwAH0e(TC;`8hyF1RJGkWFP)z$4Cvr?cHz zZYdf377VlqMJh;`aLrxyHW(h8{0p(^a{~+aW7Z5i+iMj3RfOi%XZ?}O;zQv{p7;kGUBT8R zMHHu5eUb=cgypSdrNZ!WpH+ENXG)kIw@N^xN<-vu|FFIMTc2s){nmHdyYJqobEPB2 zFk4;SY&UP*Zu@ko9+9=Uu?8=J)9c;NPXw(uu~T`8uS&?pSdIQVf&}I3WH6C_=RSj@ zg23QwHb*?WvA_=Be|wU0U(Ye{=#$Q)Q&g zEdp0V+6@OT7W@6Q`mUr$O-YB!a3P;QaDvL6rBTlKJ;KOIJKOfLi}=a_>D?VDEiUf+ zi2@CFC)NFqQP>V|=Z=OSK&W*=P$sheq(qThsutN@KVyN2$b` zsMBS^*}I99-%<#HGieXRjRBngiIFcJz{&)9uBwHCpbw|+il}i$}!&b z;1B%D>p#AZOYl)v%m@E+R3`dFp2<6y);88RQ;zu#sn_=(ypR4qZZCb}rS{p+{;l@v zFaD~1;q#wwU;ot*d+ILo=%x$xz4Zf?^2QA-S$OTDFA1Wp{!X2eMnl&+GB5ve zcANlgK$E}qEZ-|_JtND+yNkF6JHRtsRS#rUKEDY|X_jLhfq?mx8#_o-1-3?@aDq>e zNPOU?+)=*pVPUeG&pfs(E+SoIPlG0Wo_N`NihQT<0@uTx-L~^^ryU;brd@g3f^B&L zV`vUq<%9i+^_Z&Rd7ip;prHX;n|TOJji4(i&nvo_^0GTuoWKw4g0BPj1>3j!g@{>j zJ!R0~4qv|yw!h1LZ;wI19&J#)U^}H<0oUSoxA)rXuf5iO^y443Kl=B7+`dL#zqf-P zqWyp3r5D;KKl!Qldw=Km+N~$<(5Cijdrsome&&%W2NNa##Y6ci+-g@|G4i3+t4leF zz?FdX!^(2{Sa}ct#90AJSotcmPx-4pu^UYTB>dAKxX2Tpjdr;GE7I>k{|@yj zU(JUdDE;}X>J;DT2GNj4UD(b~(i*3~`hjR}afWuXM%H6bc{gw9fj0qVnI=N^l0-V{ zS$e7$&=FXf^AwqxT$bUbfJ1Nmb(PaIu18%L=ZSwhiG4_RuUnTeLH3HQ5}W$w*|6~( zNa8(#s4Ka??5x|kAW43olZ~Iq;ec`ZK3vZ~0pEH1fpi0uKhxK=yBzLaz?Z|PT`zy! zM-uF(JWQBB?jPnXW5f?aE~o#nvM;-VgYp6#O$dT~imauWf)ly$o+q)wgJIi*X+I--Pza`g^x#bGa!rxEk$YY1X+3cxKrT>vd z)_`x8#7+q(1#U?nMsGnMc9ih*##~RQJz3#32v7Zz$MNnoKE1|xt}<$}$2zDoDy+(~ zY~lInxFCtlpR`?eM7b1w@+M%)6}Z$a$Hm1=&or84|`CJ)eTWk$HT>sc?4-9WM=RvDN{$i3=+u~@s?+051OBt-miJL zFNqwaFS*F`=mlQ(Ie}g94Ugad3aWJY3%)k23rQuv;gvrabeX^tznXHf-MRYLP=(H)rJ? zIMt=!bmpl`Jbcr{boEWy!ZqkHo^q41oxJNiu>oz-j(gMt(M941PTHg3DXiG!{?QH& z58*4Yw7WbxE_7h)rp+NM!lJpg!_(uoyMLTb0Nd!xx8Hx*-gxuvcH_a={9B$cH8`Q~{Ht%G&dt>#7#0Sm=wi!p|0(kz z`u|)!Y3P|#ihe778eI)*&Vs)-$s)8m)-w&TU}`kB%rz}gvSCX-&9iE9#?zCGR}ZNl zO(#(FCZXm?SiR9-{fBcQLi6r8fAVq!dF)f{D~*%yo}}W7Jo>167JV9e+_XNM_=QFA z2MxcyeBw3}aDs83SCm*vG}cypT5F{LXTctj?tHSJ~H})Oz}J`Qyx6p4`iBux^FPGlJn&_CVPnv9qa9=Vd&O-?@LqdSiXP zUhpE`di0Z?&pi8V``B~OwO2m%O8dkoKF;$~@a%58aea%i{A%Vyo!j-3mPbeX?J$ec zH81Si!vehbMWR*st(|SZ?VN2s-vBNm6+0E7kbm-B8s&%cp30)181U_Mc^!DGe*X0& zbCaGh=*vjjFUs@KpXQfdC{B3fFwZ(XL>dl86P`xZ#fx89z|6R~=Zmgmf?JZ zEePE~TZ7wGmj5Z{0)FOX^;r&H$M$K*X|Mj%?g@9kWD)ztHJl+A!gP$1&!xf#%Pk&> zx3*EugIi|SaWW37mq*+%}qUeV$DrEQyN0qT!CR7am7A`*2F_wwJr6tx}0E(vLbZqGtw*H<`tf>MtpcN zGNu32uMAC4&uQb%S*fyTPem9M4Z_?-|0P5vb>%P`*JqH;$;_rRxb5#D_dg~uXd5fYT10y^QH5*=z7SiBwP;5L`Qm3K{Gy5NqQ$X(*EI9`RUO)ix|qj`L}|xv_zP@9$gIf+wgT1jQI?A zQcNSb_~1flHVJUA2~3%)0)cO^%{O_5uuMDii@SIQmU+8Bl1=@^kexikRqMsfk@PNv z6_9+Xd(!y!zja`J1{WH`m`qW3jr5aCaB<+8qDjkr80KR~t;dsyk`1ovu+vNO0n0KB zNCWiQdJIr-vAzT_kTzpapS#I)XYiM@tydc>x8Oqe-IiCD+UB*Z?Z-d-OX_2$t*yB` z@+|fA^fS+8SG3>DTD!7ECX8O>Sy(wjM4W+TGfrFUguAK0#rRR58uA|5y}P>&O3D;* z3o~tXbtCce!X3DKdl=pL7m*KX6i0VaFE6iBwu}F^Z4?a(>3EO97xJl5AWQ7@#Y8z< zj?67F_&9A39^7wx58uls0S8PuZgBzJQIHx7;#P}n7%YUj>u~`kDWCFP6M$zR#sI}# z?`~|^VQ{eTrwtegx-r2Cs3<$~9HRU@#XyB9k77hnPr|cq?a=IGVoaUY#8?gPy8J0( zq<2@E&t!vU>r|OhhLwMHk@51RmV(;fa1W_qWj@xj7U^varYlS$8>Z}`bsS9GK0 zkyL|pmO2wJ?v;fIJ&89TkMPZAVZGM|195jc-0@NkWZgAN4Q3KdJ>a)#U|bfADFpoH z+bXi>i4LFr)MwhS-g>kB!N2`d`^LAv*#7EAKWfk2d#YW#wZUMEK>~cSS*)zAX2)t| zt?StE#`f);s&5=jaE2d-8DKk1y~$H~P5*%$I0zn_0}P*h%a}jG$1o!zM}%UGSofpD z56`SL)A1a+$O2{%z`<7{9emt%Yp2>LHmDmnz7RkL;NbTN z|AXy4_;Apc;k8GrX#y>R4=ozrIMFx*zcI9_n>~byI-pr%!CC?l_P*n+!uRKc#0pmZ}EpeiO@R zb^F-~vNn54+&MOfM+wff>o->0iyyn&HrLSKyKloH(zxr^Z~xP7TqFd8P76iI656hG z`DdLd78Vj2PdeK=yd*-1;(Nls_&V^jn4rCiqEb#g(i6?s54i_x$tY0Bwr?$tCi?k(2(BmU|ky zwfDl0%H7hnWh=fBfZWrpSw$WKKJW4!uukPeQ)}M*=l=H62#lC!sh{6 zeuwXdk)C9+1Kh=}{Q68=s7Wy88NIx)Lha18qg~nwx_161yjIymdh 
ztL^0}9%le|5;4UggMn+A(4$kA=eR+ksN_hf?kJrDw(^hiMwyQJiZ{kn2C$vnwZA11 zDZDy8`3AjZ)dtcU`>DH-EZ_~>PW#8aHWBrrV<7SL4eIKDD4(_@_CFaAwC zkLXnIWK2X{^+l$`hF3qj*$Wur5ttMZPEeoPDB5o+hwx0UkgU>5yS8cJ5Zm5yUYC_h zBSaS*q>V76B!E!k%Rj=T;ZfY<(thIU@x}9!fgT>c(*49$z4?rNR^9@`#6B6WnA8AUqgyAV_ z<}EzP!&uN$QH~v_0Xg+Y-GKzPQDe}&SxfPU$EQs*`~=$HyT98W92~dZ!_!=Db6;HD zYR^9RvG&$mZ?*T{c{k(H8@Fz>7eDdw*vgxXf1kj%ZHzl-u&J%J8Xx)=4|E}`o;Hwr z>++j7T$RycpR6i3Rfmr_<7o>arhah3!gW0|xgJR~6&@hd*JU@I=fj-keAx9EHz@*m z(&Zp7Iz&BNvXtk?mUqcz5Gc@*URlyF4&KM5o4UvAPcL&4H*o(A^FPjh`A}Ydi2DHk z<{fv##-*FO$Lqa#cX7$TscVu4m*-1PnHxAq9=WGpJ|yjjIqgRk&L}_nI}wR~oJ(v- zv>fXm9l~G6eYD3+;FHS!O1)$(&c@miR3-(ib9rRI+pSU}=x1Y2> zxBpLHL6+D7Ku0(D88Kl;H>OWVPaFZqO$ESm%zcWTbab>6z2!?QE+9o8^Jo8moJD6h zA!uv(w(1i4$c6oq4SO9=TXMccnhK5!&FDU8JBQx6Mf4ha%yh?;sooMNam&jovO`^l zl9OZE+(g=(6o}q*{;p0oa6Z>QM81iS{f2ZyvvdQ){KW%khE6mSSQ4&3Hiczt|41J? z*1nFb?2DR5If0hy_uPn1n&ct93pnt`lie1uKivS}Nt|^G0z@ERRST&t8IRvb?y2OG z$Lgy{8AR+WfKUDrF5j$!b|!2W_~zT`0pmhHLAbDheqL`UJRj_Dx5I<|cKzDTY`E}L zQBP?&IoYRvPx~9jp1`o&=3Jm&TX9N*>!hu(dparg#U&jZSFg0I*RHo`o_W4~@|DjL z=HmW7eZ)yNb=|pho3wN7;PAMuIIoHGh;=^!pnSMUX>)h3!!B}c)KESy?7q4?A5{Lw zyVxhnx$VdLwI5gB49Cf;?wvT}Jceg$L>{IQ<7oTsR_{1mj&HX!fo@`gHTxqMTYgvS!hZe^e(k>Ge1tsx> zx?KkL`o?D4T)&dV|GY^K9!KDHL|ZsIJIp6A5M)&I?IbX+WEF6U)XXZfaT5Qqz-5y&X;@U7#I2CoEr$t zYkOBWYrA+2JzyJGHYCaV$(ZE;AkgJHgQrf~CoeA5=QN~W*>Y{j&uutoILcpMUIfS- z?d1SF&*S;sG@$M*JtQ4#>#OYuoTPIJ8|%c4B&Ub53q2Cux^O+fGSy2tZ9uwUZRVe8 zT|c;zI-PSr3AcJ2^HFc6PoLu$%e*)xHFK8GS9(hx zm4Eba*12m?!8TY z8XatVO)~TIY<{0CWcb0w4K?MIKIPI-;(_g&hO^H0BeoZZ2;OVazt(Yc0$Ik*N;MFa zckmQON+1mnIO%l_pmc-uDX%j#_IJ`rSnrn|%ex^?SxTF-KJ1Ibm%1viy1j6>?$Ulz zk6sm?UvWb&lzH(Z09<>YM;c*~vC08Y+peZcXs{ge+RV-BMt~D^XWq2$pHB-qV8WPiX{1lBRaL>l%b6|Zono1J-M z?!Wx|pZ?t0$=O}&In2SJv9YR%eA}!NyNgd_qY4+Hz_;Qp)CUSm{|c%%1?Y z;iyQ8Hj`GRP%dtOA-e9+qmY+E(OZf@ctgk|Jmjx(yAa@vS9lE5pSQXfxd>ZbUTSNr z9^Ji8=edUBq?5)Va`&8_`ufI3l2%w2mf(5|4k-G%4(f9hCu2Fw_^DroVx z`0$G|#jWV&pSdv6Wcus>ek~9cd85pLodbr4ExK4Q7cuaiK)}@Q=#arq1 zI@zlcsjP+`(wNWt2?HCARgbd2|KNRs&Z%3JCoOY^&i2W>Ph<41x7p=|9ABscwO!;S z0AQ=2TzD0We zwwzRQmmvcr+Y`9&?(Q{tKy2dyA_ZY~XkjBY!cX^LVv=~n$I>LB$ z5-IVki!oG(6NfUItGxViR}*Sf%70pE{2HuweAQW7h8-ud=1ct#IRpFNMuy8mJokuw z(xB+U(}8X}>fXRg4!G10x;!Sd%cDO0L_5IA&d8o-vMDM_N;g=a<}N=z^Wk@=yU55l zY4(#4!f>aU2Kl~&S@6_oJf*|&^8l9RZs33N`@i3Q_qRXQ9`0E}=iyFnF&q2vs*Px*2AkHbi?{aR7z9uF|Gf;lH}qW9e3w2wT}` z>uc!d<@H=1iRG@rd@GRl;^cvYGdImxXXj2T$Un+-hy3=#2W@A6CyVqRq4rm=zS@5J z>TB&=-~C?u>es*4{^Sc^Y@h$~*VIo9XGTd&;z(j$ea?S9R<_qGTvKm!_u9iV;a((zaRITKk_l5o`VmDW7Q7mteX#W&0G zq@bjO7tRIgYJ#53CdyFS$|9N4kOA zN0qN5Yy$VA;G2J3)9xdle{zWfT^|K+0(^mXnF% z)D`ixAIno;RDWi>A$`Y9Y=>pmwG91D&5Gq1V;F!qlYs2$^9 z79yd=XYDpp2B&aPd@Vbu;p0wJX!k1IjqK%c7uHDQAl~t97B_oZ%NzY&YzV@&{pFK< z&NK8SJv13!6GCN^7C*&f+~r~MAL#f?hZ{F|UC(vbnP=QnPSVv?a7NnBP#G6|xThj} zc@vP8x4asIZsN^Mxe2rU#6dB9tGXX>29A_B&>7FU3>b)*zT&H_34(Ek z)nW;8WK93>VBlKO@!~RcRM|flPPo$NU74#z z`*P}{LBF(_SAXmc%StOPd9BSJW)e=td-|F#<1dmsNh>O`IqZM<7x%nqkV&w)jMKIC zg=LZfOZ@Wo4S}job%eM&&Xls9ZN`O+2{p;B&gLQnhRVw7T07wR@bI+FF}D20Yp*k& z-o`e5(4Kh8HydxZAO7S=?cn&dZEkI~o7Zo*8`rM1-}}3Nr>$(Rwzjv8ed}?_j4Kz_ z*YF#?Ee&8|L4XG`uzj^_?zY*=`Xtx{}Jv#82^uQ({S`pU4bzb zKcaLW1-~Zc^jzedfHM4Xd?}Zt+A8WHT&ZJ!Xs`CK(Gf<_N^Ho$bZp%iFS_Xm4VF8N z9W>A%(VaMT33+QHHSNqXhLit*7;^$chs~V&DLmioSBui}(0Lxs?i>FuES+*cb%B>L z@jSX{X=ycYi$+hPSMnWn#>T>M0oaAi%!l;8+Chvqgh%+h(B>kQCvN(d|EYZ zG5nXtDXtG4sqNuiH#T1k{myU4q0d)YFD~%)JlZkpHjnI{^F=ZF!I(Z{PA>DOKNdOj z@*wWj#SD3=d-V?kFFn=~&T_m9%eZV>F$|tz2OA!k+!@+ZM@Q)wJdI!`n=ITgkHtxO zd)s>?IBN^o!oHR67}mK6;|~sY(edvGE#EYUWa2!1__Q6LAEWP&+WN+N^HhMH?VWc2 z;cmNi>qdLx_MOZr`Vzx6`hh2&xZRe4??y8(ZBgeQtuM(^6vOY(Kw$>rm&elSH7NdY 
zXNGcNQ_qoS;dYn$fnUX$1SBjQX1FWMMr!vPo|g)gEAse~mTkxRmkVSeX^e96fETHF;MZ6Htgv0ddT?3LBbGymi-ev$><4f^86`IUC_ z_OuT+Nh z=T&p`Zh&!v%@Ocwe%yS{ljfvpuG#?V8p1qsqKwq|HMr6zXrnnlMxwlYs@$owPma|= zr)_m*F{e4Wz`RJ?blvcPw&}dTV>LGoY;0`GWbjvSLLdC_r4x0Mn|Wk-J}tnr>k!(H z%B}So1}bFM4$tOIog_~FSDM(;Nm_D{aV&6CFN9^>YmV5#1tV~9N}pc)F=1Aqb+7#_ zQ1zObgl(iWC@lbwOJL#)ZZDfWRYosIP+s6{-l5AApCrndUNI^c8MOP!Tv89UjPgz0 zg;O^Dq~FK}E>e&_aCj23^(G?8L4DY_kO|rF4rcPq3;Z+Y&playg|39x9_8<6+UrGP zWknkuy5)Oxx_y1gSNdqPii+{#;)AdQtFXV|=Sc6}vMaovJ8j*WC-aE7(i!&2(jYG+ z%XspIC~vMkc=SIyjdtOt8aE6lTDa%}Ib$8!7Ohhh6>==x>bLDXOksiCvmq9f^_h#3$0?C2S?ZA3UB9;>Hj=g zg|zk)nF9cSc~le>-@+<>V6`0b=L60~XzOGU z-^i_nq~#SAy$eiBgrPW>JQ)B%#Mm<~rvogy^V13Z74BthX$;vp7;dK)TOAm$-q4slz;itKZt7?+C}$==7Y;SMCCIBiiHqtM`U@j?T>PM1I>g4Sx)lo!z#}fXYRA>5rUJ zriO(({7+DZ8l&Uw(8#$1n;7vf2wC~5y_8d(foU~5Y;zF^!qjJ{DF33h-a4}#Sif{! z;9L4cc-FCe2XEQKU+R}X(?`GAz>(kD6-yrfRqzUjcV$OdK9fFo|HVgK$?uU+emkqm zR8*_8b!VT+&5*%3*@v@i1v|%UEg%QbZ0DeXCA|zY^6PS7w5BP2x)0VL!YoLvbr=*R%$!_ka)}Qq)DIurwcu#)Pq_3hV zpT}i5)rH@;e#qY^@{^c`E9;KX2TbG==;d9B^NbGzNWcduQ$d82J^Zbq(rgU-8hQv(3n z3(vOQ)2@RuK!GQaQ({cr*C%?cgW~T*R&6(0<@gd3{mDHEh_A<_1Y0f$?1AR8tdax5 zl!0FN(b?p&Okq`9q`u~?^ZShDQpOJ>XgHn;pg|@PW4MN&|zq5I~ zk1j|4=(o2v=i4VesLaj&IQQyzX(XBpF}2t2R|xMpA#S+ z<_DHX(BqBHO;wkyLxQ4s%CUN;jy2vsC6QJU3 zy7K#*aHCBlGI14C5xA-RxqAL`buVaL=qA^ovp-Mhew=$u`Qv%Q(|(Whx!-%;U0fs0 z<>$w_%lPZK|607KsQRq{A56Ia5aw+4qZ=ycfHoSyy!s z3#8cY4|&q2#X-2>rfv~fHb{p4D!70ErVLMa6HaXLq%U4+TS~V!RN+xL z6n)(eO}inFJhPp2xh=cP$`l>jm!9IO?O(BWo*`t31$G4}l9tS*sR=FYB*p~3{hu-f ziF1)~VQC(_|43)Q9^)bxljz|IBXHBt!#~nbKg%q6)vrn1Wj7tJe>|7$U0egk z^z)RPJQ5e$N>(nMVctFppYHwx?2iin56u5j_RD4ZFT2N;Kb|A3T#v=y4*S>e>DThn zFz0)8(xJPI|I`&YxbOy-=gali`SEMZEth${KL3`{0E##ICm<|?OPwiBv>A`!cZ0f{ z=V|DyWK3XFiSq$`r$t87YB8dM^4jDX}^cQ z2y(F7jt>s78ICjew8XgB#cV8-j6+d!?d;$%8vvXuSzcO?J}FxVS{SF#IJcGYzkLeh z9v4-Y7+5Y67WlXnIiR5~e+e|3H3Nq-obCyq+GEaJo=~@bB2;k7 zA0z@Fdv$3cW%%~Lb*G)_36AeSe9-RPzTKXF`sus~=K_y!G%q2C&Z~QW@Zdq)et17~ zU%u>+#dffCqg(7<>6zGE(o<3psjwse(DpRO3*eY|TyJ}Zhld97kiHxEX&>0HCUgGX zzS1}sij;3Q02mL7K)cvGN<8MfhQ1q0)xC&22ZFbVO5)P9OEY8ZkzP}+tUC}PQaUb zIQE4i#xGZ|UTxbCcG_FNdJCLp+t%h)+AQVF(%wMjXRrPOd{^?B9zPKw&enJFui6G7 z&`%lu`G^Z|r7LO*{vr2%GQ?J4ogfMDky_-cQ>T+qP-IIf+--v$pt8%K$*2*+tIfrx z;<<{mE{V6i*n*~o2j;^cK5HkL!}&ArFe=Ls%944lJ8^K1J@yfQ>M#AC+^pOl`za*y zFOtR=>kP}MM969K6wB5%!6wk*)Qo}>=*?SJh)U%=bj-EYeq{Y4jWJvlpq zj^!*?`uRK8t(5Dd!$VmSI0~$vtabB1URFd;c?xn)m`&TGz{sz3%45@kf5lArFJjhN z=8tq~X8c%>l54+ezrvq$I{?X*aR67^p#EOxqfgCxBq3*XGO{f&lfsMq;8y-B-!uh_ zdNL+qz!llJksrJxlbwnLbv!0%OaDYikT3M%q^CSw(!$+*NCkKIVT|8TSWIVa!e6Ien(nOfJg;&nMr418AoFpfK=}g04 z^csDQ>s-n^UhW+)%S%eCFulUz8CSO+ z-<>+3BCOY$H|GB7-+AS8rzaw@X z1VjR4j1eY~0am=3jph!_$fy!(n$83ntv}9W-78pO+o_qiDo$oA2!)V&JN=z>)qKG8 zXMZX|$}+z=qW%)BpeJF$R7m1_IhL7lCy#|FD$N1-!oD@b*=HUP8 z`c_+9-DpQNfR*+280zvk%Gfq{D(!`%6H_^giH=m1lriPUH|F;bxA9kJdwO^l13(31 z2k-Z|lga3+e2zfa(sQChfr++*1B`n4X?=nsC?Pu-eEY}y(0o?1sxb~+%XJdn z1!rLhOTTW`w^=og3HR80cZjFGDbIBL%BJ;VefS+XKoMU?MLL)Mobrh@e7up9fmJe8 zcsd`E#@U9IN1d%%8F6!q<)x;Qk1Z*dlYH!0%f!RM4G3}pwp%xhySQL)x6A;4eRZ?lx^XL;Tvpu) zjZwY8pvoQ2>N|}_Kf&Ne3wQA52z?BMe236BFMM}oBb58_P`*26_PDft2Eqpiduda@ zeB;gbt?zxeef8_#YhV1rH`*8f{JZUszVn0jZNm34UNEoPTEAA{`3Bb}ReNJ&t=-<* zY@6Wpsv)aBt1O7|8gf~(j;ll!5XT+Z6xE%Gc(42;gEFaHWpIawwDmvH`UD<|l#NGSN1y=9@|0l!6*XF`(B%}?INMfmpB zZa8s3p7ty;W{>uOQEvdjIqIlrJ-3e+XSt{>*#@m|=#-y{6LI3zcgD#&yZeKNQjt~ZNLkvHm^{@XG^#0c)iN7L~D`jmmMCb#^nI!S@D30y4K@MKXZ zC_}(Xb&6l~N*U=yaP@e~%}Ex~Ddl1Za1Utj!nf=Id(@R2d-u1;Q+Ab8ag*@Lz4Do7 z7fK+-;+|D@Q%0KRv4;Dt$dqms3t$rd)9QbxvOC2EYMtJdc9wHi18ZJMs@V zN!Z=x=gaOf_y@kQy?YHfy*%YB%;35EO(Ab|RexTUSCNY|y^jThfq7c!Ej4kV>wNP{~i; 
zYy^;DK7*qoknuxgLA%C|q-d)Rkn&x-7W(Zj0BSjgJ4q+((GQtMIP%B3tqHsfI!%)9 zv-g{s(05{*#PVO5UJ_7c4E!vU^3cVJ2aa{=31jA~BPAvyEYoqzSJCCeA1 zO`#J%`wwLy`nm)pbX2Osmt<)YJ~{MpJW0)`J_AbtxIstZW4_8??Md1mgNhAO5PrxOzGxVf&QEet^>`y{xC@8#4Yrhaz_{ek1th>I# zr`mbJDtRgTY?nTdF-ygXs&XTI^6_W8cOsnZXhI4IX?qmIW&6dRs<-Y^ALj=ELf zP=;;9aoPMP& z`nKW8$xM6Wm%nQ7-G9KCJLDuYfM=i;EWi2=xzdLl-KWR|MB$u$oCuO zznty^^jv{rdBeeVT) z``B6MAxSf1pNhW70vGy4=Ua9swE1)Nf#s+=U9ehPW5i3JAARQ-O+7}u3-Jy6Yh`sM zW9`^I+D)XP_mZ;c{w%J!Ia4}yQE>Dx$JkT8feRhk&`hZOcz~|@~fW^(vk}gc739&J`~ro zF$s^X6@B*0k$u}cR~@5GIP{z#C-O=s-<_8`IrPnb0$e=Ko9T?J_dE^skg`2me5K83 z3nG7~&gpp?jQmEp^0_+r;VFsgZeMVcrUwrnW;6Zr+IpRINt`D^h_f%P_@a_?q}Jt) z8`s+^a&qPBm6RvnonQ1*GocmSr8oAUPmza&)=xGD+jou7emVa~kg{V2VVlNrU}OQd zjDF7XqjpB#z~#@foR61}zPz=zwn6>XizO?o>+SmW>+QuCUud8C^rzYrckZ;U&8@b( z`=I&BSLXn7%7fzo%5yP4FaIq%4skKA#t6PBwMzfz!hh^z(dWuyvb>!0Tk&o_=xxww z5=ny=q#|S%Y4DNC^9WxGrsX)7EdLZU+hMgI(#S^w9f;BlGdj|9Q&N*|9H?VT+=ZtN=Hbt7*jP@W6$Azb6G8dz@2>#K~}kVoLU zLB&lw&T}hw(pj$Rs_LgZ@b@#WsRMr_EIdSC>Plxt*TYL7aIe&Lb=c8crn4>jY!EU} z+Z$t(z!_yz&Uh|6+IW9UUJ40ID7n>_dP8msMlV-ALYm4;Sh7QLqLuxs=Y;B~+}smx znPlvsN`|ao;yN5Vy&+q1(L=NiYXJGnr*Q0N(Z87kET|+GjuV;N;~$?&&|FRryYPx9!_6bEoyxU4(#Zl*pG_u4mUreNoWUS{K~_K%cX) zsLrj;EY6K<^7t~WdL?iCFP_3HKw%cBBm9vkY45^TpGZf?-#$RxQ&)Jr=hPtLY_HJ` z`nhL41;4zp`iigc7id+T@JlI%6s>_0#VqkP50{CHP(rcJf9&VODHSI90kcaL?|KITyU_oSa-gM3~$a zxsXBW^=q5`J3T1Mb$7YDFz-?EP`7l>GdQ_h7C0KAcIX;G*$|MUC)`2ht};7d;}Jke zQ8>*%KksA|H9^=c!aKj95G$m>fLC>thm(swdt3~34F2&X&m*XWLs)3Geo7V)M(Rwi zWw#5@-kT>PG#c8x3&7wG4}D8hmj;mflaDTEncKK3BX2uc7lbM>8c{K(Xi18lMnO7W zWrcc7)skLA#RnxVjZb+IBse?Km0dzyPSpDLfHGrD>YOsfBi~HsFLi8G%Aww8Fk~%H znhg_Q1&J`*i|xi;M5FGbgmRZoI=t+FR92}kcTFji+?Q9@+sek3ws&;au3ov(=5sth zJf`9MCdBMKa)azRz&SqNiP3kkw~O-enl!dP9_CSGIV7#L3a%%UuplO`t(93N7K@@Rc4hsr6nbBg46 zbeQsuT;}b6{p}=?MrM?KIk8)$U+D6@( zPnj8fVRTxiehr*#FhM?}vlSasE-h_Dhkp7qpKZ_I`&fJTz5DG)KmJAg_IH2W{^Sc^ zX>J{tYGB&MwWvzb+2!VIsn)|+-VQCAGX(Cf3+uv$`^0lwG zKl}C%+MoR3NA1sE{YCrBciwNWVMxCZ{s-ofk6cUClS?JanzW)>vY|*FxxRU&J$3gP z>f~x<*>2r_f`RX~c9j9;(i(ZdM|oLXl^3glrf#l{0hDO{spF>l zO8bFYM3-g*?JK|peLJV#wrCFe|5KQr0SMvvA9+X|7?$N7nMntxpE45G?+K$!WkESj9_bkP13UdM;f~aNPIw%Prvc4_Rs(Gf8PGv|J%QAKmOiN z+VB7V-=*JKY`YJ)ql2>Ro<7iler|~>aBR{mZ)A@AP9E`dnZ@9$uNu=MbG8vT zia~e!er+948 zf+YP7aXl?nnlepi-|F~RS+;!TUKl#PJV1}Msynqox-Jt|*Duy-CTV!ggs=9twg!B6 z?5}Jo$N9ttcgu;qVe^LX-1BWt>aS#~+DNs%lA9V!d$)WLhp+V9q=y4s+JMdlXfq!n zkK_gh^_V(FCt(}7z%T!V?F4LX_R-FEY~ox4e1JKg{G*;!&nTzjJco^8m4|=oHA?W? z(vH*QopN8NsevW*IOaQ(KJMp~OH?~zjAmPPEZ|~ZP4bJ9w4CLE7_

+1{ek!g7p#~UN&Tb);X@ORC)Rsjkk>b2$Y{it1l2NgRIcj>TGR#5 z8#W6|hafBbkU*&T5ONV5Fc?$d3ERsG06BfBA+WZvRM71Uw5*jqqq_S(MVSYfkL}{J z3j2dv&_6n~rYY1|&YQbn8wE_e+zW+@c)-}&-;A?EhOgqIYYG}X{h-X&v#?w1XR_ru56h3Sj`heHPMVrB3o8&w?#Q!fs^|Izw$WO-wIG02&T(g&^I{& z#l969$BW<3#Twad&m`MV=ZJ$Z!#+#=1(z;7?RWwcYlRuFMwOT%&Bf`gkEDiQ<6d3l zxSPiS?2cJMsS|huR+OGycdAF8R9QSYbF&!kxd|*dDxr0`3xhjgLpWzZv+;R1@9zGf z4j#3W9zzppGMR|rVFo>*0XD;38bZ~-*8zLJxsEK*!e?RiV1(fjZr#gnoDn`4l z{2L34GNcjMG6z?{J1#S>{rF-Qj@bf+g52-y%@6YOi?TuV#vWhk(JHxkF8vv6jo<2c zBuuP$=lu6BpQL2_RnuaK2MXklrarZ6BUr28$;#6bfr&(I<>ikv3trJjcPcO=M}>7W z?ZCF-D7@zA@($C6G5#R%nI64~L=FEy zqI!D$mE=W|0f47M-vQgaK3^N3>{!joz?Xn98{4p>Mr1Z&(e6@cm!3XoWPO?##9o-L z_baa}yi!qFo(uKpCYy;dmze95WAQ=h}iW_8cIe_c8hU^Bl`gt!M zNZF3U>bus>_5#+4u0jrcZuiuHR_b@6u9|;)zn5y;4OrvbxOr}Hl9{Z6RlS%SnZ5Er z!_8o}tEhoFIA*cw9w5yoSq0sJg%S)1gUDeq6(K82g0~kilYl(ICBqd@mMgYg0-DUv(K`ytK}v#G+d@_1?XO5Vmmp0&2B*a$F5#!HM*it?lkQPz0^WqBJe9n zj4aG(?3a&H7JmK)9mkVJkKaljfnj$E<=ANmk=$;qAO@_=B91LL+b|pz%nXdH!N&U6 z-*hOY5r}&m7X=@WU`#(b!uhO6OeIyHh@H~#G_BHe_X@=Ck3%-DKu4F&#Y_m9WkajO z%xeS$0esOCvZ;lxfLw}Y&k@=2N=xCEDyV&`?G@sBCWio7m+dlJ#s4#dFJ=mD@AyLm|d=+vyx;P4Kxc^d)lEv&Mtg z2Hw-4ahbj8e~y_!{yV6tpq0571phY>+WJ$*fp$#d?utdtkEPa~;i^%t%Xpzf6$2v2O(S#@BB}mPx7-( zj^Zt%T6{S?tAY{^AM85mTe0LtZwrWk-WHm4u`ZGrfPzzPAN^X)4E$;kJQ?%4(BZ*b zM*No(b=Gm{^e&X#IE)?fjZnV}B|Zv4SD5RgB`!!wwa}k#bAo2=Z6G5;0*gfcK#UEJ z95{A&;8z|bn;pqnCPSPENlPM|UNBQTbYe0rk-(7Sj9C*?+Q&T%AFSPzgCfhu1N5C= z=RXtNjS8Y4O_16rwNZ8*bqpwXi9Tk#K24K(21~F{wUJ6WvPN4g;M0K0fX$h%gipU> zJ@Zr>s($OyE%Z%cgf> zQDAH#w5F6{P+ps(UGJJ*R$&&Bd4DbEbc}uzP=Gu`i?n1fd_{^zvO!JF|JH;tc-unU zYd_jiO`k6eL1{yCS-Kx8+o>l<*nzR6Eb@q1kL-!S9#v@qrS<{Jj;wOP!uHidQjnB(g8~@6(69+ zwXfn-+U}oH{<-T(zYgixk#$Y?K_8l1&PwX6`)S<||FMpE+nt|yNJ)Uw*soZ`&ZGN} zRo^wD;gsiDN@TL!=YM55@`M@_J1mlXW_a-(s>-$@o8#%4KF1qOYX5i_IF0V?h~4AN zMmc{=D~uZh2#jB;%!Lls7_iv$0Rt!uT-$d9n&lwhrKh*P7$oLlu!IZU5Fp7Q-c9- z`;ii&AD9n;mktz@L+|(6DXse`@*j@({O9obfY@{0aNXbFU)u(3t&yXg$VH3k=LyIV zOn=QEmOi}41v!9W86;>3=;20CXcXklQ%}h~q8A&G6l046aQykje*_xDqi^tzIu;fscEdqC?3ny9f#1!)BFX&wKIcfy zPwynsFi$+Ljg#eTSt$~0njqBmglnVbqkwgZp{V%Z-X4f}vtRELuK&(>*WU%gRp$lA zOzn0@!^`j84SE%`$Hb%Pf0&ZloP#3?c3|Qxx+NK_4RwAwED^~rSuxqI9Y_cwE`EAX zuwC%gz(lLKFEs8H(}fQoQL);`oY<#L^^u6qB_BwUB=z(6kZakO3et5{@@H|f-wMyw z>PnNCdR}_Fd;U1N$6kJ7*fE*U*CFzF=Jk`f`&g9rou$9$K!1vlZ<(UopR#T}cVHnm zzda-creQW|Syf7ntqH7Mr~Be!t1ZA45*oW?(0y+IR(-mt<&ZC(j%Mh!{ElzLDHk_4 zGfD$_C?w4PnYB~LNacf!uZM62^V+0_sZ3Fh_e6LqpcXK{!DQd9T|ChckB+Vi*Lk?;_|cO!mumtUfAJy$EKx)uB#~?acTh@r z_a&-O)GOzhURU&-bZP2JUOa7~`Rv)B-DJovX|cuTvj7b}?e9%I$kM=*pKUF9;;tc! zP&DJk5!Gi!4O*iT;R0x+#Vh6TPi))O6}LNIQGF9A%s2-kE{I=yj%KUA1?#M-gQsy&Dm9pv8bWt+*kHb zt-vBz6JuQ#BYaasFO<~jXFd&6SwA@AD-X8~Ma%t=NMSjOY)LKLQ8x5sx9jd^Q8E4? 
zCOIwvcLxg^BABbteSQiDLeD^?DhU1RNxx5g+= zG`m6Knl;b-X(o)!(Ce;_1M)BB_}sd5LtZ<0Y`QQ&EvahVhJ@Wms#V-le(9Iy(kc8U z31nW?l^1XJrRxx0X>1J`(N}$s;_9DjZya)tzt7vKGj<& zl`3w7E*|s$nOj7`W@UByJu3g1uO*z_)!LrvvN)_RT6EN4n8xU5YeT$-ktJrazbpVp zGla3(Uq_{V#Yq)8LJ24v%5!39;`2UrfsF3?!S^%SJ;i&nSYZRO6S@ac!ZdAHLR*@` zlfA1Xw{sUey;If*wG!p^4fgo<%unfYw-Hw^2P3zFmW> zf?42?5om)E!YKp`dkQ(Z#>XSW2*U^g$i5i9A2~Z`D>eu8c|~XNR5-Jgo+n#O1z(9h zgPw7AW@a*Qd{0-Yrfom*zO0$^n8$)>dfCl6Q?y!| za!`~BdH+7P`Aw7ToYLz2+GUiTd;UXtd6iG`@!wr<-aEY;H4x$CJTrbHz+pKp5jtkT zzrBYlI|M-f@kwz^RxbFggqtWpELRCV$vFAQPSSLm_E1F$4vEKhNJVRLtT z1=?jMBin=%!68tuAcY-gV`dzgg+jnw%fsJ@FWdlcr_!I zq@QFm7W+nYO8DkOoXKjR?>nn2yVp6xB~>Dg(sR%E78aJ;&ke?Jah`at-We#JQe$@N z4_1S@;V(Wqa(I6sctbD8K%AJQG-hGSAv(5bcDE~L*g{!tRaYTeL{5#FO3 zh56?0dD3A|X2>u)Acbkt@k_M06FqvrzGtTnA8*gN%83qeU|iisYRv6BQS3kEdD(uf zLPg}dx6fRC=^2lDjjcbDofcz`p5h^wf1*x!gA*kU2{=uqf_4c`n>=?mg1Vc4@-_11 z3xxO=klqP&r^Uup-|PlX>l3CBnd1pirZhViMYcJBH+6s$(rKe1y^PNe{rae0fAL8Rl_03b*NB4mkaAPpa9RmG@T@wkgDJ!B@5;7E1@0yJ~x(kU;UE9<gK1VSqr!z9{L(u@jBPYG6W!<}@`229t1T)!B5B60>`Jn6doc&Hrbfnt z-$OiCKZQ5A0I}$*@xD{bRF+>Vl16;w6F-+XQ~jeTT4`>ArIqSZA4|a#qwsqbUP=tz ztrgFD#o%#}sDT`;rPJwEgA{UOJVf&Rew{hn?<7{jgynmnA!T)}fN#F%>MYaCsZ(B~ zciFJ2=*u-Fo4^e!m7E#cY{_8*Yne@91OL{@-$#54U^2qR-`yLn^i`(3>xOl2Hj`@f zVzyBI^`DkiVw+)Dz+jnr*Q-^TiylF2#8UYfe$mZp%(v(2*%3U@kooJP*z_NiW32P5 zE`$CMKT+qJltUvviagMx{}-*)n0&G@*DG68=Mozi5!(xwTJ0<7QspwLfn&dtoi)`| z(f|2DxOsbLQpe_;I((sJ5W4mXc&QDd>jIoVu>5Z#TSU9?>%g3T5!chESa2csD^?A{ zTPEwl#pWN}-nNvPqEz`aCB+BBk&+z4J(*$60^3pnikjfM!!# zbh*=^_m7ntJQ}XmK0KsxO?Z1e{!MtK{zl&`c3k6+k=ENS!><+r_Y74dNqV$oa|V3s z-Mu}&zA(_MztIc6zW`N#i}??8UsAbDPtvn~;h|~-iMfeIriHwLg!3Pjb~~O60&dQB zH#ZC$`keW{C)56j_XeJ;y$idCQcnXpcHjk0T|2UhTy>06*@5-S+dZ=F54(D57YwU{r zIsByhYUG(h5g_q^XCq7epz>r*mJ{OI%(FSBBSP|%D**ECL!v8c{78NXdPjgi88Y;N zk`*4#Ee>IX<$`4OSU&jo@TnT9JRoouEFyG0>zU3GCXDVT3nowQ>TWMZ{W<># z2ML#VU?OC{|6Gro2qf>PsmCSN_5Z{koo|e9SXUGaI=psAZ(cRf4(zFK7|M`bY$I@a z9CWG?VGnnLO{)izmUw&QGT%2&HOZ47!{yB+%A7#KvuBH!mVkxF#)5{LQ<+;P@~h4) zfKsn-Z>q;z(h;Y^kOyMu#tuG_rxXLioGF<%o9jo-mPo2W^*x-BO6e4zQbi9hmcY{kWAzRIx^aOWZF@hKC zxRk<%fFsSIpH!fGKgWdt^)hc(LH&G2QWq3dxi#&a^Dgx zQam3zlP0F~hRP$No;uYdc0*;hy)rHZ#NhEC7))xZe18nWIG+ z_6J#%bp#WwZC{Vml2}HeZcZ93R%R@&G+ zD?HO$_Y#-9hpqDMJ`bxPf)VFN3{+Iw;6)s=I3Ax*7--P0YaW~i^80Bjh*Z_w+(Rj; z^~$dtA7_nC-gkQEJ<5+0+MQdux86HSexs;p{u9*<4MlQ$=WP#oH> zf7fpH%!tQ}i2lTkqDl!{JOiGHb}j_y%z~I})A6zUteF?4l9vA-*2XcOuT&S_;fP@I zq2uFL0^PknMwnsbd|+r(CC1}Kp}BDKo|_X0Lym#WCUX1LnKtBNI{2pQ{pRpA)fo@| z5YT6bLyP?dvuvK=I-z|Bz)nU_&yll*-RlhmR*c}O6mlUvYbG|3(`=WVQ?vSJr8qeF z+6N0GKnbs~XgjR#iYs6>`1}%eb^-!`38k=uId`N79$k!XLBkVUf^b@ud3N!|{h8)( z5EhnMfZ4H|orsR^a`_KtI%PdAq!IoWKMH&5lKe&WC$)Qazt<`Vt>jVnenuydvaDnO zQm_~s58Pr+W~5Av1S1BbZvdNQzFK8{6qFVN=x1q|&S{I>1Nmy32iiS>t`Q4c?~-zx zty8wSdOw&I{uN;zMSS}E-JuL=B>tJ|wV}#u^n!E+zvOxfU*X*DYgAQ|*!N04|3AaPhPIOIT6%_MTk9HQ1>jCFBH?1U~0=YM~`{jMEs@tEa zdiwE`H5*Myob}pH@iIaBBxr-&mviTRF_Y$G`hG`Pm@RqyGndUTUp4U^KKXD%AOeE$nOav`Rp)< zvIG>{JR)K$;Ol!D5KsGZsq=61W&C;7pEGfUdgN3}ON^67PCDmHLXz6{I7u>W z8o9#q@&nGq6Zz3Y*>(uYH%1r~X50euMB zQSld*)d#Xcnaxm3`4=Ovu&boeGDO8lI)o2Cv28?9VOH+AqXnW1Oac}bY*%ess462z zK3Gx>oW-EDo4eGq4UPWu2ehYr8s5`9{zl zjNQmKbYYex1kl9orT`D5Xs^A@W70!N@NcoLpJv@|EO)sFHjrJjA&P=wCDPfm^GcmH zn61&zx`Br#0+}}2^~PIN(gyxc$EUS2!^e{&o z&rqICX4GnMlE9hY(#INKf364CtE_7RTuSh?F%G{L8+Fqh6zkVXY z(QJ9m{|I3XlwsSlBE-37m3sdayJI4>Uy9AibvirkvQ_8{^^=lD)Q#sb%omuhEmJ$5 zsfn5Ur<1&CeZkH#)}LlA2KM?r9pu#%f3p5R0H#1$zY$N=i;kz#y@nHx2)!ZB)BS39 z-h>7Xb;rkx-XLdlpSPGm$7k(zD&=z~{C6n*_obWurQf^)_@d)R0D|nA<3@Bg{e@3i z-T*)vJJBuuz&WTtTcAID%uNQIQ-SBch|NmR%}E#Df}nnaJ?^j6E5(iA$Fh@l@MgSh 
zq0i6&emy<@JrR!d%Kj}~l;E19H@Oui(oe%f{FGG<7p91*cH_0{K=7n(^J#kV2S>8} zc>%@=zEeYDH^aeOkIe>R_)%te6NrR(BEM(f#eaw)op{(xn`K`|5An2rTqs=pt+8ne z)w`Jfryihqp1H|-BT$tBJV*<;7wI2A_$B}g7lH)mP2$3`-bOO-l7Wuy|N4`L7^1?G zk#8ul6~19Ub;;{aiq#j{*`1RdZZ4`e^UXidNMCdYH^&!#Bt}o!u7C~vB`#w?9E1V~ zeC9Vthyvc8&m`M3r3&31{LxM3;?Dg4^v{3vr^?^P!C>%81R98TwUMO+OlcH^5$tGi zOqU99S^L z&dY{Y??%8LcOG~Vghvrpk>hz{%O=)yoobJ z5(bnu&JYTpcwE=O9e)TZFP%~JK;?&lzS6hFEI{w7pNTU={xm=bpVJ{hdXese_&K&y z1@IsjFz?crF-fuf-{d4k^2-}_B-f21sQ%D}$`AAh;6H>;-sv!uH!Gl5 z_L1532G=Xog@HWRpDo{E*uJws)z*{hS)tmm3;G%6^QViZK)9Ye?f=Z@WH!vVxY|eQ zVZZ3@uRIGw#RI#N4m@K0H@{|$YX<<*5XSj2px_Xu`PK3BdGlL=kD1sp1uvFgGJs~X z<)zHAeIw|`OM;J6RR$mZ)evw~D_wawb7bO|lZ-+Gy1bBCctDf&xnT)y(rc08ibUeC z@`>N%%TLLp9cKaMc@>;_x;+7oXTq{t%?Bd%@e=;@9g&c?BGx~XprQ|Q&_VzDNniAN zSM76MW%L;5SF5SJNN1F9BazC#CK0 zp}mw8qRJT$oH)7Cgvf#tS+rfYci;Y`g<}#*TAmz*E`wF?-^{lfm0MquC&Uu#L2>>g}N+KE!4RZO*00 z@7lp~i-?3XgIHLmCCZq&!WSr<&ye#MapG9VK|R^2Kz}K{NU(mkW2}`T_z&t0CR9Mz zCZDS2uTGAk*U356SvSD=Ud<;BDcDJV_L!tfzYM{pE!pA@#>l6Lr*z2k3#FU<sO4p z$i;e!3tkKJ3$eDwQ`HxAYPcAOdwcP0V5*Jq26d%Nnot3zW(ML@$EnShjDK8LTo>J7;>WGyK(1}kK^C}`CrC|AAX{C!S}22_FJ#VKm133 zKmPC^{9&9-p2p{&eiD~1U6e28Vp(mo@#tARdHl3{-9N`nE^KaW?8d$O_u}%UOL60s zTk*vgU&UK*y%pO#`|;_QUk9)7M!Eq{l{^B{bKy+$KQ`{ zfA`(kKiG|5eehxY$N%)R*xQ-JJ8!-juf6d~T)KWGjt-9EFaPGR;}^gBK*ybP@#^jC z@yCDo_u|Jt`JQyV5|1Bl#Z&bSK7(-K!ueQQo|B&Qo`d1*W1;-dMVtk-`O?ao`m$!K zdG$pPkz@H(Ex)*6KQ1q=+OLa?OZIhLkKnTl{+8w;7o0f;Dfq-*ZIX;ySI_y=qtNA9 z$2cTkbB~9fc_(Ez2cbEY=q+F_H%BvtHj@QDhJoJljUoSS%nq?<0M(@;p&jQ33={^y z{64F^1DKV})OT74z&w<(iz>seHu^I(&^XS7rYK>z_f%`*VT`q`MZNL&I}qvG%>xu zOKywXeAXitpFU2za|0p$h_6)Wyek#Zk9>>EpT)PEzo=_vuB$-h5L({&q>-z*3)Btr znAgh9t;<(g_Rk(NZSzTjrg|Tj#$#Q;h7MfdJF02`|FIoCpBjJUT_&dvW#sXvug~D3 z23gy?FdZhj`HKjW(&hJ0o3z`o*D_?fTzR@b5D z8nw!MzN47&lnI|_ce)S_>`Yce+QzrTX@PzKUImrS>Id15sypWV4Y@QO5F=amNCuz7=m_wOV}SmxdNi%R8GYM#Y6H)=H+ z*!1^CG&scAKs`5790lK*M@DWqBZ%{rq~*NbHw$S|#hM#Q$jZ-a(Jqv+wpJQlj--=h zuwl$X40e3B0X+LnrO;V_`N{IsF5akDlcJ-a*cSueD1FHH`VYOKZCk~k>kIV+x;I0W zht0tO=UeCEK*;+CC$YbuPstxnbiOq?jDra`2dGSP_S9v!5eJ%Dx=g8P8Lawdx>2uCO zeIBa3ex)O{OST)_eCj^sptEmu5RTtoAia2-9Qx&F9ynJ@fB9J9zKU#OW!0ywXYn*W zZNsM#@Qv)2zMNK!z8(c*)f@PPC3A*5JaYAC8wipw;rI?e z`ArAb33_PdH&x~wDPKsB9M?K;7jE_eSSw8ck2SY!5UFLJ$ZsW!AI1gX^DyzVULaFU zpXQAT=qOyMahO$?o}M?9WbmCxus8md<*j-seyj#zx}DQco=djzZdoFu2we~-y+xQ3 z^piGoy@JXdLlhzVSbd_>B)D#fcD&VqbN6ZwLd|dfo142c|3CeU-}zH@q}y~Z9L&{8 zzXrm}C@*bz699%GfN8;#H6n;9WYFtC&fR2rN0HK4L%n2kdhUidU4Dm;r!$E2bttJ| ze_+p$cc+P#1+Fz91{b}!vtMZ)6ml3eTnyx7U$oVjsdw@`=3rwN6f7#oB;3NulJMcb zWEmyL7f~h2ca_-Ij#TR4=jQ0-+$g|g$pJ_8;>i;+`4#|`shp=3 zxJbkIYurq#tBYs&*l#-Uw?LrBDEUZ(&ic7b0p7N*=|d7fqN8n>#`x21As;FBCNS;h z?=umwPq9Oz$3AfB6BODnRMH-_1A~V)me;{$VP~1Dwmis)!6hR*Wb^SHckrm(2s5Bg z5N3p9$-t+1p&Z{T#_x*Hd19$D>P2Ip7Qfbp6yuW|ebNHl)Gq@U!RQ;!&Ms7a!?g?r z+os@r`lIxkiPWJ_GrO)DrX@;zoh-RtDA4*x&5F4uf^Q) zY#MY$E;+^YZ}%a1QJ;NfJen$+Y?9+^taE;{=WT?l?Mj= zEU-Q;KyNwCe{CFep%a$+@eLpQ*!=?D$m3uEZ)g+O6T9SlY*B7qs7D5T2~^=z^ltLq z?~+H&W5ba1VbK$cu7ys2m0K44mF87gq)h`ZptOCG@*JA}38cXt@Ouuu^|n8$uaILQ z4~2bMAExm=pJBKAF}z#o#-Fm1)Ai{=``I_ve<*{wJ8iRm$H^!4F5aze?00~CE4Lg$ z)y--@%S->bua#~Jxgc)1hNA85_V-(D2eJOu$7`Gv->Zn8B^FsRFi+N<3mR4-@ zd*j1;f$!-8$jA%Wv>{^+K8;=>H-{G+KNQNjj)fUM@JSWp(~MN+T8VHcI+AYco1WVS zb_m}~FCSNQ+(f>4XRyBQc*k6%)s6D-fEK=(K%V14#;g(iZA0}>uW9hHOi68Y%(8yi zglgB@l0jngMY0P{P3UsfxpmGe7!y(k0kVjp8k>jAp3*HH0A0ewqLd0 zVD|=za$3B5jcjAh&9c)T$>WuMl8sO8Z}H?Qa~z!fps=O>gU;>YQtWR1j^K6zmHv!9 z>IoWiX%FiPa3zbjKqu^#PpbXuSf6)G3s6}h+e0+f9?q-IcHrBjSHd=Ad#9bKgBuLA z5594}>_;E;%P^;L=JB97$BRx#>~p@QNqeSWlu^+5w4CI}aURjbHG=(z-)$4?Nb$#3 zKk2m4$)i2(U(o1q$g!Edtc)qd?iS-qBT|Mu)<@Y_G6==(sk-I}e!GC4LqN27tE~te 
z6Wp%ZcIKdb&XJ;Re)qg3ooR|**M=j=F^vuitP?@xA9^LSU(0680*}7m+m&+$|Jc~l z1p1h9vz&FN$C+c=3cl!={w}|yhNAGg4;jc(z!v+Yg7OYYcWV8K{yt9-M~^`O?$pnS z@grEU1UUZT8~a<}b&!Jn3tf|M9}}QejE)Yp3qH%>K2+shyD@Gw{*>oiflN(4j>vDk zmVItP^T_;iO!kj`QG%!R7kHvWA{JEJ@z7&chBw%aiU4u^jjBKG9fMh^2)U_q(;#b=jkp^#K=)HezpoI}T5F zV`+IIR#sMQD^HS4_V|>*R-8;uRK6Yu+lR69Y*VLH8inVU6PFA+$4Y&pJ8&YW<*)rIpvb zW?JS)@nc?Q+<9!a^9bXShYGop!F+H1Qdh@Blp9Ah{uZy&Tz!JS@X=pJt6T}@c+EvT zt(P+RPg<)~a!X&YTR>8o>sp!TAI)PvFJ&HOZX@01J+~h?K+%(Id<-QwW#n-OTL`21 zQ|UbZwR%vUPCe{{lIa*-Rn^I(-6(5$=40bkFmeX_X}YMci%45#IDROWANUTcdDCW! z^ZH9W{#j1qr=Hj-8>FK}_?!hGLmwg4^m7I}lJsZIz37SFwm;h${9Id(L;AK0Wp{;+ zkNk}9x?>(OL1nI2V@gB|H6{;J*EQ{ZM(YU))~-;?yJ8HOhN^O`U%_ zd3k+{bjfV`E96*0CizSc?aymOjN{BZ7>)L9?dP@=p2rV)0Njl8Is>*zZYM`E(Li2a zH)!Kp{Cv)5x#$bMd;+Vor;SrevcHvnDkJT@N_>q-WxftYiTFvET!UwyO1;Ei$G|)n zz^{OQO($M$9h2)S`1?O`92nEZ2cF5l?;q3=8m`r2)}yX?P?$E^$Hu`I*pz-re#NW! zsi(Uw6Q6`ph$Ld=ExB%)590x!rCj(TG zIo{~DJhW$n`$+P04A2ibr)7PAD4X#M`tZhSv(S20N-Etpphb7crnb^-I#1Zyw)XA& z{4y+$lbqklw>;@A73a7nVY?)YO?mO*`ki&Na^fwAd2KG&^He&qU*FHhto{HF7hbR8 zGyXLe*z`O;lHlAG7!H<67K zHuJ@gn*a_DPJ9EviG0oZDI4Z~J-_&J69fv8-#?s)7cv-!@{Qs{(fShtDj-k)a6fQg zz{hP)Jzi41WaAUH@4VU}*TRaA)VH;sIyvM`2C}Qs+8+B;F+RmT$2vz9K6dc!d(J0L zWDCa`jU>_I0Q99pMoiTH^vNUofq1ZN$oXGwN^pP2M)E=gmzJQ9sfPY{%$o-^4v#fQ z!QMAQaMOr*agz)7o0I~9`yaYGjXe5m&A-?uS{dW9U|IA}bW+x123+)!r`Q|y_=FZa zbA5R3glh!LDp!A|kNC|3tFB`X2&)v_JM^8jsuCkyp zcUgVu>|>gAuA7rQ4?-)&PuK%j-Mo0Fx6-@0->eeY2X6rQmp}efC2l(lSW-)iBoDMs zIvP}pksOag0*_M(VFbbRbGhBia7TJl|g75AYig44E&_xY3Dazk~TQ1ds=xR^M63ubeO3oXn9>4MU9@ zT8TWss7^S1Lcs^Y)|k8shNoiu`2zVBoH<@ysMzLt0Y`$4h=Lr{;DK#MP$=lz$r|7% zQdUCPuBMwwhSD4y-~e697}Dc|Owq>F8>It$lJsb1YURxryAJd+AXywAV1s7rn|wh#CnE4{=I%7r?!!pd3Wn8agT(e&gf3WBquHBx4pI zXy}-})cNxac%J@Fmcj_jxZ3Y@KNAevFQMqq01upm>3-oziR8mrovyF^NdJ-ioV*hf ze+<*~piJYX{&}L3^_eKTuAs#ue+}Dh82<+JW4*~&80&x9o}ABn{^L0Dd%IrTex;A& zfqxT5`7`ZoaaXUTZR+Q@tQyI8pH_wYd`la8K9r}5!f6YGI*v_4)4?)IPQ_VQ?LL$< z?f1fSB$)1}9lapSMQu>omJ$`WGBR10>4I!CfS3yTlT_2S^zHMB)rXRofj*ibez5ewphos8O_rDoGn8|Z z&3TNbO<&w#WA+eSPKpI=f&)B7&&e=1N$*c~eZk{ke=iQUcVhq9vzR=582kGNaq;Sv z_>;f?pT&2+|HC*Ci`~h#FAhC@_9TAx%a7ynqi3;pe%Ti#_9h2;@+%$I*Dl87VBeeD zTsXRX@k;FNZO1xq4Uqc0j{le^UiJ=RWpyQ1*B8~p)r-`AJ3Bkp#oz1H7~J36l^2&| ze`nKA`ycF0Vs~fPPv%ed_Jw<3TMs9bxO9HqH#^+<;*;1D?)<`Wy#B`PiWg#MYu9+b zi2!%eHuBkm&Z&8tU}0q;c6WERbl170@)s^#j-|zmarxF;@!fy=U&lG|*nM~>j-EY; zqm9S$*{^>UUw-*TJb3anHa2$R^0gcB?Qg#kfAWw2C@$T+8IQmII=)i~>_^zpNJ`ef5j++V-`YTUbfFBX@UW!EG={PGj&uoRcB zoR2rY^LE^R<92-d+2`@gU;ILc_QSZeej%>kz7;?Dy`RJvUw$3G{P2_b{KvnE{loqE{&&6;Z+z>`c=hJ1>dz;Omo8t4 zYd5aOdFjW~`^*(wq~e?Xz8OtpnrjoZ-_jx%(=_VU@3|n#MdszDl~`U`F%Fx}eD^dz zwV?JstZ#@O`%?no`P-B7H?JM#O#uL-fQwPNF#+UgsF-=cvD%UT-p#Wv04nxG*V0+q z=K$tOa3Fa0$Bjsl0DR%dX={V4%n(!Q-*eWAl_&<|b>k%)Rns*{eERJw8j z&oqf|xlKJVGEfG8#V$Z@U+8C2&_?ZbDsIr}Gwn~`SVCMKbn3btaq&;+`*DNe_=AQ3(f|+M zUGsRHkST=5-`8rWO2obm28AQJ>SQXB>zS&>ZL!iEzNA>8H ze~lCv5!U)F*G1H|dgB$YcqrdpaHphWC@iBDZr0^`kDzkWp3}g|V=Oq?c173ICzPCA zuXX*h#!sW~_=vajqD#I6euuJ@rf-(X9dHZ-r)8~s>f=p+_;!^=T=|UIG%A$oSgRQG zYYk((vV}Pk#0Piq^)9+{~}_eC&&ajdSp{-fQ~h6Wx;(BAz~m=y=UK z9q^_woWa*Tmk)+!`;{HOE+Q3boWcXXw$aoI;?FrkV{5P|$Egc^tBm$$4QUjybhOciLgahkpXpOC-}3sAzt zdN=u;df9H}j^s|iL4orX^U8BJ*3jOF=KM`%u=d6y>qy#;>tM*_uZgrS;)yHrMqK$t zYjWzn0W7-Q^an>gmF6vCz8L^KT<#kM*z8W3JWZc-R?20q&&E8!rmblt2cM_?Y?_l6 zJ$gz8uY6}6guQHRvpJu(oy*N@k?}M;oBYct=wY}2QDvAd0 zny(~hdq9UC@Jv1w`b`zsDIR{xUS%f}X?LP?6Q#NNJwMMNbOH>_g$;i67e4|zh$lBJ z_!a-QH*W&q1_)IPC;fte zV12_|;IM@AI%2n@_*Z|xw`|g9KTUtwJn>1NDBb*+eMxx*#wvTWqCmLQw$*cr5YR)6 ze62y_+%`(Cc`!+8eA^C}mmFk3TE3#xNUqO0E@?bh02nN=KdElGj13>ol=V%2G@=EX 
zW5)CLB{u;4$Tt9VlOMH>kvpqtX*7|-#DH-*arq85ZUkWPF(^gj4goHWN?D~<(>YfL zBNNDu((`6A?$gkvE?vGg3go7AUa<;{g)?!&xkVj_q*r1kp342r6^MnGCj`(ysRpt6 zOn@gEhzuI(ERS<=^4(T!*t>&*2Sr|3_5|r8p#E_>2;`e?Zgh7T%9Z7!&VtGoBtbej z?>7Rp3l==-?+%psJi%hWU`jR$T(ivfX3ibNbuH8hxCtq0<%->A(jU%QT5UoA5H48) zw-Nqob=7v6Z@i74`Q%8P^pt0+aFH*ZVsj=8`2*xLphBHI*#_sSE0k8(iVPkZK=YGr zYA`@Fxl?FOXmveGLOY>K7~zSvRmc+Ml^V)7S(Bg2_qa`$EExnBtJJ?B!x+p+q4l?x@8&!&pi*X*0pJW{pwhE& z6r3HF4P&6vBYOK~h&K)W++4mOJq(m9I7^yDiyH4K{x8vfL;Oq*_>N&l`vIPVl`LKz z&nDklt1vCUXfm#JhdhA6TxUeel5X0^F6d$(oQ+g@g6RA=o_{L_C+j@Mvm}3K`M810 zJjPTnvYz)K;4^seN0#EDKP!~|;0npCT`=%p61#o~nQdh{){)OLap2?+{>2`CmPw|? zuGF#FbR46TM?%UUiX5hs!65K&1lgc@?YIuG0=z&-XHiVt(jKRZ$QzO-7jtZOLn(UZ z$D$!$XiB)}v1UR(b1HE=Q*M!p8$m(SH3JdyNe|^Dczt3>zjf2LLLGfs(U9^6xp}8u zXM)=RiV{Sc0LY@z_{(*gl0>paCC+y=^Bb%0{eKZ%w&Lp}K!wl_aH9)^&57LX>C zjLf_JNaG`_l=oQWpEf0SI}+!`vF?JAVqf%>oCwq-LuS2&u)+UjB~ ztj>AE{TDy~PqDkd9pC@KcVls3B_2Gw8_%9?#@_B;EK8U5wH3c%;PCJ;HupAtA!9+h zX#~W^;|KBd@k9A~A%5q-`Dd~It>29kK0mPWG&aBfEFOIQRebTqm+{_vAH>G9Z8y>D zx8I8Y^1t|_`2P35EgO#F*T4E8=EU>jsNl=<%Q+*@%nrQ=Z0o6C_y1>q_SbRw@`ZT!-FM>B zh0A^$1NQP+h~0yI^_i2nb>mk2=tn<{8@FDG^XD(c+J*DMg-5>zlJP7n=a&}b%f(n* zT2@-;|7x?<^;I?50bSIa`AaLyvAC@BZ02P3M=s9M=b2Z0!ApS)MSKFm$9DCXMR-XD zecTs*)C!*e{1jr%7xY#7yMBnpD5t87UEa+tMiJx~8ADg?W4v$aJ^-Skz#lUK_Mo|b zI{3JBZb7NudFEnY7hZ!>rP_}=#o3o=pBB4jT<#L%+h5LCUG~R$`l6cm#hN@^&PZyv)9LIl~KyEG(oYG*iO`ZkQpijZEfnYS2~gG+q3IV zhDOT*h4q$kOXnbO)Y;BP3Q$QK?#Ny|R!dYwa zF0as_*$UW31>5a@+$v5%^Pl5}uOFA$2cq@$Wi!gSh5Vzwg*KMT-bT~<0r?uK{>~d2IFgOiBtTG02xg` z$J6W2y6)29cXcY4x^=@`-pD{{`J=#$yU_FM`DAExym@QH0^iquL8q&I36C5-q)h)SHm@XFB6tJ5ZkEka^-9+lzBY zTDP6V^6HVM$h|l`*%ID?rjMqJ86RtX#-5D&p86l!piaK}pC_H!VC2bnWTl^Te}%S3 z9_umWsfKmA&r6)i%QLF;#-?Q{U5i51Cr^HoZ?18&9(}xqL@(+{YYj^^HzUA~wPf`n zR`PX@$<3w3JN=gUE<#gP;zQqvfKJMT&s8PPjr`<0cU2=(w)-Xq_{t#Ti5Q>ZBlEWm zok<9ddfp1aYu?!)hxSnC9Nti6Ls|)MuzCZP&0lDw4CioKzcAMDA)k2jag&qjRN3~! 
z+c%Wq3)`H*+1hBIWSg>C4R2)5@$|JAri<)o!v_CY~A6t@N`bCk- ze6D06;t4+^h@JW24{?J(W$hS*pTLEGLXHj4@@)a|Ar$=WUBTHZt4YY z&IuF=x6P3#g6)|X5;qG-j!h9jJ1<3fU>z@dsVm)b>~I}dfCGtU@ zV?Zr>?1U;+RQ?<{0PqQbbGNff7}hg8|kmW{nLFRFK)wS4-W*t(l8&2TN3Hz=6!N@tw(4k3T%=wIUHBt=Pi{*Kknc$%K50pT?9;G@4^IB#o!E z4;1)>Nri~N<|7Xt$s}cIuu7>)6DxGSlMJQvghM>2f;YIPp}55^1Nyw}r^7b-X&B*8 z@gbdBSwVxDb)_$cS$HrQ%9$09pe;a)-#Hy*?0(WN8Bog3zPu})hK}Euy2bdaQ&H)m zeENy-z^u@LeS8}5SJh-({%k@ze^lInzC=uawjM2Xa0b0QH##>t9&=7eeac62UIZO~ zf$gt+>5_5I5ybR6${OCN{d)UnL(=xKQZ25s)9sh#J>C?|kl*t@4ZVCvGobuh{M5ce zK8;@mW$y^#RJ&JPaGJjCKTH2!cj7U>F%*w+EK1MUEi>?6@;&XpnfY!G8^LJ&<#B=Z zm`5cB?Prso-+y|TwjX{DGun^RF@Bcy_|?Bp;hk#7f-<0Qzl1T~G~f&SV>s?+Lh&2N z1&w~xPjd{bnDhX~dH8WS^6^GhT(Hikx{@EqB`E3y68r+_YTnDI&L~sf(!|_9)J8Pne~}YXa+X?#G!Jwd`inDhR#7_(CVn140L#7{KDzm&lnBg zcgYIq&Q|M`ylHyNv=L*x1h;+Hmm8WHC%%)0N{r)?U%1dFCb>oh`uTGf%o=|)VW5A~ z&}Mzy^J#d4`7>_-AoMzA9vspcQsvY){h%_Tp}G0nmqrqR*OCM1^~m|XpXVFhfWCm* zo>fl|+~K4#6Iz{l z0E6-8{#NA`k6?KG6Ha&hdLfML7kf*eAV8~T(TP7dQ}e?JZn*nHm;-BD~jei++N9w`1MwjO>Ho14$#^5wO- zdGo4Y8-L;ad7WP!cnSN-XLsW1(`U*@tgI}@g9nfNRQ=&(KeqSw z#OeyK|CgBKN$^zoqel;NV|h*G;;_`yOq5tW}zm7Y1?#J)`?(g{9`%j*3#Iwg6{^Y^J;)-AS&zk|Z zH+SNm}H;-dazTvG1`v*q~T!>v3-s5;A+iA{K`Qfb}z7scZ-Hc!U@`L!*`@d4t9LD+8 zwU}R@i{JnKpT;L&d>Oy~^wW6!A_jcmE+KmR~f-mjLn-uugsV}i`&MjkzbJL%+`m)ZweR1(fw(s#4 zhkQ!_^^1$k*~9(%Rtch$>A(GWerbse9{0+3^GL;NK%npimFi0b&_hyqU1iZ{RQ&r5 zx&+Uc-NDTzEiOG8oYJqL@S8mHvdinTDjGqqb-IkxIp2){ohtMMEf%=E>4Plt6(i%N zUYmF8)Z6M5DU(WRVe710 zta9YqH-|i}p0A`gu9_@w;@~_`zLXSr)2?igGjFo7j~(84#w__aTRin(1N{Jhs2-Vz z;>+6ysPh{Jl<=CK*t}DZ^i$UPh6ns&{ML?2`_l>Hou2}jh=%sAb`~@DBR-*!{etrg z^U`15nBrdVI(!8mr<c#BSS1A6H?HSEC4$7ARcGm{Mu4L-ijv;u8g;#7P;+4>>%6Z@XzjeBwTWE_see z@LLvycv?I^!mZ>1oq%DHCxR zoQ5JB0FNvkn1mWtah=5T&4pY{CXLK&G$|)1TsGGw2ts#yIhR_TB0SPi@|%2x<5p{4 zsDY<64F{xpNX1qPkOy6I%7qG+sAtl$8aOoAyxpgAp{J}lfA8@eKBSC0I@BrXeB1y4 zzzc3EIm?jJU335y@dh%K^QHm{#k&RSQtjktT$dLyv@MYzrO{ zZfFAapq4D+#z~{h&<11DIAQ*+^d&&J_Q$|>+fUgr0M$SATlJ+{H~tD*eHQ@5PU^>W zqQMu4?CMludyDbQ0`i;g_IqnT13VRpZ}rGiF!Wy+&W4=yfHq&W_Ul5mpMh~GB->Sd ztFFP{T0Z>|n|u57U^3(*9QTvtY5cCPfU+S}eG5kQPm3!QjOk^w9Ft?BhjD(W_f$Be z{YobGPw{$z|4zy6V0!#@VbFdib?D`L{3+S-HH{Pcm;O_8%<}*|1&%KsL*;{3?tnk^ zFIk97nT3-u-LKJeSWn!7iw5})zz3jByI&5Vn+|yl7|)+WStX-_z&K7g_*hcD9Rb`6 zo3u$d;e>}zJO}M(hbfsq4WOn@Q44JR(`4yKf;bb6z}*AV7RrP#?vr@xK|#RZ_jlUl}y(+MTZi$UbHynb33jl;GvOr3S| z`~e|Fje$b<-FC_iN~r(m9P<(|I{rBrDh7JFox3PAxWaXrMs6#c0bLEgAh6by?cT6y zuaC>NC|*EODI_t=FUNf!E!=EBAA=QnDQG(I3b)dg2U^<2W!q^;GY53tT+Yd;iB(M* z{xh-ykd23F7pLN-yuIA#sk6%BD<2N#6`Q#HHu-Xpx zt^#)~(^InzeFKi>Q;#Lm3@-O^pd)sU<$@L9UudaI?}L2%268g*yZ*a;;(=pYmyuvC zmmm#9-s?UMU=i}72g;=Wnm?8G8#Siw zEyf}@11yP;)Z{{fzf-4bpZobK51o+j9mc`lfxiQPvb!JK8ym54|86|^?9+Jo`Iqta zCm+QZAAb<{zxXKbfAy)_=rHyV58^;fA3c5=JKH<4wY?jAqMw)VRu@)cVPP)T*XCpA zU^|{YdlJvKH++HM;^hmmKiRhqPo6zfd@lAx&u0I^@=9D-&o>Y7DS#&rAG&_-@F*q+ zd!BE2lYmHLXLBp|_IGrScj71e%k!&i>v7@wy2kWjy!W$T$E$B%k878%#P;rH?26~o z@}lqh=RA=0(}whV{PbCD?(W3G>OwqtvK3FC>;zv&y?yIu{Pd6iB<8MMjr|9Aar;~G$;Y3?U;X)C_zeT^zWr_-AMzy3 zBtHA}%eZ&%PRuPX$Nu(SY|CG(%j>bUvf>MH`;()%|KL$Pdb;6F`8E0Qdq4bcyz}O_ z<7YqndA#@Dd+}6tw_drWVR9~h_j^B%&%XLHe)fy^;?l*7@!D&z#oFqcY~F}ZKKVG_ z`^7Kg-~ZX4#YeyXG`{@mv-tX(uj0{@r+(wYfppy3+KI0|{~{(kd$GPsd#(C&49qR_ zd?r9X-QV9+yXja$|65pef33A@J}Ji2B5e4pzv)js&{H@U>$sU>aY_Bz7XWo!^cs^E zR|^w6$$SP5W7W-`DEZ@j#*B#uu4}dCqdLYANyeEUMu1rhj3K9!j=26ZCwwRMa|C_i z6);Im(|5r|gM0_z1K3Px*;&xsVh+A8Gp)bZpWHxUJ%)@LU-Pd3BMQL2Ra?EQGM1Wg+ zReSl#mbe9hqn0*;36t1p#AnGr>-^WDXcRkH{+s1T@PV8klBUH;oUxOG@@i+%5A98x zGM6xw=|TE4{v3w7v#$5KSg)6Z{UQFDNC3^%&`}`!u)%AQ({fz|zQF4`J9!YyJKNDl zHJ%2dF)l@|x*@bM2K?TV9Sm$Hv@w|H@`bF}Nye^FT1eT|me2u|5#RwoVYo9=$Im!4 
zfT7j{E-^oN^f29Ds8e1p@_Jc153fBq&Gi@i2N=Y#net!PcnW|Js{a|XUm^c=p9OOK zxFo#vUPJyd~_GWkL%+_c~4AND)GpgestElg?pyKN%ha5_L= zDh9!L{%LXcg#@hZ>8m`?S*F3~Yt^@Dx0&-NWz*MR(k`V9@U4(wUmKwh)n>EXt$}ob z*7{fuJn0jyUzinTLmuP{moNk?qIiI>WL>u?=yhVzkG+KEN6oe8j(LDoWu8ZlwB|jI z#T7mcwkNzjEu(nFzjQq2JV$79dftR7A?*F={D2DD!rxg}9h-s7kc5UhK{sWf_uSMmU# zbO!MJs(<-$a;Wt?ebskoT_=)!AQvmf8yT6Gq+bskA)Ci_Q271Uv zY_cmUeuV-WjqMhwK{H?6c@Nq}-NIUHu#r9wE z77*dvoWsRW?VE3PK#r&!#9J9uLDAry+11+C?xMfWPUDC0>9;E zeBm|Q$x9dWMmQAsSbw=!OBn(ERK|RveC|1uZeX8jq^O=?-Ey)QuIQC!E;IP1Bl$!u z4EqE~YC zMXx3k@T5wDd@Hlh^}}RsXH$lZ4h4SFLXE*208FYsE>5sOpCd*twsNxqezLDLc?sEX zUT`OeS%yxB265P+4BQ#K1vE;hC(M>_i%g?0PC^0|( zWju>)_)XnBK`O{J@*0Dr%^z3TW$q*5?sik&>0wCzqzyU{`UHmjsQ@AYIz`Cf&pIe} z9u(S>{#Rx7Cvdw_4qgTRQ~)2#r!etOg-+(II%<@h3i;=ZQze)(<;5@S3Z^oG^p6#8 zlTxRjp~X}=nK&fJoMuChnZK?7)7R0j#ba*Oe%X76ka$&}I6d^`tzZO{XZ=`i#yx$7 z=93Q2VBBCx+e%uT(AQ_$em69~cJ)O^V+>)MzT&PON09SZ@gm<6f}|hH>Y*Q3$0TRR;Pj9e0RA>G&8G*gI1E+)g7)}L`u~tm7kYXYIewM6ikLs?+8mES zV@9QX>JR{cL-}w%oB<`KYfAqN+I29#ETj>18+j_HjwHwUo(=s4eCGtJA>C-;TZa=M zOGqeH!TCl6ov&x`{RmFrF|2^L< zufTlaQr8JPQ(4$U5p*y}t33M*V$hee3RywYcR7#dA-|Swig%`+?aB&e=%cc~aC+PX z@`JLD*J=D}zvO)u-yWZI`cyIIS}*9p8RGqBQ1X*sYcH2M(c;8QTr!Do&=@@3Ri+ET zbkJHt*e)Zg^1AxsJbbfIaEh_2kk{5(!etK7DNobS?h%NNJa(BT!JqB`i4yAZrIa@f z<&-%hrm#;}KA|sjU_DZEQLCSh6R1u;X;;q^`|)YRX(OWeQ{v!m-pY^%fYaFvU6v)J zz0+j^&lj{cWrXIJmkEO=QO;#t=t&s0R~v_2T8`#ih;tWTSo2AA#NokVOpf+rvVWjM z(TVc=aWvVFg%t@J8yq0-udpg;ZKesOWmPsu-f{8SUoajY)$ zD#detVjf!-msZ?NoY?Q}?Aumg|8U}`URE!x#q!Ge*nav*wjRdX+Nx>jmy@I8VB49C z5qxrh3oN_)d-3GyW}I7G^6T}F7uVv|S8m21|MaJE`^P_ylikC(_u2dL?BO@&dFSh| z|gui{r9ydS$e z+up2ywzC=EdH36K=bLZhSMUEij*j=^$G`KFm|IzimG$+wd-s0)n}7EoWBb`8F5g&+ zZ@vC{y!EZu;`Z&=;?E;ruyX zAIxhX)iJ8Os-d`0N1w=I<>@$TNA#gQFT|+t^1)OAqs;Rs#dSWd<6n0@gL%>uC3USM z7%2w`JTuiw$TL5AUdg-zN%&GIK6NgaWh4{Fg-k204&0s|EGnE8RD!s41IUBd%9`Ao zf4g!)aVYRHmqJi#ZX`byq!((n^SP!~)349x&k%+>0waX-)hJ{{+=qnXSNU-~0{fD9 z6o63~ZSLy=^aHnN)ybEg{1!KV`i9V!FN|q`$q=ULdwVJ#!rVEVO@*8xm0&eDGd0d&TKcMk4F0m7> zjKIHs)b1YVnhV{xRW}R(zg@oRF@@u}m38*@c2*eDDW#Z_vUwcz>nQU>J{=GI!k9E( zjfe76>FFr(8=r*2Pw4x(BPALi0ZEUMA#{kf?(OjldRc(d%6I*gk|AGi;K;-oaY4x$ z=x2Uq+i5)j>W1}Q-erQ$9W8&R{DdT>jsVXwUbbtp$L7M%a@x)@PH_KDefstUDp9&u z06K+RUB)CcUw-KQk@bnw9a#B=Msf|MR|_t8KTEu!+#&ekLdVs6B0fJ5P+61;N^+hF(Q*Q}C9c~aflx`Hk zAAL;^A8+_7tz~X*kSji9QylBjKJo28GEH~iuq_-lMcaVJ65*(t&1AW(>4(*9SW>4?#T*z=KOLCv*E{A5~mX!f${iyrQOl+7#M{O9$E{;1EloeN}wAIQK4 zZm=;1a#e0$^m-IzKchZlfY^8h$+EABJ9_D9BJrdeJLE^7ZwVNS-0TBheo`RqK^NbU zLOc2fDsb7zml!tnB_rQlAx!wux71kFaq5<1fU%uz?YCCIL^jZOofNHA(+|OYLppdK zL0RdLwviUDeMA}>G(L@sgyY_RSXM+J5 zXBsycq-#2rMrPRUD3LVQ-3_mfhfR{cPafeDj?Lml8*5I_rd zTQn@L%*WyqE<6_t^Gkm9<2g+xb4y&9SaN!4WmRbnQl(FX<2G0<|4K2wTVXv@R~lDl zWSygwm3Aw8B-^bl`QFEcc09*`BcFAxFZu#*K3!PQ_;UakP~Q?6cYc>224s0>3!wJ1 zqRl@IDG$IYx5^SS?TO8heB6G-qw=!%Opu=T*NgB9I=gzGB~-rnjO3>wr{y4jP(HM@ zpgfpR^drpT+v(T)-5?vQWQtOB9#XCSPSMkzly!a`f$TE9MT5bL31x=bw@OzeW3{KVF1aP2gmC6FX!vu8gQ1J{!rl8v4MR*BzwP0#-0Gw;>H}=~|?=f`rO^;dAM52_O zv3(hH+{!t>m!JOTLRl+n(90A|wfhipfHMVYw~y&l$CU=1eg(t)kY~{j{X}T^13T-O zI26|%XT7L!s>Lq?)*~XW{!%cvZ6B6Cc~b@XUg(tH1n7{*-k#1dY;)`&f2!SzPO<9n zW#6ZPlB@ifUqR!GUjEKoIYltkb!xs6zpNXU2}(w%Z;ii!yeTN18lUFR8n?E^knzZM zzYz@8uakjkP}!OD=g7`eP>RjQD?j!=tbDU;e0=zgq^KsI3>uoLwp`ZNFuNj9>^aZE&FTj_1f{BbqR|kx_Tz~fb zn9o$#>315;r~0eukbN3|`k2)88~G*QoIt7}58AQlr$F`e@#-JX({mb%S&whI<(E#c z7OTyh1Oc>MU}$|nD02&o@#OJl-2diDY;SIP6MuVqJ0=G^?gNZR`VHrG1fDow5Y779 z1-Hf0(n?&ucp=WOUKAd$WS@)k%CD|1#=@n`5z8wPdy_bsOk(TVM%?-8PJHswhw;~c z_Lni)-HX-vxwvxSVys_?c=y|H#@^;$Z13;I?)IL)v+r;3Pmbf-jhnH)zHS}Pv4Op^ z6bDBq@!IQe#)Bu1V~fuZY;VMc^^0-k$`#S?*pBtJi*a~(7!SVsIzIaF!`Kk+!t#<# 
zpTvzDuf*$by&2#8@ps~zFTad`^Z)v{@#7!=I4)ec9M7I^`^^GdJG`x6(#~m*l&=Cdd9@a`9J=ia^e{PVBl zm!Cg~pZxfD;=lWE{z1I*%5^z@PQJLSbIzrB?ajAhWobFKwzlHSuRf2z{+pjk=L6Y$ z$!)r&@7iJdJyIAIJRCx%h+M`yI9Zxwv@oa@>D-KmOv+e;!|ac@S^CxfVbE;k)sJAG{lv zF0R<_x%rdWc(xS}A3pS6`qt*Yb;AFgQ?IU__nR9QbuN7Q^5uBt_4T-M{S{v$*xA`~ zZ~N@C&*JO*_tiHp#Lm`kymtFm{L$b4J@>(b1D+~7^k)v%)JALT*J5RLJyus&-M=_4 zB8#!Zrzm)ei&whxM33K8p}s*sCUA4c$&q;ZhW&OuLh~WM%L|nr2HEEea3Vw$1`m@gCyN%rubj8JNHeP2(n}qtgNOS`` z)WS+q+47w~R93pog7Ra}`1#QJeXMWgB^Nz-Qv$ffm$E56&`&g-e2zauo~`~AG`mLY zH&2~z>&9=`j*>ex&my~HX7QUo{+;dzXF;PM;C07@22=f3_^uxxFKYeL=`+&rOendD zSF-zlul!(r0KRd$cC4hzjnuj_2!#&U3WK{bgRBH?hnAQ-D>R2=$9QYL*sLX zpK1H){y3s*P-;z~8d^c)->pqM#Ky1bIUWnApwqL)bJCr)eg(QdKJ*!BaGM^} z=eQqs`q%ke=Z_b_DZdnNHjLoe%QuYVPha~7djBTt#4D52CA0cg9!IkLIM4s-1LqEO zDzP04YkRT0M*5Matc{qT;|cA3%@yn#vk}+&A=FAA9p?FmQd#GZKLJ|DalWBod@0E} zg=NVWZs_Hk@#v8JGDa`>%q_4vm_9cDHh-p!+-ytOjw78X0IXBJ@s3_{#G%&zmM33= z5ATIJ&SmDc{!AT~dE3{#{3LqneVrJGvT(G|wf@%{l>OiAzeAl;=fi`vi>K8mI_Dwg z$$C|*JaN&17(CMm2U$FA?^lQ$SkvNfHph9=S@D5zc~al$qu955%ctkmehOL>yHrJs z@*8h*z~9xz0R8723Q4NGYkI!Ff3PPUt*fy){n+r;Ryy-bIXYKky$$$!gPP5@sZFS- zMZ{A6q$1mm{-D-ZeQQT_fmWsPI@UT28;?%5jB_9x(SL4E=Kw;-J14w7KyrNZIC?06 z!=}GTJde8#U=3#_;Ds2}^%xKX>S<%J*jwdVhgG3EBSC8)HvHXJuwA@pZuec4$#CWx zJ`ce44a<=Z@HQ=iKxeL%eJ=CHG1?LOV-1NB+XD*@w(4tvm(xgFh)>Lzf;7wkk1&bd%OQrczI zOIGnnxy`?ln>I2xsMl&NW?x5-)QRj+FX;5+H;MGVsktl#4v)7b8}PN(W|vZbaqf%% z`@j4TO8g;A@~I4*m5zrYkrkGK^d)yfCQtY?FwgM{2{E)yD3$nDz!O+2Z>&qbfKVQ6rh2*jrckdlY>p<$G+NgFUti=_d5Q;>MrWyiM-zSFr~=n(Q!d(HM_lhY8v%vpEl3)`4JQ<-`}9xYK88X({Fx`TirXKwOJ+iwm)^ zv@D-4$UYI!nU@sP*!G`>&beiKigauH&dzM425P^AvWs3tx;Fy!x&eT4=L`K5G=AL) zX!7wZWDOa2cby1QonDW+0bsb%kDp2|<^ASB$?Nr}>f~I10hT_;!paM>X0IohbjUQc zNwxRO3!t;3wa~2fe7vTxRIu-uN-WS{e0O{C-G_mB4?HkGh z8r~`UGVbT+CeLqVP@ViUlshd`V4R^Xg*?ESf>uBFck$@-D;VZy@ofFT{NT~>%_ooj z-H>lEZBOYwmU|W?{;7E-cvT+I_=#^G|62bNM%N{$fI@hc9 z>!jldiwjXeo-(DW3O_0s* z5Ys;S|IZr$L__}6m^cl-V1X>*))HXc?+xN-2?IHGQ3)~iCNJ87u|whPjr)Y0H_)^7 z^R5q-&Kb|{OF-#SFx%GhV~@`|leVnv%ijQi&OLMt6iCm&Z|G@S!ymOtZL}3~o(PU* zjbPZeGxVGd#jnS==q8Ud$`o1pmEX`w{%U^YLYU72=NElpiKkRZFY8=~c&^>ie|&P_ zi)FmJ)7^_ZrPW_fme;d?%q{x-GYlZG@Km2F^MZEFOcjN!|fBCx5EPLlU_mq#h zQ26nOU&Vd#ymt9YoWFQk_8rC_iQj{VkK>o`eHfp8_C;Jge<7Z3Jd4-gc-1#RJlT30 zE30$y_S--=ikI%{mpwZISBDujz9eUpT_Ommwgcw+MT^!wcWn-+|=no zUWd4H;c~36Ux?N96*nXoAvs@u_15h?=bzg!-QLb|EG}?CbyfQ9#1~)Ri{-WDxFo#} z_9iho-ihzN`%c_`^=4eYdNtP8m*T1Xd~~>^eyjc`f3B+SmX|pm<($T*zvoN!EBdTv zB$XezO-N4^>JTV>BeH;?}vlAl_;&wIU0Y3?>PhjB?nsOYy6GJZ8tBl1_d|g z_u7Cr6p48jAK7d6J^geHE__<10rXkMJnfRKtRIJBZc|iDT&dMaC!X zdRnLIsdgMe=hL)vjAvCE&XoPaci$ZMyT8L50H)=Qfa42sYe#T%tW5Jke%3vQm!bOY zi}Y;J**(TDIjL8tPuC7-K=J!q-T=^_KhS3t)#9t5^^YEuH;H2EihjlhB%HcyI>H^h z((+2*PLF9w8gSY?A$)6i$}h}U)QFFG^?hKPFDTR9qxifCuOJI+(B{OQ1>HENZ{X9;cVm4Uedo*5KJ9n`ou8`D6@Jghww2rw^fz>C z{Eh0nYkk9ZwLHojUD?sbU#;7R^<96N1;xAgya;CaCH-;O?9UCsb4R5|0%hrTaEcAr zHi}O#pAFEwrl0AAaohHG`R&Jkr93{=`gvt17FTy;ab;Jlmj}{xM>q!>Bg`Fa@E;hr z=)FgEvXmR%M{HcH{M@0}g_zX!Q|ZbWb3OLW>!`_|Q9F07V{M0{;~&l8y$4kxhwtNa zyyOXZ_=4uAadrITMo|2EsPm4)Nv>OYi z#y*di;-b9#sJcULUXT(xy^-#`iA>?(M7$;4ywjfa;gS4BS~l@{6Y+H!eq!yc{NzZB ze(A$LR=yrTpO`PqI^r(=yyXx*OjFJm(f<#*H5 z#^Q_3@E*)3_@m}x06T3n{j2pA^2({Q@iD$+!pri~FC7oD2ag-5oNqKZ1bo`e>u7cS z9FO?8KX#Zm(CL}w_{sf%^`7};yITi(nDA?l7t%S_pXxL8Cm^4cSGrwSw2t%zr6pd_ z>jNLw-{i5Ywo3si zBp0+y8eoUnQ7D zvLWzl3CesY5goI21glArIs?3!EQt9I<2iLEGlP{P({d*foxQaQ3~V2|@kfyrP)yPT zL*>Y$FcFdmx~lWJ$w({eE+|SU%>&p^E!_inO)uAb^4NO$TK0SupTR0%L@98C0M& z{<5J1z8$@wOH)_$gZh~OUqu|Ctaqd9^dCT}X1r{Jf#X{s0#W@P+~VPC{S44fL95HA ztMOd)*^deus@S*koP|_Sb}*TbDTeV`Ks}+1eLlVyYnjTZ{MPxUpwiFhm-PxoKW>9l zFyh 
GIT binary patch
[base85-encoded binary data omitted — not human-readable]
z+AfR2=~u{~GMFvJxmXH!or*YqII~X4O@7g*26#^v9Lh+3l{O^s$NsHkB7YjdEP{uv zq#@^!5kC$GpO;9;5AsAPKgOjPynNcth{z>~icI*ur)uUDK?cF1?KrHW_Gi7NNsz7@ zGM8~1ksqF&p8~ULsOc!H{*42YJj6w?z>&s9VsaWcp>@^p45Ch51A@jmvKfQIV0U=56j|aidsv;g>+H%px48M>MJ8^ptXlYZ720 zPEFzVLZld@0WyK3P|?ANt~yV@-K9wiWse3UzxeWIKO5rq!&ADIujn$bq0dLtcq2aE zh$0~9c)~%cDWlwOkovQRfg>6;VDoK1Jjs?}p@s*5zYQaveY*jj6@1gXB2feBd7$A| zc_>452gs%zRvKg@paJ-O6mkstnjHM8K)kKt#zXyZ0AQQ436lve3!a+LA_ph&LXI9b zs9qJupV<&jn&>ycO;DKVbNWAMR9;xnm)8$sQDTKzv{4&(FcuwZLQL+Wqs5D zvhC!D_K#n5oH4uu#H}-cY&(_mjz!8S_p({#>oKT0o3t4~kd`{b>2Ugqz`+%51%<$t zznT~%RDaSFxSAj32YHdvs#${{gapA;TU$0hou1%CO;r4t{u*P1j{vth5fW#8bGDg z`1y9G>1K~>ont{|fA3KE!rAGUL4VWVVZXk-EEiW-<@o)nzmd#U`^+7BD&gXGUiiK^ z8y(D5SQP1R4N95cm-%>JZf?fj(7V^fl#Ldu!&e~h^pw}GCrOiQHU!7!gFiM^`78l% zS<*q>-_6TF_|(Zva}+j;+2rFp&Nt$3t}x2UL5SJSbvZdc@f+NiSC{4K(V@TJzOyro zye3nXm5t&1a&+*dTwPw3S6{ys{-s!6l;5rBM-Xl~>a(c<_n-394uli^KJi^zfgj_@)(>r8?dvv1 zKIAE9=4D7bpz+zEIg%STdFA-i1oWqIstUiQvN>N~fssv_J$IC@YciyIo+~NwO2%gO zXjq23du3y|XPGW;C*^)NRrkt9pT^n?zSqrF{gQ*`rECatMYi7z5#O80()W948BtBv`kj@<*?3ab~mN%z*qPx`-PdhARa?R<&2X%Iav^L2>G6T3N zBi^d-mmSHJs^$uF=Kt_z9yj|jy{_`1oyH9U90rhnBUZU-pK}&>RAqhTKOFYemJu^= zW~(lw&wSzzeVosy%8Jc@NnSLNJvN_KfcBNz-1YUDcwdx(>is~n;LU&9*;wtIK4(0h zxDI#J&iGV7yiI+m{$NkM6(1frJv}Qo*He{=tJw$2gSj7{ZkUu`{pw2}=K0a@|DM|V zW%-`^L&~-(`A^3;;z@cVIZ~e0v$mYGYQfYbKY^(p~ z(TT3nN6+WfcO2f};23YllQZ0Vy+rNRw*c@)kjlvIEGX=3!mR;%CR|_n6B%RGDTjo% z#NS|N;KMi6$HMK8PtEY@fsr@Zzj^(peEH>X%Gck1T`teA#MgE?zrHRPdREZ)!#Aaw z>N$pf-`vdfY`gTKvNvyDmFpW0rtqA&)-y@{6#bFPccpgz=J>e0e*3z-lYM!md}pkc z@fPFM2Zq?VP|3x&o_o^MH?Q85Z@zk^{$yO9K6_q9>I=5}>QhuV$Y)PFPoKkc$=Brm zYFFGQPPUyL|KEH5_xH(P;PGFWYzxsIIyfDmO#sox1@+n8HR>%DFXtK{U6WSe_0Mj; zL^=O}hxp-d4e1=?c<1l%M%VAaiO@7G( zHeqQ1*>-lr$v4A*NFUEYbb+x?=!9Qxax&%Lq}SM~K0}sFEki)kZXvn^%E4G5ab;T{ z0O({MnO=5H7l6NQe_;sj=KVNCQ+(L|!h9T!hwEob=lqjLAZb0MN&6){WQ|lguo`l2 z>KXZ(T+)<{Q+)@$t@~^62y%ZS_s8hJX|I;Q#`9nEZ}<@87%UKP<~hTZl3`u;RFglwEmgWE*a|XtRWE-gB5)v{gpZ zMZADUryyYl&x}hoeu5`tXFwk)2fV@=<*0_+20Y9pyy4S{3b)v~f>coNZIKoGc|>dc zMfu?)pML;#((L~QD|XY#&?A~Z(ab#3xNI6L`LMsH&#dtjExa2K21im~ow0R(0t=vY_S|HD$go|n7cM{VkjZJ>R(y8l#as&6Sw!#6_9BF(nWhAyyg$D zt_vA8v4n0qKTFA{)|qjuL5@wBvF~1P{rUgl-}{La`-?bH70k1d?1KO;T~&7dfr6|} z35%=$sd%GRq0m53NIb3(A+w+rOi;lx3g(oy6+w*&Toyh2P#{oB8>f?5IH(=H>VwH7 z4b4r;0s$w42`vYJL}J(nZEmbm}dGg5%{n zTnFeFcqD{KwQH(cLP71?YW?FRq((KOpm26OUmP-3=L5$-`VBkssqQKlvw{AYkBD{S zHI48QWhb4UlLsyxP$a8qYyAZkg*J?)y$tk!UVN(Q6Wq3{?j=bk?Y=%~1uPfE5!FVo z&kuIoxeD^F((pG4AB8CADqVyKJeI!VlQD&*({>WdM=*UCZt4+Nt>uPXn?4u6RuC$z z^7R;yDRxb7nsU<&0j`aKu?i`x2iXahgE~GtALC~YA;TK{%-d}Mw~XYs9v-8+3bo%7 zezeu-ue%1QmK^9l;e@qzrVsf6nP3GPS*D%%E6|bohoEgwncwe{Ke$nzRd9c_0#V-9 z)X-|Lz(?}m$|2zqdJVMkxV&&&pA+F1-*P^^kNQk)QCFPkx?}XnKe+3#jBzyHNc}bSTOADz(Pae?F#tnix;4!QBGK0QHhBo?ZkI1+FJ1z#Akx zd!;uRl+B%?;^3AV)U4H(vzw!5p`4*?&7=Ch8 z{^$?>pnU%M=N@m4r}5_UZ-4z|`SPpZl=JhG(%a$t;?K$_pL|}PJ%3(aym(%oJd=6) z#2ay3@jt%i+vM+jP4T;T?|r5Iczjtd&d(F@%ly8``Q3pR)7gu@? 
zv(d?$%-jCv|IO8n%g5#+w+Q$xVg1eg_T{AN&!*otj<+W7%Wya*;{(OJBFWN7T zp7?VGSC?G#%$wztc@bVuH;09{eqh?fF)zT7>`j2Okk_)CJSfiZ0MTTKp&vI<(pb(Z2|9;!SLoVr_3t zFu3x#zpZn?r6jBAS0ITgZ9W$=1@k12VF`yk^PP&zb@gm;yX*@#+@%j}2o82jpG|+R z#_z|~`SY7GCqN3{>*r11h4f;f`b7dvW!VsBUl!iI2q>9LLV8ZG=GHST!4i>PX={K_Vk8J14 z*Pl#KpJl>iCE6_7dY&&}QJg@*r-E)8eF432mbJ4O`q?A((H2SWnr3G$`wG#*&An>8n0n$})eAh&1wI8HZ zy+@w$J=nmjJwIM zy_a6`jACs=9e$Zf)_1BSZmWT&57zW0SEcnQ08~%6(hu~Lc7>jxL$oWd*dNd~q`%5q zx`rOd+6{t$7VXfwt2Ep?Ga2g{qkfQ0{yA+@Ic@E*@TWdpdOT76bI^rC_qIoRuATYK z|M#*_FVEF}c2%ug^$K`r5_Va;f8)ZP>c!q6naDXhH7aV+e{qjwD{rYtosFBmg ze0WIm;)R&O&dBYB^53fL=qhV{HWWgzcwsz1Cu}5iVU~@>;_ft zC4GgT^rsCb0C6Dw2^FuQlXl{-KqtQs!{0Z51*v=wfij2b!o#q2X$8MC{|W2qt>Yyi z(`Vdc<%dQZXw|^qA$>r_M+YEcWx`*LAw4-Z`2cwQB3#RhH1w~sX#9jo&w}SLEhJs? zUzf)cas|RxOf-hpG<s{no@n}-lI$&WDsy-4CFz14K9{4bZI0X8sK%J>+>vvxgykJ4X- zNAr4&?h16)F9IHxKV*&($L@3a6Zkov`3HWKDO^kNfKSq5C;keoBnLv@mQAj|w+X(h z{?gJj3!dCgla>t0DWjYY@_ZPbR0IaUEDk!<6bA2~Fi%qm&oah*?9=dhIG%ytBhe6m zXFj-|=P|~J)C>p0EuEdSan69|j>E>Hb*twkV~Dx%pwH&G^B`ZQ4UBa#5QzDhHwG03 z-!TqSWUT`qvV&jBMNpMNY|urCJU*{7CT+?i9-DZA)~4ov@I)BTW6A-+QQ9ojA=Ta!yGQW^R0!AI0FF*bR& z%@Ur%d9Emoy3~kIAyLmtDxTq&&3)>jrmyvtHhrh}@8o5UKtG!DgD&ELN3hMS;*2la z%vn5N6>gMYDLj@(28=!F%asmUn}YJAT2ttN_=uR0Eu^3zpF60>%Bc!aRlr#WV>D$zhP}a^3>@gSEi0zE@t!7yO8> zVHFxW!ZqKP75pvJZ~d|%ft)dfkhkMnJcn$Qms@}SXa92_0Pw=AJJ8rrE}W>ca8!Zv zMlw_rmvW#}kzjoKM1GftUd9zn=Y~y#Ksgjl%TTCq0uY|xL{?Z_nF7aZ)XEAmaxwyN~a-z8Pr3`ED9&b)1`x)!URxnvYY^@HUmM@ zx*mj2Sn^KVCXLe5nG4UD)SuBpj;2v*S1NXqDK$l$>ojqi{AziOr9a1$1~$N{dP4q? zF%C(UQU;CB(}a%AI>1}QYpZqQ9c(TYSZ4@Z*&5t$3zLZmBr4ZSn3i$^; zBl7bQ@(r-ej}+ino*tbb2%;>jbQ9l^imo!S*MzG96z3Cc;!{k*CkFrm5-0HObB1a3 z3vKESJ9%%Qi;G$1vjQ7%hMJaoHoEv=5ZIAP>V}Qbf*;a@BLl6J7dK-dQ}dR208Us9 z>AHSQLKmk3`9l+Mlzx_I<3@hqqycd~L>okZu@kTIpZ-q$xt<&z{`krK9Z>NBVM4c) zK7Oic{LAGJUVs{A`9BWdIf4FA{>}^ilhu(|@X-1{49ontQ2pP*f9Buvkan21zpwB> z&n182ts7xGw)STYe=msl)l2on*$`3MK zf8%yU*lC;}WxzI}2ul+Q9*4gR|K0HJ^Y3@bKZ950-)TPq(bogm=?X{U0O?*ypM92f zC~>-Wr~Kq?yyQFG-8xu-?2np!p@A*BO`e%nwzE(<9IT&w{t38EAo2qf+@xIr^y~xO z_Yt^Ke>HvM$NC_w)Ssd2{{R7vFXIg^xtsO|Ka43=Z<42}WO)_mv7+mQ4gXkd@I;KG z@Y_3OI65eszWTgZczdDTasBz!Z@u#-`_%SS-^ENPt4fgj&;xIN|sot@0$-B25ZthuWguR^_u1#-@wS|woR>q&H`pm}UvW6%RP{S8 zdu$>Kx39U(mdfCR9I8|D-QMgAM}N{4bcDH(4^3-)z+^QRoI=m6+h!WTfnmH?f$^qM zQr868eZbbqYnw`RUn2qeA(K47qg+CnZ5vOznxcg89q@b69WQQ$9aC)Q0uf_vU`T%xYMBFxvGSMzj%Cx^E6(`GmKf(P#9Z8z}~A zZ_l1QRbB&clHV`-iko=$@L_@rwHM}%JCY4IBv20Ynz!u-9AcTqW+|{U=#>Mt zGs=6bHcyps)jgZ^916I*S9OENTLbE zIGH>Xkke!1Picw>5>}0)6Q>idT8k!S4r&+{#2Nope+QZ-NSwwm$cDd&2!6sJI$}M? 
zK(ew=Spz~B&=FcfEcnVLJamw@OP#ZR!um~5vg{Ake?NIrws!YKQ2Z;M-)vHwE$Ywj zIkcnx((P3B7oT-h&51ULx>$cf_vo+Zvm51iTei9NM)iw+v-!WlXK*&vc9rgKq0&gF zM9qIJq{g%3x=e1ko`0g}!8Gzx-4E3k;TiEz_d`AR4i29vpKX8Z|LpXnywme-A|7Ic zU2R#q$}@?Le}sfYkl(4EfmeFI@L3z$2sh%OvtRt)56Y3AOWW%6`Gf~;_Wkj@a&d9y zPg^Wh#_Ow_a{B(vhvZJ)AD4G;UYCjZ9_;Ryf!Zer?lv|DDnqn0+BF;e$mi|r*X2}v za?8xFo@bPqPY`g!1^n^33?ItT^DlH$ZBg~c0zJ{i!gOqVk1w}!iJa?aq+4O&JN;Kp=$TnDT5S4~rI z_`8re0ZVT_+pNQHVO5vfO`Qyn@-O?-opmhZAob7U6DR#P!SvVo<0Ce>7~#mqNeB;e z>%D#YZOiYQ0-6UyhNu4_IDIWY`39`=AGBKo$hMQ8T7PL);JEnvopC!F9 z-E~mq+T1{!rsn@ZpMaXCOEbsc4cqWZtH{|R4$ z$Hk;?;Bo!~FUt&|BW{`L4ZdiH5u!AzPkUm#WPVICFg(tzJX$k(Ub$>4iJlp%NhMX@ z=%H}^ECcS)O~xi%<$tedKJ!ry{Y^dg=U|~x#fuLRs93%ppG|Dh;vfKrQ|TAUFT%x% z+8<-%yR4_C-^7-eH@e+s#EJNF^I+cvIoA0Y^dk+wqN8wMas`F7@L)bDkNnIpkOf4L zh1tdDjO0^UTn5R9Ao6n@W!K-P=1b8Ji>8dnqsl+Z7yv%$;EFM&LA*1j@%o7{+2_;0 za79&Psw^W#;qWJV<9*3mK4?Ri8lL&kFXTtTU|YYAQ*3J!tY7F+R@;$* z{K0o#htC+6Kl&SNA7p_a>n4d9&ntz~V?`BlC4VU_y=*)X%oc2jIG;nC4x4D2mv)y@EKUjp7IgL%8xFAL5kGr12|;v8@YX zh;pL;DSzif{nY%-9!BXMM>|${)NSZ@@LbCva2RNju^egm4cpm3*!e20{YG%QCVg5c zy2JD@+2eM8GSGy#*t-Tf>CuM3(SM5cuDsm(&;H|o|0fdV7oM;>b501;anz2Cj06xc ze=4A_nLtv6$%cnbwGyrnR5r#JC3mp%g}PVaEVxER4Uy~g?;AI`(Ew=sO!%1WvCx2< zYyP;>!%8OvUt6YlKLo&)VId>Gl^-XABHVcJ;Zrbj(c%D=o^l`qSAZTkJ_vxYxVl1x z@q!S~0|oiYLh;Omk&{Hc(Le;{GpGTPUv&KBhr-xpzXw?Ihw_EY94>bl($Rs10`XA5 zuNc|TR+u_H^4scBM*LAm7G5ZqWr36=H?CrI>BI*GH9g8~y40^a1R7Ta3x!M-)j)cx z?PQ{Z&@c}GNp>O1_t8*0Ai#A{^KgFQieE9D56mtE{{&`4@)I_6OsioN{wO$Z{jdPx z^n}SKA7YRnHsmE^pZG#(q_-}~PjYuI)lD$;uvK?zhB6Q!EejpUop_j0KE{voN711l zdQ0U*SkQL92K){PP&+ zyWkO=T7NNl1t0O(ExGX z3Di1xkZ1|q{%m*3ZOC-hUX^j<5BVh?2VYWV;YWW%e(P+MH2h(@ka(*y>_YJ0mEp(p zPfT0>BC1?^kK!Mim2T}nTKFCFZ_-yh%52DVJoEiqy^qMD!GD;(Yqe!fyHkJ0SuGEEtEM3) z=L-J~y$)Ri=>)5M)bu>J0?;iXttL;X>7(@mkK~ikr^n^;^t`-#_qLp#oce8gHbV~&kILTu$ZtFH{d{ja-t%qc zt-?2ndDD8fn0Nz!GMo83$T!#5o*VE6G#l&O3c$t=haK41xVpOVCJg#K+~>OYkso0k zG`KwHP5Vu6+;LEtx4r${^ojWC^)-=aznU}Y3kIU;=@yQQ7QC?|H*m zdCsJJ^Xr?k!J9s_X}O(D&2!Dkw6=LRI1~6(WlJZpV3|cI&*uc5e~NC|rjUvhFC2J} z?}o-T+I*r7-DH!RIUXB#d=h}?Au^!;(UnaOYD6TJJi@75Dwde1HZ%AgmJVxeC|q zL-@2&4hm6cywSx$p_$qeGGOfixl=dvCET>ZVFK#>PV%IFkUww!a|;1;=z+@M@siT_ zw|87NKIy?2iK9wER#52i@kH}7u z&J)d(KR%g;v)VKYD`n_j+SxYTI1HIC!n@ilLpSb9ucig(s>-lgI`mwkF#0PIKChF@?&day2UzfMX$7&yxvwo%m zr#K43+0TCJ zalpZoBd?P!W)hm73EaMbu5NCrZA)4Ghteu9+~}=eC=egs7~{`X8+dLf293Yb1)d2Y zFov-rDBce6+!ADky9K!e*)&2olWy1%)`f}>dD3*|i-FsvFR1h%hm6mBGXM0i&%fn6 z?UskG$@Jk4Si)Juf9n=*E$8E~TE11>52XbjD}UM*9>}r!9gbN|AAUbm;eSnf2djWh zdF>DUC^M!_=Xqt7M;Jn(6EX{k{9|Lq&VWw-@B}!G#x*THnU!(}f7MfY30!QMhQ~A& z%u{t6h+7J%&`D!h#d(;KZD-4a2CtJ*1}uyN9^+4%8`NW!N-jq#oUIv{~V zs+3b z0w?1uey45&H~oQ^+~F5@!cqHba0KGh=|tCi7B#;ZYY>b-%HnjcN5*DP#SJje7Qa{GzA*geny@G*w6DkIx!0#xgb8%zt9P zM^6>nvvABSxJpPq=9BRjn|4a$=RBFyU&`j{f8TKC@iTw_3V&t+BUwJVg{tzm8oQ2&##S7BGtM(ET*`tg@ND2Cc=4yr z73S(iKE_s3(Qw@_U+NAdt{%Vd3um=Y-s^)K5S{^lW~ zqHak@#mnxHhgX%*Wsk9*LnE&Ci!R$-M#Q|wQ}dNeW8I!V7s-nkd|GXwI$$i#d@poI zc-F_@fwCiG3hFv_eSqj_$JrKSd0n1hOdxvV!FaOZUz+MP%&&kA4m=T;{BwZCPGm^9 zg9RZ|%4MJ9;Fa=;X4Mmk$8{I@=)PRapO>=3O&x&>7eL6yb615~R_I@ijs7arD!v{t z;*Y%SM(^Y{4dlhY@gtt{@#8qTX@9p0$Bs4zshW=RSWfWeWR9=SmmA}s{a^kiKwI29xQ>I2$tU?Z-M z519~7!dWCd?aWp9O2D?3poa!PZc)lTUw9^wnVc-Kb`a? 
z8@%Bny0}e)Mn%Ju0b-#>Iw3;AQItFC5ZQ9IroytG>xocra8*3$)6q-26ouT8ncS5& z8KW&OUj;9^34EZW$|Le@{ei$lUAAOFp0v#AnEVA4RBhOkd2^>1|gH=2_ zhP=DV;JQKovV5wOE{PTWjXJ4r)5&sFJMkm8kQaEBN76=K7}8W)t`EUV`qY1Lqnwp) z(;lT)Q848tiv=4))xo4^eWC2MzX+mC>Z!twoXuw^fBN&y+WuW@53r%#wCBel^G{gzwD z59%-Z|2xPp@=SV(7vZ@v)6%W>1N{*mc|4YX!h`lFl&U}I%Tk+If|ZEJ=spHb`4d`y z=5J{5WFzzL^P_9$^8-GVo&!~en%*Z$-A9B!pQ&rp0}cOym-P*P=xpUH`2#=u2>M#} z|IT7n|71t~)@M)CZ_0*0@)sTVv!++`Re0oE`CHAOF_xm^dLx?r+A`69Mfwa|<)cFKo#k!vll)Ylt`5U${Uy|P*C{U(qC3&^ zRcRWhSq+qqaVG}g`IrYjdH&^igN;#dhHBhcxaxN}D!tK2WHpwVD=*<*o}X(nyj9-5 zeOsnD@R91Duy&blMyy+78?Ka zHs+QZLVs&iu5PYX=J<9T8>x%iS(%S-OMggSyWX(l`gAq}*$8CZJ-r>5(axY8zkgFs zF3!dBqC9{8v^;(B-1CO{M06(8ay7mxySuv@D{K5aWApvOa-1$0w{Mia-J`NQ7)neV zDi2qN-<8wTQ{i2fFTecSa@apODxZD!S^3eAepL34_RGcTnK$}*<8;j71Jws_`i@4s zmKSw&aIjaNK07McSJN_{UzYEG{yp*D_pJ->BqxUCZ2Io)K2cdG<;kkw7%1?j# z({k|QX?gLzXXTIn@E?>f{^0k@;PBAxhj{>Sb#|iivdPQgURSOA6zeUz$0V{DN1)T_ zUp)WP4OEq~99S4&Gne)mr^um2+YUM#ekB*=s>j{_%TO1Xgv4J$ywz8qVdU+;9QvYL z+pv5d$9O)%hMn7=(3ni~_J+nKbLj>3%4RY*D!8xR=1p+sJfqSdJ@cGNm8_J^(=_g3 z&L-XGQ2GXQJK48)@$G#USiFG58}YK)?56!KnEzQuuu*vyJr1gG^x5FAGN)b9)>RC2 z`ff3k4zL+cyA&M`n_P{(`OkQngC)K`)a_LBHj%v*f5hi!0I0Y5ghLIzGT2o%n zKBK@!sn-}-chLCP^{z66zR?as2l(6sHhlzf)Z6G_q&l1Wnt$@#5ikFPfAhaze(%RW zFbxjjUte6?X3YTp0d8QRy?7j{dZ&K#1{Z0UV$_D5Z$2mL_(5&Pwo)7JB``tsorDK{ zg;YE6;6(^(W7#&Vt#;_~2<37fF@Dbegmz-O#2GEx$GvpZ;|FABWDbVVmO0R+XO{9+ z^+;vG6Bj(Mq-WTyNeuQzWrNRi>`Dg|#yU)qo~pBS+u<`Eaablk&B45ixi@+HMu-J^ ztmlBACo|VG&!yerj_Wx#I=E4Sb(`BVo?M3PC2RT(Xx*bvYS*-5bcMAQ+BbIed$H-y zkbIj@NNfyM8yn^Q$-DCH*DuSv*YC^miRxDQ(0ym)e_aU!zZ zR=>=R1?R`_?0@?7Mfv2D7v+1OzbLz-oibTWR0+2}ym8NpDMleP}@NR!CMRD4clV zU?u(1_!Y?cRD-vIpY)P`Gylr)hx{AjZu(jDPhpkccKs^+tsd9-M(>lJ@@Gtx{D|LWJ&iWg#EVYwlk}6{4A1yE;Yw@-tg zhQ9Mv8F@1qZ}Rp7~VqPNhPOuu+Eo)uk{^>(50hn{m9a@f|RR1U@@TcXb#?MCP^ zZUs0X;7wOU%-i9OplpwngK?&-qPBl%2VOkieE1id;PV_8JBZ6P1dT2IrG0Uzm4nTc z-@{hojKhs(LHADM3Z@l|C(JKx(d}Q(I`4xHk9CzS#v2+NVbgDMnDb8c2Y<{V%$LRm z(7vCuPe9)E!SJOY!NT(xx#^ePZwg;L(VxN{2PFtLuWF;R50A?f$8(!{VxNIB zGACqQKTtjOHIDW?&AJ-0n`r{WXO||LZ@~{^aq{ESQK(jwQ@ARR_YTZM@J#)=OcqIm zg)HTBU5DFs#*J<4OR9f#F>60b-I|`~y+H$D!?~r!oHRa}#;xA=i#Nut(5AxUGYy(c z%D5ir50xL~=b$X>CyZ~|B=_gSx0Ocp(SOF-@ax;dHQu9ZM2@Ddc0)Z%QqCvq3!SIG zVEzD~&_SnJH}g6bf)a2xm(RiNj}GhjSv`U3dNRLqJaa)wRr8*hConIVNmi3_Js_@p zcv*R4n>K`?vz>(g!zuOY?09~l{E{tE;?}aF9BxPOg`S#9=wCixpzU&sE`TgEAF$#u zxJs)6@reU6biGzS&_Bn+59zI&a2M$`Ta7$;4vWuwZbT3&?I-4wjGa|Bc%n*4<3cG9 zkz&4Lg+%7WQD*F51q7EDAl$OBp2Gv}C2qcRoi;%GBE9gaTa|@)WaGRguh998zufxs zKl}H8B8)G@wiC)NWl$n3E=0|R_){XeL_uVTvQLgsXu<-Q&Ikol$EC_)vz_=@%#y%s zLqr-VbiBnRW#~y+`gAVh!khTgb2V(}M#GSj*^Lq^owao*Df=Yt2xt2&J zBB2H92UZq~AByQe0h2}1W76aAI*RXn`=7}ti%b4ip@5uBh_cf0x=x705KFa@XDDlg z$(Gv{Y@;Al%XW>otmWhVQ*?>jA*^|18>ZlG~pFU}7s7aq@`;2HZTK>0qJb zKcx@mP+<`juK@Cat#$wr;gNrZCN2T~Dxe>Rnoh)!7dY0Xh77(aSJcyLJ{_8d#Ov_k zp0zeS^2yH15S@f|T|T2GNxPH3YV^Zt)X-SED7llRU{n6uffGc2rjxR8Xt)RyuauiV zITWh8b8P4jHu!!RpbK;qRc}JP&_Gv0FCrb50GdE$zrtlxT?EI&gako7L1J9wt zQv`=|*er-L%7%VZm*k5kkO?G+#}$4Tyz6&H1gm`!_n`hNT{FtR=z*ei-mrHeGH-$0 zt?mmN;Ri%qMhZw;y*^FW)Z`T7h@)&0p3{#;@3& z{0$BMO8%}#g{2Lw)a?=^-BtPzU^Tw=NBhb=GQ8TNoBfE}P7|N_QNHM#6L7cbZ3#c= z1dv{C$_7r8KI2#HCHjKLN?QmP6739cO)op@EBw(0?XrI-w*?2S*2e=p(Otr;87`HN{4!m&)6YaITmj|}=(I=LL6bh>+j>j<3c@vJj$7Og zghPEmuePame1uGb?t}WN{c*#F!W;gUptie}@_HP`7**wqcCGZXVltxKG_ir6<79hN zg$XC=dV{j%lkYp;WYI`bV^>Y`r<#aeUHh%dljFDL-B&Nm|M&m-e=p}JC*^RoTmJE% z{KN7m|L7l=gTwuDetKDc@$=t!(=iq+?h4o39~~Td)6CZ&9~_jOA=hBXH?rTnc~y?z zzw=~zYxCA``5*4@`fvcZcU@gw`5VT(MZjEu4H~{F&f5koB9I3QDaew^>8>~YHIdQ; z@|KMr;jt8x8|}Q^7~d10m*+2@dE=$;H>G(aUlU-}5l?Y8JE1k5vr%+y)$8>*M8Mm} 
z1J~u`X6lU=-t6U?@!@F4*MN^kLkXSh$w!{N^7(`DjJL-{M55mranN>4eB5dxJM~Qf z)Jtzm<uB-&z6^|J%+N`R7{@fZHxoWL%_wFA&HWN4UgeqDPH!5;%Ti zy)NvS6!;_oL&_KWi1-Gp-=g- zSp=}Gp$iGpxel1WDy^^3-z>XoKhz70H^>!+ zupb-pY+OqRD2vN3{zZ?&>AqH7`tA0)v#qwlXA{s9_Ij0$`Vvj(EY!~V90G6PBP+`6 zjZXRq{3N3E$ODi13Ff|D@KYO?*L~e=JTH@L&CNCM+TP+|1u}|$g*U=EBtSXIlm3D@ z`V-?JXId5IuzhD)a>(BO1%*@|bJaI}25SPo)qo9c)yHr&D)Z|_d3Sp1O>5r7<~mgR zKu=LoA<5LT!s9~4W~py8BqE`CF2r-f2a6p|d8psOr~bo6F9Ee{`;%7K@g@v3l5VwW z-*#a0#~KSE@|y;<^lK+EqC*#e{>vAsX){rmd^_Li#1A}ipw9h~60s<|rM_#(b75D{ zDBk*K)e9cxlQO-!EO)9u+9>+TcSCuD$S-C{?mnO-eVa|kWui9D+xrqj=((O_Y_Mi##xepVt;LZ98`p3rnj@k*IkKnc#-o)p{ z`_qd{--7eyZ+~0fynI*Q>-ow4I-mAHhrG6;XU@HJi8gX}dR{Kohr<)RaS(2#cFj#1 zH!A1*)8q2>*Kd4l4hw|?Zlw?|WriMYjZePN7ezm=eqZvX?Q`gXv!bkXqg?)khSFVL zPeu2AdHv>XIU{fJv^N@*7kWdncO>0}kNC z4FN1jvev``|9$DZ&7w!h6BTCRp0>g`3JT~UcGWQI76%Ja6`bA})qqpgH=YNndk-Nu z?h+p21)&ztTy*Tj7mrR`GnPNg6_EH9TnIai*XG-%4=dBR`7GNBm0p9Nkn|qUf7$+5 zm4DK2>nYN#rhDkOD5^Y`=g(_^x(20@-zt7BPeW%lKGUVWQvTXsI=zCTp9+7}S>lBq z2NVOBpO*h+yw!#tE{BjR%iZqbaj zMM5flN6K9ZwS1=?;g(-h2P=3@odIm*$Z|4tCi08*Se%KQdXsXFd|G=s|G>vy#jExb z4?y)h924|%jVkM~g;oDDJl_7=a7eam<_cfRBg>Qc)o$n}ZrZEmU+qs;(0t|pI$e^Ho;je}k3;RyI5Bp#o(WuM@$ad*$6Xpz#{QGEUC%8J#UpQSsp9H; zHYORLMtadhRtje`dojC>PeUo1M~vSnmd74{=@=fIWxqVLBEnYd4j{d zQKV#I>9k8 zghFwgKkb&3Ad)LT$VPEZT0$`T6%C%1)CckY{6;M|NokAty?!Vl963NQ%0(Rf;=g;j z_2>Wl|K(5Q{=z}zA_SO(Sm*`}lCUZS1AB8uA*g0eBt#gcVsb?Xg7Udjac4pYM}6B?#T+I-Tr0>`i15B(aT5OB`GFj1qJxy}22(4SAnHYSRDH@e zaKo0tf>o5j{*aaZkw7$Vy`VsSs*iBT;K`wsAgjN}^+=Chs%FD4X=u(c=Ryh3xaZR4YsUK;|Biqb(;}g=|#(4mW`a2HO zNvC}H3tlh@*7%JDa#= zTqKr#j~A_$cugBfH|cT9xNOnBBB+zL@vlHfe&*39qoGAuq^X)gMTi*LOkE?a=JfeH6?{2eamg~T% z28vv^zv{K2aq$Uf(H$)9J7pJkxw2U)GwpUJf8 z+@IJ*n2blTgoHo)(gjJQ% zeV_ttck7R|_4yNV1mg*erpz+t-esuzZ?_QHD6!LtiCMN?hBnR{@8Ie=7wNM7NB)6D zMA}^=Xyh1nL$9Hapa|L6a?`ipBb)2) z+00~)nDm;Vc(aIfOg;~u9GX65 zN9IgYPfw1^&6Q*^8>hiV`Uy@hvlZlV5yPYrUoL;tE{@OV3EE>WWg)B$~9 z$e05(USt(;>jGt_?aiia{KGvz(T}n~M;c%0E}5}$PdJ;bw$%?ngH+V9zbQ(ch&*|0 zbBi7x{j-iiW^82Bw;~7howCv{HTy2yR01C-?@wH=Pe1zva^iuuBSu1*jDwu$yEtUP zHS^Japi7BV>L^GgP#AvVOs_8BaeJKGWt9gnG&s3J11}8zb`A)bxPmD+5dJ&>5c$9} zp(!8QVWv}l&~_YTXrB~Mb%y>cp%1=DF38k3J4(*I?V;K&pXiY8yRy{hPNrpXeO)$K zQ&C^b;uL*!^h@GFvZl|y({qex4SKeaZo(snM)>3e{W0}KTjx!v`Ajr9*rh(z?No7t z-i{a)lhVCA+1)!R`+Iw8PeVULSPVZqJ@Z@rJ9-|n@y@LiJez3OY%aW2pZ@CYn{s@7BKp!( zx9NfP;ORHIGCmd9m&h)%GzbwZm zm&)hZeF$yt`O{~T|3GbhP(JlGovXCglYWivuA8CysXw!$aI)4VR6F;HGEwxF(n)zSY zX%f=?uPXnJUSn-+f?7K_MsSw<1}5K`vU0s7GLMxw5X`U|M;^tKK`x>w3? 
z=!D^{d9O{M^BDtpb^M-Twz_4kR*SSOcyw~BTg~#E4eo>VToqo zw)q3y{K27*NZz24I0;?1H^*5%iKge;j*3x<+W5aaw z%X2YY{*Cc!hLhGl0>YMMj4C(#pc#k)=|A$rt280kC|5$LY?YVmJNzKU&5(&qe61l2G-8*$r^Lggh2e>@8#jAwd8OBWj%L`ieJ6(qAfVLLzP8T}67 zl+$|vG1&6>J9r3+sUM7I8Qy~Ck*sASS}J?URq>Pwmoq01!Z~oD@vriYu{>{~hx|Pj ztMk*~4;poSMwEgN7^!h$APz>Lqe^4?uopa#3f`Dc^)#m3R(M}yx}oOa*ge(ZrrWdn z0GBt6H#|j-F(hLr=9l!DrfED``m?Qac&y?+xUHW&Fykaj6a2_cy&+U2BRqFY^1`XB zQ(g@71Iio~TGN8BKQ{P{uRji&5FdvmJjhA;i!Sp5=9QB%hjZNkJr3`2=wVyPsvqYi3w+vx=SB*!9wT!R_U6CRqN~&+ zw;@=D^2bJZ=rnPp;Q)LW=yu72x{kFnkQI(REhBkc0GyGY0}p=bT73DC0J^0x4#sXP zf7ZEZp@bn9aHvb_gEqk@_}Gi((|-XWe*+ffrA+$u+y|e_LV9`NB=H4yJ3h)52X+EC z#uh;@Y{H~l!skVH%5cjGe?aN)7Pl|A{`^1w4}K!=7ZHk}mB_|t97)e2!UwKrP1q&2%)48#@$$~0Z zzOkrc;gcY3Xsm#8av8A!xX5p8I7y~SVG;*JvSGo90_ZcpqM=AsGIS0qyKJ9EVPQm_ z1vA4`*69=+#rFeE)g(L!!?v5BnxmY)F4S_U(Fugn2EeuQ zOHR~>9~8)pF!7*zh{FZKl}_kKQ5-G+M`6S%y)ZLZ=jo0OQh=*o9^_qVgAISd74EP> z4z~aqcmr49f{GC0mD6wR8;u{zC)`SdTtd1)6|88{)`@ZKmiWnh4Am{VsF1H~rLHXx z0fELRwF5f8Ei@CT%%3*?F(BGaxjPW$P_B84gbCjC@5i?M%PQUjvyq`Bj?g zY}<@IGRP$5hZ|aQJFiS%A>~Ir_(?RV1(2Kv6wu(S7PHO`!Ealy6|W&fS+S!ZAlOFq ztp7FNw2iYy-b?vsx+=pkSjj^d*3bevxbE-P?^$NoW_6jWJ502!L&JV(O zB77L=-&X22ik$UPp(BTeFWZUN)}PxpNZ`=-f~V-E@-%MMx9ctSpdx&1{Uu)y_*4Cv zj|gqroipx2=BNB+xi5D5-ywgciAkg4n&=hz86wjC1JQUf&M!R`vOG&|IO3fRPPdDS z_W(kFHh5lq5HD!9{Ne{ce8F7D79CvKMq9*gdaRF=AL?Uy@L%g5+S#6>tYHVV<#4#t zr2f>vK>0xaE$={Eu7H?GF){MERQju7fydGD$YLT9V>-w%iSW%>OnSGt9)GuNar%0| zHTK}f)#O)fFrQzPcduWUzy6nhRsQ;?KP#Uc9+rRfkN&j$(|_&1TZV(}^6J;$lwbem z>vD5>=JEOXYFaL@F1)eLTlC-i{Il}eXPoVu9;T?%kGGyYzx6N4$VBF=md-cbJZ8n{X9~Ns6DA$Ki z5U8(f_gmAFB{H7C=f%0_C2Y(nLgl?HJG+Cjw>v5`(Y(I8mTa!d%eSvRrWy?S4*RI^ zy*p$BA5=M$NMv(*b|JZ5NmlpenaaaBk4;E!Rp8t0$ZMu@oSmI%uCrZ!^W|@S$l?!v z_+xL-P>0YMUtgA|M^BWe^g;6wCiy)!iFXE~5epvPKvMS1wa_#+=~MQ+Y$T zRc?z!@)HlFbGIyrr!hwwawV?%)-wc!HX;WIgjYyTBsuu1$tr1ZZI_dCrtVK z_A+h)fKS@#cHcVg11^Km^KI&xNf3M~5pSApE@u92xNntHU30dHiEflrgb|Lw^R!y7 zyL;X^j`25qbHIQ_mWb3GP{waiIG-pJ)2dvEA)It^XaV6Voy8}=+D>eKMjzp{a$}$y z;k8~wkL$=efZ=cL@2LE2SV{nF2r}2?Ef;WE5P}!pP-es38?6fG5DuHs=p&0=)7hfz za8p8GX?ncH&P^G8yLb2Y%F)qt6-2TT?ofU5FGLqQ zD$6!+eyHzJ84$;o`f4A95N%#2pu}ChfY-3Ap4bgAua}OYmeOk_7Y6#5_=#8TCS5{| zS7=}G!B5Z&U9ExDP9qxTfp8?lvj`cfEhrg}K5hVDqu+-;K=TH=XlXxhxl_C*|V&rrdCq zsoKI&&#D)nJomRwx74oYdiIUgUvujVIy97?u&F;*8|7KVGmt|Auiw2ZZ;#)Zh7Y<( z8E6OeeYC&HbnLR^`~n5vpx>;M9C?0@b`RWtB0FS5S>C@pF0bCb z*K<>20rANO$4KJ@ZUb;TP@6qJKUKBdm;D2kNBsjd>5B(?PWiS=x#5$%X=h#wqHj|h z0iUuUU(&=H2$JsTTy^GHD}fgTzy;eNPpjkW0NH>;c8&6Opz3e4k&9VJFT+}Wr~M{9 zA2u?5`)p`y0PQ4Ln|-y=y0|=8qqo zn$8v2w)KY)VWF#G0v_Xc1)B1$Lh?<#%o2ZH$oy8z2~Yf1J)#UoS)ZO~it%f|*KEEw z^p@?WoU8R?om5;yC(}hd-Zl&f_o|LXdAl;rG-)Ta`36M)o$^@DYuRu8ZRFO*@#}G- zec?}&xipYS43nahkuXvB7Z+T6=Yx-=bR5(!O8-$i=gstb3byc>X zH`J+cl`i&Hh+%Ij@?kyzFtlQf$XM@|Jsdsv)pF|dRby7ozZrjXfRGnHnHOzqlIii8 zNW@04XqxY8Jmw`%UiguR&2#9I4v3fY>@>K#9Teq6k9oG=FPzF@MnE4a7v*u@wVVc? z|JeAWoJo)1hy&2YNatade!v8YPYz0a;)n6FfnjFX`ueqcxGdG349k8Q6zjakGi zVzq3HD>*QpV?3%jrdwW%aDH{XO4(cv0J)K=Y|~IIhYpB=Jqt;pFaZDjDT43(^Ok?V z$Dz)l$6Cz$Ny&VUaWr!;=E$mP`I#R>HToA=U_6e^!D+w98wlbx@}LtETT(~(b}Awz zD@)GxXZDOA^$+hboHDt-s%ZcPHLiXL=U+B+D`@B3H2N205`OQuB4^=) zM|y1DOsCY0t-bfkd{`o9W14WrevFf?H_jK?MS6dFin7b=x+T42%Q^$V%N%{ix*F#m z?C*I_R2JM8H&*?}XVn;wus6vVox`LY1Rw40nJ%}(O(vl)@mUO9gAb zPY=^m+>RppD(kN1HU}yjW9!|~z@MmOF2LFV>v??o4w8H}o4w_HCPg`Fc193o)e|?8 z#Gw+m9=FpxSOBLs$XGbB@lW4_F}2VK@n`8E5AY$cw#`$MHej02Bk#@fa_i6k?0@kS zG4KT;`H-U$NbSM9gy_YP1PF>aOo$B`-XNd?DWE1NOi(!GEydwvHVu@X2ERLH;h6#c zBp9E}flPj)GZfP< zm&^tWR(J^)xD3=qRg_H@DylMa6M)dv7Ybh!N_B`^TU=eL4zkA^PNK^M*R;Wn1z8@L zT-j$nvyL542q#mNkF%GbXzJDjD}uxeEa6wU7wJe>@tN?~#MQ3GK^=bsnLl};Ls9+! 
zXB18+gD-H9c|bdGO1e$rN1IY{WyfL=oa8$f%J30JW04hsUCt(v8 zxCu49^58f-^<(-U0NM*YGjMkKR!s8^ugZhVlKcpwK(ae+&V@|_{oq&*|Jt^bAITdY zGs@}ZB|+kFim$r;N1o|MUN%V}(g%ph$DH7&9#eLdIUEY74l%TCLiiPZ`VveHCv8KT z4wFFpp8Lgm`u`-bODi7i?gJkjh%*c)y^Q-ODjLyj>C^5fV|x&I6QGH5%e2Hk#OlFbqQVf zi!~hRv`NbyW5C8dY=bWIQXS#0c4N9gegg1KUna|C$o3lXt=@ES(z)jOV>`=_kvFsu zTCydFXhYD{_(er_Jaq#agDv`1;WX{f_yB%FK$L0PAAld+@PT84A08c^xSEKw_#ho( zBQPf5j0fl|l&`Yka4Zu*j|r3y-_2)IwV|;d-^;&avavM~{rIliloQtH7v<%*UzPvY z|MmY-UVZzjeE#gH{OLdb)AIc?j%4eT_TBeg}IX^os$8Sz#pOxp&o|nUxuJ5#krN4=&5a8>4KF63fjN{gLYL;Hf`{pc%KiCSfxVn@`KV z>h4}Lzz-ve^%$Gtq6f_FLJXoyU+0|kD;njp>z^*#ilg_(wLBi*Y~o0cVms!qPZ#bEB{PWwW5>j1yiQ7ATD}d^NX*Y{7qrC&`D+kH<4i9_- z1>Ycq9&*3Eo0_KQe#)A57H{=P=5F)Q-tblBFy?RRONVxN8;3SDpf6H>%8-Q++8*`6 zT$=jeusS9VGli_VX@vaQtf!vPgy=V=7vckceBj6%yTnmX5zm63w9tAO3ufY5`BQFW z;dvu8d{{wonoG0b1c(LCpXn>SsG_pM7cVs!SDerf$7XXZY7&oN{HX?!<}e02OuL&) z7A%4ttKYl0xG685y(qi89L5-GD!B0H0(>Be`3(HRt8DZ^zvcIjA`BaFa3l!FV^4)9PDOT;)(;Yo{sM6BVLJgbC+`RYn!`lM3NG%)DuZl? zp_Bd68}!T0aHnjk|5L~8@>Z9xG9}>8x|L$$3oR&u0C1qo7-4ebVK*4 zORnHVuX{V{uV7d5(C-QHfdxX>8hsU4<)|B;G;}`Xs zbv5!1h&3Q`jeev;O7aGS~v@yPC??U`2BXS48 zGtjMw$J}5yA+xM;Vob&cXRJTPypGLl(tyVS zgsf-Nf_MCkJI$BE1aW{@!7}U@QTA{P9~mewp83pQnD6jSdLO){9#uBx58m@rHq@yv zH4f&L1_FxMWIhx7YcY?~xR1dS?DPDioT|qdXNrI#QeQpZP!MnYk9LN>35jvuj>cq+ zQ5Y|hZyXHOxKH|F-OxCOGD6bx3ysSN7bni!Z$vZaspA&|SqtC-A%wo-Bm%xAJM_(B zrS77f{245`wm%7i5f|mb@VuZdsRPB4gmC?#kIYH4?3sq=IC5a#!P-IYB{4qS-qQTf z2f@RTF4zhk@!qDgVJ(Tnwyqjw8aa|ThXK%PJbZk9>rPJ~AI*U5GUr4j|XZmhy`> z;BklMFB|~6n`;g%o~*ab@nKKAc)bHTC>;K|iNt&=%+F-nuIkI5_n=;7cv+o1ec(ez znOQcJN!4Szr1zoTdmjM!U;jiwUxWYv5r(*>2rdL_IKTM8<%i7KK$aq-Fm7D4Xc(O8 zaG02Z29rfdiUYL~I8kSTLb4#@$|9~RCJQfOJ{us7Kq_K8Dl5XhxZ+LmXmkXF1ITCF z3orcJz(*C9!g*MDK}g|X2fs4vSZFvbTyU-FLVn8O3ZW$6Qwi|me94%&07PT2!jo6r z2J$w5gcX$ik^cF1~(5C+N}< zC`7!v|Mx=y2;RJakbeC|BK?C0D-q?iW-URynb(G1TYf`F9?OvUNtecnK2`VZ{W-4zulRjGQ(R+OJ)1rN7$*O+Q2)cha7W8sm0nPv;t zal`ufHeKM=?q9H+Uz@ho<$z_H1Sh@3vme3{8Hj*8un)^G`EQf0@KxJ$hDSE!X|EyT z19Ack1YePV;B*o#xdo54pW6NmrWbCj4?2#YqzaDr}h--Gi~5AfQyPK3&zW{af2tDIQb%Fyo6> z!*7!q%LHUdAO2MnG>l=BbTVC&UO~oi9I$Fv_?Z4n&d48kr~fpL>aWJxz&A0S`do&# z{^A*s&tLRFazP*EYUP42otaPNMVRytoJjX%L*rre&XYj*3n3f7-OT2I;<*;Rr^)Yv zaIv^mdgb!svP^GeF0abX=~?;bfB9dO%j2u^qwjxKe)yx`D?j+b7v;^Xm*va9{mPr! zFOK%g{{CSZPjAZc*?UjUm|XK_-^R{nnO@KQ)*|0De*gZsoL-#yt$I(!#M6wou2ois z!+a+H`1H8Ee*3muo?VIW+p@Q}TR!{r^YZ+Y=j8|A|DrrQdR7h(pZG0GCWdSj@}>pg zO?Zu$_V#%LbH{Hvvk)_$OjKSrwdcOZ{28|b935z~)hl!H!Q1hfCgn`K9N@y0)==f~rj%ggtn z_jV7PqKM4^!)s~eDlp$=TTKJJ!YkUGpZAY+j^aU!l$1= z*33ubH;7IwD#qf2A_$a;Iixom$zOdOWdXr&x69^Q`%TSdxY=TR7}x!8%dH4SYo4Y$ zy5|k`*`zFFyy5EcwJJ=s+1!irimvBrOn_?}a#J)jo;$gXvEe3p@pe#blvErUY|ph6 zN7@(|vj{`Eln?S%UUVhM6nygW0+aHNjTuQ9Wl~z_p*GCh^_H9PX!FRFg(!f!r~R|R z#{@n$w3$wD?SHg&()gR__>sE!_vX0gfgGma_Po_hqbqV>UtL+}27?3ZJYl>&44*8% z@b>kNWC1>N6Z#=;67VkgDR$8#cCfoCiFRJ|1lvyJ@h21jNK zJu-sh2~XR2O?U@LM-+8V`zMW2!zPkG0uS|{c?YnL5N+OzvvTvMINy-(^#*0Qe^9pe z_R5Cp&grE?EYxy{WImsip4)_YR*fvw*R#;JEk4ji4ry_Sjqo@iuqm3<1ARC*Ib~&vBc8H>{OzraWkid~#uOc~frc=UT|;T5jIxU%-dj&cWfK52oGRs84Y{ zZkG4&PRbkg%WUA&H$8v$LeI0kvb(z{ecVyMyeKDUmnz$x>UURiVqsf78+EI{EA>-6 zFZm>Z^;plm=jzjWvmRN`=4y9(9+1ZEKx{sb5L z6U7k(zIAy?&sO2FxD2lZ8^TQnD^T;R?Fd~7c$hx<2^)O4k7rl|&3H0#lV12;Z#+9Q z&C>W2i1tr@QOA*gH;ZKm`l%1l3)-1r)vf^V#!LE{rdxON)waDFxhG z1O76!JVbn!tIaPU)1`bBV*G&C*-6i{R_MlSHhKd$3_r{DhfR2zhp-}ttiKh0bDboi zrPtx#a*p~#mWgAtt`2~jPvSx4F^J^L@vAhc?F@Msr?5uPf^SsCSZdf-8`8*x#gF2! 
ztr~#?Zo{vMVxr$*{7Eoez>8lrP+q3(bjxxH!lcKIwJN`ePrHFyA0+G2$@c6Z0}KjQ z>J!XmJLq)tuc7g~!grb_`fhXJB`r*icG#-XPt(Vy>H1rqL0yhZlaTzh`G!Afgq{6N z!~t!&fVP~0lQ!v>@xpd`+x)7|CBMicaT5mN!%_oZ}q>l za?p5w`q`@IJ!9$3+vpcKK+e2`XE4t~#<126Wfa|VJiTIk4{7=3xL;6)cs^tF=r%5S zgX6NH8@3e(U?KIf&w)?sHA}ej*yp~`VaXlW%x}B;Zz(!BufI=!7tgw zL4m+UY8*VqMG$3WtcK;=y>U?Aj7Q`)zwD97AI~xP2zi+#2Hu40eKm#AQKC>LdcIqRIh#=swCDoKT~VnM0n6MW(`9w zZFqI3j?0D@^An;9m4R0)88jo@c&;D%A|pwbvHwc{?ag5NQh22P2t!9Gz#WG?l|<>m zXG59|Yac?V?9vbFfEwnYDD8wX=16lW#JqXm(|D74l{7%(#9mJ|TI+#vBQGXH=Tc+v z%Zn?Gf8k5{QP<`g%F3FN=Pa<@$ROg|dQ$l`N2KMN9)2Z2 z)*O&QLjl^2iDo?gkZ4FViSarY^Lg0n>u0EPFb_D`+btu_11LW)cJKlH&@bNiMw*=H)qD&|}I!y}$C9CBt1LSE(uSx9!l8Mkn;?gx+XqI{aPN>KI8ab3*>Zm&55dS3I>DSA!G|NM$y#|15ie{;_f%$yBMwH0Vd;cAH4J4P zN>-8&<(AysIU^gzvG8O%mX~D3;30XEO}Wn;;x2wtA z=nr1dov>XYH5oW=2yB`=y@Q;}w&M~)9>jL*H^RGk9|G!6#&w27BwGdwVz?Q^s$Szw zf9O^tKlXG#)q&-sUlx{)ocIU~5)Qz~k4>9X9r~2M{TUBXx5ztu z0pJC0)X}ZmGyLXBe;$A>Uf{M+h0JDd%`#lx-}?%ls@#o4GV)NIK;Gw&p5&{xCxft$ZhgAa#KFDmAFyAj;B7lw;t#L zOlXkfCk@c`r|IPHe58+!tU9*Qu~*Cz9RPO=Szih1H}AGd7begs=;P3&Z}3ReF}41Z zeiM$B?rP#0gEPw^c%=T8XyI>MwJs#PKneK;pK^BWETf$b;E6axt-m%+HCXBHn6{s+ z@=*Ofr0-5gIjC=NvaBmXqwkahSI`N*TN4Q@*qt=!N0m1DXin+luW385O3xc0HD?4Zko;m}l-zx360Y)W$Xidkz_W+?iDyE@ zV`_q$FzP$gCb8By9v!ITg?PruU4OHDW|Q%q^Tb3P(P1(y#!rng4cuW9;`U$}<4(R4 zR_ZTpCthRHsUJHxRe#70cm6Ed)ihA+K@byFh0t7mkX1bk(UK1nfmm#~jfDysZgFDc ziff7&qRB+-)+exMWqf&6-hBCW`K$ltpO@>i3%||({OKne%Pz{_{OoVa^(EISk3BA& z%@^g}d%gwyR+3nFBjf1FQQ6(yE$64_<>j~EmS6tOFUtwnBTvV^?(NVU^nGu_(Ns*HAre%qEeK)KfO=4LFK!3P_KY<}|XWME6<6>cJ6vc~2RoAxJXr(R4r zcyj223cbP58zFPi==V2$0DyX8<7F_2@2IoDzyis&(qE|_`0g{CDBL*E8*G(NKmEjG zbqpV9=7s=nT|gJU`RzBpKc!#D6J-sj3LiXj?0%{e&x+4zUjO@*eiEa zO)Tr?k2J&bLzc*jg^Xq#Ng0_a2soG-i!1Vb4y3sRV?XXouL|nJ!5lxw@9|=^F%z%G zHv-UpNa8juJmR2T(L6R5cnqp&M%`k>@gB-bW zGB*3riK8cM?y=Fj@PQk2#S2jPGxaY~AO7aJ`WbHfK%cjIEQmr=f6AY^dr!K_wfx@D zB>&s3*es_mHDBh6d_GAq^oFNoD4Fn9F>L`|?DLim8}^$j2lMLtNrd(KYKuMVIiEJ5 zT{%DcKIE#x@=M+Iws-?5ZW2HSF6$1rc=QXO+Ti=?EUX|iHgTcJ+y2zK>s{%#)!+6M z7UMs{sIj2MK`Z8NY&cWbKA@%YvBbmr0dI}Nhd;|9KG^st?Lz&quSQqDa?K%tH*d=R z=%74%c2xSi2a*-f3JzFIRTYvB{NKm;h4qCvumUC8mH0(|lp5ubEA7zb)KA)jLuEo= zT~7Y68$$@;6?kSTUAzH9K7?g#%#vLwf%^af^GkU2m7ofvt!-^{L!s(lb=FgVxv|4d zCD4U93#{^*j`~FNO&`SuxaP)uenOc{#@A&!nRwHl@9O)Ff7QDe5Jj5<8vq9c=IVdv zvvIlQSth!Jp7c!lEYwGE0AL}Vnw(#$|C~mc=Mb_^hr_)x((^CA`Hy5&2IWb0?C+nRM}NM3&TBu2HEC(iS93^oWB3ELd}Zhjl{M zqh`LUcqSQcl#%+#IE=$vS)=mg=xI4RJSs2r{N)xMp3Bq#bwG;0gFk6-eIvc@my^>o zmGiuO`|`Exf`b=7{_&5>^%b}K=o$Xvx$9ysMmadg?G-#<;-HfHKNTOd5JS{EZ#=~S zW>w#0&|S)By#>%g*0k&-R(Skk2aff$nl7|@UG?7arm$pkBRSf~Is+b%c7#Q6@Zvc8 zyL1R3JQcd~Q0IT?x875iMu!(!)tz`@a07Mz;`GYLe-xIu#O;_$r;$S)e@1))IQn-` z#Sf1F8xv{BGwd)C*FqbfawQ!Gz)qQc7i2zFmW(w_mj1_4HNNpkmLZSyM_s{3r##RB zOMO$DHr&g8{Vtg*zh?diTDM$DBk|Kjet`qT4Kc#hA2h)sK1}43X)qP)bEh=*2SmOG zqf<^<9pfm|i!?z$dBqS`5uN{PIQ!6Iw_*FkZ9+AI+_Fwb0ZS;=+fvJ#nyF~i~P-(`21JYJQ{h5R_UNK!rKRIfgK|0lA z%;&pt`3uP9dhghbSsYCmj^*@eLp%H{CjRw9yg>Tj$|2<=+kvucf4a2!Z%5)r`{hP0 z?39HivxO(LjIjb^Yi?ZPd5`;UE}V+b7;aNzQu=QWvHCz@2e^Hiarf;#b33K;gIuji z?U=C)<2SFuf`xuogl)Yc5?Iq0Mg0B_zwLrjzQx&{et5$ zoC>_f{u}hk*YyQC)-R%eOF0C)SMT8v8qjJ*^GbwMzsxEocnz;v} zL5|d2j}1Jr%bW+cB{{`;lwnzHs((BAXjQm6An1@sU%|3~fM!iu!AKcMA9v$-f zGu+E*jZ~a!hy>C*$GI8EInYMGU#<6i`q)7dlIKATVT_bhdPyq9A}nY`Lkn6w5Q)cc<#jyX@CWY-muNx-W#YSmUbv7_vPj_u#v%!mJxzRk z#he;gUfJgg7p2&AjRuX2IA>s}{M0f8gh$*49W#jodiX6S1M&mkw(>zPlr`S^XX2$i zB_}UXY-s{a$0Eg`q5D(_{J58-)3Hx()YZZ@ZH#5*@&q`_ab5d607kHcZuTvR>aQf0 z<&YpgrBAT|1G4BC5qjMOxYMixc`b)Wohvj7(*T`JNHyX~YpTlExUNqY?6AYoc+eex zhZVZ6EA$8&(UARi3Y3wvbcjU_7RxvUfK6{qEAqE$%z@I2+unK&T&xG2ifZD-V?^s>m1({|wukw;x5}kf;=)jA^BUWzYZU+mhi2C 
z8uL5rPv+B}Jm++}PiglUzo|YvCZN3q-nJ%P9D?Or=sh-Aws=!|P%bs8x*gy9t;RRs zzAnG|n}1PW%l`e(KQCYW@P}p8AC`Bg@5=7hpj=#Bly~pmmFe|Z_*_}937sZ{Km3P3 zET4V<`yOZi^}qb<^4CB8WqJ8VX*7A`t@H2y;PbM7cvv>Ml3f#!Z(e>~e)=zeR<3TY zrMdTI*n3hw`{aA^rsVFfc)ly=m)GU^?Xkc4b|d%2y}H|KjPcQeSd<0w>`OX zdAp~H+wMT}y(?FjHyYp1yh%D5@~MEhmYl1hV(tksYaGRxmTUQWOOp8GlT+c}_>D~t8jMCeDnFY}n|_OR)ZZ%? z=cnbDfBOq>q7H`~ayTsC`{DPzF$N9Z`u`U{{ck;~dwTTL8!ua%DzE7E2a>tu!=^5J zmb*m9EJMo(iDwG$6q)^@fSA1@6HiJeakCO?bWDssgI9;}(?)WY=Ii5MllxGmn z$2jcEq5xBBIi=qW^|v-uXWN<#6Q*$+2bVd((HriTo!$M?9~~7{kPy`lGzYt#O{l1H zH|P7^EEXsw8&9IeVi1%6+glbe#GM!Q*!X8o#9XNMYhu7fblGWVit{rho@YG&XouL% z0apJ}9_D>4?lGdHB;j{?nDa`uY=oi9X&}Va^eX$V6TM*=hJ~4-bSLSf!f?l(@ z=_?V%2a7ap#PeLZR=Y;Q`6hVW0&rgj+|;46F}I6@%bM6LjKzH*X?u zsx9nFzR;&!(3;PJf8OBXTZN3}(Fd-mM6PTw##Q2Rs|AZP9F{>kYz86|K0Clced@A5 z=)29#7Bepru|W?G3#mR#FPeT!8vUcMQyEw|y4_IyNnLQWz~oy<#H6q2tYygZP;S1n zPCNDnvch-R>_?`&^&vv!Gn?=xH3w)YtI97hmEouKhiV+^H}~BC?78pa{%uD&)%Q)$hKs8 zewfAN;U4ZWpLw`L-n_T6I;*Oy8JB(t6HSGE2}yyt8%`!BQY+dU%AIax)$2wD;5X4{5k%a$!0 zmYHqa#24>quaTGhbG}}1QW!F-mg6vJa_=HutTefGAwG4$7(qWlS{5b6wVXn7$3hit zM6$%Qza7g)bC)as$KJf>@W~poqdIM<58&+=-iD#y8*}T2#uKi~WZ^8n$w`|@X`pt} z(|E*<06mR09J1$733~fPaZitqeCWc9U+P=A`GEs08=E_6EV`aQf0d3jK6X$0iJNYA zcklSX8M0J(ecCm83NZ2a@d0s4F2 zT%q>N7=SHOBvWyS#Ql~_#rhO$25K|(Ym)C+oRxjfx)g`z&ixsIz5PQUrlYSoJ7bZx z@AkZ=vG3NcTWNcH+lLq!AH7i_0cfeMax=)w*Lxn5-|W9hcW&L0tV?#6#8btiKcYM2 z8?IC$lH$V{jFYR;e>qHY#CUAk1i9s>0CGfckJLr`iM56RVp1l%Yw3`$E5NwpxFJzt zfRaAiE%XGGd`rB5DemGAzB=Ua3eQcD(@Xy{?YSINyv4o=3jQ+xEbzPL|MvN(>t&`+ z3V@$loiG3OFhwy>*ZZZPrz`jcZSh#jJFTBwpEE!heH89G9zEqp>?rR%odGcq!o?4f zZ{{C(u9ZbY@xMlYu%3ndy$v$oa{kk7N_`bN2#dbB4m{7t3%Ds>jlLStODjTEi`C0e zxbeu(`f-0y-PW6-N=f^Z<>{f*M?QrM&`@uC#0?&3EY?H3colg@z9lXYv1b8%@@M@q z(-#mnS=IBjJ2DDW@c(_*&usl2*Uu_>FYGYy8C-ng=U~6gC%-TmR|zv-MpDf{ z%-=A`I{P~g)w~NG_?-27#X%2mXgucTDdwWgl^J81H;|UOJL59faEDG1W%DNmGF(&C zpvmQ@l_5; z#orZ{fw>i*FAPwg@Wg4LUU@OcQxfJ=Z0a(1x9-Y(Uue)hRvR(zuQsh!d~VJrPalRC zX77uMa_W*>$Cz7<`K+1zkYCS(bD4@wS`t%4gZ!aaijeb)QVLu@a?!3_UwB>xmnqAh zup0-+Et~pV;8no%4%w99>=_TG8w`FuRpO4v#k1HzKY();RyD8<3_rPb)3%HYTD4; znY9XNSzTfM0p243=!EiXu4--=b6Gx##fufkN17X>AFGP_Ox=p=je1;Voknw0^!ZiI z|Dlh$J8Kyi(j{Rz8{^2lKY=d3QGVv>IM9{NM@5W1213=6R34t;5zOh?Bj0>hvnjmm%7^^;JmmJ~db+iHgDB5ubj$mAU5p2jO?x>E@oX8Vn5*)W!h zbb}Xzz#j(zV2G%cfK$0tjG_uu2ndiJo0-C;3XKkfw1ErTbc=Cre54Tm0O%CcvDxI7 zn?(S3fbM7{97L1g`pOH@$^s;((EaAWItqVJL18WrG$=Yst@&Vj@rcD86&!2^KLd(j z`gqR4WG@dHN=N~?K>>&~=6Q|`t>%e6c%1o!Qc-b@`7gyi|{xC=GAWlZ* z7w^JWo~8#rf>l6IYI@{vj}lPqn}PWhvFQ6MuwdK_};C99NbMvZLp?HG;Uvlfu<{W8g;SIhD^dZG}mK zS~htI9ft%gL&%+FkALjw5bOpw?FAo&t0mbc)PInLaf~n_OG=A{Pmbj3$aQWfMq%cO z{eg%(t&I9JQ1a2QGb#{xnULC7`FkvrpHtv31I<$?xS%=miE#3R4}{g@Ujr3gN-1a5 z6@DyW$$||PiW0XDB{16-TDhzI=ixfPIuMS@{Y}A<(VNw-k6D;1ppl;6*=X?)8@jkN z-Wk9@CvryD#kt1siUL$j#$_n(y5Ds`I0nYg;wZ9KFTct3bEeC3D|dAQJNw@V1y{D; zRe$foN4VqMkoOJgt8!K8GmZ+vJ=0{a1>$8RJRVbP{F=}I+Hf2CPeBzu;zS=`i(f(Z ztMG$^|LfvTVTwxe8SCi(T*y!$-VG&Av5R~c`N;rXmQ(Qaoa2}AH9DId=AZnKp(@u~ zfGjiGDUJeFK(=oJ!i)JG^RIyw`~$Rz6@IUd~<-u9TsGBh(C5!4-U8 zNZi0w5CSzk!abGa(w=j8_NT8m9``Bs;iqQHMOf`QK=?pzoQxS*fy;Q_$;T2m%29z4 z0mTM#+`6BO%uksw7981D)H;SMK%h--e^u;xdmiLZzOqmCcd=3bOSh9X0 zH*cEqX{U)IuShXkVxy(qRvhX=ipH>?`bWd`^{;-JzWntsQ}^H?J-D@s@3dv`az{O(Qqm;dr7>3{giU!|Y_>hpAatDV00(I=_NRqcx%DLa?x z)r;5Z<*V1JKN|VXL*C|Pg3e}oZ_u{_i2qp19GiRnyh%5f0(#o*T7L+oY2?%6+yua< z1~`Glk11VawWY8|8caC2(6iFa)pV^U-`R@|DZ-)q@NhpA#CCiS zoierC(75PLF0Nkoo5=b*JvmDU`v(%$%XBvETFGlPrGQIu9kkjH2So^@&jjIZ43VMrm(-6?v=TucEel#K|5w;m4g|V zwtbjFY0zi50l>VK?E7l{=BhXRXs>L5GJcRB+?r5dGHl`_S7j5Qal?lXWXA@)@GrAC zE531vMYw58Y>+n_t;`$Vn1EB#XH-r$AIsI}Y~(^K_-MPnCBVuri#`i+5aELB%UM*R 
zZ>8>)w`k!F>#{J4e`BSk;z^_PI>5v-r7TT%;gz1`A1=QsCiz8{s1tYxUh0{*u$NQ@ z_ciLfsCN!ry?nEmPEWgOcYD{Hv22{i4F_ruN)PW!ebL9oc*)pH8H2a=vDZ9R9ON@} z(%`W?A`Y@CoAfsWoMk-#eh@o&;`T3GBn5A=BRujaoW&*%p|R1u)M%$ml~uw>LsK|? zf)c_sGl|7S7OEzGt4M;HMrS8!%z-fV_wkt*;gZ}htIt3#d{{!|i$z??sKw0O#4diuVI#(|4({@#ou?^c+(u;>mX7neDTxd?d@v4_#J;mwNTVZKdPc z2N2{PQ(f^x@ht`9q4e%LGfiMNkeAYH78iImCm}A|N01)b(3UlC)BMZD(Q~1}^EhzY zpSd%Mz%93JbP8vj^HeNZ9Q0uR&m5l{yV!IE&~3R%tkvT1Wbi-szL*}Wc z{<$}1N75xZpg*=gqP)%TC^PwzgreY&4i+@~$orC zN5<`Z$TQ~uF}HDqSZ~mLi}^3*V;;??)!?BIJfIILF5l+ov*47P`Et1#fwdcd4nZ=` zP2tQh(KnkM+QeUTbgr%6+E`ER7B69NxPIh3S+8LO8=VN!*EIy?$p$8G&WWsttqYNm}&Gu^qplkVTS z=UXcuJ${hx@7_(DqH%k3Q#!$lUld`j%P(rv9?>fi7152KZ^sjVq1`;zU+Ggsy6Yn9 zOQ9i{E|25EMdF~ls=mx$;aOI~6+LXOuccdCTWL#r8=nGLRz6L1HRXwZxxCE5czCRN zxhfa*1J)~~8yst1elSrN%TL_nQ$kN}$4_~)<&H*?WGw{P7Q zZ#U9bd;R%B|5yL@Ux>ILO&cncd^$YQX~9P!)w384MCT!4CBzE~XBHUzC_M$Ah?!J2 zLJL^PWwQw5&dHv1g;WS9@DXZ+TQoS4#HQ-N#5lXB4F-Ncm&&N0r;Skm9Wd-dK#&cS)nQzSq45a?6mob51NF?H2e0Z-onQ>`6(K;e-vJW6?8;D0=AsVX3Dp6J z*?eOIG$6C|3Pe4Z{N7G4hggUiL!Atrwan#(_?7WHtjIF-6RvblD>0xh-0kYY@b$1jN`)tDM+`|VANOob1S)AiOc3npSK~EcY9Nc zo94sJ@dq3XIO8p8lq4x-npbyE55-U3td=6Xu$xKV*OMA*37*w~;5rn9-a;kP}z-L4M`u)hAYnw~e_R()(q!5xk+ z(#g@Wl+G2W;Z4q4w|CRdot<>&{vF?9@apOFbZ~GY-Y%szni`LX65p};UglTKUzxMQ_jp#nseUpxm?NG7WU^F>MuRowBHI9N3L9xI zl@GuH0MA2+uDJ+T)$`0em#GPdkm;-8*Mj7jjpijOsA(m}|BDgxXXIM#NwyCFNcUns z6Lc%rbIo$IVn3G5IlqJwUF$2LOEgOZif8d$7+l6!C{h&F6F3WaE-zF1Q{vM6p$C1| z`O`$vCpqw-d>uzjJs-pdI&})|Y}Of%=KIc5WO!4Z!y2Lkh?@aegya(lW6K+9STI4( z*d)GO=sVaALgKodbq~(%)V!dX}Mz_YV%gHbe zBzFrJBg+N{viJmuL{R;Dv{PvF0Wqb4&*$PZSBCPv{`1jLQo+KP_#^!R`i3LPop`n6 z10gG^rSSq;TGx2C*jV+V^vMYa&7^%x-dLzUIzCQ&2YZR@gU1?k-DfMWo!g8vopjdk zrK7`cIy^a)Y)w*|!#5l#Q)e(%zrBBWtp1wYNtCb3-fF8ah8DLEu08G2LrS)CADqWM z4)gI$J=6YK`zr0kFP?}7kB@{)-Z&h1;hXkUwx(oJRfC+-hIs>#x7j!h!9l_CC=T7Q zHplqMAwsuZ$>ut55N(iNbcoNL&SrY^@}>JI+ATbLc=vJI*HYs0BZAAgXgD`MTqvJWcl#zy$)`#`1!T1J z0|n@xqWq)-N74UoqIVu|rmy68rJNvs`%DpvyGE%h|p z-sYf^{}N}8&bNWfFr5jziMp*#xz9RF=JPx%s1(v|uv^aqrzim1oz z2Pke3eWqeC(7mA`bW{*r}a4tF?IxQv%>jAo_6MW5)gIDT%K^| zF}ba2Y-CJi&U|sv)!bFO#)6(T#`Bpm=JsoR#Z(B&{gNj2M%|oEkaWt3l98vy(GxD6u zN#U6!mzO{Ki}-_`-ZdF_Wg2v6(6TsVRNG zwWhhE=0fO%KwER6b|+rIMHeSM^GNE#H<&4ZUKn6*i`^Td|MxVn)BIC&Cg$|a%@tj= z^Gp57t%5)&Wpr5pXp%pDlFN|W;nIS1H9c!4FR!Q*%L0BfE~9ZMjQYubmKDoJ$s=wx z!OK=2pjv zKgk7u2GTwEq=O!c*1b3T>BXA^%~hrIi`Hdsq!Yi8T@HM}7eEqfez>iO-#(0i?w;$y z2Sr3NI@ZN`zJX>@^JXMLGQ^7;;$mux*1NmA>BIZ?(pt0S+xYlmIWHZcn*a+dlsEKK zWDL5GC1ZQte){(1UV3?O;?I9c)I8^AeG0z#6Z*n)F;v}%>{XS=Hw|oTdL3?!PfRL+ zgUdxXfOe|7+{$;Ne(BBrfe#=f`@TI-W#a`JUXEE+y|-Gew7M+)f30a5FfN5Ji$9W^ zpDYZ2^?&~h(eon;;|5$Bwix8Mm|+x(oHB^MEIJ5x0Cq=50RdL{tVl;^!a`pGAnGC)^qp{1JR0fSp~z_f9bYC& zP!@R#____zNO@UYW8>Uq&;$m57BWL2Rr#npzD44}jxg1el33=6Bjfs3170j|0D3Y( zXCCbZS%NlrQPSZK-e9X^M+O)$CD?pRD8j{m>VPWa5QiGH4`nRIM8jFhIIbIn@FIs9 zFD0~F2;9+7M!^LgVgV3<%4G?fz!MPsG%BE~*hn;6*Z4*Jt5EolJ&nU+Kmkwi^FUfb zR*36$8Ia>u@v~hW0LXAnO!d_;12HEn~oZKJyD28RqM)*za3^g`SE(l5u27?a%T%2UR_k z1~3Qp{y*94VaFLdJIm-j=2Y z*YMd;=o8F028FZ;?L7N3(lVUtLTuk zUWbgowZ|JF*Pr=joPqDA{$plZJOCOXkl|pllsNgc3NHENkb_X zny5FM*Y(ifk9nfCVISP|IT*h#R&z!&g@RZr{Cc`uVQ#({Em;-+b|nl&#lNBuDAP-Ocnz z|HhxBW@j@kU7V$V{@?vs`svSKX)c=5<9qj|oHwNq4N{*g_9mDp9(6p?JP5@V;7Y$` z#eHyhCw=tzk%}^qV))7%prk{gXS4n8z1!*Wg9pJw&Ckb43fHi&u4y4{b~e%jh3)L@ zC}^AxPEONWV?|0~Y!op^qVH(58a@^8HwBa@^LrF(Hjj>uVxyfpqmKK1Hq2An-rjY3 z-grDdIudV=z0uEiqOBlnKDNEJ8ylw5CAePw;9$>TD73sS!ds>QZ;vnsK9%A+8ed4k zmGUJe&kId%d*k%z(WCUyhu>FTC+XJ;Ag{={J70SE5|92{+`*{(Jyl z`lI71J71Bl_FT%s?H1r;(F2*}E#f|(d*JPN$@0dg+L`pXch&sSEW<4WcL(w07*sDVp$u*_Rm=Wef~fhq{uK`bNzesf6Rt6^=ks7Cz2#j-66 
zDoec0t+2C`V~2SGM)ggcl_qcGtoon?e#7w~eE~5)0Wdj zeGvyUik=8A)1zk=BcWRoW?Z=AIr3B^bNedgoQ~6Phr%211L%vSEb~PcB8|(u$?ZYq z=I{;&koXLZ>7ZPu2a05P^IY}AX8q+k?O6SbvY;%nVZG=@$>qjEYOxr#NSUR3b9g|w z$-@T)#H&H?OyeAH^~Zq$aB}dCdPini_gG3TA5fwF(;gOEe11amoLI24o zbBxqRWwZFUfZfR`!qx8w|eNTDCVT{4KcrTfN9-d9F#v9t7Z<&MvS$;}C zdj*geLzcS>*3aaq{EIygNT;BZ?_%E&zryzpm~Sudn@$jI?j7{L3vOz!?@h;iy!Xsk zKwB^Rag;^b`i=hUAm>x7C#R|D?{$6@ywO)hUbc}DL+QsN-W1-q+yShkWzsXu;-jt9 zVVZw|oAP^KKF;GSQ1ZR5i~&J+>EGw!CcLDpV)}+_(wTh z4=P7RLzSLBLE)tin0Bf>3Nl#V5{_JS(^}_uF-*V$pXxwm_MDP(sjQ|^aLEB{#i5Ky zHk+Vp(&ae>&DH1VOB9it#8#IZY0WqAG3S!*pg9=_G11?tWj4?IntOp8-I@7Kw|kb3 zHAm`eF1e&RItMUKpJE5^maGFJGxQzM37F@UK{3I{20in*#RWFHMQ`Q>H49LVQitX_ zG76tzvE!Ef6i2wt3jjG*p5(!2oWQZHI%m_pv$k$sYn=n)=A*(y19LzM%1iaZd>Bv( z%mWUnQmz%b0d9ms&*G3iYb%F`nqMkz92i%fZKy2LC83MA*b$fbWS<;jAM;l>^|3iP z-9!J>ydVAY>9ZH<#hd-~=1BSgJXJZCBp0+h_y{fBl*N8p=%3=bXrle$hUZZyp=Vv3 z$2!Qx60b{xZ!En;G9|zDRx^F__;LF1-d)L>{M6=sSa>uH9aMFbmIfkT{8U;F{tgGy z8#Kp$_Ua^k_2QN2?5Ac&I>(CQtV-6C`W!VmWPFi@9MRocS=OApm2TbI^&tSh(a(*R z+}2>7q{Sz^R^9I)cb@Mn|AFY>Q=HThz%7H^0>Dcq=*z4Xm4gGwSBSD&1-CQkS#0>p zLjPC)x4%%O|A-2rvw>Y9C{PK3(SR&Cas#*#xDqcqLKzl(Xv|8dC}wgD`g+I$ADx|q zP1(`m#IjCI;#@%}=RR4`tBmz{;SQJ5fdCYEuuc2%T)|lXS+#Ghs?g=PgI1qZ4&0 zmEKuWpQ0W7Q&v^rwAj#PhuXOu_NKytd0{C2Mp%#*D9CG>)Z$DJ07U0pcn3c%?`}`m zLB^5eSI_?w9RMg*mDwnu&f)L0?4BUa05sIgS%*UBJD@602I$1k^kUwHXPQ59QDlB5 zop@U|{jY%htNN~iC(bo67YdVRV4nV1e7yqm^;YcvFB|{}GJ{rRY95>psQ6rioA`7i z%*UU@_ZA2`kg<3!a1;HNYo;FFKmV%S8KU2r<6mt7p~yrufyk5i0j2(;zpCKI1%zq6 z8T4%Y>R-!qDQna#{e#@Kysw9h4`6JfUo>2kW+pttHTCV3Y@{2mNdx3@a3=p5xF-MK z3Eqv@<3ORm@HZa-FhB9ek2n>*>SGCjzlgJ8-7`n!U6RxS!ZW^KmDI#g-Q>yl?c zjDaP6f%)gF_3?JNDgN8@F%O_|zT+!U>-XOd^Z7?Ub>4Z-m$R==i(;cH<`yixvLVHo zPra}y#v9UnSDDRI6cjcfm}|d!^(H<4=F9Z@yQgVz8sGH2f9HPM+kcT>?Y)SNKHk!l z+&=p7gLHCmnw~y?mR`SnlQyJ$ef<5;{O$RpMhO zw5_x|+i7EcC-u)xogWIPa&#PT5i3uWCcxJ%^ES131%16P-%MsPXE7b_A9*t$Wr+z5R5;_1NOW`u3*cr}XJ(AE%woEh~QA<8B&= zF6080wJ(|;fBZPz`|xpUZ|`dORlAjDcg}{8-@;bCh<0X#=mc!Qh0Ln|6`xT1J;#x( z#C(S_0WPX=^dWkl$})58a9d|n8uUNwWSYzR_k$GDW9@EVXC4I>)z?FFzL5 za-Zh1C=T?MXH~O-?;a{37JmfRxnvWE`hqq#*)A?(Qu3jIm z&wX84AE4c^vES`-ZMt}+x}e>nv(PpzH%iRr<@)AETJLN+O~Ju8)~Pq*O(ZYoFGl62 z{>YCd2!z@dVi)iJzpSZqFykkkZH>oU}WHX(G8Mob=%zYX;OGo4Tx9 zSpJk5ZQkj?t~eZcqQ2c9DlWXCpCOET;#QZ(A3aWg_(y+`c5m;94U$h5gq0s@zz3aB z1+X3h4dF&AvDKDEBl2%arF{U^IDIn|98oX9r_dV}rg9NZQL$sqMm!YnSX)?Rlb!a= zvtnCMHhg?4tr@bn51RR1(5^Fgh5v67kDFYRdRpPmyC>gdu8bU4|>ig3-0Gw zNQ+OvjM9X~$Uz(|QO&qca0{85d2E8SalR}$Z>WFgGb0?}7!5|ECB`QDxr8&xXp$G1*GBN##(D#c`c;F<5RU8Zd+JNUA1i%`&j!pA1W{P z4e)4%&$_@`=|H?u(x5=>s4AQs9~5 zpF;FwrMv~v)p!Ag?s@(c|9N;{K3*w6hC&D7*V7b5x{2Odyp+H6+p{p2{&zS4;N<|# zU#Kd(fgAPr5~1*=*vucJ@0o=uUx~?+#}j2#@sG#xmr* zbJ-P)n>tYr;V)c1xS)1jXuu{NIvM&HaRKHq%;AU=I2Ekl0>*3zQ~kJD}Gf2=VyGS8?04n+7MESvJysmppx9PX#S(Q(b|43WR;4{7An)zpu? 
zs2OAee`J_j(b;5Xe$C4y=<<_159Mg8x^93*$ zM+}*VJ7WO{007eu?Q%pJMT_F~`fPL$Ja64UI!#YrzE00y?WGgxvS658sIAGtd>9!C z*@X`AUb%BKqGc0)BZn#_?cySFvm5rgZ&_H79>&HyVz$v-OW%9+DBZuclU5fSzKw#J zHt7l<7M7%=pts1N=MSZ;97>n}=Ebqh>vY^Z_W^(f)zeZ#G_hZycBTZPS95g9;F|K= zSZk+S$_JgE&42V>zVna1%qPWo*_*Pkp1_(9pZkLb40&^#CG~(zEX$_OR+hwP7gc(w z+77Iy>>QHt=MokcezNpW|MP$U$CsCrhZblAQtVl$z zl3=dl5GF59KXcm2-r3XNk)Vtcc9cC99f?P0$d!BuobkdvH3G5>^3+K}vz272f~KDh zTofGPhCVVNzMf;@$z7vTizK3;ETgRJA<7xTzQBese`?(D*c_6ZJk1LwS!M%xNx9or zh$@lqqEPv9y@W6>3ukU{7ZP~t!v`ubBZQ)07c| z2lm^5I0XarTxZ@Bn6N^W0h_eYM~6cB@Xz$Z_nfumU4OoE2=YClB0dW_G;ZomwokfA zRx}Yf_jmP#kTxJ;=i1DR)R&J2cW?ajh6A=HrBt@!(V6-Hff^}kIb}%Qdl*zFOZ)O* z?{*+J1Bc>!aDhIxG310l=%>>#PW@4r3()VnV_;O-d6t0-j5#4=V4~3S1Tq_-!-}1l zgpKSvK1Om;fg@+A1~P!S@R_(d@7z9TLHL!f95x8hv!l{*bAArb`5G4vN2(b>ww?Tt zfAJTXf8d_VWb`or`Cw1ub0v%G9$u)wv)gGieKUX`yb8CGS^jG2YU!_o7v^i9Z5q>uI&@e6=Zzc9~n!WO{RI?T*)lpiVQJ9_AE_C6U@lBo^~D=?qaZV`?D9e94b?kOK$T~ zZg?WU=z<+tLGt$kOgx97JjkD%b#O3!Dd~huG6`kV_Fqg+2w%+0f)`VdLpV0+aTha( z=bdnU{}nij4b0&yPzbN)597gQIy^c|Pk#MH`pf_E|1G6qHQnC1l^#EMm<|r#q*r^d(!HJC^vS26 zq+9oIr%tDpjt=+I&;I&n>G_jq>GwbRApOyw{4ibcCgTV*NrS;zdi`oY{l#DYBK^(Z z{w96-%|1sF()Rl8^n1VmY5MH@KS)3Ot6!x5=Kt|$>GN++HAlWoySt6Fy}OnAX9It0 z+utVCADb{JU%chWo3!W+T&c`ky%Dklo+|+0%T+hg<_S2JNzoE~!z}LhB_L^BZ;8Z)`gKJ9BL|LPs zLII_n@dg3&F}{<`#y84poA0T$8k+lGq;Buj8~$AT?CZsqT&Xn=TUH)i$0p@fZFe-} zjmohUWt3zTT_l{o)B2rT zmNE1JU$d`rDtU+@LIzkT328;2kcSKlUJ=jjTmNq1isb9ZC6}~)`Y(UqAO0x8(y|n2 zl~$vOQZC6YuJNY-N-Hh5*HdF{GcB!e#9PB0?xwONC-4$IivzUGqa_rc+Zv1TQ)lpJ zF)#R-X6bS2!Bnk52ZFJ{!iQ3MF3iG+=G0)Ru1p7^7{K?mu}!}WVeWs`f70&I-}s|n zV-XMZilF*jknExl@CdRd6ye0L!#t%Z7$yoB^c|RMOE55dn z1;yd$Ogy76<`6J-#~b`?E=x`nhpap6>nW|a(n#&2*NvMm(EZpPWy7xAX1BB={Z#V; z4oNJpHdA|pH|bR`!p}EFM`{DKqkccGJm)HRRgBuM;wlf)kY}&kOQ(k{c*dqHpQB(= zN>PaHPvb4Y`J27;+l-aw*HfJYd_8(gHb49f8B z&a%h`Uq_RE+JF7VZ=mzd$JW}i${sf>G+L`}yW9l9LLTd4amYpvBvd#;4r7c)<|%_4 zs!fC!886RQL1pC^`rICoQS=D%Q;5bzZUMMt5t~CAqHkIK>Z16@4IT?>&)ljfB4tMD zqBlqr$sup;`!gPD)2zGDD5xJ~mzyP6k62Qk=)>;TQ*B zWpkj7bsDZBr5zU;;FE_NTf3>P{&j6@UG*oPi|?^0$mb{cHfK|HL>r~Qp7_uTee2Qj zfob4-rd%~k9uu`8(pQ;V=VnuVUp(fez4KLX6 zad|cBfddEh4J(o_lKfa|)duSWy>B%VVA04D;&tKVa6G^+X zp6=egleRV993CHOdU~1e+}#z4430&AsPY2`knvRha=C8p3hyd!)gWH_L5GX(hB6dh%HW55)Z^N3zW#_yJjQc{ zS3o}Cnu7Z^(+?;O`lF-pGm}_Q($4#X_d4^wFjvmX%;!^J>Tmk5=nvUkQ-6+s1xo%U zjP%9EuNXkKo#E$sob?Uj=KQLB=gMVX;}4WL_{0Bcy#<6_C;;$*erk3GEbAFZF~&0& z?;0@q$RIqlDSe5|94*%a@t6y<2BLU1`73kW8AGR=VOkf{_7|V6oljKE4Pg1VL;v6h zpP-HldP>&JDI$SDcD4SJpN#{6m$-`&xNIdZz%%_1_MDkUdOOs3_PeJiPVlenLDY*- zcx=o5jFpN_7XA}lYAnL`uOs=1@{`^hs(2|a^$Z`G`7j@2ezv$Mor1Z9CT@`u8t8qv zT3+*LW!P+VG>4Ebv$Ezn+GNblJskRDe#lr3uc0OmOM(|X9>>{KNB@9LE-Sex5BlCB zJ96iuDpc}76!A6o+DI7s1M`oFhdelve03$D2VwFaXfWpEMmvCi%$p;^BKnISnoZ4*HHSj2VlGYDz~>v?5D4YVUKksjP%zY7(l-&ZnJxZv zvm&~*Z#|Hauk^3rDf>8-%{RGK%#oal9C!u`yq3m%igL;FI7lIIqvx9!$bg<}YnY*f z5}zMJ@#i`Fi4G2#ps|*E6f68}-AOT|PWgcKKlr_WkhZrrJfG#|1U^}~-sYR|t;DzV zc{86Y-&qf7Xs*dEg6$5UNFbaIWX*%61BD(Ghh|$!+gxp@c57X6gsZb@eQT9)>1kXe z-_bmAz0G0lb?GV6-&QeAe-3V~)$zQyy)L|@Q@%~n!UjLOVncdDv)M{pn_Kd4sZ6Zb zwAGGySvd~J($dg@R~482&BhA!bF&?K8vcBiPBbcSZhTl>#cw6u(%h8KbRpYbQ{fgz z@yj|L`i1I{`h?Hk6Go`0Ti1cy;&HUE-0sEuWyw~oeIR!!ZAjnQ+32Ko&FfpzJyxX? 
zy4H|Y0s0}s4~New4|DE`=IiJmFJ7H!?kt@~eF*$US3owY;D+=9_aBnomdd=TdfeVr zKApDoUg^N-+VENYhu6H#?*q(=8*2f=WEoKXDL(w6jyuwqxiya$Yp5GuE@(DYFN(v> zXB-G_fm5=8zA2o~m;Mc(r3A8sr)d8lLq|J;nBC4aWKq`Q;0h?J<~9VDI4a| z*yBVyttZm45e{EN=nJTWBv1Q`kqRR`un~-!6luz04xK{*$5lQ>(Q#wODF3Kjb%qLq z{-|ihRfoqS-Gl`vRg|xEWFbR}kPihY?m3-?4&-eFmdgy$uccmzX2 z5?8}V9(><}j7z5#gHQozkZ~tEr4N8M3{N>YAR^^((o{re;2BdATnF{Ax5ax8pvMe{ zXo+$LKjp3%s`{k($f#(G6Mzasc2vhq5=i2~S0>)%ms|0CiV9n}R6~xx$mk_I7Nxj8 zmV8v-k|RcqSZEDDpY}l}6^D44GW&rOGGg)37(iX+1FgKNDE#iE9197sm2F_FU7pJvK!l=JO zPt;`{GNKA$U7i)1nO5YoJWug+cn3b_JhJ>;$>y4Z)Zk4MLn!dnnnP{hVAPk(v zeg`Ojx8F<$la)vr?f>04a@`O$Lp)!T?pg)}|E35^d1`Sp6rM-<3;x;qo3`iq{0qK0 z+xWsc3)gyH1J{((a5a6k^_Sr~{>?!_Q*4^S6m!irbq4V4$mB5}nBhJ0C+)QV zE_k9mu{cmelO6W;PY&=Rkf21Y5)`{Y!9Q=)|LQ3}T9ySBk8PE>*Fd35mScI$1a3}K z)nh#Q7pvf~Pia0m7^sRSsJ!c*Gf+!dgDUS_K2t$nMU1YXyB?1rr!gwmgB-a&>ZL1& zh{TQj?ID{9B{mu3N&AcE(sdFS1t|{;9`AFuM2qap>sjfei>uW!wg?S*?OKfGb-O{PUDLQ)(? zX9I71=OW#Ih?24_rE*P$5WgfNON;RhzL-O?2%y?m!BB|VC};jn zas8I&(n{Le-bowVTT%);>5qQ+S^DhL4^#K>Fg<_sDm6O_-)N^#zyERi^t11!yLWb_ zly*E%XOo>pB9uRB$s6Ni-Ztg5Kgu(6W)$irZzl15IFx8^PJqE|dZH9trbQ>^VRMCR z`Js2Y!3p(FeCth>!`q&0k{<5w>p8%iWio7{pullh0oo|va>mI6hoaB=yd}w7*rRmJ zH~vL8r@EP^^M)$?;krqh${DBI<>uf5pCRBRINyOoC*eTyiPC@j&2QDUElNRerMvg< zi+^oz*07mI|0!;#@x?IpPP;xBfdZ``e;Qy_G9@ikL#@i{1z!X%1Td~dSD!(nklgs@ zDe*A+1@wtX=Xn#0TjJ@ZZ1viTuZ%NJt_EL}g4vKF+K`gJB1Lpb%IAV;uuuT9$KvUP z&B7s{SYy+$FMf@zPoZA37{FVm=t!0;VP`%LAGrZSxkf7$-OvdQ%t6tC=)&Cd$bp_$ zHf4Py7e+kyoEy94tAg9S_8W@a&jg)A?MpaY8s$h#N5@}iTC$_?$b zS>&9>!<8l*|H!tAZe5KHX~_a%qNnLAg(AWJcq_x!N9w0kB7^GsKPl zBo;_nD?t7zTWkRMQ8wj5fTo8w1CT+Wq;p*fAGaDJ>k*m0jfE!BDVbVR$SH?;T8hU_F!a@}^)*T>o~)z+S63=N8|Szp zDIJ9BAptG+8iHpS{F5NyAkc%RzhI zHllwW4@T+r%U9{-_#_=29;K6$)70;txj&$H=_6aibKiMkbmFmQnFY>u7FE^Xsf@@U2i!OmK%IKiei*Xa6CM`sS$GC7H$pHLnBSt8&41U8 z>wxY|b*BbMn_)~D)2I00n8pF$fI%}B4XXEL$rrak(htHXzUezw-n19$kOL$j=4~Iw z4zFwJZ!qe+pWWElN)I1APLCcuwEWW7cg2Tqzxy`5c=bYgssB*D56*^OEaqj26^-}f z%aP$L9fc-1zq)D@D-s)qTEeNz!p_MmNR+!`=_LX1#TIUK4g^M=Q<$l8oSaL z3zYEph0ORX@VKE@IPJtGw?q7yN@XRD|o04U>+}J!_IiYPx>l;KzW0a zc!eKf=J96xO8phc`iu>@;xl-!GeFH<(K`oo z^;gP!73RxzQ~l+5HN3=N)11vYz@@M*k&f^XJl_BC4sTJ$tl0kBa9f1;@hBqkyA zdGTDz@eY{DJY>!=$G_`AaTRY?_oau#UB{c@3QF~LGxb6Qa9oqta5aqEXeG^Z6_d-Y z{vUKMo|EZ;ysD@{WwaAPDQ~vL^gA2z)q! zG77xE;X03UdGlX1lHU7dv7#pMlhrB^!UMkDFaea)dJ=y5{Me3SVlaWON5RJMrV*L5hs zThYp1B=lo1F6JBRC^hAc%~H8#M1SB1G9KCZqw6AS!It2&Uy@Wp4n43H1+SJA>3iS% zINiH_taxc*g;4w}2cM{*U z@105q;f74%M!b;ukQwTP-&}#z2Qn{yBkr+ZlI2zSeLz{^e41cGI_3t4ETl`c)c(<7 zU31VR-HG`zZPe{l|Ey>5a!0o}OfO!ari1R-Ij=HD7jEBDhH#^YtNk@qN1Ni`?$&17 z+1&J1|7^~4xC7myE!~C#7wyib=HYGEAA0OEYiZ(T=%DCnZ0K|AB=w}cREIXX-lb5g z^r|!QbXELDe?@mty*yv~r~ldi`o{~4=MNEFHq1jw7)cn#HW~@Gm_K%q^V;^&qD!*EIuTd8is3PME!M%b3gZsM0GFu{FX- zr(k?gxKQpnnSt^iWze(yF-vkoFK-m_jZ-?6I2|;J4pV?4!=K9`LOocxaZw(O0$f6b zfU<~c;!#fdMtng&VHfn1Mk$08p;PSeGf!mUlp95qo|Ie|T4+qLC^r64=)_TVdI-4M ziiLv4gyi%6N1;$$8jaFIuNxw{kx3y8;F-E154m}ki8_KdXrN5c6=k54=Q#lUDjy!! 
z0*M2D>WB2@c^*JOI2MChJk1Wwl=NQc*huILA!9+NvfYu=|W#9=vc*Q?j ztmD7~VmdNz#rJ}wJY-maucHnpqZnzzL^uet!NOHf2i@f7DAbWG(mHyU7v$1zXh)tR z4&e5$Khss>XPBNqFBt|O1K^{qiWu3RknvnhU&;^wZ{Z(qelVl~ z=AY{jd~sNwP)7f1hkPlg!mdj?2jxxS>0g1IzsnGieGImpA^ZhONU4wfobl%bQ&T;c z_!+7Nft+vdR|0A_C|u?n_GK>^-Z`Jj6sY^j%+pcEfr{THZTP!=+Ewra;NhPi8QP!5f4hlha!Wp8`-E|J z+fPv=4h{kB)#3Ds)V2PdC2=Dj=%d|CP1TM{{gig4ee$1u zZRTO7{TDVF5AN(&p05F?pBew;cJ^{RJ>fjx0M0-6chslbJbj}B{RtNAresR=G9cb$ z7@ww*r_QQAWzB&@oR1tDb8`Y2px~o?nf|xb1Bz6{4}&b@s^}|W`bWHtN5+AJT7r{j2ok>369kW#LEvvwtK7Yc2JqB)xp`CjIU`A#?r$oD_}{q*VgKlNLWfAOz=nqIzot@-gq+P<}6Wp`y&N`;gQZ?2(SE;D~P z_XY%tH5=n>){Q13RRL}KT#AW!q_VaeOR252_eATT{xAMX+Foy`U;Ofm^!evsimn&w z_!uRwXXSEJW!c`@NuPZ7{j{~dk&aFeQ}^uD8^I0nX?yG<_k-tMX zQF=Ccy$~cti*mAg*%e>?O+nFx(#j?xs#w-eWR1^-M=fAE%*)WlkRbP_2QLi=}BHL)2+LA(!GyAN*n8~)aNQV z@tyDau_?r7&5llvMT2N;t)&I!rHV~U&2=k;(dsAQlxYLDIL0|vY_lE&lA5P(YO36Gx1b%cP1Q?Sr%I;FAHc5HqF=CY72a( zV3Rg7QZG&s_w=;qH+EPw;_U?9c39t7Pi?jN)mGC(Lto|dVIp)e@px-%Q~lkR%r0sim(gK_9qM?uMs-NN8pL|BZhgn2}MBRJ=hxIesh9s6cq->(jbpZ`N z0HIDV=5zX^ZMiJytmvC#J#$#0nVRY+kb$Kpi*=2(C|TeD(E@L<@REf3-{IctG}wQc z21jqw#keb)B+4R)P573^k5*?rEo*FGp=*(iZoV_Cwm6i`_9Sy7wP(KLPha4fhsKFP zPyUi;Z(=K)IDPev?$1|RX>DUGZSHKPHDqIZH+A&fQh!5#pdzX)92k%cscaYL8ha#v zy_18q_vTI7-+$vi+zaIx#?2wTRKF(L)>xbqtt{*vo$@J;zUt(}8~Gdnd-G;L9UdI| zlNbQ~F9&M2cDH>6J~a0R8Us`|-vFV8&jGM==f&Dg92fyuNM&loT7egCp?9qIO1q}I z5FFJFZG}T@Uf5NAvsg`B`Xc(nk=iX|mKR#7ekCRkZgW}H+=219r*?;YaBzU}&@!)b zvhYV99E4z;TVLOlpT=9YC(6q~G#1SF_72nMzxlQ12hY@xHGIp=_doe4@rh2>I5?yT zqQT%)d8;1ihs9fDPdSB+j3S3}TSloY(ZQc1;qqAH2SeO`CR=@Acnb-LB0&cFTKoXT zcie2`Q@L$;4l|3R0G)0fN*wUd=Q|70k4(p+h)bT75I#h``Z=4c@ORHYfL27DVhUgK zvv_bVxPKx}bAU3!B&9D}AVh&B8nnk(YXU zmqAgc@(|CK!7H>1YWi0x=QNLN;?MZuCg1A-f5NZBQG=p?Rx(uZ<+L}|Uj@_nF1ux} zo^QR}Q`_y*@f2UbUJoS1P1HVqratMRh+EdJ>82_^tFNK=jP>sb4=8p91_&hrR@vmmel@WT>Mu|Ll;FgE0C6`A_-p z2sOy~l!w(;+>&wgO#OQhUu?q4GlsNIB*9gBy78_$v z!Thtpm6lEAUD5)$OMSc#F#DdZKS~qp6>tS!>RCLs%*huB`IB+B%mI}%^%kjJ2VP^= zQ`7}%71SmO+)_T zaA9md0*rCEJttE}jPuqjkX`1|VFt<^-=2=Bw^-Ub` zSpMj(e1eI?nBMbK-ApvE=0#s#Ah=Zf^Sld6l-7kbFb+baaM3me)3Gt)5El89HtNU+ zIEmwV4Dq5|_7dahBfZ(Ia$+yiKG7?v1GpgiJReovGKb#K-2ao0KTYeM4duZn4cW}i zJYnOv1x@0U^bd3@bQkm!4mmTg;{XACiCfR4t0;~44t-d-3=V*RNk!DJaUzr1=d)*(i~TFTj%FFsoJFKi1m=Nxz8Mu^&{4f(3dF3 zDtSW#o6^!l*}RUIU!+5MUdW+&<-=ziS4FqwfP(?iXCSw0qOYm^Tgrpaa-DYh>>+!k z(r1+hTG-Q7Z2hSo!D~*l_Cb44I~R}asW3ElWOzmTeOr3rMyDmy@nHw*#WxHfr`{hH zE+s{5sLLViO#MkZJRPK$Z%)&R=H9gBruanKIEbsrO1>Z&+?0NGTe|E9w+yUpNPiNq z#V;ScP#WLZ&|Xh`I+6nc=-JQ#U*IKccGM9Eim4k6w_wm``6dO`H8&`j|Kf!Y3CQM| z+c(AM=S%4KfYK?UnUF9lI#Y7*szru9eJ_B|1T+u@PU@ zxUf-ZO^o?TO5ui{%=4)Y{R(%!#LFF+tB&;eVPVU-jKvUJCCM}zgd-=(F!OK~j24Dw zV_v)?jet6;KOD`Inb^ zmh5mKAU66HFE7UMJx~db(-O`?C2v4@k;aQPAy@uf0PV>B$fIb=wE6U)2M-y^LyWYv zELnEl@VXDTtA9q-cx?;$VBoBe*~KBxUCVcv@uu{C!WKBOot!f z$%^XS?U!f5vmvf=)WLO8gZz-QQIZHJ`7|FMiu^%{{O9rdc}iCx=D84_fdSPJaf3`` zvb2MOH``VG0xBDWM2S|#e;Y*nr~@?|o?Q%nFyN@u9^gEP19Kb~Bo3;<)pXNH3UmLG z9}UnF9;PYtCwMSxBEY@X48ZEjuT)$ON*K25)01s*>iL36nr!tak&hmXa1KxP9Q0Lr z&edP({{nfUx7{FD?$VYk8nR9QW?mIM zvoI^4%6(N3k#vvCg2hqg@h~FxiY)1&2c)9)g8ClT3{VV+O7{~zOah%|%#~pA- zBtf+07&C~2KV^r)07rHB)Gy)zQIevthQDWU`B~s(dcdyC{BzBiAe2qq*%U{4ew!)0 zFrH#yj4;-pftffplNsqh`{lR-(B~gnDyQ9oJclB(`60!CZy&R%zo6*^iuBULYT^ys z^{wsHKklV(e)Cm&`Q4M$ks`)h@f;l7QP{n^4-(&zX8QK!ufIxv{@?$<(wo!6G&~=r z&p!GzJ^J{Abaui<-6VbS)zkFVcQ4Z3K{s`!05{g8cx*MK#7RL|Y@|Q_lRrpz@7+m% z^_M?OUw-{<8V!b48u%VJW#Uw}bVxySr^EAq(l%{+smYfA%la$=R7Vt5GmHAwATbyf^5l!^4C0^x5;ocZb{So9W@B zN9pdZUC)=!`hD}+*ZFf8Q*8ysijDmthkB*(p*XTp;e`bKvFXBs0rPFoqa=IGvCl^r z-b_KKVROfGD@`<;R*HF4NJCs64I2-Zk8f=WNNco~F*}z4oi26=e)_9|sjXO3+ 
zS;*jYIH6oK-jY(=k;2C{g>2IJZFSM`;?*6wY#0xI&CZcd~SdRfa9b6I4pqzEZUf!`t5-wDfaEQ>Iy}1H7%`5VfVton%u3l zD1~uRWnPdnDI#nZms@E`vJ!9jOCfDaAzoWgD>BQ?4VhNDkV4Ob3py9N7<1x9+2j5& zovXgM)|)qe7xTQDa>QF891sAIHR{goN>QUu zrSpxEN9rgRj@T6QX?K;6Yxvj*^V{BeL(Fq(#Yb0QVSza!vO<`3v5cQC03FK<5Ry@c zbF+lzIuOmF0n566&_CH&=M8$~!V6WZU*`QBTtKc^X7FbM+$Ts@QQE}=+KLw=nB$8k z%L?C67e9pFZTwR0gYTjv&usWN8Z3tCS^2n6(Odxi&>O!V1CYV^%@r%T`U?{P8GjB_ZBtLk0`&*3 zHQp*0ztGrxO(Y9Ms$W_zT%+Bh-OQynkK!&V{ zSG<|e!U~@YAusf}ke!r<95P~4TXlk5tobGc7Rp!zLxxm7`W@2mC5z82N<*Iv^}%N_SXi@Orm}|| zkSus%dXXuBdeRREV3x_dBYHV-p!P>UK0==r@aEfs`oqD|Q5qfZX$(3@y`#eX}^@cz4&Gq;kDB-9CZor~^)%5a_r>f?E1$#ZwwffCYYJPzrZ zw}2Q!LqkF30CZeA^E4AUu5Dh64>goS^=PD@x%r zQpo2Hx~Cj|i$fqy-sDn#_wK7sB@+)dwoLlx={H||o%Ubsx&M0n@R9h|_P1%dC7~ml zSb`9hu10PNpdXZw8CY835U>SVZJEB5yeXT+Ux8(ws&dPNc@k|_zo3gWL2ynWIU%tF z6^D5;;Tf3Elb+}SY6C=f`s)nqFE|Qh{H95i&V zMBG_pv6cLY6LJJVzKNFsZQ*-aZ<{yt@FA}u_g+&MV%Cu(DfB3y!kg`?oRN-=scECk zQFAdjcLF@l5&RIewSj9!bv*0R0bgh+S`27szF%$2CEoq)_&Z z*^H;Yy=j5@u;ylZ=KJP+v!9cGbrUm;@p1j*cz<~AcY zA_8s*m6j4TI8V9IA)rs=GvhxAxVGN6oiQiSTmfU!m7aNm=fiqtqcdI{l&v&T4$2_> zLKSmQ;bN|?v`*u+VeBWgxQkpE=K<1jHH5$2I!{b@f;r=Q+0=q!n_p2O$71wIvdf{=~T9n zsrZB%v`Rk|u7+qqXR$m>ChQ)@&3Wj%=v}mU>jDbrFfh|j%a7`tDsp`okgJKj8GDtt z+sMdzHhXpWH&m9*PCM;L_t=!)%G}uqs7sZ%%&+6NiCBAJZRYH3lJq5$On%)B$K^{1a z;PnmZzVIjxCbg_Pv7W&p?zmAwX_YVfBF6j2taB_bK40kn^#A%7lDr?$5WN5fegs@f z4fccu9fesDhe0X?v6$HN)(Q(~04J7cP&6qP6i}kvuslG-DSg6ALywLHuEgnRxXW7~ zbkn%G2E$DAz#x3oprgU?j6*{2Z@@qoD_k@vzQryDEKW#qz6`+#g3O|Gs z9tijziUr^0kF-+9BMQ7-3RRJM821bnnXyL#mgRSaD z&t-s_g~HR4HuK-lGNvPFsD5C!ZcKWjQ{cdgWi#VsC~|s%ucs6#;BXO9;DTLMJp)#r zR9=_OdF4QQuNv^}ALp%tJ4E5sL-1apda?qKO+Y>{Lk#k`;0B2jSHPslcFC{QzaUp@ zK*8s_1c(QoVgqs)IlBgEb3OqES(Yd8<$RIL%owwR`iJJlj4$(H;XLZ3$StQmXJJ~l zm^?+i3e1l}cY#Rb$(;F9hq<_>`#qqlx2YZNggz8L;V%RK<*wLY@uTFE;}`!0wGs0t z9o zZHcxJ!kcZ1P^TTp_>4|@T;YGvF@@641r)piid&n{T?_N_L;o?;CL(|1rS6uP50<)7 z)GBYvq`b&4*9$?+Tlv=VNW5NM)clS4nBuK0x1{fh_Q_Iu^5r+_tH1qRO2!}^op#fm zySLKL_H7lPx!1_o6My-|7wPA}`ek~3aGY-6-$|c+^qJ?lCx^Uw*-t0Er1CSz#Vk%IlNxwo#T)I;5;CFC=QCfAj#7n!i_%1S&|Su=_HOqi^}FMA(mhSx zvtioa-cGINq7~@TV4RMRkIffRWIs-Ha{!d|9NK_RPG|!tzr5Yeypk)MPuchrE>3-~ zN&$R$|3UirlaJEIW;^Y@-cQfI{W|UM@2CBPSLxn^N9oZ=kA$<8Hg@i$+jsA#CivBs z`aR(jUdq{*Qo1As`~HU?r^ZG*sS+h;G4Ev^fQ`Czj=Y6#WKdxAzu53r5f$0Wy?Eh8 zh75l8w=${w(grkl(0qAvJ`irn12+_imLwev;QQttHst2EGU{IyMMGfc%rfMI-0BmM z9nW(WmwLc`srKh~By%C$d~2RG$csPcmc)OxF}|(HX4Zng%k2_N_LK+hPc|emmp>o( zRTt8eILJY`@?~DkK?dG}=Ue&?R~-WIgz|V(m$~~=SzJfYDMf}Ue zTeHhvEF`{fpHu$h!Gf>5M@GGA*&llWi;D2)DKbBnmYG-cCc6knDb&1+@pomZqel)L zAWPzr>MP_`G^h>mJ%1LLlv?eL>+_d=t^a}APFxYbWI5%Vft$PA(r1_5XY7{)Br6N~ zHjZ7>1daRqd*(IuJ{-j1ANm@~1--nEi{E%WM3ZGug9A66K#agRJMrR9d&t=jggXyy$KRt@+@e7DD@ujGwBOMLfY zlk!|s`M^&b^>zA^Bg>fBxFj8#dZz6&)gm3`f_C`N!U}$D>T-R3_w2;-#Kv)7`Ss5@ z=+IDK(veOsJzVXZg(!I2Jvmi8^h;G4o2>F4gy@p4x!?xg~)E9Tz?IbLwj*XUC%PL~_Pqv9`*&k=E4bH8*zB(soA()i+3P zllYJ%Q|IE(P-6#W8PK<=EiI^jX{bNub_4Y23$>>KhXF()H`$CQ1GhbHSzu!v{gt}s zKnS-$G}j`ZOAbm2Z@)j3TyQIk>PzK=C&cH*8L`&orM)tipx@EovH?CC^wYt<%A>UO z?|x%P_zsSa)4s;feouX+^o6RbZVEaFalr&F>goNTg{IKGWT zct-F7`Rj>)DzCUH*@Jf+bm2K^Eb5DOe3E0BhJ3GIfZXw!4Q$@(AM}+@^}|gF+yVd% z&!2tgH;v-0yTx?cJx<-DzMdD-fASyw$LT-#-~4+bM*Y>N|!oNU$eJIuyqF;`NA#h)hmHTVqAiKPD zDv0^qRA1*4Hj%on!{cYS%28+%nH5y@GT+SrJuXW(M$3T05SV4uu ze(GBt1wZ9szNiZH$XMsNRX!2!wG9Dd25?dERr6)?C7t9l(-Y;7_$3cXOtv!=S_LMR z6RU!0KDH{HLC2H_)6_YvP7eT0hI&jvH4l$^EbAypZGKU<`Do;a5DJkpqF-=)8VfgQ(LQzXXLJ zCC^KZ%bv$G*LQ^mRP-wS44$0dl&|6+ai?T?UUgNT3(V0&|10!lLDK{e>3!o@ey*qR zK9l2X)<+iNXCb@z#WR7_gW}^-K$K5&L%E$6f9WsYL_b&kDMQ~Vs<9dwVtnFIp+8B) z=ZG}M!IxO)*Ibe}%P*u~YwmM+xSx(s4)dIE;CU9(6yv#2YFsrR)ee}x3qyoiKG2`? 
[GIT binary patch data (base85-encoded binary blob) omitted — not human-readable; apply the original patch to reconstruct the file.]